id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3293925 | import requests
import random
from joblib import Parallel, delayed
def register():
    """POST a randomly generated fake e-mail to the registration endpoint and return the decoded JSON reply."""
    fake_email = ''.join(random.choice('abcdefghijklmn') for _ in range(10))
    response = requests.post('http://52.59.7.147/process', json={'email': fake_email})
    return response.json()
n = 35
r = Parallel(n_jobs=n)(delayed(register)() for i in range(n))
vals = []
for v in r:
if 'ip' in v:
vip = v['ip']
if vip in vals:
print("!!!!!!!! repeated IP")
vals.append(vip)
print(r) | StarcoderdataPython |
1607137 | <reponame>DazEB2/SimplePyScripts<filename>exchange_rates/banki_ru.py<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
def exchange_rate(currency_id, timestamp=None):
    """Fetch the Central Bank (cbr) exchange rate for ``currency_id`` from banki.ru.

    ``timestamp`` defaults to the current time, in whole seconds since the epoch.
    """
    if timestamp is None:
        from datetime import datetime
        timestamp = int(datetime.today().timestamp())

    payload = {
        'currency_id': currency_id,
        'date': timestamp
    }
    # The endpoint rejects requests that do not look like browser AJAX calls.
    request_headers = {
        'X-Requested-With': 'XMLHttpRequest',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'
    }

    import requests
    response = requests.post(
        'http://www.banki.ru/products/currency/ajax/quotations/value/cbr/',
        json=payload,
        headers=request_headers)
    return response.json()['value']
if __name__ == '__main__':
# 840 -- USD
print('USD:', exchange_rate(840))
# 978 -- EUR
print('EUR:', exchange_rate(978))
| StarcoderdataPython |
1604832 | <gh_stars>1-10
import unittest
from Card import Card
from Player import Player
from Shoe import Shoe
from Hand import Hand
class ShoeTester(unittest.TestCase):
def test_gen_cards(self):
    """A freshly built shoe holds eight 52-card decks of Card objects."""
    shoe = Shoe()
    self.assertEqual(len(shoe.cards), 8*52)
    self.assertEqual(type(shoe.cards[0]), Card)
def test_end_of_shoe(self):
    """end_of_shoe flips to True only after the final pop empties the shoe."""
    # NOTE(review): 8*52 == 416 pops still leave end_of_shoe False here, so
    # Shoe.pop presumably sets the flag one draw later -- confirm against
    # Shoe's implementation.
    shoe = Shoe()
    [shoe.pop() for iter in range(416)]
    self.assertEqual(shoe.end_of_shoe, False)
    shoe.pop()
    self.assertEqual(shoe.end_of_shoe, True)
    self.assertEqual(len(shoe.cards), 0)
def test_removeCards(self):
# removes 2 cards
shoe = Shoe()
shoe.cards = [Card('H10'), Card('CJ'), Card('D2')]
result = shoe.removeCards(set([10]), 2)
self.assertEqual(result, True)
self.assertEqual(set([c.name for c in shoe.cards]), set(['D2']) )
result = shoe.removeCards(set([10]), 1)
self.assertEqual(set([c.name for c in shoe.cards]), set(['D2']) )
self.assertEqual(result, False)
#removeCards should not remove too many cards
shoe = Shoe()
shoe.cards = [Card('H9'), Card('C9'), Card('D2')]
result = shoe.removeCards(set([9]), 1)
self.assertEqual(result, True)
self.assertEqual(len(shoe.cards), 2 )
# remove 1 card from large shoe
shoe = Shoe()
shoe.removeCards(set([10]),1)
self.assertTrue(len(shoe.cards), 415)
#remove a card from a range of cards
shoe = Shoe()
shoe.cards = [Card('H10'), Card('CJ'), Card('D2')]
result = shoe.removeCards(set([2,3,5]),1)
self.assertEqual(result, True)
self.assertEqual(set([c.name for c in shoe.cards]), set(['H10', 'CJ']) ) | StarcoderdataPython |
104052 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 23 13:11:49 2020
@author: abdulroqeeb
"""
host = "127.0.0.1"
port = 7497
ticktypes = {
66: "Bid",
67: "Ask",
68: "Last",
69: "Bid Size",
70: "Ask Size",
71: "Last Size",
72: "High",
73: "Low",
74: "Volume",
75: "Prior Close",
76: "Prior Open",
88: "Timestamp",
}
account_details_params = [
'AccountCode',
'AccountType',
'AccruedCash',
'AvailableFunds',
'BuyingPower',
'CashBalance',
'NetLiquidation'
]
port_chart_lim = 600 #minutes
states = {1: 'Long',
0: 'Flat',
-1: 'Short'} | StarcoderdataPython |
1651608 | <filename>colorbrewer/__init__.py
#!/usr/bin/env python
from __future__ import absolute_import, division
__version__ = "0.2.0"
from six.moves import map
"""
__init__: DESCRIPTION
data copyright <NAME>, <NAME>, and The Pennsylvania State University
"""
# Copyright 2009, 2012 <NAME> <<EMAIL>>
from collections import defaultdict
from pkg_resources import resource_string
from csv import DictReader, reader as csv_reader
try:
# Python 2.6+
PKG = __name__
except NameError:
PKG = "colorbrewer"
try:
# Python 2.6+
next
except NameError:
def next(obj):
return obj.__next__()
PKG_DATA = ".".join([PKG, "data"])
RES_COLORBREWER = "ColorBrewer_all_schemes_RGBonly3.csv"
DIALECT= "excel-tab"
def read_colorbrewer(iterable):
    """Parse the ColorBrewer CSV export into {scheme_name: {num_colors: [(r, g, b), ...]}}.

    The export carries a ColorName only on the first color row of each
    scheme; subsequent rows keep appending to the most recent scheme.
    A non-numeric row marks the end of the data section.
    """
    res = defaultdict(dict)
    iterator = iter(iterable)
    # Consume the header row manually so DictReader can be given fieldnames.
    fieldnames = next(csv_reader(iterator, DIALECT))
    reader = DictReader(iterator, fieldnames, dialect = DIALECT)
    for row in reader:
        def int_cell(colname):
            # Raises ValueError on non-numeric cells (used to detect EOF below).
            return int(row[colname])
        color_name = row["ColorName"]
        if color_name:
            # Start a new palette for this scheme/size combination.
            num_of_colors = int_cell("NumOfColors")
            colors = []
            res[color_name][num_of_colors] = colors
        try:
            # The CSV columns are literally named "R", "G" and "B".
            colors.append(tuple(map(int_cell, "RGB")))
        except ValueError:
            # data section is over
            break
    return res
def _load_schemes():
    """Read the bundled ColorBrewer CSV and publish every scheme as a module-level name."""
    raw = resource_string(PKG_DATA, RES_COLORBREWER)
    decoded_lines = [raw_line.decode() for raw_line in raw.splitlines()]
    # copy schemes to module global variables (e.g. Blues, Set1, ...)
    globals().update(read_colorbrewer(decoded_lines))
_load_schemes()
| StarcoderdataPython |
3368824 | <reponame>chatto-hub-test2/github-permission
from chatto_transform.transforms.transform_base import Transform
pipeline_type_error_msg = """Invalid transform list: Transform {i1}'s output schema does not match Transform {i2}'s input schema.
{i1} output schema: {i1s}
{i2} input schema: {i2s}"""
class PipelineTransformException(Exception):
    """Raised when a transform inside a PipelineTransform fails; chained from the original error."""
class PipelineTransform(Transform):
def __init__(self, transform_list, intermediate_storage=None):
    """Build a pipeline from an ordered sequence of transforms.

    The list is copied; adjacent transforms must have matching
    output/input schemas (validated immediately, raising TypeError).
    """
    self.transform_list = list(transform_list)
    # NOTE(review): intermediate_storage is stored but never read in this
    # file -- presumably consumed elsewhere; confirm before removing.
    self.intermediate_storage = intermediate_storage
    self._check_transform_list()
def _check_transform_list(self):
    """Validate the pipeline: non-empty, and each transform's output schema matches the next's input schema.

    Raises:
        TypeError: describing the first mismatching adjacent pair.
    """
    if len(self.transform_list) == 0:
        raise TypeError('Invalid transform list: empty.')
    # Pair each transform with its successor and compare schemas.
    for i, ts in enumerate(zip(self.transform_list, self.transform_list[1:])):
        t1, t2 = ts
        if t1.output_schema() != t2.input_schema():
            raise TypeError(pipeline_type_error_msg.format(i1=i, i2=i+1, i1s=t1.output_schema(), i2s=t2.input_schema()))
def input_schema(self):
    """The pipeline accepts whatever its first transform accepts."""
    return self.transform_list[0].input_schema()

def output_schema(self):
    """The pipeline produces whatever its last transform produces."""
    return self.transform_list[-1].output_schema()

def _load(self):
    # Initial data loading is delegated to the first transform.
    return self.transform_list[0].load()
def _transform(self, data):
for i, t in enumerate(self.transform_list):
try:
data = t.transform(data)
except Exception as exc:
msg = 'Encountered exception while running transform #{}, {} in pipeline.'.format(i, t)
raise PipelineTransformException(msg) from exc
return data | StarcoderdataPython |
3303309 | <gh_stars>1-10
"""Interface for all network clients to follow."""
from __future__ import annotations
from abc import ABC, abstractmethod
from xrpl.models.requests.request import Request
from xrpl.models.response import Response
class Client(ABC):
    """
    Interface for all network clients to follow.

    :meta private:
    """

    # Address of the server this client communicates with.
    url: str

    @abstractmethod
    async def request_impl(self: Client, request: Request) -> Response:
        """
        This is the actual driver for a given Client's request. It must be
        async because all of the helper functions in this library are
        async-first. Implement this in a given Client.

        Arguments:
            request: The Request to send.

        Raises:
            NotImplementedError: always.
        """
        raise NotImplementedError(
            f"{self.__class__.__name__}.request_impl not implemented."
        )
| StarcoderdataPython |
4803004 | <gh_stars>0
def kmp(t, p):
    """Return the start indices of every occurrence of pattern ``p`` in text ``t``.

    Knuth-Morris-Pratt search: a failure table lets the scan resume after a
    mismatch without re-examining matched characters, giving
    O(len(t) + len(p)) time.
    """
    n = len(t)
    m = len(p)

    def _failure(pat):
        # Correct failure table, kept local so kmp is self-contained:
        # f[i] = length of the longest proper prefix of pat[:i+1] that is
        # also a suffix of it.  (The module's failure_function appended past
        # its pre-sized list, leaving every real entry at 0.)
        f = [0] * m
        begin, matched = 1, 0
        while begin + matched < m:
            if pat[begin + matched] == pat[matched]:
                matched += 1
                f[begin + matched - 1] = matched
            else:
                if matched == 0:
                    begin += 1
                else:
                    begin += matched - f[matched - 1]
                    matched = f[matched - 1]
        return f

    begin = 0
    matched = 0
    res = []
    f = _failure(p)
    while begin <= n - m:
        if matched < m and t[begin + matched] == p[matched]:
            matched += 1
            if matched == m:
                res.append(begin)
        else:
            if matched == 0:
                begin += 1
            else:
                # Shift forward by matched - f[matched-1].  The original
                # ADDED f[matched-1] instead of subtracting, over-shifting
                # the window and skipping valid (e.g. overlapping) matches.
                begin += matched - f[matched - 1]
                matched = f[matched - 1]
    return res
def failure_function(p):
    """Return the KMP failure table for pattern ``p``.

    f[i] is the length of the longest proper prefix of p[:i+1] that is also
    a suffix of p[:i+1].
    """
    m = len(p)
    begin = 1
    matched = 0
    f = [0] * m
    while begin + matched < m:
        if p[begin + matched] == p[matched]:
            matched += 1
            # Record the match length at its index.  The original called
            # f.append(matched), growing the list past its pre-sized length
            # m and leaving every real entry at 0.
            f[begin + matched - 1] = matched
        else:
            if matched == 0:
                begin += 1
            else:
                begin += matched - f[matched - 1]
                matched = f[matched - 1]
    return f
print(kmp('abaabababc', 'ababab')) | StarcoderdataPython |
3222983 | """ Schema for Auth Models """
# pylint: disable=no-self-argument
import re
from typing import Dict, List, Optional
from pydantic import BaseModel, validator
from app.core.security.password import validate_password
from app.helpers.expressions import VALID_EMAIL
class GroupBase(BaseModel):
    """ Base Schema for Groups """
    name: str
    description: Optional[str] = None

class GroupBaseDB(GroupBase):
    """ Base Schema for DB """
    # Primary key assigned by the database.
    id: int

class Group(GroupBaseDB):
    """ Final Schema for API """

class UserBasic(BaseModel):
    """ Basic user info - combine with id to give clients linking and list abilities """
    first_name: Optional[str] = None
    last_name: Optional[str] = None
class UserBase(UserBasic):
    """ Base Schema for User with optional data to be collected"""
    username: Optional[str] = None
    email: Optional[str] = None
    is_active: Optional[bool] = True
    is_superuser: Optional[bool] = False

    @validator('email')
    def validate_email(cls, value):
        """ validates the email provided is valid form """
        # None is allowed (field is optional); only non-None values are checked.
        if value is not None and not re.search(VALID_EMAIL, value):
            raise ValueError('email address is not valid')
        return value

    @validator('username')
    def validate_username(cls, value):
        """ validates the username is alphanumeric """
        if value is not None and not value.isalnum():
            raise ValueError('username must be alphanumeric')
        return value
class UserBaseDB(UserBase):
    """ Base Schema for User after DB save to return most non sensitive data """
    # NOTE(review): int field with a None default -- confirm pydantic
    # treats this as optional as intended.
    id: int = None

class UserList(UserBasic):
    """ Add ID into UserBasic so we can provide a list for linking and name building """
    id: int
def password(value: str, values: Dict[str, str]) -> str:
    """ make sure the password supplied meets our criteria """
    # Reusable pydantic validator body: `value` is the confirmation field and
    # `values` holds previously-validated fields (including 'password').
    # We will assume all attempts will fail so start with least intense first
    if 'password' not in values or value != values['password']:
        raise ValueError('passwords do not match')
    # Validate returns True if valid, or raises Value error if not
    validate_password(value)
    return value
class UserCreate(UserBase):
    """ Add required fields required to create a user """
    password: str
    password_validate: str
    # Run the shared `password` check on the confirmation field;
    # always=True makes it run even when the field is missing.
    _validate_password = validator('password_validate', allow_reuse=True, always=True)(password)
class UserUpdate(UserBaseDB):
    """ Schema to allow user to update password """
    password: Optional[str] = None

class User(UserBaseDB):
    """ Does not include hashed password, could include other extra's """
    groups: Optional[List] = None

class UserDB(UserBaseDB):
    """ Final DB Object """
    # Only the hash is ever persisted; plaintext never reaches this model.
    hashed_password: str

class UserDBCreate(UserBase):
    """ Object to save in the database / does not include key """
    hashed_password: Optional[str] = None
| StarcoderdataPython |
1725810 | <gh_stars>0
from django.apps import AppConfig
class DbcallsConfig(AppConfig):
    """Django application configuration for the DBCalls app."""
    name = 'DBCalls'
| StarcoderdataPython |
1784609 | <reponame>mccolm-robotics/Claver-AI-Assistant<gh_stars>1-10
import numpy as np
from pyrr import Vector3
class BasicTile:
    """A flat square ground tile: two triangles lying in the y = 0 plane."""

    # Edge length used by callers when laying out tiles.
    # NOTE(review): not referenced inside this class -- confirm external use.
    TILE_SIZE = 11

    # Two triangles forming a 2x2 quad centred on the origin,
    # as flat (x, y, z) triples.
    vertices = [
        1.0, 0.0, -1.0,
        -1.0, 0.0, 1.0,
        1.0, 0.0, 1.0,
        -1.0, 0.0, -1.0,
        -1.0, 0.0, 1.0,
        1.0, 0.0, -1.0
    ]
def __init__(self, loader, position):
    """Upload the tile geometry via `loader` and record its world position."""
    # self.__model = loader.loadToVAO(self.vertices)
    self.__model = loader.load2DToVAO(self.vertices, 3)  # 3 floats per vertex
    self.__position = Vector3(position)
    # Cache the components callers query individually.
    self.__x = self.__position.x
    self.__z = self.__position.z
    self.__height = self.__position.y
def getModel(self):
    # Model handle returned by the loader.
    return self.__model

def getPosition(self):
    # Full position as a Vector3.
    return self.__position

def getX(self):
    return self.__x

def getZ(self):
    return self.__z
def getHeight(self):
return self.__height | StarcoderdataPython |
129353 | <reponame>Lezval/horizon
#!/usr/bin/env python
"""Generates files for sphinx documentation using a simple Autodoc based
template.
To use, just run as a script:
$ python doc/generate_autodoc_index.py
"""
import os
base_dir = os.path.dirname(os.path.abspath(__file__))
RSTDIR = os.path.join(base_dir, "source", "sourcecode")
SRCS = {'dashboard': os.path.join(base_dir, "..", "openstack-dashboard"),
'django_openstack': os.path.join(base_dir, "..", "django-openstack")}
def find_autodoc_modules(module_name, sourcedir):
    """returns a list of modules in the SOURCE directory"""
    # NOTE: Python 2 source (print statements below).
    modlist = []
    # Side effect: changes the process working directory and never restores it.
    os.chdir(os.path.join(sourcedir, module_name))
    print "SEARCHING %s" % sourcedir
    for root, dirs, files in os.walk("."):
        for filename in files:
            if filename.endswith(".py"):
                # root = ./dashboard/test/unit
                # filename = base.py
                # remove the pieces of the root
                elements = root.split(os.path.sep)
                # replace the leading "." with the module name
                elements[0] = module_name
                # and get the base module name
                base, extension = os.path.splitext(filename)
                if not (base == "__init__"):
                    elements.append(base)
                # __init__ modules are recorded under their package path.
                result = ".".join(elements)
                #print result
                modlist.append(result)
    return modlist
# Top-level driver: build one autoindex.rst plus one .rst stub per module.
if not(os.path.exists(RSTDIR)):
    os.mkdir(RSTDIR)

INDEXOUT = open("%s/autoindex.rst" % RSTDIR, "w")
INDEXOUT.write("Source Code Index\n")
INDEXOUT.write("=================\n")
INDEXOUT.write(".. toctree::\n")
INDEXOUT.write("   :maxdepth: 1\n")
INDEXOUT.write("\n")

for modulename in SRCS:
    for module in find_autodoc_modules(modulename, SRCS[modulename]):
        generated_file = "%s/%s.rst" % (RSTDIR, module)
        print "Generating %s" % generated_file
        INDEXOUT.write("    %s\n" % module)
        # Emit a minimal automodule stub for this module.
        FILEOUT = open(generated_file, "w")
        FILEOUT.write("The :mod:`%s` Module\n" % module)
        FILEOUT.write("=============================="
                      "=============================="
                      "==============================\n")
        FILEOUT.write(".. automodule:: %s\n" % module)
        FILEOUT.write("  :members:\n")
        FILEOUT.write("  :undoc-members:\n")
        FILEOUT.write("  :show-inheritance:\n")
        FILEOUT.close()

INDEXOUT.close()
| StarcoderdataPython |
170500 | <gh_stars>1000+
import numpy as np
import openml
classification_tasks = [
232, 236, 241, 245, 253, 254, 256, 258, 260, 262, 267, 271, 273, 275, 279, 288, 336,
340, 2119, 2120, 2121, 2122, 2123, 2125, 2356, 3044, 3047, 3048, 3049, 3053, 3054,
3055, 75089, 75092, 75093, 75098, 75100, 75108, 75109, 75112, 75114, 75115, 75116,
75118, 75120, 75121, 75125, 75126, 75129, 75131, 75133, 75134, 75136, 75139, 75141,
75142, 75143, 75146, 75147, 75148, 75149, 75153, 75154, 75156, 75157, 75159, 75161,
75163, 75166, 75169, 75171, 75173, 75174, 75176, 75178, 75179, 75180, 75184, 75185,
75187, 75192, 75195, 75196, 75199, 75210, 75212, 75213, 75215, 75217, 75219, 75221,
75223, 75225, 75232, 75233, 75234, 75235, 75236, 75237, 75239, 75250, 126021, 126024,
126028, 126030, 126031, 146574, 146575, 146576, 146577, 146578, 146583, 146586,
146592, 146593, 146594, 146596, 146597, 146600, 146601, 146602, 146603, 146679,
166859, 166866, 166872, 166875, 166882, 166897, 166905, 166906, 166913, 166915,
166931, 166932, 166944, 166950, 166951, 166953, 166956, 166957, 166958, 166959,
166970, 166996, 167085, 167086, 167087, 167088, 167089, 167090, 167094, 167096,
167097, 167099, 167100, 167101, 167103, 167105, 167106, 167202, 167203, 167204,
167205, 168785, 168791, 189779, 189786, 189828, 189829, 189836, 189840, 189841,
189843, 189844, 189845, 189846, 189857, 189858, 189859, 189863, 189864, 189869,
189870, 189875, 189878, 189880, 189881, 189882, 189883, 189884, 189887, 189890,
189893, 189894, 189899, 189900, 189902, 190154, 190155, 190156, 190157, 190158,
190159, 211720, 211721, 211722, 211723, 211724
]
regression_tasks = [
359997, 359998, 359999, 360000, 360001, 360002, 360003, 167146, 360004, 360005, 360006,
360007, 211696, 360009, 360010, 360011, 360012, 360013, 360014, 360015, 360016, 360017,
360018, 360019, 360020, 360021, 360022, 360023, 360024, 360025, 360026, 360027, 360028,
360029, 360030, 360031, 360032, 360033, 360034, 360035, 360036, 360037, 360038, 360039,
360040, 360041, 360042, 360043, 360044, 360045, 360046, 360047, 360048, 360049, 360050,
360051, 360052, 360053, 360054, 360055, 360056, 360057, 360058, 360059, 360060, 360061,
360062, 360063, 360064, 360066, 360067, 360068, 360069, 360070, 360071, 360072, 360073,
360074, 360075, 360076, 360077, 360078, 360079, 360080, 360081, 360082, 360083, 360084,
360085, 360086, 360087, 360088, 360089, 360090, 360091, 360092, 360093, 360094, 360095,
360096, 360097, 360098, 360100, 360101, 360102, 360103, 360104, 360105, 360106, 360107,
360108,
]
def load_task(task_id):
    """Download an OpenML task and return its train/test split plus metadata.

    Returns:
        X_train, y_train, X_test, y_test,
        cat: {column index: 'categorical' | 'numerical'},
        task_type: 'classification' or 'regression',
        name: lower-cased dataset name.

    Raises:
        ValueError: if the task is neither classification nor regression.
    """
    task = openml.tasks.get_task(task_id)
    X, y = task.get_X_and_y()
    train_indices, test_indices = task.get_train_test_split_indices()
    X_train = X[train_indices]
    y_train = y[train_indices]
    X_test = X[test_indices]
    y_test = y[test_indices]
    dataset = openml.datasets.get_dataset(task.dataset_id)
    # Only the categorical-indicator vector is needed from the raw data.
    _, _, cat, _ = dataset.get_data(target=task.target_name)
    name = dataset.name.lower()
    # Drop the large dataset objects before building the return value.
    del _
    del dataset
    cat = {i: 'categorical' if c else 'numerical' for i, c in enumerate(cat)}
    if isinstance(task, openml.tasks.OpenMLClassificationTask):
        task_type = 'classification'
    elif isinstance(task, openml.tasks.OpenMLRegressionTask):
        task_type = 'regression'
    else:
        raise ValueError('Unknown task type')
    return X_train, y_train, X_test, y_test, cat, task_type, name
| StarcoderdataPython |
3398344 | <filename>simulation/aivika/modeler/__init__.py<gh_stars>0
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Licensed under BSD3. See the LICENSE.txt file in the root of this distribution.
from simulation.aivika.modeler.specs import *
from simulation.aivika.modeler.model import *
from simulation.aivika.modeler.expr import *
from simulation.aivika.modeler.expr_random import *
from simulation.aivika.modeler.expr_run import *
from simulation.aivika.modeler.transform import *
from simulation.aivika.modeler.port import *
from simulation.aivika.modeler.data_type import *
from simulation.aivika.modeler.stream import *
from simulation.aivika.modeler.stream_random import *
from simulation.aivika.modeler.queue_strategy import *
from simulation.aivika.modeler.queue import *
from simulation.aivika.modeler.resource import *
from simulation.aivika.modeler.resource_preemption import *
from simulation.aivika.modeler.server import *
from simulation.aivika.modeler.server_random import *
from simulation.aivika.modeler.arrival_timer import *
from simulation.aivika.modeler.ref import *
from simulation.aivika.modeler.stats import *
from simulation.aivika.modeler.results import *
from simulation.aivika.modeler.experiment import *
| StarcoderdataPython |
1600599 | # -*- coding: utf-8 -*-
"""Qlik Engine."""
import math
from typing import Any, Dict, List, Union
from luft.vendor.pyqlikengine.engine_app_api import EngineAppApi
from luft.vendor.pyqlikengine.engine_field_api import EngineFieldApi
from luft.vendor.pyqlikengine.engine_generic_object_api import EngineGenericObjectApi
from luft.vendor.pyqlikengine.engine_global_api import EngineGlobalApi
from luft.vendor.pyqlikengine.structs import Structs
def get_hypercube_data(connection: object, app_handle: int,
                       measures: Union[List[Dict[str, str]], None] = None,
                       dimensions: Union[List[str], None] = None,
                       selections: Union[Dict[str, List[Any]]] = None,
                       date_valid: str = None):
    """Get data from Qlik App in json format.

    Builds a hypercube of `dimensions` x `measures` (measure dicts carry
    'id' and 'name'), optionally applies field `selections`, and returns a
    list of flat result dicts (one per measure per row) annotated with app
    metadata and `date_valid`.
    """
    mes_width = len(measures) if measures else 0
    dim_width = len(dimensions) if dimensions else 0
    ega = EngineGlobalApi(connection)
    # Define Dimensions of hypercube
    dimensions = dimensions or []
    hc_inline_dim = Structs.nx_inline_dimension_def(dimensions)
    # Set sorting of Dimension by Measure
    hc_mes_sort = Structs.nx_sort_by()
    # Build hypercube from above definition
    hc_dim = Structs.nx_hypercube_dimensions(hc_inline_dim)
    meas_ids = [mea.get('id') for mea in (measures or [])]
    hc_mes = Structs.nx_hypercube_measure_ids(hc_mes_sort, meas_ids)
    # Page height chosen so each page stays under ~10000 cells.
    width = mes_width + dim_width
    height = int(math.floor(10000 / width))
    nx_page = Structs.nx_page(0, 0, height, width)
    hc_def = Structs.hypercube_def('$', hc_dim, hc_mes, [nx_page])
    eaa = EngineAppApi(connection)
    app_layout = eaa.get_app_layout(app_handle).get('qLayout')
    hc_response = eaa.create_object(
        app_handle, 'CH01', 'Chart', 'qHyperCubeDef', hc_def)
    hc_handle = ega.get_handle(hc_response)
    egoa = EngineGenericObjectApi(connection)
    efa = EngineFieldApi(connection)
    # Apply selections: strings select by text, everything else by number.
    if selections:
        for field in selections.keys():
            field_handle = ega.get_handle(
                eaa.get_field(app_handle, field))
            values: List[Dict[str, Any]] = []
            for select_value in selections[field]:
                if isinstance(select_value, str):
                    values.append({'qText': select_value})
                else:
                    values.append(
                        {'qIsNumeric': True, 'qNumber': select_value})
            efa.select_values(field_handle, values)
    i = 0
    # NOTE(review): this loop condition only holds while i is a multiple of
    # height, so with height > 1 it exits after the first page and `results`
    # is re-created per iteration -- pagination looks broken; confirm intent.
    while i % height == 0:
        nx_page = Structs.nx_page(i, 0, height, width)
        hc_data = egoa.get_hypercube_data(
            hc_handle, '/qHyperCubeDef', [nx_page])
        elems = hc_data['qDataPages'][0]['qMatrix']
        results = []
        for elem in elems:
            # Cells are ordered: all dimensions first, then all measures.
            j = 0
            dim_dict = {}
            for dim in (dimensions or []):
                if 'qText' in elem[j].keys():
                    dim_dict[dim.lower()] = elem[j]['qText']
                else:
                    dim_dict[dim.lower()] = None
                j += 1
            for meas in (measures or []):
                result = {}
                result['date_valid'] = date_valid
                result['app_id'] = app_layout.get('qFileName')
                result['app_name'] = app_layout.get('qTitle')
                result['app_stream_id'] = app_layout.get('stream').get('id')
                result['app_stream_name'] = app_layout.get(
                    'stream').get('name')
                result['dimensions'] = dim_dict
                result['selections'] = selections
                result['measure_id'] = meas.get('id')
                result['measure_name'] = meas.get('name')
                if 'qNum' in elem[j].keys() and not elem[j].get('qIsNull'):
                    result['measure_value'] = elem[j]['qNum']
                else:
                    result['measure_value'] = None
                results.append(result)
                j += 1
        i += 1
    return results
| StarcoderdataPython |
186474 | <gh_stars>1-10
import sys
import getopt
import logging
import botocore
import boto3
import time
from packaging import version
from time import sleep
from botocore.exceptions import ClientError
logger = logging.getLogger()
personalize = None
def _get_dataset_group_arn(dataset_group_name):
    """Look up a Personalize dataset group's ARN by its name.

    Raises:
        NameError: if no dataset group with that name exists in the region.
    """
    paginator = personalize.get_paginator('list_dataset_groups')
    for page in paginator.paginate():
        for dataset_group in page["datasetGroups"]:
            if dataset_group['name'] == dataset_group_name:
                # Found -- return directly instead of flag + double break.
                return dataset_group['datasetGroupArn']
    raise NameError(f'Dataset Group "{dataset_group_name}" does not exist; verify region is correct')
def _get_solutions(dataset_group_arn):
    """Return the ARNs of every solution in the given dataset group."""
    paginator = personalize.get_paginator('list_solutions')
    return [
        solution['solutionArn']
        for page in paginator.paginate(datasetGroupArn = dataset_group_arn)
        for solution in page['solutions']
    ]
def _delete_campaigns(solution_arns):
    """Delete all campaigns attached to the given solutions and wait until they are gone.

    Raises:
        Exception: if a campaign is in a non-deletable state or deletion times out.
    """
    campaign_arns = []

    for solution_arn in solution_arns:
        paginator = personalize.get_paginator('list_campaigns')
        for paginate_result in paginator.paginate(solutionArn = solution_arn):
            for campaign in paginate_result['campaigns']:
                if campaign['status'] in ['ACTIVE', 'CREATE FAILED']:
                    logger.info('Deleting campaign: ' + campaign['campaignArn'])
                    personalize.delete_campaign(campaignArn = campaign['campaignArn'])
                elif campaign['status'].startswith('DELETE'):
                    logger.warning('Campaign {} is already being deleted so will wait for delete to complete'.format(campaign['campaignArn']))
                else:
                    raise Exception('Campaign {} has a status of {} so cannot be deleted'.format(campaign['campaignArn'], campaign['status']))
                campaign_arns.append(campaign['campaignArn'])

    max_time = time.time() + 30*60  # 30 mins
    while time.time() < max_time:
        # Iterate over a snapshot: the original removed items from the list
        # it was iterating, which silently skips the element after each removal.
        for campaign_arn in list(campaign_arns):
            try:
                describe_response = personalize.describe_campaign(campaignArn = campaign_arn)
                logger.debug('Campaign {} status is {}'.format(campaign_arn, describe_response['campaign']['status']))
            except ClientError as e:
                error_code = e.response['Error']['Code']
                if error_code == 'ResourceNotFoundException':
                    campaign_arns.remove(campaign_arn)

        if len(campaign_arns) == 0:
            logger.info('All campaigns have been deleted or none exist for dataset group')
            break

        logger.info('Waiting for {} campaign(s) to be deleted'.format(len(campaign_arns)))
        time.sleep(20)

    if len(campaign_arns) > 0:
        raise Exception('Timed out waiting for all campaigns to be deleted')
def _delete_solutions(solution_arns):
    """Delete the given solutions (idempotently) and block until they are gone.

    Raises:
        Exception: if a solution is in a non-deletable state or deletion times out.
    """
    for solution_arn in solution_arns:
        try:
            describe_response = personalize.describe_solution(solutionArn = solution_arn)
            solution = describe_response['solution']
            if solution['status'] in ['ACTIVE', 'CREATE FAILED']:
                logger.info('Deleting solution: ' + solution_arn)
                personalize.delete_solution(solutionArn = solution_arn)
            elif solution['status'].startswith('DELETE'):
                logger.warning('Solution {} is already being deleted so will wait for delete to complete'.format(solution_arn))
            else:
                raise Exception('Solution {} has a status of {} so cannot be deleted'.format(solution_arn, solution['status']))
        except ClientError as e:
            # Already gone is fine; anything else is fatal.
            error_code = e.response['Error']['Code']
            if error_code != 'ResourceNotFoundException':
                raise e

    max_time = time.time() + 30*60  # 30 mins
    while time.time() < max_time:
        # Iterate over a snapshot: removing from the list being iterated
        # (as the original did) skips the element following each removal.
        for solution_arn in list(solution_arns):
            try:
                describe_response = personalize.describe_solution(solutionArn = solution_arn)
                logger.debug('Solution {} status is {}'.format(solution_arn, describe_response['solution']['status']))
            except ClientError as e:
                error_code = e.response['Error']['Code']
                if error_code == 'ResourceNotFoundException':
                    solution_arns.remove(solution_arn)

        if len(solution_arns) == 0:
            logger.info('All solutions have been deleted or none exist for dataset group')
            break

        logger.info('Waiting for {} solution(s) to be deleted'.format(len(solution_arns)))
        time.sleep(20)

    if len(solution_arns) > 0:
        raise Exception('Timed out waiting for all solutions to be deleted')
def _delete_event_trackers(dataset_group_arn):
    """Delete all event trackers in the dataset group and wait until they are gone.

    Raises:
        Exception: if an event tracker is in a non-deletable state or deletion times out.
    """
    event_tracker_arns = []

    event_trackers_paginator = personalize.get_paginator('list_event_trackers')
    for event_tracker_page in event_trackers_paginator.paginate(datasetGroupArn = dataset_group_arn):
        for event_tracker in event_tracker_page['eventTrackers']:
            if event_tracker['status'] in [ 'ACTIVE', 'CREATE FAILED' ]:
                logger.info('Deleting event tracker {}'.format(event_tracker['eventTrackerArn']))
                personalize.delete_event_tracker(eventTrackerArn = event_tracker['eventTrackerArn'])
            elif event_tracker['status'].startswith('DELETE'):
                logger.warning('Event tracker {} is already being deleted so will wait for delete to complete'.format(event_tracker['eventTrackerArn']))
            else:
                # Fixed copy-pasted message: the original said 'Solution {}' here.
                raise Exception('Event tracker {} has a status of {} so cannot be deleted'.format(event_tracker['eventTrackerArn'], event_tracker['status']))
            event_tracker_arns.append(event_tracker['eventTrackerArn'])

    max_time = time.time() + 30*60  # 30 mins
    while time.time() < max_time:
        # Iterate over a snapshot: removing from the list being iterated
        # (as the original did) skips the element following each removal.
        for event_tracker_arn in list(event_tracker_arns):
            try:
                describe_response = personalize.describe_event_tracker(eventTrackerArn = event_tracker_arn)
                logger.debug('Event tracker {} status is {}'.format(event_tracker_arn, describe_response['eventTracker']['status']))
            except ClientError as e:
                error_code = e.response['Error']['Code']
                if error_code == 'ResourceNotFoundException':
                    event_tracker_arns.remove(event_tracker_arn)

        if len(event_tracker_arns) == 0:
            logger.info('All event trackers have been deleted or none exist for dataset group')
            break

        logger.info('Waiting for {} event tracker(s) to be deleted'.format(len(event_tracker_arns)))
        time.sleep(20)

    if len(event_tracker_arns) > 0:
        raise Exception('Timed out waiting for all event trackers to be deleted')
def _delete_filters(dataset_group_arn):
    """Delete all filters in the dataset group and wait until they are gone.

    Raises:
        Exception: if filter deletion times out.
    """
    filter_arns = []

    # NOTE(review): list_filters is not paginated here, so at most the first
    # 100 filters are handled -- confirm that is sufficient.
    filters_response = personalize.list_filters(datasetGroupArn = dataset_group_arn, maxResults = 100)
    for filter_obj in filters_response['Filters']:
        logger.info('Deleting filter ' + filter_obj['filterArn'])
        personalize.delete_filter(filterArn = filter_obj['filterArn'])
        filter_arns.append(filter_obj['filterArn'])

    max_time = time.time() + 30*60  # 30 mins
    while time.time() < max_time:
        # Iterate over a snapshot: removing from the list being iterated
        # (as the original did) skips the element following each removal.
        for filter_arn in list(filter_arns):
            try:
                describe_response = personalize.describe_filter(filterArn = filter_arn)
                logger.debug('Filter {} status is {}'.format(filter_arn, describe_response['filter']['status']))
            except ClientError as e:
                error_code = e.response['Error']['Code']
                if error_code == 'ResourceNotFoundException':
                    filter_arns.remove(filter_arn)

        if len(filter_arns) == 0:
            logger.info('All filters have been deleted or none exist for dataset group')
            break

        logger.info('Waiting for {} filter(s) to be deleted'.format(len(filter_arns)))
        time.sleep(20)

    if len(filter_arns) > 0:
        raise Exception('Timed out waiting for all filter to be deleted')
def _delete_datasets_and_schemas(dataset_group_arn):
    """Delete every dataset in the group, wait for completion, then delete their schemas.

    Schemas still in use by datasets in other groups are skipped
    (Personalize raises ResourceInUseException for those).

    Raises:
        Exception: if a dataset is in a non-deletable state or deletion times out.
    """
    dataset_arns = []
    schema_arns = []

    dataset_paginator = personalize.get_paginator('list_datasets')
    for dataset_page in dataset_paginator.paginate(datasetGroupArn = dataset_group_arn):
        for dataset in dataset_page['datasets']:
            # Remember the schema so it can be deleted after its dataset.
            describe_response = personalize.describe_dataset(datasetArn = dataset['datasetArn'])
            schema_arns.append(describe_response['dataset']['schemaArn'])

            if dataset['status'] in ['ACTIVE', 'CREATE FAILED']:
                logger.info('Deleting dataset ' + dataset['datasetArn'])
                personalize.delete_dataset(datasetArn = dataset['datasetArn'])
            elif dataset['status'].startswith('DELETE'):
                logger.warning('Dataset {} is already being deleted so will wait for delete to complete'.format(dataset['datasetArn']))
            else:
                raise Exception('Dataset {} has a status of {} so cannot be deleted'.format(dataset['datasetArn'], dataset['status']))

            dataset_arns.append(dataset['datasetArn'])

    max_time = time.time() + 30*60  # 30 mins
    while time.time() < max_time:
        # Iterate over a snapshot: removing from the list being iterated
        # (as the original did) skips the element following each removal.
        for dataset_arn in list(dataset_arns):
            try:
                describe_response = personalize.describe_dataset(datasetArn = dataset_arn)
                logger.debug('Dataset {} status is {}'.format(dataset_arn, describe_response['dataset']['status']))
            except ClientError as e:
                error_code = e.response['Error']['Code']
                if error_code == 'ResourceNotFoundException':
                    dataset_arns.remove(dataset_arn)

        if len(dataset_arns) == 0:
            logger.info('All datasets have been deleted or none exist for dataset group')
            break

        logger.info('Waiting for {} dataset(s) to be deleted'.format(len(dataset_arns)))
        time.sleep(20)

    if len(dataset_arns) > 0:
        raise Exception('Timed out waiting for all datasets to be deleted')

    for schema_arn in schema_arns:
        try:
            logger.info('Deleting schema ' + schema_arn)
            personalize.delete_schema(schemaArn = schema_arn)
        except ClientError as e:
            error_code = e.response['Error']['Code']
            if error_code == 'ResourceInUseException':
                logger.info('Schema {} is still in-use by another dataset (likely in another dataset group)'.format(schema_arn))
            else:
                raise e

    logger.info('All schemas used exclusively by datasets have been deleted or none exist for dataset group')
def _delete_dataset_group(dataset_group_arn):
    """Delete the dataset group itself and wait until it no longer exists.

    Raises:
        ClientError: for any describe failure other than ResourceNotFoundException.
    """
    logger.info('Deleting dataset group ' + dataset_group_arn)
    personalize.delete_dataset_group(datasetGroupArn = dataset_group_arn)

    max_time = time.time() + 30*60  # 30 mins
    while time.time() < max_time:
        try:
            # Still describable means still deleting -- keep waiting.
            # (The original `break` was here in the try body, exiting as soon
            # as the group could STILL be described, and the not-found branch
            # never broke out, so the loop logic was inverted.)
            describe_response = personalize.describe_dataset_group(datasetGroupArn = dataset_group_arn)
            logger.debug('Dataset group {} status is {}'.format(dataset_group_arn, describe_response['datasetGroup']['status']))
        except ClientError as e:
            error_code = e.response['Error']['Code']
            if error_code == 'ResourceNotFoundException':
                logger.info('Dataset group {} has been fully deleted'.format(dataset_group_arn))
                break
            raise e

        logger.info('Waiting for dataset group to be deleted')
        time.sleep(20)
def delete_dataset_groups(dataset_group_arns, region = None):
    """Delete each dataset group and every Personalize resource inside it, in dependency order.

    Args:
        dataset_group_arns: ARNs of the dataset groups to tear down.
        region: optional AWS region for the Personalize client.
    """
    global personalize
    personalize = boto3.client(service_name = 'personalize', region_name = region)

    for dataset_group_arn in dataset_group_arns:
        logger.info('Dataset Group ARN: ' + dataset_group_arn)

        solution_arns = _get_solutions(dataset_group_arn)

        # Resources must go leaf-first: campaigns depend on solutions, etc.
        # 1. Delete campaigns
        _delete_campaigns(solution_arns)

        # 2. Delete solutions
        _delete_solutions(solution_arns)

        # 3. Delete event trackers
        _delete_event_trackers(dataset_group_arn)

        # 4. Delete filters
        _delete_filters(dataset_group_arn)

        # 5. Delete datasets and their schemas
        _delete_datasets_and_schemas(dataset_group_arn)

        # 6. Delete dataset group
        _delete_dataset_group(dataset_group_arn)

        logger.info(f'Dataset group {dataset_group_arn} fully deleted')
| StarcoderdataPython |
1631499 | <gh_stars>1-10
import threading, queue
import time

lock = threading.Lock()
q = queue.Queue()  # NOTE(review): unused in this script -- kept for compatibility
c = 0


def task(i):
    """ Add 100000 to the shared counter ``c``.

    The increments are accumulated in a local variable first so the lock is
    held only for a single addition, keeping the critical section minimal.
    """
    global c
    d = 0
    for _ in range(100000):
        d += 1
    with lock:
        c += d
    print("Thread %s" % (i))


if __name__ == "__main__":
    threads = []
    for i in range(100):
        worker = threading.Thread(target=task, args=(i,))
        worker.start()
        threads.append(worker)
    # Use a distinct loop variable: the original reused the name `task`,
    # shadowing (and destroying) the module-level function of the same name.
    for worker in threads:
        worker.join()
    print("Final result of 100 times 100000: %s" % c)
3350401 | <reponame>Thommy257/discopy
# -*- coding: utf-8 -*-
"""
Implements classical-quantum circuits.
Objects are :class:`Ty` generated by two basic types
:code:`bit` and :code:`qubit`.
Arrows are diagrams generated by :class:`QuantumGate`, :class:`ClassicalGate`,
:class:`Discard`, :class:`Measure` and :class:`Encode`.
>>> from discopy.quantum.gates import Ket, CX, H, X, Rz, sqrt, Controlled
>>> circuit = Ket(0, 0) >> CX >> Controlled(Rz(0.25)) >> Measure() @ Discard()
>>> circuit.draw(
... figsize=(3, 6),
... path='docs/_static/imgs/quantum/circuit-example.png')
.. image:: ../_static/imgs/quantum/circuit-example.png
:align: center
>>> from discopy.grammar.pregroup import Word
>>> from discopy.rigid import Ty, Cup, Id
>>> s, n = Ty('s'), Ty('n')
>>> Alice = Word('Alice', n)
>>> loves = Word('loves', n.r @ s @ n.l)
>>> Bob = Word('Bob', n)
>>> grammar = Cup(n, n.r) @ Id(s) @ Cup(n.l, n)
>>> sentence = grammar << Alice @ loves @ Bob
>>> ob = {s: 0, n: 1}
>>> ar = {Alice: Ket(0),
... loves: CX << sqrt(2) @ H @ X << Ket(0, 0),
... Bob: Ket(1)}
>>> F = Functor(ob, ar)
>>> assert abs(F(sentence).eval().array) ** 2
>>> from discopy import drawing
>>> drawing.equation(
... sentence, F(sentence), symbol='$\\\\mapsto$',
... figsize=(6, 3), nodesize=.5,
... path='docs/_static/imgs/quantum/functor-example.png')
.. image:: ../_static/imgs/quantum/functor-example.png
:align: center
"""
import random
from itertools import takewhile, chain
from collections.abc import Mapping
from discopy import messages, monoidal, rigid, tensor
from discopy.cat import AxiomError
from discopy.rigid import Diagram
from discopy.tensor import Dim, Tensor
from math import pi
from functools import reduce, partial
class AntiConjugate:
    """ Mixin for phase gates whose conjugate negates the phase. """
    def conjugate(self):
        return type(self)(-self.phase)

    l = r = property(conjugate)
class RealConjugate:
    """ Mixin for real-valued boxes: conjugation is the identity. """
    def conjugate(self):
        return self

    l = r = property(conjugate)
class Anti2QubitConjugate:
    """ Mixin for two-qubit phase gates: conjugation negates the phase and
    swaps the two qubit wires on either side. """
    def conjugate(self):
        algebraic_conj = type(self)(-self.phase)
        return Swap(qubit, qubit) >> algebraic_conj >> Swap(qubit, qubit)

    l = r = property(conjugate)
def index2bitstring(i, length):
    """ Turn an index into a bitstring (tuple of ints) of the given length. """
    if i >= 2 ** length:
        raise ValueError("Index should be less than 2 ** length.")
    if not i and not length:
        # Special-case the empty bitstring: formatting 0 would yield '0'.
        return ()
    return tuple(int(digit) for digit in format(i, '0{}b'.format(length)))
def bitstring2index(bitstring):
    """ Turn a bitstring into its integer index (big-endian). """
    index = 0
    for value in bitstring:
        index = 2 * index + value
    return index
class Ob(RealConjugate, rigid.Ob):
    """
    Implements the generating objects of :class:`Circuit`, i.e.
    information units of some integer dimension greater than 1.

    Parameters
    ----------
    name : str
        Name of the unit, e.g. "bit" or "qubit".
    dim : int, optional
        Dimension of the unit, default is 2.
    z : int, optional
        Winding number; must be 0 since circuit objects are self-dual.

    Examples
    --------
    >>> assert bit.objects == [Ob("bit", dim=2)]
    >>> assert qubit.objects == [Ob("qubit", dim=2)]
    """
    def __init__(self, name, dim=2, z=0):
        super().__init__(name)
        # Circuit objects are self-dual, so any non-zero winding is an error.
        if z != 0:
            raise AxiomError("circuit.Ob are self-dual.")
        if not isinstance(dim, int) or dim < 2:
            raise ValueError("Dimension should be an int greater than 1.")
        self._dim = dim

    @property
    def dim(self):
        """ Dimension of the unit, e.g. :code:`dim=2` for bits and qubits. """
        return self._dim

    def __repr__(self):
        return self.name
class Digit(Ob):
    """
    Classical unit of information of some dimension :code:`dim`.

    Examples
    --------
    >>> assert bit.objects == [Digit(2)] == [Ob("bit", dim=2)]
    """
    def __init__(self, dim, z=0):
        # The two-dimensional digit is printed simply as "bit".
        name = "bit" if dim == 2 else "Digit({})".format(dim)
        super().__init__(name, dim)
class Qudit(Ob):
    """
    Quantum unit of information of some dimension :code:`dim`.

    Examples
    --------
    >>> assert qubit.objects == [Qudit(2)] == [Ob("qubit", dim=2)]
    """
    def __init__(self, dim, z=0):
        # The two-dimensional qudit is printed simply as "qubit".
        name = "qubit" if dim == 2 else "Qudit({})".format(dim)
        super().__init__(name, dim)
class Ty(rigid.Ty):
    """
    Implements the input and output types of :class:`Circuit`.

    Examples
    --------
    >>> assert bit == Ty(Digit(2))
    >>> assert qubit == Ty(Qudit(2))
    >>> assert bit @ qubit != qubit @ bit

    You can construct :code:`n` qubits by taking powers of :code:`qubit`:

    >>> print(bit ** 2 @ qubit ** 3)
    bit @ bit @ qubit @ qubit @ qubit
    """
    @staticmethod
    def upgrade(old):
        # Promote a rigid.Ty into a circuit Ty carrying the same objects.
        return Ty(*old.objects)

    def __repr__(self):
        return str(self)
# The two basic generating types: a classical bit and a quantum qubit.
bit, qubit = Ty(Digit(2)), Ty(Qudit(2))
@monoidal.Diagram.subclass
class Circuit(tensor.Diagram):
""" Classical-quantum circuits. """
    def __repr__(self):
        # Reuse the tensor.Diagram repr, relabelled for circuits.
        return super().__repr__().replace('Diagram', 'Circuit')
    def conjugate(self):
        """ Conjugate of a circuit, i.e. its left adjoint. """
        return self.l
    @property
    def is_mixed(self):
        """
        Whether the circuit is mixed, i.e. it contains both bits and qubits
        or it discards qubits. Mixed circuits can be evaluated only by a
        :class:`CQMapFunctor` not a :class:`discopy.tensor.Functor`.
        """
        # Mixed if bits and qubits ever coexist on the domain or on any
        # layer boundary, or if any box is itself mixed (e.g. Measure).
        both_bits_and_qubits = self.dom.count(bit) and self.dom.count(qubit)\
            or any(layer.cod.count(bit) and layer.cod.count(qubit)
                   for layer in self.layers)
        return both_bits_and_qubits or any(box.is_mixed for box in self.boxes)
    def init_and_discard(self):
        """ Returns a circuit with empty domain and only bits as codomain. """
        from discopy.quantum.gates import Bits, Ket
        circuit = self
        if circuit.dom:
            # Prepare every input wire: bit-0 states for bits, |0> for qubits.
            init = Id(0).tensor(*(
                Bits(0) if x.name == "bit" else Ket(0) for x in circuit.dom))
            circuit = init >> circuit
        if circuit.cod != bit ** len(circuit.cod):
            # Discard any remaining qubits so only bits are left on the output.
            discards = Id(0).tensor(*(
                Discard() if x.name == "qubit"
                else Id(bit) for x in circuit.cod))
            circuit = circuit >> discards
        return circuit
def eval(self, *others, backend=None, mixed=False,
contractor=None, **params):
"""
Evaluate a circuit on a backend, or simulate it with numpy.
Parameters
----------
others : :class:`discopy.quantum.circuit.Circuit`
Other circuits to process in batch.
backend : pytket.Backend, optional
Backend on which to run the circuit, if none then we apply
:class:`discopy.tensor.Functor` or :class:`CQMapFunctor` instead.
mixed : bool, optional
Whether to apply :class:`discopy.tensor.Functor`
or :class:`CQMapFunctor`.
contractor : callable, optional
Use :class:`tensornetwork` contraction
instead of discopy's basic eval feature.
params : kwargs, optional
Get passed to Circuit.get_counts.
Returns
-------
tensor : :class:`discopy.tensor.Tensor`
If :code:`backend is not None` or :code:`mixed=False`.
cqmap : :class:`CQMap`
Otherwise.
Examples
--------
We can evaluate a pure circuit (i.e. with :code:`not circuit.is_mixed`)
as a unitary :class:`discopy.tensor.Tensor` or as a :class:`CQMap`:
>>> from discopy.quantum import *
>>> H.eval().round(2) # doctest: +ELLIPSIS
Tensor(dom=Dim(2), cod=Dim(2), array=[0.71+0.j, ..., -0.71+0.j])
>>> H.eval(mixed=True).round(1) # doctest: +ELLIPSIS
CQMap(dom=Q(Dim(2)), cod=Q(Dim(2)), array=[0.5+0.j, ..., 0.5+0.j])
We can evaluate a mixed circuit as a :class:`CQMap`:
>>> assert Measure().eval()\\
... == CQMap(dom=Q(Dim(2)), cod=C(Dim(2)),
... array=[1, 0, 0, 0, 0, 0, 0, 1])
>>> circuit = Bits(1, 0) @ Ket(0) >> Discard(bit ** 2 @ qubit)
>>> assert circuit.eval() == CQMap(dom=CQ(), cod=CQ(), array=[1])
We can execute any circuit on a `pytket.Backend`:
>>> circuit = Ket(0, 0) >> sqrt(2) @ H @ X >> CX >> Measure() @ Bra(0)
>>> from discopy.quantum.tk import mockBackend
>>> backend = mockBackend({(0, 1): 512, (1, 0): 512})
>>> assert circuit.eval(backend, n_shots=2**10).round()\\
... == Tensor(dom=Dim(1), cod=Dim(2), array=[0., 1.])
"""
from discopy.quantum import cqmap
if contractor is not None:
array = contractor(*self.to_tn(mixed=mixed)).tensor
if self.is_mixed or mixed:
f = cqmap.Functor()
return cqmap.CQMap(f(self.dom), f(self.cod), array)
f = tensor.Functor(lambda x: x[0].dim, {})
return Tensor(f(self.dom), f(self.cod), array)
from discopy import cqmap
from discopy.quantum.gates import Bits, scalar
if len(others) == 1 and not isinstance(others[0], Circuit):
# This allows the syntax :code:`circuit.eval(backend)`
return self.eval(backend=others[0], mixed=mixed, **params)
if backend is None:
if others:
return [circuit.eval(mixed=mixed, **params)
for circuit in (self, ) + others]
functor = cqmap.Functor() if mixed or self.is_mixed\
else tensor.Functor(lambda x: x[0].dim, lambda f: f.array)
box = functor(self)
return type(box)(box.dom, box.cod, box.array + 0j)
circuits = [circuit.to_tk() for circuit in (self, ) + others]
results, counts = [], circuits[0].get_counts(
*circuits[1:], backend=backend, **params)
for i, circuit in enumerate(circuits):
n_bits = len(circuit.post_processing.dom)
result = Tensor.zeros(Dim(1), Dim(*(n_bits * (2, ))))
for bitstring, count in counts[i].items():
result += (scalar(count) @ Bits(*bitstring)).eval()
if circuit.post_processing:
result = result >> circuit.post_processing.eval()
results.append(result)
return results if len(results) > 1 else results[0]
def get_counts(self, *others, backend=None, **params):
"""
Get counts from a backend, or simulate them with numpy.
Parameters
----------
others : :class:`discopy.quantum.circuit.Circuit`
Other circuits to process in batch.
backend : pytket.Backend, optional
Backend on which to run the circuit, if none then `numpy`.
n_shots : int, optional
Number of shots, default is :code:`2**10`.
measure_all : bool, optional
Whether to measure all qubits, default is :code:`False`.
normalize : bool, optional
Whether to normalize the counts, default is :code:`True`.
post_select : bool, optional
Whether to perform post-selection, default is :code:`True`.
scale : bool, optional
Whether to scale the output, default is :code:`True`.
seed : int, optional
Seed to feed the backend, default is :code:`None`.
compilation : callable, optional
Compilation function to apply before getting counts.
Returns
-------
counts : dict
From bitstrings to counts.
Examples
--------
>>> from discopy.quantum import *
>>> circuit = H @ X >> CX >> Measure(2)
>>> from discopy.quantum.tk import mockBackend
>>> backend = mockBackend({(0, 1): 512, (1, 0): 512})
>>> circuit.get_counts(backend, n_shots=2**10)
{(0, 1): 0.5, (1, 0): 0.5}
"""
if len(others) == 1 and not isinstance(others[0], Circuit):
# This allows the syntax :code:`circuit.get_counts(backend)`
return self.get_counts(backend=others[0], **params)
if backend is None:
if others:
return [circuit.get_counts(**params)
for circuit in (self, ) + others]
utensor, counts = self.init_and_discard().eval(), dict()
for i in range(2**len(utensor.cod)):
bits = index2bitstring(i, len(utensor.cod))
if utensor.array[bits]:
counts[bits] = utensor.array[bits].real
return counts
counts = self.to_tk().get_counts(
*(other.to_tk() for other in others), backend=backend, **params)
return counts if len(counts) > 1 else counts[0]
    def measure(self, mixed=False):
        """
        Measures a circuit on the computational basis using :code:`numpy`.

        Parameters
        ----------
        mixed : bool, optional
            Whether to apply :class:`tensor.Functor` or :class:`cqmap.Functor`.

        Returns
        -------
        array : numpy.ndarray
        """
        from discopy.quantum.gates import Bra, Ket
        if mixed or self.is_mixed:
            # Mixed case: density-matrix evaluation already yields outcome
            # probabilities on the diagonal.
            return self.init_and_discard().eval(mixed=True).array.real
        # Pure case: prepare |0...0>, then weight each computational-basis
        # effect by its Born-rule probability.
        state = (Ket(*(len(self.dom) * [0])) >> self).eval()
        effects = [Bra(*index2bitstring(j, len(self.cod))).eval()
                   for j in range(2 ** len(self.cod))]
        array = Tensor.np.zeros(len(self.cod) * (2, )) + 0j
        for effect in effects:
            array +=\
                effect.array * Tensor.np.absolute((state >> effect).array) ** 2
        return array
def to_tn(self, mixed=False):
"""
Sends a diagram to a mixed :code:`tensornetwork`.
Parameters
----------
mixed : bool, default: False
Whether to perform mixed (also known as density matrix) evaluation
of the circuit.
Returns
-------
nodes : :class:`tensornetwork.Node`
Nodes of the network.
output_edge_order : list of :class:`tensornetwork.Edge`
Output edges of the network.
"""
if not mixed and not self.is_mixed:
return super().to_tn()
import tensornetwork as tn
from discopy.quantum import (
qubit, bit, ClassicalGate, Copy, Match, Discard, SWAP)
for box in self.boxes + [self]:
if set(box.dom @ box.cod) - set(bit @ qubit):
raise ValueError(
"Only circuits with qubits and bits are supported.")
# try to decompose some gates
diag = Id(self.dom)
last_i = 0
for i, box in enumerate(self.boxes):
if hasattr(box, '_decompose'):
decomp = box._decompose()
if box != decomp:
diag >>= self[last_i:i]
left, _, right = self.layers[i]
diag >>= Id(left) @ decomp @ Id(right)
last_i = i + 1
diag >>= self[last_i:]
self = diag
c_nodes = [tn.CopyNode(2, 2, f'c_input_{i}', dtype=complex)
for i in range(self.dom.count(bit))]
q_nodes1 = [tn.CopyNode(2, 2, f'q1_input_{i}', dtype=complex)
for i in range(self.dom.count(qubit))]
q_nodes2 = [tn.CopyNode(2, 2, f'q2_input_{i}', dtype=complex)
for i in range(self.dom.count(qubit))]
inputs = [n[0] for n in c_nodes + q_nodes1 + q_nodes2]
c_scan = [n[1] for n in c_nodes]
q_scan1 = [n[1] for n in q_nodes1]
q_scan2 = [n[1] for n in q_nodes2]
nodes = c_nodes + q_nodes1 + q_nodes2
for box, layer, offset in zip(self.boxes, self.layers, self.offsets):
if box == Circuit.swap(bit, bit):
left, _, _ = layer
c_offset = left.count(bit)
c_scan[c_offset], c_scan[c_offset + 1] =\
c_scan[c_offset + 1], c_scan[c_offset]
elif box.is_mixed or isinstance(box, ClassicalGate):
c_dom = box.dom.count(bit)
q_dom = box.dom.count(qubit)
c_cod = box.cod.count(bit)
left, _, _ = layer
c_offset = left.count(bit)
q_offset = left.count(qubit)
if isinstance(box, Discard):
assert box.n_qubits == 1
tn.connect(q_scan1[q_offset], q_scan2[q_offset])
del q_scan1[q_offset]
del q_scan2[q_offset]
continue
if isinstance(box, (Copy, Match, Measure, Encode)):
assert len(box.dom) == 1 or len(box.cod) == 1
node = tn.CopyNode(3, 2, 'cq_' + str(box), dtype=complex)
else:
# only unoptimised gate is MixedState()
array = box.eval(mixed=True).array
node = tn.Node(array + 0j, 'cq_' + str(box))
for i in range(c_dom):
tn.connect(c_scan[c_offset + i], node[i])
for i in range(q_dom):
tn.connect(q_scan1[q_offset + i], node[c_dom + i])
for i in range(q_dom):
tn.connect(q_scan2[q_offset + i], node[c_dom + q_dom + i])
cq_dom = c_dom + 2 * q_dom
c_edges = node[cq_dom:cq_dom + c_cod]
q_edges1 = node[cq_dom + c_cod::2]
q_edges2 = node[cq_dom + c_cod + 1::2]
c_scan = (c_scan[:c_offset] + c_edges
+ c_scan[c_offset + c_dom:])
q_scan1 = (q_scan1[:q_offset] + q_edges1
+ q_scan1[q_offset + q_dom:])
q_scan2 = (q_scan2[:q_offset] + q_edges2
+ q_scan2[q_offset + q_dom:])
nodes.append(node)
else:
left, _, _ = layer
q_offset = left[:offset + 1].count(qubit)
if box == SWAP:
q_scan1[q_offset], q_scan1[q_offset + 1] =\
q_scan1[q_offset + 1], q_scan1[q_offset]
q_scan2[q_offset], q_scan2[q_offset + 1] =\
q_scan2[q_offset + 1], q_scan2[q_offset]
continue
utensor = box.array
node1 = tn.Node(utensor.conjugate() + 0j, 'q1_' + str(box))
node2 = tn.Node(utensor + 0j, 'q2_' + str(box))
for i in range(len(box.dom)):
tn.connect(q_scan1[q_offset + i], node1[i])
tn.connect(q_scan2[q_offset + i], node2[i])
edges1 = node1[len(box.dom):]
edges2 = node2[len(box.dom):]
q_scan1 = (q_scan1[:q_offset] + edges1
+ q_scan1[q_offset + len(box.dom):])
q_scan2 = (q_scan2[:q_offset] + edges2
+ q_scan2[q_offset + len(box.dom):])
nodes.extend([node1, node2])
outputs = c_scan + q_scan1 + q_scan2
return nodes, inputs + outputs
def to_tk(self):
"""
Export to t|ket>.
Returns
-------
tk_circuit : pytket.Circuit
A :class:`pytket.Circuit`.
Note
----
* No measurements are performed.
* SWAP gates are treated as logical swaps.
* If the circuit contains scalars or a :class:`Bra`,
then :code:`tk_circuit` will hold attributes
:code:`post_selection` and :code:`scalar`.
Examples
--------
>>> from discopy.quantum import *
>>> bell_test = H @ Id(1) >> CX >> Measure() @ Measure()
>>> bell_test.to_tk()
tk.Circuit(2, 2).H(0).CX(0, 1).Measure(0, 0).Measure(1, 1)
>>> circuit0 = sqrt(2) @ H @ Rx(0.5) >> CX >> Measure() @ Discard()
>>> circuit0.to_tk()
tk.Circuit(2, 1).H(0).Rx(1.0, 1).CX(0, 1).Measure(0, 0).scale(2)
>>> circuit1 = Ket(1, 0) >> CX >> Id(1) @ Ket(0) @ Id(1)
>>> circuit1.to_tk()
tk.Circuit(3).X(0).CX(0, 2)
>>> circuit2 = X @ Id(2) >> Id(1) @ SWAP >> CX @ Id(1) >> Id(1) @ SWAP
>>> circuit2.to_tk()
tk.Circuit(3).X(0).CX(0, 2)
>>> circuit3 = Ket(0, 0)\\
... >> H @ Id(1)\\
... >> Id(1) @ X\\
... >> CX\\
... >> Id(1) @ Bra(0)
>>> print(repr(circuit3.to_tk()))
tk.Circuit(2, 1).H(0).X(1).CX(0, 1).Measure(1, 0).post_select({0: 0})
"""
# pylint: disable=import-outside-toplevel
from discopy.quantum.tk import to_tk
return to_tk(self)
@staticmethod
def from_tk(*tk_circuits):
"""
Translates a :class:`pytket.Circuit` into a :class:`Circuit`, or
a list of :class:`pytket` circuits into a :class:`Sum`.
Parameters
----------
tk_circuits : pytket.Circuit
potentially with :code:`scalar` and
:code:`post_selection` attributes.
Returns
-------
circuit : :class:`Circuit`
Such that :code:`Circuit.from_tk(circuit.to_tk()) == circuit`.
Note
----
* :meth:`Circuit.init_and_discard` is applied beforehand.
* SWAP gates are introduced when applying gates to non-adjacent qubits.
Examples
--------
>>> from discopy.quantum import *
>>> import pytket as tk
>>> c = Rz(0.5) @ Id(1) >> Id(1) @ Rx(0.25) >> CX
>>> assert Circuit.from_tk(c.to_tk()) == c.init_and_discard()
>>> tk_GHZ = tk.Circuit(3).H(1).CX(1, 2).CX(1, 0)
>>> pprint = lambda c: print(str(c).replace(' >>', '\\n >>'))
>>> pprint(Circuit.from_tk(tk_GHZ))
Ket(0)
>> Id(1) @ Ket(0)
>> Id(2) @ Ket(0)
>> Id(1) @ H @ Id(1)
>> Id(1) @ CX
>> SWAP @ Id(1)
>> CX @ Id(1)
>> SWAP @ Id(1)
>> Discard(qubit) @ Id(2)
>> Discard(qubit) @ Id(1)
>> Discard(qubit)
>>> circuit = Ket(1, 0) >> CX >> Id(1) @ Ket(0) @ Id(1)
>>> print(Circuit.from_tk(circuit.to_tk())[3:-3])
X @ Id(2) >> Id(1) @ SWAP >> CX @ Id(1) >> Id(1) @ SWAP
>>> bell_state = Circuit.caps(qubit, qubit)
>>> bell_effect = bell_state[::-1]
>>> circuit = bell_state @ Id(1) >> Id(1) @ bell_effect >> Bra(0)
>>> pprint(Circuit.from_tk(circuit.to_tk())[3:])
H @ Id(2)
>> CX @ Id(1)
>> Id(1) @ CX
>> Id(1) @ H @ Id(1)
>> Bra(0) @ Id(2)
>> Bra(0) @ Id(1)
>> Bra(0)
>> scalar(4)
"""
# pylint: disable=import-outside-toplevel
from discopy.quantum.tk import from_tk
if not tk_circuits:
return Sum([], qubit ** 0, qubit ** 0)
if len(tk_circuits) == 1:
return from_tk(tk_circuits[0])
return sum(Circuit.from_tk(c) for c in tk_circuits)
    def grad(self, var, **params):
        """
        Gradient with respect to :code:`var`.

        Parameters
        ----------
        var : sympy.Symbol
            Differentiated variable.

        Returns
        -------
        circuit : `discopy.quantum.circuit.Sum`

        Examples
        --------
        >>> from sympy.abc import phi
        >>> from discopy.quantum import *
        >>> circuit = Rz(phi / 2) @ Rz(phi + 1) >> CX
        >>> assert circuit.grad(phi, mixed=False)\\
        ...     == (Rz(phi / 2) @ scalar(pi) @ Rz(phi + 1.5) >> CX)\\
        ...     + (scalar(pi/2) @ Rz(phi/2 + .5) @ Rz(phi + 1) >> CX)
        """
        # Delegates to the product-rule implementation in tensor.Diagram.
        return super().grad(var, **params)
    def jacobian(self, variables, **params):
        """
        Jacobian with respect to :code:`variables`.

        Parameters
        ----------
        variables : List[sympy.Symbol]
            Differentiated variables.

        Returns
        -------
        circuit : `discopy.quantum.circuit.Sum`
            with :code:`circuit.dom == self.dom`
            and :code:`circuit.cod == Digit(len(variables)) @ self.cod`.

        Examples
        --------
        >>> from sympy.abc import x, y
        >>> from discopy.quantum.gates import Bits, Ket, Rx, Rz
        >>> circuit = Ket(0) >> Rx(x) >> Rz(y)
        >>> assert circuit.jacobian([x, y])\\
        ...     == (Bits(0) @ circuit.grad(x)) + (Bits(1) @ circuit.grad(y))
        >>> assert not circuit.jacobian([])
        >>> assert circuit.jacobian([x]) == circuit.grad(x)
        """
        if not variables:
            return Sum([], self.dom, self.cod)
        if len(variables) == 1:
            return self.grad(variables[0], **params)
        # Tag each partial derivative with the index of its variable,
        # encoded as a classical digit of dimension len(variables).
        from discopy.quantum.gates import Digits
        return sum(Digits(i, dim=len(variables)) @ self.grad(x, **params)
                   for i, x in enumerate(variables))
    def draw(self, **params):
        """ We draw the labels of a circuit whenever it's mixed. """
        # A caller-supplied draw_type_labels still wins via the **params merge.
        draw_type_labels = params.get('draw_type_labels') or self.is_mixed
        params = dict({'draw_type_labels': draw_type_labels}, **params)
        return super().draw(**params)
    @staticmethod
    def swap(left, right):
        """ Swap circuit between the given types. """
        return monoidal.Diagram.swap(
            left, right, ar_factory=Circuit, swap_factory=Swap)
    @staticmethod
    def permutation(perm, dom=None):
        """ Permutation circuit; the domain defaults to qubits. """
        if dom is None:
            dom = qubit ** len(perm)
        return monoidal.Diagram.permutation(perm, dom, ar_factory=Circuit)
    @staticmethod
    def cups(left, right):
        """ Nested cups: Bell effects on qubits, Match-then-discard on bits. """
        from discopy.quantum.gates import CX, H, sqrt, Bra, Match

        def cup_factory(left, right):
            if left == right == qubit:
                # Bell effect: CX then Hadamard, post-selected on <00|.
                return CX >> H @ sqrt(2) @ Id(1) >> Bra(0, 0)
            if left == right == bit:
                return Match() >> Discard(bit)
            raise ValueError
        return rigid.cups(
            left, right, ar_factory=Circuit, cup_factory=cup_factory)
    @staticmethod
    def caps(left, right):
        """ Nested caps (Bell states): the dagger of :meth:`Circuit.cups`. """
        return Circuit.cups(left, right).dagger()
    @staticmethod
    def spiders(n_legs_in, n_legs_out, dim):
        """ Spider circuits with the given numbers of legs, built from
        CX, H and Bra via a decomposition into binary spiders. """
        from discopy.quantum.gates import CX, H, Bra, sqrt
        t = rigid.Ty('PRO')
        if len(dim) == 0:
            return Id()

        def decomp_ar(spider):
            # Break an arbitrary spider into binary/unary spiders.
            return spider.decompose()

        def spider_ar(spider):
            # Map each small spider to a circuit; co-spiders are handled
            # by daggering the transposed case.
            dom, cod = len(spider.dom), len(spider.cod)
            if dom < cod:
                return spider_ar(spider.dagger()).dagger()
            circ = Id(qubit)
            if dom == 2:
                circ = CX >> Id(qubit) @ Bra(0)
            if cod == 0:
                circ >>= H >> Bra(0) @ sqrt(2)
            return circ

        diag = Diagram.spiders(n_legs_in, n_legs_out, t ** len(dim))
        decomp = monoidal.Functor(ob={t: t}, ar=decomp_ar)
        to_circ = monoidal.Functor(ob={t: qubit}, ar=spider_ar,
                                   ar_factory=Circuit, ob_factory=Ty)
        circ = to_circ(decomp(diag))
        return circ
    def _apply_gate(self, gate, position):
        """ Apply gate at position """
        # NOTE(review): Id(position) builds an identity on `position` qubits,
        # so this assumes the codomain wires are qubits -- confirm for bits.
        if position < 0 or position >= len(self.cod):
            raise ValueError(f'Index {position} out of range.')
        left = Id(position)
        right = Id(len(self.cod) - len(left.cod) - len(gate.cod))
        return self >> left @ gate @ right
    def _apply_controlled(self, base_gate, *xs):
        """ Apply `base_gate` controlled on wires xs[:-1] with target xs[-1]. """
        from discopy.quantum import Controlled
        if len(set(xs)) != len(xs):
            raise ValueError(f'Indices {xs} not unique.')
        if min(xs) < 0 or max(xs) >= len(self.cod):
            raise ValueError(f'Indices {xs} out of range.')
        # Split the control wires by their position relative to the target.
        before = sorted(filter(lambda x: x < xs[-1], xs[:-1]))
        after = sorted(filter(lambda x: x > xs[-1], xs[:-1]))
        gate = base_gate
        last_x = xs[-1]
        # Wrap controls below the target (positive distance), innermost first.
        for x in before[::-1]:
            gate = Controlled(gate, distance=last_x - x)
            last_x = x
        last_x = xs[-1]
        # Wrap controls above the target (negative distance).
        for x in after[::-1]:
            gate = Controlled(gate, distance=last_x - x)
            last_x = x
        return self._apply_gate(gate, min(xs))
    def H(self, x):
        """ Apply Hadamard gate to circuit. """
        from discopy.quantum import H
        return self._apply_gate(H, x)

    def S(self, x):
        """ Apply S gate to circuit. """
        from discopy.quantum import S
        return self._apply_gate(S, x)

    def X(self, x):
        """ Apply Pauli X gate to circuit. """
        from discopy.quantum import X
        return self._apply_gate(X, x)

    def Y(self, x):
        """ Apply Pauli Y gate to circuit. """
        from discopy.quantum import Y
        return self._apply_gate(Y, x)

    def Z(self, x):
        """ Apply Pauli Z gate to circuit. """
        from discopy.quantum import Z
        return self._apply_gate(Z, x)

    def Rx(self, phase, x):
        """ Apply Rx gate to circuit. """
        from discopy.quantum import Rx
        return self._apply_gate(Rx(phase), x)

    def Ry(self, phase, x):
        """ Apply Ry gate to circuit. """
        from discopy.quantum import Ry
        return self._apply_gate(Ry(phase), x)

    def Rz(self, phase, x):
        """ Apply Rz gate to circuit. """
        from discopy.quantum import Rz
        return self._apply_gate(Rz(phase), x)

    def CX(self, x, y):
        """ Apply Controlled X / CNOT gate to circuit. """
        from discopy.quantum import X
        return self._apply_controlled(X, x, y)

    def CY(self, x, y):
        """ Apply Controlled Y gate to circuit. """
        from discopy.quantum import Y
        return self._apply_controlled(Y, x, y)

    def CZ(self, x, y):
        """ Apply Controlled Z gate to circuit. """
        from discopy.quantum import Z
        return self._apply_controlled(Z, x, y)

    def CCX(self, x, y, z):
        """ Apply Controlled CX / Toffoli gate to circuit. """
        from discopy.quantum import X
        return self._apply_controlled(X, x, y, z)

    def CCZ(self, x, y, z):
        """ Apply Controlled CZ gate to circuit. """
        from discopy.quantum import Z
        return self._apply_controlled(Z, x, y, z)

    def CRx(self, phase, x, y):
        """ Apply Controlled Rx gate to circuit. """
        from discopy.quantum import Rx
        return self._apply_controlled(Rx(phase), x, y)

    def CRz(self, phase, x, y):
        """ Apply Controlled Rz gate to circuit. """
        from discopy.quantum import Rz
        return self._apply_controlled(Rz(phase), x, y)
class Id(rigid.Id, Circuit):
    """ Identity circuit. An integer argument is a number of qubits. """
    def __init__(self, dom=0):
        if isinstance(dom, int):
            dom = qubit ** dom
        # Remember whether the domain is all qubits so the repr can be the
        # short integer form, e.g. Id(3).
        self._qubit_only = all(x.name == "qubit" for x in dom)
        rigid.Id.__init__(self, dom)
        Circuit.__init__(self, dom, dom, [], [])

    def __repr__(self):
        return "Id({})".format(len(self.dom) if self._qubit_only else self.dom)

    def __str__(self):
        return repr(self)
Circuit.id = Id  # so that Circuit.id(dom) builds identity circuits
class Box(rigid.Box, Circuit):
    """
    Boxes in a circuit diagram.

    Parameters
    ----------
    name : any
    dom : discopy.quantum.circuit.Ty
    cod : discopy.quantum.circuit.Ty
    is_mixed : bool, optional
        Whether the box is mixed, default is :code:`True`.
    _dagger : bool, optional
        If set to :code:`None` then the box is self-adjoint.
    """
    def __init__(self, name, dom, cod,
                 is_mixed=True, data=None, _dagger=False, _conjugate=False):
        if dom and not isinstance(dom, Ty):
            raise TypeError(messages.type_err(Ty, dom))
        if cod and not isinstance(cod, Ty):
            raise TypeError(messages.type_err(Ty, cod))
        # Conjugation is encoded as a winding number of 1 on the rigid box.
        z = 1 if _conjugate else 0
        self._conjugate = _conjugate
        rigid.Box.__init__(
            self, name, dom, cod, data=data, _dagger=_dagger, _z=z)
        Circuit.__init__(self, dom, cod, [self], [0])
        if not is_mixed:
            # A pure box must live entirely on the classical side (Digits)
            # or entirely on the quantum side (Qudits).
            if all(isinstance(x, Digit) for x in dom @ cod):
                self.classical = True
            elif all(isinstance(x, Qudit) for x in dom @ cod):
                self.classical = False
            else:
                raise ValueError(
                    "dom and cod should be Digits only or Qudits only.")
        self._mixed = is_mixed

    def grad(self, var, **params):
        """ Gradient with respect to :code:`var`; zero for constant boxes. """
        if var not in self.free_symbols:
            return Sum([], self.dom, self.cod)
        raise NotImplementedError

    @property
    def is_mixed(self):
        """ Whether the box is mixed, see :meth:`Circuit.is_mixed`. """
        return self._mixed

    def __repr__(self):
        return self.name
class Sum(tensor.Sum, Box):
    """ Sums of circuits. """
    @staticmethod
    def upgrade(old):
        return Sum(old.terms, old.dom, old.cod)

    @property
    def is_mixed(self):
        """ A sum is mixed whenever any of its terms is. """
        return any(circuit.is_mixed for circuit in self.terms)

    def get_counts(self, backend=None, **params):
        """ Counts of the sum: term-wise counts, added per bitstring. """
        if not self.terms:
            return {}
        if len(self.terms) == 1:
            return self.terms[0].get_counts(backend=backend, **params)
        counts = Circuit.get_counts(*self.terms, backend=backend, **params)
        result = {}
        for circuit_counts in counts:
            for bitstring, count in circuit_counts.items():
                result[bitstring] = result.get(bitstring, 0) + count
        return result

    def eval(self, backend=None, mixed=False, **params):
        """ Evaluation of the sum: the sum of the terms' evaluations. """
        # Evaluate every term mixed if any term requires it, so the
        # results live in the same category and can be added.
        mixed = mixed or any(t.is_mixed for t in self.terms)
        if not self.terms:
            return 0
        if len(self.terms) == 1:
            return self.terms[0].eval(backend=backend, mixed=mixed, **params)
        return sum(
            Circuit.eval(*self.terms, backend=backend, mixed=mixed, **params))

    def grad(self, var, **params):
        """ Gradient of the sum: the sum of the terms' gradients. """
        return sum(circuit.grad(var, **params) for circuit in self.terms)

    def to_tk(self):
        """ One pytket circuit per term. """
        return [circuit.to_tk() for circuit in self.terms]
Circuit.sum = Sum  # so that sums of circuits upgrade to circuit.Sum
class Swap(rigid.Swap, Box):
    """ Implements swaps of circuit wires. """
    def __init__(self, left, right):
        rigid.Swap.__init__(self, left, right)
        # Swapping a bit with a qubit crosses the classical/quantum
        # boundary, hence is mixed.
        Box.__init__(
            self, self.name, self.dom, self.cod, is_mixed=left != right)

    def dagger(self):
        return Swap(self.right, self.left)

    def conjugate(self):
        return Swap(self.right, self.left)

    l = r = property(conjugate)

    def __repr__(self):
        # The qubit-qubit swap gets the short gate name "SWAP".
        return "SWAP"\
            if self.left == self.right == qubit else super().__repr__()

    def __str__(self):
        return repr(self)
class Discard(RealConjugate, Box):
    """ Discard n qubits. If :code:`dom == bit` then marginal distribution. """
    def __init__(self, dom=1):
        if isinstance(dom, int):
            dom = qubit ** dom
        super().__init__(
            "Discard({})".format(dom), dom, qubit ** 0, is_mixed=True)
        self.draw_as_discards = True
        # Number of discarded wires (bits or qubits).
        self.n_qubits = len(dom)

    def dagger(self):
        """ The dagger of discarding is the maximally-mixed state. """
        return MixedState(self.dom)

    def _decompose(self):
        # One single-wire discard per wire.
        return Id().tensor(*[Discard()] * self.n_qubits)
class MixedState(RealConjugate, Box):
    """
    Maximally-mixed state on n qubits.
    If :code:`cod == bit` then uniform distribution.
    """
    def __init__(self, cod=1):
        if isinstance(cod, int):
            cod = qubit ** cod
        super().__init__(
            "MixedState({})".format(cod), qubit ** 0, cod, is_mixed=True)
        self.drawing_name = "MixedState"
        if cod == bit:
            # The uniform bit distribution is drawn as an unlabelled spider.
            self.drawing_name = ""
            self.draw_as_spider, self.color = True, "black"

    def dagger(self):
        """ The dagger of the maximally-mixed state is discarding. """
        return Discard(self.cod)

    def _decompose(self):
        # One single-wire mixed state per wire.
        return Id().tensor(*[MixedState()] * len(self.cod))
class Measure(RealConjugate, Box):
    """
    Measure n qubits into n bits.

    Parameters
    ----------
    n_qubits : int
        Number of qubits to measure.
    destructive : bool, optional
        Whether to do a non-destructive measurement instead.
    override_bits : bool, optional
        Whether to override input bits, this is the standard behaviour of tket.
    """
    def __init__(self, n_qubits=1, destructive=True, override_bits=False):
        dom, cod = qubit ** n_qubits, bit ** n_qubits
        name = "Measure({})".format("" if n_qubits == 1 else n_qubits)
        if not destructive:
            # Non-destructive: the measured qubits are kept in the codomain.
            cod = qubit ** n_qubits @ cod
            name = name\
                .replace("()", "(1)").replace(')', ", destructive=False)")
        if override_bits:
            # tket-style: the bits to overwrite appear in the domain.
            dom = dom @ bit ** n_qubits
            name = name\
                .replace("()", "(1)").replace(')', ", override_bits=True)")
        super().__init__(name, dom, cod, is_mixed=True)
        self.destructive, self.override_bits = destructive, override_bits
        self.n_qubits = n_qubits
        self.draw_as_measures = True

    def dagger(self):
        """ The dagger of a measurement is the corresponding encoding. """
        return Encode(self.n_qubits,
                      constructive=self.destructive,
                      reset_bits=self.override_bits)

    def _decompose(self):
        # One single-qubit measurement per wire.
        return Id().tensor(*[
            Measure(destructive=self.destructive,
                    override_bits=self.override_bits)] * self.n_qubits)
class Encode(RealConjugate, Box):
    """
    Controlled preparation, i.e. encode n bits into n qubits.

    Parameters
    ----------
    n_bits : int
        Number of bits to encode.
    constructive : bool, optional
        Whether to do a classically-controlled correction instead.
    reset_bits : bool, optional
        Whether to reset the bits to the uniform distribution.
    """
    def __init__(self, n_bits=1, constructive=True, reset_bits=False):
        dom, cod = bit ** n_bits, qubit ** n_bits
        # Reuse Measure's naming logic, renaming the keywords accordingly.
        name = Measure(n_bits, constructive, reset_bits).name\
            .replace("Measure", "Encode")\
            .replace("destructive", "constructive")\
            .replace("override_bits", "reset_bits")
        super().__init__(name, dom, cod, is_mixed=True)
        self.constructive, self.reset_bits = constructive, reset_bits
        self.n_bits = n_bits

    def dagger(self):
        """ The dagger of an encoding is the corresponding measurement. """
        return Measure(self.n_bits,
                       destructive=self.constructive,
                       override_bits=self.reset_bits)

    def _decompose(self):
        # One single-bit encoding per wire.
        return Id().tensor(*[
            Encode(constructive=self.constructive,
                   reset_bits=self.reset_bits)] * self.n_bits)
class Functor(rigid.Functor):
    """ Functors into :class:`Circuit`. """
    def __init__(self, ob, ar):
        if isinstance(ob, Mapping):
            # Allow integers in the object mapping as shorthand for
            # that number of qubits.
            ob = {x: qubit ** y if isinstance(y, int) else y
                  for x, y in ob.items()}
        super().__init__(ob, ar, ob_factory=Ty, ar_factory=Circuit)

    def __repr__(self):
        return super().__repr__().replace("Functor", "circuit.Functor")
class IQPansatz(Circuit):
    """
    Builds an IQP ansatz on n qubits, if n = 1 returns an Euler decomposition

    >>> pprint = lambda c: print(str(c).replace(' >>', '\\n  >>'))
    >>> pprint(IQPansatz(3, [[0.1, 0.2], [0.3, 0.4]]))
    H @ Id(2)
      >> Id(1) @ H @ Id(1)
      >> Id(2) @ H
      >> CRz(0.1) @ Id(1)
      >> Id(1) @ CRz(0.2)
      >> H @ Id(2)
      >> Id(1) @ H @ Id(1)
      >> Id(2) @ H
      >> CRz(0.3) @ Id(1)
      >> Id(1) @ CRz(0.4)
    >>> print(IQPansatz(1, [0.3, 0.8, 0.4]))
    Rx(0.3) >> Rz(0.8) >> Rx(0.4)
    """
    def __init__(self, n_qubits, params):
        from discopy.quantum.gates import H, Rx, Rz, CRz

        def layer(thetas):
            # One IQP layer: a wall of Hadamards followed by a ladder of
            # nearest-neighbour CRz rotations.
            hadamards = Id(0).tensor(*(n_qubits * [H]))
            rotations = Id(n_qubits).then(*(
                Id(i) @ CRz(thetas[i]) @ Id(n_qubits - 2 - i)
                for i in range(n_qubits - 1)))
            return hadamards >> rotations
        if n_qubits == 1:
            # Single qubit: Euler decomposition Rx . Rz . Rx.
            circuit = Rx(params[0]) >> Rz(params[1]) >> Rx(params[2])
        elif len(Tensor.np.shape(params)) != 2\
                or Tensor.np.shape(params)[1] != n_qubits - 1:
            raise ValueError(
                "Expected params of shape (depth, {})".format(n_qubits - 1))
        else:
            depth = Tensor.np.shape(params)[0]
            circuit = Id(n_qubits).then(*(
                layer(params[i]) for i in range(depth)))
        super().__init__(
            circuit.dom, circuit.cod, circuit.boxes, circuit.offsets)
def real_amp_ansatz(params: Tensor.np.ndarray, *, entanglement='full'):
    """
    The real-amplitudes 2-local circuit. The shape of the params determines
    the number of layers and the number of qubits respectively (layers, qubit).
    This heuristic generates orthogonal operators so the imaginary part of the
    corresponding matrix is always the zero matrix.

    :param params: A 2D numpy array of parameters.
    :param entanglement: Configuration for the entanglement, currently either
        'full' (default), 'linear' or 'circular'.
    :raises ValueError: If ``entanglement`` is unknown or ``params`` is not 2D.
    """
    from discopy.quantum.gates import CX, Ry, rewire
    ext_cx = partial(rewire, CX)
    # Validate with explicit exceptions: the original used `assert`, which
    # silently disappears when Python runs with -O.
    if entanglement not in ('linear', 'circular', 'full'):
        raise ValueError(
            "entanglement must be 'linear', 'circular' or 'full', "
            "got {!r}".format(entanglement))
    params = Tensor.np.asarray(params)
    if params.ndim != 2:
        raise ValueError(
            "Expected a 2D (layers, qubits) array of parameters, "
            "got shape {}".format(params.shape))
    dom = qubit**params.shape[1]

    def layer(v, is_last=False):
        # One layer: a wall of Ry rotations, then the entangling CX pattern
        # (omitted on the final layer).
        n = len(dom)
        rys = Id(0).tensor(*(Ry(v[k]) for k in range(n)))
        if is_last:
            return rys
        if entanglement == 'full':
            # All-to-all CX ladder.
            cxs = [[ext_cx(k1, k2, dom=dom) for k2 in range(k1 + 1, n)] for
                   k1 in range(n - 1)]
            cxs = reduce(lambda a, b: a >> b, chain(*cxs))
        else:
            # Nearest-neighbour chain; 'circular' additionally closes the loop.
            cxs = [ext_cx(k, k + 1, dom=dom) for k in range(n - 1)]
            cxs = reduce(lambda a, b: a >> b, cxs)
            if entanglement == 'circular':
                cxs = ext_cx(n - 1, 0, dom=dom) >> cxs
        return rys >> cxs

    circuit = [layer(v, is_last=idx == (len(params) - 1)) for
               idx, v in enumerate(params)]
    circuit = reduce(lambda a, b: a >> b, circuit)
    return circuit
def random_tiling(n_qubits, depth=3, gateset=None, seed=None):
    """ Returns a random Euler decomposition if n_qubits == 1,
    otherwise returns a random tiling with the given depth and gateset.

    >>> from discopy.quantum.gates import CX, H, T, Rx, Rz
    >>> c = random_tiling(1, seed=420)
    >>> print(c)
    Rx(0.0263) >> Rz(0.781) >> Rx(0.273)
    >>> print(random_tiling(2, 2, gateset=[CX, H, T], seed=420))
    CX >> T @ Id(1) >> Id(1) @ T
    >>> print(random_tiling(3, 2, gateset=[CX, H, T], seed=420))
    CX @ Id(1) >> Id(2) @ T >> H @ Id(2) >> Id(1) @ H @ Id(1) >> Id(2) @ H
    >>> print(random_tiling(2, 1, gateset=[Rz, Rx], seed=420))
    Rz(0.673) @ Id(1) >> Id(1) @ Rx(0.273)
    """
    # NOTE(review): the doctest outputs above depend on the exact order of
    # random.* calls below — do not reorder the calls.
    from discopy.quantum.gates import H, CX, Rx, Rz, Parametrized
    gateset = gateset or [H, Rx, CX]
    if seed is not None:
        random.seed(seed)
    if n_qubits == 1:
        # Single qubit: random Euler decomposition Rx-Rz-Rx.
        phases = [random.random() for _ in range(3)]
        return Rx(phases[0]) >> Rz(phases[1]) >> Rx(phases[2])
    result = Id(n_qubits)
    for _ in range(depth):
        # Build one horizontal "line" of gates covering all qubits.
        line, n_affected = Id(0), 0
        while n_affected < n_qubits:
            # With only one qubit left, restrict to single-qubit gates.
            gate = random.choice(
                gateset if n_qubits - n_affected > 1 else [
                    g for g in gateset
                    if g is Rx or g is Rz or len(g.dom) == 1])
            # Parametrized gate classes need a random phase to instantiate.
            if isinstance(gate, type) and issubclass(gate, Parametrized):
                gate = gate(random.random())
            line = line @ gate
            n_affected += len(gate.dom)
        result = result >> line
    return result
| StarcoderdataPython |
3218631 | <gh_stars>1-10
#!/usr/bin/env python3
import os, sys
from lxml.etree import Element, ElementTree
class XmlBase (object):
    """Base class providing the XML namespace map shared by all parsers here."""

    # Prefix -> namespace URI, usable as the `namespaces` argument of
    # lxml/ElementTree find()/findall().
    nsmap = {
        'ds': 'http://schema.programmfabrik.de/database-schema/0.1',
        'es': 'http://schema.programmfabrik.de/easydb-database-schema/0.1',
        'em': 'http://schema.programmfabrik.de/easydb-mask-schema/0.1',
    }
class Searchable (XmlBase):
    """Parses the <em:search> switches of a mask element.

    Exposes three booleans plus `search_flags`, a compact string using
    E(xpert), F(acet) and V (fulltext).
    """

    def __init__(self, xml):
        def enabled(tag):
            node = xml.find('em:search/em:' + tag, self.nsmap)
            return node.attrib['enabled'] == '1'

        self.search_expert = enabled('expert')
        self.search_facet = enabled('facet')
        self.search_fulltext = enabled('fulltext')
        flags = ''
        if self.search_expert:
            flags += 'E'
        if self.search_facet:
            flags += 'F'
        if self.search_fulltext:
            flags += 'V'
        self.search_flags = flags
class Field (Searchable):
    """A plain mask field; its display name comes from `column-name-hint`."""

    def __init__(self, xml):
        super().__init__(xml)
        self.name = xml.attrib.get('column-name-hint')
class LinkedTable (Searchable):
    """A linked table within a mask; named after `other-table-hint`."""

    def __init__(self, xml):
        super().__init__(xml)
        self.name = xml.attrib.get('other-table-hint')
class ReverseLinkedTable (LinkedTable):
    """A reverse-linked table; behaves exactly like LinkedTable.

    The original defined an __init__ that only forwarded to super() with the
    same arguments — a redundant override, removed here.
    """
    pass
class Analyzer (XmlBase):
    """Walks a maskset XML file and prints, per mask, which fields and
    (reverse-)linked tables are searchable and with which flags."""

    @classmethod
    def analyze_masks(cls, maskxmlfile, mask_name):
        # Analyze a single named mask, or every mask when mask_name is None.
        # NOTE(review): writes to stderr and calls sys.exit on a missing mask,
        # so this is only suitable for CLI use.
        tree = ElementTree()
        tree.parse(maskxmlfile)
        root = tree.getroot()
        if mask_name is not None:
            mask = root.find("em:mask[@name='{0}']".format(mask_name), cls.nsmap)
            if mask is None:
                sys.stderr.write("failed to find mask '{0}'\n".format(mask_name))
                sys.exit(1)
            cls._analyze_mask(mask)
        else:
            for mask in root.findall('em:mask', cls.nsmap):
                cls._analyze_mask(mask)

    @classmethod
    def _analyze_mask(cls, mask, indent = ''):
        # Prefixes in the output: M: mask, R: reverse-linked table,
        # N: linked table, F: field.  Nested masks are recursed into with
        # one extra space of indentation.
        print("{0}M:{1}".format(indent, mask.get('name', '<unnamed>')))
        for rlinkedxml in mask.findall('em:fields/em:reverse-linked-table', cls.nsmap):
            rlinked = ReverseLinkedTable(rlinkedxml)
            if len(rlinked.search_flags):
                print("{0} R:{1} ({2})".format(indent, rlinked.name, rlinked.search_flags))
            maskxml = rlinkedxml.find('em:mask', cls.nsmap)
            if maskxml is not None:
                cls._analyze_mask(maskxml, indent + ' ')
        for linkedxml in mask.findall('em:fields/em:linked-table', cls.nsmap):
            linked = LinkedTable(linkedxml)
            if len(linked.search_flags):
                print("{0} N:{1} ({2})".format(indent, linked.name, linked.search_flags))
            maskxml = linkedxml.find('em:mask', cls.nsmap)
            if maskxml is not None:
                cls._analyze_mask(maskxml, indent + ' ')
        for fieldxml in mask.findall('em:fields/em:field', cls.nsmap):
            field = Field(fieldxml)
            if len(field.search_flags):
                print("{0} F:{1} ({2})".format(indent, field.name, field.search_flags))
if __name__ == '__main__':
    # CLI: analyze <maskset.xml>, optionally restricted to one mask name.
    if len(sys.argv) < 2:
        sys.stderr.write('usage: {0} <maskset.xml> [<mask name>]\n'.format(sys.argv[0]))
        sys.exit(1)
    if not os.path.isfile(sys.argv[1]):
        sys.stderr.write('failed to find {0}\n'.format(sys.argv[1]))
        sys.exit(1)
    # An empty mask-name argument is treated the same as no argument.
    mask_name = (sys.argv[2] or None) if len(sys.argv) > 2 else None
    Analyzer.analyze_masks(sys.argv[1], mask_name)
| StarcoderdataPython |
10332 | from ..le_apcf_command_pkt import LE_APCF_Command
from struct import pack, unpack
from enum import IntEnum
"""
This pare base on spec <<Android BT HCI Requirement for BLE feature>> v0.52
Advertisement Package Content filter
"""
class APCF_Service_Data(LE_APCF_Command):
    """APCF sub-command for filtering on advertisement Service Data.

    Command payload generation is not implemented yet (see TODO).
    """

    def __init__(self):
        # TODO generate cmd
        super().__init__()

    def __str__(self):
        # The original appended ''.join(['{}']).format("") — which always
        # evaluates to the empty string — so this is equivalent.
        return super().__str__()
1679339 | from pathlib import *
from winreg import *
import subprocess, time, os, re, sys
# Module-level state shared by the helpers below:
#   a          - last result code from sear() (1 = no match, 3 = match)
#   ot         - stdout (as str) of the last adb invocation
#   devic      - lists of device ids found by sear()
#   adbcommand - the adb command list currently being run
a, ot, devic, adbcommand = 0, '', [], []
exe = sys.executable
adbpath = 0     # set by spr(): Path to the emulator's adb executable
adbpathdir = 0  # set by spr(): directory containing adbpath
def spr(q):
    """Locate an Android emulator's adb executable via the Windows registry.

    q == 1 looks for BlueStacks (HD-Adb.exe); any other value looks for
    Nox (nox_adb.exe).  On success sets the module globals ``adbpath`` and
    ``adbpathdir``, chdirs into the adb directory and returns True.
    Returns False when the registry key, value or executable is missing.
    """
    global adbpath, adbpathdir
    flag = 0
    if q == 1:
        try:
            a = OpenKey(HKEY_LOCAL_MACHINE, 'SOFTWARE\\BlueStacks', 0, KEY_WOW64_64KEY | KEY_ALL_ACCESS)
        except OSError:  # WindowsError is an alias of OSError in Python 3
            return False
        try:
            m = 0
            while True:
                if EnumValue(a, m)[0] == 'InstallDir':
                    adbpath = Path(EnumValue(a, m)[1] + '\\HD-Adb.exe')
                    flag += 1
                    break
                m += 1
        except OSError:
            pass  # ran out of values without finding InstallDir
        finally:
            # Close the key we actually opened; the original called
            # CloseKey(HKEY_LOCAL_MACHINE) and leaked this handle.
            CloseKey(a)
        if flag == 0:
            return False
    else:
        try:
            a = OpenKey(HKEY_LOCAL_MACHINE, r'SOFTWARE\DuoDianOnline\SetupInfo', 0, KEY_WOW64_32KEY | KEY_ALL_ACCESS)
        except OSError:
            return False
        try:
            m = 0
            while True:
                if EnumValue(a, m)[0] == 'InstallPath':
                    adbpath = Path(EnumValue(a, m)[1] + '\\bin\\nox_adb.exe')
                    flag += 1
                    break
                m += 1
        except OSError:
            pass
        finally:
            CloseKey(a)
        if flag == 0:
            return False
    try:
        adbpathdir = Path(adbpath).parent
        if not adbpath.is_file():
            return False
    except Exception:  # narrowed from a bare except
        return False
    os.chdir(adbpathdir)
    return True
def sear(pr, em):
    """Scan adb output `pr` for device ids.

    em == 1 matches `emulator-NNNN` ids, em == 2 matches `ip:port` ids.
    On a match the list of matches is appended to the module-level ``devic``
    and the module-level ``a`` is set to 3; otherwise ``a`` is set to 1.
    Returns the resulting value of ``a``.
    """
    global a
    a = 1
    if em == 1:
        # Raw string avoids the invalid-escape warning; run findall once
        # instead of twice as the original did.
        found = re.findall(r'emulator-[0-9]{1,6}', pr)
        if found:
            devic.append(found)
            a = 3
            return a
    if em == 2:
        found = re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:[0-9]{1,6}', pr)
        if found:
            devic.append(found)
            a = 3
            return a
    return a
def adbad(q, pu):
    """Run command list `pu` q times, storing its stdout (as str) in global ot.

    When run more than once, sleeps one second between invocations.
    """
    global ot
    for _ in range(q):
        proc = subprocess.Popen(pu, stdout=subprocess.PIPE)
        out, _err = proc.communicate()
        ot = str(out)
        if q > 1:
            time.sleep(1)
def comanda(q, pu):
    """Build an adb command from the argument string `pu` and run it q times.

    The command list is the words of `pu` with the adb executable prepended,
    stored in the module-level ``adbcommand`` and executed via adbad().
    """
    global adbcommand
    # Clear the existing list first so any外 aliases see it emptied, then
    # rebind to the freshly built command (matches the original behaviour).
    adbcommand.clear()
    adbcommand = pu.split()
    adbcommand.insert(0, adbpath)
    adbad(q, adbcommand)
def main(q):
    """Run `adb devices` and parse its output for connected device ids.

    q is forwarded to sear(): 1 matches emulator-style ids, 2 matches
    ip:port ids.  Results accumulate in the module globals devic / a.
    """
    global adbcommand
    adbcommand.clear()
    adbcommand = [adbpath, 'devices']
    # Run three times with a pause — adb's device list can lag right after
    # the server starts.
    adbad(3, adbcommand)
    time.sleep(0.5)
    sear(ot, q)
# Smoke output so the user can see the module finished loading.
# (Dead commented-out debug calls that used to live here were removed.)
print('yes')
| StarcoderdataPython |
3283571 | <gh_stars>10-100
#!/usr/bin/env python
from __future__ import print_function
from scp import SCPClient
import argparse
import getpass
import inspect
import os
import paramiko
import errno
def parse_args():
    """Parse CLI arguments; prompt for the password when it was not given."""
    parser = argparse.ArgumentParser(
        description='Generate pyaci meta from APIC')
    parser.add_argument('host', nargs=1, help='hostname of APIC')
    parser.add_argument('-P', '--port', type=int, default=22,
                        help='SSH port of APIC')
    parser.add_argument('-u', '--user', type=str, default='admin',
                        help='authentication username')
    parser.add_argument('-p', '--password', type=str,
                        help='authentication password')
    parser.add_argument('-d', '--default', action='store_true',
                        help='set as default meta')
    args = parser.parse_args()
    if args.password is None:
        prompt = 'Enter {} password for {}: '.format(args.user, args.host[0])
        args.password = getpass.getpass(prompt)
    return args
def main():
    """SSH to an APIC, run metagen.py there, and fetch the generated meta.

    Side effects: writes ~/.aci-meta/aci-meta.<version>.json and possibly
    (re)points the ~/.aci-meta/aci-meta.json symlink at it.
    """
    args = parse_args()
    ssh = paramiko.SSHClient()
    # NOTE(review): auto-adding unknown host keys skips host verification.
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(args.host[0], port=args.port, username=args.user,
                password=args.password, allow_agent=False,
                look_for_keys=False)
    # Determine the APIC software version, e.g. "4.2(3l)".
    stdin, stdout, stderr = ssh.exec_command('acidiag version')
    version = ''.join(stdout.readlines()).strip()
    vlist = version.split('.')
    version = '{}.{}({})'.format(vlist[0], vlist[1], '.'.join(vlist[2:]))
    print('APIC is running version', version)
    print('Copying metagen.py to APIC')
    scp = SCPClient(ssh.get_transport())
    # metagen.py lives next to this script.
    filename = inspect.getframeinfo(inspect.currentframe()).filename
    script_dir = os.path.dirname(os.path.abspath(filename))
    metagen_path = os.path.join(script_dir, 'metagen.py')
    scp.put(metagen_path, '/tmp/metagen.py')
    print('Invoking metagen.py on APIC')
    stdin, stdout, stderr = ssh.exec_command('python2.7 /tmp/metagen.py')
    # Drain stdout so the remote command completes before we copy its output.
    ''.join(stdout.readlines()).strip()
    # TODO (2015-09-14, <NAME>): Check the exit status properly.
    # Create ~/.aci-meta if it does not exist.
    aci_meta_dir = '~/.aci-meta'
    destination_dir = os.path.expanduser('{}'.format(aci_meta_dir))
    try:
        os.makedirs(destination_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    destination = os.path.expanduser(
        '{}/aci-meta.{}.json'.format(aci_meta_dir, version))
    print('Copying generated meta from APIC to', destination)
    scp.get('aci-meta.json', destination)
    # Maintain the "default meta" symlink: create it when missing, replace
    # it when --default was requested.
    default = os.path.expanduser('{}/aci-meta.json'.format(aci_meta_dir))
    if not os.path.isfile(default):
        print('No default meta exist. '
              'Setting the current meta as the default.')
        should_link = True
    else:
        if args.default:
            print('Forcing the current meta as the default.')
            os.unlink(default)
            should_link = True
        else:
            should_link = False
    if should_link:
        os.symlink(destination, default)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3275131 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Rewrite exceptions that are thrown and caught locally to jumps.
"""
from flypy.compiler import excmodel
from pykit.analysis import cfa
from pykit.optimizations import local_exceptions
def rewrite_local_exceptions(func, env):
    """
    Rewrite exc_throw(exc) -> jump(handler_block) for statically determined
    exceptions.
    """
    # Delegates to pykit's local-exception pass, using flypy's exception
    # model to decide which throws can be resolved statically.
    local_exceptions.run(func, env, exc_model=excmodel.ExcModel(env))
def rewrite_exceptions(func, env):
    """Remove exc_catch/exc_setup ops left after local rewriting and repair
    the phi nodes of the affected blocks.

    Raises NotImplementedError if a dynamic exc_throw is still present.
    """
    blocks = set()
    for op in func.ops:
        if op.opcode == 'exc_throw':
            raise NotImplementedError("Exception throwing", op, func)
        if op.opcode in ('exc_catch', 'exc_setup'):
            # Remember the block: its phi nodes may now reference
            # predecessors that no longer exist.
            blocks.add(op.block)
            op.delete()
    update_outdated_incoming_blocks(func, blocks)
def update_outdated_incoming_blocks(func, candidates):
    """
    Update phi nodes in blocks previously containing 'exc_catch'. 'exc_setup'
    may span many blocks, and none, or only a subset of those blocks may be
    actual predecessors.
    """
    cfg = cfa.cfg(func)
    for block in candidates:
        preds = cfg.predecessors(block)
        for op in block.leaders:
            if op.opcode == 'phi':
                blocks, values = op.args
                # Keep only (block, value) pairs whose block is still a real
                # CFG predecessor after the exception ops were removed.
                newblocks = [block for block in blocks if block in preds]
                newvalues = [val for block, val in zip(blocks, values)
                             if block in preds]
                # Every remaining predecessor must still be covered.
                assert len(newblocks) == len(preds), (op.block, newblocks,
                                                      preds, blocks)
                op.set_args([newblocks, newvalues])
1694509 | <gh_stars>10-100
# IRC numeric replies (RFC 2812, section 5).

# Connection registration (001-005).
RPL_WELCOME = 1
RPL_YOURHOST = 2
RPL_CREATED = 3
RPL_MYINFO = 4
RPL_BOUNCE = 5

# Command responses (3xx and friends).
RPL_USERHOST = 302
RPL_ISON = 303
RPL_AWAY = 301
RPL_UNAWAY = 305
RPL_NOWAWAY = 306
RPL_WHOISUSER = 311
RPL_WHOISSERVER = 312
RPL_WHOISOPERATOR = 313
RPL_WHOISSIDLE = 317  # NOTE(review): RFC 2812 calls 317 RPL_WHOISIDLE
RPL_ENDOFWHOIS = 318
RPL_WHOISCHANNELS = 319
RPL_WHOWASUSER = 314
RPL_ENDOFWHOWAS = 369
RPL_LIST = 322
RPL_LISTEND = 323
RPL_UNIQOPIS = 325
RPL_CHANNELMODEIS = 324
RPL_NOTOPIC = 331
RPL_TOPIC = 332
RPL_INVITING = 341
RPL_SUMMONING = 342
RPL_INVITELIST = 346
RPL_ENDOFINVITELIST = 347
RPL_EXCEPTLIST = 348
RPL_ENDOFEXCEPTLIST = 349
RPL_VERSION = 351
RPL_WHOREPLY = 352
RPL_ENDOFWHO = 315
RPL_NAMREPLY = 353
RPL_ENDOFNAMES = 366
RPL_LINKS = 364
RPL_ENDOFLINKS = 365
RPL_BANLIST = 367
RPL_ENDOFBANLIST = 368
RPL_INFO = 371
RPL_ENDOFINFO = 374
RPL_MOTDSTART = 375
RPL_MOTD = 372
RPL_ENDOFMOTD = 376
RPL_YOUREOPER = 381
RPL_REHASHING = 382
RPL_YOURESERVICE = 383
RPL_TIME = 391
RPL_USERSSTART = 392
RPL_USERS = 393
RPL_ENDOFUSERS = 394
RPL_NOUSERS = 395

# TRACE replies (2xx).
RPL_TRACELINK = 200
RPL_TRACECONNECTING = 201
RPL_TRACEHANDSHAKE = 202
RPL_TRACEUNKNOWN = 203
RPL_TRACEOPERATOR = 204
RPL_TRACEUSER = 205
RPL_TRACESERVER = 206
RPL_TRACESERVICE = 207
RPL_TRACENEWTYPE = 208
RPL_TRACECLASS = 209
RPL_TRACELOG = 261
RPL_TRACEEND = 262

# STATS / LUSERS / ADMIN replies.
RPL_STATSLINKINFO = 211
RPL_STATSCOMMANDS = 212
RPL_ENDOFSTATS = 219
RPL_STATSUPTIME = 242
RPL_STATSOLINE = 243
RPL_UMODEIS = 221
RPL_SERVLIST = 234
RPL_SERVLISTEND = 235
RPL_LUSERCLIENT = 251
RPL_LUSEROP = 252
RPL_LUSERUNKNOWN = 253
RPL_LUSERCHANNELS = 254
RPL_LUSERME = 255
RPL_ADMINME = 256
RPL_ADMINLOC1 = 257
RPL_ADMINLOC2 = 258
RPL_ADMINEMAIL = 259
RPL_TRYAGAIN = 263

# Error replies (4xx-5xx).
ERR_NOSUCHNICK = 401
ERR_NOSUCHSERVER = 402
ERR_NOSUCHCHANNEL = 403
ERR_CANNOTSENDTOCHAN = 404
ERR_TOOMANYCHANNELS = 405
ERR_WASNOSUCHNICK = 406
ERR_TOOMANYTARGETS = 407
ERR_NOSUCHSERVICE = 408
ERR_NOORIGIN = 409
ERR_NORECIPIENT = 410
ERR_NOTEXTTOSEND = 412
ERR_NOTOPLEVEL = 413
ERR_WILDTOPLEVEL = 414
ERR_BADMASK = 415
ERR_UNKNOWNCOMMAND = 421
ERR_NOMOTD = 422
ERR_NOADMININFO = 423
ERR_FILEERROR = 424
ERR_NONICKNAMEGIVEN = 431
ERR_ERRONEUSNICKNAME = 432
ERR_NICKNAMEINUSE = 433
ERR_NICKCOLLISION = 436
ERR_UNAVAILRESOURCE = 437
ERR_USERNOTINCHANNEL = 441
ERR_NOTONCHANNEL = 442
ERR_USERONCHANNEL = 443
ERR_NOLOGIN = 444
ERR_SUMMONDISABLED = 445
ERR_USERSDISABLED = 446
ERR_NOTREGISTERED = 451
ERR_NEEDMOREPARAMS = 461
ERR_ALREADYREGISTRED = 462
ERR_NOPERMFORHOST = 463
ERR_PASSWDMISMATCH = 464
ERR_YOUREBANNEDCREEP = 465
ERR_YOUWILLBEBANNED = 466
ERR_KEYSET = 467
ERR_CHANNELISFULL = 471
ERR_UNKNOWNMODE = 472
ERR_INVITEONLYCHAN = 473
ERR_BANNEDFROMCHAN = 474
ERR_BADCHANNELKEY = 475
ERR_BADCHANMASK = 476
ERR_NOCHANMODES = 477
ERR_BANLISTFULL = 478
ERR_NOPRIVILEGES = 481
ERR_CHANOPRIVSNEEDED = 482
ERR_CANTKILLSERVER = 483
ERR_RESTRICTED = 484
ERR_UNIQOPPRIVSNEEDED = 485
ERR_NOOPERHOST = 491
ERR_UMODEUNKNOWNFLAG = 501
ERR_USERSDONTMATCH = 502

# Reverse lookup: numeric code -> constant name.
reply_names = {value: key for key, value in locals().items() if isinstance(value, int)}
15707 | <reponame>jama5262/Politico
import unittest
import json
from app import createApp
from app.api.database.migrations.migrations import migrate
class TestParties(unittest.TestCase):
    """Integration tests for the /api/v2/parties endpoints."""

    def setUp(self):
        """Create a test client and the request payload fixtures."""
        self.app = createApp("testing")
        self.client = self.app.test_client()
        self.endpoint = "/api/v2/parties"
        self.partyID = 3
        self.data = {
            "name": "Party Name",
            "abbr": "Party Abbreviation",
            "logo_url": "http://logo/url",
            "hq_address": "Party HQ"
        }
        self.dataUpdate = {
            "name": "Updated Party Name",
            "abbr": "Updated Party Abbreviation",
            "logo_url": "http://logo/url",
            "hq_address": "Updated Party HQ"
        }
        self.dataNoNameProperty = {
            "abbr": "Updated Party Abbreviation",
            "logo_url": "http://logo/url",
            "hq_address": "Updated Party HQ"
        }
        self.dataEmptyValues = {
            "name": "",
            "abbr": "",
            "logo_url": "",
            "hq_address": ""
        }
        self.loginData = {
            "email": "<EMAIL>",
            "password": "<PASSWORD>"
        }

    def tearDown(self):
        """Reset the database between tests."""
        migrate()

    def loginUser(self):
        """Log in and return an Authorization header with a bearer token."""
        response = self.client.post(path="/api/v2/auth/login", data=json.dumps(self.loginData), content_type='application/json')
        token = response.json["data"]["token"]
        return {
            "Authorization": "Bearer " + token
        }

    # Thin authenticated wrappers around the Flask test client.
    def post(self, path, data):
        return self.client.post(path=path, data=json.dumps(data), content_type='application/json', headers=self.loginUser())

    def get(self, path):
        return self.client.get(path=path, content_type='application/json', headers=self.loginUser())

    def patch(self, path, data):
        return self.client.patch(path=path, data=json.dumps(data), content_type='application/json', headers=self.loginUser())

    def delete(self, path):
        return self.client.delete(path=path, content_type='application/json', headers=self.loginUser())

    def test_create_party(self):
        response = self.post(self.endpoint, self.data)
        self.assertEqual(response.status_code, 200, response)

    def test_get_all_parties(self):
        response = self.get(self.endpoint)
        self.assertEqual(response.status_code, 200)

    def test_get_specific_party(self):
        # Create a party first so the lookup can succeed.
        self.post(self.endpoint, self.data)
        response = self.get(self.endpoint + "/" + str(self.partyID))
        self.assertEqual(response.status_code, 200)

    def test_get_specific_party_not_found(self):
        response = self.get(self.endpoint + "/2000")
        self.assertEqual(response.status_code, 404)

    def test_edit_specific_party(self):
        self.post(self.endpoint, self.data)
        response = self.patch(self.endpoint + "/" + str(self.partyID), self.dataUpdate)
        self.assertEqual(response.status_code, 200)

    def test_edit_specific_party_not_found(self):
        response = self.patch(self.endpoint + "/2000", self.dataUpdate)
        self.assertEqual(response.status_code, 404)

    def test_delete_specific_party(self):
        self.post(self.endpoint, self.data)
        response = self.delete(self.endpoint + "/" + str(self.partyID))
        self.assertEqual(response.status_code, 200)

    def test_delete_specific_party_not_found(self):
        response = self.delete(self.endpoint + "/2000")
        self.assertEqual(response.status_code, 404)

    def test_with_empty_values(self):
        response = self.post(self.endpoint, self.dataEmptyValues)
        self.assertEqual(response.status_code, 400)

    def test_with_no_name_property(self):
        response = self.post(self.endpoint, self.dataNoNameProperty)
        self.assertEqual(response.status_code, 400)
| StarcoderdataPython |
3223944 | <filename>checkpoint.py
import os
import torch
def save_checkpoint(epoch, step, model, optimizer, save_path):
    """Serialize training state (epoch, step, model and optimizer) to disk.

    Args:
        epoch (Int): Current epoch when model is being saved
        step (Int): Mini-batch iteration count when model is being saved
        model (Object): Current copy of model
        optimizer (Object): Optimizer object
        save_path (String): Full directory path to results folder

    Return:
        None
    """
    torch.save(
        {
            'epoch': epoch,
            'step': step,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        },
        save_path,
    )
def load_checkpoint(name, key_name='state_dict', map_location=None):
    """Load a checkpoint pickle file and return one entry from it.

    Args:
        name (String): Full path, including pickle file name, to load
        key_name (String): Key name to return from saved pickle file
        map_location: Optional device remapping forwarded to ``torch.load``
            (e.g. ``'cpu'`` to load GPU-saved checkpoints on a CPU-only
            machine).  Defaults to None, preserving the old behaviour.

    Return:
        The value stored under ``key_name``; when the key is absent the
        whole checkpoint dict is returned (backward-compatible fallback).
    """
    checkpoint = torch.load(name, map_location=map_location)
    if key_name not in checkpoint:
        return checkpoint
    return checkpoint[key_name]
| StarcoderdataPython |
3313066 | <reponame>cesarin1981/ProjectBuscaAyuda
from src.projectbuscaayuda.modelo.persona import Persona
from src.projectbuscaayuda.modelo.servicio import Servicio, Categoria_Servicio
from src.projectbuscaayuda.modelo.declarative_base import Session, engine, Base
# Ad-hoc maintenance script: delete the Persona row with primary key 1.
if __name__ == '__main__':
    session = Session()
    persona1 = session.query(Persona).get(1)
    session.delete(persona1)
    session.commit()
    session.close()
1678489 | from django.contrib import admin
from django.contrib.auth.models import Permission
# Register your models here.
from nomenclatoare import models
from guardian.admin import GuardedModelAdmin
from simple_history.admin import SimpleHistoryAdmin
class HistoryChangedFields(object):
    """Mixin for SimpleHistoryAdmin: adds a column listing the fields that
    changed in each history record."""

    history_list_display = ["changed_fields"]

    def changed_fields(self, obj):
        """Names of fields differing from the previous history record,
        or None when there is no previous record."""
        previous = obj.prev_record
        if not previous:
            return None
        return obj.diff_against(previous).changed_fields
@admin.register(models.Localitate)
class LocalitateAdmin(HistoryChangedFields, SimpleHistoryAdmin):
    """Admin for localities; shows the number of attached churches."""
    list_display = ['nume', 'nr_biserici']
    search_fields = ["nume"]
    list_filter = ["judet"]

    def nr_biserici(self, obj):
        # 'N.A.' when the reverse relation is unavailable.  Narrowed from
        # a bare except, which also swallowed KeyboardInterrupt/SystemExit.
        try:
            return obj.biserici.count()
        except Exception:
            return 'N.A.'


@admin.register(models.Judet)
class JudetAdmin(HistoryChangedFields, SimpleHistoryAdmin):
    """Admin for counties; shows the county code and church count."""
    list_display = ['nume', 'cod', 'nr_biserici']
    search_fields = ["nume"]

    def nr_biserici(self, obj):
        try:
            return obj.biserici.count()
        except Exception:
            return 'N.A.'


@admin.register(models.StatutBiserica)
class StatutBisericaAdmin(HistoryChangedFields, SimpleHistoryAdmin):
    """Admin for church statuses."""
    list_display = ['nume', 'nr_biserici']
    search_fields = ["nume"]

    def nr_biserici(self, obj):
        try:
            return obj.biserici.count()
        except Exception:
            return 'N.A.'


@admin.register(models.CultBiserica)
class CultBisericaAdmin(HistoryChangedFields, SimpleHistoryAdmin):
    """Admin for church denominations."""
    list_display = ['nume', 'nr_biserici']
    search_fields = ["nume"]

    def nr_biserici(self, obj):
        try:
            return obj.biserici.count()
        except Exception:
            return 'N.A.'


@admin.register(models.UtilizareBiserica)
class UtilizareBisericaAdmin(HistoryChangedFields, SimpleHistoryAdmin):
    """Admin for church usage types."""
    list_display = ['nume', 'nr_biserici']
    search_fields = ["nume"]

    def nr_biserici(self, obj):
        try:
            return obj.biserici.count()
        except Exception:
            return 'N.A.'


@admin.register(models.SingularitateBiserica)
class SingularitateBisericaAdmin(HistoryChangedFields, SimpleHistoryAdmin):
    """Admin for church singularity categories."""
    list_display = ['nume', 'nr_biserici']
    search_fields = ["nume"]

    def nr_biserici(self, obj):
        try:
            return obj.biserici.count()
        except Exception:
            return 'N.A.'


@admin.register(models.RegimProprietate)
class RegimProprietateAdmin(HistoryChangedFields, SimpleHistoryAdmin):
    """Admin for property regimes."""
    list_display = ['nume', 'nr_biserici']
    search_fields = ["nume"]

    def nr_biserici(self, obj):
        try:
            return obj.biserici.count()
        except Exception:
            return 'N.A.'
@admin.register(models.FunctiuneBiserica)
class FunctiuneBisericaAdmin(HistoryChangedFields, SimpleHistoryAdmin):
    """Admin for church functions; counts current and initial churches."""
    list_display = ['nume', 'nr_biserici', 'nr_biserici_initiale']
    search_fields = ["nume"]

    def nr_biserici(self, obj):
        # Narrowed from a bare except.
        try:
            return obj.biserici.count()
        except Exception:
            return 'N.A.'

    def nr_biserici_initiale(self, obj):
        # Churches whose *initial* function was this one.
        return obj.biserici_initiale.count()
@admin.register(models.SursaDatare)
class SursaDatareAdmin(HistoryChangedFields, SimpleHistoryAdmin):
    """Admin for dating sources."""
    list_display = ['nume', 'nr_biserici']
    search_fields = ["nume"]

    def nr_biserici(self, obj):
        # Narrowed from a bare except.
        try:
            return obj.biserici.count()
        except Exception:
            return 'N.A.'


@admin.register(models.Secol)
class SecolAdmin(HistoryChangedFields, SimpleHistoryAdmin):
    """Admin for centuries."""
    list_display = ['nume', 'nr_biserici']
    search_fields = ["nume"]

    def nr_biserici(self, obj):
        try:
            return obj.biserici.count()
        except Exception:
            return 'N.A.'


@admin.register(models.StudiuDendocronologic)
class StudiuDendocronologicAdmin(HistoryChangedFields, SimpleHistoryAdmin):
    """Admin for dendrochronological studies (name plus attached file)."""
    list_display = ['nume', 'fisier']
    search_fields = ["nume"]


@admin.register(models.Persoana)
class PersoanaAdmin(HistoryChangedFields, SimpleHistoryAdmin):
    """Admin for persons."""
    list_display = ['nume']
    search_fields = ["nume"]


@admin.register(models.Eveniment)
class EvenimentAdmin(HistoryChangedFields, SimpleHistoryAdmin):
    """Admin for events."""
    list_display = ['nume']
    search_fields = ["nume"]


@admin.register(models.Studiu)
class StudiuAdmin(HistoryChangedFields, SimpleHistoryAdmin):
    """Admin for studies."""
    list_display = ['nume']
    search_fields = ["nume"]
# Inline editors for the Biserica admin page: each exposes one related
# table as stacked rows with one blank extra form.
class CtitorBisericaInline(admin.StackedInline):
    model = models.CtitorBiserica
    verbose_name = "Ctitor"
    verbose_name_plural = "Ctitori"
    extra = 1


class MesterBisericaInline(admin.StackedInline):
    model = models.MesterBiserica
    verbose_name = "Meșter"
    verbose_name_plural = "Meșteri"
    extra = 1


class ZugravBisericaInline(admin.StackedInline):
    model = models.ZugravBiserica
    verbose_name = "Zugrav"
    verbose_name_plural = "Zugravi"
    extra = 1


class PersonalitateBisericaInline(admin.StackedInline):
    model = models.PersonalitateBiserica
    verbose_name = "Personalitate"
    verbose_name_plural = "Personalități"
    extra = 1


class EvenimentBisericaInline(admin.StackedInline):
    model = models.EvenimentBiserica
    verbose_name = "Eveniment"
    verbose_name_plural = "Evenimente"
    extra = 1


class MutareBisericaInline(admin.StackedInline):
    model = models.MutareBiserica
    verbose_name = "Mutare biserică"
    verbose_name_plural = "Mutări biserică"
    extra = 1


class StudiuIstoricInline(admin.StackedInline):
    model = models.StudiuIstoric
    verbose_name = "Studiu istoric"
    verbose_name_plural = "Studii istorice"
    extra = 1
class NrBisericiMixin:
    """Shared 'nr_biserici' admin column.

    Counts the churches linked to a nomenclature record via the ``biserici``
    reverse relation, falling back to 'N.A.' when the relation is missing.
    Replaces 32 identical per-class copies, each of which used a bare
    ``except:`` (which also swallowed KeyboardInterrupt/SystemExit).
    """

    def nr_biserici(self, obj):
        try:
            return obj.biserici.count()
        except Exception:
            return 'N.A.'


@admin.register(models.AmplasamentBiserica)
class AmplasamentBisericaAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.TopografieBiserica)
class TopografieBisericaAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.RelatieCimitir)
class RelatieCimitirAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.PeisagisticaSit)
class PeisagisticaSitAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.ElementAnsambluConstruit)
class ElementAnsambluConstruitAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.ElementImportant)
class ElementImportantAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.Planimetrie)
class PlanimetrieAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.Material)
class MaterialAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.DimensiuneTurn)
class DimensiuneTurnAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.TipTurn)
class TipTurnAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.DecorTurn)
class DecorTurnAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.PlanTurn)
class PlanTurnAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.AmplasareTurn)
class AmplasareTurnAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.GalerieTurn)
class GalerieTurnAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.TipSarpanta)
class TipSarpantaAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.FinisajExterior)
class FinisajExteriorAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.TipBatereSindrila)
class TipBatereSindrilaAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.TipPrindereSindrila)
class TipPrindereSindrilaAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.TipBotSindrila)
class TipBotSindrilaAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.TipPrelucrareSindrila)
class TipPrelucrareSindrilaAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.EsentaLemnoasa)
class EsentaLemnoasaAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.ElementInteriorBiserica)
class ElementInteriorBisericaAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.MaterialFinisajPardoseli)
class MaterialFinisajPardoseliAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.MaterialFinisajPeretiInteriori)
class MaterialFinisajPeretiInterioriAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.Finisaj)
class FinisajAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.TipFundatie)
class TipFundatieAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.TipStructuraCheotoare)
class TipStructuraCheotoareAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.TipStructuraCatei)
class TipStructuraCateiAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.LocalizarePictura)
class LocalizarePicturaAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.TehnicaPictura)
class TehnicaPicturaAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.RegistruIconostas)
class RegistruIconostasAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]


@admin.register(models.TipUsiIconostas)
class TipUsiIconostasAdmin(NrBisericiMixin, HistoryChangedFields, SimpleHistoryAdmin):
    list_display = ["nume", "nr_biserici"]
    search_fields = ["nume"]
@admin.register(models.SuportPictura)
class SuportPicturaAdmin(HistoryChangedFields, SimpleHistoryAdmin):
list_display = ["nume", "nr_biserici"]
search_fields = ["nume"]
def nr_biserici(self, obj):
try:
return obj.biserici.count()
except:
return 'N.A.'
@admin.register(models.TehnicaIconostas)
class TehnicaIconostasAdmin(HistoryChangedFields, SimpleHistoryAdmin):
list_display = ["nume", "nr_biserici"]
search_fields = ["nume"]
def nr_biserici(self, obj):
try:
return obj.biserici.count()
except:
return 'N.A.'
@admin.register(models.TipIconostas)
class TipIconostasAdmin(HistoryChangedFields, SimpleHistoryAdmin):
list_display = ["nume", "nr_biserici"]
search_fields = ["nume"]
def nr_biserici(self, obj):
try:
return obj.biserici.count()
except:
return 'N.A.'
@admin.register(models.DetaliuPodTurn)
class DetaliuPodTurnAdmin(HistoryChangedFields, SimpleHistoryAdmin):
list_display = ["nume", "nr_biserici"]
search_fields = ["nume"]
def nr_biserici(self, obj):
try:
return obj.biserici.count()
except:
return 'N.A.'
@admin.register(models.AsezareTalpaTurn)
class AsezareTalpaTurnAdmin(HistoryChangedFields, SimpleHistoryAdmin):
list_display = ["nume", "nr_biserici"]
search_fields = ["nume"]
def nr_biserici(self, obj):
try:
return obj.biserici.count()
except:
return 'N.A.'
@admin.register(models.RelatieTalpaTurn)
class RelatieTalpaTurnAdmin(HistoryChangedFields, SimpleHistoryAdmin):
list_display = ["nume", "nr_biserici"]
search_fields = ["nume"]
def nr_biserici(self, obj):
try:
return obj.biserici.count()
except:
return 'N.A.'
@admin.register(models.BoltaPesteAltar)
class BoltaPesteAltarAdmin(HistoryChangedFields, SimpleHistoryAdmin):
list_display = ["nume", "nr_biserici"]
search_fields = ["nume"]
def nr_biserici(self, obj):
try:
return obj.biserici.count()
except:
return 'N.A.'
@admin.register(models.TipBoltaPesteAltar)
class TipBoltaPesteAltarAdmin(HistoryChangedFields, SimpleHistoryAdmin):
list_display = ["nume", "nr_biserici"]
search_fields = ["nume"]
def nr_biserici(self, obj):
try:
return obj.biserici.count()
except:
return 'N.A.'
@admin.register(models.TipBoltaPronaos)
class TipBoltaPronaosAdmin(HistoryChangedFields, SimpleHistoryAdmin):
list_display = ["nume", "nr_biserici"]
search_fields = ["nume"]
def nr_biserici(self, obj):
try:
return obj.biserici.count()
except:
return 'N.A.'
@admin.register(models.Mobilier)
class MobilierAdmin(HistoryChangedFields, SimpleHistoryAdmin):
list_display = ["nume", "nr_biserici"]
search_fields = ["nume"]
def nr_biserici(self, obj):
try:
return obj.biserici.count()
except:
return 'N.A.'
@admin.register(models.ObiectCult)
class ObiectCultAdmin(HistoryChangedFields, SimpleHistoryAdmin):
list_display = ["nume", "nr_biserici"]
search_fields = ["nume"]
def nr_biserici(self, obj):
try:
return obj.biserici.count()
except:
return 'N.A.'
@admin.register(models.TipArcBolta)
class TipArcBoltaAdmin(HistoryChangedFields, SimpleHistoryAdmin):
list_display = ["nume", "nr_biserici"]
search_fields = ["nume"]
def nr_biserici(self, obj):
try:
return obj.biserici.count()
except:
return 'N.A.'
| StarcoderdataPython |
4822386 | <reponame>vied12/django-moderation<filename>moderation/helpers.py
from __future__ import unicode_literals
from .register import RegistrationError
def automoderate(instance, user):
    '''
    Auto moderates given model instance on user. Returns moderation status:
    0 - Rejected
    1 - Approved

    Raises RegistrationError when *instance*'s model was never registered
    with moderation (it then has no ``moderated_object`` attribute).
    '''
    try:
        status = instance.moderated_object.automoderate(user)
    except AttributeError:
        # BUG FIX: the message previously said "has been registered", i.e.
        # the opposite of the condition that triggers this error.
        msg = "%s has not been registered with Moderation." % instance.__class__
        raise RegistrationError(msg)
    return status
def import_moderator(app):
    '''
    Import moderator module and register all models it contains with moderation.

    Returns the imported ``<app>.moderator`` module, or ``None`` when the app
    is not importable as a package or has no ``moderator`` submodule.
    '''
    from importlib import import_module
    from importlib.util import find_spec

    paths = app.split('.')
    # "<app>.apps.SomeAppConfig" style INSTALLED_APPS entries: strip the
    # trailing AppConfig class name to recover the app's dotted path.
    if paths[-1][0].isupper():
        app = '.'.join(paths[0:-1])
    try:
        # Probe for <app>.moderator without importing it yet.  Replaces the
        # deprecated `imp.find_module` (the `imp` module was removed in
        # Python 3.12).
        spec = find_spec("%s.moderator" % app)
    except (ImportError, AttributeError, ValueError):
        # App missing, not a package, or with an unusable __spec__.
        return None
    if spec is None:
        return None
    return import_module("%s.moderator" % app)
def auto_discover():
    '''
    Auto-register every installed app that ships a ``moderator`` module
    with moderation (the ``moderation`` app itself is skipped).
    '''
    from django.conf import settings

    for installed_app in settings.INSTALLED_APPS:
        if installed_app != 'moderation':
            import_moderator(installed_app)
| StarcoderdataPython |
21181 | from OrderedVector import OrderedVector
class Greedy:
    """Greedy best-first traversal from a start city toward ``goal``.

    Relies on ``OrderedVector`` returning the closest unvisited neighbour
    first, and on each city node exposing ``adjacent`` entries with
    ``.city`` / ``.distance`` attributes plus a mutable ``visited`` flag
    (assumed reset by the caller between runs -- TODO confirm).
    """

    def __init__(self, goal):
        self.goal = goal                  # target city node
        self.found = False                # set True once `goal` is reached
        self.travelled_distance = 0       # sum of edge distances walked
        self.previous = None              # city visited on the previous step
        self.visited_cities = []          # visit order, stored by city name

    def search(self, current):
        """Recursively walk to the greedily-best neighbour; returns
        ``(visited_cities, travelled_distance)``."""
        current.visited = True
        self.visited_cities.append(current.name)
        # Add the cost of the edge that led here (the edge back to the
        # previously visited city), if any.
        for a in current.adjacent:
            if a.city == self.previous:
                self.travelled_distance += a.distance
        self.previous = current
        if current == self.goal:
            self.found = True
        else:
            # Collect the unvisited neighbours into the ordered border and
            # recurse into the best (first) one, if there is any.
            self.border = OrderedVector(len(current.adjacent))
            for a in current.adjacent:
                if a.city.visited == False:
                    a.city.visited = True
                    self.border.insert(a.city)
            if self.border.getFirst() != None:
                Greedy.search(self, self.border.getFirst())
        return (self.visited_cities, self.travelled_distance)
| StarcoderdataPython |
1741055 | <reponame>Saurav-Shrivastav/openwisp-users<filename>tests/testapp/urls.py
from django.urls import path
from . import views
# URL routes for the test application.  Each route wires one permission-test
# view (member / manager / owner / unauthorized) or one error-condition view
# used by the API permission tests.
urlpatterns = [
    # Plain permission-level views.
    path('member_view', views.api_member_view, name='test_api_member_view'),
    path('manager_view', views.api_manager_view, name='test_api_manager_view'),
    path('owner_view', views.api_owner_view, name='test_api_owner_view'),
    # Organization-permission edge cases.
    path('base_org_view', views.base_org_view, name='test_base_org_permission_view'),
    path('org_field_view', views.org_field_view, name='test_organization_field_view'),
    path('error_field_view', views.error_field_view, name='test_error_field_view'),
    # Book lists scoped to a shelf, one endpoint per permission level.
    path(
        'member/shelf/<str:shelf_id>/books',
        views.books_list_member_view,
        name='test_books_list_member_view',
    ),
    path(
        'manager/shelf/<str:shelf_id>/books',
        views.books_list_manager_view,
        name='test_books_list_manager_view',
    ),
    path(
        'owner/shelf/<str:shelf_id>/books',
        views.books_list_owner_view,
        name='test_books_list_owner_view',
    ),
    # Shelf lists, one endpoint per permission level.
    path(
        'member/shelf',
        views.shelf_list_member_view,
        name='test_shelf_list_member_view',
    ),
    path(
        'manager/shelf',
        views.shelf_list_manager_view,
        name='test_shelf_list_manager_view',
    ),
    path(
        'owner/shelf', views.shelf_list_owner_view, name='test_shelf_list_owner_view',
    ),
    # Unauthorized-access counterparts.
    path(
        'user/shelf/<str:shelf_id>/books',
        views.book_list_unauthorized_view,
        name='test_book_list_unauthorized_view',
    ),
    path(
        'user/shelf',
        views.shelf_list_unauthorized_view,
        name='test_shelf_list_unauthorized_view',
    ),
]
| StarcoderdataPython |
3280234 | #!/usr/bin/python
# Classification (U)
"""Program: notmastererror.py
Description: Unit testing of NotMasterError in errors.py.
Usage:
test/unit/errors/notmastererror.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import errors
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):
    """Unit tests for the errors.NotMasterError exception class."""

    def test_notmastererror(self):
        """A NotMasterError can be instantiated with no arguments and is truthy."""
        err = errors.NotMasterError()
        self.assertTrue(err)


if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
96721 | from utils import show_messages, get_input
# Lets a teacher record a score for a student enrolled in one of their courses.
class AddScoreView(object):
    """Interactive view: validate course, student and permissions, then
    store the entered score and return to the teacher menu."""

    def run(self, site, messages=None):
        site.clear()
        show_messages(messages)

        # Resolve and validate the course (guard clauses: first failing
        # check returns its error message list).
        serial = get_input('Course Serial: ')
        course = site.get_course(serial=serial)
        if course is None:
            return ["No such course!"]
        if course.owner != site.active_user:
            return ["You don't have permission to add score to this course."]
        if not course.verified:
            return ["This course hasn't been verified yet."]

        # Resolve and validate the student.
        serial = get_input('Student Serial Number: ')
        student = site.get_student(serial=serial)
        if student is None:
            return ["No such student!"]
        if not student.has_course(course):
            return ["This student has not this course."]
        if student.passed_course(course):
            return ["This student already has a score fot this course."]
        if not student.verified:
            return ["This student hasn't been verified yet."]

        # All checks passed: record the score and go back to the teacher menu.
        student.scores[course] = get_input('Score: ', output_type=int)
        site.state = '/teacher/'
        return ['We added score successfully.']
| StarcoderdataPython |
1658594 | <filename>experimental/k41_contactset/shift.py
#!/usr/bin/python
import sys
# Read "x y" coordinate pairs from the file named on the command line,
# translate each point by (+0.25, +0.25) on the unit torus (coordinates wrap
# back into [0, 1]), and print each pair smaller-coordinate first.
# NOTE: Python 2 script (print statements).
f = open(sys.argv[1])
for i in f.readlines():
    x,y = map(float,i.split())
    x = x + .25
    y = y + .25
    if x>1:
        x = x - 1
    if y>1:
        y = y - 1
    if y>x:
        print x,y
    else:
        print y,x
f.close()
| StarcoderdataPython |
3351961 | <reponame>hritik5102/Awesome-Computer-Vision-Guide
'''
Application of gradients in images.
A gradient here means a partial derivative of the image intensity;
gradients are useful for detecting edges based on changes in colour.
The second derivative of the image matrix is computed with the Laplacian
operator, while the first derivatives are the two Sobel derivatives --
one horizontal, one vertical. That covers the mathematical details;
let's move on to the implementation.
'''
import cv2
import numpy as numpy
import matplotlib.pyplot as plt
def _gradient_panels(path):
    """Load *path* as grayscale and return ([img, laplacian, sobelx, sobely], titles)."""
    img = cv2.imread(path, 0)
    laplacian = cv2.Laplacian(img, cv2.CV_64F)
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
    return [img, laplacian, sobelx, sobely], ['Original', 'Laplacian', 'SobelX', 'SobelY']


def _show_panels(images, titles, cmap='gray'):
    """Draw the four panels on a 2x3 grid and show the figure.

    Pass cmap=None to let matplotlib pick its default colormap.
    """
    for i in range(4):
        plt.subplot(2, 3, (i + 1))
        if cmap:
            plt.imshow(images[i], cmap=cmap)
        else:
            plt.imshow(images[i])
        plt.title(titles[i])
    plt.show()


# Same demo as before, without the three copy-pasted plotting loops:
# first image in grayscale, second image in grayscale, then the second
# image again with the default colormap.
images, titles = _gradient_panels('../Images and Videos/gradients4.jpg')
_show_panels(images, titles)

images, titles = _gradient_panels('../Images and Videos/gradients2.jpg')
_show_panels(images, titles)
_show_panels(images, titles, cmap=None)
| StarcoderdataPython |
81024 | import json
import numpy as np
from pycocotools import mask as maskUtils
# IoU threshold for a retrieved box to count as matching a ground-truth box.
thresh = 0.5

# ---- load retrieval results ----
results_image_id_all = []
results_query_score_all = []
results_query_cls_all = []
results_query_box_all = []
results_gallery_id_all = []
results_gallery_box_all = []
results_name = ' '  # TODO: path to the retrieval-results JSON file
with open(results_name, 'r') as f:
    results = json.loads(f.read())
    for i in results:
        # Convert [x1, y1, x2, y2] corners to [x, y, w, h] (COCO convention).
        box = i['query_bbox']
        query_box = [box[0], box[1], box[2] - box[0], box[3] - box[1]]
        box = np.array(i['gallery_bbox'])
        gallery_box = [box[:, 0], box[:, 1], box[:, 2] - box[:, 0], box[:, 3] - box[:, 1]]
        gallery_box = np.transpose(gallery_box, (1, 0)).tolist()
        results_image_id_all.append(i['query_image_id'])
        results_query_score_all.append(i['query_score'])
        results_query_cls_all.append(i['query_cls'])
        results_query_box_all.append(query_box)
        results_gallery_id_all.append(i['gallery_image_id'])
        # BUG FIX: was `gellery_box`, an undefined name (NameError at runtime).
        results_gallery_box_all.append(gallery_box)
# (the `with` block closes the file; the old redundant f.close() is gone)

results_image_id_all = np.array(results_image_id_all)
results_query_score_all = np.array(results_query_score_all)
results_query_cls_all = np.array(results_query_cls_all)
results_query_box_all = np.array(results_query_box_all)
results_gallery_id_all = np.array(results_gallery_id_all)
results_gallery_box_all = np.array(results_gallery_box_all)
# load query ground truth
query_image_id_all = []
query_box_all = []
query_cls_all = []
query_style_all = []
query_pair_all = []
query_name = '.../query_gt.json'  # TODO: path to the query ground-truth file
with open(query_name, 'r') as f:
    query = json.loads(f.read())
    for i in query:
        # Convert [x1, y1, x2, y2] corners to [x, y, w, h] (COCO convention).
        box = i['bbox']
        box = [box[0], box[1], box[2] - box[0], box[3] - box[1]]
        query_image_id_all.append(i['query_image_id'])
        query_box_all.append(box)
        query_cls_all.append(i['cls'])
        query_style_all.append(i['style'])
        query_pair_all.append(i['pair_id'])
f.close()
# convert the query annotations to numpy arrays
query_image_id_all = np.array(query_image_id_all)
query_box_all = np.array(query_box_all)
query_cls_all = np.array(query_cls_all)
query_style_all = np.array(query_style_all)
query_pair_all = np.array(query_pair_all)
query_num = len(np.where(query_style_all>0)[0]) # the number of all query clothing items
query_id_real= np.unique(query_image_id_all) # image ids of query clothing items
# load gallery ground truth
gallery_image_id_all = []
gallery_box_all = []
gallery_style_all = []
gallery_pair_all = []
gallery_name = '.../gallery_gt.json'  # TODO: path to the gallery ground-truth file
with open(gallery_name, 'r') as f:
    gallery = json.loads(f.read())
    for i in gallery:
        # Convert [x1, y1, x2, y2] corners to [x, y, w, h] (COCO convention).
        box = i['bbox']
        box = [box[0], box[1], box[2] - box[0], box[3] - box[1]]
        gallery_image_id_all.append(i['gallery_image_id'])
        gallery_box_all.append(box)
        gallery_style_all.append(i['style'])
        gallery_pair_all.append(i['pair_id'])
f.close()
gallery_image_id_all = np.array(gallery_image_id_all)
gallery_box_all = np.array(gallery_box_all)
gallery_style_all = np.array(gallery_style_all)
gallery_pair_all = np.array(gallery_pair_all)
# Running totals for top-k retrieval accuracy (k = 1, 5, 10, 15, 20).
correct_num_1 = 0
correct_num_5 = 0
correct_num_10 = 0
correct_num_15 = 0
correct_num_20 = 0
miss_num = 0 # the number of query items that fail to be detected
for id in query_id_real:
    results_id_ind = np.where(results_image_id_all==id)[0]
    if len(results_id_ind) == 0: # in case no clothing item is detected
        continue
    query_id_ind = np.where(query_image_id_all==id)[0] # all query items in the given image
    pair_id = query_pair_all[query_id_ind]
    # All query items of one image are expected to share a single pair id.
    assert len(np.unique(pair_id)) == 1
    pair_id = pair_id[0]
    # Slice out this image's detections and ground truth.
    results_id_score = results_query_score_all[results_id_ind]
    results_id_box = results_query_box_all[results_id_ind]
    results_id_cls = results_query_cls_all[results_id_ind]
    results_id_gallery_id = results_gallery_id_all[results_id_ind]
    results_id_gallery_box = results_gallery_box_all[results_id_ind]
    query_id_box = query_box_all[query_id_ind]
    query_id_cls = query_cls_all[query_id_ind]
    query_id_style = query_style_all[query_id_ind]
    is_crowd = np.zeros(len(query_id_box))
    iou_id = maskUtils.iou(results_id_box,query_id_box,is_crowd)
    iou_ind = np.argmax(iou_id,axis=1) # assign a ground truth label to each detected clothing item
    for id_ind in range(0,len(query_id_ind)):
        style = query_id_style[id_ind]
        cls = query_id_cls[id_ind]
        # For a given ground truth query item, select a detected item on behalf of it:
        # First find out all detected items which are assigned the given ground truth label
        # and are classified correctly.
        # Then select the detected item with the highest score among these detected items.
        if style>0:
            results_style_ind1 = np.where(iou_ind==id_ind)[0]
            results_style_ind2 = np.where(results_id_cls==cls)[0]
            results_style_ind = np.intersect1d(results_style_ind1,results_style_ind2)
            if len(results_style_ind)>0:
                results_score_style = results_id_score[results_style_ind]
                score_max_ind = np.argmax(results_score_style)
                results_style_query_ind = results_style_ind[score_max_ind]
                results_style_gallery_id = results_id_gallery_id[results_style_query_ind]
                results_style_gallery_box = results_id_gallery_box[results_style_query_ind]
                # find out the corresponding ground truth items in the gallery, that is ground truth items which have the same pair id and style as the query item.
                gt_gallery_ind1 = np.where(gallery_pair_all==pair_id)[0]
                gt_gellery_ind2 = np.where(gallery_style_all==style)[0]
                gt_gallery_ind = np.intersect1d(gt_gallery_ind1,gt_gellery_ind2)
                gt_gallery_image_id = gallery_image_id_all[gt_gallery_ind]
                gt_gallery_box = gallery_box_all[gt_gallery_ind]
                assert len(gt_gallery_ind)>0
                if len(gt_gallery_ind) == 1:
                    gt_gallery_image_id = [gt_gallery_image_id]
                #calculate top-1
                for t in range(0,1):
                    # if corresponding ground truth gallery images contains retrieved gallery image,
                    # first find out the exact corresponding ground truth gallery image,
                    # then find out ground truth gallery items in this ground truth gallery image(whose number may be greater than 1)
                    # if the overlap between the retrieved gallery item and one of the ground truth gallery items is over the thresh, the retrieved result is positive.
                    if results_style_gallery_id[t] in gt_gallery_image_id:
                        which_ind = np.where(gt_gallery_image_id==results_style_gallery_id[t])[0]
                        crowd = np.zeros(len(which_ind))
                        iou_style = maskUtils.iou([results_style_gallery_box[t]],gt_gallery_box[which_ind],crowd)
                        if len(np.where(iou_style>=thresh)[0])>0:
                            correct_num_1 = correct_num_1 + 1
                            break
                # calculate top-5
                for t in range(0,5):
                    if results_style_gallery_id[t] in gt_gallery_image_id:
                        which_ind = np.where(gt_gallery_image_id==results_style_gallery_id[t])[0]
                        crowd = np.zeros(len(which_ind))
                        iou_style = maskUtils.iou([results_style_gallery_box[t]],gt_gallery_box[which_ind],crowd)
                        if len(np.where(iou_style >= thresh)[0]) > 0:
                            correct_num_5 = correct_num_5 + 1
                            break
                # calculate top-10
                for t in range(0,10):
                    if results_style_gallery_id[t] in gt_gallery_image_id:
                        which_ind = np.where(gt_gallery_image_id==results_style_gallery_id[t])[0]
                        crowd = np.zeros(len(which_ind))
                        iou_style = maskUtils.iou([results_style_gallery_box[t]],gt_gallery_box[which_ind],crowd)
                        if len(np.where(iou_style >= thresh)[0]) > 0:
                            correct_num_10 = correct_num_10 + 1
                            break
                # calculate top-15
                for t in range(0,15):
                    if results_style_gallery_id[t] in gt_gallery_image_id:
                        which_ind = np.where(gt_gallery_image_id==results_style_gallery_id[t])[0]
                        crowd = np.zeros(len(which_ind))
                        iou_style = maskUtils.iou([results_style_gallery_box[t]],gt_gallery_box[which_ind],crowd)
                        if len(np.where(iou_style >= thresh)[0]) > 0:
                            correct_num_15 = correct_num_15 + 1
                            break
                # calculate top-20
                for t in range(0,20):
                    if results_style_gallery_id[t] in gt_gallery_image_id:
                        which_ind = np.where(gt_gallery_image_id==results_style_gallery_id[t])[0]
                        crowd = np.zeros(len(which_ind))
                        iou_style = maskUtils.iou([results_style_gallery_box[t]],gt_gallery_box[which_ind],crowd)
                        if len(np.where(iou_style >= thresh)[0]) > 0:
                            correct_num_20 = correct_num_20 + 1
                            break
            else:
                # No detection was both assigned to this ground truth item and
                # classified with the right class: count it as missed.
                miss_num = miss_num + 1
# Report top-k accuracy over all query clothing items (Python 2 prints).
print 'top-1'
print float(correct_num_1)/ query_num
print 'top-5'
print float(correct_num_5)/ query_num
print 'top-10'
print float(correct_num_10)/ query_num
print 'top-15'
print float(correct_num_15)/ query_num
print 'top-20'
print float(correct_num_20)/ query_num
| StarcoderdataPython |
10586 | #
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DEPTHWISE_CONV_2D test model: 1x3x3x2 input, 1x2x2x4 filter with channel
# multiplier 2 (so 2 input channels -> 4 output depths), no padding,
# stride 1, no fused activation; relaxed (fp16-accumulation) execution.
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")   # input feature map
f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}")   # depthwise filter
b1 = Input("op3", "TENSOR_FLOAT32", "{4}")            # per-output-depth bias
pad0 = Int32Scalar("pad0", 0)                         # explicit zero padding, all sides
act = Int32Scalar("act", 0)                           # no fused activation
stride = Int32Scalar("stride", 1)
cm = Int32Scalar("channelMultiplier", 2)
output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")

model = model.Operation("DEPTHWISE_CONV_2D",
                        i1, f1, b1,
                        pad0, pad0, pad0, pad0,
                        stride, stride,
                        cm, act).To(output)
model = model.RelaxedExecution(True)

# Example 1. Input in operand 0,
input0 = {i1: # input 0
          [10, 21, 10, 22, 10, 23,
           10, 24, 10, 25, 10, 26,
           10, 27, 10, 28, 10, 29],
          f1:
          [.25, 0, .2, 0,
           .25, 0, 0, .3,
           .25, 0, 0, 0,
           .25, .1, 0, 0],
          b1:
          [1, 2, 3, 4]}
# (i1 (conv) f1) + b1
# filter usage (channel multiplier 2 maps each input channel to 2 depths):
#   in_ch1 * f_1 --> output_d1
#   in_ch1 * f_2 --> output_d2
#   in_ch2 * f_3 --> output_d3
#   in_ch2 * f_4 --> output_d4
output0 = {output: # output 0
           [11, 3, 7.2, 10.6,
            11, 3, 7.4, 10.9,
            11, 3, 7.8, 11.5,
            11, 3, 8.0, 11.8]}

# Instantiate an example
Example((input0, output0))
| StarcoderdataPython |
1689390 | #!/usr/bin/env python
import sys
import os, os.path
import shutil
from ROOT import gROOT,gSystem,gDirectory,RooAbsData,RooRandom,RooWorkspace
## batch mode: temporarily inject ROOT's '-b' flag so gROOT starts without
## graphics, then remove it again so it never reaches the option parser below.
sys.argv.insert(1, '-b')
gROOT.Reset()
gROOT.SetBatch(True)
del sys.argv[1]
# Load the compiled SusyFitter library (Util, RooExpandedFitResult, ...).
gSystem.Load("libSusyFitter.so")
gROOT.Reset()
def GenerateFitAndPlot(tl):
    """Fit the workspace of one TopLevelXML configuration and plot it.

    Loads the combined workspace from ``tl.wsFileName``, builds the channel
    lists for fitting and plotting, optionally substitutes a toy-MC or
    Asimov dataset for data, frees the JES gamma bins, seeds starting errors
    on the background normalisation factors, then fits and writes
    before/after RooExpandedFitResults plus plots back into the workspace
    file.  Returns the RooFitResult on success, otherwise None.
    """
    from ROOT import Util
    from ROOT import RooExpandedFitResult
    print("\n***GenerateFitAndPlot for TopLevelXML %s***\n"%tl.name)

    w = Util.GetWorkspaceFromFile(tl.wsFileName,"combined")
    Util.SaveInitialSnapshot(w)
    #   Util.ReadWorkspace(w, tl.wsFileName,"combined")

    # Comma-separated list of validation channels to plot...
    plotChannels = ""
    for reg in tl.validationChannels:
        if len(plotChannels)>0:
            plotChannels+=","
            pass
        plotChannels+=reg
    # ...but in practice every channel is plotted.
    plotChannels = "ALL"

    # Channels entering the fit: background constraint regions first...
    fitChannels = ""
    for reg in tl.bkgConstrainChannels:
        if len(fitChannels)>0:
            fitChannels+=","
            pass
        fitChannels+=reg
        pass

    # ...then the signal regions (fitChannelsCR keeps the CR-only list).
    fitChannelsCR = fitChannels
    for reg in tl.signalChannels:
        if len(fitChannels)>0:
            fitChannels+=","
            pass
        fitChannels+=reg
    #fitChannels = "ALL"

    # Luminosity is held constant unless a signal sample is being fitted.
    lumiConst = not tl.signalSample

    # fit toy MC if specified. When left None, data is fit by default
    toyMC = None
    if configMgr.toySeedSet and not configMgr.useAsimovSet: # generate a toy dataset
        print("INFO : generating toy MC set for fitting and plotting. Seed = %i" % configMgr.toySeed)
        RooRandom.randomGenerator().SetSeed( configMgr.toySeed )
        toyMC = Util.GetToyMC() # this generates one toy dataset
        pass
    elif configMgr.useAsimovSet and not configMgr.toySeedSet: #
        print("INFO : using Asimov set for fitting and plotting.")
        toyMC = Util.GetAsimovSet(w) # this returns the asimov set
        pass
    else:
        print("INFO : using data for fitting and plotting.")

    ## MB : turn on all JES bins. Some are turned off by HistFactory by default
    if True:
        if w.var("gamma_J3_bin_0")!=None: w.var("gamma_J3_bin_0").setConstant(False)
        if w.var("gamma_J3_bin_1")!=None: w.var("gamma_J3_bin_1").setConstant(False)
        if w.var("gamma_J3_bin_2")!=None: w.var("gamma_J3_bin_2").setConstant(False)
        if w.var("gamma_J3_bin_3")!=None: w.var("gamma_J3_bin_3").setConstant(False)
        if w.var("gamma_J3_bin_4")!=None: w.var("gamma_J3_bin_4").setConstant(False)
        if w.var("gamma_J3_bin_5")!=None: w.var("gamma_J3_bin_5").setConstant(False)
        #if w.var("gamma_J4_bin_0")!=None: w.var("gamma_J4_bin_0").setConstant(False)
        #if w.var("gamma_J4_bin_1")!=None: w.var("gamma_J4_bin_1").setConstant(False)
        if w.var("gamma_J4_bin_2")!=None: w.var("gamma_J4_bin_2").setConstant(False)
        if w.var("gamma_J4_bin_3")!=None: w.var("gamma_J4_bin_3").setConstant(False)
        if w.var("gamma_J4_bin_4")!=None: w.var("gamma_J4_bin_4").setConstant(False)
        if w.var("gamma_J4_bin_5")!=None: w.var("gamma_J4_bin_5").setConstant(False)
        if w.var("gamma_JC_bin_0")!=None: w.var("gamma_JC_bin_0").setConstant(False)
        if w.var("gamma_JC_bin_1")!=None: w.var("gamma_JC_bin_1").setConstant(False)
        if w.var("gamma_JC_bin_2")!=None: w.var("gamma_JC_bin_2").setConstant(False)
        if w.var("gamma_JC_bin_3")!=None: w.var("gamma_JC_bin_3").setConstant(False)
        if w.var("gamma_JC_bin_4")!=None: w.var("gamma_JC_bin_4").setConstant(False)
        if w.var("gamma_JC_bin_5")!=None: w.var("gamma_JC_bin_5").setConstant(False)
        if w.var("gamma_JC_bin_6")!=None: w.var("gamma_JC_bin_6").setConstant(False)
        # Soft lepton
        # if w.var("gamma_JSS_bin_0")!=None: w.var("gamma_JSS_bin_0").setConstant(False)

    # set Errors of all parameters to 'natural' values before plotting/fitting
    Util.resetAllErrors(w)

    # Seed a small starting error on the top normalisation factor so MINUIT
    # has a sensible first step; variable names depend on the configuration
    # (single mu_Top vs. one factor per parton multiplicity Np0..Np5).
    mu_Top = w.var("mu_Top")
    print("mu_Top: ")
    print(mu_Top)
    if mu_Top:
        mu_Top.setError(0.001)
    else:
        mu_Top = w.var("mu_Top_Np0")
        if mu_Top:
            mu_Top.setError(0.001)
        mu_Top = w.var("mu_Top_Np1")
        if mu_Top:
            mu_Top.setError(0.001)
        mu_Top = w.var("mu_Top_Np2")
        if mu_Top:
            mu_Top.setError(0.001)
        mu_Top = w.var("mu_Top_Np3")
        if mu_Top:
            mu_Top.setError(0.001)
        mu_Top = w.var("mu_Top_Np4")
        if mu_Top:
            mu_Top.setError(0.001)
        mu_Top = w.var("mu_Top_Np5")
        if mu_Top:
            mu_Top.setError(0.001)

    # Same treatment for the W/Z normalisation factors (single mu_WZ,
    # pT-binned, or Np-binned naming schemes).
    mu_WZ = w.var("mu_WZ")
    mu_WZpT0GeV = w.var("mu_WZpT0GeV")
    if mu_WZ:
        mu_WZ.setError(0.001)
    elif mu_WZpT0GeV:
        mu_WZpT0GeV = w.var("mu_WZpT0GeV")
        mu_WZpT0GeV.setError(0.001)
        mu_WZpT0GeV = w.var("mu_WZpT50GeV")
        mu_WZpT0GeV.setError(0.001)
        mu_WZpT0GeV = w.var("mu_WZpT100GeV")
        mu_WZpT0GeV.setError(0.001)
        mu_WZpT0GeV = w.var("mu_WZpT150GeV")
        mu_WZpT0GeV.setError(0.001)
        mu_WZpT0GeV = w.var("mu_WZpT200GeV")
        mu_WZpT0GeV.setError(0.001)
        mu_WZpT0GeV = w.var("mu_WZpT250GeV")
        mu_WZpT0GeV.setError(0.001)
    else:
        # NOTE(review): unlike the other branches these setError calls are
        # unguarded, so a missing mu_WZ_Np* variable would raise -- confirm
        # all six always exist in this configuration.
        mu_WZ = w.var("mu_WZ_Np0")
        mu_WZ.setError(0.001)
        mu_WZ = w.var("mu_WZ_Np1")
        mu_WZ.setError(0.001)
        mu_WZ = w.var("mu_WZ_Np2")
        mu_WZ.setError(0.001)
        mu_WZ = w.var("mu_WZ_Np3")
        mu_WZ.setError(0.001)
        mu_WZ = w.var("mu_WZ_Np4")
        mu_WZ.setError(0.001)
        mu_WZ = w.var("mu_WZ_Np5")
        mu_WZ.setError(0.001)

    # set the flag for plotting ratio or pull distribution under the plot
    plotRatio = True # plotRatio = False means that a pull distribution will be drawn

    # get a list of all floating parameters for all regions
    simPdf = w.pdf("simPdf");
    mc = Util.GetModelConfig(w)
    obsSet = mc.GetObservables()
    floatPars = Util.getFloatParList(simPdf, obsSet)

    # create an RooExpandedFitResult encompassing all the regions/parameters & save it to workspace
    expResultBefore = RooExpandedFitResult(floatPars)
    # expResultBefore.Print()
    Util.ImportInWorkspace(w,expResultBefore,"RooExpandedFitResult_beforeFit")
    # plot before fit
    #Util.PlotPdfWithComponents(w,tl.name,plotChannels,"beforeFit_ORIGINAL",None,toyMC)
    Util.PlotPdfWithComponents(w,tl.name,plotChannels,"beforeFit",expResultBefore,toyMC,plotRatio)
    #return

    # fit of CRs only
    # resultCR = Util.FitPdf(w,fitChannelsCR,lumiConst,toyMC)
    # load original snapshot
    # w.loadSnapshot('snapshot_paramsVals_initial')
    # fit of all regions
    result = Util.FitPdf(w,fitChannels,lumiConst,toyMC)

    # create an RooExpandedFitResult encompassing all the regions/parameters with the result & save it to workspace
    expResultAfter = RooExpandedFitResult(result, floatPars)
    Util.ImportInWorkspace(w,expResultAfter,"RooExpandedFitResult_afterFit")
    # plot after fit
    #Util.PlotPdfWithComponents(w,tl.name,plotChannels,"afterFit_ORIGINAL",result,toyMC)
    Util.PlotPdfWithComponents(w,tl.name,plotChannels,"afterFit",expResultAfter,toyMC,plotRatio)
    # plot each component of each region separately with propagated error after fit (interesting for debugging)
    # Util.PlotSeparateComponents(tl.name,plotChannels,"afterFit",result,toyMC)
    # plot correlation matrix for result
    #Util.PlotCorrelationMatrix(result)
    # Util.GetCorrelations(result, 0.85)
    # plotPLL = False
    # Util.PlotNLL(w, result, plotPLL, "", toyMC)

    # Persist the (possibly toy) workspace back to its file.
    if toyMC:
        Util.WriteWorkspace(w, tl.wsFileName,toyMC.GetName())
    else:
        Util.WriteWorkspace(w, tl.wsFileName)

    # Best-effort print of the fit result; returns None when the fit failed.
    try:
        if not result == None:
            result.Print()
            return result
    except:
        pass
    return
def GetLimits(tl,f):
    """Run the upper-limit calculation for one signal TopLevelXML config.

    Skips configurations whose name does not contain "SU" (background-only
    or discovery fits).  NOTE(review): the ``f`` parameter is never used --
    kept only for the caller's signature; confirm before removing.
    """
    from ROOT import RooStats,Util
    #w=gDirectory.Get("w")
    print("analysis name: ",tl.name)
    print("workspace name: ",tl.wsFileName)
    if not ("SU" in tl.name):
        print("Do no hypothesis test for bkg only or discovery fit!\n")
        return
    print("Need to load workspace")
    Util.ReadWorkspace(tl.wsFileName,"combined")
    w=gDirectory.Get("w")
    # CLs upper-limit scan (2..3 in 1000 steps, 20 toys per point).
    result = RooStats.MakeUpperLimitPlot(tl.name,w,2,3,1000,True,20,True)
    if not result==0:
        result.Print()
        print(result.UpperLimit())
    return
if __name__ == "__main__":
    from configManager import configMgr
    from prepareHistos import TreePrepare,HistoPrepare

    # Defaults: every processing step is off until enabled by a flag.
    configMgr.readFromTree = False
    configMgr.executeHistFactory=False
    runInterpreter = False
    runFit = False
    printLimits = False
    doHypoTests = False
    sigSamples = []

    print("\n * * * Welcome to HistFitter * * *\n")

    import os, sys
    import getopt

    def usage():
        # Print the command-line help and exit.
        print("HistFitter.py [-i] [-t] [-w] [-f] [-l] [-l] [-p] [-n nTOYs] [-s seed] [-g gridPoint] <configuration_file>\n")
        print("(all OFF by default. Turn steps ON with options)")
        print("-t re-create histograms from TTrees (default: %s)"%(configMgr.readFromTree))
        print("-w re-create workspace from histograms (default: %s)"%(configMgr.executeHistFactory))
        print("-f fit the workspace (default: %s)"%(configMgr.executeHistFactory))
        print("-n <nTOYs> sets number of TOYs (<=0 means to use real data, default: %i)"%configMgr.nTOYs)
        print("-s <number> set the random seed for toy generation (default is CPU clock: %i)" % configMgr.toySeed)
        print("-a use Asimov dataset for fitting and plotting (default: %i)" % configMgr.useAsimovSet)
        print("-i stays in interactive session after executing the script (default %s)"%runInterpreter)
        print("-v verbose level (1: minimal, 2: print histogram names, 3: print XML files, default: %i)"%configMgr.verbose)
        print("-l make limit plot of workspace (default %s)" % printLimits)
        print("-p run hypothesis test on workspace (default %s)" % doHypoTests)
        print("-g <grid points to be processed> - give as comma separated list (default: %s)" % str(sigSamples))
        print("\nAlso see the README file.\n")
        print("Command examples:")
        print("HistFitter.py -i python/MySusyFitterConfig.py #only runs initialization in interactive mode (try e.g.: configMgr.<tab>)")
        print("HistFitter.py -t -w -f python/MySusyFitterConfig.py #runs all steps (TTree->Histos->Workspace->Fit) in batch mode")
        print("HistFitter.py -f -i python/MySusyFitterConfig.py #only fit and plot, using existing workspace, in interactive session")
        print("HistFitter.py -s 666 -f python/MySusyFitterConfig.py #fit a TOY dataset (from seed=666) and prints RooFitResult")
        print("\nNote: examples of input TTrees can be found in /afs/cern.ch/atlas/groups/susy/1lepton/samples/")
        sys.exit(0)

    # Parse the command line; any parsing problem (including a missing
    # configuration file argument) falls through to usage(), which exits.
    try:
        opts, args = getopt.getopt(sys.argv[1:], "twfin:s:v:alpg:")
        configFile = str(args[0])
    except:
        usage()
    for opt,arg in opts:
        if opt == '-t':
            configMgr.readFromTree=True
        elif opt == '-w':
            configMgr.executeHistFactory=True
        elif opt == '-f':
            runFit=True
        elif opt == '-n':
            configMgr.nTOYs = int(arg)
        elif opt == '-i':
            runInterpreter = True
        elif opt == '-v':
            configMgr.setVerbose( int(arg) )
        elif opt == '-l':
            printLimits = True
        elif opt == '-p':
            doHypoTests = True
        elif opt == '-s':
            configMgr.toySeedSet = True
            configMgr.toySeed = int(arg)
        elif opt == '-a':
            configMgr.useAsimovSet = True
        elif opt == '-g':
            sigSamples = arg.split(',')
        pass
    gROOT.SetBatch(not runInterpreter)

    #mandatory user-defined configuration
    exec(compile(open(configFile, "rb").read(), configFile, 'exec'))

    #standard execution from now on.
    configMgr.initialize()

    #runs Trees->histos and/or histos->workspace according to specifications
    if configMgr.readFromTree or configMgr.executeHistFactory:
        configMgr.executeAll()

    if runFit:
        # NOTE(review): range(len-1) deliberately(?) skips the last topLvl
        # configuration -- confirm this is intended and not an off-by-one.
        for i in range(0,len(configMgr.topLvls)-1):
            #if len(configMgr.topLvls)>1:
            r=GenerateFitAndPlot(configMgr.topLvls[i])
            #for idx in range(len(configMgr.topLvls)):
            #    r=GenerateFitAndPlot(configMgr.topLvls[idx]) #1])
            pass
        #configMgr.cppMgr.fitAll()
        print("\nr0=GenerateFitAndPlot(configMgr.topLvls[0])")
        print("r1=GenerateFitAndPlot(configMgr.topLvls[1])")
        print("r2=GenerateFitAndPlot(configMgr.topLvls[2])")
        pass

    if printLimits:
        configMgr.cppMgr.doUpperLimitAll()
        #for tl in configMgr.topLvls:
        #    GetLimits(tl,f)
        #    pass
        pass

    if doHypoTests:
        configMgr.cppMgr.doHypoTestAll()
        pass

    # Toy generation only runs when no other processing step was requested.
    if configMgr.nTOYs>0 and doHypoTests==False and printLimits==False and runFit==False:
        RooRandom.randomGenerator().SetSeed( configMgr.toySeed )
        configMgr.cppMgr.runToysAll()
        pass

    if runInterpreter:
        # Drop into an interactive console with the current locals exposed.
        from code import InteractiveConsole
        from ROOT import Util
        cons = InteractiveConsole(locals())
        cons.interact("Continuing interactive session... press Ctrl+d to exit")
        pass

    print("Leaving HistFitter... Bye!")
| StarcoderdataPython |
#!/usr/bin/python
import sys
from DG1022 import *
# Interactive smoke test for the Rigol DG1022 function generator driver.
# Each step pauses for the operator before sending the next SCPI command.
# NOTE(review): raw_input() implies Python 2; under Python 3 this would be input().
r = RigolDG('/dev/usbtmc0')
c = raw_input("Press any key to query IDN...")
r.meas.write('*IDN?')
c = raw_input("Press any key to read IDN response...")
r.meas.read()
c=raw_input("Press enter to enable Channel 1")
r.enableChan1()
c=raw_input("Press enter to enable Channel 2")
r.enableChan2()
c = raw_input("Press any key to set sync Voltages...")
# Square wave on both channels, then link the channel amplitudes
# (ratio semantics defined by RigolDG.syncVoltages -- confirm in DG1022.py).
r.setFunc(function=RigolDG.SQUARE,channel=RigolDG.CH1)
r.setFunc(function=RigolDG.SQUARE,channel=RigolDG.CH2)
r.syncVoltages(sync=True,ratio_CH1=2,ratio_CH2=1)
r.setVoltage(5)
c = raw_input("Press any key to set sync Frequencies...")
# Link the channel frequencies at a 1:10 ratio, then drive the base frequency.
r.syncFrequency(sync=True,ratio_CH1=1,ratio_CH2=10)
r.setFreqHz(10000)
c = raw_input("Press any key to continue...")
r.setFreqHz(2000)
r.setVoltage(10)
# Disable outputs, then exercise the keyword-argument call forms.
r.enableChan1(False)
r.enableChan2(False)
r.setVoltage(value=5)
r.setVoltage(channel = RigolDG.CH2,value=5)
| StarcoderdataPython |
# src/eduid_scimapi/db/eventdb.py
from __future__ import annotations
import logging
from dataclasses import asdict, dataclass, field
from datetime import datetime, timedelta
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Type
from uuid import UUID, uuid4
from bson import ObjectId
from eduid_userdb.util import utc_now
from eduid_scimapi.db.basedb import ScimApiBaseDB
from eduid_scimapi.db.common import ScimApiResourceBase
from eduid_scimapi.schemas.scimbase import SCIMResourceType
from eduid_scimapi.utils import urlappend
if TYPE_CHECKING:
from eduid_scimapi.context import Context
logger = logging.getLogger(__name__)
@dataclass
class ScimApiEventResource:
    """Reference to the SCIM resource an event concerns."""

    resource_type: SCIMResourceType
    scim_id: UUID
    external_id: Optional[str]

    def to_dict(self) -> Dict[str, Any]:
        """Serialise to a JSON/BSON-friendly dict (enum -> value, UUID -> str)."""
        serialised = asdict(self)
        serialised.update(
            scim_id=str(self.scim_id),
            resource_type=self.resource_type.value,
        )
        return serialised

    @classmethod
    def from_dict(cls: Type[ScimApiEventResource], data: Mapping[str, Any]) -> ScimApiEventResource:
        """Inverse of to_dict: revive the enum and UUID fields from raw values."""
        raw = dict(data)
        raw['resource_type'] = SCIMResourceType(raw['resource_type'])
        raw['scim_id'] = UUID(raw['scim_id'])
        return cls(**raw)
class EventLevel(Enum):
    """Severity level attached to a stored event."""
    DEBUG = 'debug'
    INFO = 'info'
    WARNING = 'warning'
    ERROR = 'error'
class EventStatus(Enum):
    """Lifecycle status carried in an event's data payload."""
    CREATED = 'CREATED'
    UPDATED = 'UPDATED'
    DELETED = 'DELETED'
@dataclass
class _ScimApiEventRequired:
    """Required (non-defaulted) fields of ScimApiEvent.

    Kept in a separate base class, presumably so the required fields precede
    the defaulted fields contributed by ScimApiResourceBase in the generated
    __init__ -- confirm against ScimApiResourceBase.
    """
    # The SCIM resource this event refers to.
    resource: ScimApiEventResource
    # Severity of the event.
    level: EventLevel
    # Origin of the event (e.g. 'eduID SCIM API' in add_api_event below).
    source: str
    # Free-form event payload.
    data: Dict[str, Any]
    # When the stored document should be discarded (matches the 'auto-discard'
    # TTL index created in ScimApiEventDB.__init__).
    expires_at: datetime
    # When the event occurred.
    timestamp: datetime
@dataclass
class ScimApiEvent(ScimApiResourceBase, _ScimApiEventRequired):
    """A stored SCIM event, combining base resource fields with the
    required event fields. scim_id/external_id used below are presumably
    provided by ScimApiResourceBase -- confirm there.
    """
    db_id: ObjectId = field(default_factory=lambda: ObjectId())  # mongodb document _id

    def to_dict(self) -> Dict[str, Any]:
        """Serialise for MongoDB storage: db_id -> _id, enums/UUIDs to raw values."""
        data = asdict(self)
        data['_id'] = data.pop('db_id')
        data['level'] = self.level.value
        data['scim_id'] = str(self.scim_id)
        data['resource'] = self.resource.to_dict()
        return data

    @classmethod
    def from_dict(cls: Type[ScimApiEvent], data: Mapping[str, Any]) -> ScimApiEvent:
        """Revive an event from a MongoDB document (inverse of to_dict)."""
        _data = dict(data)
        # '_id' is absent when the dict was not produced by/for MongoDB.
        if '_id' in _data:
            _data['db_id'] = _data.pop('_id')
        _data['level'] = EventLevel(_data['level'])
        _data['scim_id'] = UUID(_data['scim_id'])
        _data['resource'] = ScimApiEventResource.from_dict(_data['resource'])
        return cls(**_data)
class ScimApiEventDB(ScimApiBaseDB):
    """MongoDB-backed store for ScimApiEvent documents."""

    def __init__(self, db_uri: str, collection: str, db_name='eduid_scimapi'):
        super().__init__(db_uri, db_name, collection=collection)
        indexes = {
            # Remove messages older than expires_at datetime (Mongo TTL index)
            'auto-discard': {'key': [('expires_at', 1)], 'expireAfterSeconds': 0},
            # Ensure unique scim_id
            'unique-scimid': {'key': [('scim_id', 1)], 'unique': True},
        }
        self.setup_indexes(indexes)

    def save(self, event: ScimApiEvent) -> bool:
        """ Save a new event to the database. Events are never expected to be modified. """
        event_dict = event.to_dict()
        result = self._coll.insert_one(event_dict)
        logger.debug(f'{self} Inserted event {event} in {self._coll_name}')
        import pprint
        # Full document dump at debug level for troubleshooting.
        extra_debug = pprint.pformat(event_dict, width=120)
        logger.debug(f'Extra debug:\n{extra_debug}')
        return result.acknowledged

    def get_events_by_resource(
        self, resource_type: SCIMResourceType, scim_id: Optional[UUID] = None, external_id: Optional[str] = None
    ) -> List[ScimApiEvent]:
        """Return all events for a resource type, optionally narrowed by
        scim_id and/or external_id. Empty list when nothing matches."""
        # NOTE: 'filter' shadows the builtin of the same name (local scope only).
        filter = {
            'resource.resource_type': resource_type.value,
        }
        if scim_id is not None:
            filter['resource.scim_id'] = str(scim_id)
        if external_id is not None:
            filter['resource.external_id'] = external_id
        docs = self._get_documents_by_filter(filter, raise_on_missing=False)
        if docs:
            return [ScimApiEvent.from_dict(this) for this in docs]
        return []

    def get_event_by_scim_id(self, scim_id: str) -> Optional[ScimApiEvent]:
        """Look up a single event by its scim_id; None when not found."""
        doc = self._get_document_by_attr('scim_id', scim_id, raise_on_missing=False)
        if not doc:
            return None
        return ScimApiEvent.from_dict(doc)
def add_api_event(
    data_owner: str,
    context: 'Context',
    db_obj: ScimApiResourceBase,
    resource_type: SCIMResourceType,
    level: EventLevel,
    status: EventStatus,
    message: str,
) -> None:
    """ Add an event with source=this-API.

    Stores the event in the data owner's event DB (expiring after one day via
    the 'auto-discard' TTL index) and notifies the data owner with the event's
    API location.
    """
    _now = utc_now()
    # One-day lifetime; the TTL index in ScimApiEventDB discards it afterwards.
    _expires_at = _now + timedelta(days=1)
    _event = ScimApiEvent(
        scim_id=uuid4(),
        resource=ScimApiEventResource(
            resource_type=resource_type, scim_id=db_obj.scim_id, external_id=db_obj.external_id
        ),
        timestamp=_now,
        expires_at=_expires_at,
        source='eduID SCIM API',
        level=level,
        data={'v': 1, 'status': status.value, 'message': message},
    )
    event_db = context.get_eventdb(data_owner=data_owner)
    # NOTE(review): assert is stripped under -O; consider an explicit check.
    assert event_db  # please mypy
    event_db.save(_event)
    # Send notification
    # NOTE(review): 'message' below rebinds the parameter of the same name.
    event_location = urlappend(context.base_url, f'Events/{_event.scim_id}')
    message = context.notification_relay.format_message(version=1, data={'location': event_location})
    context.notification_relay.notify(data_owner=data_owner, message=message)
    return None
| StarcoderdataPython |
"""
Outputs a script to convert 2 channel wav files to 1 channel wave files.
"""
import fnmatch
import os
# Walk the 'orchive' tree and emit one sox command per stereo wav file,
# converting it to a mono (.1c.wav) file next to the original.
for root, dirnames, filenames in os.walk("orchive"):
    for filename in fnmatch.filter(filenames, "*.wav"):
        # Bug fix: join with the walk root -- the bare filename only worked
        # for files directly inside the current working directory.
        src = os.path.join(root, filename)
        dst = os.path.join(root, filename[:-4] + ".1c.wav")
        print("sox {} {} remix 1,2".format(src, dst))
| StarcoderdataPython |
3317550 | """
You must put all includes from others libraries before the include of pygin
and put all include of other files after the include of pygin
"""
# Other Libraries includes:
# pygin includes:
from pygin import *
# files includes:
from game.game_objects.controllers.retry_controller import RetryController
class RetryScene(Scene):
    """Scene presenting the retry screen; wires up its controller list."""

    def __init__(self):
        """Build the controller reference list and hand it to the Scene base."""
        controllers = [RetryController]
        self.init_game_objects_controllers_reference_list = controllers
        super(RetryScene, self).__init__(controllers)
| StarcoderdataPython |
from abc import ABCMeta, abstractmethod
# NOTE: domain service concern with domain and business logic, so call them and has responsibility.
class ObjectRepositoryIF(metaclass=ABCMeta):
    """Abstract repository interface for object-source and bucket persistence.

    Implementations encapsulate database access so that domain services stay
    free of storage details.
    """

    @abstractmethod
    def find_by_id(self, id):
        """Find an object source by id. Returns the object, or None if absent."""
        pass

    @abstractmethod
    def find_by_name(self, name):
        """Find an object source by name. Returns the object, or None if absent."""
        pass

    @abstractmethod
    def find_by_user_id(self, user_id):
        """Find object sources by user id. Returns the objects, or None if absent."""
        pass

    @abstractmethod
    def save(self, ob):
        """Persist an object source. Returns the saved object, or an error."""
        pass

    @abstractmethod
    def save_bucket(self, bucket):
        """Persist a bucket. Returns the saved bucket, or an error."""
        pass

    @abstractmethod
    def delete(self, ob):
        """Delete an object source. Returns the deleted object, or an error."""
        pass

    @abstractmethod
    def delete_bucket(self, bucket):
        """Delete a bucket. Returns the deleted bucket, or an error."""
        pass

    @abstractmethod
    def update(self, ob):
        """Update an object source. Returns the updated object, or an error."""
        pass
| StarcoderdataPython |
# securitybot/util.py
__author__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>, <EMAIL>'
import pytz
import secrets
import os
from datetime import datetime, timedelta
from collections import namedtuple
from securitybot.tasker import StatusLevel
def tuple_builder(answer=None, text=None):
    """Build a lightweight response object exposing .answer and .text.

    Args:
        answer: The parsed answer value; None when no answer was given.
        text: The raw response text; defaults to the empty string.

    Returns:
        A 'Response' namedtuple instance. The previous implementation created
        a fresh namedtuple *class* per call and mutated its class attributes;
        returning a real instance keeps .answer/.text access identical while
        also supporting tuple behaviour (unpacking, equality).
    """
    Response = namedtuple('Response', ['answer', 'text'])
    return Response(answer=answer, text=text if text is not None else '')
def during_business_hours(time, bot):
    '''
    Checks if a given time falls within business hours in the bot's local
    timezone: from bot._opening_time (inclusive) to bot._closing_time
    (exclusive) on a weekday (Mon-Fri).

    Args:
        time (Datetime): A datetime object to check. Naive values are
            interpreted as UTC before conversion.
        bot: Object providing _local_tz, _opening_time and _closing_time.
    Returns:
        bool: True when inside business hours.
    '''
    if time.tzinfo is not None:
        here = time.astimezone(bot._local_tz)
    else:
        here = time.replace(tzinfo=pytz.utc).astimezone(bot._local_tz)
    # Bug fix: the weekday must also be evaluated on the converted local time
    # ('here'); using the original timestamp could report the wrong day when
    # the timezone conversion crosses midnight.
    return (bot._opening_time <= here.hour < bot._closing_time and
            1 <= here.isoweekday() <= 5)
def get_expiration_time(start, ttl, bot):
    '''
    Gets an expiration time for an alert.
    Works by adding on a certain time and wrapping around after business hours
    so that alerts that are started near the end of the day don't expire.

    Args:
        start (Datetime):
            A datetime object indicating when an alert was started.
            Naive values are interpreted as UTC.
        ttl (Timedelta):
            A timedelta representing the amount of time the alert
            should live for.
    Returns:
        Datetime: The expiry time for an alert.
    '''
    if start.tzinfo is None:
        start = start.replace(tzinfo=pytz.utc)
    end = start + ttl
    if not during_business_hours(end, bot):
        # Carry the overrun past closing time over to the next business day.
        # NOTE(review): constructing datetime(..., tzinfo=bot._local_tz) with a
        # pytz timezone yields LMT-style offsets; pytz recommends localize() --
        # confirm what type _local_tz actually is.
        end_of_day = datetime(year=start.year,
                              month=start.month,
                              day=start.day,
                              hour=bot._closing_time,
                              tzinfo=bot._local_tz)
        delta = end - end_of_day
        # Jump forward from closing time to the next opening time.
        next_day = end_of_day + timedelta(
            hours=(bot._opening_time - bot._closing_time) % 24
        )
        # This may land on a weekend, so march to the next weekday
        while not during_business_hours(next_day, bot):
            next_day += timedelta(days=1)
        end = next_day + delta
    return end
def create_new_alert(dbclient, title, ldap, description,
                     reason, url='N/A', key=None):
    # type: (str, str, str, str, str, str) -> None
    '''
    Create a new alert in the SQL DB, generating a random key when none is
    supplied, and seed the matching user-response and status rows.
    '''
    # A 32-byte random hex token identifies the alert across all three tables.
    if key is None:
        key = secrets.token_hex(nbytes=32)

    # Insert the alert row itself.
    alert_row = (key, ldap, title, description, reason, url)
    dbclient.execute('new_alert_alerts', alert_row)
    # key, comment, performed, authenticated
    dbclient.execute('new_alert_user_response', (key, '', 0, 0))
    dbclient.execute('new_alert_status', (key, StatusLevel.OPEN.value))
| StarcoderdataPython |
3344165 | <filename>piston/configuration/validators/theme_validator.py
from typing import Union
from piston.configuration.choose_config import choose_config
from piston.configuration.validators.validator_base import Validator
from piston.utilities.constants import Configuration, console, themes
class ThemeValidator(Validator):
    """Validates a string or list of themes by checking multiple criteria.

    NOTE: the constructor parameter 'themes' shadows the module-level 'themes'
    list imported from piston.utilities.constants; check_theme_exists below
    reads the module-level constant.
    """

    def __init__(self, themes: Union[str, list]) -> None:
        # The theme(s) under validation and the configured fallback theme.
        self.themes = themes
        self.default_theme = Configuration.default_configuration["theme"]
        super().__init__(self.themes, self.default_theme, "theme")

    @staticmethod
    def check_theme_exists(theme: str) -> bool:
        """Ensures that a given theme exists in the known theme list;
        prints a warning and returns False otherwise."""
        if theme not in themes:
            console.print(
                f'[red]Theme invalid, "{theme}" not recognized. Using default theme.[/red]'
            )
            return False
        return True

    def validate_theme(self) -> bool:
        """Validates the stored theme value: correct type, and every named
        theme must exist."""
        if not self.validate_type():
            return False
        if isinstance(self.themes, str) and not ThemeValidator.check_theme_exists(
            self.themes
        ):  # Check the singular theme exists.
            return False
        if isinstance(self.themes, list):  # Check each theme exists.
            for theme in self.themes:
                if not ThemeValidator.check_theme_exists(theme):
                    return False
        return True

    def fix_theme(self) -> str:
        """Finds and corrects any errors in a given theme or list of themes, then returns a fixed version."""
        if self.validate_theme():
            return choose_config(self.themes)
        return self.default_theme
| StarcoderdataPython |
3364190 | <reponame>chenkaisun/MMLI1
import argparse
def read_args(argv=None):
    """Build the experiment argument parser and parse the arguments.

    Args:
        argv: Optional list of argument strings; defaults to sys.argv[1:]
            (backward-compatible -- existing callers need no change).

    Returns:
        argparse.Namespace with all experiment/training/model options.
    """
    parser = argparse.ArgumentParser()

    # pretrained language model
    parser.add_argument("--plm", default="bert-base-cased", type=str, metavar='N')
    parser.add_argument("--max_seq_len", default=1024, type=int)

    # experiment
    parser.add_argument("--model_name", default="fet_model", type=str)
    parser.add_argument("--model_path", default="model/states/best_dev.pt", type=str)
    parser.add_argument("--experiment", default="exp", type=str)
    parser.add_argument("--experiment_path", default="../experiment/", type=str)
    parser.add_argument("--exp", default="fet", type=str)
    parser.add_argument("--exp_id", default="0", type=str)
    parser.add_argument("--analyze", default=0, type=int)
    parser.add_argument("--add_concept", default=0, type=int)
    parser.add_argument("--add_label_text", default=0, type=int)

    # data
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument("--tgt_name", default="p_np", type=str)
    parser.add_argument("--data_dir", default="data/property_pred/clintox.csv", type=str)
    parser.add_argument("--train_file", default="data/property_pred/clintox.csv", type=str)
    parser.add_argument("--val_file", default="dev.json", type=str)
    parser.add_argument("--test_file", default="test.json", type=str)
    parser.add_argument("--cache_filename", default="", type=str)
    parser.add_argument("--use_cache", default=0, type=int)
    parser.add_argument("--cache_data", default=0, type=int)

    # training params
    parser.add_argument("--num_atom_types", default=0, type=int)
    parser.add_argument("--num_edge_types", default=0, type=int)
    parser.add_argument("--batch_size", default=6, type=int, help="Batch size for training.")
    parser.add_argument("--plm_lr", default=2e-5, type=float, help="The initial learning rate for PLM.")
    parser.add_argument("--lr", default=1e-3, type=float, help="The initial learning rate for Adam.")
    parser.add_argument('--weight_decay', type=float, default=5e-4, help='Weight decay (L2 loss on parameters).')
    parser.add_argument("--activation", default="gelu", type=str)
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--warmup_ratio", default=0.06, type=float, help="Warm up ratio for Adam.")
    parser.add_argument("--num_epochs", default=15, type=float, help="Total number of training epochs to perform.")
    parser.add_argument("--eval_epoch", default=30, type=float, help="Number of steps between each evaluation.")
    parser.add_argument('--patience', type=int, default=8)
    parser.add_argument('--burn_in', type=int, default=0)
    parser.add_argument('--print_epoch_interval', type=int, default=10)
    # NOTE(review): no type= here, so a CLI-supplied value arrives as a string.
    parser.add_argument("--scheduler", default=1)
    # parser.add_argument('-l', '--list', nargs='+', help='<Required> Set flag', required=True)
    parser.add_argument("--gpu_id", default=0, type=int, help="gpu_id", )
    parser.add_argument("--n_gpu", default=1, type=int, help="Number of gpu", )
    parser.add_argument("--use_gpu", default=1, type=int, help="Using gpu or cpu", )
    parser.add_argument("--use_amp", default=1, type=int, help="Using mixed precision")
    parser.add_argument("--grad_accumulation_steps", default=1, type=int, help="Using mixed precision")
    parser.add_argument("--num_workers", default=1, type=int)

    # model params
    parser.add_argument("--in_dim", default=14, type=float, help="Feature dim")
    parser.add_argument("--out_dim", default=14, type=float, help="Feature dim")
    parser.add_argument("--dropout", default=0.2, type=float, help="Dropout")
    parser.add_argument('--g_dim', type=int, default=256, help='Number of final hidden units for graph.')
    parser.add_argument('--num_gnn_layers', type=int, default=2, help='Number of final hidden units for graph.')
    parser.add_argument('--plm_hidden_dim', type=int, default=768, help='Number of hidden units for plm.')
    parser.add_argument('--hidden_dim', type=int, default=128, help='Number of hidden units.')
    parser.add_argument('--embedding_dim', type=int, default=16, help='Number of embedding units.')
    # NOTE(review): no type= here, so a CLI-supplied value is a truthy string.
    parser.add_argument('--batch_norm', default=False, help="Please give a value for batch_norm")
    # parser.add_argument('--i_only', default=0, type=int)
    # parser.add_argument('--g_only', default=0, type=int)
    # parser.add_argument('--t_only', default=0, type=int)
    # parser.add_argument('--i', default=0, type=int)
    # parser.add_argument('--gt', default=0, type=int)
    # parser.add_argument('--t', default=0, type=int)
    # parser.add_argument('--td', default=0, type=int)
    # parser.add_argument('--tg', default=0, type=int)
    # parser.add_argument('--tdg', default=0, type=int)
    # parser.add_argument('--tdg_x', default=0, type=int)
    parser.add_argument('--model_type', default="tdgm")
    parser.add_argument('--mult_mask', default=0, type=int)
    parser.add_argument('--g_mult_mask', default=0, type=int)
    parser.add_argument('--g_global_pooling', default=1, type=int)
    parser.add_argument('--gnn_type', default="gine")
    parser.add_argument('--cm_type', default=0, type=int)  # 0 original, 1 no tformer, 2 3D
    parser.add_argument('--pool_type', default=0, type=int)  # for cm, 0 mean max, 1 max mean, 2 mean, 3 max
    parser.add_argument('--type_embed', default=0, type=int)
    parser.add_argument('--cm', default=0, type=int)
    parser.add_argument('--attn_analysis', default=0, type=int)
    parser.add_argument('--error_analysis', default=0, type=int)

    ##for lst
    # Bug fix: embed_dim default was the string "200" (argparse re-parses
    # string defaults through type=); use the int directly.
    parser.add_argument('--embed_dim', type=int, default=200, help='Number of embedding units.')
    parser.add_argument('--word_embed_type', type=int, default=16, help='Number of embedding units.')
    # Bug fix: dropout rates are fractional; type=int would crash on any
    # user-supplied value such as 0.5 (the defaults were already floats).
    parser.add_argument('--lstm_dropout', type=float, default=.5)
    parser.add_argument('--embed_dropout', type=float, default=.5)

    # auxiliary
    # parser.add_argument("--debug", action="store_true")
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--eval", action="store_true")

    # # experiment specific
    # parser.add_argument("--g", action="store_true")
    # parser.add_argument("--tg", action="store_true")

    args = parser.parse_args(argv)
    return args
| StarcoderdataPython |
3347224 | <gh_stars>1-10
#!/usr/bin/env python3
import sys
import os
import signal
import argparse
import logging
import syslog
from gpuctl import __version__
from gpuctl import DRYRUN, GpuCtl, logger
from gpuctl import PciDev, GpuDev, GpuAMD, GpuNV
from gpuctl import EthCtl, scan_miner
def run():
    """CLI entry point for gpuctl: parse options, discover/select GPU devices,
    perform one-shot actions (list, scan, set fan speed) or start the
    monitoring/fan-control loop."""
    gpu_ctl = None

    def sigstop(a, b):
        # SIGINT handler: stop the monitoring loop. 'gpu_ctl' is read from the
        # enclosing scope, so it sees the instance assigned later in run().
        gpu_ctl.stop()
        # print(f'exit')
        # sys.exit(0)

    signal.signal(signal.SIGINT, sigstop)

    parser = argparse.ArgumentParser()
    # device
    parser.add_argument('-l', '--list', action='store_true',
                        help="list all GPU cards")
    parser.add_argument(
        '-s', '--slots', type=str, help="use PCI slot name to locate GPU (ie. 0000:01:00.0/0000:01:00.1)")
    parser.add_argument('-a', '--amd', action='store_true',
                        help="only use AMD GPU")
    parser.add_argument('-n', '--nvidia', action='store_true',
                        help="only use Nvidia GPU")
    # timer
    parser.add_argument('--interval', type=int, default=GpuCtl.INTERVAL,
                        help="monitoring interval")
    parser.add_argument('-w', '--wait', type=int, default=GpuCtl.WAIT_PERIOD,
                        help="seconds before take action")
    # fan control
    parser.add_argument('--set-speed', type=int, choices=range(0,101), metavar="[0-100]",
                        help="set the fan speed (0~100)")
    parser.add_argument('-f', '--fan', type=int,
                        help="if temperature is exceed than FAN once, activate fan control (default:70)")
    parser.add_argument('-d', '--delta', type=int, default=2,
                        help="set fan speed if temperature diff %% is over DELTA (defaut:2)")
    parser.add_argument('--curve', type=str,
                        help="set temp/fan-speed curve (ie. 0:0/10:10/80:100)")
    # temperature monitoring/actions
    parser.add_argument('--temp', type=int, default=85,
                        help="over temperature action threshold")
    parser.add_argument('--tas', type=str,
                        help="over temperature action script")
    # rate
    parser.add_argument('--scan', action='store_true',
                        help="list miner through network inquiry")
    # misc
    parser.add_argument('-v', '--verbose',
                        action='store_true', help="show debug message")
    parser.add_argument('-V', '--version', action='version', version="%(prog)s-" + __version__, help="show version info")

    # parse arguments
    args = parser.parse_args()

    syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_USER)
    if args.verbose:
        logger.setLevel(logging.DEBUG)

    # check if script is existed
    if args.tas != None:
        if not os.path.isfile(args.tas):
            print(f'gpuctl: script {args.tas} not found !\n')
            sys.exit(0)

    # parse curve: "T:S/T:S/..." -> [[temp, speed], ...]
    curve = None
    if args.curve:
        clst = args.curve.split('/')
        for c in clst:
            if ':' not in c:
                print(f'Invaid curve: [{args.curve}]')
                sys.exit(0)
        if clst:
            curve = [[int(c.split(':')[0]), int(c.split(':')[1])]
                     for c in clst]
        if curve and GpuDev.check_curve(curve):
            print(f'Applying curve: [{args.curve}]')
        else:
            print(f'Invaid curve: [{args.curve}]')

    # One-shot mode: query miners over the network, print their stats, exit.
    if args.scan:
        miners = scan_miner()
        print('')
        for miner in miners:
            r = miner.get_stats()
            if r == None:
                continue
            print(f"Miner : {r['name']:12}, tcp port: {miner.port}, pid: {miner.pid}")
            print(f"Uptime: {r['uptime']}s")
            print(f"Rate(kh) Temp Fan ")
            print(f"======== ==== ==== ")
            for i in range(len(r['temp'])):
                print(f"{r['rate'][i]:8} {r['temp'][i]:3}c {r['fan'][i]:3}%")
            print('')
        sys.exit(0)

    # by slots
    gpu_devices = []
    slot_names = []

    # parse slot
    slots = None
    if args.slots:
        # by slot: build devices from the explicit '/'-separated slot list.
        slots = args.slots.split('/')
        for sn in slots:
            sn = sn.strip().lstrip()
            sn = sn.strip('\'').lstrip('\'')
            pdev = PciDev.create(sn)
            gpu_dev = None
            if pdev and pdev.is_amd():
                gpu_dev = GpuAMD(pdev)
            if pdev and pdev.is_nvidia():
                gpu_dev = GpuNV(pdev)
            if gpu_dev and gpu_dev.is_gpu():
                gpu_devices.append(gpu_dev)
                slot_names.append(gpu_dev.pci_dev.slot_name)
    else:
        # by vendors: discover all PCI devices, optionally filtered by vendor.
        vendors = []
        if args.amd:
            vendors.append('AMD')
        if args.nvidia:
            vendors.append('NVIDIA')
        pci_devices = PciDev.discovery(vendors)
        for pdev in pci_devices:
            gpu_dev = None
            if pdev and pdev.is_amd():
                gpu_dev = GpuAMD(pdev)
            if pdev and pdev.is_nvidia():
                gpu_dev = GpuNV(pdev)
            # remove duplicate gpu
            if gpu_dev and gpu_dev.is_gpu() and pdev.slot_name not in slot_names:
                gpu_devices.append(gpu_dev)

    # One-shot mode: apply the requested fan speed to every device, then exit.
    if args.set_speed != None:
        for gpu in gpu_devices:
            pdev = gpu.pci_dev
            # set fan speed
            print(f"Set slot {pdev.slot_name} fan speed to {args.set_speed}%")
            rv = gpu.set_speed(args.set_speed)
            if not rv:
                print(f"Failed to set slot {pdev.slot_name} fan speed !!!")
        sys.exit(0)

    # list monitored devices
    print("\n")
    print("ID Slot Name Vendor PCI-ID Temp. Fan PWR Working")
    print("-- ------------ -------- ----------- ----- ---- ------- -------")
    cnt = 1
    for gpu in gpu_devices:
        pdev = gpu.pci_dev
        working = gpu.is_working()
        t_str = gpu.get_temperature() if gpu.get_temperature() else '--'
        s_str = gpu.get_speed() if gpu.get_speed() else '--'
        p_str = gpu.get_pwr() if gpu.get_pwr() else '--'
        # NOTE(review): p_str may be the string '--' here, which would make
        # the {p_str:6.2f} format below raise -- confirm get_pwr()'s contract.
        msg = f"{cnt:2} {pdev.slot_name} {pdev.vendor_name():8} [{pdev.vendor_id}:{pdev.device_id}] "
        msg += f"{t_str:4}c {s_str:3}% {p_str:6.2f}w {working}"
        print(msg)
        cnt += 1
    print("\n")

    if args.list:
        sys.exit(0)

    if len(gpu_devices) == 0:
        print('No GPU found, abort !\n')
        sys.exit(0)

    # remove not working devices (iterate a copy since we mutate the list)
    for gpu in list(gpu_devices):
        if gpu.is_working() == False:
            print(f'slot {gpu.pci_dev.slot_name} is malfunction, removed !\n')
            gpu_devices.remove(gpu)

    gpu_ctl = GpuCtl(gpu_devices=gpu_devices,
                     fan=args.fan, curve=curve,
                     delta=args.delta,
                     temp=args.temp, tas=args.tas,
                     verbose=args.verbose
                     )

    if not gpu_ctl.set_interval(intvl=args.interval, wait_period=args.wait):
        print(
            f'Interval error {args.interval}/{args.wait} !\n')
        sys.exit(0)

    print(f"gpuctl: started\n")
    gpu_ctl.start()
# Standard CLI entry point: run the controller when executed as a script.
if __name__ == '__main__':
    run()
| StarcoderdataPython |
61719 | #
# Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from core.vtg.emg.common import get_conf_property
from core.vtg.emg.common.c.types import Declaration, Function, Array, Pointer, Primitive
from core.vtg.emg.processGenerator.linuxModule.interface import Resource, Callback, StructureContainer, \
FunctionInterface
def yield_categories(collection):
    """
    Analyze all new types found by SA component and yield final set of interface categories built from manually prepared
    interface specifications and global variables. All new categories and interfaces are added directly to the
    InterfaceCategoriesSpecification object. Also all types declarations are updated according with new imported C
    types. However, there are still unused interfaces present in the collection after this function termination.

    :param collection: InterfaceCategoriesSpecification object.
    :return: None
    """
    # Add resources: optionally introduce Resource interfaces for repeatedly
    # seen, otherwise unmatched callback parameters (configuration-driven).
    if get_conf_property(collection.conf, "generate new resource interfaces"):
        __populate_resources(collection)

    # Complement interface references (return values, parameters, array
    # elements and structure fields).
    __complement_interfaces(collection)
    return
def __populate_resources(collection):
    """For each category, introduce new Resource interfaces for callback
    parameter declarations that could not be matched to any existing
    interface and occur more than once within the category."""
    # Iterate over categories
    for category in collection.categories:
        usage = dict()

        # Extract callbacks: count unmatched parameter declarations per category.
        for callback in collection.callbacks(category):
            for parameter in (p for i, p in enumerate(callback.declaration.points.parameters)
                              if isinstance(p, Declaration) and
                              not (len(callback.param_interfaces) > i and callback.param_interfaces[i])):
                if parameter.identifier in usage:
                    usage[parameter.identifier]["counter"] += 1
                else:
                    # Try to resolve interface
                    intfs = collection.resolve_interface_weakly(parameter, category=callback.category, use_cache=False)
                    if len(intfs) == 0:
                        # Only unmatched resources should be introduced
                        usage[parameter.identifier] = {
                            "counter": 1,
                            "declaration": parameter
                        }

        # Introduce new resources for declarations seen at least twice,
        # falling back to an 'ldv_' prefix when the plain name is taken.
        for declaration in (usage[i]["declaration"] for i in usage if usage[i]["counter"] > 1):
            if "{}.{}".format(category, declaration.pretty_name) not in collection.interfaces:
                identifier = declaration.pretty_name
            elif "{}.{}".format(category, 'ldv_' + declaration.pretty_name) not in collection.interfaces:
                identifier = 'ldv_' + declaration.pretty_name
            else:
                raise RuntimeError("Cannot yield identifier for callback {!r} of category {!r}".
                                   format(declaration.identifier, category))

            interface = Resource(category, identifier)
            interface.declaration = declaration
            collection.set_intf(interface)
    return
def fulfill_function_interfaces(collection, interface, category=None):
    """
    Check an interface declaration (function or function pointer) and try to match its return value type and
    parameters arguments types with existing interfaces. The algorithm should be the following:

    * Match explicitly stated interface References (only if they meet given category).
    * Match rest parameters:
      - Avoid matching primitives and arrays and pointers of primitives;
      - Match interfaces from given category or from the category of already matched interfaces by interface
        references;
      - If there are more than one category is matched - do not do match to avoid mistakes in match.

    :param collection: InterfaceCategoriesSpecification object.
    :param interface: Interface object: KernelFunction or Callback.
    :param category: Category filter.
    :return: None.
    """
    def is_primitive_or_void(decl):
        """
        Return True if given declaration object has type of Primitive or pointer(* and **) to Primitive.

        :param decl: Declaration object
        :return: True - it is primitive, False - otherwise
        """
        # todo: Implement check agains arrays of primitives
        if isinstance(decl, Primitive) or (isinstance(decl, Pointer) and isinstance(decl.points, Primitive)) or \
                decl.identifier in {'void *', 'void **'}:
            return True
        else:
            return False

    collection.logger.debug("Try to match collateral interfaces for function '{!r}'".format(interface.identifier))
    # Check declaration type: callbacks wrap a function pointer, so the actual
    # function declaration is behind .points.
    if isinstance(interface, Callback):
        declaration = interface.declaration.points
    elif isinstance(interface, FunctionInterface):
        declaration = interface.declaration
    else:
        raise TypeError('Expect pointer to function or function declaration but got {!r}'.
                        format(str(type(interface.declaration).__name__)))

    # Second match rest types: first the return value (strict match, then weak).
    if not interface.rv_interface and declaration.return_value and not is_primitive_or_void(declaration.return_value):
        rv_interface = collection.resolve_interface(declaration.return_value, category, False)
        if len(rv_interface) == 0:
            rv_interface = collection.resolve_interface_weakly(declaration.return_value, category, False)

        if len(rv_interface) == 1:
            interface.rv_interface = rv_interface[-1]
        elif len(rv_interface) > 1:
            # Ambiguous: refuse to guess, just warn.
            collection.logger.warning(
                'Interface {!r} return value signature {!r} can be match with several following interfaces: {}'.
                format(interface.identifier, declaration.return_value.identifier,
                       ', '.join((i.identifier for i in rv_interface))))

    # Then every parameter that is not already matched, not a plain string
    # placeholder and not primitive/void.
    for index in range(len(declaration.parameters)):
        if not (len(interface.param_interfaces) > index and interface.param_interfaces[index]) and \
                not isinstance(declaration.parameters[index], str) and \
                not is_primitive_or_void(declaration.parameters[index]):
            p_interface = collection.resolve_interface(declaration.parameters[index], category, False)
            if len(p_interface) == 0:
                p_interface = collection.resolve_interface_weakly(declaration.parameters[index], category, False)

            if len(p_interface) == 1:
                p_interface = p_interface[0]
            elif len(p_interface) == 0:
                p_interface = None
            else:
                # Ambiguous match: warn and leave the slot unmatched.
                collection.logger.warning(
                    'Interface {!r} parameter in the position {} with signature {!r} can be match with several '
                    'following interfaces: {}'.format(interface.identifier,
                                                      index, declaration.parameters[index].identifier,
                                                      ', '.join((i.identifier for i in p_interface))))
                p_interface = None

            interface.set_param_interface(index, p_interface)

            # First successful match pins down the category for later lookups.
            if p_interface and not category:
                category = p_interface.category
def __complement_interfaces(collection):
    """Complete partially resolved interfaces in *collection*.

    First resolves parameter/return-value interfaces for callbacks and
    function interfaces, then matches element interfaces for array
    containers and field interfaces for structure containers.
    """
    def __match_interface_for_container(signature, category, id_match):
        # Pick exactly one interface for *signature* or return None.
        # Resolution order: weak match -> strict match -> identifier hint
        # (*id_match*, e.g. a field name) -> weak matches minus resources.
        candidates = collection.resolve_interface_weakly(signature, category, use_cache=False)
        if len(candidates) == 1:
            return candidates[0]
        elif len(candidates) == 0:
            return None
        else:
            # Several weak candidates: retry with strict declaration matching.
            strict_candidates = collection.resolve_interface(signature, category, use_cache=False)
            if len(strict_candidates) == 1:
                return strict_candidates[0]
            elif len(strict_candidates) > 1 and id_match:
                # Disambiguate ambiguous strict matches by short identifier.
                id_candidates = [i for i in strict_candidates if i.short_identifier == id_match]
                if len(id_candidates) == 1:
                    return id_candidates[0]
                else:
                    return None
            # Reached only without an identifier hint: ambiguity is fatal here.
            if len(strict_candidates) > 1:
                raise RuntimeError('There are several interfaces with the same declaration {}'.
                                   format(signature.to_string('a')))
            # Filter of resources
            candidates = [i for i in candidates if not isinstance(i, Resource)]
            if len(candidates) == 1:
                return candidates[0]
            else:
                return None
    # Resolve callback parameters
    for callback in collection.callbacks():
        fulfill_function_interfaces(collection, callback, callback.category)
    # Resolve kernel function parameters
    for func in collection.function_interfaces:
        fulfill_function_interfaces(collection, func)
    # todo: Remove dirty declarations in container references and add additional clean one
    # Resolve array elements
    for container in (cnt for cnt in collection.containers() if cnt.declaration and
                      isinstance(cnt.declaration, Array) and not cnt.element_interface):
        intf = __match_interface_for_container(container.declaration.element, container.category, None)
        if intf:
            container.element_interface = intf
    # Resolve structure interfaces
    for container in (cnt for cnt in collection.containers() if cnt.declaration and
                      isinstance(cnt, StructureContainer)):
        for field in container.declaration.fields:
            if field not in container.field_interfaces:
                # Use the field name itself as the identifier hint.
                intf = __match_interface_for_container(container.declaration.fields[field], container.category,
                                                       field)
                if intf:
                    container.field_interfaces[field] = intf
            if field in container.field_interfaces and isinstance(container.field_interfaces[field], Callback) and \
                    isinstance(container.declaration.fields[field], Pointer) and \
                    isinstance(container.declaration.fields[field].points, Function) and \
                    isinstance(container.field_interfaces[field].declaration, Pointer) and \
                    isinstance(container.field_interfaces[field].declaration.points, Function):
                # Track implementations from structures if types slightly differs and attached to structure variable
                container.field_interfaces[field].declaration = container.declaration.fields[field]
    return
| StarcoderdataPython |
3358250 | <filename>nadine-2.2.3/doors/keymaster/tests/test_controller.py
from django.test import SimpleTestCase
from django.utils import timezone
#from doors.hid_control import DoorController
from doors.keymaster.models import Keymaster
from doors.core import Messages, EncryptedConnection, CardHolder, Gatekeeper, TestDoorController
class DoorControllerTestCase(SimpleTestCase):
    """Unit tests for the TestDoorController door-controller implementation."""

    # Credentials are class-level so all tests share a single controller.
    name = "test controller"
    ip_address = "127.0.0.1"
    username = "username"
    password = "password"
    controller = TestDoorController(name, ip_address, username, password)

    def setUp(self):
        pass

    def test_creation(self):
        # Constructor arguments should be stored verbatim on the controller.
        ctrl = self.controller
        self.assertEqual(ctrl.door_name, self.name)
        self.assertEqual(ctrl.door_ip, self.ip_address)
        self.assertEqual(ctrl.door_user, self.username)
        self.assertEqual(ctrl.door_pass, self.password)

    def test_save_cardholder(self):
        # Saving a cardholder bumps the count; clearing resets it to zero.
        ctrl = self.controller
        ctrl.clear_data()
        self.assertEqual(0, ctrl.cardholder_count())
        holder = CardHolder("1", "Jacob", "Sayles", "jacobsayles", "123456")
        ctrl.save_cardholder(holder)
        self.assertEqual(1, ctrl.cardholder_count())
        ctrl.clear_data()
        self.assertEqual(0, ctrl.cardholder_count())

    def test_get_cardholder(self):
        # A saved cardholder is retrievable both by id and by door code.
        ctrl = self.controller
        holder = CardHolder("1", "Jacob", "Sayles", "jacobsayles", "123456")
        ctrl.clear_data()
        ctrl.save_cardholder(holder)
        self.assertEqual(holder, ctrl.get_cardholder_by_id("1"))
        self.assertEqual(holder, ctrl.get_cardholder_by_code("123456"))

    def test_process_codes(self):
        ctrl = self.controller
        ctrl.clear_data()
        # Seed three existing cardholders: one unchanged, one whose code
        # changes, and one that disappears from the incoming list.
        seed = (
            CardHolder("1", "Jacob", "Sayles", "jacobsayles", "123456"),  # No Change
            CardHolder("2", "Susan", "Dorsch", "susandorsch", "111111"),  # Change
            CardHolder("3", "Bob", "Smith", "bobsmith", "666666"),        # Delete
        )
        for holder in seed:
            ctrl.save_cardholder(holder)
        self.assertEqual(3, ctrl.cardholder_count())

        # Process the changes
        new_codes = [
            {'username': 'jacobsayles', 'first_name': 'Jacob', 'last_name': 'Sayles', 'code': '123456'},  # No Change
            {'username': 'susandorsch', 'first_name': 'Susan', 'last_name': 'Dorsch', 'code': '222222'},  # Change
            {'username': 'fredjones', 'first_name': 'Fred', 'last_name': 'Jones', 'code': '7777777'},     # Add
        ]
        changes = ctrl.process_door_codes(new_codes, load_credentials=False)
        self.assertEqual(len(changes), 4)
        for change in changes:
            # The untouched cardholder must not appear in the change set.
            self.assertNotEqual(change.username, 'jacobsayles')
            if change.username == 'susandorsch':
                # Code change shows up as delete(old code) + add(new code).
                if change.code == "111111":
                    self.assertEqual(change.action, 'delete')
                elif change.code == "222222":
                    self.assertEqual(change.action, 'add')
                else:
                    self.fail("user 'susandorsch' has weird data")
            elif change.username == 'bobsmith':
                self.assertEqual(change.action, 'delete')
            elif change.username == 'fredjones':
                self.assertEqual(change.action, 'add')
            else:
                self.fail("Weird data found")
# Copyright 2020 Office Nomads LLC (https://officenomads.com/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://opensource.org/licenses/Apache-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| StarcoderdataPython |
1601998 | <gh_stars>1-10
import segmentation_models_pytorch as smp
from configs import CFG
from .deeplabv3 import DeepLabV3ResNet18, DeepLabV3ResNet34, DeepLabV3ResNet50, DeepLabV3ResNet101
def build_model(num_channels, num_classes):
    """Instantiate the segmentation model selected by ``CFG.MODEL.NAME``.

    Args:
        num_channels: number of input channels fed to the encoder.
        num_classes: number of output segmentation classes.

    Returns:
        A ``segmentation_models_pytorch`` model built with the encoder
        backbone named by ``CFG.MODEL.BACKBONE.NAME``.

    Raises:
        NotImplementedError: if ``CFG.MODEL.NAME`` is not a supported model.
    """
    # Map each supported config name to its smp model class; every variant
    # is constructed with the same encoder/channel/class arguments.
    architectures = {
        'deeplabv3': smp.DeepLabV3,
        'deeplabv3+': smp.DeepLabV3Plus,
        'pspnet': smp.PSPNet,
        'unet': smp.Unet,
        'unet++': smp.UnetPlusPlus,
    }
    model_cls = architectures.get(CFG.MODEL.NAME)
    if model_cls is None:
        raise NotImplementedError('invalid model: {}'.format(CFG.MODEL.NAME))
    return model_cls(encoder_name=CFG.MODEL.BACKBONE.NAME,
                     in_channels=num_channels,
                     classes=num_classes)
| StarcoderdataPython |
3313826 | <filename>parsons/ngpvan/people.py<gh_stars>1-10
from parsons.utilities import json_format
import logging
logger = logging.getLogger(__name__)
class People(object):
    """Access to the NGP VAN "People" API endpoints.

    Wraps person search, create/update (upsert), canvass responses,
    relationships and code application over a shared VAN connection.
    """

    def __init__(self, van_connection):
        # Shared connection object used to issue all HTTP requests.
        self.connection = van_connection

    def find_person(self, first_name=None, last_name=None, date_of_birth=None, email=None,
                    phone=None, phone_type=None, street_number=None, street_name=None, zip=None):
        """
        Find a person record.

        .. note::
            Person find must include the following minimum combinations to conduct
            a search.

            - first_name, last_name, email
            - first_name, last_name, phone
            - first_name, last_name, zip5, date_of_birth
            - first_name, last_name, street_number, street_name, zip5
            - email_address

        `Args:`
            first_name: str
                The person's first name
            last_name: str
                The person's last name
            dob: str
                ISO 8601 formatted date of birth (e.g. ``1981-02-01``)
            email: str
                The person's email address
            phone: str
                Phone number of any type (Work, Cell, Home)
            street_number: str
                Street Number
            street_name: str
                Street Name
            zip: str
                5 digit zip code
        `Returns:`
            A person dict object
        """
        logger.info(f'Finding {first_name} {last_name}.')

        return self._people_search(
            first_name=first_name,
            last_name=last_name,
            date_of_birth=date_of_birth,
            email=email,
            phone=phone,
            phone_type=phone_type,
            street_number=street_number,
            street_name=street_name,
            zip=zip
        )

    def find_person_json(self, match_json):
        """
        Find a person record based on json data.

        .. note::
            Person find must include the following minimum combinations to conduct
            a search.

            - first_name, last_name, email
            - first_name, last_name, phone
            - first_name, last_name, zip5, date_of_birth
            - first_name, last_name, street_number, street_name, zip5
            - email_address

        .. note::
            A full list of possible values for the json, and its structure can be found
            `here <https://docs.ngpvan.com/reference/people#peoplefind>`_.

        `Args:`
            match_json: dict
                A dictionary of values to match against.
            fields: The fields to return. Leave as default for all available fields
        `Returns:`
            A person dict object
        """
        logger.info('Finding a match for json details.')

        return self._people_search(match_json=match_json)

    def update_person(self, id=None, id_type='vanid', first_name=None, last_name=None,
                      date_of_birth=None, email=None, phone=None, phone_type=None,
                      street_number=None, street_name=None, zip=None):
        """
        Update a person record based on a provided ID. All other arguments provided will be
        updated on the record.

        .. warning::
            This method can only be run on MyMembers, EveryAction, MyCampaign databases.

        `Args:`
            id: str
                A valid id
            id_type: str
                A known person identifier type available on this VAN instance.
                Defaults to ``vanid``.
            first_name: str
                The person's first name
            last_name: str
                The person's last name
            dob: str
                ISO 8601 formatted date of birth (e.g. ``1981-02-01``)
            email: str
                The person's email address
            phone: str
                Phone number of any type (Work, Cell, Home)
            phone_type: str
                One of 'H' for home phone, 'W' for work phone, 'C' for cell, 'M' for
                main phone or 'F' for fax line. Defaults to home phone.
            street_number: str
                Street Number
            street_name: str
                Street Name
            zip: str
                5 digit zip code
        `Returns:`
            A person dict
        """
        return self._people_search(
            id=id,
            id_type=id_type,
            first_name=first_name,
            last_name=last_name,
            date_of_birth=date_of_birth,
            email=email,
            phone=phone,
            phone_type=phone_type,
            street_number=street_number,
            street_name=street_name,
            zip=zip,
            create=True
        )

    def update_person_json(self, id, id_type='vanid', match_json=None):
        """
        Update a person record based on a provided ID within the match_json dict.

        .. note::
            A full list of possible values for the json, and its structure can be found
            `here <https://docs.ngpvan.com/reference/people#peoplevanid>`_.

        `Args:`
            id: str
                A valid id
            id_type: str
                A known person identifier type available on this VAN instance.
                Defaults to ``vanid``.
            match_json: dict
                A dictionary of values to match against and save.
        `Returns:`
            A person dict
        """
        return self._people_search(id=id, id_type=id_type, match_json=match_json, create=True)

    def upsert_person(self, first_name=None, last_name=None, date_of_birth=None, email=None,
                      phone=None, phone_type=None, street_number=None, street_name=None, zip=None):
        """
        Create or update a person record.

        .. note::
            Person find must include the following minimum combinations.

            - first_name, last_name, email
            - first_name, last_name, phone
            - first_name, last_name, zip5, date_of_birth
            - first_name, last_name, street_number, street_name, zip5
            - email_address

        .. warning::
            This method can only be run on MyMembers, EveryAction, MyCampaign databases.

        `Args:`
            first_name: str
                The person's first name
            last_name: str
                The person's last name
            dob: str
                ISO 8601 formatted date of birth (e.g. ``1981-02-01``)
            email: str
                The person's email address
            phone: str
                Phone number of any type (Work, Cell, Home)
            phone_type: str
                One of 'H' for home phone, 'W' for work phone, 'C' for cell, 'M' for
                main phone or 'F' for fax line. Defaults to home phone.
            street_number: str
                Street Number
            street_name: str
                Street Name
            zip: str
                5 digit zip code
        `Returns:`
            A person dict
        """
        return self._people_search(
            first_name=first_name,
            last_name=last_name,
            date_of_birth=date_of_birth,
            email=email,
            phone=phone,
            phone_type=phone_type,
            street_number=street_number,
            street_name=street_name,
            zip=zip,
            create=True
        )

    def upsert_person_json(self, match_json):
        """
        Create or update a person record.

        .. note::
            Person find must include the following minimum combinations.

            - first_name, last_name, email
            - first_name, last_name, phone
            - first_name, last_name, zip5, date_of_birth
            - first_name, last_name, street_number, street_name, zip5
            - email_address

        .. note::
            A full list of possible values for the json, and its structure can be found
            `here <https://docs.ngpvan.com/reference/people#peoplefindorcreate>`_. `vanId` can
            be passed to ensure the correct record is updated.

        .. warning::
            This method can only be run on MyMembers, EveryAction, MyCampaign databases.

        `Args:`
            match_json: dict
                A dictionary of values to match against and save.
        `Returns:`
            A person dict
        """
        return self._people_search(match_json=match_json, create=True)

    def _people_search(self, id=None, id_type=None, first_name=None, last_name=None,
                       date_of_birth=None, email=None, phone=None, phone_type='H',
                       street_number=None, street_name=None, zip=None, match_json=None,
                       create=False):
        # Internal method to hit the people find/create endpoints

        addressLine1 = None
        if street_name and street_number:
            addressLine1 = f'{street_number} {street_name}'

        # Build the match payload from individual fields unless one was given.
        if not match_json:
            json = {"firstName": first_name, "lastName": last_name}

            # Will fail if empty dicts are provided, hence needed to add if exist
            if email:
                json['emails'] = [{'email': email}]
            if phone:  # To Do: Strip out non-integers from phone
                json['phones'] = [{'phoneNumber': phone, 'phoneType': phone_type}]
            if date_of_birth:
                json['dateOfBirth'] = date_of_birth
            if zip or addressLine1:
                json['addresses'] = [{}]
                if zip:
                    json['addresses'][0]['zipOrPostalCode'] = zip
                if addressLine1:
                    json['addresses'][0]['addressLine1'] = addressLine1
        else:
            json = match_json
            if 'vanId' in match_json:
                id = match_json['vanId']

        url = 'people/'

        if id:
            if create:
                id_type = '' if id_type in ('vanid', None) else f"{id_type}:"
                url += id_type + str(id)
            else:
                # Plain lookups by id are delegated to get_person.
                return self.get_person(id, id_type=id_type)
        else:
            url += 'find'

        if create:
            url += 'OrCreate'
        else:
            # Ensure that the minimum combination of fields were passed
            json_flat = json_format.flatten_json(json)
            self._valid_search(**json_flat)

        return self.connection.post_request(url, json=json)

    def _valid_search(self, firstName=None, lastName=None, email=None, phoneNumber=None,
                      dateOfBirth=None, addressLine1=None, zipOrPostalCode=None, **kwargs):
        # Internal method to check if a search is valid; extra kwargs are ignored.
        # Raises ValueError when no accepted field combination is complete,
        # otherwise returns True.

        if (None in [firstName, lastName, email] and
                None in [firstName, lastName, phoneNumber] and
                None in [firstName, lastName, zipOrPostalCode, dateOfBirth] and
                None in [firstName, lastName, addressLine1, zipOrPostalCode] and
                None in [email]):

            raise ValueError("""
                             Person find must include the following minimum
                             combinations to conduct a search.
                                - first_name, last_name, email
                                - first_name, last_name, phone
                                - first_name, last_name, zip, dob
                                - first_name, last_name, street_number, street_name, zip
                                - email
                            """)

        return True

    def get_person(self, id, id_type='vanid', expand_fields=(
                   'contribution_history', 'addresses', 'phones', 'emails',
                   'codes', 'custom_fields', 'external_ids', 'preferences',
                   'recorded_addresses', 'reported_demographics', 'suppressions',
                   'cases', 'custom_properties', 'districts', 'election_records',
                   'membership_statuses', 'notes', 'organization_roles',
                   'disclosure_field_values')):
        """
        Returns a single person record using their VANID or external id.

        `Args:`
            id: str
                A valid id
            id_type: str
                A known person identifier type available on this VAN instance
                such as ``dwid``. Defaults to ``vanid``.
            expand_fields: list
                A list of fields for which to include data. If a field is omitted,
                ``None`` will be returned for that field. Can be ``contribution_history``,
                ``addresses``, ``phones``, ``emails``, ``codes``, ``custom_fields``,
                ``external_ids``, ``preferences``, ``recorded_addresses``,
                ``reported_demographics``, ``suppressions``, ``cases``, ``custom_properties``,
                ``districts``, ``election_records``, ``membership_statuses``, ``notes``,
                ``organization_roles``, ``scores``, ``disclosure_field_values``.

        `Returns:`
            A person dict
        """
        # NOTE: the default is a tuple (not a list) to avoid the shared
        # mutable-default-argument pitfall; callers may still pass a list.

        # Change end point based on id type
        url = 'people/'
        id_type = '' if id_type in ('vanid', None) else f"{id_type}:"
        url += id_type + str(id)

        # Remove the fields that are not returned in MyVoters (db_code 0).
        # This must happen BEFORE joining: the previous implementation
        # filtered the already-joined string, which iterated characters
        # rather than field names and therefore never removed anything.
        NOT_IN_MYVOTERS = ('codes', 'contribution_history', 'organization_roles')
        if self.connection.db_code == 0:
            expand_fields = [v for v in expand_fields if v not in NOT_IN_MYVOTERS]

        expand_fields = ','.join([json_format.arg_format(f) for f in expand_fields])

        logger.info(f'Getting person with {id_type} of {id} at url {url}')
        return self.connection.get_request(url, params={'$expand': expand_fields})

    def apply_canvass_result(self, id, result_code_id, id_type='vanid', contact_type_id=None,
                             input_type_id=None, date_canvassed=None):
        """
        Apply a canvass result to a person. Use this end point for attempts that do not
        result in a survey response or an activist code (e.g. Not Home).

        `Args:`
            id: str
                A valid person id
            result_code_id : int
                Specifies the result code of the attempt. Valid ids can be found
                by using the :meth:`get_canvass_responses_result_codes`
            id_type: str
                A known person identifier type available on this VAN instance
                such as ``dwid``
            contact_type_id : int
                `Optional`; A valid contact type id
            input_type_id : int
                `Optional`; Defaults to 11 (API Input)
            date_canvassed : str
                `Optional`; ISO 8601 formatted date. Defaults to todays date
        `Returns:`
            ``None``
        """
        logger.info(f'Applying result code {result_code_id} to {id_type} {id}.')
        self.apply_response(id, None, id_type=id_type, contact_type_id=contact_type_id,
                            input_type_id=input_type_id, date_canvassed=date_canvassed,
                            result_code_id=result_code_id)

    def toggle_volunteer_action(self, id, volunteer_activity_id, action, id_type='vanid',
                                result_code_id=None, contact_type_id=None, input_type_id=None,
                                date_canvassed=None):
        """
        Apply or remove a volunteer action to or from a person.

        `Args:`
            id: str
                A valid person id
            id_type: str
                A known person identifier type available on this VAN instance
                such as ``dwid``
            volunteer_activity_id: int
                A valid volunteer activity id
            action: str
                Either 'apply' or 'remove'
            result_code_id : int
                `Optional`; Specifies the result code of the response. If
                not included,responses must be specified. Conversely, if
                responses are specified, result_code_id must be null. Valid ids
                can be found by using the :meth:`get_canvass_responses_result_codes`
            contact_type_id: int
                `Optional`; A valid contact type id
            input_type_id: int
                `Optional`; Defaults to 11 (API Input)
            date_canvassed: str
                `Optional`; ISO 8601 formatted date. Defaults to todays date

        ** NOT IMPLEMENTED **
        """
        # Intentionally unimplemented; the intended implementation is kept
        # below (as an inert string) for future reference.
        """
        response = {"volunteerActivityId": volunteer_activity_id,
                    "action": self._action_parse(action),
                    "type": "VolunteerActivity"}

        logger.info(f'{action} volunteer activity {volunteer_activity_id} to {id_type} {id}')
        self.apply_response(id, response, id_type, contact_type_id, input_type_id, date_canvassed,
                            result_code_id)
        """

    def apply_response(self, id, response, id_type='vanid', contact_type_id=None,
                       input_type_id=None, date_canvassed=None, result_code_id=None,
                       omit_contact=False):
        """
        Apply responses such as survey questions, activist codes, and volunteer actions
        to a person record. This method allows you apply multiple responses (e.g. two survey
        questions) at the same time. It is a low level method that requires that you
        conform to the VAN API `response object
        format <https://docs.ngpvan.com/reference/canvass-responses>`_.

        `Args:`
            id: str
                A valid person id
            response: dict
                A list of dicts with each dict containing a valid action.
            id_type: str
                A known person identifier type available on this VAN instance
                such as ``dwid``
            result_code_id : int
                `Optional`; Specifies the result code of the response. If
                not included,responses must be specified. Conversely, if
                responses are specified, result_code_id must be null. Valid ids
                can be found by using the :meth:`get_canvass_responses_result_codes`
            contact_type_id : int
                `Optional`; A valid contact type id
            input_type_id : int
                `Optional`; Defaults to 11 (API Input)
            date_canvassed : str
                `Optional`; ISO 8601 formatted date. Defaults to todays date
            responses : list or dict
                The responses to apply.
            omit_contact: boolean
                Omit adding contact history to the response. This is particularly
                useful when adding activist codes that are not based on contact
                attempts.
        `Returns:`
            ``True`` if successful

        .. code-block:: python

            response = [{"activistCodeId": 18917,
                         "action": "Apply",
                         "type": "ActivistCode"},
                        {"surveyQuestionId": 109149,
                         "surveyResponseId": 465468,
                         "action": "SurveyResponse"}
                        ]
            van.apply_response(5222, response)
        """ # noqa: E501,E261

        # result_code_id and responses are mutually exclusive in the VAN API;
        # validate before building the payload (the old code repeated this
        # check twice after already mutating the payload).
        if result_code_id is not None and response is not None:
            raise ValueError("Both result_code_id and responses cannot be specified.")

        # Set url based on id_type
        if id_type == 'vanid':
            url = f"people/{id}/canvassResponses"
        else:
            url = f"people/{id_type}:{id}/canvassResponses"

        json = {"canvassContext": {
                "contactTypeId": contact_type_id,
                "inputTypeId": input_type_id,
                "dateCanvassed": date_canvassed,
                "omitActivistCodeContactHistory": omit_contact},
                "resultCodeId": result_code_id}

        # A single response dict is accepted as a convenience; the API
        # expects a list of response objects.
        if isinstance(response, dict):
            response = [response]
        if response:
            json['responses'] = response

        return self.connection.post_request(url, json=json)

    def create_relationship(self, vanid_1, vanid_2, relationship_id):
        """
        Create a relationship between two individuals

        `Args:`
            vanid_1 : int
                The vanid of the primary individual; aka the node
            vanid_2 : int
                The vanid of the secondary individual; the spoke
            relationship_id : int
                The relationship id indicating the type of relationship
        `Returns:`
            ``None``
        """
        json = {'relationshipId': relationship_id,
                'vanId': vanid_2}

        self.connection.post_request(f"people/{vanid_1}/relationships", json=json)
        logger.info(f'Relationship {vanid_1} to {vanid_2} created.')

    def apply_person_code(self, id, code_id, id_type='vanid'):
        """
        Apply a code to a person.

        `Args:`
            id: str
                A valid person id.
            code_id: int
                A valid code id.
            id_type: str
                A known person identifier type available on this VAN instance
                such as ``dwid``
        `Returns:`
            ``None``
        """
        # Set url based on id_type
        if id_type == 'vanid':
            url = f"people/{id}/codes"
        else:
            url = f"people/{id_type}:{id}/codes"

        json = {"codeId": code_id}

        self.connection.post_request(url, json=json)
        logger.info(f'Code {code_id} applied to person id {id}.')
| StarcoderdataPython |
3354435 | <gh_stars>0
# -*- coding: utf-8 -*-
# -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2008-2010 <NAME>
# Copyright (C) 2012-2013 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
'''
GTK-Doc comment block format
----------------------------
A GTK-Doc comment block is built out of multiple parts. Each part can be further
divided into fields which are separated by a colon ("``:``") delimiter.
Known parts and the fields they are constructed from look like the following
(optional fields are enclosed in square brackets)::
┌───────────────────────────────────────────────────────────┐
│ /** │ ─▷ start token
├────────────────────┬──────────────────────────────────────┤
│ * identifier_name │ [: annotations] │ ─▷ identifier part
├────────────────────┼─────────────────┬────────────────────┤
│ * @parameter_name │ [: annotations] │ : description │ ─▷ parameter part
├────────────────────┴─────────────────┴────────────────────┤
│ * │ ─▷ comment block description
│ * comment_block_description │
├─────────────┬─────────────────┬───────────┬───────────────┤
│ * tag_name │ [: annotations] │ [: value] │ : description │ ─▷ tag part
├─────────────┴─────────────────┴───────────┴───────────────┤
│ */ │ ─▷ end token
└───────────────────────────────────────────────────────────┘
There are two conditions that must be met before a comment block is recognized
as a GTK-Doc comment block:
#. The comment block is opened with a GTK-Doc start token ("``/**``")
#. The first line following the start token contains a valid identifier part
Once a GTK-Doc comment block has been identified as such and has been stripped
from its start and end tokens the remaining parts have to be written in a
specific order:
#. There must be exactly 1 `identifier` part on the first line of the
comment block which consists of:
* a required `identifier_name` field
* an optional `annotations` field
#. Zero or more `parameter` parts, each consisting of:
* a required `parameter_name` field
* an optional `annotations` field
* a required `description` field (can be the empty string)
#. One optional `comment block description` part which must begin with at
least 1 empty line signaling the start of this part.
#. Zero or more `tag` parts, each consisting of:
* a required `tag_name` field
* an optional `annotations` field
* an optional `value` field
* a required `description` field (can be the empty string)
Additionally, the following restrictions are in effect:
#. Separating parts with an empty line:
* `identifier` and `parameter` parts cannot be separated from each other by
an empty line as this would signal the start of the
`comment block description` part (see above).
* it is required to separate the `comment block description` part from the
`identifier` or `parameter` parts with an empty line (see above)
* `comment block description` and `tag` parts can optionally be separated
by an empty line
#. Parts and fields cannot span multiple lines, except for:
* the `comment_block_description` part
* `parameter description` and `tag description` fields
#. Taking the above restrictions into account, spanning multiple paragraphs is
limited to the `comment block description` part and `tag description` fields.
Refer to the `GTK-Doc manual`_ for more detailed usage information.
.. _GTK-Doc manual:
http://developer.gnome.org/gtk-doc-manual/1.18/documenting.html.en
'''
from __future__ import absolute_import
import os
import re
from collections import namedtuple
from operator import ne, gt, lt
from .collections import Counter, OrderedDict
from .message import Position, warn, error
# GTK-Doc comment block parts
PART_IDENTIFIER = 0
PART_PARAMETERS = 1
PART_DESCRIPTION = 2
PART_TAGS = 3
# GTK-Doc comment block tags
# 1) Basic GTK-Doc tags.
# Note: This list cannot be extended unless the GTK-Doc project defines new tags.
TAG_DEPRECATED = 'deprecated'
TAG_RETURNS = 'returns'
TAG_SINCE = 'since'
TAG_STABILITY = 'stability'
GTKDOC_TAGS = [TAG_DEPRECATED,
TAG_RETURNS,
TAG_SINCE,
TAG_STABILITY]
# 2) Deprecated basic GTK-Doc tags.
# Note: This list cannot be extended unless the GTK-Doc project defines new deprecated tags.
TAG_DESCRIPTION = 'description'
TAG_RETURN_VALUE = 'return value'
DEPRECATED_GTKDOC_TAGS = [TAG_DESCRIPTION,
TAG_RETURN_VALUE]
# 3) Deprecated GObject-Introspection tags.
# Unfortunately, these where accepted by old versions of this module.
TAG_RETURN = 'return'
TAG_RETURNS_VALUE = 'returns value'
DEPRECATED_GI_TAGS = [TAG_RETURN,
TAG_RETURNS_VALUE]
# 4) Deprecated GObject-Introspection annotation tags.
# Accepted by old versions of this module while they should have been
# annotations on the identifier part instead.
# Note: This list can not be extended ever again. The GObject-Introspection project is not
# allowed to invent GTK-Doc tags. Please create new annotations instead.
TAG_ATTRIBUTES = 'attributes'
TAG_GET_VALUE_FUNC = 'get value func'
TAG_REF_FUNC = 'ref func'
TAG_RENAME_TO = 'rename to'
TAG_SET_VALUE_FUNC = 'set value func'
TAG_TRANSFER = 'transfer'
TAG_TYPE = 'type'
TAG_UNREF_FUNC = 'unref func'
TAG_VALUE = 'value'
TAG_VFUNC = 'virtual'
DEPRECATED_GI_ANN_TAGS = [TAG_ATTRIBUTES,
TAG_GET_VALUE_FUNC,
TAG_REF_FUNC,
TAG_RENAME_TO,
TAG_SET_VALUE_FUNC,
TAG_TRANSFER,
TAG_TYPE,
TAG_UNREF_FUNC,
TAG_VALUE,
TAG_VFUNC]
ALL_TAGS = GTKDOC_TAGS + DEPRECATED_GTKDOC_TAGS + DEPRECATED_GI_TAGS + DEPRECATED_GI_ANN_TAGS
# GObject-Introspection annotation start/end tokens
ANN_LPAR = '('
ANN_RPAR = ')'
# GObject-Introspection annotations
# 1) Supported annotations
# Note: when adding new annotations, GTK-Doc project's gtkdoc-mkdb needs to be modified too!
ANN_ALLOW_NONE = 'allow-none'
ANN_ARRAY = 'array'
ANN_ATTRIBUTES = 'attributes'
ANN_CLOSURE = 'closure'
ANN_CONSTRUCTOR = 'constructor'
ANN_DESTROY = 'destroy'
ANN_ELEMENT_TYPE = 'element-type'
ANN_FOREIGN = 'foreign'
ANN_GET_VALUE_FUNC = 'get-value-func'
ANN_IN = 'in'
ANN_INOUT = 'inout'
ANN_METHOD = 'method'
ANN_NULLABLE = 'nullable'
ANN_OPTIONAL = 'optional'
ANN_OUT = 'out'
ANN_REF_FUNC = 'ref-func'
ANN_RENAME_TO = 'rename-to'
ANN_SCOPE = 'scope'
ANN_SET_VALUE_FUNC = 'set-value-func'
ANN_SKIP = 'skip'
ANN_TRANSFER = 'transfer'
ANN_TYPE = 'type'
ANN_UNREF_FUNC = 'unref-func'
ANN_VFUNC = 'virtual'
ANN_VALUE = 'value'
GI_ANNS = [ANN_ALLOW_NONE,
ANN_NULLABLE,
ANN_OPTIONAL,
ANN_ARRAY,
ANN_ATTRIBUTES,
ANN_CLOSURE,
ANN_CONSTRUCTOR,
ANN_DESTROY,
ANN_ELEMENT_TYPE,
ANN_FOREIGN,
ANN_GET_VALUE_FUNC,
ANN_IN,
ANN_INOUT,
ANN_METHOD,
ANN_OUT,
ANN_REF_FUNC,
ANN_RENAME_TO,
ANN_SCOPE,
ANN_SET_VALUE_FUNC,
ANN_SKIP,
ANN_TRANSFER,
ANN_TYPE,
ANN_UNREF_FUNC,
ANN_VFUNC,
ANN_VALUE]
# 2) Deprecated GObject-Introspection annotations
ANN_ATTRIBUTE = 'attribute'
ANN_INOUT_ALT = 'in-out'
DEPRECATED_GI_ANNS = [ANN_ATTRIBUTE,
ANN_INOUT_ALT]
ALL_ANNOTATIONS = GI_ANNS + DEPRECATED_GI_ANNS
DICT_ANNOTATIONS = [ANN_ARRAY, ANN_ATTRIBUTES]
LIST_ANNOTATIONS = [ann for ann in ALL_ANNOTATIONS if ann not in DICT_ANNOTATIONS]
# (array) annotation options
OPT_ARRAY_FIXED_SIZE = 'fixed-size'
OPT_ARRAY_LENGTH = 'length'
OPT_ARRAY_ZERO_TERMINATED = 'zero-terminated'
ARRAY_OPTIONS = [OPT_ARRAY_FIXED_SIZE,
OPT_ARRAY_LENGTH,
OPT_ARRAY_ZERO_TERMINATED]
# (out) annotation options
OPT_OUT_CALLEE_ALLOCATES = 'callee-allocates'
OPT_OUT_CALLER_ALLOCATES = 'caller-allocates'
OUT_OPTIONS = [OPT_OUT_CALLEE_ALLOCATES,
OPT_OUT_CALLER_ALLOCATES]
# (scope) annotation options
OPT_SCOPE_ASYNC = 'async'
OPT_SCOPE_CALL = 'call'
OPT_SCOPE_NOTIFIED = 'notified'
SCOPE_OPTIONS = [OPT_SCOPE_ASYNC,
OPT_SCOPE_CALL,
OPT_SCOPE_NOTIFIED]
# (transfer) annotation options
OPT_TRANSFER_CONTAINER = 'container'
OPT_TRANSFER_FLOATING = 'floating'
OPT_TRANSFER_FULL = 'full'
OPT_TRANSFER_NONE = 'none'
TRANSFER_OPTIONS = [OPT_TRANSFER_CONTAINER,
OPT_TRANSFER_FLOATING,
OPT_TRANSFER_FULL,
OPT_TRANSFER_NONE]
# Pattern used to normalize different types of line endings
LINE_BREAK_RE = re.compile(r'\r\n|\r|\n', re.UNICODE)
# Pattern matching the start token of a comment block.
COMMENT_BLOCK_START_RE = re.compile(
r'''
^ # start
(?P<code>.*?) # whitespace, code, ...
\s* # 0 or more whitespace characters
(?P<token>/\*{2}(?![\*/])) # 1 forward slash character followed
# by exactly 2 asterisk characters
# and not followed by a slash character
\s* # 0 or more whitespace characters
(?P<comment>.*?) # GTK-Doc comment text
\s* # 0 or more whitespace characters
$ # end
''',
re.UNICODE | re.VERBOSE)
# Pattern matching the end token of a comment block.
COMMENT_BLOCK_END_RE = re.compile(
r'''
^ # start
\s* # 0 or more whitespace characters
(?P<comment>.*?) # GTK-Doc comment text
\s* # 0 or more whitespace characters
(?P<token>\*+/) # 1 or more asterisk characters followed
# by exactly 1 forward slash character
(?P<code>.*?) # whitespace, code, ...
\s* # 0 or more whitespace characters
$ # end
''',
re.UNICODE | re.VERBOSE)
# Pattern matching the ' * ' at the beginning of every
# line inside a comment block.
COMMENT_ASTERISK_RE = re.compile(
r'''
^ # start
\s* # 0 or more whitespace characters
(?P<comment>.*?) # invalid comment text
\s* # 0 or more whitespace characters
\* # 1 asterisk character
\s? # 0 or 1 whitespace characters
# WARNING: removing more than 1
# whitespace character breaks
# embedded example program indentation
''',
re.UNICODE | re.VERBOSE)
# Pattern matching the indentation level of a line (used
# to get the indentation before and after the ' * ').
INDENTATION_RE = re.compile(
r'''
^
(?P<indentation>\s*) # 0 or more whitespace characters
.*
$
''',
re.UNICODE | re.VERBOSE)
# Pattern matching an empty line.
EMPTY_LINE_RE = re.compile(
r'''
^ # start
\s* # 0 or more whitespace characters
$ # end
''',
re.UNICODE | re.VERBOSE)
# Pattern matching SECTION identifiers.
SECTION_RE = re.compile(
r'''
^ # start
\s* # 0 or more whitespace characters
SECTION # SECTION
\s* # 0 or more whitespace characters
(?P<delimiter>:?) # delimiter
\s* # 0 or more whitespace characters
(?P<section_name>\w\S+?) # section name
\s* # 0 or more whitespace characters
:? # invalid delimiter
\s* # 0 or more whitespace characters
$
''',
re.UNICODE | re.VERBOSE)
# Pattern matching symbol (function, constant, struct and enum) identifiers.
SYMBOL_RE = re.compile(
r'''
^ # start
\s* # 0 or more whitespace characters
(?P<symbol_name>[\w-]*\w) # symbol name
\s* # 0 or more whitespace characters
(?P<delimiter>:?) # delimiter
\s* # 0 or more whitespace characters
(?P<fields>.*?) # annotations + description
\s* # 0 or more whitespace characters
:? # invalid delimiter
\s* # 0 or more whitespace characters
$ # end
''',
re.UNICODE | re.VERBOSE)
# Pattern matching property identifiers.
PROPERTY_RE = re.compile(
r'''
^ # start
\s* # 0 or more whitespace characters
(?P<class_name>[\w]+) # class name
\s* # 0 or more whitespace characters
:{1} # 1 required colon
\s* # 0 or more whitespace characters
(?P<property_name>[\w-]*\w) # property name
\s* # 0 or more whitespace characters
(?P<delimiter>:?) # delimiter
\s* # 0 or more whitespace characters
(?P<fields>.*?) # annotations + description
\s* # 0 or more whitespace characters
:? # invalid delimiter
\s* # 0 or more whitespace characters
$ # end
''',
re.UNICODE | re.VERBOSE)
# Pattern matching signal identifiers.
SIGNAL_RE = re.compile(
r'''
^ # start
\s* # 0 or more whitespace characters
(?P<class_name>[\w]+) # class name
\s* # 0 or more whitespace characters
:{2} # 2 required colons
\s* # 0 or more whitespace characters
(?P<signal_name>[\w-]*\w) # signal name
\s* # 0 or more whitespace characters
(?P<delimiter>:?) # delimiter
\s* # 0 or more whitespace characters
(?P<fields>.*?) # annotations + description
\s* # 0 or more whitespace characters
:? # invalid delimiter
\s* # 0 or more whitespace characters
$ # end
''',
re.UNICODE | re.VERBOSE)
# Pattern matching parameters.
PARAMETER_RE = re.compile(
r'''
^ # start
\s* # 0 or more whitespace characters
@ # @ character
(?P<parameter_name>[\w-]*\w|.*?\.\.\.) # parameter name
\s* # 0 or more whitespace characters
:{1} # 1 required delimiter
\s* # 0 or more whitespace characters
(?P<fields>.*?) # annotations + description
\s* # 0 or more whitespace characters
$ # end
''',
re.UNICODE | re.VERBOSE)
# Pattern matching tags.
_all_tags = '|'.join(ALL_TAGS).replace(' ', r'\s')
TAG_RE = re.compile(
r'''
^ # start
\s* # 0 or more whitespace characters
(?P<tag_name>''' + _all_tags + r''') # tag name
\s* # 0 or more whitespace characters
:{1} # 1 required delimiter
\s* # 0 or more whitespace characters
(?P<fields>.*?) # annotations + value + description
\s* # 0 or more whitespace characters
$ # end
''',
re.UNICODE | re.VERBOSE | re.IGNORECASE)
# Pattern matching value and description fields for TAG_DEPRECATED & TAG_SINCE tags.
TAG_VALUE_VERSION_RE = re.compile(
r'''
^ # start
\s* # 0 or more whitespace characters
(?P<value>([0-9\.])*) # value
\s* # 0 or more whitespace characters
(?P<delimiter>:?) # delimiter
\s* # 0 or more whitespace characters
(?P<description>.*?) # description
\s* # 0 or more whitespace characters
$ # end
''',
re.UNICODE | re.VERBOSE)
# Pattern matching value and description fields for TAG_STABILITY tags.
TAG_VALUE_STABILITY_RE = re.compile(
r'''
^ # start
\s* # 0 or more whitespace characters
(?P<value>(stable|unstable|private|internal)?) # value
\s* # 0 or more whitespace characters
(?P<delimiter>:?) # delimiter
\s* # 0 or more whitespace characters
(?P<description>.*?) # description
\s* # 0 or more whitespace characters
$ # end
''',
re.UNICODE | re.VERBOSE | re.IGNORECASE)
class GtkDocAnnotations(OrderedDict):
    '''
    An ordered dictionary mapping annotation names to annotation options (if any). Annotation
    options can be either a :class:`list`, a :class:`giscanner.collections.OrderedDict`
    (depending on the annotation name) or :const:`None`.
    '''

    # Fix: the original ``('position')`` was a plain string, not a tuple (missing
    # trailing comma). __slots__ happens to accept a single string, so it worked
    # by accident, but it would break as soon as a second slot name was appended.
    __slots__ = ('position', )

    def __init__(self, position=None):
        '''
        :param position: a :class:`giscanner.message.Position` instance specifying the
                         location of the annotations in the source file or :const:`None`
        '''
        OrderedDict.__init__(self)

        #: A :class:`giscanner.message.Position` instance specifying the location of the
        #: annotations in the source file or :const:`None`.
        self.position = position
class GtkDocAnnotatable(object):
    '''
    Base class for GTK-Doc comment block parts that can be annotated.
    '''

    __slots__ = ('position', 'annotations')

    #: A :class:`tuple` of annotation name constants that are valid for this object. Annotation
    #: names not in this :class:`tuple` will be reported as *unknown* by :func:`validate`. The
    #: :attr:`valid_annotations` class attribute should be overridden by subclasses.
    valid_annotations = ()

    def __init__(self, position=None):
        #: A :class:`giscanner.message.Position` instance specifying the location of the
        #: annotatable comment block part in the source file or :const:`None`.
        self.position = position

        #: A :class:`GtkDocAnnotations` instance representing the annotations
        #: applied to this :class:`GtkDocAnnotatable` instance.
        self.annotations = GtkDocAnnotations()

    def __repr__(self):
        # Fix: the original format string contained two "%r" conversions but only
        # a single-element argument tuple, raising TypeError whenever repr() was
        # taken on an instance.
        return '<GtkDocAnnotatable %r>' % (self.annotations, )

    def validate(self):
        '''
        Validate annotations stored by the :class:`GtkDocAnnotatable` instance, if any.
        '''
        if self.annotations:
            position = self.annotations.position

            for ann_name, options in self.annotations.items():
                if ann_name in self.valid_annotations:
                    # Dispatch to the matching _do_validate_<annotation> method.
                    validate = getattr(self, '_do_validate_' + ann_name.replace('-', '_'))
                    validate(position, ann_name, options)
                elif ann_name in ALL_ANNOTATIONS:
                    # Not error() as ann_name might be valid in some newer
                    # GObject-Introspection version.
                    warn('unexpected annotation: %s' % (ann_name, ), position)
                else:
                    # Not error() as ann_name might be valid in some newer
                    # GObject-Introspection version.
                    warn('unknown annotation: %s' % (ann_name, ), position)

    def _validate_options(self, position, ann_name, n_options, expected_n_options, operator,
                          message):
        '''
        Validate the number of options held by an annotation according to the test
        ``operator(n_options, expected_n_options)``.

        :param position: :class:`giscanner.message.Position` of the line in the source file
                         containing the annotation to be validated
        :param ann_name: name of the annotation holding the options to validate
        :param n_options: number of options held by the annotation
        :param expected_n_options: number of expected options
        :param operator: an operator function from python's :mod:`operator` module, for example
                         :func:`operator.ne` or :func:`operator.lt`
        :param message: warning message used when the test
                        ``operator(n_options, expected_n_options)`` fails.
        '''
        if n_options == 0:
            t = 'none'
        else:
            t = '%d' % (n_options, )

        if expected_n_options == 0:
            s = 'no options'
        elif expected_n_options == 1:
            s = 'one option'
        else:
            s = '%d options' % (expected_n_options, )

        if operator(n_options, expected_n_options):
            warn('"%s" annotation %s %s, %s given' % (ann_name, message, s, t), position)

    def _validate_annotation(self, position, ann_name, options, choices=None,
                             exact_n_options=None, min_n_options=None, max_n_options=None):
        '''
        Validate an annotation.

        :param position: :class:`giscanner.message.Position` of the line in the source file
                         containing the annotation to be validated
        :param ann_name: name of the annotation holding the options to validate
        :param options: annotation options to be validated
        :param choices: an iterable of allowed option names or :const:`None` to skip this test
        :param exact_n_options: exact number of expected options or :const:`None` to skip this test
        :param min_n_options: minimum number of expected options or :const:`None` to skip this test
        :param max_n_options: maximum number of expected options or :const:`None` to skip this test
        '''
        n_options = len(options)

        if exact_n_options is not None:
            self._validate_options(position,
                                   ann_name, n_options, exact_n_options, ne, 'needs')

        if min_n_options is not None:
            self._validate_options(position,
                                   ann_name, n_options, min_n_options, lt, 'takes at least')

        if max_n_options is not None:
            self._validate_options(position,
                                   ann_name, n_options, max_n_options, gt, 'takes at most')

        if options and choices is not None:
            # Only the first option is checked against the allowed choices.
            option = options[0]
            if option not in choices:
                warn('invalid "%s" annotation option: "%s"' % (ann_name, option), position)

    def _do_validate_allow_none(self, position, ann_name, options):
        '''Validate the ``(allow-none)`` annotation: takes no options.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=0)

    def _do_validate_array(self, position, ann_name, options):
        '''
        Validate the ``(array)`` annotation: options are free-form but each
        recognised option ("fixed-size", "zero-terminated", "length") has its
        own value constraints.

        :param position: :class:`giscanner.message.Position` of the line in the source file
                         containing the annotation to be validated
        :param ann_name: name of the annotation holding the options to validate
        :param options: annotation options held by the annotation
        '''
        if len(options) == 0:
            return

        for option, value in options.items():
            if option == OPT_ARRAY_FIXED_SIZE:
                try:
                    int(value)
                except (TypeError, ValueError):
                    if value is None:
                        warn('"%s" annotation option "%s" needs a value' % (ann_name, option),
                             position)
                    else:
                        warn('invalid "%s" annotation option "%s" value "%s", must be an integer' %
                             (ann_name, option, value),
                             position)
            elif option == OPT_ARRAY_ZERO_TERMINATED:
                if value is not None and value not in ['0', '1']:
                    warn('invalid "%s" annotation option "%s" value "%s", must be 0 or 1' %
                         (ann_name, option, value),
                         position)
            elif option == OPT_ARRAY_LENGTH:
                if value is None:
                    warn('"%s" annotation option "length" needs a value' % (ann_name, ),
                         position)
            else:
                warn('invalid "%s" annotation option: "%s"' % (ann_name, option),
                     position)

    def _do_validate_attributes(self, position, ann_name, options):
        '''Validate the ``(attributes)`` annotation: free-form, nothing to check.'''
        # The 'attributes' annotation allows free form annotations.
        pass

    def _do_validate_closure(self, position, ann_name, options):
        '''Validate the ``(closure)`` annotation: takes at most one option.'''
        self._validate_annotation(position, ann_name, options, max_n_options=1)

    def _do_validate_constructor(self, position, ann_name, options):
        '''Validate the ``(constructor)`` annotation: takes no options.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=0)

    def _do_validate_destroy(self, position, ann_name, options):
        '''Validate the ``(destroy)`` annotation: takes exactly one option.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=1)

    def _do_validate_element_type(self, position, ann_name, options):
        '''Validate the ``(element-type)`` annotation: takes one or two options.'''
        self._validate_annotation(position, ann_name, options, min_n_options=1, max_n_options=2)

    def _do_validate_foreign(self, position, ann_name, options):
        '''Validate the ``(foreign)`` annotation: takes no options.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=0)

    def _do_validate_get_value_func(self, position, ann_name, options):
        '''Validate the ``(get-value-func)`` annotation: takes exactly one option.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=1)

    def _do_validate_in(self, position, ann_name, options):
        '''Validate the ``(in)`` annotation: takes no options.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=0)

    def _do_validate_inout(self, position, ann_name, options):
        '''Validate the ``(inout)`` annotation: takes no options.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=0)

    def _do_validate_method(self, position, ann_name, options):
        '''Validate the ``(method)`` annotation: takes no options.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=0)

    def _do_validate_nullable(self, position, ann_name, options):
        '''Validate the ``(nullable)`` annotation: takes no options.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=0)

    def _do_validate_optional(self, position, ann_name, options):
        '''Validate the ``(optional)`` annotation: takes no options.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=0)

    def _do_validate_out(self, position, ann_name, options):
        '''Validate the ``(out)`` annotation: at most one option, from OUT_OPTIONS.'''
        self._validate_annotation(position, ann_name, options, max_n_options=1,
                                  choices=OUT_OPTIONS)

    def _do_validate_ref_func(self, position, ann_name, options):
        '''Validate the ``(ref-func)`` annotation: takes exactly one option.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=1)

    def _do_validate_rename_to(self, position, ann_name, options):
        '''Validate the ``(rename-to)`` annotation: takes exactly one option.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=1)

    def _do_validate_scope(self, position, ann_name, options):
        '''Validate the ``(scope)`` annotation: exactly one option, from SCOPE_OPTIONS.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=1,
                                  choices=SCOPE_OPTIONS)

    def _do_validate_set_value_func(self, position, ann_name, options):
        '''Validate the ``(set-value-func)`` annotation: takes exactly one option.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=1)

    def _do_validate_skip(self, position, ann_name, options):
        '''Validate the ``(skip)`` annotation: takes no options.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=0)

    def _do_validate_transfer(self, position, ann_name, options):
        '''Validate the ``(transfer)`` annotation: exactly one option, from TRANSFER_OPTIONS.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=1,
                                  choices=TRANSFER_OPTIONS)

    def _do_validate_type(self, position, ann_name, options):
        '''Validate the ``(type)`` annotation: takes exactly one option.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=1)

    def _do_validate_unref_func(self, position, ann_name, options):
        '''Validate the ``(unref-func)`` annotation: takes exactly one option.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=1)

    def _do_validate_value(self, position, ann_name, options):
        '''Validate the ``(value)`` annotation: takes exactly one option.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=1)

    def _do_validate_virtual(self, position, ann_name, options):
        '''Validate the ``(virtual)`` annotation: takes exactly one option.'''
        self._validate_annotation(position, ann_name, options, exact_n_options=1)
class GtkDocParameter(GtkDocAnnotatable):
    '''
    A single ``@parameter`` part of a GTK-Doc comment block.
    '''

    __slots__ = ('name', 'description')

    #: Annotation names that may legally appear on a parameter.
    valid_annotations = (ANN_ALLOW_NONE, ANN_ARRAY, ANN_ATTRIBUTES, ANN_CLOSURE, ANN_DESTROY,
                         ANN_ELEMENT_TYPE, ANN_IN, ANN_INOUT, ANN_OUT, ANN_SCOPE, ANN_SKIP,
                         ANN_TRANSFER, ANN_TYPE, ANN_OPTIONAL, ANN_NULLABLE)

    def __init__(self, name, position=None):
        '''
        :param name: the parameter's name
        :param position: :class:`giscanner.message.Position` of the parameter in the
                         source file or :const:`None`
        '''
        super(GtkDocParameter, self).__init__(position)

        #: Parameter name.
        self.name = name
        #: Parameter description, or :const:`None` when none was written.
        self.description = None

    def __repr__(self):
        return '<GtkDocParameter %r %r>' % (self.name, self.annotations)
class GtkDocTag(GtkDocAnnotatable):
    '''
    A single tag part (``Returns:``, ``Since:``, ...) of a GTK-Doc comment block.
    '''

    __slots__ = ('name', 'value', 'description')

    #: Annotation names that may legally appear on a tag.
    valid_annotations = (ANN_ALLOW_NONE, ANN_ARRAY, ANN_ATTRIBUTES, ANN_ELEMENT_TYPE, ANN_SKIP,
                         ANN_TRANSFER, ANN_TYPE, ANN_NULLABLE, ANN_OPTIONAL)

    def __init__(self, name, position=None):
        '''
        :param name: the tag's name
        :param position: :class:`giscanner.message.Position` of the tag in the
                         source file or :const:`None`
        '''
        super(GtkDocTag, self).__init__(position)

        #: Tag name.
        self.name = name
        #: Tag value (for example a version number), or :const:`None`.
        self.value = None
        #: Tag description, or :const:`None` when none was written.
        self.description = None

    def __repr__(self):
        return '<GtkDocTag %r %r>' % (self.name, self.annotations)
class GtkDocCommentBlock(GtkDocAnnotatable):
    '''
    Represents a GTK-Doc comment block.
    '''

    __slots__ = ('code_before', 'code_after', 'indentation',
                 'name', 'params', 'description', 'tags')

    #: Valid annotation names for the GTK-Doc comment block identifier part.
    valid_annotations = (ANN_ATTRIBUTES, ANN_CONSTRUCTOR, ANN_FOREIGN, ANN_GET_VALUE_FUNC,
                         ANN_METHOD, ANN_REF_FUNC, ANN_RENAME_TO, ANN_SET_VALUE_FUNC,
                         ANN_SKIP, ANN_TRANSFER, ANN_TYPE, ANN_UNREF_FUNC, ANN_VALUE, ANN_VFUNC)

    def __init__(self, name, position=None):
        '''
        :param name: identifier name documented by this comment block
        :param position: :class:`giscanner.message.Position` of the comment block in the
                         source file or :const:`None`
        '''
        GtkDocAnnotatable.__init__(self, position)

        #: Code preceding the GTK-Doc comment block start token ("``/**``"), if any.
        self.code_before = None
        #: Code following the GTK-Doc comment block end token ("``*/``"), if any.
        self.code_after = None
        #: List of indentation levels (preceding the "``*``") for all lines in the comment
        #: block's source text.
        self.indentation = []
        #: Identifier name.
        self.name = name
        #: Ordered dictionary mapping parameter names to :class:`GtkDocParameter` instances
        #: applied to this :class:`GtkDocCommentBlock`.
        self.params = OrderedDict()
        #: The GTK-Doc comment block description part.
        self.description = None
        #: Ordered dictionary mapping tag names to :class:`GtkDocTag` instances
        #: applied to this :class:`GtkDocCommentBlock`.
        self.tags = OrderedDict()

    def __cmp__(self, other):
        # Python 2 only (Python 3 never calls __cmp__).
        # Note: This is used by g-ir-annotation-tool, which does a ``sorted(blocks.values())``,
        #       meaning that keeping this around makes update-glib-annotations.py patches
        #       easier to review.
        return cmp(self.name, other.name)

    def __lt__(self, other):
        # Fix: Python 3 ignores __cmp__ and has no cmp() builtin, so sorting
        # comment blocks (see the note on __cmp__ above) would fail there.
        # Order by identifier name, identical to __cmp__.
        return self.name < other.name

    def __repr__(self):
        return '<GtkDocCommentBlock %r %r>' % (self.name, self.annotations)

    def validate(self):
        '''
        Validate annotations applied to the :class:`GtkDocCommentBlock` identifier, parameters
        and tags.
        '''
        GtkDocAnnotatable.validate(self)

        for param in self.params.values():
            param.validate()

        for tag in self.tags.values():
            tag.validate()
#: Result object returned by :class:`GtkDocCommentBlockParser`._parse_annotations()
_ParseAnnotationsResult = namedtuple('Result', ['success', 'annotations', 'start_pos', 'end_pos'])
#: Result object returned by :class:`GtkDocCommentBlockParser`._parse_fields()
_ParseFieldsResult = namedtuple('Result', ['success', 'annotations', 'description'])
class GtkDocCommentBlockParser(object):
'''
Parse GTK-Doc comment blocks into a parse tree built out of :class:`GtkDocCommentBlock`,
:class:`GtkDocParameter`, :class:`GtkDocTag` and :class:`GtkDocAnnotations`
objects. This parser tries to accept malformed input whenever possible and does
not cause the process to exit on syntax errors. It does however emit:
* warning messages at the slightest indication of recoverable malformed input and
* error messages for unrecoverable malformed input
whenever possible. Recoverable, in this context, means that we can serialize the
:class:`GtkDocCommentBlock` instance using a :class:`GtkDocCommentBlockWriter` without
information being lost. It is usually a good idea to heed these warning and error messages
as malformed input can result in both:
* invalid GTK-Doc output (HTML, pdf, ...) when the comment blocks are parsed
with GTK-Doc's gtkdoc-mkdb
* unexpected introspection behavior, for example missing parameters in the
generated .gir and .typelib files
.. NOTE:: :class:`GtkDocCommentBlockParser` functionality is heavily based on gtkdoc-mkdb's
`ScanSourceFile()`_ function and is currently in sync with GTK-Doc
commit `47abcd5`_.
.. _ScanSourceFile():
http://git.gnome.org/browse/gtk-doc/tree/gtkdoc-mkdb.in#n3722
.. _47abcd5:
https://git.gnome.org/browse/gtk-doc/commit/?id=47abcd53b8489ebceec9e394676512a181c1f1f6
'''
def parse_comment_blocks(self, comments):
'''
Parse multiple GTK-Doc comment blocks.
:param comments: an iterable of ``(comment, filename, lineno)`` tuples
:returns: a dictionary mapping identifier names to :class:`GtkDocCommentBlock` objects
'''
comment_blocks = {}
for (comment, filename, lineno) in comments:
try:
comment_block = self.parse_comment_block(comment, filename, lineno)
except Exception:
error('unrecoverable parse error, please file a GObject-Introspection bug'
'report including the complete comment block at the indicated location.',
Position(filename, lineno))
continue
if comment_block is not None:
# Note: previous versions of this parser did not check if an identifier was
# already stored in comment_blocks, so when different comment blocks where
# encountered documenting the same identifier the last comment block seen
# "wins". Keep this behavior for backwards compatibility, but emit a warning.
if comment_block.name in comment_blocks:
firstseen = comment_blocks[comment_block.name]
path = os.path.dirname(firstseen.position.filename)
warn('multiple comment blocks documenting \'%s:\' identifier '
'(already seen at %s).' %
(comment_block.name, firstseen.position.format(path)),
comment_block.position)
comment_blocks[comment_block.name] = comment_block
return comment_blocks
def parse_comment_block(self, comment, filename, lineno):
'''
Parse a single GTK-Doc comment block.
:param comment: string representing the GTK-Doc comment block including it's
start ("``/**``") and end ("``*/``") tokens.
:param filename: source file name where the comment block originated from
:param lineno: line number in the source file where the comment block starts
:returns: a :class:`GtkDocCommentBlock` object or ``None``
'''
code_before = ''
code_after = ''
comment_block_pos = Position(filename, lineno)
comment_lines = re.sub(LINE_BREAK_RE, '\n', comment).split('\n')
comment_lines_len = len(comment_lines)
# Check for the start of the comment block.
result = COMMENT_BLOCK_START_RE.match(comment_lines[0])
if result:
# Skip single line comment blocks
if comment_lines_len == 1:
position = Position(filename, lineno)
marker = ' ' * result.end('code') + '^'
error('Skipping invalid GTK-Doc comment block:'
'\n%s\n%s' % (comment_lines[0], marker),
position)
return None
code_before = result.group('code')
comment = result.group('comment')
if code_before:
position = Position(filename, lineno)
marker = ' ' * result.end('code') + '^'
warn('GTK-Doc comment block start token "/**" should '
'not be preceded by code:\n%s\n%s' % (comment_lines[0], marker),
position)
if comment:
position = Position(filename, lineno)
marker = ' ' * result.start('comment') + '^'
warn('GTK-Doc comment block start token "/**" should '
'not be followed by comment text:\n%s\n%s' % (comment_lines[0], marker),
position)
comment_lines[0] = comment
else:
del comment_lines[0]
else:
# Not a GTK-Doc comment block.
return None
# Check for the end of the comment block.
result = COMMENT_BLOCK_END_RE.match(comment_lines[-1])
if result:
code_after = result.group('code')
comment = result.group('comment')
if code_after:
position = Position(filename, lineno + comment_lines_len - 1)
marker = ' ' * result.end('code') + '^'
warn('GTK-Doc comment block end token "*/" should '
'not be followed by code:\n%s\n%s' % (comment_lines[-1], marker),
position)
if comment:
position = Position(filename, lineno + comment_lines_len - 1)
marker = ' ' * result.end('comment') + '^'
warn('GTK-Doc comment block end token "*/" should '
'not be preceded by comment text:\n%s\n%s' % (comment_lines[-1], marker),
position)
comment_lines[-1] = comment
else:
del comment_lines[-1]
else:
# Not a GTK-Doc comment block.
return None
# If we get this far, we must be inside something
# that looks like a GTK-Doc comment block.
comment_block = None
identifier_warned = False
block_indent = []
line_indent = None
part_indent = None
in_part = None
current_part = None
returns_seen = False
for line in comment_lines:
lineno += 1
position = Position(filename, lineno)
# Store the original line (without \n) and column offset
# so we can generate meaningful warnings later on.
original_line = line
column_offset = 0
# Store indentation level of the comment (before the ' * ')
result = INDENTATION_RE.match(line)
block_indent.append(result.group('indentation'))
# Get rid of the ' * ' at the start of the line.
result = COMMENT_ASTERISK_RE.match(line)
if result:
comment = result.group('comment')
if comment:
marker = ' ' * result.start('comment') + '^'
error('invalid comment text:\n%s\n%s' %
(original_line, marker),
position)
column_offset = result.end(0)
line = line[result.end(0):]
# Store indentation level of the line (after the ' * ').
result = INDENTATION_RE.match(line)
line_indent = len(result.group('indentation').replace('\t', ' '))
####################################################################
# Check for GTK-Doc comment block identifier.
####################################################################
if comment_block is None:
result = SECTION_RE.match(line)
if result:
identifier_name = 'SECTION:%s' % (result.group('section_name'), )
identifier_delimiter = None
identifier_fields = None
identifier_fields_start = None
else:
result = PROPERTY_RE.match(line)
if result:
identifier_name = '%s:%s' % (result.group('class_name'),
result.group('property_name'))
identifier_delimiter = result.group('delimiter')
identifier_fields = result.group('fields')
identifier_fields_start = result.start('fields')
else:
result = SIGNAL_RE.match(line)
if result:
identifier_name = '%s::%s' % (result.group('class_name'),
result.group('signal_name'))
identifier_delimiter = result.group('delimiter')
identifier_fields = result.group('fields')
identifier_fields_start = result.start('fields')
else:
result = SYMBOL_RE.match(line)
if result:
identifier_name = '%s' % (result.group('symbol_name'), )
identifier_delimiter = result.group('delimiter')
identifier_fields = result.group('fields')
identifier_fields_start = result.start('fields')
if result:
in_part = PART_IDENTIFIER
part_indent = line_indent
comment_block = GtkDocCommentBlock(identifier_name, comment_block_pos)
comment_block.code_before = code_before
comment_block.code_after = code_after
if identifier_fields:
res = self._parse_annotations(position,
column_offset + identifier_fields_start,
original_line,
identifier_fields)
if res.success:
if identifier_fields[res.end_pos:].strip():
# Not an identifier due to invalid trailing description field
result = None
in_part = None
part_indent = None
comment_block = None
else:
comment_block.annotations = res.annotations
if not identifier_delimiter and res.annotations:
marker_position = column_offset + result.start('delimiter')
marker = ' ' * marker_position + '^'
warn('missing ":" at column %s:\n%s\n%s' %
(marker_position + 1, original_line, marker),
position)
if not result:
# Emit a single warning when the identifier is not found on the first line
if not identifier_warned:
identifier_warned = True
marker = ' ' * column_offset + '^'
error('identifier not found on the first line:\n%s\n%s' %
(original_line, marker),
position)
continue
####################################################################
# Check for comment block parameters.
####################################################################
result = PARAMETER_RE.match(line)
if result:
part_indent = line_indent
param_name = result.group('parameter_name')
param_name_lower = param_name.lower()
param_fields = result.group('fields')
param_fields_start = result.start('fields')
marker = ' ' * (result.start('parameter_name') + column_offset) + '^'
if in_part not in [PART_IDENTIFIER, PART_PARAMETERS]:
warn('"@%s" parameter unexpected at this location:\n%s\n%s' %
(param_name, original_line, marker),
position)
in_part = PART_PARAMETERS
if param_name_lower == TAG_RETURNS:
# Deprecated return value as parameter instead of tag
param_name = TAG_RETURNS
if not returns_seen:
returns_seen = True
else:
error('encountered multiple "Returns" parameters or tags for "%s".' %
(comment_block.name, ),
position)
tag = GtkDocTag(TAG_RETURNS, position)
if param_fields:
result = self._parse_fields(position,
column_offset + param_fields_start,
original_line,
param_fields)
if result.success:
tag.annotations = result.annotations
tag.description = result.description
comment_block.tags[TAG_RETURNS] = tag
current_part = tag
continue
elif (param_name == 'Varargs'
or (param_name.endswith('...') and param_name != '...')):
# Deprecated @Varargs notation or named __VA_ARGS__ instead of @...
warn('"@%s" parameter is deprecated, please use "@..." instead:\n%s\n%s' %
(param_name, original_line, marker),
position)
param_name = '...'
if param_name in comment_block.params.keys():
error('multiple "@%s" parameters for identifier "%s":\n%s\n%s' %
(param_name, comment_block.name, original_line, marker),
position)
parameter = GtkDocParameter(param_name, position)
if param_fields:
result = self._parse_fields(position,
column_offset + param_fields_start,
original_line,
param_fields)
if result.success:
parameter.annotations = result.annotations
parameter.description = result.description
comment_block.params[param_name] = parameter
current_part = parameter
continue
####################################################################
# Check for comment block description.
#
# When we are parsing parameter parts or the identifier part (when
# there are no parameters) and encounter an empty line, we must be
# parsing the comment block description.
#
# Note: it is unclear why GTK-Doc does not allow paragraph breaks
# at this location as those might be handy describing
# parameters from time to time...
####################################################################
if (EMPTY_LINE_RE.match(line) and in_part in [PART_IDENTIFIER, PART_PARAMETERS]):
in_part = PART_DESCRIPTION
part_indent = line_indent
continue
####################################################################
# Check for GTK-Doc comment block tags.
####################################################################
result = TAG_RE.match(line)
if result and line_indent <= part_indent:
part_indent = line_indent
tag_name = result.group('tag_name')
tag_name_lower = tag_name.lower()
tag_fields = result.group('fields')
tag_fields_start = result.start('fields')
marker = ' ' * (result.start('tag_name') + column_offset) + '^'
if tag_name_lower in DEPRECATED_GI_ANN_TAGS:
# Deprecated GObject-Introspection specific tags.
# Emit a warning and transform these into annotations on the identifier
# instead, as agreed upon in http://bugzilla.gnome.org/show_bug.cgi?id=676133
warn('GObject-Introspection specific GTK-Doc tag "%s" '
'has been deprecated, please use annotations on the identifier '
'instead:\n%s\n%s' % (tag_name, original_line, marker),
position)
# Translate deprecated tag name into corresponding annotation name
ann_name = tag_name_lower.replace(' ', '-')
if tag_name_lower == TAG_ATTRIBUTES:
transformed = ''
result = self._parse_fields(position,
result.start('tag_name') + column_offset,
line,
tag_fields.strip(),
False,
False)
if result.success:
for annotation in result.annotations:
ann_options = self._parse_annotation_options_list(position, marker,
line, annotation)
n_options = len(ann_options)
if n_options == 1:
transformed = '%s %s' % (transformed, ann_options[0], )
elif n_options == 2:
transformed = '%s %s=%s' % (transformed, ann_options[0],
ann_options[1])
else:
# Malformed Attributes: tag
error('malformed "Attributes:" tag will be ignored:\n%s\n%s' %
(original_line, marker),
position)
transformed = None
if transformed:
transformed = '%s %s' % (ann_name, transformed.strip())
ann_name, docannotation = self._parse_annotation(
position,
column_offset + tag_fields_start,
original_line,
transformed)
stored_annotation = comment_block.annotations.get('attributes')
if stored_annotation:
error('Duplicate "Attributes:" annotation will '
'be ignored:\n%s\n%s' % (original_line, marker),
position)
else:
comment_block.annotations[ann_name] = docannotation
else:
ann_name, options = self._parse_annotation(position,
column_offset + tag_fields_start,
line,
'%s %s' % (ann_name, tag_fields))
comment_block.annotations[ann_name] = options
continue
elif tag_name_lower == TAG_DESCRIPTION:
# Deprecated GTK-Doc Description: tag
warn('GTK-Doc tag "Description:" has been deprecated:\n%s\n%s' %
(original_line, marker),
position)
in_part = PART_DESCRIPTION
if comment_block.description is None:
comment_block.description = tag_fields
else:
comment_block.description += '\n%s' % (tag_fields, )
continue
# Now that the deprecated stuff is out of the way, continue parsing real tags
if (in_part == PART_DESCRIPTION
or (in_part == PART_PARAMETERS and not comment_block.description)
or (in_part == PART_IDENTIFIER and not comment_block.params and not
comment_block.description)):
in_part = PART_TAGS
if in_part != PART_TAGS:
in_part = PART_TAGS
warn('"%s:" tag unexpected at this location:\n%s\n%s' %
(tag_name, original_line, marker),
position)
if tag_name_lower in [TAG_RETURN, TAG_RETURNS,
TAG_RETURN_VALUE, TAG_RETURNS_VALUE]:
if not returns_seen:
returns_seen = True
else:
error('encountered multiple return value parameters or tags for "%s".' %
(comment_block.name, ),
position)
tag = GtkDocTag(TAG_RETURNS, position)
if tag_fields:
result = self._parse_fields(position,
column_offset + tag_fields_start,
original_line,
tag_fields)
if result.success:
tag.annotations = result.annotations
tag.description = result.description
comment_block.tags[TAG_RETURNS] = tag
current_part = tag
continue
else:
if tag_name_lower in comment_block.tags.keys():
error('multiple "%s:" tags for identifier "%s":\n%s\n%s' %
(tag_name, comment_block.name, original_line, marker),
position)
tag = GtkDocTag(tag_name_lower, position)
if tag_fields:
result = self._parse_fields(position,
column_offset + tag_fields_start,
original_line,
tag_fields)
if result.success:
if result.annotations:
error('annotations not supported for tag "%s:".' % (tag_name, ),
position)
if tag_name_lower in [TAG_DEPRECATED, TAG_SINCE]:
result = TAG_VALUE_VERSION_RE.match(result.description)
tag.value = result.group('value')
tag.description = result.group('description')
elif tag_name_lower == TAG_STABILITY:
result = TAG_VALUE_STABILITY_RE.match(result.description)
tag.value = result.group('value').capitalize()
tag.description = result.group('description')
comment_block.tags[tag_name_lower] = tag
current_part = tag
continue
####################################################################
# If we get here, we must be in the middle of a multiline
# comment block, parameter or tag description.
####################################################################
if EMPTY_LINE_RE.match(line) is None:
line = line.rstrip()
if in_part in [PART_IDENTIFIER, PART_DESCRIPTION]:
if not comment_block.description:
if in_part == PART_IDENTIFIER:
self._validate_multiline_annotation_continuation(line, original_line,
column_offset, position)
if comment_block.description is None:
comment_block.description = line
else:
comment_block.description += '\n' + line
continue
elif in_part in [PART_PARAMETERS, PART_TAGS]:
if not current_part.description:
self._validate_multiline_annotation_continuation(line, original_line,
column_offset, position)
if current_part.description is None:
current_part.description = line
else:
current_part.description += '\n' + line
continue
########################################################################
# Finished parsing this comment block.
########################################################################
if comment_block:
# We have picked up a couple of \n characters that where not
# intended. Strip those.
if comment_block.description:
comment_block.description = comment_block.description.strip()
for tag in comment_block.tags.values():
self._clean_description_field(tag)
for param in comment_block.params.values():
self._clean_description_field(param)
comment_block.indentation = block_indent
comment_block.validate()
return comment_block
else:
return None
def _clean_description_field(self, part):
'''
Remove extraneous leading and trailing whitespace from description fields.
:param part: a GTK-Doc comment block part having a description field
'''
if part.description:
if part.description.strip() == '':
part.description = None
else:
if EMPTY_LINE_RE.match(part.description.split('\n', 1)[0]):
part.description = part.description.rstrip()
else:
part.description = part.description.strip()
def _validate_multiline_annotation_continuation(self, line, original_line,
                                                column_offset, position):
    '''
    Emit an error when an annotatable part's continuation line carries
    annotations, since annotations must not span multiple lines.

    For example, the following comment block triggers the error for the
    fourth line::

        /**
         * shiny_function:
         * @array_: (out caller-allocates) (array)
         *          (element-type utf8) (transfer full): A beautiful array
         */

    :param line: line to validate, stripped from ("``*/``") at start of the line.
    :param original_line: original line (including ("``*/``")) being validated
    :param column_offset: number of characters stripped from `line` when ("``*/``")
                          was removed
    :param position: :class:`giscanner.message.Position` of `line` in the source file
    '''
    result = self._parse_annotations(position, column_offset, original_line, line)
    if not (result.success and result.annotations):
        return
    marker = ' ' * (result.start_pos + column_offset) + '^'
    error('ignoring invalid multiline annotation continuation:\n%s\n%s' %
          (original_line, marker),
          position)
def _parse_annotation_options_list(self, position, column, line, options):
'''
Parse annotation options into a list. For example::
┌──────────────────────────────────────────────────────────────┐
│ 'option1 option2 option3' │ ─▷ source
├──────────────────────────────────────────────────────────────┤
│ ['option1', 'option2', 'option3'] │ ◁─ parsed options
└──────────────────────────────────────────────────────────────┘
:param position: :class:`giscanner.message.Position` of `line` in the source file
:param column: start column of the `options` in the source file
:param line: complete source line
:param options: annotation options to parse
:returns: a list of annotation options
'''
parsed = []
if options:
result = options.find('=')
if result >= 0:
marker = ' ' * (column + result) + '^'
warn('invalid annotation options: expected a "list" but '
'received "key=value pairs":\n%s\n%s' % (line, marker),
position)
parsed = self._parse_annotation_options_unknown(position, column, line, options)
else:
parsed = options.split(' ')
return parsed
def _parse_annotation_options_dict(self, position, column, line, options):
'''
Parse annotation options into a dict. For example::
┌──────────────────────────────────────────────────────────────┐
│ 'option1=value1 option2 option3=value2' │ ─▷ source
├──────────────────────────────────────────────────────────────┤
│ {'option1': 'value1', 'option2': None, 'option3': 'value2'} │ ◁─ parsed options
└──────────────────────────────────────────────────────────────┘
:param position: :class:`giscanner.message.Position` of `line` in the source file
:param column: start column of the `options` in the source file
:param line: complete source line
:param options: annotation options to parse
:returns: an ordered dictionary of annotation options
'''
parsed = OrderedDict()
if options:
for p in options.split(' '):
parts = p.split('=', 1)
key = parts[0]
value = parts[1] if len(parts) == 2 else None
parsed[key] = value
return parsed
def _parse_annotation_options_unknown(self, position, column, line, options):
'''
Parse annotation options into a list holding a single item. This is used when the
annotation options to parse in not known to be a list nor dict. For example::
┌──────────────────────────────────────────────────────────────┐
│ ' option1 option2 option3=value1 ' │ ─▷ source
├──────────────────────────────────────────────────────────────┤
│ ['option1 option2 option3=value1'] │ ◁─ parsed options
└──────────────────────────────────────────────────────────────┘
:param position: :class:`giscanner.message.Position` of `line` in the source file
:param column: start column of the `options` in the source file
:param line: complete source line
:param options: annotation options to parse
:returns: a list of annotation options
'''
if options:
return [options.strip()]
def _parse_annotation(self, position, column, line, annotation):
    '''
    Parse an annotation into the annotation name and a list or dict (depending on the
    name of the annotation) holding the options. For example:

    - ``'name opt1=value1 opt2=value2 opt3'`` is parsed into
      ``'name', {'opt1': 'value1', 'opt2': 'value2', 'opt3': None}``
    - ``'name opt1 opt2'`` is parsed into ``'name', ['opt1', 'opt2']``
    - ``'unknownname unknown list of options'`` is parsed into
      ``'unknownname', ['unknown list of options']``

    :param position: :class:`giscanner.message.Position` of `line` in the source file
    :param column: start column of the `annotation` in the source file
    :param line: complete source line
    :param annotation: annotation to parse
    :returns: a tuple containing the annotation name and options, or ``(None, None)``
              when the annotation is malformed
    '''
    # Transform deprecated type syntax "tokens"
    annotation = annotation.replace('<', ANN_LPAR).replace('>', ANN_RPAR)

    parts = annotation.split(' ', 1)
    ann_name = parts[0].lower()
    ann_options = parts[1] if len(parts) == 2 else None

    if ann_name == ANN_INOUT_ALT:
        # Deprecated alternative spelling of the "inout" annotation.
        marker = ' ' * (column) + '^'
        warn('"%s" annotation has been deprecated, please use "%s" instead:\n%s\n%s' %
             (ANN_INOUT_ALT, ANN_INOUT, line, marker),
             position)

        ann_name = ANN_INOUT
    elif ann_name == ANN_ATTRIBUTE:
        # Deprecated singular "attribute" annotation; rewrite its single
        # "key value" option pair into the plural "attributes" form.
        marker = ' ' * (column) + '^'
        warn('"%s" annotation has been deprecated, please use "%s" instead:\n%s\n%s' %
             (ANN_ATTRIBUTE, ANN_ATTRIBUTES, line, marker),
             position)

        ann_name = ANN_ATTRIBUTES
        ann_options = self._parse_annotation_options_list(position, column, line, ann_options)
        n_options = len(ann_options)
        if n_options == 1:
            ann_options = ann_options[0]
        elif n_options == 2:
            ann_options = '%s=%s' % (ann_options[0], ann_options[1])
        else:
            marker = ' ' * (column) + '^'
            error('malformed "(attribute)" annotation will be ignored:\n%s\n%s' %
                  (line, marker),
                  position)
            return None, None

    # Advance past the annotation name (plus parenthesis and space) so
    # option positions reported in messages line up with the source line.
    column += len(ann_name) + 2
    if ann_name in LIST_ANNOTATIONS:
        ann_options = self._parse_annotation_options_list(position, column, line, ann_options)
    elif ann_name in DICT_ANNOTATIONS:
        ann_options = self._parse_annotation_options_dict(position, column, line, ann_options)
    else:
        # Unknown annotation: keep the raw option text as a single item.
        ann_options = self._parse_annotation_options_unknown(position, column, line,
                                                             ann_options)

    return ann_name, ann_options
def _parse_annotations(self, position, column, line, fields, parse_options=True):
    '''
    Parse annotations into a :class:`GtkDocAnnotations` object.

    :param position: :class:`giscanner.message.Position` of `line` in the source file
    :param column: start column of the `annotations` in the source file
    :param line: complete source line
    :param fields: string containing the fields to parse
    :param parse_options: whether options will be parsed into a :class:`GtkDocAnnotations`
                          object or into a :class:`list`
    :returns: a :class:`_ParseAnnotationsResult`; its ``annotations`` field is a
              :class:`GtkDocAnnotations` object when `parse_options` evaluates to
              True, a :class:`list` otherwise
    '''
    if parse_options:
        parsed_annotations = GtkDocAnnotations(position)
    else:
        parsed_annotations = []

    # Single-pass character scanner: annotations are "("-")" delimited
    # groups at the start of `fields`; parentheses may nest inside a
    # single annotation (deprecated type syntax).
    i = 0
    parens_level = 0
    prev_char = ''
    char_buffer = []    # characters of the annotation currently being read
    start_pos = 0       # index of the opening "(" of the first annotation
    end_pos = 0         # index just past the ")" of the last annotation

    for i, cur_char in enumerate(fields):
        cur_char_is_space = cur_char.isspace()

        if cur_char == ANN_LPAR:
            parens_level += 1
            if parens_level == 1:
                start_pos = i
            if prev_char == ANN_LPAR:
                # "((" - immediately nested opening parenthesis.
                marker = ' ' * (column + i) + '^'
                error('unexpected parentheses, annotations will be ignored:\n%s\n%s' %
                      (line, marker),
                      position)
                return _ParseAnnotationsResult(False, None, None, None)
            elif parens_level > 1:
                char_buffer.append(cur_char)
        elif cur_char == ANN_RPAR:
            parens_level -= 1
            if prev_char == ANN_LPAR:
                # "()" - empty annotation.
                marker = ' ' * (column + i) + '^'
                error('unexpected parentheses, annotations will be ignored:\n%s\n%s' %
                      (line, marker),
                      position)
                return _ParseAnnotationsResult(False, None, None, None)
            elif parens_level < 0:
                marker = ' ' * (column + i) + '^'
                error('unbalanced parentheses, annotations will be ignored:\n%s\n%s' %
                      (line, marker),
                      position)
                return _ParseAnnotationsResult(False, None, None, None)
            elif parens_level == 0:
                # A complete annotation has been read; store it.
                end_pos = i + 1
                if parse_options is True:
                    name, options = self._parse_annotation(position,
                                                           column + start_pos,
                                                           line,
                                                           ''.join(char_buffer).strip())
                    if name is not None:
                        if name in parsed_annotations:
                            marker = ' ' * (column + i) + '^'
                            error('multiple "%s" annotations:\n%s\n%s' %
                                  (name, line, marker), position)
                        parsed_annotations[name] = options
                else:
                    parsed_annotations.append(''.join(char_buffer).strip())
                char_buffer = []
            else:
                char_buffer.append(cur_char)
        elif cur_char_is_space:
            # Whitespace between annotations is skipped; whitespace inside
            # an annotation belongs to its option text.
            if parens_level > 0:
                char_buffer.append(cur_char)
        else:
            if parens_level == 0:
                # First non-space character outside parentheses ends the
                # annotation part; the rest of `fields` is description.
                break
            else:
                char_buffer.append(cur_char)

        prev_char = cur_char

    if parens_level > 0:
        # Ran out of input with an annotation still open.
        marker = ' ' * (column + i) + '^'
        error('unbalanced parentheses, annotations will be ignored:\n%s\n%s' %
              (line, marker),
              position)
        return _ParseAnnotationsResult(False, None, None, None)
    else:
        return _ParseAnnotationsResult(True, parsed_annotations, start_pos, end_pos)
def _parse_fields(self, position, column, line, fields, parse_options=True,
                  validate_description_field=True):
    '''
    Parse annotations out of field data and return the remaining description.

    For example ``'(skip): description of some parameter'`` is parsed into
    the annotations ``{'skip': []}`` and the description
    ``'description of some parameter'``.

    :param position: :class:`giscanner.message.Position` of `line` in the source file
    :param column: start column of `fields` in the source file
    :param line: complete source line
    :param fields: string containing the fields to parse
    :param parse_options: whether options will be parsed into a
                          :class:`GtkDocAnnotations` object or into a :class:`list`
    :param validate_description_field: :const:`True` to validate the description field
    :returns: a :class:`_ParseFieldsResult` holding the parsed annotations and the
              remaining description field
    '''
    result = self._parse_annotations(position, column, line, fields, parse_options)
    description = ''
    if result.success:
        description = fields[result.end_pos:].strip()
        if description and validate_description_field:
            if description.startswith(':'):
                # Discard the ":" delimiter between annotations and description.
                description = description[1:]
            elif result.end_pos > 0:
                marker_position = column + result.end_pos
                marker = ' ' * marker_position + '^'
                warn('missing ":" at column %s:\n%s\n%s' %
                     (marker_position + 1, line, marker),
                     position)
    return _ParseFieldsResult(result.success, result.annotations, description)
class GtkDocCommentBlockWriter(object):
    '''
    Serializes :class:`GtkDocCommentBlock` objects back into GTK-Doc
    comment block source text.
    '''

    def __init__(self, indent=True):
        #: :const:`True` if the original indentation preceding the "``*``" needs
        #: to be retained, :const:`False` otherwise. Default value is :const:`True`.
        self.indent = indent

    def _serialize_annotations(self, annotations):
        '''
        Serialize an annotations field. For example:

        - ``{'name': {'opt1': 'value1', 'opt2': 'value2', 'opt3': None}}`` becomes
          ``'(name opt1=value1 opt2=value2 opt3)'``
        - ``{'name': ['opt1', 'opt2']}`` becomes ``'(name opt1 opt2)'``
        - ``{'unknownname': ['unknown list of options']}`` becomes
          ``'(unknownname unknown list of options)'``

        :param annotations: :class:`GtkDocAnnotations` to be serialized
        :returns: a string
        '''
        serialized = []

        for ann_name, options in annotations.items():
            if options:
                if isinstance(options, list):
                    serialize_options = ' '.join(options)
                else:
                    # Dict-style options: "key=value" pairs, bare key when
                    # there is no value.
                    serialize_options = ''
                    for key, value in options.items():
                        if value:
                            serialize_options += '%s=%s ' % (key, value)
                        else:
                            serialize_options += '%s ' % (key, )
                    serialize_options = serialize_options.strip()

                serialized.append('(%s %s)' % (ann_name, serialize_options))
            else:
                serialized.append('(%s)' % (ann_name, ))

        return ' '.join(serialized)

    def _serialize_parameter(self, parameter):
        '''
        Serialize a parameter.

        :param parameter: :class:`GtkDocParameter` to be serialized
        :returns: a list of lines (the serialized parameter split on newlines)
        '''
        # parameter_name field
        serialized = '@%s' % (parameter.name, )

        # annotations field
        if parameter.annotations:
            serialized += ': ' + self._serialize_annotations(parameter.annotations)

        # description field
        if parameter.description:
            if parameter.description.startswith('\n'):
                serialized += ':' + parameter.description
            else:
                serialized += ': ' + parameter.description
        else:
            serialized += ':'

        return serialized.split('\n')

    def _serialize_tag(self, tag):
        '''
        Serialize a tag.

        :param tag: :class:`GtkDocTag` to be serialized
        :returns: a list of lines (the serialized tag split on newlines)
        '''
        # tag_name field
        serialized = tag.name.capitalize()

        # annotations field
        if tag.annotations:
            serialized += ': ' + self._serialize_annotations(tag.annotations)

        # value field (e.g. the version for "Since:"/"Deprecated:")
        if tag.value:
            serialized += ': ' + tag.value

        # description field
        if tag.description:
            if tag.description.startswith('\n'):
                serialized += ':' + tag.description
            else:
                serialized += ': ' + tag.description

        if not tag.value and not tag.description:
            serialized += ':'

        return serialized.split('\n')

    def write(self, block):
        '''
        Serialize a :class:`GtkDocCommentBlock` object.

        :param block: :class:`GtkDocCommentBlock` to be serialized
        :returns: a string
        '''
        if block is None:
            return ''
        else:
            lines = []

            # Identifier part
            if block.name.startswith('SECTION'):
                lines.append(block.name)
            else:
                if block.annotations:
                    annotations = self._serialize_annotations(block.annotations)
                    lines.append('%s: %s' % (block.name, annotations))
                else:
                    # Note: this delimiter serves no purpose other than most people
                    # being used to reading/writing it. It is completely legal to
                    # omit this.
                    lines.append('%s:' % (block.name, ))

            # Parameter parts
            for param in block.params.values():
                lines.extend(self._serialize_parameter(param))

            # Comment block description part
            if block.description:
                lines.append('')
                for l in block.description.split('\n'):
                    lines.append(l)

            # Tag parts
            if block.tags:
                # Note: this empty line serves no purpose other than most people
                # being used to reading/writing it. It is completely legal to
                # omit this.
                lines.append('')

                for tag in block.tags.values():
                    lines.extend(self._serialize_tag(tag))

            # Restore comment block indentation and the "*" line prefix.
            if self.indent:
                # Use the most frequently occurring indentation of the original
                # block; fall back to a single space when that entry is empty.
                # NOTE(review): assumes block.indentation is non-empty when
                # self.indent is set -- an empty list would raise IndexError here.
                indent = Counter(block.indentation).most_common(1)[0][0] or ' '
                if indent.endswith('\t'):
                    start_indent = indent
                    line_indent = indent + ' '
                else:
                    start_indent = indent[:-1]
                    line_indent = indent
            else:
                start_indent = ''
                line_indent = ' '

            i = 0
            while i < len(lines):
                line = lines[i]
                if line:
                    lines[i] = '%s* %s\n' % (line_indent, line)
                else:
                    lines[i] = '%s*\n' % (line_indent, )
                i += 1

            # Restore comment block start and end tokens
            lines.insert(0, '%s/**\n' % (start_indent, ))
            lines.append('%s*/\n' % (line_indent, ))

            # Restore code before and after comment block start and end tokens
            if block.code_before:
                lines.insert(0, '%s\n' % (block.code_before, ))

            if block.code_after:
                lines.append('%s\n' % (block.code_after, ))

            return ''.join(lines)
| StarcoderdataPython |
1725323 |
import os
import copy
import datetime
import warnings
from matplotlib import pyplot as plt
import matplotlib as mpl
import seaborn as sns
import pandas as pd
import numpy as np
import math
from datetime import datetime
import random
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
import os
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'
import numpy as np
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, Flatten, Embedding, Dropout, PReLU,ReLU
from keras.layers import Bidirectional, SpatialDropout1D, CuDNNGRU,CuDNNLSTM, Conv1D,Conv2D,MaxPool2D,Reshape
from keras.layers import GlobalAvgPool1D, GlobalMaxPool1D, concatenate,GlobalMaxPooling1D,GlobalAveragePooling1D
from keras.regularizers import l2,l1
from keras.layers.normalization import BatchNormalization
from keras.engine import Layer
from keras.layers.core import Flatten
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau,EarlyStopping
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation,BatchNormalization
from keras.regularizers import l1,l2
from keras.preprocessing.text import Tokenizer
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau,EarlyStopping,Callback
import gc
from tqdm import tqdm_notebook
class _Data_Preprocess:
def __init__(self):
self.int8_max = np.iinfo(np.int8).max
self.int8_min = np.iinfo(np.int8).min
self.int16_max = np.iinfo(np.int16).max
self.int16_min = np.iinfo(np.int16).min
self.int32_max = np.iinfo(np.int32).max
self.int32_min = np.iinfo(np.int32).min
self.int64_max = np.iinfo(np.int64).max
self.int64_min = np.iinfo(np.int64).min
self.float16_max = np.finfo(np.float16).max
self.float16_min = np.finfo(np.float16).min
self.float32_max = np.finfo(np.float32).max
self.float32_min = np.finfo(np.float32).min
self.float64_max = np.finfo(np.float64).max
self.float64_min = np.finfo(np.float64).min
'''
function: _get_type(self,min_val, max_val, types)
get the correct types that our columns can trans to
'''
def _get_type(self, min_val, max_val, types):
if types == 'int':
if max_val <= self.int8_max and min_val >= self.int8_min:
return np.int8
elif max_val <= self.int16_max <= max_val and min_val >= self.int16_min:
return np.int16
elif max_val <= self.int32_max and min_val >= self.int32_min:
return np.int32
return None
elif types == 'float':
if max_val <= self.float16_max and min_val >= self.float16_min:
return np.float16
if max_val <= self.float32_max and min_val >= self.float32_min:
return np.float32
if max_val <= self.float64_max and min_val >= self.float64_min:
return np.float64
return None
'''
function: _memory_process(self,df)
column data types trans, to save more memory
'''
def _memory_process(self, df):
init_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('Original data occupies {} GB memory.'.format(init_memory))
df_cols = df.columns
for col in tqdm_notebook(df_cols):
try:
if 'float' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'float')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
elif 'int' in str(df[col].dtypes):
max_val = df[col].max()
min_val = df[col].min()
trans_types = self._get_type(min_val, max_val, 'int')
if trans_types is not None:
df[col] = df[col].astype(trans_types)
except:
print(' Can not do any process for column, {}.'.format(col))
afterprocess_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
print('After processing, the data occupies {} GB memory.'.format(afterprocess_memory))
return df
# ---------------------------------------------------------------------------
# Data loading and feature assembly for the age-group classifier.
# ---------------------------------------------------------------------------
memory_preprocess = _Data_Preprocess()

# Raw competition data.
user_app_actived = pd.read_csv('../../data/original_data/user_app_actived.csv', names=['uId', 'appId'])
x_train = pd.read_csv('../../data/original_data/age_train.csv', names=['uId', 'age_group'],
                      dtype={'uId': np.int32, 'age_group': np.int8})
# BUG FIX: this path was '../data/original_data/age_test.csv', inconsistent
# with every other data path in this script ('../../data/...').
x_test = pd.read_csv('../../data/original_data/age_test.csv', names=['uId'], dtype={'uId': np.int32})
usage_list = pd.read_csv('../../data/processed_data/usage_app_info.csv')
usage_appId = pd.read_csv('../../data/processed_data/usage_appId.csv')

# Precomputed base features; downcast dtypes to save memory.
train = pd.read_csv('../../data/features/base_train.csv')
test = pd.read_csv('../../data/features/base_test.csv')
train = memory_preprocess._memory_process(train)
test = memory_preprocess._memory_process(test)
print(test.info())
gc.collect()

# Features derived from the activated-apps list.
actived_features_all = pd.read_csv('../../data/features/actived_features_all.csv')
actived_features_all = memory_preprocess._memory_process(actived_features_all)
train = pd.merge(train, actived_features_all, how='left', on='uId').fillna(0)
test = pd.merge(test, actived_features_all, how='left', on='uId').fillna(0)
del actived_features_all
gc.collect()

# Hidden-state features produced by the activation/usage RNN.
act_use_rnn_hide_train = pd.read_csv('../../data/features/act_use_rnn_hide_train.csv')
act_use_rnn_hide_train = memory_preprocess._memory_process(act_use_rnn_hide_train)
act_use_rnn_hide_train.rename(columns={'Unnamed: 0': 'uId'}, inplace=True)
train = pd.merge(train, act_use_rnn_hide_train, how='left', on='uId').fillna(0)
del act_use_rnn_hide_train

act_use_rnn_hide_test = pd.read_csv('../../data/features/act_use_rnn_hide_test.csv')
act_use_rnn_hide_test = memory_preprocess._memory_process(act_use_rnn_hide_test)
act_use_rnn_hide_test.rename(columns={'Unnamed: 0': 'uId'}, inplace=True)
test = pd.merge(test, act_use_rnn_hide_test, how='left', on='uId').fillna(0)
print(test.info())
del act_use_rnn_hide_test
gc.collect()

# Combine each user's activated-app list with their usage-app list.
user_app_actived['app_list'] = user_app_actived.appId.str.split('#')

import ast
from tqdm import tqdm

usage_train = []
for idx in tqdm(usage_list.appId):
    # usage_app_info stores the app list as a Python-literal string.
    usage_train.append(ast.literal_eval(idx))
usage_list['app_list'] = usage_train

user_app_actived.drop('appId', axis=1, inplace=True)
usage_list.drop('appId', axis=1, inplace=True)
user_app_actived = pd.merge(user_app_actived, usage_list, how='left', on='uId')

result = []
for index, row in tqdm(user_app_actived.iterrows()):
    try:
        result.append(row['app_list_x'] + row['app_list_y'])
    except TypeError:
        # Was a bare `except:`. Users without usage data have NaN in
        # app_list_y (list + float raises TypeError); keep the
        # activated-app list only.
        result.append(row['app_list_x'])
user_app_actived['app_list'] = result
user_app_actived.drop(['app_list_x', 'app_list_y'], axis=1, inplace=True)

x_train = pd.merge(x_train, user_app_actived, how='left', on='uId')
x_test = pd.merge(x_test, user_app_actived, how='left', on='uId')
y_train = x_train.age_group - 1  # age groups 1..6 -> class labels 0..5
x_train = x_train.drop('age_group', axis=1)
del user_app_actived
del usage_list
del usage_train
gc.collect()

# Re-align the feature frames with the uId order of x_train / x_test.
train_uId = x_train.uId.tolist()
test_uId = x_test.uId.tolist()
test.index = test.uId.tolist()
train.index = train.uId.tolist()
test = test.loc[test_uId, :]
train = train.loc[train_uId, :]

# Vocabulary: all activated appIds plus the 10000 most frequent usage appIds.
appId = pd.read_csv('../../data/processed_data/appId.csv')
usage_appId = pd.read_csv('../../data/processed_data/usage_appId_top_num100000.csv')
usage_appId = usage_appId[-10000:]
usage_appId['id'] = np.arange(0, 10000)
all_appid = list(set(appId.appId.tolist() + usage_appId.appId.tolist()))
app_dict = dict(zip(all_appid, np.arange(len(all_appid))))

# Keep only in-vocabulary apps and turn each user's list into a token string.
x_train = [[x for x in apps if x in app_dict] for apps in x_train.app_list]
x_test = [[x for x in apps if x in app_dict] for apps in x_test.app_list]
x_train = [" ".join(app) for app in x_train]
x_test = [" ".join(app) for app in x_test]

# Bag-of-apps count matrix.
c_vec1 = CountVectorizer(lowercase=False, ngram_range=(1, 1), dtype=np.int8)
c_vec1.fit(x_train + x_test)
x_train = c_vec1.transform(x_train).toarray()
x_test = c_vec1.transform(x_test).toarray()
gc.collect()

# Scale the dense feature frames to [0, 1] and append them to the count matrix.
train.drop(['uId', 'age_group'], axis=1, inplace=True)
test.drop('uId', axis=1, inplace=True)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)

from sklearn.preprocessing import StandardScaler, MinMaxScaler

train = train.replace([np.inf, -np.inf], np.nan).fillna(0)
test = test.replace([np.inf, -np.inf], np.nan).fillna(0)
scaler = MinMaxScaler()
scaler.fit(pd.concat([train, test], axis=0))
train = scaler.transform(train)
test = scaler.transform(test)
train = memory_preprocess._memory_process(pd.DataFrame(train))
test = memory_preprocess._memory_process(pd.DataFrame(test))
gc.collect()

x_train = np.hstack((x_train, train.values))
x_test = np.hstack((x_test, test.values))

from keras.utils.np_utils import to_categorical

y_train = to_categorical(y_train, num_classes=None)
def mlp_v3():
    """Build and compile the 3-hidden-layer MLP used for age-group prediction.

    Returns a compiled Keras Sequential model with 6 softmax outputs
    (one per age group). BatchNormalization layers were commented out
    in the original experiment and are intentionally absent.
    """
    model = Sequential([
        Dense(1024, input_shape=(13, 400,)),
        Activation('relu'),
        Dropout(0.2),
        Dense(256),
        Activation('relu'),
        Dropout(0.2),
        Dense(128),
        Activation('relu'),
        Dropout(0.2),
        Dense(6),
        Activation('softmax'),
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='Nadam',
                  metrics=['accuracy'])
    return model
from sklearn.model_selection import train_test_split, StratifiedKFold
# 5-fold CV stratified on the class index (argmax of the one-hot labels).
# Out-of-fold predictions accumulate in y_val; test predictions are the
# average of the 5 fold models in y_test.
kfold = StratifiedKFold(n_splits=5, random_state=10, shuffle=False)
y_test = np.zeros((x_test.shape[0],6))
y_val = np.zeros((x_train.shape[0],6))
for i, (train_index, valid_index) in enumerate(kfold.split(x_train, np.argmax(y_train,axis=1))):
    X_train, X_val, Y_train, Y_val = x_train[train_index],x_train[valid_index], y_train[train_index], y_train[valid_index]
    filepath="weights_best2.h5"
    # Keep the best weights by val_loss; decay the LR on plateaus.
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=2, save_best_only=True, mode='min')
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.6, patience=1, min_lr=0.0001, verbose=2)
    # NOTE(review): earlystopping is created but never added to `callbacks`,
    # so every fold trains the full 5 epochs — confirm this is intended.
    earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2, verbose=2, mode='auto')
    callbacks = [checkpoint, reduce_lr]
    model = mlp_v3()
    if i == 0:print(model.summary())
    model.fit(X_train, Y_train, batch_size=128, epochs=5, validation_data=(X_val, Y_val), verbose=1, callbacks=callbacks,
              )
    # Restore the best checkpoint before predicting.
    model.load_weights(filepath)
    y_val[valid_index] = model.predict(X_val, batch_size=128, verbose=1)
    y_test += np.array(model.predict(x_test, batch_size=128, verbose=1))/5
# Persist out-of-fold and averaged test probabilities keyed by user id
# (for downstream stacking).
y_val = pd.DataFrame(y_val,index=train_uId)
y_val.to_csv('../../data/prob_file/act_all_train_mlp.csv')
y_test = pd.DataFrame(y_test,index=test_uId)
y_test.to_csv('../../data/prob_file/act_all_test_mlp.csv') | StarcoderdataPython |
96086 | # library
from redesigned_barnacle.config import load_config, parse_file
from redesigned_barnacle.eth import eth_start
from redesigned_barnacle.sparkline import Sparkline
from redesigned_barnacle.unit import temp_ftoc
from prometheus_express import start_http_server, CollectorRegistry, Counter, Gauge, Router
from bme280 import BME280
from ssd1306 import SSD1306_I2C
# system
import esp32
import machine
import network
import os
import time
def bind(eth, config):
    """Start the HTTP metrics server bound to the Ethernet interface's IP."""
    address = eth.ifconfig()[0]
    listen_port = int(config['server_port'])
    print('Binding server: {}:{}'.format(address, listen_port))
    return start_http_server(listen_port, address=address)
def main():
    """Firmware entry point: read BME280/ESP32 temperatures, draw a sparkline
    on the OLED, and serve Prometheus metrics over Ethernet forever."""
    # setup sensors (shared I2C bus for the BME280 and the 128x32 OLED)
    bus = machine.I2C(scl=machine.Pin(16), sda=machine.Pin(13))
    bme = BME280(i2c=bus)
    oled = SSD1306_I2C(128, 32, bus)
    # setup storage (config lives on the SD card)
    card = machine.SDCard()
    os.mount(card, '/card')
    # setup networking (LAN8720 PHY; pin numbers are board-specific)
    config = load_config('/card', 'config.yml')
    eth = eth_start(
        config,
        mdc=machine.Pin(23),
        mdio=machine.Pin(18),
        phy_type=network.PHY_LAN8720,
        phy_addr=0,
        clock_mode=network.ETH_CLOCK_GPIO17_OUT,
        power_pin=machine.Pin(12, machine.Pin.OUT)
    )
    # setup display
    sl = Sparkline(32, 128)
    oled.init_display()
    oled.fill(0x0)
    oled.text('loading', 0, 0)
    oled.show()
    # setup Prometheus metrics
    registry = CollectorRegistry(namespace='prometheus_express')
    metric_beat = Counter(
        'system_heartbeat',
        'system heartbeat counter',
        labels=['location'],
        registry=registry
    )
    metric_temp = Gauge(
        'sensor_temperature',
        'temperature data from the sensors',
        labels=['location', 'sensor'],
        registry=registry
    )
    router = Router()
    router.register('GET', '/metrics', registry.handler)
    server = False
    # wait for incoming connection
    while True:
        # Retry binding until the interface has an address (e.g. DHCP delay).
        while not server:
            time.sleep(1)
            server = bind(eth, config)
        bme_reading = bme.read_compensated_data()
        # Map temperature to a 0..31 pixel row for the sparkline.
        # NOTE(review): presumably tuned for ~12-28 degC readings; the modulo
        # wraps values outside that band — confirm the intended range.
        temp_line = ((bme_reading[0] - 12) * 2) % 32
        print('temp line: {}'.format(temp_line))
        oled.fill(0x0)
        sl.push(temp_line)
        sl.draw(oled, 0, 12)
        oled.text(str(bme_reading[0]), 0, 0)
        oled.show()
        location = config['metric_location']
        metric_beat.labels(location).inc(1)
        # ESP32 internal sensor reports Fahrenheit; convert to Celsius.
        metric_temp.labels(location, 'esp32').set(
            temp_ftoc(esp32.raw_temperature()))
        metric_temp.labels(location, 'bme280').set(bme_reading[0])
        try:
            # Blocks until a client connects; errors are logged, not fatal.
            server.accept(router)
        except OSError as err:
            print('Error accepting request: {}'.format(err))
        except ValueError as err:
            print('Error parsing request: {}'.format(err))
# Start the firmware loop. MicroPython runs main.py as __main__, so the guard
# is a no-op on-device, but it prevents the infinite loop from firing if this
# module is ever imported (e.g. from the REPL for debugging).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4815494 | from vivarium.utils.datum import Datum
class Protein(Datum):
    """A Datum describing a protein: an id plus its amino-acid sequence."""
    # Default field values applied by the Datum base class.
    defaults = {
        'id': '',
        'sequence': ''}
    def __init__(self, config, defaults=defaults):
        # NOTE(review): the `defaults` parameter is ignored — the call below
        # always passes the class attribute via self.defaults; confirm whether
        # caller-supplied defaults were intended.
        super(Protein, self).__init__(config, self.defaults)
# Module-level constant: green fluorescent protein with its full
# amino-acid sequence.
GFP = Protein({
    'id': 'GFP',
    'sequence': 'MSKGEELFTGVVPILVELDGDVNGHKFSVSGEGEGDATYGKLTLKFICTTGKLPVPWPTLVTTFSYGVQCFSRYPDHMKQHDFFKSAMPEGYVQERTIFFKDDGNYKTRAEVKFEGDTLVNRIELKGIDFKEDGNILGHKLEYNYNSHNVYIMADKQKNGIKVNFKIRHNIEDGSVQLADHYQQNTPIGDGPVLLPDNHYLSTQSALSKDPNEKRDHMVLLEFVTAAGITHGMDELYK'})
| StarcoderdataPython |
def checkupdate():
    """Check GitHub for a newer BabySploit version and self-update on confirm.

    Compares the local ``babysploit/version`` file against the copy on the
    master branch; if they differ, offers to hard-reset the checkout to
    origin/master, reinstall requirements, and clear the bytecode cache.
    """
    import requests
    from subprocess import Popen, PIPE
    with open("babysploit/version", "r") as fwv:
        data = fwv.read().replace(" ", "")
    cv = requests.get("https://raw.githubusercontent.com/M4cs/BabySploit/master/babysploit/version").text.replace(" ", "")
    if data != cv:
        print("[!] Update Found | Version: %s [!]" % cv)
        ask = str(input("[y\\n] ").lower())
        if ask == "y":
            # BUG FIX 1: the original launched fetch/reset/pip simultaneously
            # (Popen starts immediately), racing the git reset against the
            # fetch it depends on. Run each step to completion in order.
            # BUG FIX 2: the original passed "rm -rf babysploit/__pycache__/"
            # as a single argv element, which tries to exec a file literally
            # named "rm -rf ..." and fails; the command must be an argv list.
            steps = (
                ("Fetching Update..", ["git", "fetch", "--all"]),
                ("Downloading Update..", ["git", "reset", "--hard", "origin/master"]),
                ("Installing Modules.. This may take a minute..", ["pip3", "install", "-r", "requirements.txt"]),
                (None, ["rm", "-rf", "babysploit/__pycache__/"]),
            )
            for message, args in steps:
                if message is not None:
                    print(message)
                process = Popen(args, stdout=PIPE, stderr=PIPE)
                process.communicate()
| StarcoderdataPython |
184105 | # openweatherclass Python Program for Raspberry Pi 3
# Author: <NAME>
# Date: August 2, 2021 v1.0.0
# Revision:
# Import class files
import requests
import json
# import pprint
# --------------Class Definitions------------------#
class OpenWeatherAPI(object):
    '''
    OpenWeatherAPI Class:
    Inputs: apikey - provided with user account on openweathermap.org
            location - city,state OR zipcode
            units - weather units (default is 'imperial')
            country - two character country code (default is US)
    '''

    # 16-point compass rose; one 22.5-degree sector per label, centred on
    # north (sector 0 spans 348.75..11.25 degrees).
    _COMPASS = ('N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
                'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW')

    def __init__(self, apikey, location='59327', units='imperial', country='US'):
        self.apikey = apikey
        self.location = location
        self.country = country
        self.units = units
        baseurl = 'https://api.openweathermap.org/data/2.5/weather?'
        self.complete_url = baseurl + 'appid=' + self.apikey + '&q=' + self.location + ',' + self.country + '&units=' + self.units

    def get_wind_direction(self, degrees):
        """Convert a wind bearing in degrees to a 16-point compass label.

        Replaces the original 16-branch if/elif chain with a sector lookup;
        results are identical for in-range integer bearings, and bearings
        outside 0..360 are now normalized onto the compass instead of
        collapsing to 'N'.
        """
        wd = int(degrees) % 360
        return self._COMPASS[int((wd + 11.25) // 22.5) % 16]

    def get_weather_data(self):
        """Fetch current conditions and return a one-line forecast string.

        Returns an error string when the location is not found (API code 404
        or an error 'message' in the payload).
        """
        response = requests.get(self.complete_url)
        result = response.json()
        # pprint.pprint(result)
        # check results
        if result['cod'] != 404 and 'message' not in result:
            data = result['main']
            current_temp = data['temp']
            current_pressure = data['pressure']
            current_humidity = data['humidity']
            weather = result['weather']
            weather_desc = weather[0]['description']
            location = result['name']
            wind_direction = result['wind']['deg']
            wind_speed = result['wind']['speed']
            # Convert Degrees to Human Direction
            wind_dir = self.get_wind_direction(wind_direction)
            forecast = (('In {0}, it is currently: {1} with a temperature of: {2}F and winds out of: {3} at {4} MPH. Barometric pressure is: {5} hPa and humidity is: {6}%.').format(location, weather_desc, current_temp, wind_dir, wind_speed, current_pressure, current_humidity))
            return(forecast)
        else:
            return(('Error: Weather for Location: {0} not found.').format(self.location))
| StarcoderdataPython |
10236 | # ██╗░░░░░██╗███╗░░██╗░██████╗░░░░██████╗░██╗░░░░░░█████╗░░█████╗░██╗░░██╗
# ██║░░░░░██║████╗░██║██╔════╝░░░░██╔══██╗██║░░░░░██╔══██╗██╔══██╗██║░██╔╝
# ██║░░░░░██║██╔██╗██║██║░░██╗░░░░██████╦╝██║░░░░░███████║██║░░╚═╝█████═╝░
# ██║░░░░░██║██║╚████║██║░░╚██╗░░░██╔══██╗██║░░░░░██╔══██║██║░░██╗██╔═██╗░
# ███████╗██║██║░╚███║╚██████╔╝░░░██████╦╝███████╗██║░░██║╚█████╔╝██║░╚██╗
# ╚══════╝╚═╝╚═╝░░╚══╝░╚═════╝░░░░╚═════╝░╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝
#
# Developed by <NAME> (C) Ling • Black 2020
# @site http://ling.black
# ██╗░░░░░██╗███╗░░██╗░██████╗░░░░██████╗░██╗░░░░░░█████╗░░█████╗░██╗░░██╗
# ██║░░░░░██║████╗░██║██╔════╝░░░░██╔══██╗██║░░░░░██╔══██╗██╔══██╗██║░██╔╝
# ██║░░░░░██║██╔██╗██║██║░░██╗░░░░██████╦╝██║░░░░░███████║██║░░╚═╝█████═╝░
# ██║░░░░░██║██║╚████║██║░░╚██╗░░░██╔══██╗██║░░░░░██╔══██║██║░░██╗██╔═██╗░
# ███████╗██║██║░╚███║╚██████╔╝░░░██████╦╝███████╗██║░░██║╚█████╔╝██║░╚██╗
# ╚══════╝╚═╝╚═╝░░╚══╝░╚═════╝░░░░╚═════╝░╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝
#
# Developed by <NAME> (C) Ling • Black 2020
# @site http://ling.black
from typing import List
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel
from core.response import RequestLimit
from database import get_db, DatabaseUtils
from database.wow.models import PostModel, PostCommentsModel
from wow.interface.entity import PostCategory, Post, PostCategoryCreate, PostCreate, PostLikeCreate, PostCommentCreate
from wow.utils.posts import PostsUtils
from wow.utils.users import BlizzardUsersUtils
router = APIRouter()
class TokenArgs(BaseModel):
    """Request body carrying only a Blizzard OAuth token."""
    token: str
class TokenPostIdArgs(BaseModel):
    """Blizzard OAuth token plus the id of the post being acted on."""
    token: str
    post_id: int
class CommentIdAndToken(TokenArgs):
    """Blizzard OAuth token plus the id of the comment being acted on."""
    comment_id: int
class PostAPIList(BaseModel):
    """One page of posts plus the total item count."""
    items: List[Post]
    count: int
class PostAPIListResponse(BaseModel):
    """Paginated envelope: the post page plus request/limit metadata."""
    response: PostAPIList
    request: RequestLimit
# -----------------------------------
# CATEGORIES
# -----------------------------------
@router.post(
    "/categories",
    response_model=PostCategory,
    summary='Adds the category'
)
def add_category(body: PostCategoryCreate):
    """
    Creates a post category.

    The author is resolved (and the token validated) via
    BlizzardUsersUtils.id__safe before the category is stored.

    :param body: category payload (token, url slug, title)
    :return: the created PostCategory
    """
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.add_category(user_id=blizzard_id, url=body.url, title=body.title)
@router.get(
    "/categories",
    response_model=List[PostCategory],
    summary='Returns the categories'
)
def get_categories():
    """
    Returns the full list of post categories (no auth required).

    :return: list of PostCategory
    """
    return PostsUtils.get_categories()
# -----------------------------------
# POSTS
# -----------------------------------
@router.get(
    "/",
    response_model=PostAPIListResponse,
    summary='Returns all the posts'
)
def get_posts_all(limit: int = 100, offset: int = 0):
    """
    Returns a paginated list of all posts.

    NOTE(review): three handlers in this module are named ``get_posts_all``
    (and two ``like_post``). FastAPI registers each route at decoration time
    so the endpoints all work, but the later defs shadow the earlier names at
    module level — consider unique names.

    :param limit: page size
    :param offset: number of posts to skip
    """
    return PostsUtils.get_posts_limit(
        limit=limit,
        offset=offset
    )
@router.get(
    "/category/{category_url}",
    response_model=PostAPIListResponse,
    summary='Returns the posts in category'
)
def get_posts_all(category_url: int, limit: int = 100, offset: int = 0):
    """
    Returns a paginated list of posts in one category.

    NOTE(review): the path segment is named ``category_url`` but is typed int
    and forwarded as ``category_id`` — presumably it is the numeric category
    id, not the URL slug; confirm and rename for clarity.

    :param category_url: numeric category id (see note)
    :param limit: page size
    :param offset: number of posts to skip
    """
    return PostsUtils.get_posts_by_category_limit(
        category_id=category_url,
        limit=limit,
        offset=offset
    )
@router.get(
    "/user/{blizzard_id}",
    response_model=PostAPIListResponse,
    summary='Returns the posts by users'
)
def get_posts_all(blizzard_id: int, limit: int = 100, offset: int = 0):
    """
    Returns a paginated list of posts authored by one user.

    NOTE(review): this def shadows the two earlier ``get_posts_all``
    handlers at module level (routes still registered) — consider renaming.

    :param blizzard_id: author's Blizzard user id
    :param limit: page size
    :param offset: number of posts to skip
    """
    return PostsUtils.get_posts_by_blizzard_id(
        blizzard_id=blizzard_id,
        limit=limit,
        offset=offset
    )
@router.post(
    "/like",
    summary='Likes the post',
    tags=['Лайки']
)
def like_post(body: PostLikeCreate):
    """Add the calling user's like to a post (user resolved from the token)."""
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.add_like(
        user_id=blizzard_id,
        post_id=body.post_id,
    )
@router.post(
    "/unlike",
    summary='Unlikes the post',
    tags=['Лайки']
)
def like_post(body: PostLikeCreate):
    """Remove the calling user's like from a post.

    NOTE(review): this def reuses the name ``like_post`` — the /like route is
    already registered so behavior is unaffected, but consider renaming.
    """
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.remove_like(
        user_id=blizzard_id,
        post_id=body.post_id,
    )
@router.post(
    "/comment",
    summary='Adds the comment',
    tags=['Комментарии']
)
def like_post(body: PostCommentCreate):
    """Add a comment (optionally a reply) to a post as the token's user.

    NOTE(review): this def also reuses the name ``like_post`` — consider a
    distinct name such as ``add_comment``.
    """
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.add_comment(
        user_id=blizzard_id,
        post_id=body.post_id,
        reply_id=body.reply_id,
        text=body.text,
    )
@router.delete(
    "/comment",
    summary='Removes the comment',
    tags=['Комментарии']
)
def removes_post(body: CommentIdAndToken, db=Depends(get_db)):
    """Delete a comment owned by the calling user.

    Only a comment whose user_id matches the token's user is deleted.

    :return: True if a comment was deleted, False otherwise
    """
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    com = db.query(PostCommentsModel).filter(PostCommentsModel.id == body.comment_id).filter(
        PostCommentsModel.user_id == blizzard_id)
    if com.count() > 0:
        com.delete()
        db.commit()
        return True
    return False
@router.post(
    "/",
    response_model=Post,
    summary='Adds the post'
)
def add_post(body: PostCreate):
    """
    Creates a post authored by the token's user.

    :param body: post payload (token, category_id, title, content, tags, image)
    :return: the created Post
    """
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.add_post(
        user_id=blizzard_id,
        category_id=body.category_id,
        title=body.title,
        content=body.content,
        tags=body.tags,
        image=body.image
    )
@router.delete(
    "/{post_id}",
    summary='Deletes the post'
)
def delete_post(post_id: int, body: TokenArgs, db=Depends(get_db)):
    """Delete a post owned by the calling user.

    Ownership is enforced by filtering on both id and user_id; raises 404
    when there is no matching owned post.
    """
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    q = db.query(PostModel).filter(PostModel.id == post_id).filter(PostModel.user_id == blizzard_id)
    if q.count() == 0:
        raise HTTPException(status_code=404, detail='Post is undefined')
    return DatabaseUtils.remove_query(db, q)
@router.post(
    "/{post_id}",
    summary='Edits the post'
)
def edit_post(post_id: int, body: PostCreate, db=Depends(get_db)):
    """Update a post owned by the calling user.

    Ownership is enforced by filtering on both id and user_id; raises 404
    when there is no matching owned post.

    :return: True on success
    """
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    q = db.query(PostModel).filter(PostModel.id == post_id).filter(PostModel.user_id == blizzard_id)
    if q.count() == 0:
        raise HTTPException(status_code=404, detail='Post is undefined')
    q.update({
        'title': body.title,
        'content': body.content,
        'category_id': body.category_id,
        'image': body.image,
        'tags': body.tags,
    })
    db.commit()
    return True
@router.get(
    "/{post_id}",
    response_model=Post,
    summary='Returns the post'
)
def get_post(post_id: int, db=Depends(get_db)):
    """Return a single post by id.

    NOTE(review): returns None for an unknown id (serialized against the
    Post response model) — consider raising a 404 instead.
    """
    return db.query(PostModel).filter(PostModel.id == post_id).first()
| StarcoderdataPython |
1703382 | # Important: We are using PIL to read .png files later.
# This was done on purpose to read indexed png files
# in a special way -- only indexes and not map the indexes
# to actual rgb values. This is specific to PASCAL VOC
# dataset data. If you don't want thit type of behaviour
# consider using skimage.io.imread()
from PIL import Image
import numpy as np
import skimage.io as io
import tensorflow as tf
import pdb
# Helper functions for defining tf types
def _bytes_feature(value):
    # Wrap a raw byte string in a tf.train bytes feature.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
    # Wrap a scalar integer in a tf.train int64 feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def write_image_annotation_pairs_to_tfrecord_with_paths(filename_pairs, tfrecords_filename):
    """Writes given image/annotation pairs to the tfrecords file.
    The function reads each image/annotation pair given filenames
    of image and respective annotation and writes it to the tfrecord
    file.

    NOTE(review): this module is Python 2 code (``print index`` statements,
    ``xrange`` elsewhere in the file).

    Parameters
    ----------
    filename_pairs : array of tuples (img_filepath, annotation_filepath)
        Array of tuples of image/annotation filenames
    tfrecords_filename : string
        Tfrecords filename to write the image/annotation pairs
    """
    writer = tf.python_io.TFRecordWriter(tfrecords_filename)
    index = 0
    for img_path, annotation_path,depth_path in filename_pairs:
        # Crop every image/annotation/depth map to the first 240 columns.
        img_ = np.array(Image.open(img_path))
        img = img_[:,:240,:]
        annotation_ = np.array(Image.open(annotation_path))
        annotation = annotation_[:,:240]
        depth_ = np.array(Image.open(depth_path))
        depth= depth_[:,:240]
        # Depth is stored log-scaled: log(d + 1) as float32.
        depth = depth.astype('float32')
        depth = np.log(depth + 1.)
        index +=1
        print index
        # Unomment this one when working with surgical data
        # annotation = annotation[:, :, 0]
        # The reason to store image sizes was demonstrated
        # in the previous example -- we have to know sizes
        # of images to later read raw serialized string,
        # convert to 1d array and convert to respective
        # shape that image used to have.
        height = depth.shape[0]
        width = depth.shape[1]
        img_raw = img.tostring()
        annotation_raw = annotation.tostring()
        depth_raw = depth.tostring()
        example = tf.train.Example(features=tf.train.Features(feature={
            'image_name': _bytes_feature((img_path)),
            'height': _int64_feature(height),
            'width': _int64_feature(width),
            'image_raw': _bytes_feature(img_raw),
            'mask_raw': _bytes_feature(annotation_raw),
            'depth_raw':_bytes_feature(depth_raw)}))
        writer.write(example.SerializeToString())
    writer.close()
def write_mnist_mnistm_syn_shvn(mnist_train,mnistm_train,shvn_train,syn_train, tfrecords_filename):
    """Writes paired samples from four digit datasets to one tfrecords file.

    Each record holds TWO samples per dataset (index k and k + len/2), so the
    file contains len(mnist_train)/2 records. MNIST/MNIST-M share one image
    size, SVHN/Syn another; both sizes are stored per record.

    Parameters
    ----------
    mnist_train, mnistm_train, shvn_train, syn_train : arrays of images
        Parallel arrays; all assumed the same length (TODO confirm).
    tfrecords_filename : string
        Tfrecords filename to write the samples to.
    """
    writer = tf.python_io.TFRecordWriter(tfrecords_filename)
    height1 = mnist_train[0].shape[0]
    width1 = mnist_train[0].shape[1]
    height2 = shvn_train[0].shape[0]
    width2 = shvn_train[0].shape[1]
    for k in xrange(len(mnist_train)/2):
        # Pair sample i from the first half with sample j from the second.
        i = k
        j = k+len(mnist_train)/2
        mnist1,mnistm1,shvn1,syn1 = mnist_train[i].tostring(),mnistm_train[i].tostring(),shvn_train[i].tostring(),syn_train[i].tostring()
        mnist2,mnistm2,shvn2,syn2 = mnist_train[j].tostring(),mnistm_train[j].tostring(),shvn_train[j].tostring(),syn_train[j].tostring()
        example = tf.train.Example(features=tf.train.Features(feature={
            'height1': _int64_feature(height1),
            'width1': _int64_feature(width1),
            'height2': _int64_feature(height2),
            'width2': _int64_feature(width2),
            'mnist1': _bytes_feature(mnist1),
            'mnist2': _bytes_feature(mnist2),
            'mnistm1': _bytes_feature(mnistm1),
            'mnistm2': _bytes_feature(mnistm2),
            'shvn1': _bytes_feature(shvn1),
            'shvn2': _bytes_feature(shvn2),
            'syn1': _bytes_feature(syn1),
            'syn2': _bytes_feature(syn2)}))
        writer.write(example.SerializeToString())
        print('index: %d'%k)
    writer.close()
def read_mnist_mnistm_syn_shvn(tfrecord_filenames_queue):
    """Return image tensors created by reading one paired-digits tfrecord.

    Inverse of write_mnist_mnistm_syn_shvn: parses one serialized example
    from the queue and decodes the eight uint8 images, reshaping the
    MNIST/MNIST-M pair to (height1, width1, 3) and the SVHN/Syn pair to
    (height2, width2, 3).

    Parameters
    ----------
    tfrecord_filenames_queue : tfrecord filename queue String queue object from tf.train.string_input_producer()

    Returns
    -------
    mnist1, mnistm1, shvn1, syn1, mnist2, mnistm2, shvn2, syn2 : uint8 tensors
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(tfrecord_filenames_queue)
    features = tf.parse_single_example(
      serialized_example,
      features={
        'height1': tf.FixedLenFeature([], tf.int64),
        'width1': tf.FixedLenFeature([], tf.int64),
        'height2': tf.FixedLenFeature([], tf.int64),
        'width2': tf.FixedLenFeature([], tf.int64),
        'mnist1': tf.FixedLenFeature([], tf.string),
        'mnist2': tf.FixedLenFeature([], tf.string),
        'mnistm1': tf.FixedLenFeature([], tf.string),
        'mnistm2': tf.FixedLenFeature([], tf.string),
        'shvn1': tf.FixedLenFeature([], tf.string),
        'shvn2': tf.FixedLenFeature([], tf.string),
        'syn1': tf.FixedLenFeature([], tf.string),
        'syn2': tf.FixedLenFeature([], tf.string)
        })
    # Decode the raw byte strings back to uint8 pixel arrays.
    mnist1 = tf.decode_raw(features['mnist1'], tf.uint8)
    mnist2 = tf.decode_raw(features['mnist2'], tf.uint8)
    mnistm1 = tf.decode_raw(features['mnistm1'], tf.uint8)
    mnistm2 = tf.decode_raw(features['mnistm2'], tf.uint8)
    shvn1 = tf.decode_raw(features['shvn1'], tf.uint8)
    shvn2 = tf.decode_raw(features['shvn2'], tf.uint8)
    syn1 = tf.decode_raw(features['syn1'], tf.uint8)
    syn2 = tf.decode_raw(features['syn2'], tf.uint8)
    height1 = tf.cast(features['height1'], tf.int32)
    width1 = tf.cast(features['width1'], tf.int32)
    height2 = tf.cast(features['height2'], tf.int32)
    width2 = tf.cast(features['width2'], tf.int32)
    image1_shape = tf.stack([height1, width1, 3])
    image2_shape = tf.stack([height2, width2, 3])
    # The last dimension was added because
    # the tf.resize_image_with_crop_or_pad() accepts tensors
    # that have depth. We need resize and crop later.
    # TODO: See if it is necessary and probably remove third
    # dimension
    mnist1 = tf.reshape(mnist1, image1_shape)
    mnist2 = tf.reshape(mnist2, image1_shape)
    mnistm1 = tf.reshape(mnistm1, image1_shape)
    mnistm2 = tf.reshape(mnistm2, image1_shape)
    shvn1 = tf.reshape(shvn1,image2_shape)
    shvn2 = tf.reshape(shvn2,image2_shape)
    syn1 = tf.reshape(syn1,image2_shape)
    syn2 = tf.reshape(syn2,image2_shape)
    return mnist1,mnistm1,shvn1,syn1,mnist2,mnistm2,shvn2,syn2
| StarcoderdataPython |
65999 | #!/usr/bin/env python3
import sys
import numpy as np
from example import AmiciExample
class ExampleCalvetti(AmiciExample):
    """Options container for the AMICI 'calvetti' example, sensitivities off."""
    def __init__(self):
        AmiciExample.__init__( self )
        # Model dimensions: 6 states, no free parameters, 6 fixed parameters.
        self.numX = 6
        self.numP = 0
        self.numK = 6
        self.modelOptions['theta'] = []
        self.modelOptions['kappa'] = [0.29, 0.74, 0.44, 0.08, 0.27, 0.18]
        # 201 equally spaced output time points on [0, 20]; no param scaling.
        self.modelOptions['ts'] = np.linspace(0, 20, 201)
        self.modelOptions['pscale'] = 0
        # Solver tolerances; sensitivity analysis disabled (sensi = 0).
        self.solverOptions['atol'] = 1e-6
        self.solverOptions['rtol'] = 1e-4
        self.solverOptions['sens_ind'] = []
        self.solverOptions['sensi'] = 0
        self.solverOptions['sensi_meth'] = 1
def writeNoSensi(filename):
    """Write the no-sensitivity Calvetti options to *filename* under
    the /model_calvetti/nosensi/ group."""
    example = ExampleCalvetti()
    example.writeToFile(filename, '/model_calvetti/nosensi/')
def main():
    """CLI entry point: expects the output file as the sole argument."""
    if len(sys.argv) < 2:
        print("Error: Must provide output file as first and only argument.")
        sys.exit(1)
    writeNoSensi(sys.argv[1])


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3266924 | <filename>003/highest_and_lowest_7kyu.py
"""In this little assignment you are given a string of space separated
numbers, and have to return the highest and lowest number."""
def high_and_low(numbers):
    """Return "<max> <min>" for a string of space separated integers."""
    values = [int(token) for token in numbers.split(" ")]
    return "{0} {1}".format(max(values), min(values))
def test_case():
    """Some useful test cases for the problem"""
    assert high_and_low("4 5 29 54 4 0 -214 542 -64 1 -3 6 -6") == "542 -214"
    print("Test Success!")

# Run the self-check when the script is executed.
test_case()
4811105 | <gh_stars>1-10
import numpy as np
class truss:
    """A 2-D truss element between two nodes.

    Stores geometry, the local->global transformation, and both the local
    (K1D) and global (K2D) stiffness matrices. Node indices are zero-based.
    """

    def __init__(self, x1, x2, y1, y2, E, A,
                 node1, node2, stress=None, strain=None):
        # End-point coordinates.
        self.x1, self.x2 = x1, x2
        self.y1, self.y2 = y1, y2
        # Material modulus and cross-sectional area.
        self.E = E
        self.A = A
        dx = x2 - x1
        dy = y2 - y1
        self.l = (dx ** 2 + dy ** 2) ** 0.5
        # Direction cosines of the element axis.
        self.cos_t = dx / self.l
        self.sin_t = dy / self.l
        # Transformation from the 4 global dof to the 2 local dof.
        self.L = [[self.cos_t, self.sin_t, 0, 0],
                  [0, 0, self.cos_t, self.sin_t]]
        self.eL = [-self.cos_t, -self.sin_t, self.cos_t, self.sin_t]
        # Axial stiffness k = EA/l; local then global stiffness matrix.
        k = self.E * self.A / self.l
        self.K1D = [[k, -k], [-k, k]]
        self.K2D = (np.matrix(self.L).getT() * np.matrix(self.K1D) *
                    np.matrix(self.L)).tolist()
        self.node1 = node1  # zero-indexed
        self.node2 = node2  # zero-indexed
class force:
    """A point load with components (fx, fy) applied at a node."""

    def __init__(self, fx, fy, node):
        self.fx, self.fy, self.node = fx, fy, node
class fixed_node:
    """A prescribed-displacement boundary condition on one dof of a node."""

    def __init__(self, node, x_or_y, disp):
        # x_or_y selects which dof is constrained; disp is the prescribed value.
        self.node, self.x_or_y, self.disp = node, x_or_y, disp
1080 | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gio, Gtk, Gdk
class AddFriendWidget(Gtk.Box):
    """GTK pane for adding a friend in FChat.

    Flow: generate an owner key (async via fchat_prv), copy it to the friend,
    paste the friend's key, then save. The on_*_start/ok/faild methods are
    callbacks invoked by fchat_prv to toggle the spinner and widget
    sensitivity around the async jobs.
    """
    def __init__(self, main_window, fchat_prv, friend_list):
        Gtk.Box.__init__(self, spacing=7, orientation = Gtk.Orientation.VERTICAL)
        self.fchat_prv = fchat_prv
        self.main_window = main_window
        self.friend_list = friend_list
        # Register this widget so the provider can call our callbacks.
        self.fchat_prv.add_friend_gui = self
        self.generate_keys_bt = Gtk.Button('Generate Key')
        self.generate_keys_bt.connect('clicked', self.on_generate_keys)
        self.save_bt = Gtk.Button('Save')
        self.save_bt.connect('clicked', self.on_save)
        self.cancel_bt = Gtk.Button('Cancel')
        self.cancel_bt.connect('clicked', self.on_cancel)
        self.close_bt = Gtk.Button('Close')
        self.close_bt.connect('clicked', self.on_close)
        # Read-only entry showing the generated owner key.
        self.owner_info = Gtk.Entry()
        self.owner_info.set_sensitive(False)
        self.copy_clipboard_bt = Gtk.Button(label='Copy to clipboard')
        self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
        self.copy_clipboard_bt.connect('clicked', self.on_copy_clipboard)
        h_owner = Gtk.Box(spacing=5)
        h_owner.pack_start(self.owner_info, True, True, 1)
        h_owner.pack_start(self.copy_clipboard_bt, False, False, 1)
        self.friend_info = Gtk.Entry()
        self.friend_info.set_placeholder_text('Key of your friend')
        self.spinner = Gtk.Spinner()
        self.pack_start(h_owner, True, False, 7)
        self.pack_start(self.friend_info, True, False, 7)
        self.pack_start(self.spinner, True, False, 7)
        h_bt = Gtk.Box()
        h_bt.pack_start(self.generate_keys_bt, True, False, 7)
        h_bt.pack_start(self.save_bt, True, False, 7)
        h_bt.pack_start(self.cancel_bt, True, False, 7)
        h_bt.pack_start(self.close_bt, True, False, 7)
        self.pack_start(h_bt, True, False, 7)
        # Handle to the in-flight key-generation job (for Cancel).
        self.job = None
    def on_generate_keys(self, button):
        """Kick off async key generation and show the owner key."""
        self.pub, self.prv, self.pub_info_key, self.job = self.fchat_prv.generate_key_for_friend()
        self.owner_info.set_text(self.pub_info_key)
        self.on_generate_keys_start()
    def on_generate_keys_start(self):
        """Busy state while keys are being generated: spin + disable inputs."""
        self.spinner.show()
        self.spinner.start()
        self.friend_info.set_sensitive(False)
        self.save_bt.set_sensitive(False)
        self.close_bt.set_sensitive(False)
        self.generate_keys_bt.set_sensitive(False)
        self.copy_clipboard_bt.set_sensitive(False)
    def on_generate_keys_ok(self):
        """Provider callback: key generation finished — re-enable the UI."""
        self.spinner.hide()
        self.spinner.stop()
        self.friend_info.set_sensitive(True)
        self.save_bt.set_sensitive(True)
        self.close_bt.set_sensitive(True)
        self.generate_keys_bt.set_sensitive(True)
        self.copy_clipboard_bt.set_sensitive(True)
    def on_generate_keys_faild(self, text):
        """Provider callback: key generation failed — re-enable the UI."""
        self.spinner.hide()
        self.spinner.stop()
        self.friend_info.set_sensitive(True)
        self.save_bt.set_sensitive(True)
        self.close_bt.set_sensitive(True)
        self.generate_keys_bt.set_sensitive(True)
        self.copy_clipboard_bt.set_sensitive(True)
    def on_cancel(self, button):
        """Cancel the in-flight key-generation job, if any."""
        if self.job:
            self.job.remove_from_queue_when_finish()
    def on_close(self, button):
        """Return to the main window / friend list."""
        self.main_window.application.back_main_window_or_friend_list()
    def on_save(self, button):
        """Validate both keys and start the async add-friend operation."""
        if self.owner_info.get_text() == '':
            self.msg_info('You should generate a key that contains your info')
            return
        if self.friend_info.get_text() == '':
            self.msg_info('Friend info is required')
            return
        self.fchat_prv.add_friend(self.pub, self.prv, self.friend_info.get_text())
        self.on_save_start()
    def on_save_start(self):
        """Busy state while the friend is being saved."""
        self.spinner.show()
        self.spinner.start()
        self.friend_info.set_sensitive(False)
        self.save_bt.set_sensitive(False)
        self.close_bt.set_sensitive(False)
        self.generate_keys_bt.set_sensitive(False)
        self.copy_clipboard_bt.set_sensitive(False)
    def on_save_start_ok(self):
        """Provider callback: friend saved — re-enable UI and refresh list."""
        self.spinner.hide()
        self.spinner.stop()
        self.friend_info.set_sensitive(True)
        self.save_bt.set_sensitive(True)
        self.close_bt.set_sensitive(True)
        self.generate_keys_bt.set_sensitive(True)
        self.copy_clipboard_bt.set_sensitive(True)
        self.friend_list.sync_friends_list()
    def on_save_start_duplicate(self, text):
        """Provider callback: friend already exists — show the message."""
        self.msg_info(text)
    def on_save_start_faild(self):
        """Provider callback: save failed — show an error and re-enable UI."""
        dialog = Gtk.MessageDialog(self.main_window, 0, Gtk.MessageType.ERROR, Gtk.ButtonsType.OK, "ERROR")
        dialog.format_secondary_text("Error adding friend please try later")
        dialog.run()
        dialog.destroy()
        self.spinner.hide()
        self.spinner.stop()
        self.friend_info.set_sensitive(True)
        self.save_bt.set_sensitive(True)
        self.close_bt.set_sensitive(True)
        self.generate_keys_bt.set_sensitive(True)
        self.copy_clipboard_bt.set_sensitive(True)
    def on_copy_clipboard(self, button):
        """Copy the generated owner key to the system clipboard."""
        self.clipboard.set_text(self.owner_info.get_text(), -1)
    def msg_info(self, text):
        """Show a modal info dialog with the given message."""
        dialog = Gtk.MessageDialog(self.main_window, 0, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, "Info")
        dialog.format_secondary_text(text)
        dialog.run()
        dialog.destroy()
| StarcoderdataPython |
88548 | <reponame>vlue-c/Visual-Explanation-Methods-PyTorch
from .simple_grad import SimpleGradient
| StarcoderdataPython |
1771149 | # Copyright 2019 BlueCat Networks (USA) Inc. and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Various Flask framework items.
import os
import sys
import codecs
import psycopg2
from flask import url_for, redirect, render_template, flash, g
from bluecat import route, util
from bluecat.wtform_extensions import GatewayForm
from bluecat.wtform_fields import TableField
import config.default_config as config
from main_app import app
def module_path():
    """Return the absolute directory containing this module."""
    # The filesystem-encoding lookup in the original was unused; removed.
    return os.path.dirname(os.path.abspath(__file__))
def get_resource_text():
    # Load this workflow's localized UI strings for the configured language.
    return util.get_text(module_path(), config.language)
# Per-entity count queries: each row drives one "SELECT count(id)" in
# load_statistics(); 'id' keys into the 'title_<id>_count' label in the
# localized text resources.
sql_params = [
    {'id': 'entity', 'table': 'public.entity', 'where': ""},
    {'id': 'ip4b', 'table': 'public.entity', 'where': "WHERE discriminator = 'IP4B'"},
    {'id': 'ip4n', 'table': 'public.entity', 'where': "WHERE discriminator = 'IP4N'"},
    {'id': 'ip4a', 'table': 'public.ipv4_address_basic_view', 'where': ""},
    {'id': 'view', 'table': 'public.entity', 'where': "WHERE discriminator = 'VIEW'"},
    {'id': 'zone', 'table': 'public.entity', 'where': "WHERE discriminator = 'ZONE'"},
    {'id': 'record', 'table': 'public.resource_record_view', 'where': ""},
    {'id': 'mac', 'table': 'public.entity', 'where': "WHERE discriminator = 'MACA'"},
    {'id': 'nusr', 'table': 'public.entity', 'where': "WHERE discriminator = 'NUSR'"},
    {'id': 'gusr', 'table': 'public.entity', 'where': "WHERE discriminator = 'GUSR'"},
    {'id': 'location', 'table': 'public.entity', 'where': "WHERE discriminator = 'LOCATION'"},
]
def load_statistics(text):
    """Collect BAM database statistics: DB size plus per-entity-type counts.

    Connects read-only to the BAM PostgreSQL database (address from the
    BAM_IP environment variable) and runs one count query per entry in the
    module-level ``sql_params`` table (static strings — no user input).

    :param text: localized strings; uses ``title_db_size`` and
        ``title_<id>_count`` keys.
    :return: list of ``[label, value]`` rows for the results table.
    """
    data = []
    db_address = os.environ['BAM_IP']
    connector = psycopg2.connect(host=db_address, database="proteusdb", user="bcreadonly")
    # try/finally ensures the cursor and connection are released even when a
    # query fails (the original leaked both on exception).
    try:
        cursor = connector.cursor()
        try:
            db_size_sql = "SELECT pg_size_pretty(pg_database_size('proteusdb')) FROM pg_database"
            cursor.execute(db_size_sql)
            # One scalar is enough; fetchone replaces the original fetchall.
            data.append([text['title_db_size'], cursor.fetchone()[0]])
            for sql_param in sql_params:
                sql = "SELECT count(id) FROM %s %s" % (sql_param['table'], sql_param['where'])
                cursor.execute(sql)
                count = cursor.fetchone()[0]
                data.append([text['title_%s_count' % sql_param['id']], '{:,}'.format(int(count))])
        finally:
            cursor.close()
    finally:
        connector.close()
    return data
def table_features():
    """Returns table formatted data for display in the TableField component"""
    # pylint: disable=unused-argument
    # NOTE(review): the pylint disable above is stale — this function takes
    # no arguments.
    text = get_resource_text()
    data = load_statistics(text)
    return {
        'columns': [
            {'title': text['title_title']},
            {'title': text['title_value']}
        ],
        'columnDefs': [
            # Right-align the value column.
            {'className': 'dt-right', 'targets': [1]}
        ],
        # Static single-page table: disable search/sort/paging chrome.
        'searching': False,
        'ordering': False,
        'paging': False,
        'info': False,
        'lengthChange': False,
        'data': data
    }
class GenericFormTemplate(GatewayForm):
    """
    Generic form Template

    Read-only form: one TableField rendering the database statistics from
    table_features(). NOTE(review): table_features() (and therefore the DB
    query) runs once at class-definition/import time, not per request —
    confirm this caching is intended.

    Note:
        When updating the form, remember to make the corresponding changes to the workflow pages
    """
    workflow_name = 'view_db_statistics'
    workflow_permission = 'view_db_statistics_page'
    text = get_resource_text()
    output_table = TableField(
        workflow_name=workflow_name,
        permissions=workflow_permission,
        label=text['label_list'],
        table_features=table_features(),
        is_disabled_on_start=False
    )
# The workflow name must be the first part of any endpoints defined in this file.
# If you break this rule, you will trip up on other people's endpoint names and
# chaos will ensue.
@route(app, '/view_db_statistics/view_db_statistics_endpoint')
@util.workflow_permission_required('view_db_statistics_page')
@util.exception_catcher
def view_db_statistics_view_db_statistics_page():
    """Render the DB-statistics page for users holding the workflow permission."""
    form = GenericFormTemplate()
    return render_template(
        'view_db_statistics_page.html',
        form=form,
        text=util.get_text(module_path(), config.language),
        options=g.user.get_options(),
    )
| StarcoderdataPython |
4827583 | # hexutil.py
"""Miscellaneous utility routines relating to hex and byte strings"""
# Copyright (c) 2008-2012 <NAME>
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from binascii import a2b_hex, b2a_hex
from dicom import in_py3
def hex2bytes(hexstring):
    """Return bytestring for a string of hex bytes separated by whitespace
    This is useful for creating specific byte sequences for testing, using
    python's implied concatenation for strings with comments allowed.
    Example:
        hex_string = (
            "08 00 32 10"   # (0008, 1032) SQ "Procedure Code Sequence"
            " 08 00 00 00"  # length 8
            " fe ff 00 e0"  # (fffe, e000) Item Tag
        )
        byte_string = hex2bytes(hex_string)
    Note in the example that all lines except the first must start with a space,
    alternatively the space could end the previous line.
    """
    # split() with no argument splits on any run of whitespace, so spaces,
    # tabs and newlines between hex digits are all accepted.  (The previous
    # `replace(" ", "")` only stripped plain spaces, contradicting the
    # "separated by whitespace" contract above.)
    return a2b_hex("".join(hexstring.split()))
def bytes2hex(byte_string):
    """Return a readable string of two-digit hex values separated by spaces."""
    hex_repr = b2a_hex(byte_string)
    if in_py3:
        # b2a_hex returns bytes on python 3; convert to str for display.
        hex_repr = hex_repr.decode()
    pairs = (hex_repr[pos:pos + 2] for pos in range(0, len(hex_repr), 2))
    return " ".join(pairs)
| StarcoderdataPython |
1784904 | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# a = np.linspace(0,10)
# b = np.linspace(0,5)
# plt.figure()
# plt.plot(a,b)
# plt.show()
# Synthetic training data: y = 0.1 * x + 0.3 with x uniform in [0, 1).
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3
# Scatter-plot the raw training points (red crosses).
plt.figure()
plt.plot(x_data , y_data, color='red', marker='x',linestyle='')
# Single-feature linear model: y = Weights * x + biases.
# NOTE(review): this uses the TensorFlow 1.x graph/session API
# (tf.Session, tf.random_uniform, tf.train.GradientDescentOptimizer);
# it will not run under TF 2.x without tf.compat.v1.
Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))
y = Weights * x_data + biases
# Mean squared error loss, minimized by plain gradient descent (lr = 0.5).
loss = tf.reduce_mean(tf.square(y-y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Train for 201 steps, logging the fitted parameters and loss each step.
for step in range(201):
    sess.run(train)
    print(step, sess.run(Weights), sess.run(biases) , sess.run(loss))
print("\n----------------END----------------")
# Fetch the final parameters and overlay the fitted line (green, dashed).
w = sess.run(Weights)
b = sess.run(biases)
# print("w : " + str(w) + " b : " + str(b))
#
lin_x = np.linspace(0,1,100)
result_y = w * lin_x + b
plt.plot(lin_x , result_y ,color='green',linestyle='--')
plt.show()
3358030 | <filename>blender_bindings/ui/export_nodes/model_tree_nodes.py
from typing import List
import bpy
from bpy.types import NodeTree, Node, Operator
from . import nodes
class SourceIO_OP_EvaluateNodeTree(Operator):
    """Walk every SourceIOModelNode in the active node tree and dump a
    QC-style description of each model into the text datablock named 'qc'."""
    bl_idname = "sourceio.evaluate_nodetree"
    bl_label = "Evaluate tree"
    tmp_file: bpy.types.Text
    def execute(self, context: bpy.types.Context):
        # Reuse the 'qc' text datablock if it already exists, otherwise create it.
        if not bpy.data.texts.get('qc', False):
            self.tmp_file = bpy.data.texts.new('qc')
        else:
            self.tmp_file = bpy.data.texts['qc']
        all_nodes = context.space_data.node_tree.nodes
        # Model nodes are the roots of the evaluation.
        outputs = []  # type:List[Node]
        for node in all_nodes:  # type: Node
            if node.bl_idname == "SourceIOModelNode":
                outputs.append(node)
        for output in outputs:  # type:nodes.SourceIOModelNode
            self.traverse_tree(output)
        return {'FINISHED'}
    def traverse_tree(self, start_node: nodes.SourceIOModelNode):
        """Append the model's name, meshes, bodygroups and skins to tmp_file."""
        self.tmp_file.write(start_node.model_name + "\n")
        objects = start_node.inputs['Objects']
        bodygroups = start_node.inputs['Bodygroups']
        skins = start_node.inputs['Skin']
        if objects.is_linked:
            # One "mesh" line per linked object node.
            for link in objects.links:
                object_node: nodes.SourceIOObjectNode = link.from_node
                self.tmp_file.write("\tmesh " + object_node.get_value().obj.name + "\n")
        if bodygroups.is_linked:
            self.tmp_file.write("Bodygroups:\n")
            for link in bodygroups.links:
                bodygroup_node = link.from_node  # type: nodes.SourceIOBodygroupNode
                self.tmp_file.write(str(bodygroup_node.get_value()))
                self.tmp_file.write('\n')
        if skins.is_linked:
            # Only a single skin input is expected; take the first link.
            skin_node = skins.links[0].from_node  # type: nodes.SourceIOSkinNode
            self.tmp_file.write(str(skin_node.get_value()))
            self.tmp_file.write('\n')
class SourceIO_NT_ModelTree(NodeTree):
    """Node tree for defining SourceIO model exports."""
    bl_idname = 'sourceio.model_definition'
    bl_label = "SourceIO model definition"
    bl_icon = 'NODETREE'

    def update(self, ):
        """Called by Blender when the tree changes: refresh every node, drop
        links whose sockets have mismatched types, then deduplicate links."""
        for node in self.nodes:
            node.update()
        # BUG FIX: the original removed links from self.links while iterating
        # self.links, which can skip elements; collect the invalid links
        # first, then remove them (same pattern as check_link_duplicates).
        mismatched = [link for link in self.links  # type: bpy.types.NodeLink
                      if link.from_socket.bl_idname != link.to_socket.bl_idname]
        for link in mismatched:
            self.links.remove(link)
        self.check_link_duplicates()

    def check_link_duplicates(self):
        """Remove all but the first link between any pair of nodes."""
        to_remove = []
        for link in self.links:
            for link2 in self.links:
                # Skip self-comparison and links already marked for removal.
                if link == link2 or link in to_remove:
                    continue
                if link.from_node == link2.from_node and link.to_node == link2.to_node:
                    to_remove.append(link2)
                    break
        for link in to_remove:
            self.links.remove(link)
| StarcoderdataPython |
3354052 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (c) 2015 <NAME>, <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Functions and constants to scale current 4-20 mA to measurements, and vice versa.
"""
__all__ = ['P1Flow', 'P1Level', 'current_to_signal', 'signal_to_current']
class P1Level(object):
    """Scaling constants for the P1 level signal (engineering units <-> raw counts)."""
    EUMAX = 1225.0
    EUMIN = 0.0
    RAWMAX = 31208.0
    RAWMIN = 0.0
class P1Flow(object):
    """Scaling constants for the P1 flow signal (engineering units <-> raw counts)."""
    EUMAX = 10.0
    EUMIN = 0.0
    RAWMAX = 31208.0
    RAWMIN = -15.0
def current_to_signal(raw_in, scaling):
    """
    Scales the input signal from current 4 - 20 mA to the human readable measurements.
    :param raw_in: current value.
    :param scaling: scaling constants.
    :return: signal value.
    """
    # Linear interpolation from the raw range onto the engineering-unit range.
    span_ratio = (scaling.EUMAX - scaling.EUMIN) / (scaling.RAWMAX - scaling.RAWMIN)
    return (raw_in - scaling.RAWMIN) * span_ratio + scaling.EUMIN
def signal_to_current(scale_in, scaling):
    """
    Scales the input signal from human readable measurements to current 4 - 20 mA.
    :param scale_in: signal value.
    :param scaling: scaling constants.
    :return: current value.
    """
    # Inverse of current_to_signal; negative raw values are clamped to zero.
    span_ratio = (scaling.EUMAX - scaling.EUMIN) / (scaling.RAWMAX - scaling.RAWMIN)
    raw = (scale_in - scaling.EUMIN) / span_ratio + scaling.RAWMIN
    return 0 if raw < 0 else raw
| StarcoderdataPython |
3295028 | <filename>tools/binprof.py
#!/usr/bin/env python3
import subprocess
from dataclasses import dataclass
from typing import Optional
from pprint import pprint
from collections import defaultdict
import argparse
# Details of what the bufferent `symbol_type`s mean
# https://sourceware.org/binutils/docs/binutils/nm.html
# This article hit the same problem with nm
# https://web.archive.org/web/20190317203555/https://www.embeddedrelated.com/showarticle/900.php
# They looked up details with readelf/objdump
parser = argparse.ArgumentParser(description='RAM and Flash profiling')
parser.add_argument('binary', type=str, help='The path to the binary to profile')
parser.add_argument('--other', type=str, default="other", help='What to classify unknown memory as')
# BUG FIX: this flag used action='store_false' with default=False, so the value
# was False whether or not the flag was passed and the option had no effect
# (the script always raised on unknown symbols).  With store_true/default=False
# the default behavior is unchanged (raise on unknown) and passing the flag
# suppresses the error, matching the flag's name.
parser.add_argument('--no-error-if-unknown', action='store_true', default=False,
                    help='Do not raise an error if there is memory classified as other')
args = parser.parse_args()
# Ask nm for every symbol with its size (decimal radix) and source location,
# sorted by size; the output is parsed below.
result = subprocess.run(
    f"nm --print-size --size-sort --radix=d --line-numbers {args.binary}",
    check=True,
    shell=True,
    capture_output=True,
    encoding="utf-8",
    universal_newlines=True,
)
@dataclass(frozen=True)
class Result:
    """A single symbol row from nm's output.

    position and size arrive as decimal strings and are coerced to int in
    __post_init__; location is the optional file/line of the definition.
    """
    position: int
    size: int
    symbol_type: str
    name: str
    location: Optional[str] = None

    def __post_init__(self):
        # The dataclass is frozen, so bypass the immutability guard via
        # object.__setattr__ to coerce the numeric fields in place.
        object.__setattr__(self, "position", int(self.position))
        object.__setattr__(self, "size", int(self.size))
# Partition the nm output into flash (code) and RAM (data/bss) symbols.
flash_symb = []
ram_symb = []
for line in result.stdout.split("\n"):
    if not line:
        continue
    details = line.split(' ')
    # nm separates the optional file/line location with a tab; split it off
    # so Result receives the location as its own field.
    if "\t" in details[-1]:
        details = details[:-1] + details[-1].split("\t")
    r = Result(*details)
    # Contiki's ramprof picks up [abdrw] and flashprof picks up [t] (both case insensitive)
    if r.symbol_type in "Tt":
        flash_symb.append(r)
    elif r.symbol_type in "abdrwABDRW":
        ram_symb.append(r)
    else:
        raise RuntimeError(f"Unknown symbol type {r.symbol_type}")
def summarise(symbs):
    """Total number of bytes taken by the given symbols."""
    total = 0
    for symb in symbs:
        total += symb.size
    return total
def classify(symb, other="other"):
    """Map a symbol to the subsystem/category it belongs to.

    Symbols with a known source location are classified by path; location-less
    symbols (mostly statics declared inside functions, which nm cannot
    attribute) are matched by name against a hand-maintained list.  Anything
    unmatched falls into the *other* category.
    """
    # I apologise for this horrible function.
    # nm is unable to pick up the correct location of static variables declared inside functions
    # So we need to manually classify these variables
    # Other variables simply do not have a location for some unknown reason
    if symb.location is None:
        if symb.name in ("process_current", "process_list", "curr_instance", "linkaddr_null",
                         "linkaddr_node_addr", "etimer_process", "csma_driver", "drop_route", "framer_802154"):
            return "contiki-ng/net"
        if symb.name.startswith(("uip", "sicslowpan_", "rpl_", "tcpip_", "root_ipaddr.")):
            return "contiki-ng/net"
        if symb.name in ("serial_line_process", "sensors_process", "serial_line_event_message",
                         "curr_log_level_main", "curr_log_level_coap", "button_hal_periodic_event",
                         "button_hal_press_event", "button_hal_release_event", "node_id", "sensors_event"):
            return "contiki-ng"
        if symb.name.startswith(("heap_end.",)):
            return "contiki-ng"
        if symb.name in ("bignum_add_get_result", "ecc_add_get_result", "vdd3_sensor", "vectors"):
            return "contiki-ng/cc2538"
        if symb.name.startswith("cc2538_"):
            return "contiki-ng/cc2538"
        if symb.name.startswith("reset_cause."):
            return "contiki-ng/cc2538"
        if symb.name.startswith("p05."):  # I think this refers to MP3_WTV020SD_P05_PORT
            return "contiki-ng/cc2538"
        if symb.name in ("coap_status_code", "coap_error_message", "coap_timer_default_driver"):
            return "contiki-ng/coap"
        if symb.name.startswith(("message.", "response.")):
            return "contiki-ng/coap"
        if symb.name in ("pe_edge_capability_add", "pe_edge_capability_remove"):
            return "system/trust"
        # BUG FIX: "pe_message_signed" was listed twice; duplicate removed.
        if symb.name in ("pe_message_signed", "pe_message_verified",
                         "root_cert", "our_cert", "our_privkey"):
            return "system/crypto"
        if symb.name.startswith(("verify_state.", "sign_state.", "ecdh2_unver_state.", "ecdh2_req_state.",
                                 "pkitem.", "sitem.", "vitem.")):
            return "system/crypto"
        # BUG FIX: `in ("mqtt_client_process")` was a *substring* test against a
        # plain string (the parentheses did not make a tuple), so e.g. a symbol
        # named "client" matched.  A one-element tuple restores exact matching.
        if symb.name in ("mqtt_client_process",):
            return "system/mqtt-over-coap"
        if symb.name in ("pe_timed_unlock_unlocked", "root_ep", "autostart_processes"):
            return "system/common"
        if symb.name in ("pe_data_from_resource_rich_node", "resource_rich_edge_started",
                         "applications_available", "application_names"):  # Edge specific
            return "system/common"
        if symb.name in ("_C_numeric_locale", "__mprec_bigtens", "__mprec_tinytens", "__mprec_tens",
                         "__hexdig", "_ctype_", "_impure_ptr"):
            return "newlib"
        if symb.name.startswith(("fpinan.", "fpi.")):
            return "newlib"
        return other
    # Location is known: classify by source path (order matters — more
    # specific paths are tested before their prefixes, e.g. the coap
    # app-layer before the generic os/net).
    if "newlib" in symb.location or "libgcc" in symb.location:
        return "newlib"
    if "oscore" in symb.location:
        return "contiki-ng/oscore"
    if "os/net/app-layer/coap" in symb.location:
        return "contiki-ng/coap"
    if "os/net" in symb.location:
        return "contiki-ng/net"
    if "arch/cpu/cc2538" in symb.location or "arch/platform/zoul" in symb.location:
        return "contiki-ng/cc2538"
    if "applications/monitoring" in symb.location:
        return "applications/monitoring"
    if "applications/routing" in symb.location:
        return "applications/routing"
    if "applications/challenge-response" in symb.location:
        return "applications/challenge-resp"
    if any(osdir in symb.location for osdir in ("os/lib", "os/sys", "os/dev", "os/contiki")):
        return "contiki-ng"
    if "crypto" in symb.location:
        return "system/crypto"
    if "trust" in symb.location:
        return "system/trust"
    if "mqtt-over-coap" in symb.location:
        return "system/mqtt-over-coap"
    if "wsn/node" in symb.location or "wsn/edge" in symb.location:
        return "system/common"
    return other
def classify_all(symbs, other="other"):
    """Group symbols into {category: [symbols]} using classify()."""
    grouped = {}
    for symb in symbs:
        grouped.setdefault(classify(symb, other=other), []).append(symb)
    return grouped
# Group symbols by subsystem and total their sizes.
classified_ram_symb = classify_all(ram_symb, other=args.other)
summarised_ram_symb = {k: summarise(v) for k, v in classified_ram_symb.items()}
classified_flash_symb = classify_all(flash_symb, other=args.other)
summarised_flash_symb = {k: summarise(v) for k, v in classified_flash_symb.items()}
# Report symbols that fell into the catch-all category so they can be
# classified explicitly.  BUG FIX: the catch-all key is args.other, not the
# literal "other" — using the literal silently missed every unknown symbol
# whenever --other was given a different name.  Also only print each header
# when its section actually has entries (previously the header was printed
# before the KeyError was raised and swallowed).
unknown_key = args.other
if unknown_key in classified_ram_symb or unknown_key in classified_flash_symb:
    if unknown_key in classified_ram_symb:
        print("RAM unknown:")
        pprint(classified_ram_symb[unknown_key])
    if unknown_key in classified_flash_symb:
        print("Flash unknown:")
        pprint(classified_flash_symb[unknown_key])
    if not args.no_error_if_unknown:
        raise RuntimeError("Symbols with an unknown classification")
# Emit the LaTeX summary rows: category & flash bytes & flash % & RAM bytes & RAM %.
total_flash_symb = sum(summarised_flash_symb.values())
total_ram_symb = sum(summarised_ram_symb.values())
keys = set(summarised_ram_symb.keys()) | set(summarised_flash_symb.keys())
for k in sorted(keys):
    # A category may appear in only one of the two tables; default to 0
    # instead of raising KeyError.
    flash_size = summarised_flash_symb.get(k, 0)
    ram_size = summarised_ram_symb.get(k, 0)
    print(f"{k} & {flash_size} & {round(100*flash_size/total_flash_symb, 1)} & {ram_size} & {round(100*ram_size/total_ram_symb, 1)} \\\\")
print("\\midrule")
print(f"Total Used & {total_flash_symb} & 100 & {total_ram_symb} & 100 \\\\")
print()
# Per-structure RAM usage table for the statically allocated MEMB pools.
# Each entry: (display name, configuration macro, element count, variable name);
# None entries become LaTeX \midrule separators.
config = [
    ('Certificates', 'PUBLIC_KEYSTORE_SIZE', 12, 'public_keys_memb'),
    ('Stereotypes', 'MAX_NUM_STEREOTYPES', 5, 'stereotypes_memb'),
    ('Edges', 'NUM_EDGE_RESOURCES', 4, 'edge_resources_memb'),
    ('Edge Capabilities', 'NUM_EDGE_CAPABILITIES', 3 * 4, 'edge_capabilities_memb'),
    ('Peers', 'NUM_PEERS', 8, 'peers_memb'),
    ('Peer Edges', 'NUM_PEERS', 8 * 4, 'peer_edges_memb'),
    ('Peer Edge Capabilities', 'NUM_PEERS', 8 * 4 * 3, 'peer_capabilities_memb'),
    None,
    ('Reputation Tx Buffer', 'TRUST_TX_SIZE', 2, 'trust_tx_memb'),
    ('Reputation Rx Buffer', 'TRUST_RX_SIZE', 2, 'trust_rx_memb'),
    None,
    ('Sign Buffer', 'MESSAGES_TO_SIGN_SIZE', 3, 'messages_to_sign_memb'),
    ('Verify Buffer', 'MESSAGES_TO_VERIFY_SIZE', 3, 'messages_to_verify_memb'),
]
for conf in config:
    if conf is None:
        print("\\midrule")
        continue
    (nice_name, cname, num, vname) = conf
    # Exactly one backing "<name>_memb_mem" symbol is expected per pool.
    matching = [x for x in ram_symb if x.name == vname + "_memb_mem"]
    if len(matching) == 1:
        size = matching[0].size
        print(f"{nice_name} & {num} & {int(size/num)} & {size} \\\\ % {vname}")
    else:
        print(f"Missing {vname}")
| StarcoderdataPython |
1689462 | import re
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
class TableIncrementPreprocessor(Preprocessor):
    """Rewrites '| 1. |' table cells into an auto-incrementing counter.

    The counter resets whenever a line without the marker is encountered,
    so each table numbers its rows independently.
    """

    def run(self, lines):
        counter = 1
        out = []
        for line in lines:
            match = re.match(r"\|\s*(1\.)\s*\|", line)
            if match:
                out.append(line.replace(match.group(), "| %d |" % counter))
                counter += 1
            else:
                counter = 1
                out.append(line)
        return out
# Match the '| 1. |' marker in markdown table syntax and replace it with an auto-incrementing sequence number
class TableIncrementExtension(Extension):
    """Markdown extension that registers the auto-increment table preprocessor."""
    def extendMarkdown(self, md):
        # Registered with priority 0 — presumably so it runs after the other
        # preprocessors; confirm against the markdown registry ordering.
        md.preprocessors.register(TableIncrementPreprocessor(md), "table_increment", 0)
def makeExtension(**kwargs):
    """Entry point used by the markdown library to instantiate the extension."""
    return TableIncrementExtension(**kwargs)
| StarcoderdataPython |
3215463 | # -*- coding: utf-8 -*-
# File: optimizer.py
from contextlib import contextmanager
import tensorflow as tf
from ..tfutils.common import get_tf_version_tuple
from ..compat import tfv1
from ..utils.develop import HIDE_DOC
from .gradproc import FilterNoneGrad, GradientProcessor
__all__ = ['apply_grad_processors', 'ProxyOptimizer',
'PostProcessOptimizer', 'VariableAssignmentOptimizer',
'AccumGradOptimizer']
class ProxyOptimizer(tfv1.train.Optimizer):
    """
    A transparent proxy which delegates all methods of :class:`tf.train.Optimizer`
    """
    def __init__(self, opt, name='ProxyOptimizer'):
        assert isinstance(opt, tfv1.train.Optimizer), opt
        # First positional argument is use_locking=False.
        super(ProxyOptimizer, self).__init__(False, name)
        self._opt = opt

    @HIDE_DOC
    def compute_gradients(self, *args, **kwargs):
        return self._opt.compute_gradients(*args, **kwargs)

    @HIDE_DOC
    def get_slot(self, *args, **kwargs):
        return self._opt.get_slot(*args, **kwargs)

    @HIDE_DOC
    def get_slot_names(self, *args, **kwargs):
        return self._opt.get_slot_names(*args, **kwargs)

    @HIDE_DOC
    def apply_gradients(self, *args, **kwargs):
        return self._opt.apply_gradients(*args, **kwargs)
def apply_grad_processors(opt, gradprocs):
    """
    Wrapper around optimizers to apply gradient processors.
    Args:
        opt (tf.train.Optimizer):
        gradprocs (list[GradientProcessor]): gradient processors to add to the
            optimizer.
    Returns:
        a :class:`tf.train.Optimizer` instance which runs the gradient
        processors before updating the variables.
    """
    assert isinstance(gradprocs, (list, tuple)), gradprocs
    for gp in gradprocs:
        assert isinstance(gp, GradientProcessor), gp

    class _ApplyGradientProcessor(ProxyOptimizer):
        def __init__(self, opt, gradprocs):
            # Copy the list so later mutation by the caller has no effect.
            self._gradprocs = gradprocs[:]
            super(_ApplyGradientProcessor, self).__init__(opt)

        def apply_gradients(self, grads_and_vars,
                            global_step=None, name=None):
            # Run the processors over (grad, var) pairs before delegating.
            g = self._apply(grads_and_vars)
            return self._opt.apply_gradients(g, global_step, name)

        def _apply(self, g):
            # Each processor transforms the full list in order.
            for proc in self._gradprocs:
                g = proc.process(g)
            return g

    return _ApplyGradientProcessor(opt, gradprocs)
class PostProcessOptimizer(ProxyOptimizer):
    """
    An optimizer which applies some "post-processing operation" per variable
    (e.g. clipping, quantization) after the gradient update.
    """
    def __init__(self, opt, func, colocate=True):
        """
        Args:
            opt (tf.train.Optimizer):
            func (tf.Variable -> tf.Operation or None): the operation needed
                to perform for this variable after the gradient update.
            colocate (boolean): colocate the function with the variable. No effect since TF 1.13.
        """
        super(PostProcessOptimizer, self).__init__(opt)
        self._func = func
        self._colocate = colocate

    @HIDE_DOC
    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        update_op = super(PostProcessOptimizer, self).apply_gradients(
            grads_and_vars, global_step)
        ops = []
        # Post-processing ops must run only after the gradient update.
        with tf.control_dependencies([update_op]):
            for _, var in grads_and_vars:
                with self._maybe_colocate(var):
                    op = self._func(var)
                    # func may return None to skip this variable.
                    if op is not None:
                        assert isinstance(op, tf.Operation), op
                        ops.append(op)
        update_op = tf.group(update_op, *ops, name=name)
        return update_op

    @contextmanager
    def _maybe_colocate(self, var):
        G = tf.get_default_graph()
        # colocate_with is only applied on TF <= 1.12 (no effect since 1.13).
        if self._colocate and get_tf_version_tuple() <= (1, 12):
            with G.colocate_with(var):
                yield
        else:
            yield
class VariableAssignmentOptimizer(PostProcessOptimizer):
    """
    An optimizer which assigns each variable a new value (e.g. clipping,
    quantization) after the gradient update.
    """
    def __init__(self, opt, func):
        """
        Args:
            opt (tf.train.Optimizer):
            func (tf.Variable -> tf.Tensor or None): the new value to be
                assigned to this variable after the gradient update.
        """
        def f(v):
            # Adapt the value-returning `func` into the op-returning callback
            # expected by PostProcessOptimizer.
            t = func(v)
            if t is None:
                return t
            return tf.assign(v, t, use_locking=False).op
        super(VariableAssignmentOptimizer, self).__init__(opt, f)
class AccumGradOptimizer(ProxyOptimizer):
    """
    An optimizer which accumulates gradients across :math:`k` :meth:`minimize` executions,
    and apply them together in every :math:`k` th :meth:`minimize` execution.
    This is roughly the same as using a :math:`k` times larger batch size plus a
    :math:`k` times larger learning rate, but uses much less memory.
    This optimizer can be used in any TensorFlow code (with or without tensorpack).
    Example:
    .. code-block:: python
        from tensorpack.tfutils.optimizer import AccumGradOptimizer
        myopt = tf.train.GradientDescentOptimizer(0.01)
        myopt = AccumGradOptimizer(myopt, niter=5)
        train_op = myopt.minimize(loss)
    """

    def __init__(self, opt, niter):
        """
        Args:
            opt (tf.train.Optimizer): the underlying sub-optimizer.
            niter (int): number of iterations to accumulate gradients.
        """
        super(AccumGradOptimizer, self).__init__(opt, 'AccumGrad')
        self._niter = int(niter)

    def _create_accum_slots(self, var_list):
        # One zero-initialized "accum" slot per variable to hold the running
        # gradient sum between updates.
        slots = []
        for v in var_list:
            # TODO an option to not colocate the accumulators with variables (to save more memory)
            s = self._zeros_slot(v, "accum", self._name)
            slots.append(s)
        return slots

    @HIDE_DOC
    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        grads_and_vars = FilterNoneGrad().process(grads_and_vars)
        vs = []
        for g, v in grads_and_vars:
            assert isinstance(g, (tf.Tensor, tf.IndexedSlices)) and isinstance(v, tf.Variable), \
                "AccumGradOptimizer does not work for the gradient of {}! " \
                "Types of v and g are {} and {}".format(v.op.name, type(v), type(g))
            vs.append(v)
        # Slot variables are created outside any control-dependency context.
        with tf.control_dependencies(None):
            slots = self._create_accum_slots(vs)
            slots_and_vars = [(s, gv[1]) for s, gv in zip(slots, grads_and_vars)]
            # Counter lives on CPU; tracks how many gradients were accumulated.
            with tf.variable_scope(self._name), tf.device('/cpu:0'):
                counter = tf.Variable(
                    0, name="counter", trainable=False, dtype=tf.int32)
        with tf.name_scope('AccumGradOptimizer'):
            ops = []
            # Add each incoming gradient into its accumulator slot.
            for s, gv in zip(slots, grads_and_vars):
                g, v = gv
                ops.append(s.assign_add(g))
            update_counter = tf.assign_add(counter, 1, name='update_counter')
            update_slot_op = tf.group(update_counter, *ops, name='update_slot')

            def update_grad():
                # Apply the accumulated gradients, then zero the slots.
                update_op = self._opt.apply_gradients(slots_and_vars)
                with tf.control_dependencies([update_op]):
                    clear_ops = [tf.assign(s, tf.zeros_like(s)) for s in slots]
                return tf.group(*clear_ops, name='update_grad')

            # Only every niter-th call actually updates the variables.
            pred = tf.equal(tf.mod(counter, self._niter), 0)
            with tf.control_dependencies([update_slot_op]):
                if name is None:
                    name = 'cond_update_grad'
                op = tf.cond(pred, update_grad, tf.no_op)
            if global_step is not None:
                # Tensorpack maintains global_step by other means,
                # so this option is useless in tensorpack trainers.
                # But we include the implementation here for completeness
                global_step_increment = tf.assign_add(global_step, 1)
                op = tf.group(op, global_step_increment, name=name)
            else:
                op = tf.identity(op, name=name).op
        return op
if __name__ == '__main__':
    # run it with "python -m tensorpack.tfutils.optimizer"
    # Smoke test: minimize sum(|x|) while accumulating gradients over 5
    # iterations, so the variables only change on every 5th run.
    x = tf.get_variable('x', shape=[6])
    cost = tf.reduce_sum(tf.abs(x), name='cost')
    opt = tf.train.GradientDescentOptimizer(0.01)
    opt = AccumGradOptimizer(opt, 5)
    min_op = opt.minimize(cost, global_step=tf.train.get_or_create_global_step())
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    with sess.as_default():
        for _ in range(20):
            min_op.run()
        print(x.eval())
        print(tf.train.get_or_create_global_step().eval())
| StarcoderdataPython |
4834792 | #!/usr/bin/env python
from setuptools import setup, find_packages
import provider
setup(
    name='edx-django-oauth2-provider',
    # Version is single-sourced from the package itself.
    version=provider.__version__,
    description='edX fork of django-oauth2-provider',
    long_description=open('README.rst').read(),
    author='edX',
    author_email='<EMAIL>',
    url='https://github.com/edx/django-oauth2-provider',
    # Ship everything except the test packages.
    packages=find_packages(exclude=('tests*',)),
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    install_requires=['shortuuid>=0.4.3,<1.0.0', 'Django>=1.7,<2.1'],
    include_package_data=True,
    zip_safe=False,
)
| StarcoderdataPython |
78593 | <gh_stars>0
# coding: utf-8
# Interactive (notebook-exported) script that turns a file into a Nikola blog
# post: it prompts for the blog directory, file name, extension and tags,
# locates or writes the post content, copies it into <blog>/posts/ and writes
# the matching .meta file (title/slug/date/tags).
# ##### <h1>BroBeurKids Nikola</h1>
#
# This script deals with creating data for BroBeurKids/wcmckee Nikola site.
#
# The directory to look at is brobeurkidsdotcom/posts
# or folder
#
# wcmckee.com /posts
#
# /github folders are scanned with the input for folders.
# It's basically a search for notebook, then turn notebook into a blog posts.
#
# Arrow is used to generate the date (YR/MONTH/DAY),
# and time(HR : MIN: SEC)
#
# This is where ipynb files are kept.
#
# This script generates the .meta files needed.
#
# The meta file is called the same as the ipynb. It also contains the slug and title, and date.
# The date is collected by looking at the save date of the ipynb.
#
# Create preconfig files. A posts folder that one item is published.
# Depending on prepost folder deturms the post folder it's moved to.
# Sequale and post certain times a day.
# This script creates a blog post and saves it in posts folder along with the .meta file for it.
# Config file for the script. Specify a list of blog names.
# Reads a json file that contains: blog name (school name), author (input username), twitter config dir, domain name (school name - space and crap),
#
# Do login/logout via blog post.
#
# Title should accept spaces - slug should be edited to remove space and replace with -
# In[1]:
import os
import getpass
from walkdir import filtered_walk, dir_paths, all_paths, file_paths
import arrow
#import nikola
#from TwitterFollowBot import TwitterBot
# In[2]:
# Timestamp captured once at startup; all date/time strings derive from it.
raw = arrow.utcnow()
# In[3]:
def returntime():
    # Current time-of-day portion (HH:MM:SS) of the startup timestamp.
    return raw.strftime('%H:%M:%S')
# In[4]:
yraw = raw.strftime("%Y")
mntaw = raw.strftime("%m")
dytaw = raw.strftime("%d")
#gmtz.strftime("%Y")
# In[5]:
# Date as YYYY/MM/DD, then full "YYYY/MM/DD HH:MM:SS" used in the .meta file.
fulda = yraw + '/' + mntaw + '/' + dytaw
# In[6]:
fultim = fulda + ' ' + returntime()
# In[7]:
#fultim
# In[8]:
#gtur = getpass.getuser()
# In[9]:
#lisbbkn = os.listdir('/home/' + gtur + '/brobeurkidsdotcom/posts')
# In[10]:
#lisbbkn
# In[11]:
#for lisb in lisbbkn:
#    if '.ipynb' in lisb:
#        print lisb
# In[12]:
#Name of notebook you want to turn into a blog
#Could check the folder (walkdir) for files not
#in the wcmckee.com posts folder.
#Tags. Modules used.
#Look at the file and extract out modules imported,
#using these as tags.
# In[13]:
# Interactive configuration: blog directory, file to publish, and tags.
podir = input('blog dir: ')
# In[14]:
nbog = input('Name of file to blog: ')
# In[15]:
etnam = input('Extension of file to blog: ')
# In[16]:
tagmak = input('post tags: ')
# In[17]:
#pear = input('path to search: ')
# In[18]:
# jsve collects metadata about the post (filename, tags, date, paths).
jsve = dict()
# In[19]:
nbog + etnam
# In[21]:
#Write the blog post
#Ask to write content or publish
writecont = input('Write content? y/N ')
if 'y' in writecont:
    contenmak = input('content: ')
else:
    #search or manually locate fil.
    # NOTE(review): `pear` holds the y/N answer here but is then passed to
    # filtered_walk() as the directory to search — the search root prompt
    # (see the commented-out In[17] above) appears to have been lost.
    pear = input('path to search: y/N')
    if 'y' in pear:
        files = file_paths(filtered_walk(pear, depth=100, included_files=[nbog + etnam]))
        for fil in files:
            print (fil)
            jsve.update({'filefound' : fil})
    else:
        patlim = input('path of file: ')
        jsve.update({'filefound' : patlim + nbog + etnam})
# In[22]:
#fil
# In[23]:
#add extra tags in
# In[24]:
jsve
# In[25]:
#lastbit = contenmak[:50]
# In[26]:
#contenmak[-50:]
# In[27]:
#sampb = lastbit + '... ' + contenmak[-50:]
# In[28]:
#sampb
# In[ ]:
# In[29]:
#savewhere = input('dir to save post: ')
# In[30]:
# Turn the comma-separated tag string into '#tag' hashtags (spaces stripped).
my_list = tagmak.split(",")
# In[31]:
my_list
# In[32]:
hashtag = list()
# In[33]:
for myl in my_list:
    #print ('#' + myl.replace(' ', ''))
    hashtag.append(('#' + myl.replace(' ', '')))
# In[34]:
#bro_bot = TwitterBot('/home/wcmckee/wcmnot/wcmckee-notebook/config.txt')
#bro_ot = TwitterBot(podir + '/config.txt')
# In[35]:
#for has in hashtag:
#    print (has)
#    bro_bot.auto_follow(has, count=1)
# In[36]:
# Space-separated hashtag string (used by the commented-out tweet below).
endstring = ''
for s in hashtag:
    endstring += s + ' '
# In[37]:
endstring
# In[ ]:
# In[38]:
jsve.update({'filename' : nbog + etnam, 'tags' : tagmak, 'date' : fulda, 'time' : returntime()})
# In[39]:
#jsve.update({})
# In[40]:
jsve
# In[41]:
#Search for blog through folders.
# In[42]:
#files = file_paths(filtered_walk(pear, depth=100, included_files=[nbog + etnam]))
# In[43]:
#print files
# In[44]:
#for fil in files:
#    print (fil)
#    jsve.update({'filefound' : fil})
# In[45]:
#jsve['filefound']
# In[46]:
#opblog = ('/home/wcmckee/github/')
# In[47]:
#podir = ('/home/wcmckee/github/wcmckee.com/posts/')
# In[48]:
jsve.update({'blogdir' : podir})
# In[49]:
postsdir = podir + ('/posts/' )
# In[50]:
#os.system('cp ' + jsve['filefound'] + ' ' + postsdir)
# In[51]:
#for fie in files:
#print fie
#print (fie)
#print ('Copy ' + fie + ' to ' + postsdir)
#os.system('cp ' + fie + ' ' + postsdir)
# In[52]:
jsve
# In[ ]:
# In[53]:
# NOTE(review): jsve['filefound'] is only set on the "publish existing file"
# branch — this line raises KeyError when content was written manually, and
# it duplicates the copy done again in In[54] below.  Also, os.system with
# user-supplied paths is shell-injectable; shutil.copy would be safer.
os.system('cp ' + jsve['filefound'] + ' ' + postsdir)
# In[54]:
if 'y' in writecont:
    # Manually-entered content: write it straight into the posts folder.
    oprst = open(podir + '/posts/' + nbog + etnam, 'w')
    oprst.write(contenmak)
    oprst.close()
else:
    os.system('cp ' + jsve['filefound'] + ' ' + postsdir)
# In[55]:
# Nikola .meta file: title, slug, date, tags (one per line).
opeza = open(podir + '/posts/' + nbog + '.meta', 'w')
opeza.write(nbog + '\n' + nbog + '\n' + fultim + '\n' + tagmak)
opeza.close()
# In[134]:
#print (podir + '/posts/' + nbog + '.meta')
# In[135]:
#os.chdir(podir)
#os.system('nikola build')
# In[136]:
#os.system('rsync -azP ' + postsdir + ' ' + '<EMAIL>:/home/wcmckee/bb/blog/posts')
# In[137]:
#bro_bot.search_tweets(ndstring)
# In[138]:
#bro_bot.send_tweet(nbog + ' ' + endstring + ' http://brobeur.com/blog/output/posts/' + nbog + '.html')
# In[ ]:
# In[ ]:
# In[ ]:
1729604 | from src.scenario.scenario_generator import ScenarioGenerator
from src.executors.exact.solve_opf import solve
from src.scenario.scenario import Scenario
from src.grid.grid import Grid
import numpy as np
class GridEnv:
    def __init__(self,
                 grid: Grid,
                 scenario: Scenario,
                 scenario_generator: ScenarioGenerator,
                 tee: bool = False):
        """ Environment class. Combines the DC grid with the information about EVs and power price.
        grid: DC grid
        scenario: scenario specifying EVs and power price
        scenario_generator: ScenarioGenerator object, contains information about all
        EV and power price related distributions
        tee: verbose flag passed through to the OPF solver """
        self.grid = grid
        self.scenario = scenario
        self.scenario_generator = scenario_generator
        self.tee = tee
        self.t_start_ind = 0
        self.t_end_ind = scenario.t_end_ind
        self.timesteps_hr = scenario.timesteps_hr
        # Index of the current time step within timesteps_hr.
        self.t_ind = 0
        self.ptu_size_hr = scenario.ptu_size_hr
        # Per-time-step histories of the grid state (filled in by step()).
        self.V_nodes = np.empty((self.grid.n_nodes, self.timesteps_hr.shape[0]))
        self.P_nodes = np.empty((self.grid.n_nodes, self.timesteps_hr.shape[0]))
        self.I_nodes = np.empty((self.grid.n_nodes, self.timesteps_hr.shape[0]))
        self.I_lines = np.empty((self.grid.n_lines, self.timesteps_hr.shape[0]))
        # EV state-of-charge history; NaN until reset() zeroes it / EVs arrive.
        self.SOC_evs = np.nan * np.ones((len(self.scenario.evs), self.timesteps_hr.shape[0]))
    @property
    def t_hr(self):
        """Current simulation time in hours."""
        return self.timesteps_hr[self.t_ind]
    @property
    def finished(self):
        """True once the time index has stepped past the scenario's last PTU."""
        return self.t_ind > self.t_end_ind
    @property
    def current_SOC(self):
        """State of charge of every EV at the current step, clipped to [0, soc_max].
        NOTE(review): before reset() the SOC array is NaN-initialised and NaN
        propagates through minimum/maximum — confirm callers handle that."""
        return np.minimum([ev.soc_max for ev in self.scenario.evs], np.maximum(0, self.SOC_evs[:, self.t_ind]))
    def reset(self, ):
        """Reset grid state, time index and recorded histories to the scenario start."""
        V_nodes, P_nodes, I_nodes, I_lines = self.grid.get_init_state()
        self.grid.apply_state(V_nodes, P_nodes, I_nodes, I_lines)
        utility_coefs = np.zeros(self.grid.n_nodes)
        # Initial coefficients: loads get 100, generators 1 — presumably so the
        # first OPF strongly favours serving demand; confirm against the solver.
        for load_ind in self.grid.load_inds:
            utility_coefs[load_ind] = 100
        for gen_ind in self.grid.gen_inds:
            utility_coefs[gen_ind] = 1
        self.grid.update_demand_and_price(np.zeros(self.grid.n_nodes), np.zeros(self.grid.n_nodes), utility_coefs)
        self.t_ind = 0
        # Re-allocate the per-step histories; SOC starts at zero here.
        self.V_nodes = np.empty((self.grid.n_nodes, self.timesteps_hr.shape[0]))
        self.P_nodes = np.empty((self.grid.n_nodes, self.timesteps_hr.shape[0]))
        self.I_nodes = np.empty((self.grid.n_nodes, self.timesteps_hr.shape[0]))
        self.I_lines = np.empty((self.grid.n_lines, self.timesteps_hr.shape[0]))
        self.SOC_evs = np.zeros((len(self.scenario.evs), self.timesteps_hr.shape[0]))
        # Seed the SOC of any EV arriving exactly at t = 0.
        for ev_ind, ev in enumerate(self.scenario.evs):
            if self.t_hr == ev.t_arr_hr:
                self.SOC_evs[ev_ind, self.t_ind] = ev.soc_arr
    def step(self, p_demand_min, p_demand_max, utility_coefs, normalize_opf=False):
        """Advance the environment one PTU: cap EV demand so batteries cannot
        overcharge, solve the OPF, record the grid state, then propagate EV
        state of charge to the next time index.
        NOTE(review): p_demand_max (and indirectly p_demand_min) are modified
        in place, so the caller's arrays are mutated — confirm intended."""
        #print('Stepping:', p_demand_min.round(2), '\n', p_demand_max.round(2))
        for load_ind in self.grid.load_inds:
            ev_at_t_at_load = self.scenario.load_evs_presence[load_ind][self.t_ind]
            active_evs_at_t_at_node = [ev for ev in ev_at_t_at_load if ev.t_dep_hr > self.t_hr]
            if len(active_evs_at_t_at_node) > 0:
                ev = active_evs_at_t_at_node[0]
                ev_ind = self.scenario.evs.index(ev)
                # Cap the node's demand so the EV cannot charge past soc_max
                # within one PTU.
                load_p_max = (ev.soc_max - self.SOC_evs[ev_ind, self.t_ind]) / self.scenario.ptu_size_hr
                p_demand_max[load_ind] = min(load_p_max, p_demand_max[load_ind])
        # Keep the bounds consistent: max must never drop below min.
        p_demand_max[p_demand_max < p_demand_min] = p_demand_min[p_demand_max < p_demand_min] + 1e-10
        self.grid.update_demand_and_price(p_demand_min-1e-8, p_demand_max + 1e-8, utility_coefs)
        #print('After corrections:', p_demand_min.round(2), '\n', p_demand_max.round(2))
        # self.tee = True
        #print('LB', p_demand_min)
        #print('UB', p_demand_max)
        loads_p_demand_min = p_demand_min[self.grid.load_inds]
        loads_p_demand_max = p_demand_max[self.grid.load_inds]
        if loads_p_demand_min.max() == loads_p_demand_max.max() == 0:
            # No demand anywhere: skip the solver; nominal voltages, zero flows.
            model = None
            V_nodes = np.array(([n.v_nominal for n in self.grid.nodes]))
            P_nodes = np.zeros(self.grid.n_nodes)
            I_nodes = np.zeros(self.grid.n_nodes)
            I_lines = np.zeros(self.grid.n_lines)
        else:
            model, V_nodes, P_nodes, I_nodes, I_lines = solve(self.grid, tee=self.tee, normalize=normalize_opf)
        self.grid.apply_state(V_nodes, P_nodes, I_nodes, I_lines)
        # Record the solved state for this time step.
        self.V_nodes[:, self.t_ind] = np.copy(V_nodes)
        self.P_nodes[:, self.t_ind] = np.copy(P_nodes)
        self.I_nodes[:, self.t_ind] = np.copy(I_nodes)
        self.I_lines[:, self.t_ind] = np.copy(I_lines)
        self.t_ind += 1
        if not self.finished:
            # Initialise (on arrival) or integrate (while connected) each EV's SOC.
            for ev_ind, ev in enumerate(self.scenario.evs):
                if self.t_hr == ev.t_arr_hr:
                    self.SOC_evs[ev_ind, self.t_ind] = ev.soc_arr
                elif ev.t_arr_hr < self.t_hr <= ev.t_dep_hr:
                    old_soc = self.SOC_evs[ev_ind, self.t_ind - 1]
                    new_soc = old_soc + self.ptu_size_hr * self.P_nodes[ev.load_ind, self.t_ind - 1]
                    self.SOC_evs[ev_ind, self.t_ind] = np.copy(new_soc)
def observe_scenario(self, know_future=False):
    """Return the scenario visible to a controller at the current time.

    With know_future the full (oracle) scenario is returned; otherwise a
    copy with everything after the current step index hidden.
    """
    if not know_future:
        return self.scenario.create_scenario_unknown_future(self.t_ind)
    return self.scenario
def get_cost_coefs(self):
    """Per-node utility/cost coefficients for the current timestep.

    Generators get the current wholesale power price; each load with an
    active (not yet departed) EV gets that EV's utility coefficient.
    """
    coefs = np.zeros(self.grid.n_nodes)
    coefs[self.grid.gen_inds] = self.scenario.power_price[self.t_ind]
    for load_ind in self.grid.load_inds:
        present = self.scenario.load_evs_presence[load_ind][self.t_ind]
        active = [ev for ev in present if ev.t_dep_hr > self.t_hr]
        if not active:
            continue
        assert len(active) == 1, "More than 1 EV at load %d" % load_ind
        coefs[load_ind] = active[0].utility_coef
    return coefs
def generate_possible_futures(self, n_scenarios):
    """Sample possible future scenarios consistent with what is known now."""
    known_evs = self.scenario.get_evs_known_at_t_ind(self.t_ind)
    return self.scenario_generator.generate(
        self.grid.n_loads, n_scenarios, self.t_ind, known_evs)
1634466 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
def detect_bursts(spikes, dt):
    """Group spikes into bursts and burst-groups by inter-spike interval.

    A new burst starts when the gap to the previous spike exceeds 0.007 s;
    a new burst-group when it exceeds 0.05 s (both expressed in samples).

    Args:
        spikes (sequence of int): spike indices, ascending.
        dt (float): time-step in seconds.

    Returns:
        (list, list, list): (1) first spikes of burst-groups, (2) first
        spikes of all bursts, (3) for each burst, the preceding spike
        converted via int(previous_spike / dt).
        NOTE(review): that conversion divides an *index* by dt, unlike the
        initial entry 0 — confirm the intended unit with the plotting code.
    """
    burst_gap = 0.007 / dt
    group_gap = 0.05 / dt
    first = spikes[0]
    burst_groups = [first]
    bursts_all = [first]
    pre_burst_spikes = [0]
    for prev, cur in zip(spikes, spikes[1:]):
        gap = cur - prev
        if gap > burst_gap:
            bursts_all.append(cur)
            pre_burst_spikes.append(int(prev / dt))
        if gap > group_gap:
            burst_groups.append(cur)
    return burst_groups, bursts_all, pre_burst_spikes
if __name__ == "__main__":
    # Demo: run the LIF retina simulation, detect bursts in the first
    # neuron's spike train and plot them.
    import params as pms
    import simulation
    import plots
    import matplotlib.pyplot as plt
    from brian2 import second
    # Argument order follows simulation.execute's signature exactly.
    lif_state_monitor, lif_spike_monitor, epsp_monitor, ipsp_monitor = simulation.execute(
        pms.Vth, pms.Vreset, pms.Vrest, 0,
        pms.refract, pms.dur, pms.tau, pms.tau_syn_ex, pms.tau_syn_in,
        pms.xs_on_ex, pms.ys_on_ex, pms.xs_off_ex, pms.ys_off_ex,
        pms.xs_on_inh, pms.ys_on_inh, pms.xs_off_inh, pms.ys_off_inh,
        pms.X, pms.Y, pms.lx, pms.dx, pms.ly, pms.dy,
        pms.sigma_center, pms.sigma_surround,
        pms.stimulus, pms.t, pms.we, pms.wi, pms.num_rfc,
        pms.r0, pms.L0, pms.G_ex, pms.G_inh)
    # Convert spike times to integer sample indices before burst detection.
    burst_groups, bursts_all, pre_burst_spikes = detect_bursts(
        ((lif_spike_monitor.spike_trains()[0]/pms.dt)/second).astype(int),
        pms.dt)
    plots.plot_bursts(lif_state_monitor, bursts_all, burst_groups, pms.dt)
    plt.show()
| StarcoderdataPython |
1642696 | import re
from .time import times_to_ms
from .formatbase import FormatBase
from .ssaevent import SSAEvent
# thanks to http://otsaloma.io/gaupol/doc/api/aeidon.files.mpl2_source.html
# Matches one MPL2 subtitle line: "[start][end]text", with times expressed
# as (possibly negative) integers in tenths of a second.
MPL2_FORMAT = re.compile(r"^(?um)\[(-?\d+)\]\[(-?\d+)\](.*)")
class MPL2Format(FormatBase):
    """MPL2 subtitle format implementation (times in tenths of a second)."""

    @classmethod
    def guess_format(cls, text):
        """See :meth:`pysubs2.formats.FormatBase.guess_format()`"""
        return "mpl2" if MPL2_FORMAT.search(text) else None

    @classmethod
    def from_file(cls, subs, fp, format_, **kwargs):
        """See :meth:`pysubs2.formats.FormatBase.from_file()`"""
        def prepare_text(lines):
            def style_piece(piece):
                piece = piece.strip()
                if piece.startswith("/"):
                    # a leading '/' marks a line rendered in italics
                    return r"{\i1}%s{\i0}" % piece[1:].strip()
                return piece
            return "\\N".join(style_piece(p) for p in lines.split("|"))

        events = []
        for start, end, text in MPL2_FORMAT.findall(fp.getvalue()):
            events.append(SSAEvent(start=times_to_ms(s=float(start) / 10),
                                   end=times_to_ms(s=float(end) / 10),
                                   text=prepare_text(text)))
        subs.events = events

    @classmethod
    def to_file(cls, subs, fp, format_, **kwargs):
        """
        See :meth:`pysubs2.formats.FormatBase.to_file()`
        No styling is supported at the moment.
        """
        # TODO handle italics
        for line in subs:
            if line.is_comment:
                continue
            body = line.plaintext.replace("\n", "|")
            print("[{start}][{end}] {text}".format(start=int(line.start // 100),
                                                   end=int(line.end // 100),
                                                   text=body),
                  file=fp)
| StarcoderdataPython |
3231593 | <filename>ex37.py
# Read an integer and print it in the base the user picks (1=bin, 2=oct, 3=hex).
num = int(input('Digite um numero inteiro: '))
print('''Escolha uma das bases para conversão:'
[1] converter para BINÁRIO
[2] converter para OCTAL
[3] converter para HEXADECIMAL''')
opção = int(input('Sua Opção'))
# Dispatch tables keep the per-option message text and converter together.
mensagens = {
    1: '{} convertido para Binário é igual {}',
    2: '{} convertido para OCTAL é igual a {}',
    3: '{} convertido para HEXADECIMAL é igual a {}',
}
conversores = {1: bin, 2: oct, 3: hex}
if opção in mensagens:
    # [2:] strips the '0b'/'0o'/'0x' prefix.
    print(mensagens[opção].format(num, conversores[opção](num)[2:]))
else:
    print('opção inválida. tente novamente.')
4391 | <gh_stars>1-10
"""
HTTP MultiServer/MultiClient for the ByteBlower Python API.
All examples are guaranteed to work with Python 2.7 and above
Copyright 2018, Ex<NAME>.
"""
# Needed for python2 / python3 print function compatibility
from __future__ import print_function
# import the ByteBlower module
import byteblowerll.byteblower as byteblower
import time
# Demo configuration consumed by Example(**configuration) below.
configuration = {
    # Address (IP or FQDN) of the ByteBlower server to use
    'server_address': 'byteblower-tp-1300.lab.byteblower.excentis.com',

    # Configuration for the first ByteBlower port.
    # Will be used as HTTP server.
    'port_1_config': {
        'interface': 'trunk-1-13',
        'mac': '00:bb:01:00:00:01',
        # IP configuration for the ByteBlower Port.
        # Options are 'DHCPv4', 'DHCPv6', 'SLAAC', 'static'
        # if DHCPv4, use "dhcpv4"
        'ip': 'dhcpv4',
        # if DHCPv6, use "dhcpv6"
        # 'ip': 'dhcpv6',
        # if SLAAC, use "slaac"
        # 'ip': 'slaac',
        # if staticv4, use ["ipaddress", netmask, gateway]
        # 'ip': ['192.168.0.2', "255.255.255.0", "192.168.0.1"],
        # if staticv6, use ["ipaddress", prefixlength]
        # 'ip': ['fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b', '64'],
        # TCP port number to be used by the HTTP connection.
        # On the HTTP server, this will be the port on which the server
        # listens.
        'tcp_port': 4096
    },

    # Configuration for the second ByteBlower port.
    # Will be used as HTTP client.
    'port_2_config': {
        'interface': 'trunk-1-25',
        'mac': '00:bb:01:00:00:02',
        # IP configuration for the ByteBlower Port.
        # Options are 'DHCPv4', 'DHCPv6', 'SLAAC', 'static'
        # if DHCPv4, use "dhcpv4"
        'ip': 'dhcpv4',
        # if DHCPv6, use "dhcpv6"
        # ip': 'dhcpv6',
        # if SLAAC, use "slaac"
        # 'ip': 'slaac',
        # if staticv4, use ["ipaddress", netmask, gateway]
        # 'ip': ['192.168.0.2', "255.255.255.0", "192.168.0.1"],
        # if staticv6, use ["ipaddress", prefixlength]
        # 'ip': ['fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b', '64'],
        # TCP port range the HTTP Clients will use to connect with
        # the HTTP server
        'tcp_port_min': 32000,
        'tcp_port_max': 50000
    },

    # HTTP Method
    # HTTP Method can be GET or PUT
    # - GET: Standard HTTP download, we retrieve data from the web server
    # - PUT: Standard HTTP upload, the wireless endpoint will push data to the
    #        webserver
    'http_method': 'GET',
    # 'http_method': 'PUT',

    # total duration, in nanoseconds.
    # This is the duration of the flow. When this duration expires,
    # all sessions will be stopped.
    'duration': 10000000000,

    # session duration, in nanoseconds
    # Duration of the individual sessions
    # 'session_duration': 1500000000,
    'session_duration': None,

    # session size, in bytes
    # The number of bytes transmitted by a session
    'session_size': 1 * 1000 * 1000,
    # 'session_size': None,

    # max concurrent sessions
    # Maximum number of sessions that will be running simultaneously
    'max_concurrent_sessions': 100,

    # maximum number of sessions
    # No more than this number of sessions will be created
    # 0 means no limit
    'max_total_sessions': 0,

    # TOS value to use on the HTTP client (and server)
    'tos': 0
}
class Example:
    """HTTP MultiServer/MultiClient demo driven by the `configuration` dict.

    Owns a connection to a ByteBlower server plus two provisioned ports:
    port_1 acts as the HTTP server, port_2 as the HTTP client.
    """

    def __init__(self, **kwargs):
        """Store the configuration; server and ports are created in run()."""
        self.server_address = kwargs['server_address']
        self.port_1_config = kwargs['port_1_config']
        self.port_2_config = kwargs['port_2_config']

        # Helper function, we can use this to parse the HTTP Method to the
        # enumeration used by the API
        from byteblowerll.byteblower import ParseHTTPRequestMethodFromString

        http_method_arg = kwargs['http_method']
        self.http_method = ParseHTTPRequestMethodFromString(http_method_arg)
        self.duration = kwargs['duration']
        self.session_duration = kwargs['session_duration']
        self.session_size = kwargs['session_size']
        self.max_concurrent_sessions = kwargs['max_concurrent_sessions']
        self.max_total_sessions = kwargs['max_total_sessions']
        self.tos = kwargs['tos']

        self.server = None
        self.port_1 = None
        self.port_2 = None

    def cleanup(self):
        """Clean up the created objects"""
        byteblower_instance = byteblower.ByteBlower.InstanceGet()
        if self.port_1:
            self.server.PortDestroy(self.port_1)
            self.port_1 = None
        if self.port_2:
            self.server.PortDestroy(self.port_2)
            self.port_2 = None

        if self.server is not None:
            byteblower_instance.ServerRemove(self.server)
            self.server = None

    def run(self):
        """Provision both ports, run the HTTP multi-client flow, print live
        statistics and return a summary list: config echo, TX/RX byte and
        speed counters, and the final client status value.
        """
        byteblower_instance = byteblower.ByteBlower.InstanceGet()

        print("Connecting to ByteBlower server %s..." % self.server_address)
        self.server = byteblower_instance.ServerAdd(self.server_address)

        # Create the port which will be the HTTP server (port_1)
        print("Creating HTTP Server port")
        self.port_1 = self.provision_port(self.port_1_config)

        print("Creating HTTP Client port")
        # Create the port which will be the HTTP client (port_2)
        self.port_2 = self.provision_port(self.port_2_config)

        http_server_ip_address = self.port_1_config['ip_address']

        # create a HTTP server
        http_server = self.port_1.ProtocolHttpMultiServerAdd()
        server_tcp_port = self.port_1_config['tcp_port']
        if server_tcp_port is not None:
            http_server.PortSet(server_tcp_port)
        else:
            server_tcp_port = http_server.PortGet()

        # create a HTTP Client
        http_client = self.port_2.ProtocolHttpMultiClientAdd()
        # - remote endpoint
        http_client.RemoteAddressSet(http_server_ip_address)
        http_client.RemotePortSet(server_tcp_port)
        # - local endpoint
        http_client.LocalPortRangeSet(self.port_2_config['tcp_port_min'],
                                      self.port_2_config['tcp_port_max'])

        # Configure the direction.
        # If the HTTP Method is GET,
        # traffic will flow from the HTTP server to the HTTP client
        # If the HTTP Method is PUT,
        # traffic will flow from the HTTP client to the HTTP server
        http_client.HttpMethodSet(self.http_method)

        print("Server port:", self.port_1.DescriptionGet())
        print("Client port:", self.port_2.DescriptionGet())

        # let the HTTP server listen for requests
        http_server.Start()

        # - total duration of all sessions
        http_client.DurationSet(self.duration)
        # - how many connections can be created?
        http_client.CumulativeConnectionLimitSet(self.max_total_sessions)
        # - how many connections can be running at the same time
        http_client.MaximumConcurrentRequestsSet(self.max_concurrent_sessions)

        # - individual duration, can be size-based or time-based
        if self.session_duration is not None:
            # let the HTTP Client request a page of a specific duration
            # to download...
            http_client.SessionDurationSet(self.session_duration)
        elif self.session_size is not None:
            # let the HTTP Client request a page of a specific size...
            http_client.SessionSizeSet(self.session_size)
        else:
            raise ValueError("Either duration or request_size must be configured")

        print("Starting the HTTP client")
        http_client.Start()

        http_client_result = http_client.ResultGet()

        # NOTE(review): polls for a fixed 10 x 1 s, which matches the 10 s
        # demo duration above — confirm if the configured duration changes.
        for iteration in range(10):
            time.sleep(1)
            http_client_result.Refresh()
            print("-" * 10)
            print("Iteration", iteration+1)
            print(" connections attempted", http_client_result.ConnectionsAttemptedGet())
            print(" connections established", http_client_result.ConnectionsEstablishedGet())
            print(" connections aborted", http_client_result.ConnectionsAbortedGet())
            print(" connections refused", http_client_result.ConnectionsRefusedGet())
            print("-" * 10)

        http_client.Stop()
        http_server.Stop()

        print("Stopped the HTTP client")

        request_status_value = http_client.StatusGet()
        request_status_string = byteblower.ConvertHTTPMultiClientStatusToString(request_status_value)

        http_client_result.Refresh()
        tx_bytes = http_client_result.TcpTxByteCountGet()
        tx_speed = http_client_result.TcpTxSpeedGet()
        rx_bytes = http_client_result.TcpRxByteCountGet()
        rx_speed = http_client_result.TcpRxSpeedGet()

        http_server_result = http_server.ResultGet()
        http_server_result.Refresh()

        print("Requested Duration : {} nanoseconds".format(self.duration))
        print("Status : {}".format(request_status_string))
        print("Client Result data : {}".format(http_client_result.DescriptionGet()))
        print("Server Result data : {}".format(http_server_result.DescriptionGet()))

        return [
            self.duration,
            self.session_duration,
            self.session_size,
            self.max_total_sessions,
            self.max_concurrent_sessions,
            tx_bytes, rx_bytes,
            tx_speed, rx_speed,
            request_status_value
        ]

    def provision_port(self, config):
        """Create a ByteBlower port from a config dict (L2 MAC + L3 address)
        and store the resolved address back into config['ip_address'].
        """
        port = self.server.PortCreate(config['interface'])
        port_l2 = port.Layer2EthIISet()
        port_l2.MacSet(config['mac'])

        ip_config = config['ip']
        if not isinstance(ip_config, list):
            # Config is not static, DHCP or slaac
            if ip_config.lower() == "dhcpv4":
                port_l3 = port.Layer3IPv4Set()
                port_l3.ProtocolDhcpGet().Perform()
                config['ip_address'] = port_l3.IpGet()
            elif ip_config.lower() == "dhcpv6":
                port_l3 = port.Layer3IPv6Set()
                port_l3.ProtocolDhcpGet().Perform()
                config['ip_address'] = port_l3.IpDhcpGet()
            elif ip_config.lower() == "slaac":
                port_l3 = port.Layer3IPv6Set()
                port_l3.StatelessAutoconfiguration()
                config['ip_address'] = port_l3.IpStatelessGet()
        else:
            # Static configuration
            if len(ip_config) == 3:
                # IPv4
                port_l3 = port.Layer3IPv4Set()
                port_l3.IpSet(ip_config[0])
                port_l3.NetmaskSet(ip_config[1])
                port_l3.GatewaySet(ip_config[2])
                config['ip_address'] = port_l3.IpGet()
            elif len(ip_config) == 2:
                port_l3 = port.Layer3IPv6Set()
                # IPv6
                address = ip_config[0]
                prefix_length = ip_config[1]
                ip = "{}/{}".format(address, prefix_length)
                port_l3.IpManualAdd(ip)
                config['ip_address'] = ip_config[0]

        # IPv6 getters may return a list-like of CIDR strings; normalize to a
        # bare address (presumably an API StringList — TODO confirm).
        if not isinstance(config['ip_address'], str):
            ip = config['ip_address'][0]
            if '/' in ip:
                config['ip_address'] = ip.split('/')[0]

        print("Created port", port.DescriptionGet())
        return port
# When this python module is called stand-alone, the run-function must be
# called. This approach makes it possible to include it in a series of
# examples.
if __name__ == "__main__":
    demo = Example(**configuration)
    try:
        demo.run()
    finally:
        # Always release server-side resources, even on failure.
        demo.cleanup()
| StarcoderdataPython |
4809230 | <reponame>Columbine21/TFR-Net<filename>trains/missingTask/__init__.py
from trains.missingTask.TFR_NET import TFR_NET
# Public API of the missing-modality training package.
__all__ = ["TFR_NET"]
3304220 | <reponame>ouyangjunfei/shopyo<filename>shopyo/modules/settings/models.py
from shopyoapi.init import db
class Settings(db.Model):
    """Simple key/value store for application-wide settings."""

    __tablename__ = "settings"

    # The setting's name doubles as the primary key.
    setting = db.Column(db.String(100), primary_key=True)
    value = db.Column(db.String(100))
196570 | import unittest
import numpy as np
import cal_joint_lps
import data_set_4
import mix_lp
def CalGamma(dataC, dataU, pD, pA, g1, g2, calc_type):
    """Combine the joint log-probabilities of the two gene groups.

    calc_type == 0 mixes only the combined term rt_cu with pA; otherwise
    the unlinked term is mixed with pA first and the sum with pD.
    """
    rt_c, rt_u, rt_cu = cal_joint_lps.CalJointLPS(dataC, dataU, g1, g2)
    if calc_type == 0:
        return mix_lp.MyGetMixLP2(rt_cu, pA)
    combined = rt_c + mix_lp.MyGetMixLP2(rt_u, pA)
    return mix_lp.MyGetMixLP2(combined, pD)
class CalGammaTestCase(unittest.TestCase):
    def test_CalGamma(self):
        """Check CalGamma against known values for data_set_4, both modes."""
        dataC = data_set_4.dataC
        dataU = data_set_4.dataU
        # Test _calGamma_S.
        g1 = [0, 1]
        g2 = [2, 3]
        res = CalGamma(dataC, dataU, 0.1, 0.1, g1, g2, 0)
        print('_calGamma_S:')
        print('res with cal_type = 0 -> {:3.5f} {}'.format(res, np.exp(res)))
        # assertAlmostEqual(..., delta=1e-5) accepts exactly the same values
        # as the old assertFalse(abs(expected - res) > 1e-5), but reports the
        # actual value on failure.
        self.assertAlmostEqual(res, 28.58330, delta=1e-5)
        res = CalGamma(dataC, dataU, 0.1, 0.1, g1, g2, 1)
        print('res with cal_type = 1 -> {:3.5f} {}'.format(res, np.exp(res)))
        self.assertAlmostEqual(res, -0.10349, delta=1e-5)
        print('')
        # Test _calGamma_E.
        g1 = [0, 1]
        g2 = [3]  # Exclude `2' from g2
        res = CalGamma(dataC, dataU, 0.1, 0.1, g1, g2, 0)
        print('_calGamma_E:')
        print('res with cal_type = 0 -> {:3.5f} {}'.format(res, np.exp(res)))
        self.assertAlmostEqual(res, 30.52722, delta=1e-5)
        res = CalGamma(dataC, dataU, 0.1, 0.1, g1, g2, 1)
        print('res with cal_type = 1 -> {:3.5f} {}'.format(res, np.exp(res)))
        self.assertAlmostEqual(res, -0.0524233, delta=1e-5)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| StarcoderdataPython |
3331360 | # Generated by Django 2.0.8 on 2018-09-19 19:53
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: drops the APElectionMeta model.
    # Avoid hand edits beyond comments.

    dependencies = [
        ('electionnight', '0005_auto_20180304_1829'),
    ]

    operations = [
        # Remove the foreign-key fields first so the model delete is clean.
        migrations.RemoveField(
            model_name='apelectionmeta',
            name='ballot_measure',
        ),
        migrations.RemoveField(
            model_name='apelectionmeta',
            name='election',
        ),
        migrations.DeleteModel(
            name='APElectionMeta',
        ),
    ]
| StarcoderdataPython |
1783762 | # quick and simple extraction of tor nodes from directory
# with insert into QRadar reference collections.
#
# you'll need the TorCtl python package
# from https://gitweb.torproject.org/pytorctl.git/
# and you'll need to have tor installed on the same
# host where this script runs.
# in the config file (tor_reference_config) find common
# paths for the Tor bundle on Windows or Mac. You'll
# have to (un)comment and/or edit these to suit your
# environment.
#
# author: <NAME>
# edits: <NAME>
import sys
import subprocess
import time
from TorCtl import TorCtl
import requests
# do as I say, not as I do
requests.packages.urllib3.disable_warnings()

# Load user settings from the side-car config file into a plain dict.
# See tor_reference_config for the expected keys.
config = {}
exec(open('tor_reference_config').read(), config)

# QRadar connection settings.
qradarIpAddress = config.get('qradarIP')
qradarSecToken = config.get('qradarAPIToken')

# Local tor process settings.
TASKLIST=config.get('TASKLIST')
TOR_PATH=config.get('TOR_PATH')
TOR_PS_NAME=config.get('TOR_PS_NAME')
CONTROL_PORT=config.get('CONTROL_PORT')
def download_network_view():
global VIDALIA_PATH
global CONTROL_PORT
global AUTH_PASS
start_tor=True
ps_list = subprocess.Popen(TASKLIST, stdout=subprocess.PIPE).communicate()[0]
for ps in ps_list:
if ps.startswith(TOR_PS_NAME):
start_tor = False
if start_tor:
print "Initializing TOR.."
subprocess.Popen([TOR_PATH])
time.sleep(20)
print "starting.."
# open the TOR connection
conn = TorCtl.connect(controlAddr="127.0.0.1", controlPort=CONTROL_PORT)
all_nodes = conn.get_network_status()
print "wrapping it up."
conn.close()
return all_nodes
def create_reference_set(name, elmType, ttl):
    """Create a QRadar reference set of the given element type if absent."""
    url = 'https://' + qradarIpAddress + '/api/reference_data/sets'
    headers = {'SEC': qradarSecToken, 'Version': '4.0', 'Accept': 'application/json'}
    data = {'name': name, 'element_type': elmType, 'time_to_live': ttl, 'timeout_type': 'LAST_SEEN'}
    try:
        # Probe first; only create when the set does not exist yet (404).
        probe = requests.get(url + '/' + name, headers=headers, verify=False)
        if probe.status_code == 404:
            created = requests.post(url, headers=headers, data=data, verify=False)
            print('reference set ' + str(name) + ' creation HTTP status: ' + str(created.status_code))
    except requests.exceptions.RequestException as exception:
        print(str(exception) + ', exiting.\n')
def add_tor_node(set_name, ip):
    """Insert a single node IP into the named QRadar reference set."""
    headers = {'SEC': qradarSecToken, 'Version': '4.0', 'Accept': 'application/json'}
    endpoint = 'https://' + qradarIpAddress + '/api/reference_data/sets/' + set_name
    payload = {'name': set_name, 'value': ip, 'source': 'tor_reference_import'}
    try:
        resp = requests.post(endpoint, headers=headers, data=payload, verify=False)
        # Anything above 201 Created indicates an insertion problem.
        if resp.status_code > 201:
            print('tor node ' + str(ip) + ' insertion HTTP status: ' + str(resp.status_code))
    except requests.exceptions.RequestException as exception:
        print(str(exception) + ', exiting.\n')
def _add_nodes(set_name, label, nodes):
    """Upload every node IP in `nodes` to `set_name`, printing progress dots."""
    print('adding %s ... ' % label)
    for node in nodes:
        add_tor_node(set_name, node)
        sys.stdout.write('.')
        sys.stdout.flush()
    print(' done.\n')


def main():
    """Classify tor relays into guard/exit/intermediary sets and upload them."""
    # check for and create reference collections in QRadar
    create_reference_set('tor_exit_nodes', 'IP', '7 days')
    create_reference_set('tor_guard_nodes', 'IP', '7 days')
    create_reference_set('tor_intermediary_nodes', 'IP', '7 days')
    # Guard, Exit
    guards = set()
    exits = set()
    intermediaries = set()
    for node in download_network_view():
        # A relay with neither the Guard nor the Exit flag is "intermediary";
        # one with both flags lands in both sets (same as before).
        middle = True
        if "Guard" in node.flags:
            guards.add(node.ip)
            middle = False
        if "Exit" in node.flags:
            exits.add(node.ip)
            middle = False
        if middle:
            intermediaries.add(node.ip)
    # The three upload loops were byte-identical; factored into _add_nodes()
    # — printed output is unchanged.
    _add_nodes('tor_guard_nodes', 'guard nodes', guards)
    _add_nodes('tor_exit_nodes', 'exit nodes', exits)
    _add_nodes('tor_intermediary_nodes', 'intermediary nodes', intermediaries)
main()
| StarcoderdataPython |
81587 |
import renpy
import pygame
import os
import math
import ctypes
import euclid
from OpenGL import GL as gl
# Pixel size of the debug-overlay font.
FONT_SIZE = 18
# Lazily initialized by drawText() on first use.
FONT = None
def drawText(canvas, text, pos, color, align=-1, background=(128, 128, 128)):
    """Render `text` onto `canvas` at `pos`; align=1 right-aligns at pos.

    Returns the (width, height) of the rendered text surface.
    """
    global FONT
    if FONT is None:
        # One-time lazy font setup.
        pygame.font.init()
        FONT = pygame.font.Font(None, FONT_SIZE)
    rendered = FONT.render(text, True, color, background)
    x, y = pos
    if align == 1:
        x -= rendered.get_width()
    canvas.get_surface().blit(rendered, (x, y))
    return rendered.get_size()
def drawLinesSafe(canvas, color, connect, points, width=1):
    """Draw a polyline while collapsing consecutive duplicate points.

    Runs of equal (rounded) points hang the underlying line drawing, so
    each run is reduced to a single vertex before calling canvas.lines().
    """
    deduped = []
    i = 0
    n = len(points)
    while i < n:
        x, y = round(points[i][0]), round(points[i][1])
        deduped.append((x, y))
        j = i + 1
        while j < n and (round(points[j][0]), round(points[j][1])) == (x, y):
            j += 1
        i = j
    if connect and deduped:
        # Close the loop manually; offset by one pixel so the closing point
        # never equals the first (same duplicate-point hang workaround).
        x0, y0 = deduped[0]
        deduped.append((x0, y0 - 1))
    canvas.lines(color, False, deduped, width)
def createTransform2d():
    """Projection*view matrix for a fixed camera at z=1 looking at the origin."""
    view = euclid.Matrix4.new_look_at(
        euclid.Vector3(0, 0, 1),   # eye
        euclid.Vector3(0, 0, 0),   # look-at target
        euclid.Vector3(0, 1, 0))   # up
    projection = euclid.Matrix4.new_perspective(math.radians(90), 1.0, 0.1, 10)
    return projection * view
def createPerspective(fov, width, height, zMin, zMax):
    """Perspective projection matrix.

    TODO This negates fov to flip the y-axis in the framebuffer.
    """
    aspect = width / float(height)
    return euclid.Matrix4.new_perspective(-math.radians(fov), aspect, zMin, zMax)
def createPerspectiveBlender(lens, xResolution, yResolution, width, height, zMin, zMax):
    """Build a projection from a Blender camera lens value (mm)."""
    factor = lens / 32.0
    aspect = xResolution / float(yResolution)
    # Convert the lens to a vertical field of view in degrees.
    fov_deg = math.atan(0.5 / aspect / factor) * 360 / math.pi
    return createPerspective(fov_deg, width, height, zMin, zMax)
def createPerspectiveOrtho(left, right, bottom, top, near, far):
    """Column-major 4x4 orthographic projection as a flat 16-element list."""
    dx = right - left
    dy = top - bottom
    dz = far - near
    return [
        2 / dx, 0, 0, 0,
        0, 2 / dy, 0, 0,
        0, 0, -2 / dz, 0,
        -(right + left) / dx, -(top + bottom) / dy, -(far + near) / dz, 1,
    ]
def clamp(value, small, large):
    """Constrain value to [small, large] (lower bound wins if they cross)."""
    lowered = min(value, large)
    return max(lowered, small)
def interpolate(a, b, s):
    """Linearly interpolate from a to b by fraction s (s=0 -> a, s=1 -> b)."""
    delta = b - a
    return a + s * delta

def interpolate2d(p1, p2, s):
    """Component-wise linear interpolation between two 2D points."""
    return (interpolate(p1[0], p2[0], s),
            interpolate(p1[1], p2[1], s))

def interpolate3d(p1, p2, s):
    """Component-wise linear interpolation between two 3D points."""
    return (interpolate(p1[0], p2[0], s),
            interpolate(p1[1], p2[1], s),
            interpolate(p1[2], p2[2], s))
def makeFloatArray(elements, count):
    """Flatten `count`-component vectors into one ctypes GLfloat array."""
    raw = (gl.GLfloat * (len(elements) * count))()
    for i, vec in enumerate(elements):
        base = i * count
        for x in range(count):
            raw[base + x] = vec[x]
    return raw
def matrixToList(m):
    """Flatten a euclid.Matrix4 (attributes a..p) into a 16-element list."""
    columns = ((m.a, m.e, m.i, m.m),
               (m.b, m.f, m.j, m.n),
               (m.c, m.g, m.k, m.o),
               (m.d, m.h, m.l, m.p))
    return [value for column in columns for value in column]
def getTexParameteriv(glTex, param):
    """Query an integer level-0 parameter of the bound 2D texture."""
    out = ctypes.c_int(0)
    gl.glGetTexLevelParameteriv(gl.GL_TEXTURE_2D, 0, param, ctypes.byref(out))
    return out.value
def listFiles():
    """All files under the game directory as sorted forward-slash paths.

    Dot-directories are pruned from the walk.
    """
    found = []
    for root, dirs, files in os.walk(renpy.config.gamedir):
        dirs[:] = [d for d in dirs if not d.startswith(".")]
        for name in files:
            found.append(os.path.join(root, name).replace("\\", "/"))
    found.sort()
    return found
def scanForFiles(extension):
    """Sorted files from listFiles() with the given extension (case-insensitive)."""
    wanted = extension.lower()
    matches = [f for f in listFiles() if f.split(".")[-1].lower() == wanted]
    matches.sort()
    return matches
def findFile(name):
    """Locate a file by bare filename; returns its path or None."""
    # Fast path: renpy's own listing also sees files inside archives.
    for candidate in renpy.exports.list_files():
        if candidate.split("/")[-1] == name:
            return candidate
    # Fallback: scan all game directories on disk.
    for candidate in listFiles():
        if candidate.split("/")[-1] == name:
            return candidate
    return None
def openFile(path):
    # Thin wrapper so callers don't depend on renpy.exports directly.
    return renpy.exports.file(path)
| StarcoderdataPython |
1701799 | """
Adiabatic flame temperature and equilibrium composition for a
fuel/air mixture as a function of equivalence ratio,
including formation of solid carbon.
"""
from Cantera import *
import sys
##############################################################
#
# Edit these parameters to change the initial temperature, the
# pressure, and the phases in the mixture
#
###############################################################
temp = 300.0
pres = 101325.0
# phases
gas = importPhase('gri30.cti')
carbon = importPhase('graphite.cti')
# the phases that will be included in the calculation, and their
# initial moles
mix_phases = [ (gas, 1.0), (carbon, 0.0) ]
# gaseous fuel species
fuel_species = 'CH4'
# air composition
air_N2_O2_ratio = 3.76
# equivalence ratio range
phi_min = 0.3
phi_max = 3.5
npoints = 50
##################################################
mix = Mixture(mix_phases)
nsp = mix.nSpecies()
# create some arrays to hold the data
phi = zeros(npoints,'d')
tad = zeros(npoints,'d')
xeq = zeros([nsp,npoints],'d')
# find fuel, nitrogen, and oxygen indices
ifuel, io2, in2 = gas.speciesIndex([fuel_species, 'O2', 'N2'])
if ifuel < 0:
raise "fuel species "+fuel_species+" not present!"
if gas.nAtoms(fuel_species,'O') > 0 or gas.nAtoms(fuel_species,'N') > 0:
raise "Error: only hydrocarbon fuels are supported."
stoich_O2 = gas.nAtoms(fuel_species,'C') + 0.25*gas.nAtoms(fuel_species,'H')
for i in range(npoints):
phi[i] = phi_min + (phi_max - phi_min)*i/(npoints - 1)
x = zeros(nsp,'d')
x[ifuel] = phi[i]
x[io2] = stoich_O2
x[in2] = stoich_O2*air_N2_O2_ratio
# set the gas state
gas.set(T = temp, P = pres, X = x)
# create a mixture of 1 mole of gas, and 0 moles of solid carbon.
mix = Mixture(mix_phases)
mix.setTemperature(temp)
mix.setPressure(pres)
# equilibrate the mixture adiabatically at constant P
mix.equilibrate('HP', maxsteps = 1000,
err = 1.0e-6, maxiter = 200, loglevel=0)
tad[i] = mix.temperature();
print 'At phi = %12.4g, Tad = %12.4g' % (phi[i],tad[i])
xeq[:,i] = mix.speciesMoles()
# write output CSV file for importing into Excel
csvfile = 'adiabatic.csv'
f = open(csvfile,'w')
writeCSV(f,['phi','T (K)']+mix.speciesNames())
for n in range(npoints):
writeCSV(f,[phi[n], tad[n]]+list(xeq[:,n]))
f.close()
print 'output written to '+csvfile
# make plots
if '--plot' in sys.argv:
import plotting
plotting.plotEquilData(mix, phi, tad, xeq)
| StarcoderdataPython |
17220 | <filename>src/psion/oauth2/endpoints/revocation.py
from __future__ import annotations
from typing import Optional
from psion.oauth2.exceptions import InvalidClient, OAuth2Error, UnsupportedTokenType
from psion.oauth2.models import JSONResponse, Request
from .base import BaseEndpoint
class RevocationEndpoint(BaseEndpoint):
    """
    Endpoint used by the `Client` to revoke a token in its possession.

    If the Client succeeds to authenticate but provides a token that was
    not issued to itself, the `Provider` **DOES NOT** revoke the token,
    since the Client is not authorized to operate the token.

    If the token is already invalid, does not exist within the Provider
    or is otherwise unknown or invalid, it is also considered "revoked".

    :cvar `__authentication_methods__`: Allowed Client Authentication methods.
    :cvar `__supported_tokens__`: Token types supported by the endpoint.
    """

    __endpoint__: str = "revocation"
    __authentication_methods__: list[str] = None
    __supported_tokens__: list[str] = ["access_token", "refresh_token"]

    async def __call__(self, request: Request) -> JSONResponse:
        """
        Revokes a previously issued Token.

        First it validates the `Revocation Request` of the `Client`
        by making sure the required parameter "token" is present,
        and that the `Client` can authenticate with the allowed
        authentication methods.

        From the specification at
        `<https://www.rfc-editor.org/rfc/rfc7009.html#section-2.1>`_::

            The client constructs the request by including the following
            parameters using the "application/x-www-form-urlencoded" format in
            the HTTP request entity-body:

            token REQUIRED. The token that the client wants to get revoked.

            token_type_hint OPTIONAL. A hint about the type of the token
                submitted for revocation. Clients MAY pass this parameter in
                order to help the authorization server to optimize the token
                lookup.

        It then returns an empty response with a HTTP Status 200 OK,
        signaling that the provided token has been revoked by the server.

        From the specification at
        `<https://www.rfc-editor.org/rfc/rfc7009.html#section-2.2>`_::

            The authorization server responds with HTTP status code 200 if the
            token has been revoked successfully or if the client submitted an
            invalid token.

            Note: invalid tokens do not cause an error response since the client
            cannot handle such an error in a reasonable way. Moreover, the
            purpose of the revocation request, invalidating the particular token,
            is already achieved.

        This endpoint does not return any errors, except when the provided
        `token_type_hint` is not supported by the Provider.

        :raises UnsupportedTokenType: The provided token_type_hint is not supported.
        """
        try:
            client = await self.authenticate(request, self.__authentication_methods__)

            data = request.form()

            token: str = data.get("token")
            token_type_hint: Optional[str] = data.get("token_type_hint")

            if not token or not isinstance(token, str):
                # BUG FIX: this branch previously executed a bare `return`,
                # leaking None to the caller instead of a response object.
                # Per RFC 7009 §2.2, a missing/invalid token is treated as
                # already revoked: answer 200 with an empty JSON body.
                return JSONResponse()

            if token_type_hint:
                if token_type_hint not in self.__supported_tokens__:
                    raise UnsupportedTokenType

            await self.adapter.revoke_token(client, token, token_type_hint)

            return JSONResponse()
        except InvalidClient as exc:
            return JSONResponse(401, exc.headers, exc.dump())
        except OAuth2Error as exc:
            return JSONResponse(400, exc.headers, exc.dump())
| StarcoderdataPython |
97713 | import inspect
import logging
import os
import pickle
import cloudpickle
from quake.client.base.task import make_input, new_py_task
from quake.client.job import _set_rank
from quake.common.layout import Layout
from quake.job.config import JobConfiguration
from .glob import get_global_plan, get_inout_obj, set_inout_obj
def task_runner(jctx, input_data, python_job):
    # Entry point executed inside a quake worker: record this worker's rank
    # for the current process, then run the user's pickled job on its inputs.
    # jctx is the job context supplied by the runtime; python_job is a
    # PythonJob instance (defined below).
    _set_rank(jctx.rank)
    return python_job.run(input_data)
"""
def _load(obj):
if isinstance(obj, bytes):
return pickle.loads(obj)
if len(obj) == 1:
return _load(obj[0])
return [_load(o) for o in obj]
"""
def collection_to_list(obj):
    """Render the elements of a short collection for debug output.

    Collections with more than 16 elements are abbreviated to ``...``.
    """
    if len(obj) > 16:
        return "..."
    return ",".join(map(obj_to_debug_string, obj))


def obj_to_debug_string(obj):
    """Return a compact, human-readable description of *obj* for logging.

    Lists and tuples show their length and (abbreviated) contents; any other
    object is shown via ``repr`` unless that is longer than 20 characters,
    in which case only its type is reported.
    """
    if isinstance(obj, list):
        return "List({})[{}]".format(len(obj), collection_to_list(obj))
    if isinstance(obj, tuple):
        return "Tuple({})[{}]".format(len(obj), collection_to_list(obj))
    text = repr(obj)
    if len(text) > 20:
        return str(type(obj))
    return text
logger = logging.getLogger(__name__)
class PythonJob:
    """Payload object shipped to workers: a pickled callable plus its arguments.

    Instances are pickled into the job configuration; ``run`` is invoked on
    the worker with the task's raw input data.
    """

    def __init__(self, pickled_fn, task_args, task_returns, inouts, cwd):
        # pickled_fn: cloudpickle-serialized callable to execute.
        # task_args: mapping of parameter name -> value; values may contain
        #   Placeholder objects referring to entries of the input data.
        # task_returns: number of result objects the callable must produce.
        # inouts: names of arguments that are also returned (in/out args).
        # cwd: working directory captured at task-creation time.
        self.pickled_fn = pickled_fn
        self.task_args = task_args
        self.inouts = inouts
        self.cwd = cwd
        self.task_returns = task_returns

    def run(self, input_data):
        """Unpickle the callable, resolve its arguments against *input_data*,
        call it, and return the pickled outputs followed by pickled in/out
        argument values."""
        # kwargs = {name: pickle.loads(input_data[value]) for name, value in self.task_args.items()}
        kwargs = {}
        for name, value in self.task_args.items():
            logger.info("Arg %s => %s", name, obj_to_debug_string(value))
            # Substitute Placeholder objects with the (unpickled) task inputs.
            value = replace_placeholders(value, input_data)
            kwargs[name] = value
            logger.info("Set arg %s => %s", name, obj_to_debug_string(value))
        result = cloudpickle.loads(self.pickled_fn)(**kwargs)
        # print("RETURNING", obj_to_debug_string(result))
        # print("RETURNS", self.task_returns)
        # print("INOUTS", self.inouts)
        if self.task_returns == 1:
            logger.info("Pickling individual output")
            output = [pickle.dumps(result)]
        else:
            if self.task_returns != len(result):
                raise Exception(
                    "Expecting {} outputs, but got {}".format(
                        self.task_returns, len(result)
                    )
                )
            logger.info("Pickling %s outputs", len(result))
            output = [pickle.dumps(r) for r in result]
        # In/out argument values are appended after the regular outputs.
        inouts = [pickle.dumps(kwargs[name]) for name in self.inouts]
        # print("RETURNING INOUTS", obj_to_debug_string(inouts))
        return output + inouts
# ArgConfig = collections.namedtuple("ArgConfig", "layout")
class ArgConfig:
    """Per-argument configuration for a wrapped function.

    ``inout`` is the argument's index in the wrapper's in/out list (or a
    false-ish value for plain input arguments); ``layout`` and ``unwrap``
    control how task inputs are distributed and unpacked.
    """

    def __init__(self, inout):
        self.inout = inout
        self.layout = "all_to_all"
        self.unwrap = True
call_id_counter = 0
class WrapperConfig:
    """Configuration of a Python function wrapped as a quake task factory.

    Holds the callable, its inspected signature, the default ``keep`` flag,
    the number of MPI processes/outputs and per-argument :class:`ArgConfig`
    entries.  ``make_task`` turns a call into a task in the global plan.
    """

    def __init__(self, fn, default_keep, n_processes, n_outputs, arg_configs):
        # arg_configs maps argument names either to a type string
        # ("in", "inout", "collection_in", "fileout") or to a dict with a
        # "type" key carrying the same values.
        self.fn = fn
        self.signature = inspect.signature(fn)
        self.default_keep = default_keep
        self.n_processes = n_processes
        self.n_outputs = n_outputs
        self._pickled_fn = None  # lazily filled by pickle_fn()
        self.inouts = []  # names of in/out arguments, in declaration order
        self.arg_configs = {}
        for name, value in arg_configs.items():
            if isinstance(value, str):
                tp = value
            elif isinstance(value, dict):
                tp = value.get("type")
            else:
                raise Exception("Invalid value: '{}'".format(value))
            if tp in (None, "collection_in", "in", "fileout"):
                inout = None
            elif tp == "inout":
                inout = len(self.inouts)
                self.inouts.append(name)
            else:
                raise Exception("Invalid type {}".format(tp))
            self.arg_configs[name] = ArgConfig(inout)

    def _update_mpi_args(self, n_processes, args):
        """Configure scatter layouts for MPI-style arguments.

        Only the trivial layout (stride 1, block length 1, one block per
        process) is currently supported.
        """
        self.n_processes = n_processes
        for name, value in args.items():
            assert name.endswith("_layout")
            name = name[: -len("_layout")]
            # print("QE: MPI_ARG", name)
            block_length = value["block_length"]
            block_count = value["block_count"]
            stride = value["stride"]
            assert stride == 1
            assert block_length == 1
            assert block_count == n_processes
            config = self.arg_configs.get(name)
            if config is None:
                config = ArgConfig(False)
                self.arg_configs[name] = config
            config.layout = "scatter"
            config.unwrap = True

    def pickle_fn(self):
        """Return (and cache) the cloudpickle serialization of the callable."""
        if self._pickled_fn:
            return self._pickled_fn
        else:
            self._pickled_fn = cloudpickle.dumps(self.fn)
            return self._pickled_fn

    def _prepare_inputs(self, args, kwargs):
        """Bind call arguments to the signature and extract task inputs.

        Returns ``(inputs, args, binding)`` where ``inputs`` lists the task
        inputs referenced by Placeholder objects embedded in ``args``.
        """
        # if "self" in self.signature.parameters:
        #    binding = self.signature.bind(None, *args, **kwargs)
        # else:
        #    binding = self.signature.bind(*args, **kwargs)
        binding = self.signature.bind(*args, **kwargs)
        inputs = []
        args = {}
        for name, value in binding.arguments.items():
            arg_config = self.arg_configs.get(name)
            if arg_config:
                layout = arg_config.layout
                unwrap = arg_config.unwrap
            else:
                layout = "all_to_all"
                unwrap = True
            args[name] = put_placeholders(value, inputs, 0, layout, unwrap)
        """
        if inout_obj is not None:
            value = inout_obj
        if isinstance(value, ResultProxy):
            if arg_config:
                layout = arg_config.layout
            else:
                layout = "all_to_all"
            task_args[name] = len(inputs)
            inputs.append(make_input(value.task, list(range(value.n_outputs)), layout=layout))
        elif isinstance(value, ResultPartProxy):
            if arg_config:
                layout = arg_config.layout
            else:
                layout = "all_to_all"
            task_args[name] = len(inputs)
            inputs.append(make_input(value.task, [value.output_id], layout=layout))
        else:
            assert not isinstance(value, Task)
            if arg_config and arg_config.layout != "all_to_all":
                raise Exception("Non-task result is used as argument with layout")
            const_args[name] = value
        """
        return inputs, args, binding

    def __repr__(self):
        # Fix: report the wrapped callable's own name.  The previous code
        # used ``self.fn.__class__.__name__``, which is "function" for every
        # plain function and therefore useless in logs; fall back to the
        # type name only for callables without ``__name__``.
        name = getattr(self.fn, "__name__", self.fn.__class__.__name__)
        return "<FunctionWrapper of '{}'>".format(name)

    def make_task(self, *args, keep=None, returns=None, **kwargs):
        """Create a task for one call of the wrapped function.

        Returns a :class:`ResultProxy` over the task's outputs; in/out
        arguments are re-bound to the task's extra outputs.
        """
        global call_id_counter
        call_id_counter += 1
        call_id = call_id_counter
        # print("QE: [{}] CALLING {}".format(call_id, self.fn))
        # traceback.print_stack()
        # if self.fn.__name__ == "updatePartialQoiEstimators_Task":
        #    print(args)
        #    xxx()
        if keep is None:
            keep = self.default_keep
        if returns is None:
            returns = self.n_outputs
        else:
            assert isinstance(returns, int)
        inputs, task_args, binding = self._prepare_inputs(args, kwargs)
        if self.n_processes != 1:
            # MPI-style tasks produce one output per process.
            assert returns == self.n_processes
            task_returns = 1
        else:
            task_returns = returns
        cwd = os.getcwd()
        real_returns = task_returns + len(self.inouts)
        payload = PythonJob(self.pickle_fn(), task_args, task_returns, self.inouts, cwd)
        config = pickle.dumps(JobConfiguration(task_runner, real_returns, payload))
        task = new_py_task(
            real_returns + len(self.inouts), self.n_processes, keep, config, inputs
        )
        task.config["env"] = dict(os.environ)
        task.call_id = call_id
        get_global_plan().add_task(task)
        # Re-bind in/out arguments to the extra outputs of the new task.
        for i, name in enumerate(self.inouts):
            obj = binding.arguments[name]
            set_inout_obj(obj, TaskOutput(task, returns + i))
        return ResultProxy(task, task_returns)
def put_placeholders(obj, inputs, level=0, layout="all_to_all", unwrap=False):
    # Walk a task-argument value and replace any task-result reference
    # (ResultProxy / ResultPartProxy / TaskOutput) with a Placeholder,
    # registering the corresponding task output in ``inputs``.
    # Lists are processed recursively; other values pass through unchanged.
    inout_obj = get_inout_obj(obj)
    if inout_obj is not None:
        # In/out arguments are only supported at the top level of a value.
        assert level == 0
        # print("%%% REPLACING", obj_to_debug_string(obj), " ---> ", inout_obj)
        obj = inout_obj
    if isinstance(obj, ResultProxy):
        placeholder = Placeholder(len(inputs), unwrap)
        inp = make_input(obj.task, list(range(obj.task_outputs)), layout=layout)
        inputs.append(inp)
        return placeholder
    if isinstance(obj, ResultPartProxy):
        # Single parts only support the default layout; the part is selected
        # via the Layout offset instead.
        assert layout == "all_to_all"
        placeholder = Placeholder(len(inputs), unwrap)
        inp = make_input(
            obj.task,
            list(range(obj.task_outputs)),
            layout=Layout(0, obj.output_idx, 0, 1),
        )
        inputs.append(inp)
        return placeholder
    if isinstance(obj, TaskOutput):
        placeholder = Placeholder(len(inputs), unwrap)
        inp = make_input(obj.task, obj.output_id)
        inputs.append(inp)
        return placeholder
    elif isinstance(obj, list):
        # Nested lists always use the default layout.
        level += 1
        return [put_placeholders(o, inputs, level, "all_to_all", unwrap) for o in obj]
    else:
        return obj
def _load(obj, unwrap):
if isinstance(obj, bytes):
return pickle.loads(obj)
if unwrap and len(obj) == 1:
return _load(obj[0], False)
return [_load(o, False) for o in obj]
def replace_placeholders(obj, inputs):
    """Resolve Placeholder objects against the task's raw input data.

    Placeholders are replaced by the unpickled input they reference; lists
    are processed recursively; everything else is returned unchanged.
    """
    if isinstance(obj, Placeholder):
        return _load(inputs[obj.index], obj.unwrap)
    if isinstance(obj, list):
        return [replace_placeholders(item, inputs) for item in obj]
    return obj
class Placeholder:
    """Marker stored in task arguments; resolved against input ``index`` at run time."""

    def __init__(self, index, unwrap):
        # index: position in the task's input list.
        # unwrap: collapse a single-element input to the bare element.
        self.index = index
        self.unwrap = unwrap
class ResultProxy:
    """Lazy handle over all output parts of a submitted task."""

    __slots__ = ["task", "task_outputs", "n_outputs"]

    def __init__(self, task, task_outputs):
        # task_outputs: outputs produced per worker; the total number of
        # parts is task_outputs * task.n_workers.
        self.task = task
        self.task_outputs = task_outputs
        self.n_outputs = task_outputs * task.n_workers

    def __len__(self):
        return self.n_outputs

    def __getitem__(self, idx):
        if not 0 <= idx < self.n_outputs:
            # Fix: raise IndexError (a subclass of the generic Exception
            # raised before) so the proxy behaves like a real sequence and
            # iteration/unpacking protocols terminate correctly.
            raise IndexError(
                "Asked element at index {}, but there are only {} parts".format(
                    idx, self.n_outputs
                )
            )
        return ResultPartProxy(self.task, self.task_outputs, idx)

    def __iter__(self):
        return iter(
            ResultPartProxy(self.task, self.task_outputs, output_id)
            for output_id in range(self.n_outputs)
        )
class ResultPartProxy:
    """Handle for one individual output part of a task."""

    __slots__ = ["task", "task_outputs", "output_idx"]

    def __init__(self, task, task_outputs, output_idx):
        self.task = task
        self.task_outputs = task_outputs
        self.output_idx = output_idx

    def __repr__(self):
        return f"<RPP task={self.task} outputs={self.task_outputs} idx={self.output_idx}>"
class TaskOutput:
    """Reference to a single output slot of a task (used for in/out arguments)."""

    __slots__ = ["task", "output_id"]

    def __init__(self, task, output_id):
        self.task = task
        self.output_id = output_id

    def __repr__(self):
        return f"<TaskOutput task_id={self.task.task_id} output_id={self.output_id}>"
| StarcoderdataPython |
1733796 | <filename>lib/sldr/hunspell.py
#!/usr/bin/python3
class Aff:
    """Parser for a hunspell .aff (affix) file.

    Table directives (AF, REP, MAP, PHONE, BREAK, COMPOUNDRULE, ICONV, OCONV)
    are stored as lists of their sub-entry token lists; PFX/SFX rules are
    grouped per affix flag in ``self.pfx`` / ``self.sfx``, with each rule's
    cross-product flag kept in ``self.pfx_cross`` / ``self.sfx_cross``.
    Any other directive becomes a plain attribute holding the rest of its
    line as a token list.
    """

    def __init__(self, filename):
        self.fname = filename
        # Initialize the affix tables *before* parsing: parse() appends into
        # them.  (The original created them afterwards, so parse() crashed
        # with AttributeError on the first PFX/SFX entry, and any parsed
        # data would have been overwritten.)
        self.pfx = {}
        self.sfx = {}
        self.sfx_cross = {}
        self.pfx_cross = {}
        self.parse(filename)

    def parse(self, filename):
        """Read and interpret *filename*; raises SyntaxError on malformed tables."""
        import re  # local import: this module has no import section of its own

        with open(filename) as inf:
            # was: list(inf.readlines) -- the method was never called
            lines = list(inf.readlines())
        i = 0
        while i < len(lines):
            # Strip comments and surrounding whitespace.
            l = re.sub(r"#.*$", "", lines[i]).strip()
            i += 1  # always advance (the original never incremented -> infinite loop)
            if not len(l):
                continue
            w = l.split()
            if w[0] in ("AF", "REP", "MAP", "PHONE", "BREAK", "COMPOUNDRULE", "ICONV", "OCONV"):
                num = int(w[1])
                setattr(self, w[0].lower(), [])
                for j in range(num):
                    s = re.sub(r"#.*$", "", lines[i + j]).strip()
                    sw = s.split()  # was: s.plit() (typo)
                    if sw[0] != w[0]:  # was: sw != w[0] (list vs. str, always true)
                        raise SyntaxError("Subelement {} is not of header type {}".format(s, w[0]))
                    getattr(self, w[0].lower()).append(sw[1:])
                i += num  # skip the consumed sub-entries
            elif w[0] in ('PFX', 'SFX'):
                # Header syntax: PFX <flag> <cross_product Y/N> <count>
                # Record the cross-product flag per affix flag (the original
                # line for this was invalid syntax and commented out).
                getattr(self, w[0].lower() + "_cross")[w[1]] = (w[2].upper() == "Y")
                num = int(w[3])
                for j in range(num):
                    s = re.sub(r"#.*$", "", lines[i + j]).strip()
                    sw = s.split()
                    if sw[0] != w[0]:
                        raise SyntaxError("Subelement {} is not of header type {}".format(s, w[0]))
                    getattr(self, sw[0].lower()).setdefault(w[1].lower(), []).append(sw[1:])
                i += num
            elif len(w):
                # Generic directive, e.g. SET, TRY, FLAG, IGNORE.
                setattr(self, w[0].lower(), w[1:])
class Word:
    """A single dictionary entry: the word itself plus its affix classes.

    Morphological fields from the .dic line (``key:value`` pairs) are
    attached as extra attributes by ``Dic.parse``.
    """

    def __init__(self, name, classes):
        self.word = name
        self.classes = classes


class Dic:
    """Parser for a hunspell .dic (word list) file.

    ``self.words`` maps a normalized (lower-cased, IGNORE-stripped) form to
    a :class:`Word`; all-caps and title-case variants of an entry share the
    same ``Word`` object.
    """

    def __init__(self, filename, aff=None):
        self.fname = filename
        # words must exist before parse() runs (the original initialized it
        # afterwards, so parse() crashed on the first entry).
        self.words = {}
        self.parse(filename, aff)

    def parse(self, filename, aff):
        """Read *filename*, optionally using *aff* for FLAG type and IGNORE chars."""
        classtype = "short"
        ignorechars = ""
        if aff is not None:
            # was: numclasschar = getattr(...) -- assigned to the wrong
            # variable, so the FLAG setting was silently ignored
            classtype = getattr(aff, 'flag', 'short')
            ignorechars = getattr(aff, 'ignore', '')
            # Aff stores directives as token lists; normalize to plain strings.
            if isinstance(classtype, (list, tuple)):
                classtype = classtype[0] if classtype else "short"
            ignorechars = "".join(ignorechars)
        with open(filename) as inf:
            for i, l in enumerate(inf.readlines()):
                w = l.split()
                if not w:
                    continue  # skip blank lines (previously an IndexError)
                if i == 0 and len(w) == 1:
                    # The first line may hold the (informational) entry count.
                    try:
                        int(w[0])  # was: n == int(w[0]) -> NameError on 'n'
                    except ValueError:
                        pass
                    else:
                        continue
                # Entry syntax: word[/classes] [key:value ...].  The original
                # fixed 3-way unpack of (w[0] + "//").split("/") raised
                # ValueError for any entry that actually had classes.
                parts = w[0].split("/")
                d = parts[0]
                c = parts[1] if len(parts) > 1 else ""
                matched = "".join(x for x in d.lower() if x not in ignorechars)
                classes = None
                if len(c):
                    if classtype == "short":
                        classes = c
                    elif classtype == "long":
                        # Two-character flags: split the class string pairwise.
                        classes = ["".join(x) for x in zip(c[::2], c[1::2])]  # was: zip(s..., s...) with undefined s
                    elif classtype == "num":
                        classes = c.split(",")
                self.words[matched] = Word(d, classes)
                # Case variants share the same entry object.
                if d == d.upper():
                    self.words[matched.upper()] = self.words[matched]
                elif d == d.title():
                    self.words[matched.title()] = self.words[matched]
                # Attach morphological fields (e.g. "po:noun") as attributes;
                # split only on the first ':' so values may contain colons.
                for p in w[1:]:
                    (t, v) = p.split(":", 1)
                    setattr(self.words[matched], t, v)
class Hunspell:
    """Convenience wrapper: load both halves of a hunspell dictionary.

    ``filename`` is the common path prefix (without the .aff/.dic extension).
    """

    def __init__(self, filename):
        self.aff = Aff(filename+".aff")
        self.dic = Dic(filename+".dic")
| StarcoderdataPython |
3333578 | from .camera import Camera
from .heartbeat import Heartbeat
from .mse_motor import Motor
from .mse_robot import Robot
from .image import bgr8_to_jpeg
from .object_detection import ObjectDetector | StarcoderdataPython |
3379345 | <gh_stars>0
import time
import igraph as ig
import agenspy.graph
if __name__ == '__main__':
    # Create (replacing any existing one) the 'kegg' graph in the 'test'
    # database via agenspy.
    kegg = agenspy.graph.Graph('kegg',
                               replace=True,
                               dbname='test')
    # Load the KEGG network from the bundled compressed GraphML file and
    # report its vertex/edge counts.
    graph= ig.Graph.Read_GraphMLz('graphs/kegg.graphml.gz')
    print(len(graph.vs))
    print(len(graph.es))
    start = time.time()
    # Import the igraph structure into the database graph, labelling nodes
    # as 'gene' and taking edge labels from the 'interaction' attribute;
    # time the import.
    kegg.create_from_igraph(graph,
                            node_label='gene',
                            edge_label_attr='interaction',
                            strip_attrs=True)
    print('--- time: %s seconds ---' %(time.time()-start))
    print(kegg.nv)
    print(kegg.ne)
    # Persist the import and release the database connection.
    kegg.commit()
    kegg.close()
| StarcoderdataPython |
9741 | <filename>muse_for_anything/api/v1_api/taxonomy_items.py
"""Module containing the taxonomy items API endpoints of the v1 API."""
from datetime import datetime
from sqlalchemy.sql.schema import Sequence
from muse_for_anything.db.models.taxonomies import (
Taxonomy,
TaxonomyItem,
TaxonomyItemRelation,
TaxonomyItemVersion,
)
from marshmallow.utils import INCLUDE
from flask_babel import gettext
from muse_for_anything.api.util import template_url_for
from typing import Any, Callable, Dict, List, Optional, Union, cast
from flask.helpers import url_for
from flask.views import MethodView
from sqlalchemy.sql.expression import asc, desc, literal
from sqlalchemy.orm.query import Query
from sqlalchemy.orm import selectinload
from flask_smorest import abort
from http import HTTPStatus
from .root import API_V1
from ..base_models import (
ApiLink,
ApiResponse,
ChangedApiObject,
ChangedApiObjectSchema,
CursorPage,
CursorPageArgumentsSchema,
CursorPageSchema,
DynamicApiResponseSchema,
NewApiObject,
NewApiObjectSchema,
)
from ...db.db import DB
from ...db.pagination import get_page_info
from ...db.models.namespace import Namespace
from ...db.models.ontology_objects import OntologyObjectType, OntologyObjectTypeVersion
from .models.ontology import (
TaxonomyItemRelationPostSchema,
TaxonomyItemRelationSchema,
TaxonomyItemSchema,
TaxonomySchema,
)
from .namespace_helpers import (
query_params_to_api_key,
)
from .taxonomy_helpers import (
action_links_for_taxonomy_item,
action_links_for_taxonomy_item_relation,
create_action_link_for_taxonomy_item_relation_page,
nav_links_for_taxonomy_item,
nav_links_for_taxonomy_item_relation,
taxonomy_item_relation_to_api_link,
taxonomy_item_relation_to_api_response,
taxonomy_item_relation_to_taxonomy_item_relation_data,
taxonomy_item_to_api_link,
taxonomy_item_to_api_response,
taxonomy_item_to_taxonomy_item_data,
taxonomy_to_api_response,
taxonomy_to_items_links,
taxonomy_to_taxonomy_data,
)
@API_V1.route(
    "/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/"
)
class TaxonomyItemView(MethodView):
    """Endpoint for a single taxonomy item.

    Supports GET (read), PUT (new version), POST (restore a soft-deleted
    item together with its relations) and DELETE (soft delete the item and
    its relations).
    """

    def _check_path_params(self, namespace: str, taxonomy: str, taxonomy_item: str):
        # All path ids must be numeric strings; abort with 400 otherwise.
        if not namespace or not namespace.isdigit():
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext("The requested namespace id has the wrong format!"),
            )
        if not taxonomy or not taxonomy.isdigit():
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext("The requested taxonomy id has the wrong format!"),
            )
        if not taxonomy_item or not taxonomy_item.isdigit():
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext("The requested taxonomy item id has the wrong format!"),
            )

    def _get_taxonomy_item(
        self, namespace: str, taxonomy: str, taxonomy_item: str
    ) -> TaxonomyItem:
        # Load the item (eagerly loading its current ancestors) and verify
        # that it belongs to the given taxonomy and namespace; 404 otherwise.
        namespace_id = int(namespace)
        taxonomy_id = int(taxonomy)
        taxonomy_item_id = int(taxonomy_item)
        found_taxonomy_item: Optional[TaxonomyItem] = (
            TaxonomyItem.query.options(selectinload(TaxonomyItem.current_ancestors))
            .filter(
                TaxonomyItem.id == taxonomy_item_id,
                TaxonomyItem.taxonomy_id == taxonomy_id,
            )
            .first()
        )
        if (
            found_taxonomy_item is None
            or found_taxonomy_item.taxonomy.namespace_id != namespace_id
        ):
            abort(HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item not found."))
        return found_taxonomy_item  # is not None because abort raises exception

    def _check_if_taxonomy_modifiable(self, taxonomy: Taxonomy):
        # Reject modifications when the enclosing namespace or the taxonomy
        # itself is soft-deleted (409 conflict).
        if taxonomy.namespace.deleted_on is not None:
            # cannot modify deleted namespace!
            abort(
                HTTPStatus.CONFLICT,
                message=gettext(
                    "Namespace is marked as deleted and cannot be modified further."
                ),
            )
        if taxonomy.deleted_on is not None:
            # cannot modify deleted namespace!
            abort(
                HTTPStatus.CONFLICT,
                message=gettext(
                    "Taxonomy is marked as deleted and cannot be modified further."
                ),
            )

    def _check_if_modifiable(self, taxonomy_item: TaxonomyItem):
        # Item modifications additionally require the item itself to be live.
        self._check_if_taxonomy_modifiable(taxonomy=taxonomy_item.taxonomy)
        if taxonomy_item.deleted_on is not None:
            # cannot modify deleted taxonomy!
            abort(
                HTTPStatus.CONFLICT,
                message=gettext(
                    "Taxonomy item is marked as deleted and cannot be modified further."
                ),
            )

    @API_V1.response(DynamicApiResponseSchema(TaxonomyItemSchema()))
    def get(self, namespace: str, taxonomy: str, taxonomy_item: str, **kwargs: Any):
        """Get a single taxonomy item."""
        self._check_path_params(
            namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
        )
        found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
            namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
        )
        # Embed parents, child relations and child items into the response.
        embedded: List[ApiResponse] = []
        for relation in found_taxonomy_item.current_ancestors:
            embedded.append(taxonomy_item_to_api_response(relation.taxonomy_item_source))
        for relation in found_taxonomy_item.current_related:
            embedded.append(taxonomy_item_relation_to_api_response(relation))
            embedded.append(taxonomy_item_to_api_response(relation.taxonomy_item_target))
        return ApiResponse(
            links=[
                ApiLink(
                    href=url_for(
                        "api-v1.NamespacesView",
                        _external=True,
                        **{"item-count": 50},
                        sort="name",
                    ),
                    rel=("first", "page", "collection", "nav"),
                    resource_type="ont-namespace",
                    schema=url_for(
                        "api-v1.ApiSchemaView", schema_id="Namespace", _external=True
                    ),
                ),
                *nav_links_for_taxonomy_item(found_taxonomy_item),
                *action_links_for_taxonomy_item(found_taxonomy_item),
            ],
            embedded=embedded,
            data=taxonomy_item_to_taxonomy_item_data(found_taxonomy_item),
        )

    @API_V1.arguments(TaxonomyItemSchema())
    @API_V1.response(DynamicApiResponseSchema(NewApiObjectSchema()))
    def put(self, data, namespace: str, taxonomy: str, taxonomy_item: str):
        """Update a taxonomy item.

        Creates a new TaxonomyItemVersion rather than mutating in place, so
        the item's history is preserved.
        """
        self._check_path_params(
            namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
        )
        found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
            namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
        )
        self._check_if_modifiable(found_taxonomy_item)
        taxonomy_item_version = TaxonomyItemVersion(
            taxonomy_item=found_taxonomy_item,
            version=found_taxonomy_item.current_version.version + 1,
            name=data["name"],
            description=data.get("description", ""),
            sort_key=data.get("sort_key", 10),
        )
        found_taxonomy_item.current_version = taxonomy_item_version
        DB.session.add(found_taxonomy_item)
        DB.session.add(taxonomy_item_version)
        DB.session.commit()
        taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
        taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
        return ApiResponse(
            links=[taxonomy_item_link],
            embedded=[taxonomy_item_data],
            data=ChangedApiObject(
                self=ApiLink(
                    href=url_for(
                        "api-v1.TaxonomyItemView",
                        namespace=namespace,
                        taxonomy=taxonomy,
                        taxonomy_item=taxonomy_item,
                        _external=True,
                    ),
                    rel=(
                        "update",
                        "put",
                        "ont-taxonomy-item",
                    ),
                    resource_type="changed",
                ),
                changed=taxonomy_item_link,
            ),
        )

    @API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
    def post(self, namespace: str, taxonomy: str, taxonomy_item: str):  # restore action
        """Restore a deleted taxonomy item."""
        self._check_path_params(
            namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
        )
        found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
            namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
        )
        self._check_if_taxonomy_modifiable(found_taxonomy_item.taxonomy)
        changed_links: List[ApiLink] = []
        embedded: List[ApiResponse] = []
        # only actually restore when not already restored
        if found_taxonomy_item.deleted_on is not None:
            # restore taxonomy item
            deleted_timestamp = found_taxonomy_item.deleted_on
            found_taxonomy_item.deleted_on = None
            # also restore relations
            # Only relations that were deleted together with this item (same
            # timestamp) are restored, and only towards live items.
            ancestors: Sequence[TaxonomyItemRelation] = TaxonomyItemRelation.query.filter(
                TaxonomyItemRelation.taxonomy_item_target_id == found_taxonomy_item.id,
                TaxonomyItemRelation.deleted_on == deleted_timestamp,
            ).all()
            ancestor_ids = set()
            relation: TaxonomyItemRelation
            for relation in ancestors:
                if relation.taxonomy_item_source.deleted_on is not None:
                    continue  # do not restore relations to deleted items
                ancestor_ids.add(relation.taxonomy_item_source_id)
                relation.deleted_on = None
                DB.session.add(relation)

            def produces_circle(relation: TaxonomyItemRelation) -> bool:
                # True if restoring this child relation would create a cycle
                # back to one of the restored ancestors.
                if relation.taxonomy_item_target_id in ancestor_ids:
                    return True
                for rel in relation.taxonomy_item_target.current_related:
                    if produces_circle(rel):
                        return True
                return False

            children: Sequence[TaxonomyItemRelation] = TaxonomyItemRelation.query.filter(
                TaxonomyItemRelation.taxonomy_item_source_id == found_taxonomy_item.id,
                TaxonomyItemRelation.deleted_on == deleted_timestamp,
            ).all()
            for relation in children:
                if relation.taxonomy_item_target.deleted_on is not None:
                    continue  # do not restore relations to deleted items
                if produces_circle(relation):
                    continue
                relation.deleted_on = None
                DB.session.add(relation)
            DB.session.add(found_taxonomy_item)
            DB.session.commit()
            # add changed items to be embedded into the response
            for relation in found_taxonomy_item.current_ancestors:
                changed_links.append(taxonomy_item_relation_to_api_link(relation))
                embedded.append(taxonomy_item_relation_to_api_response(relation))
                changed_links.append(
                    taxonomy_item_to_api_link(relation.taxonomy_item_source)
                )
                embedded.append(
                    taxonomy_item_to_api_response(relation.taxonomy_item_source)
                )
            for relation in found_taxonomy_item.current_related:
                changed_links.append(taxonomy_item_relation_to_api_link(relation))
                embedded.append(taxonomy_item_relation_to_api_response(relation))
                changed_links.append(
                    taxonomy_item_to_api_link(relation.taxonomy_item_target)
                )
                embedded.append(
                    taxonomy_item_to_api_response(relation.taxonomy_item_target)
                )
        taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
        taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
        taxonomy_link = taxonomy_to_taxonomy_data(found_taxonomy_item.taxonomy).self
        taxonomy_data = taxonomy_to_api_response(found_taxonomy_item.taxonomy)
        return ApiResponse(
            links=[taxonomy_item_link, taxonomy_link, *changed_links],
            embedded=[taxonomy_item_data, taxonomy_data, *embedded],
            data=ChangedApiObject(
                self=ApiLink(
                    href=url_for(
                        "api-v1.TaxonomyItemView",
                        namespace=namespace,
                        taxonomy=taxonomy,
                        taxonomy_item=taxonomy_item,
                        _external=True,
                    ),
                    rel=(
                        "restore",
                        "post",
                        "ont-taxonomy-item",
                    ),
                    resource_type="changed",
                ),
                changed=taxonomy_item_link,
            ),
        )

    @API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
    def delete(self, namespace: str, taxonomy: str, taxonomy_item: str):  # delete action
        """Delete a taxonomy item.

        Performs a soft delete: the item and all of its incoming/outgoing
        relations get the same ``deleted_on`` timestamp, which is what the
        restore action in ``post`` uses to find them again.
        """
        self._check_path_params(
            namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
        )
        found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
            namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
        )
        self._check_if_taxonomy_modifiable(found_taxonomy_item.taxonomy)
        changed_links: List[ApiLink] = []
        embedded: List[ApiResponse] = []
        # only actually delete when not already deleted
        if found_taxonomy_item.deleted_on is None:
            # delete taxonomy item
            deleted_timestamp = datetime.utcnow()
            found_taxonomy_item.deleted_on = deleted_timestamp
            # also delete incoming and outgoing relations to remove them
            # from relations of existing items
            # Capture the live relation lists before the commit; afterwards
            # the 'current_*' collections no longer contain them.
            ancestors = found_taxonomy_item.current_ancestors
            for relation in found_taxonomy_item.current_ancestors:
                relation.deleted_on = deleted_timestamp
                DB.session.add(relation)
            related = found_taxonomy_item.current_related
            for relation in found_taxonomy_item.current_related:
                relation.deleted_on = deleted_timestamp
                DB.session.add(relation)
            DB.session.add(found_taxonomy_item)
            DB.session.commit()
            # add changed items to be embedded into the response
            for relation in ancestors:
                changed_links.append(taxonomy_item_relation_to_api_link(relation))
                embedded.append(taxonomy_item_relation_to_api_response(relation))
                changed_links.append(
                    taxonomy_item_to_api_link(relation.taxonomy_item_source)
                )
                embedded.append(
                    taxonomy_item_to_api_response(relation.taxonomy_item_source)
                )
            for relation in related:
                changed_links.append(taxonomy_item_relation_to_api_link(relation))
                embedded.append(taxonomy_item_relation_to_api_response(relation))
                changed_links.append(
                    taxonomy_item_to_api_link(relation.taxonomy_item_target)
                )
                embedded.append(
                    taxonomy_item_to_api_response(relation.taxonomy_item_target)
                )
        taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
        taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
        taxonomy_link = taxonomy_to_taxonomy_data(found_taxonomy_item.taxonomy).self
        taxonomy_data = taxonomy_to_api_response(found_taxonomy_item.taxonomy)
        return ApiResponse(
            links=[taxonomy_item_link, taxonomy_link, *changed_links],
            embedded=[taxonomy_item_data, taxonomy_data, *embedded],
            data=ChangedApiObject(
                self=ApiLink(
                    href=url_for(
                        "api-v1.TaxonomyItemView",
                        namespace=namespace,
                        taxonomy=taxonomy,
                        taxonomy_item=taxonomy_item,
                        _external=True,
                    ),
                    rel=(
                        "delete",
                        "ont-taxonomy-item",
                    ),
                    resource_type="changed",
                ),
                changed=taxonomy_item_link,
            ),
        )
@API_V1.route(
    "/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/relations/"
)
class TaxonomyItemRelationsView(MethodView):
    """Endpoint for manipulating taxonomy item relations.

    POST creates a new parent->child relation originating from the item in
    the URL path; cycles within a taxonomy are rejected.
    """

    def _check_path_params(self, namespace: str, taxonomy: str, taxonomy_item: str):
        # All path ids must be numeric strings; abort with 400 otherwise.
        if not namespace or not namespace.isdigit():
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext("The requested namespace id has the wrong format!"),
            )
        if not taxonomy or not taxonomy.isdigit():
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext("The requested taxonomy id has the wrong format!"),
            )
        if not taxonomy_item or not taxonomy_item.isdigit():
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext("The requested taxonomy item id has the wrong format!"),
            )

    def _get_taxonomy_item(
        self, namespace: str, taxonomy: str, taxonomy_item: str
    ) -> TaxonomyItem:
        # Load the item and verify taxonomy/namespace ownership; 404 otherwise.
        namespace_id = int(namespace)
        taxonomy_id = int(taxonomy)
        taxonomy_item_id = int(taxonomy_item)
        found_taxonomy_item: Optional[TaxonomyItem] = TaxonomyItem.query.filter(
            TaxonomyItem.id == taxonomy_item_id,
            TaxonomyItem.taxonomy_id == taxonomy_id,
        ).first()

        if (
            found_taxonomy_item is None
            or found_taxonomy_item.taxonomy.namespace_id != namespace_id
        ):
            abort(HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item not found."))
        return found_taxonomy_item  # is not None because abort raises exception

    def _check_if_modifiable(self, taxonomy_item: TaxonomyItem):
        # Reject modifications when the namespace, taxonomy or item itself
        # is soft-deleted (409 conflict).
        taxonomy = taxonomy_item.taxonomy
        if taxonomy.namespace.deleted_on is not None:
            # cannot modify deleted namespace!
            abort(
                HTTPStatus.CONFLICT,
                message=gettext(
                    "Namespace is marked as deleted and cannot be modified further."
                ),
            )
        if taxonomy.deleted_on is not None:
            # cannot modify deleted namespace!
            abort(
                HTTPStatus.CONFLICT,
                message=gettext(
                    "Taxonomy is marked as deleted and cannot be modified further."
                ),
            )
        if taxonomy_item.deleted_on is not None:
            # cannot modify deleted taxonomy!
            abort(
                HTTPStatus.CONFLICT,
                message=gettext(
                    "Taxonomy item is marked as deleted and cannot be modified further."
                ),
            )

    def _check_item_circle(
        self,
        item_target: TaxonomyItem,
        item_source: TaxonomyItem,
        original_target: Optional[TaxonomyItem] = None,
    ):
        """Check for a path from target to source which would form a circular dependency. Abort if such a path is found!"""
        # Depth-first walk over the (non-deleted) descendants of item_target.
        if original_target is None:
            original_target = item_target
        relation: TaxonomyItemRelation
        for relation in item_target.current_related:
            if relation.taxonomy_item_target.deleted_on is not None:
                continue  # exclude deleted items as targets
            if relation.taxonomy_item_target_id == item_source.id:
                abort(
                    HTTPStatus.CONFLICT,
                    message=gettext(
                        "Cannot add a relation from %(target)s to %(source)s as it would create a circle!",
                        target=original_target.name,
                        source=item_source.name,
                    ),
                )
            else:
                self._check_item_circle(
                    item_target=relation.taxonomy_item_target,
                    item_source=item_source,
                    original_target=original_target,
                )

    @API_V1.arguments(TaxonomyItemRelationPostSchema())
    @API_V1.response(DynamicApiResponseSchema(NewApiObjectSchema()))
    def post(
        self,
        data: Dict[str, str],
        namespace: str,
        taxonomy: str,
        taxonomy_item: str,
    ):
        """Create a new relation to a taxonomy item."""
        self._check_path_params(
            namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
        )
        # The target item must live in the same namespace and taxonomy.
        if namespace != data["namespace_id"] or taxonomy != data["taxonomy_id"]:
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext(
                    "Cannot create a relation to a taxonomy item of a different taxonomy!"
                ),
            )
        found_taxonomy_item = self._get_taxonomy_item(
            namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
        )
        self._check_if_modifiable(found_taxonomy_item)
        found_taxonomy_item_target = self._get_taxonomy_item(
            namespace=namespace, taxonomy=taxonomy, taxonomy_item=data["taxonomy_item_id"]
        )
        # Abort with 409 if the new edge would close a cycle.
        self._check_item_circle(found_taxonomy_item_target, found_taxonomy_item)
        relation = TaxonomyItemRelation(
            taxonomy_item_source=found_taxonomy_item,
            taxonomy_item_target=found_taxonomy_item_target,
        )
        DB.session.add(relation)
        DB.session.commit()
        taxonomy_item_relation_link = (
            taxonomy_item_relation_to_taxonomy_item_relation_data(relation).self
        )
        taxonomy_item_relation_data = taxonomy_item_relation_to_api_response(relation)
        taxonomy_item_source_link = taxonomy_item_to_api_link(found_taxonomy_item)
        taxonomy_item_source_data = taxonomy_item_to_api_response(found_taxonomy_item)
        taxonomy_item_target_link = taxonomy_item_to_api_link(found_taxonomy_item_target)
        taxonomy_item_target_data = taxonomy_item_to_api_response(
            found_taxonomy_item_target
        )
        self_link = create_action_link_for_taxonomy_item_relation_page(
            namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
        )
        self_link.rel = (*self_link.rel, "ont-taxonomy-item-relation")
        self_link.resource_type = "new"
        return ApiResponse(
            links=[
                taxonomy_item_relation_link,
                taxonomy_item_source_link,
                taxonomy_item_target_link,
            ],
            embedded=[
                taxonomy_item_relation_data,
                taxonomy_item_source_data,
                taxonomy_item_target_data,
            ],
            data=NewApiObject(
                self=self_link,
                new=taxonomy_item_relation_link,
            ),
        )
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/relations/<string:relation>/"
)
class TaxonomyItemRelationView(MethodView):
"""Endpoint for removing taxonomy item relations."""
    def _check_path_params(
        self, namespace: str, taxonomy: str, taxonomy_item: str, relation: str
    ):
        # All path ids must be numeric strings; abort with 400 otherwise.
        if not namespace or not namespace.isdigit():
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext("The requested namespace id has the wrong format!"),
            )
        if not taxonomy or not taxonomy.isdigit():
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext("The requested taxonomy id has the wrong format!"),
            )
        if not taxonomy_item or not taxonomy_item.isdigit():
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext("The requested taxonomy item id has the wrong format!"),
            )
        if not relation or not relation.isdigit():
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext(
                    "The requested taxonomy item relation id has the wrong format!"
                ),
            )
    def _get_taxonomy_item_relation(
        self, namespace: str, taxonomy: str, taxonomy_item: str, relation: str
    ) -> TaxonomyItemRelation:
        # Load the relation originating from taxonomy_item and verify the
        # whole ownership chain (item -> taxonomy -> namespace); 404 otherwise.
        namespace_id = int(namespace)
        taxonomy_id = int(taxonomy)
        taxonomy_item_id = int(taxonomy_item)
        relation_id = int(relation)
        found_taxonomy_item_relation: Optional[
            TaxonomyItemRelation
        ] = TaxonomyItemRelation.query.filter(
            TaxonomyItemRelation.id == relation_id,
            TaxonomyItemRelation.taxonomy_item_source_id == taxonomy_item_id,
        ).first()

        if (
            found_taxonomy_item_relation is None
            or found_taxonomy_item_relation.taxonomy_item_source.taxonomy_id
            != taxonomy_id
            or found_taxonomy_item_relation.taxonomy_item_source.taxonomy.namespace_id
            != namespace_id
        ):
            abort(
                HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item relation not found.")
            )
        return found_taxonomy_item_relation  # is not None because abort raises exception
def _check_if_modifiable(self, relation: TaxonomyItemRelation):
taxonomy_item = relation.taxonomy_item_source
taxonomy = taxonomy_item.taxonomy
if taxonomy.namespace.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Namespace is marked as deleted and cannot be modified further."
),
)
if taxonomy.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy is marked as deleted and cannot be modified further."
),
)
if taxonomy_item.deleted_on is not None:
# cannot modify deleted taxonomy item!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item is marked as deleted and cannot be modified further."
),
)
if relation.deleted_on is not None:
# cannot modify deleted item relation!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item relation is marked as deleted and cannot be modified further."
),
)
@API_V1.response(DynamicApiResponseSchema(TaxonomyItemRelationSchema()))
def get(
self,
namespace: str,
taxonomy: str,
taxonomy_item: str,
relation: str,
**kwargs: Any
):
"""Get a single relation."""
self._check_path_params(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
found_relation = self._get_taxonomy_item_relation(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
return ApiResponse(
links=(
*nav_links_for_taxonomy_item_relation(found_relation),
*action_links_for_taxonomy_item_relation(found_relation),
),
data=taxonomy_item_relation_to_taxonomy_item_relation_data(found_relation),
)
@API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
def delete(
self,
namespace: str,
taxonomy: str,
taxonomy_item: str,
relation: str,
**kwargs: Any
):
"""Delete an existing relation."""
self._check_path_params(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
found_relation = self._get_taxonomy_item_relation(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
self._check_if_modifiable(found_relation)
# only actually delete when not already deleted
if found_relation.deleted_on is None:
# delete taxonomy item relation
found_relation.deleted_on = datetime.utcnow()
DB.session.add(found_relation)
DB.session.commit()
relation_link = taxonomy_item_relation_to_taxonomy_item_relation_data(
found_relation
).self
relation_data = taxonomy_item_relation_to_api_response(found_relation)
source_item_link = taxonomy_item_to_api_link(found_relation.taxonomy_item_source)
source_item_data = taxonomy_item_to_api_response(
found_relation.taxonomy_item_source
)
target_item_link = taxonomy_item_to_api_link(found_relation.taxonomy_item_target)
target_item_data = taxonomy_item_to_api_response(
found_relation.taxonomy_item_target
)
return ApiResponse(
links=[relation_link, source_item_link, target_item_link],
embedded=[relation_data, source_item_data, target_item_data],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemRelationView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
_external=True,
),
rel=(
"delete",
"ont-taxonomy-item-relation",
),
resource_type="changed",
),
changed=relation_link,
),
)
@API_V1.route(
    "/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/versions/"
)
class TaxonomyItemVersionsView(MethodView):
    """Endpoint for all versions of a taxonomy item."""
    def get(self, namespace: str, taxonomy: str, taxonomy_item: str, **kwargs: Any):
        # Placeholder: listing all versions of a taxonomy item is not yet
        # implemented; the route currently returns an empty 200 response.
        """TODO."""
@API_V1.route(
    "/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/versions/<string:version>/"
)
class TaxonomyItemVersionView(MethodView):
    """Endpoint for a single version of a taxonomy item."""
    def _check_path_params(
        self, namespace: str, taxonomy: str, taxonomy_item: str, version: str
    ):
        """Abort with 400 unless all four path segments are numeric."""
        if not namespace or not namespace.isdigit():
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext("The requested namespace id has the wrong format!"),
            )
        if not taxonomy or not taxonomy.isdigit():
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext("The requested taxonomy id has the wrong format!"),
            )
        if not taxonomy_item or not taxonomy_item.isdigit():
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext("The requested taxonomy item id has the wrong format!"),
            )
        if not version or not version.isdigit():
            abort(
                HTTPStatus.BAD_REQUEST,
                message=gettext(
                    "The requested taxonomy item version has the wrong format!"
                ),
            )
    def _get_taxonomy_item_version(
        self, namespace: str, taxonomy: str, taxonomy_item: str, version: str
    ) -> TaxonomyItemVersion:
        """Load the version by version number and item id, then verify the
        item belongs to the taxonomy/namespace from the path; 404 otherwise."""
        namespace_id = int(namespace)
        taxonomy_id = int(taxonomy)
        taxonomy_item_id = int(taxonomy_item)
        version_nr = int(version)
        found_taxonomy_item_version: Optional[
            TaxonomyItemVersion
        ] = TaxonomyItemVersion.query.filter(
            TaxonomyItemVersion.version == version_nr,
            TaxonomyItemVersion.taxonomy_item_id == taxonomy_item_id,
        ).first()
        if (
            found_taxonomy_item_version is None
            or found_taxonomy_item_version.taxonomy_item.taxonomy_id != taxonomy_id
            or found_taxonomy_item_version.taxonomy_item.taxonomy.namespace_id
            != namespace_id
        ):
            abort(
                HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item version not found.")
            )
        return found_taxonomy_item_version  # is not None because abort raises exception
    @API_V1.response(DynamicApiResponseSchema(TaxonomyItemSchema()))
    def get(
        self,
        namespace: str,
        taxonomy: str,
        taxonomy_item: str,
        version: str,
        **kwargs: Any
    ):
        """Get a single taxonomy item version."""
        self._check_path_params(
            namespace=namespace,
            taxonomy=taxonomy,
            taxonomy_item=taxonomy_item,
            version=version,
        )
        found_taxonomy_item_version = self._get_taxonomy_item_version(
            namespace=namespace,
            taxonomy=taxonomy,
            taxonomy_item=taxonomy_item,
            version=version,
        )
        # NOTE(review): the version object is passed to the *item* serializer
        # (taxonomy_item_to_taxonomy_item_data); presumably versions expose the
        # same attributes as items -- confirm against the converter.
        return ApiResponse(
            links=[
                ApiLink(
                    href=url_for(
                        "api-v1.NamespacesView",
                        _external=True,
                        **{"item-count": 50},
                        sort="name",
                    ),
                    rel=("first", "page", "collection", "nav"),
                    resource_type="ont-namespace",
                    schema=url_for(
                        "api-v1.ApiSchemaView", schema_id="Namespace", _external=True
                    ),
                ),
                *nav_links_for_taxonomy_item_version(found_taxonomy_item_version),
                *action_links_for_taxonomy_item_version(found_taxonomy_item_version),
            ],
            data=taxonomy_item_to_taxonomy_item_data(found_taxonomy_item_version),
        )
| StarcoderdataPython |
77009 | # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
import testtools
from neutron.agent.common import config
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.cisco.cfg_agent import cfg_agent
from neutron.tests import base
# Shorthand for generating fresh unique ids in the fixtures below.
_uuid = uuidutils.generate_uuid
HOSTNAME = 'myhost'
FAKE_ID = _uuid()
LOG = logging.getLogger(__name__)
def prepare_router_data(enable_snat=None, num_internal_ports=1):
    """Build a fake router dict and its internal port list for agent tests.

    Returns a (router, internal_ports) tuple.  The router includes an
    external gateway port, a hosting device description, and
    ``num_internal_ports`` internal ports; ``enable_snat`` is only added to
    the router when explicitly given.
    """
    external_gateway_port = {
        'id': _uuid(),
        'network_id': _uuid(),
        'fixed_ips': [{'ip_address': '192.168.127.12',
                       'subnet_id': _uuid()}],
        'subnet': {'cidr': '172.16.17.32/24',
                   'gateway_ip': '172.16.17.32'},
    }
    internal_ports = [
        {
            'id': _uuid(),
            'network_id': _uuid(),
            'admin_state_up': True,
            'fixed_ips': [{'ip_address': '35.4.%s.4' % index,
                           'subnet_id': _uuid()}],
            'mac_address': 'ca:fe:de:ad:be:ef',
            'subnet': {'cidr': '35.4.%s.0/24' % index,
                       'gateway_ip': '35.4.%s.1' % index},
        }
        for index in range(num_internal_ports)
    ]
    hosting_device = {
        'id': _uuid(),
        'host_type': 'CSR1kv',
        'ip_address': '172.16.58.3',
        'port': '23',
    }
    router = {
        'id': _uuid(),
        l3_constants.INTERFACE_KEY: internal_ports,
        'routes': [],
        'gw_port': external_gateway_port,
        'hosting_device': hosting_device,
    }
    if enable_snat is not None:
        router['enable_snat'] = enable_snat
    return router, internal_ports
class TestCiscoCfgAgentWIthStateReporting(base.BaseTestCase):
    """Tests for the Cisco cfg agent's registration and state reporting."""
    def setUp(self):
        """Patch the device-manager RPC, state-report RPC, looping call and
        RPC connection so the agent can be constructed without a live
        message bus; registration succeeds by default."""
        self.conf = cfg.ConfigOpts()
        config.register_agent_state_opts_helper(cfg.CONF)
        self.conf.register_opts(base_config.core_opts)
        self.conf.register_opts(cfg_agent.CiscoCfgAgent.OPTS)
        # report_interval 0 disables the periodic state-report loop.
        cfg.CONF.set_override('report_interval', 0, 'AGENT')
        super(TestCiscoCfgAgentWIthStateReporting, self).setUp()
        self.devmgr_plugin_api_cls_p = mock.patch(
            'neutron.plugins.cisco.cfg_agent.cfg_agent.'
            'CiscoDeviceManagementApi')
        devmgr_plugin_api_cls = self.devmgr_plugin_api_cls_p.start()
        self.devmgr_plugin_api = mock.Mock()
        devmgr_plugin_api_cls.return_value = self.devmgr_plugin_api
        self.devmgr_plugin_api.register_for_duty.return_value = True
        self.plugin_reportstate_api_cls_p = mock.patch(
            'neutron.agent.rpc.PluginReportStateAPI')
        plugin_reportstate_api_cls = self.plugin_reportstate_api_cls_p.start()
        self.plugin_reportstate_api = mock.Mock()
        plugin_reportstate_api_cls.return_value = self.plugin_reportstate_api
        self.looping_call_p = mock.patch(
            'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
        self.looping_call_p.start()
        mock.patch('neutron.common.rpc.create_connection').start()
    def test_agent_registration_success(self):
        """Agent construction succeeds when registration returns True."""
        agent = cfg_agent.CiscoCfgAgentWithStateReport(HOSTNAME, self.conf)
        self.assertTrue(agent.devmgr_rpc.register_for_duty(agent.context))
    def test_agent_registration_success_after_2_tries(self):
        """Registration retries until the plugin finally accepts the agent."""
        self.devmgr_plugin_api.register_for_duty = mock.Mock(
            side_effect=[False, False, True])
        # Shrink the retry delay so the test does not sleep for real.
        cfg_agent.REGISTRATION_RETRY_DELAY = 0.01
        agent = cfg_agent.CiscoCfgAgentWithStateReport(HOSTNAME, self.conf)
        self.assertEqual(agent.devmgr_rpc.register_for_duty.call_count, 3)
    def test_agent_registration_fail_always(self):
        """The agent exits when every registration attempt is rejected."""
        self.devmgr_plugin_api.register_for_duty = mock.Mock(
            return_value=False)
        cfg_agent.REGISTRATION_RETRY_DELAY = 0.01
        cfg_agent.MAX_REGISTRATION_ATTEMPTS = 3
        with testtools.ExpectedException(SystemExit):
            cfg_agent.CiscoCfgAgentWithStateReport(HOSTNAME, self.conf)
    def test_agent_registration_no_device_mgr(self):
        """The agent exits when no device manager plugin answers (None)."""
        self.devmgr_plugin_api.register_for_duty = mock.Mock(
            return_value=None)
        cfg_agent.REGISTRATION_RETRY_DELAY = 0.01
        cfg_agent.MAX_REGISTRATION_ATTEMPTS = 3
        with testtools.ExpectedException(SystemExit):
            cfg_agent.CiscoCfgAgentWithStateReport(HOSTNAME, self.conf)
    def test_report_state(self):
        """A fresh agent reports zero total routers in its configuration."""
        agent = cfg_agent.CiscoCfgAgentWithStateReport(HOSTNAME, self.conf)
        agent._report_state()
        self.assertIn('total routers', agent.agent_state['configurations'])
        self.assertEqual(0, agent.agent_state[
            'configurations']['total routers'])
    @mock.patch('neutron.plugins.cisco.cfg_agent.'
                'cfg_agent.CiscoCfgAgentWithStateReport._agent_registration')
    def test_report_state_attribute_error(self, agent_registration):
        """An AttributeError from report_state stops the heartbeat loop."""
        cfg.CONF.set_override('report_interval', 1, 'AGENT')
        self.plugin_reportstate_api.report_state.side_effect = AttributeError
        agent = cfg_agent.CiscoCfgAgentWithStateReport(HOSTNAME, self.conf)
        agent.heartbeat = mock.Mock()
        agent.send_agent_report(None, None)
        self.assertTrue(agent.heartbeat.stop.called)
| StarcoderdataPython |
1010 | """
Utils for creating xdelta patches.
"""
import logging
from subprocess import check_output, CalledProcessError
from shutil import copyfile
from os import remove, path
class PatchChecksumError(Exception):
    """Raised when xdelta3 refuses to apply a patch because the target file's
    checksum does not match the one recorded in the patch."""
    def __init__(self, message, errors):
        super(PatchChecksumError, self).__init__(message)
        # The original accepted ``errors`` and silently discarded it; keep the
        # details on the exception so callers can inspect them.
        self.errors = errors
class Patch:
    """Wrapper around the ``xdelta3`` binary for creating and applying
    binary diff patches.

    ``original`` is the unmodified file, ``filename`` is the patch file, and
    ``edited`` (optional) is the modified file the patch produces.
    """
    # TODO: Abstract out the need for "edited" by just copying the original
    # file.
    def __init__(self, original, filename, edited=None, xdelta_dir='.'):
        self.original = original
        self.edited = edited
        self.filename = filename
        # Need to have this absolute path for xdelta3 to be found.
        self.xdelta_path = path.join(xdelta_dir, 'xdelta3')
    def create(self):
        """Create the patch file ``filename`` from ``original`` -> ``edited``.

        Raises ValueError if no edited file was supplied, and Exception with
        the tool's output if xdelta3 itself fails.
        """
        if self.edited is None:
            # Creating a patch is meaningless without a target file.
            raise ValueError('Patch.create() requires an "edited" file')
        cmd = [
            self.xdelta_path,
            '-f',
            '-s',
            self.original,
            self.edited,
            self.filename,
        ]
        logging.info(cmd)
        try:
            check_output(cmd)
        except CalledProcessError as e:
            raise Exception(e.output)
    def apply(self):
        """Apply the patch, writing the result to ``edited``.

        When no ``edited`` file is set, the original is patched in place via
        a temporary copy, which is removed afterwards.

        Raises PatchChecksumError if xdelta3 rejects the target file.
        """
        if not self.edited:
            # In-place apply: diff against a temp copy of the original.
            copyfile(self.original, self.original + "_temp")
            self.edited = self.original
            self.original = self.original + "_temp"
        cmd = [
            self.xdelta_path,
            '-f',
            '-d',
            '-s',
            self.original,
            self.filename,
            self.edited,
        ]
        logging.info(cmd)
        try:
            check_output(cmd)
        except CalledProcessError:
            raise PatchChecksumError('Target file had incorrect checksum', [])
        finally:
            # Always clean up the temp copy used for in-place patching.
            if self.original.endswith('_temp'):
                remove(self.original)
| StarcoderdataPython |
3288565 | <filename>src/Filter.py
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from scapy.all import *
class EmptyDelegate(QItemDelegate):
    """Item delegate that disables editing for the column it is assigned to:
    createEditor never supplies an editor widget."""
    def __init__(self, parent):
        super(EmptyDelegate, self).__init__(parent)
    def createEditor(self, parent, option, index):
        # The original parameter names shadowed the Qt classes QWidget,
        # QStyleOptionViewItem and QModelIndex; Qt invokes this hook
        # positionally, so renaming them is safe.
        # Returning None tells the view that no editor exists for the cell.
        return None
class Filter(QWidget):
    """Dialog widget for composing a BPF capture-filter string from a table
    of filter templates; emits the result through ``filter_emit``."""
    # Emits the composed filter rule string when OK/Cancel is clicked.
    filter_emit = pyqtSignal(str)
    def __init__(self):
        super().__init__()
        self.initUI()
    def sizeHint(self):
        return QSize(500,500)
    def initUI(self):
        """Build the filter table (name / format / user input) and buttons."""
        # Template reference shown in the read-only "format" column.
        self.filter = {
            'host': "[src or dst] host <ip>",
            'port': "[src or dst] port <port>",
            'proto': "[ip or ip6][src or dst] proto <protocol>",
            'ether host': "ether [src or dst] host <ip>",
            'net': "[src or dst] net <net>",
            'gateway': "gateway <ip>",
            'mask' : "net <net> mask <mask>",
            'vlan': "vlan <ID>"
        }
        # Accumulates the composed "a and b and ..." rule string.
        self.rule = ''
        #self.setWindowTitle("Filter")
        self.layout = QVBoxLayout()
        self.filter_table = QTableWidget(8, 3)
        self.filter_table.setHorizontalHeaderLabels(["名称", "格式", "过滤器"])
        self.filter_table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        self.filter_table.verticalHeader().setVisible(False)
        QTableWidget.resizeColumnsToContents(self.filter_table)
        QTableWidget.resizeRowsToContents(self.filter_table)
        self.filter_table.setItemDelegateForColumn(0, EmptyDelegate(self))  # column 0 is read-only
        self.filter_table.setItemDelegateForColumn(1, EmptyDelegate(self))  # column 1 is read-only
        for i, key in enumerate(self.filter.keys()):
            self.filter_table.setItem(i, 0, QTableWidgetItem(key))
            self.filter_table.setItem(i, 1, QTableWidgetItem(self.filter[key]))
        self.ok_button = QPushButton("OK")
        self.cancel_button = QPushButton("Cancel")
        self.button_layout = QHBoxLayout()
        self.button_layout.addStretch(1)
        self.button_layout.addWidget(self.ok_button)
        self.button_layout.addWidget(self.cancel_button)
        self.temp_widget = QWidget()
        self.temp_widget.setLayout(self.button_layout)
        self.ok_button.clicked.connect(self.ok_action)
        self.cancel_button.clicked.connect(self.cancel_action)
        self.layout.addWidget(self.filter_table)
        self.layout.addWidget(self.temp_widget)
        self.setLayout(self.layout)
        self.setGeometry(300, 100, 900, 500)
    def ok_action(self):
        """Join all non-empty entries of column 2 with " and " and emit.

        NOTE(review): self.rule is never reset here, so clicking OK more than
        once appends to the previously emitted rule -- confirm whether the
        dialog is single-shot or reset self.rule = '' at the top.
        """
        for i, key in enumerate(self.filter.keys()):
            try:
                # item() returns None for untouched cells; the bare except
                # swallows the resulting AttributeError (NOTE(review): a
                # narrow "except AttributeError" would be safer).
                temp_text = self.filter_table.item(i, 2).text()
            except:
                temp_text = ''
            if temp_text != '':
                self.rule += (" and " + temp_text)
        if self.rule != '':
            # Strip the leading " and " (5 characters).
            self.rule = self.rule[5:]
        self.filter_emit.emit(self.rule)  # signal; callers may use this, return_filter() below, or both
    def cancel_action(self):
        # NOTE(review): the signal is declared as pyqtSignal(str); emitting
        # None here presumably raises a TypeError in PyQt5 -- verify, an
        # empty string may be intended.
        self.filter_emit.emit(None)
    def return_filter(self):
        """Return the last composed rule string (pull-style alternative to
        the filter_emit signal)."""
        return self.rule
if __name__ == "__main__":
    # Manual test harness: show the filter dialog standalone.
    app = QApplication(sys.argv)
    win = Filter()
    win.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
3398176 | <reponame>0xYoan/python_cherrytree<filename>tests/test_python_cherrytree.py
#!/usr/bin/env python
"""Tests for `python_cherrytree` package."""
import unittest
from python_cherrytree import python_cherrytree
class TestPython_cherrytree(unittest.TestCase):
    """Tests for `python_cherrytree` package."""
    def setUp(self):
        """Set up test fixtures, if any."""
        # Opens the bundled CherryTree CTF template database as a fixture;
        # the relative path assumes pytest/unittest runs from the repo root.
        self.manager = python_cherrytree.SqlManager("./tests/CTF_template.ctb")
    def tearDown(self):
        """Tear down test fixtures, if any."""
    def test_show_nodes(self):
        """Test show_nodes."""
        # NOTE(review): smoke test only -- no assertion on the output.
        self.manager.show_nodes()
    def test_change_node_name(self):
        """Test change_node_name (renames node id 22 in the fixture)."""
        self.manager.change_node_name("Test", 22)
    def test_add_txt(self):
        """Test add_txt (appends text to node id 24 in the fixture)."""
        self.manager.add_txt("Text", 24)
| StarcoderdataPython |
3260671 | <reponame>hartescout/sqlite-dissect<filename>sqlite_dissect/utilities.py
from binascii import hexlify
from hashlib import md5
from logging import getLogger
from re import compile
from struct import pack
from struct import unpack
from sqlite_dissect.constants import ALL_ZEROS_REGEX
from sqlite_dissect.constants import LOGGER_NAME
from sqlite_dissect.constants import OVERFLOW_HEADER_LENGTH
from sqlite_dissect.constants import BLOB_SIGNATURE_IDENTIFIER
from sqlite_dissect.constants import STORAGE_CLASS
from sqlite_dissect.constants import TEXT_SIGNATURE_IDENTIFIER
from sqlite_dissect.exception import InvalidVarIntError
"""
utilities.py
This script holds general utility functions for reference by the sqlite carving library.
This script holds the following function(s):
calculate_expected_overflow(overflow_byte_size, page_size)
decode_varint(byte_array, offset)
encode_varint(value)
get_class_instance(class_name)
get_md5_hash(string)
get_record_content(serial_type, record_body, offset=0)
get_serial_type_signature(serial_type)
get_storage_class(serial_type)
has_content(byte_array)
"""
def calculate_expected_overflow(overflow_byte_size, page_size):
    """Compute how overflow payload spreads across overflow pages.

    Returns a tuple of (number of overflow pages, content byte size of the
    last overflow page).  Each overflow page holds ``page_size`` minus
    OVERFLOW_HEADER_LENGTH bytes of content (the header is the next-page
    pointer in the SQLite file format).  With no overflow (size <= 0) the
    result is (0, overflow_byte_size).
    """
    overflow_pages = 0
    last_overflow_page_content_size = overflow_byte_size
    # The loop condition already guards against non-positive sizes, so the
    # original's extra "if overflow_byte_size > 0" wrapper was redundant.
    while overflow_byte_size > 0:
        overflow_pages += 1
        last_overflow_page_content_size = overflow_byte_size
        overflow_byte_size = overflow_byte_size - page_size + OVERFLOW_HEADER_LENGTH
    return overflow_pages, last_overflow_page_content_size
def decode_varint(byte_array, offset=0):
    """Decode a SQLite variable-length integer starting at ``offset``.

    Returns a tuple of (signed 64-bit value, number of bytes consumed).
    Bytes one through eight each contribute a 7-bit payload with the high
    bit acting as a continuation flag; a ninth byte, when present,
    contributes a full 8-bit payload.
    """
    unsigned_integer_value = 0
    varint_relative_offset = 0
    for x in xrange(1, 10):
        # Slice-then-ord keeps this working on Python 2 byte strings.
        varint_byte = ord(byte_array[offset + varint_relative_offset:offset + varint_relative_offset + 1])
        varint_relative_offset += 1
        if x == 9:
            # The accumulator was already shifted by 7 at the end of the
            # previous iteration; one more shift makes room so the ninth
            # byte can contribute all 8 bits.
            unsigned_integer_value <<= 1
            unsigned_integer_value |= varint_byte
        else:
            msb_set = varint_byte & 0x80
            varint_byte &= 0x7f
            unsigned_integer_value |= varint_byte
            if msb_set == 0:
                # A clear high bit terminates the varint.
                break
            else:
                unsigned_integer_value <<= 7
    signed_integer_value = unsigned_integer_value
    # Reinterpret the unsigned result as signed 64-bit twos-complement.
    if signed_integer_value & 0x80000000 << 32:
        signed_integer_value -= 0x10000000000000000
    return signed_integer_value, varint_relative_offset
def encode_varint(value):
    """Encode a signed 64-bit integer as a SQLite variable-length integer.

    Returns a bytearray of one to nine bytes.  In every byte but the last
    the high bit is a continuation flag over a 7-bit payload; a ninth byte,
    when present, carries a full 8-bit payload.

    Raises InvalidVarIntError when ``value`` falls outside the signed
    64-bit range.

    Fixes relative to the previous revision: bytes are inserted as ints
    (``bytearray.insert`` rejects the packed one-character strings the old
    code inserted), zero now encodes to b"\\x00" instead of raising
    IndexError, the continuation-bit mask is no longer applied to the
    full-8-bit ninth byte, and the length guard accepts valid nine-byte
    varints.
    """
    max_allowed = 0x7fffffffffffffff
    min_allowed = (max_allowed + 1) - 0x10000000000000000
    if value > max_allowed or value < min_allowed:
        log_message = "The value: {} is not able to be cast into a 64 bit signed integer for encoding."
        log_message = log_message.format(value)
        getLogger(LOGGER_NAME).error(log_message)
        raise InvalidVarIntError(log_message)
    original_value = value
    byte_array = bytearray()
    # Reinterpret negative values as their unsigned 64-bit twos-complement.
    value += 1 << 64 if value < 0 else 0
    if value & 0xff000000 << 32:
        # Values needing more than 56 bits use the nine-byte form: the final
        # byte carries all 8 bits and must keep its high bit intact.
        byte_array.insert(0, value & 0xff)
        value >>= 8
        for _ in xrange(8):
            byte_array.insert(0, (value & 0x7f) | 0x80)
            value >>= 7
    else:
        while value:
            byte_array.insert(0, (value & 0x7f) | 0x80)
            value >>= 7
        if not byte_array:
            # Zero still occupies one byte on disk.
            byte_array.append(0)
        # The low-order byte terminates the varint: clear its continuation bit.
        byte_array[-1] &= 0x7f
    if len(byte_array) > 9:
        # Defensive check: a varint can never exceed nine bytes.
        log_message = "The value: {} produced a varint with a byte array of length: {} beyond the 9 bytes " \
                      "allowed for a varint."
        log_message = log_message.format(original_value, len(byte_array))
        getLogger(LOGGER_NAME).error(log_message)
        raise InvalidVarIntError(log_message)
    return byte_array
def get_class_instance(class_name):
    """Resolve a fully-qualified dotted name ("pkg.module.Class") to the
    object it names.

    Raises ValueError when ``class_name`` contains no module portion, since
    a bare name cannot be imported.
    """
    if "." in class_name:
        path_array = class_name.split(".")
        module = ".".join(path_array[:-1])
        # __import__ returns the top-level package; walk the remaining
        # attributes (sub-modules and finally the target) from there.
        instance = __import__(module)
        for section in path_array[1:]:
            instance = getattr(instance, section)
        return instance
    else:
        log_message = "Class name: {} did not specify needed modules in order to initialize correctly."
        # The previous revision formatted the message with itself
        # (log_message) instead of the offending class_name.
        log_message = log_message.format(class_name)
        getLogger(LOGGER_NAME).error(log_message)
        raise ValueError(log_message)
def get_md5_hash(string):
    """Return the uppercase hexadecimal MD5 digest of the supplied content."""
    return md5(string).hexdigest().upper()
def get_record_content(serial_type, record_body, offset=0):
    """Parse a single column value out of a record body.

    Returns a tuple of (content byte size, parsed value) for the supplied
    serial type, reading from ``record_body`` starting at ``offset``.  The
    serial types follow the SQLite record format; types 10 and 11 are
    reserved and raise.

    NOTE: the b"..." struct formats combined with '\\0' + bytes
    concatenation assume Python 2 byte strings (consistent with the xrange
    use elsewhere in this module).
    """
    # NULL
    if serial_type == 0:
        content_size = 0
        value = None
    # 8-bit twos-complement integer
    elif serial_type == 1:
        content_size = 1
        value = unpack(b">b", record_body[offset:offset + content_size])[0]
    # Big-endian 16-bit twos-complement integer
    elif serial_type == 2:
        content_size = 2
        value = unpack(b">h", record_body[offset:offset + content_size])[0]
    # Big-endian 24-bit twos-complement integer
    elif serial_type == 3:
        content_size = 3
        # Zero-pad to 4 bytes, unpack unsigned, then sign-extend by hand.
        value_byte_array = '\0' + record_body[offset:offset + content_size]
        value = unpack(b">I", value_byte_array)[0]
        if value & 0x800000:
            value -= 0x1000000
    # Big-endian 32-bit twos-complement integer
    elif serial_type == 4:
        content_size = 4
        value = unpack(b">i", record_body[offset:offset + content_size])[0]
    # Big-endian 48-bit twos-complement integer
    elif serial_type == 5:
        content_size = 6
        # Zero-pad to 8 bytes, unpack unsigned, then sign-extend by hand.
        value_byte_array = '\0' + '\0' + record_body[offset:offset + content_size]
        value = unpack(b">Q", value_byte_array)[0]
        if value & 0x800000000000:
            value -= 0x1000000000000
    # Big-endian 64-bit twos-complement integer
    elif serial_type == 6:
        content_size = 8
        value = unpack(b">q", record_body[offset:offset + content_size])[0]
    # Big-endian IEEE 754-2008 64-bit floating point number
    elif serial_type == 7:
        content_size = 8
        value = unpack(b">d", record_body[offset:offset + content_size])[0]
    # Integer constant 0 (schema format == 4)
    elif serial_type == 8:
        content_size = 0
        value = 0
    # Integer constant 1 (schema format == 4)
    elif serial_type == 9:
        content_size = 0
        value = 1
    # These values are not used/reserved and should not be found in sqlite files
    # NOTE(review): bare Exception with no message -- a ValueError with
    # context (as in the final branch) would be more informative.
    elif serial_type == 10 or serial_type == 11:
        raise Exception()
    # A BLOB that is (N-12)/2 bytes in length
    elif serial_type >= 12 and serial_type % 2 == 0:
        content_size = (serial_type - 12) / 2
        value = record_body[offset:offset + content_size]
    # A string in the database encoding and is (N-13)/2 bytes in length. The nul terminator is omitted
    elif serial_type >= 13 and serial_type % 2 == 1:
        content_size = (serial_type - 13) / 2
        value = record_body[offset:offset + content_size]
    else:
        log_message = "Invalid serial type: {} at offset: {} in record body: {}."
        log_message = log_message.format(serial_type, offset, hexlify(record_body))
        getLogger(LOGGER_NAME).error(log_message)
        raise ValueError(log_message)
    return content_size, value
def get_serial_type_signature(serial_type):
    """Collapse variable-length serial types onto their signature markers.

    Fixed-size serial types (< 12) pass through unchanged; types >= 12
    collapse to the BLOB marker when even and the TEXT marker when odd.
    """
    if serial_type < 12:
        return serial_type
    if serial_type % 2 == 0:
        return BLOB_SIGNATURE_IDENTIFIER
    return TEXT_SIGNATURE_IDENTIFIER
def get_storage_class(serial_type):
    """Translate a SQLite serial type into its storage class.

    Serial type 0 is NULL; 1-6, 8 and 9 are INTEGER; 7 is REAL; variable
    length types >= 12 are BLOB when even and TEXT when odd.  Returns None
    for the reserved serial types 10 and 11.
    """
    if serial_type == 0:
        return STORAGE_CLASS.NULL
    if serial_type in [1, 2, 3, 4, 5, 6, 8, 9]:
        return STORAGE_CLASS.INTEGER
    if serial_type == 7:
        return STORAGE_CLASS.REAL
    if serial_type >= 12 and serial_type % 2 == 0:
        return STORAGE_CLASS.BLOB
    # Odd serial types of 13 and greater are TEXT.  (The previous revision
    # tested "% 2 == 0" here, which could never match -- even types were
    # already classified as BLOB above -- so TEXT columns fell through and
    # returned None.)
    if serial_type >= 13 and serial_type % 2 == 1:
        return STORAGE_CLASS.TEXT
def has_content(byte_array):
    """Return True when the byte array holds anything besides zero bytes."""
    # The all-zeros pattern matching the hex dump means the array is empty
    # of content; no match means there is something worth carving.
    return compile(ALL_ZEROS_REGEX).match(hexlify(byte_array)) is None
| StarcoderdataPython |
1633060 | <filename>instance_data/problem_instance_generator.py
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
from problem_printer import ProblemPrinter
# This script generates data for multi capacity dual arm robot assembly application
# There built in assumptions are given in the paper XYZ.
# In short, if an object is picked with tool it has to be put down using the same tool
# Every pick task that requires suction cup, generates a camera task and a place task, and a press task (to be done in that order)
# Every pick task that requires gripper, generates a place task, and a press task (to be done in that order)
# Press tasks requires empty gripper, since the gripper will be used to "press"
# Components are stacked sequentially on fixture 1 and fixture 2,
# then the sub-assembly of fixture 1 is picked up and merged with the sub-assembly of fixture 2.
# It is then picked up, and moved to output location.
# To fully define a problem one needs to give the tasks, which tool they are handled with,
# which order they are assembled on the two fixtures.
# This is done by providing two lists of numbers.
# One list represents the order of assembly of components on one fixture,
# the value of each number represents which tool handled that component.
# Here, we concatenate these two lists, by intersecting the two lists with a -1.
# Given that 0 is gripper (G), and 1 is suction cup (1), the following string:
# [G,S,-1,S,G] represents a problem,
# where component 1 and 4 are picked by gripper,
# and component 2 amd 3 are handled by suckion cup.
# component 2 is placed atop component 1 on fixture 1,
# and component 4 is placed atop component 3 on fixture 2,
# (followed by the picking of sub-assembly of fixture 1, and placing it on fixture 2, as this is always the case)
# Running this script will generate the problem instances of the paper XYZ
# Template task durations shared by all generated problem instances.
durations = {
    "template_pick_duration": 27,
    "template_camera_duration": 30,
    "template_place_duration": 51,
    "template_press_duration": 6,
}
file_prefix = "p_"

# Tool codes: each entry in a fixture definition names the tool that
# handles that component; -1 separates fixture 1 from fixture 2.
G = 0  # gripper
S = 1  # suction cup

# Problem instances from the paper: four tool mixes for each total
# component count from 4 through 10.
fixture_def = [
    # 4 components
    [G, G, -1, G, G],
    [G, S, -1, S, G],
    [S, G, -1, S, G],
    [S, S, -1, S, S],
    # 5 components
    [G, G, -1, G, G, G],
    [G, S, -1, S, S, G],
    [G, S, S, -1, S, G],
    [S, S, S, -1, S, S],
    # 6 components
    [G, G, G, -1, G, G, G],
    [G, S, S, G, -1, S, G],
    [G, S, G, -1, S, S, G],
    [S, S, S, S, -1, S, S],
    # 7 components
    [G, G, G, -1, G, G, G, G],
    [G, S, G, -1, S, G, S, G],
    [S, G, S, G, -1, G, S, G],
    [S, S, S, S, -1, S, S, S],
    # 8 components
    [G, G, G, G, -1, G, G, G, G],
    [G, S, G, S, G, -1, S, G, S],
    [S, G, S, G, -1, S, G, S, G],
    [S, S, S, S, S, -1, S, S, S],
    # 9 components
    [G, G, G, G, -1, G, G, G, G, G],
    [G, S, G, S, -1, G, S, G, S, G],
    [S, G, S, G, S, -1, G, S, G, S],
    [S, S, S, S, S, -1, S, S, S, S],
    # 10 components
    [G, G, G, G, G, -1, G, G, G, G, G],
    [G, S, G, S, G, S, -1, G, S, G, S],
    [S, G, S, G, S, -1, G, S, G, S, G],
    [S, S, S, S, S, S, -1, S, S, S, S],
]
# Give the template durations only to the first instance (id 0); later
# instances omit them -- presumably ProblemPrinter retains or defaults the
# durations, confirm against its implementation.
ppinstance = ProblemPrinter(fixture_def[0], 0, **durations)
ppinstance.FilePrint(file_prefix)
# Remaining problem instances get ids spaced 100 apart.
for i in range(1,len(fixture_def)):
    ppinstance = ProblemPrinter(fixture_def[i], i*100)
    print("Generating model file: ",ppinstance.FilePrint(file_prefix))
| StarcoderdataPython |
4833775 | <gh_stars>1-10
from __future__ import unicode_literals
from django.contrib import admin
from django.template.defaultfilters import truncatechars
from django.utils.translation import ugettext_lazy as _
from reviewboard.reviews.forms import DefaultReviewerForm, GroupForm
from reviewboard.reviews.models import (Comment,
DefaultReviewer,
FileAttachmentComment,
GeneralComment,
Group,
Review,
ReviewRequest,
ReviewRequestDraft,
Screenshot,
ScreenshotComment,
StatusUpdate)
class CommentAdmin(admin.ModelAdmin):
    """Admin configuration for diff comments."""
    list_display = ('truncated_text', 'review_request_id', 'first_line',
                    'num_lines', 'timestamp')
    search_fields = ['text']
    list_filter = ('timestamp',)
    raw_id_fields = ('filediff', 'interfilediff', 'reply_to')
    ordering = ['-timestamp']
    def review_request_id(self, obj):
        """Return the display id of the review request owning this comment."""
        return obj.review.get().review_request.display_id
    review_request_id.short_description = _('Review request ID')
    def truncated_text(self, obj):
        """Return the comment text shortened to 60 characters for the list view."""
        return truncatechars(obj.text, 60)
    truncated_text.short_description = _('Comment Text')
class DefaultReviewerAdmin(admin.ModelAdmin):
    """Admin configuration for default-reviewer rules (file-regex based)."""
    form = DefaultReviewerForm
    filter_horizontal = ('repository', 'groups', 'people',)
    list_display = ('name', 'file_regex')
    raw_id_fields = ('local_site',)
    fieldsets = (
        (_('General Information'), {
            'fields': ('name', 'file_regex', 'local_site'),
            'classes': ['wide'],
        }),
        (_('Reviewers'), {
            'fields': ('groups', 'people'),
        }),
        (_('Repositories'), {
            'description': _('<p>A default reviewer will cover all '
                             'repositories, unless assigned one or more '
                             'specific repositories below.</p>'),
            'fields': ('repository',),
        })
    )
class GroupAdmin(admin.ModelAdmin):
    """Admin configuration for review groups."""
    form = GroupForm
    list_display = ('name', 'display_name', 'mailing_list', 'invite_only',
                    'visible')
    raw_id_fields = ('local_site',)
    fieldsets = (
        (_('General Information'), {
            'fields': ('name', 'display_name', 'mailing_list',
                       'email_list_only', 'visible'),
        }),
        (_('Access Control'), {
            'fields': ('invite_only', 'users', 'local_site',
                       'is_default_group'),
        }),
        (_('State'), {
            'fields': ('incoming_request_count', 'extra_data'),
            'classes': ('collapse',),
        }),
    )
class ReviewAdmin(admin.ModelAdmin):
    """Admin configuration for reviews and replies."""
    list_display = ('review_request', 'user', 'public', 'ship_it',
                    'is_reply', 'timestamp')
    list_filter = ('public', 'timestamp')
    search_fields = ['review_request__summary']
    # Raw-id widgets keep these heavily-populated relations usable.
    raw_id_fields = ('review_request', 'user', 'base_reply_to',
                     'body_top_reply_to', 'body_bottom_reply_to',
                     'comments', 'screenshot_comments',
                     'file_attachment_comments', 'general_comments',
                     'reviewed_diffset')
    fieldsets = (
        (_('General Information'), {
            'fields': ('user', 'review_request', 'public', 'ship_it',
                       'body_top_rich_text', 'body_top',
                       'body_bottom_rich_text', 'body_bottom'),
        }),
        (_('Related Objects'), {
            'fields': ('base_reply_to',
                       'body_top_reply_to',
                       'body_bottom_reply_to',
                       'comments',
                       'screenshot_comments',
                       'file_attachment_comments',
                       'general_comments'),
            'classes': ('collapse',)
        }),
        (_('State'), {
            'fields': ('email_message_id', 'time_emailed', 'extra_data'),
            'classes': ('collapse',)
        })
    )
class ReviewRequestAdmin(admin.ModelAdmin):
    """Admin configuration for review requests, including bulk actions for
    closing (submitted/discarded) and reopening them."""
    list_display = ('summary', 'submitter', 'status', 'public', 'last_updated')
    list_filter = ('public', 'status', 'time_added', 'last_updated',
                   'repository')
    search_fields = ['summary']
    raw_id_fields = ('submitter', 'diffset_history', 'screenshots',
                     'inactive_screenshots', 'file_attachments',
                     'inactive_file_attachments', 'changedescs', 'local_site',
                     'depends_on', 'repository')
    filter_horizontal = ('target_people', 'target_groups')
    fieldsets = (
        (_('General Information'), {
            'fields': ('submitter', 'public', 'status',
                       'summary',
                       'description_rich_text',
                       'description',
                       'testing_done_rich_text',
                       'testing_done',
                       'bugs_closed', 'repository', 'branch',
                       'depends_on', 'commit_id', 'time_added')
        }),
        (_('Reviewers'), {
            'fields': ('target_people', 'target_groups'),
        }),
        (_('Related Objects'), {
            'fields': ('screenshots', 'inactive_screenshots',
                       'file_attachments', 'inactive_file_attachments',
                       'changedescs', 'diffset_history', 'local_site'),
            'classes': ['collapse'],
        }),
        (_('State'), {
            'description': _('<p>This is advanced state that should not be '
                             'modified unless something is wrong.</p>'),
            'fields': ('email_message_id', 'time_emailed',
                       'last_review_activity_timestamp',
                       'shipit_count', 'issue_open_count',
                       'issue_resolved_count', 'issue_dropped_count',
                       'local_id', 'extra_data'),
            'classes': ['collapse'],
        }),
    )
    actions = [
        'close_submitted',
        'close_discarded',
        'reopen',
    ]
    def _set_status(self, request, queryset, status, past_tense):
        """Bulk-update the selected review requests to ``status`` and report
        the row count to the admin user.

        ``past_tense`` is the verb phrase used in the status message (e.g.
        'closed as submitted').  Note that QuerySet.update() performs a
        direct SQL UPDATE and does not call model save() or send signals.
        """
        rows_updated = queryset.update(status=status)
        if rows_updated == 1:
            msg = '1 review request was %s.' % past_tense
        else:
            msg = '%s review requests were %s.' % (rows_updated, past_tense)
        self.message_user(request, msg)
    def close_submitted(self, request, queryset):
        """Admin action: close the selected review requests as submitted."""
        self._set_status(request, queryset, ReviewRequest.SUBMITTED,
                         'closed as submitted')
    close_submitted.short_description = \
        _("Close selected review requests as submitted")
    def close_discarded(self, request, queryset):
        """Admin action: close the selected review requests as discarded."""
        self._set_status(request, queryset, ReviewRequest.DISCARDED,
                         'closed as discarded')
    close_discarded.short_description = \
        _("Close selected review requests as discarded")
    def reopen(self, request, queryset):
        """Admin action: reopen the selected review requests."""
        self._set_status(request, queryset, ReviewRequest.PENDING_REVIEW,
                         'reopened')
    reopen.short_description = _("Reopen selected review requests")
class ReviewRequestDraftAdmin(admin.ModelAdmin):
    """Admin site configuration for unpublished review request drafts."""

    list_display = ('summary', 'submitter', 'last_updated')
    list_filter = ('last_updated',)
    search_fields = ['summary']

    # Raw ID widgets: these relations can point at very large tables.
    raw_id_fields = ('review_request', 'diffset', 'screenshots',
                     'inactive_screenshots', 'changedesc')
    filter_horizontal = ('target_people', 'target_groups')
    fieldsets = (
        (_('General Information'), {
            'fields': ('review_request',
                       'summary',
                       'description_rich_text',
                       'description',
                       'testing_done_rich_text',
                       'testing_done',
                       'depends_on', 'bugs_closed', 'branch', 'commit_id'),
        }),
        (_('Reviewers'), {
            'fields': ('target_people', 'target_groups'),
        }),
        (_('Related Objects'), {
            'fields': ('screenshots', 'inactive_screenshots', 'changedesc',
                       'diffset'),
            'classes': ['collapse'],
        }),
        (_('State'), {
            'fields': ('extra_data',),
        }),
    )
class ScreenshotAdmin(admin.ModelAdmin):
    """Admin site configuration for screenshots."""

    list_display = ('thumb', 'caption', 'image', 'review_request_id')
    list_display_links = ('thumb', 'caption')
    search_fields = ('caption',)

    def review_request_id(self, obj):
        """Return the ID of the review request owning this screenshot."""
        review_request = obj.review_request.get()
        return review_request.id
    review_request_id.short_description = _('Review request ID')
class ScreenshotCommentAdmin(admin.ModelAdmin):
    """Admin site configuration for comments made on screenshots."""

    list_display = ('text', 'screenshot', 'review_request_id', 'timestamp')
    list_filter = ('timestamp',)
    search_fields = ['text']
    raw_id_fields = ('screenshot', 'reply_to')

    def review_request_id(self, obj):
        """Return the ID of the review request this comment belongs to."""
        review = obj.review.get()
        return review.review_request.id
    review_request_id.short_description = _('Review request ID')
class FileAttachmentCommentAdmin(admin.ModelAdmin):
    """Admin site configuration for comments made on file attachments."""

    list_display = ('text', 'file_attachment', 'review_request_id',
                    'timestamp')
    list_filter = ('timestamp',)
    search_fields = ['text']
    raw_id_fields = ('file_attachment', 'reply_to')

    def review_request_id(self, obj):
        """Return the ID of the review request this comment belongs to."""
        review = obj.review.get()
        return review.review_request.id
    review_request_id.short_description = _('Review request ID')
class GeneralCommentAdmin(admin.ModelAdmin):
    """Admin site configuration for general (non-attached) comments."""

    list_display = ('text', 'review_request_id', 'timestamp')
    list_filter = ('timestamp',)
    search_fields = ['text']
    raw_id_fields = ('reply_to',)

    def review_request_id(self, obj):
        """Return the ID of the review request this comment belongs to."""
        review = obj.review.get()
        return review.review_request.id
    review_request_id.short_description = _('Review request ID')
class StatusUpdateAdmin(admin.ModelAdmin):
    """Admin site configuration for status updates on review requests."""

    list_display = ('review_request_id', 'summary', 'description')

    def review_request_id(self, obj):
        """Return the ID of the review request this status update is on."""
        return obj.review_request.id
    review_request_id.short_description = _('Review request ID')
# Register each reviews model with its admin configuration class so it
# appears in the Django administration UI.
admin.site.register(Comment, CommentAdmin)
admin.site.register(DefaultReviewer, DefaultReviewerAdmin)
admin.site.register(FileAttachmentComment, FileAttachmentCommentAdmin)
admin.site.register(GeneralComment, GeneralCommentAdmin)
admin.site.register(Group, GroupAdmin)
admin.site.register(Review, ReviewAdmin)
admin.site.register(ReviewRequest, ReviewRequestAdmin)
admin.site.register(ReviewRequestDraft, ReviewRequestDraftAdmin)
admin.site.register(Screenshot, ScreenshotAdmin)
admin.site.register(ScreenshotComment, ScreenshotCommentAdmin)
admin.site.register(StatusUpdate, StatusUpdateAdmin)
| StarcoderdataPython |
1779881 | <filename>xml_extractions/common_xml_parser_function.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from typing import Optional

from lxml import etree
def read_xml(xml_path: str):
    """
    Parse an XML file.

    :param xml_path: path to the XML file
    :return: the parsed document tree (``etree.parse`` returns an
        ``ElementTree``, not the root element; call ``.getroot()`` on
        the result if the root element is needed)
    :raises FileNotFoundError: if ``xml_path`` does not exist
        (``FileNotFoundError`` subclasses ``OSError``/``IOError``, so
        callers catching the previous ``IOError`` keep working)
    """
    if not os.path.exists(xml_path):
        raise FileNotFoundError(f"File [{xml_path}] doesn't exist!")

    return etree.parse(xml_path)
def replace_none(s: Optional[str]) -> str:
    """
    Normalize a possibly-missing string.

    The original annotation (``s: str``) was wrong: the function is
    explicitly designed to accept ``None``.

    :param s: original string, which may be ``None``
    :return: ``s`` stripped of leading/trailing whitespace, or an empty
        string if ``s`` is ``None``
    """
    return "" if s is None else s.strip()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.