content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import numpy as np
import networkx as nx
groundTruth="E:/Download/social_network/email-Eu-core-department-labels.txt/email-Eu-core-department-labels.txt"
edgeList="E:\Download\social_network\email-EuAll.txt\Email-EuAll.txt"
if __name__ == "__main__":
preData() | [
11748,
299,
32152,
355,
45941,
198,
11748,
3127,
87,
355,
299,
87,
220,
220,
198,
2833,
38782,
2625,
36,
14079,
10002,
14,
14557,
62,
27349,
14,
12888,
12,
36,
84,
12,
7295,
12,
10378,
1823,
12,
23912,
1424,
13,
14116,
14,
12888,
12... | 2.607843 | 102 |
from mirage.libs import ir,utils,io
from mirage.core import module
| [
6738,
5720,
496,
13,
8019,
82,
1330,
4173,
11,
26791,
11,
952,
198,
6738,
5720,
496,
13,
7295,
1330,
8265,
198
] | 3.190476 | 21 |
import os
import json
import numpy as np
| [
11748,
28686,
201,
198,
11748,
33918,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198
] | 2.26087 | 23 |
import Client as client
import communication_serial as little_board
#it's just an example. Change please the name mainly for job with SO Windown and Linux
name_board = '/dev/ttyUSB0'
value_of_baudRate= 9600
b = little_board.Board(name_board, value_of_baudRate)
b.begin()
while True:
#receiving the command
movement = client.read_socket()
#After change the value of comparation
#byte from send. It's a form to say 'you can send... '
if b.read() == b'\x01\r\n':
b.send(movement)
#another kind of byte from send
elif b.read() == b'\x01\r\n':
break
client.finalize()
b.finalize() | [
11748,
20985,
355,
5456,
198,
11748,
6946,
62,
46911,
355,
1310,
62,
3526,
198,
198,
2,
270,
338,
655,
281,
1672,
13,
9794,
3387,
262,
1438,
8384,
329,
1693,
351,
12809,
3086,
593,
290,
7020,
198,
3672,
62,
3526,
796,
31051,
7959,
1... | 2.66383 | 235 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Schooner - Course Management System
# University of Turku / Faculty of Technilogy / Department of Computing
# (c) 2021, Jani Tammi <jasata@utu.fi>
#
# CourseAssistant.py - Data dictionary class for assistant.assistant
# 2021-09-06 Initial version.
#
#
# Consider this more "proper" data object than "Assistant"
#
#
# EOF | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
3059,
2049,
263,
532,
20537,
8549,
4482,
198,
2,
2059,
286,
8484,
84,
1220,
35262,
286,
5429,
19202,
... | 2.923077 | 130 |
# Generated by Django 2.2.3 on 2020-04-25 18:25
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
18,
319,
12131,
12,
3023,
12,
1495,
1248,
25,
1495,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
import pytest
| [
11748,
12972,
9288,
628
] | 3.75 | 4 |
import logging
LOG_LEVEL_NAMES = [
'DEBUG',
'INFO',
'WARNING',
'ERROR',
'CRITICAL',
]
LOG_LEVELS = {getattr(logging, name) for name in LOG_LEVEL_NAMES}
| [
11748,
18931,
628,
198,
25294,
62,
2538,
18697,
62,
45,
29559,
796,
685,
198,
220,
220,
220,
705,
30531,
3256,
198,
220,
220,
220,
705,
10778,
3256,
198,
220,
220,
220,
705,
31502,
3256,
198,
220,
220,
220,
705,
24908,
3256,
198,
22... | 2.17284 | 81 |
# -*- coding: utf-8 -*-
"""
Train, test, predict steps for a graph-based model using a binary conjugate
(two classes on the primal edges)
Structured machine learning, currently using graph-CRF or Edge Convolution Network
Copyright NAVER(C) 2019 JL. Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union�s Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import lxml.etree as etree
from xml_formats.PageXml import PageXml
from util.Shape import ShapeLoader
from graph.Graph_DOM import Graph_DOM
from .GraphBinaryConjugateSegmenter import GraphBinaryConjugateSegmenter
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
220,
220,
220,
16835,
11,
1332,
11,
4331,
4831,
329,
257,
4823,
12,
3106,
2746,
1262,
257,
13934,
11644,
1018,
378,
220,
198,
220,
220,
220,
357,
1154... | 3.155556 | 225 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 TU Wien.
#
# Invenio-Requests is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Base classes for requests in Invenio."""
import abc
import uuid
from datetime import datetime
from invenio_db import db
from invenio_records.models import RecordMetadataBase
from sqlalchemy_utils import UUIDType
class RequestMetadata(db.Model, RecordMetadataBase):
"""Base class for requests of any kind in Invenio."""
__tablename__ = "requests_metadata"
id = db.Column(UUIDType, primary_key=True, default=uuid.uuid4)
# TODO later
# labels: maybe per-community CVs
# prerequisites for each action: like checks in GitHub actions
# assignees: enables notifications? no impact on permissions
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
34,
8,
33448,
309,
52,
370,
2013,
13,
198,
2,
198,
2,
554,
574,
952,
12,
16844,
3558,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
... | 3.146617 | 266 |
import numpy
import environments.atmosphere as atmosphere
constants = {
'g0': 9.80665, #NIST http://physics.nist.gov/Pubs/SP330/sp330.pdf
'm': 5.972365e24, #wgs84
'a': 6378137.00, #wgs84
'omega': 7292115e-10, #wgs84
'r0': 6356766.0, #USSA 1976
'air': {
'M': 28.9644, #USSA 1976
'Cp': 1005.7,
'Cv': 718.0,
'beta': 1.458e-6, #USSA 1976
'S': 110.4, #USSA 1976
'P0': 101325.0, #USSA 1976
'rho0': 1.225, #USSA 1976
'T0': 288.15, #USSA 1976
'mu0': 1.7894e-5, #USSA 1976
}
}
class Atmosphere(atmosphere.GenericAtmosphere):
"""An atmosphere model for earth.
"""
def __init__(self):
"""Constructor
Arguments:
no arguments
Returns:
class instance
"""
super(Atmosphere, self).__init__()
self.constants = constants
air = constants['air']
self._M = air['M']
self._Cp = air['Cp']
self._Cv = air['Cv']
self._beta = air['beta']
self._S = air['S']
self._T0 = air['T0']
self._P0 = air['P0']
self._g = constants['g0']
self._r0 = constants['r0']
self._compute_layer_props()
def T_isa(self, z):
"""Compute the ISA temperature
Valid between -1 and 47 km
Arguments:
z: altitude (m)
Returns:
T: temperature (K)
"""
assert z > -1e3 and z < 47e3, 'z must be between -1 and 47 km'
H = self.geopotential_height(z)
layer = self._get_layer_props(H)
return layer['T0'] + layer['lapse'] * (H - layer['H0'])
def P_isa(self, z):
"""Compute the ISA pressure
Valid between -1 and 47 km
Arguments:
z: altitude (m)
Returns:
P: pressure (Pa)
"""
assert z > -1e3 and z < 47e3, 'z must be between -1 and 47 km'
H = self.geopotential_height(z)
layer = self._get_layer_props(H)
if layer['lapse'] == 0.0:
P = layer['P0'] * numpy.exp(
-self._g / self.R / layer['T0'] * (H - layer['H0']))
else:
P = layer['P0'] * numpy.power(
layer['T0'] / self.T_isa(z),
self._g / self.R / layer['lapse'])
return P
def rho_isa(self, z):
"""Compute the ISA density
Valid between -1 and 47 km
Arguments:
z: altitude (m)
Returns:
rho: density (kg/m3)
"""
assert z > -1e3 and z < 47e3, 'z must be between -1 and 47 km'
return self.P_isa(z) / self.R / self.T_isa(z)
def mu_isa(self, z):
"""Compute the ISA dynamic viscosity
Arguments:
z: altitude (m)
Returns:
mu: dynamic viscosity (N-s/m2)
"""
assert z > -1e3 and z < 47e3, 'z must be between -1 and 47 km'
mu = self._beta * numpy.power(self.T_isa(z), 1.5) / (
self.T_isa(z) + self._S)
return mu
def _compute_layer_props(self):
"""Compute the tables which we'll use for layer properties
This should be called on initialization to prepare tables
Arguments:
no arguments
Returns:
no returns
"""
H = numpy.array([0, 11000.0, 20000.0, 32000.0, 47000.0])
lapse = [-6.5/1000.0, 0.0, 1.0/1000.0, 2.8/1000.0, 0.0]
T = [self._T0,]
P = [self._P0,]
for idx, h in enumerate(H[1:]):
T.append(T[-1] + (h - H[idx]) * lapse[idx])
if lapse[idx] == 0.0:
P_i = P[-1] * numpy.exp(
-self._g / self.R / T[-2] * (h - H[idx]))
else:
P_i = P[-1] * numpy.power(
T[-2] / T[-1],
self._g / self.R / lapse[idx])
P.append(P_i)
self._layers = {
'H': H,
'T': T,
'P': P,
'lapse': lapse}
def _get_layer_props(self, H):
"""Get the properties defining each layer
Arguments:
H: geopotential height (m)
Returns:
props: dict of layer properties containing
H0: layer base height
T0: layer base temperature
P0: layer base temperature
lapse: layer lapse rate
"""
idx = numpy.argmax(self._layers['H'] > H) - 1
idx = numpy.clip(idx, 0, len(self._layers) - 1)
props = {
'H0': self._layers['H'][idx],
'T0': self._layers['T'][idx],
'P0': self._layers['P'][idx],
'lapse': self._layers['lapse'][idx]}
return props
| [
11748,
299,
32152,
198,
11748,
12493,
13,
265,
6384,
1456,
355,
8137,
198,
198,
9979,
1187,
796,
1391,
198,
220,
220,
220,
705,
70,
15,
10354,
860,
13,
1795,
36879,
11,
1303,
45,
8808,
2638,
1378,
746,
23154,
13,
77,
396,
13,
9567,
... | 1.821429 | 2,604 |
# app/order/admin.py
# Django modules
from django.contrib import admin
# Locals
from app.orders.models import Orders, Sales
# Register your models here.
@admin.register(Orders)
@admin.register(Sales)
| [
2,
598,
14,
2875,
14,
28482,
13,
9078,
198,
198,
2,
37770,
13103,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
2,
15181,
874,
198,
6738,
598,
13,
6361,
13,
27530,
1330,
30689,
11,
17329,
198,
198,
2,
17296,
534,
... | 3.136364 | 66 |
from random import *
t = 5
n = 8
print t
for _ in xrange(t):
x = randrange(1,n+1)
print x
for _ in xrange(x):
print randrange(-50,51), randrange(-50,51)
| [
6738,
4738,
1330,
1635,
198,
83,
796,
642,
198,
77,
796,
807,
198,
4798,
256,
198,
1640,
4808,
287,
2124,
9521,
7,
83,
2599,
198,
197,
87,
796,
43720,
9521,
7,
16,
11,
77,
10,
16,
8,
198,
197,
4798,
2124,
198,
197,
1640,
4808,
... | 2.257143 | 70 |
@bot.message_handler(commands=['id', 'Id'])
| [
31,
13645,
13,
20500,
62,
30281,
7,
9503,
1746,
28,
17816,
312,
3256,
705,
7390,
6,
12962,
198
] | 2.444444 | 18 |
from collections import deque
import itertools
########
# PART 1
example = read_input("event2016/day24/example.txt")
assert solve_for(example, False) == 14
p1 = read_input("event2016/day24/input.txt")
answer = solve_for(p1, False)
print("Part 1 =", answer)
assert answer == 502 # check with accepted answer
########
# PART 2
#print(solve_for(example, True))
answer = solve_for(p1, True)
print("Part 2 =", answer)
assert answer == 724 # check with accepted answer
| [
6738,
17268,
1330,
390,
4188,
198,
11748,
340,
861,
10141,
198,
198,
7804,
198,
2,
16652,
352,
628,
628,
628,
198,
20688,
796,
1100,
62,
15414,
7203,
15596,
5304,
14,
820,
1731,
14,
20688,
13,
14116,
4943,
198,
30493,
8494,
62,
1640,
... | 3.077922 | 154 |
from copy import deepcopy
import pytest
from magpylib._src.defaults.defaults_utility import color_validator
from magpylib._src.defaults.defaults_utility import COLORS_MATPLOTLIB_TO_PLOTLY
from magpylib._src.defaults.defaults_utility import get_defaults_dict
from magpylib._src.defaults.defaults_utility import linearize_dict
from magpylib._src.defaults.defaults_utility import magic_to_dict
from magpylib._src.defaults.defaults_utility import MagicProperties
from magpylib._src.defaults.defaults_utility import update_nested_dict
def test_update_nested_dict():
"""test all argument combinations of `update_nested_dicts`"""
# `d` gets updated, that's why we deepcopy it
d = {"a": 1, "b": {"c": 2, "d": None}, "f": None, "g": {"c": None, "d": 2}, "h": 1}
u = {"a": 2, "b": 3, "e": 5, "g": {"c": 7, "d": 5}, "h": {"i": 3}}
res = update_nested_dict(
deepcopy(d), u, same_keys_only=False, replace_None_only=False
)
assert res == {
"a": 2,
"b": 3,
"e": 5,
"f": None,
"g": {"c": 7, "d": 5},
"h": {"i": 3},
}, "failed updating nested dict"
res = update_nested_dict(
deepcopy(d), u, same_keys_only=True, replace_None_only=False
)
assert res == {
"a": 2,
"b": 3,
"f": None,
"g": {"c": 7, "d": 5},
"h": {"i": 3},
}, "failed updating nested dict"
res = update_nested_dict(
deepcopy(d), u, same_keys_only=True, replace_None_only=True
)
assert res == {
"a": 1,
"b": {"c": 2, "d": None},
"f": None,
"g": {"c": 7, "d": 2},
"h": 1,
}, "failed updating nested dict"
res = update_nested_dict(
deepcopy(d), u, same_keys_only=False, replace_None_only=True
)
assert res == {
"a": 1,
"b": {"c": 2, "d": None},
"f": None,
"g": {"c": 7, "d": 2},
"e": 5,
"h": 1,
}, "failed updating nested dict"
def test_magic_to_dict():
"""test all argument combinations of `magic_to_dict`"""
d = {"a_b": 1, "c_d_e": 2, "a": 3, "c_d": {"e": 6}}
res = magic_to_dict(d, separator="_")
assert res == {"a": 3, "c": {"d": {"e": 6}}}
d = {"a.b": 1, "c": 2, "a": 3, "c.d": {"e": 6}}
res = magic_to_dict(d, separator=".")
assert res == {"a": 3, "c": {"d": {"e": 6}}}
with pytest.raises(AssertionError):
magic_to_dict(0, separator=".")
with pytest.raises(AssertionError):
magic_to_dict(d, separator=0)
def test_linearize_dict():
"""test all argument combinations of `magic_to_dict`"""
mydict = {
"line": {"width": 1, "style": "solid", "color": None},
"marker": {"size": 1, "symbol": "o", "color": None},
}
res = linearize_dict(mydict, separator=".")
assert res == {
"line.width": 1,
"line.style": "solid",
"line.color": None,
"marker.size": 1,
"marker.symbol": "o",
"marker.color": None,
}, "linearization of dict failed"
with pytest.raises(AssertionError):
magic_to_dict(0, separator=".")
with pytest.raises(AssertionError):
magic_to_dict(mydict, separator=0)
@pytest.mark.parametrize(
"color, allow_None, color_expected",
[
(None, True, None),
("blue", True, "blue"),
("r", True, "red"),
(0, True, "#000000"),
(0.5, True, "#7f7f7f"),
("0.5", True, "#7f7f7f"),
((127, 127, 127), True, "#7f7f7f"),
("rgb(127, 127, 127)", True, "#7f7f7f"),
((0, 0, 0, 0), False, "#000000"),
((.1, .2, .3), False, "#19334c"),
]
+ [(shortC, True, longC) for shortC, longC in COLORS_MATPLOTLIB_TO_PLOTLY.items()],
)
def test_good_colors(color, allow_None, color_expected):
"""test color validator based on matploblib validation"""
assert color_validator(color, allow_None=allow_None) == color_expected
@pytest.mark.parametrize(
"color, allow_None, expected_exception",
[
(None, False, ValueError),
(-1, False, ValueError),
((-1, 0, 0), False, ValueError),
((1, 2), False, ValueError),
((0, 0, 260), False, ValueError),
((0, "0", 200), False, ValueError),
("rgb(a, 0, 260)", False, ValueError),
("2", False, ValueError),
("mybadcolor", False, ValueError),
],
)
def test_bad_colors(color, allow_None, expected_exception):
"""test color validator based on matploblib validation"""
with pytest.raises(expected_exception):
color_validator(color, allow_None=allow_None)
def test_MagicProperties():
"""test MagicProperties class"""
class BPsub1(MagicProperties):
"MagicProperties class"
@property
def prop1(self):
"""prop1"""
return self._prop1
@prop1.setter
class BPsub2(MagicProperties):
"MagicProperties class"
@property
def prop2(self):
"""prop2"""
return self._prop2
@prop2.setter
bp1 = BPsub1(prop1=1)
# check setting attribute/property
assert bp1.prop1 == 1, "`bp1.prop1` should be `1`"
with pytest.raises(AttributeError):
getattr(bp1, "prop1e") # only properties are allowed to be set
assert bp1.as_dict() == {"prop1": 1}, "`as_dict` method failed"
bp2 = BPsub2(prop2=2)
bp1.prop1 = bp2 # assigning class to subproperty
# check as_dict method
assert bp1.as_dict() == {"prop1": {"prop2": 2}}, "`as_dict` method failed"
# check update method with different parameters
assert bp1.update(prop1_prop2=10).as_dict() == {
"prop1": {"prop2": 10}
}, "magic property setting failed"
with pytest.raises(AttributeError):
bp1.update(prop1_prop2=10, prop3=4)
assert bp1.update(prop1_prop2=10, prop3=4, _match_properties=False).as_dict() == {
"prop1": {"prop2": 10}
}, "magic property setting failed, should ignore `'prop3'`"
assert bp1.update(prop1_prop2=20, _replace_None_only=True).as_dict() == {
"prop1": {"prop2": 10}
}, "magic property setting failed, `prop2` should be remained unchanged `10`"
# check copy method
bp3 = bp2.copy()
assert bp3 is not bp2, "failed copying, should return a different id"
assert (
bp3.as_dict() == bp2.as_dict()
), "failed copying, should return the same property values"
# check flatten dict
assert bp3.as_dict(flatten=True) == bp2.as_dict(
flatten=True
), "failed copying, should return the same property values"
# check failing init
with pytest.raises(AttributeError):
BPsub1(a=0) # `a` is not a property in the class
# check repr
assert repr(MagicProperties()) == "MagicProperties()", "repr failed"
def test_get_defaults_dict():
"""test get_defaults_dict"""
s0 = get_defaults_dict("display.style")
s1 = get_defaults_dict()["display"]["style"]
assert s0 == s1, "dicts don't match"
| [
6738,
4866,
1330,
2769,
30073,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
2153,
79,
2645,
571,
13557,
10677,
13,
12286,
82,
13,
12286,
82,
62,
315,
879,
1330,
3124,
62,
12102,
1352,
198,
6738,
2153,
79,
2645,
571,
13557,
10677,
13,... | 2.229314 | 3,118 |
from .tables import metadata
from .settings import DBSettings
| [
6738,
764,
83,
2977,
1330,
20150,
198,
6738,
764,
33692,
1330,
360,
4462,
12374,
198
] | 4.133333 | 15 |
if __name__ == '__main__':
test_cases = [
[10, 3, 15, 7, 8, 23, 98, 29],
[],
[3],
[9,8,7,2],
[1,2,3,4,5]
]
for arr in test_cases:
merge_sort(arr)
print("Sorted:", arr) | [
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1332,
62,
33964,
796,
685,
198,
220,
220,
220,
220,
220,
220,
220,
685,
940,
11,
513,
11,
1315,
11,
767,
11,
807,
11,
2242,
11,
9661,
11,
2808,
4... | 1.601351 | 148 |
__author__ = 'smackware'
from typing import *
from collections import namedtuple
if TYPE_CHECKING:
from rdisq.consts import ServiceUid
RequestPayload = namedtuple("RequestPayload", "task_id timeout args kwargs")
| [
834,
9800,
834,
796,
705,
5796,
441,
1574,
6,
198,
198,
6738,
19720,
1330,
1635,
198,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
361,
41876,
62,
50084,
2751,
25,
198,
220,
220,
220,
422,
374,
6381,
80,
13,
1102,
6448,
1330,
... | 3.098592 | 71 |
from pos_vec import Position
| [
6738,
1426,
62,
35138,
1330,
23158,
628
] | 4.285714 | 7 |
"""Groebner bases algorithms. """
from sympy.core.symbol import Dummy
from sympy.polys.monomials import monomial_mul, monomial_lcm, monomial_divides, term_div
from sympy.polys.orderings import lex
from sympy.polys.polyerrors import DomainError
from sympy.polys.polyconfig import query
def groebner(seq, ring, method=None):
"""
Computes Groebner basis for a set of polynomials in `K[X]`.
Wrapper around the (default) improved Buchberger and the other algorithms
for computing Groebner bases. The choice of algorithm can be changed via
``method`` argument or :func:`sympy.polys.polyconfig.setup`, where
``method`` can be either ``buchberger`` or ``f5b``.
"""
if method is None:
method = query('groebner')
_groebner_methods = {
'buchberger': _buchberger,
'f5b': _f5b,
}
try:
_groebner = _groebner_methods[method]
except KeyError:
raise ValueError("'%s' is not a valid Groebner bases algorithm (valid are 'buchberger' and 'f5b')" % method)
domain, orig = ring.domain, None
if not domain.is_Field or not domain.has_assoc_Field:
try:
orig, ring = ring, ring.clone(domain=domain.get_field())
except DomainError:
raise DomainError("can't compute a Groebner basis over %s" % domain)
else:
seq = [ s.set_ring(ring) for s in seq ]
G = _groebner(seq, ring)
if orig is not None:
G = [ g.clear_denoms()[1].set_ring(orig) for g in G ]
return G
def _buchberger(f, ring):
"""
Computes Groebner basis for a set of polynomials in `K[X]`.
Given a set of multivariate polynomials `F`, finds another
set `G`, such that Ideal `F = Ideal G` and `G` is a reduced
Groebner basis.
The resulting basis is unique and has monic generators if the
ground domains is a field. Otherwise the result is non-unique
but Groebner bases over e.g. integers can be computed (if the
input polynomials are monic).
Groebner bases can be used to choose specific generators for a
polynomial ideal. Because these bases are unique you can check
for ideal equality by comparing the Groebner bases. To see if
one polynomial lies in an ideal, divide by the elements in the
base and see if the remainder vanishes.
They can also be used to solve systems of polynomial equations
as, by choosing lexicographic ordering, you can eliminate one
variable at a time, provided that the ideal is zero-dimensional
(finite number of solutions).
Notes
=====
Algorithm used: an improved version of Buchberger's algorithm
as presented in T. Becker, V. Weispfenning, Groebner Bases: A
Computational Approach to Commutative Algebra, Springer, 1993,
page 232.
References
==========
.. [1] [Bose03]_
.. [2] [Giovini91]_
.. [3] [Ajwa95]_
.. [4] [Cox97]_
"""
order = ring.order
monomial_mul = ring.monomial_mul
monomial_div = ring.monomial_div
monomial_lcm = ring.monomial_lcm
if not f:
return []
# replace f with a reduced list of initial polynomials; see [BW] page 203
f1 = f[:]
while True:
f = f1[:]
f1 = []
for i in range(len(f)):
p = f[i]
r = p.rem(f[:i])
if r:
f1.append(r.monic())
if f == f1:
break
I = {} # ip = I[p]; p = f[ip]
F = set() # set of indices of polynomials
G = set() # set of indices of intermediate would-be Groebner basis
CP = set() # set of pairs of indices of critical pairs
for i, h in enumerate(f):
I[h] = i
F.add(i)
#####################################
# algorithm GROEBNERNEWS2 in [BW] page 232
while F:
# select p with minimum monomial according to the monomial ordering
h = min([f[x] for x in F], key=lambda f: order(f.LM))
ih = I[h]
F.remove(ih)
G, CP = update(G, CP, ih)
# count the number of critical pairs which reduce to zero
reductions_to_zero = 0
while CP:
ig1, ig2 = select(CP)
CP.remove((ig1, ig2))
h = spoly(f[ig1], f[ig2], ring)
# ordering divisors is on average more efficient [Cox] page 111
G1 = sorted(G, key=lambda g: order(f[g].LM))
ht = normal(h, G1)
if ht:
G, CP = update(G, CP, ht[1])
else:
reductions_to_zero += 1
######################################
# now G is a Groebner basis; reduce it
Gr = set()
for ig in G:
ht = normal(f[ig], G - {ig})
if ht:
Gr.add(ht[1])
Gr = [f[ig] for ig in Gr]
# order according to the monomial ordering
Gr = sorted(Gr, key=lambda f: order(f.LM), reverse=True)
return Gr
def spoly(p1, p2, ring):
"""
Compute LCM(LM(p1), LM(p2))/LM(p1)*p1 - LCM(LM(p1), LM(p2))/LM(p2)*p2
This is the S-poly provided p1 and p2 are monic
"""
LM1 = p1.LM
LM2 = p2.LM
LCM12 = ring.monomial_lcm(LM1, LM2)
m1 = ring.monomial_div(LCM12, LM1)
m2 = ring.monomial_div(LCM12, LM2)
s1 = p1.mul_monom(m1)
s2 = p2.mul_monom(m2)
s = s1 - s2
return s
# F5B
# convenience functions
# signature functions
def sig_cmp(u, v, order):
"""
Compare two signatures by extending the term order to K[X]^n.
u < v iff
- the index of v is greater than the index of u
or
- the index of v is equal to the index of u and u[0] < v[0] w.r.t. order
u > v otherwise
"""
if u[1] > v[1]:
return -1
if u[1] == v[1]:
#if u[0] == v[0]:
# return 0
if order(u[0]) < order(v[0]):
return -1
return 1
def sig_key(s, order):
"""
Key for comparing two signatures.
s = (m, k), t = (n, l)
s < t iff [k > l] or [k == l and m < n]
s > t otherwise
"""
return (-s[1], order(s[0]))
def sig_mult(s, m):
"""
Multiply a signature by a monomial.
The product of a signature (m, i) and a monomial n is defined as
(m * t, i).
"""
return sig(monomial_mul(s[0], m), s[1])
# labeled polynomial functions
def lbp_sub(f, g):
"""
Subtract labeled polynomial g from f.
The signature and number of the difference of f and g are signature
and number of the maximum of f and g, w.r.t. lbp_cmp.
"""
if sig_cmp(Sign(f), Sign(g), Polyn(f).ring.order) < 0:
max_poly = g
else:
max_poly = f
ret = Polyn(f) - Polyn(g)
return lbp(Sign(max_poly), ret, Num(max_poly))
def lbp_mul_term(f, cx):
"""
Multiply a labeled polynomial with a term.
The product of a labeled polynomial (s, p, k) by a monomial is
defined as (m * s, m * p, k).
"""
return lbp(sig_mult(Sign(f), cx[0]), Polyn(f).mul_term(cx), Num(f))
def lbp_cmp(f, g):
"""
Compare two labeled polynomials.
f < g iff
- Sign(f) < Sign(g)
or
- Sign(f) == Sign(g) and Num(f) > Num(g)
f > g otherwise
"""
if sig_cmp(Sign(f), Sign(g), Polyn(f).ring.order) == -1:
return -1
if Sign(f) == Sign(g):
if Num(f) > Num(g):
return -1
#if Num(f) == Num(g):
# return 0
return 1
def lbp_key(f):
"""
Key for comparing two labeled polynomials.
"""
return (sig_key(Sign(f), Polyn(f).ring.order), -Num(f))
# algorithm and helper functions
def critical_pair(f, g, ring):
"""
Compute the critical pair corresponding to two labeled polynomials.
A critical pair is a tuple (um, f, vm, g), where um and vm are
terms such that um * f - vm * g is the S-polynomial of f and g (so,
wlog assume um * f > vm * g).
For performance sake, a critical pair is represented as a tuple
(Sign(um * f), um, f, Sign(vm * g), vm, g), since um * f creates
a new, relatively expensive object in memory, whereas Sign(um *
f) and um are lightweight and f (in the tuple) is a reference to
an already existing object in memory.
"""
domain = ring.domain
ltf = Polyn(f).LT
ltg = Polyn(g).LT
lt = (monomial_lcm(ltf[0], ltg[0]), domain.one)
um = term_div(lt, ltf, domain)
vm = term_div(lt, ltg, domain)
# The full information is not needed (now), so only the product
# with the leading term is considered:
fr = lbp_mul_term(lbp(Sign(f), Polyn(f).leading_term(), Num(f)), um)
gr = lbp_mul_term(lbp(Sign(g), Polyn(g).leading_term(), Num(g)), vm)
# return in proper order, such that the S-polynomial is just
# u_first * f_first - u_second * f_second:
if lbp_cmp(fr, gr) == -1:
return (Sign(gr), vm, g, Sign(fr), um, f)
else:
return (Sign(fr), um, f, Sign(gr), vm, g)
def cp_cmp(c, d):
"""
Compare two critical pairs c and d.
c < d iff
- lbp(c[0], _, Num(c[2]) < lbp(d[0], _, Num(d[2])) (this
corresponds to um_c * f_c and um_d * f_d)
or
- lbp(c[0], _, Num(c[2]) >< lbp(d[0], _, Num(d[2])) and
lbp(c[3], _, Num(c[5])) < lbp(d[3], _, Num(d[5])) (this
corresponds to vm_c * g_c and vm_d * g_d)
c > d otherwise
"""
zero = Polyn(c[2]).ring.zero
c0 = lbp(c[0], zero, Num(c[2]))
d0 = lbp(d[0], zero, Num(d[2]))
r = lbp_cmp(c0, d0)
if r == -1:
return -1
if r == 0:
c1 = lbp(c[3], zero, Num(c[5]))
d1 = lbp(d[3], zero, Num(d[5]))
r = lbp_cmp(c1, d1)
if r == -1:
return -1
#if r == 0:
# return 0
return 1
def cp_key(c, ring):
"""
Key for comparing critical pairs.
"""
return (lbp_key(lbp(c[0], ring.zero, Num(c[2]))), lbp_key(lbp(c[3], ring.zero, Num(c[5]))))
def s_poly(cp):
"""
Compute the S-polynomial of a critical pair.
The S-polynomial of a critical pair cp is cp[1] * cp[2] - cp[4] * cp[5].
"""
return lbp_sub(lbp_mul_term(cp[2], cp[1]), lbp_mul_term(cp[5], cp[4]))
def is_rewritable_or_comparable(sign, num, B):
"""
Check if a labeled polynomial is redundant by checking if its
signature and number imply rewritability or comparability.
(sign, num) is comparable if there exists a labeled polynomial
h in B, such that sign[1] (the index) is less than Sign(h)[1]
and sign[0] is divisible by the leading monomial of h.
(sign, num) is rewritable if there exists a labeled polynomial
h in B, such thatsign[1] is equal to Sign(h)[1], num < Num(h)
and sign[0] is divisible by Sign(h)[0].
"""
for h in B:
# comparable
if sign[1] < Sign(h)[1]:
if monomial_divides(Polyn(h).LM, sign[0]):
return True
# rewritable
if sign[1] == Sign(h)[1]:
if num < Num(h):
if monomial_divides(Sign(h)[0], sign[0]):
return True
return False
def f5_reduce(f, B):
"""
F5-reduce a labeled polynomial f by B.
Continuously searches for non-zero labeled polynomial h in B, such
that the leading term lt_h of h divides the leading term lt_f of
f and Sign(lt_h * h) < Sign(f). If such a labeled polynomial h is
found, f gets replaced by f - lt_f / lt_h * h. If no such h can be
found or f is 0, f is no further F5-reducible and f gets returned.
A polynomial that is reducible in the usual sense need not be
F5-reducible, e.g.:
>>> from sympy.polys.groebnertools import lbp, sig, f5_reduce, Polyn
>>> from sympy.polys import ring, QQ, lex
>>> R, x,y,z = ring("x,y,z", QQ, lex)
>>> f = lbp(sig((1, 1, 1), 4), x, 3)
>>> g = lbp(sig((0, 0, 0), 2), x, 2)
>>> Polyn(f).rem([Polyn(g)])
0
>>> f5_reduce(f, [g])
(((1, 1, 1), 4), x, 3)
"""
order = Polyn(f).ring.order
domain = Polyn(f).ring.domain
if not Polyn(f):
return f
while True:
g = f
for h in B:
if Polyn(h):
if monomial_divides(Polyn(h).LM, Polyn(f).LM):
t = term_div(Polyn(f).LT, Polyn(h).LT, domain)
if sig_cmp(sig_mult(Sign(h), t[0]), Sign(f), order) < 0:
# The following check need not be done and is in general slower than without.
#if not is_rewritable_or_comparable(Sign(gp), Num(gp), B):
hp = lbp_mul_term(h, t)
f = lbp_sub(f, hp)
break
if g == f or not Polyn(f):
return f
def _f5b(F, ring):
"""
Computes a reduced Groebner basis for the ideal generated by F.
f5b is an implementation of the F5B algorithm by Yao Sun and
Dingkang Wang. Similarly to Buchberger's algorithm, the algorithm
proceeds by computing critical pairs, computing the S-polynomial,
reducing it and adjoining the reduced S-polynomial if it is not 0.
Unlike Buchberger's algorithm, each polynomial contains additional
information, namely a signature and a number. The signature
specifies the path of computation (i.e. from which polynomial in
the original basis was it derived and how), the number says when
the polynomial was added to the basis. With this information it
is (often) possible to decide if an S-polynomial will reduce to
0 and can be discarded.
Optimizations include: Reducing the generators before computing
a Groebner basis, removing redundant critical pairs when a new
polynomial enters the basis and sorting the critical pairs and
the current basis.
Once a Groebner basis has been found, it gets reduced.
References
==========
.. [1] Yao Sun, Dingkang Wang: "A New Proof for the Correctness of F5
(F5-Like) Algorithm", http://arxiv.org/abs/1004.0084 (specifically
v4)
.. [2] Thomas Becker, Volker Weispfenning, Groebner bases: A computational
approach to commutative algebra, 1993, p. 203, 216
"""
order = ring.order
# reduce polynomials (like in Mario Pernici's implementation) (Becker, Weispfenning, p. 203)
B = F
while True:
F = B
B = []
for i in range(len(F)):
p = F[i]
r = p.rem(F[:i])
if r:
B.append(r)
if F == B:
break
# basis
B = [lbp(sig(ring.zero_monom, i + 1), F[i], i + 1) for i in range(len(F))]
B.sort(key=lambda f: order(Polyn(f).LM), reverse=True)
# critical pairs
CP = [critical_pair(B[i], B[j], ring) for i in range(len(B)) for j in range(i + 1, len(B))]
CP.sort(key=lambda cp: cp_key(cp, ring), reverse=True)
k = len(B)
reductions_to_zero = 0
while len(CP):
cp = CP.pop()
# discard redundant critical pairs:
if is_rewritable_or_comparable(cp[0], Num(cp[2]), B):
continue
if is_rewritable_or_comparable(cp[3], Num(cp[5]), B):
continue
s = s_poly(cp)
p = f5_reduce(s, B)
p = lbp(Sign(p), Polyn(p).monic(), k + 1)
if Polyn(p):
# remove old critical pairs, that become redundant when adding p:
indices = []
for i, cp in enumerate(CP):
if is_rewritable_or_comparable(cp[0], Num(cp[2]), [p]):
indices.append(i)
elif is_rewritable_or_comparable(cp[3], Num(cp[5]), [p]):
indices.append(i)
for i in reversed(indices):
del CP[i]
# only add new critical pairs that are not made redundant by p:
for g in B:
if Polyn(g):
cp = critical_pair(p, g, ring)
if is_rewritable_or_comparable(cp[0], Num(cp[2]), [p]):
continue
elif is_rewritable_or_comparable(cp[3], Num(cp[5]), [p]):
continue
CP.append(cp)
# sort (other sorting methods/selection strategies were not as successful)
CP.sort(key=lambda cp: cp_key(cp, ring), reverse=True)
# insert p into B:
m = Polyn(p).LM
if order(m) <= order(Polyn(B[-1]).LM):
B.append(p)
else:
for i, q in enumerate(B):
if order(m) > order(Polyn(q).LM):
B.insert(i, p)
break
k += 1
#print(len(B), len(CP), "%d critical pairs removed" % len(indices))
else:
reductions_to_zero += 1
# reduce Groebner basis:
H = [Polyn(g).monic() for g in B]
H = red_groebner(H, ring)
return sorted(H, key=lambda f: order(f.LM), reverse=True)
def red_groebner(G, ring):
    """
    Compute reduced Groebner basis, from BeckerWeispfenning93, p. 216

    Selects a subset of generators, that already generate the ideal
    and computes a reduced Groebner basis for them.

    Parameters
    ==========

    G : list
        Polynomials forming a Groebner basis of the ideal.
    ring : ring
        Polynomial ring the elements of ``G`` belong to (kept for
        interface compatibility; the order is implicit in the elements).

    Returns
    =======

    list
        Monic polynomials forming the reduced Groebner basis.
    """
    def reduction(P):
        """
        The actual reduction algorithm: fully reduce each polynomial
        modulo all the others and normalize the survivors to monic form.
        """
        Q = []
        for i, p in enumerate(P):
            h = p.rem(P[:i] + P[i + 1:])
            if h:
                Q.append(h)
        return [p.monic() for p in Q]

    # BUG FIX: the original did ``F = G`` and then popped from F, which
    # destructively emptied the caller's list. Work on a copy instead.
    F = list(G)
    H = []

    while F:
        f0 = F.pop()

        # Keep f0 only if no other retained generator's leading monomial
        # divides its leading monomial (i.e. f0 is not redundant).
        if not any(monomial_divides(f.LM, f0.LM) for f in F + H):
            H.append(f0)

    # Becker, Weispfenning, p. 217: H is Groebner basis of the ideal generated by G.
    return reduction(H)
def is_groebner(G, ring):
    """
    Check if G is a Groebner basis.

    Applies Buchberger's criterion: the S-polynomial of every pair of
    generators must reduce to zero modulo ``G``.
    """
    for i, g in enumerate(G):
        for other in G[i + 1:]:
            remainder = spoly(g, other, ring).rem(G)
            if remainder:
                # A non-zero normal form witnesses a missing generator.
                return False
    return True
def is_minimal(G, ring):
    """
    Checks if G is a minimal Groebner basis.

    Minimality requires every generator to be monic and no generator's
    leading monomial to be divisible by another generator's leading
    monomial. Note: sorts ``G`` in place by leading monomial order.
    """
    order = ring.order
    domain = ring.domain

    G.sort(key=lambda g: order(g.LM))

    for i, g in enumerate(G):
        if g.LC != domain.one:
            return False
        others = G[:i] + G[i + 1:]
        if any(monomial_divides(h.LM, g.LM) for h in others):
            return False

    return True
def is_reduced(G, ring):
    """
    Checks if G is a reduced Groebner basis.

    Reducedness requires every generator to be monic and *no* monomial
    occurring in any generator to be divisible by the leading monomial
    of another generator. Note: sorts ``G`` in place by leading
    monomial order.
    """
    order = ring.order
    domain = ring.domain

    G.sort(key=lambda g: order(g.LM))

    for i, g in enumerate(G):
        if g.LC != domain.one:
            return False
        others = G[:i] + G[i + 1:]
        for monom, _ in g.terms():
            if any(monomial_divides(h.LM, monom) for h in others):
                return False

    return True
def groebner_lcm(f, g):
    """
    Computes LCM of two polynomials using Groebner bases.

    The LCM is computed as the unique generator of the intersection
    of the two ideals generated by `f` and `g`. The approach is to
    compute a Groebner basis with respect to lexicographic ordering
    of `t*f` and `(1 - t)*g`, where `t` is an unrelated variable and
    then filtering out the solution that doesn't contain `t`.

    References
    ==========

    .. [1] [Cox97]_

    """
    if f.ring != g.ring:
        raise ValueError("Values should be equal")

    ring = f.ring
    domain = ring.domain

    # The LCM with the zero polynomial is zero.
    if not f or not g:
        return ring.zero

    # Single-term inputs: take the monomial/coefficient LCM directly,
    # no basis computation is needed.
    if len(f) <= 1 and len(g) <= 1:
        return ring.term_new(monomial_lcm(f.LM, g.LM), domain.lcm(f.LC, g.LC))

    # Split off the contents and continue with the primitive parts.
    fc, f = f.primitive()
    gc, g = g.primitive()

    lcm = domain.lcm(fc, gc)

    def tagged(poly, exponent, sign=1):
        # Prepend the exponent of the auxiliary variable ``t`` to every
        # monomial of ``poly``, optionally negating the coefficients.
        return [((exponent,) + monom, sign*coeff) for monom, coeff in poly.terms()]

    # Terms of t*f and (1 - t)*g expressed over the extended variable set.
    f_terms = tagged(f, 1)
    g_terms = tagged(g, 0) + tagged(g, 1, -1)

    t = Dummy("t")
    t_ring = ring.clone(symbols=(t,) + ring.symbols, order=lex)

    F = t_ring.from_terms(f_terms)
    G = t_ring.from_terms(g_terms)

    basis = groebner([F, G], t_ring)

    # The eliminant is the basis element independent of ``t``; rescale it
    # by the content LCM and strip the auxiliary exponent slot.
    H = [h for h in basis if is_independent(h, 0)]
    h_terms = [(monom[1:], coeff*lcm) for monom, coeff in H[0].terms()]

    return ring.from_terms(h_terms)
def groebner_gcd(f, g):
    """Computes GCD of two polynomials using Groebner bases. """
    if f.ring != g.ring:
        raise ValueError("Values should be equal")

    domain = f.ring.domain
    is_field = domain.is_Field

    # Over a non-field coefficient ring, factor out the contents first;
    # their gcd rescales the primitive result at the end.
    if not is_field:
        fc, f = f.primitive()
        gc, g = g.primitive()
        gcd = domain.gcd(fc, gc)

    # gcd(f, g) * lcm(f, g) == f * g, so divide the product by the LCM.
    quotients = (f*g).quo([groebner_lcm(f, g)])
    if len(quotients) != 1:
        raise ValueError("Length should be 1")
    h = quotients[0]

    # Normalize to monic over a field; reattach the content gcd otherwise.
    return h.monic() if is_field else gcd*h
| [
37811,
42921,
1765,
1008,
12536,
16113,
13,
37227,
628,
198,
6738,
10558,
88,
13,
7295,
13,
1837,
23650,
1330,
360,
13513,
198,
6738,
10558,
88,
13,
35428,
82,
13,
2144,
296,
8231,
1330,
937,
49070,
62,
76,
377,
11,
937,
49070,
62,
... | 2.162832 | 9,519 |
import os
import camelot
import pandas as pd
import matplotlib.pyplot as plt
| [
198,
11748,
28686,
198,
11748,
41021,
313,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
628,
628
] | 2.964286 | 28 |
import json
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter
def _load_results(path):
    """Load one benchmark result dictionary from a JSON file."""
    with open(path, "r") as result_file:
        return json.load(result_file)


def _plot_comparison(x, faiss_times, pyvptree_times, title, xlabel, out_path):
    """
    Render one faiss-vs-pyvptree comparison line plot and save it as PNG.

    All plots share the same size, DPI, axis formatting and legend so the
    two benchmark sections are visually consistent (the original second
    section accidentally dropped the figure size and x-axis formatter).
    """
    plt.figure().set_dpi(120)
    plt.gcf().set_size_inches(8, 5)
    plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.2f}'))
    plt.gca().xaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))
    plt.title(title)
    plt.plot(x, faiss_times, "-", label="faiss", color='red')
    plt.plot(x, pyvptree_times, "-", label="pyvptree", color='blue')
    plt.xlabel(xlabel)
    plt.ylabel('search time (seconds)')
    plt.legend(loc="upper left")
    plt.savefig(out_path)
    # Close the figure so repeated plots do not accumulate open figures
    # (matplotlib warns after 20 and they hold memory until closed).
    plt.close()


faiss_l2_data = _load_results("benchmark/results/faiss_l2_test_result.json")
pyvptree_l2_data = _load_results("benchmark/results/pyvptree_l2_test_result.json")

print(pyvptree_l2_data)
print('generating L2 index bench mark ... ')

# L2 result:
# Compare fixed K and fixed dimension, time vs numpoints (for various Ks)
dimension = 16
for K in [1, 3, 8, 32]:
    num_points_axis = [200000, 500000, 1000000]
    faiss_times = [faiss_l2_data[str(dimension)][str(K)][str(v)]['avg_search_time'] for v in num_points_axis]
    pyvptree_times = [pyvptree_l2_data[str(dimension)][str(K)][str(v)]['avg_search_time'] for v in num_points_axis]
    _plot_comparison(
        num_points_axis, faiss_times, pyvptree_times,
        f'Search L2 Index, faiss vs pyvptree K={K}, dimension={dimension}',
        'number of points',
        f'benchmark/results/l2_K{K}_{dimension}D_vs_time.png')

# Compare fixed number of points and fixed K for varing dimensions
K = 3
num_points = 1000000
dim_axis = [3, 4, 8, 16, 32]
faiss_times = [faiss_l2_data[str(v)][str(K)][str(num_points)]['avg_search_time'] for v in dim_axis]
pyvptree_times = [pyvptree_l2_data[str(v)][str(K)][str(num_points)]['avg_search_time'] for v in dim_axis]
_plot_comparison(
    dim_axis, faiss_times, pyvptree_times,
    f'Search L2 Index, faiss vs pyvptree K={K}, number of points={num_points}',
    'data dimension',
    f'benchmark/results/l2_K{K}_{num_points}P_vs_time.png')

print('done generating benchmarks ... ')
| [
11748,
33918,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
83,
15799,
1330,
4285,
17410,
8479,
1436,
198,
198,
13331,
747,
62,
75,
17,
62,
7890,
796,
6045,
198,
4480,
1280,
7203,
2696... | 2.314255 | 926 |
#!/usr/bin/env python
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""
webserver.py
This is the main module used to create the flask application object. All the blueprints
are imported and registered with the application object.
"""
import os
from flask_cors import CORS
from flask import Flask
from flask_wtf.csrf import CSRFProtect
from webserver_pkg.calibration import CALIBRATION_BLUEPRINT
from webserver_pkg.device_info_api import DEVICE_INFO_API_BLUEPRINT
from webserver_pkg.login import LOGIN_BLUEPRINT
from webserver_pkg.led_api import LED_API_BLUEPRINT
from webserver_pkg.models import MODELS_BLUEPRINT
from webserver_pkg.software_update import SOFTWARE_UPDATE_BLUEPRINT
from webserver_pkg.ssh_api import SSH_API_BLUEPRINT
from webserver_pkg.vehicle_logs import VEHICLE_LOGS_BLUEPRINT
from webserver_pkg.vehicle_control import VEHICLE_CONTROL_BLUEPRINT
from webserver_pkg.wifi_settings import WIFI_SETTINGS_BLUEPRINT
# Absolute path to the device-console HTML templates shipped on the vehicle.
template_dir = os.path.abspath('/opt/aws/deepracer/lib/device_console/templates')

# Create the Flask application object.
app = Flask(__name__, template_folder=template_dir)
CORS(app)
csrf = CSRFProtect()

# Initialize the application with CSRF and register all the API blueprints.
csrf.init_app(app)
app.register_blueprint(VEHICLE_LOGS_BLUEPRINT)
app.register_blueprint(VEHICLE_CONTROL_BLUEPRINT)
app.register_blueprint(WIFI_SETTINGS_BLUEPRINT)
app.register_blueprint(LOGIN_BLUEPRINT)
app.register_blueprint(SOFTWARE_UPDATE_BLUEPRINT)
app.register_blueprint(CALIBRATION_BLUEPRINT)
app.register_blueprint(SSH_API_BLUEPRINT)
app.register_blueprint(LED_API_BLUEPRINT)
app.register_blueprint(DEVICE_INFO_API_BLUEPRINT)
app.register_blueprint(MODELS_BLUEPRINT)

app.config.update(
    # NOTE(review): DEBUG should be disabled for production deployments.
    DEBUG=True,
    # SECURITY FIX: prefer an externally supplied signing secret; the
    # hard-coded fallback keeps existing deployments working but a static
    # SECRET_KEY lets anyone forge session cookies.
    SECRET_KEY=os.environ.get('DEEPRACER_FLASK_SECRET_KEY', 'secret_'),
    SESSION_COOKIE_SECURE=True,
    REMEMBER_COOKIE_SECURE=True)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
29113,
29113,
14468,
2,
198,
2,
220,
220,
15069,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
19... | 2.422456 | 1,238 |
# (c) @AbirHasan2005
from typing import Union
from pyromod import listen
from pyrogram import Client
from pyrogram.storage import Storage
from configs import Config
# Logging facility supplied by the project's Config object; ``log`` is the
# logger instance scoped to this module's name.
LOGGER = Config.LOGGER
log = LOGGER.getLogger(__name__)
#class Client(RawClient, New):
class Bot(Client):
    """
    Custom Bot Class.

    Subclasses :class:`pyrogram.Client` without adding any members —
    presumably so the ``pyromod.listen`` import above can patch
    conversation helpers onto the client (TODO confirm against pyromod).
    """
# Instantiate and start the bot. NOTE(review): Bot() is called with no
# arguments, so session name/API credentials are presumably supplied by
# defaults or patched configuration — verify before deploying. run()
# blocks until the client is stopped.
app = Bot()
app.run()
| [
2,
357,
66,
8,
2488,
4826,
343,
19242,
272,
14315,
198,
198,
6738,
19720,
1330,
4479,
198,
6738,
12972,
398,
375,
1330,
6004,
198,
6738,
12972,
39529,
1330,
20985,
198,
6738,
12972,
39529,
13,
35350,
1330,
20514,
198,
6738,
4566,
82,
... | 2.91453 | 117 |
"""Example for first day of class in Intro"""
import urllib2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
url = "http://esapubs.org/archive/ecol/E084/093/Mammal_lifehistories_v2.txt"
web_data = urllib2.urlopen(url)

# Tab-delimited table with a header row; infer column dtypes and skip the
# 4 footer lines of trailing metadata.
data = np.genfromtxt(web_data, delimiter='\t', names=True, dtype=None, skip_footer=4)

#plt.loglog(data['massg'], data['litter_size'], 'bo')

# Fit and plot a log-log mass vs. litter-size regression per family,
# but only for families with more than 10 records.
families = np.unique(data['family'])
for fam in families:
    fam_data = data[data['family'] == fam]
    if len(fam_data) > 10:
        plt.figure()
        y = np.log10(fam_data['litter_size'])
        x = np.log10(fam_data['massg'])
        X = sm.add_constant(x, prepend=True)
        regression_results = sm.OLS(y, X).fit()
        plt.plot(x, y, 'bo')
        # BUG FIX: the fitted model is bound to ``regression_results``;
        # the original referenced an undefined name ``results`` and
        # would raise NameError on the first large family.
        intercept, slope = regression_results.params
        y_pred = intercept + slope * x
        plt.plot(x, y_pred, 'r-')
    else:
        # BUG FIX: added the missing leading space so the message does not
        # run the family name into the text (was "Xdid not have ...").
        print(str(fam) + " did not have enough data to analyze")
37811,
16281,
329,
717,
1110,
286,
1398,
287,
37219,
37811,
198,
198,
11748,
2956,
297,
571,
17,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
1... | 2.248826 | 426 |
#
# PySNMP MIB module BAY-STACK-NES-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BAY-STACK-NES-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:35:52 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# --- Symbol imports from prerequisite MIB modules ---------------------------
# pysmi-generated boilerplate: pull ASN.1 base types, constraint classes,
# SNMPv2 SMI/TC building blocks and the Avaya/SynOptics enterprise root
# object out of the MIB builder's already-loaded modules.
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Gauge32, iso, ModuleIdentity, Bits, Integer32, Counter32, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Unsigned32, TimeTicks, Counter64, MibIdentifier, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "iso", "ModuleIdentity", "Bits", "Integer32", "Counter32", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Unsigned32", "TimeTicks", "Counter64", "MibIdentifier", "NotificationType")
DisplayString, RowStatus, TruthValue, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TruthValue", "TextualConvention")
bayStackMibs, = mibBuilder.importSymbols("SYNOPTICS-ROOT-MIB", "bayStackMibs")

# --- Module identity ---------------------------------------------------------
# Root OID of this MIB: 1.3.6.1.4.1.45.5.34 (enterprises.synoptics...).
bayStackNesMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 45, 5, 34))
bayStackNesMib.setRevisions(('2014-08-22 00:00', '2009-05-19 00:00',))

# Revision descriptions are only supported by newer pysnmp builders.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: bayStackNesMib.setRevisionsDescriptions(('v2: Changed organization to Avaya.', 'v1: Initial version.',))
if mibBuilder.loadTexts: bayStackNesMib.setLastUpdated('201408220000Z')
if mibBuilder.loadTexts: bayStackNesMib.setOrganization('Avaya')
if mibBuilder.loadTexts: bayStackNesMib.setContactInfo('Avaya')
if mibBuilder.loadTexts: bayStackNesMib.setDescription("Avaya Energy Saver (AES, formerly known as NES) MIB Copyright 2014 Avaya, Inc. All rights reserved. This Avaya SNMP Management Information Base Specification embodies Avaya' confidential and proprietary intellectual property. Avaya retains all title and ownership in the Specification, including any revisions. This Specification is supplied 'AS IS,' and Avaya makes no warranty, either express or implied, as to the use, operation, condition, or performance of the Specification.")

# --- Top-level subtrees ------------------------------------------------------
# .0 notifications, .1 objects, .2 objects carried in notifications.
bayStackNesNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 5, 34, 0))
bayStackNesObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 5, 34, 1))
bayStackNesNotificationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 5, 34, 2))

# --- Global scalars (objects.1) ----------------------------------------------
bsnesScalars = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 1))
bsnesEnergySaverEnabled = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 1, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: bsnesEnergySaverEnabled.setStatus('current')
if mibBuilder.loadTexts: bsnesEnergySaverEnabled.setDescription('This object controls whether the Avaya Energy Saver feature is enabled.')
bsnesPoePowerSavingEnabled = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 1, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: bsnesPoePowerSavingEnabled.setStatus('current')
if mibBuilder.loadTexts: bsnesPoePowerSavingEnabled.setDescription('This object controls whether Avaya Energy Saver POE power saving is enabled.')
bsnesEfficiencyModeEnabled = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 1, 3), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: bsnesEfficiencyModeEnabled.setStatus('current')
if mibBuilder.loadTexts: bsnesEfficiencyModeEnabled.setDescription('This object controls whether Avaya Energy Saver Efficiency-Mode is enabled.')
bsnesEnergySaverActive = MibScalar((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 1, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: bsnesEnergySaverActive.setStatus('current')
if mibBuilder.loadTexts: bsnesEnergySaverActive.setDescription('This object controls whether Avaya Energy Saver is currently active. A value of true(1) indicates energy saving is active. A value of false(2) indicates energy saving is currently inactive. The value of this object will change over time as specified by the energy saving schedule. Setting this object allows energy saving to be manually activated or deactivated.')

# --- Schedule table (objects.2), indexed by day/hour/minute -------------------
bsnesScheduleTable = MibTable((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 2), )
if mibBuilder.loadTexts: bsnesScheduleTable.setStatus('current')
if mibBuilder.loadTexts: bsnesScheduleTable.setDescription('This table contains the schedule for activation and deactivation of the Avaya Energy Saver feature.')
bsnesScheduleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 2, 1), ).setIndexNames((0, "BAY-STACK-NES-MIB", "bsnesScheduleDay"), (0, "BAY-STACK-NES-MIB", "bsnesScheduleHour"), (0, "BAY-STACK-NES-MIB", "bsnesScheduleMinute"))
if mibBuilder.loadTexts: bsnesScheduleEntry.setStatus('current')
if mibBuilder.loadTexts: bsnesScheduleEntry.setDescription('An energy saver schedule entry, indicates a time to activate or deactivate energy savings.')
bsnesScheduleDay = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("monday", 1), ("tuesday", 2), ("wednesday", 3), ("thursday", 4), ("friday", 5), ("saturday", 6), ("sunday", 7))))
if mibBuilder.loadTexts: bsnesScheduleDay.setStatus('current')
if mibBuilder.loadTexts: bsnesScheduleDay.setDescription('Day on which this schedule entry takes effect.')
bsnesScheduleHour = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 23)))
if mibBuilder.loadTexts: bsnesScheduleHour.setStatus('current')
if mibBuilder.loadTexts: bsnesScheduleHour.setDescription('Hour on which this schedule entry takes effect. A value of 0 means 12am midnight. A value of 12 means 12pm noon.')
bsnesScheduleMinute = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 59)))
if mibBuilder.loadTexts: bsnesScheduleMinute.setStatus('current')
if mibBuilder.loadTexts: bsnesScheduleMinute.setDescription('Minute on which this schedule entry takes effect.')
bsnesScheduleAction = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("activate", 1), ("deactivate", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: bsnesScheduleAction.setStatus('current')
if mibBuilder.loadTexts: bsnesScheduleAction.setDescription('The action taken when this schedule entry takes effect. Indicates whether energy savings will be activated or deactivated.')
bsnesScheduleRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: bsnesScheduleRowStatus.setStatus('current')
if mibBuilder.loadTexts: bsnesScheduleRowStatus.setDescription('Used to create/delete schedule entries.')

# --- Per-port interface table (objects.3) -------------------------------------
bsnesInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 3), )
if mibBuilder.loadTexts: bsnesInterfaceTable.setStatus('current')
if mibBuilder.loadTexts: bsnesInterfaceTable.setDescription('This table contains per-port NES settings.')
bsnesInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 3, 1), ).setIndexNames((0, "BAY-STACK-NES-MIB", "bsnesInterfaceIndex"))
if mibBuilder.loadTexts: bsnesInterfaceEntry.setStatus('current')
if mibBuilder.loadTexts: bsnesInterfaceEntry.setDescription('NES settings for a port.')
bsnesInterfaceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 3, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: bsnesInterfaceIndex.setStatus('current')
if mibBuilder.loadTexts: bsnesInterfaceIndex.setDescription('The ifIndex value of an interface.')
bsnesInterfaceEnergySaverEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 3, 1, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: bsnesInterfaceEnergySaverEnabled.setStatus('current')
if mibBuilder.loadTexts: bsnesInterfaceEnergySaverEnabled.setDescription('Indicates whether the Avaya Energy Saver feature is enabled for this interface.')
bsnesInterfaceEnergySaverPoeStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("notApplicable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bsnesInterfaceEnergySaverPoeStatus.setStatus('current')
if mibBuilder.loadTexts: bsnesInterfaceEnergySaverPoeStatus.setDescription('Indicates the Avaya Energy Saver PoE status for this interface.')

# --- Per-unit savings table (objects.4) ---------------------------------------
bsnesSavingsTable = MibTable((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 4), )
if mibBuilder.loadTexts: bsnesSavingsTable.setStatus('current')
if mibBuilder.loadTexts: bsnesSavingsTable.setDescription('This table contains per-unit information about the amount of power being saved by NES.')
bsnesSavingsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 4, 1), ).setIndexNames((0, "BAY-STACK-NES-MIB", "bsnesSavingsUnitIndex"))
if mibBuilder.loadTexts: bsnesSavingsEntry.setStatus('current')
if mibBuilder.loadTexts: bsnesSavingsEntry.setDescription('Information about the amount of power being saved for a unit.')
bsnesSavingsUnitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 4, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 8)))
if mibBuilder.loadTexts: bsnesSavingsUnitIndex.setStatus('current')
if mibBuilder.loadTexts: bsnesSavingsUnitIndex.setDescription('The unit number.')
bsnesSavingsUnitSavings = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 4, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bsnesSavingsUnitSavings.setStatus('current')
if mibBuilder.loadTexts: bsnesSavingsUnitSavings.setDescription('Indicates the amount of switch capacity power being saved on this unit. The value of this object is 1/10 watts.')
bsnesSavingsPoeSavings = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 34, 1, 4, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bsnesSavingsPoeSavings.setStatus('current')
if mibBuilder.loadTexts: bsnesSavingsPoeSavings.setDescription('Indicates the amount of PoE power being saved on this unit. The value of this object is 1/10 watts.')

# --- Notification types (notifications.*) -------------------------------------
# NOTE(review): the description texts below (including the typos
# 'deactived' and 'in not synchronized') are verbatim from the source MIB
# and are kept unchanged — they are runtime strings exported to managers.
bsnesGloballyEnabled = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 34, 0, 1))
if mibBuilder.loadTexts: bsnesGloballyEnabled.setStatus('current')
if mibBuilder.loadTexts: bsnesGloballyEnabled.setDescription('Indicates that NES was globally enabled.')
bsnesGloballyDisabled = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 34, 0, 2))
if mibBuilder.loadTexts: bsnesGloballyDisabled.setStatus('current')
if mibBuilder.loadTexts: bsnesGloballyDisabled.setDescription('Indicates that NES was globally disabled.')
bsnesManuallyActivated = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 34, 0, 3))
if mibBuilder.loadTexts: bsnesManuallyActivated.setStatus('current')
if mibBuilder.loadTexts: bsnesManuallyActivated.setDescription('Indicates that NES was manually activated.')
bsnesManuallyDeactivated = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 34, 0, 4))
if mibBuilder.loadTexts: bsnesManuallyDeactivated.setStatus('current')
if mibBuilder.loadTexts: bsnesManuallyDeactivated.setDescription('Indicates that NES was manually deactived.')
bsnesScheduleNotApplied = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 34, 0, 5))
if mibBuilder.loadTexts: bsnesScheduleNotApplied.setStatus('current')
if mibBuilder.loadTexts: bsnesScheduleNotApplied.setDescription('Indicates that a schedule was not applied because SNTP in not synchronized.')
bsnesScheduleApplied = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 34, 0, 6))
if mibBuilder.loadTexts: bsnesScheduleApplied.setStatus('current')
if mibBuilder.loadTexts: bsnesScheduleApplied.setDescription('Indicates that SNTP is synchronized and that the schedule is being applied.')
bsnesActivated = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 34, 0, 7))
if mibBuilder.loadTexts: bsnesActivated.setStatus('current')
if mibBuilder.loadTexts: bsnesActivated.setDescription('Indicates that NES was activated by schedule.')
bsnesDeactivated = NotificationType((1, 3, 6, 1, 4, 1, 45, 5, 34, 0, 8))
if mibBuilder.loadTexts: bsnesDeactivated.setStatus('current')
if mibBuilder.loadTexts: bsnesDeactivated.setDescription('Indicates that NES was deactivated by schedule.')

# --- Export all defined symbols so other MIB modules can import them ----------
mibBuilder.exportSymbols("BAY-STACK-NES-MIB", bsnesSavingsUnitSavings=bsnesSavingsUnitSavings, bsnesInterfaceIndex=bsnesInterfaceIndex, bsnesSavingsTable=bsnesSavingsTable, bsnesScheduleHour=bsnesScheduleHour, bayStackNesObjects=bayStackNesObjects, bsnesManuallyDeactivated=bsnesManuallyDeactivated, bayStackNesMib=bayStackNesMib, bsnesScheduleNotApplied=bsnesScheduleNotApplied, bsnesScheduleMinute=bsnesScheduleMinute, bsnesGloballyDisabled=bsnesGloballyDisabled, bsnesScalars=bsnesScalars, bsnesInterfaceEnergySaverEnabled=bsnesInterfaceEnergySaverEnabled, bsnesScheduleRowStatus=bsnesScheduleRowStatus, bsnesGloballyEnabled=bsnesGloballyEnabled, bsnesEnergySaverEnabled=bsnesEnergySaverEnabled, bsnesDeactivated=bsnesDeactivated, bsnesPoePowerSavingEnabled=bsnesPoePowerSavingEnabled, PYSNMP_MODULE_ID=bayStackNesMib, bayStackNesNotificationObjects=bayStackNesNotificationObjects, bsnesSavingsUnitIndex=bsnesSavingsUnitIndex, bsnesInterfaceEnergySaverPoeStatus=bsnesInterfaceEnergySaverPoeStatus, bsnesSavingsEntry=bsnesSavingsEntry, bsnesScheduleAction=bsnesScheduleAction, bsnesSavingsPoeSavings=bsnesSavingsPoeSavings, bayStackNesNotifications=bayStackNesNotifications, bsnesManuallyActivated=bsnesManuallyActivated, bsnesInterfaceEntry=bsnesInterfaceEntry, bsnesInterfaceTable=bsnesInterfaceTable, bsnesEnergySaverActive=bsnesEnergySaverActive, bsnesEfficiencyModeEnabled=bsnesEfficiencyModeEnabled, bsnesActivated=bsnesActivated, bsnesScheduleEntry=bsnesScheduleEntry, bsnesScheduleDay=bsnesScheduleDay, bsnesScheduleApplied=bsnesScheduleApplied, bsnesScheduleTable=bsnesScheduleTable)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
347,
4792,
12,
2257,
8120,
12,
37379,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
14490,
14,... | 2.942606 | 4,896 |
from .REDutils import *
from models.downsampler import Downsampler
# - blur image - exactly like the NCSR is doing it -
# - the inverse function H -
| [
6738,
764,
22083,
26791,
1330,
1635,
198,
6738,
4981,
13,
30371,
321,
20053,
1330,
5588,
37687,
20053,
628,
198,
2,
532,
23671,
2939,
532,
3446,
588,
262,
399,
7902,
49,
318,
1804,
340,
532,
628,
198,
198,
2,
532,
262,
34062,
2163,
... | 3.444444 | 45 |
import os
__version__ = '3.2.dev0'
PHOTOLOGUE_APP_DIR = os.path.dirname(os.path.abspath(__file__))
| [
11748,
28686,
198,
198,
834,
9641,
834,
796,
705,
18,
13,
17,
13,
7959,
15,
6,
198,
198,
11909,
2394,
33462,
8924,
62,
24805,
62,
34720,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
834,
7753,
... | 2.148936 | 47 |
# author : Group 30 - Block 3 - MDS UBC
# date : 2021-11-19
"""Collects a .csv file containing the stocks price and trends volatility data and generates a number of volatility plots.
Usage: generate-plot-images.py --in_file=<in_file> --out_folder=<out_folder>
Options:
--in_file=<in_file> Path (including filename) from where to retrieve the stocks volatility data (must be in standard csv format)
--out_folder=<out_folder> Folder path of where to save the plots as .png files
"""
import pandas as pd
import os
from docopt import docopt
import altair as alt
opt = docopt(__doc__)
if __name__ == "__main__":
main(opt["--in_file"], opt["--out_folder"])
| [
2,
1772,
1058,
4912,
1542,
532,
9726,
513,
532,
337,
5258,
471,
2749,
198,
2,
3128,
1058,
33448,
12,
1157,
12,
1129,
198,
198,
37811,
31337,
82,
257,
764,
40664,
2393,
7268,
262,
14420,
2756,
290,
11257,
30772,
1366,
290,
18616,
257,
... | 3.059091 | 220 |
'''Module containing model specifications'''
from copy import deepcopy
from numpy import arange, array, multiply, ones
from numpy.random import rand
# ---------------------------------------------------------------------------
# Household-level epidemic model specifications. Each *_SPEC dict
# parameterises one compartmental household model; the
# 'compartmental_structure' entry selects which subsystem implementation
# consumes the remaining keys.
# ---------------------------------------------------------------------------
TWO_AGE_SIR_SPEC = {
    'compartmental_structure': 'SIR', # This is which subsystem key to use
    'AR': 0.45,                     # Secondary attack probability
    'R*': 1.1,                      # Household-level reproduction number
    'recovery_rate': 1/12,          # Recovery rate
    'sus': array([1,1]),            # Relative susceptibility by age/vulnerability class
    'density_expo' : 0.5,           # "Cauchemez parameter"
    'fit_method' : 'R*'
}

SINGLE_AGE_SEIR_SPEC = {
    'compartmental_structure': 'SEIR', # This is which subsystem key to use
    'AR': 0.45,                     # Secondary attack probability
    'R*': 1.1,                      # Household-level reproduction number
    'recovery_rate': 1/7,           # Recovery rate
    'incubation_rate': 1/5,         # E->I incubation rate
    'sus': array([1]),              # Relative susceptibility by age/vulnerability class
    'density_expo' : 0.5,           # "Cauchemez parameter"
    'fit_method' : 'R*'
}

TWO_AGE_SEIR_SPEC = {
    'compartmental_structure': 'SEIR', # This is which subsystem key to use
    'AR': 0.45,                     # Secondary attack probability
    'R*': 1.1,                      # Household-level reproduction number
    'recovery_rate': 1/7,           # Recovery rate
    'incubation_rate': 1/5,         # E->I incubation rate
    'sus': array([1,1]),            # Relative susceptibility by age/vulnerability class
    'density_expo' : 0.5,           # "Cauchemez parameter"
    'fit_method' : 'R*'
}

TWO_AGE_SEPIR_SPEC = {
    'compartmental_structure': 'SEPIR', # This is which subsystem key to use
    'AR': 0.45,                     # Secondary attack probability
    'R*': 1.1,                      # Household-level reproduction number
    'recovery_rate': 1/4,           # Recovery rate
    'incubation_rate': 1/5,         # E->P incubation rate
    'symp_onset_rate': 1/3,         # P->I prodromal to symptomatic rate
    'prodromal_trans_scaling':
     array([0.5,0.5]),              # Prodromal transmission intensity relative to full inf transmission
    'sus': array([1,1]),            # Relative susceptibility by age/vulnerability class
    'density_expo' : 0.5,           # "Cauchemez parameter"
    'fit_method' : 'R*'
}

# Variant of TWO_AGE_SEPIR_SPEC without an 'R*' target; the 'EL' fit method
# is defined by the fitting code elsewhere in the package (not visible here).
TWO_AGE_SEPIR_SPEC_FOR_FITTING = {
    'compartmental_structure': 'SEPIR', # This is which subsystem key to use
    'AR': 0.45,                     # Secondary attack probability
    'recovery_rate': 1/4,           # Recovery rate
    'incubation_rate': 1/5,         # E->P incubation rate
    'symp_onset_rate': 1/3,         # P->I prodromal to symptomatic rate
    'prodromal_trans_scaling':
     array([0.5,0.5]),              # Prodromal transmission intensity relative to full inf transmission
    'sus': array([1,1]),            # Relative susceptibility by age/vulnerability class
    'density_expo' : 0.5,           # "Cauchemez parameter"
    'fit_method' : 'EL'
}

# SEPIRQ model with *internal* isolation ('iso_method': "int").
TWO_AGE_INT_SEPIRQ_SPEC = {
    'compartmental_structure': 'SEPIRQ', # This is which subsystem key to use
    'AR': 0.45,                     # Secondary attack probability
    'R*': 1.1,                      # Household-level reproduction number
    'recovery_rate': 1/4,           # Recovery rate
    'incubation_rate': 1/5,         # E->P incubation rate
    'symp_onset_rate': 1/3,         # P->I prodromal to symptomatic rate
    'exp_iso_rate': 1/1 * ones(2,), # Ave. time in days to detection by class
    'pro_iso_rate': 1/1 * ones(2,),
    'inf_iso_rate': 1/0.5 * ones(2,),
    'discharge_rate': 1/14,         # 1 / ave time in isolation
    'iso_method': "int",            # This is either "int" or "ext"
    'ad_prob': 1,                   # Probability under internal isolation that household members actually isolate
    'class_is_isolating':
    array([[True, True, True],
           [True, True, True],
           [True, True, True]]),    # Element (i,j) is "If someone of class j is present, class i will isolate externally"
    'prodromal_trans_scaling':
     array([0.5,0.5]),              # Prodromal transmission intensity relative to full inf transmission
    'iso_trans_scaling':
     array([1,1]),                  # Transmission intensity while isolated relative to full inf transmission
                                    # NOTE(review): original comment here was copy-pasted from the prodromal line
    'sus': array([1,1]),            # Relative susceptibility by age/vulnerability class
    'density_expo' : 0.5,           # "Cauchemez parameter"
    'fit_method' : 'R*'
}

# SEPIRQ model with *external* isolation ('iso_method': "ext").
TWO_AGE_EXT_SEPIRQ_SPEC = {
    'compartmental_structure': 'SEPIRQ', # This is which subsystem key to use
    'AR': 0.45,                     # Secondary attack probability
    'R*': 1.1,                      # Household-level reproduction number
    'recovery_rate': 1/4,           # Recovery rate
    'incubation_rate': 1/5,         # E->P incubation rate
    'symp_onset_rate': 1/3,         # P->I prodromal to symptomatic rate
    'exp_iso_rate': 1/1 * ones(2,), # Ave. time in days to detection by class
    'pro_iso_rate': 1/1 * ones(2,),
    'inf_iso_rate': 1/0.5 * ones(2,),
    'discharge_rate': 1/14,         # 1 / ave time in isolation
    'iso_method': "ext",            # This is either "int" or "ext"
    'ad_prob': 0.2,                 # Probability under internal isolation that household members actually isolate
    'class_is_isolating':
    array([[False, False, False],
           [False, False, True],
           [False, False, False]]), # Element (i,j) is "If someone of class j is present, class i will isolate externally"
    'prodromal_trans_scaling':
     array([0.5,0.5]),              # Prodromal transmission intensity relative to full inf transmission
    'iso_trans_scaling':
     array([1,1]),                  # Transmission intensity while isolated relative to full inf transmission
                                    # NOTE(review): original comment here was copy-pasted from the prodromal line
    'sus': array([1,1]),            # Relative susceptibility by age/vulnerability class
    'density_expo' : 0.5,           # "Cauchemez parameter"
    'fit_method' : 'R*'
}

# UK demographic inputs, collapsed to a single age class.
SINGLE_AGE_UK_SPEC = {
    'k_home': { # File location for UK within-household contact matrix
        'file_name': 'inputs/MUestimates_home_2.xlsx',
        'sheet_name':'United Kingdom of Great Britain'
    },
    'k_all': { # File location for UK pop-level contact matrix
        'file_name': 'inputs/MUestimates_all_locations_2.xlsx',
        'sheet_name': 'United Kingdom of Great Britain'
    },
    'pop_pyramid_file_name': 'inputs/United Kingdom-2019.csv', # File location for UK age pyramid
    'fine_bds' : arange(0,81,5),    # Boundaries used in pyramid/contact data
    'coarse_bds' : array([0]),      # Desired boundaries for model population
    'adult_bd' : 1
}

# UK demographic inputs with a child/adult split at age 20.
TWO_AGE_UK_SPEC = {
    'k_home': { # File location for UK within-household contact matrix
        'file_name': 'inputs/MUestimates_home_2.xlsx',
        'sheet_name':'United Kingdom of Great Britain'
    },
    'k_all': { # File location for UK pop-level contact matrix
        'file_name': 'inputs/MUestimates_all_locations_2.xlsx',
        'sheet_name': 'United Kingdom of Great Britain'
    },
    'pop_pyramid_file_name': 'inputs/United Kingdom-2019.csv', # File location for UK age pyramid
    'fine_bds' : arange(0,81,5),    # Boundaries used in pyramid/contact data
    'coarse_bds' : array([0,20]),   # Desired boundaries for model population
    'adult_bd' : 1
}

# Specification using Italian (Vo') demographic data.
VO_SPEC = {
    # Interpretable parameters:
    'R0': 2.4,
    'incubation_rate': 1/1,
    'recovery_rate': 1/9,
    # Age bands used to stratify the vector parameters
    'age_quant_bounds': array([20,60]),
    'asymp_trans_scaling': array([1.0,
                                  1.0,
                                  1.0]),
    'symptom_prob' : array([0.2,
                            0.2,
                            0.2,]),
    # Relative susceptibility
    'sus' : array([1.0,
                   1.0,
                   1.0],),
    'k_home': {
        'file_name': 'inputs/MUestimates_home_1.xlsx',
        'sheet_name': 'Italy'
    },
    'k_all': {
        'file_name': 'inputs/MUestimates_all_locations_1.xlsx',
        'sheet_name': 'Italy'
    },
    'pop_pyramid_file_name': 'inputs/Italy-2019.csv',
    # TODO: Parameter below (rho) may be redundant
    'rho_file_name': 'inputs/rho_estimate_cdc.csv',
    'external_importation': {
        'type': 'exponential',
        'exponent': 1.0e-2,
        # TODO: Parameter below (alpha) needs a better name
        'alpha': 1.0e-5,
    }
}

# SEPIRQ specification with a shielding ("vulnerable") class.
SEPIRQ_SPEC = {
    # Interpretable parameters:
    'R0': 1.01,                     # Reproduction number
    'recovery_rate': 1/4,           # Recovery rate
    'incubation_rate': 1/5,         # E->P incubation rate
    'symp_onset_rate': 1/3,         # P->I prodromal to symptomatic rate
    'prodromal_trans_scaling':
     array([0.5,0.5,0.5]),          # Prodromal transmission intensity relative to full inf transmission
    'sus': array([1,1,1]),          # Relative susceptibility by age/vulnerability class
    'external_trans_scaling': 0.5,  # Relative intensity of external compared to internal contacts
    'vuln_prop': 2.2/56,            # Total proportion of adults who are shielding
    'k_home': {
        'file_name': 'inputs/MUestimates_home_2.xlsx',
        'sheet_name':'United Kingdom of Great Britain'
    },
    'k_all': {
        'file_name': 'inputs/MUestimates_all_locations_2.xlsx',
        'sheet_name': 'United Kingdom of Great Britain'
    },
    'pop_pyramid_file_name': 'inputs/United Kingdom-2019.csv'
}
# Care-home outbreak model specification (three occupant classes; only the
# first class has non-zero bed turnover and mortality).
CAREHOME_SPEC = {
    # Interpretable parameters:
    'R_carehome': 1.1,                 # Within-carehome reproduction number
    'incubation_rate': 1 / 1,          # E->P incubation rate
    'symp_onset_rate': 1 / 5,          # P->I prodromal to symptomatic rate
    'recovery_rate': 1 / 4,            # Recovery rate
    # Prodromal transmission intensity relative to full inf transmission
    'prodromal_trans_scaling': array([0.7, 0.7, 0.7]),
    # Relative susceptibility by age/vulnerability class
    'sus': array([1, 1, 1]),
    # Rate of bed emptying
    'empty_rate': array([1 / 240, 0, 0]),
    # Coronavirus death rate - death prob times time to death
    'covid_mortality_rate': array([0.01 * (1 / 4), 0, 0]),
    # Rate of bed refilling - 1/(ave. days until refil)
    'refill_rate': array([1 / 75, 0, 0]),
    # Coupling strength between different care homes, between 0 and 1
    'inter_home_coupling': 0,
}
| [
7061,
6,
26796,
7268,
2746,
20640,
7061,
6,
198,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
299,
32152,
1330,
610,
858,
11,
7177,
11,
29162,
11,
3392,
198,
6738,
299,
32152,
13,
25120,
1330,
43720,
198,
198,
34551,
46,
62,
11879,
... | 2.130858 | 5,036 |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class DialogueList(ListResource):
    """List resource for an Assistant's Dialogues.

    PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com.
    """

    def __init__(self, version, assistant_sid):
        """Initialize the DialogueList.

        :param Version version: Version that contains the resource
        :param assistant_sid: The unique ID of the parent Assistant.
        """
        super(DialogueList, self).__init__(version)
        # Components of the resource path, handed down to child contexts.
        self._solution = {'assistant_sid': assistant_sid}

    def get(self, sid):
        """Construct a DialogueContext for a specific dialogue.

        :param sid: The sid
        :rtype: twilio.rest.preview.understand.assistant.dialogue.DialogueContext
        """
        return DialogueContext(
            self._version,
            assistant_sid=self._solution['assistant_sid'],
            sid=sid,
        )

    def __call__(self, sid):
        """Alias for :meth:`get`: allows ``dialogues(sid)`` indexing syntax.

        :param sid: The sid
        :rtype: twilio.rest.preview.understand.assistant.dialogue.DialogueContext
        """
        return self.get(sid)

    def __repr__(self):
        """Return a machine-friendly representation.

        :rtype: str
        """
        return '<Twilio.Preview.Understand.DialogueList>'
class DialoguePage(Page):
    """A single page of Dialogue results.

    PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com.
    """

    def __init__(self, version, response, solution):
        """Initialize the DialoguePage.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        """
        super(DialoguePage, self).__init__(version, response)
        # Path solution handed down from the owning list resource.
        self._solution = solution

    def get_instance(self, payload):
        """Build a DialogueInstance from one record of the API response.

        :param dict payload: Payload response from the API
        :rtype: twilio.rest.preview.understand.assistant.dialogue.DialogueInstance
        """
        return DialogueInstance(
            self._version,
            payload,
            assistant_sid=self._solution['assistant_sid'],
        )

    def __repr__(self):
        """Return a machine-friendly representation.

        :rtype: str
        """
        return '<Twilio.Preview.Understand.DialoguePage>'
class DialogueContext(InstanceContext):
    """Context for operating on one specific Dialogue.

    PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com.
    """

    def __init__(self, version, assistant_sid, sid):
        """Initialize the DialogueContext.

        :param Version version: Version that contains the resource
        :param assistant_sid: The assistant_sid
        :param sid: The sid
        """
        super(DialogueContext, self).__init__(version)
        # Path solution and the request URI derived from it.
        self._solution = {'assistant_sid': assistant_sid, 'sid': sid}
        self._uri = '/Assistants/{assistant_sid}/Dialogues/{sid}'.format(**self._solution)

    def fetch(self):
        """Fetch the Dialogue from the API.

        :returns: Fetched DialogueInstance
        :rtype: twilio.rest.preview.understand.assistant.dialogue.DialogueInstance
        """
        params = values.of({})
        payload = self._version.fetch('GET', self._uri, params=params)
        return DialogueInstance(
            self._version,
            payload,
            assistant_sid=self._solution['assistant_sid'],
            sid=self._solution['sid'],
        )

    def __repr__(self):
        """Return a machine-friendly representation including the path solution.

        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Preview.Understand.DialogueContext {}>'.format(details)
class DialogueInstance(InstanceResource):
    """Representation of a single Dialogue resource.

    PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com.
    """

    def __init__(self, version, payload, assistant_sid, sid=None):
        """Initialize the DialogueInstance.

        :param dict payload: Payload response from the API
        :param assistant_sid: The unique ID of the parent Assistant.
        :param sid: The sid (defaults to the sid found in the payload)
        """
        super(DialogueInstance, self).__init__(version)
        # Marshal the raw API payload into the property dict.
        self._properties = {
            key: payload[key]
            for key in ('account_sid', 'assistant_sid', 'sid', 'data', 'url')
        }
        # Context is created lazily by the _proxy property.
        self._context = None
        self._solution = {'assistant_sid': assistant_sid, 'sid': sid or self._properties['sid']}

    @property
    def _proxy(self):
        """Lazily build (and cache) the DialogueContext that performs actions.

        :rtype: twilio.rest.preview.understand.assistant.dialogue.DialogueContext
        """
        if self._context is None:
            self._context = DialogueContext(
                self._version,
                assistant_sid=self._solution['assistant_sid'],
                sid=self._solution['sid'],
            )
        return self._context

    @property
    def account_sid(self):
        """The unique ID of the Account that created this Field.

        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def assistant_sid(self):
        """The unique ID of the parent Assistant.

        :rtype: unicode
        """
        return self._properties['assistant_sid']

    @property
    def sid(self):
        """The unique ID of the Dialogue.

        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def data(self):
        """The dialogue memory object as json.

        :rtype: dict
        """
        return self._properties['data']

    @property
    def url(self):
        """The url.

        :rtype: unicode
        """
        return self._properties['url']

    def fetch(self):
        """Fetch the Dialogue via the proxy context.

        :returns: Fetched DialogueInstance
        :rtype: twilio.rest.preview.understand.assistant.dialogue.DialogueInstance
        """
        return self._proxy.fetch()

    def __repr__(self):
        """Return a machine-friendly representation including the path solution.

        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Preview.Understand.DialogueInstance {}>'.format(details)
| [
2,
19617,
28,
40477,
12,
23,
198,
81,
37811,
198,
1212,
2438,
373,
7560,
416,
198,
59,
1220,
4808,
220,
220,
220,
4808,
220,
4808,
91,
220,
220,
4808,
220,
4808,
198,
930,
44104,
8,
11139,
28264,
5769,
62,
91,
11139,
91,
930,
7,
... | 2.515267 | 3,406 |
# -*- coding: utf-8 -*-
#UTILITIES
import nltk
import string
from nltk.collocations import *
from nltk.corpus import stopwords
'''
def jaccard(file1, file2):
lst1 = set(open(file1,'rb').read().splitlines())
lst2 = set(open(file2,'rb').read().splitlines())
return float(len(lst1 & lst2))/len(lst1 | lst2)
'''
def filestream_to_word_list(fstream, lemmatize=True, remove_stopwords=True):
    """Read an open text stream and return its cleaned vocabulary as a set.

    Each line is stripped of non-ASCII characters and lower-cased; blank
    lines are dropped. When ``remove_stopwords`` is true the text is
    tokenized with nltk and English stopwords are discarded (otherwise
    whole lines are treated as the "words"). Pure-punctuation and
    all-digit entries are always filtered out.

    NOTE: ``lemmatize`` is currently unused.
    TODO: Increase stopword coverages
    """
    lines = fstream.read().splitlines()
    lines = [''.join(c for c in ln.strip() if ord(c) < 128) for ln in lines]
    words = [ln.lower() for ln in lines if ln != '']
    if remove_stopwords:
        stops = stopwords.words('english')
        words = [w for w in nltk.word_tokenize(' '.join(words)) if w not in stops]
    punct = set(string.punctuation)
    words = [w for w in words if w not in punct and not w.isdigit()]
    return set(words)
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
3843,
4146,
30383,
198,
11748,
299,
2528,
74,
198,
11748,
4731,
198,
198,
6738,
299,
2528,
74,
13,
26000,
20968,
1330,
1635,
198,
6738,
299,
2528,
74,
13,
10215,
79... | 2.604294 | 326 |
import tests.activity_tests
import tests.entity_tests
import tests.report_tests | [
11748,
5254,
13,
21797,
62,
41989,
198,
11748,
5254,
13,
26858,
62,
41989,
198,
11748,
5254,
13,
13116,
62,
41989
] | 3.95 | 20 |
import sys
import os
PROJECT_HOME = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../../'))
sys.path.append(PROJECT_HOME)
from flask.ext.testing import TestCase
from flask import request
from flask import url_for, Flask
import unittest
import requests
import time
import app
import json
import httpretty
class TestExpectedResults(TestCase):
    '''Check if the service returns expected results'''
    # Each test mocks the SIMBAD TAP endpoint (OBJECTS_SIMBAD_TAP_URL) with
    # httpretty, then drives the Flask test client against the service routes.

    def create_app(self):
        '''Create the wsgi application'''
        app_ = app.create_app()
        return app_

    @httpretty.activate
    def test_object_search_200(self):
        '''Test to see if calling the object search endpoint
        works for valid data'''
        QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
        # Mocked SIMBAD rows: [simbad id, main id, canonical name].
        mockdata = {"data":[[1575544, "NAME ANDROMEDA","NAME ANDROMEDA"],[3133169, "NAME LMC", "NAME LMC"]]}
        # We will be doing a POST request with a set of identifiers
        identifiers = ["3133169", "1575544"]
        # Mock the response
        httpretty.register_uri(
            httpretty.POST, QUERY_URL,
            content_type='application/json',
            status=200,
            body='%s'%json.dumps(mockdata))
        # Do the POST request
        r = self.client.post(
            url_for('objectsearch'),
            content_type='application/json',
            data=json.dumps({'identifiers': identifiers}))
        # The response should have a status code 200
        self.assertTrue(r.status_code == 200)
        # See if we received the expected results
        expected = {u'3133169': {u'id': '3133169', u'canonical': u'LMC'}, u'1575544': {u'id': '1575544', u'canonical': u'ANDROMEDA'}}
        self.assertEqual(r.json, expected)

    @httpretty.activate
    def test_object_search_500(self):
        '''Test to see if a 500 from SIMBAD is processed correctly'''
        QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
        identifiers = ["3133169", "1575544"]
        # Mock the response (upstream failure)
        httpretty.register_uri(
            httpretty.POST, QUERY_URL,
            content_type='application/json',
            status=500,
            body='')
        # Do the POST request
        r = self.client.post(
            url_for('objectsearch'),
            content_type='application/json',
            data=json.dumps({'identifiers': identifiers}))
        # See if we received the expected results
        self.assertEqual(r.json['Error'], 'Unable to get results!')
        self.assertEqual(r.json['Error Info'], 'SIMBAD returned status 500')

    @httpretty.activate
    def test_object_search_bad_data(self):
        '''Test to see if bad data from SIMBAD is processed correctly'''
        QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
        identifiers = ["3133169", "1575544"]
        # Mock the response (200 but an empty/invalid payload)
        httpretty.register_uri(
            httpretty.POST, QUERY_URL,
            content_type='application/json',
            status=200,
            body='{}')
        # Do the POST request
        r = self.client.post(
            url_for('objectsearch'),
            content_type='application/json',
            data=json.dumps({'identifiers': identifiers}))
        # See if we received the expected results
        self.assertEqual(r.json['Error'], 'Unable to get results!')
        self.assertEqual(r.json['Error Info'], 'Bad data returned by SIMBAD')

    @httpretty.activate
    def test_object_search_empty_list(self):
        '''Test to see if an empty id list is processed correctly'''
        QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
        # Mock the response
        httpretty.register_uri(
            httpretty.POST, QUERY_URL,
            content_type='application/json',
            status=200,
            body='{}')
        # Do the POST request
        r = self.client.post(
            url_for('objectsearch'),
            content_type='application/json',
            data=json.dumps({}))
        # First we omit the 'identifiers' attribute in the input
        # See if we received the expected results
        self.assertEqual(r.json['Error'], 'Unable to get results!')
        self.assertEqual(r.json['Error Info'], 'No identifiers/objects found in POST body')
        # The same should happen with an empty identifiers list
        identifiers = []
        r = self.client.post(
            url_for('objectsearch'),
            content_type='application/json',
            data=json.dumps({'identifiers': identifiers}))
        # See if we received the expected results
        self.assertEqual(r.json['Error'], 'Unable to get results!')
        self.assertEqual(r.json['Error Info'], 'No identifiers/objects found in POST body')

    @httpretty.activate
    def test_position_search_200(self):
        '''Test to see if calling the position search endpoint
        works for valid data'''
        QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
        mockdata = {"data":[["2003A&A...405..111G"],["2011AcA....61..103G"]]}
        # Mock the response
        httpretty.register_uri(
            httpretty.POST, QUERY_URL,
            content_type='application/json',
            status=200,
            body='%s'%json.dumps(mockdata))
        # Do the GET request -- position string is "RA DEC:radius"
        url = url_for('positionsearch', pstring="80.89416667 -69.75611111:0.166666")
        r = self.client.get(url)
        # The response should have a status code 200
        self.assertTrue(r.status_code == 200)
        # See if we received the expected results
        expected = {u'data': [u'2011AcA....61..103G', u'2003A&A...405..111G']}
        self.assertEqual(r.json, expected)

    @httpretty.activate
    def test_position_search_poserror(self):
        '''Test position query with invalid position string'''
        QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
        mockdata = {"data":[["2003A&A...405..111G"],["2011AcA....61..103G"]]}
        # Mock the response
        httpretty.register_uri(
            httpretty.POST, QUERY_URL,
            content_type='application/json',
            status=200,
            body='%s'%json.dumps(mockdata))
        # First an incorrectly formatted search radius:
        # this should result in using the default radius and return a valid result
        pstring = "80.89416667 -69.75611111:1 2 3 4"
        url = url_for('positionsearch', pstring=pstring)
        r = self.client.get(url)
        # See if we received the expected results
        expected = {u'data': [u'2011AcA....61..103G', u'2003A&A...405..111G']}
        self.assertEqual(r.json, expected)
        # Next an invalid type for RA and DEC
        pstring = "A B:0.166666"
        url = url_for('positionsearch', pstring=pstring)
        r = self.client.get(url)
        # See if we received the expected results
        expected = 'Invalid position string: %s'%pstring
        self.assertEqual(r.json['Error Info'], expected)
        # Test no sign for DEC
        pstring = "80.89416667 69.75611111:0.166666"
        url = url_for('positionsearch', pstring=pstring)
        r = self.client.get(url)
        # See if we received the expected results
        expected = 'Invalid position string: %s'%pstring
        self.assertEqual(r.json['Error Info'], expected)

    @httpretty.activate
    def test_id_search_200(self):
        '''Test to see if calling the id search endpoint
        works for valid data'''
        QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
        mockdata = {"data":[[1575544, "NAME ANDROMEDA","NAME ANDROMEDA"],[3133169, "NAME LMC", "NAME LMC"]]}
        # We will be doing a POST request with a set of identifiers
        objects = ["Andromeda", "LMC"]
        # Mock the response
        httpretty.register_uri(
            httpretty.POST, QUERY_URL,
            content_type='application/json',
            status=200,
            body='%s'%json.dumps(mockdata))
        # Do the POST request
        r = self.client.post(
            url_for('objectsearch'),
            content_type='application/json',
            data=json.dumps({'objects': objects}))
        # The response should have a status code 200
        self.assertTrue(r.status_code == 200)
        # See if we received the expected results
        expected = {u'LMC': {'id': '3133169', 'canonical': u'LMC'}, u'Andromeda': {'id': '1575544', 'canonical': u'ANDROMEDA'}}
        self.assertEqual(r.json, expected)

    @httpretty.activate
    def test_query_search_200(self):
        '''test translation Solr query with "object:" modifier'''
        QUERY_URL = self.app.config.get('OBJECTS_SIMBAD_TAP_URL')
        mockdata = {"data":[[1575544, "NAME ANDROMEDA","NAME ANDROMEDA"],[3133169, "NAME LMC", "NAME LMC"],[3253618, "NAME SMC", "NAME SMC"]]}
        # The test query we will provide
        query = 'bibstem:A&A object:Andromeda year:2015'
        # Mock the response
        httpretty.register_uri(
            httpretty.POST, QUERY_URL,
            content_type='application/json',
            status=200,
            body='%s'%json.dumps(mockdata))
        # Do the POST request
        r = self.client.post(
            url_for('querysearch'),
            content_type='application/json',
            data=json.dumps({'query': query}))
        # The response should have a status code 200
        self.assertTrue(r.status_code == 200)
        # See if we received the expected results: object: term rewritten to simbid:
        expected = {"query": "bibstem:A&A simbid:1575544 year:2015"}
        self.assertEqual(r.json, expected)
| [
11748,
25064,
198,
11748,
28686,
198,
31190,
23680,
62,
39069,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
198,
220,
220,
220,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
705,
40720,
40720... | 2.275664 | 4,179 |
#!/usr/bin/python
# Tool to provide dedicated variables for cross-compilation
__author__ = __maintainer__ = "Jérôme Carretero <cJ-waf@zougloub.eu>"
__copyright__ = "Jérôme Carretero, 2014"
"""
This tool allows to use environment variables to define cross-compilation
variables intended for build variants.
The variables are obtained from the environment in 3 ways:
1. By defining CHOST, they can be derived as ${CHOST}-${TOOL}
2. By defining HOST_x
3. By defining ${CHOST//-/_}_x
else one can set ``cfg.env.CHOST`` in ``wscript`` before loading ``cross_gnu``.
Usage:
- In your build script::
def configure(cfg):
...
for variant in x_variants:
setenv(variant)
conf.load('cross_gnu')
conf.xcheck_host_var('POUET')
...
- Then::
CHOST=arm-hardfloat-linux-gnueabi waf configure
env arm-hardfloat-linux-gnueabi-CC="clang -..." waf configure
CFLAGS=... CHOST=arm-hardfloat-linux-gnueabi HOST_CFLAGS=-g waf configure
HOST_CC="clang -..." waf configure
This example ``wscript`` compiles to Microchip PIC (xc16-gcc-xyz must be in PATH):
.. code:: python
from waflib import Configure
#from https://gist.github.com/rpuntaie/2bddfb5d7b77db26415ee14371289971
import waf_variants
variants='pc fw/variant1 fw/variant2'.split()
top = "."
out = "../build"
PIC = '33FJ128GP804' #dsPICxxx
@Configure.conf
def gcc_modifier_xc16(cfg):
v = cfg.env
v.cprogram_PATTERN = '%s.elf'
v.LINKFLAGS_cprogram = ','.join(['-Wl','','','--defsym=__MPLAB_BUILD=0','','--script=p'+PIC+'.gld',
'--stack=16','--check-sections','--data-init','--pack-data','--handles','--isr','--no-gc-sections',
'--fill-upper=0','--stackguard=16','--no-force-link','--smart-io']) #,'--report-mem'])
v.CFLAGS_cprogram=['-mcpu='+PIC,'-omf=elf','-mlarge-code','-msmart-io=1',
'-msfr-warn=off','-mno-override-inline','-finline','-Winline']
def configure(cfg):
if 'fw' in cfg.variant: #firmware
cfg.env.DEST_OS = 'xc16' #cfg.env.CHOST = 'xc16' #works too
cfg.load('c cross_gnu') #cfg.env.CHOST becomes ['xc16']
...
else: #configure for pc SW
...
def build(bld):
if 'fw' in bld.variant: #firmware
bld.program(source='maintst.c', target='maintst');
bld(source='maintst.elf', target='maintst.hex', rule="xc16-bin2hex ${SRC} -a -omf=elf")
else: #build for pc SW
...
"""
import os
from waflib import Utils, Configure
from waflib.Tools import ccroot, gcc
# Prefer shlex.quote (Python 3.3+); fall back to the Python 2-era
# pipes.quote, which provides the same shell-quoting behaviour.
try:
    from shlex import quote
except ImportError:
    from pipes import quote
def get_chost_stuff(conf):
    """Return the ``(chost, chost_envar)`` pair derived from ``conf.env.CHOST``.

    When ``conf.env.CHOST`` is unset or empty both elements are ``None``;
    otherwise ``chost`` is the first CHOST entry and ``chost_envar`` is the
    same string with ``-`` replaced by ``_`` (usable in environment
    variable names such as ``${CHOST//-/_}_CC``).
    """
    if not conf.env.CHOST:
        return None, None
    chost = conf.env.CHOST[0]
    return chost, chost.replace("-", "_")
# NOTE(review): four stacked @Configure.conf decorators on a single function
# is almost certainly an extraction artifact -- in the upstream waf tool each
# of these decorated a separate helper (the docstring above mentions e.g.
# conf.xcheck_host_var). Registering the same function four times is
# harmless with Configure.conf, but confirm against the original tool.
@Configure.conf
@Configure.conf
@Configure.conf
@Configure.conf
def configure(conf):
    """
    Configuration example for gcc, it will not work for g++/clang/clang++
    """
    # Derive cross-compilation tool variables from CHOST/HOST_* env vars,
    # then apply the standard gcc C toolchain configuration steps.
    conf.xcheck_host()
    conf.gcc_common_flags()
    conf.gcc_modifier_platform()
    conf.cc_load_tools()
    conf.cc_add_flags()
    conf.link_add_flags()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
16984,
284,
2148,
7256,
9633,
329,
3272,
12,
5589,
10520,
198,
198,
834,
9800,
834,
796,
11593,
76,
2913,
10613,
834,
796,
366,
41,
42445,
27083,
1326,
1879,
260,
353,
78,
1279,
66,
41,
... | 2.362199 | 1,328 |
#
# This class is automatically generated by mig. DO NOT EDIT THIS FILE.
# This class implements a Python interface to the 'CommandMsg'
# message type.
#
import tinyos.message.Message
# The default size of this message type in bytes.
DEFAULT_MESSAGE_SIZE = 28

# The Active Message type associated with this message.
# NOTE(review): presumably mirrors the AM type declared in the TinyOS
# CommandMsg definition this file was generated from -- do not edit by hand
# (see the "automatically generated by mig" header above).
AM_TYPE = 99
| [
2,
198,
2,
770,
1398,
318,
6338,
7560,
416,
37011,
13,
8410,
5626,
48483,
12680,
45811,
13,
198,
2,
770,
1398,
23986,
257,
11361,
7071,
284,
262,
705,
21575,
50108,
6,
198,
2,
3275,
2099,
13,
198,
2,
198,
198,
11748,
7009,
418,
13... | 3.510417 | 96 |
# Generated by Django 3.1.5 on 2021-01-24 17:54
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
20,
319,
33448,
12,
486,
12,
1731,
1596,
25,
4051,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import (DataRequired, Regexp, ValidationError, Email, Length, EqualTo)
from models import User
# this method will check if the email already exists
'''FORM FOR REGISTERING'''
'''FORM FOR LOGIN'''
'''FORM FOR SEARCH'''
'''FORM TO REMOVE FLIGHT'''
| [
6738,
42903,
62,
86,
27110,
1330,
46947,
8479,
198,
6738,
266,
83,
23914,
1330,
10903,
15878,
11,
30275,
15878,
11,
39900,
15878,
198,
6738,
266,
83,
23914,
13,
12102,
2024,
1330,
357,
6601,
37374,
11,
797,
25636,
79,
11,
3254,
24765,
... | 3.25 | 116 |
#
import os
import math
from array import array
import optparse
import ROOT
from ROOT import *
import scipy
import Plotting_Header
from Plotting_Header import *
# Plot configuration triples: (variable name, x-axis lower bound, x-axis
# upper bound). FIX: zip() returns a non-subscriptable iterator on
# Python 3, so slicing it directly raises TypeError -- materialise with
# list() first (also valid on Python 2). The [0:1] slice restricts the
# run to the first variable; widen or remove it to plot more of them.
for varname, xmin, xmax in list(zip(["LepPt","METPt", "TAGPt", "TPRIMEM", "WPt", "lepJetPt","ZPRIMEM"],[50,0,0,0,0,0,200],[500,900,1400,1800,1200,1000,3000]))[0:1]:
    varplot(varname, xmin, xmax)
| [
2,
198,
11748,
28686,
198,
11748,
10688,
198,
6738,
7177,
1330,
7177,
198,
11748,
2172,
29572,
198,
11748,
15107,
2394,
198,
6738,
15107,
2394,
1330,
1635,
198,
11748,
629,
541,
88,
198,
11748,
28114,
889,
62,
39681,
198,
6738,
28114,
8... | 2.38 | 150 |
from requests.auth import HTTPBasicAuth
import requests
url = "https://komoju.com/api/v1/payments"
user = ""
req = requests.get(url, auth=HTTPBasicAuth(user, ''))
# print(req)
# print(dir(req))
# print(req.text)
payload = {
'amount': 100,
'currency': "test",
'external_order_num': 100
}
print(len(payload))
for i in payload:
print(i)
# test['test10'] = 100
# print(test) | [
6738,
7007,
13,
18439,
1330,
14626,
26416,
30515,
198,
11748,
7007,
628,
198,
6371,
796,
366,
5450,
1378,
74,
296,
13210,
84,
13,
785,
14,
15042,
14,
85,
16,
14,
15577,
902,
1,
198,
7220,
796,
13538,
198,
198,
42180,
796,
7007,
13,
... | 2.487342 | 158 |
"""
Contrains functions relative to predicting using the neural networks
"""
import os
from pynomalous.learning.training import get_model
from keras.models import load_model
def load_trained_net(mal):
    """
    Load and return the pretrained Keras model for a given label.

    The model (architecture + weights) is read from
    ``<cwd>/data/models/model_<mal>.h5`` via ``keras.models.load_model``,
    so the working directory must contain the ``data/models`` tree.

    :param mal: identifier of what the net has to recognize; selects the
                ``model_<mal>.h5`` file to load
    :return: the trained Keras model
    """
    # (The docstring previously documented a ``weights_root`` parameter that
    # does not exist; the model root is always derived from the cwd.)
    model_root = os.path.join(os.getcwd(), 'data', 'models')
    model = load_model(os.path.join(model_root, 'model_' + mal + '.h5'))
    return model
| [
37811,
198,
4264,
81,
1299,
5499,
3585,
284,
25539,
1262,
262,
17019,
7686,
198,
37811,
198,
198,
11748,
28686,
198,
198,
6738,
279,
2047,
18048,
516,
13,
40684,
13,
34409,
1330,
651,
62,
19849,
198,
6738,
41927,
292,
13,
27530,
1330,
... | 3.190722 | 194 |
# -*- coding: utf-8 -*-
# start
try:
import tensorflow as tf
import numpy as np
import pickle
from tensorflow.python.platform import gfile
from random import randint
import os
from scipy.misc import imsave
from matplotlib import pyplot as plt
except ImportError:
raise ValueError("Please install tensorflow and matplotlib.")
# Builds a small CIFAR-10 CNN graph (pre-TF-1.0 API): two conv+maxpool
# stages, one hidden fully-connected layer with dropout, softmax output,
# then restores a checkpoint and prepares a validation batch.
# NOTE(review): initWeight, initBias, conv2d, maxPool2d and unpickle are not
# defined in this chunk -- presumably stripped from the original file.
# start with 0.1 so reLu isnt always 0
# the convolution with padding of 1 on each side, and moves by 1.
# max pooling basically shrinks it by 2x, taking the highest value on each feature.
batchsize = 50
imagesize = 32
colors = 3
sess = tf.InteractiveSession()
img = tf.placeholder("float", shape=[None, imagesize, imagesize, colors])
lbl = tf.placeholder("float", shape=[None, 10])
# for each 5x5 area, check for 32 features over 3 color channels
wConv1 = initWeight([5, 5, colors, 32])
bConv1 = initBias([32])
# move the conv filter over the picture
conv1 = conv2d(img, wConv1)
# adds bias
bias1 = conv1 + bConv1
# relu = max(0,x), adds nonlinearality
relu1 = tf.nn.relu(bias1)
# maxpool to 16x16
pool1 = maxPool2d(relu1)
# second conv layer, takes a 16x16 with 32 layers, turns to 8x8 with 64 layers
wConv2 = initWeight([5, 5, 32, 64])
bConv2 = initBias([64])
conv2 = conv2d(pool1, wConv2)
bias2 = conv2 + bConv2
relu2 = tf.nn.relu(bias2)
pool2 = maxPool2d(relu2)
# fully-connected is just a regular neural net: 8*8*64 for each training data
# NOTE(review): under Python 3, imagesize / 4 is float division, so these
# shape entries become floats (8.0) -- use imagesize // 4; confirm the
# original targets Python 2.
wFc1 = initWeight([(imagesize / 4) * (imagesize / 4) * 64, 1024])
bFc1 = initBias([1024])
# reduce dimensions to flatten
pool2flat = tf.reshape(pool2, [-1, (imagesize / 4) * (imagesize / 4) * 64])
# 128 training set by 2304 data points
fc1 = tf.matmul(pool2flat, wFc1) + bFc1
relu3 = tf.nn.relu(fc1)
# dropout removes duplicate weights
keepProb = tf.placeholder("float")
drop = tf.nn.dropout(relu3, keepProb)
wFc2 = initWeight([1024, 10])
# NOTE(review): bias created with initWeight, not initBias -- looks like a
# copy-paste slip; confirm against the sibling layers above.
bFc2 = initWeight([10])
# softmax converts individual probabilities to percentages
guesses = tf.nn.softmax(tf.matmul(drop, wFc2) + bFc2)
# how wrong it is
cross_entropy = -tf.reduce_sum(lbl * tf.log(guesses + 1e-9))
# theres a lot of tensorflow optimizers such as gradient descent
# adam is one of them
optimizer = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# array of bools, checking if each guess was correct
correct_prediction = tf.equal(tf.argmax(guesses, 1), tf.argmax(lbl, 1))
# represent the correctness as a float [1,1,0,1] -> 0.75
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# tf.initialize_all_variables is the pre-TF-1.0 spelling of
# global_variables_initializer.
sess.run(tf.initialize_all_variables())
# Build a 50-example validation batch from CIFAR-10 batch 1 (NCHW -> NHWC
# via swapaxes), scaled to [0, 1] with one-hot labels.
batch = unpickle("cifar-10-batches-py/data_batch_1")
validationData = batch["data"][555:batchsize + 555]
validationRawLabel = batch["labels"][555:batchsize + 555]
validationLabel = np.zeros((batchsize, 10))
validationLabel[np.arange(batchsize), validationRawLabel] = 1
validationData = validationData / 255.0
validationData = np.reshape(validationData, [-1, 3, 32, 32])
validationData = np.swapaxes(validationData, 1, 3)
saver = tf.train.Saver()
saver.restore(sess, tf.train.latest_checkpoint(os.getcwd() + "/training/"))
# train for 20000
# print mnistbatch[0].shape
def unpool(value, name='unpool'):
    """Upsample *value* by a factor of two along every spatial axis.
    N-dimensional unpooling operation from
    https://www.robots.ox.ac.uk/~vgg/rg/papers/Dosovitskiy_Learning_to_Generate_2015_CVPR_paper.pdf
    Each spatial dimension is doubled by concatenating the tensor with a
    copy of itself along that axis, then reshaping back.
    :param value: A Tensor of shape [b, d0, d1, ..., dn, ch]
    :return: A Tensor of shape [b, 2*d0, 2*d1, ..., 2*dn, ch]
    """
    with tf.name_scope(name) as scope:
        shape = value.get_shape().as_list()
        # Number of spatial axes (everything between batch and channels).
        n_spatial = len(shape[1:-1])
        # Fold the batch axis away so the spatial axes are the trailing ones.
        result = tf.reshape(value, [-1] + shape[-n_spatial:])
        # Duplicate along each spatial axis, innermost handled last.
        for axis in reversed(range(1, n_spatial + 1)):
            result = tf.concat(axis, [result, result])
        doubled = [-1] + [2 * s for s in shape[1:-1]] + [shape[-1]]
        return tf.reshape(result, doubled, name=scope)
# Entry point: tf.app.run() parses flags and then invokes a module-level
# main(). NOTE(review): no main() is defined in this chunk -- confirm it
# exists elsewhere in the original file.
if __name__ == '__main__':
    tf.app.run()
# end
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
923,
198,
28311,
25,
198,
220,
220,
220,
1330,
11192,
273,
11125,
355,
48700,
198,
220,
220,
220,
1330,
299,
32152,
355,
45941,
198,
220,
220,
220,
1330,
2298,... | 2.541667 | 1,512 |
from picraftzero.log import logger
from picraftzero.utils import constrain
from picraftzero.interfaces.hardware.providers import MotorProvider
from picraftzero.utils import dedupe
from picraftzero.thirdparty.pimoroni import explorerhat
| [
198,
6738,
8301,
1617,
22570,
13,
6404,
1330,
49706,
628,
198,
198,
6738,
8301,
1617,
22570,
13,
26791,
1330,
1500,
3201,
198,
198,
6738,
8301,
1617,
22570,
13,
3849,
32186,
13,
10424,
1574,
13,
15234,
4157,
1330,
12533,
29495,
198,
673... | 3.588235 | 68 |
from .pure_python_clustering_sample import *
from .cpp_implemented_clustering_sample import CppImplementedClusteringSample
from .bkmeans import *
from .pqkmeans import *
| [
6738,
764,
37424,
62,
29412,
62,
565,
436,
1586,
62,
39873,
1330,
1635,
198,
6738,
764,
20322,
62,
320,
1154,
12061,
62,
565,
436,
1586,
62,
39873,
1330,
327,
381,
3546,
1154,
12061,
2601,
436,
1586,
36674,
198,
6738,
764,
65,
74,
1... | 2.982456 | 57 |
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
BASE_TT_CURVE=declarative_base() | [
6738,
44161,
282,
26599,
1330,
1635,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
2377,
283,
876,
62,
8692,
198,
198,
33,
11159,
62,
15751,
62,
34,
4261,
6089,
28,
32446,
283,
876,
62,
8692,
3419
] | 2.85 | 40 |
from parsl.config import Config
from parsl.providers import CondorProvider
from parsl.executors import HighThroughputExecutor
from parsl.addresses import address_by_query
# Parsl configuration for the Open Science Grid via HTCondor: a single
# HighThroughputExecutor with one worker per node and four pilot blocks.
# Note that address_by_query() performs the address lookup at import time.
config = Config(
    executors=[
        HighThroughputExecutor(
            label='OSG_HTEX',
            address=address_by_query(),
            max_workers=1,
            provider=CondorProvider(
                nodes_per_block=1,
                init_blocks=4,
                max_blocks=4,
                # This scheduler option string ensures that the compute nodes provisioned
                # will have modules
                scheduler_options='Requirements = OSGVO_OS_STRING == "RHEL 6" && Arch == "X86_64" && HAS_MODULES == True',
                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init='',
                walltime="00:20:00",
            ),
        )
    ]
)
| [
6738,
13544,
75,
13,
11250,
1330,
17056,
198,
6738,
13544,
75,
13,
15234,
4157,
1330,
9724,
273,
29495,
198,
6738,
13544,
75,
13,
18558,
315,
669,
1330,
3334,
15046,
1996,
23002,
38409,
198,
6738,
13544,
75,
13,
2860,
16746,
1330,
2209,... | 2.098468 | 457 |
from abc import ABCMeta, abstractmethod
import astropy.units as u
from astropy.coordinates import SkyCoord, Angle
import numpy as np
from scipy.interpolate import splev
from skimage.transform import rotate
from .mapping_functions import mapping_functions, inverse_mapping_functions
class Camera(metaclass=ABCMeta):
    '''
    Base class for All Sky Camera. Base classes need to set
    the lens, sensor, location and max_magnitude members and
    implement the read method, returning an instance of image
    Attributes
    ----------
    sensor: Sensor
        Sensor instance describing the sensor properties
    lens: Lens
        Lens instance describing the optical properties
    max_magnitude: float
        Maximum catalog magnitude to consider
    location: astropy.coordinates.EarthLocation
        Location of the all sky camera
    rotation: astropy.units.Quantity[angle] or astropy.coordinates.Angle
        maximum visiual magnitude of stars to take into account
    rotate_image: bool
        If True, rotate the image instead of the coordinate system
    '''
    max_magnitude = 6
    # NOTE(review): the bodies of several decorated members appear to have
    # been stripped from this chunk -- as written, all of the decorators
    # below stack onto read(), which is almost certainly not intended.
    # Restore the missing properties/methods from the original source.
    @u.quantity_input(rotation=u.rad)
    @property
    @abstractmethod
    @property
    @abstractmethod
    @abstractmethod
    @classmethod
    @abstractmethod
    def read(path):
        '''
        Read an image file into an instance Image,
        must be overridden by subclasses
        '''
        pass
    @classmethod
    def theta2r(cls, theta):
        '''
        Calculates distance from the image center for a given incident angle
        Parameters
        -----------
        theta: float
            Polar angle between zenith and pixel position
        Returns
        -------
        float
            Distance between image center and pixel position
        '''
        # Physical distance (mm) from the mapping function, converted to
        # pixels via the sensor's pixel width.
        return cls.lens.mapping_function(theta) / cls.sensor.pixel_width
    @classmethod
    def r2theta(cls, r):
        '''
        Calculates angle to the optical axes for a given distance to the image center
        Parameters
        -----------
        r: float
            distance between image center and point in mm
        Returns
        -------
        float
            Angle to the optical axis
        '''
        # Inverse of theta2r: pixels -> mm -> incident angle.
        return cls.lens.inverse_mapping_function(r * cls.sensor.pixel_width)
class Lens:
    '''
    Class describing the lens of an AllSkyCamera.
    If a Lens does not have one of the 4 implemented
    mapping functions (e.g. because of distortions),
    override `mapping_function` and `inverse_mapping`
    Attributes
    ----------
    mapping: string
        The mapping function of the lens, one of
        * "gnomonical" for non-fisheye lenses
        * "equidistant"
        * "stereographic"
        * "equisolid_angle", e.g. for the Sigma 4.5mm f2.8
    '''
    # NOTE(review): dangling decorator -- the decorated __init__/method was
    # stripped from this chunk; as written this is a SyntaxError. Restore it
    # from the original source.
    @u.quantity_input(focal_length=u.mm)
class Sensor:
    '''
    Class containing image sensor properties
    Attributes
    ----------
    resolution_row: float
        Number of pixels in the image on the y axis
    resolution_col: float
        Number of pixels in the image on the x axis
    width: float
        Width of the sensor in mm
    height: float
        lenght of the sensor in mm
    '''
    # NOTE(review): dangling decorator -- the decorated __init__ was
    # stripped from this chunk; as written this is a SyntaxError.
    @u.quantity_input(width=u.mm, height=u.mm)
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
24396,
198,
11748,
6468,
28338,
13,
41667,
355,
334,
198,
6738,
6468,
28338,
13,
37652,
17540,
1330,
5274,
7222,
585,
11,
42375,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
... | 2.613965 | 1,246 |
import scipy.stats as st
import numpy as np
import pandas as pd
import collections
def kernel_smooth(spike_vector, sigma, edges, bin_size=None, padding='symmetric', border_correction = False):
    """
    Receives an array of spike times (point-process like), and smoothes it
    by convolving with a _gaussian_ kernel, of width *sigma*.
    Parameters
    ----------
    spike_vector : array
        Point process like spike times, *in milisseconds*
    sigma : int
        Width of the window, in ms. If None, a plain histogram is returned.
    edges : tuple
        Starting and ending time of the window of interest, in ms.
    bin_size : int, default None
        The size (in ms) of each step in the returning smoothed data.
        By default is the minimum, equal to 1ms.
    padding : str, default 'symmetric'
        The kind of padding on array edges. Possible values are
        'constant', 'edge', 'maximum', 'mean', 'median', 'minimum', 'reflect',
        'symmetric', 'wrap', or a <function>. May be None for no padding.
    border_correction : bool, default False
        whether to divide borders by spikevector true contribution
        Raises a ValueError if used adjoined with padding
    Returns
    -------
    smoothed_data : array
        The estimated firing rate as each interval of bin_size
        in *spikes per second*
    times : array
        The time at the left edge of each interval
    Notes
    -----
    Total kernel size is 6*sigma, 3 sigma for each size.
    The former ``precision_factor`` parameter was removed; the temporal
    precision is now fixed at 1 ms (see ``tp`` below).
    See also
    --------
    numpy.pad for padding options and information.
    """
    # Temporal precision fixed at 1 ms (precision_factor was retired).
    tp = 1# int(sigma/precision_factor)
    if bin_size is None:
        bin_size = tp
    try:
        # NOTE(review): float(bin_size) == bin_size holds for ANY numeric
        # bin_size (e.g. 2.5), so with tp == 1 this no longer rejects
        # non-integer bin sizes -- confirm intended behavior.
        assert float(bin_size) == bin_size # Is multiple
    except AssertionError:
        raise ValueError("Bin size must be a multiple of temporal precision.")
    # Truncate the window so it contains a whole number of bins.
    n_bins = int(bin_size*int((edges[1]-edges[0])/bin_size))
    edges= (edges[0], bin_size*int(n_bins/bin_size)+edges[0])
    if edges[1] <= edges[0]:
        return ([],[])
    if sigma is None:
        # No smoothing requested: plain histogram (note: counts, not rates).
        return np.histogram(spike_vector, bins=int((edges[1]-edges[0])/bin_size), range=edges)
    spike_count, times = np.histogram(spike_vector, bins=n_bins, range=edges)
    # Half-width of the kernel support in samples (3 sigma each side).
    each_size_len = int(3*sigma + 1)
    if padding is not None:
        if border_correction:
            raise ValueError('Padding and correction cannot be used together')
        spike_count = np.pad(spike_count, each_size_len, padding)
    s=sigma # Just for one-lining below
    kernel = st.norm(0,s).pdf( np.linspace(-3*s, 3*s, 2*each_size_len + 1) )
    # 'valid' trims the padded borders back off; 'same' keeps full length.
    smoothed = np.convolve(spike_count, kernel,
                           'valid' if padding is not None else 'same')
    if border_correction:
        # Rescale the borders by the kernel mass actually inside the window.
        contrib = st.norm(0,s).cdf(np.linspace(0, 3*s, each_size_len))
        smoothed[:each_size_len] /= contrib
        smoothed[-each_size_len:]/= contrib[::-1]
    # Integrate per bin via cumulative sum; *1000/bin_size converts
    # spikes-per-ms-bin into spikes per second.
    cs = np.hstack((0, smoothed.cumsum()))*1000/bin_size
    return np.diff(cs[::bin_size]), times[:-bin_size:bin_size]
def remove_baseline(activity, baseline, baseline_size=None):
    """
    Removes the mean baseline firing rate from the activity.
    Parameters
    ----------
    activity : DataFrame
        DataFrame of firing rates (in spikes/*s*)
        with a single Identifier column *by*, that will be used to select
        the corresponding baseline
    baseline : DataFrame
        Indexed in the same way as the important features of activity,
        may be composed of the mean firing rate or the baseline spike times.
        BE CAREFUL: Do _NOT_ use smoothed firing rates
    baseline_size : number (default None)
        The duration of the baseline, *in seconds*.
        Ignored if firing rates are given
    Returns
    -------
    DataFrame
        ``activity`` minus the (computed) baseline rates, restricted to the
        columns of ``activity``.
    """
    # Spike-time containers (lists/arrays) are Sized; scalar rates are not.
    # BUG FIX: collections.Sized was removed in Python 3.10 -- the ABCs live
    # in collections.abc.
    if isinstance(baseline.iloc[0, 0], collections.abc.Sized):
        assert baseline_size is not None
        firing_rate = lambda x: len(x) / baseline_size  # Number of spikes per sec
        baseline = baseline.applymap(firing_rate)
    else:
        assert isinstance(baseline.iloc[0, 0], float)
    return (activity - baseline)[activity.columns]
11748,
629,
541,
88,
13,
34242,
355,
336,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
17268,
198,
198,
4299,
9720,
62,
5796,
5226,
7,
2777,
522,
62,
31364,
11,
264,
13495,
11,
13015,
11,
987... | 2.642946 | 1,602 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
33918,
198,
198,
6738,
435,
541,
323,
13,
64,
404,
13,
15042,
13,
9979,
415,
13,
22973,
34184,
1187,
1330,
163... | 2.446809 | 47 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from contrail_api_cli.resource import Collection
from contrail_api_cli.utils import printo
from contrail_api_cli.exceptions import ResourceNotFound
from contrail_api_cli.schema import require_schema
from contrail_api_cli.command import Command
class MigrateLB22132(Command):
    """Command to migrate LB created in 2.21 to 3.2.
    In 2.21 the admin_state flag didn't matter, but in 3.2 if it is
    not set to True the haproxy config file will not be generated.
    This command set admin_state to True for all lb-pools, lb-members, lb-vips, lb-hms.
    """
    description = "Migrate LBs from 2.21 to 3.2"
    # NOTE(review): the method this decorator applies to has been stripped
    # from this chunk -- as written, the trailing decorator is a
    # SyntaxError. Restore the decorated method from the original source.
    @require_schema(version="2.21")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
25736,
346,
62,
15042,
62,
44506,
13,
31092,
1330,
12251,
198,
6738,
25736,
346,
62,
15042,
6... | 2.962656 | 241 |
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aif360.algorithms.inprocessing
import lale.docstrings
import lale.operators
from .util import (
_BaseInEstimatorImpl,
_categorical_fairness_properties,
_categorical_input_predict_proba_schema,
_categorical_input_predict_schema,
_categorical_output_predict_proba_schema,
_categorical_output_predict_schema,
_categorical_supervised_input_fit_schema,
)
# Reuse the shared categorical-fairness schemas for this operator's
# fit/predict signatures.
_input_fit_schema = _categorical_supervised_input_fit_schema
_input_predict_schema = _categorical_input_predict_schema
_output_predict_schema = _categorical_output_predict_schema
_input_predict_proba_schema = _categorical_input_predict_proba_schema
_output_predict_proba_schema = _categorical_output_predict_proba_schema
_hyperparams_schema = {
    "description": "Hyperparameter schema.",
    "allOf": [
        {
            "description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints.",
            "type": "object",
            "additionalProperties": False,
            "required": [
                *_categorical_fairness_properties.keys(),
                "redact",
                "preparation",
                "eta",
            ],
            "relevantToOptimizer": ["eta"],
            "properties": {
                **_categorical_fairness_properties,
                "redact": {
                    "description": "Whether to redact protected attributes before data preparation (recommended) or not.",
                    "type": "boolean",
                    "default": True,
                },
                "preparation": {
                    "description": "Transformer, which may be an individual operator or a sub-pipeline.",
                    "anyOf": [
                        {"laleType": "operator"},
                        {"description": "lale.lib.lale.NoOp", "enum": [None]},
                    ],
                    "default": None,
                },
                "eta": {
                    "description": "Fairness penalty parameter.",
                    "type": "number",
                    "minimum": 0.0,
                    "exclusiveMinimum": True,
                    "default": 1.0,
                    "minimumForOptimizer": 0.03125,
                    "maximumForOptimizer": 32768,
                },
            },
        },
    ],
}
# NOTE(review): "import_from" says aif360.sklearn.inprocessing while the
# file imports aif360.algorithms.inprocessing -- confirm which is intended.
_combined_schemas = {
    "description": """`PrejudiceRemover`_ in-estimator fairness mitigator. Adds a discrimination-aware regularization term to the learning objective (`Kamishima et al. 2012`_).
.. _`PrejudiceRemover`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.algorithms.inprocessing.PrejudiceRemover.html
.. _`Kamishima et al. 2012`: https://doi.org/10.1007/978-3-642-33486-3_3
""",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.aif360.prejudice_remover.html#lale.lib.aif360.prejudice_remover.PrejudiceRemover",
    "import_from": "aif360.sklearn.inprocessing",
    "type": "object",
    "tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_fit": _input_fit_schema,
        "input_predict": _input_predict_schema,
        "output_predict": _output_predict_schema,
        "input_predict_proba": _input_predict_proba_schema,
        "output_predict_proba": _output_predict_proba_schema,
    },
}
# NOTE(review): _PrejudiceRemoverImpl is not defined in this chunk -- the
# implementation class was presumably stripped; confirm it exists above.
PrejudiceRemover = lale.operators.make_operator(
    _PrejudiceRemoverImpl, _combined_schemas
)
lale.docstrings.set_docstrings(PrejudiceRemover)
| [
2,
15069,
12131,
19764,
10501,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
73... | 2.241342 | 1,848 |
import torch
import torch.nn as nn
from torch.nn import init
from torchvision import models
from torch.autograd import Variable
######################################################################
# Defines the new fc layer and classification layer
# |--Linear--|--bn--|--relu--|--Linear--|
# Define the ResNet50-based Model
# Define the DenseNet121-based Model
# debug model structure
# Quick smoke test: instantiate the DenseNet-based model (751 classes) and
# print the output shape for a dummy 8-image batch.
# NOTE(review): ft_net_dense/ft_net are presumably defined earlier in this
# file (not visible in this chunk); `input` shadows the builtin, and
# torch.autograd.Variable is a no-op wrapper on modern PyTorch.
#net = ft_net(751)
net = ft_net_dense(751)
#print(net)
input = Variable(torch.FloatTensor(8, 3, 224, 224))
output = net(input)
print('net output size:')
print(output.shape)
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28034,
13,
20471,
1330,
2315,
198,
6738,
28034,
10178,
1330,
4981,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
198,
29113,
29113,
4242,
2235,
198,
198,
2,... | 3.252874 | 174 |
#!/usr/bin/env python
"""
Test move stage.
"""
from storm_control.test.hal.standardHalTest import halTest
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
14402,
1445,
3800,
13,
198,
37811,
198,
6738,
6388,
62,
13716,
13,
9288,
13,
14201,
13,
20307,
40202,
14402,
1330,
10284,
14402,
628,
198
] | 3.085714 | 35 |
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dask.dataframe as dd
import numpy as np
from nvtabular.dispatch import DataFrameType, annotate
from .moments import _custom_moments
from .operator import ColumnSelector, Operator
from .stat_operator import StatOperator
| [
2,
198,
2,
15069,
357,
66,
8,
33448,
11,
15127,
23929,
44680,
6234,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.681614 | 223 |
from typing import Union
from bson import ObjectId
from glasskit.queue import BaseTask
PostIndexerTask.register()
| [
6738,
19720,
1330,
4479,
198,
6738,
275,
1559,
1330,
9515,
7390,
198,
6738,
5405,
15813,
13,
36560,
1330,
7308,
25714,
628,
198,
198,
6307,
15732,
263,
25714,
13,
30238,
3419,
198
] | 3.774194 | 31 |
import json
from json import JSONDecoder, JSONEncoder
| [
11748,
33918,
198,
6738,
33918,
1330,
19449,
10707,
12342,
11,
19449,
27195,
12342,
628,
628,
628,
628,
628
] | 3.5 | 18 |
"""
Script to evaluate multiple policies in one environment using a range (1D grid) of domain parameters
"""
import os.path as osp
import torch as to
import numpy as np
import pandas as pd
import pprint
import pyrado
from pyrado.domain_randomization.utils import param_grid
from pyrado.environments.pysim.quanser_ball_balancer import QBallBalancerSim
from pyrado.environments.pysim.quanser_cartpole import QCartPoleSwingUpSim, QCartPoleStabSim
from pyrado.environment_wrappers.action_delay import ActDelayWrapper
from pyrado.environment_wrappers.utils import typed_env
from pyrado.environments.pysim.quanser_qube import QQubeSim
from pyrado.logger.experiment import setup_experiment, save_list_of_dicts_to_yaml
from pyrado.sampling.parallel_evaluation import eval_domain_params, conditional_actnorm_wrapper
from pyrado.sampling.sampler_pool import SamplerPool
from pyrado.utils.argparser import get_argparser
from pyrado.utils.data_types import dict_arraylike_to_float
from pyrado.utils.experiments import load_experiment
from pyrado.utils.input_output import print_cbt
if __name__ == '__main__':
    # Parse command line arguments
    args = get_argparser().parse_args()
    if args.max_steps == pyrado.inf:
        args.max_steps = 2000
        print_cbt(f'Set maximum number of time steps to {args.max_steps}', 'y')
    # Create one-dim evaluation grid
    param_spec = dict()
    if args.env_name == QBallBalancerSim.name:
        # Create the environment for evaluating
        env = QBallBalancerSim(dt=args.dt, max_steps=args.max_steps, load_experimental_tholds=True)
        # Uncomment exactly one of the lines below to select the domain
        # parameter to sweep (the script allows a single varied parameter).
        # param_spec['g'] = np.linspace(8.91, 12.91, num=11, endpoint=True)
        # param_spec['m_ball'] = np.linspace(0.001, 0.033, num=11, endpoint=True)
        # param_spec['r_ball'] = np.linspace(0.01, 0.1, num=11, endpoint=True)
        # param_spec['r_arm'] = np.linspace(0.0254*0.3, 0.0254*1.7, num=11, endpoint=True)
        # param_spec['l_plate'] = np.linspace(0.275*0.3, 0.275*1.7, num=11, endpoint=True)
        # param_spec['J_l'] = np.linspace(5.2822e-5 * 0.5, 5.2822e-5 * 1.5, num=11, endpoint=True)
        # param_spec['J_m'] = np.linspace(4.6063e-7*0.5, 4.6063e-7*1.5, num=11, endpoint=True)
        # param_spec['K_g'] = np.linspace(70*0.5, 70*1.5, num=11, endpoint=True)
        # param_spec['eta_g'] = np.linspace(0.6, 1.0, num=11, endpoint=True)
        # param_spec['eta_m'] = np.linspace(0.49, 0.89, num=11, endpoint=True)
        # param_spec['k_m'] = np.linspace(0.0077*0.3, 0.0077*1.7, num=11, endpoint=True)
        # param_spec['k_m'] = np.linspace(0.004, 0.012, num=11, endpoint=True)
        # param_spec['R_m'] = np.linspace(2.6*0.5, 2.6*1.5, num=11, endpoint=True)
        # param_spec['B_eq'] = np.linspace(0.0, 0.2, num=11, endpoint=True)
        # param_spec['c_frict'] = np.linspace(0, 0.15, num=11, endpoint=True)
        # param_spec['V_thold_x_pos'] = np.linspace(0.0, 1.5, num=11, endpoint=True)
        # param_spec['V_thold_x_neg'] = np.linspace(-1.5, 0.0, num=11, endpoint=True)
        # param_spec['V_thold_y_pos'] = np.linspace(0.0, 1.5, num=11, endpoint=True)
        # param_spec['V_thold_y_neg'] = np.linspace(-1.5, 0, num=11, endpoint=True)
        # param_spec['offset_th_x'] = np.linspace(-15./180*np.pi, 15./180*np.pi, num=11, endpoint=True)
        # param_spec['offset_th_y'] = np.linspace(-15./180*np.pi, 15./180*np.pi, num=11, endpoint=True)
        # Get the experiments' directories to load from
        prefixes = [
            osp.join(pyrado.EXP_DIR, 'FILL_IN', 'FILL_IN'),
        ]
        exp_names = [
            '',
        ]
        exp_labels = [
            '',
        ]
    elif args.env_name in [QCartPoleStabSim.name, QCartPoleSwingUpSim.name]:
        # Create the environment for evaluating
        if args.env_name == QCartPoleSwingUpSim.name:
            env = QCartPoleSwingUpSim(dt=args.dt, max_steps=args.max_steps)
        else:
            env = QCartPoleStabSim(dt=args.dt, max_steps=args.max_steps)
        # param_spec['g'] = np.linspace(9.8*10.7, 9.81*1.3, num=11, endpoint=True)
        param_spec['m_cart'] = np.linspace(0.38*0.7, 0.38*1.3, num=11, endpoint=True)
        # param_spec['l_rail'] = np.linspace(0.841*0.7, 0.841*1.3, num=11, endpoint=True)
        # param_spec['eta_m'] = np.linspace(0.9*0.7, 0.9*1.3, num=11, endpoint=True)
        # param_spec['eta_g'] = np.linspace(0.9*0.7, 0.9*1.3, num=11, endpoint=True)
        # param_spec['K_g'] = np.linspace(3.71*0.7, 3.71*1.3, num=11, endpoint=True)
        # param_spec['J_m'] = np.linspace(3.9e-7*0.7, 3.9e-7*1.3, num=11, endpoint=True)
        # param_spec['r_mp'] = np.linspace(6.35e-3*0.7, 6.35e-3*1.3, num=11, endpoint=True)
        # param_spec['R_m'] = np.linspace(2.6*0.7, 2.6*1.3, num=11, endpoint=True)
        # param_spec['k_m'] = np.linspace(7.67e-3*0.7, 7.67e-3*1.3, num=11, endpoint=True)
        # param_spec['B_pole'] = np.linspace(0.0024*0.7, 0.0024*1.3, num=11, endpoint=True)
        # param_spec['B_eq'] = np.linspace(5.4*0.7, 5.4*1.3, num=11, endpoint=True)
        # param_spec['m_pole'] = np.linspace(0.127*0.7, 0.127*1.3, num=11, endpoint=True)
        # param_spec['l_pole'] = np.linspace(0.641/2*0.7, 0.641/2*1.3, num=11, endpoint=True)
        # Get the experiments' directories to load from
        prefixes = [
            osp.join(pyrado.EXP_DIR, 'FILL_IN', 'FILL_IN'),
        ]
        exp_names = [
            '',
        ]
        exp_labels = [
            '',
        ]
    elif args.env_name == QQubeSim.name:
        env = QQubeSim(dt=args.dt, max_steps=args.max_steps)
        # param_spec['g'] = np.linspace(9.81*0.7, 9.81*1.3, num=11, endpoint=True)
        # param_spec['Rm'] = np.linspace(8.4*0.7, 8.4*1.3, num=11, endpoint=True)
        # param_spec['km'] = np.linspace(0.042*0.7, 0.042*1.3, num=11, endpoint=True)
        # param_spec['Mr'] = np.linspace(0.095*0.7, 0.095*1.3, num=11, endpoint=True)
        # param_spec['Lr'] = np.linspace(0.085*0.7, 0.085*1.3, num=11, endpoint=True)
        # param_spec['Dr'] = np.linspace(5e-6*0.2, 5e-6*5, num=11, endpoint=True) # 5e-6
        # param_spec['Mp'] = np.linspace(0.024*0.7, 0.024*1.3, num=11, endpoint=True)
        # param_spec['Lp'] = np.linspace(0.129*0.7, 0.129*1.3, num=11, endpoint=True)
        # param_spec['Dp'] = np.linspace(1e-6*0.2, 1e-6*5, num=11, endpoint=True) # 1e-6
        # Get the experiments' directories to load from
        prefixes = [
            osp.join(pyrado.EXP_DIR, 'FILL_IN', 'FILL_IN'),
        ]
        exp_names = [
            '',
        ]
        exp_labels = [
            '',
        ]
    else:
        raise pyrado.ValueErr(given=args.env_name, eq_constraint=f'{QBallBalancerSim.name}, {QCartPoleStabSim.name},'
                                                                 f'{QCartPoleSwingUpSim.name}, or {QQubeSim.name}')
    # Always add an action delay wrapper (with 0 delay by default)
    if typed_env(env, ActDelayWrapper) is None:
        env = ActDelayWrapper(env)
    # param_spec['act_delay'] = np.linspace(0, 60, num=21, endpoint=True, dtype=int)
    if not len(param_spec.keys()) == 1:
        raise pyrado.ValueErr(msg='Do not vary more than one domain parameter for this script! (Check action delay.)')
    varied_param_key = ''.join(param_spec.keys()) # to get a str
    if not (len(prefixes) == len(exp_names) and len(prefixes) == len(exp_labels)):
        raise pyrado.ShapeErr(msg=f'The lengths of prefixes, exp_names, and exp_labels must be equal, '
                                  f'but they are {len(prefixes)}, {len(exp_names)}, and {len(exp_labels)}!')
    # Load the policies
    ex_dirs = [osp.join(p, e) for p, e in zip(prefixes, exp_names)]
    policies = []
    for ex_dir in ex_dirs:
        _, policy, _ = load_experiment(ex_dir)
        policies.append(policy)
    # Create one-dim results grid and ensure right number of rollouts
    param_list = param_grid(param_spec)
    param_list *= args.num_ro_per_config
    # Fix initial state (set to None if it should not be fixed)
    init_state = None
    # Crate empty data frame
    df = pd.DataFrame(columns=['policy', 'ret', 'len', varied_param_key])
    # Evaluate all policies
    for i, policy in enumerate(policies):
        # Create a new sampler pool for every policy to synchronize the random seeds i.e. init states
        pool = SamplerPool(args.num_envs)
        # Seed the sampler
        if args.seed is not None:
            pool.set_seed(args.seed)
            print_cbt(f'Set seed to {args.seed}', 'y')
        else:
            print_cbt('No seed was set', 'r', bright=True)
        # Add an action normalization wrapper if the policy was trained with one
        env = conditional_actnorm_wrapper(env, ex_dirs, i)
        # Sample rollouts
        ros = eval_domain_params(pool, env, policy, param_list, init_state)
        # Compute results metrics
        rets = [ro.undiscounted_return() for ro in ros]
        lengths = [float(ro.length) for ro in ros] # int values are not numeric in pandas
        vaired_param_values = [ro.rollout_info['domain_param'][varied_param_key] for ro in ros]
        varied_param = {varied_param_key: vaired_param_values}
        # NOTE(review): DataFrame.append was removed in pandas 2.0 -- this
        # script targets older pandas (pd.concat is the modern equivalent).
        df = df.append(pd.DataFrame(dict(policy=exp_labels[i], ret=rets, len=lengths, **varied_param)),
                       ignore_index=True)
    metrics = dict(
        avg_len=df.groupby('policy').mean()['len'].to_dict(),
        avg_ret=df.groupby('policy').mean()['ret'].to_dict(),
        median_ret=df.groupby('policy').median()['ret'].to_dict(),
        min_ret=df.groupby('policy').min()['ret'].to_dict(),
        max_ret=df.groupby('policy').max()['ret'].to_dict(),
        std_ret=df.groupby('policy').std()['ret'].to_dict(),
        quantile5_ret=df.groupby('policy').quantile(q=0.05)['ret'].to_dict(),
        quantile95_ret=df.groupby('policy').quantile(q=0.95)['ret'].to_dict()
    )
    pprint.pprint(metrics)
    # Create subfolder and save
    save_dir = setup_experiment('multiple_policies', args.env_name, varied_param_key, base_dir=pyrado.EVAL_DIR)
    save_list_of_dicts_to_yaml([
        {'ex_dirs': ex_dirs},
        {
            'varied_param': varied_param_key,
            'num_rpp': args.num_ro_per_config, 'seed': args.seed, 'dt': args.dt, 'max_steps': args.max_steps
        },
        dict_arraylike_to_float(metrics)],
        save_dir, file_name='summary'
    )
    df.to_pickle(osp.join(save_dir, 'df_mp_grid_1d.pkl'))
| [
37811,
198,
7391,
284,
13446,
3294,
4788,
287,
530,
2858,
1262,
257,
2837,
357,
16,
35,
10706,
8,
286,
7386,
10007,
198,
37811,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
11748,
28034,
355,
284,
198,
11748,
299,
32152,
355,
459... | 2.104945 | 4,955 |
from .gd import GD
import numpy as np
class GDAdaDelta(GD):
    """
    :note: Updates the weights of the passed layer using the AdaDelta method
           (Zeiler 2012, "ADADELTA: An Adaptive Learning Rate Method"); the
           only hyper-parameter needed by this class is (roh).
           Although GDAdaDelta inherits GD, it does not use GD's fixed
           learning rate, as AdaDelta derives a dynamic per-weight step size.
    """
    ID = 1

    def __init__(self, roh=0.9, *args, **kwargs):
        """
        :param roh: decay rate of the running averages, set empirically by the user, value ]0 , 1[
        :type roh: positive real number
        """
        super(GDAdaDelta, self).__init__(*args, **kwargs)
        self.__roh = roh

    def optimize(self, layer, delta: np.ndarray, number_of_examples, *args, **kwargs) -> None:
        """
        :note: 1- Updates the layer weights according to AdaDelta:
                      weights = weights - root( (S + epsilon) / (A + epsilon) ) * dL/dW
                      S = roh * S + (1 - roh) * (S + epsilon) / (A + epsilon) * square(dL/dW)
                      A = roh * A + (1 - roh) * square(dL/dW)
                  A and S are accumulators initialized by zero matrices with
                  dimensions like the layer's weights; each has a weights
                  part and a bias part:
                      A for weights ==> layer.a,  for bias ==> layer.ao
                      S for weights ==> layer.k,  for bias ==> layer.ko
               2- Initializes the accumulators as layer attributes if they
                  are not already there.
               BUG FIX: epsilon is now also added to the S numerators.
               Previously S started at zero and its purely multiplicative
               update kept it at exactly zero forever, so the computed step
               was always 0 and the weights never changed.
        :param layer: a layer in the training network
        :type layer: layer
        :param delta: the chain rule of multiplying the partial derivative of the loss function by the desired layer
                      weights passing by all activation functions (backpropagation)
        :type delta: np.ndarray
        :param number_of_examples: number of examples in the dataset
        :type number_of_examples: positive integer
        """
        delta_w, delta_b = self.update_delta(layer, delta, number_of_examples)
        eps = np.finfo(float).eps
        if not hasattr(layer, "a"):
            layer.a = np.zeros_like(layer.weights)
            layer.ao = np.zeros_like(layer.bias)
        if not hasattr(layer, "k"):
            layer.k = np.zeros_like(layer.weights)
            layer.ko = np.zeros_like(layer.bias)
        roh = self.__roh
        # A: running average of squared gradients.
        layer.a = roh * layer.a + (1 - roh) * np.square(delta_w)
        layer.ao = roh * layer.ao + (1 - roh) * np.square(delta_b)
        # S: running average of squared parameter updates; eps bootstraps it.
        layer.k = roh * layer.k + (1 - roh) * (layer.k + eps) / (layer.a + eps) * np.square(delta_w)
        layer.ko = roh * layer.ko + (1 - roh) * (layer.ko + eps) / (layer.ao + eps) * np.square(delta_b)
        # Step size RMS(S)/RMS(A) -- no global learning rate involved.
        layer.weights = layer.weights - np.power((layer.k + eps) / (layer.a + eps), 0.5) * delta_w
        layer.bias = layer.bias - np.power((layer.ko + eps) / (layer.ao + eps), 0.5) * delta_b

    def flush(self, layer):
        """
        :note: Deletes the attributes (accumulators) this optimizer added to the layer
        :param layer: a layer in the training network
        :type layer: layer
        """
        del layer.ao
        del layer.a
        del layer.ko
        del layer.k

    def _save(self):
        """
        Returns the hyper-parameters of the optimizing technique as a dict
        (consumed by :meth:`load`).
        """
        return {
            "lr": self._learning_rate,
            "roh": self.__roh
        }

    @staticmethod
    def load(data):
        """
        Re-creates the optimizer from the dict produced by :meth:`_save`.
        BUG FIX: the decay rate is stored under the key "roh" (see _save),
        but was read back as data["rho"], raising KeyError on every load.
        """
        return GDAdaDelta(learning_rate=data["lr"], roh=data["roh"])
| [
6738,
764,
21287,
1330,
27044,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4871,
27044,
2782,
64,
42430,
7,
45113,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1058,
11295,
25,
770,
1398,
4296,
262,
19590,
286,
262,
3804,
76... | 2.288179 | 1,565 |
# -*- coding: utf-8 -*-
from dnsdb_common.library.IPy import IP
from dnsdb_common.library.exception import BadParam
from . import commit_on_success
from . import db
from .models import DnsColo
from .models import DnsRecord
from .models import IpPool
from .models import Subnets
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
288,
5907,
9945,
62,
11321,
13,
32016,
13,
4061,
88,
1330,
6101,
198,
6738,
288,
5907,
9945,
62,
11321,
13,
32016,
13,
1069,
4516,
1330,
7772,
22973,
198,
6738,
... | 3.134831 | 89 |
import argparse
import re
from Bio import SeqIO
import os
# counts files
# deltes file upon request for next set of isoforms
##########################################################################
if __name__ == "__main__":
main()
| [
11748,
1822,
29572,
201,
198,
11748,
302,
201,
198,
6738,
16024,
1330,
1001,
80,
9399,
201,
198,
11748,
28686,
201,
198,
201,
198,
2,
9853,
3696,
201,
198,
201,
198,
2,
1619,
4879,
2393,
2402,
2581,
329,
1306,
900,
286,
47279,
23914,
... | 3.303797 | 79 |
# coding: utf-8
"""
BitMEX API
## REST API for the BitMEX Trading Platform [View Changelog](/app/apiChangelog) ---- #### Getting Started Base URI: [https://www.bitmex.com/api/v1](/api/v1) ##### Fetching Data All REST endpoints are documented below. You can try out any query right from this interface. Most table queries accept `count`, `start`, and `reverse` params. Set `reverse=true` to get rows newest-first. Additional documentation regarding filters, timestamps, and authentication is available in [the main API documentation](/app/restAPI). *All* table data is available via the [Websocket](/app/wsAPI). We highly recommend using the socket if you want to have the quickest possible data without being subject to ratelimits. ##### Return Types By default, all data is returned as JSON. Send `?_format=csv` to get CSV data or `?_format=xml` to get XML data. ##### Trade Data Queries *This is only a small subset of what is available, to get you started.* Fill in the parameters and click the `Try it out!` button to try any of these queries. * [Pricing Data](#!/Quote/Quote_get) * [Trade Data](#!/Trade/Trade_get) * [OrderBook Data](#!/OrderBook/OrderBook_getL2) * [Settlement Data](#!/Settlement/Settlement_get) * [Exchange Statistics](#!/Stats/Stats_history) Every function of the BitMEX.com platform is exposed here and documented. Many more functions are available. ##### Swagger Specification [⇩ Download Swagger JSON](swagger.json) ---- ## All API Endpoints Click to expand a section. # noqa: E501
OpenAPI spec version: 1.2.0
Contact: support@bitmex.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from bitmex_swagger.api_client import ApiClient
class PositionApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def position_get(self, **kwargs): # noqa: E501
"""Get your positions. # noqa: E501
This endpoint is used for retrieving position information. The fields largely follow the [FIX spec](http://www.onixs.biz/fix-dictionary/5.0.SP2/msgType_AP_6580.html) definitions. Some selected fields are explained in more detail below. The fields _account_, _symbol_, _currency_ are unique to each position and form its key. * **account**: Your unique account ID. * **symbol**: The contract for this position. * **currency**: The margin currency for this position. * **underlying**: Meta data of the _symbol_. * **quoteCurrency**: Meta data of the _symbol_, All prices are in the _quoteCurrency_ * **commission**: The maximum of the maker, taker, and settlement fee. * **initMarginReq**: The initial margin requirement. This will be at least the symbol's default initial maintenance margin, but can be higher if you choose lower leverage. * **maintMarginReq**: The maintenance margin requirement. This will be at least the symbol's default maintenance maintenance margin, but can be higher if you choose a higher risk limit. * **riskLimit**: This is a function of your _maintMarginReq_. * **leverage**: 1 / initMarginReq. * **crossMargin**: True/false depending on whether you set cross margin on this position. * **deleveragePercentile**: Indicates where your position is in the ADL queue. * **rebalancedPnl**: The value of realised PNL that has transferred to your wallet for this position. * **prevRebalancedPnl**: The value of realised PNL that has transferred to your wallet for this position since the position was closed. * **currentQty**: The current position amount in contracts. * **currentCost**: The current cost of the position in the settlement currency of the symbol (_currency_). * **currentComm**: The current commission of the position in the settlement currency of the symbol (_currency_). * **realisedCost**: The realised cost of this position calculated with regard to average cost accounting. * **unrealisedCost**: _currentCost_ - _realisedCost_. 
* **grossOpenCost**: The absolute value of your open orders for this symbol. * **grossOpenPremium**: The amount your bidding above the mark price in the settlement currency of the symbol (_currency_). * **markPrice**: The mark price of the symbol in _quoteCurrency_. * **markValue**: The _currentQty_ at the mark price in the settlement currency of the symbol (_currency_). * **homeNotional**: Value of position in units of _underlying_. * **foreignNotional**: Value of position in units of _quoteCurrency_. * **realisedPnl**: The negative of _realisedCost_. * **unrealisedGrossPnl**: _markValue_ - _unrealisedCost_. * **unrealisedPnl**: _unrealisedGrossPnl_. * **liquidationPrice**: Once markPrice reaches this price, this position will be liquidated. * **bankruptPrice**: Once markPrice reaches this price, this position will have no equity. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.position_get(async=True)
>>> result = thread.get()
:param async bool
:param str filter: Table filter. For example, send {\"symbol\": \"XBTUSD\"}.
:param str columns: Which columns to fetch. For example, send [\"columnName\"].
:param float count: Number of rows to fetch.
:return: list[Position]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.position_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.position_get_with_http_info(**kwargs) # noqa: E501
return data
def position_get_with_http_info(self, **kwargs): # noqa: E501
"""Get your positions. # noqa: E501
This endpoint is used for retrieving position information. The fields largely follow the [FIX spec](http://www.onixs.biz/fix-dictionary/5.0.SP2/msgType_AP_6580.html) definitions. Some selected fields are explained in more detail below. The fields _account_, _symbol_, _currency_ are unique to each position and form its key. * **account**: Your unique account ID. * **symbol**: The contract for this position. * **currency**: The margin currency for this position. * **underlying**: Meta data of the _symbol_. * **quoteCurrency**: Meta data of the _symbol_, All prices are in the _quoteCurrency_ * **commission**: The maximum of the maker, taker, and settlement fee. * **initMarginReq**: The initial margin requirement. This will be at least the symbol's default initial maintenance margin, but can be higher if you choose lower leverage. * **maintMarginReq**: The maintenance margin requirement. This will be at least the symbol's default maintenance maintenance margin, but can be higher if you choose a higher risk limit. * **riskLimit**: This is a function of your _maintMarginReq_. * **leverage**: 1 / initMarginReq. * **crossMargin**: True/false depending on whether you set cross margin on this position. * **deleveragePercentile**: Indicates where your position is in the ADL queue. * **rebalancedPnl**: The value of realised PNL that has transferred to your wallet for this position. * **prevRebalancedPnl**: The value of realised PNL that has transferred to your wallet for this position since the position was closed. * **currentQty**: The current position amount in contracts. * **currentCost**: The current cost of the position in the settlement currency of the symbol (_currency_). * **currentComm**: The current commission of the position in the settlement currency of the symbol (_currency_). * **realisedCost**: The realised cost of this position calculated with regard to average cost accounting. * **unrealisedCost**: _currentCost_ - _realisedCost_. 
* **grossOpenCost**: The absolute value of your open orders for this symbol. * **grossOpenPremium**: The amount your bidding above the mark price in the settlement currency of the symbol (_currency_). * **markPrice**: The mark price of the symbol in _quoteCurrency_. * **markValue**: The _currentQty_ at the mark price in the settlement currency of the symbol (_currency_). * **homeNotional**: Value of position in units of _underlying_. * **foreignNotional**: Value of position in units of _quoteCurrency_. * **realisedPnl**: The negative of _realisedCost_. * **unrealisedGrossPnl**: _markValue_ - _unrealisedCost_. * **unrealisedPnl**: _unrealisedGrossPnl_. * **liquidationPrice**: Once markPrice reaches this price, this position will be liquidated. * **bankruptPrice**: Once markPrice reaches this price, this position will have no equity. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.position_get_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str filter: Table filter. For example, send {\"symbol\": \"XBTUSD\"}.
:param str columns: Which columns to fetch. For example, send [\"columnName\"].
:param float count: Number of rows to fetch.
:return: list[Position]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['filter', 'columns', 'count'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method position_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'columns' in params:
query_params.append(('columns', params['columns'])) # noqa: E501
if 'count' in params:
query_params.append(('count', params['count'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/position', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Position]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def position_isolate_margin(self, symbol, **kwargs): # noqa: E501
"""Enable isolated margin or cross margin per-position. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.position_isolate_margin(symbol, async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Position symbol to isolate. (required)
:param bool enabled: True for isolated margin, false for cross margin.
:return: Position
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.position_isolate_margin_with_http_info(symbol, **kwargs) # noqa: E501
else:
(data) = self.position_isolate_margin_with_http_info(symbol, **kwargs) # noqa: E501
return data
def position_isolate_margin_with_http_info(self, symbol, **kwargs): # noqa: E501
"""Enable isolated margin or cross margin per-position. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.position_isolate_margin_with_http_info(symbol, async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Position symbol to isolate. (required)
:param bool enabled: True for isolated margin, false for cross margin.
:return: Position
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['symbol', 'enabled'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method position_isolate_margin" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'symbol' is set
if ('symbol' not in params or
params['symbol'] is None):
raise ValueError("Missing the required parameter `symbol` when calling `position_isolate_margin`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'symbol' in params:
form_params.append(('symbol', params['symbol'])) # noqa: E501
if 'enabled' in params:
form_params.append(('enabled', params['enabled'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/position/isolate', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Position', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def position_transfer_isolated_margin(self, symbol, amount, **kwargs): # noqa: E501
"""Transfer equity in or out of a position. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.position_transfer_isolated_margin(symbol, amount, async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Symbol of position to isolate. (required)
:param float amount: Amount to transfer, in Satoshis. May be negative. (required)
:return: Position
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.position_transfer_isolated_margin_with_http_info(symbol, amount, **kwargs) # noqa: E501
else:
(data) = self.position_transfer_isolated_margin_with_http_info(symbol, amount, **kwargs) # noqa: E501
return data
def position_transfer_isolated_margin_with_http_info(self, symbol, amount, **kwargs): # noqa: E501
"""Transfer equity in or out of a position. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.position_transfer_isolated_margin_with_http_info(symbol, amount, async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Symbol of position to isolate. (required)
:param float amount: Amount to transfer, in Satoshis. May be negative. (required)
:return: Position
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['symbol', 'amount'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method position_transfer_isolated_margin" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'symbol' is set
if ('symbol' not in params or
params['symbol'] is None):
raise ValueError("Missing the required parameter `symbol` when calling `position_transfer_isolated_margin`") # noqa: E501
# verify the required parameter 'amount' is set
if ('amount' not in params or
params['amount'] is None):
raise ValueError("Missing the required parameter `amount` when calling `position_transfer_isolated_margin`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'symbol' in params:
form_params.append(('symbol', params['symbol'])) # noqa: E501
if 'amount' in params:
form_params.append(('amount', params['amount'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/position/transferMargin', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Position', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def position_update_leverage(self, symbol, leverage, **kwargs): # noqa: E501
"""Choose leverage for a position. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.position_update_leverage(symbol, leverage, async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Symbol of position to adjust. (required)
:param float leverage: Leverage value. Send a number between 0.01 and 100 to enable isolated margin with a fixed leverage. Send 0 to enable cross margin. (required)
:return: Position
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.position_update_leverage_with_http_info(symbol, leverage, **kwargs) # noqa: E501
else:
(data) = self.position_update_leverage_with_http_info(symbol, leverage, **kwargs) # noqa: E501
return data
def position_update_leverage_with_http_info(self, symbol, leverage, **kwargs): # noqa: E501
"""Choose leverage for a position. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.position_update_leverage_with_http_info(symbol, leverage, async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Symbol of position to adjust. (required)
:param float leverage: Leverage value. Send a number between 0.01 and 100 to enable isolated margin with a fixed leverage. Send 0 to enable cross margin. (required)
:return: Position
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['symbol', 'leverage'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method position_update_leverage" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'symbol' is set
if ('symbol' not in params or
params['symbol'] is None):
raise ValueError("Missing the required parameter `symbol` when calling `position_update_leverage`") # noqa: E501
# verify the required parameter 'leverage' is set
if ('leverage' not in params or
params['leverage'] is None):
raise ValueError("Missing the required parameter `leverage` when calling `position_update_leverage`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'symbol' in params:
form_params.append(('symbol', params['symbol'])) # noqa: E501
if 'leverage' in params:
form_params.append(('leverage', params['leverage'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/position/leverage', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Position', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def position_update_risk_limit(self, symbol, risk_limit, **kwargs): # noqa: E501
"""Update your risk limit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.position_update_risk_limit(symbol, risk_limit, async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Symbol of position to update risk limit on. (required)
:param float risk_limit: New Risk Limit, in Satoshis. (required)
:return: Position
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.position_update_risk_limit_with_http_info(symbol, risk_limit, **kwargs) # noqa: E501
else:
(data) = self.position_update_risk_limit_with_http_info(symbol, risk_limit, **kwargs) # noqa: E501
return data
def position_update_risk_limit_with_http_info(self, symbol, risk_limit, **kwargs): # noqa: E501
"""Update your risk limit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.position_update_risk_limit_with_http_info(symbol, risk_limit, async=True)
>>> result = thread.get()
:param async bool
:param str symbol: Symbol of position to update risk limit on. (required)
:param float risk_limit: New Risk Limit, in Satoshis. (required)
:return: Position
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['symbol', 'risk_limit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method position_update_risk_limit" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'symbol' is set
if ('symbol' not in params or
params['symbol'] is None):
raise ValueError("Missing the required parameter `symbol` when calling `position_update_risk_limit`") # noqa: E501
# verify the required parameter 'risk_limit' is set
if ('risk_limit' not in params or
params['risk_limit'] is None):
raise ValueError("Missing the required parameter `risk_limit` when calling `position_update_risk_limit`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'symbol' in params:
form_params.append(('symbol', params['symbol'])) # noqa: E501
if 'risk_limit' in params:
form_params.append(('riskLimit', params['risk_limit'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/position/riskLimit', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Position', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
4722,
44,
6369,
7824,
628,
220,
220,
220,
22492,
30617,
7824,
329,
262,
4722,
44,
6369,
25469,
19193,
220,
685,
7680,
609,
8368,
519,
16151,
14,
1324,
14,
15042,
... | 2.583312 | 11,577 |
#
# PySNMP MIB module HPN-ICF-E1-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HPN-ICF-E1-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:26:02 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# --- Symbol imports -------------------------------------------------------
# NOTE(review): `mibBuilder` is never defined or imported in this module; it
# is presumably injected into the module namespace by pysnmp's MibBuilder
# when the compiled MIB is loaded -- confirm before executing standalone.
# Base ASN.1 types.
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
# Subtype-refinement helpers used below to constrain enumerated INTEGERs.
ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion")
# Vendor OID anchor from the HPN-ICF OID registry (imported but the tables
# below register under an explicit numeric OID tuple).
hpnicfCommon, = mibBuilder.importSymbols("HPN-ICF-OID-MIB", "hpnicfCommon")
# ifIndex from IF-MIB: used as the row index of both tables in this module.
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
# Core SMIv2 scaffolding: base types plus scalar/table node classes.
Unsigned32, IpAddress, NotificationType, Counter64, TimeTicks, ObjectIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, Bits, iso, Counter32, ModuleIdentity, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "IpAddress", "NotificationType", "Counter64", "TimeTicks", "ObjectIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "Bits", "iso", "Counter32", "ModuleIdentity", "Integer32")
TextualConvention, RowStatus, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "RowStatus", "DisplayString")
# --- Module identity ------------------------------------------------------
# MODULE-IDENTITY node for HPN-ICF-E1-MIB, registered at
# 1.3.6.1.4.1.11.2.14.11.15.2.28 (enterprise subtree; numeric OID is fixed
# by the vendor MIB and must not be edited by hand).
hpnicfE1 = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28))
# REVISION clauses carried over from the source MIB, newest first.
hpnicfE1.setRevisions(('2012-07-16 17:41', '2010-04-08 18:55', '2009-06-08 17:41', '2004-12-01 14:36',))
# Textual clauses are only attached when the builder was configured to load
# DESCRIPTION/ORGANIZATION texts (saves memory otherwise).
if mibBuilder.loadTexts: hpnicfE1.setLastUpdated('201207161741Z')
if mibBuilder.loadTexts: hpnicfE1.setOrganization('')
# --- hpnicfe1InterfaceStatusTable ({module}.1) ----------------------------
# Per-interface E1 error statistics. Rows are indexed by IF-MIB::ifIndex,
# and every column is a read-only Counter32, i.e. a monotonically wrapping
# error counter reported by the agent.
hpnicfe1InterfaceStatusTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1), )
if mibBuilder.loadTexts: hpnicfe1InterfaceStatusTable.setStatus('current')
# Conceptual row: one entry per E1 interface, keyed by ifIndex from IF-MIB.
hpnicfe1InterfaceStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hpnicfe1InterfaceStatusEntry.setStatus('current')
# Columns 1-10: inbound error counters (exact semantics of each counter come
# from the vendor MIB DESCRIPTIONs, which this generated file does not carry).
hpnicfe1InterfaceInErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1InterfaceInErrs.setStatus('current')
hpnicfe1InterfaceInRuntsErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1InterfaceInRuntsErrs.setStatus('current')
hpnicfe1InterfaceInGiantsErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1InterfaceInGiantsErrs.setStatus('current')
hpnicfe1InterfaceInCrcErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1InterfaceInCrcErrs.setStatus('current')
hpnicfe1InterfaceInAlignErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1InterfaceInAlignErrs.setStatus('current')
hpnicfe1InterfaceInOverRunsErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1InterfaceInOverRunsErrs.setStatus('current')
hpnicfe1InterfaceInDribblesErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1InterfaceInDribblesErrs.setStatus('current')
hpnicfe1InterfaceInAbortedSeqErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1InterfaceInAbortedSeqErrs.setStatus('current')
hpnicfe1InterfaceInNoBufferErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1InterfaceInNoBufferErrs.setStatus('current')
hpnicfe1InterfaceInFramingErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1InterfaceInFramingErrs.setStatus('current')
# Columns 11-14: outbound error counters.
hpnicfe1InterfaceOutputErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1InterfaceOutputErrs.setStatus('current')
hpnicfe1InterfaceOutUnderRunErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1InterfaceOutUnderRunErrs.setStatus('current')
hpnicfe1InterfaceOutCollisonsErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1InterfaceOutCollisonsErrs.setStatus('current')
hpnicfe1InterfaceOutDeferedErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1InterfaceOutDeferedErrs.setStatus('current')
hpnicfe1Table = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 2), )
if mibBuilder.loadTexts: hpnicfe1Table.setStatus('current')
hpnicfe1Entry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hpnicfe1Entry.setStatus('current')
hpnicfe1Type = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 2, 1, 1), Bits().clone(namedValues=NamedValues(("voice", 0), ("pos", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1Type.setStatus('current')
hpnicfe1Clock = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("slave", 1), ("master", 2), ("internal", 3), ("line", 4), ("linePri", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfe1Clock.setStatus('current')
hpnicfe1FrameFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("crc4", 1), ("nocrc4", 2))).clone('crc4')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfe1FrameFormat.setStatus('current')
hpnicfe1LineCode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3))).clone(namedValues=NamedValues(("ami", 1), ("hdb3", 3))).clone('hdb3')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfe1LineCode.setStatus('current')
hpnicfe1PriSetTimeSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 2, 1, 5), HpnicfE1TimeSlot()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfe1PriSetTimeSlot.setStatus('current')
hpnicfe1DChannelIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1DChannelIndex.setStatus('current')
hpnicfe1SubScribLineChannelIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 2, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1SubScribLineChannelIndex.setStatus('current')
hpnicfe1FcmChannelIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1FcmChannelIndex.setStatus('current')
hpnicfe1InterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 3), )
if mibBuilder.loadTexts: hpnicfe1InterfaceTable.setStatus('current')
hpnicfe1InterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hpnicfe1InterfaceEntry.setStatus('current')
hpnicfe1ControllerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfe1ControllerIndex.setStatus('current')
hpnicfe1TimeSlotSetTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 4), )
if mibBuilder.loadTexts: hpnicfe1TimeSlotSetTable.setStatus('current')
hpnicfe1TimeSlotSetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 4, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hpnicfe1TimeSlotSetEntry.setStatus('current')
hpnicfe1TimeSlotSetGroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 30))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfe1TimeSlotSetGroupId.setStatus('current')
hpnicfe1TimeSlotSetSignalType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("unkown", 1), ("em-delay", 2), ("em-immediate", 3), ("em-wink", 4), ("fxo-ground", 5), ("fxo-loop", 6), ("fxs-ground", 7), ("fxs-loop", 8), ("r2", 9)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfe1TimeSlotSetSignalType.setStatus('current')
hpnicfe1TimeSlotSetList = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 4, 1, 3), HpnicfE1TimeSlot()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfe1TimeSlotSetList.setStatus('current')
hpnicfe1TimeSlotSetRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 28, 4, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfe1TimeSlotSetRowStatus.setStatus('current')
mibBuilder.exportSymbols("HPN-ICF-E1-MIB", hpnicfe1InterfaceEntry=hpnicfe1InterfaceEntry, hpnicfe1InterfaceTable=hpnicfe1InterfaceTable, hpnicfe1InterfaceInAlignErrs=hpnicfe1InterfaceInAlignErrs, hpnicfe1TimeSlotSetSignalType=hpnicfe1TimeSlotSetSignalType, PYSNMP_MODULE_ID=hpnicfE1, hpnicfE1=hpnicfE1, hpnicfe1InterfaceInOverRunsErrs=hpnicfe1InterfaceInOverRunsErrs, hpnicfe1InterfaceStatusEntry=hpnicfe1InterfaceStatusEntry, hpnicfe1InterfaceInNoBufferErrs=hpnicfe1InterfaceInNoBufferErrs, hpnicfe1Entry=hpnicfe1Entry, hpnicfe1Clock=hpnicfe1Clock, hpnicfe1InterfaceInAbortedSeqErrs=hpnicfe1InterfaceInAbortedSeqErrs, hpnicfe1TimeSlotSetRowStatus=hpnicfe1TimeSlotSetRowStatus, hpnicfe1PriSetTimeSlot=hpnicfe1PriSetTimeSlot, hpnicfe1SubScribLineChannelIndex=hpnicfe1SubScribLineChannelIndex, hpnicfe1InterfaceOutputErrs=hpnicfe1InterfaceOutputErrs, hpnicfe1TimeSlotSetEntry=hpnicfe1TimeSlotSetEntry, hpnicfe1LineCode=hpnicfe1LineCode, hpnicfe1FrameFormat=hpnicfe1FrameFormat, hpnicfe1InterfaceInCrcErrs=hpnicfe1InterfaceInCrcErrs, HpnicfE1TimeSlot=HpnicfE1TimeSlot, hpnicfe1DChannelIndex=hpnicfe1DChannelIndex, hpnicfe1ControllerIndex=hpnicfe1ControllerIndex, hpnicfe1FcmChannelIndex=hpnicfe1FcmChannelIndex, hpnicfe1InterfaceInErrs=hpnicfe1InterfaceInErrs, hpnicfe1InterfaceOutCollisonsErrs=hpnicfe1InterfaceOutCollisonsErrs, hpnicfe1InterfaceOutDeferedErrs=hpnicfe1InterfaceOutDeferedErrs, hpnicfe1InterfaceInRuntsErrs=hpnicfe1InterfaceInRuntsErrs, hpnicfe1InterfaceInFramingErrs=hpnicfe1InterfaceInFramingErrs, hpnicfe1InterfaceInGiantsErrs=hpnicfe1InterfaceInGiantsErrs, hpnicfe1InterfaceStatusTable=hpnicfe1InterfaceStatusTable, hpnicfe1InterfaceOutUnderRunErrs=hpnicfe1InterfaceOutUnderRunErrs, hpnicfe1TimeSlotSetTable=hpnicfe1TimeSlotSetTable, hpnicfe1TimeSlotSetGroupId=hpnicfe1TimeSlotSetGroupId, hpnicfe1InterfaceInDribblesErrs=hpnicfe1InterfaceInDribblesErrs, hpnicfe1Table=hpnicfe1Table, hpnicfe1TimeSlotSetList=hpnicfe1TimeSlotSetList, hpnicfe1Type=hpnicfe1Type)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
6574,
45,
12,
2149,
37,
12,
36,
16,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
14490,
14,... | 2.566566 | 4,642 |
import json
if __name__=="__main__":
print(parse_config("config")) | [
11748,
33918,
198,
198,
361,
11593,
3672,
834,
855,
1,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
7,
29572,
62,
11250,
7203,
11250,
48774
] | 2.730769 | 26 |
# Copyright 2017 David Hein
#
# Licensed under the MIT License. If the LICENSE file is missing, you
# can find the MIT license terms here: https://opensource.org/licenses/MIT
import re
import threading
import time
import unittest
from selenium import webdriver
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
import os
import shutil
from app import create_app
import sys, traceback
import ssl
| [
2,
15069,
2177,
3271,
26431,
198,
2,
198,
2,
49962,
739,
262,
17168,
13789,
13,
1002,
262,
38559,
24290,
2393,
318,
4814,
11,
345,
198,
2,
460,
1064,
262,
17168,
5964,
2846,
994,
25,
3740,
1378,
44813,
1668,
13,
2398,
14,
677,
4541,... | 3.681416 | 113 |
#!/usr/bin/env python
# coding=utf-8
import rospy
from sensor_msgs.msg import Range
from temizlik_robotu.srv import *
#__________________________________________________________________________________________________
#
# TemizlikRobotuEngeldenSakinma
#
# AMAÇ: Robotun engelden sakınması ve hareket emrini vermesi için
# oluşturulmuş sınıftır.
#
# SINIF DEĞİŞKENLERİ:
#
# ### Constructor değişkenleridir. İlk ve varsayılan değerleri
# ### aktarılmaktadır.
#
# self.robot_hareket_emir
# self.onceki_robot_hareket_emir_durumu
# self.robot_hareket_emir_degisim_kontrolu
# self.engel_menzil_limit_degeri
#
#
#
# FONKSİYON PROTOTİPLERİ:
#
# // Constructor
# def __init__(self):
#
# def ana_fonksiyon(self):
# def sonar_callback_fonksiyonu(self, menzil_mesaji):
# def engel_tespiti_istemcisi(self, istek):
#
#
#
# NOTLAR:
#
# GELİŞTİRME GEÇMİŞİ:
#
# Yazar:
# Tarih: 10.02.2020
# Versiyon: v_1.0
#
#
#__________________________________________________________________________________________________
#______________________________________________________________________________________________
#
# FONKSIYON ADI: __init__
# FONKSIYON AÇIKLAMASI: Constructor fonksiyodur. Bütün değişkenlere
# ilk ve varsayılan değerleri atanmaktadır.
#
#
# DEĞİŞKENLER:
# ADI TIPI AÇIKLAMASI
# robot_hareket_emir bool Mevcut zamandaki Sonar topiğini dinleyerek engelin olup olmadığını ifade eder. Bu değer False ise engelin var olduğunu belirtir.
# onceki_robot_hareket_emir_durumu bool Bir önceki robot_hareket_emir durumunu ifade eder.
# robot_hareket_emir_degisim_kontrolu bool robot_hareket_emir ve onceki_robot_hareket_emir_durumu karşılaştırarak durumun değişip değişmediğini ifade eder.
# Eğer değişim var ise "engel_bilgi" servisine istek yollar.
# engel_menzil_limit_degeri float Parametreler yaml dosyasından okunmuş Engel Menzil Limit Degeri değerini ifade eder.
# Sonar topiğinden okunan mesafe bilgisinin engel olarak belirleyebilmek için kullanılan limit değerdir.
# Eğer okunan değer bu değerin altında ise robot_hareket_emir değeri True olur.
#
#
# PARAMETRELER:
# ADI TIPI AÇIKLAMASI
#
#
# DÖNÜS:
# ADI TIPI AÇIKLAMASI
#
#
# GEREKLILIK:
#
#
#______________________________________________________________________________________________
#______________________________________________________________________________________________
#
# FONKSIYON ADI: ana_fonksiyon
# FONKSIYON AÇIKLAMASI: Yayıncı dinlenerek okunan robot_hareket_emir değeri robot_hareket_emir_degisim_kontrolu
# değeri True olduğunda "engel_bilgi" servisine istek olarak gönderilmektedir.
#
#
# PARAMETRELER:
# ADI TIPI AÇIKLAMASI
#
#
# DÖNÜS:
# ADI TIPI AÇIKLAMASI
#
#
# GEREKLILIK:
# Bu fonksiyon kullanılabilmesi için "/sonar1" topiğine abone olabilmesi gerekmektedir.
#
#______________________________________________________________________________________________
#______________________________________________________________________________________________
#
# FONKSIYON ADI: sonar_callback_fonksiyonu
# FONKSIYON AÇIKLAMASI: "Sonar1" yayıncısı dinlenerek okunan değer engel_menzil_limit_degeri ile karşılaştırılır.
# Eğer bu değerden az ise robot_hareket_emir değeri False olur. robot_hareket_emir ve
# onceki_robot_hareket_emir_durumu birbirine eşit değil ise robot_hareket_emir_degisim_kontrolu
# True olur.
#
#
# PARAMETRELER:
# ADI TIPI AÇIKLAMASI
# menzil_mesaji Range Topiğin değerinin okunması için gereken mesaj tipidir.
#
# DÖNÜS:
# ADI TIPI AÇIKLAMASI
#
# GEREKLILIK:
# Bu fonksiyon çağrılabilmesi için "/sonar1" topiğine abone olmak gerekmektedir.
#
#______________________________________________________________________________________________
#______________________________________________________________________________________________
#
# FONKSIYON ADI: engel_tespiti_istemcisi
# FONKSIYON AÇIKLAMASI: Robotun robot_hareket_emir değerini servis ile iletişim kurması için kullanılan
# istemci fonksiyonudur.
#
#
# PARAMETRELER:
# ADI TIPI AÇIKLAMASI
# istek EngelBilgi Servise robot_hareket_emir değerini gönderir.
#
# DÖNÜS:
# ADI TIPI AÇIKLAMASI
# yanit EngelBilgi Servisten gönderilen yanit değerini alır.
#
# GEREKLILIK:
# Bu fonksiyon kullanılabilmesi için "engel_bilgi" servisinin açık olması gerekmektedir.
#
#______________________________________________________________________________________________
if __name__ == '__main__':
try:
rospy.init_node('temizlik_robotu_engelden_sakinma_dugumu', anonymous=True)
# TemizlikRobotuEngeldenSakinma() sınıfını çağırmaktadır.
dugum = TemizlikRobotuEngeldenSakinma()
except rospy.ROSInterruptException:
pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
28,
40477,
12,
23,
198,
198,
11748,
686,
2777,
88,
198,
198,
6738,
12694,
62,
907,
14542,
13,
19662,
1330,
13667,
198,
6738,
2169,
528,
46965,
62,
305,
13645,
84,
13,
27891... | 1.836821 | 3,297 |
#!/usr/bin/env python
# coding: utf-8
# This file is part of Adblock Plus <https://adblockplus.org/>,
# Copyright (C) 2006-2015 Eyeo GmbH
#
# Adblock Plus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# Adblock Plus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Adblock Plus. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
# This is a reference script to validate the checksum in downloadable #
# subscription. This performs the same validation as Adblock Plus when it #
# downloads the subscription. #
# #
# To validate a subscription file, run the script like this: #
# #
# python validateChecksum.py < subscription.txt #
# #
# Note: your subscription file should be saved in UTF-8 encoding, otherwise #
# the validation will fail. #
# #
#############################################################################
import sys, re, codecs, hashlib, base64
checksumRegexp = re.compile(r'^\s*!\s*checksum[\s\-:]+([\w\+\/=]+).*\n', re.I | re.M)
if __name__ == '__main__':
validate(readStream(sys.stdin))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
770,
2393,
318,
636,
286,
1215,
9967,
8227,
1279,
5450,
1378,
324,
9967,
9541,
13,
2398,
15913,
11,
198,
2,
15069,
357,
34,
8,
4793,
... | 2.251469 | 851 |
# -*- coding: utf-8 -*-
"""
unit test for the filters
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2008 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import Markup, Environment
CAPITALIZE = '''{{ "foo bar"|capitalize }}'''
CENTER = '''{{ "foo"|center(9) }}'''
DEFAULT = '''{{ missing|default("no") }}|{{ false|default('no') }}|\
{{ false|default('no', true) }}|{{ given|default("no") }}'''
DICTSORT = '''{{ foo|dictsort }}|\
{{ foo|dictsort(true) }}|\
{{ foo|dictsort(false, 'value') }}'''
BATCH = '''{{ foo|batch(3)|list }}|{{ foo|batch(3, 'X')|list }}'''
SLICE = '''{{ foo|slice(3)|list }}|{{ foo|slice(3, 'X')|list }}'''
ESCAPE = '''{{ '<">&'|escape }}'''
STRIPTAGS = '''{{ foo|striptags }}'''
FILESIZEFORMAT = '{{ 100|filesizeformat }}|\
{{ 1000|filesizeformat }}|\
{{ 1000000|filesizeformat }}|\
{{ 1000000000|filesizeformat }}|\
{{ 1000000000000|filesizeformat }}|\
{{ 100|filesizeformat(true) }}|\
{{ 1000|filesizeformat(true) }}|\
{{ 1000000|filesizeformat(true) }}|\
{{ 1000000000|filesizeformat(true) }}|\
{{ 1000000000000|filesizeformat(true) }}'
FIRST = '''{{ foo|first }}'''
FLOAT = '''{{ "42"|float }}|{{ "ajsghasjgd"|float }}|{{ "32.32"|float }}'''
FORMAT = '''{{ "%s|%s"|format("a", "b") }}'''
INDENT = '''{{ foo|indent(2) }}|{{ foo|indent(2, true) }}'''
INT = '''{{ "42"|int }}|{{ "ajsghasjgd"|int }}|{{ "32.32"|int }}'''
JOIN = '''{{ [1, 2, 3]|join("|") }}'''
LAST = '''{{ foo|last }}'''
LENGTH = '''{{ "hello world"|length }}'''
LOWER = '''{{ "FOO"|lower }}'''
PPRINT = '''{{ data|pprint }}'''
RANDOM = '''{{ seq|random }}'''
REVERSE = '''{{ "foobar"|reverse|join }}|{{ [1, 2, 3]|reverse|list }}'''
STRING = '''{{ range(10)|string }}'''
TITLE = '''{{ "foo bar"|title }}'''
TRIM = '''{{ " foo "|trim }}'''
TRUNCATE = '''{{ data|truncate(15, true, ">>>") }}|\
{{ data|truncate(15, false, ">>>") }}|\
{{ smalldata|truncate(15) }}'''
UPPER = '''{{ "foo"|upper }}'''
URLIZE = '''{{ "foo http://www.example.com/ bar"|urlize }}'''
WORDCOUNT = '''{{ "foo bar baz"|wordcount }}'''
BLOCK = '''{% filter lower|escape %}<HEHE>{% endfilter %}'''
CHAINING = '''{{ ['<foo>', '<bar>']|first|upper|escape }}'''
SUM = '''{{ [1, 2, 3, 4, 5, 6]|sum }}'''
ABS = '''{{ -1|abs }}|{{ 1|abs }}'''
ROUND = '''{{ 2.7|round }}|{{ 2.1|round }}|\
{{ 2.1234|round(2, 'floor') }}|{{ 2.1|round(0, 'ceil') }}'''
XMLATTR = '''{{ {'foo': 42, 'bar': 23, 'fish': none,
'spam': missing, 'blub:blub': '<?>'}|xmlattr }}'''
SORT = '''{{ [2, 3, 1]|sort }}|{{ [2, 3, 1]|sort(true) }}'''
GROUPBY = '''{% for grouper, list in [{'foo': 1, 'bar': 2},
{'foo': 2, 'bar': 3},
{'foo': 1, 'bar': 1},
{'foo': 3, 'bar': 4}]|groupby('foo') -%}
{{ grouper }}: {{ list|join(', ') }}
{% endfor %}'''
FILTERTAG = '''{% filter upper|replace('FOO', 'foo') %}foobar{% endfilter %}'''
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
4326,
1332,
329,
262,
16628,
198,
220,
220,
220,
220,
27156,
15116,
93,
628,
220,
220,
220,
1058,
22163,
4766,
25,
3648,
416,
943,
1084,
657... | 2.20122 | 1,312 |
from chatterbot.adapters import Adapter
class OutputAdapter(Adapter):
"""
A generic class that can be overridden by a subclass to provide extended
functionality, such as delivering a response to an API endpoint.
"""
def process_response(self, statement, session_id=None):
"""
Override this method in a subclass to implement customized functionality.
:param statement: The statement that the chat bot has produced in response to some input.
:param session_id: The unique id of the current chat session.
:returns: The response statement.
"""
return statement
| [
6738,
37303,
13645,
13,
324,
12126,
1330,
43721,
201,
198,
201,
198,
201,
198,
4871,
25235,
47307,
7,
47307,
2599,
201,
198,
220,
220,
220,
37227,
201,
198,
220,
220,
220,
317,
14276,
1398,
326,
460,
307,
23170,
4651,
416,
257,
47611,... | 3.022936 | 218 |
import json
import os
import os.path
import pytest
from ..constants import (
COLUMNS,
FILE_EXTENSION,
ROWS,
)
from ..exceptions import (
DatabaseDoesNotExist,
DatabaseExists,
TableExists,
MissingColumns,
)
from ..main import (
create_database,
create_table,
drop_database,
get_db_file_name,
insert,
select,
)
DB_NAME = 'app'
TABLE_NAME = 'thing'
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
11485,
9979,
1187,
1330,
357,
198,
220,
220,
220,
20444,
5883,
8035,
11,
198,
220,
220,
220,
45811,
62,
13918,
16938,
2849,
11,
... | 2.389535 | 172 |
#!/usr/bin/env python
from __future__ import unicode_literals
from __future__ import absolute_import
import string
import os
from .stemmer import Stemmer, lemmatizer
from system.misc import parse_all_dates
from gensim.models import KeyedVectors
from nltk.corpus import stopwords
_word2vec = None
__all__ = [
'init_word2vec',
'WordMatcher',
'FuzzyWordMatcher',
'MorphosemanticLinkMatcher',
'SemanticWordMatcher',
'FuzzySpanMatcher',
'NamedEntityMatcher',
'FuzzyNamedEntityMatcher',
'SemanticNamedEntityMatcher',
'DateEntityMatcher',
'URLEntityMatcher',
'OrdinalEntityMatcher',
'MinusPolarityMatcher',
'BelocatedAtMatcher',
'TemporalQuantityMatcher'
]
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
11748,
4731,
198,
11748,
28686,
198,
6738,
764,
927,
647,
1330,
520,... | 2.545139 | 288 |
# Chapter 10
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
## Review
So far we have been working with distributions of only one variable. In this notebook we'll take a step toward multivariate distributions, starting with two variables.
We'll use cross-tabulation to compute a **joint distribution**, then use the joint distribution to compute **conditional distributions** and **marginal distributions**.
We will re-use `pmf_from_seq`, which I introduced in a previous notebook.
def pmf_from_seq(seq):
"""Make a PMF from a sequence of values.
seq: sequence
returns: Series representing a PMF
"""
pmf = pd.Series(seq).value_counts(sort=False).sort_index()
pmf /= pmf.sum()
return pmf
## Cross tabulation
To understand joint distributions, I'll start with cross tabulation. And to demonstrate cross tabulation, I'll generate a dataset of colors and fruits.
Here are the possible values.
colors = ['red', 'yellow', 'green']
fruits = ['apple', 'banana', 'grape']
And here's a random sample of 100 fruits.
np.random.seed(2)
fruit_sample = np.random.choice(fruits, 100, replace=True)
We can use `pmf_from_seq` to compute the distribution of fruits.
pmf_fruit = pmf_from_seq(fruit_sample)
pmf_fruit
And here's what it looks like.
pmf_fruit.plot.bar(color='C0')
plt.ylabel('Probability')
plt.title('Distribution of fruit');
Similarly, here's a random sample of colors.
color_sample = np.random.choice(colors, 100, replace=True)
Here's the distribution of colors.
pmf_color = pmf_from_seq(color_sample)
pmf_color
And here's what it looks like.
pmf_color.plot.bar(color='C1')
plt.ylabel('Probability')
plt.title('Distribution of colors');
Looking at these distributions, we know the proportion of each fruit, ignoring color, and we know the proportion of each color, ignoring fruit type.
But if we only have the distributions and not the original data, we don't know how many apples are green, for example, or how many yellow fruits are bananas.
We can compute that information using `crosstab`, which computes the number of cases for each combination of fruit type and color.
xtab = pd.crosstab(color_sample, fruit_sample,
rownames=['color'], colnames=['fruit'])
xtab
The result is a DataFrame with colors along the rows and fruits along the columns.
## Heatmap
The following function plots a cross tabulation using a pseudo-color plot, also known as a heatmap.
It represents each element of the cross tabulation with a colored square, where the color corresponds to the magnitude of the element.
The following function generates a heatmap using the Matplotlib function `pcolormesh`:
def plot_heatmap(xtab):
"""Make a heatmap to represent a cross tabulation.
xtab: DataFrame containing a cross tabulation
"""
plt.pcolormesh(xtab)
# label the y axis
ys = xtab.index
plt.ylabel(ys.name)
locs = np.arange(len(ys)) + 0.5
plt.yticks(locs, ys)
# label the x axis
xs = xtab.columns
plt.xlabel(xs.name)
locs = np.arange(len(xs)) + 0.5
plt.xticks(locs, xs)
plt.colorbar()
plt.gca().invert_yaxis()
plot_heatmap(xtab)
## Joint Distribution
A cross tabulation represents the "joint distribution" of two variables, which is a complete description of two distributions, including all of the conditional distributions.
If we normalize `xtab` so the sum of the elements is 1, the result is a joint PMF:
joint = xtab / xtab.to_numpy().sum()
joint
Each column in the joint PMF represents the conditional distribution of color for a given fruit.
For example, we can select a column like this:
col = joint['apple']
col
If we normalize it, we get the conditional distribution of color for a given fruit.
col / col.sum()
Each row of the cross tabulation represents the conditional distribution of fruit for each color.
If we select a row and normalize it, like this:
row = xtab.loc['red']
row / row.sum()
The result is the conditional distribution of fruit type for a given color.
## Conditional distributions
The following function takes a joint PMF and computes conditional distributions:
def conditional(joint, name, value):
"""Compute a conditional distribution.
joint: DataFrame representing a joint PMF
name: string name of an axis
value: value to condition on
returns: Series representing a conditional PMF
"""
if joint.columns.name == name:
cond = joint[value]
elif joint.index.name == name:
cond = joint.loc[value]
return cond / cond.sum()
The second argument is a string that identifies which axis we want to select; in this example, `'fruit'` means we are selecting a column, like this:
conditional(joint, 'fruit', 'apple')
And `'color'` means we are selecting a row, like this:
conditional(joint, 'color', 'red')
**Exercise:** Compute the conditional distribution of color for bananas. What is the probability that a banana is yellow?
# Solution goes here
# Solution goes here
## Marginal distributions
Given a joint distribution, we can compute the unconditioned distribution of either variable.
If we sum along the rows, which is axis 0, we get the distribution of fruit type, regardless of color.
joint.sum(axis=0)
If we sum along the columns, which is axis 1, we get the distribution of color, regardless of fruit type.
joint.sum(axis=1)
These distributions are called "[marginal](https://en.wikipedia.org/wiki/Marginal_distribution#Multivariate_distributions)" because of the way they are often displayed. We'll see an example later.
As we did with conditional distributions, we can write a function that takes a joint distribution and computes the marginal distribution of a given variable:
def marginal(joint, name):
"""Compute a marginal distribution.
joint: DataFrame representing a joint PMF
name: string name of an axis
returns: Series representing a marginal PMF
"""
if joint.columns.name == name:
return joint.sum(axis=0)
elif joint.index.name == name:
return joint.sum(axis=1)
Here's the marginal distribution of fruit.
pmf_fruit = marginal(joint, 'fruit')
pmf_fruit
And the marginal distribution of color:
pmf_color = marginal(joint, 'color')
pmf_color
The sum of the marginal PMF is the same as the sum of the joint PMF, so if the joint PMF was normalized, the marginal PMF should be, too.
joint.to_numpy().sum()
pmf_color.sum()
However, due to floating point error, the total might not be exactly 1.
pmf_fruit.sum()
**Exercise:** The following cells load the data from the General Social Survey that we used in Notebooks 1 and 2.
# Load the data file
import os
if not os.path.exists('gss_bayes.csv'):
!wget https://github.com/AllenDowney/BiteSizeBayes/raw/master/gss_bayes.csv
gss = pd.read_csv('gss_bayes.csv', index_col=0)
As an exercise, you can use this data to explore the joint distribution of two variables:
* `partyid` encodes each respondent's political affiliation, that is, the party the belong to. [Here's the description](https://gssdataexplorer.norc.org/variables/141/vshow).
* `polviews` encodes their political alignment on a spectrum from liberal to conservative. [Here's the description](https://gssdataexplorer.norc.org/variables/178/vshow).
The values for `partyid` are
```
0 Strong democrat
1 Not str democrat
2 Ind,near dem
3 Independent
4 Ind,near rep
5 Not str republican
6 Strong republican
7 Other party
```
The values for `polviews` are:
```
1 Extremely liberal
2 Liberal
3 Slightly liberal
4 Moderate
5 Slightly conservative
6 Conservative
7 Extremely conservative
```
Make a cross tabulation of `gss['partyid']` and `gss['polviews']` and normalize it to make a joint PMF.
# Solution goes here
Use `plot_heatmap` to display a heatmap of the joint distribution. What patterns do you notice?
plot_heatmap(joint2)
plt.xlabel('polviews')
plt.title('Joint distribution of polviews and partyid');
Use `marginal` to compute the marginal distributions of `partyid` and `polviews`, and plot the results.
# Solution goes here
# Solution goes here
Use `conditional` to compute the conditional distribution of `partyid` for people who identify themselves as "Extremely conservative" (`polviews==7`). How many of them are "strong Republicans" (`partyid==6`)?
# Solution goes here
Use `conditional` to compute the conditional distribution of `polviews` for people who identify themselves as "Strong Democrat" (`partyid==0`). How many of them are "Extremely liberal" (`polviews==1`)?
# Solution goes here
## Review
In this notebook we started with cross tabulation, which we normalized to create a joint distribution, which describes the distribution of two (or more) variables and all of their conditional distributions.
We used heatmaps to visualize cross tabulations and joint distributions.
Then we defined `conditional` and `marginal` functions that take a joint distribution and compute conditional and marginal distributions for each variables.
As an exercise, you had a chance to apply the same methods to explore the relationship between political alignment and party affiliation using data from the General Social Survey.
You might have noticed that we did not use Bayes's Theorem in this notebook. [In the next notebook](https://colab.research.google.com/github/AllenDowney/BiteSizeBayes/blob/master/11_faceoff.ipynb) we'll take the ideas from this notebook and apply them Bayesian inference.
| [
2,
7006,
838,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
2235,
6602,
198,
198,
2396,
1290,
356,
423,
587,
1762,
351,
24570,
... | 3.322738 | 2,863 |
from .base import CONFIG, Config, SimpleConfig
from .fields import BoolField, Field, FloatField, IntField, MethodField, StringField
__all__ = [
'SimpleConfig', 'Config', 'CONFIG',
'BoolField', 'Field', 'FloatField', 'IntField', 'MethodField', 'StringField',
]
| [
6738,
764,
8692,
1330,
25626,
11,
17056,
11,
17427,
16934,
198,
6738,
764,
25747,
1330,
347,
970,
15878,
11,
7663,
11,
48436,
15878,
11,
2558,
15878,
11,
11789,
15878,
11,
10903,
15878,
628,
198,
834,
439,
834,
796,
685,
198,
220,
220... | 3.176471 | 85 |
import logging
import subprocess
from django.conf import settings
logger = logging.getLogger('django.cache_purge_hooks')
VARNISHADM_HOST = getattr(settings, 'VARNISHADM_HOST', 'localhost')
VARNISHADM_PORT = getattr(settings, 'VARNISHADM_PORT', 6082)
VARNISHADM_SECRET = getattr(settings, 'VARNISHADM_SECRET', '/etc/varnish/secret')
VARNISHADM_SITE_DOMAIN = getattr(settings, 'VARNISHADM_SITE_DOMAIN', '.*')
VARNISHADM_BIN = getattr(settings, 'VARNISHADM_ADM_BIN', '/usr/bin/varnishadm')
| [
11748,
18931,
198,
11748,
850,
14681,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
10786,
28241,
14208,
13,
23870,
62,
14225,
469,
62,
25480,
82,
11537,
198,
198,
53,
1503... | 2.333333 | 210 |
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals
import six, bson, os
from bson.json_util import loads
from mpcontribs.rest.rester import MPContribsRester
from mpcontribs.io.core.utils import clean_value
from mpcontribs.io.archieml.mpfile import MPFile
class DiluteSoluteDiffusionRester(MPContribsRester):
"""DiluteSoluteDiffusion-specific convenience functions to interact with MPContribs REST interface"""
query = {'project': 'dilute_solute_diffusion'}
z = loads(open(os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'z.json'
), 'r').read())
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
7297,
11,
28000,
1098,
62,
17201,
874,
198,
11748,
2237,
11,
275,
1559,
11,
28686,
198,
6738,
275,
1559,
13,
17752,
62,
22602,
1330,
15989... | 2.731818 | 220 |
import socket
import struct
import binascii
from scapy.all import *
| [
11748,
17802,
198,
11748,
2878,
198,
11748,
9874,
292,
979,
72,
198,
6738,
629,
12826,
13,
439,
1330,
1635,
628
] | 3.45 | 20 |
from systems.commands.index import Command
| [
6738,
3341,
13,
9503,
1746,
13,
9630,
1330,
9455,
628
] | 4.4 | 10 |
from flask import request, session
from flask.views import MethodView
from flask import current_app
from ..utils.http import redirect_next_referrer, redirect_next
from ..utils.permissions import LOGGEDIN, PERMISSIONS, lookup_permissions
from ..utils.adldap import ADLDAPauth
| [
6738,
42903,
1330,
2581,
11,
6246,
198,
6738,
42903,
13,
33571,
1330,
11789,
7680,
198,
6738,
42903,
1330,
1459,
62,
1324,
198,
198,
6738,
11485,
26791,
13,
4023,
1330,
18941,
62,
19545,
62,
260,
2232,
11751,
11,
18941,
62,
19545,
198,
... | 3.644737 | 76 |
import requests
def teams_message(message, channel, colour, image=False):
    """Post a MessageCard notification to a Microsoft Teams webhook.

    message -- body text of the card
    channel -- Teams incoming-webhook URL
    colour  -- hex theme colour for the card
    image   -- optional image URL; when given, a markdown link is appended
    """
    body_text = message
    if image:
        body_text += " - [Click here for image](%s)" % image
    payload = {
        "@context": "http://schema.org/extensions",
        "@type": "MessageCard",
        "themeColor": colour,
        "title": "Automated DrillBit analysis",
        "text": body_text
    }
    response = requests.post(url=channel, json=payload)
    # Surface webhook errors on stdout; Teams returns 200 on success.
    if response.status_code != 200:
        print(response.text)
| [
11748,
7007,
198,
198,
4299,
3466,
62,
20500,
7,
20500,
11,
6518,
11,
9568,
11,
2939,
28,
25101,
2599,
198,
220,
220,
220,
37227,
82,
2412,
257,
2657,
319,
3466,
37811,
198,
220,
220,
220,
2657,
34758,
198,
220,
220,
220,
220,
220,
... | 2.38756 | 209 |
import requests
from typing import List, Dict, Union, Optional
from WH_Utils.Objects.Event import Event
from WH_Utils.Objects.Prospect import Prospect
from WH_Utils.Objects.Company import Company
base_url = "https://db.wealthawk.com"
def get_event_by_person(
    client: Union[Prospect, str], auth_header: Dict[str, str]
) -> Event:
    """Return the Event related to a person.

    Args:
        client: a Prospect object or a bare person-ID string.
        auth_header: WH auth header forwarded to the returned Event.
    """
    # Accept either a Prospect or a raw ID.  NOTE: `id` shadows the builtin.
    id = client.id if isinstance(client, Prospect) else client
    path = "/relate/person_to_event_by_person"
    url = base_url + path
    # NOTE(review): a *person* id is sent under the key "eventID" -- looks like
    # a misnomer; verify against the /relate endpoint contract.
    params = {"eventID": id}
    # Second positional argument of requests.get is the query-string params.
    event_id = requests.get(url, params).json()[0]["eventID"]
    event = Event(WH_ID=event_id, auth_header=auth_header)
    return event
def get_company_by_person(
    client: Union[Prospect, str], auth_header: Dict[str, str]
) -> Company:
    """Return the Company related to a person.

    Args:
        client: a Prospect object or a bare person-ID string.
        auth_header: WH auth header forwarded to the returned Company.
    """
    # Accept either a Prospect or a raw ID.  NOTE: `id` shadows the builtin.
    id = client.id if isinstance(client, Prospect) else client
    path = "/relate/person_to_company_by_person"
    url = base_url + path
    # NOTE(review): a *person* id is sent under the key "eventID" here too --
    # confirm the expected parameter name with the API.
    params = {"eventID": id}
    companyID = requests.get(url, params).json()[0]["companyID"]
    company = Company(WH_ID=companyID, auth_header=auth_header)
    return company
def post_prospect(
    auth_dict: Dict[str, str],
    prospect: Prospect,
    event: Event,
    company: Optional[Company] = None,
) -> requests.Response:
    """
    Adds a prospect with all connections to other entities as appropriate
    Args:
        auth_dict: Dict[str, str]
            The WH auth dict to login
        prospect: Prospect
            The prospect you want to add to the database
        event: Event
            The event the prospect is related to
        company: Optional[Company]
            If the client was related to a company, put it here
    Returns:
        status_code: int
            200 if it works
    Raise:
        Error
    """
    # TODO: unimplemented stub -- currently returns None, not the documented
    # requests.Response.
    return
| [
11748,
7007,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
11,
4479,
11,
32233,
198,
198,
6738,
7655,
62,
18274,
4487,
13,
10267,
82,
13,
9237,
1330,
8558,
198,
6738,
7655,
62,
18274,
4487,
13,
10267,
82,
13,
2964,
4443,
1330,
26736,
... | 2.611765 | 680 |
class Componente(object):
    """Component placeholder; no attributes or behaviour defined yet."""
class Computadora(object):
    """Computer that holds a collection of components."""
    # NOTE(review): class-level mutable default -- this single list is shared by
    # every Computadora instance.  If each computer should own its components,
    # assign `self.componentes = []` in __init__ instead.
    componentes = []
4871,
35100,
68,
7,
15252,
2599,
198,
220,
220,
220,
37227,
15390,
8841,
329,
35100,
68,
37811,
198,
198,
4871,
22476,
324,
5799,
7,
15252,
2599,
198,
220,
220,
220,
37227,
15390,
8841,
329,
22476,
324,
5799,
37811,
628,
220,
220,
220... | 3.173913 | 46 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#info:http://www.wooyun.org/bugs/wooyun-2010-0135128
import urlparse
if __name__ == '__main__':
    # Standalone PoC run: `dummy` supplies the audit()/assign() test helpers
    # used below (project-local harness).
    from dummy import *
    # Target taken from the wooyun-2010-0135128 advisory referenced above.
    audit(assign('kinggate', 'https://202.103.238.229/')[1])
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
12,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
201,
198,
2,
10951,
25,
4023,
1378,
2503,
13,
21638,
726,
403,
13,
2398,
14,
32965,
14,
21638,
726,
403,
12,
10333,
12,
... | 2.230769 | 104 |
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Software License Agreement (BSD License 2.0)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# This file is originally from:
# https://github.com/ros/ros_comm/blob/6e5016f4b2266d8a60c9a1e163c4928b8fc7115e/tools/rostopic/src/rostopic/__init__.py
from argparse import ArgumentTypeError
import threading
import time
import traceback
import rclpy
from rclpy.qos import qos_profile_sensor_data
from ros2cli.node.direct import DirectNode
from ros2topic.api import get_msg_class
from ros2topic.api import TopicNameCompleter
from ros2topic.verb import VerbExtension
DEFAULT_WINDOW_SIZE = 100
class BwVerb(VerbExtension):
    """Display bandwidth used by topic."""
    # NOTE(review): no add_arguments()/main() implementation appears here --
    # presumably provided elsewhere; verify against upstream ros2topic.
def _rostopic_bw(node, topic, window_size=DEFAULT_WINDOW_SIZE):
    """Periodically print the received bandwidth of a topic to console until shutdown."""
    # pause bw until topic is published: block until the topic's message type
    # can be resolved (hidden topics included)
    msg_class = get_msg_class(node, topic, blocking=True, include_hidden_topics=True)
    if msg_class is None:
        node.destroy_node()
        return
    # ROSTopicBandwidth is not visible in this excerpt -- presumably it tracks
    # received message sizes over a window of `window_size` samples; confirm.
    rt = ROSTopicBandwidth(node, window_size)
    node.create_subscription(
        msg_class,
        topic,
        rt.callback,
        qos_profile_sensor_data,
        raw=True  # raw delivery hands over serialized bytes (size measurable)
    )
    print('Subscribed to [%s]' % topic)
    # Report once per second until rclpy is shut down (e.g. Ctrl-C).
    timer = node.create_timer(1, rt.print_bw)
    while rclpy.ok():
        rclpy.spin_once(node)
    node.destroy_timer(timer)
    node.destroy_node()
    rclpy.shutdown()
| [
2,
15069,
357,
66,
8,
3648,
11,
33021,
45502,
11,
3457,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
10442,
13789,
12729,
357,
21800,
13789,
362,
13,
15,
8,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,... | 2.968623 | 988 |
from __future__ import unicode_literals
from django import forms
class PagingFormMixin(forms.Form):
    """Form mixin that includes paging page number and page size."""
    # 1-based page number; optional, rendered with an initial value of 1.
    p = forms.IntegerField(label='Page', initial=1, required=False)
    # Items per page; optional, rendered with an initial value of 25.
    ps = forms.IntegerField(label='Page Size', initial=25, required=False)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
1330,
5107,
628,
198,
4871,
350,
3039,
8479,
35608,
259,
7,
23914,
13,
8479,
2599,
198,
220,
220,
220,
37227,
8479,
5022,
259,
326,
3407,
279,
3... | 3.247423 | 97 |
#!/usr/bin/env python3
# coding=utf-8
import requests
import os
# Browser-like User-Agent string for HTTP requests.
HttpUserAgent = r"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
# Low-birthweight dataset hosted in the tensorflow_cookbook repository.
birthdata_url = 'https://github.com/nfmcclure/tensorflow_cookbook/raw/master/01_Introduction/07_Working_with_Data_Sources/birthweight_data/birthweight.dat'
# NOTE(review): `download` is not defined in this module as shown -- confirm a
# helper exists that fetches the URL and returns the body as text.
birthText = download(birthdata_url)
# print('birthText', birthText)
# The payload is newline-separated rows of tab-separated values; row 0 is the header.
birth_data = birthText.split('\n')
print(len(birth_data))
birth_header = birth_data[0].split('\t')
print(birth_header)
# Parse every non-empty data row into floats, skipping empty fields.
birth_data = [[float(x) for x in y.split('\t') if len(x) >= 1] for y in birth_data[1:] if len(y) >= 1]
print(len(birth_data))
print(len(birth_data[0]))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
19617,
28,
40477,
12,
23,
198,
198,
11748,
7007,
198,
11748,
28686,
198,
198,
43481,
12982,
36772,
796,
374,
1,
44,
8590,
5049,
14,
20,
13,
15,
357,
14155,
37638,
26,
8180,
... | 2.568841 | 276 |
from django.urls import path
from . import views
from .api import views
# NOTE(review): the module imports `views` twice above; `from .api import views`
# shadows `from . import views`, so these routes resolve against the API views
# module -- confirm that is intended.
urlpatterns = [
    # NOTE(review): name=None leaves the routes unnamed, so reverse() and
    # {% url %} cannot address them; consider giving each a string name.
    path('', views.PostListView.as_view(), name=None),
    path('create/', views.PostCreateView.as_view(), name=None),
    path('<int:pk>/', views.PostDetailView.as_view(), name=None)
]
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
198,
6738,
764,
15042,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
5009,
13,
6307,
8053,
7680,
13,
292,
62,
1177,
... | 2.712871 | 101 |
from typing import List, Tuple
from collections import namedtuple
import numpy as np
from .frequency_band import FrequencyBand
_FilterSeries: Tuple[float, float, float] = namedtuple( # type: ignore
"FilterSeries", "min max width"
)
| [
6738,
19720,
1330,
7343,
11,
309,
29291,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
764,
35324,
62,
3903,
1330,
31902,
31407,
198,
198,
62,
22417,
27996,
25,
309,
29291,
58,
22468,
... | 3.347222 | 72 |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 22 17:07:17 2014
@author: Quentin
"""
###################################### TIPE : L-Systèmes - 2e année ############################
######################################## L-systèmes paramétriques ##############################
def nbr_virgule(s):
    """Return the number of commas in the string *s*.

    Replaces the manual counting loop with the equivalent built-in
    ``str.count``, which does the same scan in C.
    """
    return s.count(',')
def mot_parenthese(mot):
    """Return the first parenthesised substring of *mot*, without the parentheses.

    Example: ``mot_parenthese("A(x,y)B") == "x,y"``.
    """
    debut = mot.index('(') + 1
    fin = mot.index(')', debut)
    return mot[debut:fin]
def dico_var(old, s):
    """Map each formal parameter of rule head *old* to its integer value in *s*.

    *old* is a rule predecessor such as ``"A(x,y)"`` and *s* the matching
    module instance such as ``"A(12,8)"``; the result is ``{'x': 12, 'y': 8}``.

    Bug fix: the previous implementation walked *old* and *s* with a shared
    index that it reset to the same position for both strings at every
    parameter, silently mis-pairing names with values whenever an earlier
    value's length differed from its parameter name's length (e.g.
    ``dico_var("A(x,y)", "A(12,8)")`` produced ``{'x': 12, '': 8}``).
    Parsing the two argument lists independently removes that coupling while
    keeping the result identical in all previously-working cases.
    """
    noms = old[old.index('(') + 1:old.index(')')].split(',')
    valeurs = s[s.index('(') + 1:s.index(')')].split(',')
    return {nom: int(val) for nom, val in zip(noms, valeurs)}
# rules have the form: ("pred", "condition", "succ")
def chaine_parametrique(axiome,regles,niveau):
    """Build the string of a parametric L-system expanded to the requested level.

    axiome -- start string, e.g. "B(2)A(4,4)"
    regles -- list of (predecessor, condition, successor) triples
    niveau -- number of rewriting iterations to apply

    Conditions and successor parameter expressions are evaluated with eval()
    against the current parameter bindings, so rules must come from a trusted
    source.
    """
    l = axiome
    n2 = len(regles)
    for n in range(niveau): # for each level
        L=''
        k = 0
        while k < len(l): # walk the current string
            b1 = False # flag: did any rule rewrite the current letter?
            for p in range(n2):
                old,cond,new = regles[p][0],regles[p][1],regles[p][2] # old = pred, new = succ
                if l[k] == old:
                    # the predecessor is a bare letter (no parameters) and the
                    # current letter equals it
                    L += new # append the successor verbatim
                    b1 = True # a rewrite happened
                    incr = 1 # incr counts how many characters to advance
                elif l[k] == old[0]:
                    # the current letter matches the predecessor's letter, but
                    # the predecessor carries parameters
                    s = l[k] + l[k+1]
                    j = 2
                    while l[k+j] != ')':
                        s += l[k+j]
                        j+=1
                    s += l[k+j]
                    # s now holds the letter together with its actual parameter values
                    if nbr_virgule(s) == nbr_virgule(old): # same number of parameters on both sides?
                        dico = dico_var(old,s) # bind each parameter name to its value
                        if eval(cond,dico) : # evaluate the rule's condition under those bindings
                            incr = j+1
                            i = 0
                            b1 = True
                            while i < len(new): # append the successor with its parameter expressions recomputed
                                if new[i] != '(':
                                    L += new[i]
                                    i+=1
                                else :
                                    L += new[i]
                                    i+= 1
                                    b2 = True
                                    while b2:
                                        s2 = ''
                                        while new[i] != ',' and new[i] != ')' :
                                            s2 += new[i]
                                            i+=1
                                        L += str(eval(s2,dico))
                                        if new[i] == ')':
                                            b2 = False
                                        L += new[i]
                                        i+=1
            if not b1 :
                # no rule matched: copy the character through unchanged
                L += l[k]
                incr = 1
            k += incr
        l=L
    return l
#Test de la chaine parametrique
#print chaine_parametrique( axiome = "B(2)A(4,4)", regles = [("A(x,y)","y <= 3","A(x*2,x+y)"),("A(x,y)","y>3","B(x)A(x/y,0)"),("B(x)","x<1","C"),("B(x)","x>=1","B(x-1)")], niveau = 6)
#print chaine_parametrique(axiome = "I(9)a(13)", regles = [("a(t)","t>0","[&(70)L]/(137.5)I(10)a(t-1)"),("a(t)","t==0","[&(70)L]/(137.5)I(10)A"),("A","*","[&(18)u(4)FFI(10)I(5)KKKK]/(137.5)I(8)A"),("I(t)","t>0","FI(t-1)"),("I(t)","t==0","F"),("u(t)","t>0","&(9)u(t-1)"),("u(t)","t==0","&(9)"),("L","*","[{-(18)FI(7)+(18)FI(7)+(18)FI(7)}][{+(18)FI(7)-(18)FI(7)-(18)FI(7)}]"),("K","*","[&(18){+(18)FI(2)-(36)FI(2)}][&(18){-(18)FI(2)+(36)FI(2)}]/(90)")], niveau = 15)
def traduction_parametrique(chaine, distance, angle):
    """Translate a parametric L-system string into a GeoTortue procedure.

    Writes procedure "plante" to Chaine.txt.  `distance` and `angle` are the
    defaults used when a symbol carries no explicit "(value)"; a symbol
    followed by "(value)" overwrites the default for the rest of the run.
    """
    # NOTE(review): the file handle is never closed; a with-block would be tidier.
    fichier = open("Chaine.txt","w")
    fichier.write("pour plante\n")
    l = len(chaine)
    k = 0
    # i numbers the helper turtles used for '['/']' push/pop
    i=0
    # j numbers the anchor turtles created by '.' (starting at 50)
    j=50
    # b is True while scanning inside "(...)" so characters such as '.' that
    # appear within a parameter list are not treated as drawing symbols
    b = False
    fin = False
    # only referenced by the commented-out filled-polyline variant below
    couleur = 'vert'
    while k < l:
        if k == l-1 :
            fin = True
        if chaine[k] == '(':
            b = True
        elif chaine[k] == ')' :
            b = False
        elif chaine[k] == 'F' :  # emit "av" (draw forward)
            if not fin:
                if chaine[k+1] == '(':
                    distance = float(mot_parenthese(chaine[k:]))
            fichier.write("av {0}\n".format(distance))
        elif chaine[k] == 'f' :  # emit pen-up / forward / pen-down
            if not fin:
                if chaine[k+1] == '(':
                    distance = float(mot_parenthese(chaine[k:]))
            fichier.write("lc\n av {0} \n bc\n".format(distance))
        elif chaine[k] == '+' :  # emit "tg" (turn)
            if not fin:
                if chaine[k+1] == '(':
                    angle = float(mot_parenthese(chaine[k:]))
            fichier.write("tg {0}\n".format(angle))
        elif chaine[k] == '&' :  # emit "pvh" (pitch)
            if not fin:
                if chaine[k+1] == '(':
                    angle = float(mot_parenthese(chaine[k:]))
            fichier.write("pvh {0}\n".format(angle))
        elif chaine[k] == '/' :  # emit "pvd" (roll)
            if not fin:
                if chaine[k+1] == '(':
                    angle = float(mot_parenthese(chaine[k:]))
            fichier.write("pvd {0}\n".format(angle))
        elif chaine[k] == "[" :
            # push: park helper turtle number i at the current position
            i+=1
            fichier.write("à {0}\n tlp X(Achille) Y(Achille) Z(Achille)\nimite Achille\nà Achille\n".format(i))
        elif chaine[k] == "]" :
            # pop: teleport back to the position stored by turtle number i
            fichier.write("tlp X({0}) Y({0}) Z({0})\nimite {0}\n".format(i))
            i-=1
        elif chaine[k] == "{" :
            fichier.write("remplis [\n")
            # j=50
        elif chaine[k] == "}" :
            fichier.write("]\n crayon noir\n")
            # fichier.write(" à {2}\n crayon {3}\n remplis [boucle k de 50 à {0} [ tlp X(k) Y(k) Z(k)\nvise k+1\nav dist(k,k+1)\n]\ntlp X({1}) Y({1}) Z({1})\nvise 50\nav dist(50,{1})\n ]\nà Achille\n".format(j-1,j,j+1,couleur))
        elif chaine[k] == "'" :
            fichier.write("crayon vert\n")
        elif chaine[k] == "`" :
            fichier.write("crayon rouge\n")
        elif chaine[k] == "." and not b :
            # anchor point: park turtle number j here (used for filled shapes)
            fichier.write("à {0}\n tlp X(Achille) Y(Achille) Z(Achille)\nimite Achille\nà Achille\n".format(j))
            j+=1
        k+=1
    fichier.write("fin")
def LSysteme_param(axiome,regles,niveau,distance, angle):
    """Generate the GeoTortue procedure for the given parametric L-system.

    Expands *axiome* with *regles* for *niveau* iterations, then translates
    the resulting string into turtle commands (written to Chaine.txt).
    """
    traduction_parametrique(
        chaine_parametrique(axiome, regles, niveau), distance, angle
    )
#test capsella (old)
#LSysteme_param( axiome = "I(9)a(13)", regles = [("a(t)","t>0","[&(70)L]/(137.5)I(10)a(t-1)"),("a(t)","t==0","[&(70)L]/(137.5)I(10)A"),("A","*","[&(18)u(4)FFI(10)I(5)KKKK]/(137.5)I(2)A"),("I(t)","t>0","FI(t-1)"),("I(t)","t==0","F"),("u(t)","t>0","&(9)u(t-1)"),("u(t)","t==0","&(9)"),("L","*","['{+(-18)FI(7)+(18)FI(7)+(18)FI(7)}]['{+(18)FI(7)+(-18)FI(7)+(-18)FI(7)}]"),("K","*","[&(18)`{+(18)FI(2)+(-36)FI(2)}][&(18)`{+(-18)FI(2)+(36)FI(2)}]/(90)")], niveau = 25, distance = 10, angle = 18)
#capsella (new, feuille triangle)
#LSysteme_param( axiome = "I(9)a(13)", regles = [("a(t)","t>0","[&(70)L]/(137.5)I(10)a(t-1)"),("a(t)","t==0","[&(70)L]/(137.5)I(10)A"),("A","*","[&(18)u(4)FFI(10)I(5)KKKK]/(137.5)I(2)A"),("I(t)","t>0","FI(t-1)"),("I(t)","t==0","F"),("u(t)","t>0","&(9)u(t-1)"),("u(t)","t==0","&(9)"),("L","*","[F(70)I(5)'{+(30)F(30)I(5)+(-120)F(30)I(5)+(-120)F(30)I(5)}]"),("K","*","[&(18)`{+(18)FI(2)+(-36)FI(2)}][&(18)`{+(-18)FI(2)+(36)FI(2)}]/(90)")], niveau = 15, distance = 10, angle = 18)
#test Lychnis coronaria
#LSysteme_param( axiome = "A(7)", regles = [("A(t)","t==7","FI(20)[&(60)L]/(90)[&(45)A(0)]/(90)[&(60)L]/(90)[&(45)A(4)]FI(10)K"),("A(t)","t<7","A(t+1)"),("I(t)","t>0","FFI(t-1)"),("L","*","['{+(-18)FI(7)+(18)FI(7)+(18)FI(7)}]['{+(18)FI(7)+(-18)FI(7)+(-18)FI(7)}]"),("K","*","[&(18)`{+(18)FI(2)+(-36)FI(2)}][&(18)`{+(-18)FI(2)+(36)FI(2)}]/(90)")], niveau =20, distance = 10, angle = 18)
#test feuille 1
#traduction_parametrique("'{[++++f.][++ff.][+fff.][fffff.][+(-45)fff.][+(-90)ff.][+(-180)f.]}",20,45)
#test feuille 2
#LSysteme_param(axiome = "[A][B]", regles = [("A","*","[+A{.].C.}"),("B","*","[-B{.].C.}"),("C","*","fC")],niveau = 5, distance = 20, angle =60)
#test
#LSysteme_param( axiome = "A(7)", regles = [("A(t)","t==7","FI(20)[&(60)L]/(90)[&(45)A(0)]/(90)[&(60)L]/(90)[&(45)A(4)]FI(10)B"),("A(t)","t<7","A(t+1)"),("I(t)","t>0","FFI(t-1)"),("L","*","['{+(-18)FI(7)+(18)FI(7)+(18)FI(7)}]['{+(18)FI(7)+(-18)FI(7)+(-18)FI(7)}]"),("K","*","[&(18)`{+(18)FI(2)+(-36)FI(2)}][&(18)`{+(-18)FI(2)+(36)FI(2)}]/(90)"),("B","*","[&&&P/W////W////W////W////W]"),("W","*","[`^F][{&&&&-f+f|-f+f}]")], niveau =20, distance = 10, angle = 18)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
2556,
2534,
1596,
25,
2998,
25,
1558,
1946,
198,
198,
31,
9800,
25,
42447,
198,
37811,
198,
198,
29113,
4242,
2235,
309,
4061,
36,
1058,
... | 1.674834 | 6,040 |
#!/usr/bin/env python3
import sys; assert sys.version_info[0] >= 3, "Python 3 required."
from pyblake2 import blake2b
from sapling_key_components import to_scalar, prf_expand, diversify_hash, DerivedAkNk, DerivedIvk
from sapling_generators import SPENDING_KEY_BASE, PROVING_KEY_BASE
from utils import i2leosp, i2lebsp, lebs2osp
from ff1 import ff1_aes256_encrypt
from tv_output import render_args, render_tv, option, Some
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this file as shown -- confirm it
    # is provided above (the test-vector generator entry point).
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
25064,
26,
6818,
25064,
13,
9641,
62,
10951,
58,
15,
60,
18189,
513,
11,
366,
37906,
513,
2672,
526,
198,
198,
6738,
12972,
2436,
539,
17,
1330,
698,
539,
17,
65,
198,
1... | 2.668571 | 175 |
import csv, re
from flask import Blueprint, request
from . import *
from .utility import *
from .db_func import *
# Blueprint grouping miscellaneous endpoints under the 'misc' namespace.
bp = Blueprint('misc', __name__)

# TODO: Add Mailchimp stats
# NOTE(review): the decorated handler for this route continues below this excerpt.
@bp.route('/newsletter', methods=["GET"])
| [
11748,
269,
21370,
11,
302,
198,
6738,
42903,
1330,
39932,
11,
2581,
198,
198,
6738,
764,
1330,
1635,
198,
6738,
764,
315,
879,
1330,
1635,
198,
6738,
764,
9945,
62,
20786,
1330,
1635,
198,
198,
46583,
796,
39932,
10786,
44374,
3256,
... | 3.027397 | 73 |
# -*- coding: utf-8 -*-
import sys
import wx
from wx.lib.mixins.listctrl import CheckListCtrlMixin, TextEditMixin, \
ListCtrlAutoWidthMixin, ListRowHighlighter
from collections import OrderedDict
MEDIUM_GREY = wx.Colour(224, 224, 224)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
25064,
198,
198,
11748,
266,
87,
198,
6738,
266,
87,
13,
8019,
13,
19816,
1040,
13,
4868,
44755,
1330,
6822,
8053,
40069,
35608,
259,
11,
8255,
18378,
35608,... | 2.6 | 95 |
# sqlalchemy/ext/baked.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Baked query extension.
Provides a creational pattern for the :class:`.query.Query` object which
allows the fully constructed object, Core select statement, and string
compiled result to be fully cached.
"""
from ..orm.query import Query
from ..orm import strategies, attributes, properties, \
strategy_options, util as orm_util, interfaces
from .. import log as sqla_log
from ..sql import util as sql_util
from ..orm import exc as orm_exc
from .. import exc as sa_exc
from .. import util
import copy
import logging
log = logging.getLogger(__name__)
class BakedQuery(object):
"""A builder object for :class:`.query.Query` objects."""
__slots__ = 'steps', '_bakery', '_cache_key', '_spoiled'
@classmethod
def bakery(cls, size=200):
"""Construct a new bakery."""
_bakery = util.LRUCache(size)
return call
def add_criteria(self, fn, *args):
"""Add a criteria function to this :class:`.BakedQuery`.
This is equivalent to using the ``+=`` operator to
modify a :class:`.BakedQuery` in-place.
"""
self._update_cache_key(fn, args)
self.steps.append(fn)
return self
def with_criteria(self, fn, *args):
"""Add a criteria function to a :class:`.BakedQuery` cloned from this one.
This is equivalent to using the ``+`` operator to
produce a new :class:`.BakedQuery` with modifications.
"""
return self._clone().add_criteria(fn, *args)
def for_session(self, session):
"""Return a :class:`.Result` object for this :class:`.BakedQuery`.
This is equivalent to calling the :class:`.BakedQuery` as a
Python callable, e.g. ``result = my_baked_query(session)``.
"""
return Result(self, session)
def spoil(self, full=False):
"""Cancel any query caching that will occur on this BakedQuery object.
The BakedQuery can continue to be used normally, however additional
creational functions will not be cached; they will be called
on every invocation.
This is to support the case where a particular step in constructing
a baked query disqualifies the query from being cacheable, such
as a variant that relies upon some uncacheable value.
:param full: if False, only functions added to this
:class:`.BakedQuery` object subsequent to the spoil step will be
non-cached; the state of the :class:`.BakedQuery` up until
this point will be pulled from the cache. If True, then the
entire :class:`.Query` object is built from scratch each
time, with all creational functions being called on each
invocation.
"""
if not full:
_spoil_point = self._clone()
_spoil_point._cache_key += ('_query_only', )
self.steps = [_spoil_point._retrieve_baked_query]
self._spoiled = True
return self
def _bake_subquery_loaders(self, session, context):
"""convert subquery eager loaders in the cache into baked queries.
For subquery eager loading to work, all we need here is that the
Query point to the correct session when it is run. However, since
we are "baking" anyway, we may as well also turn the query into
a "baked" query so that we save on performance too.
"""
context.attributes['baked_queries'] = baked_queries = []
for k, v in list(context.attributes.items()):
if isinstance(v, Query):
if 'subquery' in k:
bk = BakedQuery(self._bakery, lambda *args: v)
bk._cache_key = self._cache_key + k
bk._bake(session)
baked_queries.append((k, bk._cache_key, v))
del context.attributes[k]
def _unbake_subquery_loaders(self, session, context, params):
"""Retrieve subquery eager loaders stored by _bake_subquery_loaders
and turn them back into Result objects that will iterate just
like a Query object.
"""
for k, cache_key, query in context.attributes["baked_queries"]:
bk = BakedQuery(self._bakery,
lambda sess, q=query: q.with_session(sess))
bk._cache_key = cache_key
context.attributes[k] = bk.for_session(session).params(**params)
class Result(object):
"""Invokes a :class:`.BakedQuery` against a :class:`.Session`.
The :class:`.Result` object is where the actual :class:`.query.Query`
object gets created, or retrieved from the cache,
against a target :class:`.Session`, and is then invoked for results.
"""
__slots__ = 'bq', 'session', '_params'
def params(self, *args, **kw):
"""Specify parameters to be replaced into the string SQL statement."""
if len(args) == 1:
kw.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params.update(kw)
return self
def first(self):
"""Return the first row.
Equivalent to :meth:`.Query.first`.
"""
bq = self.bq.with_criteria(lambda q: q.slice(0, 1))
ret = list(bq.for_session(self.session).params(self._params))
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Equivalent to :meth:`.Query.one`.
"""
try:
ret = self.one_or_none()
except orm_exc.MultipleResultsFound:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
else:
if ret is None:
raise orm_exc.NoResultFound("No row was found for one()")
return ret
def one_or_none(self):
"""Return one or zero results, or raise an exception for multiple
rows.
Equivalent to :meth:`.Query.one_or_none`.
.. versionadded:: 1.0.9
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
return None
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one_or_none()")
def all(self):
"""Return all rows.
Equivalent to :meth:`.Query.all`.
"""
return list(self)
def get(self, ident):
"""Retrieve an object based on identity.
Equivalent to :meth:`.Query.get`.
"""
query = self.bq.steps[0](self.session)
return query._get_impl(ident, self._load_on_ident)
def _load_on_ident(self, query, key):
"""Load the given identity key from the database."""
ident = key[1]
mapper = query._mapper_zero()
_get_clause, _get_params = mapper._get_clause
# cache the query against a key that includes
# which positions in the primary key are NULL
# (remember, we can map to an OUTER JOIN)
bq = self.bq
# add the clause we got from mapper._get_clause to the cache
# key so that if a race causes multiple calls to _get_clause,
# we've cached on ours
bq = bq._clone()
bq._cache_key += (_get_clause, )
bq = bq.with_criteria(setup, tuple(elem is None for elem in ident))
params = dict([
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(ident, mapper.primary_key)
])
result = list(bq.for_session(self.session).params(**params))
l = len(result)
if l > 1:
raise orm_exc.MultipleResultsFound()
elif l:
return result[0]
else:
return None
def bake_lazy_loaders():
    """Enable the use of baked queries for all lazyloaders systemwide.
    This operation should be safe for all lazy loaders, and will reduce
    Python overhead for these operations.
    """
    # Reset the baked loader's registered strategy keys before re-registering.
    # (BakedLazyLoader is defined elsewhere in this module.)
    BakedLazyLoader._strategy_keys[:] = []
    # Register the baked loader for every "lazy" spelling a relationship can use.
    properties.RelationshipProperty.strategy_for(
        lazy="select")(BakedLazyLoader)
    properties.RelationshipProperty.strategy_for(
        lazy=True)(BakedLazyLoader)
    properties.RelationshipProperty.strategy_for(
        lazy="baked_select")(BakedLazyLoader)
    # Mirror the registered keys onto the stock LazyLoader's registry.
    strategies.LazyLoader._strategy_keys[:] = BakedLazyLoader._strategy_keys[:]
def unbake_lazy_loaders():
    """Disable the use of baked queries for all lazyloaders systemwide.
    This operation reverts the changes produced by :func:`.bake_lazy_loaders`.
    """
    # Clear both registries, then restore the stock LazyLoader for the default
    # "lazy" spellings; "baked_select" stays mapped to BakedLazyLoader.
    strategies.LazyLoader._strategy_keys[:] = []
    BakedLazyLoader._strategy_keys[:] = []
    properties.RelationshipProperty.strategy_for(
        lazy="select")(strategies.LazyLoader)
    properties.RelationshipProperty.strategy_for(
        lazy=True)(strategies.LazyLoader)
    properties.RelationshipProperty.strategy_for(
        lazy="baked_select")(BakedLazyLoader)
    # Sanity check: the stock loader must have re-registered at least one key.
    assert strategies.LazyLoader._strategy_keys
# NOTE(review): the first two decorators below look like class decorators
# (class_logger / strategy_for) that belonged to a class definition missing
# from this excerpt; confirm against upstream sqlalchemy.ext.baked.
@sqla_log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="baked_select")
@strategy_options.loader_option()
def baked_lazyload(loadopt, attr):
    """Indicate that the given attribute should be loaded using "lazy"
    loading with a "baked" query used in the load.
    """
    return loadopt.set_relationship_strategy(attr, {"lazy": "baked_select"})
@baked_lazyload._add_unbound_fn
@baked_lazyload._add_unbound_all_fn
baked_lazyload = baked_lazyload._unbound_fn
baked_lazyload_all = baked_lazyload_all._unbound_all_fn
bakery = BakedQuery.bakery
| [
2,
44161,
282,
26599,
14,
2302,
14,
65,
4335,
13,
9078,
198,
2,
15069,
357,
34,
8,
5075,
12,
5539,
262,
16363,
2348,
26599,
7035,
290,
20420,
198,
2,
1279,
3826,
37195,
20673,
2393,
29,
198,
2,
198,
2,
770,
8265,
318,
636,
286,
... | 2.442973 | 4,077 |
from pprint import pformat
from docutils.nodes import (
field_list, field, field_name, field_body, paragraph,
strong, inline, emphasis, line_block, line, literal, literal_block)
from docutils.parsers.rst.directives import unchanged
from sphinx.util.compat import Directive
from confmodel.config import ConfigField
| [
6738,
279,
4798,
1330,
279,
18982,
198,
198,
6738,
2205,
26791,
13,
77,
4147,
1330,
357,
198,
220,
220,
220,
2214,
62,
4868,
11,
2214,
11,
2214,
62,
3672,
11,
2214,
62,
2618,
11,
7322,
11,
198,
220,
220,
220,
1913,
11,
26098,
11,
... | 3.27451 | 102 |
from .bugu_bot import bugu_bot
| [
6738,
764,
25456,
84,
62,
13645,
1330,
809,
5162,
62,
13645,
198
] | 2.583333 | 12 |
name = "mpesa" | [
3672,
796,
366,
3149,
49183,
1
] | 2.333333 | 6 |
# -*- coding: utf-8 -*-
from pathlib import Path
from survey.exporter.tex.configuration import Configuration
from survey.exporter.tex.survey2tex import Survey2Tex
from survey.models import Survey
from survey.tests.management.test_management import TestManagement
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
5526,
13,
1069,
26634,
13,
16886,
13,
11250,
3924,
1330,
28373,
198,
6738,
5526,
13,
1069,
26634,
13,
16886,
13,
1179... | 3.643836 | 73 |
from .File.Controller import FileController
from .File.Service import FileService
# from .File.Repository import RepositoryController
| [
6738,
764,
8979,
13,
22130,
1330,
9220,
22130,
198,
6738,
764,
8979,
13,
16177,
1330,
9220,
16177,
198,
2,
422,
764,
8979,
13,
6207,
13264,
1330,
1432,
13264,
22130,
198
] | 4.466667 | 30 |
# coding: utf-8
"""
THORChain API
This documentation outlines the API for THORChain. NOTE: This document is a **work in progress**. # noqa: E501
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from thornode_client.configuration import Configuration
class Node(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each model attribute name to its declared swagger type.
    # to_dict() iterates this mapping to serialize the instance.
    swagger_types = {
        'node_address': 'str',
        'status': 'str',
        'pub_key_set': 'object',
        'validator_cons_pub_key': 'str',
        'bond': 'str',
        'active_block_height': 'str',
        'bond_address': 'str',
        'status_since': 'str',
        'signer_membership': 'list[object]',
        'requested_to_leave': 'bool',
        'forced_to_leave': 'bool',
        'ip_address': 'str',
        'version': 'str',
        'slash_points': 'str',
        'jail': 'object',
        'current_award': 'str',
        'observe_chains': 'str',
        'preflight_status': 'object'
    }
    # Maps each model attribute name to the corresponding JSON key in the
    # API payload (here the two happen to be identical for every field).
    attribute_map = {
        'node_address': 'node_address',
        'status': 'status',
        'pub_key_set': 'pub_key_set',
        'validator_cons_pub_key': 'validator_cons_pub_key',
        'bond': 'bond',
        'active_block_height': 'active_block_height',
        'bond_address': 'bond_address',
        'status_since': 'status_since',
        'signer_membership': 'signer_membership',
        'requested_to_leave': 'requested_to_leave',
        'forced_to_leave': 'forced_to_leave',
        'ip_address': 'ip_address',
        'version': 'version',
        'slash_points': 'slash_points',
        'jail': 'jail',
        'current_award': 'current_award',
        'observe_chains': 'observe_chains',
        'preflight_status': 'preflight_status'
    }
    def __init__(self, node_address=None, status=None, pub_key_set=None, validator_cons_pub_key=None, bond=None, active_block_height=None, bond_address=None, status_since=None, signer_membership=None, requested_to_leave=None, forced_to_leave=None, ip_address=None, version=None, slash_points=None, jail=None, current_award=None, observe_chains=None, preflight_status=None, _configuration=None):  # noqa: E501
        """Node - a model defined in Swagger"""  # noqa: E501
        # Fall back to a default client Configuration when none is injected.
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        # Backing fields for the properties below; every field starts unset.
        self._node_address = None
        self._status = None
        self._pub_key_set = None
        self._validator_cons_pub_key = None
        self._bond = None
        self._active_block_height = None
        self._bond_address = None
        self._status_since = None
        self._signer_membership = None
        self._requested_to_leave = None
        self._forced_to_leave = None
        self._ip_address = None
        self._version = None
        self._slash_points = None
        self._jail = None
        self._current_award = None
        self._observe_chains = None
        self._preflight_status = None
        # This model has no polymorphic discriminator.
        self.discriminator = None
        # Assign only the explicitly provided values (via the property
        # setters) so unset fields remain None rather than being clobbered.
        if node_address is not None:
            self.node_address = node_address
        if status is not None:
            self.status = status
        if pub_key_set is not None:
            self.pub_key_set = pub_key_set
        if validator_cons_pub_key is not None:
            self.validator_cons_pub_key = validator_cons_pub_key
        if bond is not None:
            self.bond = bond
        if active_block_height is not None:
            self.active_block_height = active_block_height
        if bond_address is not None:
            self.bond_address = bond_address
        if status_since is not None:
            self.status_since = status_since
        if signer_membership is not None:
            self.signer_membership = signer_membership
        if requested_to_leave is not None:
            self.requested_to_leave = requested_to_leave
        if forced_to_leave is not None:
            self.forced_to_leave = forced_to_leave
        if ip_address is not None:
            self.ip_address = ip_address
        if version is not None:
            self.version = version
        if slash_points is not None:
            self.slash_points = slash_points
        if jail is not None:
            self.jail = jail
        if current_award is not None:
            self.current_award = current_award
        if observe_chains is not None:
            self.observe_chains = observe_chains
        if preflight_status is not None:
            self.preflight_status = preflight_status
    # ------------------------------------------------------------------
    # Generated accessors: one trivial @property getter/setter pair per
    # attribute, delegating to the corresponding private backing field.
    # ------------------------------------------------------------------
    @property
    def node_address(self):
        """Gets the node_address of this Node.  # noqa: E501
        node address  # noqa: E501
        :return: The node_address of this Node.  # noqa: E501
        :rtype: str
        """
        return self._node_address
    @node_address.setter
    def node_address(self, node_address):
        """Sets the node_address of this Node.
        node address  # noqa: E501
        :param node_address: The node_address of this Node.  # noqa: E501
        :type: str
        """
        self._node_address = node_address
    @property
    def status(self):
        """Gets the status of this Node.  # noqa: E501
        status , values can be active,disabled,standby  # noqa: E501
        :return: The status of this Node.  # noqa: E501
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this Node.
        status , values can be active,disabled,standby  # noqa: E501
        :param status: The status of this Node.  # noqa: E501
        :type: str
        """
        self._status = status
    @property
    def pub_key_set(self):
        """Gets the pub_key_set of this Node.  # noqa: E501
        :return: The pub_key_set of this Node.  # noqa: E501
        :rtype: object
        """
        return self._pub_key_set
    @pub_key_set.setter
    def pub_key_set(self, pub_key_set):
        """Sets the pub_key_set of this Node.
        :param pub_key_set: The pub_key_set of this Node.  # noqa: E501
        :type: object
        """
        self._pub_key_set = pub_key_set
    @property
    def validator_cons_pub_key(self):
        """Gets the validator_cons_pub_key of this Node.  # noqa: E501
        the consensus pubkey used by the node  # noqa: E501
        :return: The validator_cons_pub_key of this Node.  # noqa: E501
        :rtype: str
        """
        return self._validator_cons_pub_key
    @validator_cons_pub_key.setter
    def validator_cons_pub_key(self, validator_cons_pub_key):
        """Sets the validator_cons_pub_key of this Node.
        the consensus pubkey used by the node  # noqa: E501
        :param validator_cons_pub_key: The validator_cons_pub_key of this Node.  # noqa: E501
        :type: str
        """
        self._validator_cons_pub_key = validator_cons_pub_key
    @property
    def bond(self):
        """Gets the bond of this Node.  # noqa: E501
        current bond  # noqa: E501
        :return: The bond of this Node.  # noqa: E501
        :rtype: str
        """
        return self._bond
    @bond.setter
    def bond(self, bond):
        """Sets the bond of this Node.
        current bond  # noqa: E501
        :param bond: The bond of this Node.  # noqa: E501
        :type: str
        """
        self._bond = bond
    @property
    def active_block_height(self):
        """Gets the active_block_height of this Node.  # noqa: E501
        block height this node become active  # noqa: E501
        :return: The active_block_height of this Node.  # noqa: E501
        :rtype: str
        """
        return self._active_block_height
    @active_block_height.setter
    def active_block_height(self, active_block_height):
        """Sets the active_block_height of this Node.
        block height this node become active  # noqa: E501
        :param active_block_height: The active_block_height of this Node.  # noqa: E501
        :type: str
        """
        self._active_block_height = active_block_height
    @property
    def bond_address(self):
        """Gets the bond_address of this Node.  # noqa: E501
        bond address  # noqa: E501
        :return: The bond_address of this Node.  # noqa: E501
        :rtype: str
        """
        return self._bond_address
    @bond_address.setter
    def bond_address(self, bond_address):
        """Sets the bond_address of this Node.
        bond address  # noqa: E501
        :param bond_address: The bond_address of this Node.  # noqa: E501
        :type: str
        """
        self._bond_address = bond_address
    @property
    def status_since(self):
        """Gets the status_since of this Node.  # noqa: E501
        block height this node become current status  # noqa: E501
        :return: The status_since of this Node.  # noqa: E501
        :rtype: str
        """
        return self._status_since
    @status_since.setter
    def status_since(self, status_since):
        """Sets the status_since of this Node.
        block height this node become current status  # noqa: E501
        :param status_since: The status_since of this Node.  # noqa: E501
        :type: str
        """
        self._status_since = status_since
    @property
    def signer_membership(self):
        """Gets the signer_membership of this Node.  # noqa: E501
        a list of vault public key that this node is a member of  # noqa: E501
        :return: The signer_membership of this Node.  # noqa: E501
        :rtype: list[object]
        """
        return self._signer_membership
    @signer_membership.setter
    def signer_membership(self, signer_membership):
        """Sets the signer_membership of this Node.
        a list of vault public key that this node is a member of  # noqa: E501
        :param signer_membership: The signer_membership of this Node.  # noqa: E501
        :type: list[object]
        """
        self._signer_membership = signer_membership
    @property
    def requested_to_leave(self):
        """Gets the requested_to_leave of this Node.  # noqa: E501
        indicate whether this node had requested to leave_height  # noqa: E501
        :return: The requested_to_leave of this Node.  # noqa: E501
        :rtype: bool
        """
        return self._requested_to_leave
    @requested_to_leave.setter
    def requested_to_leave(self, requested_to_leave):
        """Sets the requested_to_leave of this Node.
        indicate whether this node had requested to leave_height  # noqa: E501
        :param requested_to_leave: The requested_to_leave of this Node.  # noqa: E501
        :type: bool
        """
        self._requested_to_leave = requested_to_leave
    @property
    def forced_to_leave(self):
        """Gets the forced_to_leave of this Node.  # noqa: E501
        indicate whether this node had been forced to leave by the network or not, if this field is true , usually means this node had been banned  # noqa: E501
        :return: The forced_to_leave of this Node.  # noqa: E501
        :rtype: bool
        """
        return self._forced_to_leave
    @forced_to_leave.setter
    def forced_to_leave(self, forced_to_leave):
        """Sets the forced_to_leave of this Node.
        indicate whether this node had been forced to leave by the network or not, if this field is true , usually means this node had been banned  # noqa: E501
        :param forced_to_leave: The forced_to_leave of this Node.  # noqa: E501
        :type: bool
        """
        self._forced_to_leave = forced_to_leave
    @property
    def ip_address(self):
        """Gets the ip_address of this Node.  # noqa: E501
        node ip address  # noqa: E501
        :return: The ip_address of this Node.  # noqa: E501
        :rtype: str
        """
        return self._ip_address
    @ip_address.setter
    def ip_address(self, ip_address):
        """Sets the ip_address of this Node.
        node ip address  # noqa: E501
        :param ip_address: The ip_address of this Node.  # noqa: E501
        :type: str
        """
        self._ip_address = ip_address
    @property
    def version(self):
        """Gets the version of this Node.  # noqa: E501
        the version of thornode software this node is running  # noqa: E501
        :return: The version of this Node.  # noqa: E501
        :rtype: str
        """
        return self._version
    @version.setter
    def version(self, version):
        """Sets the version of this Node.
        the version of thornode software this node is running  # noqa: E501
        :param version: The version of this Node.  # noqa: E501
        :type: str
        """
        self._version = version
    @property
    def slash_points(self):
        """Gets the slash_points of this Node.  # noqa: E501
        the slash points the node accumulated when they are active , slash points will be reset next time when node become active  # noqa: E501
        :return: The slash_points of this Node.  # noqa: E501
        :rtype: str
        """
        return self._slash_points
    @slash_points.setter
    def slash_points(self, slash_points):
        """Sets the slash_points of this Node.
        the slash points the node accumulated when they are active , slash points will be reset next time when node become active  # noqa: E501
        :param slash_points: The slash_points of this Node.  # noqa: E501
        :type: str
        """
        self._slash_points = slash_points
    @property
    def jail(self):
        """Gets the jail of this Node.  # noqa: E501
        :return: The jail of this Node.  # noqa: E501
        :rtype: object
        """
        return self._jail
    @jail.setter
    def jail(self, jail):
        """Sets the jail of this Node.
        :param jail: The jail of this Node.  # noqa: E501
        :type: object
        """
        self._jail = jail
    @property
    def current_award(self):
        """Gets the current_award of this Node.  # noqa: E501
        node current award  # noqa: E501
        :return: The current_award of this Node.  # noqa: E501
        :rtype: str
        """
        return self._current_award
    @current_award.setter
    def current_award(self, current_award):
        """Sets the current_award of this Node.
        node current award  # noqa: E501
        :param current_award: The current_award of this Node.  # noqa: E501
        :type: str
        """
        self._current_award = current_award
    @property
    def observe_chains(self):
        """Gets the observe_chains of this Node.  # noqa: E501
        chain and block heights this node is observing , this is useful to know whether a node is falling behind in regards to observing  # noqa: E501
        :return: The observe_chains of this Node.  # noqa: E501
        :rtype: str
        """
        return self._observe_chains
    @observe_chains.setter
    def observe_chains(self, observe_chains):
        """Sets the observe_chains of this Node.
        chain and block heights this node is observing , this is useful to know whether a node is falling behind in regards to observing  # noqa: E501
        :param observe_chains: The observe_chains of this Node.  # noqa: E501
        :type: str
        """
        self._observe_chains = observe_chains
    @property
    def preflight_status(self):
        """Gets the preflight_status of this Node.  # noqa: E501
        :return: The preflight_status of this Node.  # noqa: E501
        :rtype: object
        """
        return self._preflight_status
    @preflight_status.setter
    def preflight_status(self, preflight_status):
        """Sets the preflight_status of this Node.
        :param preflight_status: The preflight_status of this Node.  # noqa: E501
        :type: object
        """
        self._preflight_status = preflight_status
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the declared attributes and serialize each value, recursing
        # into nested models (anything exposing to_dict), lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each element that is itself a model.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize model values inside plain dicts.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated guard: only relevant if the model subclassed dict
        # (Node does not, so this branch is effectively dead code here).
        if issubclass(Node, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: compare the serialized attribute dicts.
        if not isinstance(other, Node):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # Explicit inverse of __eq__ (needed for Python 2 compatibility).
        if not isinstance(other, Node):
            return True
        return self.to_dict() != other.to_dict()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
2320,
1581,
35491,
7824,
628,
220,
220,
220,
770,
10314,
27430,
262,
7824,
329,
2320,
1581,
35491,
13,
220,
24550,
25,
770,
3188,
318,
257,
12429,
1818,
287,
4371,... | 2.292094 | 7,792 |
from django.db import models
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198
] | 3.444444 | 9 |
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the reproman package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Common interface options
"""
__docformat__ = 'restructuredtext'
from reproman.support.param import Parameter
from reproman.support.constraints import EnsureChoice
from reproman.support.constraints import EnsureInt, EnsureNone, EnsureStr
trace_opt = Parameter(
args=("--trace",),
action="store_true",
doc="""if set, trace execution within the environment""")
#
# Resource specifications
#
resref_arg = Parameter(
args=("resref",),
metavar="RESOURCE",
doc="""Name or ID of the resource to operate on. To see available resources, run
'reproman ls'""",
constraints=EnsureStr() | EnsureNone())
resref_opt = Parameter(
args=("-r", "--resource",),
dest="resref",
metavar="RESOURCE",
doc="""Name or ID of the resource to operate on. To see available resources, run
'reproman ls'""",
constraints=EnsureStr() | EnsureNone())
resref_type_opt = Parameter(
args=("--resref-type",),
metavar="TYPE",
doc="""A resource can be referenced by its name or ID. In the unlikely
case that a name collides with an ID, explicitly specify 'name' or 'id' to
disambiguate.""",
constraints=EnsureChoice("auto", "name", "id"))
| [
2,
409,
25,
900,
39747,
28,
19,
40379,
28,
19,
1509,
28,
19,
645,
316,
25,
198,
2,
22492,
44386,
44386,
44386,
44386,
44386,
44386,
44386,
44386,
44386,
44386,
44386,
44386,
44386,
44386,
44386,
44386,
44386,
44386,
22492,
198,
2,
198,
... | 3.016162 | 495 |