content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import string
from utils import *
IDEAL_Q_LENGTH = 10
MAX_SCORE = 17
class Ranker:
'''
Holds the ranking data structure for how questions are scored and sorted
The q_list becomes a max priority queue, with the max score at the front.
Properties:
q_list: (list(Question, int)) list tuples of unranked questions and scores
avg_coref_len: (int) average length of coref clusters
'''
| [
11748,
4731,
198,
6738,
3384,
4487,
1330,
1635,
198,
198,
14114,
1847,
62,
48,
62,
43,
49494,
796,
838,
198,
22921,
62,
6173,
6965,
796,
1596,
198,
198,
4871,
10916,
263,
25,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
9340,
... | 2.827815 | 151 |
from . import arduino
from . import platformio
from . import unsupported
| [
6738,
764,
1330,
610,
24493,
198,
6738,
764,
1330,
3859,
952,
198,
6738,
764,
1330,
24222,
198
] | 4.294118 | 17 |
"""
Create Profiles for existing Users
"""
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
def create_profiles(apps, schema_editor):
"""
Create Profiles for all users that do't have it
"""
Users = apps.get_model("auth", "User")
Profile = apps.get_model("profiles", "Profile")
for user in Users.objects.all():
if not hasattr(user, 'profile'):
Profile.objects.create(user=user)
| [
37811,
198,
16447,
4415,
2915,
329,
4683,
18987,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
... | 2.877193 | 171 |
import severus
| [
11748,
1750,
385,
628
] | 4 | 4 |
from threading import current_thread
from django.utils.deprecation import MiddlewareMixin
_requests = {}
| [
6738,
4704,
278,
1330,
1459,
62,
16663,
201,
198,
201,
198,
6738,
42625,
14208,
13,
26791,
13,
10378,
8344,
341,
1330,
6046,
1574,
35608,
259,
201,
198,
201,
198,
201,
198,
62,
8897,
3558,
796,
23884,
201,
198,
201,
198,
201,
198
] | 2.809524 | 42 |
import json
import pandas as pd
import os
def getDict(title, dict):
'''
获取一个子列表,并返回该列表
'''
for child in dict:
if child['title'] == title:
return child
return None
def readSingleCarFile(path):
'''
读位于allData目录下的单个车辆信息json文件
'''
with open(path) as f:
car_dict = json.load(f)
return {
'id': car_dict['dataRough']['baseInfo']['carOtherInfo']['clueId'], # clueId
'car_name': car_dict['dataRough']['carCommodityInfo']['basicInfo']['titleDesc'], # 车辆详细型号
'car_brand': car_dict['dataRough']['baseInfo']['carOtherInfo']['minorName'], # 车辆品牌
'car_tag': car_dict['dataRough']['baseInfo']['carOtherInfo']['tagName'], # 具体型号
'price': car_dict['dataRough']['carCommodityInfo']['carPriceInfo']['styleData']['price']['value'], # 二手车价格 单位:元
'new_price': car_dict['dataRough']['carCommodityInfo']['carPriceInfo']['styleData']['newPrice']['value'], # 新车价格 单位:元
'complexOutlook': car_dict['dataRough']['carCommodityInfo']['carRecordInfo']['reportResultAnalysis']['complex'] if 'reportResultAnalysis' in car_dict['dataRough']['carCommodityInfo']['carRecordInfo'] else None, # 整体成色,如果不存在该词条返回None
'firstCert': car_dict['dataRough']['carCommodityInfo']['carRecordInfo']['salienceItem'][0]['value'], # 首次上牌年月
'odograph': car_dict['dataRough']['carCommodityInfo']['carRecordInfo']['salienceItem'][1]['value'], # 表显里程
'allPower': car_dict['dataRough']['carCommodityInfo']['carRecordInfo']['summary'][2]['value'], # 总功率 单位kW
'carBelong': car_dict['dataRough']['carCommodityInfo']['carRecordInfo']['summary'][3]['value'], # 车牌归属地
'range': car_dict['dataRough']['carCommodityInfo']['carRecordInfo']['summary'][4]['value'], # 续航里程
'isDome': 1 if car_dict['dataDetail']['list'][0]['children'][1]['content'] == '国产' else 0, # 是否为国产
'wheelBase': getDict('车身结构', car_dict['dataDetail']['list'])['children'][0]['content'] if getDict('车身结构', car_dict['dataDetail']['list']) else None, # 轴距(mm)
'drivingMode': getDict('底盘转向', car_dict['dataDetail']['list'])['children'][0]['content'] if getDict('底盘转向', car_dict['dataDetail']['list']) else None, # 驱动方式
}
if __name__ == "__main__":
path = 'crawl_for_guazi/newData'
allCarFiles = os.listdir(path)
df = pd.DataFrame([])
for singleFileName in allCarFiles:
if '.json' not in singleFileName:
continue
singleCardict = readSingleCarFile(path + f'/{singleFileName}')
tempdf = pd.DataFrame(singleCardict, index=[0])
df = df.append(tempdf)
df = df.reset_index(drop = True)
# print(df.head())
# print(df.shape)
df.to_csv('crawl_for_guazi/newData.csv', index = False)
| [
11748,
33918,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
628,
198,
4299,
651,
35,
713,
7,
7839,
11,
8633,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
5525,
236,
115,
20998,
244,
31660,
10310,
103,
36310,
2... | 1.861772 | 1,512 |
"""BL Schedule."""
| [
37811,
9148,
19281,
526,
15931,
198
] | 3.166667 | 6 |
from tadataka.optimization.functions import Function
| [
6738,
36264,
48088,
13,
40085,
1634,
13,
12543,
2733,
1330,
15553,
628,
198
] | 4.230769 | 13 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements listing projects and setting default project."""
from __future__ import absolute_import
from __future__ import unicode_literals
try:
import IPython
import IPython.core.magic
import IPython.core.display
except ImportError:
raise Exception('This module can only be loaded in ipython.')
import fnmatch
import datalab.utils.commands
import datalab.context
@IPython.core.magic.register_line_cell_magic
| [
2,
15069,
1584,
3012,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
407,
779,
428,
2393,
2845,
198,
2,
287,
11846,
351,
262,
13789,
... | 3.787313 | 268 |
from flask import Blueprint, render_template, request, flash, jsonify, redirect, url_for
from flask_login import login_required, current_user
from . import db, cursor, dbmysql
import json
import random
from . import user_info
views = Blueprint('views', __name__)
@views.route('/', methods=['GET', 'POST'])
@login_required
@views.route('/register', methods=['GET', 'POST'])
@login_required
@views.route('/report', methods=['GET', 'POST'])
@login_required
@views.route('/search', methods=['GET', 'POST']) | [
6738,
42903,
1330,
39932,
11,
8543,
62,
28243,
11,
2581,
11,
7644,
11,
33918,
1958,
11,
18941,
11,
19016,
62,
1640,
198,
6738,
42903,
62,
38235,
1330,
17594,
62,
35827,
11,
1459,
62,
7220,
198,
6738,
764,
1330,
20613,
11,
23493,
11,
... | 3.15528 | 161 |
import operator
from idaapi import *
from idautils import *
# Globals: risky but friendly peeps to speed up coding
# A CFG constructed by angr CFGFast analysis
cfg = ''
# Count of methods discovered by auto-analysis
methods_identified = 0
# A list of FuncInfo() structures holding information on
# functions identified by auto-analysis
functions_info = []
# The structure holding relevant information on a function
# Populate the data structures by filling up information on methods found in the blob
# Populate the data structures by filling up information on methods found in the blob
# The methods reading from and writing to the eMMC card are the primary sources
# of taint. It's highly likely that these methods will be invoked many more times
# over the other ones. The real bummer is when methods like __stack_chk_fail() tops
# in the list :-( To make the situation worse, a bunch of libc functions precede
# mmc_read(). Can we eliminate these by computing symbolic summaries? | [
11748,
10088,
198,
6738,
220,
3755,
15042,
1330,
1635,
198,
6738,
4686,
2306,
4487,
1330,
1635,
628,
198,
2,
40713,
874,
25,
17564,
475,
8030,
613,
25386,
284,
2866,
510,
19617,
198,
2,
317,
18551,
38,
12006,
416,
281,
2164,
18551,
21... | 4.115702 | 242 |
#coding: utf-8
import json, pymysql, datetime, time
import mysqlcredentials
| [
2,
66,
7656,
25,
3384,
69,
12,
23,
198,
11748,
33918,
11,
279,
4948,
893,
13976,
11,
4818,
8079,
11,
640,
198,
11748,
48761,
66,
445,
14817,
628
] | 2.75 | 28 |
"""Models for handling metadata."""
import dataclasses
import logging
from django.db import models
from django.db.models import F, Func
from django.utils.translation import gettext_lazy as _
from edd.fields import VarCharField
from .common import EDDSerialize
logger = logging.getLogger(__name__)
class MetadataGroup(models.Model):
"""Group together types of metadata with a label."""
group_name = VarCharField(
help_text=_("Name of the group/class of metadata."),
unique=True,
verbose_name=_("Group Name"),
)
@dataclasses.dataclass
class Metadata:
"""Mirrors fields of MetadataType, to define built-in Metadata."""
# required
for_context: str
type_name: str
uuid: str
# optional
default_value: str = None
input_type: str = None
postfix: str = None
prefix: str = None
type_field: str = None
type_i18n: str = None
class MetadataType(models.Model, EDDSerialize):
"""Type information for arbitrary key-value data stored on EDDObject instances."""
# defining values to use in the for_context field
STUDY = "S"
LINE = "L"
ASSAY = "A"
CONTEXT_SET = ((STUDY, _("Study")), (LINE, _("Line")), (ASSAY, _("Assay")))
# pre-defined values that should always exist in the system
_SYSTEM_TYPES = (
# type_field metadata to map to Model object fields
Metadata(
for_context=ASSAY,
input_type="textarea",
type_field="description",
type_i18n="main.models.Assay.description",
type_name="Assay Description",
uuid="4929a6ad-370c-48c6-941f-6cd154162315",
),
Metadata(
for_context=ASSAY,
input_type="user",
type_field="experimenter",
type_i18n="main.models.Assay.experimenter",
type_name="Assay Experimenter",
uuid="15105bee-e9f1-4290-92b2-d7fdcb3ad68d",
),
Metadata(
for_context=ASSAY,
input_type="string",
type_field="name",
type_i18n="main.models.Assay.name",
type_name="Assay Name",
uuid="33125862-66b2-4d22-8966-282eb7142a45",
),
Metadata(
for_context=LINE,
input_type="carbon_source",
type_field="carbon_source",
type_i18n="main.models.Line.carbon_source",
type_name="Carbon Source(s)",
uuid="4ddaf92a-1623-4c30-aa61-4f7407acfacc",
),
Metadata(
for_context=LINE,
input_type="checkbox",
type_field="control",
type_i18n="main.models.Line.control",
type_name="Control",
uuid="8aa26735-e184-4dcd-8dd1-830ec240f9e1",
),
Metadata(
for_context=LINE,
input_type="user",
type_field="contact",
type_i18n="main.models.Line.contact",
type_name="Line Contact",
uuid="13672c8a-2a36-43ed-928f-7d63a1a4bd51",
),
Metadata(
for_context=LINE,
input_type="textarea",
type_field="description",
type_i18n="main.models.Line.description",
type_name="Line Description",
uuid="5fe84549-9a97-47d2-a897-8c18dd8fd34a",
),
Metadata(
for_context=LINE,
input_type="user",
type_field="experimenter",
type_i18n="main.models.Line.experimenter",
type_name="Line Experimenter",
uuid="974c3367-f0c5-461d-bd85-37c1a269d49e",
),
Metadata(
for_context=LINE,
input_type="string",
type_field="name",
type_i18n="main.models.Line.name",
type_name="Line Name",
uuid="b388bcaa-d14b-4d7f-945e-a6fcb60142f2",
),
Metadata(
for_context=LINE,
input_type="strain",
type_field="strains",
type_i18n="main.models.Line.strains",
type_name="Strain(s)",
uuid="292f1ca7-30de-4ba1-89cd-87d2f6291416",
),
# "true" metadata, but directly referenced by code for specific purposes
Metadata(
default_value="--",
for_context=LINE,
input_type="media",
type_i18n="main.models.Line.Media",
type_name="Media",
uuid="463546e4-a67e-4471-a278-9464e78dbc9d",
),
Metadata(
for_context=ASSAY,
# TODO: consider making this: input_type="readonly"
input_type="string",
type_i18n="main.models.Assay.original",
type_name="Original Name",
uuid="5ef6500e-0f8b-4eef-a6bd-075bcb655caa",
),
Metadata(
for_context=LINE,
input_type="replicate",
type_i18n="main.models.Line.replicate",
type_name="Replicate",
uuid="71f5cd94-4dd4-45ca-a926-9f0717631799",
),
Metadata(
for_context=ASSAY,
input_type="time",
type_i18n="main.models.Assay.Time",
type_name="Time",
uuid="6629231d-4ef0-48e3-a21e-df8db6dfbb72",
),
)
_SYSTEM_DEF = {t.type_name: t for t in _SYSTEM_TYPES}
SYSTEM = {t.type_name: t.uuid for t in _SYSTEM_TYPES}
# optionally link several metadata types into a common group
group = models.ForeignKey(
MetadataGroup,
blank=True,
help_text=_("Group for this Metadata Type"),
null=True,
on_delete=models.PROTECT,
verbose_name=_("Group"),
)
# a default label for the type; should normally use i18n lookup for display
type_name = VarCharField(
help_text=_("Name for Metadata Type"), verbose_name=_("Name")
)
# an i18n lookup for type label
type_i18n = VarCharField(
blank=True,
help_text=_("i18n key used for naming this Metadata Type."),
null=True,
verbose_name=_("i18n Key"),
)
# field to store metadata, or None if stored in metadata
type_field = VarCharField(
blank=True,
default=None,
help_text=_(
"Model field where metadata is stored; blank stores in metadata dictionary."
),
null=True,
verbose_name=_("Field Name"),
)
# type of the input on front-end; support checkboxes, autocompletes, etc
# blank/null falls back to plain text input field
input_type = VarCharField(
blank=True,
help_text=_("Type of input fields for values of this Metadata Type."),
null=True,
verbose_name=_("Input Type"),
)
# a default value to use if the field is left blank
default_value = VarCharField(
blank=True,
help_text=_("Default value for this Metadata Type."),
verbose_name=_("Default Value"),
)
# label used to prefix values
prefix = VarCharField(
blank=True,
help_text=_("Prefix text appearing before values of this Metadata Type."),
verbose_name=_("Prefix"),
)
# label used to postfix values (e.g. unit specifier)
postfix = VarCharField(
blank=True,
help_text=_("Postfix text appearing after values of this Metadata Type."),
verbose_name=_("Postfix"),
)
# target object for metadata
for_context = VarCharField(
choices=CONTEXT_SET,
help_text=_("Type of EDD Object this Metadata Type may be added to."),
verbose_name=_("Context"),
)
# linking together EDD instances will be easier later if we define UUIDs now
uuid = models.UUIDField(
editable=False,
help_text=_("Unique identifier for this Metadata Type."),
unique=True,
verbose_name=_("UUID"),
)
@classmethod
@classmethod
def system(cls, name):
"""Load a pre-defined system-wide MetadataType."""
typedef = cls._SYSTEM_DEF.get(name, None)
if typedef is None:
raise cls.DoesNotExist
fields = {f.name for f in dataclasses.fields(Metadata)}
defaults = {k: v for k, v in typedef.__dict__.items() if k in fields and v}
meta, created = cls.objects.get_or_create(uuid=typedef.uuid, defaults=defaults)
return meta
def decode_value(self, value):
"""
Default MetadataType class reflects back the passed value loaded from
JSON. Subclasses may try to modify the value to convert to arbitrary
Python values instead of a JSON-compatible dict.
"""
return value
def encode_value(self, value):
"""
Default MetadataType class reflects back the passed value to send to
JSON. Subclasses may try to modify the value to serialize arbitrary
Python values to a JSON-compatible value.
"""
return value
class EDDMetadata(models.Model):
"""Base class for EDD models supporting metadata."""
metadata = models.JSONField(
blank=True,
help_text=_("JSON-based metadata dictionary."),
default=dict,
verbose_name=_("Metadata"),
)
def metadata_add(self, metatype, value, append=True):
"""
Adds metadata to the object.
By default, if there is already metadata of the same type, the value is
appended to a list with previous value(s). Set kwarg `append` to False
to overwrite previous values.
"""
if not self.allow_metadata(metatype):
raise ValueError(
f"The metadata type '{metatype.type_name}' does not apply "
f"to {type(self)} objects."
)
if metatype.type_field is None:
if append:
prev = self.metadata_get(metatype)
if hasattr(prev, "append"):
prev.append(value)
value = prev
elif prev is not None:
value = [prev, value]
self.metadata[metatype.pk] = metatype.encode_value(value)
else:
temp = getattr(self, metatype.type_field)
if hasattr(temp, "add"):
if append:
temp.add(value)
else:
setattr(self, metatype.type_field, [value])
else:
setattr(self, metatype.type_field, value)
def metadata_clear(self, metatype):
"""Removes all metadata of the type from this object."""
if metatype.type_field is None:
self.metadata.pop(metatype.pk, None)
# for backward-compatibility, also check string version
self.metadata.pop(f"{metatype.pk}", None)
else:
temp = getattr(self, metatype.type_field)
if hasattr(temp, "clear"):
temp.clear()
else:
setattr(self, metatype.type_field, None)
def metadata_get(self, metatype, default=None):
"""Returns the metadata on this object matching the type."""
if metatype.type_field is None:
# for backward-compatibility, also check string version
value = self.metadata.get(
metatype.pk, self.metadata.get(f"{metatype.pk}", None)
)
if value is None:
return default
return metatype.decode_value(value)
return getattr(self, metatype.type_field)
def metadata_remove(self, metatype, value):
"""Removes metadata with a value matching the argument for the type."""
sentinel = object()
prev = self.metadata_get(metatype, default=sentinel)
# only act when metatype already existed
if prev is not sentinel:
if value == prev:
# clear for single values
self.metadata_clear(metatype)
elif hasattr(prev, "remove"):
# for lists, call remove
try:
prev.remove(value)
self.metadata_add(metatype, prev, append=False)
except ValueError:
# don't care if the value didn't exist
pass
| [
37811,
5841,
1424,
329,
9041,
20150,
526,
15931,
198,
198,
11748,
4818,
330,
28958,
198,
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
376,
11,
11138,
66,
198,
6738... | 2.076818 | 5,871 |
App.open("java -jar C:/JabRef-4.2-fat.jar")
wait(30)
click("1529632189350.png")
wait(2)
click("1529632296782.png")
wait(2)
click("1530899089323.png")
wait(2)
click("1530899105356.png")
wait(2)
click("1530899134798.png")
wait(2)
click("1530899120685.png")
wait(2)
click("1530899134798.png")
wait(2)
click("1530899165770.png")
wait(2)
click("1530899134798.png")
wait(2)
click("1530899192513.png")
wait(2)
click("1530899134798.png")
wait(2)
click("1530899212578.png")
| [
4677,
13,
9654,
7203,
12355,
532,
9491,
327,
14079,
41,
397,
8134,
12,
19,
13,
17,
12,
17359,
13,
9491,
4943,
198,
17077,
7,
1270,
8,
198,
12976,
7203,
1314,
27137,
2624,
1507,
6052,
1120,
13,
11134,
4943,
198,
17077,
7,
17,
8,
19... | 2.084821 | 224 |
#!/usr/bin/python
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
# Get and set email flags for a given user
import optparse
import sys
# SYNC_ME: Search: Scripts: Load pyserver.
import os
import sys
sys.path.insert(0, os.path.abspath('%s/util'
% (os.path.abspath(os.curdir),)))
import pyserver_glue
import conf
import g
import logging
from util_ import logging2
from util_.console import Console
log_level = logging.DEBUG
#log_level = logging2.VERBOSE2
#log_level = logging2.VERBOSE4
#log_level = logging2.VERBOSE
conf.init_logging(True, True, Console.getTerminalSize()[0]-1, log_level)
log = g.log.getLogger('email_flags')
# ***
from gwis import user_email
from util_ import db_glue
usage = '''
$ export PYSERVER_HOME= location of your pyserver directory
View flags: $./\%prog USER
Set flags: $./\%prog --FLAG VALUE EMAIL_ADDRESS|USERNAME
Flags:
--enable-email enable a user to receive emails
--enable-research-email enable a user to receive research related emails
--enable-wr-digest enable watch region notification daily digests
--dont-study exclude a user from analysis (e.g. a Cyclopath dev)
--bouncing flag a users email address as bouncing
--login-permitted disable login for a user'''
valid_flags = [
'enable-email',
'enable-research-email',
'enable-wr-digest',
'dont-study',
'bouncing',
'login-permitted',
]
if (__name__ == '__main__'):
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
2,
15069,
357,
66,
8,
4793,
12,
6390,
3310,
658,
286,
262,
2059,
286,
8919,
13,
198,
2,
1114,
15665,
2846,
11,
766,
262,
2393,
38559,
24290,
13,
198,
198,
2,
3497,
290,
900,
3053,
... | 2.666093 | 581 |
import pytest
import threading
from unittest.mock import Mock, call
from quickrpc.promise import Promise, PromiseDoneError, PromiseTimeoutError, PromiseDeadlockError
@pytest.fixture
@pytest.fixture
| [
11748,
12972,
9288,
198,
11748,
4704,
278,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
44123,
11,
869,
198,
6738,
2068,
81,
14751,
13,
16963,
786,
1330,
34920,
11,
34920,
45677,
12331,
11,
34920,
48031,
12331,
11,
34920,
20489,
5354,
... | 3.366667 | 60 |
import numpy as np
import scipy.ndimage as nd
import pyKinectTools.algs.Dijkstras as dgn
# from pyKinectTools.utils.DepthUtils import *
from pyKinectTools.utils.DepthUtils import depthIm2PosIm
from copy import deepcopy
from skimage.draw import circle
from IPython import embed
from pylab import *
def geodesic_extrema_MPI(im_pos, centroid=None, iterations=1, visualize=False, box=None):
'''
im : im_pos (NxMx3)
'''
if centroid==None:
try:
centroid = np.array(nd.center_of_mass(im_pos[:,:,2]), dtype=np.int16)
except:
return np.array([])
if box is not None:
im_pos = im_pos[box]
im_pos = np.ascontiguousarray(im_pos, dtype=np.int16)
if visualize:
cost_map = np.zeros([im_pos.shape[0], im_pos.shape[1]], dtype=np.uint16)
extrema = dgn.geodesic_map_MPI(cost_map, im_pos, np.array(centroid, dtype=np.int16), iterations, 1)
cost_map = np.array(extrema[-1])
extrema = extrema[:-1]
extrema = np.array([x for x in extrema])
return extrema, cost_map
else:
extrema = np.array(dgn.geodesic_extrema_MPI(im_pos, np.array(centroid, dtype=np.int16), iterations))
return extrema
def connect_extrema(im_pos, target, markers, visualize=False):
'''
im_pos : XYZ positions of each point in image formation (n x m x 3)
'''
height, width,_ = im_pos.shape
centroid = np.array(target)
im_pos = np.ascontiguousarray(im_pos.astype(np.int16))
cost_map = np.ascontiguousarray(np.zeros([height, width], dtype=np.uint16))
extrema = dgn.geodesic_map_MPI(cost_map, im_pos, np.array(centroid, dtype=np.int16), 1, 1)
cost_map = extrema[-1]
trails = []
for m in markers:
trail = dgn.geodesic_trail(cost_map.copy()+(32000*(im_pos[:,:,2]==0)).astype(np.uint16), np.array(m, dtype=np.int16))
trails += [trail.copy()]
if visualize:
cost_map = deepcopy(cost_map)
circ = circle(markers[0][0],markers[0][1], 5)
circ = np.array([np.minimum(circ[0], height-1), np.minimum(circ[1], width-1)])
circ = np.array([np.maximum(circ[0], 0), np.maximum(circ[1], 0)])
cost_map[circ[0], circ[1]] = 0
for i,t in enumerate(trails[1:]):
# embed()
cost_map[t[:,0], t[:,1]] = 0
circ = circle(markers[i+1][0],markers[i+1][1], 5)
circ = np.array([np.minimum(circ[0], height-1), np.minimum(circ[1], width-1)])
circ = np.array([np.maximum(circ[0], 0), np.maximum(circ[1], 0)])
cost_map[circ[0], circ[1]] = 0
return trails, cost_map
else:
return trails
def distance_map(im, centroid, scale=1):
'''
---Parameters---
im_depth :
centroid :
---Returns---
distance_map
'''
im_depth = np.ascontiguousarray(im.copy())
objSize = im_depth.shape
max_value = 32000
mask = im_depth > 0
# Get discrete form of position/depth matrix
# embed()
depth_min = im_depth[mask].min()
depth_max = im_depth[mask].max()
depth_diff = depth_max - depth_min
if depth_diff < 1:
depth_diff = 1
scale_to = scale / float(depth_diff)
# Ensure the centroid is within the boundaries
# Segfaults if on the very edge(!) so set border as 1 to resolution-2
centroid[0] = centroid[0] if centroid[0] > 0 else 1
centroid[0] = centroid[0] if centroid[0] < im.shape[0]-1 else im.shape[0]-2
centroid[1] = centroid[1] if centroid[1] > 0 else 1
centroid[1] = centroid[1] if centroid[1] < im.shape[1]-1 else im.shape[1]-2
# Scale depth image
im_depth_scaled = np.ascontiguousarray(np.array( (im_depth-depth_min)*scale_to, dtype=np.uint16))
# im_depth_scaled = np.ascontiguousarray(np.array( (im_depth-depth_min), dtype=np.uint16))
im_depth_scaled *= mask
# Initialize all but starting point as max
distance_map = np.zeros([objSize[0],objSize[1]], dtype=np.uint16)+max_value
distance_map[centroid[0], centroid[1]] = 0
# Set which pixels are in/out of bounds
visited_map = np.zeros_like(distance_map, dtype=np.uint8)
visited_map[-mask] = 255
centroid = np.array(centroid, dtype=np.int16)
# embed()
dgn.distance_map(distance_map, visited_map, im_depth_scaled.astype(np.uint16), centroid, int(scale))
return distance_map.copy()
def generateKeypoints(im, centroid, iterations=10, scale=6):
'''
---Parameters---
im_depth :
centroid :
---Returns---
extrema
distance_map
'''
x,y = centroid
maps = []
extrema = []
# Get N distance maps. For 2..N centroid is previous farthest distance.
for i in range(iterations):
im_dist = distance_map(np.ascontiguousarray(im.copy()), centroid=[x,y], scale=scale)
im_dist[im_dist>=32000] = 0
maps += [im_dist.copy()]
max_ = np.argmax(np.min(np.dstack(maps),-1))
max_px = np.unravel_index(max_, im.shape)
x,y = max_px
extrema += [[x,y]]
im_min = np.min(np.dstack(maps),-1)
im_min = (im_min/(float(im_min.max())/255.)).astype(np.uint8)
# Visualize
im -= im[im>0].min()
for c in extrema:
# im_min[c[0]-3:c[0]+4, c[1]-3:c[1]+4] = im_min.max()
im[c[0]-3:c[0]+4, c[1]-3:c[1]+4] = im.max()
import cv2
cv2.imshow("Extrema", im/float(im.max()))
# cv2.imshow("Extrema", im_min/float(im_min.max()))
cv2.waitKey(30)
return extrema, im_min
''' --- Other functions --- '''
| [
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
358,
9060,
355,
299,
67,
198,
11748,
12972,
49681,
478,
33637,
13,
14016,
82,
13,
35,
45961,
2536,
292,
355,
288,
4593,
198,
198,
2,
422,
12972,
49681,
478,
33637,
13,
267... | 2.332552 | 2,132 |
import asynctest
import asynctest.mock as amock
from opsdroid.core import OpsDroid
from opsdroid.matchers import match_always
from opsdroid.message import Message
from opsdroid.parsers.always import parse_always
class TestParserAlways(asynctest.TestCase):
"""Test the opsdroid always parser."""
| [
198,
11748,
355,
2047,
310,
395,
198,
11748,
355,
2047,
310,
395,
13,
76,
735,
355,
716,
735,
198,
198,
6738,
39628,
67,
3882,
13,
7295,
1330,
26123,
35,
3882,
198,
6738,
39628,
67,
3882,
13,
6759,
3533,
1330,
2872,
62,
33770,
198,
... | 3.15625 | 96 |
"""
Manages VMware storage policies
(called pbm because the vCenter endpoint is /pbm)
Examples
========
Storage policy
--------------
.. code-block:: python
{
"name": "salt_storage_policy"
"description": "Managed by Salt. Random capability values.",
"resource_type": "STORAGE",
"subprofiles": [
{
"capabilities": [
{
"setting": {
"type": "scalar",
"value": 2
},
"namespace": "VSAN",
"id": "hostFailuresToTolerate"
},
{
"setting": {
"type": "scalar",
"value": 2
},
"namespace": "VSAN",
"id": "stripeWidth"
},
{
"setting": {
"type": "scalar",
"value": true
},
"namespace": "VSAN",
"id": "forceProvisioning"
},
{
"setting": {
"type": "scalar",
"value": 50
},
"namespace": "VSAN",
"id": "proportionalCapacity"
},
{
"setting": {
"type": "scalar",
"value": 0
},
"namespace": "VSAN",
"id": "cacheReservation"
}
],
"name": "Rule-Set 1: VSAN",
"force_provision": null
}
],
}
Dependencies
============
- pyVmomi Python Module
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See
`Issue #29537 <https://github.com/saltstack/salt/issues/29537>` for more
information.
"""
import copy
import logging
import sys
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils.dictdiffer import recursive_diff
from salt.utils.listdiffer import list_diff
# External libraries
try:
from pyVmomi import VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
# Get Logging Started
log = logging.getLogger(__name__)
def mod_init(low):
"""
Init function
"""
return True
def default_vsan_policy_configured(name, policy):
"""
Configures the default VSAN policy on a vCenter.
The state assumes there is only one default VSAN policy on a vCenter.
policy
Dict representation of a policy
"""
# TODO Refactor when recurse_differ supports list_differ
# It's going to make the whole thing much easier
policy_copy = copy.deepcopy(policy)
proxy_type = __salt__["vsphere.get_proxy_type"]()
log.trace("proxy_type = %s", proxy_type)
# All allowed proxies have a shim execution module with the same
# name which implementes a get_details function
# All allowed proxies have a vcenter detail
vcenter = __salt__["{}.get_details".format(proxy_type)]()["vcenter"]
log.info("Running %s on vCenter '%s'", name, vcenter)
log.trace("policy = %s", policy)
changes_required = False
ret = {"name": name, "changes": {}, "result": None, "comment": None}
comments = []
changes = {}
changes_required = False
si = None
try:
# TODO policy schema validation
si = __salt__["vsphere.get_service_instance_via_proxy"]()
current_policy = __salt__["vsphere.list_default_vsan_policy"](si)
log.trace("current_policy = {}".format(current_policy))
# Building all diffs between the current and expected policy
# XXX We simplify the comparison by assuming we have at most 1
# sub_profile
if policy.get("subprofiles"):
if len(policy["subprofiles"]) > 1:
raise ArgumentValueError(
"Multiple sub_profiles ({0}) are not "
"supported in the input policy"
)
subprofile = policy["subprofiles"][0]
current_subprofile = current_policy["subprofiles"][0]
capabilities_differ = list_diff(
current_subprofile["capabilities"],
subprofile.get("capabilities", []),
key="id",
)
del policy["subprofiles"]
if subprofile.get("capabilities"):
del subprofile["capabilities"]
del current_subprofile["capabilities"]
# Get the subprofile diffs without the capability keys
subprofile_differ = recursive_diff(current_subprofile, dict(subprofile))
del current_policy["subprofiles"]
policy_differ = recursive_diff(current_policy, policy)
if policy_differ.diffs or capabilities_differ.diffs or subprofile_differ.diffs:
if (
"name" in policy_differ.new_values
or "description" in policy_differ.new_values
):
raise ArgumentValueError(
"'name' and 'description' of the default VSAN policy "
"cannot be updated"
)
changes_required = True
if __opts__["test"]:
str_changes = []
if policy_differ.diffs:
str_changes.extend(
[change for change in policy_differ.changes_str.split("\n")]
)
if subprofile_differ.diffs or capabilities_differ.diffs:
str_changes.append("subprofiles:")
if subprofile_differ.diffs:
str_changes.extend(
[
" {}".format(change)
for change in subprofile_differ.changes_str.split("\n")
]
)
if capabilities_differ.diffs:
str_changes.append(" capabilities:")
str_changes.extend(
[
" {}".format(change)
for change in capabilities_differ.changes_str2.split(
"\n"
)
]
)
comments.append(
"State {} will update the default VSAN policy on "
"vCenter '{}':\n{}"
"".format(name, vcenter, "\n".join(str_changes))
)
else:
__salt__["vsphere.update_storage_policy"](
policy=current_policy["name"],
policy_dict=policy_copy,
service_instance=si,
)
comments.append(
"Updated the default VSAN policy in vCenter '{}'".format(vcenter)
)
log.info(comments[-1])
new_values = policy_differ.new_values
new_values["subprofiles"] = [subprofile_differ.new_values]
new_values["subprofiles"][0][
"capabilities"
] = capabilities_differ.new_values
if not new_values["subprofiles"][0]["capabilities"]:
del new_values["subprofiles"][0]["capabilities"]
if not new_values["subprofiles"][0]:
del new_values["subprofiles"]
old_values = policy_differ.old_values
old_values["subprofiles"] = [subprofile_differ.old_values]
old_values["subprofiles"][0][
"capabilities"
] = capabilities_differ.old_values
if not old_values["subprofiles"][0]["capabilities"]:
del old_values["subprofiles"][0]["capabilities"]
if not old_values["subprofiles"][0]:
del old_values["subprofiles"]
changes.update(
{"default_vsan_policy": {"new": new_values, "old": old_values}}
)
log.trace(changes)
__salt__["vsphere.disconnect"](si)
except CommandExecutionError as exc:
log.error("Error: {}".format(exc))
if si:
__salt__["vsphere.disconnect"](si)
if not __opts__["test"]:
ret["result"] = False
ret.update(
{"comment": exc.strerror, "result": False if not __opts__["test"] else None}
)
return ret
if not changes_required:
# We have no changes
ret.update(
{
"comment": (
"Default VSAN policy in vCenter "
"'{}' is correctly configured. "
"Nothing to be done.".format(vcenter)
),
"result": True,
}
)
else:
ret.update(
{
"comment": "\n".join(comments),
"changes": changes,
"result": None if __opts__["test"] else True,
}
)
return ret
def storage_policies_configured(name, policies):
"""
Configures storage policies on a vCenter.
policies
List of dict representation of the required storage policies
"""
comments = []
changes = []
changes_required = False
ret = {"name": name, "changes": {}, "result": None, "comment": None}
log.trace("policies = {}".format(policies))
si = None
try:
proxy_type = __salt__["vsphere.get_proxy_type"]()
log.trace("proxy_type = {}".format(proxy_type))
# All allowed proxies have a shim execution module with the same
# name which implementes a get_details function
# All allowed proxies have a vcenter detail
vcenter = __salt__["{}.get_details".format(proxy_type)]()["vcenter"]
log.info("Running state '%s' on vCenter '%s'", name, vcenter)
si = __salt__["vsphere.get_service_instance_via_proxy"]()
current_policies = __salt__["vsphere.list_storage_policies"](
policy_names=[policy["name"] for policy in policies], service_instance=si
)
log.trace("current_policies = {}".format(current_policies))
# TODO Refactor when recurse_differ supports list_differ
# It's going to make the whole thing much easier
for policy in policies:
policy_copy = copy.deepcopy(policy)
filtered_policies = [
p for p in current_policies if p["name"] == policy["name"]
]
current_policy = filtered_policies[0] if filtered_policies else None
if not current_policy:
changes_required = True
if __opts__["test"]:
comments.append(
"State {} will create the storage policy "
"'{}' on vCenter '{}'"
"".format(name, policy["name"], vcenter)
)
else:
__salt__["vsphere.create_storage_policy"](
policy["name"], policy, service_instance=si
)
comments.append(
"Created storage policy '{}' on "
"vCenter '{}'".format(policy["name"], vcenter)
)
changes.append({"new": policy, "old": None})
log.trace(comments[-1])
# Continue with next
continue
# Building all diffs between the current and expected policy
# XXX We simplify the comparison by assuming we have at most 1
# sub_profile
if policy.get("subprofiles"):
if len(policy["subprofiles"]) > 1:
raise ArgumentValueError(
"Multiple sub_profiles ({0}) are not "
"supported in the input policy"
)
subprofile = policy["subprofiles"][0]
current_subprofile = current_policy["subprofiles"][0]
capabilities_differ = list_diff(
current_subprofile["capabilities"],
subprofile.get("capabilities", []),
key="id",
)
del policy["subprofiles"]
if subprofile.get("capabilities"):
del subprofile["capabilities"]
del current_subprofile["capabilities"]
# Get the subprofile diffs without the capability keys
subprofile_differ = recursive_diff(current_subprofile, dict(subprofile))
del current_policy["subprofiles"]
policy_differ = recursive_diff(current_policy, policy)
if (
policy_differ.diffs
or capabilities_differ.diffs
or subprofile_differ.diffs
):
changes_required = True
if __opts__["test"]:
str_changes = []
if policy_differ.diffs:
str_changes.extend(
[change for change in policy_differ.changes_str.split("\n")]
)
if subprofile_differ.diffs or capabilities_differ.diffs:
str_changes.append("subprofiles:")
if subprofile_differ.diffs:
str_changes.extend(
[
" {}".format(change)
for change in subprofile_differ.changes_str.split(
"\n"
)
]
)
if capabilities_differ.diffs:
str_changes.append(" capabilities:")
str_changes.extend(
[
" {}".format(change)
for change in capabilities_differ.changes_str2.split(
"\n"
)
]
)
comments.append(
"State {} will update the storage policy '{}'"
" on vCenter '{}':\n{}"
"".format(name, policy["name"], vcenter, "\n".join(str_changes))
)
else:
__salt__["vsphere.update_storage_policy"](
policy=current_policy["name"],
policy_dict=policy_copy,
service_instance=si,
)
comments.append(
"Updated the storage policy '{}'"
"in vCenter '{}'"
"".format(policy["name"], vcenter)
)
log.info(comments[-1])
# Build new/old values to report what was changed
new_values = policy_differ.new_values
new_values["subprofiles"] = [subprofile_differ.new_values]
new_values["subprofiles"][0][
"capabilities"
] = capabilities_differ.new_values
if not new_values["subprofiles"][0]["capabilities"]:
del new_values["subprofiles"][0]["capabilities"]
if not new_values["subprofiles"][0]:
del new_values["subprofiles"]
old_values = policy_differ.old_values
old_values["subprofiles"] = [subprofile_differ.old_values]
old_values["subprofiles"][0][
"capabilities"
] = capabilities_differ.old_values
if not old_values["subprofiles"][0]["capabilities"]:
del old_values["subprofiles"][0]["capabilities"]
if not old_values["subprofiles"][0]:
del old_values["subprofiles"]
changes.append({"new": new_values, "old": old_values})
else:
# No diffs found - no updates required
comments.append(
"Storage policy '{}' is up to date. "
"Nothing to be done.".format(policy["name"])
)
__salt__["vsphere.disconnect"](si)
except CommandExecutionError as exc:
log.error("Error: {}".format(exc))
if si:
__salt__["vsphere.disconnect"](si)
if not __opts__["test"]:
ret["result"] = False
ret.update(
{"comment": exc.strerror, "result": False if not __opts__["test"] else None}
)
return ret
if not changes_required:
# We have no changes
ret.update(
{
"comment": (
"All storage policy in vCenter "
"'{}' is correctly configured. "
"Nothing to be done.".format(vcenter)
),
"result": True,
}
)
else:
ret.update(
{
"comment": "\n".join(comments),
"changes": {"storage_policies": changes},
"result": None if __opts__["test"] else True,
}
)
return ret
def default_storage_policy_assigned(name, policy, datastore):
"""
Assigns a default storage policy to a datastore
policy
Name of storage policy
datastore
Name of datastore
"""
log.info(
"Running state {} for policy '{}', datastore '{}'."
"".format(name, policy, datastore)
)
changes = {}
changes_required = False
ret = {"name": name, "changes": {}, "result": None, "comment": None}
si = None
try:
si = __salt__["vsphere.get_service_instance_via_proxy"]()
existing_policy = __salt__["vsphere.list_default_storage_policy_of_datastore"](
datastore=datastore, service_instance=si
)
if existing_policy["name"] == policy:
comment = (
"Storage policy '{}' is already assigned to "
"datastore '{}'. Nothing to be done."
"".format(policy, datastore)
)
else:
changes_required = True
changes = {
"default_storage_policy": {
"old": existing_policy["name"],
"new": policy,
}
}
if __opts__["test"]:
comment = (
"State {} will assign storage policy '{}' to datastore '{}'."
).format(name, policy, datastore)
else:
__salt__["vsphere.assign_default_storage_policy_to_datastore"](
policy=policy, datastore=datastore, service_instance=si
)
comment = ("Storage policy '{} was assigned to datastore '{}'.").format(
policy, name
)
log.info(comment)
except CommandExecutionError as exc:
log.error("Error: {}".format(exc))
if si:
__salt__["vsphere.disconnect"](si)
ret.update(
{"comment": exc.strerror, "result": False if not __opts__["test"] else None}
)
return ret
ret["comment"] = comment
if changes_required:
ret.update({"changes": changes, "result": None if __opts__["test"] else True})
else:
ret["result"] = True
return ret
| [
37811,
198,
5124,
1095,
37754,
6143,
4788,
198,
7,
7174,
279,
20475,
780,
262,
410,
23656,
36123,
318,
1220,
79,
20475,
8,
198,
198,
27730,
198,
2559,
198,
198,
31425,
2450,
198,
26171,
198,
198,
492,
2438,
12,
9967,
3712,
21015,
628,... | 1.852619 | 11,053 |
# Always prefer setuptools over distutils
from setuptools import setup
setup(
name="mattermostwrapper",
packages=['mattermostwrapper'],
version="2.2",
author="Brian Hopkins",
author_email="btotharye@gmail.com",
url='https://github.com/btotharye/mattermostwrapper.git',
download_url='https://github.com/btotharye/mattermostwrapper/archive/2.2.tar.gz',
description=("A mattermost api v4 wrapper to interact with api"),
license="MIT",
install_requires=[
'requests',
],
classifiers=[],
)
| [
2,
16622,
4702,
900,
37623,
10141,
625,
1233,
26791,
198,
6738,
900,
37623,
10141,
1330,
9058,
628,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
47635,
1712,
48553,
1600,
198,
220,
220,
220,
10392,
28,
17816,
47635,
1712,
48553,
... | 2.712871 | 202 |
from typing import List
import pytest
import numpy as np
import pandas as pd
from obp.dataset import (
linear_reward_function,
logistic_reward_function,
linear_behavior_policy_logit,
SyntheticSlateBanditDataset,
)
from obp.types import BanditFeedback
# n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, random_state, description
invalid_input_of_init = [
(
"4",
3,
2,
"binary",
"independent",
"pbm",
1,
"n_unique_action must be an integer larger than 1",
),
(
1,
3,
2,
"binary",
"independent",
"pbm",
1,
"n_unique_action must be an integer larger than 1",
),
(
5,
"4",
2,
"binary",
"independent",
"pbm",
1,
"len_list must be an integer such that",
),
(
5,
-1,
2,
"binary",
"independent",
"pbm",
1,
"len_list must be an integer such that",
),
(
5,
10,
2,
"binary",
"independent",
"pbm",
1,
"len_list must be an integer such that",
),
(
5,
3,
0,
"binary",
"independent",
"pbm",
1,
"dim_context must be a positive integer",
),
(
5,
3,
"2",
"binary",
"independent",
"pbm",
1,
"dim_context must be a positive integer",
),
(5, 3, 2, "aaa", "independent", "pbm", 1, "reward_type must be either"),
(5, 3, 2, "binary", "aaa", "pbm", 1, "reward_structure must be one of"),
(5, 3, 2, "binary", "independent", "aaa", 1, "click_model must be one of"),
(5, 3, 2, "binary", "independent", "pbm", "x", "random_state must be an integer"),
(5, 3, 2, "binary", "independent", "pbm", None, "random_state must be an integer"),
]
@pytest.mark.parametrize(
"n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, random_state, description",
invalid_input_of_init,
)
# n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, behavior_policy_function, reward_function, return_pscore_item_position, description
valid_input_of_obtain_batch_bandit_feedback = [
(
10,
3,
2,
"binary",
123,
1000,
"standard_additive",
None,
linear_behavior_policy_logit,
logistic_reward_function,
False,
"standard_additive",
),
(
10,
3,
2,
"binary",
123,
1000,
"independent",
None,
linear_behavior_policy_logit,
logistic_reward_function,
False,
"independent",
),
(
10,
3,
2,
"binary",
123,
1000,
"cascade_additive",
None,
linear_behavior_policy_logit,
logistic_reward_function,
False,
"cascade_additive",
),
(
10,
3,
2,
"continuous",
123,
1000,
"standard_additive",
None,
linear_behavior_policy_logit,
linear_reward_function,
False,
"standard_additive continuous",
),
(
10,
3,
2,
"continuous",
123,
1000,
"independent",
None,
linear_behavior_policy_logit,
linear_reward_function,
False,
"independent continuous",
),
(
10,
3,
2,
"continuous",
123,
1000,
"cascade_additive",
None,
linear_behavior_policy_logit,
linear_reward_function,
False,
"cascade_additive continuous",
),
(
10,
3,
2,
"continuous",
123,
1000,
"cascade_additive",
None,
None,
None,
False,
"Random policy and reward function (continuous reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"cascade_exponential",
None,
linear_behavior_policy_logit,
logistic_reward_function,
False,
"cascade_exponential (binary reward)",
),
(
10,
3,
2,
"continuous",
123,
1000,
"cascade_exponential",
None,
linear_behavior_policy_logit,
linear_reward_function,
False,
"cascade_exponential (continuous reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"standard_exponential",
None,
linear_behavior_policy_logit,
logistic_reward_function,
False,
"standard_exponential (binary reward)",
),
(
10,
3,
2,
"continuous",
123,
1000,
"standard_exponential",
None,
linear_behavior_policy_logit,
linear_reward_function,
False,
"standard_exponential (continuous reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"cascade_additive",
"cascade",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"cascade_additive, cascade click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"cascade_exponential",
"cascade",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"cascade_exponential, cascade click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"standard_additive",
"cascade",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"standard_additive, cascade click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"standard_exponential",
"cascade",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"standard_exponential, cascade click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"independent",
"cascade",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"independent, cascade click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"cascade_additive",
"pbm",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"cascade_additive, pbm click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"cascade_exponential",
"pbm",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"cascade_exponential, pbm click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"standard_additive",
"pbm",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"standard_additive, pbm click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"standard_exponential",
"pbm",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"standard_exponential, pbm click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"independent",
"pbm",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"independent, pbm click model (binary reward)",
),
]
@pytest.mark.parametrize(
"n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, behavior_policy_function, reward_function, return_pscore_item_position, description",
valid_input_of_obtain_batch_bandit_feedback,
)
| [
6738,
19720,
1330,
7343,
198,
198,
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
909,
79,
13,
19608,
292,
316,
1330,
357,
198,
220,
220,
220,
14174,
62,
260,
904,
62,
8... | 1.844304 | 4,573 |
from unittest import TestCase
from mock import Mock, MagicMock
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim
from cloudshell.cp.vcenter.common.vcenter.task_waiter import SynchronousTaskWaiter
from cloudshell.cp.vcenter.common.vcenter.vmomi_service import pyVmomiService
from cloudshell.cp.vcenter.models.VCenterConnectionDetails import VCenterConnectionDetails
from cloudshell.cp.vcenter.network.dvswitch.name_generator import DvPortGroupNameGenerator
from cloudshell.cp.vcenter.network.vnic.vnic_service import VNicService
from cloudshell.cp.vcenter.vm.dvswitch_connector import *
from cloudshell.cp.vcenter.vm.portgroup_configurer import *
from cloudshell.cp.vcenter.vm.vnic_to_network_mapper import VnicToNetworkMapper
from cloudshell.tests.utils.testing_credentials import TestCredentials
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
15290,
1330,
44123,
11,
6139,
44,
735,
198,
6738,
12972,
53,
320,
13,
8443,
1330,
10880,
13313,
11,
3167,
8443,
198,
6738,
12972,
53,
76,
12753,
1330,
43907,
198,
6738,
6279,
291... | 3.322581 | 248 |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import logging
import threading
LOGGER = logging.getLogger('keybinder')
def configure_logging(log_level=None):
"""Performs basic logging configuration.
:param log_level: logging level, e.g. logging.DEBUG
Default: logging.INFO
:param show_logger_names: bool - flag to show logger names in output
"""
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level or logging.INFO)
class KeyBinder(object):
"""Binds keys to functions globally.
.. code-block:: python
def do(): print('do')
KeyBinder.activate({
'Ctrl-K': do,
})
"""
def __init__(self, keymap=None, listen_events=None):
"""
:param dict keymap: Key name to function mapping.
Example:
.. code-block:: python
def do(): print('do')
{
'Ctrl-K': do,
'1': None, # Just intercept.
}
:param int listen_events: X Events or a combination of them.
Examples:
* Xlib.X.KeyPressMask
* Xlib.X.KeyPressMask | Xlib.X.ButtonReleaseMask
"""
from Xlib import X, XK
from Xlib.display import Display
self.x = X
self.xk = XK
self.disp = Display()
self.screen = self.disp.screen().root
self.events = listen_events or self.x.KeyPressMask
self.keymap = keymap or {}
self.mapped = {}
@classmethod
def activate(cls, keymap=None, listen_events=None, run_thread=False):
"""Alternative constructor.
Performs keys binding and runs a listener thread.
:param dict keymap: Key name to function mapping.
:param int listen_events: X Events or a combination of them.
:param bool run_thread: Run a key listening loop in a thread.
:rtype: KeyBinder
"""
binder = cls(keymap=keymap, listen_events=listen_events)
if keymap:
binder.register_keys()
else:
binder.sniff()
if run_thread:
binder.run_thread()
else:
binder.listen()
return binder
def listen(self):
"""Run keys events listening loop."""
events = self.events
screen = self.screen
mapped = self.mapped
while True:
event = screen.display.next_event()
capture = event.type & events
if not capture:
continue
keycode = event.detail
key, handler = mapped.get(keycode, (keycode, None))
if handler:
handler()
else:
LOGGER.info('Intercepted key: %s', key)
def run_thread(self):
"""Runs key events listening loop in a thread."""
grabber = threading.Thread(target=self.listen)
grabber.daemon = True
grabber.start()
def register_key(self, key, modifier_default='NumLock'):
"""Registers a key to listen to.
:param str|unicode|int key: Key name or code.
:param str|unicode modifier_default: Use this modifier if none specified.
:rtype: bool
"""
x = self.x
modifiers_map = {
'Ctrl': x.ControlMask, # 37 105
'Shift': x.ShiftMask, # 50 62
'CapsLock': x.LockMask, # 66
'Alt': x.Mod1Mask, # 64 108
'NumLock': x.Mod2Mask, # 77
'Super': x.Mod4Mask, # 133 134
}
has_error = []
modifier_alias = None
modifiers, keycode = self._parse_key(key)
modifier_alias = modifier_alias or modifier_default
modifier_mask = 0
for modifier in modifiers:
modifier_mask |= modifiers_map[modifier]
# Simulate X.AnyModifier as it leads to BadAccess, as if somebody has already grabbed it before us.
modifiers_all = [
modifier_mask,
modifier_mask | modifiers_map['NumLock'],
modifier_mask | modifiers_map['CapsLock'],
modifier_mask | modifiers_map['NumLock'] | modifiers_map['CapsLock'],
]
for mod in modifiers_all:
self.screen.grab_key(keycode, mod, True, x.GrabModeAsync, x.GrabModeAsync, on_error)
success = not has_error
if success:
self.mapped[keycode] = (key, self.keymap[key])
return success
def register_keys(self):
"""Registers all keys from current keymap."""
# screen.change_attributes(event_mask=capture_events)
for key in self.keymap.keys():
if not self.register_key(key):
LOGGER.warning('Unable to register handler for: %s', key)
def sniff(self):
"""Grab all events. Useful for keycode sniffing."""
x = self.x
self.screen.grab_keyboard(self.events, x.GrabModeAsync, x.GrabModeAsync, x.CurrentTime)
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
18931,
198,
11748,
4704,
278,
198,
198,
25294,
30373,
796,
18931,
13,
1136,
11187,
1362,
10786... | 2.18658 | 2,310 |
import json
import dash_html_components as html
import dash
from dash.testing import wait
from dash.dependencies import Input, Output, State, ALL, MATCH
from dash.testing.plugin import *
from .. import BaseDashView
| [
11748,
33918,
198,
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
198,
11748,
14470,
198,
6738,
14470,
13,
33407,
1330,
4043,
198,
6738,
14470,
13,
45841,
3976,
1330,
23412,
11,
25235,
11,
1812,
11,
11096,
11,
337,
11417,
198,
6738... | 3.688525 | 61 |
__author__ = 'Richard'
TAGGING_RECEIVER = "tagging_receiver"
DISTRIBUTOR = "distributor" | [
834,
9800,
834,
796,
705,
22245,
6,
628,
198,
42197,
38,
2751,
62,
2200,
5222,
38757,
796,
366,
12985,
2667,
62,
260,
39729,
1,
198,
26288,
5446,
9865,
3843,
1581,
796,
366,
17080,
2455,
273,
1
] | 2.5 | 36 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is the entry point for the command-line interface (CLI) application.
It can be used as a handy facility for running the task from a command line.
.. note::
To learn more about Click visit the
`project website <http://click.pocoo.org/5/>`_. There is also a very
helpful `tutorial video <https://www.youtube.com/watch?v=kNke39OZ2k0>`_.
To learn more about running Luigi, visit the Luigi project's
`Read-The-Docs <http://luigi.readthedocs.io/en/stable/>`_ page.
.. currentmodule:: covid_data_tracker.cli
.. moduleauthor:: Sid Gupta <team@granular.ai>
"""
import logging
import click
from .__init__ import __version__
from covid_data_tracker.registry import PluginRegistry
from covid_data_tracker.util import plugin_selector, country_downloader
import tabulate
import pandas as pd
LOGGING_LEVELS = {
0: logging.NOTSET,
1: logging.ERROR,
2: logging.WARN,
3: logging.INFO,
4: logging.DEBUG,
} #: a mapping of `verbose` option counts to logging levels
class Info(object):
"""An information object to pass data between CLI functions."""
def __init__(self): # Note: This object must have an empty constructor.
"""Create a new instance."""
self.verbose: int = 0
# pass_info is a decorator for functions that pass 'Info' objects.
#: pylint: disable=invalid-name
pass_info = click.make_pass_decorator(Info, ensure=True)
# Change the options to below to suit the actual options for your task (or
# tasks).
@click.group()
@click.option("--verbose", "-v", count=True, help="Enable verbose output.")
@pass_info
def cli(info: Info, verbose: int):
"""Run covidtracker."""
# Use the verbosity count to determine the logging level...
if verbose > 0:
logging.basicConfig(
level=LOGGING_LEVELS[verbose]
if verbose in LOGGING_LEVELS
else logging.DEBUG
)
click.echo(
click.style(
f"Verbose logging is enabled. "
f"(LEVEL={logging.getLogger().getEffectiveLevel()})",
fg="yellow",
)
)
info.verbose = verbose
@cli.command('list')
@pass_info
def list_countries(_: Info):
"""List all countries for which a plugin is available."""
[click.echo(i) for i in list(PluginRegistry)]
@cli.command()
@click.option("--country", "-c", prompt="Select a country.")
@pass_info
def info(_: Info, country: str):
"""Get country level information on sources and download strategy."""
country_plugin = plugin_selector(country)
info = country_plugin.get_info()
click.echo(tabulate.tabulate(info[1:], info[0]))
@cli.command()
# @click.option("--all", "-A",
# help="Select all countries. (overrides --country)",
# callback=download_all,
# is_flag=True,
# is_eager=True)
@click.option("--country", "-c", help="Select a country.", prompt="Select a country, (or pass nothing to download all)", default="")
@pass_info
def download(_: Info, country: str):
"""Download country level statistics."""
if not country:
click.echo(f"attempting to find available data for every country")
with click.progressbar(list(PluginRegistry)) as countries:
# df = pd.DataFrame()
country_rows = {}
for country in countries:
try:
country_plugin = plugin_selector(country)
country_plugin.fetch()
country_plugin.check_instance_attributes()
country_plugin.create_country_row()
meta = {"Author": country_plugin.AUTHOR,
"Source": country_plugin.UNIQUE_SOURCE,
"Date": country_plugin.DATE}
# if not len(df.columns):
# df.columns = country_plugin.country_row.index
country_rows[country] = dict(country_plugin.country_row,
**meta)
except Exception as e:
print(f"unable to download for {country}")
print(e)
df = pd.DataFrame.from_dict(country_rows, orient="index")
df.to_csv('country_data.csv')
else:
country_downloader(country)
@cli.command()
def version():
"""Get the library version."""
click.echo(click.style(f"{__version__}", bold=True))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
1212,
318,
262,
5726,
966,
329,
262,
3141,
12,
1370,
7071,
357,
5097,
40,
8,
3586,
13,
198,
198,
1... | 2.359811 | 1,901 |
from ..util import run
| [
6738,
11485,
22602,
1330,
1057,
628
] | 4 | 6 |
import click
import pandas as pd
import tensorflow as tf
@click.command()
@click.argument("src", nargs=-1)
@click.argument("dst", nargs=1)
@click.option(
"--type", type=str, default="tf", help="Type of datasets to merge."
) # noqa
@click.option("--debug", is_flag=True, help="Set level logging to DEBUG.")
def merge(src, dst, type, debug):
"""
Merges existing datasets into a single one.
"""
if debug:
tf.logging.set_verbosity(tf.logging.DEBUG)
else:
tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info('Saving records to "{}"'.format(dst))
if type == "tf":
writer = tf.python_io.TFRecordWriter(dst)
total_records = 0
for src_file in src:
total_src_records = 0
for record in tf.python_io.tf_record_iterator(src_file):
writer.write(record)
total_src_records += 1
total_records += 1
tf.logging.info(
'Saved {} records from "{}"'.format(total_src_records, src_file)
)
tf.logging.info('Saved {} to "{}"'.format(total_records, dst))
writer.close()
elif type == "csv":
total_records = 0
dfs = []
for src_file in src:
df = pd.read_csv(src_file, sep=",")
total_src_records = len(df)
tf.logging.info(
'Saved {} csv records from "{}"'.format(total_src_records, src_file)
)
total_records += total_src_records
dfs.append(df)
merged_df = pd.concat(dfs)
merged_df.reset_index(drop=True, inplace=True)
tf.logging.info('Saved {} to "{}"'.format(total_records, dst))
merged_df.to_csv(dst)
| [
11748,
3904,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
11192,
273,
11125,
355,
48700,
628,
198,
31,
12976,
13,
21812,
3419,
198,
31,
12976,
13,
49140,
7203,
10677,
1600,
299,
22046,
10779,
16,
8,
198,
31,
12976,
13,
49140,
720... | 2.075089 | 839 |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 25 20:34:32 2014
@author: Imane
"""
import numpy as np
import matplotlib.pyplot as plt
#from os import listdir
#from os.path import isfile, join
#from zscoring import zscoringNII
#from masking import maskdata
from sklearn.decomposition import PCA
from k_means import kmeans
#Applying PCA and plotting
fn = "dataZM\dataMask2.npy"
d=np.load(fn)
pca = PCA(n_components=2)
pca.fit(d)
dpca=pca.transform(d)
plt.scatter(dpca[:,0], dpca[:,1], marker='o', color='b')
#Applying kmeans and plotting
idx, ctrs = kmeans(dpca, 2)
plt.scatter(dpca[(idx==0),0], dpca[(idx==0),1], marker='o', color='r')
plt.scatter(dpca[(idx==1),0], dpca[(idx==1),1], marker='o', color='b')
plt.scatter(ctrs[:,0], ctrs[:,1], marker='o', color='k', linewidths=5)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
2556,
1679,
1160,
25,
2682,
25,
2624,
1946,
198,
198,
31,
9800,
25,
314,
805,
68,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
11... | 2.263006 | 346 |
from flask import Flask, request, jsonify, make_response
from flask_restplus import Api, Resource, fields
import joblib
import numpy as np
from nltk.corpus import stopwords
import nltk
import json
import stanza
import pandas as pd
from nltk.probability import FreqDist
pd.set_option("display.max_colwidth", 200)
import numpy as np
import re
import spacy
import gensim
from gensim import corpora
import pyLDAvis
import pyLDAvis.gensim
import matplotlib.pyplot as plt
import seaborn as sns
from os.path import isfile, join
from os import listdir
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
from sklearn.preprocessing import normalize
flask_app = Flask(__name__)
app = Api(app = flask_app,
version = "2.0",
title = "Classificador de Assuntos de Acórdãos do TCU",
description = "Prediz o assunto de acórdãos do TCU")
name_space = app.namespace('prediction', description='Prediction APIs')
model = app.model('Prediction params',
{'sepalLength': fields.Float(required = True,
description="Sepal Length",
help="Sepal Length cannot be blank"),
'sepalWidth': fields.Float(required = True,
description="Sepal Width",
help="Sepal Width cannot be blank"),
'petalLength': fields.Float(required = True,
description="Petal Length",
help="Petal Length cannot be blank"),
'petalWidth': fields.Float(required = True,
description="Petal Width",
help="Petal Width cannot be blank")})
classifier = joblib.load('classifier.joblib')
df = joblib.load('df.jolib')
tf_idf_array = joblib.load('tf_idf_array.joblib')
textos = joblib.load('data.joblib')
@name_space.route("/") | [
6738,
42903,
1330,
46947,
11,
2581,
11,
33918,
1958,
11,
787,
62,
26209,
198,
6738,
42903,
62,
2118,
9541,
1330,
5949,
72,
11,
20857,
11,
7032,
198,
11748,
1693,
8019,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
2528,
74,
13,
... | 2.56 | 700 |
"""
刷新指数日线数据
"""
import time
from multiprocessing import Pool
import pandas as pd
from retry.api import retry_call
from ..mongodb import get_db
from ..setting.constants import MAIN_INDEX, MARKET_START, MAX_WORKER
from ..utils import ensure_dtypes
from ..utils.db_utils import to_dict
from ..utils.log_utils import make_logger
from ..websource.wy import fetch_history, get_index_base
logger = make_logger('网易指数日线')
db_name = "wy_index_daily"
col_dtypes = {
'd_cols': ['日期'],
's_cols': ['股票代码', '名称'],
'i_cols': ['成交量', '成交笔数'],
}
| [
37811,
198,
198,
26344,
115,
23877,
108,
162,
234,
229,
46763,
108,
33768,
98,
163,
118,
123,
46763,
108,
162,
235,
106,
198,
198,
37811,
198,
11748,
640,
198,
6738,
18540,
305,
919,
278,
1330,
19850,
198,
198,
11748,
19798,
292,
355,... | 2.048327 | 269 |
import unittest
from ._common import *
from Phen2Gene import Phen2Gene
FOLDER = 'exec'
KBASE_PATH = os.environ.get('KBASE_PATH', '/kbase')
| [
11748,
555,
715,
395,
198,
198,
6738,
47540,
11321,
1330,
1635,
198,
198,
6738,
34828,
17,
39358,
1330,
34828,
17,
39358,
198,
198,
37,
3535,
14418,
796,
705,
18558,
6,
198,
198,
22764,
11159,
62,
34219,
796,
28686,
13,
268,
2268,
13,... | 2.648148 | 54 |
import math
import numpy as np
from rllab.envs.mujoco.gather.gather_env import GatherEnv
from sandbox.finetuning.envs.mujoco.snake_env import SnakeEnv
APPLE = 0
BOMB = 1
| [
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
374,
297,
397,
13,
268,
14259,
13,
76,
23577,
25634,
13,
70,
1032,
13,
70,
1032,
62,
24330,
1330,
402,
1032,
4834,
85,
198,
6738,
35204,
13,
15643,
316,
46493,
13,
268,
14... | 2.492754 | 69 |
"""Get RWIS FTP password from the database settings"""
from __future__ import print_function
from pyiem.util import get_properties
def main():
"""Go Main Go"""
props = get_properties()
print(props['rwis_ftp_password'])
if __name__ == '__main__':
main()
| [
37811,
3855,
33212,
1797,
45854,
9206,
422,
262,
6831,
6460,
37811,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
12972,
26597,
13,
22602,
1330,
651,
62,
48310,
628,
198,
4299,
1388,
33529,
198,
220,
220,
220,
37227,
52... | 3 | 91 |
import time
import json
import functools
from math import atan2
import matplotlib.pyplot as plt
# Debug func
# Plotting points/hull
# Graham Func
#Assumption -> Input is vertices of polygon given in order (CCW) starting with leftmost point
if __name__ == "__main__":
a = [[0,0] , [5,0] , [6,1] , [3,2] , [7,5] , [2,3] , [0,5] , [1,2]]
H = graham(a)
count=0
j=0
i=0
flag = 0
# Finding pockets comparing convex hull and given vertices
while j < len(H):
# print(i,j)
if a[i] == H[j]:
i+=1
j+=1
if flag == 1:
flag = 0
count+=1
else:
i+=1
if flag == 0:
flag = 1
count+=1
# print(i,j)
if i < len(a):
count+=1
if count%2 == 0:
n = count//2
else:
n = count//2 + 1
print("Number of pockets is {}".format(n))
plot(a,H,1,a) | [
11748,
640,
198,
11748,
33918,
198,
11748,
1257,
310,
10141,
198,
6738,
10688,
1330,
379,
272,
17,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
2,
31687,
25439,
198,
198,
2,
28114,
889,
2173,
14,
71,
724,
... | 1.851852 | 513 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import _
@webnotes.whitelist(allow_roles=["System Manager", "Administrator"])
| [
2,
15069,
357,
66,
8,
2211,
11,
5313,
11822,
21852,
18367,
83,
13,
12052,
13,
198,
2,
17168,
13789,
13,
4091,
5964,
13,
14116,
198,
198,
2,
1114,
5964,
1321,
11,
3387,
766,
5964,
13,
14116,
198,
198,
6738,
11593,
37443,
834,
1330,
... | 3.457831 | 83 |
from tsadm.log import TSAdmLogger
from tsadm.views.base import TSAdmView
logger = TSAdmLogger(__name__)
| [
6738,
40379,
324,
76,
13,
6404,
1330,
26136,
2782,
76,
11187,
1362,
198,
6738,
40379,
324,
76,
13,
33571,
13,
8692,
1330,
26136,
2782,
76,
7680,
198,
198,
6404,
1362,
796,
26136,
2782,
76,
11187,
1362,
7,
834,
3672,
834,
8,
198
] | 2.5 | 42 |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 5 16:54:20 2017
@author: HeyDude
"""
import os
import json
import warnings
import logging
import pydicom
import SimpleITK as sitk
import SimpleDicomToolkit as sdtk
VERSION = 0.93
class Database(sdtk.Logger):
    """ Creates a Sqlite3 table from a list of dicom files. Each header entry
    is stored in a separate column. Sequences are stored in a single column """

    _path = None                    # root folder of the indexed dicom files
    _DATABASE_FILE = 'minidicom.db' # default file name for database
    _images = None # cache dict with images, keyed by SeriesInstanceUID
    _image = None # cache single image for the current selection
    _headers = None # cache list of pydicom headers
    _tagnames = None # cache for tagnames in current selection
    _MAX_FILES = 5000 # max number of files to be read by property images
    _sort_slices_by = None # Dicom field name to sort slices by field value
    #_LOG_LEVEL = logging.DEBUG
    def __init__(self, path, force_rebuild=False, scan=True, silent=False,
                 SUV=True, in_memory=False, use_private_tags=False):
        """
        Create a dicom database from path

        force_rebuild:    Deletes the database file and generates a new database
        scan:             Scans for all dicom files in the path and updates
                          the database. Missing files will be removed as well
        silent:           Suppress progressbar and log messages except errors
        in_memory:        Don't save database to disk. Creates a temporary
                          database in memory.
        use_private_tags: Set to True to include private tags in the database.
                          [Experimental]
        """
        if silent:
            # raise the log threshold before the Logger base class configures
            self._LOG_LEVEL = logging.ERROR
        super().__init__()
        # the builder scans the folder and (re)creates the sqlite tables
        self.builder = DatabaseBuilder(path=path, scan=scan,
                                       force_rebuild=force_rebuild,
                                       in_memory=in_memory,
                                       use_private_tags=use_private_tags,
                                       silent=silent)
        self.logger.info('Database building completed')
        self.database = self.builder.database
        self.SUV = SUV  # whether images are scaled to SUV when possible
        self._selection = {}  # active tag -> value constraints
        self.reset()
        self.database.close()
@property
@property
def files(self):
""" Retrieve all files with path from the database """
file_list = self.get_column(self.builder.FILENAME_COL, close=True)
file_list = [file.replace('\\', '/') for file in file_list]
return file_list
@property
@property
@property
def columns(self):
""" Return all column names in database. """
return self.database.column_names(self.builder.MAIN_TABLE, close=True)
@property
def non_tag_columns(self):
""" Return column names that are not dicom tagnames """
return (self.builder.FILENAME_COL,
self.builder.FILE_SIZE_COL,
sdtk.SQLiteWrapper.ROWID,
self.builder.TAGNAMES_COL)
@property
def tag_names(self):
""" Return the tag names that are in the database """
if self._tagnames is None:
self._tagnames = self._get_tagnames() # use caching
return self._tagnames
@property
def headers(self):
""" Get pydicom headers from values in database. Pydicom headers
are generated from database content. """
if len(self.files) > self._MAX_FILES:
msg = 'Number of files exceeds MAX_FILES property'
raise IOError(msg)
if self._headers is not None:
return self._headers
if len(self) < 1:
self._headers = []
return self.headers
headers = []
uids = self.SOPInstanceUID
if not isinstance(uids, list):
uids = [uids]
headers = [self.header_for_uid(uid) for uid in uids]
self._headers = headers
return self._headers
@property
def series_count(self):
""" Return number of series in database """
return self._count_tag('SeriesInstanceUID')
@property
def study_count(self):
""" Return number of studies in database """
return self._count_tag('StudyInstanceUID')
@property
def patient_count(self):
""" Return number of patients in database """
return self._count_tag('PatientID')
@property
def instance_count(self):
""" Return number of instances in database, equal to number of files"""
return self._count_tag('SOPInstanceUID')
@property
def image(self):
""" Returns an sitk image for the files in the files property.
All files must belong to the same dicom series
(same SeriesInstanceUID). """
if self._image is not None:
return self._image
assert hasattr(self, 'SeriesInstanceUID')
assert isinstance(self.SeriesInstanceUID, str)
image = sdtk.dicom_reader.read_serie(self.sorted_files, SUV=False,
folder=self.builder.path)
# get first uid from file
uid = self.SOPInstanceUID
if isinstance(uid, list):
uid = uid[0]
# generate header with SUV metadata
header = self.header_for_uid(uid)
# calculate suv scale factor
try:
bqml_to_suv = sdtk.dicom_reader.suv_scale_factor(header)
except:
if self.SUV:
warnings.warn('\nNo SUV information found, disabling SUV\n',
RuntimeWarning)
bqml_to_suv = 1
if self.SUV and bqml_to_suv != 1:
image *= bqml_to_suv
image.bqml_to_suv = bqml_to_suv
self._image = image
return self._image
@property
    def images(self):
        """Return a dict mapping each SeriesInstanceUID in the current
        selection to the sitk image built from that series' files. The
        number of files may not exceed the _MAX_FILES property, which
        prevents accidentally reading very large data sets."""
        if len(self.files) > self._MAX_FILES:
            raise IOError('Number of files exceeds MAX_FILES property')
        if self._images is not None:  # cached result
            return self._images
        assert hasattr(self, sdtk.SERIESINSTANCEUID)
        images = {}
        # narrow the selection to one series at a time, then restore it
        # NOTE(review): relies on a `selection` property not visible here;
        # presumably returns the active selection dict -- confirm
        selection = self.selection.copy()
        for uid in self.SeriesInstanceUID:
            images[uid] = self.select(SeriesInstanceUID=uid).image
        self.reset().select(**selection)
        self._images = images
        return self._images
@property
def array(self):
""" Return dicom data as numpy array """
return sitk.GetArrayFromImage(self.image)
@property
def arrays(self):
""" Return dicom data as dictionary with key the SeriesInstanceUID
and value to corresponding numpy array. """
return dict([(key, sitk.GetArrayFromImage(image)) \
for key, image in self.images.items()])
@property
@sort_slices_by.setter
def sort_slices_by(self, value):
"""
Sort slices by given dicom filed
"""
self._sort_slices_by = value
@property
    def sorted_files(self):
        """
        Sort files by the dicom tag name stored in property sort_slices_by.
        SimpleITK's image reader (unfortunately) expects sorted files to
        create a volume e.g. CT slices.
        """
        sort_by = self.sort_slices_by
        # multiple slices with no sort key would produce a scrambled volume
        if self.instance_count > 1 and sort_by is None:
            warnings.warn('\nSlice Sorting Failed Before Reading!\n',
                          RuntimeWarning)
        files = self.database.get_column_where(self.builder.MAIN_TABLE,
                                               self.builder.FILENAME_COL,
                                               sort_by=sort_by,
                                               sort_decimal=True,
                                               **self._selection)
        # normalise path separators to '/'
        files = [file.replace('\\', '/') for file in files]
        return files
    def select(self, close=True, **kwargs):
        """Make a selection in the database based on tag values.

        For example, to select only patient 123456:

            database.select(PatientID='123456')

        To select patient 123456 and study 'MyCTScan':

            database.select(PatientID='123456').select(StudyDescription='MyCTScan')
        or
            database.select(PatientID='123456', StudyDescription='MyCTScan')

        The latter uses fewer SQL statements; results are the same.

        NOTE(review): the `close` argument is not used in this method body --
        presumably kept for API symmetry with other methods; confirm.
        """
        # encode key word arguments
        for tag, value in kwargs.items():
            if tag in self.non_tag_columns:
                continue
            value = self._encode_value(tag, value)
            # rebinding existing keys while iterating is safe in python 3
            # (no keys are added or removed)
            kwargs[tag] = value
        self._selection.update(kwargs)
        self._reset_cache()
        return self
def header_for_uid(self, sopinstanceuid):
""" Return a pydicom header for the requested sopinstanceuid """
uid = sdtk.Encoder.encode_value_with_tagname('SOPInstanceUID',
sopinstanceuid)
h_dicts = self.database.get_row_dict(self.builder.MAIN_TABLE,
SOPInstanceUID=uid)
if not h_dicts:
msg = 'SOPInstanceUID %s not in database'
self.logger.info(msg, uid)
elif len(h_dicts) > 1:
msg = 'SOPInstanceUID {0} not unique'
raise ValueError(msg.format(uid))
h_dict = h_dicts[0]
h_dict = {tag: h_dict[tag] for tag in self.tag_names}
return self._decode(h_dict)
def reset(self, tags=None):
""" After a query a subset of the database is visible, use reset
to make all data visible again. """
if tags:
tags = [tags] if not isinstance(tags, list) else tags
for tag in tags:
self._selection.pop(tag, None)
else:
self._selection = {}
self._reset_cache()
return self
def get_column(self, column_name, distinct=True,
sort=True, close=True, parse=True):
""" Return the unique values for a column with column_name """
if sort:
sort_by = column_name
else:
sort_by = None
values = self.database.get_column(self.builder.MAIN_TABLE,
column_name, sort_by=sort_by,
distinct=distinct,
close=False, **self._selection)
self.logger.debug('parising column....')
if parse and column_name not in self.non_tag_columns:
values = [sdtk.Decoder.decode_entry(column_name, vi)[0] \
for vi in values]
if close:
self.database.close()
return values
def _get_tagnames(self):
""" Return the tag names that are in the database """
tagname_rows = self.get_column(self.builder.TAGNAMES_COL,
distinct=True, parse=False)
tagnames = set()
for row in tagname_rows:
for tagname in json.loads(row):
tagnames.add(tagname)
return tuple(tagnames)
@staticmethod
@staticmethod
class DatabaseBuilder(sdtk.Logger):
    """ Build a dicom database from a folder or set of files """
    FILENAME_COL = 'dicom_file_name' # column in table that stores filenames
    FILE_SIZE_COL = 'file_size_bytes' # store size of files
    TAGNAMES_COL = 'dicom_tag_names' # column that stores tag names for file
    MAIN_TABLE = 'DicomMetaDataTable' # stores values for each tag
    _INFO_TABLE = 'Info' # store database version
    _INFO_DESCRIPTION_COL = 'Description'
    _INFO_PATH_COL = 'path'
    _INFO_VALUE_COL = 'Value'
    _FILENAME_TABLE = 'FileNameTable' # stores non dicom files
    #_LOG_LEVEL = logging.DEBUG
    _chunk_size = 1000 # number of files to read before committing
@property
def files(self):
""" Return all files dicom and non dicom that were added or tried to
add to the database. These files will not be re-added."""
return self.database.get_column(self._FILENAME_TABLE,
self.FILENAME_COL)
@property
@path.setter
@property
@version.setter
def open_database(self, database_file, path, force_rebuild=False):
""" Open the sqlite database in the file, rebuild if asked """
database = sdtk.SQLiteWrapper(database_file)
database._LOG_LEVEL = self._LOG_LEVEL
is_latest = self.get_version(database) == VERSION
self.logger.debug('Databae Version: %s', self.get_version(database))
self.logger.debug('Latest Version: %s', str(VERSION))
if not is_latest:
msg = 'Old Database Structure Found, rebuilding recommended!'
self.logger.info(msg)
if force_rebuild:
msg = 'Removing tables from: %s'
self.logger.info(msg, database.database_file)
database.delete_all_tables()
if not self.MAIN_TABLE in database.table_names:
self._create_main_table(database)
if not self._INFO_TABLE in database.table_names:
self._create_info_table(database, path=path)
if not self._FILENAME_TABLE in database.table_names:
self._create_filename_table(database)
return database
@staticmethod
def get_version(database):
""" Return the version of the database """
if DatabaseBuilder._INFO_TABLE not in database.table_names:
v = 0
else:
v = database.get_column(DatabaseBuilder._INFO_TABLE,
DatabaseBuilder._INFO_VALUE_COL)[0]
return float(v)
def file_list(self, path, index=True):
""" Search path recursively and return a list of all files """
# gather file list
if index:
self.logger.info('Scanning for new files')
files = sdtk.FileScanner.files_in_folder(path, recursive=True)
else:
files = []
return files
def insert_file(self, file, _existing_column_names=None, close=True):
""" Insert a dicom file to the database """
self.logger.debug('Inserting: %s', file)
self.database.insert_row_dict(self._FILENAME_TABLE,
{self.FILENAME_COL: file})
if _existing_column_names is None:
table = DatabaseBuilder.MAIN_TABLE
_existing_column_names = self.database.column_names(table)
# read file from disk
fullfile = os.path.join(self.path, file)
try:
header = pydicom.read_file(fullfile, stop_before_pixels=True)
except FileNotFoundError:
# skip file when file had been removed between scanning and
# the time point the file is opened.
self.logger.info('{0} not found.'.format(fullfile))
return _existing_column_names
except AttributeError:
# Attribute error is thrown when reading a dicom dirfile by pydiom
self.logger.info('{0} not proper dicom.'.format(fullfile))
return _existing_column_names
except:
msg = ('WARNING: Unhandled exception while reading {0}. '
'File is skipped')
print(msg.format(fullfile))
return _existing_column_names
# convert header to dictionary
try:
hdict = DatabaseBuilder._encode(
header, use_private_tags=self.use_private_tags)
except:
self.logger.info('Cannot add: %s', file)
return _existing_column_names
# store tag names
hdict[self.TAGNAMES_COL] = json.dumps(list(hdict.keys()))
hdict[self.FILENAME_COL] = file # add filenmae to dictionary
hdict[self.FILE_SIZE_COL] = os.path.getsize(fullfile)
# determine which columns need to be added to the database
newcols = [c for c in hdict.keys() if c not in _existing_column_names]
# add columns
self._add_column_for_tags(newcols, skip_check=True)
# encode dictionary values to json and stor in database
try:
self.database.insert_row_dict(self.MAIN_TABLE, hdict, close=close)
except:
msg = ('Could not insert file: {0}'.format(file))
self.database.close()
raise IOError(msg)
if close:
self.database.close()
self.logger.debug(newcols)
self.logger.debug('Inserted: %s', file)
return newcols
def remove_files(self, file_names):
""" Remove file list from the database """
for file_name in file_names:
self.remove_file(file_name, close=False)
self.database.close()
def remove_file(self, file_name, close=True):
""" Remove file from database """
self.database.delete_rows(DatabaseBuilder.MAIN_TABLE,
column=DatabaseBuilder.FILENAME_COL,
value=file_name, close=False)
self.database.delete_rows(DatabaseBuilder._FILENAME_TABLE,
column=DatabaseBuilder.FILENAME_COL,
value=file_name, close=False)
if close:
self.database.close()
@staticmethod
@staticmethod
@staticmethod
@staticmethod
def chunks(iterable, chunksize):
"""Yield successive n-sized chunks from iterable."""
for i in range(0, len(iterable), chunksize):
yield iterable[i:i + chunksize]
@staticmethod
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
8621,
220,
642,
1467,
25,
4051,
25,
1238,
2177,
198,
198,
31,
9800,
25,
14690,
35,
2507,
198,
37811,
198,
11748,
28686,
198,
11748,
33918,... | 2.176808 | 8,365 |
# Generated by Django 3.1.1 on 2020-09-18 10:05
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
16,
319,
12131,
12,
2931,
12,
1507,
838,
25,
2713,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2018 by Brendt Wohlberg <brendt@ieee.org>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Plotting/visualisation functions"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import range
from builtins import object
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.pyplot import figure, subplot, subplots, gcf, gca, savefig
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
try:
import mpldatacursor as mpldc
except ImportError:
have_mpldc = False
else:
have_mpldc = True
__author__ = """Brendt Wohlberg <brendt@ieee.org>"""
def attach_keypress(fig):
    """
    Attach a key press event handler that configures keys for closing a
    figure and changing the figure size. Keys 'e' and 'c' respectively
    expand and contract the figure, and key 'q' closes it.

    **Note:** Resizing may not function correctly with all matplotlib
    backends (a
    `bug <https://github.com/matplotlib/matplotlib/issues/10083>`__
    has been reported).

    Parameters
    ----------
    fig : :class:`matplotlib.figure.Figure` object
        Figure to which event handling is to be attached
    """

    # Avoid multiple event handlers attached to the same figure; the cid
    # attribute doubles as the "already attached" marker
    if not hasattr(fig, '_sporco_keypress_cid'):
        # NOTE(review): `press` is not defined in this view -- presumably a
        # handler function defined nearby in the module; confirm before reuse
        cid = fig.canvas.mpl_connect('key_press_event', press)
        fig._sporco_keypress_cid = cid
def plot(y, x=None, ptyp='plot', xlbl=None, ylbl=None, title=None,
         lgnd=None, lglc=None, lwidth=1.5, lstyle='solid', msize=6.0,
         mstyle='None', fgsz=None, fgnm=None, fig=None, ax=None):
    """
    Plot points or lines in 2D.

    If a figure object is specified then the plot is drawn in that figure
    and fig.show() is not called. The figure is closed on key entry 'q'.

    Parameters
    ----------
    y : array_like
        1d or 2d data to plot; each column of a 2d array is a separate curve.
    x : array_like, optional (default None)
        Values for the x-axis of the plot.
    ptyp : string, optional (default 'plot')
        One of 'plot', 'semilogx', 'semilogy', or 'loglog'.
    xlbl, ylbl, title : string, optional (default None)
        Axis labels and figure title.
    lgnd : list of strings, optional (default None)
        Legend strings.
    lglc : string, optional (default None)
        Legend location.
    lwidth : float, optional (default 1.5)
        Line width.
    lstyle : string, optional (default 'solid')
        Line style (see :class:`matplotlib.lines.Line2D`).
    msize : float, optional (default 6.0)
        Marker size.
    mstyle : string, optional (default 'None')
        Marker style (see :mod:`matplotlib.markers`).
    fgsz : tuple (width,height), optional (default None)
        Figure dimensions in inches.
    fgnm : integer, optional (default None)
        Figure number.
    fig : :class:`matplotlib.figure.Figure` object, optional (default None)
        Draw in this figure instead of creating one.
    ax : :class:`matplotlib.axes.Axes` object, optional (default None)
        Plot in these axes instead of the figure's current axes.

    Returns
    -------
    fig : :class:`matplotlib.figure.Figure` object
        Figure object for this figure
    ax : :class:`matplotlib.axes.Axes` object
        Axes object for this plot
    """

    supplied_fig = fig
    if fig is None:
        fig = plt.figure(num=fgnm, figsize=fgsz)
        fig.clf()
        ax = fig.gca()
    elif ax is None:
        ax = fig.gca()
    if ptyp not in ('plot', 'semilogx', 'semilogy', 'loglog'):
        raise ValueError("Invalid plot type '%s'" % ptyp)
    # dispatch to the requested axes plotting method
    plotfn = getattr(ax, ptyp)
    args = (y,) if x is None else (x, y)
    lines = plotfn(*args, linewidth=lwidth, linestyle=lstyle,
                   marker=mstyle, markersize=msize)
    for setter, value in ((ax.set_title, title), (ax.set_xlabel, xlbl),
                          (ax.set_ylabel, ylbl)):
        if value is not None:
            setter(value)
    if lgnd is not None:
        ax.legend(lgnd, loc=lglc)
    attach_keypress(fig)
    if have_mpldc:
        mpldc.datacursor(lines)
    # only show figures this call created itself
    if supplied_fig is None:
        fig.show()
    return fig, ax
def surf(z, x=None, y=None, elev=None, azim=None, xlbl=None, ylbl=None,
         zlbl=None, title=None, lblpad=8.0, cntr=None, cmap=None,
         fgsz=None, fgnm=None, fig=None, ax=None):
    """
    Plot a 2D surface in 3D.

    If a figure object is specified then the surface is drawn in that
    figure and fig.show() is not called. The figure is closed on key
    entry 'q'.

    Parameters
    ----------
    z : array_like
        2d array of data to plot
    x, y : array_like, optional (default None)
        Values for the x- and y-axes of the plot.
    elev : float
        Elevation angle (in degrees) in the z plane
    azim : float
        Azimuth angle (in degrees) in the x,y plane
    xlbl, ylbl, zlbl : string, optional (default None)
        Axis labels.
    title : string, optional (default None)
        Figure title
    lblpad : float, optional (default 8.0)
        Label padding
    cntr : int or sequence of ints, optional (default None)
        If not None, plot contours of the surface on the lower end of the
        z-axis: an int gives the number of contours, a sequence the
        specific levels.
    cmap : :class:`matplotlib.colors.Colormap` object, optional (default None)
        Colour map for surface; defaults to cm.coolwarm.
    fgsz : tuple (width,height), optional (default None)
        Figure dimensions in inches.
    fgnm : integer, optional (default None)
        Figure number.
    fig : :class:`matplotlib.figure.Figure` object, optional (default None)
        Draw in this figure instead of creating one.
    ax : :class:`matplotlib.axes.Axes` object, optional (default None)
        Plot in these axes instead of creating new ones.

    Returns
    -------
    fig : :class:`matplotlib.figure.Figure` object
        Figure object for this figure
    ax : :class:`matplotlib.axes.Axes` object
        Axes object for this plot
    """

    supplied_fig = fig
    if fig is None:
        fig = plt.figure(num=fgnm, figsize=fgsz)
        fig.clf()
        ax = plt.axes(projection='3d')
    elif ax is None:
        ax = plt.axes(projection='3d')
    elif ax.name != '3d':
        # Replace existing non-3d axes with 3d axes in the same slot; see
        # https://stackoverflow.com/a/43563804 and
        # https://stackoverflow.com/a/35221116
        ax.remove()
        ax = fig.add_subplot(*ax.get_geometry(), projection='3d')
    if elev is not None or azim is not None:
        ax.view_init(elev=elev, azim=azim)
    if cmap is None:
        cmap = cm.coolwarm
    if x is None:
        x = range(z.shape[1])
    if y is None:
        y = range(z.shape[0])
    xgrid, ygrid = np.meshgrid(x, y)
    ax.plot_surface(xgrid, ygrid, z, rstride=1, cstride=1, cmap=cmap)
    if cntr is not None:
        # draw contour lines on a plane slightly below the data minimum
        offset = np.around(z.min() - 0.2 * (z.max() - z.min()), 3)
        ax.contour(xgrid, ygrid, z, cntr, linewidths=2, cmap=cmap,
                   linestyles="solid", offset=offset)
        ax.set_zlim(offset, ax.get_zlim()[1])
    if title is not None:
        ax.set_title(title)
    if xlbl is not None:
        ax.set_xlabel(xlbl, labelpad=lblpad)
    if ylbl is not None:
        ax.set_ylabel(ylbl, labelpad=lblpad)
    if zlbl is not None:
        ax.set_zlabel(zlbl, labelpad=lblpad)
    attach_keypress(fig)
    # only show figures this call created itself
    if supplied_fig is None:
        fig.show()
    return fig, ax
def contour(z, x=None, y=None, v=5, xlbl=None, ylbl=None, title=None,
            cfntsz=10, lfntsz=None, intrp='bicubic', alpha=0.5, cmap=None,
            vmin=None, vmax=None, fgsz=None, fgnm=None, fig=None, ax=None):
    """
    Contour plot of a 2D surface.

    If a figure object is specified then the plot is drawn in that figure
    and fig.show() is not called. The figure is closed on key entry 'q'.

    Parameters
    ----------
    z : array_like
        2d array of data to plot
    x, y : array_like, optional (default None)
        Values for the x- and y-axes of the plot.
    v : int or sequence of ints, optional (default 5)
        An int gives the number of contours to plot, a sequence the
        specific contour levels.
    xlbl, ylbl, title : string, optional (default None)
        Axis labels and figure title.
    cfntsz : int or None, optional (default 10)
        Contour label font size; 0 or None disables contour labels.
    lfntsz : int, optional (default None)
        Axis label font size (None selects the default size).
    intrp : string, optional (default 'bicubic')
        Interpolation used for the image underlying the contours (see the
        ``interpolation`` parameter of :meth:`matplotlib.axes.Axes.imshow`).
    alpha : float, optional (default 0.5)
        Underlying image display alpha value
    cmap : :class:`matplotlib.colors.Colormap`, optional (default None)
        Colour map for surface; defaults to cm.coolwarm.
    vmin, vmax : float, optional (default None)
        Colour map bounds (see :meth:`matplotlib.axes.Axes.imshow`).
    fgsz : tuple (width,height), optional (default None)
        Figure dimensions in inches.
    fgnm : integer, optional (default None)
        Figure number.
    fig : :class:`matplotlib.figure.Figure` object, optional (default None)
        Draw in this figure instead of creating one.
    ax : :class:`matplotlib.axes.Axes` object, optional (default None)
        Plot in these axes instead of the figure's current axes.

    Returns
    -------
    fig : :class:`matplotlib.figure.Figure` object
        Figure object for this figure
    ax : :class:`matplotlib.axes.Axes` object
        Axes object for this plot
    """

    supplied_fig = fig
    if fig is None:
        fig = plt.figure(num=fgnm, figsize=fgsz)
        fig.clf()
        ax = fig.gca()
    elif ax is None:
        ax = fig.gca()
    if cmap is None:
        cmap = cm.coolwarm
    x = np.arange(z.shape[1]) if x is None else np.array(x)
    y = np.arange(z.shape[0]) if y is None else np.array(y)
    xgrid, ygrid = np.meshgrid(x, y)
    lines = ax.contour(xgrid, ygrid, z, v, colors='black')
    if cfntsz is not None and cfntsz > 0:
        plt.clabel(lines, inline=True, fontsize=cfntsz)
    im = ax.imshow(z, origin='lower', interpolation=intrp, aspect='auto',
                   extent=[x.min(), x.max(), y.min(), y.max()], cmap=cmap,
                   vmin=vmin, vmax=vmax, alpha=alpha)
    if title is not None:
        ax.set_title(title)
    if xlbl is not None:
        ax.set_xlabel(xlbl, fontsize=lfntsz)
    if ylbl is not None:
        ax.set_ylabel(ylbl, fontsize=lfntsz)
    # attach the colorbar in its own axes to the right of the plot
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.2)
    plt.colorbar(im, ax=ax, cax=cax)
    attach_keypress(fig)
    if have_mpldc:
        mpldc.datacursor()
    # only show figures this call created itself
    if supplied_fig is None:
        fig.show()
    return fig, ax
def imview(img, title=None, copy=True, fltscl=False, intrp='nearest',
           norm=None, cbar=False, cmap=None, fgsz=None, fgnm=None,
           fig=None, ax=None):
    """
    Display an image.

    Pixel values are displayed when the pointer is over valid image data.
    If a figure object is specified then the image is drawn in that
    figure and fig.show() is not called. The figure is closed on key
    entry 'q'.

    Parameters
    ----------
    img : array_like, shape (Nr, Nc) or (Nr, Nc, 3) or (Nr, Nc, 4)
        Image to display
    title : string, optional (default None)
        Figure title
    copy : boolean, optional (default True)
        If True, create a copy of input `img` as a reference for displayed
        pixel values, ensuring that displayed values do not change when the
        array changes in the calling scope. Set this flag to False if the
        overhead of an additional copy of the input image is not acceptable.
    fltscl : boolean, optional (default False)
        If True, rescale and shift floating point arrays to [0,1]
    intrp : string, optional (default 'nearest')
        Interpolation used to display the image (see the ``interpolation``
        parameter of :meth:`matplotlib.axes.Axes.imshow`)
    norm : :class:`matplotlib.colors.Normalize` object, optional (default None)
        Normalisation instance used to scale pixel values for input to the
        colour map
    cbar : boolean, optional (default False)
        Flag indicating whether to display colorbar
    cmap : :class:`matplotlib.colors.Colormap`, optional (default None)
        Colour map for image; defaults to cm.Greys_r for monochrome images.
    fgsz : tuple (width,height), optional (default None)
        Figure dimensions in inches.
    fgnm : integer, optional (default None)
        Figure number.
    fig : :class:`matplotlib.figure.Figure` object, optional (default None)
        Draw in this figure instead of creating one.
    ax : :class:`matplotlib.axes.Axes` object, optional (default None)
        Plot in these axes instead of the figure's current axes.

    Returns
    -------
    fig : :class:`matplotlib.figure.Figure` object
        Figure object for this figure
    ax : :class:`matplotlib.axes.Axes` object
        Axes object for this plot
    """

    # bug fix: the docstring promises (Nr, Nc, 4) RGBA support but the old
    # check rejected any channel count other than 3
    if img.ndim > 2 and img.shape[2] not in (3, 4):
        raise ValueError('Argument img must be an Nr x Nc array or an '
                         'Nr x Nc x 3 or Nr x Nc x 4 array')
    figp = fig
    if fig is None:
        fig = plt.figure(num=fgnm, figsize=fgsz)
        fig.clf()
        ax = fig.gca()
    elif ax is None:
        ax = fig.gca()
    # NOTE(review): 'box-forced' was removed in matplotlib >= 3.0; this call
    # fails on modern matplotlib -- confirm the supported matplotlib version
    ax.set_adjustable('box-forced')
    imgd = img.copy()
    if copy:
        # Keep a separate copy of the input image so that the original
        # pixel values can be display rather than the scaled pixel
        # values that are actually plotted.
        img = img.copy()
    if cmap is None and img.ndim == 2:
        cmap = cm.Greys_r
    # rescale pixel values into a range suitable for display
    if np.issubdtype(img.dtype, np.floating):
        if fltscl:
            imgd -= imgd.min()
            imgd /= imgd.max()
        if img.ndim > 2:
            imgd = np.clip(imgd, 0.0, 1.0)
    elif img.dtype == np.uint16:
        imgd = np.float16(imgd) / np.iinfo(np.uint16).max
    elif img.dtype == np.int16:
        imgd = np.float16(imgd) - imgd.min()
        imgd /= imgd.max()
    if norm is None:
        im = ax.imshow(imgd, cmap=cmap, interpolation=intrp, vmin=imgd.min(),
                       vmax=imgd.max())
    else:
        im = ax.imshow(imgd, cmap=cmap, interpolation=intrp, norm=norm)
    ax.set_yticklabels([])
    ax.set_xticklabels([])
    if title is not None:
        ax.set_title(title)
    if cbar or cbar is None:
        # cbar None reserves the colorbar space without drawing one,
        # keeping multiple image plots aligned
        orient = 'vertical' if img.shape[0] >= img.shape[1] else 'horizontal'
        pos = 'right' if orient == 'vertical' else 'bottom'
        divider = make_axes_locatable(ax)
        cax = divider.append_axes(pos, size="5%", pad=0.2)
        if cbar is None:
            # See http://chris35wills.github.io/matplotlib_axis
            if hasattr(cax, 'set_facecolor'):
                cax.set_facecolor('none')
            else:
                cax.set_axis_bgcolor('none')
            for axis in ['top', 'bottom', 'left', 'right']:
                cax.spines[axis].set_linewidth(0)
            cax.set_xticks([])
            cax.set_yticks([])
        else:
            plt.colorbar(im, ax=ax, cax=cax, orientation=orient)
    # NOTE(review): format_coord is not defined in this view -- presumably a
    # coordinate-formatting helper defined nearby in the module; confirm
    ax.format_coord = format_coord
    attach_keypress(fig)
    if have_mpldc:
        mpldc.datacursor(display='single')
    if figp is None:
        fig.show()
    return fig, ax
def close(fig=None):
    """
    Close figure(s).

    Parameters
    ----------
    fig : :class:`matplotlib.figure.Figure` object or integer,\
        optional (default None)
        Figure object or number of the figure to close; all figures
        are closed when None.
    """

    plt.close('all' if fig is None else fig)
def set_ipython_plot_backend(backend='qt'):
    """
    Set the matplotlib backend within an ipython shell.

    Equivalent to the line magic ``%matplotlib [backend]``, but checks
    first whether the code is running in an ipython shell, so it can
    safely be called from a normal python script (where it is a no-op).

    Parameters
    ----------
    backend : string, optional (default 'qt')
        Name of backend to be passed to the ``%matplotlib`` line magic
        command
    """

    from sporco.util import in_ipython
    if not in_ipython():
        return
    # See https://stackoverflow.com/questions/35595766
    get_ipython().run_line_magic('matplotlib', backend)
def set_notebook_plot_backend(backend='inline'):
    """
    Set the matplotlib backend within a Jupyter Notebook shell.

    Equivalent to the line magic ``%matplotlib [backend]``, but checks
    first whether the code is running in a notebook shell, so it can
    safely be called from a normal python script (where it is a no-op).

    Parameters
    ----------
    backend : string, optional (default 'inline')
        Name of backend to be passed to the ``%matplotlib`` line magic
        command
    """

    from sporco.util import in_notebook
    if not in_notebook():
        return
    # See https://stackoverflow.com/questions/35595766
    get_ipython().run_line_magic('matplotlib', backend)
def config_notebook_plotting():
    """
    Configure plotting functions for inline plotting within a Jupyter
    Notebook shell. This function has no effect when not within a
    notebook shell, and may therefore be used within a normal python
    script.
    """

    # Check whether running within a notebook shell and have
    # not already monkey patched the plot function
    from sporco.util import in_notebook
    module = sys.modules[__name__]
    if in_notebook() and module.plot.__name__ == 'plot':
        # Set inline backend (i.e. %matplotlib inline) if in a notebook shell
        set_notebook_plot_backend()
        # NOTE(review): plot_wrap / surf_wrap / contour_wrap / imview_wrap /
        # show_disable are not defined in this view -- presumably nested
        # wrapper definitions closing over the *_original names; confirm
        # before modifying this function.
        # Replace plot function with a wrapper function that discards
        # its return value (within a notebook with inline plotting, plots
        # are duplicated if the return value from the original function is
        # not assigned to a variable)
        plot_original = module.plot
        module.plot = plot_wrap
        # Replace surf function with a wrapper function that discards
        # its return value (see comment for plot function)
        surf_original = module.surf
        module.surf = surf_wrap
        # Replace contour function with a wrapper function that discards
        # its return value (see comment for plot function)
        contour_original = module.contour
        module.contour = contour_wrap
        # Replace imview function with a wrapper function that discards
        # its return value (see comment for plot function)
        imview_original = module.imview
        module.imview = imview_wrap
        # Disable figure show method (results in a warning if used within
        # a notebook with inline plotting)
        import matplotlib.figure
        matplotlib.figure.Figure.show = show_disable
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
34,
8,
1853,
12,
7908,
416,
19252,
83,
370,
48988,
3900,
1279,
4679,
358,
83,
31,
494,
1453,
13,
2398,
29,
198,
2,
1439,
2489,
10395,
13,
347,
10305,
... | 2.447272 | 8,449 |
from onmt.translate.Translator import Translator
from onmt.translate.Translation import Translation, TranslationBuilder
from onmt.translate.Beam import Beam, GNMTGlobalScorer
__all__ = [Translator, Translation, Beam, GNMTGlobalScorer, TranslationBuilder]
| [
6738,
319,
16762,
13,
7645,
17660,
13,
8291,
41880,
1330,
3602,
41880,
198,
6738,
319,
16762,
13,
7645,
17660,
13,
48313,
1330,
33322,
11,
33322,
32875,
198,
6738,
319,
16762,
13,
7645,
17660,
13,
3856,
321,
1330,
25855,
11,
15484,
1375... | 3.764706 | 68 |
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="ticks")
sns.set_context("poster")
import sys, os
from nested_dict import nested_dict
import pandas as pd
import numpy as np
from pyfasta import Fasta
import os
import re
from scipy import stats
import util
get_stat(prefix='/home/gongjing/project/shape_imputation/data/hek_wc_vivo/3.shape/shape_different_cutoff')
# null_sequential_pattern()
# expression_vs_null_pct()
# reactivity_stat_df1 = value_dist('/home/gongjing/project/shape_imputation/data/hek_wc_vivo/3.shape/shape.c200T2M0m0.out:/home/gongjing/project/shape_imputation/data/mes_wc_vivo/3.shape/shape.c200T2M0m0.out:/home/gongjing/project/shape_imputation/data/DMSseq_fibroblast_vivo/3.shape/shape.c200T2M0m0.out:/home/gongjing/project/shape_imputation/data/DMSseq_K562_vivo/3.shape/shape.c200T2M0m0.out', label='icSHAPE_HEK293:icSHAPE_mES:DMSseq_fibroblast:DMSseq_K562', savefn='/home/gongjing/project/shape_imputation/data/DMSseq_fibroblast_vivo/3.shape/shape_value_dist.pdf')
# reactivity_stat_df2 = value_dist('/home/gongjing/project/shape_imputation/data/DMSseq_fibroblast_vivo/3.shape/shape.c200T2M0m0.out')
# shape_fragment_null_sequential_pattern()
# f='/data/gongjing/project/shape_imputation/data/hek_wc_vivo/3.shape/shape.c200T2M0m0.out.windowsHasNull/low_depth_null/sampling/windowLen100.sliding100.train.low_60_1234.null_pattern.x10.chrom0.txt'
# f='/data/gongjing/project/shape_imputation/data/hek_wc_vivo/3.shape/shape.c200T2M0m0.out.windowsHasNull/train_x10_then_pct30_maxL20/windowLen100.sliding100.trainx10_randomNULL0.3.txt'
# shape_fragment_null_sequential_pattern(out=f) | [
11748,
2603,
29487,
8019,
355,
285,
489,
198,
76,
489,
13,
1904,
10786,
46384,
11537,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
82,
5907,
13,
2617,
7,
7635,
2625,
8... | 2.351199 | 709 |
# -*- coding: utf-8 -*-
r"""
\
+-------------------+--------------------+
| |Screenshot1| | |Screenshot3| |
+-------------------+--------------------+
A program for calculating the mass of XAFS [X-Ray Absorption Fine Structure]
samples. The chemical formula parser understands parentheses and weight
percentage, also in nested form. XAFSmassQt reports the quantity (weight,
thickness or pressure) together with the expected height of the absorption
edge.
.. |Screenshot1| image:: _images/1powder_140.png
:scale: 66 %
.. |Screenshot3| image:: _images/3gas_140.png
:scale: 66 %
Dependencies
------------
numpy, pyparsing and matplotlib are required. Qt must be provided by either
PyQt4, PyQt5 or PySide.
Get XAFSmass
------------
XAFSmass is available as source distribution from
`PyPI <https://pypi.python.org/pypi/XAFSmass>`_ or
`Github <https://github.com/kklmn/XAFSmass>`__.
The distribution archive also includes documentation.
Installation
------------
Unzip the .zip file into a suitable directory and run ``python XAFSmassQt.py``.
On Windows, run ``pythonw XAFSmassQt.py`` or give it a .pyw extension to
suppress the console window.
You may want to run ``python setup.py install`` in order to put the XAFSmass
package to the standard location.
Citing XAFSmass
---------------
Please cite XAFSmass as:
`K. Klementiev and R. Chernikov, "XAFSmass: a program for calculating the
optimal mass of XAFS samples", J. Phys.: Conf. Ser. 712 (2016) 012008,
doi:10.1088/1742-6596/712/1/012008
<http://dx.doi.org/10.1088/1742-6596/712/1/012008>`_.
Theoretical references used
---------------------------
The tabulated scattering factors are taken from Henke et al. (10 eV < *E* < 30
keV) [Henke]_, Brennan & Cowan (30 eV < *E* < 509 keV) [BrCo]_ and Chantler
(11 eV < *E* < 405 keV) [Chantler]_.
.. note::
The tables of f'' factors consider only photoelectric
cross-sections. The tabulation by Chantler can optionally have
*total* absorption cross-sections. This option is enabled by selecting
the data table 'Chantler total (NIST)'.
.. [Henke] http://henke.lbl.gov/optical_constants/asf.html
B.L. Henke, E.M. Gullikson, and J.C. Davis, *X-ray interactions:
photoabsorption, scattering, transmission, and reflection at
E=50-30000 eV, Z=1-92*, Atomic Data and Nuclear Data Tables
**54** (no.2) (1993) 181-342.
.. [BrCo] http://www.bmsc.washington.edu/scatter/periodic-table.html
ftp://ftpa.aps.anl.gov/pub/cross-section_codes/
S. Brennan and P.L. Cowan, *A suite of programs for calculating
x-ray absorption, reflection and diffraction performance for a
variety of materials at arbitrary wavelengths*, Rev. Sci. Instrum.
**63** (1992) 850-853.
.. [Chantler] http://physics.nist.gov/PhysRefData/FFast/Text/cover.html
http://physics.nist.gov/PhysRefData/FFast/html/form.html
C. T. Chantler, *Theoretical Form Factor, Attenuation, and
Scattering Tabulation for Z = 1 - 92 from E = 1 - 10 eV to E = 0.4 -
1.0 MeV*, J. Phys. Chem. Ref. Data **24** (1995) 71-643.
Usage
-----
Chemical formula parser
~~~~~~~~~~~~~~~~~~~~~~~
The parser understands chemical elements, optionally followed by atomic
quantities or weight percentages. A group of atoms can be enclosed in
parentheses and assigned a common quantity or wt%. Some examples are given
above the edit line. For example, `Cu%1Zn%1((Al2O3)%10SiO2)` means 1 wt% of Cu
and 1 wt% of Zn in an aluminosilicate matrix composed of 10 wt% of alumina in
silica.
For the search of an unknown elemental concentration, give `x` to the element
of interest.
Calculation of mass and absorption step for powder samples
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. note::
You typically do not need the calculated values at exactly the edge
position but rather at an energy somewhere above it. The list of edges
offers the edge positions plus 50 eV. You are free to specify any energy
within the range of the selected tabulation.
A typical application is the calculation of the mass for a powder sample. The
optimal *optical* sample thickness μd depends on the absorption levels selected
for the ionization chambers (see below). Typically, μd is between 2 and 3 (e.g.
for a 17.4% absorption level for the 1st chamber and a 50% level for the 2nd
chamber, the optimal thickness is 2.42). However, if you get the absorption
step more that 1.5 (reported by the drop-down list "absorptance step = "), it
is recommended to reduce the sample mass to avoid the potential thickness
effect due to possible inhomogeneity in the wafer. If your sample is diluted
and you get a very low absorption step, do not try to make the wafer thicker
hoping that you will get better spectra -- you will not: the optimal thickness
gets *the best* signal-to-noise ratio (it is in this sense the optimal). You
can only try to measure your absorption spectra with another registration
technique: in fluorescence or electron yield modes.
.. image:: _images/SNtransm050.png
:scale: 50 %
.. image:: _images/SNtransm100.png
:scale: 50 %
Calculation of thickness and absorption step for samples with known density
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here you can calculate the thickness of the sample with known density (usually,
a foil). Commercial foils are highly homogeneous in thickness, so that you may
ignore large step jumps and pay attention to the total μd only.
Calculation of gas pressure for ionization chambers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. caution::
For nitrogen, do not forget the 2: N2, not just N!
Start with the 2nd ionization chamber (IC). If a reference foil is placed
between the 2nd and the 3rd IC, the fraction of x-rays absorbed by the 2nd IC
is usually set to 50%. If the reference foil is not needed, one can select the
total absorption (100%). For these two cases the optimal absorption of the 1st
IC at a certain μd is found from the figures above showing the levels of
signal-to-noise ratio.
For exploring mixtures of several gases, give the gases in parentheses, e.g.
as (Ar)(N2). The corresponding number of sliders will appear that define
partial pressures. The program will calculate the molar weight of each gas and
update the chemical formula and the total attenuation.
Calculation of an unknown elemental concentration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Case 1: *You know the composition of the matrix*
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You need an absorption spectrum taken without the sample (empty spectrum) but
with the same state of the ionization chambers. You then subtract it from the
spectrum of the sample, e.g. in VIPER, and get a real (i.e. not vertically
shifted) absorption coefficient. Determine the value of μd above the edge
(μTd), the edge jump (Δμd) and its uncertainty (δμd). Specify the chemical
formula with x.
Case 2: *You know the sample mass and area*
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Determine the edge jump (Δμd). For the pure element find such a value for μTd
that the absorption step in the pull-down list be equal to your experimental
Δμd. This will give you the mass of the element of interest. Just divide it by
the total mass to get the weight percentage.
Finding the scattering factors f''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you need to know the scattering factor f'' at different energies and/or its
jump at an edge (Δf''), XAFSmass provides a graphical tool for this.
For example, you may need these values to determine the composition of a binary
compound if you have the experimental edge heights at two edges. The absorption
step Δμd at an absorption edge of energy E is proportional to Δf''ν/E, where ν
is the amount of (resonantly) absorbing atoms in mole. Hence, the atomic ratio
of two elements in the same sample is
:math:`\nu_A/\nu_B = (\Delta\mu d)_A/(\Delta\mu d)_B\cdot[\Delta f_B''
/\Delta f_A'' \cdot E_A/E_B]`. For binary compounds
:math:`{\rm A}_x{\rm B}_{1-x}` the concentration :math:`x` is calculated then
as :math:`x = (\nu_A/\nu_B)/[1+(\nu_A/\nu_B)]`.
"""
__module__ = "XAFSmass"
__versioninfo__ = (1, 3, 9)
__version__ = '.'.join(map(str, __versioninfo__))
__author__ = \
"Konstantin Klementiev (MAX IV Laboratory), " +\
"Roman Chernikov (Canadian Light Source)"
__email__ = \
"konstantin.klementiev@gmail.com, rchernikov@gmail.com"
__date__ = "09 Jul 2019"
__license__ = "MIT license"
| [
171,
119,
123,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
81,
37811,
198,
59,
198,
198,
10,
1783,
6329,
10,
19351,
10,
198,
91,
220,
220,
930,
34204,
16,
91,
220,
220,
930,
220,
220,
220,
930,
34204,
18,
91... | 3.338589 | 2,537 |
#!/usr/bin/env python3
from __future__ import division
import argparse
import collections
import glob
import shelve
import os.path
import numpy as np
import matplotlib.pyplot as plt
import plot_helpers
import weibull_workload
axes = 'shape sigma load timeshape njobs'.split()
parser = argparse.ArgumentParser(description="plot CDF of slowdown")
parser.add_argument('dirname', help="directory in which results are stored")
parser.add_argument('--shape', type=float, default=0.25,
help="shape for job size distribution; default: 0.25")
parser.add_argument('--sigma', type=float, default=0.5,
help="sigma for size estimation error log-normal "
"distribution; default: 0.5")
parser.add_argument('--load', type=float, default=0.9,
help="load for the generated workload; default: 0.9")
parser.add_argument('--timeshape', type=float, default=1,
help="shape for the Weibull distribution of job "
"inter-arrival times; default: 1 (i.e. exponential)")
parser.add_argument('--njobs', type=int, default=10000,
help="number of jobs in the workload; default: 10000")
parser.add_argument('--nolatex', default=False, action='store_true',
help="disable LaTeX rendering")
parser.add_argument('--xmin', type=float, default=1,
help="minimum value on the x axis")
parser.add_argument('--xmax', type=float,
help="maximum value on the x axis")
parser.add_argument('--ymin', type=float, default=0,
help="minimum value on the y axis")
parser.add_argument('--ymax', type=float, default=1,
help="maximum value on the y axis")
parser.add_argument('--nolegend', default=False, action='store_true',
help="don't put a legend in the plot")
parser.add_argument('--legend_loc', default=0,
help="location for the legend (see matplotlib doc)")
parser.add_argument('--normal_error', default=False, action='store_true',
help="error function distributed according to a normal "
"rather than a log-normal")
parser.add_argument('--alt_schedulers', default=False, action='store_true',
help="plot schedulers that are variants of FSPE+PS")
parser.add_argument('--save', help="don't show but save in target filename")
args = parser.parse_args()
if args.alt_schedulers:
plotted = 'FSPE+PS FSPE+LAS SRPTE+PS SRPTE+LAS PS'.split()
styles = {'FSPE+PS': '-', 'FSPE+LAS': '--',
'SRPTE+PS': ':', 'SRPTE+LAS': '-.',
'PS': '-'}
colors = {'FSPE+PS': 'r', 'FSPE+LAS': 'r',
'SRPTE+PS': 'r', 'SRPTE+LAS': 'r',
'PS': '0.6'}
else:
plotted = 'SRPTE FSPE FSPE+PS PS LAS FIFO'.split()
styles = {'FIFO': ':', 'PS': '-', 'LAS': '--',
'SRPTE': '--', 'FSPE': ':', 'FSPE+PS': '-'}
colors = {'FIFO': '0.6', 'PS': '0.6', 'LAS': '0.6',
'SRPTE': 'r', 'FSPE': 'r', 'FSPE+PS': 'r'}
fname_regex = [str(getattr(args, ax)) for ax in axes]
head = 'normal' if args.normal_error else 'res'
glob_str = os.path.join(args.dirname,
'{}_{}_[0-9.]*.s'.format(head, '_'.join(fname_regex)))
fnames = glob.glob(glob_str)
results = collections.defaultdict(list)
for fname in fnames:
print('.', end='', flush=True)
seed = int(os.path.splitext(fname)[0].split('_')[-1])
job_sizes = sizes(seed)
try:
shelve_ = shelve.open(fname, 'r')
except:
# the file is being written now
continue
else:
for scheduler in plotted:
for sojourns in shelve_[scheduler]:
slowdowns = (sojourn / size
for sojourn, size in zip(sojourns, job_sizes))
results[scheduler].extend(slowdowns)
print()
fig = plt.figure(figsize=(8, 4.5))
ax = fig.add_subplot(111)
ax.set_xlabel("slowdown")
ax.set_ylabel("ECDF")
ys = np.linspace(max(0, args.ymin), min(1, args.ymax), 100)
for scheduler in plotted:
slowdowns = results[scheduler]
slowdowns.sort()
last_idx = len(slowdowns) - 1
indexes = np.linspace(max(0, args.ymin) * last_idx,
min(1, args.ymax) * last_idx,
100).astype(int)
xs = [slowdowns[idx] for idx in indexes]
style = styles[scheduler]
label = 'PSBS' if scheduler == 'FSPE+PS' else scheduler
ax.semilogx(xs, ys, style, label=label, linewidth=4,
color=colors[scheduler])
if not args.nolegend:
ax.legend(loc=args.legend_loc, ncol=2)
ax.tick_params(axis='x', pad=7)
ax.set_xlim(left=args.xmin)
if args.xmax is not None:
ax.set_xlim(right=args.xmax)
ax.set_ylim(args.ymin, args.ymax)
if not args.nolatex:
plot_helpers.config_paper(20)
plt.tight_layout(1)
plt.grid()
if args.save is not None:
plt.savefig(args.save)
else:
plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
11748,
1822,
29572,
198,
11748,
17268,
198,
11748,
15095,
198,
11748,
7497,
303,
198,
11748,
28686,
13,
6978,
198,
198,
11748,
299... | 2.151778 | 2,306 |
import os
import os.path
import shutil
import tempfile
has_symlink = False
compat_test_dir = tempfile.mkdtemp()
# Check for symlink support (available and usable)
src = os.path.join(compat_test_dir, "src")
dst = os.path.join(compat_test_dir, "dst")
with open(src, "w"):
pass
try:
os.symlink(src, dst)
except (AttributeError, OSError):
# AttributeError if symlink is not available (Python <= 3.2 on Windows)
# OSError if we don't have the symlink privilege (on Windows)
pass # Leave has_symlink false
else:
has_symlink = True
shutil.rmtree(compat_test_dir)
| [
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
11748,
4423,
346,
198,
11748,
20218,
7753,
628,
198,
10134,
62,
1837,
4029,
676,
796,
10352,
628,
198,
5589,
265,
62,
9288,
62,
15908,
796,
20218,
7753,
13,
28015,
67,
29510,
3419,
198,
... | 2.547414 | 232 |
import pytest
from django.shortcuts import reverse
from questionbank.users.constants import ADMIN
pytestmark = pytest.mark.django_db
def test_user_role(user, admin_user):
"""
calling user.role method should return the user group which they are in.
if the user is not in any group, NotImplementedError is raised
"""
assert admin_user.role == ADMIN
with pytest.raises(NotImplementedError):
assert user.role
| [
11748,
12972,
9288,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
9575,
198,
198,
6738,
1808,
17796,
13,
18417,
13,
9979,
1187,
1330,
5984,
23678,
198,
198,
9078,
9288,
4102,
796,
12972,
9288,
13,
4102,
13,
28241,
14208,
62,
9945,
62... | 2.986577 | 149 |
s = input()
length = len(s)
word_len = int(length / 3)
t = ""
if s[0:word_len] == s[word_len:word_len*2]:
t = s[0:word_len]
elif s[word_len:word_len*2] == s[word_len*2:word_len*3]:
t = s[word_len:word_len*2]
else:
t = s[0:word_len]
print(t) | [
82,
796,
5128,
3419,
198,
198,
13664,
796,
18896,
7,
82,
8,
198,
4775,
62,
11925,
796,
493,
7,
13664,
1220,
513,
8,
198,
198,
83,
796,
13538,
198,
198,
361,
264,
58,
15,
25,
4775,
62,
11925,
60,
6624,
264,
58,
4775,
62,
11925,
... | 1.868613 | 137 |
"""MCP45XX and MCP46XX commands."""
WRITE = 0x00 << 2
"""Writes to the device."""
INCREASE = 0x01 << 2
"""Increase the resistance."""
DECREASE = 0x02 << 2
"""Decrease the resistance."""
READ = 0x03 << 2
"""Read the current value."""
| [
37811,
44,
8697,
2231,
8051,
290,
337,
8697,
3510,
8051,
9729,
526,
15931,
628,
198,
18564,
12709,
796,
657,
87,
405,
9959,
362,
198,
37811,
20257,
274,
284,
262,
3335,
526,
15931,
198,
198,
30158,
2200,
11159,
796,
657,
87,
486,
9959... | 2.735632 | 87 |
# Copyright 2018 Alibaba Cloud Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
import json
import os
import re
import tempfile
import time
from mock import patch
from alibabacloud import ClientConfig, get_resource
from alibabacloud.clients.ecs_20140526 import EcsClient
from alibabacloud.exceptions import HttpErrorException, ServerException
from tests.base import SDKTestBase
| [
2,
15069,
2864,
41992,
10130,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 3.671937 | 253 |
import os
import json
from azureml.core import Workspace
from azureml.exceptions import WorkspaceException, AuthenticationException, ProjectSystemException
from azureml.core.authentication import ServicePrincipalAuthentication
from adal.adal_error import AdalError
from msrest.exceptions import AuthenticationError
from json import JSONDecodeError
from utils import AMLConfigurationException, required_parameters_provided, mask_parameter
if __name__ == "__main__":
main()
| [
11748,
28686,
198,
11748,
33918,
198,
198,
6738,
35560,
495,
4029,
13,
7295,
1330,
10933,
10223,
198,
6738,
35560,
495,
4029,
13,
1069,
11755,
1330,
10933,
10223,
16922,
11,
48191,
16922,
11,
4935,
11964,
16922,
198,
6738,
35560,
495,
402... | 3.934426 | 122 |
arr = [ 1, 2, 3, 4, 5, 6, 7, 8, 9]
x = 10
# Function call
result = binary_Search(arr, 0, len(arr)-1, x)
if result != -1:
print ("Element is present at index % d" % result)
else:
print ("Element is not present in array")
| [
220,
198,
3258,
796,
685,
352,
11,
362,
11,
513,
11,
604,
11,
642,
11,
718,
11,
767,
11,
807,
11,
860,
60,
220,
198,
87,
796,
838,
198,
220,
220,
198,
2,
15553,
869,
220,
198,
20274,
796,
13934,
62,
18243,
7,
3258,
11,
657,
... | 2.336538 | 104 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
import pandas as pd
import model
from data_reader import load_data, DataReader, DataReaderFastText, FasttextModel
FLAGS = tf.flags.FLAGS
def run_test(session, m, data, batch_size, num_steps):
"""Runs the model on the given data."""
costs = 0.0
iters = 0
state = session.run(m.initial_state)
for step, (x, y) in enumerate(reader.dataset_iterator(data, batch_size, num_steps)):
cost, state = session.run([m.cost, m.final_state], {
m.input_data: x,
m.targets: y,
m.initial_state: state
})
costs += cost
iters += 1
return costs / iters
def main(print):
''' Loads trained model and evaluates it on test split '''
if FLAGS.load_model_for_test is None:
print('Please specify checkpoint file to load model from')
return -1
if not os.path.exists(FLAGS.load_model_for_test + ".index"):
print('Checkpoint file not found', FLAGS.load_model_for_test)
return -1
word_vocab, char_vocab, word_tensors, char_tensors, max_word_length, words_list = \
load_data(FLAGS.data_dir, FLAGS.max_word_length, eos=FLAGS.EOS)
test_reader = DataReader(word_tensors['test'], char_tensors['test'],
FLAGS.batch_size, FLAGS.num_unroll_steps)
fasttext_model_path = None
if FLAGS.fasttext_model_path:
fasttext_model_path = FLAGS.fasttext_model_path
if 'fasttext' in FLAGS.embedding:
fasttext_model = FasttextModel(fasttext_path=fasttext_model_path).get_fasttext_model()
test_ft_reader = DataReaderFastText(words_list=words_list, batch_size=FLAGS.batch_size,
num_unroll_steps=FLAGS.num_unroll_steps,
model=fasttext_model,
data='test')
print('initialized test dataset reader')
with tf.Graph().as_default(), tf.Session() as session:
# tensorflow seed must be inside graph
tf.set_random_seed(FLAGS.seed)
np.random.seed(seed=FLAGS.seed)
''' build inference graph '''
with tf.variable_scope("Model"):
m = model.inference_graph(
char_vocab_size=char_vocab.size,
word_vocab_size=word_vocab.size,
char_embed_size=FLAGS.char_embed_size,
batch_size=FLAGS.batch_size,
num_highway_layers=FLAGS.highway_layers,
num_rnn_layers=FLAGS.rnn_layers,
rnn_size=FLAGS.rnn_size,
max_word_length=max_word_length,
kernels=eval(FLAGS.kernels),
kernel_features=eval(FLAGS.kernel_features),
num_unroll_steps=FLAGS.num_unroll_steps,
dropout=0,
embedding=FLAGS.embedding,
fasttext_word_dim=300,
acoustic_features_dim=4)
m.update(model.loss_graph(m.logits, FLAGS.batch_size, FLAGS.num_unroll_steps))
global_step = tf.Variable(0, dtype=tf.int32, name='global_step')
saver = tf.train.Saver()
saver.restore(session, FLAGS.load_model_for_test)
print('Loaded model from' + str(FLAGS.load_model_for_test) + 'saved at global step' + str(global_step.eval()))
''' training starts here '''
rnn_state = session.run(m.initial_rnn_state)
count = 0
avg_loss = 0
start_time = time.time()
for batch_kim, batch_ft in zip(test_reader.iter(), test_ft_reader.iter()):
count += 1
x, y = batch_kim
loss, rnn_state, logits = session.run([
m.loss,
m.final_rnn_state,
m.logits
], {
m.input2: batch_ft,
m.input: x,
m.targets: y,
m.initial_rnn_state: rnn_state
})
avg_loss += loss
avg_loss /= count
time_elapsed = time.time() - start_time
print("test loss = %6.8f, perplexity = %6.8f" % (avg_loss, np.exp(avg_loss)))
print("test samples:" + str( count*FLAGS.batch_size) + "time elapsed:" + str( time_elapsed) + "time per one batch:" +str(time_elapsed/count))
save_data_to_csv(avg_loss, count, time_elapsed)
if __name__ == "__main__":
tf.app.run()
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
... | 2.003055 | 2,291 |
# This example demonstrates the use of the get_all_boards function
import py8chan
if __name__ == '__main__':
main()
| [
2,
770,
1672,
15687,
262,
779,
286,
262,
651,
62,
439,
62,
12821,
2163,
198,
11748,
12972,
23,
3147,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 3.184211 | 38 |
fim = 1
vgremio = 0
vinter = 0
empate = 0
final = 0
soma = 0
while (fim == 1):
inter, gremio = map(int, input().split())
soma += 1
if inter > gremio:
vinter = vinter + 1
elif gremio > inter:
vgremio = vgremio + 1
elif inter == gremio:
empate += 1
print("Novo grenal (1-sim 2-nao)")
fim = int(input())
print(soma,"grenais")
print("Inter:%d" %(vinter))
print("Gremio:%d" %(vgremio))
print("Empates:%d" %(empate))
if vinter > vgremio:
print("Inter venceu mais")
elif vgremio > vinter:
print("Gremio venceu mais")
else:
print("Nao houve vencedor")
| [
69,
320,
796,
352,
198,
45119,
2787,
952,
796,
657,
198,
85,
3849,
796,
657,
198,
45787,
378,
796,
657,
198,
20311,
796,
657,
198,
82,
6086,
796,
657,
198,
4514,
357,
69,
320,
6624,
352,
2599,
198,
220,
220,
220,
987,
11,
308,
2... | 1.942424 | 330 |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="ENXEASY",
version="1.0",
author="Pooyan Nayyeri",
author_email="pnnayyeri@gmail.com",
description="ENX EASY absolute rotary encoders library for Raspberry Pi.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pnnayyeri/ENXEASY",
packages=setuptools.find_packages(),
install_requires=['RPi.GPIO', 'graycode'],
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
],
keywords = [
"raspberrypi",
"encoder",
"gpio",
"absolute",
"rotary"
]
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
... | 2.427126 | 494 |
from dicom_parser.utils.sequence_detector.sequences.mr.dwi.dwi import \
MR_DIFFUSION_SEQUENCES
| [
6738,
288,
291,
296,
62,
48610,
13,
26791,
13,
43167,
62,
15255,
9250,
13,
3107,
3007,
13,
43395,
13,
67,
37686,
13,
67,
37686,
1330,
3467,
198,
220,
220,
220,
17242,
62,
35,
29267,
2937,
2849,
62,
5188,
10917,
24181,
1546,
198
] | 2.357143 | 42 |
# Ported from square/wire:
# wire-library/wire-schema/src/commonMain/kotlin/com/squareup/wire/schema/internal/parser/TypeElement.kt
from dataclasses import dataclass
from karapace.protobuf.location import Location
from typing import List, TYPE_CHECKING
if TYPE_CHECKING:
from karapace.protobuf.option_element import OptionElement
@dataclass
| [
2,
4347,
276,
422,
6616,
14,
21809,
25,
198,
2,
6503,
12,
32016,
14,
21809,
12,
15952,
2611,
14,
10677,
14,
11321,
13383,
14,
74,
313,
2815,
14,
785,
14,
23415,
929,
14,
21809,
14,
15952,
2611,
14,
32538,
14,
48610,
14,
6030,
2018... | 3.052632 | 114 |
import sys
import numpy as np
import os.path as osp
from functools import partial
from copy import copy as _copy, deepcopy as _deepcopy
from .apply import check_and_convert, sparse_apply
from .io import load_npz
| [
11748,
25064,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
201,
198,
6738,
1257,
310,
10141,
1330,
13027,
201,
198,
6738,
4866,
1330,
4866,
355,
4808,
30073,
11,
2769,
30073,
355,
4808,
2208... | 3.013514 | 74 |
# -*- coding: utf-8 -*-
import os.path
import PyQt4.uic
__all__ = ['loadUi']
def loadUi(modpath, widget):
"""
Uses the PyQt4.uic.loadUI method to load the inputed ui file associated
with the given module path and widget class information on the inputed
widget.
:param modpath | str
:param widget | QWidget
"""
# generate the uifile path
basepath = os.path.dirname(modpath)
basename = widget.__class__.__name__.lower()
uifile = os.path.join(basepath, 'ui/%s.ui' % basename)
# load the ui
PyQt4.uic.loadUi(uifile, widget)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
13,
6978,
198,
198,
11748,
9485,
48,
83,
19,
13,
84,
291,
198,
198,
834,
439,
834,
796,
37250,
2220,
52,
72,
20520,
628,
198,
4299,
3440,
52,
72,
7,
... | 2.346614 | 251 |
import datetime
import warnings
from functools import update_wrapper
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, cast
from dagster import check
from dagster.core.definitions.partition import (
PartitionScheduleDefinition,
PartitionSetDefinition,
ScheduleType,
TimeBasedPartitionParams,
)
from dagster.core.definitions.pipeline import PipelineDefinition
from dagster.core.errors import DagsterInvalidDefinitionError
from dagster.utils.partitions import (
DEFAULT_DATE_FORMAT,
DEFAULT_HOURLY_FORMAT_WITHOUT_TIMEZONE,
DEFAULT_HOURLY_FORMAT_WITH_TIMEZONE,
DEFAULT_MONTHLY_FORMAT,
create_offset_partition_selector,
)
from ..mode import DEFAULT_MODE_NAME
from ..schedule import ScheduleDefinition
if TYPE_CHECKING:
from dagster import ScheduleExecutionContext, Partition
# Error messages are long
# pylint: disable=C0301
def schedule(
cron_schedule: str,
pipeline_name: Optional[str] = None,
name: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
tags_fn: Optional[Callable[["ScheduleExecutionContext"], Optional[Dict[str, str]]]] = None,
solid_selection: Optional[List[str]] = None,
mode: Optional[str] = "default",
should_execute: Optional[Callable[["ScheduleExecutionContext"], bool]] = None,
environment_vars: Optional[Dict[str, str]] = None,
execution_timezone: Optional[str] = None,
description: Optional[str] = None,
job: Optional[PipelineDefinition] = None,
) -> Callable[[Callable[["ScheduleExecutionContext"], Dict[str, Any]]], ScheduleDefinition]:
"""Create a schedule.
The decorated function will be called as the ``run_config_fn`` of the underlying
:py:class:`~dagster.ScheduleDefinition` and should take a
:py:class:`~dagster.ScheduleExecutionContext` as its only argument, returning the run config
for the scheduled execution.
Args:
cron_schedule (str): A valid cron string specifying when the schedule will run, e.g.,
``'45 23 * * 6'`` for a schedule that runs at 11:45 PM every Saturday.
pipeline_name (str): The name of the pipeline to execute when the schedule runs.
name (Optional[str]): The name of the schedule to create.
tags (Optional[Dict[str, str]]): A dictionary of tags (string key-value pairs) to attach
to the scheduled runs.
tags_fn (Optional[Callable[[ScheduleExecutionContext], Optional[Dict[str, str]]]]): A function
that generates tags to attach to the schedules runs. Takes a
:py:class:`~dagster.ScheduleExecutionContext` and returns a dictionary of tags (string
key-value pairs). You may set only one of ``tags`` and ``tags_fn``.
solid_selection (Optional[List[str]]): A list of solid subselection (including single
solid names) to execute when the schedule runs. e.g. ``['*some_solid+', 'other_solid']``
mode (Optional[str]): The pipeline mode in which to execute this schedule.
(Default: 'default')
should_execute (Optional[Callable[[ScheduleExecutionContext], bool]]): A function that runs at
schedule execution tie to determine whether a schedule should execute or skip. Takes a
:py:class:`~dagster.ScheduleExecutionContext` and returns a boolean (``True`` if the
schedule should execute). Defaults to a function that always returns ``True``.
environment_vars (Optional[Dict[str, str]]): Any environment variables to set when executing
the schedule.
execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works
with DagsterDaemonScheduler, and must be set when using that scheduler.
description (Optional[str]): A human-readable description of the schedule.
job (Optional[PipelineDefinition]): Experimental
"""
return inner
def monthly_schedule(
pipeline_name: Optional[str],
start_date: datetime.datetime,
name: Optional[str] = None,
execution_day_of_month: int = 1,
execution_time: datetime.time = datetime.time(0, 0),
tags_fn_for_date: Optional[Callable[[datetime.datetime], Optional[Dict[str, str]]]] = None,
solid_selection: Optional[List[str]] = None,
mode: Optional[str] = "default",
should_execute: Optional[Callable[["ScheduleExecutionContext"], bool]] = None,
environment_vars: Optional[Dict[str, str]] = None,
end_date: Optional[datetime.datetime] = None,
execution_timezone: Optional[str] = None,
partition_months_offset: Optional[int] = 1,
description: Optional[str] = None,
job: Optional[PipelineDefinition] = None,
) -> Callable[[Callable[[datetime.datetime], Dict[str, Any]]], PartitionScheduleDefinition]:
"""Create a partitioned schedule that runs monthly.
The decorated function should accept a datetime object as its only argument. The datetime
represents the date partition that it's meant to run on.
The decorated function should return a run configuration dictionary, which will be used as
configuration for the scheduled run.
The decorator produces a :py:class:`~dagster.PartitionScheduleDefinition`.
Args:
pipeline_name (str): The name of the pipeline to execute when the schedule runs.
start_date (datetime.datetime): The date from which to run the schedule.
name (Optional[str]): The name of the schedule to create.
execution_day_of_month (int): The day of the month on which to run the schedule (must be
between 1 and 31).
execution_time (datetime.time): The time at which to execute the schedule.
tags_fn_for_date (Optional[Callable[[datetime.datetime], Optional[Dict[str, str]]]]): A
function that generates tags to attach to the schedules runs. Takes the date of the
schedule run and returns a dictionary of tags (string key-value pairs).
solid_selection (Optional[List[str]]): A list of solid subselection (including single
solid names) to execute when the schedule runs. e.g. ``['*some_solid+', 'other_solid']``
mode (Optional[str]): The pipeline mode in which to execute this schedule.
(Default: 'default')
should_execute (Optional[Callable[ScheduleExecutionContext, bool]]): A function that runs at
schedule execution tie to determine whether a schedule should execute or skip. Takes a
:py:class:`~dagster.ScheduleExecutionContext` and returns a boolean (``True`` if the
schedule should execute). Defaults to a function that always returns ``True``.
environment_vars (Optional[Dict[str, str]]): Any environment variables to set when executing
the schedule.
end_date (Optional[datetime.datetime]): The last time to run the schedule to, defaults to
current time.
execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works
with DagsterDaemonScheduler, and must be set when using that scheduler.
partition_months_offset (Optional[int]): How many months back to go when choosing the partition
for a given schedule execution. For example, when partition_months_offset=1, the schedule
that executes during month N will fill in the partition for month N-1.
(Default: 1)
description (Optional[str]): A human-readable description of the schedule.
job (Optional[PipelineDefinition]): Experimental
"""
check.opt_str_param(name, "name")
check.inst_param(start_date, "start_date", datetime.datetime)
check.opt_inst_param(end_date, "end_date", datetime.datetime)
check.opt_callable_param(tags_fn_for_date, "tags_fn_for_date")
check.opt_nullable_list_param(solid_selection, "solid_selection", of_type=str)
mode = check.opt_str_param(mode, "mode", DEFAULT_MODE_NAME)
check.opt_callable_param(should_execute, "should_execute")
check.opt_dict_param(environment_vars, "environment_vars", key_type=str, value_type=str)
check.opt_str_param(pipeline_name, "pipeline_name")
check.int_param(execution_day_of_month, "execution_day")
check.inst_param(execution_time, "execution_time", datetime.time)
check.opt_str_param(execution_timezone, "execution_timezone")
check.opt_int_param(partition_months_offset, "partition_months_offset")
check.opt_str_param(description, "description")
if (
start_date.day != 1
or start_date.hour != 0
or start_date.minute != 0
or start_date.second != 0
):
warnings.warn(
"`start_date` must be at the beginning of the first day of the month for a monthly "
"schedule. Use `execution_day_of_month` and `execution_time` to execute the schedule "
"at a specific time within the month. For example, to run the schedule at 3AM on the "
"23rd of each month starting in October, your schedule definition would look like:"
"""
@monthly_schedule(
start_date=datetime.datetime(2020, 10, 1),
execution_day_of_month=23,
execution_time=datetime.time(3, 0)
):
def my_schedule_definition(_):
...
"""
)
if execution_day_of_month <= 0 or execution_day_of_month > 31:
raise DagsterInvalidDefinitionError(
"`execution_day_of_month={}` is not valid for monthly schedule. Execution day must be "
"between 1 and 31".format(execution_day_of_month)
)
return inner
def weekly_schedule(
pipeline_name: Optional[str],
start_date: datetime.datetime,
name: Optional[str] = None,
execution_day_of_week: int = 0,
execution_time: datetime.time = datetime.time(0, 0),
tags_fn_for_date: Optional[Callable[[datetime.datetime], Optional[Dict[str, str]]]] = None,
solid_selection: Optional[List[str]] = None,
mode: Optional[str] = "default",
should_execute: Optional[Callable[["ScheduleExecutionContext"], bool]] = None,
environment_vars: Optional[Dict[str, str]] = None,
end_date: Optional[datetime.datetime] = None,
execution_timezone: Optional[str] = None,
partition_weeks_offset: Optional[int] = 1,
description: Optional[str] = None,
job: Optional[PipelineDefinition] = None,
) -> Callable[[Callable[[datetime.datetime], Dict[str, Any]]], PartitionScheduleDefinition]:
"""Create a partitioned schedule that runs daily.
The decorated function should accept a datetime object as its only argument. The datetime
represents the date partition that it's meant to run on.
The decorated function should return a run configuration dictionary, which will be used as
configuration for the scheduled run.
The decorator produces a :py:class:`~dagster.PartitionScheduleDefinition`.
Args:
pipeline_name (str): The name of the pipeline to execute when the schedule runs.
start_date (datetime.datetime): The date from which to run the schedule.
name (Optional[str]): The name of the schedule to create.
execution_day_of_week (int): The day of the week on which to run the schedule. Must be
between 0 (Sunday) and 6 (Saturday).
execution_time (datetime.time): The time at which to execute the schedule.
tags_fn_for_date (Optional[Callable[[datetime.datetime], Optional[Dict[str, str]]]]): A
function that generates tags to attach to the schedules runs. Takes the date of the
schedule run and returns a dictionary of tags (string key-value pairs).
solid_selection (Optional[List[str]]): A list of solid subselection (including single
solid names) to execute when the schedule runs. e.g. ``['*some_solid+', 'other_solid']``
mode (Optional[str]): The pipeline mode in which to execute this schedule.
(Default: 'default')
should_execute (Optional[Callable[ScheduleExecutionContext, bool]]): A function that runs at
schedule execution tie to determine whether a schedule should execute or skip. Takes a
:py:class:`~dagster.ScheduleExecutionContext` and returns a boolean (``True`` if the
schedule should execute). Defaults to a function that always returns ``True``.
environment_vars (Optional[Dict[str, str]]): Any environment variables to set when executing
the schedule.
end_date (Optional[datetime.datetime]): The last time to run the schedule to, defaults to
current time.
execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works
with DagsterDaemonScheduler, and must be set when using that scheduler.
partition_weeks_offset (Optional[int]): How many weeks back to go when choosing the partition
for a given schedule execution. For example, when partition_weeks_offset=1, the schedule
that executes during week N will fill in the partition for week N-1.
(Default: 1)
description (Optional[str]): A human-readable description of the schedule.
job (Optional[PipelineDefinition]): Experimental
"""
check.opt_str_param(name, "name")
check.inst_param(start_date, "start_date", datetime.datetime)
check.opt_inst_param(end_date, "end_date", datetime.datetime)
check.opt_callable_param(tags_fn_for_date, "tags_fn_for_date")
check.opt_nullable_list_param(solid_selection, "solid_selection", of_type=str)
mode = check.opt_str_param(mode, "mode", DEFAULT_MODE_NAME)
check.opt_callable_param(should_execute, "should_execute")
check.opt_dict_param(environment_vars, "environment_vars", key_type=str, value_type=str)
check.opt_str_param(pipeline_name, "pipeline_name")
check.int_param(execution_day_of_week, "execution_day_of_week")
check.inst_param(execution_time, "execution_time", datetime.time)
check.opt_str_param(execution_timezone, "execution_timezone")
check.opt_int_param(partition_weeks_offset, "partition_weeks_offset")
check.opt_str_param(description, "description")
if start_date.hour != 0 or start_date.minute != 0 or start_date.second != 0:
warnings.warn(
"`start_date` must be at the beginning of a day for a weekly schedule. "
"Use `execution_time` to execute the schedule at a specific time of day. For example, "
"to run the schedule at 3AM each Tuesday starting on 10/20/2020, your schedule "
"definition would look like:"
"""
@weekly_schedule(
start_date=datetime.datetime(2020, 10, 20),
execution_day_of_week=1,
execution_time=datetime.time(3, 0)
):
def my_schedule_definition(_):
...
"""
)
if execution_day_of_week < 0 or execution_day_of_week >= 7:
raise DagsterInvalidDefinitionError(
"`execution_day_of_week={}` is not valid for weekly schedule. Execution day must be "
"between 0 [Sunday] and 6 [Saturday]".format(execution_day_of_week)
)
return inner
def daily_schedule(
pipeline_name: Optional[str],
start_date: datetime.datetime,
name: Optional[str] = None,
execution_time: datetime.time = datetime.time(0, 0),
tags_fn_for_date: Optional[Callable[[datetime.datetime], Optional[Dict[str, str]]]] = None,
solid_selection: Optional[List[str]] = None,
mode: Optional[str] = "default",
should_execute: Optional[Callable[["ScheduleExecutionContext"], bool]] = None,
environment_vars: Optional[Dict[str, str]] = None,
end_date: Optional[datetime.datetime] = None,
execution_timezone: Optional[str] = None,
partition_days_offset: Optional[int] = 1,
description: Optional[str] = None,
job: Optional[PipelineDefinition] = None,
) -> Callable[[Callable[[datetime.datetime], Dict[str, Any]]], PartitionScheduleDefinition]:
"""Create a partitioned schedule that runs daily.
The decorated function should accept a datetime object as its only argument. The datetime
represents the date partition that it's meant to run on.
The decorated function should return a run configuration dictionary, which will be used as
configuration for the scheduled run.
The decorator produces a :py:class:`~dagster.PartitionScheduleDefinition`.
Args:
pipeline_name (str): The name of the pipeline to execute when the schedule runs.
start_date (datetime.datetime): The date from which to run the schedule.
name (Optional[str]): The name of the schedule to create.
execution_time (datetime.time): The time at which to execute the schedule.
tags_fn_for_date (Optional[Callable[[datetime.datetime], Optional[Dict[str, str]]]]): A
function that generates tags to attach to the schedules runs. Takes the date of the
schedule run and returns a dictionary of tags (string key-value pairs).
solid_selection (Optional[List[str]]): A list of solid subselection (including single
solid names) to execute when the schedule runs. e.g. ``['*some_solid+', 'other_solid']``
mode (Optional[str]): The pipeline mode in which to execute this schedule.
(Default: 'default')
should_execute (Optional[Callable[ScheduleExecutionContext, bool]]): A function that runs at
schedule execution tie to determine whether a schedule should execute or skip. Takes a
:py:class:`~dagster.ScheduleExecutionContext` and returns a boolean (``True`` if the
schedule should execute). Defaults to a function that always returns ``True``.
environment_vars (Optional[Dict[str, str]]): Any environment variables to set when executing
the schedule.
end_date (Optional[datetime.datetime]): The last time to run the schedule to, defaults to
current time.
execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works
with DagsterDaemonScheduler, and must be set when using that scheduler.
partition_days_offset (Optional[int]): How many days back to go when choosing the partition
for a given schedule execution. For example, when partition_days_offset=1, the schedule
that executes during day N will fill in the partition for day N-1.
(Default: 1)
description (Optional[str]): A human-readable description of the schedule.
job (Optional[PipelineDefinition]): Experimental
"""
check.opt_str_param(pipeline_name, "pipeline_name")
check.inst_param(start_date, "start_date", datetime.datetime)
check.opt_str_param(name, "name")
check.inst_param(execution_time, "execution_time", datetime.time)
check.opt_inst_param(end_date, "end_date", datetime.datetime)
check.opt_callable_param(tags_fn_for_date, "tags_fn_for_date")
check.opt_nullable_list_param(solid_selection, "solid_selection", of_type=str)
mode = check.opt_str_param(mode, "mode", DEFAULT_MODE_NAME)
check.opt_callable_param(should_execute, "should_execute")
check.opt_dict_param(environment_vars, "environment_vars", key_type=str, value_type=str)
check.opt_str_param(execution_timezone, "execution_timezone")
check.opt_int_param(partition_days_offset, "partition_days_offset")
check.opt_str_param(description, "description")
if start_date.hour != 0 or start_date.minute != 0 or start_date.second != 0:
warnings.warn(
"`start_date` must be at the beginning of a day for a daily schedule. "
"Use `execution_time` to execute the schedule at a specific time of day. For example, "
"to run the schedule at 3AM each day starting on 10/20/2020, your schedule "
"definition would look like:"
"""
@daily_schedule(
start_date=datetime.datetime(2020, 10, 20),
execution_time=datetime.time(3, 0)
):
def my_schedule_definition(_):
...
"""
)
fmt = DEFAULT_DATE_FORMAT
return inner
def hourly_schedule(
pipeline_name: Optional[str],
start_date: datetime.datetime,
name: Optional[str] = None,
execution_time: datetime.time = datetime.time(0, 0),
tags_fn_for_date: Optional[Callable[[datetime.datetime], Optional[Dict[str, str]]]] = None,
solid_selection: Optional[List[str]] = None,
mode: Optional[str] = "default",
should_execute: Optional[Callable[["ScheduleExecutionContext"], bool]] = None,
environment_vars: Optional[Dict[str, str]] = None,
end_date: Optional[datetime.datetime] = None,
execution_timezone: Optional[str] = None,
partition_hours_offset: Optional[int] = 1,
description: Optional[str] = None,
job: Optional[PipelineDefinition] = None,
) -> Callable[[Callable[[datetime.datetime], Dict[str, Any]]], PartitionScheduleDefinition]:
"""Create a partitioned schedule that runs hourly.
The decorated function should accept a datetime object as its only argument. The datetime
represents the date partition that it's meant to run on.
The decorated function should return a run configuration dictionary, which will be used as
configuration for the scheduled run.
The decorator produces a :py:class:`~dagster.PartitionScheduleDefinition`.
Args:
pipeline_name (str): The name of the pipeline to execute when the schedule runs.
start_date (datetime.datetime): The date from which to run the schedule.
name (Optional[str]): The name of the schedule to create. By default, this will be the name
of the decorated function.
execution_time (datetime.time): The time at which to execute the schedule. Only the minutes
component will be respected -- the hour should be 0, and will be ignored if it is not 0.
tags_fn_for_date (Optional[Callable[[datetime.datetime], Optional[Dict[str, str]]]]): A
function that generates tags to attach to the schedules runs. Takes the date of the
schedule run and returns a dictionary of tags (string key-value pairs).
solid_selection (Optional[List[str]]): A list of solid subselection (including single
solid names) to execute when the schedule runs. e.g. ``['*some_solid+', 'other_solid']``
mode (Optional[str]): The pipeline mode in which to execute this schedule.
(Default: 'default')
should_execute (Optional[Callable[ScheduleExecutionContext, bool]]): A function that runs at
schedule execution tie to determine whether a schedule should execute or skip. Takes a
:py:class:`~dagster.ScheduleExecutionContext` and returns a boolean (``True`` if the
schedule should execute). Defaults to a function that always returns ``True``.
environment_vars (Optional[Dict[str, str]]): Any environment variables to set when executing
the schedule.
end_date (Optional[datetime.datetime]): The last time to run the schedule to, defaults to
current time.
execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works
with DagsterDaemonScheduler, and must be set when using that scheduler.
partition_hours_offset (Optional[int]): How many hours back to go when choosing the partition
for a given schedule execution. For example, when partition_hours_offset=1, the schedule
that executes during hour N will fill in the partition for hour N-1.
(Default: 1)
description (Optional[str]): A human-readable description of the schedule.
job (Optional[PipelineDefinition]): Experimental
"""
check.opt_str_param(name, "name")
check.inst_param(start_date, "start_date", datetime.datetime)
check.opt_inst_param(end_date, "end_date", datetime.datetime)
check.opt_callable_param(tags_fn_for_date, "tags_fn_for_date")
check.opt_nullable_list_param(solid_selection, "solid_selection", of_type=str)
mode = check.opt_str_param(mode, "mode", DEFAULT_MODE_NAME)
check.opt_callable_param(should_execute, "should_execute")
check.opt_dict_param(environment_vars, "environment_vars", key_type=str, value_type=str)
check.opt_str_param(pipeline_name, "pipeline_name")
check.inst_param(execution_time, "execution_time", datetime.time)
check.opt_str_param(execution_timezone, "execution_timezone")
check.opt_int_param(partition_hours_offset, "partition_hours_offset")
check.opt_str_param(description, "description")
if start_date.minute != 0 or start_date.second != 0:
warnings.warn(
"`start_date` must be at the beginning of the hour for an hourly schedule. "
"Use `execution_time` to execute the schedule at a specific time within the hour. For "
"example, to run the schedule each hour at 15 minutes past the hour starting at 3AM "
"on 10/20/2020, your schedule definition would look like:"
"""
@hourly_schedule(
start_date=datetime.datetime(2020, 10, 20, 3),
execution_time=datetime.time(0, 15)
):
def my_schedule_definition(_):
...
"""
)
if execution_time.hour != 0:
warnings.warn(
"Hourly schedule {schedule_name} created with:\n"
"\tschedule_time=datetime.time(hour={hour}, minute={minute}, ...)."
"Since this is an hourly schedule, the hour parameter will be ignored and the schedule "
"will run on the {minute} mark for the previous hour interval. Replace "
"datetime.time(hour={hour}, minute={minute}, ...) with "
"datetime.time(minute={minute}, ...) to fix this warning."
)
return inner
| [
11748,
4818,
8079,
198,
11748,
14601,
198,
6738,
1257,
310,
10141,
1330,
4296,
62,
48553,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
11,
4377,
11,
4889,
540,
11,
360,
713,
11,
7343,
11,
32233,
11,
3350,
198,
198,
6738,
48924,
170... | 2.834717 | 9,033 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# Teslagrad. Прохождение игры на 100%. Карта расположения и изображения свитков (Сайт GamesisArt.ru)
import os
from urllib.parse import urljoin
import requests
# Cache
if not os.path.exists('scrolls.html'):
rs = requests.get('http://gamesisart.ru/guide/Teslagrad_Prohozhdenie_4.html#Scrolls')
html = rs.content
with open('scrolls.html', 'wb') as f:
f.write(html)
else:
html = open('scrolls.html', 'rb').read()
URL = 'http://gamesisart.ru/guide/Teslagrad_Prohozhdenie_4.html#Scrolls'
DIR_SCROLLS = 'scrolls'
from bs4 import BeautifulSoup
root = BeautifulSoup(html, 'html.parser')
img_urls = [img['src'] for img in root.select('img[src]')]
img_urls = [urljoin(URL, url_img) for url_img in img_urls if '/Teslagrad_Scroll_' in url_img]
print(len(img_urls), img_urls)
if not os.path.exists(DIR_SCROLLS):
os.mkdir(DIR_SCROLLS)
# Save images
for url in img_urls:
rs = requests.get(url)
img_data = rs.content
file_name = DIR_SCROLLS + '/' + os.path.basename(url)
with open(file_name, 'wb') as f:
f.write(img_data)
# Merge all image into one
IMAGE_WIDTH = 200
IMAGE_HEIGHT = 376
ROWS = 9
COLS = 4
SCROOLS_WIDTH = IMAGE_WIDTH * COLS
SCROOLS_HEIGHT = IMAGE_HEIGHT * ROWS
from PIL import Image
image = Image.new('RGB', (SCROOLS_WIDTH, SCROOLS_HEIGHT))
import glob
file_names = glob.glob('scrolls/*.jpg')
# Sort by <number>: Teslagrad_Scroll_<number>.jpg'
file_names.sort(key=lambda x: int(x.split('.')[0].split('_')[-1]))
it = iter(file_names)
for y in range(0, SCROOLS_HEIGHT, IMAGE_HEIGHT):
for x in range(0, SCROOLS_WIDTH, IMAGE_WIDTH):
file_name = next(it)
img = Image.open(file_name)
image.paste(img, (x, y))
image.save('scrolls.jpg')
image.show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
834,
9800,
834,
796,
705,
541,
21879,
1077,
6,
628,
198,
2,
10696,
30909,
6335,
13,
12466,
253,
21169,
15166... | 2.149112 | 845 |
from django.core.management import BaseCommand
from django.db import ProgrammingError
from constants.jobs import JobLifeCycle
from db.models.tensorboards import TensorboardJob
from scheduler import tensorboard_scheduler
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
1330,
7308,
21575,
198,
6738,
42625,
14208,
13,
9945,
1330,
30297,
12331,
198,
198,
6738,
38491,
13,
43863,
1330,
15768,
14662,
20418,
2375,
198,
6738,
20613,
13,
27530,
13,
83,
22854,
12821,
1330,... | 3.894737 | 57 |
from torch.optim.lr_scheduler import *
from face_recognition.model import *
from face_recognition.utils.utils import *
from torch import nn
import pytorch_lightning as pl
from torchmetrics.functional.classification.accuracy import accuracy
# will be used during inference
| [
6738,
28034,
13,
40085,
13,
14050,
62,
1416,
704,
18173,
1330,
1635,
198,
6738,
1986,
62,
26243,
653,
13,
19849,
1330,
1635,
198,
6738,
1986,
62,
26243,
653,
13,
26791,
13,
26791,
1330,
1635,
198,
6738,
28034,
1330,
299,
77,
198,
1174... | 3.61039 | 77 |
import json
from os.path import getmtime
from threading import Thread
from time import sleep
from pyswip import Prolog
from telegram.bot import Bot
from telegram.ext import CommandHandler
from telegram.ext import MessageHandler, Filters
from telegram.ext import Updater
from telegram.utils.request import Request
from handlers import start, summarize
from textProcessing import WordProcessing
| [
11748,
33918,
201,
198,
6738,
28686,
13,
6978,
1330,
651,
76,
2435,
201,
198,
6738,
4704,
278,
1330,
14122,
201,
198,
6738,
640,
1330,
3993,
201,
198,
201,
198,
6738,
279,
893,
86,
541,
1330,
1041,
6404,
201,
198,
6738,
573,
30536,
... | 3.529915 | 117 |
import requests
import json
from lxml.html import fromstring
from chp6.login import login, parse_form
COUNTRY_OR_DISTRICT_URL = 'http://example.python-scraping.com/edit/United-Kingdom-233'
VIEW_URL = 'http://example.python-scraping.com/view/United-Kingdom-233'
if __name__ == '__main__':
add_population()
| [
11748,
7007,
198,
11748,
33918,
198,
6738,
300,
19875,
13,
6494,
1330,
422,
8841,
198,
6738,
442,
79,
21,
13,
38235,
1330,
17594,
11,
21136,
62,
687,
198,
198,
34,
19385,
40405,
62,
1581,
62,
26288,
5446,
18379,
62,
21886,
796,
705,
... | 2.79646 | 113 |
"""
Train an initial model to compute non-IID client datasets based on the latent representations of samples.
"""
import numpy as np
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from torchvision.models import vgg11_bn
from tqdm import tqdm
from federated.configs import cfg_fl as cfg
from federated.network import BaseConvNet, adjust_model_layers
# Embedding helper functions
def compute_embeddings(net, dataset, args, split):
"""
Actually compute embeddings given a dataset
"""
total_embeddings = []
total = 0
correct = 0
dataloader = DataLoader(dataset, batch_size=args.bs_val,
shuffle=False, num_workers=args.num_workers)
net.eval()
net.to(args.device)
save_output = SaveOutput()
hook_handles = []
for layer in net.modules():
# if isinstance(layer, torch.nn.AdaptiveAvgPool2d):
# handle = layer.register_forward_hook(save_output)
# hook_handles.append(handle)
if isinstance(layer, torch.nn.Linear):
handle = layer.register_forward_hook(save_output)
hook_handles.append(handle)
with torch.no_grad():
for i, data in enumerate(tqdm(dataloader,
desc=f'Computing embeddings ({split})')):
inputs, labels = data
inputs = inputs.to(args.device)
outputs = net(inputs)
# embeddings = net.embed(inputs)
# total_embeddings.append(embeddings.detach().cpu().numpy())
# Compute classification accuracy of setup model
_, predicted = torch.max(outputs.data, 1)
total += labels.shape[0]
correct += (predicted.cpu() == labels).sum().item()
total_embeddings = [None] * len(save_output.outputs)
for ix, output in enumerate(save_output.outputs):
total_embeddings[ix] = output.detach().cpu().numpy().squeeze()
# total_embeddings = [e.flatten() for e in total_embeddings]
n_samples = len(dataset.targets)
total_embeddings_fc1 = np.stack(total_embeddings[0::3]).reshape((n_samples, -1))
total_embeddings_fc2 = np.stack(total_embeddings[1::3]).reshape((n_samples, -1))
total_embeddings_fc3 = np.stack(total_embeddings[2::3]).reshape((n_samples, -1))
num_samples = len(dataset.targets)
total_embeddings_fc1
print(total_embeddings_fc1.shape)
print(total_embeddings_fc2.shape)
print(total_embeddings_fc3.shape)
print(f'Latent distribution setup model accuracy: {100 * correct / total:<.2f}%')
# total_embeddings = np.concatenate(total_embeddings)
return total_embeddings_fc1, total_embeddings_fc2, total_embeddings_fc3
| [
37811,
198,
44077,
281,
4238,
2746,
284,
24061,
1729,
12,
40,
2389,
5456,
40522,
1912,
319,
262,
41270,
24612,
286,
8405,
13,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2298,
293,
198,
198,
11748,
28034,
198,
11748,
280... | 2.212396 | 1,323 |
A, B, C, D = input().split(" ")
n1, n2, n3, n4 = float(A), float(B), float(C), float(D)
estado = str("")
media_final = False
teste = 0
media = (n1 * 2 + n2 * 3 + n3 * 4 + n4 * 1) / (2 + 3 + 4 + 1)
print(f"Media: {media:.1f}")
if media >= 7:
estado = "Aluno aprovado."
elif media < 5:
estado = "Aluno reprovado."
else:
media_final = True
estado = "Aluno em exame."
print(f"{estado}")
teste = float(input())
print(f"Nota do exame: {teste:.1f}")
media = (media + teste) / 2
if media >= 5:
estado = "Aluno aprovado."
else:
estado = "Aluno reprovado."
print(f"{estado}")
if media_final:
print(f"Media final: {media:.1f}")
| [
32,
11,
347,
11,
327,
11,
360,
796,
5128,
22446,
35312,
7203,
366,
8,
198,
77,
16,
11,
299,
17,
11,
299,
18,
11,
299,
19,
796,
12178,
7,
32,
828,
12178,
7,
33,
828,
12178,
7,
34,
828,
12178,
7,
35,
8,
198,
395,
4533,
796,
... | 2.083077 | 325 |
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Main suite tests for the Atom renderer.
"""
import logging
import os
import pytest
import editor_python_test_tools.hydra_test_utils as hydra
logger = logging.getLogger(__name__)
EDITOR_TIMEOUT = 300
TEST_DIRECTORY = os.path.join(os.path.dirname(__file__), "atom_hydra_scripts")
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
@pytest.mark.parametrize("level", ["auto_test"])
class TestAtomEditorComponentsMain(object):
"""Holds tests for Atom components."""
def test_AtomEditorComponents_AddedToEntity(self, request, editor, level, workspace, project, launcher_platform):
"""
Please review the hydra script run by this test for more specific test info.
Tests the following Atom components and verifies all "expected_lines" appear in Editor.log:
1. Display Mapper
2. Light
3. Radius Weight Modifier
4. PostFX Layer
5. Physical Sky
6. Global Skylight (IBL)
7. Exposure Control
8. Directional Light
9. DepthOfField
10. Decal (Atom)
"""
cfg_args = [level]
expected_lines = [
# Decal (Atom) Component
"Decal (Atom) Entity successfully created",
"Decal (Atom)_test: Component added to the entity: True",
"Decal (Atom)_test: Component removed after UNDO: True",
"Decal (Atom)_test: Component added after REDO: True",
"Decal (Atom)_test: Entered game mode: True",
"Decal (Atom)_test: Exit game mode: True",
"Decal (Atom) Controller|Configuration|Material: SUCCESS",
"Decal (Atom)_test: Entity is hidden: True",
"Decal (Atom)_test: Entity is shown: True",
"Decal (Atom)_test: Entity deleted: True",
"Decal (Atom)_test: UNDO entity deletion works: True",
"Decal (Atom)_test: REDO entity deletion works: True",
# DepthOfField Component
"DepthOfField Entity successfully created",
"DepthOfField_test: Component added to the entity: True",
"DepthOfField_test: Component removed after UNDO: True",
"DepthOfField_test: Component added after REDO: True",
"DepthOfField_test: Entered game mode: True",
"DepthOfField_test: Exit game mode: True",
"DepthOfField_test: Entity disabled initially: True",
"DepthOfField_test: Entity enabled after adding required components: True",
"DepthOfField Controller|Configuration|Camera Entity: SUCCESS",
"DepthOfField_test: Entity is hidden: True",
"DepthOfField_test: Entity is shown: True",
"DepthOfField_test: Entity deleted: True",
"DepthOfField_test: UNDO entity deletion works: True",
"DepthOfField_test: REDO entity deletion works: True",
# Exposure Control Component
"Exposure Control Entity successfully created",
"Exposure Control_test: Component added to the entity: True",
"Exposure Control_test: Component removed after UNDO: True",
"Exposure Control_test: Component added after REDO: True",
"Exposure Control_test: Entered game mode: True",
"Exposure Control_test: Exit game mode: True",
"Exposure Control_test: Entity disabled initially: True",
"Exposure Control_test: Entity enabled after adding required components: True",
"Exposure Control_test: Entity is hidden: True",
"Exposure Control_test: Entity is shown: True",
"Exposure Control_test: Entity deleted: True",
"Exposure Control_test: UNDO entity deletion works: True",
"Exposure Control_test: REDO entity deletion works: True",
# Global Skylight (IBL) Component
"Global Skylight (IBL) Entity successfully created",
"Global Skylight (IBL)_test: Component added to the entity: True",
"Global Skylight (IBL)_test: Component removed after UNDO: True",
"Global Skylight (IBL)_test: Component added after REDO: True",
"Global Skylight (IBL)_test: Entered game mode: True",
"Global Skylight (IBL)_test: Exit game mode: True",
"Global Skylight (IBL) Controller|Configuration|Diffuse Image: SUCCESS",
"Global Skylight (IBL) Controller|Configuration|Specular Image: SUCCESS",
"Global Skylight (IBL)_test: Entity is hidden: True",
"Global Skylight (IBL)_test: Entity is shown: True",
"Global Skylight (IBL)_test: Entity deleted: True",
"Global Skylight (IBL)_test: UNDO entity deletion works: True",
"Global Skylight (IBL)_test: REDO entity deletion works: True",
# Physical Sky Component
"Physical Sky Entity successfully created",
"Physical Sky component was added to entity",
"Entity has a Physical Sky component",
"Physical Sky_test: Component added to the entity: True",
"Physical Sky_test: Component removed after UNDO: True",
"Physical Sky_test: Component added after REDO: True",
"Physical Sky_test: Entered game mode: True",
"Physical Sky_test: Exit game mode: True",
"Physical Sky_test: Entity is hidden: True",
"Physical Sky_test: Entity is shown: True",
"Physical Sky_test: Entity deleted: True",
"Physical Sky_test: UNDO entity deletion works: True",
"Physical Sky_test: REDO entity deletion works: True",
# PostFX Layer Component
"PostFX Layer Entity successfully created",
"PostFX Layer_test: Component added to the entity: True",
"PostFX Layer_test: Component removed after UNDO: True",
"PostFX Layer_test: Component added after REDO: True",
"PostFX Layer_test: Entered game mode: True",
"PostFX Layer_test: Exit game mode: True",
"PostFX Layer_test: Entity is hidden: True",
"PostFX Layer_test: Entity is shown: True",
"PostFX Layer_test: Entity deleted: True",
"PostFX Layer_test: UNDO entity deletion works: True",
"PostFX Layer_test: REDO entity deletion works: True",
# Radius Weight Modifier Component
"Radius Weight Modifier Entity successfully created",
"Radius Weight Modifier_test: Component added to the entity: True",
"Radius Weight Modifier_test: Component removed after UNDO: True",
"Radius Weight Modifier_test: Component added after REDO: True",
"Radius Weight Modifier_test: Entered game mode: True",
"Radius Weight Modifier_test: Exit game mode: True",
"Radius Weight Modifier_test: Entity is hidden: True",
"Radius Weight Modifier_test: Entity is shown: True",
"Radius Weight Modifier_test: Entity deleted: True",
"Radius Weight Modifier_test: UNDO entity deletion works: True",
"Radius Weight Modifier_test: REDO entity deletion works: True",
# Light Component
"Light Entity successfully created",
"Light_test: Component added to the entity: True",
"Light_test: Component removed after UNDO: True",
"Light_test: Component added after REDO: True",
"Light_test: Entered game mode: True",
"Light_test: Exit game mode: True",
"Light_test: Entity is hidden: True",
"Light_test: Entity is shown: True",
"Light_test: Entity deleted: True",
"Light_test: UNDO entity deletion works: True",
"Light_test: REDO entity deletion works: True",
# Display Mapper Component
"Display Mapper Entity successfully created",
"Display Mapper_test: Component added to the entity: True",
"Display Mapper_test: Component removed after UNDO: True",
"Display Mapper_test: Component added after REDO: True",
"Display Mapper_test: Entered game mode: True",
"Display Mapper_test: Exit game mode: True",
"Display Mapper_test: Entity is hidden: True",
"Display Mapper_test: Entity is shown: True",
"Display Mapper_test: Entity deleted: True",
"Display Mapper_test: UNDO entity deletion works: True",
"Display Mapper_test: REDO entity deletion works: True",
]
unexpected_lines = [
"Trace::Assert",
"Trace::Error",
"Traceback (most recent call last):",
]
hydra.launch_and_validate_results(
request,
TEST_DIRECTORY,
editor,
"hydra_AtomEditorComponents_AddedToEntity.py",
timeout=EDITOR_TIMEOUT,
expected_lines=expected_lines,
unexpected_lines=unexpected_lines,
halt_on_unexpected=True,
null_renderer=True,
cfg_args=cfg_args,
)
| [
37811,
198,
15269,
357,
66,
8,
25767,
669,
284,
262,
4946,
513,
35,
7117,
4935,
13,
198,
1890,
1844,
6634,
290,
5964,
2846,
3387,
766,
262,
38559,
24290,
379,
262,
6808,
286,
428,
6082,
13,
198,
198,
4303,
36227,
12,
34156,
12,
3323... | 2.445055 | 3,822 |
# The MIT License (MIT)
#
# Copyright (c) 2013 Brad Ruderman
# Copyright (c) 2014 Paul Colomiets
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
from .lowlevel.TCLIService.ttypes import TFetchResultsReq
from .lowlevel.TCLIService.ttypes import TGetResultSetMetadataReq
from .lowlevel.TCLIService.ttypes import TExecuteStatementReq
from .lowlevel.TCLIService.ttypes import TFetchOrientation, TCloseOperationReq
from .lowlevel.TCLIService.ttypes import TGetSchemasReq, TTypeId
from .error import Pyhs2Exception
| [
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
198,
2,
15069,
357,
66,
8,
2211,
8114,
17421,
2224,
198,
2,
15069,
357,
66,
8,
1946,
3362,
1623,
12753,
1039,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
28... | 3.535632 | 435 |
# Standard modules
import numpy as np
import random
import pandas as pd
from flask import Flask, request, render_template
# Scikit Learn modules
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, StandardScaler
from sklearn.metrics import f1_score
from sklearn.compose import ColumnTransformer
model = None
app = Flask(__name__)
# Load index page
@app.route('/')
@app.route('/submit', methods=['GET', 'POST'])
# Run app
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=80) | [
2,
8997,
13103,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
8543,
62,
28243,
198,
198,
2,
10286,
15813,
14365,
13103,
198,
6738,
1341,
35720,
... | 3.214634 | 205 |
n=int(input())
sum=0.0
for i in range(1,n+1):
sum += float(float(i)/(i+1))
print(sum)
| [
77,
28,
600,
7,
15414,
28955,
198,
16345,
28,
15,
13,
15,
198,
1640,
1312,
287,
2837,
7,
16,
11,
77,
10,
16,
2599,
198,
220,
220,
220,
2160,
15853,
12178,
7,
22468,
7,
72,
20679,
7,
72,
10,
16,
4008,
198,
4798,
7,
16345,
8,
... | 1.914894 | 47 |
"""
@Author : dilless
@Time : 2018/6/25 22:46
@File : paper_download.py
"""
import os
import re
from datetime import datetime
import MySQLdb
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
if __name__ == '__main__':
db = MySQLAccess()
down = DownloadPaper()
urls = db.get_urls()
for url in urls:
page_url = url[0]
down.download_paper(page_url)
db.update_is_down_status(page_url)
| [
37811,
198,
198,
31,
13838,
220,
1058,
288,
359,
408,
198,
31,
7575,
220,
220,
220,
1058,
2864,
14,
21,
14,
1495,
2534,
25,
3510,
198,
31,
8979,
220,
220,
220,
1058,
3348,
62,
15002,
13,
9078,
198,
37811,
198,
11748,
28686,
198,
1... | 2.484375 | 192 |
from api.strings import id_key, type_key, name_key
from api.ver1.offices.strings import loc_gov_type, mca, state_type, gov, leg_type, prezzo, fed_type, sen
political_offices = [ { id_key: 1, type_key: loc_gov_type, name_key: mca}, { id_key: 2, type_key: state_type, name_key: prezzo }, { id_key: 3, type_key: fed_type, name_key: sen }, { id_key: 3, type_key: leg_type, name_key: gov } ]
| [
6738,
40391,
13,
37336,
1330,
4686,
62,
2539,
11,
2099,
62,
2539,
11,
1438,
62,
2539,
198,
6738,
40391,
13,
332,
16,
13,
2364,
1063,
13,
37336,
1330,
1179,
62,
9567,
62,
4906,
11,
285,
6888,
11,
1181,
62,
4906,
11,
467,
85,
11,
... | 2.503226 | 155 |
import radio
import random
from microbit import display, Image, button_a, sleep
# Création de la liste "flash" contenant les images de l'animation
# Comprends-tu comment ça fonctionne ?
flash = [Image().invert()*(i/9) for i in range(9, -1, -1)]
# La radio ne marchera pas sauf si on l'allume !
radio.on()
# Boucle événementielle.
while True:
# Le bouton A envoie un message "flash"
if button_a.was_pressed():
radio.send('flash') # a-ha
# On lit tous les messages entrant
incoming = radio.receive()
if incoming == 'flash':
# Si il y a un message "flash" entrant
# on affiche l'animation du flash de luciole après une petite
# pause de durée aléatoire.
sleep(random.randint(50, 350))
display.show(flash, delay=100, wait=False)
# On re-diffuse aléatoirement le message flash après une petite
# pause
if random.randint(0, 9) == 0:
sleep(500)
radio.send('flash') # a-ha | [
11748,
5243,
198,
11748,
4738,
198,
6738,
4580,
2545,
1330,
3359,
11,
7412,
11,
4936,
62,
64,
11,
3993,
198,
198,
2,
3864,
2634,
341,
390,
8591,
1351,
68,
366,
34167,
1,
542,
268,
415,
10287,
4263,
390,
300,
6,
11227,
341,
198,
2,... | 2.384988 | 413 |
from otree.api import *
doc = """
Random number of rounds for multiplayer (random stopping rule)
"""
# PAGES
page_sequence = [MyPage, ResultsWaitPage, Results]
| [
6738,
267,
21048,
13,
15042,
1330,
1635,
628,
198,
15390,
796,
37227,
198,
29531,
1271,
286,
9196,
329,
16913,
357,
25120,
12225,
3896,
8,
198,
37811,
628,
628,
628,
198,
198,
2,
350,
25552,
628,
628,
198,
7700,
62,
43167,
796,
685,
... | 3.326923 | 52 |
import csv
import math
training_dataset = []
training_labels = []
test_dataset = []
test_labels = []
# Populate training and test sets
with open('iris_training.csv', 'rU') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', dialect=csv.excel_tab)
for row in spamreader:
training_dataset.append([ float(row[3]), float(row[2]), float(row[1]), float(row[0]) ])
training_labels.append(row[4])
with open('iris_test.csv', 'rU') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', dialect=csv.excel_tab)
for row in spamreader:
test_dataset.append([ float(row[3]), float(row[2]), float(row[1]), float(row[0]) ])
test_labels.append(row[4])
print('1 NN Label Actual Label')
# Classify and compare with actual label
for i in range(len(test_dataset)):
test_instance = test_dataset[i]
distances = []
for j in range(len(training_dataset)):
distances.append(euclideanDistance(test_instance, training_dataset[j]))
print(training_labels[distances.index(min(distances))], test_labels[i]) | [
11748,
269,
21370,
198,
11748,
10688,
198,
198,
34409,
62,
19608,
292,
316,
796,
17635,
198,
34409,
62,
23912,
1424,
796,
17635,
198,
198,
9288,
62,
19608,
292,
316,
796,
17635,
198,
9288,
62,
23912,
1424,
796,
17635,
198,
198,
2,
809... | 2.672727 | 385 |
#
# PySNMP MIB module Wellfleet-DVMRP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Wellfleet-DVMRP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:33:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, Integer32, NotificationType, Counter32, Bits, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, IpAddress, Counter64, Gauge32, TimeTicks, ObjectIdentity, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Integer32", "NotificationType", "Counter32", "Bits", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "IpAddress", "Counter64", "Gauge32", "TimeTicks", "ObjectIdentity", "MibIdentifier")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
wfDvmrpGroup, = mibBuilder.importSymbols("Wellfleet-COMMON-MIB", "wfDvmrpGroup")
# ---------------------------------------------------------------------------
# DVMRP base (global) configuration group: wfDvmrpBase
# OID subtree: 1.3.6.1.4.1.18.3.5.3.12.1  (Wellfleet enterprise arc)
# Scalars controlling the DVMRP protocol instance as a whole: soloist
# create/enable switches, timer intervals, and route-table sizing.
# NOTE: this file is pysmi-generated from the ASN.1 MIB source (see file
# header); OID tuples, constraints and defaults must stay exactly as emitted.
# ---------------------------------------------------------------------------
wfDvmrpBase = MibIdentifier((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1))
# create/delete toggle for the whole DVMRP record (1=created, 2=deleted)
wfDvmrpBaseCreate = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseCreate.setStatus('mandatory')
wfDvmrpBaseEnable = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseEnable.setStatus('mandatory')
# read-only operational state reported by the router (default 'notpres')
wfDvmrpBaseState = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("init", 3), ("notpres", 4))).clone('notpres')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpBaseState.setStatus('mandatory')
# Timer scalars below: constrained integer ranges with factory defaults
# (units are presumably seconds, per usual DVMRP practice -- not stated here;
# confirm against the Wellfleet MIB text before documenting externally).
wfDvmrpBaseFullUpdateInterval = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 2000)).clone(60)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseFullUpdateInterval.setStatus('mandatory')
wfDvmrpBaseTriggeredUpdateInterval = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 1000)).clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseTriggeredUpdateInterval.setStatus('mandatory')
wfDvmrpBaseLeafTimeout = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(25, 4000)).clone(200)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseLeafTimeout.setStatus('mandatory')
wfDvmrpBaseNeighborTimeout = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(35, 8000)).clone(35)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseNeighborTimeout.setStatus('mandatory')
wfDvmrpBaseRouteExpirationTimeout = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(20, 4000)).clone(140)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseRouteExpirationTimeout.setStatus('mandatory')
wfDvmrpBaseGarbageTimeout = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(40, 8000)).clone(260)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseGarbageTimeout.setStatus('mandatory')
# Route-table sizing hints / counters
wfDvmrpBaseEstimatedRoutes = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 2147483647)).clone(25)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseEstimatedRoutes.setStatus('mandatory')
wfDvmrpBaseNeighborProbeInterval = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 30)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseNeighborProbeInterval.setStatus('mandatory')
wfDvmrpBaseRouteSwitchTimeout = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(20, 2000)).clone(140)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseRouteSwitchTimeout.setStatus('mandatory')
# read-only: number of routes currently held by the router
wfDvmrpBaseActualRoutes = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpBaseActualRoutes.setStatus('mandatory')
# debug verbosity: full unsigned 32-bit range, default 1
wfDvmrpBaseDebugLevel = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 14), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseDebugLevel.setStatus('mandatory')
wfDvmrpBasePruningEnable = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBasePruningEnable.setStatus('mandatory')
# status 'obsolete' below is faithful to the source MIB -- object retained
# only so existing OID references keep resolving
wfDvmrpBaseFragmentMtuThreshold = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(910, 2147483647)).clone(1514)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseFragmentMtuThreshold.setStatus('obsolete')
wfDvmrpBaseMaxRoutes = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 17), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseMaxRoutes.setStatus('mandatory')
wfDvmrpBasePolicyEnable = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBasePolicyEnable.setStatus('obsolete')
wfDvmrpBaseHolddownEnable = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpBaseHolddownEnable.setStatus('mandatory')
# ---------------------------------------------------------------------------
# Per-circuit DVMRP table: wfDvmrpCircuitEntryTable
# OID subtree: 1.3.6.1.4.1.18.3.5.3.12.2
# One conceptual row per circuit, indexed by wfDvmrpCircuitCCT (column 4).
# Columns mix read-write configuration (create/enable, metric, cache sizing)
# with read-only state and traffic counters.
# Generated code: OID tuples, constraints and defaults must stay exact.
# ---------------------------------------------------------------------------
wfDvmrpCircuitEntryTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2), )
if mibBuilder.loadTexts: wfDvmrpCircuitEntryTable.setStatus('mandatory')
# row template; table is indexed solely by the circuit number (CCT)
wfDvmrpCircuitEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1), ).setIndexNames((0, "Wellfleet-DVMRP-MIB", "wfDvmrpCircuitCCT"))
if mibBuilder.loadTexts: wfDvmrpCircuitEntry.setStatus('mandatory')
# --- per-row configuration columns ---
wfDvmrpCircuitCreate = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpCircuitCreate.setStatus('mandatory')
wfDvmrpCircuitEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpCircuitEnable.setStatus('mandatory')
# operational state (5-valued here, unlike the 4-valued base-group state)
wfDvmrpCircuitState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("init", 3), ("invalid", 4), ("notpres", 5))).clone('notpres')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitState.setStatus('mandatory')
# circuit number -- also the table index
wfDvmrpCircuitCCT = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitCCT.setStatus('mandatory')
wfDvmrpCircuitRouteEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpCircuitRouteEnable.setStatus('mandatory')
# DVMRP metric (1..31; 32 is "infinity" in DVMRP and is excluded here)
wfDvmrpCircuitMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 31)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpCircuitMetric.setStatus('mandatory')
wfDvmrpCircuitRouteThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 254)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpCircuitRouteThreshold.setStatus('mandatory')
# --- read-only traffic / update counters ---
wfDvmrpCircuitInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitInPkts.setStatus('mandatory')
wfDvmrpCircuitOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitOutPkts.setStatus('mandatory')
wfDvmrpCircuitInRouteUpdates = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitInRouteUpdates.setStatus('mandatory')
wfDvmrpCircuitOutRouteUpdates = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitOutRouteUpdates.setStatus('mandatory')
wfDvmrpCircuitInPktDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitInPktDiscards.setStatus('mandatory')
wfDvmrpCircuitOutPktDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitOutPktDiscards.setStatus('mandatory')
# --- forwarding-cache tuning ---
wfDvmrpCircuitFwdCacheSize = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(32, 512)).clone(32)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpCircuitFwdCacheSize.setStatus('mandatory')
wfDvmrpCircuitFwdCacheTTL = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400)).clone(7200)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpCircuitFwdCacheTTL.setStatus('mandatory')
wfDvmrpCircuitAdvertiseSelf = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpCircuitAdvertiseSelf.setStatus('mandatory')
wfDvmrpCircuitFwdCacheEntries = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitFwdCacheEntries.setStatus('mandatory')
# --- prune / graft protocol counters ---
wfDvmrpCircuitInPrunePkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitInPrunePkts.setStatus('mandatory')
wfDvmrpCircuitOutPrunePkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitOutPrunePkts.setStatus('mandatory')
wfDvmrpCircuitInGraftPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitInGraftPkts.setStatus('mandatory')
wfDvmrpCircuitOutGraftPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitOutGraftPkts.setStatus('mandatory')
wfDvmrpCircuitInGraftAckPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitInGraftAckPkts.setStatus('mandatory')
wfDvmrpCircuitOutGraftAckPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitOutGraftAckPkts.setStatus('mandatory')
# --- default-route and aggregation behaviour ---
wfDvmrpCircuitDefaultRouteSupply = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("generate", 3))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpCircuitDefaultRouteSupply.setStatus('mandatory')
wfDvmrpCircuitDefaultRouteListen = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpCircuitDefaultRouteListen.setStatus('mandatory')
wfDvmrpCircuitReportDependProbe = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpCircuitReportDependProbe.setStatus('mandatory')
wfDvmrpCircuitPruneLifeTime = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 27), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400)).clone(7200)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpCircuitPruneLifeTime.setStatus('mandatory')
wfDvmrpCircuitAcceptAggregateRoutes = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 28), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitAcceptAggregateRoutes.setStatus('mandatory')
wfDvmrpCircuitAnnounceAggregatedRoutes = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 2, 1, 29), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpCircuitAnnounceAggregatedRoutes.setStatus('mandatory')
# ---------------------------------------------------------------------------
# Per-tunnel DVMRP table: wfDvmrpTunnelEntryTable
# OID subtree: 1.3.6.1.4.1.18.3.5.3.12.3
# One row per DVMRP tunnel, indexed by (circuit, local router IP,
# remote router IP). Column layout largely parallels the circuit table,
# plus tunnel-specific endpoint addresses and encapsulation mode.
# Generated code: OID tuples, constraints and defaults must stay exact.
# ---------------------------------------------------------------------------
wfDvmrpTunnelEntryTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3), )
if mibBuilder.loadTexts: wfDvmrpTunnelEntryTable.setStatus('mandatory')
# row template with a compound 3-part index (CCT + local IP + remote IP)
wfDvmrpTunnelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1), ).setIndexNames((0, "Wellfleet-DVMRP-MIB", "wfDvmrpTunnelCCT"), (0, "Wellfleet-DVMRP-MIB", "wfDvmrpTunnelLocalRouterIpAddress"), (0, "Wellfleet-DVMRP-MIB", "wfDvmrpTunnelRemoteRouterIpAddress"))
if mibBuilder.loadTexts: wfDvmrpTunnelEntry.setStatus('mandatory')
# --- per-row configuration columns ---
wfDvmrpTunnelCreate = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpTunnelCreate.setStatus('mandatory')
wfDvmrpTunnelEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpTunnelEnable.setStatus('mandatory')
wfDvmrpTunnelState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("init", 3), ("invalid", 4), ("notpres", 5))).clone('notpres')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelState.setStatus('mandatory')
wfDvmrpTunnelCCT = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelCCT.setStatus('mandatory')
# tunnel far-end address (index component; read-only)
wfDvmrpTunnelRemoteRouterIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelRemoteRouterIpAddress.setStatus('mandatory')
# encapsulation: IP-in-IP or loose source routing (lssr)
wfDvmrpTunnelEncapsMode = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ip-in-ip", 1), ("lssr", 2))).clone('ip-in-ip')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpTunnelEncapsMode.setStatus('mandatory')
# DVMRP metric (1..31) and TTL threshold, same semantics as circuit columns
wfDvmrpTunnelMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 31)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpTunnelMetric.setStatus('mandatory')
wfDvmrpTunnelThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 254)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpTunnelThreshold.setStatus('mandatory')
# --- read-only traffic / update counters ---
wfDvmrpTunnelInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelInPkts.setStatus('mandatory')
wfDvmrpTunnelOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelOutPkts.setStatus('mandatory')
wfDvmrpTunnelInRouteUpdates = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelInRouteUpdates.setStatus('mandatory')
wfDvmrpTunnelOutRouteUpdates = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelOutRouteUpdates.setStatus('mandatory')
wfDvmrpTunnelInPktDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelInPktDiscards.setStatus('mandatory')
wfDvmrpTunnelOutPktDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelOutPktDiscards.setStatus('mandatory')
# tunnel near-end address (index component; read-only)
wfDvmrpTunnelLocalRouterIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 15), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelLocalRouterIpAddress.setStatus('mandatory')
# --- forwarding-cache tuning ---
wfDvmrpTunnelFwdCacheSize = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(32, 512)).clone(32)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpTunnelFwdCacheSize.setStatus('mandatory')
wfDvmrpTunnelFwdCacheTTL = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400)).clone(7200)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpTunnelFwdCacheTTL.setStatus('mandatory')
wfDvmrpTunnelFwdCacheEntries = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelFwdCacheEntries.setStatus('mandatory')
# prune counter; the paired loadTexts/status line follows in the next chunk
wfDvmrpTunnelInPrunePkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelInPrunePkts.setStatus('mandatory')
wfDvmrpTunnelOutPrunePkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelOutPrunePkts.setStatus('mandatory')
wfDvmrpTunnelInGraftPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelInGraftPkts.setStatus('mandatory')
wfDvmrpTunnelOutGraftPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelOutGraftPkts.setStatus('mandatory')
wfDvmrpTunnelInGraftAckPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelInGraftAckPkts.setStatus('mandatory')
wfDvmrpTunnelOutGraftAckPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelOutGraftAckPkts.setStatus('mandatory')
wfDvmrpTunnelDefaultRouteSupply = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("generate", 3))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpTunnelDefaultRouteSupply.setStatus('mandatory')
wfDvmrpTunnelDefaultRouteListen = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpTunnelDefaultRouteListen.setStatus('mandatory')
wfDvmrpTunnelCtrlMsgMode = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("native", 1), ("ip-in-ip", 2))).clone('native')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpTunnelCtrlMsgMode.setStatus('mandatory')
wfDvmrpTunnelReportDependProbe = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpTunnelReportDependProbe.setStatus('mandatory')
wfDvmrpTunnelPruneLifeTime = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 29), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400)).clone(7200)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfDvmrpTunnelPruneLifeTime.setStatus('mandatory')
wfDvmrpTunnelAcceptAggregateRoutes = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 30), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelAcceptAggregateRoutes.setStatus('mandatory')
wfDvmrpTunnelAnnounceAggregatedRoutes = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 3, 1, 31), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpTunnelAnnounceAggregatedRoutes.setStatus('mandatory')
wfDvmrpRouteEntryTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 4), )
if mibBuilder.loadTexts: wfDvmrpRouteEntryTable.setStatus('mandatory')
wfDvmrpRouteEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 4, 1), ).setIndexNames((0, "Wellfleet-DVMRP-MIB", "wfDvmrpRouteSourceNetwork"))
if mibBuilder.loadTexts: wfDvmrpRouteEntry.setStatus('mandatory')
wfDvmrpRouteSourceNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 4, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteSourceNetwork.setStatus('mandatory')
wfDvmrpRouteSourceMask = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 4, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteSourceMask.setStatus('mandatory')
wfDvmrpRouteNextHopRouter = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 4, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteNextHopRouter.setStatus('mandatory')
wfDvmrpRouteNextHopInterfaceCCT = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 4, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteNextHopInterfaceCCT.setStatus('mandatory')
wfDvmrpRouteNextHopInterfaceTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 4, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteNextHopInterfaceTunnelId.setStatus('mandatory')
wfDvmrpRouteNextHopInterfaceLocalTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 4, 1, 6), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteNextHopInterfaceLocalTunnelId.setStatus('mandatory')
wfDvmrpRouteTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 4, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteTimer.setStatus('mandatory')
wfDvmrpRouteState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 4, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteState.setStatus('mandatory')
wfDvmrpRouteMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 4, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteMetric.setStatus('mandatory')
wfDvmrpRouteProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 4, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteProtocol.setStatus('mandatory')
wfDvmrpRouteType = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 4, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteType.setStatus('mandatory')
wfDvmrpRouteAggregatedType = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 4, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteAggregatedType.setStatus('mandatory')
wfDvmrpRouteInterfaceEntryTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5), )
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceEntryTable.setStatus('mandatory')
wfDvmrpRouteInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1), ).setIndexNames((0, "Wellfleet-DVMRP-MIB", "wfDvmrpRouteInterfaceSourceNetwork"), (0, "Wellfleet-DVMRP-MIB", "wfDvmrpRouteInterfaceParentCCT"), (0, "Wellfleet-DVMRP-MIB", "wfDvmrpRouteInterfaceParentLocalTunnelId"), (0, "Wellfleet-DVMRP-MIB", "wfDvmrpRouteInterfaceParentTunnelId"))
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceEntry.setStatus('mandatory')
wfDvmrpRouteInterfaceSourceNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceSourceNetwork.setStatus('mandatory')
wfDvmrpRouteInterfaceSourceMask = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceSourceMask.setStatus('mandatory')
wfDvmrpRouteInterfaceNextHopInterfaceCCT = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceNextHopInterfaceCCT.setStatus('mandatory')
wfDvmrpRouteInterfaceNextHopInterfaceLocalTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceNextHopInterfaceLocalTunnelId.setStatus('mandatory')
wfDvmrpRouteInterfaceNextHopInterfaceTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceNextHopInterfaceTunnelId.setStatus('mandatory')
wfDvmrpRouteInterfaceState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceState.setStatus('mandatory')
wfDvmrpRouteInterfaceDominantRouter = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 7), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceDominantRouter.setStatus('mandatory')
wfDvmrpRouteInterfaceSubordinateRouter = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 8), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceSubordinateRouter.setStatus('mandatory')
wfDvmrpRouteInterfaceHoldDownTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceHoldDownTimer.setStatus('mandatory')
wfDvmrpRouteInterfaceSPInDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceSPInDiscards.setStatus('obsolete')
wfDvmrpRouteInterfaceSPOutDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceSPOutDiscards.setStatus('obsolete')
wfDvmrpRouteInterfaceThresholdOutDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceThresholdOutDiscards.setStatus('obsolete')
wfDvmrpRouteInterfaceInSuccessfulPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceInSuccessfulPkts.setStatus('obsolete')
wfDvmrpRouteInterfaceOutSuccessfulPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceOutSuccessfulPkts.setStatus('obsolete')
wfDvmrpRouteInterfaceParentCCT = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceParentCCT.setStatus('mandatory')
wfDvmrpRouteInterfaceParentLocalTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 16), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceParentLocalTunnelId.setStatus('mandatory')
wfDvmrpRouteInterfaceParentTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 5, 1, 17), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpRouteInterfaceParentTunnelId.setStatus('mandatory')
wfDvmrpNeighboringRouterEntryTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 6), )
if mibBuilder.loadTexts: wfDvmrpNeighboringRouterEntryTable.setStatus('mandatory')
wfDvmrpNeighboringRouterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 6, 1), ).setIndexNames((0, "Wellfleet-DVMRP-MIB", "wfDvmrpNeighboringRouterCCT"), (0, "Wellfleet-DVMRP-MIB", "wfDvmrpNeighboringRouterLocalTunnelId"), (0, "Wellfleet-DVMRP-MIB", "wfDvmrpNeighboringRouterTunnelId"), (0, "Wellfleet-DVMRP-MIB", "wfDvmrpNeighboringRouterIpAddr"))
if mibBuilder.loadTexts: wfDvmrpNeighboringRouterEntry.setStatus('mandatory')
wfDvmrpNeighboringRouterCCT = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 6, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpNeighboringRouterCCT.setStatus('mandatory')
wfDvmrpNeighboringRouterLocalTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 6, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpNeighboringRouterLocalTunnelId.setStatus('mandatory')
wfDvmrpNeighboringRouterTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 6, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpNeighboringRouterTunnelId.setStatus('mandatory')
wfDvmrpNeighboringRouterIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 6, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpNeighboringRouterIpAddr.setStatus('mandatory')
wfDvmrpNeighboringRouterState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 6, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpNeighboringRouterState.setStatus('mandatory')
wfDvmrpNeighboringRouterTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 6, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpNeighboringRouterTimer.setStatus('mandatory')
wfDvmrpNeighboringRouterGenId = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 6, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpNeighboringRouterGenId.setStatus('mandatory')
wfDvmrpNeighboringRouterMajorVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 6, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpNeighboringRouterMajorVersion.setStatus('mandatory')
wfDvmrpNeighboringRouterMinorVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 6, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpNeighboringRouterMinorVersion.setStatus('mandatory')
wfDvmrpNeighboringRouterCapabilities = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 12, 6, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfDvmrpNeighboringRouterCapabilities.setStatus('mandatory')
mibBuilder.exportSymbols("Wellfleet-DVMRP-MIB", wfDvmrpBaseFullUpdateInterval=wfDvmrpBaseFullUpdateInterval, wfDvmrpRouteInterfaceNextHopInterfaceLocalTunnelId=wfDvmrpRouteInterfaceNextHopInterfaceLocalTunnelId, wfDvmrpNeighboringRouterIpAddr=wfDvmrpNeighboringRouterIpAddr, wfDvmrpCircuitDefaultRouteSupply=wfDvmrpCircuitDefaultRouteSupply, wfDvmrpTunnelOutPrunePkts=wfDvmrpTunnelOutPrunePkts, wfDvmrpNeighboringRouterCCT=wfDvmrpNeighboringRouterCCT, wfDvmrpBaseFragmentMtuThreshold=wfDvmrpBaseFragmentMtuThreshold, wfDvmrpRouteAggregatedType=wfDvmrpRouteAggregatedType, wfDvmrpRouteInterfaceHoldDownTimer=wfDvmrpRouteInterfaceHoldDownTimer, wfDvmrpCircuitEnable=wfDvmrpCircuitEnable, wfDvmrpTunnelDefaultRouteSupply=wfDvmrpTunnelDefaultRouteSupply, wfDvmrpRouteInterfaceSubordinateRouter=wfDvmrpRouteInterfaceSubordinateRouter, wfDvmrpNeighboringRouterEntry=wfDvmrpNeighboringRouterEntry, wfDvmrpTunnelFwdCacheTTL=wfDvmrpTunnelFwdCacheTTL, wfDvmrpBasePolicyEnable=wfDvmrpBasePolicyEnable, wfDvmrpRouteSourceMask=wfDvmrpRouteSourceMask, wfDvmrpCircuitOutGraftAckPkts=wfDvmrpCircuitOutGraftAckPkts, wfDvmrpRouteInterfaceState=wfDvmrpRouteInterfaceState, wfDvmrpBaseActualRoutes=wfDvmrpBaseActualRoutes, wfDvmrpTunnelPruneLifeTime=wfDvmrpTunnelPruneLifeTime, wfDvmrpRouteTimer=wfDvmrpRouteTimer, wfDvmrpCircuitOutRouteUpdates=wfDvmrpCircuitOutRouteUpdates, wfDvmrpTunnelInGraftPkts=wfDvmrpTunnelInGraftPkts, wfDvmrpTunnelMetric=wfDvmrpTunnelMetric, wfDvmrpBaseCreate=wfDvmrpBaseCreate, wfDvmrpRouteMetric=wfDvmrpRouteMetric, wfDvmrpBaseNeighborProbeInterval=wfDvmrpBaseNeighborProbeInterval, wfDvmrpTunnelRemoteRouterIpAddress=wfDvmrpTunnelRemoteRouterIpAddress, wfDvmrpBase=wfDvmrpBase, wfDvmrpTunnelEnable=wfDvmrpTunnelEnable, wfDvmrpCircuitDefaultRouteListen=wfDvmrpCircuitDefaultRouteListen, wfDvmrpRouteInterfaceEntry=wfDvmrpRouteInterfaceEntry, wfDvmrpCircuitReportDependProbe=wfDvmrpCircuitReportDependProbe, wfDvmrpTunnelAcceptAggregateRoutes=wfDvmrpTunnelAcceptAggregateRoutes, 
wfDvmrpTunnelFwdCacheSize=wfDvmrpTunnelFwdCacheSize, wfDvmrpNeighboringRouterTunnelId=wfDvmrpNeighboringRouterTunnelId, wfDvmrpTunnelEntryTable=wfDvmrpTunnelEntryTable, wfDvmrpNeighboringRouterMajorVersion=wfDvmrpNeighboringRouterMajorVersion, wfDvmrpRouteInterfaceOutSuccessfulPkts=wfDvmrpRouteInterfaceOutSuccessfulPkts, wfDvmrpTunnelOutPkts=wfDvmrpTunnelOutPkts, wfDvmrpBaseMaxRoutes=wfDvmrpBaseMaxRoutes, wfDvmrpTunnelEntry=wfDvmrpTunnelEntry, wfDvmrpNeighboringRouterCapabilities=wfDvmrpNeighboringRouterCapabilities, wfDvmrpNeighboringRouterEntryTable=wfDvmrpNeighboringRouterEntryTable, wfDvmrpCircuitInGraftAckPkts=wfDvmrpCircuitInGraftAckPkts, wfDvmrpBaseRouteExpirationTimeout=wfDvmrpBaseRouteExpirationTimeout, wfDvmrpCircuitState=wfDvmrpCircuitState, wfDvmrpRouteInterfaceSPOutDiscards=wfDvmrpRouteInterfaceSPOutDiscards, wfDvmrpCircuitInRouteUpdates=wfDvmrpCircuitInRouteUpdates, wfDvmrpRouteEntry=wfDvmrpRouteEntry, wfDvmrpRouteInterfaceNextHopInterfaceCCT=wfDvmrpRouteInterfaceNextHopInterfaceCCT, wfDvmrpTunnelLocalRouterIpAddress=wfDvmrpTunnelLocalRouterIpAddress, wfDvmrpBaseHolddownEnable=wfDvmrpBaseHolddownEnable, wfDvmrpNeighboringRouterLocalTunnelId=wfDvmrpNeighboringRouterLocalTunnelId, wfDvmrpTunnelCCT=wfDvmrpTunnelCCT, wfDvmrpTunnelState=wfDvmrpTunnelState, wfDvmrpRouteNextHopInterfaceCCT=wfDvmrpRouteNextHopInterfaceCCT, wfDvmrpCircuitOutPrunePkts=wfDvmrpCircuitOutPrunePkts, wfDvmrpTunnelInPkts=wfDvmrpTunnelInPkts, wfDvmrpTunnelInRouteUpdates=wfDvmrpTunnelInRouteUpdates, wfDvmrpNeighboringRouterTimer=wfDvmrpNeighboringRouterTimer, wfDvmrpNeighboringRouterState=wfDvmrpNeighboringRouterState, wfDvmrpRouteInterfaceSourceMask=wfDvmrpRouteInterfaceSourceMask, wfDvmrpRouteInterfaceParentLocalTunnelId=wfDvmrpRouteInterfaceParentLocalTunnelId, wfDvmrpRouteType=wfDvmrpRouteType, wfDvmrpTunnelCtrlMsgMode=wfDvmrpTunnelCtrlMsgMode, wfDvmrpCircuitInPkts=wfDvmrpCircuitInPkts, wfDvmrpCircuitEntry=wfDvmrpCircuitEntry, wfDvmrpCircuitInPrunePkts=wfDvmrpCircuitInPrunePkts, 
wfDvmrpCircuitOutGraftPkts=wfDvmrpCircuitOutGraftPkts, wfDvmrpTunnelThreshold=wfDvmrpTunnelThreshold, wfDvmrpTunnelInPrunePkts=wfDvmrpTunnelInPrunePkts, wfDvmrpNeighboringRouterMinorVersion=wfDvmrpNeighboringRouterMinorVersion, wfDvmrpCircuitOutPkts=wfDvmrpCircuitOutPkts, wfDvmrpCircuitFwdCacheEntries=wfDvmrpCircuitFwdCacheEntries, wfDvmrpTunnelFwdCacheEntries=wfDvmrpTunnelFwdCacheEntries, wfDvmrpRouteEntryTable=wfDvmrpRouteEntryTable, wfDvmrpRouteProtocol=wfDvmrpRouteProtocol, wfDvmrpCircuitFwdCacheTTL=wfDvmrpCircuitFwdCacheTTL, wfDvmrpBaseEstimatedRoutes=wfDvmrpBaseEstimatedRoutes, wfDvmrpCircuitRouteThreshold=wfDvmrpCircuitRouteThreshold, wfDvmrpTunnelOutRouteUpdates=wfDvmrpTunnelOutRouteUpdates, wfDvmrpRouteInterfaceDominantRouter=wfDvmrpRouteInterfaceDominantRouter, wfDvmrpBaseTriggeredUpdateInterval=wfDvmrpBaseTriggeredUpdateInterval, wfDvmrpCircuitEntryTable=wfDvmrpCircuitEntryTable, wfDvmrpCircuitAdvertiseSelf=wfDvmrpCircuitAdvertiseSelf, wfDvmrpTunnelReportDependProbe=wfDvmrpTunnelReportDependProbe, wfDvmrpRouteInterfaceParentCCT=wfDvmrpRouteInterfaceParentCCT, wfDvmrpRouteSourceNetwork=wfDvmrpRouteSourceNetwork, wfDvmrpBaseDebugLevel=wfDvmrpBaseDebugLevel, wfDvmrpTunnelInGraftAckPkts=wfDvmrpTunnelInGraftAckPkts, wfDvmrpRouteNextHopRouter=wfDvmrpRouteNextHopRouter, wfDvmrpRouteInterfaceThresholdOutDiscards=wfDvmrpRouteInterfaceThresholdOutDiscards, wfDvmrpRouteState=wfDvmrpRouteState, wfDvmrpRouteInterfaceSPInDiscards=wfDvmrpRouteInterfaceSPInDiscards, wfDvmrpRouteInterfaceNextHopInterfaceTunnelId=wfDvmrpRouteInterfaceNextHopInterfaceTunnelId, wfDvmrpBaseRouteSwitchTimeout=wfDvmrpBaseRouteSwitchTimeout, wfDvmrpCircuitInGraftPkts=wfDvmrpCircuitInGraftPkts, wfDvmrpCircuitCreate=wfDvmrpCircuitCreate, wfDvmrpCircuitAnnounceAggregatedRoutes=wfDvmrpCircuitAnnounceAggregatedRoutes, wfDvmrpRouteInterfaceParentTunnelId=wfDvmrpRouteInterfaceParentTunnelId, wfDvmrpBaseState=wfDvmrpBaseState, wfDvmrpBaseNeighborTimeout=wfDvmrpBaseNeighborTimeout, 
wfDvmrpTunnelInPktDiscards=wfDvmrpTunnelInPktDiscards, wfDvmrpTunnelOutGraftAckPkts=wfDvmrpTunnelOutGraftAckPkts, wfDvmrpRouteInterfaceSourceNetwork=wfDvmrpRouteInterfaceSourceNetwork, wfDvmrpRouteNextHopInterfaceLocalTunnelId=wfDvmrpRouteNextHopInterfaceLocalTunnelId, wfDvmrpBasePruningEnable=wfDvmrpBasePruningEnable, wfDvmrpBaseGarbageTimeout=wfDvmrpBaseGarbageTimeout, wfDvmrpRouteNextHopInterfaceTunnelId=wfDvmrpRouteNextHopInterfaceTunnelId, wfDvmrpCircuitFwdCacheSize=wfDvmrpCircuitFwdCacheSize, wfDvmrpTunnelDefaultRouteListen=wfDvmrpTunnelDefaultRouteListen, wfDvmrpTunnelOutPktDiscards=wfDvmrpTunnelOutPktDiscards, wfDvmrpBaseLeafTimeout=wfDvmrpBaseLeafTimeout, wfDvmrpTunnelAnnounceAggregatedRoutes=wfDvmrpTunnelAnnounceAggregatedRoutes, wfDvmrpCircuitPruneLifeTime=wfDvmrpCircuitPruneLifeTime, wfDvmrpTunnelOutGraftPkts=wfDvmrpTunnelOutGraftPkts, wfDvmrpCircuitCCT=wfDvmrpCircuitCCT, wfDvmrpCircuitRouteEnable=wfDvmrpCircuitRouteEnable, wfDvmrpRouteInterfaceInSuccessfulPkts=wfDvmrpRouteInterfaceInSuccessfulPkts, wfDvmrpTunnelEncapsMode=wfDvmrpTunnelEncapsMode, wfDvmrpBaseEnable=wfDvmrpBaseEnable, wfDvmrpCircuitInPktDiscards=wfDvmrpCircuitInPktDiscards, wfDvmrpNeighboringRouterGenId=wfDvmrpNeighboringRouterGenId, wfDvmrpCircuitAcceptAggregateRoutes=wfDvmrpCircuitAcceptAggregateRoutes, wfDvmrpTunnelCreate=wfDvmrpTunnelCreate, wfDvmrpCircuitMetric=wfDvmrpCircuitMetric, wfDvmrpRouteInterfaceEntryTable=wfDvmrpRouteInterfaceEntryTable, wfDvmrpCircuitOutPktDiscards=wfDvmrpCircuitOutPktDiscards)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
3894,
33559,
12,
35,
53,
13599,
47,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
14490,
14,
... | 2.390165 | 17,021 |
import os
import json
import requests
from bs4 import BeautifulSoup as bs
from googlesearch import search
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
355,
275,
82,
198,
6738,
467,
519,
829,
3679,
1330,
2989,
628,
198
] | 3.6 | 30 |
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2016 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
from __future__ import absolute_import, unicode_literals, print_function
from mock import (MagicMock, patch, PropertyMock)
from unittest import TestCase
from gs.group.member.subscribe.messagesender import GroupMessageSender
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
29113,
29113,
7804,
4242,
198,
2,
198,
2,
15069,
10673,
1584,
7467,
38,
14459,
13,
3262,
290,
25767,
669,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
770,
378... | 3.911111 | 225 |
#!/usr/bin/env python
from __future__ import division
import rospy
from ros_abstraction import IkController
from geometry_msgs.msg import Point, Vector3
from std_msgs.msg import ColorRGBA
from visualization_msgs.msg import Marker
if __name__ == "__main__":
LegPositionReader()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
11748,
686,
2777,
88,
198,
6738,
686,
82,
62,
397,
301,
7861,
1330,
32840,
22130,
198,
6738,
22939,
62,
907,
14542,
13,
19662,
1330,
6252... | 3.177778 | 90 |
# 学号:1827402013
# 姓名:司诺男
# IP:192.168.157.154
# 上传时间:2018/11/12 15:02:21
import math
if __name__=="__main__":
pass
| [
171,
119,
123,
2,
220,
10263,
255,
99,
20998,
115,
25,
1507,
1983,
1821,
6390,
201,
198,
2,
220,
10263,
100,
241,
28938,
235,
25,
20998,
116,
46237,
118,
18796,
115,
201,
198,
2,
220,
6101,
25,
17477,
13,
14656,
13,
18458,
13,
215... | 1.419355 | 93 |
# == CRS settings == #
CRS = {'init': 'epsg:4326'}
# == KEYS == #
# geopandas geometry key
GPD_GEO_KEY = "geometry"
# graph keys
NODE_TYPE_KEY = "nodetype"
EDGE_COST_KEY = "cost"
EDGE_LENGTH_KEY = "length"
SOLUTION_POWER_FLOW_KEY = "flow"
NODE_ELEVATION_KEY = "z"
ORIGINAL_EDGE_KEY = "original_edge"
PIPE_DIAMETER_KEY = "diameter"
VELOCITY_KEY = "velocity"
AVERAGE_PRESSURE_KEY = "Apressure"
CONSTRUCTION_COST_KEY = "ConstC"
HEAT_LOSS_COST_KEY = "HLC"
COOL_LOSS_COST_KEY = "CLC"
PUMPING_COST_KEY = "PumpC"
# supply keys
SUPPLY_POWER_CAPACITY_KEY = "capacity_MW"
SUPPLY_NODE_TYPE = "production"
SUPPLY_NODE_NAME_PREFIX = "S_"
OLD_SUPPLY_NODE_NAME_PREFIX = "OldS_"
SUPPLY_FIXED_COST = 1000.0 # meters
# buildings keys
BUILDING_CONSUMPTION_KEY = "MaxHeatDem"
BUILDING_NODE_TYPE = "building"
BUILDING_ID_KEY = "BuildingID"
BUILDING_NODE_NAME_PREFIX = "B_"
EXCLUDED_BUILDING_KEY = "IsExcluded"
BUILDING_CONSUMPTION_FACTOR_UNIT = 1e-3 # buildings consumptions are in GW and the plugin is working in MW
BUILDING_ID_KEY = "BuildingID"
BUILDING_USE_KEY = "Use"
BUILDING_ROOF_AREA_KEY = "RoofArea"
BUILDING_GROSS_FOOTPRINT_AREA_KEY = "GrossFA"
BUILDING_FLOORS_KEY = "NumberFloo"
CONNECTED_BUILDING_KEY = "Connected"
BUILDING_SURFACE_KEY = "Surface"
BUILDING_MAX_HEAT_DEM_KEY = "MaxHeatDem"
BUILDING_MAX_COOL_DEM_KEY = 'MaxCoolDem'
BUILDING_AVERAGE_HEAT_DEM_KEY = "AHeatDem"
BUILDING_AVERAGE_COOL_DEM_KEY = 'ACoolDem'
BUILDING_PEAK_HEAT_DEM_KEY = "PeakHeatDe"
BUILDING_PEAK_COOL_DEM_KEY = 'PeakCoolDe'
DAY_KEY = "DayOfYear"
HOUR_KEY = "HourOfDay"
# Streets keys
STREET_NODE_TYPE = "junction"
STREET_NODE_PEAK_DEMAND = "JunPeakDem"
# Least cost coefficient (%)
LEASTCOST_COEF = 30
LEASTCOST_COEF_KEY = 'LSTCcoef'
# Imaginary edges:
IM_PREFIX = "IM_"
# Output files :
SELECTED_BUILDINGS_FILE = "result_buildings.shp"
UNSELECTED_BUILDINGS_FILE = "result_unselected_buildings.shp"
SOLUTION_DISTRICT_EDGES_FILE = "solution_edges.shp"
SOLUTION_SUPPLY_EDGES_FILE = "solution_supply.shp"
SOLUTION_OLD_SUPPLY_EDGES_FILE = "solution_old_supply.shp"
SOLUTION_STP_EDGES_FILE = "solution_edges_stp.shp"
# == PLUGIN KEYS == #
STATUS_KEY = "Status"
EXCLUDED_KEY = "Excluded"
EXCLUDED_STATUS_VALUE = 0
INCLUDED_KEY = "Included"
INCLUDED_STATUS_VALUE = 1
EXISTING_KEY = "Already connected"
EXISTING_STATUS_VALUE = 2
LEASTCOST_KEY = "Least Cost"
LEASTCOST_STATUS_VALUE = 3
SUPPLY_NAME_KEY = "name"
COVERAGE_OBJECTIVE_DEFAULT = 50 # %
POSTPROCESS = True
| [
2,
6624,
327,
6998,
6460,
6624,
1303,
198,
34,
6998,
796,
1391,
6,
15003,
10354,
705,
25386,
70,
25,
3559,
2075,
6,
92,
198,
198,
2,
6624,
47134,
16309,
6624,
1303,
198,
2,
30324,
392,
292,
22939,
1994,
198,
38,
5760,
62,
38,
4720... | 2.211051 | 1,104 |
"""Main module."""
# ! /usr/bin/env python
#
# python I2C
#
# (C)2020 Aleksandr Saiapin <alstutor@gmail.com>
# (C)2006 Patrick Nomblot <pyI2C@nomblot.org>
# this is distributed under a free software license, see license.txt
import time
from typing import List
from pyi2c.protocol import I2CProtocol
I2C_REGISTER_WRITE = 0
I2C_REGISTER_READ = 1
| [
37811,
13383,
8265,
526,
15931,
198,
2,
5145,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
21015,
314,
17,
34,
198,
2,
198,
2,
357,
34,
8,
42334,
9300,
591,
46273,
25251,
499,
259,
1279,
282,
301,
38409,
31,
14816,
13,... | 2.604478 | 134 |
from datetime import datetime, timedelta
import numpy as np
from ..api.sqlalchemy_declarative import ouraSleepSummary, ouraReadinessSummary, withings, athlete, stravaSummary, \
strydSummary, fitbod, workoutStepLog, dbRefreshStatus
from sqlalchemy import func, cast, Date
from sweat.io.models.dataframes import WorkoutDataFrame, Athlete
from sweat.pdm import critical_power
from sweat.metrics.core import weighted_average_power
from sweat.metrics.power import *
import stravalib
from ..api.stravaApi import get_strava_client
from ..api.spotifyAPI import generate_recommendation_playlists
from stravalib import unithelper
from ..api.pelotonApi import peloton_mapping_df, roundTime, set_peloton_workout_recommendations
from dateutil.relativedelta import relativedelta
from ..app import app
from .database import engine
from ..utils import peloton_credentials_supplied, stryd_credentials_supplied, config
import os
import pandas as pd
from ..pages.performance import get_hrv_df, readiness_score_recommendation
types = ['time', 'latlng', 'distance', 'altitude', 'velocity_smooth', 'heartrate', 'cadence', 'watts', 'temp',
'moving', 'grade_smooth']
def training_workflow(min_non_warmup_workout_time, metric='hrv_baseline', athlete_id=1):
'''
Query db for oura hrv data, calculate rolling 7 day average, generate recommended workout and store in db.
Once stored, continuously check if workout has been completed and fill in 'Compelted' field
'''
# https://www.alancouzens.com/blog/Training_prescription_guided_by_HRV_in_cycling.pdf
try:
db_process_flag(flag=True)
# Check if entire table is empty, if so the earliest hrv plan can start is after 30 days of hrv readings
# If using readiness score, just use first score available
db_test = pd.read_sql(
sql=app.session.query(workoutStepLog).filter(workoutStepLog.athlete_id == athlete_id).statement,
con=engine, index_col='date')
oura_data_exists = True
if len(db_test) == 0:
try:
if metric == 'hrv':
min_oura_date = pd.to_datetime(
app.session.query(func.min(ouraSleepSummary.report_date))[0][0] + timedelta(59)).date()
if metric in ['hrv_baseline', 'zscore']:
min_oura_date = pd.to_datetime(
app.session.query(func.min(ouraSleepSummary.report_date))[0][0] + timedelta(29)).date()
elif metric == 'readiness':
min_oura_date = pd.to_datetime(
app.session.query(func.min(ouraReadinessSummary.report_date))[0][0]).date()
db_test.at[min_oura_date, 'athlete_id'] = athlete_id
db_test.at[min_oura_date, 'workout_step'] = 0
db_test.at[min_oura_date, 'workout_step_desc'] = 'Low'
db_test.at[min_oura_date, 'completed'] = 0
db_test.at[min_oura_date, 'rationale'] = 'This is the first date hrv thresholds could be calculated'
db_test.to_sql('workout_step_log', engine, if_exists='append', index=True)
except BaseException as e:
app.server.logger.error(f'Check enough oura data exists to generate workout recommendation: {e}')
oura_data_exists = False
db_process_flag(flag=False)
if oura_data_exists:
# Check if a step has already been inserted for today and if so check if workout has been completed yet
todays_plan = app.session.query(workoutStepLog).filter(workoutStepLog.athlete_id == athlete_id,
workoutStepLog.date == datetime.today().date()).first()
if todays_plan:
# If not yet "completed" keep checking throughout day
if todays_plan.completed == 0:
# If rest day, mark as completed
if todays_plan.workout_step == 4 or todays_plan.workout_step == 5:
todays_plan.completed = 1
app.session.commit()
else:
workout = app.session.query(stravaSummary).filter(
stravaSummary.start_day_local == datetime.today().date(),
stravaSummary.elapsed_time > min_non_warmup_workout_time,
# Only include workouts with a workout type specified when checking if workout has been completed for hrv workflow (i.e. ignore 'Other' workouts uploaded from apple watch)
stravaSummary.type != 'Workout').first()
if workout:
todays_plan.completed = 1
app.session.commit()
# If plan not yet created for today, create it
else:
metric_df = get_hrv_df()
if metric == 'hrv':
metric_df['within_swc'] = metric_df['within_daily_swc']
elif metric == 'hrv_baseline':
metric_df['within_swc'] = metric_df['within_flowchart_swc']
# elif metric == 'zscore':
# metric_df['within_swc'] = metric_df['within_zscore_swc']
# Wait for today's hrv to be loaded into cloud
if metric_df.index.max() == datetime.today().date(): # or (datetime.now() - timedelta(hours=12)) > pd.to_datetime(datetime.today().date()):
step_log_df = pd.read_sql(
sql=app.session.query(workoutStepLog.date, workoutStepLog.workout_step,
workoutStepLog.completed).filter(
workoutStepLog.athlete_id == 1).statement,
con=engine, index_col='date').sort_index(ascending=False)
### Modified version of flow chart to allow for additional MOD day in step 2 ###
# Store the last value of step 2 to cycle between MOD->MOD->HIIT every 3rd time
try:
last_hiit_mod = \
step_log_df[
(step_log_df['workout_step'].isin([21, 22, 23])) & (step_log_df['completed'] == 1)][
'workout_step'].head(1).values[0]
except:
last_hiit_mod = 20
next_hiit_mod = last_hiit_mod + 1 if last_hiit_mod != 23 else 21
step_log_df = step_log_df[step_log_df.index == step_log_df.index.max()]
# Store last step in variable for starting point in loop
last_db_step = step_log_df['workout_step'].iloc[0]
# Resample to today
step_log_df.at[pd.to_datetime(datetime.today().date()), 'workout_step'] = None
step_log_df.set_index(pd.to_datetime(step_log_df.index), inplace=True)
step_log_df = step_log_df.resample('D').mean()
# Remove first row from df so it does not get re inserted into db
step_log_df = step_log_df.iloc[1:]
# We already know there is no step for today from "current_step" parameter, so manually add today's date
step_log_df.at[pd.to_datetime(datetime.today().date()), 'completed'] = 0
# Check if gap between today and max date in step log, if so merge in all workouts for 'completed' flag
if step_log_df['completed'].isnull().values.any():
workouts = pd.read_sql(
sql=app.session.query(stravaSummary.start_day_local, stravaSummary.activity_id).filter(
stravaSummary.elapsed_time > min_non_warmup_workout_time).statement, con=engine,
index_col='start_day_local')
# Resample workouts to the per day level - just take max activity_id in case they were more than 1 workout for that day to avoid duplication of hrv data
workouts.set_index(pd.to_datetime(workouts.index), inplace=True)
workouts = workouts.resample('D').max()
step_log_df = step_log_df.merge(workouts, how='left', left_index=True, right_index=True)
# Completed = True if a workout (not just warmup) was done on that day or was a rest day
for x in step_log_df.index:
step_log_df.at[x, 'completed'] = 0 if np.isnan(step_log_df.at[x, 'activity_id']) else 1
# Generate row with yesterdays plan completions status for looping below through workout cycle logic
step_log_df['completed_yesterday'] = step_log_df['completed'].shift(1)
# Drop historical rows that were used for 'yesterday calcs' so we are only working with todays data
# step_log_df = step_log_df.iloc[1:]
# Merge dfs
df = pd.merge(step_log_df, metric_df, how='left', right_index=True, left_index=True)
# If using oura readiness score we don't use workflow, just recommend intensity based on score
if metric == 'readiness':
df['workout_step'] = 99 # dummy value
df['workout_step_desc'] = df['score'].apply(readiness_score_recommendation)
df['rationale'] = 'Oura Readiness Score'
# TODO: Update every 3rd 'Mod' to HIIT
# If using ithlete zscore we don't use workflow
elif metric == 'zscore':
df['workout_step'] = 99 # dummy value
df['workout_step_desc'] = df['z_recommendation']
df['rationale'] = 'Z Score Matrix'
# TODO: Update every 3rd 'Mod' to HIIT
# If using hrv or hrv baseline, use workflow
else:
last_step = last_db_step
for i in df.index:
# Completed / Completed_yesterday could show erroneous data for rest days, as the 0 is brought in based off if a workout is found in strava summary
df.at[i, 'completed_yesterday'] = 1 if last_step == 4 or last_step == 5 else df.at[
i, 'completed_yesterday']
# hrv_increase = df.at[i, 'rmssd_7'] >= df.at[i, 'rmssd_7_yesterday']
within_swc = df.at[i, 'within_swc']
# ### Low Threshold Exceptions ###
# # If lower threshold is crossed, switch to low intensity track
# if df.at[i, 'lower_threshold_crossed'] == True:
# current_step = 4
# rationale = '7 day HRV average crossed the lower threshold.'
# app.server.logger.debug('Lower threshold crossed. Setting current step = 4')
# # If we are below lower threshold, rest until back over threshold
# elif df.at[i, 'under_low_threshold'] == True:
# current_step = 5
# rationale = '7 day HRV average is under the lower threshold.'
# app.server.logger.debug('HRV is under threshold. Setting current step = 5')
# ### Upper Threshold Exceptions ###
# # If upper threshold is crossed, switch to high intensity
# elif df.at[i, 'upper_threshold_crossed'] == True:
# current_step = 1
# rationale = '7 day HRV average crossed the upper threshold.'
# app.server.logger.debug('Upper threshold crossed. Setting current step = 1')
# # If we are above upper threshold, load high intensity until back under threshold
# elif df.at[i, 'over_upper_threshold'] == True:
# if hrv_increase:
# current_step = 1
# rationale = '7 day HRV average increased and is still over the upper threshold.'
# else:
# current_step = 2
# rationale = "7 day HRV average decreased but is still over the upper threshold."
# app.server.logger.debug(
# 'HRV is above threshold. Setting current step = {}.'.format(current_step))
### Missed Workout Exceptions ###
# If workout was not completed yesterday but we are still within thresholds maintain current step
if df.at[i, 'completed_yesterday'] == 0 and within_swc and last_step in [1, 21, 22, 23]:
current_step = last_step
rationale = "Yesterday's workout was not completed and we are still within SWC."
app.server.logger.debug(
'No workout detected for previous day however still within thresholds. Maintaining last step = {}'.format(
current_step))
else:
app.server.logger.debug(
'No exceptions detected. Following the normal workout plan workflow.')
rationale = 'Normal workout plan workflow.'
# Workout workflow logic when no exceptions
if last_step == 0:
current_step = 1
elif last_step == 1:
current_step = next_hiit_mod if within_swc else 6
elif last_step in [21, 22, 23]:
current_step = 3
elif last_step == 3:
current_step = 1 if within_swc else 4
elif last_step == 4:
current_step = 6 if within_swc else 5
elif last_step == 5:
current_step = 6
elif last_step == 6:
current_step = 1 if within_swc else 4
df.at[i, 'completed'] = 1 if current_step == 4 or current_step == 5 else df.at[
i, 'completed']
df.at[i, 'workout_step'] = current_step
last_step = current_step
# Map descriptions and alternate every HIIT and Mod
df.at[i, 'workout_step_desc'] = \
{0: 'Low', 1: 'High', 21: 'Mod', 22: 'Mod', 23: 'HIIT', 3: 'Low', 4: 'Rest', 5: 'Rest',
6: 'Low'}[
df.at[i, 'workout_step']]
if df.at[i, 'workout_step'] in [21, 22, 23] and df.at[i, 'completed'] == 1:
next_hiit_mod = next_hiit_mod + 1 if next_hiit_mod != 23 else 21
df.at[i, 'rationale'] = rationale
df['athlete_id'] = athlete_id
df.reset_index(inplace=True)
# Insert into db
df = df[['athlete_id', 'date', 'workout_step', 'workout_step_desc', 'completed', 'rationale']]
df['date'] = df['date'].dt.date
df.to_sql('workout_step_log', engine, if_exists='append', index=False)
# Bookmark peloton classes
if peloton_credentials_supplied:
set_peloton_workout_recommendations()
# Create spotify playlist based on workout intensity recommendation
athlete_info = app.session.query(athlete).filter(athlete.athlete_id == 1).first()
app.session.remove()
if athlete_info.spotify_playlists_switch == True:
generate_recommendation_playlists(
workout_intensity=df['workout_step_desc'].tail(1).values[0].lower().replace('hiit',
'mod') if athlete_info.spotify_use_rec_intensity else 'workout',
normalize=True,
time_period=athlete_info.spotify_time_period,
num_playlists=athlete_info.spotify_num_playlists)
except BaseException as e: # If workflow fails be sure to turn off processing flag
app.server.logger.error(e)
db_process_flag(flag=False)
app.session.remove()
| [
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11485,
15042,
13,
25410,
282,
26599,
62,
32446,
283,
876,
1330,
674,
64,
40555,
22093,
11,
674,
64,
5569,
1272,
22093,
11,
351,
654,
... | 1.913225 | 9,081 |
from flask import Flask, jsonify
from OpenSSL import SSL
app = Flask(__name__)
context = SSL.Context(SSL.TLSv1_1_METHOD)
context.use_privatekey_file("server.key")
context.use_certificate_file("server.crt")
@app.route("/")
@app.route("/data")
if __name__ == "__main__":
app.run(ssl_context=context)
| [
6738,
42903,
1330,
46947,
11,
33918,
1958,
198,
6738,
4946,
31127,
1330,
25952,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
22866,
796,
25952,
13,
21947,
7,
31127,
13,
51,
6561,
85,
16,
62,
16,
62,
49273,
8,
198,
22866,
... | 2.663793 | 116 |
#!/usr/bin/env python
"""
moveit_fk_demo.py - Version 0.1.1 2015-08-26
Use forward kinemtatics to move the arm to a specified set of joint angles
Copyright 2014 by Patrick Goebel <patrick@pirobot.org, www.pirobot.org>
Copyright 2015 by YS Pyo <passionvirus@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.5
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
import sys
import moveit_commander
from control_msgs.msg import GripperCommand
GROUP_NAME_ARM = 'l_arm'
GROUP_NAME_GRIPPER = 'l_gripper'
if __name__ == "__main__":
try:
MoveItFKDemo()
except rospy.ROSInterruptException:
pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
220,
220,
220,
1445,
270,
62,
69,
74,
62,
9536,
78,
13,
9078,
532,
10628,
657,
13,
16,
13,
16,
1853,
12,
2919,
12,
2075,
628,
220,
220,
220,
5765,
2651,
479,
774... | 2.939153 | 378 |
from flask import render_template
from app import app, cache
import app.views as v
@app.route('/')
@app.route('/index')
| [
6738,
42903,
1330,
8543,
62,
28243,
198,
6738,
598,
1330,
598,
11,
12940,
198,
11748,
598,
13,
33571,
355,
410,
628,
628,
198,
31,
1324,
13,
38629,
10786,
14,
11537,
198,
31,
1324,
13,
38629,
10786,
14,
9630,
11537,
198
] | 3.1 | 40 |
from .model_selection import time_series_splitter, cv_forecaster, backtesting_forecaster, grid_search_forecaster, random_search_forecaster, bayesian_search_forecaster | [
6738,
764,
19849,
62,
49283,
1330,
640,
62,
25076,
62,
22018,
1967,
11,
269,
85,
62,
754,
17970,
11,
736,
33407,
62,
754,
17970,
11,
10706,
62,
12947,
62,
754,
17970,
11,
4738,
62,
12947,
62,
754,
17970,
11,
15489,
35610,
62,
12947,... | 3.608696 | 46 |
import os
from utils.config import opt_train,opt_test
from data.datasets import load_dataset
from noise2noise import Noise2Noise
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '1' # 选择哪块GPU运行 '0' or '1' or '0,1'
#训练
# train()
# 测试单张图片,将结果保存到文件夹下
test()
| [
11748,
28686,
198,
6738,
3384,
4487,
13,
11250,
1330,
2172,
62,
27432,
11,
8738,
62,
9288,
198,
6738,
1366,
13,
19608,
292,
1039,
1330,
3440,
62,
19608,
292,
316,
198,
6738,
7838,
17,
3919,
786,
1330,
30964,
17,
2949,
786,
628,
198,
... | 1.701149 | 174 |
import requests
url = 'http://localhost:5050/predict'
body = {
"text": "The insurance company is evil!"
}
response = requests.post(url, data=body)
print(response.json()) | [
11748,
7007,
198,
198,
6371,
796,
705,
4023,
1378,
36750,
25,
1120,
1120,
14,
79,
17407,
6,
198,
198,
2618,
796,
1391,
198,
220,
220,
220,
366,
5239,
1298,
366,
464,
5096,
1664,
318,
6181,
2474,
198,
92,
198,
198,
26209,
796,
7007,
... | 2.95 | 60 |
import random
from typing import Any, Callable, List
class RandomChoiceCompose:
"""
Randomly choose to apply one transform from a collection of transforms.
"""
| [
11748,
4738,
198,
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
7343,
628,
198,
4871,
14534,
46770,
7293,
577,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
14534,
306,
3853,
284,
4174,
530,
6121,
422,
257,
4947,
286,
31408,
13,
... | 3.625 | 48 |
from rfeed import *
| [
6738,
374,
12363,
1330,
1635,
628,
198
] | 3.142857 | 7 |
import os
import numpy as np
import pickle
import lz4.frame
import cv2
import pandas as pd
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 100)
pd.set_option('display.width', 1000)
# Custom import
from werdich_cfr.tfutils.TFRprovider import Dset
from werdich_cfr.utils.processing import Videoconverter
from werdich_cfr.tfutils.tfutils import use_gpu_devices
#%% Select GPUs
physical_devices, device_list = use_gpu_devices(gpu_device_string='0,1')
#%% files and directories and parameters for all data sets
cfr_data_root = os.path.normpath('/mnt/obi0/andreas/data/cfr')
meta_date = '200617'
# Additional information for filename
meta_dir = os.path.join(cfr_data_root, 'metadata_'+meta_date)
# This should give us ~70% useful files
max_frame_time_ms = 33.34 # Maximum frame_time acceptable in ms
min_rate = 1/max_frame_time_ms*1e3
min_frames = 40 # Minimum number of frames at min_rate (2 s)
min_length = max_frame_time_ms*min_frames*1e-3
n_tfr_files = 8 # We should have at least one TFR file per GPU
#%% Support functions
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
#%% Data set files
dset_list = ['cfr', 'mbf_ammonia', 'mbf_rubidium']
tracer_list = ['ammonia', 'rubidium']
# THIS COULD BE A LOOP
for dset in dset_list:
#dset = dset_list[1]
cfr_meta_file = 'global_pet_echo_dataset_'+meta_date+'.parquet'
tfr_dir = os.path.join(cfr_data_root, 'tfr_'+meta_date, dset)
float_label_list = ['rest_global_mbf', 'stress_global_mbf', 'global_cfr_calc']
meta_df = pd.read_parquet(os.path.join(meta_dir, cfr_meta_file))
# Filter the data set for mbf models
tracer=dset.split('_')[-1]
if tracer in tracer_list:
meta_df = meta_df[meta_df.tracer_obi==tracer]
#%% Select one view and process files
# We cannot insert NAs into the label lists.
# Drop rows with NAs in the label columns
meda_df = meta_df.dropna(subset=float_label_list, how='any', axis=0)
print(f'Copying meta data {cfr_meta_file} into TFR format.')
print(f'Processing data set {dset} with tracer filter {list(meta_df.tracer_obi.unique())}')
print(f'Saving data to {tfr_dir}.')
view = 'a4c'
tfr_info = dset
for mode in meta_df.dset_mode.unique():
# Filter view, mode and rates. Shuffle.
df = meta_df[(meta_df.max_view == view) & (meta_df.dset_mode == mode)].sample(frac=1)
print('View:{}, mode:{}, min_rate:{}, min_length: {}, n_videos:{}'.format(view,
mode,
min_rate,
min_length,
len(df.filename.unique())))
file_list_complete = list(df.filename.unique())
# Split filename_list into multiple parts
# n_samples_per_file = max_samples_per_file
n_samples_per_file = int(np.ceil(len(file_list_complete)/n_tfr_files))
file_list_parts = list(chunks(file_list_complete, n_samples_per_file))
mag = int(np.floor(np.log10(len(file_list_parts)))) + 1
vc = Videoconverter(max_frame_time_ms=max_frame_time_ms, min_frames=min_frames, meta_df=meta_df)
# Each part will have its own TFR filename
for part, file_list in enumerate(file_list_parts):
# TFR filename
tfr_basename = tfr_info+'_'+view+'_'+mode+'_'+meta_date+'_'+str(part).zfill(mag)
tfr_filename = tfr_basename+'.tfrecords'
parquet_filename = tfr_basename+'.parquet'
failed_filename = tfr_basename+'.failed'
print()
print('Processing {} part {} of {}'.format(tfr_filename, part + 1, len(file_list_parts)))
# Data dictionaries
array_data_dict = {'image': []}
float_data_dict = {name: [] for name in float_label_list}
int_data_dict = {'record': []}
im_array_ser_list = [] # list of pd.Series object for the files in im_array_list
im_failed_ser_list = [] # list of pd.Series objects for failed videos
for f, filename in enumerate(file_list):
if (f+1) % 200 == 0:
print('Loaded video {} of {} into memory.'.format(f+1, len(file_list)))
ser_df = df.loc[df.filename == filename, :]
# Exclude post-2018 data if there is more than one row for this file
if ser_df.shape[0] > 1:
ser_df = ser_df[ser_df['post-2018'] == 0]
ser = ser_df.iloc[0]
error, im_array = vc.process_video(filename)
if np.any(im_array):
# Data dictionaries
array_data_dict['image'].append(im_array)
for label in float_label_list:
float_data_dict[label].append(ser[label])
int_data_dict['record'].append(ser.name)
ser_df2 = ser_df.assign(im_array_shape=[list(im_array.shape)])
im_array_ser_list.append(ser_df2)
else:
ser_df2 = ser_df.assign(err=[error])
im_failed_ser_list.append(ser_df2)
# Write TFR file
if len(array_data_dict['image']) > 0:
TFR_saver = Dset(data_root=tfr_dir)
TFR_saver.create_tfr(filename=tfr_filename,
array_data_dict=array_data_dict,
float_data_dict=float_data_dict,
int_data_dict=int_data_dict)
# Save feature names (needed for parsing the tfr files)
array_list = list(array_data_dict.keys())
array_list.append('shape')
feature_dict = {'array': array_list,
'float': list(float_data_dict.keys()),
'int': list(int_data_dict.keys()),
'features': list(TFR_saver.feature_dict.keys())}
feature_dict_file_name = os.path.splitext(cfr_meta_file)[0]+'.pkl'
feature_dict_file = os.path.join(tfr_dir, feature_dict_file_name)
# Save the feature. We need them to decode the data.
if not os.path.exists(feature_dict_file):
with open(feature_dict_file, 'wb') as fs:
pickle.dump(feature_dict, fs)
# When this is done, save the parquet file
im_array_df = pd.concat(im_array_ser_list)
im_array_df.to_parquet(os.path.join(tfr_dir, parquet_filename))
# Save the failed rows with the error messages
im_failed_df = pd.concat(im_failed_ser_list)
im_failed_df.to_parquet(os.path.join(tfr_dir, failed_filename))
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2298,
293,
198,
11748,
300,
89,
19,
13,
14535,
198,
11748,
269,
85,
17,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
30094,
13,
2617,
62,
18076,
10786,
13812,
13,
9806,
... | 1.981461 | 3,560 |
import numpy as np
inp1 = """<x=-1, y=0, z=2>
<x=2, y=-10, z=-7>
<x=4, y=-8, z=8>
<x=3, y=5, z=-1>"""
# moons 'Io', 'Europa', 'Ganymede', 'Callisto'
# unit test part1
moons = parse_input(inp1)
assert np.allclose(moons['Io'], np.array([-1, 0, 2]))
velocities = {key: np.array([0, 0, 0]) for key in moons}
if False:
printout(0, moons, velocities)
moons, velocities = step(moons, velocities)
printout(1, moons, velocities)
for _ in range(9):
moons, velocities = step(moons, velocities)
printout(10, moons, velocities)
assert total_energy(moons, velocities) == 179
# part1
moons = parse_input(open('data/input12').read().strip())
velocities = {key: np.array([0, 0, 0]) for key in moons}
for _ in range(1000):
moons, velocities = step(moons, velocities)
print(f'solution for part1: {total_energy(moons, velocities)}')
def get_cycle(moons, coordinate_index):
"""Finds a cycle in coordinate x1 only by simulating x1 = f(x1, vx1)."""
moons = {key: moons[key][coordinate_index + 0:coordinate_index + 1] for key in moons}
velocities = {key: np.array([0]) for key in moons}
initial_hash = hash(moons, velocities)
nsteps = 0
while True:
moons, velocities = step(moons, velocities)
nsteps += 1
curr_hash = hash(moons, velocities)
if curr_hash == initial_hash:
break
return nsteps
# part2 unit test
moons = parse_input(inp1)
period0 = get_cycle(moons, coordinate_index=0)
period1 = get_cycle(moons, coordinate_index=1)
period2 = get_cycle(moons, coordinate_index=2)
min_cycle = min(np.lcm(np.lcm(period0, period1), period2), np.lcm(np.lcm(period0, period2), period1),
np.lcm(np.lcm(period1, period2), period0))
assert min_cycle == 2772
# part2
moons = parse_input(open('data/input12').read().strip())
period0 = get_cycle(moons, coordinate_index=0)
period1 = get_cycle(moons, coordinate_index=1)
period2 = get_cycle(moons, coordinate_index=2)
min_cycle = min(np.lcm(np.lcm(period0, period1), period2), np.lcm(np.lcm(period0, period2), period1),
np.lcm(np.lcm(period1, period2), period0))
print(f"solution for part2: {min_cycle}")
| [
11748,
299,
32152,
355,
45941,
198,
198,
259,
79,
16,
796,
37227,
27,
87,
10779,
16,
11,
331,
28,
15,
11,
1976,
28,
17,
29,
198,
27,
87,
28,
17,
11,
331,
10779,
940,
11,
1976,
10779,
22,
29,
198,
27,
87,
28,
19,
11,
331,
107... | 2.320856 | 935 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 21 12:56:48 2019
@author: salim
"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
os.chdir('/Users/salim/Desktop/EDEM/Python/Code')
a11 = pd.read_csv ('rentals_weather_2011.csv', sep=',', decimal='.')
a12 = pd.read_csv ('rentals_weather_2012.csv', sep=';', decimal=',')
a11 = a11.drop(columns=['Unnamed: 0'])
a11 = a11.rename(columns={'dteday_x':'dteday'})
a12.day = a12.day - 365
a12.drop(a12.tail(1).index,inplace=True) # drop last n rows (borrar ultima linea)
y12 = a12[['day', 'cnt']]
y11 = a11[['day', 'cnt']]
y12 = y12.rename(columns={'cnt':'cnt_12'})
y11 = y11.rename(columns={'cnt':'cnt_11'})
y112 = pd.merge(y11, y12, on='day')
#Representar dos años en una gráfica
plt.scatter(y112.day,y112.cnt_11)
plt.scatter(y112.day,y112.cnt_12)
plt.title("Figura . Rented Bicycles Comparation 11-12") #Titulo
plt.xlabel("Nr. of Day") # Establece el título del eje x
plt.ylabel("Nr. of Rented Bicycles") # Establece el título del eje y
plt.scatter(y112.day,y112.cnt_11,linewidths=1,label = 'Sales 2011',color ="Green")
plt.scatter(y112.day,y112.cnt_12,linewidths=1,label = 'Sales 2012', Color ="Blue")
plt.legend()
plt.savefig('Sales_11_12.jpg')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
8621,
2310,
1105,
25,
3980,
25,
2780,
13130,
198,
198,
31,
9800,
25,
3664,
320,
198... | 2.151361 | 588 |
with open("input.txt") as x:
lines = x.read().splitlines()
pairs = {"(": ")", "{": "}", "<": ">", "[": "]"}
points = {")": 1, "]": 2, "}": 3, ">": 4}
context = []
p_array = []
p = 0
corrupted = False
for l in lines:
context = []
corrupted = False
p = 0
for char in l:
if char in pairs.keys():
context.append(char)
elif char in pairs.values():
if char != pairs[context[-1]]:
corrupted = True
p += points[char]
break
else:
context = context[:-1]
if not corrupted:
context.reverse()
for c in context:
p = p * 5 + points[pairs[c]]
p_array += [p]
print(sorted(p_array)[len(p_array) // 2])
| [
4480,
1280,
7203,
15414,
13,
14116,
4943,
355,
2124,
25,
198,
220,
220,
220,
3951,
796,
2124,
13,
961,
22446,
35312,
6615,
3419,
198,
198,
79,
3468,
796,
1391,
18109,
1298,
366,
42501,
45144,
1298,
366,
92,
1600,
33490,
1298,
366,
29,... | 1.96401 | 389 |
#!/usr/bin/env python3.8
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
if __name__ == '__main__':
print(os.path.isdir(sys.argv[1]))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
23,
198,
198,
2,
15069,
12131,
383,
376,
37533,
544,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
... | 2.935484 | 93 |
# -*- coding:utf-8 -*-
import sys
from github import Github
reload(sys)
sys.setdefaultencoding('utf-8')
# var legendData = ['linary', 'zhangyi', 'javame'];
# var seriesData = [{name: 'linary', value: 2}, {name: 'zhangyi', value: 3},{name: 'javame', value: 4}];
# var selected = {'linary': true, 'zhangyi': true, 'javame': false};
if __name__ == "__main__":
# using token
token = 'xxx...'
g = Github(token)
repo = g.get_repo("hugegraph/hugegraph")
# collect issues
issue_file = open('issues.txt', 'w')
all_issues = repo.get_issues(state="open")
for issue in all_issues:
line = '%s\t%s' % (issue.user.login, issue.title)
issue_file.write(line + '\n')
issue_file.close()
# handle user issues
authors = ['Linary', 'javeme', 'zhoney']
legend_data = []
series_data = {}
selected = {}
with open('issues.txt', "r+") as user_issues_file:
for issue_line in user_issues_file:
parts = issue_line.split('\t')
assert len(parts) == 2
user_name = parts[0]
issue_title = parts[1]
if user_name in series_data:
count = series_data[user_name]
count = count + 1
series_data[user_name] = count
else:
legend_data.append(user_name)
series_data[user_name] = 1
selected[user_name] = True
selected['Linary'] = False
selected['javeme'] = False
selected['zhoney'] = False
# convert to echarts data strcture
echarts = '''var legendData = %s;\nvar seriesData = %s;\nvar selected = %s;\n\nvar data = {legendData: legendData, seriesData: seriesData, selected: selected};
''' % (write_legend_data(legend_data), write_series_data(series_data), write_selected(selected))
print echarts
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
11748,
25064,
198,
198,
6738,
33084,
1330,
38994,
198,
198,
260,
2220,
7,
17597,
8,
198,
17597,
13,
2617,
12286,
12685,
7656,
10786,
40477,
12,
23,
11537,
628,
198,
19... | 2.256691 | 822 |
# -*- coding: utf-8 -*-
from . import op_student
from . import op_admission
from . import account_invoice | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
764,
1330,
1034,
62,
50139,
198,
6738,
764,
1330,
1034,
62,
324,
3411,
198,
6738,
764,
220,
1330,
1848,
62,
16340,
2942
] | 2.815789 | 38 |
import numpy as np
from multiagent_envs.multiagent.core import World, Agent, Landmark, Hole, Snack, Obstacle
from multiagent_envs.multiagent.scenario import BaseScenario
import pdb
from vars import pargs
import math
#from train import past
import random
from a2c_ppo_acktr.arguments import get_args
| [
11748,
299,
32152,
355,
45941,
198,
6738,
5021,
25781,
62,
268,
14259,
13,
41684,
25781,
13,
7295,
1330,
2159,
11,
15906,
11,
6379,
4102,
11,
24478,
11,
5489,
441,
11,
46378,
6008,
198,
6738,
5021,
25781,
62,
268,
14259,
13,
41684,
25... | 3.333333 | 90 |