content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import numpy as np
from abides_markets.generators import (
ConstantDepthGenerator,
ConstantOrderSizeGenerator,
UniformDepthGenerator,
UniformOrderSizeGenerator,
)
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
450,
1460,
62,
34162,
13,
8612,
2024,
1330,
357,
198,
220,
220,
220,
20217,
48791,
8645,
1352,
11,
198,
220,
220,
220,
20217,
18743,
10699,
8645,
1352,
11,
198,
220,
220,
220,
35712,
4879... | 3.066667 | 60 |
from django.shortcuts import render, redirect, reverse
from django.views import View, generic
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from .forms import CommentForm
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
11,
9575,
198,
6738,
42625,
14208,
13,
33571,
1330,
3582,
11,
14276,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,... | 3.877193 | 57 |
# Generated by Django 3.2 on 2021-08-11 20:10
import datetime
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
319,
33448,
12,
2919,
12,
1157,
1160,
25,
940,
198,
198,
11748,
4818,
8079,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 3.088235 | 34 |
import basic_func as bf
from gen_drivfunc import *
from fpexception_detection import FPexcption_detector_whole
from fpexception_detection import get_testing_point
from math_lib import rfl
from plot_domain import plot_2vfunc_domain
from plot_domain import plot_1func_domain
from mpmath import *
import itertools
import numpy as np
import random
import signal
signal.signal(signal.SIGALRM, handler)
NoConvergence = mp.NoConvergence
# flag 0 not need to repair 0 f
# flag 1 may need to repair 1 inf/nan f
# flag 2,3 overflow handle 2 inf inf 3 inf nan
# flag 4 domain error handle 4 nan nan
if __name__ == "__main__":
cal_exceptions()
| [
11748,
4096,
62,
20786,
355,
275,
69,
198,
6738,
2429,
62,
7553,
85,
20786,
1330,
1635,
198,
6738,
277,
24900,
4516,
62,
15255,
3213,
1330,
31459,
41194,
1159,
62,
15255,
9250,
62,
1929,
2305,
198,
6738,
277,
24900,
4516,
62,
15255,
3... | 3.080189 | 212 |
import binascii
import os
from typing import Union
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import QuerySet
from django.db.models.base import Model
from rest_framework.exceptions import APIException
from rest_framework.response import Response
from rest_framework.status import (
HTTP_403_FORBIDDEN,
HTTP_409_CONFLICT,
HTTP_422_UNPROCESSABLE_ENTITY,
)
from scalade.utils import BASE64_REGEX, decode_scalade_token
class DecoratorShipper:
"""
Ships common used decorators as static methods.
"""
@staticmethod
@staticmethod
def extract_job_from_token(func):
"""
Decorator used in 'runtime' api views
to extract job from token.
"""
return wrapper
@staticmethod
def with_permission(perm: str):
"""
Decorator used in 'resources' api views
that restricts content using permissions.
"""
return decorator
| [
11748,
9874,
292,
979,
72,
198,
11748,
28686,
198,
6738,
19720,
1330,
4479,
198,
198,
6738,
42625,
14208,
13,
18211,
1330,
6725,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
9515,
13921,
3673,
3109,
396,
198,
6738,
42625,
1... | 2.754144 | 362 |
# Copyright (c) 2015 Simon Kennedy <sffjunkie+code@gmail.com>
from collections import namedtuple
__all__ = ['Element']
_Element = namedtuple('Element', "title reader writer key log")
| [
2,
15069,
357,
66,
8,
1853,
11288,
10401,
1279,
82,
487,
73,
2954,
494,
10,
8189,
31,
14816,
13,
785,
29,
198,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
834,
439,
834,
796,
37250,
20180,
20520,
198,
198,
62,
20180,
796,
3... | 3.263158 | 57 |
from substratools import exceptions, Metrics
from substratools.utils import import_module, load_interface_from_module
import pytest
| [
6738,
47294,
265,
10141,
1330,
13269,
11,
3395,
10466,
198,
6738,
47294,
265,
10141,
13,
26791,
1330,
1330,
62,
21412,
11,
3440,
62,
39994,
62,
6738,
62,
21412,
198,
198,
11748,
12972,
9288,
628
] | 3.941176 | 34 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'chooseserver.no.ui'
#
# Created by: PyQt5 UI code generator 5.13.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
6679,
4629,
18497,
13,
3919,
13,
9019,
6,
198,
2,
198,
2,
15622,
416,
25,
9485,
48,
83,
20,
12454,
2438,
1... | 2.829545 | 88 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 6 12:02:05 2018
@author: eub_hmy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import time
import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import random
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_dir',
type=str,
default='input/occupancy_data/datatest.txt',
help="""\
Where to download the speech training data to.
""")
parser.add_argument(
'--training_steps',
type=int,
default=3000,
help='How many training steps to run before ending.'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.001,
help='How large a learning rate to use when training.'
)
parser.add_argument(
'--train_batch_size',
type=int,
default=1000,
help='How many lines to train on at a time.'
)
parser.add_argument(
'--task_index',
type=int,
default=0,
help="""\
Index of task within the job.\
"""
)
parser.add_argument(
'--ps_hosts',
type=str,
default=0,
help="""\
Comma-separated list of hostname:port pairs.\
"""
)
parser.add_argument(
'--worker_hosts',
type=str,
default=0,
help="""\
Comma-separated list of hostname:port pairs.\
"""
)
parser.add_argument(
'--job_name',
type=str,
default=0,
help="""\
job name: worker or ps.\
"""
)
parser.add_argument(
'--issync',
type=int,
default=0,
help="""\
between graph or not.\
"""
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
7653,
220,
718,
1105,
25,
2999,
25,
2713,
2864,
198,
198,
31,
9800,
25,
304,
549,
62,
71,
1820,
198,
37811,
198,
6738,
11593,
37443,
834,... | 2.103696 | 974 |
#
# PySNMP MIB module FA-EXT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/FA-EXT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:11:49 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
connUnitPortEntry, = mibBuilder.importSymbols("FCMGMT-MIB", "connUnitPortEntry")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, ObjectIdentity, Counter64, Unsigned32, Integer32, Counter32, ModuleIdentity, IpAddress, NotificationType, Gauge32, TimeTicks, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "ObjectIdentity", "Counter64", "Unsigned32", "Integer32", "Counter32", "ModuleIdentity", "IpAddress", "NotificationType", "Gauge32", "TimeTicks", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso")
TruthValue, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "TextualConvention")
sw, = mibBuilder.importSymbols("SW-MIB", "sw")
faExt = ModuleIdentity((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28))
faExt.setRevisions(('2010-11-22 10:30', '2013-09-12 10:30', '2013-09-24 13:55', '2013-10-29 13:54',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: faExt.setRevisionsDescriptions(('Initial version of this module.', 'Added swConnUnitPortFECMode', 'Added swConnUnitPortFECState', 'Added notsupported value for swConnUnitPortFECState',))
if mibBuilder.loadTexts: faExt.setLastUpdated('201310291354Z')
if mibBuilder.loadTexts: faExt.setOrganization('Brocade Communications Systems, Inc.,')
if mibBuilder.loadTexts: faExt.setContactInfo('Customer Support Group Brocade Communications Systems, 1745 Technology Drive, San Jose, CA 95110 U.S.A Tel: +1-408-392-6061 Fax: +1-408-392-6656 Email: support@Brocade.COM WEB: www.brocade.com')
if mibBuilder.loadTexts: faExt.setDescription('The MIB module is Extension for FA-MIB. Copyright (c) 1996-2003 Brocade Communications Systems, Inc. All rights reserved.')
swSfpStatTable = MibTable((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1), )
if mibBuilder.loadTexts: swSfpStatTable.setStatus('current')
if mibBuilder.loadTexts: swSfpStatTable.setDescription('This represents the diagnostic stats of SFPs.')
swFapwwnFeature = ObjectIdentity((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2))
if mibBuilder.loadTexts: swFapwwnFeature.setStatus('current')
if mibBuilder.loadTexts: swFapwwnFeature.setDescription('The OID sub-tree for Fapwwn feature. Using this feature user can configure virtual port WWN for a port.')
swPortConfigTable = MibTable((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3), )
if mibBuilder.loadTexts: swPortConfigTable.setStatus('current')
if mibBuilder.loadTexts: swPortConfigTable.setDescription('This represents the configuration of encryption / compression feature on a port')
swConnUnitPortTable = MibTable((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4), )
if mibBuilder.loadTexts: swConnUnitPortTable.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortTable.setDescription('This represents the Conn unit Port entry')
swSfpStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1), )
connUnitPortEntry.registerAugmentions(("FA-EXT-MIB", "swSfpStatEntry"))
swSfpStatEntry.setIndexNames(*connUnitPortEntry.getIndexNames())
if mibBuilder.loadTexts: swSfpStatEntry.setStatus('current')
if mibBuilder.loadTexts: swSfpStatEntry.setDescription('This represents the diagnostic stats of SFPs')
swSfpTemperature = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('centigrade').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpTemperature.setStatus('current')
if mibBuilder.loadTexts: swSfpTemperature.setDescription('This object identifies the temperature of SFP')
swSfpVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('milli voltage').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpVoltage.setStatus('current')
if mibBuilder.loadTexts: swSfpVoltage.setDescription('This object identifies the voltage of SFP.')
swSfpCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('milli amphere').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpCurrent.setStatus('current')
if mibBuilder.loadTexts: swSfpCurrent.setDescription('This object identifies the current of SFP.')
swSfpRxPower = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('dBm').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpRxPower.setStatus('current')
if mibBuilder.loadTexts: swSfpRxPower.setDescription('This object identifies the Rx power consumption of SFP.')
swSfpTxPower = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setUnits('dBm').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpTxPower.setStatus('current')
if mibBuilder.loadTexts: swSfpTxPower.setDescription('This object identifies the Tx power consumption of SFP.')
swSfpPoweronHrs = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 6), Integer32()).setUnits('hours').setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpPoweronHrs.setStatus('current')
if mibBuilder.loadTexts: swSfpPoweronHrs.setDescription('This object identifies the power on hours of SFP. This is applicable only to 16G SFPs.')
swSfpUnitId = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swSfpUnitId.setStatus('current')
if mibBuilder.loadTexts: swSfpUnitId.setDescription('This object identifies unit ID of SFP. This is applicable only to QSFP.')
swPortFapwwnConfigTable = MibTable((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1), )
if mibBuilder.loadTexts: swPortFapwwnConfigTable.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigTable.setDescription('This represents the configuration of ports.')
swPortFapwwnConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1, 1), )
connUnitPortEntry.registerAugmentions(("FA-EXT-MIB", "swPortFapwwnConfigEntry"))
swPortFapwwnConfigEntry.setIndexNames(*connUnitPortEntry.getIndexNames())
if mibBuilder.loadTexts: swPortFapwwnConfigEntry.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigEntry.setDescription('This represents the configuration of ports.')
swPortFapwwnConfigEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1, 1, 1), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortFapwwnConfigEnable.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigEnable.setDescription('Represents the Fapwwn status. This is for per port.')
swPortFapwwnConfigFapwwn = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(256, 256)).setFixedLength(256)).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortFapwwnConfigFapwwn.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigFapwwn.setDescription('Represents the Fapwwn. For AG it is range of WWNs. If Fapwwn feature is not enabled in a port this object value is NA(Not Applicable.')
swPortFapwwnConfigType = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 2, 1, 1, 3), FapwwnType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortFapwwnConfigType.setStatus('current')
if mibBuilder.loadTexts: swPortFapwwnConfigType.setDescription('Represents the Fapwwn type. ')
swPortConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1), )
connUnitPortEntry.registerAugmentions(("FA-EXT-MIB", "swPortConfigEntry"))
swPortConfigEntry.setIndexNames(*connUnitPortEntry.getIndexNames())
if mibBuilder.loadTexts: swPortConfigEntry.setStatus('current')
if mibBuilder.loadTexts: swPortConfigEntry.setDescription('This represents the configuration of encryption / compression feature on a port')
swPortEncrypt = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1, 1), EncryptCompressStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortEncrypt.setStatus('current')
if mibBuilder.loadTexts: swPortEncrypt.setDescription('Represents the encryption status on a port.')
swPortCompression = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1, 2), EncryptCompressStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortCompression.setStatus('current')
if mibBuilder.loadTexts: swPortCompression.setDescription('Represents the compression status on port.')
swPortCipherKeySize = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortCipherKeySize.setStatus('current')
if mibBuilder.loadTexts: swPortCipherKeySize.setDescription('Represents the Cipher key size. FOS supports 256 bytes key')
swPortCipherMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 3, 1, 4), CiperMode()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortCipherMode.setStatus('current')
if mibBuilder.loadTexts: swPortCipherMode.setDescription('Represents the Cipher mode. ')
swConnUnitPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1), )
connUnitPortEntry.registerAugmentions(("FA-EXT-MIB", "swConnUnitPortEntry"))
swConnUnitPortEntry.setIndexNames(*connUnitPortEntry.getIndexNames())
if mibBuilder.loadTexts: swConnUnitPortEntry.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortEntry.setDescription('This represents the Conn unit Port Entry')
swConnUnitPortCapableSpeeds = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1, 1), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swConnUnitPortCapableSpeeds.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortCapableSpeeds.setDescription('This represents the available speeds, that a port is capable of configuring')
swConnUnitPortSpeedMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("auto-neg", 1), ("static", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swConnUnitPortSpeedMode.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortSpeedMode.setDescription('This represents the type of speed modes that can be configured for the particular port. The modes that can be configured are auto-negotiable and static speeds.')
swConnUnitPortFECMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("disabled", 2), ("enabled", 3), ("notsupported", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swConnUnitPortFECMode.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortFECMode.setDescription('This represents the port Forward Error Correction Mode. FEC feature is only applicable to 10G/16G platforms.')
swConnUnitPortFECState = MibTableColumn((1, 3, 6, 1, 4, 1, 1588, 2, 1, 1, 1, 28, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("active", 1), ("inactive", 2), ("notsupported", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swConnUnitPortFECState.setStatus('current')
if mibBuilder.loadTexts: swConnUnitPortFECState.setDescription('This represents FEC state of a port.If in link both sfp connected are of brocade vendor type then state is active, else it will be inactive.')
mibBuilder.exportSymbols("FA-EXT-MIB", CiperMode=CiperMode, swSfpCurrent=swSfpCurrent, swSfpTxPower=swSfpTxPower, swPortFapwwnConfigType=swPortFapwwnConfigType, swPortFapwwnConfigTable=swPortFapwwnConfigTable, faExt=faExt, swFapwwnFeature=swFapwwnFeature, swPortFapwwnConfigEntry=swPortFapwwnConfigEntry, swSfpVoltage=swSfpVoltage, swPortConfigEntry=swPortConfigEntry, swSfpRxPower=swSfpRxPower, FapwwnType=FapwwnType, swConnUnitPortCapableSpeeds=swConnUnitPortCapableSpeeds, swSfpPoweronHrs=swSfpPoweronHrs, swPortCompression=swPortCompression, swConnUnitPortEntry=swConnUnitPortEntry, PYSNMP_MODULE_ID=faExt, EncryptCompressStatus=EncryptCompressStatus, swPortEncrypt=swPortEncrypt, swSfpUnitId=swSfpUnitId, swSfpStatEntry=swSfpStatEntry, swConnUnitPortFECMode=swConnUnitPortFECMode, swPortCipherKeySize=swPortCipherKeySize, swPortFapwwnConfigFapwwn=swPortFapwwnConfigFapwwn, swConnUnitPortSpeedMode=swConnUnitPortSpeedMode, swPortCipherMode=swPortCipherMode, swConnUnitPortFECState=swConnUnitPortFECState, swSfpTemperature=swSfpTemperature, swSfpStatTable=swSfpStatTable, swConnUnitPortTable=swConnUnitPortTable, swPortFapwwnConfigEnable=swPortFapwwnConfigEnable, swPortConfigTable=swPortConfigTable)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
9677,
12,
13918,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
14490,
14,
67,
615,
47562,
19,
... | 2.822044 | 4,872 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.export_inference_graph."""
import os
import numpy as np
import six
import tensorflow as tf
from object_detection import exporter
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-import-not-at-top
slim = tf.contrib.slim
if __name__ == '__main__':
tf.test.main()
| [
2,
15069,
2177,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.585075 | 335 |
from PyQt5.Qt import *
from UI.reader_interface import Ui_MainWindow
import sys
if __name__ == "__main__":
app = QApplication(sys.argv)
window = ReaderPane()
window.show()
sys.exit(app.exec_())
| [
6738,
9485,
48,
83,
20,
13,
48,
83,
1330,
1635,
198,
6738,
12454,
13,
46862,
62,
39994,
1330,
471,
72,
62,
13383,
27703,
198,
11748,
25064,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
... | 2.529412 | 85 |
import tensorflow as tf
import matplotlib.pyplot as plt
import time
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
640,
198
] | 3.238095 | 21 |
"""
API Provider filter
"""
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2015, 2017 VMware, Inc. All rights reserved. -- VMware Confidential' # pylint: disable=line-too-long
import abc
import six
from vmware.vapi.core import ApiProvider
from vmware.vapi.data.serializers.introspection import (
convert_data_def_to_data_value)
from vmware.vapi.lib.log import get_vapi_logger
from vmware.vapi.provider.lib import augment_method_result_with_errors
logger = get_vapi_logger(__name__)
@six.add_metaclass(abc.ABCMeta)
class ApiProviderFilter(ApiProvider):
"""
ApiProviderFilter is a base class for all ApiProvider filters.
This handles all the common methods and also takes care of augmenting
errors reported by an ApiProvider filter.
:type next_provider: :class:`vmware.vapi.core.ApiProvider`
:ivar next_provider: Next API Provider in the chain
"""
def __init__(self, next_provider=None, errors_to_augment=None, name=None):
"""
Initialize ApiProviderFilter
:type next_provider: :class:`vmware.vapi.core.ApiProvider` or ``None``
:param next_provider: API Provider to invoke the requests
:type errors_to_augment: :class:`list` of
:class:`vmware.vapi.data.definition.ErrorDefinition` or ``None``
:param errors_to_augment: List of error definitions to be added to
method definitions
:type name: :class:`str`
:param name: The name of the filter
"""
ApiProvider.__init__(self)
self.name = name if name is not None else self.__class__.__name__
self.next_provider = next_provider
self._error_defs_to_augment = errors_to_augment or []
self._error_values_to_augment = [
convert_data_def_to_data_value(error_def)
for error_def in self._error_defs_to_augment
]
@abc.abstractmethod
def invoke(self, service_id, operation_id, input_value, ctx):
"""
Invoke an API request. Derived classes of ApiProviderFilter
should call this method to invoke the request. This can be done
by: ApiProviderFilter.invoke(self, ctx, method_id, input_value).
This method calls the next API Provider. If the request is made to
"get" operation of vAPI Operation Introspection service, errors are
augmented to the method result.
:type service_id: :class:`str`
:param service_id: Service identifier
:type operation_id: :class:`str`
:param operation_id: Operation identifier
:type input_value: :class:`vmware.vapi.data.value.StructValue`
:param input_value: Method input parameters
:type ctx: :class:`vmware.vapi.core.ExecutionContext`
:param ctx: Execution context for this method
:rtype: :class:`vmware.vapi.core.MethodResult`
:return: Result of the method invocation
"""
method_result = self.next_provider.invoke(
service_id, operation_id, input_value, ctx)
return augment_method_result_with_errors(
service_id, operation_id, method_result,
self._error_values_to_augment)
def get_api_provider(self):
"""
Get the last provider in the chain.
:rtype: :class:`vmware.vapi.core.ApiProvider`
:return: Last provider in the provider chain which is not a filter
"""
if isinstance(self.next_provider, ApiProviderFilter):
return self.next_provider.get_api_provider()
return self.next_provider
def find_first_api_filter(self, name):
"""
Get the first filter with the specified name in the provider chain
:type name: :class:`str`
:param name: Filter name
:rtype: :class:`vmware.vapi.core.ApiProviderFilter` or ``None``
:return: First filter that matches the name
"""
if self.name == name:
return self
if isinstance(self.next_provider, ApiProviderFilter):
return self.next_provider.find_first_api_filter(name)
return None
| [
37811,
198,
17614,
32549,
8106,
198,
37811,
198,
198,
834,
9800,
834,
796,
705,
15996,
1574,
11,
3457,
2637,
198,
834,
22163,
4766,
834,
796,
705,
15269,
1853,
11,
2177,
37754,
11,
3457,
13,
220,
1439,
2489,
10395,
13,
1377,
37754,
73... | 2.493902 | 1,640 |
"""The WaveBlocks Project
Compute the kinetic and potential energies of a wavefunction.
@author: R. Bourquin
@copyright: Copyright (C) 2012, 2016 R. Bourquin
@license: Modified BSD License
"""
from numpy import zeros
from WaveBlocksND import BlockFactory
from WaveBlocksND import KineticOperator
from WaveBlocksND import WaveFunction
from WaveBlocksND import BasisTransformationWF
def compute_energy(iom, blockid=0, eigentrafo=True, iseigen=True):
"""
:param iom: An :py:class:`IOManager: instance providing the simulation data.
:param blockid: The data block from which the values are read. Default is `0`.
:param eigentrafo: Whether to make a transformation into the eigenbasis.
:type eigentrafo: Boolean, default is ``True``.
:param iseigen: Whether the data is assumed to be in the eigenbasis.
:type iseigen: Boolean, default is ``True``
"""
parameters = iom.load_parameters()
# Number of time steps we saved
timesteps = iom.load_wavefunction_timegrid(blockid=blockid)
nrtimesteps = timesteps.shape[0]
# Construct grid from the parameters
grid = BlockFactory().create_grid(parameters)
# The potential used
Potential = BlockFactory().create_potential(parameters)
# The operators
KO = KineticOperator(grid)
KO.calculate_operator(parameters["eps"])
opT = KO
if eigentrafo is True:
opV = Potential.evaluate_at(grid)
else:
if iseigen is True:
opV = Potential.evaluate_eigenvalues_at(grid, as_matrix=True)
else:
opV = Potential.evaluate_at(grid, as_matrix=True)
# Basis transformator
if eigentrafo is True:
BT = BasisTransformationWF(Potential)
BT.set_grid(grid)
# And two empty wavefunctions
WF = WaveFunction(parameters)
WF.set_grid(grid)
WF2 = WaveFunction(parameters)
WF2.set_grid(grid)
# We want to save norms, thus add a data slot to the data file
iom.add_energy(parameters, timeslots=nrtimesteps, blockid=blockid)
nst = Potential.get_number_components()
if eigentrafo is True:
# Iterate over all timesteps
for i, step in enumerate(timesteps):
print(" Computing energies of timestep %d" % step)
# Retrieve simulation data
values = iom.load_wavefunction(timestep=step, blockid=blockid)
values = [values[j, ...] for j in range(parameters["ncomponents"])]
WF.set_values(values)
# Project wavefunction values to eigenbasis
BT.transform_to_eigen(WF)
ekinlist = []
epotlist = []
# For each component of |Psi>
values = WF.get_values()
for index, item in enumerate(values):
# tmp is the Vector (0, 0, 0, \psi_i, 0, 0, ...)
tmp = [zeros(item.shape) for z in range(nst)]
tmp[index] = item
WF2.set_values(tmp)
# Project this vector to the canonical basis
BT.transform_to_canonical(WF2)
# And calculate the energies of these components
ekinlist.append(WF2.kinetic_energy(opT, summed=True))
epotlist.append(WF2.potential_energy(opV, summed=True))
iom.save_energy((ekinlist, epotlist), timestep=step, blockid=blockid)
else:
# Iterate over all timesteps
for i, step in enumerate(timesteps):
print(" Computing energies of timestep %d" % step)
# Retrieve simulation data
values = iom.load_wavefunction(timestep=step, blockid=blockid)
values = [values[j, ...] for j in range(parameters["ncomponents"])]
WF.set_values(values)
# And calculate the energies of these components
ekinlist = WF.kinetic_energy(opT, summed=False)
epotlist = WF.potential_energy(opV, summed=False)
iom.save_energy((ekinlist, epotlist), timestep=step, blockid=blockid)
| [
37811,
464,
17084,
45356,
4935,
198,
198,
7293,
1133,
262,
37892,
290,
2785,
27598,
286,
257,
6769,
8818,
13,
198,
198,
31,
9800,
25,
371,
13,
20576,
21915,
198,
31,
22163,
4766,
25,
15069,
357,
34,
8,
2321,
11,
1584,
371,
13,
20576... | 2.382598 | 1,678 |
import argparse
from importlib import import_module
if __name__ == "__main__":
main() | [
11748,
1822,
29572,
198,
6738,
1330,
8019,
1330,
1330,
62,
21412,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419
] | 3.103448 | 29 |
"""
:mod:`dim` is an HTML parser and simple DOM implementation with CSS
selector support.
:mod:`dim`
- is a single module;
- has no dependency outside `PSL <https://docs.python.org/3/library/>`_;
- is not crazy long;
- supports Python 3.6 and forward,
so the file could be directly embedded in any Python 3.4+ application,
or even in a monolithic source file. :mod:`dim` was designed to ease the
development of `googler(1) <https://github.com/jarun/googler/>`_, which
itself promises to be a single Python script with zero third-party dep.
Simple example:
.. doctest::
>>> import dim
>>> html = '''
... <html>
... <body>
... <table id="primary">
... <thead>
... <tr><th class="bold">A</th><th>B</th></tr>
... </thead>
... <tbody>
... <tr class="highlight"><td class="bold">1</td><td>2</td></tr>
... <tr><td class="bold">3</td><td>4</td></tr>
... <tr><td class="bold">5</td><td>6</td></tr>
... <tr><td class="bold">7</td><td>8</td></tr>
... </tbody>
... </table>
... <table id="secondary">
... <thead>
... <tr><th class="bold">C</th><th>D</th></tr>
... </thead>
... <tbody></tbody>
... </table>
... </body>
... </html>'''
>>> root = dim.parse_html(html)
>>> [elem.text for elem in root.select_all('table#primary th.bold, '
... 'table#primary tr.highlight + tr > td.bold')]
['A', '3']
>>> [elem.text for elem in root.select_all('table#primary th.bold, '
... 'table#primary tr.highlight ~ tr > td.bold')]
['A', '3', '5', '7']
>>> [elem.text for elem in root.select_all('th.bold, tr.highlight ~ tr > td.bold')]
['A', '3', '5', '7', 'C']
"""
import html
import re
from collections import OrderedDict
from enum import Enum
from html.parser import HTMLParser
from typing import (
Any,
Dict,
Generator,
Iterable,
Iterator,
List,
Match,
Optional,
Sequence,
Tuple,
Union,
cast,
)
SelectorGroupLike = Union[str, "SelectorGroup", "Selector"]
class Node(object):
"""
Represents a DOM node.
Parts of JavaScript's DOM ``Node`` API and ``Element`` API are
mirrored here, with extensions. In particular, ``querySelector`` and
``querySelectorAll`` are mirrored.
Notable properties and methods: :meth:`attr()`, :attr:`classes`,
:attr:`html`, :attr:`text`, :meth:`ancestors()`,
:meth:`descendants()`, :meth:`select()`, :meth:`select_all()`,
:meth:`matched_by()`,
Attributes:
tag (:class:`Optional`\\[:class:`str`])
attrs (:class:`Dict`\\[:class:`str`, :class:`str`])
parent (:class:`Optional`\\[:class:`Node`])
children (:class:`List`\\[:class:`Node`])
"""
# Meant to be reimplemented by subclasses.
# HTML representation of the node. Meant to be implemented by
# subclasses.
def select(self, selector: SelectorGroupLike) -> Optional["Node"]:
"""DOM ``querySelector`` clone. Returns one match (if any)."""
selector = self._normalize_selector(selector)
for node in self._select_all(selector):
return node
return None
    def query_selector(self, selector: SelectorGroupLike) -> Optional["Node"]:
        """Alias of :meth:`select`."""
        return self.select(selector)
    def select_all(self, selector: SelectorGroupLike) -> List["Node"]:
        """DOM ``querySelectorAll`` clone. Returns all matches in a list."""
        # `selector` may be a string, Selector, or SelectorGroup
        # (see the SelectorGroupLike alias).
        selector = self._normalize_selector(selector)
        # Materialize the matches into a list for the caller.
        return list(self._select_all(selector))
    def query_selector_all(self, selector: SelectorGroupLike) -> List["Node"]:
        """Alias of :meth:`select_all`."""
        return self.select_all(selector)
def matched_by(
self, selector: SelectorGroupLike, root: Optional["Node"] = None
) -> bool:
"""
Checks whether this node is matched by `selector`.
See :meth:`SelectorGroup.matches()`.
"""
selector = self._normalize_selector(selector)
return selector.matches(self, root=root)
@staticmethod
def next_sibling(self) -> Optional["Node"]:
""".. note:: Not O(1), use with caution."""
next_siblings = self.next_siblings()
if next_siblings:
return next_siblings[0]
else:
return None
def next_element_sibling(self) -> Optional["ElementNode"]:
""".. note:: Not O(1), use with caution."""
for sibling in self.next_siblings():
if isinstance(sibling, ElementNode):
return sibling
return None
def previous_sibling(self) -> Optional["Node"]:
""".. note:: Not O(1), use with caution."""
previous_siblings = self.previous_siblings()
if previous_siblings:
return previous_siblings[0]
else:
return None
def previous_siblings(self) -> List["Node"]:
"""
Compared to the natural DOM order, the order of returned nodes
are reversed. That is, the adjacent sibling (if any) is the
first in the returned list.
"""
parent = self.parent
if not parent:
return []
try:
index = parent.children.index(self)
if index > 0:
return parent.children[index - 1 :: -1]
else:
return []
except ValueError: # pragma: no cover
raise ValueError("node is not found in children of its parent")
def previous_element_sibling(self) -> Optional["ElementNode"]:
""".. note:: Not O(1), use with caution."""
for sibling in self.previous_siblings():
if isinstance(sibling, ElementNode):
return sibling
return None
def ancestors(
self, *, root: Optional["Node"] = None
) -> Generator["Node", None, None]:
"""
Ancestors are generated in reverse order of depth, stopping at
`root`.
A :class:`RuntimeException` is raised if `root` is not in the
ancestral chain.
"""
if self is root:
return
ancestor = self.parent
while ancestor is not root:
if ancestor is None:
raise RuntimeError("provided root node not found in ancestral chain")
yield ancestor
ancestor = ancestor.parent
if root:
yield root
def descendants(self) -> Generator["Node", None, None]:
"""Descendants are generated in depth-first order."""
for child in self.children:
yield child
yield from child.descendants()
    def attr(self, attr: str) -> Optional[str]:
        """Returns the attribute if it exists on the node, otherwise ``None``."""
        return self.attrs.get(attr)
    @property
    def html(self) -> str:
        """
        HTML representation of the node.
        (For a :class:`TextNode`, :meth:`html` returns the escaped version of the
        text.
        """
        # Rendering is delegated to the subclass's __str__ via str(self).
        return str(self)
    def outer_html(self) -> str:
        """Alias of :attr:`html`."""
        return self.html
    def inner_html(self) -> str:
        """HTML representation of the node's children."""
        return "".join(child.html for child in self.children)
    @property
    def text(self) -> str: # pragma: no cover
        """This property is expected to be implemented by subclasses."""
        raise NotImplementedError
    def text_content(self) -> str:
        """Alias of :attr:`text`."""
        return self.text
@property
class ElementNode(Node):
"""
Represents an element node.
Note that tag and attribute names are case-insensitive; attribute
values are case-sensitive.
"""
# https://ipython.readthedocs.io/en/stable/api/generated/IPython.lib.pretty.html
def __str__(self) -> str:
"""HTML representation of the node."""
s = "<" + self.tag
for attr, val in self.attrs.items():
s += ' %s="%s"' % (attr, html.escape(val))
if self.children:
s += ">"
s += "".join(str(child) for child in self.children)
s += "</%s>" % self.tag
else:
if _tag_is_void(self.tag):
s += "/>"
else:
s += "></%s>" % self.tag
return s
@property
def text(self) -> str:
"""The concatenation of all descendant text nodes."""
return "".join(child.text for child in self.children)
class TextNode(str, Node):
"""
Represents a text node.
Subclasses :class:`Node` and :class:`str`.
"""
# HTML-escaped form of the text node. use text() for unescaped
# version.
    def __eq__(self, other: object) -> bool:
        """
        Two text nodes are equal if and only if they are the same node.
        For string comparison, use :attr:`text`.
        """
        # Identity comparison on purpose: distinct text nodes with the
        # same content must not compare equal (overrides str.__eq__).
        return self is other
    def __ne__(self, other: object) -> bool:
        """
        Two text nodes are non-equal if they are not the same node.
        For string comparison, use :attr:`text`.
        """
        # Mirrors __eq__'s identity semantics (overrides str.__ne__).
        return self is not other
@property
class DOMBuilderException(Exception):
"""
Exception raised when :class:`DOMBuilder` detects a bad state.
Attributes:
pos (:class:`Tuple`\\[:class:`int`, :class:`int`]):
Line number and offset in HTML input.
why (:class:`str`):
Reason of the exception.
"""
class DOMBuilder(HTMLParser):
"""
HTML parser / DOM builder.
Subclasses :class:`html.parser.HTMLParser`.
Consume HTML and builds a :class:`Node` tree. Once finished, use
:attr:`root` to access the root of the tree.
This parser cannot parse malformed HTML with tag mismatch.
"""
# Make parser behavior for explicitly and implicitly void elements
# (e.g., <hr> vs <hr/>) consistent. The former triggers
# handle_starttag only, whereas the latter triggers
# handle_startendtag (which by default triggers both handle_starttag
# and handle_endtag). See https://bugs.python.org/issue25258.
#
# An exception is foreign elements, which aren't considered void
# elements but can be explicitly marked as self-closing according to
# the HTML spec (e.g. <path/> is valid but <path> is not).
# Therefore, both handle_starttag and handle_endtag must be called,
# and handle_endtag should not be triggered from within
# handle_starttag in that case.
#
# Note that for simplicity we do not check whether the foreign
# element in question is allowed to be self-closing by spec. (The
# SVG spec unfortunately doesn't provide a readily available list of
# such elements.)
#
# https://html.spec.whatwg.org/multipage/syntax.html#foreign-elements
@property
def root(self) -> "Node":
"""
Finishes processing and returns the root node.
Raises :class:`DOMBuilderException` if there is no root tag or
root tag is not closed yet.
"""
if not self._stack:
raise DOMBuilderException(self.getpos(), "no root tag")
if self._stack[0]._partial:
raise DOMBuilderException(self.getpos(), "root tag not closed yet")
return self._stack[0]
def parse_html(html: str, *, ParserClass: type = DOMBuilder) -> "Node":
    """
    Parse an HTML string and return the root node of the resulting tree.
    May raise :class:`DOMBuilderException` on malformed input.
    Args:
        html: the HTML document (or fragment) to parse
        ParserClass: :class:`DOMBuilder` or a subclass thereof
    Returns:
        The root node of the parsed tree. When the input contains more
        than one top-level element, only the first is kept; the rest
        are discarded.
    """
    parser = ParserClass()  # type: DOMBuilder
    parser.feed(html)
    parser.close()
    return parser.root
class SelectorParserException(Exception):
"""
Exception raised when the selector parser fails to parse an input.
Attributes:
s (:class:`str`):
The input string to be parsed.
cursor (:class:`int`):
Cursor position where the failure occurred.
why (:class:`str`):
Reason of the failure.
"""
class SelectorGroup:
"""
Represents a group of CSS selectors.
A group of CSS selectors is simply a comma-separated list of
selectors. [#]_ See :class:`Selector` documentation for the scope of
support.
Typically, a :class:`SelectorGroup` is constructed from a string
(e.g., ``th.center, td.center``) using the factory function
:meth:`from_str`.
.. [#] https://www.w3.org/TR/selectors-3/#grouping
"""
@classmethod
def from_str(cls, s: str) -> "SelectorGroup":
"""
Parses input string into a group of selectors.
:class:`SelectorParserException` is raised on invalid input. See
:class:`Selector` documentation for the scope of support.
Args:
s: input string
Returns:
Parsed group of selectors.
"""
i = 0
selectors = []
while i < len(s):
selector, i = Selector.from_str(s, i)
selectors.append(selector)
if not selectors:
raise SelectorParserException(s, i, "selector group is empty")
return cls(selectors)
def matches(self, node: "Node", root: Optional["Node"] = None) -> bool:
"""
Decides whether the group of selectors matches `node`.
The group of selectors matches `node` as long as one of the
selectors matches `node`.
If `root` is provided and child and/or descendant combinators
are involved, parent/ancestor lookup terminates at `root`.
"""
return any(selector.matches(node, root=root) for selector in self)
class Selector:
"""
Represents a CSS selector.
Recall that a CSS selector is a chain of one or more *sequences of
simple selectors* separated by *combinators*. [#selectors-3]_ This
concept is represented as a cons list of sequences of simple
selectors (in right to left order). This class in fact holds a
single sequence, with an optional combinator and reference to the
previous sequence.
For instance, ``main#main p.important.definition >
a.term[id][href]`` would be parsed into (schematically) the
following structure::
">" tag='a' classes=('term') attrs=([id], [href]) ~>
" " tag='p' classes=('important', 'definition') ~>
tag='main' id='main'
Each line is held in a separate instance of :class:`Selector`,
linked together by the :attr:`previous` attribute.
Supported grammar (from selectors level 3 [#selectors-3]_):
- Type selectors;
- Universal selectors;
- Class selectors;
- ID selectors;
- Attribute selectors;
- Combinators.
Unsupported grammar:
- Pseudo-classes;
- Pseudo-elements;
- Namespace prefixes (``ns|``, ``*|``, ``|``) in any part of any
selector.
Rationale:
- Pseudo-classes have too many variants, a few of which even
complete with an admittedly not-so-complex minilanguage. These add
up to a lot of code.
- Pseudo-elements are useless outside rendering contexts, hence out of
scope.
- Namespace support is too niche to be worth the parsing headache.
*Using namespace prefixes may confuse the parser!*
Note that the parser only loosely follows the spec and priotizes
ease of parsing (which includes readability and *writability* of
regexes), so some invalid selectors may be accepted (in fact, false
positives abound, but accepting valid inputs is a much more
important goal than rejecting invalid inputs for this library), and
some valid selectors may be rejected (but as long as you stick to
the scope outlined above and common sense you should be fine; the
false negatives shouldn't be used by actual human beings anyway).
In particular, whitespace character is simplified to ``\\s`` (ASCII
mode) despite CSS spec not counting U+000B (VT) as whitespace,
identifiers are simplified to ``[\\w-]+`` (ASCII mode), and strings
(attribute selector values can be either identifiers or strings)
allow escaped quotes (i.e., ``\\'`` inside single-quoted strings and
``\\"`` inside double-quoted strings) but everything else is
interpreted literally. The exact specs for CSS identifiers and
strings can be found at [#]_.
Certain selectors and combinators may be implemented in the parser
but not implemented in matching and/or selection APIs.
.. [#selectors-3] https://www.w3.org/TR/selectors-3/
.. [#] https://www.w3.org/TR/CSS21/syndata.html
Attributes:
tag (:class:`Optional`\\[:class:`str`]):
Type selector.
classes (:class:`List`\\[:class:`str`]):
Class selectors.
id (:class:`Optional`\\[:class:`str`]):
ID selector.
attrs (:class:`List`\\[:class:`AttributeSelector`]):
Attribute selectors.
combinator (:class:`Optional`\\[:class:`Combinator`]):
Combinator with the previous sequence of simple selectors in
chain.
previous (:class:`Optional`\\[:class:`Selector`]):
Reference to the previous sequence of simple selectors in
chain.
"""
# Format a single sequence of simple selectors, without combinator.
    @classmethod
    def from_str(cls, s: str, cursor: int = 0) -> Tuple["Selector", int]:
        """
        Parses input string into selector.
        This factory function only parses out one selector (up to a
        comma or EOS), so partial consumption is allowed --- an optional
        `cursor` is taken as input (0 by default) and the moved cursor
        (either after the comma or at EOS) is returned as part of the
        output.
        :class:`SelectorParserException` is raised on invalid input. See
        :class:`Selector` documentation for the scope of support.
        If you need to completely consume a string representing
        (potentially) a group of selectors, use
        :meth:`SelectorGroup.from_str()`.
        Args:
            s: input string
            cursor: initial cursor position on `s`
        Returns:
            A tuple containing the parsed selector and the moved the
            cursor (either after a comma-delimiter, or at EOS).
        """
        # Simple selectors.
        TYPE_SEL = re.compile(r"[\w-]+", re.A)
        UNIVERSAL_SEL = re.compile(r"\*")
        ATTR_SEL = re.compile(
            r"""\[
            \s*(?P<attr>[\w-]+)\s*
            (
                (?P<op>[~|^$*]?=)\s*
                (
                    (?P<val_identifier>[\w-]+)|
                    (?P<val_string>
                        (?P<quote>['"])
                        (?P<val_string_inner>.*?)
                        (?<!\\)(?P=quote)
                    )
                )\s*
            )?
            \]""",
            re.A | re.X,
        )
        CLASS_SEL = re.compile(r"\.([\w-]+)", re.A)
        ID_SEL = re.compile(r"#([\w-]+)", re.A)
        PSEUDO_CLASS_SEL = re.compile(r":[\w-]+(\([^)]+\))?", re.A)
        PSEUDO_ELEM_SEL = re.compile(r"::[\w-]+", re.A)
        # Combinators
        DESCENDANT_COM = re.compile(r"\s+")
        CHILD_COM = re.compile(r"\s*>\s*")
        NEXT_SIB_COM = re.compile(r"\s*\+\s*")
        SUB_SIB_COM = re.compile(r"\s*~\s*")
        # Misc
        WHITESPACE = re.compile(r"\s*")
        END_OF_SELECTOR = re.compile(r"\s*($|,)")
        # Accumulators for the current sequence of simple selectors.
        # Note: `id` and `type` deliberately shadow builtins here; they
        # mirror the constructor parameter names they feed.
        tag = None
        classes = []
        id = None
        attrs = []
        combinator = None
        selector = None
        previous_combinator = None
        i = cursor
        # Skip leading whitespace
        m = WHITESPACE.match(s, i)
        if m:
            i = m.end()
        while i < len(s):
            # Parse one simple selector.
            #
            # PEP 572 (assignment expressions; the one that burned Guido
            # so much that he resigned as BDFL) would have been nice; it
            # would have saved us from all the regex match
            # reassignments, and worse still, the casts, since mypy
            # complains about getting Optional[Match[str]] instead of
            # Match[str].
            if TYPE_SEL.match(s, i):
                if tag:
                    raise SelectorParserException(s, i, "multiple type selectors found")
                m = cast(Match[str], TYPE_SEL.match(s, i))
                tag = m.group()
            elif UNIVERSAL_SEL.match(s, i):
                m = cast(Match[str], UNIVERSAL_SEL.match(s, i))
            elif ATTR_SEL.match(s, i):
                m = cast(Match[str], ATTR_SEL.match(s, i))
                attr = m.group("attr")
                op = m.group("op")
                val_identifier = m.group("val_identifier")
                quote = m.group("quote")
                val_string_inner = m.group("val_string_inner")
                # A value is either a bare identifier or a quoted string
                # (with backslash-escaped quotes unescaped here).
                if val_identifier is not None:
                    val = val_identifier
                elif val_string_inner is not None:
                    val = val_string_inner.replace("\\" + quote, quote)
                else:
                    val = None
                if op is None:
                    type = AttributeSelectorType.BARE
                elif op == "=":
                    type = AttributeSelectorType.EQUAL
                elif op == "~=":
                    type = AttributeSelectorType.TILDE
                elif op == "|=":
                    type = AttributeSelectorType.PIPE
                elif op == "^=":
                    type = AttributeSelectorType.CARET
                elif op == "$=":
                    type = AttributeSelectorType.DOLLAR
                elif op == "*=":
                    type = AttributeSelectorType.ASTERISK
                else: # pragma: no cover
                    raise SelectorParserException(
                        s,
                        i,
                        "unrecognized operator %s in attribute selector" % repr(op),
                    )
                attrs.append(AttributeSelector(attr, val, type))
            elif CLASS_SEL.match(s, i):
                m = cast(Match[str], CLASS_SEL.match(s, i))
                classes.append(m.group(1))
            elif ID_SEL.match(s, i):
                if id:
                    raise SelectorParserException(s, i, "multiple id selectors found")
                m = cast(Match[str], ID_SEL.match(s, i))
                id = m.group(1)
            elif PSEUDO_CLASS_SEL.match(s, i):
                raise SelectorParserException(s, i, "pseudo-classes not supported")
            elif PSEUDO_ELEM_SEL.match(s, i):
                raise SelectorParserException(s, i, "pseudo-elements not supported")
            else:
                raise SelectorParserException(
                    s, i, "expecting simple selector, found none"
                )
            i = m.end()
            # Try to parse a combinator, or end the selector.
            if CHILD_COM.match(s, i):
                m = cast(Match[str], CHILD_COM.match(s, i))
                combinator = Combinator.CHILD
            elif NEXT_SIB_COM.match(s, i):
                m = cast(Match[str], NEXT_SIB_COM.match(s, i))
                combinator = Combinator.NEXT_SIBLING
            elif SUB_SIB_COM.match(s, i):
                m = cast(Match[str], SUB_SIB_COM.match(s, i))
                combinator = Combinator.SUBSEQUENT_SIBLING
            elif END_OF_SELECTOR.match(s, i):
                m = cast(Match[str], END_OF_SELECTOR.match(s, i))
                combinator = None
            # Need to parse descendant combinator at the very end
            # because it could be a prefix to all previous cases.
            elif DESCENDANT_COM.match(s, i):
                m = cast(Match[str], DESCENDANT_COM.match(s, i))
                combinator = Combinator.DESCENDANT
            else:
                # No combinator and no terminator: keep accumulating
                # simple selectors into the current sequence.
                continue
            i = m.end()
            if combinator and i == len(s):
                raise SelectorParserException(s, i, "unexpected end at combinator")
            # Close out the current sequence and link it (right-to-left)
            # into the cons chain of sequences.
            selector = cls(
                tag=tag,
                classes=classes,
                id=id,
                attrs=attrs,
                combinator=previous_combinator,
                previous=selector,
            )
            previous_combinator = combinator
            # End of selector.
            if combinator is None:
                break
            tag = None
            classes = []
            id = None
            attrs = []
            combinator = None
        if not selector:
            raise SelectorParserException(s, i, "selector is empty")
        return selector, i
def matches(self, node: "Node", root: Optional["Node"] = None) -> bool:
"""
Decides whether the selector matches `node`.
Each sequence of simple selectors in the selector's chain must
be matched for a positive.
If `root` is provided and child and/or descendant combinators
are involved, parent/ancestor lookup terminates at `root`.
"""
if self.tag:
if not node.tag or node.tag != self.tag:
return False
if self.id:
if node.attrs.get("id") != self.id:
return False
if self.classes:
classes = node.classes
for class_ in self.classes:
if class_ not in classes:
return False
if self.attrs:
for attr_selector in self.attrs:
if not attr_selector.matches(node):
return False
if not self.previous:
return True
if self.combinator == Combinator.DESCENDANT:
return any(
self.previous.matches(ancestor, root=root)
for ancestor in node.ancestors()
)
elif self.combinator == Combinator.CHILD:
if node is root or node.parent is None:
return False
else:
return self.previous.matches(node.parent)
elif self.combinator == Combinator.NEXT_SIBLING:
sibling = node.previous_element_sibling()
if not sibling:
return False
else:
return self.previous.matches(sibling)
elif self.combinator == Combinator.SUBSEQUENT_SIBLING:
return any(
self.previous.matches(sibling, root=root)
for sibling in node.previous_siblings()
if isinstance(sibling, ElementNode)
)
else: # pragma: no cover
raise RuntimeError("unimplemented combinator: %s" % repr(self.combinator))
class AttributeSelector:
"""
Represents an attribute selector.
Attributes:
attr (:class:`str`)
val (:class:`Optional`\\[:class:`str`])
type (:class:`AttributeSelectorType`)
"""
# Enum: basis for poor man's algebraic data type.
class AttributeSelectorType(Enum):
    """
    Attribute selector types.
    Members correspond to the following forms of attribute selector:
    - :attr:`BARE`: ``[attr]``;
    - :attr:`EQUAL`: ``[attr=val]``;
    - :attr:`TILDE`: ``[attr~=val]``;
    - :attr:`PIPE`: ``[attr|=val]``;
    - :attr:`CARET`: ``[attr^=val]``;
    - :attr:`DOLLAR`: ``[attr$=val]``;
    - :attr:`ASTERISK`: ``[attr*=val]``.
    """
    # Members are produced by Selector.from_str() when it parses a
    # ``[...]`` attribute selector; the numeric values carry no meaning.
    # [attr]
    BARE = 1
    # [attr=val]
    EQUAL = 2
    # [attr~=val]
    TILDE = 3
    # [attr|=val]
    PIPE = 4
    # [attr^=val]
    CARET = 5
    # [attr$=val]
    DOLLAR = 6
    # [attr*=val]
    ASTERISK = 7
class Combinator(Enum):
    """
    Combinator types.
    Members correspond to the following combinators:
    - :attr:`DESCENDANT`: ``A B``;
    - :attr:`CHILD`: ``A > B``;
    - :attr:`NEXT_SIBLING`: ``A + B``;
    - :attr:`SUBSEQUENT_SIBLING`: ``A ~ B``.
    """
    # Compared with ``==`` in Selector.matches(); values are arbitrary.
    # ' '
    DESCENDANT = 1
    # >
    CHILD = 2
    # +
    NEXT_SIBLING = 3
    # ~
    SUBSEQUENT_SIBLING = 4
def _tag_is_void(tag: str) -> bool:
"""
Checks whether the tag corresponds to a void element.
https://www.w3.org/TR/html5/syntax.html#void-elements
https://html.spec.whatwg.org/multipage/syntax.html#void-elements
"""
return tag.lower() in (
"area",
"base",
"br",
"col",
"embed",
"hr",
"img",
"input",
"link",
"meta",
"param",
"source",
"track",
"wbr",
)
def _tag_encloses_foreign_namespace(tag: str) -> bool:
"""
Checks whether the tag encloses a foreign namespace (MathML or SVG).
https://html.spec.whatwg.org/multipage/syntax.html#foreign-elements
"""
return tag.lower() in ("math", "svg")
| [
37811,
198,
25,
4666,
25,
63,
27740,
63,
318,
281,
11532,
30751,
290,
2829,
24121,
7822,
351,
17391,
198,
19738,
273,
1104,
13,
198,
198,
25,
4666,
25,
63,
27740,
63,
198,
198,
12,
318,
257,
2060,
8265,
26,
198,
12,
468,
645,
2020... | 2.223517 | 13,046 |
# Generated by Django 2.1.1 on 2018-09-25 19:27
import itertools
from django.db import migrations, models
# Human-readable period label -> machine slug, used by the migration.
forward_map = {
    'Daily': "daily",
    'Weekly': "weekly",
    'Monthly': "monthly",
}

# Inverse lookup: machine slug -> human-readable label.
reverse_map = dict(zip(forward_map.values(), forward_map.keys()))
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
16,
319,
2864,
12,
2931,
12,
1495,
678,
25,
1983,
198,
198,
11748,
340,
861,
10141,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628,
198,
11813,
62,
8899,
796,
1391,
19... | 2.514851 | 101 |
# Generated by Django 3.1.3 on 2020-11-17 19:29
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
18,
319,
12131,
12,
1157,
12,
1558,
678,
25,
1959,
201,
198,
201,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
201,
198,
201,
198
] | 2.567568 | 37 |
#!/usr/bin/python
# Discrete-event simulation driver: one fixed-interval job generator per
# job feeds a queue that drains through a token bucket.
# NOTE(review): Python 2 script (uses xrange); scheduler/queue/fixjob/
# tokenbucket/server are project-local modules -- `queue` shadows the
# Python 3 stdlib name.
from scheduler import Scheduler
from queue import Queue
from fixjob import FixJob
from tokenbucket import TokenBucket
from server import Server
import logging, sys
import numpy as np
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
### one token bucket
# Default simulation parameters; each may be overridden positionally on
# the command line below.
numJob = 1
testDuration = 500.0
bRate = 1000.0
bucketSize = 20.0
distrNameA = "bst1"
pRate = 900.0
fileName = "test.out"
# Positional CLI arguments (all optional, in order): duration, arrive
# rate, bucket limiting rate, bucket size, distribution name, out file.
if len(sys.argv) >= 2:
    testDuration = float(sys.argv[1])
if len(sys.argv) >= 3:
    pRate = float(sys.argv[2]) # arrive rate
if len(sys.argv) >= 4:
    bRate = float(sys.argv[3]) # bucket limiting rate
if len(sys.argv) >= 5:
    bucketSize = int(sys.argv[4]) # bucket size
if len(sys.argv) >= 6:
    distrNameA = sys.argv[5]
if len(sys.argv) >= 7:
    fileName = sys.argv[6]
#serviceDistr = ["wei", [1.0/mu]]
# 1000 per second
gRate = pRate / numJob
# Constant 0.05 inter-arrival intervals, 20*600 entries.
testInterval = []
#for j in xrange(60000):
testInterval += [0.05 for i in xrange(20*600)]
gen = []
queue = []
bucket = []
server = []
# Wire up one generator -> queue -> token bucket pipeline per job.
for i in xrange(numJob) :
    gen.append(FixJob(i))
    queue.append(Queue(i))
    gen[-1].setOutput(queue[-1])
    gen[-1].setIntDistr("bst", [50])
    gen[-1].setIntList(testInterval)
    #gen[-1].setSizeDistr("binorm", [1000.0])
    gen[-1].setSizeDistr("cst", [1])
    bucket.append(TokenBucket(i))
    queue[-1].setOutput(bucket[-1])
    #bucket[-1].setParameters(pRate/numJob, bucketSize/numJob)
    bucket[-1].setParameters(bRate/numJob, bucketSize/numJob)
time = 0
# Event loop: advance simulated time to the earliest pending event among
# all buckets and run every item scheduled for that instant.
while time < testDuration:
    nextTimeList = []
    itemList = []
    for b in bucket:
        nextTime, item = b.getNextTime()
        nextTimeList.append(nextTime)
        itemList.append(item)
    # Indices of all buckets firing at the earliest next time.
    index = [i for i in xrange(len(nextTimeList)) if nextTimeList[i] == min(nextTimeList)]
    time = nextTimeList[index[0]]
    logging.debug("Simulation time %f", time)
    for i in index :
        itemList[i].whoAmI()
        itemList[i].runTime(time)
# Append enqueue/dequeue timing samples for each queue to the out file.
f = open(fileName, "a")
for q in queue:
    #q.showStatistic(testDuration/2)
    deq, enq = q.showQueueingTime(int(testDuration)*int(pRate)/2)
    np.savetxt(f, (enq, deq))
f.close()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
6738,
6038,
18173,
1330,
27774,
18173,
198,
6738,
16834,
1330,
4670,
518,
198,
6738,
4259,
21858,
1330,
13268,
33308,
198,
6738,
11241,
27041,
316,
1330,
29130,
33,
38811,
198,
6738,
4382,
... | 2.35078 | 898 |
#!/usr/bin/env python
# ROS packages required
import rospy
import rospkg
# Dependencies required
import gym
import os
import numpy as np
import pandas as pd
import time
# from stable_baselines.common.policies import MlpPolicy, MlpLstmPolicy, MlpLnLstmPolicy
# from stable_baselines.common.vec_env import DummyVecEnv
# from stable_baselines import A2C, ACKTR, DDPG, PPO1, PPO2, SAC, TRPO, TD3, HER
# from stable_baselines.deepq.policies import MlpPolicy as mlp_dqn
# from stable_baselines.sac.policies import MlpPolicy as mlp_sac
# from stable_baselines.ddpg.policies import MlpPolicy as mlp_ddpg
# from stable_baselines.td3.policies import MlpPolicy as mlp_td3
# from stable_baselines.ddpg.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines3.common.utils import get_linear_fn
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines3 import DDPG, PPO, A2C, TD3, SAC
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import VecCheckNan
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.callbacks import CheckpointCallback
# For scheduler
from typing import Callable
# import our task environment
import hummingbird_hover_task_gt_env_ppo_baseline
# from openai_ros.task_envs.cartpole_stay_up import stay_up
# ROS ENV gets started automatically before the training
# from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment -- This has to be solved at the end
# change the directory
os.chdir('/home/ubuntu/catkin_ws/src/hummingbird_pkg/') #change directory
rospy.init_node('hummingbird_gt_eval_baseline', anonymous=True, log_level=rospy.FATAL)
# Create the Gym environment
environment_name = rospy.get_param('/hummingbird/task_and_robot_environment_name')
env = gym.make(environment_name)
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/gt/PPO_0/" # Noisy / baseline gt
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_21/" # Noisy / baseline 1factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_29/" # Noisy / baseline 5factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_26/" # Noisy / baseline 10factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_28/" # Noisy / baseline 20factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/PPO_27/" # Noisy / baseline 100factor
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/gt/PPO_0/" # GT / baseline / -0.5
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/gt/PPO_1/" # GT / baseline / -1
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/baseline/gt/PPO_3/" # GT / baseline / -2
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_20/" # Noisy / 3rotors
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_2/" # GT / 3rotors
###### versatile ######
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_5/" # 100 / 50
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_8/" # 350 / 100
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_10/" # 150 / 100
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_12/" # 250 / 100
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_13/" # 350 / 100
### noisy 3r ###
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/gt/PPO_10/" # gt
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_20/" # f1
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_23/" # f5
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_24/" # f10
# log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_27/" # f10
log_dir = "/home/ubuntu/catkin_ws/src/hummingbird_pkg/results/trained_model/3rotors/PPO_Enjoy/" # enjoy
# When calling model, make sure which env your drone is trained from
# model = PPO.load(log_dir + "PPO_hummingbird_hover")
model = PPO.load(log_dir + "PPO_hummingbird_hover_3rotor")
env = DummyVecEnv([lambda: Monitor(env)])
# env = VecNormalize.load(log_dir + "PPO_hummingbird_hover_vec_normalize.pkl", env)
env = VecNormalize.load(log_dir + "PPO_hummingbird_hover_vec_normalize_3rotor.pkl", env)
env.training = False
obs = env.reset()
for i in range(10000):
if i % 1000 == 0:
print(i)
action, _states = model.predict(obs)
obs, reward, done, info = env.step(action) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
48263,
10392,
2672,
198,
11748,
686,
2777,
88,
198,
11748,
686,
2777,
10025,
198,
2,
37947,
3976,
2672,
198,
11748,
11550,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
... | 2.673273 | 1,867 |
from allauth.socialaccount.providers.oauth_provider.urls import default_urlpatterns
from .provider import OpenStreetMapProvider
# URL patterns generated by allauth's OAuth machinery for this provider.
urlpatterns = default_urlpatterns(OpenStreetMapProvider)
| [
6738,
477,
18439,
13,
14557,
23317,
13,
15234,
4157,
13,
12162,
1071,
62,
15234,
1304,
13,
6371,
82,
1330,
4277,
62,
6371,
33279,
82,
198,
198,
6738,
764,
15234,
1304,
1330,
4946,
34356,
13912,
29495,
628,
198,
6371,
33279,
82,
796,
4... | 3.54717 | 53 |
"""Example test for a pdf or function"""
import numpy as np
import pytest
import tensorflow as tf
import zfit
import zfit_physics as zphys
# Important, do the imports below
from zfit.core.testing import tester
# specify globals here. Do NOT add any TensorFlow but just pure python
param1_true = 0.3
param2_true = 1.2
# register the pdf here and provide sets of working parameter configurations
tester.register_pdf(pdf_class=zphys.pdf.Argus, params_factories=argus_params_factory)
| [
37811,
16281,
1332,
329,
257,
37124,
393,
2163,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
1976,
11147,
198,
198,
11748,
1976,
11147,
62,
746,
23154,
355... | 3.297297 | 148 |
# BFS over an ASCII maze read from ./map; '#' cells are walls.
file = open('./map')
# Strip the trailing newline from each row.
m = [[c for c in line[:-1]] for line in file]
# NOTE(review): findStart() is defined elsewhere in this file;
# presumably it returns the (row, col) of the start cell -- confirm.
start = findStart()
dist = 0
# Maps each reached cell to the list of cells on the path leading to it.
paths = {
    start: []
}
# Breadth-first search; None acts as a level separator so `dist`
# counts BFS depth.
queue = [start, None]
while queue:
    loc = queue.pop(0)
    if not loc:
        # Level separator: stop when the frontier is exhausted.
        if (len(queue) == 0 or not queue[0]):
            break
        dist += 1
        queue.append(None)
        continue
    # print(loc, len(paths[loc]))
    # Four-neighborhood: up, down, left, right.
    u = (loc[0] - 1, loc[1])
    d = (loc[0] + 1, loc[1])
    l = (loc[0], loc[1] - 1)
    r = (loc[0], loc[1] + 1)
    for target in [u, d, l, r]:
        # Visit non-wall, unvisited cells; record the path taken.
        if m[target[0]][target[1]] != '#' and target not in paths:
            paths[target] = paths[loc] + [loc]
            queue.append(target)
# print(queue)
# Emit the path length to every reached cell.
for loc in paths:
    print(len(paths[loc]))
7753,
796,
1280,
7,
4458,
14,
8899,
11537,
198,
198,
76,
796,
16410,
66,
329,
269,
287,
1627,
58,
21912,
16,
11907,
329,
1627,
287,
2393,
60,
628,
198,
198,
9688,
796,
1064,
10434,
3419,
198,
17080,
796,
657,
198,
6978,
82,
796,
1... | 1.980822 | 365 |
# Model/data configuration constants.
NUM_CLASSES = 14
# NOTE(review): presumably input image dimensions in pixels -- confirm.
IMG_HEIGHT = 353
IMG_WIDTH = 257
# NOTE(review): presumably a downscaled variant of the input -- confirm.
IMG_SMALL_HEIGHT = 120
IMG_SMALL_WIDTH = 96
RADIUS = 25
# Training hyperparameters.
epochs = 10
batch_size = 64
198,
41359,
62,
31631,
1546,
796,
1478,
198,
3955,
38,
62,
13909,
9947,
220,
796,
47567,
198,
3955,
38,
62,
54,
2389,
4221,
220,
220,
796,
36100,
198,
198,
3955,
38,
62,
12310,
7036,
62,
13909,
9947,
796,
7982,
198,
3955,
38,
62,
... | 1.931507 | 73 |
import os
from flask import Flask, json, jsonify, request
from flask_cors import CORS
from flask_caching import Cache
from decouple import config
import logging
# Setting up for local testing: want to be able to log the database.
"""
We want to be able to test locally and log information for debugging
"""
# Local sqlite3 database file, used to build the default DATABASE_URI below.
local_db_name = 'test.sqlite3'
def create_app(test_config=None):
    """
    Application factory: build and configure the Flask app.

    :param test_config: optional mapping of config overrides; when given it
        is applied on top of the environment-derived configuration (used by
        tests to point at an in-memory database, disable caching, etc.).
    :return: the configured Flask application instance.
    """
    # Flask requires the application's import name as first argument;
    # the original `Flask()` call raised a TypeError at startup.
    app = Flask(__name__)
    app.config.from_mapping(
        # Make sure to change debug to False in production env
        DEBUG=config('DEBUG', default=False),
        SECRET_KEY=config('SECRET_KEY', default='dev'),  # CHANGE THIS!!!!
        # For in-memory db: default='sqlite:///:memory:'),
        DATABASE_URI=config('DATABASE_URI', 'sqlite:///' +
                            os.path.join(os.getcwd(), local_db_name)),
        LOGFILE=config('LOGFILE', os.path.join(
            app.instance_path, 'logs/debug.log')),
        CACHE_TYPE=config('CACHE_TYPE', 'simple'),  # Configure caching
        # Long cache times probably ok for ML api
        CACHE_DEFAULT_TIMEOUT=config('CACHE_DEFAULT_TIMEOUT', 300),
        TESTING=config('TESTING', default='TRUE')
    )
    if test_config is not None:
        # Let callers (tests) override any of the defaults above.
        app.config.update(test_config)
    # Enable CORS header support
    CORS(app)
    # Enable caching
    cache = Cache(app)
    # Blueprints: connect all the routes (endpoints) we create.
    # Flask's API is register_blueprint (singular), one call per blueprint.
    # NOTE(review): `vision_routes` must be imported in this module — confirm.
    app.register_blueprint(vision_routes)
    return app
| [
11748,
28686,
198,
6738,
42903,
1330,
46947,
11,
33918,
11,
33918,
1958,
11,
2581,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
198,
6738,
42903,
62,
66,
8103,
1330,
34088,
198,
6738,
875,
43846,
1330,
4566,
198,
11748,
18931,
628,
... | 2.563177 | 554 |
# Copyright (c) 2003-2009 Ralph Meijer
# See LICENSE for details
"""
Tests for L{wokkel.xmppim}.
"""
from twisted.internet import defer
from twisted.trial import unittest
from twisted.words.protocols.jabber.jid import JID
from twisted.words.protocols.jabber.xmlstream import toResponse
from twisted.words.xish import domish, utility
from wokkel import xmppim
from wokkel.generic import ErrorStanza, parseXml
from wokkel.test.helpers import XmlStreamStub
NS_XML = 'http://www.w3.org/XML/1998/namespace'
NS_ROSTER = 'jabber:iq:roster'
class PresenceProtocolTest(unittest.TestCase):
    """
    Tests for L{xmppim.PresenceProtocol}
    """
    # NOTE(review): the *Received tests below assign names such as
    # `errorReceived` that are not defined in this chunk — in the upstream
    # wokkel test suite each is a nested callback defined just above the
    # assignment (it fires the Deferred `d`). They appear to have been
    # stripped during extraction; confirm against the original file.
    # `self.protocol` / `self.output` are presumably created in a setUp()
    # that is also not visible here.
    def test_errorReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence type="error"/>"""
        d = defer.Deferred()
        self.protocol.errorReceived = errorReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_availableReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence/>"""
        d = defer.Deferred()
        self.protocol.availableReceived = availableReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_unavailableReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence type='unavailable'/>"""
        d = defer.Deferred()
        self.protocol.unavailableReceived = unavailableReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_subscribeReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence type='subscribe'/>"""
        d = defer.Deferred()
        self.protocol.subscribeReceived = subscribeReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_unsubscribeReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence type='unsubscribe'/>"""
        d = defer.Deferred()
        self.protocol.unsubscribeReceived = unsubscribeReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_subscribedReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence type='subscribed'/>"""
        d = defer.Deferred()
        self.protocol.subscribedReceived = subscribedReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_unsubscribedReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence type='unsubscribed'/>"""
        d = defer.Deferred()
        self.protocol.unsubscribedReceived = unsubscribedReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_probeReceived(self):
        """
        Incoming presence stanzas are parsed and dispatched.
        """
        xml = """<presence type='probe'/>"""
        d = defer.Deferred()
        self.protocol.probeReceived = probeReceived
        self.protocol.xmlstream.dispatch(parseXml(xml))
        return d
    def test_available(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.available(JID('user@example.com'),
                                show=u'chat',
                                status=u'Talk to me!',
                                priority=50)
        element = self.output[-1]
        self.assertEquals("user@example.com", element.getAttribute('to'))
        self.assertIdentical(None, element.getAttribute('type'))
        self.assertEquals(u'chat', unicode(element.show))
        self.assertEquals(u'Talk to me!', unicode(element.status))
        self.assertEquals(u'50', unicode(element.priority))
    def test_availableLanguages(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.available(JID('user@example.com'),
                                show=u'chat',
                                statuses={None: u'Talk to me!',
                                          'nl': u'Praat met me!'},
                                priority=50)
        element = self.output[-1]
        self.assertEquals("user@example.com", element.getAttribute('to'))
        self.assertIdentical(None, element.getAttribute('type'))
        self.assertEquals(u'chat', unicode(element.show))
        # Collect the per-language <status/> children keyed by xml:lang.
        statuses = {}
        for status in element.elements():
            if status.name == 'status':
                lang = status.getAttribute((NS_XML, 'lang'))
                statuses[lang] = unicode(status)
        self.assertIn(None, statuses)
        self.assertEquals(u'Talk to me!', statuses[None])
        self.assertIn('nl', statuses)
        self.assertEquals(u'Praat met me!', statuses['nl'])
        self.assertEquals(u'50', unicode(element.priority))
    def test_availableSender(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.available(JID('user@example.com'),
                                sender=JID('user@example.org'))
        element = self.output[-1]
        self.assertEquals("user@example.org", element.getAttribute('from'))
    def test_unavailableDirected(self):
        """
        Test sending of directed unavailable presence broadcast.
        """
        self.protocol.unavailable(JID('user@example.com'))
        element = self.output[-1]
        self.assertEquals("presence", element.name)
        self.assertEquals(None, element.uri)
        self.assertEquals("user@example.com", element.getAttribute('to'))
        self.assertEquals("unavailable", element.getAttribute('type'))
    def test_unavailableWithStatus(self):
        """
        Test sending of directed unavailable presence broadcast with status.
        """
        self.protocol.unavailable(JID('user@example.com'),
                                  {None: 'Disconnected'})
        element = self.output[-1]
        self.assertEquals("presence", element.name)
        self.assertEquals(None, element.uri)
        self.assertEquals("user@example.com", element.getAttribute('to'))
        self.assertEquals("unavailable", element.getAttribute('type'))
        self.assertEquals("Disconnected", unicode(element.status))
    def test_unavailableBroadcast(self):
        """
        Test sending of unavailable presence broadcast.
        """
        self.protocol.unavailable(None)
        element = self.output[-1]
        self.assertEquals("presence", element.name)
        self.assertEquals(None, element.uri)
        self.assertEquals(None, element.getAttribute('to'))
        self.assertEquals("unavailable", element.getAttribute('type'))
    def test_unavailableBroadcastNoRecipientParameter(self):
        """
        Test sending of unavailable presence broadcast by not passing entity.
        """
        self.protocol.unavailable()
        element = self.output[-1]
        self.assertEquals("presence", element.name)
        self.assertEquals(None, element.uri)
        self.assertEquals(None, element.getAttribute('to'))
        self.assertEquals("unavailable", element.getAttribute('type'))
    def test_unavailableSender(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.unavailable(JID('user@example.com'),
                                  sender=JID('user@example.org'))
        element = self.output[-1]
        self.assertEquals("user@example.org", element.getAttribute('from'))
    def test_subscribeSender(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.subscribe(JID('user@example.com'),
                                sender=JID('user@example.org'))
        element = self.output[-1]
        self.assertEquals("user@example.org", element.getAttribute('from'))
    def test_unsubscribeSender(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.unsubscribe(JID('user@example.com'),
                                  sender=JID('user@example.org'))
        element = self.output[-1]
        self.assertEquals("user@example.org", element.getAttribute('from'))
    def test_subscribedSender(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.subscribed(JID('user@example.com'),
                                 sender=JID('user@example.org'))
        element = self.output[-1]
        self.assertEquals("user@example.org", element.getAttribute('from'))
    def test_unsubscribedSender(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.unsubscribed(JID('user@example.com'),
                                   sender=JID('user@example.org'))
        element = self.output[-1]
        self.assertEquals("user@example.org", element.getAttribute('from'))
    def test_probeSender(self):
        """
        It should be possible to pass a sender address.
        """
        self.protocol.probe(JID('user@example.com'),
                            sender=JID('user@example.org'))
        element = self.output[-1]
        self.assertEquals("user@example.org", element.getAttribute('from'))
class RosterClientProtocolTest(unittest.TestCase):
    """
    Tests for L{xmppim.RosterClientProtocol}.
    """
    # NOTE(review): `self.protocol` and `self.stub` are presumably created
    # in a setUp() not visible in this chunk — confirm against the full file.
    def test_removeItem(self):
        """
        Removing a roster item is setting an item with subscription C{remove}.
        """
        d = self.protocol.removeItem(JID('test@example.org'))
        # Inspect outgoing iq request
        iq = self.stub.output[-1]
        self.assertEquals('set', iq.getAttribute('type'))
        self.assertNotIdentical(None, iq.query)
        self.assertEquals(NS_ROSTER, iq.query.uri)
        children = list(domish.generateElementsQNamed(iq.query.children,
                                                      'item', NS_ROSTER))
        self.assertEquals(1, len(children))
        child = children[0]
        self.assertEquals('test@example.org', child['jid'])
        self.assertEquals('remove', child['subscription'])
        # Fake successful response
        response = toResponse(iq, 'result')
        self.stub.send(response)
        return d
| [
2,
15069,
357,
66,
8,
5816,
12,
10531,
20993,
2185,
2926,
263,
198,
2,
4091,
38559,
24290,
329,
3307,
198,
198,
37811,
198,
51,
3558,
329,
406,
90,
86,
482,
7750,
13,
87,
76,
381,
320,
27422,
198,
37811,
198,
198,
6738,
19074,
13,... | 2.241644 | 4,697 |
from server import api, db
from flask_restful import Resource
from flask import request, g
from server.models import MailConfig
class MailConfigClass(Resource):
    """Mailbox settings resource: accepts mailbox information sent by the
    front end and stores it in the database, keyed by the current user.
    """
    def get(self):
        """Return the stored mailbox settings for the current user.

        Returns empty strings for every field when the user has no
        settings yet, always wrapped in a {'code': 20000, 'data': ...} body.
        """
        mail_config = MailConfig.query.filter_by(ownerId = g.userId).first()
        if mail_config:
            data = {
                'fromName': mail_config.fromName,
                'toName': mail_config.toName,
                'fromEmail': mail_config.fromEmail,
                'fromEmailKey': mail_config.fromEmailKey,
                'toEmail': mail_config.toEmail
            }
        else:
            data = {
                'fromName': '',
                'toName': '',
                'fromEmail': '',
                'fromEmailKey': '',
                'toEmail': ''
            }
        return {
            'code': 20000,
            'data': data
        }
    def post(self):
        """Create or update the current user's mailbox settings.

        Expected JSON body:
        data = {
            'fromName': '',
            'toName': '',
            'fromEmail': '',
            'fromEmailKey': '',
            'toEmail': ''
        }
        """
        data = request.get_json(force = True)
        # First check whether settings for this user already exist.
        check = MailConfig.query.filter_by(ownerId = g.userId).first()
        if check:
            # Update the existing row in place.
            check.fromName = data.get('fromName')
            check.toName = data.get('toName')
            check.fromEmail = data.get('fromEmail')
            check.fromEmailKey = data.get('fromEmailKey')
            check.toEmail = data.get('toEmail')
        else:
            # No settings yet: insert a new row owned by this user.
            config = MailConfig(
                fromName = data.get('fromName'),
                toName = data.get('toName'),
                fromEmail = data.get('fromEmail'),
                fromEmailKey = data.get('fromEmailKey'),
                toEmail = data.get('toEmail'),
                ownerId = g.userId
            )
            db.session.add(config)
        db.session.commit()
        return {
            'code': 20000
        }
# Expose the resource under /mailconfig.
api.add_resource(MailConfigClass, '/mailconfig')
| [
6738,
4382,
1330,
40391,
11,
20613,
198,
6738,
42903,
62,
2118,
913,
1330,
20857,
198,
6738,
42903,
1330,
2581,
11,
308,
198,
6738,
4382,
13,
27530,
1330,
11099,
16934,
628,
198,
4871,
11099,
16934,
9487,
7,
26198,
2599,
198,
220,
220,
... | 1.715102 | 1,225 |
# "Chef" class is defined with 3 functions.
| [
2,
366,
7376,
69,
1,
1398,
318,
5447,
351,
513,
5499,
13,
198
] | 3.384615 | 13 |
#!/usr/bin/env python
from nlp import NlpClass
import sqlite3
import os
# import time
from datetime import datetime
# Absolute directory of this script, used to locate the bundled database.
# NOTE(review): `dir` shadows the builtin of the same name — consider renaming.
dir = os.path.dirname(os.path.abspath(__file__))
# Module-level connection to Robbie's persistent memory store.
RM = sqlite3.connect(dir + '/data/robbie_memory.db')
# NOTE(review): TerminalInput is not defined or imported in this chunk —
# confirm it is provided elsewhere in the full file.
if __name__ == '__main__':
    st = TerminalInput()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
628,
198,
6738,
299,
34431,
1330,
399,
34431,
9487,
198,
11748,
44161,
578,
18,
198,
11748,
28686,
198,
2,
1330,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
15908,
796,
28686,
1... | 2.660194 | 103 |
"""Compute the observed order of convergence for the velocity and pressure."""
import numpy
import pathlib
import petibmpy
def observed_order_convergence(fields, grids, grid_ref, ratio):
    """Compute the observed order of convergence.

    Parameters
    ----------
    fields : tuple of numpy.ndarray objects
        The field values on three consistently refined grids.
    grids : tuple of tuple of numpy.ndarray objects
        The gridline locations for three consistently refined grids.
    grid_ref : tuple of numpy.ndarray objects
        The reference gridlines used for interpolation.
    ratio : float
        The grid refinement ratio.

    Returns
    -------
    float
        The observed order of convergence.

    """
    # Bring all three solutions onto the same reference grid so that
    # pointwise differences between them are meaningful.
    coarse, medium, fine = (
        petibmpy.interpolate2d(field, grid, grid_ref)
        for field, grid in zip(fields, grids)
    )
    # Order = log of the ratio of successive error norms, scaled by the
    # log of the refinement ratio.
    err_coarse = numpy.linalg.norm(medium - coarse, ord=None)
    err_fine = numpy.linalg.norm(fine - medium, ord=None)
    return numpy.log(err_coarse / err_fine) / numpy.log(ratio)
# Set parameters.
rootdir = pathlib.Path(__file__).absolute().parents[1]
timestep = 500  # solution time-step index
field_names = ['p', 'u', 'v']  # name of the fields
ncells = [30, 90, 270, 810]  # number of cells in each direction
ratio = 3  # refinement ratio between two consecutive grids

# Load the grid and field from files.
# One sub-directory per grid resolution, each holding HDF5 output.
data = {}
for name in field_names:
    subdata = {'grids': [], 'fields': []}
    for n in ncells:
        simudir = rootdir / str(n)
        datadir = simudir / 'output'
        grid = petibmpy.read_grid_hdf5(datadir / 'grid.h5', name)
        filepath = datadir / f'{timestep:0>7}.h5'
        field = petibmpy.read_field_hdf5(filepath, name)
        subdata['grids'].append(grid)
        subdata['fields'].append(field)
    data[name] = subdata

# Compute the observed orders of convergence.
# Four grids give two overlapping triplets: (coarse, mid, fine) twice.
alphas = {}
for name in field_names:
    grids, fields = data[name]['grids'], data[name]['fields']
    # Compute order of convergence using the three coarsest grids.
    # Fields are interpolated on the first grid.
    alpha1 = observed_order_convergence(fields[:3], grids[:3], grids[0], ratio)
    # Compute order of convergence using the three finest grids.
    # Fields are interpolated on the first grid.
    alpha2 = observed_order_convergence(fields[1:], grids[1:], grids[0], ratio)
    alphas[name] = (alpha1, alpha2)
print(alphas)
| [
37811,
7293,
1133,
262,
6515,
1502,
286,
40826,
329,
262,
15432,
290,
3833,
526,
15931,
628,
198,
11748,
299,
32152,
198,
11748,
3108,
8019,
198,
198,
11748,
4273,
571,
3149,
88,
628,
198,
4299,
6515,
62,
2875,
62,
1102,
332,
12745,
7... | 2.671306 | 934 |
import errno
import os
import subprocess
from bp import abstract, generic
from bp.filepath import FilePath
from characteristic import Attribute, attributes
from zope.interface import implementer
# TODO: Really betterpath should have a separate interface for like,
# file systems, or listable things.
@implementer(abstract.IFilePath)
@attributes(
[
Attribute(name="_git_dir", default_value=None, exclude_from_repr=True),
Attribute(name="_path", exclude_from_repr=True),
Attribute(name="path", exclude_from_init=True),
],
)
@implementer(abstract.IFilePath)
@attributes(
[
Attribute(name="_hg_dir", default_value=None, exclude_from_repr=True),
Attribute(name="_path", exclude_from_repr=True),
Attribute(name="path", exclude_from_init=True),
],
)
# Attach a forwarding proxy for each IFilePath attribute/method to both
# VCS path classes, so they delegate to the wrapped FilePath.
# NOTE(review): `_proxy_for_attribute`, `GitPath` and `HgPath` are not
# visible in this chunk (the decorated class statements appear to have
# been stripped during extraction) — confirm against the full file.
for attribute in [
    "basename",
    "changed",
    "createDirectory",
    "exists",
    "getAccessTime",
    "getContent",
    "getModificationTime",
    "getStatusChangeTime",
    "getsize",
    "isdir",
    "isfile",
    "islink",
    "open",
    "path",
    "realpath",
    "remove",
    "sep",  # Apparently not in IFilePath
    "setContent",
    "sibling",
]:
    proxy = _proxy_for_attribute(name=attribute)
    setattr(GitPath, attribute, proxy)
    setattr(HgPath, attribute, proxy)
| [
11748,
11454,
3919,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
198,
6738,
275,
79,
1330,
12531,
11,
14276,
198,
6738,
275,
79,
13,
7753,
6978,
1330,
9220,
15235,
198,
6738,
16704,
1330,
3460,
4163,
11,
12608,
198,
6738,
1976,
3008,... | 2.521073 | 522 |
"""Test methods in __main__."""
from unittest.mock import patch, PropertyMock
from homeassistant import __main__ as main
@patch('sys.exit')
def test_validate_python(mock_exit):
    """Test validate Python version method."""
    # (version_info tuple, whether validate_python() is expected to exit)
    cases = [
        ((2, 7, 8), True),
        ((3, 2, 0), True),
        ((3, 4, 1), True),
        ((3, 4, 2), False),
        ((3, 5, 1), False),
    ]
    for version, should_exit in cases:
        mock_exit.reset_mock()
        with patch('sys.version_info',
                   new_callable=PropertyMock(return_value=version)):
            main.validate_python()
        assert mock_exit.called is should_exit
| [
37811,
14402,
5050,
287,
11593,
12417,
834,
526,
15931,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
11,
14161,
44,
735,
198,
198,
6738,
1363,
562,
10167,
1330,
11593,
12417,
834,
355,
1388,
628,
198,
31,
17147,
10786,
17597,
13... | 2.335907 | 518 |
from datetime import datetime
from typing import Callable, Optional
import pytest
from _pytest.config.argparsing import Parser
from _pytest.fixtures import SubRequest
from _pytest.terminal import TerminalReporter
from .version import __version__
@pytest.fixture(name="printer")
def printer(request: SubRequest) -> Callable[[str], None]:
    """pytest plugin to print test progress steps in verbose mode"""
    # NOTE(review): create_printer is not defined in this chunk —
    # presumably it lives elsewhere in this module; confirm.
    return create_printer(request)
# NOTE(review): a fixture named "printer_session" decorating a plain no-op
# looks like the session-scoped fixture body was reduced (or stripped)
# during extraction — confirm the intended implementation.
@pytest.fixture(scope="session", name="printer_session")
def no_op(msg: str) -> None:  # noqa: U100
    """Do nothing"""
# Public API of this module.
__all__ = [
    "__version__",
]
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
19720,
1330,
4889,
540,
11,
32233,
198,
198,
11748,
12972,
9288,
198,
6738,
4808,
9078,
9288,
13,
11250,
13,
853,
79,
945,
278,
1330,
23042,
263,
198,
6738,
4808,
9078,
9288,
13,
69,
2550... | 3.070707 | 198 |
#
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
from otopi import util
from . import connection
from . import engine_connection
from . import dbmsupgrade
from . import schema
from . import vacuum
@util.export
# vim: expandtab tabstop=4 shiftwidth=4
| [
2,
198,
2,
19643,
2265,
12,
18392,
12,
40406,
1377,
19643,
2265,
3113,
9058,
198,
2,
198,
2,
15069,
267,
53,
2265,
46665,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
2,
628,
198,
6738,
... | 3.145631 | 103 |
from app import create_app,db
from app.models import User
from flask_script import Manager,Server
from flask_migrate import Migrate,MigrateCommand
from app.models import User, Blog, Comment, Quote
#creating app instance
app= create_app('production')
# Flask-Script manager: adds CLI commands to the app.
manager= Manager(app)
manager.add_command('server',Server)
# Wire up Flask-Migrate so `python manage.py db ...` works.
migrate = Migrate(app,db)
manager.add_command('db',MigrateCommand)
# NOTE(review): a decorator must be followed by a function definition —
# the shell-context function decorated by @manager.shell appears to be
# missing from this chunk (as written this is a SyntaxError); confirm
# against the full file.
@manager.shell
if __name__ == '__main__':
    manager.run()
6738,
598,
1330,
2251,
62,
1324,
11,
9945,
198,
6738,
598,
13,
27530,
1330,
11787,
198,
6738,
42903,
62,
12048,
1330,
9142,
11,
10697,
198,
6738,
220,
42903,
62,
76,
42175,
1330,
337,
42175,
11,
44,
42175,
21575,
198,
6738,
598,
13,
... | 3.210145 | 138 |
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import model.vertical_disc as vc
import cv2
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import math
import tensorflow as tf
from utils.timer import Timer
from utils.cython_nms import nms, nms_new
from utils.boxes_grid import get_boxes_grid
from utils.blob import im_list_to_blob
from nets.vgg16 import vgg16
from model.config import cfg
from model.bbox_transform import clip_boxes, bbox_transform_inv
# Detection class labels; index 0 is always the background class.
CLASSES = ('__background__', 'bone', 's')
# Module-level side effects: building the graph, opening a session and
# restoring weights all happen at import time.
net = vgg16(batch_size=1)
# NOTE(review): hard-coded absolute checkpoint path — make configurable.
model = "/Users/anekisei/Documents/tf-faster-rcnn2/output/default/vertical/default/vgg16_faster_rcnn_iter_1500.ckpt"
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth=True
# init session
sess = tf.Session(config=tfconfig)
net.create_architecture(sess, "TEST", len(CLASSES), tag='',
                        anchor_scales=cfg.ANCHOR_SCALES,
                        anchor_ratios=cfg.ANCHOR_RATIOS)
saver = tf.train.Saver()
saver.restore(sess, model)
def _get_image_blob(im):
  """Converts an image into a network input.

  Arguments:
    im (ndarray): a color image in BGR order

  Returns:
    blob (ndarray): a data blob holding an image pyramid
    im_scale_factors (list): list of image scales (relative to im) used
      in the image pyramid
  """
  # Work on a float copy with the dataset pixel means subtracted.
  normalized = im.astype(np.float32, copy=True)
  normalized -= cfg.PIXEL_MEANS
  height_width = normalized.shape[0:2]
  smallest_side = np.min(height_width)
  largest_side = np.max(height_width)
  pyramid = []
  scale_factors = []
  for target_size in cfg.TEST.SCALES:
    scale = float(target_size) / float(smallest_side)
    # Cap the scale so the longer side never exceeds MAX_SIZE.
    if np.round(scale * largest_side) > cfg.TEST.MAX_SIZE:
      scale = float(cfg.TEST.MAX_SIZE) / float(largest_side)
    resized = cv2.resize(normalized, None, None, fx=scale, fy=scale,
                         interpolation=cv2.INTER_LINEAR)
    scale_factors.append(scale)
    pyramid.append(resized)
  # Stack the pyramid levels into a single network input blob.
  return im_list_to_blob(pyramid), np.array(scale_factors)
def _get_blobs(im):
  """Convert an image and RoIs within that image into network inputs."""
  data, scale_factors = _get_image_blob(im)
  blobs = {'data': data}
  return blobs, scale_factors
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def _rescale_boxes(boxes, inds, scales):
"""Rescale boxes according to image rescaling."""
for i in range(boxes.shape[0]):
boxes[i,:] = boxes[i,:] / scales[int(inds[i])]
return boxes
def apply_nms(all_boxes, thresh):
  """Apply non-maximum suppression to all predicted boxes output by the
  test_net method.

  all_boxes[cls][image] is an N x 5 array of (x1, y1, x2, y2, score);
  returns a structure of the same shape with suppressed detections removed.
  """
  num_classes = len(all_boxes)
  num_images = len(all_boxes[0])
  nms_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
  for cls_ind in range(num_classes):
    for im_ind in range(num_images):
      dets = all_boxes[cls_ind][im_ind]
      # BUG FIX: `dets == []` compares an ndarray elementwise and is never
      # truthy, so empty entries fell through to nms(); test length instead.
      if len(dets) == 0:
        continue
      x1 = dets[:, 0]
      y1 = dets[:, 1]
      x2 = dets[:, 2]
      y2 = dets[:, 3]
      scores = dets[:, 4]
      # Keep only well-formed boxes above the detection threshold.
      inds = np.where((x2 > x1) & (y2 > y1) & (scores > cfg.TEST.DET_THRESHOLD))[0]
      dets = dets[inds, :]
      if len(dets) == 0:
        continue
      keep = nms(dets, thresh)
      if len(keep) == 0:
        continue
      nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
  return nms_boxes
def vis_detections(im, class_name, dets, thresh=0.5):
    """Collect center points of detections scoring at least ``thresh``.

    Despite the historical name, nothing is drawn: each sufficiently
    confident detection box is reduced to its integer center point.

    Parameters
    ----------
    im : unused; kept for interface compatibility with callers.
    class_name : unused; kept for interface compatibility.
    dets : numpy.ndarray of shape (N, 5)
        Rows of (x1, y1, x2, y2, score).
    thresh : float
        Minimum score for a detection to be kept.

    Returns
    -------
    list of (int, int)
        (center_x, center_y) per kept detection, in original row order.
        Note: image coords — shape[0] is height (y), shape[1] is width (x).
    """
    vertical_points = []
    for i in range(dets.shape[0]):
        score = dets[i, -1]
        if score < thresh:
            continue
        x1 = int(dets[i, 0])
        y1 = int(dets[i, 1])
        x2 = int(dets[i, 2])
        y2 = int(dets[i, 3])
        center_x = int((x1 + x2) / 2.0)
        center_y = int((y1 + y2) / 2.0)
        vertical_points.append((center_x, center_y))
    return vertical_points
def test_vertical(img, thresh=0.05, sess=sess):
    """Test a Fast R-CNN network on an image database."""
    # all detections are collected into:
    #  all_boxes[cls][image] = N x 5 array of detections in
    #  (x1, y1, x2, y2, score)
    # timers
    # NOTE(review): contrast, im_detect, filter_outliers and Draw_bone are
    # not defined in this chunk — confirm they are provided elsewhere.
    im = contrast(img)
    _t = {'im_detect' : Timer(), 'misc' : Timer()}
    _t['im_detect'].tic()
    scores, boxes = im_detect(sess, net, im)
    _t['im_detect'].toc()
    _t['misc'].tic()
    # skip j = 0, because it's the background class
    vertical_points = []
    for j, cls in enumerate(CLASSES[1:]):
        j += 1
        # Keep proposals above `thresh` for this class, then NMS them.
        inds = np.where(scores[:, j] > thresh)[0]
        cls_scores = scores[inds, j]
        cls_boxes = boxes[inds, j*4:(j+1)*4]
        cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
            .astype(np.float32, copy=False)
        keep = nms(cls_dets, cfg.TEST.NMS)
        cls_dets = cls_dets[keep, :]
        # Collect (center_x, center_y) of confident detections.
        vertical_points += vis_detections(im, cls, cls_dets, thresh=0.7)
    # sort verticals
    # Sort centers top-to-bottom by their y coordinate.
    vertical_points = sorted(vertical_points, key=lambda vertical_points: vertical_points[1], reverse=False)#[:7]
    #res_image, flag, disease_bone = vc.spine_contour(im, vertical_points)
    vertical_points = filter_outliers(vertical_points)
    # Render the spine contour and bone markers on both the original and
    # the contrast-enhanced image.
    res_image = vc.spine_contour(img, vertical_points)
    res_image = Draw_bone(res_image,vertical_points)
    res2 = vc.spine_contour(im, vertical_points)#contrast img
    res2 = Draw_bone(res2,vertical_points)
    _t['misc'].toc()
    return res_image, res2#, flag, disease_bone
'''
if cv2.waitKey(1) & 0xff == 27:
break
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
_t['misc'].toc()
print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time))
#for cls_ind, cls in enumerate(CLASSES[1:]):
#vis_detections(im, class_name, dets, thresh=0.5)
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
imdb.evaluate_detections(all_boxes, output_dir)
'''
| [
2,
20368,
22369,
198,
2,
309,
22854,
11125,
38996,
371,
12,
18474,
198,
2,
49962,
739,
383,
17168,
13789,
685,
3826,
38559,
24290,
329,
3307,
60,
198,
2,
22503,
416,
25426,
293,
72,
12555,
198,
2,
20368,
22369,
198,
6738,
11593,
37443... | 2.309027 | 3,113 |
# Copyright 2013 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convenient base class for imaging
"""
from google.appengine.api import images
import logging
def generate_thumbnail(image_data, min_source_height, max_source_height, min_source_width, max_source_width, content_type, width, height, overlay_path, valign, top_crop_pct=None, bottom_crop_pct=None, left_crop_pct=None, right_crop_pct=None, crop_x=None, crop_y=None, post_crop_uniform_scale_pct=None):
    """ Generate a thumbnail and return the image data as a
        binary string. If unable to create the
        thumbnail, will return None.

        :min_source_height:
            If specified, a thumbnail will only be generated if the incoming image
            is at least this high.
        :min_source_width:
            If specified, a thumbnail will only be generated if the incoming image
            is at least this wide.
        :max_source_height:
            If specified, a thumbnail will only be generated if the incoming image
            is less than this many pixels high.
        :max_source_width:
            If specified, a thumbnail will only be generated if the incoming image
            is less than this many pixels wide.
        :image_data:
            Image data, as a bytestring
        :content_type:
            The MIME content type of the image.
        :width:
            Width of the thumbnail
        :height:
            Height of the thumbnail
        :overlay_path:
            Full path to an image file to overlay on top of the image data, or None
            to not use an overlay.
        :valign:
            A string, one of "top", "bottom", "middle", describing how the image
            should be aligned along the Y-axis when cropping.
        :top_crop_pct:
        :bottom_crop_pct:
            Optional. Floats indicating how much from the top and bottom of the
            original image to crop in before rescaling. Numbers between 0 and 1.0 incl.
        :crop_x:
        :crop_y:
            Optional. If specified with width and height, will simply cut out a rectangle
            of the incoming image which is width x height and has its upper-left corner
            pegged to (crop_x, cropy_y).
            NOTE: For crop_x and crop_y to work, the following other options must be None:
            valign, top_crop_pct, bottom_crop_pct
        :post_crop_uniform_scale_pct:
            If not None, will scale image after cropping by the indicated percent.
            Should be None or a float between 0.0 and 1.0
    """
    image = images.Image(image_data)
    # Reject source images outside the configured size window.
    if min_source_height is not None and image.height < min_source_height:
        return None
    if max_source_height is not None and image.height > max_source_height:
        return None
    if min_source_width is not None and image.width < min_source_width:
        return None
    if max_source_width is not None and image.width > max_source_width:
        return None
    # Preserve PNG output; everything else is re-encoded as JPEG.
    if content_type == 'image/png':
        output_encoding = images.PNG
    else:
        output_encoding = images.JPEG
    if crop_x is not None and crop_y is not None and valign is None and top_crop_pct is None and bottom_crop_pct is None and (image.width >= crop_x + width) and (image.height >= crop_y + height):
        # Exact-rectangle crop mode: the Images API takes crop bounds as
        # fractions of the source dimensions.
        fw = float(image.width)
        fh = float(image.height)
        try:
            output = images.crop(image_data, float(crop_x) / fw, float(crop_y) / fh, float(crop_x + width) / fw, float(crop_y + height) / fh, output_encoding=output_encoding)
        except Exception:
            # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
            # are not swallowed; fall back to the original bytes on failure.
            output = image_data
    else:
        if width > image.width and height > image.height:
            # Both target dimensions exceed the source: never scale up,
            # return the original bytes unchanged.
            output = image_data
        else:
            output = rescale(image, width, height, halign='middle', valign=valign, top_crop_pct=top_crop_pct, bottom_crop_pct=bottom_crop_pct, left_crop_pct=left_crop_pct, right_crop_pct=right_crop_pct)
    if post_crop_uniform_scale_pct is not None:
        output = images.resize(output, width=int(width * post_crop_uniform_scale_pct), output_encoding=output_encoding)
    if overlay_path is not None:
        # Read the overlay into memory in binary mode (was text mode 'r',
        # which can corrupt image bytes) and composite it centered on top
        # of the rescaled output.
        with open(overlay_path, 'rb') as overlay_file:
            overlay_data = overlay_file.read()
        output = images.composite(
            inputs=[
                (output, 0, 0, 1.0, images.CENTER_CENTER),
                (overlay_data, 0, 0, 1.0, images.CENTER_CENTER),
            ],
            width=width,
            height=height,
            output_encoding=output_encoding
        )
    return output
def rescale(image, width, height, halign='middle', valign='middle', top_crop_pct=None, bottom_crop_pct=None, left_crop_pct=None, right_crop_pct=None, post_crop_uniform_scale_pct=None):
    """
    From http://stackoverflow.com/questions/1944112/app-engine-cropping-to-a-specific-width-and-height
    Resize then optionally crop a given image.
    Attributes:
        image: The image (a google.appengine.api.images.Image; it is MUTATED:
            crop/resize transforms are queued on it and executed here)
        width: The desired width
        height: The desired height
        halign: Acts like photoshop's 'Canvas Size' function, horizontally
            aligning the crop to left, middle or right
        valign: Verticallly aligns the crop to top, middle or bottom
        top_crop_pct/bottom_crop_pct/left_crop_pct/right_crop_pct:
            optional pre-crop fractions (0.0-1.0) applied before the
            resize/crop-to-fit logic
        :post_crop_uniform_scale_pct:
            NOTE(review): accepted but never used inside this function — the
            post-crop scaling appears to be applied by the caller; confirm.
    Returns:
        bytes: the transformed image data from execute_transforms().
    """
    # Optional pre-crop: only applied when a full vertical and/or horizontal
    # pair of percentages is supplied; missing sides default to the full span.
    if top_crop_pct is not None and bottom_crop_pct is not None:
        if left_crop_pct is not None and right_crop_pct is not None:
            image.crop(left_crop_pct,top_crop_pct,right_crop_pct,bottom_crop_pct)
        else:
            image.crop(0.0,top_crop_pct,1.0,bottom_crop_pct)
    elif left_crop_pct is not None and right_crop_pct is not None:
        image.crop(left_crop_pct,0.0,right_crop_pct,1.0)
    # Compare requested vs current aspect ratio to decide which axis to fit.
    desired_wh_ratio = float(width) / float(height)
    wh_ratio = float(image.width) / float(image.height)
    if desired_wh_ratio > wh_ratio:
        # resize to width, then crop to height
        image.resize(width=width)
        # execute_transforms() runs the queued crop+resize; image.width/height
        # now reflect the resized dimensions.
        output = image.execute_transforms()
        if image.width < width or image.height < height:
            # Still smaller than requested on some axis: return as-is without
            # the alignment crop (avoids scaling the image up).
            return output
        # Fraction to trim from each side to reach the requested height.
        trim_y = (float(image.height - height) / 2) / image.height
        if valign == 'top':
            image.crop(0.0, 0.0, 1.0, 1 - (2 * trim_y))
        elif valign == 'bottom':
            image.crop(0.0, (2 * trim_y), 1.0, 1.0)
        else:
            image.crop(0.0, trim_y, 1.0, 1 - trim_y)
    else:
        # resize to height, then crop to width
        image.resize(height=height)
        output = image.execute_transforms()
        if image.width < width or image.height < height:
            return output
        trim_x = (float(image.width - width) / 2) / image.width
        if halign == 'left':
            image.crop(0.0, 0.0, 1 - (2 * trim_x), 1.0)
        elif halign == 'right':
            image.crop((2 * trim_x), 0.0, 1.0, 1.0)
        else:
            image.crop(trim_x, 0.0, 1 - trim_x, 1.0)
    # Second execute_transforms() applies the alignment crop queued above.
    return image.execute_transforms()
| [
2,
220,
220,
220,
15069,
2211,
3012,
3457,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
220,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,... | 2.412131 | 3,693 |
# -*- coding: utf-8 -*-
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
from Mnn_Core.mnn_pytorch import *
from torch.utils.tensorboard import SummaryWriter
# TensorBoard summary writer; event files are written under runs/cifar10_experiment_1.
writer = SummaryWriter('runs/cifar10_experiment_1')
class Mnn_Classic(torch.nn.Module):
"""Some Information about Net"""
if __name__ == "__main__":
test = Train_Cifar10_Model()
test.test_model()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28034,
10178,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
337,
20471,
62,
14055,
13,
10295,
77,
... | 2.834483 | 145 |
# Copyright 2020 nunopenim @github
#
# Licensed under the PEL (Penim Enterprises License), v1.0
#
# You may not use this file or any of the content within it, unless in
# compliance with the PE License
"""
used to get github api information
for github module
"""
import json
import urllib.request as url
VERSION = "1.1.0"
APIURL = "http://api.github.com/repos/"
# Repo-wise stuff
# Release-wise stuff
# Asset-wise stuff
| [
2,
15069,
12131,
48157,
9654,
320,
2488,
12567,
198,
2,
198,
2,
49962,
739,
262,
350,
3698,
357,
25553,
320,
41253,
13789,
828,
410,
16,
13,
15,
198,
2,
198,
2,
921,
743,
407,
779,
428,
2393,
393,
597,
286,
262,
2695,
1626,
340,
... | 3.164286 | 140 |
import os
import re
from typing import List, Optional, Union
import numpy as np
from matplotlib import pyplot as plt
from pandas import DataFrame
from scipy.optimize import curve_fit
from sigfig import round
from sklearn.metrics import r2_score
| [
11748,
28686,
198,
11748,
302,
198,
6738,
19720,
1330,
7343,
11,
32233,
11,
4479,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
6738,
19798,
292,
1330,
6060,
19778,
198,
67... | 3.528571 | 70 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.txt file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Dict, List, NamedTuple, Optional, Sequence, Union
import torch
import torch.nn as nn
from torchbiggraph.plugin import PluginRegistry
from torchbiggraph.types import FloatTensorType, LongTensorType, Side
from torchbiggraph.util import match_shape
class AbstractOperator(nn.Module, ABC):
"""Perform the same operation on many vectors.
Given a tensor containing a set of vectors, perform the same operation on
all of them, with a common set of parameters. The dimension of these vectors
will be given at initialization (so that any parameter can be initialized).
The input will be a tensor with at least one dimension. The last dimension
will contain the vectors. The output is a tensor that will have the same
size as the input.
"""
@abstractmethod
OPERATORS = PluginRegistry[AbstractOperator]()
@OPERATORS.register_as("none")
@OPERATORS.register_as("diagonal")
@OPERATORS.register_as("translation")
@OPERATORS.register_as("linear")
@OPERATORS.register_as("affine")
# FIXME This adapts from the pre-D14024710 format; remove eventually.
@OPERATORS.register_as("complex_diagonal")
class AbstractDynamicOperator(nn.Module, ABC):
"""Perform different operations on many vectors.
The inputs are a tensor containing a set of vectors and another tensor
specifying, for each vector, which operation to apply to it. The output has
the same size as the first input and contains the outputs of the operations
applied to the input vectors. The different operations are identified by
integers in a [0, N) range. They are all of the same type (say, translation)
but each one has its own set of parameters. The dimension of the vectors and
the total number of operations that need to be supported are provided at
initialization. The first tensor can have any number of dimensions (>= 1).
"""
@abstractmethod
DYNAMIC_OPERATORS = PluginRegistry[AbstractDynamicOperator]()
@DYNAMIC_OPERATORS.register_as("none")
@DYNAMIC_OPERATORS.register_as("diagonal")
@DYNAMIC_OPERATORS.register_as("translation")
@DYNAMIC_OPERATORS.register_as("linear")
@DYNAMIC_OPERATORS.register_as("affine")
# FIXME This adapts from the pre-D14024710 format; remove eventually.
@DYNAMIC_OPERATORS.register_as("complex_diagonal")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
347,
10305,
12,
7635... | 3.384416 | 770 |
from django.contrib import admin
from django import forms
from .models import Invoice, InvoiceItem, Payment
admin.site.register(Invoice, InvoiceAdmin)
admin.site.register(InvoiceItem, InvoiceItemAdmin)
admin.site.register(Payment, PaymentAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
764,
27530,
1330,
10001,
2942,
11,
10001,
2942,
7449,
11,
28784,
628,
198,
28482,
13,
15654,
13,
30238,
7,
19904,
2942,
11,
10001,
2942,
467... | 3.311688 | 77 |
import os
import pandas as pd
import joblib
base_accounts_dir = os.path.join(os.path.abspath('TweetSuite'),'accounts')
base_accounts = [account for account in os.listdir(base_accounts_dir) if account != 'vault']
accounts_dir = os.path.abspath('accounts')
getInput(base_accounts) | [
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
1693,
8019,
198,
198,
8692,
62,
23317,
82,
62,
15908,
796,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
397,
2777,
776,
10786,
47845,
5606,
578,
33809,
6,
23317,
8... | 2.688679 | 106 |
import unittest
import pytest
import sys
import hubcheck
# Module-level pytest mark applied to every test in this file.
# NOTE(review): `pytest.mark.fail` is not a built-in pytest mark (xfail is);
# presumably it is a custom mark consumed by the hubcheck harness — confirm.
pytestmark = [ pytest.mark.fail]

if __name__ == '__main__':
    # unittest.main(verbosity=0)
    # Run through an explicit TextTestRunner on stdout; exit=False keeps
    # unittest.main from calling sys.exit() so the process can continue.
    tr = unittest.TextTestRunner(stream=sys.stdout,verbosity=0)
    unittest.main(testRunner=tr,exit=False)
| [
11748,
555,
715,
395,
198,
11748,
12972,
9288,
198,
11748,
25064,
198,
198,
11748,
12575,
9122,
198,
198,
9078,
9288,
4102,
796,
685,
12972,
9288,
13,
4102,
13,
32165,
60,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
1... | 2.514286 | 105 |
import unittest
import math
import numpy as np
from cdtw.dtw import *
from numpy.testing import assert_array_equal, assert_array_almost_equal
# dtaidistance is an optional accelerated DTW backend; record availability in
# a flag so tests can presumably guard on it — confirm usage in the test body.
try:
    import dtaidistance
    DTAIDISTANCE_INSTALLED = True
except ImportError:
    DTAIDISTANCE_INSTALLED = False

if __name__ == '__main__':
    unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
10688,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
22927,
4246,
13,
67,
4246,
1330,
1635,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
18747,
62,
40496,
11,
6818,
62,
18747,
62,
28177,
62... | 2.713043 | 115 |
# -*- coding: utf-8 -*-
import inspect
import logging
import re
from six import string_types
from . import helpers
from . import magicnumbers
from . import util
from .magicnumbers import maps, language_map
from .xpath import xpb
log = logging.getLogger(__name__)
class Detail(object):
"""Represent a detail belonging to an okcupid.com profile."""
NO_DEFAULT = object()
@classmethod
@classmethod
@classmethod
@classmethod
@staticmethod
@property
_doc_format = 'The {0} detail of an okcupid.com user\'s profile.'
@id_name.setter
class Details(object):
"""Represent the details belonging to an okcupid.com profile."""
@classmethod
_profile_details_xpb = xpb.div(id='profile_details').dl
_basics = [maps.orientation,
maps.gender,
maps.status,
util.IndexedREMap(),
maps.bodytype]
_backgrounds = [maps.ethnicities,
util.REMap.from_string_pairs([(a.replace('+',r'\+'),b)
for a, b in language_map.iteritems()]),
maps.education_level,
maps.religion]
_misc = [util.IndexedREMap('smokes'),
util.IndexedREMap('drink'),
util.IndexedREMap('drugs'),
maps.diet,
util.IndexedREMap('kid'),
util.IndexedREMap('dogs','cats'),
maps.sign]
@classmethod
@util.cached_property
@property
bodytype = Detail.mapping_updater(maps.bodytype)
gender = Detail.mapping_updater(maps.gender)
orientation = Detail.mapping_updater(maps.orientation)
smokes = Detail.mapping_updater(maps.smokes, id_name='smoking')
drugs = Detail.mapping_updater(maps.drugs)
drinks = Detail.mapping_updater(maps.drinks, id_name='drinking')
job = Detail.mapping_updater(maps.job)
status = Detail.mapping_updater(maps.status)
monogamy = Detail(id_name='monogamous', updater=lambda id_name, value: {
'monogamous': maps.monogamy[value],
'monogamyflex': maps.strictness[value]
})
children = Detail(updater=lambda id_name, value: {
'children': maps.has_kids[value],
'children2': maps.wants_kids[value]
})
education = Detail(updater=lambda id_name, value: {
'educationstatus': maps.education_status[value],
'educationlevel': maps.education_level[value]
})
pets = Detail(updater=lambda id_name, value: {
'cats': maps.cats[value],
'dogs': maps.dogs[value]
})
diet = Detail(updater=lambda id_name, value: {
'diet': maps.diet[value],
'dietserious': maps.diet_strictness[value]
})
religion = Detail(updater=lambda id_name, value: {
'religion': maps.religion[value],
'religionserious': maps.seriousness[value]
})
sign = Detail(updater=lambda id_name, value: {
'sign': maps.sign[value],
'sign_status': maps.importance[value]
})
height = Detail(updater=lambda id_name, value: {
'centimeters': int(round(magicnumbers.parse_height_string(value)))
})
for id_name, detail in Details.name_detail_pairs():
if detail.id_name is None:
detail.id_name = id_name
is_declarative_detail = lambda x: (isinstance(x, type) and
issubclass(x, DeclarativeDetail))
for id_name, declarative_detail in inspect.getmembers(
Details, is_declarative_detail
):
detail = Detail(presenter=declarative_detail.presenter,
updater=declarative_detail.updater,
id_name=id_name)
setattr(Details, id_name, detail)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
10104,
198,
11748,
18931,
198,
11748,
302,
198,
198,
6738,
2237,
1330,
4731,
62,
19199,
198,
198,
6738,
764,
1330,
49385,
198,
6738,
764,
1330,
5536,
77,
17024,
1... | 2.24092 | 1,652 |
"""Executing a bash script."""
# Copyright (c) 2017 Thomas Lehmann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# pylint: disable=too-many-instance-attributes
import contextlib
import sys
import os
import shlex
import subprocess # nosec
import tempfile
from spline.tools.filters import render
from spline.tools.logger import Logger
from spline.tools.event import Event
@contextlib.contextmanager
def managed_process(process):
    """Context manager around subprocess.Popen for use with the `with` syntax.

    Yields the process, and on exit closes whichever standard streams were
    opened as pipes, then waits for the process to terminate. Works across
    Python versions that lack Popen's own context-manager support.
    """
    try:
        yield process
    finally:
        for handle in (process.stdout, process.stdin, process.stderr):
            if handle:
                handle.close()
        process.wait()
class Bash(object):
    """Wrapper for Bash execution.

    Renders a script (inline code or a path to a file) into a temporary
    executable bash file and streams its output line by line via generators.
    """

    def __init__(self, config):
        """
        Initialize with Bash code and optional environment variables.

        Args:
            config(ShellConfig): options for configuring Bash environment and behavior
        """
        self.event = Event.create(__name__)
        self.logger = Logger.get_logger(__name__)
        self.config = config
        # success/exit_code reflect the most recent script run.
        self.success = True
        self.env = {}
        self.env.update(config.env)
        # stdout is piped; stderr is merged into stdout (STDOUT).
        self.stdout = subprocess.PIPE
        self.stderr = subprocess.STDOUT
        self.shell = False
        self.exit_code = 0

    @staticmethod
    def creator(_, config):
        """
        Creator function for creating an instance of a Bash.

        Args:
            config (ShellConfig): options for configuring Bash environment and behavior

        Returns:
            Bash: instance of class Bash
        """
        return Bash(config)

    def update_environment_variables(self, filename):
        """Updating OS environment variables and current script path and filename.

        Args:
            filename (str): path of the generated temporary script, exported
                to the script as PIPELINE_BASH_FILE.
        """
        self.env.update(os.environ.copy())
        self.env.update({'PIPELINE_BASH_FILE': filename})

    def get_temporary_scripts_path(self):
        """
        Get path for temporary scripts.

        Returns:
            str: path for temporary scripts or None if not set
        """
        result = None
        # Only honor the configured path when it is non-empty AND exists;
        # otherwise None lets tempfile fall back to the system default dir.
        if len(self.config.temporary_scripts_path) > 0:
            if os.path.isdir(self.config.temporary_scripts_path):
                result = self.config.temporary_scripts_path
        return result

    def create_file_for(self, script):
        """
        Create a temporary, executable bash file.

        It also does render given script (string) with the model and
        the provided environment variables and optional also an item
        when using the B{with} field.

        Args:
            script (str): either pather and filename or Bash code.

        Returns:
            str: path and filename of a temporary file, or None when
                rendering the script failed.
        """
        # delete=False: the caller (process()) is responsible for removal.
        temp = tempfile.NamedTemporaryFile(
            prefix="pipeline-script-", mode='w+t', suffix=".sh", delete=False,
            dir=self.get_temporary_scripts_path())
        self.update_environment_variables(temp.name)
        rendered_script = render(script, model=self.config.model, env=self.env, item=self.config.item,
                                 variables=self.config.variables)
        if rendered_script is None:
            # Template rendering failed: clean up the temp file and signal failure.
            self.success = False
            temp.close()
            os.remove(temp.name)
            return None
        # Python 2 needs explicit utf-8 encoding when writing; Python 3 does not.
        to_file_map = {2: lambda s: s.encode('utf-8'), 3: lambda s: s}
        # Heuristic: a pure-ASCII rendered string that names an existing file is
        # treated as a path to a script, whose content is copied verbatim;
        # anything else is treated as inline Bash code.
        if all(ord(ch) < 128 for ch in rendered_script) and os.path.isfile(rendered_script):
            with open(rendered_script) as handle:
                content = str(handle.read())
                temp.writelines(content)
        else:
            temp.write(u"#!/bin/bash\n%s" % self.render_bash_options())
            temp.write(to_file_map[sys.version_info.major](rendered_script))
        temp.close()
        # make Bash script executable
        os.chmod(temp.name, 0o700)
        return temp.name

    def render_bash_options(self):
        """Rendering Bash options (debug tracing and/or strict mode) as a string."""
        options = ''
        if self.config.debug:
            options += "set -x\n"
        if self.config.strict:
            options += "set -euo pipefail\n"
        return options

    def process_script(self, filename):
        """Running the Bash code.

        Generator: yields each output line (utf-8 decoded, newline stripped).
        Sets self.exit_code/self.success from the process return code; an
        OSError (e.g. bash not found) yields the error text instead.
        """
        try:
            with managed_process(subprocess.Popen(shlex.split("bash %s" % filename),
                                                  stdout=self.stdout, stderr=self.stderr,
                                                  shell=self.shell, env=self.env)) as process:  # nosec
                # NOTE(review): the sentinel ' ' never matches the bytes that
                # readline returns, so termination relies on the falsy-line
                # break below — confirm intent.
                for line in iter(process.stdout.readline, ' '):
                    if not line:
                        break
                    yield line[0:-1].decode('utf-8')
                process.wait()
                self.exit_code = process.returncode
                self.success = (process.returncode == 0)
                if not self.config.internal:
                    if process.returncode == 0:
                        self.logger.info("Exit code has been %d", process.returncode)
                    else:
                        self.logger.error("Exit code has been %d", process.returncode)
        except OSError as exception:
            self.exit_code = 1
            self.success = False
            yield str(exception)

    def process_file(self, filename):
        """Processing one file.

        Generator: in dry-run mode yields the script's lines without executing
        it; otherwise delegates to process_script().
        """
        if self.config.dry_run:
            if not self.config.internal:
                self.logger.info("Dry run mode for script %s", filename)
            with open(filename) as handle:
                for line in handle:
                    # Strip the trailing newline only when present.
                    yield line[0:-1] if line[-1] == '\n' else line
        else:
            if not self.config.internal:
                self.logger.info("Running script %s", filename)
            for line in self.process_script(filename):
                yield line

    def process(self):
        """Running the Bash code.

        Generator entry point: renders the configured script to a temp file,
        yields its output lines, always removes the temp file, and reports
        success/failure via the event object (unless running internally).
        """
        temp_filename = self.create_file_for(self.config.script)
        if len(self.config.title) > 0:
            self.logger.info(render(self.config.title, model=self.config.model, env=self.env,
                                    item=self.config.item, variables=self.config.variables))
        if temp_filename is not None:
            try:
                for line in self.process_file(temp_filename):
                    yield line
            finally:
                # removing script
                os.remove(temp_filename)
            if not self.config.internal:
                if self.exit_code == 0:
                    self.event.succeeded()
                else:
                    self.event.failed(exit_code=self.exit_code)
| [
37811,
23002,
15129,
257,
27334,
4226,
526,
15931,
198,
2,
15069,
357,
66,
8,
2177,
5658,
29921,
9038,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
286,
428,
198,
2,
3788,
... | 2.295052 | 3,335 |
import sys
import numpy as np
from lab1 import input_matrix, input_vector
# from lab4 import first_and_second_step_simplex_method
def double_simplex(c, b, a_matrix, j_vector):
    """Dual simplex method: build an optimal (possibly infeasible) plan and
    drive it to feasibility without losing optimality.

    Args:
        c (np.array): objective coefficient vector
        b (np.array): right-hand-side vector
        a_matrix (np.array): constraint coefficient matrix
        j_vector (np.array): 1-based indices of the initial basis.
            Fix: previously this array was decremented IN PLACE (``j_vector -= 1``),
            mutating the caller's argument; a local copy is used instead.

    Returns:
        list: the feasible plan on success, or
        str: a message when the problem is infeasible.
    """
    n = a_matrix.shape[1]
    # Work on a 0-based copy so the caller's array is left untouched.
    j_vector = np.asarray(j_vector) - 1
    # Initial dual plan for the starting basis (helper defined elsewhere).
    y = get_initial_y(c, a_matrix, j_vector)
    x_0 = [0 for _ in range(n)]
    while True:
        not_J = np.delete(np.arange(n), j_vector)
        B = np.linalg.inv(a_matrix[:, j_vector])
        # kappa: basic components of the primal pseudo-plan.
        kappa = B.dot(b)
        if all(kappa >= 0):
            # Pseudo-plan is feasible -> it is the optimal plan.
            for j, _kappa in zip(j_vector, kappa):
                x_0[j] = _kappa
            print(str(list(map(lambda _x: round(float(_x), 3), list(x_0)))
                      ).replace('[', '').replace(']', ''), "- план")
            print(f"План: \t{' '.join(map(str,list(x_0)))}")
            return x_0
        # Leaving variable: most negative basic component.
        k = np.argmin(kappa)
        delta_y = B[k]
        mu = delta_y.dot(a_matrix)
        # Ratio test over non-basic columns; np.inf marks ineligible columns.
        sigma = []
        for i in not_J:
            if mu[i] >= 0:
                sigma.append(np.inf)
            else:
                sigma.append((c[i] - a_matrix[:, i].dot(y)) / mu[i])
        sigma_0_ind = not_J[np.argmin(sigma)]
        sigma_0 = min(sigma)
        if sigma_0 == np.inf:
            # No entering column -> the feasible set is empty.
            print("Задача не имеет решения, т.к. пусто множество ее\
 допустимых планов.")
            return "Задача не имеет решения"
        y += sigma_0 * delta_y
        j_vector[k] = sigma_0_ind
def test1():
    """Run the dual simplex solver on test case 1.

    Returns:
        tuple: (constraint matrix, vector b, vector c, basis index vector),
        all as np.array.
    """
    constraints = np.array([
        [-2, -1, -4, 1, 0],
        [-2, -2, -2, 0, 1]
    ])
    rhs = np.array([-1, -1.5])
    objective = np.array([-4, -3, -7, 0, 0])
    basis = np.array([4, 5])
    double_simplex(c=objective, b=rhs, a_matrix=constraints, j_vector=basis)
    return constraints, rhs, objective, basis
def test2():
    """Run the dual simplex solver on test case 2.

    Returns:
        tuple: (constraint matrix, vector b, vector c, basis index vector),
        all as np.array.
    """
    constraints = np.array([
        [-2, -1, 1, -7, 0, 0, 0, 2],
        [4, 2, 1, 0, 1, 5, -1, -5],
        [1, 1, 0, -1, 0, 3, -1, 1]
    ])
    rhs = np.array([-2, 4, 3])
    objective = np.array([2, 2, 1, -10, 1, 4, -2, -3])
    basis = np.array([2, 5, 7])
    double_simplex(c=objective, b=rhs, a_matrix=constraints, j_vector=basis)
    return constraints, rhs, objective, basis
def test3():
    """Run the dual simplex solver on test case 3.

    Returns:
        tuple: (constraint matrix, vector b, vector c, basis index vector),
        all as np.array.
    """
    constraints = np.array([
        [-2, -1, 1, -7, 0, 0, 0, 2],
        [-4, 2, 1, 0, 1, 5, -1, 5],
        [1, 1, 0, 1, 4, 3, 1, 1]
    ])
    rhs = np.array([-2, 8, -2])
    objective = np.array([12, -2, -6, 20, -18, -5, -7, -20])
    basis = np.array([2, 4, 6])
    double_simplex(c=objective, b=rhs, a_matrix=constraints, j_vector=basis)
    return constraints, rhs, objective, basis
def test4():
    """Run the dual simplex solver on test case 4.

    Returns:
        tuple: (constraint matrix, vector b, vector c, basis index vector),
        all as np.array.
    """
    constraints = np.array([
        [-2, -1, 10, -7, 1, 0, 0, 2],
        [-4, 2, 3, 0, 5, 1, -1, 0],
        [1, 1, 0, 1, -4, 3, -1, 1]
    ])
    rhs = np.array([-2, -5, 2])
    objective = np.array([10, -2, -38, 16, -9, -9, -5, -7])
    basis = np.array([2, 8, 5])
    double_simplex(c=objective, b=rhs, a_matrix=constraints, j_vector=basis)
    return constraints, rhs, objective, basis
if __name__ == "__main__":
    # NOTE(review): `simplex` is not defined in this module as shown (only
    # `double_simplex` and test1..test4 are); unless it is supplied by an
    # import elsewhere, this raises NameError — confirm.
    simplex()
| [
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
2248,
16,
1330,
5128,
62,
6759,
8609,
11,
5128,
62,
31364,
198,
2,
422,
2248,
19,
1330,
717,
62,
392,
62,
12227,
62,
9662,
62,
14323,
11141,
62,
24396,
628,
198,
4299... | 1.761087 | 2,097 |
# Python 2 script: prompt for a name and greet the user.
user_name = raw_input("Enter")
print("Hello " + user_name)
3672,
796,
8246,
62,
15414,
7203,
17469,
4943,
198,
4798,
1,
15496,
366,
1343,
1438
] | 3.066667 | 15 |
# -*- coding: utf-8 -*-
"""Project metadata

Information describing the project.
"""

# The package name, which is also the "UNIX name" for the project.
# NOTE(review): the value contains a space, which is unusual for a UNIX
# name / importable package name — confirm.
package = 'airport challenge'
# Human-readable project title.
project = "Airport Challenge in Python"
# Title with all spaces stripped (derived from `project`).
project_no_spaces = project.replace(' ', '')
# Release version string.
version = '0.1'
# One-line project description.
description = 'Just some experimentation!'
# List of author names.
authors = ['George Maddocks']
# License identifier. NOTE: `license` shadows the `license` builtin at
# module level; harmless here, but worth knowing.
license = 'MIT'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
16775,
20150,
198,
198,
21918,
12059,
262,
1628,
13,
198,
37811,
198,
198,
2,
383,
5301,
1438,
11,
543,
318,
635,
262,
366,
4944,
10426,
1438,
1,
329,
262,
1628... | 3.390909 | 110 |
from utils import *
from data_load import load_data
import pandas as pd
import sys
import os
import glob
from argparse import ArgumentParser
import imp
import numpy as np
from utils import spectrogram2wav
# from scipy.io.wavfile import write
import soundfile as sf
import tqdm
from concurrent.futures import ProcessPoolExecutor
import tensorflow as tf
from architectures import Text2MelGraph, SSRNGraph, Graph_style_unsupervised
from synthesize import make_mel_batch, split_batch, synth_mel2mag
from configuration import load_config
import logger_setup
from logging import info
import logging
logging.getLogger('matplotlib.font_manager').disabled = True
from data_load import *
import numpy as np
from synthesize import *
import pickle
import matplotlib.pyplot as plt
def mi_regression_feat_embed(X, feat_df):
    '''
    Estimate mutual information between latent embeddings and acoustic features.

    X corresponds to latent embeddings; feat_df is y, i.e. the acoustic
    features. Each feature column is regressed against the embeddings to see
    how predictable it is from them (a proxy for how much expressiveness
    information the embeddings carry).

    Returns a DataFrame of shape (n_features, embedding_dim), indexed by the
    feature names.
    '''
    from sklearn.feature_selection import mutual_info_regression
    targets = feat_df.values
    scores = np.zeros((targets.shape[-1], X.shape[-1]))
    for col in range(targets.shape[-1]):
        scores[col, :] = mutual_info_regression(X, targets[:, col])
    result = pd.DataFrame(scores)
    result.index = feat_df.columns
    return result
def corr_feat_embed(embed_dfs, feat_df, titles=None):
    '''
    Compute absolute correlations between a set of features and each
    dimension of the embeddings.

    Args:
        embed_dfs: list of DataFrames, one per embedding set; columns are
            embedding dimensions, rows align with feat_df's rows.
        feat_df: DataFrame of (acoustic) features, one column per feature.
        titles: accepted for backward compatibility; unused here.
            (Fix: previously a mutable default argument ``[]``.)

    Returns:
        tuple: (corr_embeds, mi_embeds) where corr_embeds is a list of
        DataFrames of shape (n_features, embedding_dim) holding |Pearson r|,
        and mi_embeds is always empty (kept for interface compatibility).

    Fixes over the original: the inner loop reused the outer loop variable
    ``i`` (shadowing); redundant second ``.abs()`` and dead commented-out
    code removed; unused ``n_feat`` dropped.
    '''
    corr_embeds = []
    mi_embeds = []  # never populated; preserved so callers can still unpack two values
    for embed_df in embed_dfs:
        embed_size = embed_df.shape[-1]
        # Append each embedding dimension to a copy of the feature frame under
        # a stringified column name. NOTE(review): assumes feat_df has no
        # columns named "0", "1", ... — they would be overwritten; confirm.
        feat_embed = feat_df.copy()
        for col in range(embed_size):
            feat_embed[str(col)] = embed_df.iloc[:, col]
        # Full |correlation| matrix; the bottom-right block pairs features
        # (rows) with embedding dimensions (columns).
        corr = feat_embed.astype(float).corr().abs()
        corr_embed = corr.iloc[:-embed_size, -embed_size:]
        for col in range(embed_size):
            print('max corr '+str(col)+' : '+str(np.max(corr_embed.iloc[:,col])))
        corr_embeds.append(corr_embed)
    return corr_embeds, mi_embeds
if __name__=="__main__":
    # `main_work` is not defined in this module; presumably it comes from one
    # of the star imports above (e.g. `from synthesize import *`) — confirm.
    main_work()
| [
6738,
3384,
4487,
1330,
1635,
198,
6738,
1366,
62,
2220,
1330,
3440,
62,
7890,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
15095,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
198,
117... | 2.461402 | 1,127 |
# %%
#
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import networkx as nx
from joblib import Parallel, delayed
from tqdm import tqdm
from pymaid_creds import url, name, password, token
import pymaid
import connectome_tools.cluster_analysis as clust
import connectome_tools.celltype as ct
import connectome_tools.process_graph as pg
import connectome_tools.process_matrix as pm
rm = pymaid.CatmaidInstance(url, token, name, password)
# load previously generated paths
all_edges_combined = pd.read_csv('interhemisphere/csv/all_paired_edges.csv', index_col=0)
graph = pg.Analyze_Nx_G(all_edges_combined, graph_type='directed')
pairs = pm.Promat.get_pairs()
# %%
# load neuron types
all_sensories = ct.Celltype_Analyzer.get_skids_from_meta_annotation('mw brain sensories')
all_outputs = ct.Celltype_Analyzer.get_skids_from_meta_annotation('mw brain outputs')
sensory_pair_ids = pm.Promat.load_pairs_from_annotation('sensory', pairs, return_type='all_pair_ids', skids=all_sensories, use_skids=True)
outputs_pair_ids = pm.Promat.load_pairs_from_annotation('output', pairs, return_type='all_pair_ids', skids=all_outputs, use_skids=True)
dVNC_pair_ids = pm.Promat.load_pairs_from_annotation('mw dVNC', pairs, return_type='all_pair_ids')
dSEZ_pair_ids = pm.Promat.load_pairs_from_annotation('mw dSEZ', pairs, return_type='all_pair_ids')
RGN_pair_ids = pm.Promat.load_pairs_from_annotation('mw RGN', pairs, return_type='all_pair_ids')
# %%
# generate and save paths all sensory to outputs
cutoff = 6
outputs = [dVNC_pair_ids, dSEZ_pair_ids, RGN_pair_ids, outputs_pair_ids]
output_types = ['dVNC', 'dSEZ', 'RGN', 'output']
save_paths = [f'data/paths/all_paths_sens-to-{output_type}_cutoff{cutoff}' for output_type in output_types]
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(G=graph.G, source_list=sensory_pair_ids, targets=outputs[i], cutoff=cutoff, save_path = save_paths[i]) for i in tqdm(range(len(save_paths))))
'''
save_path = f'data/paths/all_paths_sens-to-dVNC_cutoff{cutoff}'
pg.Prograph.generate_save_simple_paths(graph.G, sensory_pair_ids, dVNC_pair_ids, cutoff=cutoff, save_path=save_path)
save_path = f'data/paths/all_paths_sens-to-dSEZ_cutoff{cutoff}'
pg.Prograph.generate_save_simple_paths(graph.G, sensory_pair_ids, dSEZ_pair_ids, cutoff=cutoff, save_path=save_path)
save_path = f'data/paths/all_paths_sens-to-RGN_cutoff{cutoff}'
pg.Prograph.generate_save_simple_paths(graph.G, sensory_pair_ids, RGN_pair_ids, cutoff=cutoff, save_path=save_path)
save_path = f'data/paths/all_paths_sens-to-output_cutoff{cutoff}'
pg.Prograph.generate_save_simple_paths(graph.G, sensory_pair_ids, outputs_pair_ids, cutoff=cutoff, save_path=save_path)
'''
# %%
#
dVNC_paths = pg.Prograph.open_simple_paths(f'data/paths/all_paths_sens-to-dVNC_cutoff{cutoff}.csv.gz')
dSEZ_paths = pg.Prograph.open_simple_paths(f'data/paths/all_paths_sens-to-dSEZ_cutoff{cutoff}.csv.gz')
RGN_paths = pg.Prograph.open_simple_paths(f'data/paths/all_paths_sens-to-RGN_cutoff{cutoff}.csv.gz')
output_paths = pg.Prograph.open_simple_paths(f'data/paths/all_paths_sens-to-output_cutoff{cutoff}.csv.gz')
# %%
| [
2,
43313,
198,
2,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
3127,
87,
355,
299... | 2.443929 | 1,293 |
from mayan.apps.documents.tests.base import GenericDocumentViewTestCase
from ..permissions import permission_workflow_transition
from .mixins import (
WorkflowTestMixin, WorkflowViewTestMixin, WorkflowTransitionViewTestMixin
)
| [
6738,
743,
272,
13,
18211,
13,
15390,
2886,
13,
41989,
13,
8692,
1330,
42044,
24941,
7680,
14402,
20448,
198,
198,
6738,
11485,
525,
8481,
1330,
7170,
62,
1818,
11125,
62,
7645,
653,
198,
198,
6738,
764,
19816,
1040,
1330,
357,
198,
2... | 3.441176 | 68 |
# from pathlib import Path
# from text_utils.ipa2symb import IPAExtractionSettings
# from text_utils.language import Language
# from text_utils.text import EngToIpaMode
# def test_add_corpus_from_text_file(tmp_path: Path):
# base_dir = tmp_path / "base_dir"
# text_path = tmp_path / "input.txt"
# text_path.write_text("line1\nline2\n")
# add_corpus_from_text_file(
# base_dir=base_dir,
# corpus_name="corpus1",
# step_name="step1",
# text_path=text_path,
# lang=Language.ENG,
# replace_unknown_ipa_by=None,
# ignore_arcs=None,
# ignore_tones=None,
# overwrite=False,
# )
# assert (base_dir / "corpora" / "corpus1" / "step1" / "data.pkl").exists()
# def test_app_normalize(tmp_path: Path):
# base_dir = tmp_path / "base_dir"
# text_path = tmp_path / "input.txt"
# text_path.write_text("line 1\nline 2\n")
# add_corpus_from_text_file(
# base_dir=base_dir,
# corpus_name="corpus1",
# step_name="step1",
# text_path=text_path,
# lang=Language.ENG,
# replace_unknown_ipa_by=None,
# ignore_arcs=None,
# ignore_tones=None,
# overwrite=False,
# )
# app_normalize(
# base_dir=base_dir,
# corpus_name="corpus1",
# in_step_name="step1",
# out_step_name="step2",
# target=PreparationTarget.BOTH,
# replace_unknown_ipa_by=None,
# ignore_arcs=None,
# ignore_tones=None,
# overwrite=False,
# )
# assert (base_dir / "corpora" / "corpus1" / "step2" / "data.pkl").exists()
# def test_app_convert_to_ipa(tmp_path: Path):
# base_dir = tmp_path / "base_dir"
# text_path = tmp_path / "input.txt"
# text_path.write_text("line 1\nline 2\n")
# add_corpus_from_text_file(
# base_dir=base_dir,
# corpus_name="corpus1",
# step_name="step1",
# text_path=text_path,
# lang=Language.ENG,
# replace_unknown_ipa_by=None,
# ignore_arcs=None,
# ignore_tones=None,
# overwrite=False,
# )
# app_convert_to_ipa(
# base_dir=base_dir,
# corpus_name="corpus1",
# in_step_name="step1",
# out_step_name="step2",
# target=PreparationTarget.BOTH,
# ignore_arcs=True,
# ignore_tones=True,
# replace_unknown_ipa_by="_",
# mode=EngToIpaMode.BOTH,
# overwrite=False,
# )
# assert (base_dir / "corpora" / "corpus1" / "step2" / "data.pkl").exists()
| [
2,
422,
3108,
8019,
1330,
10644,
198,
198,
2,
422,
2420,
62,
26791,
13,
541,
64,
17,
1837,
2022,
1330,
27966,
11627,
7861,
26232,
198,
2,
422,
2420,
62,
26791,
13,
16129,
1330,
15417,
198,
2,
422,
2420,
62,
26791,
13,
5239,
1330,
... | 2.093107 | 1,117 |
# Answer range-XOR queries on a K-periodic sequence using a prefix-XOR table.
K = int(input())
prefix_xor = [0]
for token in input().split():
    # prefix_xor[i] == a_1 ^ ... ^ a_i
    prefix_xor.append(prefix_xor[-1] ^ int(token))
query_count = int(input())
for _ in range(query_count):
    l, r = map(int, input().split())
    # Indices wrap with period K+1 because the sequence repeats every K terms.
    print(prefix_xor[r % (K + 1)] ^ prefix_xor[(l - 1) % (K + 1)])
| [
42,
796,
493,
7,
15414,
28955,
198,
64,
796,
5128,
22446,
35312,
3419,
198,
67,
796,
685,
15,
60,
198,
1640,
2124,
287,
257,
25,
198,
220,
220,
220,
288,
13,
33295,
7,
67,
58,
12,
16,
60,
10563,
493,
7,
87,
4008,
220,
1303,
28... | 1.875 | 128 |
import torch
import torch.nn as nn
### Model : RRCNN-C ###
| [
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
201,
198,
21017,
9104,
1058,
371,
7397,
6144,
12,
34,
44386,
201,
198,
201
] | 2.407407 | 27 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2014,2015,2016,2017 Jeremie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Scipy discrete probability distribution (used in doTransition function)
from scipy.stats import rv_discrete
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
66,
8,
1946,
11,
4626,
11,
5304,
11,
5539,
10272,
44871,
27196,
11290,
357,
4023,
1378,
2503,... | 3.648725 | 353 |
a=int(input())
for i in range(1,a+1):
print(i*'*') | [
198,
64,
28,
600,
7,
15414,
28955,
198,
198,
1640,
1312,
287,
2837,
7,
16,
11,
64,
10,
16,
2599,
198,
220,
220,
220,
3601,
7,
72,
9,
6,
9,
11537
] | 1.806452 | 31 |
from ..bert import Bert, BertMLMHead
from ..configs import RobertaConfig
class Roberta(Bert):
"""
Roberta
"""
config_cls = RobertaConfig
class RobertaPreTraining(Roberta):
"""
Roberta PreTraining Model, it exclude nsp task
"""
| [
6738,
11485,
4835,
1330,
22108,
11,
22108,
5805,
44,
13847,
198,
6738,
11485,
11250,
82,
1330,
5199,
64,
16934,
628,
198,
4871,
5199,
64,
7,
33,
861,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
5199,
64,
198,
220,
220,
220,... | 2.795699 | 93 |
import sys
import os
import thriftpy
sys.path.append("../")
sys.path.append("gen-py")
from SpotifakeServices import TrackService
from SpotifakeServices.ttypes import *
from SpotifakeManagement.ttypes import *
from SQLConnection.sqlServer_track import SqlServerTrackManagement | [
11748,
25064,
198,
11748,
28686,
198,
11748,
5636,
2135,
9078,
198,
17597,
13,
6978,
13,
33295,
7203,
40720,
4943,
198,
17597,
13,
6978,
13,
33295,
7203,
5235,
12,
9078,
4943,
198,
6738,
15899,
361,
539,
31007,
1330,
17762,
16177,
198,
... | 3.618421 | 76 |
from geraldo.utils import get_attr_value, calculate_size
from geraldo.widgets import Widget, Label, SystemField
from geraldo.graphics import Graphic, RoundRect, Rect, Line, Circle, Arc,\
Ellipse, Image
class ReportGenerator(object):
"""A report generator is used to generate a report to a specific format."""
_is_first_page = True
_is_latest_page = True
_current_top_position = 0
_current_left_position = 0
_current_page_number = 0
_current_object = None
_current_queryset = None
_generation_datetime = None
# Groupping
_groups_values = None
_groups_working_values = None
_groups_changed = None
_groups_stack = None
# The rendered report has pages, each page is a ReportPage instance
_rendered_pages = None
_page_rect = None
def __init__(self, report):
"""This method should be overrided to receive others arguments"""
self.report = report
# Initializes some attributes
self._rendered_pages = []
self._groups_values = {}
self._groups_working_values = {}
self._groups_changed = {}
self._groups_stack = []
def execute(self):
"""This method must be overrided to execute the report generation."""
# Initializes pages
self._is_first_page = True
def render_border(self, borders_dict, rect_dict):
"""Renders a border in the coordinates setted in the rect."""
b_all = borders_dict.get('all', None)
if b_all:
graphic = isinstance(b_all, Graphic) and b_all or Rect()
graphic.set_rect(
left=rect_dict['left'],
top=rect_dict['top'] - rect_dict['height'],
width=rect_dict['right'] - rect_dict['left'],
height=rect_dict['height'],
)
self._rendered_pages[-1].elements.append(graphic)
b_left = borders_dict.get('left', None)
if b_left:
graphic = isinstance(b_left, Graphic) and b_left or Line()
graphic.set_rect(
left=rect_dict['left'], top=rect_dict['top'],
right=rect_dict['left'], bottom=rect_dict['bottom']
)
self._rendered_pages[-1].elements.append(graphic)
b_top = borders_dict.get('top', None)
if b_top:
graphic = isinstance(b_top, Graphic) and b_top or Line()
graphic.set_rect(
left=rect_dict['left'], top=rect_dict['top'],
right=rect_dict['right'], bottom=rect_dict['top']
)
self._rendered_pages[-1].elements.append(graphic)
b_right = borders_dict.get('right', None)
if b_right:
graphic = isinstance(b_right, Graphic) and b_right or Line()
graphic.set_rect(
left=rect_dict['right'], top=rect_dict['top'],
right=rect_dict['right'], bottom=rect_dict['bottom']
)
self._rendered_pages[-1].elements.append(graphic)
b_bottom = borders_dict.get('bottom', None)
if b_bottom:
graphic = isinstance(b_right, Graphic) and b_right or Line()
graphic.set_rect(
left=rect_dict['left'], top=rect_dict['bottom'],
right=rect_dict['right'], bottom=rect_dict['bottom']
)
self._rendered_pages[-1].elements.append(graphic)
def make_band_rect(self, band, top_position, left_position):
"""Returns the right band rect on the PDF canvas"""
band_rect = {
'left': left_position, #self.report.margin_left,
'top': top_position,
'right': left_position + self.calculate_size(band.width), #self.report.page_size[0] - self.report.margin_right,
'bottom': top_position - self.calculate_size(band.height),
'height': self.calculate_size(band.height),
}
return band_rect
def render_band(self, band, top_position=None, left_position=None,
update_top=True, current_object=None):
"""Generate a band having the current top position or informed as its
top coordinate"""
# Sets the current object
current_object = current_object or self._current_object
# Page width. This should be done in a metaclass in Report domain TODO
self._rendered_pages[-1].width = self.calculate_size(self.report.page_size[0]) -\
self.calculate_size(self.report.margin_left) - self.calculate_size(self.report.margin_right)
# Default value for band width
band.width = self.calculate_size(band.width) or self._rendered_pages[-1].width
# Coordinates
left_position = left_position or self.get_left_pos()
# Increases the top position when being an inline displayed detail band
if left_position > self.calculate_size(self.report.margin_left) and\
getattr(band, 'display_inline', False) and\
band.width < self.get_available_width():
temp_height = band.height + getattr(band, 'margin_top', 0) + getattr(band, 'margin_bottom', 0)
self.update_top_pos(decrease=self.calculate_size(temp_height))
else:
self.update_left_pos(set=0)
left_position = self.get_left_pos()
temp_top = top_position = top_position or self.get_top_pos()
# Calculates the band dimensions on the canvas
band_rect = self.make_band_rect(band, top_position, left_position)
# Band borders
self.render_border(band.borders, band_rect)
# Variable that stores the highest height at all elements
highest_height = 0
# Loop at band widgets
for element in band.elements:
# Doesn't render not visible element
if not element.visible:
continue
# Widget element
if isinstance(element, Widget):
widget = element.clone()
# Set widget colors
widget.font_color = self.report.default_font_color
# Set widget basic attributes
widget.instance = current_object
widget.generator = self
widget.report = self.report # This should be done by a metaclass in Band domain TODO
widget.band = band # This should be done by a metaclass in Band domain TODO
widget.page = self._rendered_pages[-1]
if isinstance(widget, SystemField):
widget.left = band_rect['left'] + self.calculate_size(widget.left)
widget.top = self.calculate_top(temp_top, self.calculate_size(widget.top))
temp_height = self.calculate_size(element.top) + self.calculate_size(widget.height)
elif isinstance(widget, Label):
widget.para = self.make_paragraph(widget.text, self.make_paragraph_style(band, widget.style))
if widget.truncate_overflow:
self.keep_in_frame(
widget,
self.calculate_size(widget.width),
self.calculate_size(widget.height),
[widget.para],
mode='truncate',
)
widget.left = band_rect['left'] + self.calculate_size(widget.left)
widget.top = self.calculate_top(temp_top, self.calculate_size(widget.top), self.calculate_size(widget.height))
else:
self.wrap_paragraph_on(widget.para, self.calculate_size(widget.width), self.calculate_size(widget.height))
widget.left = band_rect['left'] + self.calculate_size(widget.left)
widget.top = self.calculate_top(temp_top, self.calculate_size(widget.top), self.calculate_size(widget.para.height))
temp_height = self.calculate_size(element.top) + self.calculate_size(widget.para.height)
else:
temp_height = self.calculate_size(element.top) + self.calculate_size(widget.height)
# Sets element height as the highest
if temp_height > highest_height:
highest_height = temp_height
self._rendered_pages[-1].elements.append(widget)
# Graphic element
elif isinstance(element, Graphic):
graphic = element.clone()
# Set widget basic attributes
graphic.instance = current_object
graphic.generator = self
graphic.report = self.report # This should be done by a metaclass in Band domain TODO
graphic.band = band # This should be done by a metaclass in Band domain TODO
graphic.page = self._rendered_pages[-1]
# Set graphic colors
graphic.fill_color = graphic.fill_color or self.report.default_fill_color
graphic.stroke_color = graphic.stroke_color or self.report.default_stroke_color
if isinstance(graphic, RoundRect):
graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
graphic.top = top_position - self.calculate_size(graphic.top) - self.calculate_size(graphic.height)
elif isinstance(graphic, Rect):
graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
graphic.top = top_position - self.calculate_size(graphic.top) - self.calculate_size(graphic.height)
elif isinstance(graphic, Line):
graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
graphic.top = top_position - self.calculate_size(graphic.top)
graphic.right = band_rect['left'] + self.calculate_size(graphic.right)
graphic.bottom = top_position - self.calculate_size(graphic.bottom)
elif isinstance(graphic, Circle):
graphic.left_center = band_rect['left'] + self.calculate_size(graphic.left_center)
graphic.top_center = top_position - self.calculate_size(graphic.top_center)
elif isinstance(graphic, Arc):
graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
graphic.top = top_position - self.calculate_size(graphic.top)
graphic.right = band_rect['left'] + self.calculate_size(graphic.right)
graphic.bottom = top_position - self.calculate_size(graphic.bottom)
elif isinstance(graphic, Ellipse):
graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
graphic.top = top_position - self.calculate_size(graphic.top)
graphic.right = band_rect['left'] + self.calculate_size(graphic.right)
graphic.bottom = top_position - self.calculate_size(graphic.bottom)
elif isinstance(graphic, Image):
graphic.left = band_rect['left'] + self.calculate_size(graphic.left)
graphic.top = top_position - self.calculate_size(graphic.top) - self.calculate_size(graphic.height)
# Sets element height as the highest
temp_height = self.calculate_size(element.top) + self.calculate_size(graphic.height)
if temp_height > highest_height:
highest_height = temp_height
self._rendered_pages[-1].elements.append(graphic)
# Updates top position
if update_top:
if band.auto_expand_height:
band_height = highest_height
else:
band_height = self.calculate_size(band.height)
band_height += self.calculate_size(getattr(band, 'margin_top', 0))
band_height += self.calculate_size(getattr(band, 'margin_bottom', 0))
self.update_top_pos(band_height)
# Updates left position
if getattr(band, 'display_inline', False):
self.update_left_pos(band.width + self.calculate_size(getattr(band, 'margin_right', 0)))
else:
self.update_left_pos(set=0)
# Child bands
for child_band in band.child_bands or []: # TODO This "or []" here is a quickfix
# Doesn't generate if it is not visible
if not child_band.visible:
continue
self.force_blank_page_by_height(self.calculate_size(child_band.height))
self.render_band(child_band)
def force_blank_page_by_height(self, height):
"""Check if the height is in client available report height and
makes a new page if necessary"""
if self.get_available_height() < height:
self.force_new_page()
def force_new_page(self, insert_new_page=True):
"""Starts a new blank page"""
# Ends the current page
self._current_top_position = 0
# Creates the new page
if insert_new_page:
self._rendered_pages.append(ReportPage())
# Starts a new one
self.start_new_page()
# Page footer
self.render_page_footer()
def render_begin(self):
"""Renders the report begin band if it exists"""
if not self.report.band_begin:
return
# Doesn't generate this band if it is not visible
if not self.report.band_begin.visible:
return
# Call method that print the band area and its widgets
self.render_band(self.report.band_begin)
def render_summary(self):
"""Generate the report summary band if it exists"""
if not self.report.band_summary:
return
# Doesn't generate this band if it is not visible
if not self.report.band_summary.visible:
return
# Clears groups stack
self._groups_stack = []
# Check to force new page if there is no available space
self.force_blank_page_by_height(self.calculate_size(self.report.band_summary.height))
# Call method that print the band area and its widgets
self.render_band(self.report.band_summary)
def render_page_header(self):
"""Generate the report page header band if it exists"""
if not self.report.band_page_header:
return
# Doesn't generate this band if it is not visible
if not self.report.band_page_header.visible:
return
# Call method that print the band area and its widgets
self.render_band(
self.report.band_page_header,
top_position=self.calculate_size(self.report.margin_top),
update_top=False,
)
def render_page_footer(self):
"""Generate the report page footer band if it exists"""
if not self.report.band_page_footer:
return
# Doesn't generate this band if it is not visible
if not self.report.band_page_footer.visible:
return
# Call method that print the band area and its widgets
self.render_band(
self.report.band_page_footer,
top_position=self.calculate_size(self.report.page_size[1]) -\
self.calculate_size(self.report.margin_bottom) -\
self.calculate_size(self.report.band_page_footer.height),
update_top=False,
)
def render_end_current_page(self):
"""Closes the current page, using page breaker constant. Everything done after
this will draw into a new page. Before this, using the generate_page_footer
method to draw the footer"""
self.render_page_footer()
if self._is_latest_page:
self.render_summary()
self._current_page_number += 1
self._is_first_page = False
self.update_top_pos(set=0) # <---- update top position
def render_bands(self):
"""Loops into the objects list to create the report pages until the end"""
# Preparing local auxiliar variables
self._current_page_number = 0
self._current_object_index = 0
objects = self.report.get_objects_list()
# just an alias to make it easier
d_band = self.report.band_detail
# Empty report
if self.report.print_if_empty and not objects:
self.start_new_page()
self.render_begin()
self.render_end_current_page()
# Loop for pages
while self._current_object_index < len(objects):
# Starts a new page and generates the page header band
self.start_new_page()
first_object_on_page = True
# Generate the report begin band
if self._current_page_number == 0:
self.render_begin()
# Does generate objects if there is no details band
if not d_band:
self._current_object_index = len(objects)
# Loop for objects to go into grid on current page
while self._current_object_index < len(objects):
# Get current object from list
self._current_object = objects[self._current_object_index]
# Renders group bands for changed values
self.calc_changed_groups(first_object_on_page)
if not first_object_on_page:
self.render_groups_footers()
self.render_groups_headers()
# Generate this band only if it is visible
if d_band.visible:
self.render_band(d_band)
# Renders subreports
self.render_subreports()
# Next object
self._current_object_index += 1
first_object_on_page = False
# Break this if this page doesn't suppport nothing more...
# ... if there is no more available height
if self.get_available_height() < self.calculate_size(d_band.height):
# right margin is not considered to calculate the necessary space
d_width = self.calculate_size(d_band.width) + self.calculate_size(getattr(d_band, 'margin_left', 0))
# ... and this is not an inline displayed detail band or there is no width available
if not getattr(d_band, 'display_inline', False) or self.get_available_width() < d_width:
break
# ... or this band forces a new page and this is not the last object in objects list
elif d_band.force_new_page and self._current_object_index < len(objects):
break
# Sets this is the latest page or not
self._is_latest_page = self._current_object_index >= len(objects)
# Renders the finish group footer bands
if self._is_latest_page:
self.calc_changed_groups(False)
self.render_groups_footers(force=True)
# Ends the current page, printing footer and summary and necessary
self.render_end_current_page()
# Breaks if this is the latest item
if self._is_latest_page:
break
# Increment page number
self._current_page_number += 1
def start_new_page(self, with_header=True):
"""Do everything necessary to be done to start a new page"""
self._rendered_pages.append(ReportPage())
if with_header:
self.render_page_header()
# Page borders
if self.report.borders:
if not self._page_rect:
self._page_rect = self.report.get_page_rect()
self._page_rect['top'] = self.calculate_size(self.report.page_size[1]) - self._page_rect['top']
self._page_rect['bottom'] = self.calculate_size(self.report.page_size[1]) - self._page_rect['bottom']
self.render_border(self.report.borders, self._page_rect)
def calculate_size(self, size):
"""Uses the function 'calculate_size' to calculate a size"""
return calculate_size(size)
def get_left_pos(self):
"""Returns the left position of the drawer. Is useful on inline displayed detail bands"""
return self.calculate_size(self.report.margin_left) + self._current_left_position
def get_top_pos(self):
"""We use this to use this to get the current top position,
considering also the top margin."""
ret = self.calculate_size(self.report.margin_top) + self._current_top_position
if self.report.band_page_header:
ret += self.calculate_size(self.report.band_page_header.height)
return ret
def get_available_height(self):
"""Returns the available client height area from the current top position
until the end of page, considering the bottom margin."""
ret = self.calculate_size(self.report.page_size[1]) - self.calculate_size(self.report.margin_bottom) -\
self.calculate_size(self.report.margin_top) - self._current_top_position
if self.report.band_page_header:
ret -= self.calculate_size(self.report.band_page_header.height)
if self.report.band_page_footer:
ret -= self.calculate_size(self.report.band_page_footer.height)
return ret
def update_top_pos(self, increase=0, decrease=0, set=None):
"""Updates the current top position controller, increasing (by default),
decreasing or setting it with a new value."""
if set is not None:
self._current_top_position = set
else:
self._current_top_position += increase
self._current_top_position -= decrease
return self._current_top_position
def update_left_pos(self, increase=0, decrease=0, set=None):
"""Updates the current left position controller, increasing (by default),
decreasing or setting it with a new value."""
if set is not None:
self._current_left_position = set
else:
self._current_left_position += increase
self._current_left_position -= decrease
return self._current_left_position
def get_page_count(self):
"""Calculate and returns the page count for this report. The challenge
here is do this calculate before to generate the pages."""
return len(self._rendered_pages)
def make_paragraph(self, text, style=None):
"""Uses the Paragraph class to return a new paragraph object"""
raise Exception('Not implemented')
def wrap_paragraph_on(self, paragraph, width, height):
"""Wraps the paragraph on the height/width informed"""
raise Exception('Not implemented')
# Stylizing
def set_fill_color(self, color):
"""Sets the current fill on canvas. Used for fonts and shape fills"""
pass
def set_stroke_color(self, color):
"""Sets the current stroke on canvas"""
pass
def set_stroke_width(self, width):
"""Sets the stroke/line width for shapes"""
pass
# Groups topic
def calc_changed_groups(self, force_no_changed=False):
"""Defines which groups has been changed their driver values to be
used to render group bands"""
changed = force_no_changed
# Stores the previous group values
self._groups_working_values = self._groups_values.copy()
# Loops on groups until find the first changed, then all under it are considered
# changed also
for group in self.report.groups:
# Gets the current value to compare with the old one
current_value = get_attr_value(self._current_object, group.attribute_name)
# Set changed as True if if wasn't and there is a change
changed = changed or current_value != self._groups_values.get(group, None)
# Stores new values
self._groups_changed[group] = changed
self._groups_values[group] = current_value
# Appends to the stack
if changed:
self._groups_stack.append(group)
def render_groups_headers(self):
"""Renders the report headers using 'changed' definition calculated by
'calc_changed_groups'"""
# Update working values for groups
self._groups_working_values = self._groups_values
# Loops on groups to render changed ones
for group in self.report.groups:
if self._groups_changed.get(group, None) and\
group.band_header and\
group.band_header.visible:
self.force_blank_page_by_height(self.calculate_size(group.band_header.height))
self.render_band(group.band_header)
def render_groups_footers(self, force=False):
"""Renders the report footers using previous 'changed' definition calculated by
'calc_changed_groups'"""
# Loops on groups to render changed ones
for group in reversed(self.report.groups):
if force or ( self._groups_changed.get(group, None) and\
self._groups_stack and\
self._groups_stack[-1] == group ):
if group.band_footer and group.band_footer.visible:
self.force_blank_page_by_height(self.calculate_size(group.band_footer.height))
self.render_band(group.band_footer)
if self._groups_stack:
self._groups_working_values.pop(self._groups_stack[-1])
self._groups_stack.pop()
def get_current_queryset(self):
"""Returns the current queryset. This solves a problem with subreports
footers and headers, and solves also flexibility and customization issues."""
# Customized and SubReports
if self._current_queryset is not None:
return self._current_queryset
# Groups
elif self._groups_stack:
return self.get_objects_in_group()
# Defaul detail driver queryset
return self.report.queryset
def get_objects_in_group(self):
"""Returns objects filtered in the current group or all if there is no
group"""
filter_dict = dict([(group.attribute_name, value) for group, value in self._groups_working_values.items()])
return filter(filter_object, self.report.queryset)
# SubReports
def render_subreports(self):
"""Renders subreports bands for the current object in, usings its
own queryset.
For a while just the detail band is rendered. Maybe in future we
change this to accept header and footer."""
for subreport in self.report.subreports:
# Subreports must have detail band
if not subreport.band_detail or not subreport.visible:
continue
# Sets the parent object and automatically clear the queryset
# in memory
subreport.parent_object = self._current_object
# Sets the temporary currenty queryset
self._current_queryset = subreport.get_objects_list()
# Loops objects
for num, obj in enumerate(subreport.get_objects_list()):
# Renders the header band
if num == 0 and subreport.band_header:
# Forces new page if there is no available space
force_new_page(subreport.band_header.height)
# Renders the header band
if subreport.band_header.visible:
self.render_band(subreport.band_header)
# Forces new page if there is no available space
force_new_page(subreport.band_detail.height)
# Renders the detail band
if subreport.band_detail.visible:
self.render_band(subreport.band_detail, current_object=obj)
# Renders the footer band
if subreport.band_footer:
# Forces new page if there is no available space
force_new_page(subreport.band_footer.height)
# Renders the header band
if subreport.band_footer.visible:
self.render_band(subreport.band_footer)
# Sets back the default currenty queryset
self._current_queryset = None
def make_paragraph_style(self, band, style=None):
"""Merge report default_style + band default_style + widget style"""
raise Exception('Not implemented')
| [
6738,
308,
1691,
4598,
13,
26791,
1330,
651,
62,
35226,
62,
8367,
11,
15284,
62,
7857,
198,
6738,
308,
1691,
4598,
13,
28029,
11407,
1330,
370,
17484,
11,
36052,
11,
4482,
15878,
198,
6738,
308,
1691,
4598,
13,
70,
11549,
1330,
43029,... | 2.241553 | 12,933 |
import zipfile
try:
with zipfile.ZipFile("data/hfpy_ch5_data.zip") as zipFile:
print(zipFile.namelist())
players = []
for name in zipFile.namelist():
fileContent = zipFile.read(name).decode('utf-8')
fileContent = sanitize(fileContent)
players.append(sorted(fileContent.strip().split(",")))
print(players)
except IOError as e:
print("error" + str(e))
try:
with zipfile.ZipFile("data/hfpy_ch5_data.zip") as zipFile:
print(zipFile.namelist())
players = []
for name in zipFile.namelist():
fileContent = zipFile.read(name).decode('utf-8')
# 列表推导
player = [sanitize(content) for content in fileContent.strip().split(",")]
players.append(sorted(set(player))[0:3])
print(players)
except IOError as e:
print("error" + str(e))
try:
with zipfile.ZipFile("data/hfpy_ch6_data.zip") as zipFile:
print(zipFile.namelist())
players = []
for name in zipFile.namelist():
fileContent = zipFile.read(name).decode('utf-8')
infos = fileContent.strip().split(",")
player = {
"name": infos[0],
"birthday": infos[1]
}
scores = infos[2:len(infos) - 1]
# 列表推导
scores = [sanitize(content) for content in scores]
player["scores"] = sorted(set(scores))[0:3]
players.append(player)
print(players)
except IOError as e:
print("error" + str(e))
try:
with zipfile.ZipFile("data/hfpy_ch6_data.zip") as zipFile:
print(zipFile.namelist())
players = []
for name in zipFile.namelist():
fileContent = zipFile.read(name).decode('utf-8')
infos = fileContent.strip().split(",")
scores = infos[2:len(infos) - 1]
player = Player(infos[0], infos[1], scores)
players.append(player)
print(players)
except IOError as e:
print("error" + str(e))
jonny = NamedList("Jonny")
print(type(jonny))
print(dir(jonny))
| [
11748,
19974,
7753,
628,
198,
198,
28311,
25,
198,
220,
220,
220,
351,
19974,
7753,
13,
41729,
8979,
7203,
7890,
14,
71,
69,
9078,
62,
354,
20,
62,
7890,
13,
13344,
4943,
355,
19974,
8979,
25,
198,
220,
220,
220,
220,
220,
220,
22... | 2.034682 | 1,038 |
# Extract the raw 32 byte values of 'r and s' from OpenSSL's DER formatted signature. bytelen('r + s') == 64
from asn1crypto.core import Sequence
import binascii
raw64byte_sig = ''
with open("REAL_DERformat_openssl_gen_sig.bin", "rb") as f:
signature = f.read()
# parse the ASN.1 sequence from this signature
seq = Sequence.load(signature)
# print the native (Pythonic) representation of this ASN.1 object
dict = seq.native
for k,v in dict.items():
hexed = hex(v).strip('0x')
# print(hexed)
raw64byte_sig += hexed
# print(raw64byte_sig)
with open("REAL_raw64byte_sig_gen_from_openssl.bin", "wb") as f:
f.write(binascii.unhexlify(raw64byte_sig)) | [
2,
29677,
262,
8246,
3933,
18022,
3815,
286,
705,
81,
290,
264,
6,
422,
4946,
31127,
338,
360,
1137,
39559,
9877,
13,
416,
37524,
268,
10786,
81,
1343,
264,
11537,
6624,
5598,
220,
198,
6738,
355,
77,
16,
29609,
78,
13,
7295,
1330,
... | 2.580769 | 260 |
# -*- coding: utf-8 -*-
"""
Do grid search only for uavs
should save the configurations of the uavs from hiearchy search
send this configuration to the Astar
"""
from __future__ import print_function
import numpy as np
import math as m
import random
from Astar import Astar, AstarGraph, AstarLowLevel
import itertools
from itertools import combinations, permutations, product
import time
import gc
import heapq
import pickle
from operator import add, sub
def load_pkl_map(pkl_file_name):
"""returns the annotated map from a pkl file"""
with open(map_pkl_name, 'rb') as f:
annotated_map = pickle.load(f)
return annotated_map
if __name__=='__main__':
##let's just load the map to save us time
map_pkl_name = 'map_test.pkl'
annoted_map = load_pkl_map()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
5211,
10706,
2989,
691,
329,
334,
615,
82,
220,
198,
21754,
3613,
262,
25412,
286,
262,
334,
615,
82,
422,
23105,
3679,
88,
2989,
198,
21280,
428,
8398,
28... | 2.777778 | 288 |
from argparse import ArgumentParser
| [
6738,
1822,
29572,
1330,
45751,
46677,
198
] | 5.142857 | 7 |
#Find total number of Squares in a N*N cheesboard.
# using mathematical logic
#taking input
num = int(input("Enter the number :"))
obj = Solution()
print("The square : ")
print(obj.squares(num))
'''
Time complexity : O(N)
Space complexity : O(1)
Input :
Enter the number : 1
Output :
The square is : 1
''' | [
2,
16742,
2472,
1271,
286,
5056,
3565,
287,
257,
399,
9,
45,
27384,
3526,
13,
198,
220,
220,
220,
1303,
1262,
18069,
9156,
198,
198,
2,
26103,
5128,
198,
22510,
796,
493,
7,
15414,
7203,
17469,
262,
1271,
1058,
48774,
198,
26801,
79... | 2.95283 | 106 |
import fitz
from typing import Dict, List
import logging
logger = logging.getLogger(__name__)
| [
11748,
4197,
89,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
198,
198,
11748,
18931,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198
] | 3.096774 | 31 |
#encoding=utf-8
import evt_abi
abi=evt_abi.evt_abi()
j=r'''
{
"name": "test",
"issuer": "EVT8MGU4aKiVzqMtWi9zLpu8KuTHZWjQQrX475ycSxEkLd6aBpraX",
"issue": {
"name": "issue",
"threshold": 1,
"authorizers": [{
"ref": "[A] EVT8MGU4aKiVzqMtWi9zLpu8KuTHZWjQQrX475ycSxEkLd6aBpraX",
"weight": 1
}]
},
"transfer": {
"name": "transfer",
"threshold": 1,
"authorizers": [{
"ref": "[G] OWNER",
"weight": 1
}]
},
"manage": {
"name": "manage",
"threshold": 1,
"authorizers": [{
"ref": "[A] EVT8MGU4aKiVzqMtWi9zLpu8KuTHZWjQQrX475ycSxEkLd6aBpraX",
"weight": 1
}]
}
}
"transfer": {
"name": "transfer",
"threshold": 1,
"authorizers": [{
"ref": "[G] OWNER",
"weight": 1
}]
},
"manage": {
"name": "manage",
"threshold": 1,
"authorizers": [{
"ref": "[A] EVT8MGU4aKiVzqMtWi9zLpu8KuTHZWjQQrX475ycSxEkLd6aBpraX",
"weight": 1
}]
}
}
'''
j2=r'''
{
"expiration": "2018-05-20T12:25:51",
"ref_block_num": 8643,
"ref_block_prefix": 842752750,
"delay_sec": 0,
"actions": [
{
"name": "newdomain",
"domain": "domain",
"key": "test2",
"data": "000000000000000000000000109f077d0003c7e3ff0060d848bd31bf53daf1d5fed7d82c9b1121394ee15dcafb07e913a9700000000000a5317601000000010100000003c7e3ff0060d848bd31bf53daf1d5fed7d82c9b1121394ee15dcafb07e913a9706d4859000000000100000000572d3ccdcd010000000102000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000002866a69101000000010100000003c7e3ff0060d848bd31bf53daf1d5fed7d82c9b1121394ee15dcafb07e913a9706d4859000000000100"
}
],
"transaction_extensions": []
}
'''
bin=abi.evt_abi_json_to_bin("newdomain",j)
json=abi.evt_abi_bin_to_json("newdomain",bin)
print(bin.data)
print(json)
chain_id=abi.evt_chain_id_from_string("bb248d6319e51ad38502cc8ef8fe607eb5ad2cd0be2bdc0e6e30a506761b8636")
digest=abi.evt_trx_json_to_digest(j2, chain_id)
print(chain_id)
print(digest)
| [
2,
12685,
7656,
28,
40477,
12,
23,
198,
11748,
819,
83,
62,
17914,
198,
198,
17914,
28,
1990,
83,
62,
17914,
13,
1990,
83,
62,
17914,
3419,
198,
73,
28,
81,
7061,
6,
198,
90,
198,
1,
3672,
1298,
366,
9288,
1600,
198,
1,
747,
1... | 1.873253 | 1,073 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-06-13 14:52
# @Author : liupan
# @Site :
# @File : demo2.py
# @Software: PyCharm
import csv
with open('data.csv', 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=' ')
writer.writerow(['id', 'name', 'age'])
writer.writerow(['10001', 'Mike', 20])
writer.writerow(['10002', 'Bob', 22])
writer.writerow(['10003', 'Jordan', 21]) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
13130,
12,
3312,
12,
1485,
1478,
25,
4309,
198,
2,
2488,
13838,
220,
1058,
7649,
... | 2.236842 | 190 |
from __future__ import absolute_import
import logging
import os
import time
import typing
from collections import defaultdict
import pyspark.sql as spark
from pyspark import Row
from pyspark.sql.functions import (
approx_count_distinct,
col,
count,
countDistinct,
desc,
floor,
isnull,
lit,
when,
)
from pyspark.sql.types import (
ArrayType,
BooleanType,
MapType,
NumericType,
StringType,
StructType,
)
from dbnd._core.settings.histogram import HistogramConfig
from dbnd._core.utils import seven
if typing.TYPE_CHECKING:
from typing import Tuple, Dict, List
from targets.value_meta import ValueMetaConf
from pyspark.sql.dataframe import DataFrame
logger = logging.getLogger(__name__)
class SparkHistograms(object):
"""
calculates histograms and stats on spark dataframe.
they're calculated together since we do it per column and we use cache.
"""
def _cache_df_with_parquet_store(self, df, spark_parquet_cache_dir):
""" save dataframe as column-based parquet file to allow fast column queries which histograms depend on """
from dbnd_spark.spark_targets import SparkDataFrameValueType
signature = SparkDataFrameValueType().to_signature(df)
file_name = "dbnd_spark_dataframe_{}.parquet".format(signature)
path = os.path.join(spark_parquet_cache_dir, file_name)
self._temp_parquet_path = path
logger.info("Caching spark dataframe into '%s'.", path)
df.write.parquet(path)
logger.info("Reading spark dataframe from '%s'.", path)
df = df.sql_ctx.sparkSession.read.parquet(path)
return df
def _is_count_in_summary(self, dataframe, column_name):
""" dataframe.summary() returns count only for numeric and string types, otherwise we need to calculate it our own """
column_field = [f for f in dataframe.schema.fields if f.name == column_name][0]
return isinstance(column_field.dataType, (NumericType, StringType))
def _convert_numeric_histogram_collect_to_tuple(
self, value_counts, min_value, max_value
):
# type: (List[Row], float, float) -> Tuple
"""
value_counts is list of rows with each row representing a bucket.
each bucket has: bucket index and number of values.
we convert it to histogram represented as a tuple of 2 lists:
number of values in bucket and bucket boundaries.
"""
bucket_count = 20
counts = [0] * bucket_count
bucket_size = (max_value - min_value) / bucket_count
values = [min_value + i * bucket_size for i in range(bucket_count + 1)]
for row in value_counts:
bucket, count = row
if bucket is None:
continue
if bucket == bucket_count:
# handle edge of last bucket (values equal to max_value will be in bucket n+1 instead of n)
bucket = bucket - 1
counts[bucket] += count
return counts, values
def _calc_spark_categorical_hist_and_stats(self, column_df_list):
"""
all columns in column_df_list should have the same type (e.g. all should be booleans or all strings).
it might not be relevant anymore since we do collect() per column.
keeping it that way for now so we could change it back to collect() once for all columns.
"""
max_buckets = 50
value_counts = []
for column_df in column_df_list:
if self.config.spark_cache_dataframe_column:
column_df_cached = True
column_df.cache()
else:
column_df_cached = False
try:
column_name = column_df.schema.names[0]
self._calc_categorical_column_stats(column_df, column_name)
if column_name not in self._histogram_column_names:
continue
column_value_counts = (
column_df.groupby(column_name)
.count()
.orderBy(desc("count"))
.withColumn("column_name", lit(column_name))
.limit(max_buckets - 1)
)
column_value_counts = column_value_counts.collect()
value_counts.extend(column_value_counts)
finally:
if column_df_cached:
column_df.unpersist()
return value_counts
def _add_others(self, histograms):
""" sum all least significant values (who left out of histogram) to one bucket """
for column_name, histogram in histograms.items():
histogram_sum_count = sum(histogram[0])
others_count = self.stats[column_name]["count"] - histogram_sum_count
if others_count > 0:
histogram[0].append(others_count)
histogram[1].append("_others")
@seven.contextlib.contextmanager
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
19720,
198,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
11748,
279,
893,
20928,
13,
25410,
355,
9009,
198,
198,
6... | 2.338173 | 2,135 |
from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
from os import sys, path
import os,shutil,re
from glob import glob
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
from imp import find_module
try: find_module('numpy')
except: sys.exit('### Error: python module numpy not found')
try: find_module('pyfits')
except: sys.exit('### Error: python module pyfits not found')
try: find_module('pyraf')
except: sys.exit('### Error: python module pyraf not found')
try: find_module('matplotlib')
except: sys.exit('### Error: python module matplotlib not found')
try: find_module('scipy')
except: sys.exit('### Error: python module matplotlib not found')
setup(
name='s3',
version='1.1.0',
author='C.Inserra',
author_email='c.inserra@qub.ac.uk',
classifiers=[
# How mature is this project?
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Stable',
'Intended Audience :: General users',
'Topic :: Astronomy :: photometric corrections',
'Programming Language :: Python :: 2.7',
],
scripts=['bin/STHREE','bin/SNAKE','bin/SNAKELOOP','bin/SMS','bin/SNAP'],
url='https://github.com/cinserra',
license=open('LICENSE.rst').read(),
description='S3 is a package for K and P-correction and synthetic mags',
long_description=open('README.rst').read(),
keywords='K-correction P-correction magnitudes',
install_requires = ['numpy','pyfits','pyraf','matplotlib','scipy'],
packages=['s3'],
package_dir={'':'src'},
package_data = {'s3' : ["metadata/*.txt","metadata/NTT/*.txt","metadata/NOT/*.txt",\
"metadata/PS1/*.txt","metadata/ASIAGO/*.txt","metadata/LCOGT/*.txt",\
"metadata/SKYMAPPER/*.txt","metadata/LT/*.txt","metadata/LSQ/*.txt",\
"metadata/WHT/*.txt","metadata/OGLE/*.txt","metadata/VLT/*.txt"]}
#entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
#}
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
6738,
1233,
26791,
13,
21812,
13,
17350,
1330,
40589,
7036,
62,
50,
3398,
3620,
1546,
198,
6738,
28686,
1330,
25064,
11,
3108,
198,
11748,
28686,
11,
1477,
22602,
11,
260,
198,
6738,
15095,
... | 2.385906 | 894 |
# --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import pybullet as p
import os
import IPython
# adapted from https://github.com/bryandlee/franka_pybullet/tree/ac86319a0b2f6c863ba3c7ee3d52f4f51b2be3bd
if __name__ == "__main__":
robot = Panda(realtime=1)
| [
2,
20368,
22369,
198,
2,
49962,
739,
383,
17168,
13789,
685,
3826,
38559,
24290,
329,
3307,
60,
198,
2,
20368,
22369,
198,
198,
11748,
12972,
15065,
1616,
355,
279,
198,
11748,
28686,
198,
11748,
6101,
7535,
198,
198,
2,
16573,
422,
3... | 3.362069 | 116 |
from pandas import DataFrame
import urllib.error
import urllib.request
from bs4 import BeautifulSoup
# initializes variables used for beautifulsoup
valid = False
postcode = input("Please enter your postal code so we may show you relevant showtime information (no spaces): ")
imdb = "https://www.imdb.com/showtimes/location/CA/" + postcode + "?ref_=sh_lc"
while not valid:
try:
page = urllib.request.urlopen(imdb)
valid = True
except urllib.error.HTTPError:
postcode = input("Please enter a valid postal code: ")
imdb = "https://www.imdb.com/showtimes/location/CA/" + postcode + "?ref_=sh_lc"
soup = BeautifulSoup(page, 'html.parser')
# initializes arrays used for storing scrapped information
movies = []
review = []
runtime = []
release = []
rating = []
genre = []
showtimes = []
movies_list = soup.find_all('div', attrs={'class': 'lister-item mode-grid'})
for movie in movies_list: # uses a loop to extract the information of a movie for all movies playing
a = movie.find('span', attrs={'name': 'alpha'}).attrs['data-value']
b = movie.find('span', attrs={'name': 'user_rating'}).attrs['data-value']
c = movie.find('span', attrs={'name': 'runtime'}).attrs['data-value']
d = movie.find('span', attrs={'name':'release_date'}).attrs['data-value']
try:
e = movie.find('span', attrs={'class': 'certificate'}).string
except AttributeError:
e = ""
f = movie.find('span', attrs={'class': 'genre'}).string.strip()
g = movie.find('div', attrs={'class': 'title'}).a.get('href')
year = get_year(d)
movies.append(a)
review.append(b)
runtime.append(c)
release.append(year)
rating.append(e)
genre.append(f)
showtimes.append("https://www.imdb.com"+g) # formats the string to produce a url based on the link within html file
# provides user a list of movies playing and a method of inputting their selection
print("Here are the movies playing near you: \n")
for x in range(0, len(movies)):
print(str(x+1) + ". {} {}/10 Runtime: {} \n Release Year: {} Genre: {} {}\n"
.format(movies[x], review[x], runtime[x], release[x], genre[x], rating[x]))
print("Please select a movie: ")
selection = get_selection()
while not(isinstance(selection, int)) or selection < 1 or selection > len(movies):
print("Your selection was not valid, please try again: ")
selection = get_selection()
# Creates new soup to access more detailed information about movie after user has made their selection
detailedMovie = showtimes[selection-1]
page2 = urllib.request.urlopen(detailedMovie[:47] + "CA/" + postcode)
soup2 = BeautifulSoup(page2, 'html.parser')
theaters_list = soup2.findAll('div', attrs={'class': 'list detail'})
times = []
print("Showtimes for " + movies[selection-1] + " : \n")
# Finds all the theaters in local area of user
for theater in theaters_list:
odd = theater.findAll('div', attrs={'class', 'list_item odd'})
even = theater.findAll('div', attrs={'class', 'list_item even'})
for x in odd:
print(x.find('span', attrs={'itemprop': 'name'})
.find(text=True, recursive=False))
print(x.find('span', attrs={'itemprop': 'streetAddress'}).string)
times = x.findAll('meta', attrs={'itemprop': 'startDate'})
for y in times:
print(get_time(y.attrs['content']) + ' ', end="", flush=True)
print('\n')
for x in even:
print(x.find('span', attrs={'itemprop': 'name'})
.find(text=True, recursive=False))
print(x.find('span', attrs={'itemprop': 'streetAddress'}).string)
times = x.findAll('meta', attrs={'itemprop': 'startDate'})
for y in times:
print(get_time(y.attrs['content']) + ' ', end="", flush=True)
print('\n')
df = DataFrame({'Movies Playing': movies, 'Rating': review,
'Runtime (min)': runtime, 'Release Year': release})
try:
df.to_excel('showtimes.xlsx', sheet_name='sheet1', index=False)
except PermissionError:
print("There was an error creating the spreadsheet, please make sure"
" the file is not currently open.")
| [
6738,
19798,
292,
1330,
6060,
19778,
198,
11748,
2956,
297,
571,
13,
18224,
198,
11748,
2956,
297,
571,
13,
25927,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
198,
2,
4238,
4340,
9633,
973,
329,
4950,
82,
10486,
198,
12102,
... | 2.633776 | 1,581 |
from tars.helpers.api import CromAPI
def test_crom_api():
"""Simple test cases for the Crom API wrapper."""
api = CromAPI("en")
# Honestly, the tag list seems like the only thing that I'm reasonably guaranteeing
# won't be screwed with in the future.
scp_5000 = api.get_one_page_meta("scp-5000")
assert 'scp' in scp_5000['tags']
oldest_pages_iterator = api.get_all_pages()
oldest_pages_1 = next(oldest_pages_iterator)
assert len(oldest_pages_1) == 100
assert oldest_pages_1[0]['title'] == 'Manage Site'
oldest_pages_2 = next(oldest_pages_iterator)
assert len(oldest_pages_2) == 100
assert oldest_pages_2[0]['title'] == 'SCP-145'
oldest_tales = next(api.get_all_pages(tags=['tale']))
assert oldest_tales[0]['title'] == 'Archived Incident 076-2_682'
nav_pages = next(api.get_all_pages(categories=['nav']))
assert len(nav_pages) == 2
assert nav_pages[0]['title'] == 'Top Bar Menu'
assert nav_pages[1]['title'] == 'Side'
| [
6738,
256,
945,
13,
16794,
364,
13,
15042,
1330,
39131,
17614,
628,
198,
4299,
1332,
62,
66,
398,
62,
15042,
33529,
198,
220,
220,
220,
37227,
26437,
1332,
2663,
329,
262,
39131,
7824,
29908,
526,
15931,
198,
220,
220,
220,
40391,
796... | 2.613577 | 383 |
# 沿每个维度按crop_width裁剪数组 | [
2,
10545,
110,
123,
162,
107,
237,
10310,
103,
163,
119,
112,
41753,
99,
162,
234,
231,
31476,
62,
10394,
32518,
223,
30298,
103,
46763,
108,
163,
119,
226
] | 0.758621 | 29 |
import torch
import os
import pandas as pd
import math
import numpy as np
import dendropy
from Bio import SeqIO
| [
11748,
28034,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
288,
437,
28338,
198,
6738,
16024,
1330,
1001,
80,
9399,
628
] | 3.323529 | 34 |
import pytest
import kopf
from kopf._core.intents.filters import PRESENT
OBJECT_BODY = {
'apiVersion': 'group/version',
'kind': 'singular',
'metadata': {
'name': 'test',
'labels': {
'key': 'value',
},
'annotations': {
'key': 'value',
}
}
}
@pytest.mark.parametrize('optional, expected', [
pytest.param(True, False, id='optional'),
pytest.param(False, True, id='mandatory'),
])
@pytest.mark.parametrize('optional, expected', [
pytest.param(True, False, id='optional'),
pytest.param(False, True, id='mandatory'),
])
@pytest.mark.parametrize('optional, expected', [
pytest.param(True, False, id='optional'),
pytest.param(False, True, id='mandatory'),
])
@pytest.mark.parametrize('labels', [
pytest.param({'key': 'value'}, id='value-matches'),
pytest.param({'key': PRESENT}, id='key-exists'),
])
@pytest.mark.parametrize('optional, expected', [
pytest.param(True, False, id='optional'),
pytest.param(False, False, id='mandatory'),
])
@pytest.mark.parametrize('labels', [
pytest.param({'key': 'othervalue'}, id='value-mismatch'),
pytest.param({'otherkey': PRESENT}, id='key-doesnt-exist'),
])
@pytest.mark.parametrize('optional, expected', [
pytest.param(True, False, id='optional'),
pytest.param(False, True, id='mandatory'),
])
@pytest.mark.parametrize('annotations', [
pytest.param({'key': 'value'}, id='value-matches'),
pytest.param({'key': PRESENT}, id='key-exists'),
])
@pytest.mark.parametrize('optional, expected', [
pytest.param(True, False, id='optional'),
pytest.param(False, False, id='mandatory'),
])
@pytest.mark.parametrize('annotations', [
pytest.param({'key': 'othervalue'}, id='value-mismatch'),
pytest.param({'otherkey': PRESENT}, id='key-doesnt-exist'),
])
| [
11748,
12972,
9288,
198,
198,
11748,
479,
404,
69,
198,
6738,
479,
404,
69,
13557,
7295,
13,
600,
658,
13,
10379,
1010,
1330,
32552,
3525,
198,
198,
9864,
23680,
62,
33,
33076,
796,
1391,
198,
220,
220,
220,
705,
15042,
14815,
10354,
... | 2.42707 | 761 |
# -*- coding:utf-8 -*-
# Copyright 2018 Huawei Technologies Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import sys
import warnings
import os
from openstack import utils
from openstack import connection
utils.enable_logging(debug=True, stream=sys.stdout)
warnings.filterwarnings('ignore')
auth_url = '******'
userDomainId = '******'
projectId = '******'
username = '******'
password = os.getenv('get_secret_code')
conn = connection.Connection(
auth_url=auth_url,
user_domain_id=userDomainId,
project_id=projectId,
username=username,
password=password,
verify=False
)
if __name__ == '__main__':
# test_public_ips(conn)
# test_get_public_ip(conn)
# test_create_public_ip(conn)
# test_update_public_ip(conn)
# test_delete_public_ip(conn)
# test_find_public_ip(conn)
pass
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
15069,
2864,
43208,
21852,
1766,
1539,
43,
8671,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
407... | 3.011211 | 446 |
from .func import Func | [
6738,
764,
20786,
1330,
11138,
66
] | 3.666667 | 6 |
import xml.etree.ElementTree
import json
import sys
import os
import hashlib
from collections import defaultdict
from bs4 import BeautifulSoup
if '__file__' in globals():
sys.path.insert(0, os.path.join(os.path.abspath(__file__), 'scripts'))
else:
sys.path.insert(0, os.path.join(os.path.abspath(os.getcwd()), 'scripts'))
from discovery_setup_utils import curdir, makeSurePathExists # noqa
# 'DATA_TYPE' should be the same as the data set downloaded
DATA_TYPE = 'travel'
# INPUT_DIR should correspond to the location of the extracted stackexchange
# by default, evaluates to <current_project_dir>/data/<DATA_TYPE>
INPUT_DIR = os.path.abspath(
os.path.join(os.path.abspath(curdir), '..', 'data', DATA_TYPE)
)
# OUTPUT_DIR should correspond where you want your documents written to disk
# by default, evaluates to <INPUT_DIR>/json
OUTPUT_DIR = os.path.abspath(os.path.join(INPUT_DIR, 'json'))
makeSurePathExists(OUTPUT_DIR)
def genId(filename):
"""
Generates an identifier suitable for ingestion
Based off of the Watson Discovery Tooling method of generating IDs
"""
return hashlib.md5(filename).hexdigest()
def getUsers(usersXML, OUTPUT_DIR):
"""
Returns a dictionary of user ID to dictionary of user properties:
{
"<userid_int>": {
"reputation": <reputation_int>,
"displayName": <displayname_str>
}
}
"""
print('Starting getUsers...')
USERS_FILE_NAME = 'users.json'
USERS_FILE_PATH = os.path.abspath(
os.path.join(OUTPUT_DIR, '..', USERS_FILE_NAME)
)
if os.path.isfile(USERS_FILE_PATH):
print('Loading users from file cache...')
with open(USERS_FILE_PATH, 'r') as usersFile:
return json.loads(usersFile.read())
users_to_metadata = {}
for user in usersXML.findall('row'):
reputation = int(user.get('Reputation'))
name = user.get('DisplayName')
users_to_metadata[user.get('Id')] = {'reputation': reputation,
'displayName': name}
# write the file for later runs
user_to_metadata_str = json.dumps(users_to_metadata).replace('\n', '')
with open(USERS_FILE_PATH, 'w') as usersFile:
usersFile.write(user_to_metadata_str + '\n')
return users_to_metadata
def getVotes(votesXML, OUTPUT_DIR):
"""
Returns a dictionary of posts to vote types with counts of each type:
{
"<post_id_str>": {
"<vote_type_id_str>": <vote_count_int>,
"<vote_type_id_str>": <vote_count_int>,
...
}
}
"""
print('Starting getVotes...')
VOTES_FILE_NAME = 'votes.json'
VOTES_FILE_PATH = os.path.abspath(
os.path.join(OUTPUT_DIR, '..', VOTES_FILE_NAME)
)
if os.path.isfile(VOTES_FILE_PATH):
print('Loading votes from file cache...')
with open(VOTES_FILE_PATH, 'r') as votesFile:
return json.loads(votesFile.read())
# Types of votes
# Id | Name
# -- | ----------------------
# 1 | AcceptedByOriginator
# 2 | UpMod
# 3 | DownMod
# 4 | Offensive
# 5 | Favorite
# 6 | Close
# 7 | Reopen
# 8 | BountyStart
# 9 | BountyClose
# 10 | Deletion
# 11 | Undeletion
# 12 | Spam
# 15 | ModeratorReview
# 16 | ApproveEditSuggestion
initial_vote_types = {'1': 0,
'2': 0,
'3': 0,
'4': 0,
'5': 0,
'6': 0,
'7': 0,
'8': 0,
'9': 0,
'10': 0,
'11': 0,
'12': 0,
'15': 0,
'16': 0}
posts_to_votes = defaultdict(dict)
for vote in votesXML.findall('row'):
voteTypeId = vote.get('VoteTypeId')
if voteTypeId in initial_vote_types:
postId = vote.get('PostId')
if postId in posts_to_votes:
newCount = posts_to_votes[postId][voteTypeId] + 1
posts_to_votes[postId][voteTypeId] = newCount
else:
posts_to_votes[postId] = initial_vote_types.copy()
posts_to_votes[postId][voteTypeId] = 1
# write the file for later runs
posts_to_votes_str = json.dumps(posts_to_votes).replace('\n', '')
with open(VOTES_FILE_PATH, 'w') as votesFile:
votesFile.write(posts_to_votes_str + '\n')
return posts_to_votes
def validAnswer(item):
"""
determine whether or not the item has the required keys to write to file
"""
keys = {'id', 'text', 'question', 'question_metadata', 'answer_metadata',
'author_metadata', 'user_metadata'}
return keys <= set(item)
def writeAnswerFile(file_name, item, OUTPUT_DIR):
"""
writes the item as a document to be used for ingestion
"""
if validAnswer(item):
with open(os.path.join(OUTPUT_DIR, file_name), 'w') as answer_file:
answer_file.write(json.dumps(item).replace('\n', '') + '\n')
else:
print('Item missing required keys!')
print(json.dumps(item, indent=4))
def writeDocuments(postsXML, votesDict, usersDict, OUTPUT_DIR):
"""
splits the posts XML file into individual answer units by pairing 1 answer
to its corresponding question to prepare for document ingestion
(thus the question will be duplicated for multiple answers)
"""
documents = {}
for post in postsXML.findall('row'):
# Types of posts
# Id | Name
# -- | ---------
# 1 | Question
# 2 | Answer
postTypeId = int(post.get('PostTypeId'))
if postTypeId == 1:
handleQuestion(documents, post, OUTPUT_DIR)
elif postTypeId == 2:
handleAnswer(documents, post, votesDict, usersDict, OUTPUT_DIR)
print('Getting Posts...')
postsXML = xml.etree.ElementTree.parse(
os.path.join(INPUT_DIR, 'Posts.xml')
).getroot()
print('Posts loaded')
print('Getting Votes...')
votesXML = xml.etree.ElementTree.parse(
os.path.join(INPUT_DIR, 'Votes.xml')
).getroot()
votesDict = getVotes(votesXML, OUTPUT_DIR)
print('Votes loaded')
print('Getting Users...')
usersXML = xml.etree.ElementTree.parse(
os.path.join(INPUT_DIR, 'Users.xml')
).getroot()
usersDict = getUsers(usersXML, OUTPUT_DIR)
print('Users loaded')
print('Begin writing documents...')
writeDocuments(postsXML, votesDict, usersDict, OUTPUT_DIR)
print("Documents written to %s" % OUTPUT_DIR)
| [
11748,
35555,
13,
316,
631,
13,
20180,
27660,
198,
11748,
33918,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
12234,
8019,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
198,
361,
705,
834... | 2.180349 | 3,094 |
import numpy as np
import cv2 as cv
try:
log = open('log.txt',"w")
except:
print( "No se puede abrir el archivo log")
#Contadores de entrada y salida
cnt_up = 0
cnt_down = 0
#Fuente de video
#cap = cv.VideoCapture(0)
cap = cv.VideoCapture('Test Files/videos/TestVideo.avi')
#Imprime las propiedades de captura a consola
for i in range(19):
print( i, cap.get(i))
if cap.isOpened():
h = cap.get(cv.CAP_PROP_FRAME_HEIGHT) # float
w = cap.get(cv.CAP_PROP_FRAME_WIDTH) # float
#Calculate Gx and Gy for grid lines
gX = int(w/3)
gY = int(h/3)
gx1 = gX
gy1 = gY
gx2 = gX*2
gy2 = gY*2
gx3 = int(w)
gy3 = int(h)
frameArea = h*w
areaTH = frameArea/250
print( 'Area Threshold', areaTH)
#Lineas de entrada/salida
line_up = int(2*(h/5))
line_down = int(3*(h/5))
up_limit = int(1*(h/5))
down_limit = int(4*(h/5))
#Substractor de fondo
fgbg = cv.createBackgroundSubtractorMOG2(detectShadows = True)
#Elementos estructurantes para filtros morfoogicos
kernelOp = np.ones((3,3),np.uint8)
kernelOp2 = np.ones((5,5),np.uint8)
kernelCl = np.ones((11,11),np.uint8)
#Variables
font = cv.FONT_HERSHEY_SIMPLEX
persons = []
max_p_age = 5
pid = 1
color1 = (255, 255, 255)
color2 = (0, 0, 255)
cg1 = color1
cg2 = color1
cg3 = color1
cg4 = color1
cg5 = color1
cg6 = color1
cg7 = color1
cg8 = color1
cg9 = color1
while(cap.isOpened()):
#Lee una imagen de la fuente de video
ret, frame = cap.read()
#Drawing the grid
# cv.line(frame, (0, gy1), (gx3, gy1), (150, 0, 200), 2)
# cv.line(frame, (0, gy2), (gx3, gy2), (150, 0, 200), 2)
# cv.line(frame, (gx1, 0), (gx1, gy3), (150, 0, 200), 2)
# cv.line(frame, (gx2, 0), (gx2, gy3), (150, 0, 200), 2)
# Row 1
cv.rectangle(frame, (0, 0), (gx1, gy1), cg1, 2)
cv.rectangle(frame, (gx1, 0), (gx2, gy1), cg2, 2)
cv.rectangle(frame, (gx2, 0), (gx3, gy1), cg3, 2)
# Row 2
cv.rectangle(frame, (0, gy1), (gx1, gy2), cg4, 2)
cv.rectangle(frame, (gx1, gy1), (gx2, gy2), cg5, 2)
cv.rectangle(frame, (gx2, gy1), (gx3, gy2), cg6, 2)
# Row 3
cv.rectangle(frame, (0, gy2), (gx1, gy3), cg7, 2)
cv.rectangle(frame, (gx1, gy2), (gx2, gy3), cg8, 2)
cv.rectangle(frame, (gx2, gy2), (gx3, gy3), cg9, 2)
for i in persons:
i.age_one() #age every person one frame
#Aplica substraccion de fondo
fgmask = fgbg.apply(frame)
fgmask2 = fgbg.apply(frame)
#Binariazcion para eliminar sombras (color gris)
try:
ret,imBin= cv.threshold(fgmask,200,255,cv.THRESH_BINARY)
ret,imBin2 = cv.threshold(fgmask2,200,255,cv.THRESH_BINARY)
#Opening (erode->dilate) para quitar ruido.
mask = cv.morphologyEx(imBin, cv.MORPH_OPEN, kernelOp)
mask2 = cv.morphologyEx(imBin2, cv.MORPH_OPEN, kernelOp)
#Closing (dilate -> erode) para juntar regiones blancas.
mask = cv.morphologyEx(mask , cv.MORPH_CLOSE, kernelCl)
mask2 = cv.morphologyEx(mask2, cv.MORPH_CLOSE, kernelCl)
except:
print('EOF')
print( 'UP:',cnt_up)
print ('DOWN:',cnt_down)
break
# RETR_EXTERNAL returns only extreme outer flags. All child contours are left behind.
contours0, hierarchy = cv.findContours(mask2,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE)
for cnt in contours0:
area = cv.contourArea(cnt)
if area > areaTH:
#Falta agregar condiciones para multipersonas, salidas y entradas de pantalla.
M = cv.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
cv.circle(frame,(cx,cy), 5, (0,0,255), -1)
rectSize = 50
cv.rectangle(frame,(cx+rectSize,cy+rectSize), (cx-rectSize,cy-rectSize), (0,0,255), 2)
text = cx, cy
cv.putText(frame, str(text), (cx,cy), font, 0.5, (255,0,0), 1, cv.LINE_AA)
# for ccx in range(1, 4):
# for ccy in range(1, 4):
if cx > 0 and cx < gx1 and cy > 0 and cy < gy1:
cg1 = color2
else:
cg1 = color1
str_up = 'UP: '+ str(cnt_up)
str_down = 'DOWN: '+ str(cnt_down)
cv.imshow('Frame',frame)
cv.imshow('Mask',mask)
k = cv.waitKey(30) & 0xff
if k == 27:
break
#END while(cap.isOpened())
log.flush()
log.close()
cap.release()
cv.destroyAllWindows()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
198,
28311,
25,
198,
220,
220,
220,
2604,
796,
1280,
10786,
6404,
13,
14116,
40264,
86,
4943,
198,
16341,
25,
198,
220,
220,
220,
3601,
7,
366,
2949,
384,
... | 1.909329 | 2,294 |
""" Tests for settings utils """
from pyramid_duh.settings import asdict
try:
import unittest2 as unittest # pylint: disable=F0401
except ImportError:
import unittest
class TestAsDict(unittest.TestCase):
""" Tests for asdict """
def test_default(self):
""" If provided value is a dict, return that """
self.assertEqual(asdict({}), {})
def test_default_none(self):
""" If provided value is None, return {} """
self.assertEqual(asdict(None), {})
def test_convert(self):
""" Convert a string to a dict """
setting = """
a = b
c=d
"""
data = {
'a': 'b',
'c': 'd',
}
self.assertEqual(asdict(setting), data)
def test_convert_with_equals(self):
""" Properly converts strings that have multiple equals signs """
setting = """
a = KpxYAw==
b = 1+2=3
"""
data = {
'a': 'KpxYAw==',
'b': '1+2=3',
}
self.assertEqual(asdict(setting), data)
def test_convert_value(self):
""" Run a function on dict values """
setting = """
foo = 2
bar = 5
"""
data = {
'foo': 2,
'bar': 5,
}
self.assertEqual(asdict(setting, int), data)
| [
37811,
30307,
329,
6460,
3384,
4487,
37227,
198,
6738,
27944,
62,
646,
71,
13,
33692,
1330,
355,
11600,
628,
198,
28311,
25,
198,
220,
220,
220,
1330,
555,
715,
395,
17,
355,
555,
715,
395,
220,
1303,
279,
2645,
600,
25,
15560,
28,
... | 2.051908 | 655 |
from tika import parser
from dateutil.parser import parser as date_parser
from PortugueseParserInfo import PortugueseParserInfo
| [
6738,
256,
9232,
1330,
30751,
198,
6738,
3128,
22602,
13,
48610,
1330,
30751,
355,
3128,
62,
48610,
198,
6738,
21813,
46677,
12360,
1330,
21813,
46677,
12360,
628,
628
] | 4.678571 | 28 |
from django.contrib import admin
from .models import Unit, Skill, Gear, Guild, Category
# noinspection PyMethodMayBeStatic,PyUnusedLocal
@admin.register(Unit)
@admin.register(Gear)
@admin.register(Skill)
@admin.register(Category)
@admin.register(Guild)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
11801,
11,
16023,
11,
10740,
11,
16446,
11,
21743,
628,
198,
2,
645,
1040,
14978,
9485,
17410,
6747,
3856,
45442,
11,
20519,
3118,
1484,
14565,
628,
198,
... | 3.068966 | 87 |
import json
from asyncio import AbstractEventLoop, get_event_loop
from typing import Optional, NoReturn
from aiohttp import TCPConnector, ClientSession
| [
11748,
33918,
198,
6738,
30351,
952,
1330,
27741,
9237,
39516,
11,
651,
62,
15596,
62,
26268,
198,
6738,
19720,
1330,
32233,
11,
1400,
13615,
198,
198,
6738,
257,
952,
4023,
1330,
23633,
34525,
11,
20985,
36044,
628
] | 4.162162 | 37 |
import numpy as np
def laplace_numpy(image):
"""Laplace operator in NumPy for 2D images."""
laplacian = (
image[:-2, 1:-1]
+ image[2:, 1:-1]
+ image[1:-1, :-2]
+ image[1:-1, 2:]
- 4 * image[1:-1, 1:-1]
)
thresh = np.abs(laplacian) > 0.05
return thresh
def laplace_loops(image):
"""Laplace operator for 2D images."""
h = image.shape[0]
w = image.shape[1]
laplacian = np.empty((h - 2, w - 2), np.uint8)
for i in range(1, h - 1):
for j in range(1, w - 1):
laplacian[i - 1, j - 1] = (
np.abs(
image[i - 1, j]
+ image[i + 1, j]
+ image[i, j - 1]
+ image[i, j + 1]
- 4 * image[i, j]
)
> 0.05
)
return laplacian
| [
11748,
299,
32152,
355,
45941,
628,
198,
4299,
8591,
5372,
62,
77,
32152,
7,
9060,
2599,
198,
220,
220,
220,
37227,
14772,
5372,
10088,
287,
31835,
20519,
329,
362,
35,
4263,
526,
15931,
198,
220,
220,
220,
8591,
489,
330,
666,
796,
... | 1.659696 | 526 |
import pkg_resources
import os.path
from PIL import ImageFont
def available():
"""
Returns list of available font names.
"""
names = []
for f in pkg_resources.resource_listdir('ev3dev.fonts', ''):
name, ext = os.path.splitext(os.path.basename(f))
if ext == '.pil':
names.append(name)
return sorted(names)
def load(name):
"""
Loads the font specified by name and returns it as an instance of
`PIL.ImageFont <http://pillow.readthedocs.io/en/latest/reference/ImageFont.html>`_
class.
"""
try:
pil_file = pkg_resources.resource_filename('ev3dev.fonts', '{}.pil'.format(name))
pbm_file = pkg_resources.resource_filename('ev3dev.fonts', '{}.pbm'.format(name))
return ImageFont.load(pil_file)
except FileNotFoundError:
raise Exception('Failed to load font "{}". '.format(name) +
'Check ev3dev.fonts.available() for the list of available fonts')
| [
11748,
279,
10025,
62,
37540,
198,
11748,
28686,
13,
6978,
198,
6738,
350,
4146,
1330,
7412,
23252,
198,
198,
4299,
1695,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
16409,
1351,
286,
1695,
10369,
3891,
13,
198,
220,
220,
22... | 2.467866 | 389 |
# https://tailwindcss.com
| [
2,
3740,
1378,
13199,
7972,
25471,
13,
785,
628,
628
] | 2.9 | 10 |
# Generated by Django 3.0.2 on 2020-06-16 14:06
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
17,
319,
12131,
12,
3312,
12,
1433,
1478,
25,
3312,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import os
import sys
import launch
import launch_ros.actions
if __name__ == '__main__':
generate_launch_description()
| [
11748,
28686,
198,
11748,
25064,
198,
198,
11748,
4219,
198,
11748,
4219,
62,
4951,
13,
4658,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
7716,
62,
35681,
62,
11213,
3419,
198
] | 3.15 | 40 |
from os import getenv as env
from urllib.parse import quote
from datetime import datetime
from pytz import timezone
import json
from utils import (
TIMESTAMP_FORMAT,
retrieve_schedule,
schedule_days,
schedule_match,
PipelineStatus,
ZoomStatus,
)
import logging
logger = logging.getLogger()
STACK_NAME = env("STACK_NAME")
PRETTY_TIMESTAMP_FORMAT = "%A, %B %d, %Y at %-I:%M%p"
SHORT_TIMESTAMP_FORMAT = "%m/%d/%y %-I:%M%p"
LOCAL_TIME_ZONE = env("LOCAL_TIME_ZONE")
OC_CLUSTER_NAME = env("OC_CLUSTER_NAME")
# Slack places an upper limit of 50 UI blocks per message
# so we must limit the number of records per message
# Should be a multiple of RESULTS_PER_REQUEST
MAX_RECORDS_PER_MSG = 6
RESULTS_PER_REQUEST = 2
"""
Slack results blocks
"""
"""
Helpers
"""
"""
Status descriptions
"""
| [
6738,
28686,
1330,
651,
24330,
355,
17365,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
9577,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
12972,
22877,
1330,
640,
11340,
198,
11748,
33918,
198,
6738,
3384,
4487,
1330,
357,
198,
2... | 2.625397 | 315 |
if __name__ == '__main__':
from argparse import ArgumentParser
import fileinput
parser = ArgumentParser()
parser.add_argument('--kind', choices=['sexism', 'racism'])
args = parser.parse_args()
print('doc_id\ttext\tis_hate')
for i, line in enumerate(fileinput.input('-')):
line = line.strip()
if not line:
continue
_, tweet, cls = line.split('\t')
cls = '1' if cls in (args.kind, 'both') else '0'
print(i, tweet, cls, sep='\t')
| [
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
220,
220,
220,
422,
1822,
29572,
1330,
45751,
46677,
198,
220,
220,
220,
1330,
2393,
15414,
628,
220,
220,
220,
30751,
796,
45751,
46677,
3419,
198,
220,
220,
220,
30... | 2.290179 | 224 |
from sfaira.versions.topologies.mouse.embedding.ae import AE_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.linear import LINEAR_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.nmf import NMF_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vae import VAE_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vaeiaf import VAEIAF_TOPOLOGIES
from sfaira.versions.topologies.mouse.embedding.vaevamp import VAEVAMP_TOPOLOGIES
| [
6738,
264,
22043,
64,
13,
47178,
13,
4852,
5823,
13,
35888,
13,
20521,
12083,
13,
3609,
1330,
25603,
62,
35222,
33462,
11015,
198,
6738,
264,
22043,
64,
13,
47178,
13,
4852,
5823,
13,
35888,
13,
20521,
12083,
13,
29127,
1330,
48920,
1... | 3.059603 | 151 |
from fastapi import APIRouter, Query, Response, Request
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
import json
from pydantic import BaseModel
import s3fs
from typing import Optional, TypedDict
from .backend import *
router = APIRouter()
s3 = s3fs.S3FileSystem(anon=False, client_kwargs={"region_name": "us-east-1"})
HEADERS = {
"XDODS-Server": "opendap/3.7",
"Accept-Ranges": "bytes",
"Connection": "close"
}
@router.get("/parameters", tags=["dap"], description="Query a dataset's properties",
summary="parameters", response_model=DapParameterResponse,
responses={200: {"content": {"application/json": {}}, "description": "Successful Response"}})
@router.get("/{path:path}.das", tags=["dap"], description="Request a DAS response", summary="DAS")
@router.get("/{path:path}.dds", tags=["dap"], description="Request a DDS response", summary="DDS")
@router.get("/{path:path}.dods", tags=["dap"], description="Request a binary response", summary="DODS")
| [
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
43301,
11,
18261,
11,
19390,
198,
6738,
3049,
15042,
13,
12685,
375,
364,
1330,
33918,
540,
62,
12685,
12342,
198,
6738,
3049,
15042,
13,
16733,
274,
1330,
19449,
31077,
198,
11748,
33918,
... | 2.873278 | 363 |
import sys
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import measure
from skimage.measure import regionprops
import matplotlib.patches as patches
from skimage.color import rgb2gray
from skimage.io import imread
from skimage.filters import threshold_otsu
import pytesseract
from PIL import Image
import imutils
from tasks import *
plate_like_objects = []
filename = sys.argv[1]
# Image converted to binary
car_image = imread(filename, as_gray=True)
print(car_image.shape)
gray_car_image = car_image*255
# thershold value obtained using Otsu's method
threshold_value = threshold_otsu(gray_car_image)
binary_car_image = gray_car_image > threshold_value
# get all the connected regions and group them together
label_image = measure.label(binary_car_image)
# constraints on maximum and minimum values on width, height
plate_dimensions = (0.04*label_image.shape[0], 0.5*label_image.shape[0], 0.2*label_image.shape[1], 0.6*label_image.shape[1])
min_height, max_height, min_width, max_width = plate_dimensions
plate_objects_cordinates = []
fig, (ax1) = plt.subplots(1)
ax1.imshow(gray_car_image, cmap="gray")
# regionprops creates a list of properties of all the labelled regions
for region in regionprops(label_image):
if region.area < 50:
#if the region is very small
continue
# the bounding box coordinates
min_row, min_col, max_row, max_col = region.bbox
region_height = max_row - min_row
region_width = max_col - min_col
# checking the conditions of a typical license plate
if region_height >= min_height and region_height <= max_height and region_width >= min_width and region_width <= max_width and region_width > region_height:
plate_like_objects.append(gray_car_image[min_row:max_row,
min_col:max_col])
plate_objects_cordinates.append((min_row, min_col,
max_row, max_col))
rectBorder = patches.Rectangle((min_col, min_row), max_col - min_col, max_row - min_row, edgecolor="red",
linewidth=2, fill=False)
# red rectangular border added
ax1.add_patch(rectBorder)
Cropped = gray_car_image[min_row:max_row, min_col:max_col]
# text = pytesseract.image_to_string(Cropped, config='--psm 11')
# print("Predicted Number by pytessaract : ",text)
plt.show()
modelName = 'my_model.npy'
nn1 = nn.NeuralNetwork(36, 0.001, 200, 10)
nn1.addLayer(FullyConnectedLayer(400, 50, "relu"))
nn1.addLayer(FullyConnectedLayer(50, 36, "softmax"))
model = np.load(modelName,allow_pickle=True)
k,i = 0,0
for l in nn1.layers:
if type(l).__name__ != "AvgPoolingLayer" and type(l).__name__ != "FlattenLayer":
nn1.layers[i].weights = model[k]
nn1.layers[i].biases = model[k+1]
k+=2
i+=1
print("Model Loaded... ")
list_of_plates = [] # list of characters in all paltes
list_of_columns = [] # to re-order characters as they are in LP
for lp in plate_like_objects:
# invert image
license_plate = (255-lp)
# reaply threshold on the extracted region
threshold_value = threshold_otsu(license_plate)
license_plate = license_plate > threshold_value
labelled_plate = measure.label(license_plate)
fig, ax1 = plt.subplots(1)
license_plate = rgb2gray(license_plate)
ax1.imshow(license_plate, cmap="gray")
# character dimension constraints
character_dimensions = (0.3*license_plate.shape[0], 1.0*license_plate.shape[0], 0.01*license_plate.shape[1], 0.6*license_plate.shape[1])
min_height, max_height, min_width, max_width = character_dimensions
characters = []
column_list = []
for regions in regionprops(labelled_plate):
y0, x0, y1, x1 = regions.bbox
region_height = y1 - y0
region_width = x1 - x0
if region_height > min_height and region_height < max_height and region_width > min_width and region_width < max_width:
roi = license_plate[y0:y1, x0:x1]
# draw a red bordered rectangle over the character.
rect_border = patches.Rectangle((x0, y0), x1 - x0, y1 - y0, edgecolor="red",
linewidth=2, fill=False)
ax1.add_patch(rect_border)
# resize the characters to 20X20 and then append each character into the characters list
resized_char = Image.fromarray(roi).resize((20, 20))
characters.append(resized_char)
# to keep track of the arrangement of the characters(based on x-coordinate)
column_list.append(x0)
list_of_plates.append(characters)
list_of_columns.append(column_list)
plt.show()
list_of_numbers = []
for i in range(len(list_of_plates)):
characters = list_of_plates[i]
plate_num = []
for resized_char in characters:
roi = np.array(resized_char)
# reshape to an array as one input
roi = roi.reshape((1,400))
# predict result using neural network
valActivations = nn1.feedforward(roi)
# get the class with highest prediction
pred = np.argmax(valActivations[-1], axis=1)
# check with threshold to remove non-characters
if(valActivations[-1][0][pred]<0.5):
plate_num.append('')
continue
if(pred<10):
plate_num.append(str(pred[0]))
else:
plate_num.append(str(chr(65+pred[0]-10)))
column = np.array(list_of_columns[i])
# sort characters as they are in LP
sort_idx = np.argsort(column)
plate_num = np.array(plate_num)[sort_idx]
# output licence plate number
plate_num = "".join(plate_num)
list_of_numbers.append(plate_num)
print('Predictions - ',end=' ')
print(list_of_numbers)
final_num = sorted(list_of_numbers, key=len)
print('Final Licence plate - ' + final_num[-1]) | [
11748,
25064,
198,
11748,
269,
85,
17,
220,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
1341,
9060,
13,
35636,
1330,
47558,
198,
6738,
1341,
9060,
1330,
3953,
198,
6738,
... | 2.417413 | 2,458 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 herrlich10@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function, division, absolute_import, unicode_literals
import sys, os, shlex, time, textwrap, re
import subprocess, multiprocessing, queue, threading, ctypes, uuid
import numpy as np
__author__ = 'herrlich10 <herrlich10@gmail.com>'
__version__ = '0.1.7'
# The following are copied from six
# =================================
if sys.version_info[0] == 3:
string_types = (str,)
from io import StringIO
else:
string_types = (basestring,)
from StringIO import StringIO
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
return wrapper
# =================================
def format_duration(duration, format='standard'):
'''Format duration (in seconds) in a more human friendly way.
'''
if format == 'short':
units = ['d', 'h', 'm', 's']
elif format == 'long':
units = [' days', ' hours', ' minutes', ' seconds']
else: # Assume 'standard'
units = [' day', ' hr', ' min', ' sec']
values = [int(duration//86400), int(duration%86400//3600), int(duration%3600//60), duration%60]
for K in range(len(values)): # values[K] would be the first non-zero value
if values[K] > 0:
break
formatted = ((('%d' if k<len(values)-1 else '%.3f') % values[k]) + units[k] for k in range(len(values)) if k >= K)
return ' '.join(formatted)
def cmd_for_exec(cmd, shell=False):
''' Format cmd appropriately for execution according to whether shell=True.
Split a cmd string into a list, if shell=False.
Join a cmd list into a string, if shell=True.
Do nothing to callable.
Parameters
----------
cmd : str, list, or callable
shell : bool
'''
# If shell=kwargs, its true value is inferred.
if isinstance(shell, dict):
shell = ('shell' in shell and shell['shell'])
if not callable(cmd):
if shell: # cmd string is required
if not isinstance(cmd, string_types):
cmd = ' '.join(cmd)
else: # cmd list is required
if isinstance(cmd, string_types):
cmd = shlex.split(cmd) # Split by space, preserving quoted substrings
return cmd
def cmd_for_disp(cmd):
'''Format cmd for printing.
Parameters
----------
cmd : str, list, or callable
'''
if not callable(cmd):
if isinstance(cmd, string_types):
cmd = shlex.split(cmd) # Remove insignificant whitespaces
cmd = ' '.join(shlex.quote(s) for s in cmd)
return cmd
ERROR_PATTERN = r'error|^\*{2}\s'
def check_output_for_errors(output, error_pattern=None, error_whitelist=None, verbose=1, label=''):
'''
User can skip error checking by setting error_pattern=''
'''
if error_pattern is None:
error_pattern = ERROR_PATTERN
n_errors = 0
if error_pattern != '': # User can skip error checking by setting error_pattern=''
if isinstance(error_pattern, string_types): # User can provide compiled regex if case sensitivity is desired
error_pattern = re.compile(error_pattern, re.IGNORECASE)
if isinstance(error_whitelist, string_types):
error_whitelist = re.compile(error_whitelist, re.IGNORECASE)
for line in output:
if error_pattern.search(line) and (error_whitelist is None or not error_whitelist.search(line)):
if verbose > 0:
print(label, line, end='')
n_errors += 1
return n_errors
def run(cmd, check=True, error_pattern=None, error_whitelist=None, goal_pattern=None, shell=False, verbose=2):
'''Run an external command line.
This function is similar to subprocess.run introduced in Python 3.5, but
provides a slightly simpler and perhaps more convenient API.
Parameters
----------
cmd : str or list
'''
cmd = cmd_for_exec(cmd, shell=shell)
cmd_str = cmd_for_disp(cmd)
if verbose > 0:
print('>>', cmd_str)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=shell)
res = {'cmd': cmd_str, 'pid': p.pid, 'output': [], 'start_time': time.time()}
for line in iter(p.stdout.readline, b''): # The 2nd argument is sentinel character (there will be no ending empty line)
res['output'].append(line.decode('utf-8'))
if verbose > 1:
print(res['output'][-1], end='')
p.stdout.close() # Notify the child process that the PIPE has been broken
res['returncode'] = p.wait()
res['stop_time'] = time.time()
if verbose > 0:
print('>> Command finished in {0}.'.format(format_duration(res['stop_time'] - res['start_time'])))
if check and (res['returncode'] or check_output_for_errors(res['output'], error_pattern=error_pattern,
error_whitelist=error_whitelist, verbose=verbose)):
print('>> Please pay attention to the above errors.')
raise RuntimeError(f'Error occurs when executing the following command (returncode={p.returncode}):\n{cmd_str}')
if check and not check_output_for_goal(res['output'], goal_pattern=goal_pattern):
raise RuntimeError(f'Expected goal pattern "{goal_pattern}" does not found! Something must be wrong!')
return res
STDOUT = sys.stdout
STDERR = sys.stderr
class PooledCaller(object):
'''
Execute multiple command line programs, as well as python callables,
asynchronously and parallelly across a pool of processes.
'''
def run(self, cmd, *args, _depends=None, _retry=None, _dispatch=False, _error_pattern=None, _error_whitelist=None, _suppress_warning=False, _block=False, **kwargs):
'''Asynchronously run command or callable (queued execution, return immediately).
See subprocess.Popen() for more information about the arguments.
Multiple commands can be separated with ";" and executed sequentially
within a single subprocess in linux/mac, only if shell=True.
Python callable can also be executed in parallel via multiprocessing.
Note that although return values of the callable are retrieved via PIPE,
sometimes it could be advantageous to directly save the computation
results into a shared file (e.g., an HDF5 file), esp. when they're large.
In the later case, a proper lock mechanism via multiprocessing.Lock()
is required.
Parameters
----------
cmd : list, str, or callable
Computation in command line programs is handled with subprocess.
Computation in python callable is handled with multiprocessing.
shell : bool
If provided, must be a keyword argument.
If shell is True, the command will be executed through the shell.
*args :
If cmd is a callable, `*args` are passed to the callable as its arguments.
**kwargs :
If cmd is a callable, `**kwargs` are passed to the callable as its keyword arguments.
If cmd is a list or str, `**kwargs` are passed to subprocess.Popen().
_depends : list
A list of jobs (identified by their uuid) that have to be done
before this job can be scheduled.
_retry: int
Number of retry before accepting failure (if detecting non-zero return code).
_dispatch : bool
Dispatch the job immediately, which will run in the background without blocking.
_error_pattern : str
_suppress_warning : bool
_block : bool
if True, call wait() internally and block.
Returns
-------
_uuid : str
The uuid of current job (which can be used as future jobs' dependency)
'''
cmd = cmd_for_exec(cmd, shell=kwargs)
_uuid = uuid.uuid4().hex[:8]
if _retry is None:
_retry = 0
self.cmd_queue.append((self._n_cmds, cmd, args, kwargs, _uuid, _depends, _retry,
_error_pattern, _error_whitelist, _suppress_warning))
self._n_cmds += 1 # Accumulate by each call to run(), and reset after wait()
if _dispatch:
self.dispatch()
if _block:
self.wait()
return _uuid
def wait(self, pool_size=None, return_codes=False, return_jobs=False):
'''
Wait for all jobs in the queue to finish.
Returns
-------
return_values : list
Return values of executed python callable. Always `None` for command.
codes : list (only when return_codes=True)
The return code of the child process for each job.
jobs : list (only when return_jobs=True)
Detailed information about each child process, including captured stdout and stderr.
'''
if isinstance(pool_size, string_types) and pool_size == 'balanced':
# Make sure each volley has roughly equal number of jobs
n = len(self.cmd_queue)
pool_size = int(np.ceil(n/np.ceil(n/self.pool_size)))
if pool_size is not None:
# Allow temporally adjust pool_size for current batch of jobs
old_size = self.pool_size
self.pool_size = pool_size
start_time = time.time()
ress = []
while len(self.ps) > 0 or len(self.cmd_queue) > 0:
# Dispatch jobs if possible
self.dispatch()
# Poll workers' state
for p in self.ps:
job = self._pid2job[p.pid]
if isinstance(p, subprocess.Popen):
if p.poll() is not None: # If the process is terminated
job['stop_time'] = time.time()
job['returncode'] = p.returncode
job['speed_up'].set()
job['watcher'].join() # Retrieve all remaining output before closing PIPE
p.stdout.close() # Notify the child process that the PIPE has been broken
self.ps.remove(p)
if self.verbose > 0:
print('>> job#{0} finished (return {1}) in {2}.'.format(job['idx'], job['returncode'], format_duration(job['stop_time']-job['start_time'])))
if job['returncode'] != 0: # Failed
if job['retry'] > 0: # Need retry
# Insert a new cmd (as if we automatically run it again)
self.cmd_queue.append((self._n_cmds, job['cmd'], job['args'], job['kwargs'], job['uuid'],
job['depends'], job['retry']-1, job['error_pattern'], job['suppress_warning']))
job['successor'] = self._n_cmds
self._n_cmds += 1
else: # No more retry, accept failure...
raise RuntimeError(f">> job#{job['idx']} failed!\n Full output:\n {''.join(job['output'])}")
else: # Successful
self.res_queue.put([job['idx'], None]) # Return None to mimic callable behavior
self._fulfilled[job['uuid']] = job['log_idx'] # Marked as fulfilled, even with error (TODO: or shall I break all??)
# These helper objects may not be useful for the end users
for key in ['watcher', 'speed_up', 'args', 'kwargs']:
job.pop(key)
else:
pass
# elif isinstance(p, multiprocessing.Process):
elif isinstance(p, self.ctx.Process):
if not p.is_alive(): # If the process is terminated
job['stop_time'] = time.time()
job['returncode'] = p.exitcode # subprocess.Popen and multiprocessing.Process use different names for this
self.ps.remove(p)
if self.verbose > 0:
print('>> job#{0} finished (return {1}) in {2}.'.format(job['idx'], job['returncode'], format_duration(job['stop_time']-job['start_time'])))
# TODO: retry mechanism for callable
self._fulfilled[job['uuid']] = job['log_idx'] # Marked as fulfilled
# Remove potentially very large data
for key in ['args', 'kwargs']:
job.pop(key)
else:
pass
time.sleep(0.1)
# Dequeuing, see https://stackoverflow.com/questions/10028809/maximum-size-for-multiprocessing-queue-item
self._async_get_res(ress)
# Handle return values by callable cmd
while not self.res_queue.empty():
self._async_get_res(ress)
ress = [res[1] for res in sorted(ress, key=lambda res: res[0])]
# Handle return codes by children processes
jobs = sorted([job for job in self._pid2job.values() if job['successor'] is None], key=lambda job: job['idx'])
codes = [job['returncode'] for job in jobs]
if self.verbose > 0:
duration = time.time() - start_time
print('>> All {0} jobs done in {1}.'.format(self._n_cmds, format_duration(duration)))
if np.any(codes):
print('returncodes: {0}'.format(codes))
first_error = np.nonzero(codes)[0][0]
print(f">> Output for job#{first_error} was as follows:\n------------------------------")
print(jobs[first_error]['output'])
else:
print('all returncodes are 0.')
if self.all_successful(jobs=jobs):
print('>> All {0} jobs finished successfully.'.format(len(jobs)))
else:
print('>> Please pay attention to the above errors.')
# Reset object states
self._n_cmds = 0
self._idx2pid = {}
self._pid2job = {}
if pool_size is not None:
self.pool_size = old_size
res = (ress,) + ((codes,) if return_codes else ()) + ((jobs,) if return_jobs else ())
if len(res) == 1:
return res[0]
else:
return res
class ArrayWrapper(type):
'''
This is the metaclass for classes that wrap an np.ndarray and delegate
non-reimplemented operators (among other magic functions) to the wrapped array.
'''
# TODO: 1. Use ctx instead of multiprocessing. 2. Use multiprocessing.shared_memory
@add_metaclass(ArrayWrapper) # Compatibility code from six
class SharedMemoryArray(object):
'''
This class can be used as a usual np.ndarray, but its data buffer
is allocated in shared memory (under Cached Files in memory monitor),
and can be passed across processes without any data copy/duplication,
even when write access happens (which is lock-synchronized).
The idea is to allocate memory using multiprocessing.Array, and
access it from current or another process via a numpy.ndarray view,
without actually copying the data.
So it is both convenient and efficient when used with multiprocessing.
This implementation also demonstrates the power of composition + metaclass,
as opposed to the canonical multiple inheritance.
'''
@classmethod
def zeros(cls, shape, dtype=float, lock=True):
'''
Return a new array of given shape and dtype, filled with zeros.
This is the preferred usage, which avoids holding two copies of the
potentially very large data simultaneously in the memory.
'''
return cls(dtype, shape, lock=lock)
@classmethod
def from_array(cls, arr, lock=True):
'''
Initialize a new shared-memory array with an existing array.
'''
# return cls(arr.dtype, arr.shape, arr.ravel(), lock=lock) # Slow and memory inefficient, why?
a = cls.zeros(arr.shape, dtype=arr.dtype, lock=lock)
a[:] = arr # This is a more efficient way of initialization
return a
_SHARED_ARR_ATTRIBUTES = ['acquire', 'release', 'get_lock']
# At present, only numerical dtypes are supported.
dtype2ctypes = {
bool: ctypes.c_bool,
int: ctypes.c_long,
float: ctypes.c_double,
np.dtype('bool'): ctypes.c_bool,
np.dtype('int64'): ctypes.c_long,
np.dtype('int32'): ctypes.c_int,
np.dtype('int16'): ctypes.c_short,
np.dtype('int8'): ctypes.c_byte,
np.dtype('uint64'): ctypes.c_ulong,
np.dtype('uint32'): ctypes.c_uint,
np.dtype('uint16'): ctypes.c_ushort,
np.dtype('uint8'): ctypes.c_ubyte,
np.dtype('float64'): ctypes.c_double,
np.dtype('float32'): ctypes.c_float,
}
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
66,
8,
2864,
607,
81,
33467,
940,
31,
14816,
13,
785,
198,
2,
198,
2,
2448,
3411,
318,
29376,
... | 2.355117 | 7,651 |
from assetman import AssetManager
| [
6738,
11171,
805,
1330,
31433,
13511,
220,
198
] | 4.375 | 8 |
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
url(r'^login/$', auth_views.login, {'template_name': 'auth/login.html'}, name='login'),
url(r'^logout/$', auth_views.logout, name='logout'),
url(r'^$', views.home, name='Home'),
] | [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
5009,
355,
6284,
62,
33571,
198,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
197,
6371,
7,
... | 2.655172 | 116 |
# -*- coding: utf-8 -*-
import random
import numpy as np
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
11748,
4738,
198,
198,
11748,
299,
32152,
355,
45941,
628,
628,
198
] | 2.37037 | 27 |