content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""Provide a mock standalone component."""
DOMAIN = 'test_standalone'
def setup(hass, config):
"""Mock a successful setup."""
return True
| [
37811,
15946,
485,
257,
15290,
27669,
7515,
526,
15931,
198,
39170,
29833,
796,
705,
9288,
62,
1481,
17749,
6,
628,
198,
4299,
9058,
7,
71,
562,
11,
4566,
2599,
198,
220,
220,
220,
37227,
44,
735,
257,
4388,
9058,
526,
15931,
198,
2... | 3.083333 | 48 |
from learntools.libs.wavelet import signal_to_wavelet
| [
6738,
26338,
10141,
13,
8019,
82,
13,
19204,
1616,
1330,
6737,
62,
1462,
62,
19204,
1616,
198
] | 3.176471 | 17 |
# -*- coding: utf-8 -*-
"""
This script reads a XML-formatted word list and produces a dictionary
file used by the FirefoxOS virtual keyboard for word suggestions and
auto corrections.
The word lists come from the Android source: https://android.googlesource.com/platform/packages/inputmethods/LatinIME/+/master/dictionaries/
This script currently depends on the XML format of the Android
wordlists. (Eventually we might want to pre-process the XML files
to a plain text format and simplify this script so that it will work
with any plain-text word and frequency list)
The sample.xml file from the Android repo looks like this:
----------------------------------------------------------------------
<!-- This is a sample wordlist that can be converted to a binary
dictionary for use by the Latin IME. The format of the word
list is a flat list of word entries. Each entry has a frequency
between 255 and 0. Highest frequency words get more weight in
the prediction algorithm. As a special case, a weight of 0 is
taken to mean profanity - words that should not be considered a
typo, but that should never be suggested explicitly. You can
capitalize words that must always be capitalized, such as
"January". You can have a capitalized and a non-capitalized
word as separate entries, such as "robin" and "Robin". -->
<wordlist>
<w f="255">this</w>
<w f="255">is</w>
<w f="128">sample</w>
<w f="1">wordlist</w>
</wordlist>
----------------------------------------------------------------------
This script processes the word list and converts it to a Ternary
Search Tree (TST), as described in the wiki link below, also in
http://en.wikipedia.org/wiki/Ternary_search_tree
http://www.strchr.com/ternary_dags
http://www.strchr.com/dawg_predictive
Note that the script does not convert the tree into a DAG (by sharing
common word suffixes) because it cannot maintain separate frequency
data for each word if the words share nodes.
We have moved the documentation (format and example) for the dictionary blob to
Mozilla Wiki: https://wiki.mozilla.org/Gaia/System/Keyboard/IME/Latin/Dictionary_Blob
Please make sure any updates to the codes are reflected in the wiki too.
"""
from optparse import OptionParser
from xml.parsers import expat
import struct
import math
_NodeCounter = 0
_NodeRemoveCounter = 0
_NodeVisitCounter = 0
_EmitCounter = 0
_WordCounter = 0
_EndOfWord = chr(0)
# How many times do we use each character in this language
characterFrequency = {}
maxWordLength = 0
highestFreq = 0
# Data Structure for TST Tree
# Constructor for creating a new TSTNode
# Constructor for creating a TST Tree
# Insert a word into the TSTTree
# Balance the TST
# set the number of children nodes
# balance level of TST
# find node in the subtree of root and promote it to root
# balance the whole TST
# Serialize the tree to an array. Do it depth first, folling the
# center pointer first because that might give us better locality
# Make a pass through the array of nodes and figure out the size and offset
# of each one.
# Parse command line arguments.
#
# Syntax: python xml2dict.py [-v] -o output-file input-file
#
use = "Usage: %prog [options] dictionary.xml"
parser = OptionParser(usage = use)
parser.add_option("-o", "--output", dest="output", metavar="FILE", help="write output to FILE")
options, args = parser.parse_args()
# We expect the dictionary name to be present on the command line.
if len(args) < 1:
print("Missing dictionary name.")
exit(-1)
if options.output == None:
print("Missing output file.")
exit(-1)
# print some status statements to the console
print ("[0/4] Creating dictionary ... (this might take a long time)" )
print ("[1/4] Reading XML wordlist and creating TST ..." )
tstRoot = None
tree = TSTTree()
# Parse the XML input file and build the trie.
p = expat.ParserCreate()
p.StartElementHandler = start_element
p.CharacterDataHandler = char_data
p.EndElementHandler = end_element
p.ParseFile(open(args[0], 'rb'))
print ("[2/4] Balancing Ternary Search Tree ...")
tstRoot = tree.balance(tstRoot)
print ("[3/4] Serializing TST ...");
nodes = serializeTree(tstRoot)
print ("[4/4] Emitting TST ...")
output = open(options.output, "wb")
emit(output, nodes)
output.close()
print ("Successfully created Dictionary")
exit()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
198,
1212,
4226,
9743,
257,
23735,
12,
687,
16898,
1573,
1351,
290,
11073,
257,
22155,
198,
7753,
973,
416,
262,
16802,
2640,
7166,
10586,
329,
1573,
11776,
... | 3.304833 | 1,345 |
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Matt Chisholm
import wx
from BitTorrent.StatusLight import StatusLight as _StatusLight
| [
2,
383,
10154,
286,
428,
2393,
389,
2426,
284,
262,
4722,
39286,
4946,
8090,
13789,
198,
2,
10628,
352,
13,
16,
357,
1169,
13789,
737,
220,
921,
743,
407,
4866,
393,
779,
428,
2393,
11,
287,
2035,
198,
2,
2723,
2438,
393,
28883,
1... | 4.025 | 160 |
import os
import threading
import unittest
from collections import defaultdict
from unittest import mock
import webtest
from cornice import errors as cornice_errors
from pyramid.url import parse_url_overrides
from kinto.core import DEFAULT_SETTINGS
from kinto.core import statsd
from kinto.core.storage import generators
from kinto.core.utils import sqlalchemy, memcache, follow_subrequest, encode64
skip_if_travis = unittest.skipIf("TRAVIS" in os.environ, "travis")
skip_if_no_postgresql = unittest.skipIf(sqlalchemy is None, "postgresql is not installed.")
skip_if_no_memcached = unittest.skipIf(memcache is None, "memcached is not installed.")
skip_if_no_statsd = unittest.skipIf(not statsd.statsd_module, "statsd is not installed.")
class DummyRequest(mock.MagicMock):
"""Fully mocked request.
"""
follow_subrequest = follow_subrequest
class FormattedErrorMixin:
"""Test mixin in order to perform advanced error responses assertions.
"""
def get_user_headers(user, password="secret"):
"""Helper to obtain a Basic Auth authorization headers from the specified
`user` (e.g. ``"user:pass"``)
:rtype: dict
"""
credentials = "{}:{}".format(user, password)
authorization = "Basic {}".format(encode64(credentials))
return {"Authorization": authorization}
class BaseWebTest:
"""Base Web Test to test your kinto.core service.
It setups the database before each test and delete it after.
"""
api_prefix = "v0"
"""URL version prefix"""
entry_point = None
"""Main application entry"""
headers = {"Content-Type": "application/json"}
@classmethod
@classmethod
def make_app(cls, settings=None, config=None):
"""Instantiate the application and setup requests to use the api
prefix.
:param dict settings: extra settings values
:param pyramid.config.Configurator config: already initialized config
:returns: webtest application instance
"""
settings = cls.get_app_settings(extras=settings)
main = cls.entry_point
wsgi_app = main({}, config=config, **settings)
app = webtest.TestApp(wsgi_app)
app.RequestClass = get_request_class(cls.api_prefix)
return app
@classmethod
def get_app_settings(cls, extras=None):
"""Application settings to be used. Override to tweak default settings
for the tests.
:param dict extras: extra settings values
:rtype: dict
"""
settings = {**DEFAULT_SETTINGS}
settings["storage_backend"] = "kinto.core.storage.memory"
settings["storage_strict_json"] = True
settings["cache_backend"] = "kinto.core.cache.memory"
settings["permission_backend"] = "kinto.core.permission.memory"
settings.update(extras or None)
return settings
| [
11748,
28686,
198,
11748,
4704,
278,
198,
11748,
555,
715,
395,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
11748,
3992,
9288,
198,
6738,
11676,
501,
1330,
8563,
355,
11676,
501,
62,
48277,
198,... | 2.819348 | 1,013 |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 17 11:31:40 2021
@author: justi
"""
import utils
import play
import spotify
import time as t
from abc import ABC, abstractmethod
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
5979,
1596,
1367,
25,
3132,
25,
1821,
33448,
198,
198,
31,
9800,
25,
655,
72,
198,
37811,
198,
11748,
3384,
4487,
198,
11748,
711,
198,
1... | 2.825397 | 63 |
"""
Mixin for INI, .properties, and TOML.
"""
from __future__ import annotations
import os
from typing import Optional, Sequence, Set, Union
import pandas as pd
from typeddfs.df_errors import UnsupportedOperationError
from typeddfs.utils import IoUtils, ParseUtils, Utils
__all__ = ["_IniLikeMixin"]
| [
37811,
198,
35608,
259,
329,
3268,
40,
11,
764,
48310,
11,
290,
41526,
43,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
28686,
198,
6738,
19720,
1330,
32233,
11,
45835,
11,
5345,
11,
4479,
198,
198,
1174... | 3.154639 | 97 |
from time import perf_counter as pfc
puzzle = load('Tag_03.txt')
start = pfc()
print(solve(puzzle), pfc() - start)
| [
6738,
640,
1330,
23035,
62,
24588,
355,
279,
16072,
220,
198,
198,
79,
9625,
796,
3440,
10786,
24835,
62,
3070,
13,
14116,
11537,
198,
198,
9688,
796,
279,
16072,
3419,
198,
4798,
7,
82,
6442,
7,
79,
9625,
828,
279,
16072,
3419,
532... | 2.565217 | 46 |
import os
import unittest
import scCloud.pipeline
| [
11748,
28686,
198,
11748,
555,
715,
395,
198,
198,
11748,
629,
18839,
13,
79,
541,
4470,
628
] | 3.058824 | 17 |
import time
from datetime import datetime
if __name__ == '__main__':
main()
| [
11748,
640,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
628
] | 2.833333 | 30 |
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
1330,
2764,
38469,
7509,
628
] | 3.809524 | 21 |
import os
from . import env
PAUSE = {
'windows': 'PAUSE',
'macos': None,
'linux': None,
'unix': None
}
CLEAR = {
'windows': 'CLS',
'macos': 'clear',
'linux': 'clear',
'unix': 'clear'
}
| [
11748,
28686,
198,
6738,
764,
1330,
17365,
198,
198,
4537,
19108,
796,
1391,
198,
220,
220,
220,
705,
28457,
10354,
705,
4537,
19108,
3256,
198,
220,
220,
220,
705,
20285,
418,
10354,
6045,
11,
198,
220,
220,
220,
705,
23289,
10354,
6... | 2.064815 | 108 |
# -*- coding: utf-8 -*-
"""
@FileName : tf_load.py
@Description : 加载tf模型
@Author : 齐鲁桐
@Email : qilutong@yahoo.com
@Time : 2019-05-15 19:08
@Modify : None
"""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
def load_graph(model_file, name=None):
"""
加载tf模型
:param model_file: 模型文件名
:param name: 节点名称
:return: tf计算图
"""
graph = tf.Graph()
graph_def = tf.GraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def, name=name)
return graph
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
8979,
5376,
220,
220,
220,
1058,
48700,
62,
2220,
13,
9078,
198,
31,
11828,
1058,
10263,
232,
254,
164,
121,
121,
27110,
162,
101,
94,
161,
252,
233,
... | 1.914201 | 338 |
from hylfm.datasets.online import OnlineTensorInfo
| [
6738,
2537,
1652,
76,
13,
19608,
292,
1039,
13,
25119,
1330,
7467,
51,
22854,
12360,
628
] | 3.25 | 16 |
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from StockAndFlowInPython.sfd_canvas.interactive_sfd_ui import Ui_widget_interactive_sfd
from StockAndFlowInPython.parsing.XMILE_parsing import text_to_equation, equation_to_text
from StockAndFlowInPython.graph_sd.graph_engine import STOCK, FLOW, VARIABLE, PARAMETER, ALIAS, CONNECTOR, Structure
if __name__ == '__main__':
app = QApplication(sys.argv)
main_window = QMainWindow()
main_window.setWindowTitle("Interactive SFD")
main_window.setMinimumWidth(960)
main_window.setMinimumHeight(800)
interactive_sfd = InteractiveSFD()
main_window.setCentralWidget(interactive_sfd)
main_window.show()
sys.exit(app.exec_())
| [
11748,
25064,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
8205,
72,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1635,
198,
6738,
2603,
29487... | 2.642173 | 313 |
n = 84
| [
77,
796,
9508,
201
] | 1.75 | 4 |
# Copyright 2020 Jianfeng Hou <frankderekdick@gmail.com>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculates the optimal v for the DBS value iteration experiments.
"""
import math
import copy
import time
import pickle
import numpy as np
from env import GridWorldEnv
from agent import ValueIterationAgent
from dbs.config import VALUE_ITERATION_CONFIG as config
from dbs.config import GRID_WORLD
from util import format_time
total_begin_time = time.time()
# Create the GridWorld Environment
grid_world_env = GridWorldEnv(
name="GirldWorldEnv of size (10, 10)",
state_space=GRID_WORLD['state_space'],
action_space=GRID_WORLD['action_space'],
episode_max_length=config['episode_max_length'],
size=(GRID_WORLD['row_count'], GRID_WORLD['column_count']),
starting_index=GRID_WORLD['starting_index'],
goal_index=GRID_WORLD['goal_index'],
goal_reward=GRID_WORLD['goal_reward'],
wall_index_list=GRID_WORLD['wall_index_list'])
# Create the value iteration agent
value_iteration_agent = ValueIterationAgent(discount=config['discount'], env=grid_world_env, in_place=True)
# Calculate the optimal v list
begin_time = time.time()
for step in range(1, config['optimal_step_num'] + 1):
value_iteration_agent.take_action()
# Print progress information
if step % 10000 == 0:
current_time = time.time()
print("{:>8d}/{:<8d} iterations finished in {:>10f}s.".format(step, config['optimal_step_num'], current_time - begin_time))
optimal_v_array = value_iteration_agent.v_array
# Dump the optimal_v_list
filename = "dbs_value_iteration_optimal_v_array.pkl"
with open(filename, "wb") as f:
pickle.dump(optimal_v_array, f)
print("optimal_v_array has been successfully dumped to file \'{:s}\'.".format(filename))
total_end_time = time.time()
print("The optimal v for DBS value iteration experiments calculated in {:s}.".format(format_time(total_end_time - total_begin_time)))
| [
2,
15069,
12131,
40922,
69,
1516,
33790,
1279,
8310,
962,
67,
18238,
67,
624,
31,
14816,
13,
785,
29,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
153... | 2.995146 | 824 |
"""Module with resnet like models."""
import argparse
from collections import OrderedDict
import importlib
from typing import Any, Dict, List, Optional, Type, Union
import torch
import torch.nn as nn
from .blocks import ResNetBlock, ResNetBottleneck, conv1x1
TYPE_TO_ARGS = {
"resnet18": ([2, 2, 2, 2], ResNetBlock),
"resnet34": ([3, 4, 6, 3], ResNetBlock),
"resnet50": ([3, 4, 6, 3], ResNetBottleneck),
"resnet101": ([3, 4, 23, 3], ResNetBottleneck),
"resnet152": ([3, 8, 36, 3], ResNetBottleneck),
}
USE_TORCHVISION_MODEL = False
class ResNet(nn.Module):
"""A convolutional resnet-like model.
Args:
data_config: a dictionary containing information about data.
args (optional): args from argparser.
"""
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Returns tensor of logits for each class."""
return self.model(x)
@staticmethod
def add_to_argparse(
parser: argparse.ArgumentParser,
main_parser: argparse.ArgumentParser # pylint: disable=unused-argument
) -> argparse.ArgumentParser:
"""Adds possible args to the given parser."""
parser.add_argument(
"--resnet_type", type=str, default="resnet18",
help="Type of resnet to use (resnet{18, 34, 50, 101, 152})."
)
parser.add_argument(
"--use_torchvision_model", default=False, action="store_true",
help="If true, will use resnet architecture from torchvision."
)
return parser
class _ResNet(nn.Module):
"""A convolutional resnet-like model.
Args:
num_blocks: a list of number of blocks in each resnet layer.
block: a class constructor to use for creating resnet blocks.
num_classes: a number of classes.
"""
_base_channels: int = 64
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Returns tensor of logits for each class."""
return self.model(x)
| [
37811,
26796,
351,
581,
3262,
588,
4981,
526,
15931,
628,
198,
11748,
1822,
29572,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
1330,
8019,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
32233,
11,
5994,
11,
44... | 2.519084 | 786 |
'''
forms for the forum's related instances
. ReplyForm
. Meta:
'''
from django import forms
#
from .models import Reply
| [
7061,
6,
198,
198,
23914,
329,
262,
10041,
338,
3519,
10245,
628,
220,
220,
220,
764,
14883,
8479,
198,
220,
220,
220,
220,
220,
220,
220,
764,
30277,
25,
198,
7061,
6,
198,
6738,
42625,
14208,
1330,
5107,
198,
2,
198,
6738,
764,
... | 2.87234 | 47 |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 21 18:43:20 2021
@author: Waradon Senzt Phokhinanan
"""
############################################################################################
import math
import librosa
import scipy.io
import matplotlib.pyplot as plt
from scipy import signal
import numpy as np
import os
import soundfile as sf
############################################################################################
############################################################################################
############################################################################################
############################################################################################
# MAIN PROGRAMME ###########################################################################
############################################################################################
azimuthdict = {
0: "-90",
1: "-85",
2: "-80",
3: "-75",
4: "-70",
5: "-65",
6: "-60",
7: "-55",
8: "-50",
9: "-45",
10: "-40",
11: "-35",
12: "-30",
13: "-25",
14: "-20",
15: "-15",
16: "-10",
17: "-5",
18: "0",
19: "5",
20: "10",
21: "15",
22: "20",
23: "25",
24: "30",
25: "35",
26: "40",
27: "45",
28: "50",
29: "55",
30: "60",
31: "65",
32: "70",
33: "75",
34: "80",
35: "85",
36: "90"
}
NoiseData = NoiseTestingImport()
TEST_ILDIPD_FeatureCON = np.empty([0,321,50,2])
TEST_ILDIPD_LabelCON = np.empty([0])
#Generate Testing Data
SpeechTestD = os.listdir('./SpeechTEST')
for FileXD in SpeechTestD:
FileX = '/SpeechTEST/' + FileXD
for SNRx in [-6,0,6]:
for Nx in range(0,len(NoiseData)):
print('Spatialising')
print('SNR: ' + str(SNRx))
print('Speech file: ' + str(FileXD))
print('Noise number: ' + str(Nx))
NoisePUT = NoiseData[Nx]
ILDIPD_Feature, ILDIPD_Label = spatialise37azimuths(FileX,SNRx,NoisePUT,Nx)
TEST_ILDIPD_FeatureCON = np.vstack([TEST_ILDIPD_FeatureCON,ILDIPD_Feature])
TEST_ILDIPD_LabelCON = np.hstack([TEST_ILDIPD_LabelCON,ILDIPD_Label.astype(int)])
######
TEST_ILDIPD_LabelCON = np.vectorize(azimuthdict.get)(TEST_ILDIPD_LabelCON)
TEST_ILDIPD_LabelCON = TEST_ILDIPD_LabelCON.astype(int)
with open('BinSL_TESTextract.npy', 'wb') as f:
np.save(f, TEST_ILDIPD_FeatureCON)
np.save(f, TEST_ILDIPD_LabelCON)
print('Genrating testing data has done!')
print('Total testing samples: ' + str(TEST_ILDIPD_FeatureCON.shape))
print('Total testing labels: ' + str(TEST_ILDIPD_LabelCON.shape)) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3825,
1526,
2310,
1248,
25,
3559,
25,
1238,
33448,
201,
198,
201,
198,
31,
9800,
25,
1810,
324,
261,
2311,
89,
83,
1380,
482,
20079,
... | 2.249807 | 1,297 |
import subprocess
import logging
from prairiedog.node import Node
from prairiedog.edge import Edge
from prairiedog.dgraph import Dgraph
from prairiedog.graph import Graph
from prairiedog.errors import GraphException
log = logging.getLogger('prairiedog')
# TODO: this currently runs too slow for tests
# def test_dgraph_preload(dg):
# dg.preload()
# assert True
| [
11748,
850,
14681,
198,
11748,
18931,
198,
198,
6738,
7201,
343,
798,
519,
13,
17440,
1330,
19081,
198,
6738,
7201,
343,
798,
519,
13,
14907,
1330,
13113,
198,
6738,
7201,
343,
798,
519,
13,
67,
34960,
1330,
360,
34960,
198,
6738,
720... | 2.961538 | 130 |
import statistics
import os, random, pickle
import numpy as np
# we are going to show for each timestep, for each layer, what's the majority attention.
# majority attention excluding roadmark tokens
# majority
from typing import List
import scipy
from collections import Counter
from util import convert_enc_attn, logger
index_of_bpe = 1
compar_set1 = ['last_inp', 'cur_inp', 'cur_pred', 'next_pred']
compar_set2 = ['top1_most_common', 'top1_distill_most_common']
from scipy.stats import entropy
from scipy.special import softmax
np.set_printoptions(precision=5)
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.pyplot as plt
import json
if __name__ == '__main__':
print("Looking at attention")
if 'pegasus' in MODEL_NAME:
from transformers import PegasusTokenizer
bpe_tokenizer = PegasusTokenizer.from_pretrained(MODEL_NAME)
EOS_TOK_IDs = [106, bpe_tokenizer.eos_token_id] # <n>
bos_token_id = 0
else:
raise NotImplementedError
# visualize_distribution(None,None)
files = os.listdir(CUR_DIR)
random.shuffle(files)
files = files[:20]
if True:
all_outputs = []
for layer_num in range(16):
print(f"Layer :{layer_num}")
output_array = run_trial(layer_num, files)
all_outputs.append(output_array)
draw_plot(all_outputs)
exit()
results = []
layer_num = 0
for f in files:
with open(os.path.join(CUR_DIR, f), 'rb') as fd:
data = pickle.load(fd)
result = attention_entrance(data['attentions'], data['pred_distributions'], data['logits'], data['input_doc'],
BOS_TOKEN=bos_token_id, layer_num=layer_num)
results += result
result_in_arry = np.asarray(results)
draw_plot(result_in_arry.T, layer_num)
# print("Start writing analysis result to disk...")
# print(len(results))
# with open(os.path.join(PROB_META_DIR, f"{spec_name}_attention.json"), 'w') as fd:
# json.dump(results, fd)
# print(f'Done writing to disk: {os.path.join(PROB_META_DIR, f"{spec_name}_attention.json")}')
| [
11748,
7869,
198,
198,
11748,
28686,
11,
4738,
11,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
356,
389,
1016,
284,
905,
329,
1123,
4628,
395,
538,
11,
329,
1123,
7679,
11,
644,
338,
262,
3741,
3241,
13,
198,
2,
37... | 2.362256 | 922 |
from django.conf.urls.static import static
from django.conf import settings
"""marketplace URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from main_app.views import (LandingView, TOSView, SellView, AuctionListView,
ProductDetailView, WatchView, LikeView, BidView,
ProfileView, RegistrationView)
urlpatterns = [
path('i18n/', include('django.conf.urls.i18n')),
path('admin/', admin.site.urls),
path('accounts/register/', RegistrationView.as_view(), name="register"),
path('accounts/', include('django.contrib.auth.urls')),
path('tos', TOSView.as_view(), name="tos"),
path('sell', SellView.as_view(), name="sell"),
path('product_detail/<int:pk>/', ProductDetailView.as_view(), name="product_detail"),
path('watch/<int:pk>/', WatchView.as_view(), name="watch"),
path('like/<int:pk>/', LikeView.as_view(), name="like"),
path('bid/<int:pk>/', BidView.as_view(), name="bid"),
path('search/', AuctionListView.as_view(), name="search"),
path('profile/', ProfileView.as_view(), name="profile"),
path('', LandingView.as_view(), name='landing'),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.UPLOADS_DIR, document_root=settings.UPLOADS_DIR)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12708,
1330,
9037,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
37811,
10728,
5372,
10289,
28373,
198,
198,
464,
4600,
6371,
33279,
82,
63,
1351,
11926,
32336,
284,
5009,
13,... | 2.681572 | 738 |
import logging
import os
from currentplatform import platform
from sound_player.common import StatusObject, STATUS
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
11748,
28686,
198,
198,
6738,
1459,
24254,
1330,
3859,
198,
198,
6738,
2128,
62,
7829,
13,
11321,
1330,
12678,
10267,
11,
15486,
2937,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
1... | 3.690476 | 42 |
from .clog import MyRotatingFileHandler, MyTimedRotatingFileHandler
| [
6738,
764,
565,
519,
1330,
2011,
24864,
803,
8979,
25060,
11,
2011,
14967,
276,
24864,
803,
8979,
25060,
198
] | 3.578947 | 19 |
import base64
import mimetypes
import uuid
from django.core.files.base import ContentFile
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from unicef_restlib.fields import ModelChoiceField
from unicef_attachments.utils import get_client_ip
| [
11748,
2779,
2414,
198,
11748,
17007,
2963,
12272,
198,
11748,
334,
27112,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
16624,
13,
8692,
1330,
14041,
8979,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
62,
75,
12582,
35... | 3.47619 | 84 |
import logging
import time
from datetime import timedelta
from time import sleep
import RPi.GPIO as GPIO
from thespian.actors import Actor, ActorSystem, ActorTypeDispatcher
# Logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
# Sensor
trigPin = 16
echoPin = 18
MAX_DISTANCE = 220
timeOut = MAX_DISTANCE * 60
GPIO.setmode(GPIO.BOARD) # use PHYSICAL GPIO Numbering
GPIO.setup(trigPin, GPIO.OUT) # set trigPin to OUTPUT mode
GPIO.setup(echoPin, GPIO.IN) # set echoPin to INPUT mode
# Resources
# https://github.com/malefs/security-smell-detector-python-gist/blob/e90764deb06ae4d3c45e702db7ad00351520348f/gist-hash/b51a9cabd41edae990fd6e844f10ef8e/snippet.py
# https://thespianpy.com/doc/in_depth#outline-container-org9bb4305
class BellBoy(ActorTypeDispatcher):
"""
Lead actor.
Starts other actors and co-ordinates system actions.
"""
log = logging.getLogger("BellBoy")
def receiveMsg_str(self, message, sender):
"""Handles string messages sent to the BellBoy actor."""
self.log.info("Received message %s from sender %s", message, sender)
if type(message) is str and "start" in message:
self.startBellboyServices()
if type(message) is str and "heartbeat" in message:
print("Got heartbeat message...")
self.send(self.gui, "heartbeat")
self.send(self.sensor, "heartbeat")
def startBellboyServices(self):
"""Starts all other BellBoy system actors."""
self.log.info("Starting bellboy services.")
# Create child actors. Ha.
self.gui = self.createActor(StatusWebGUI)
self.sensor = self.createActor(Sensor)
self.send(self.gui, "start")
self.send(self.sensor, "start")
self.wakeupAfter(timedelta(seconds=1))
class StatusWebGUI(Actor):
"""
Will eventually deploy a simple Flask site as a simple frontend for the
device.
Simple actors that inherit from Actor only need to implement
recieveMessage.
"""
log = logging.getLogger("StatusWebGUI")
def receiveMessage(self, message, sender):
"""Handles all messages sent to the StatusWebGUI actor."""
self.log.info("Received message %s from sender %s", message, sender)
class Sensor(ActorTypeDispatcher):
"""Reads the Ultrasonic sensor and calculates a rolling average of the
distance."""
log = logging.getLogger("Sensor")
distances = []
parent = None
def receiveMsg_str(self, message, sender):
"""Handles strings sent to the Sensor actor."""
self.log.info("Received message %s from sender %s", message, sender)
if "start" in message:
self.parent = sender
self.wakeupAfter(timedelta(seconds=3))
if "heartbeat" in message:
self.log.info("Past 10 readings: %s", self.distances)
def receiveMsg_WakeupMessage(self, message, sender):
"""Handles WakeupMessages sent to the Sensor actor."""
self.wakeupAfter(timedelta(seconds=0.1))
distance = self.measure()
self.distances.append(distance)
# self.log.info("Raw distance is: %f", distance)
self.analyzeDistance()
# Prune extra elements
while len(self.distances) > 10:
del self.distances[0]
def analyzeDistance(self):
"""Returns the average distance of the last 10 ultrasonic sensor
polls."""
average = sum(self.distances) / len(self.distances)
self.log.info("Average distance: %i cm", average)
return average
def measure(self):
"""Takes a single measurement from the Ultrasonic sensor."""
# measurement_start_time = time.time()
# Pulse HIGH for 10us
GPIO.output(trigPin, GPIO.HIGH)
time.sleep(0.00001)
GPIO.output(trigPin, GPIO.LOW)
# Wait for output to go high.
t0 = time.time()
while GPIO.input(echoPin) != GPIO.HIGH:
# If pin fails to go high, time out.
if (time.time() - t0) > timeOut * 0.000001:
self.log.error("Ultrasonic sensor init timed out.")
return 0
# Record start time
t0 = time.time()
while GPIO.input(echoPin) == GPIO.HIGH:
# Return zero if timeout.
if (time.time() - t0) > timeOut * 0.000001:
self.log.error("Ultrasonic reading timed out.")
return 0
# Sound travels at 340m/s, distance is half that time.
pulseTime = (time.time() - t0) * 1000000
distance = pulseTime * 340.0 / 2.0 / 10000.0
# measurement_time = time.time() - measurement_start_time
# self.log.info("Measurement took %f s", measurement_time)
return distance
if __name__ == "__main__":
# Start each actor in its own process.
system = ActorSystem("multiprocQueueBase")
# Without a multiprocessing base, wakeupAfter won't work at all.
bellboy = system.createActor(BellBoy)
system.tell(bellboy, "start")
try:
while True:
# Every five seconds, get the BellBoy actor to report on its children
sleep(5)
system.tell(bellboy, "heartbeat")
finally:
# This call sends an ActorExitRequest to all live actors.
system.shutdown()
GPIO.cleanup()
| [
11748,
18931,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
640,
1330,
3993,
198,
198,
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
198,
6738,
262,
2777,
666,
13,
529,
669,
1330,
27274,
11,
27274,
11964,
11,
... | 2.473148 | 2,160 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321... | 5.229167 | 96 |
from django.contrib.auth import get_user_model
from auth_app.utils import get_activate_key
from main.decorators import except_shell
from src.celery import app
from auth_app.tasks import send_information_email
from django.conf import settings
from microservice_request.services import MicroServiceConnect, ConnectionService
from . import models
# Resolve the active user model once (honors a custom AUTH_USER_MODEL).
User = get_user_model()
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
198,
6738,
6284,
62,
1324,
13,
26791,
1330,
651,
62,
39022,
62,
2539,
198,
6738,
1388,
13,
12501,
273,
2024,
1330,
2845,
62,
29149,
198,
6738,
12351,
... | 3.693069 | 101 |
import math


def evaluate(tokens):
    """Evaluate a token stream of numbers followed by an operator.

    Numbers accumulate until one of ``* / + -`` appears; the operator is then
    folded over the pending numbers left-to-right (the first number seen seeds
    the running result).  Division truncates toward zero, matching
    ``math.trunc``.  Trailing numbers with no operator after them are ignored.

    :param tokens: iterable of string tokens (numbers and operators)
    :return: the truncated integer result (0 if no operator was applied)
    """
    # Was a flat script that shadowed the builtin ``sum`` and kept an unused
    # ``operators`` list; logic is unchanged.
    result = 0
    pending = []
    first = True
    for token in tokens:
        if token in ("*", "/", "+", "-"):
            for number in pending:
                value = int(number)
                if first:
                    # First number ever seen seeds the accumulator.
                    result = value
                    first = False
                elif token == "*":
                    result *= value
                elif token == "/":
                    # Truncate toward zero, like the original math.trunc use.
                    result = math.trunc(result / value)
                elif token == "+":
                    result += value
                else:
                    result -= value
            pending = []
        else:
            pending.append(token)
    return math.trunc(result)


if __name__ == "__main__":
    # Guarding the I/O lets the module be imported without reading stdin.
    print(evaluate(input().split(' ')))
| [
11748,
10688,
198,
198,
15414,
23022,
796,
5128,
22446,
35312,
10786,
705,
8,
198,
16345,
796,
657,
198,
77,
17024,
796,
17635,
198,
3575,
2024,
796,
17635,
198,
271,
5962,
796,
6407,
198,
198,
1640,
2378,
287,
5128,
23022,
25,
198,
2... | 1.795918 | 392 |
# Copyright 2018 Fujitsu.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from barbican.common import exception
from barbican import objects
from barbican.tests.objects import test_ovo_base
| [
2,
220,
220,
220,
15069,
2864,
32671,
19831,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
... | 3.461538 | 208 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License
from ftl.agents import Client, Server
from ftl.training_utils import cycle
from torchvision import datasets
from torch.utils.data import DataLoader, Subset
import torch
import numpy as np
from typing import Dict, List
class DataManager:
    """
    Base Class for all Data Readers.

    Concrete subclasses implement :meth:`download_data` and are expected to
    provide the attributes used below (``data_config``, ``clients``,
    ``server``, ``val_ix``, ``num_train``, ``num_dev``, ``num_test``,
    ``data_distribution_map``) before :meth:`distribute_data` runs.
    """
    @staticmethod
    def _get_common_data_trans(_train_dataset):
        """ Implements a simple way to compute train and test transform that usually works.

        :param _train_dataset: dataset whose ``.data`` holds the raw samples
        :return: (mean, std) channel statistics scaled to [0, 1] via /255
        """
        try:
            # Datasets whose .data supports .float() (torch tensors).
            mean = [_train_dataset.data.float().mean(axis=(0, 1, 2)) / 255]
            std = [_train_dataset.data.float().std(axis=(0, 1, 2)) / 255]
        except Exception:
            # numpy-backed datasets: ndarray has no .float() method.
            # (Was a bare ``except:`` which would also swallow SystemExit
            # and KeyboardInterrupt.)
            mean = _train_dataset.data.mean(axis=(0, 1, 2)) / 255
            std = _train_dataset.data.std(axis=(0, 1, 2)) / 255
        return mean, std

    def _populate_data_partition_map(self):
        """ Wrapper that samples data for clients and server.

        :raises NotImplementedError: for any strategy other than 'iid'
        """
        data_distribution_strategy = self.data_config.get("data_distribution_strategy", 'iid')
        if data_distribution_strategy == 'iid':
            self._iid_dist()
        else:
            # Was ``raise NotImplemented`` -- raising the NotImplemented
            # constant is itself a TypeError; the exception class is
            # NotImplementedError.
            raise NotImplementedError(
                'unsupported data_distribution_strategy: {}'.format(data_distribution_strategy))

    def _iid_dist(self):
        """ Distribute the data iid into all the clients.

        Splits ``num_train + num_dev`` sample indices into a dev set
        (``self.val_ix``, only when ``num_dev > 0``) and one disjoint
        partition per client in ``self.data_distribution_map``.
        """
        all_indexes = np.arange(self.num_train + self.num_dev)
        # Let's assign points for Dev data
        if self.num_dev > 0:
            self.val_ix = set(np.random.choice(a=all_indexes, size=self.num_dev, replace=False))
            all_indexes = list(set(all_indexes) - self.val_ix)
        # split rest to clients for train
        num_clients = len(self.clients)
        num_samples_per_machine = self.num_train // num_clients
        for machine_ix in range(0, num_clients - 1):
            self.data_distribution_map[self.clients[machine_ix].client_id] = \
                set(np.random.choice(a=all_indexes, size=num_samples_per_machine, replace=False))
            all_indexes = list(set(all_indexes) - self.data_distribution_map[self.clients[machine_ix].client_id])
        # put the rest (including any remainder from integer division) in the
        # last machine
        self.data_distribution_map[self.clients[-1].client_id] = all_indexes

    def download_data(self) -> "[datasets, datasets]":
        """ Downloads Data and Apply appropriate Transformations . returns train, test dataset

        Annotation is quoted so it is not evaluated at class-definition time.
        """
        raise NotImplementedError("This method needs to be implemented")

    def distribute_data(self):
        """ Distributes Data among clients, Server accordingly. Makes ready to train-test """
        _train_dataset, _test_dataset = self.download_data()
        # update data set stats
        total_train_samples = _train_dataset.data.shape[0]
        self.num_dev = int(self.data_config.get('dev_split', 0.1) * total_train_samples)
        self.num_train = total_train_samples - self.num_dev
        self.num_test = _test_dataset.data.shape[0]
        assert self.data_config.get('num_labels') == len(_train_dataset.classes), \
            'Number of Labels of DataSet and Model output shape Mismatch, ' \
            'fix num_labels in client.config.data_config to change model output shape'
        if len(_train_dataset.data.shape) > 3:
            # 4-D data: channel count is the trailing (channels-last) axis.
            assert self.data_config.get('num_channels') == _train_dataset.data.shape[-1], \
                'Number of channels of DataSet and Model in channel shape Mismatch, ' \
                'fix num_channels in client.config.data_config to change model input shape'
        else:
            assert self.data_config.get('num_channels') == 1, \
                'Number of channels of DataSet and Model in channel shape Mismatch, ' \
                'fix num_channels in client.config.data_config to change model input shape'
        # partition data
        self._populate_data_partition_map()
        # populate server data loaders
        if self.val_ix:
            val_dataset = Subset(dataset=_train_dataset, indices=self.val_ix)
            # NOTE(review): DataLoader wraps ``val_dataset.dataset`` (the full
            # train set), so the Subset indices are effectively ignored --
            # confirm whether this is intentional.
            self.server.val_loader = DataLoader(val_dataset.dataset,
                                                batch_size=self.data_config.get("infer_batch_size", 1),
                                                pin_memory=True,
                                                num_workers=self.data_config.get("val_num_workers", 0))
        self.server.test_loader = DataLoader(_test_dataset,
                                             batch_size=self.data_config.get("infer_batch_size", 1),
                                             pin_memory=True,
                                             num_workers=self.data_config.get("test_num_workers", 0))
        # populate client data loader
        for client in self.clients:
            local_dataset = Subset(dataset=_train_dataset,
                                   indices=self.data_distribution_map[client.client_id])
            # NOTE(review): same pattern -- ``local_dataset.dataset`` is the
            # full train set, not the client's partition.
            client.local_train_data = DataLoader(local_dataset.dataset,
                                                 shuffle=True,
                                                 batch_size=client.client_opt_config.get("train_batch_size", 256),
                                                 pin_memory=True,
                                                 num_workers=self.data_config.get("train_num_workers", 0))
            client.trainer.train_iter = iter(cycle(client.local_train_data))
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
13789,
198,
198,
6738,
10117,
75,
13,
49638,
1330,
20985,
11,
9652,
198,
6738,
10117,
75,
13,
34409,
62,
26791,
1330,
6772,
198,
6738,
28034,
10178,
1330,
40522,
... | 2.148747 | 2,474 |
# -*- coding: utf-8 -*-
"""
Allows access to the site's bot user list.
The function refresh() downloads the current bot user list and saves
it to disk. It is run automatically when a bot first tries to get this
data.
"""
# (C) Daniel Herding, 2005
# (C) Dr. Trigon, 2009-2010
# (C) Pywikipedia bot team, 2010-2012
#
# DrTrigonBot: http://de.wikipedia.org/wiki/Benutzer:DrTrigonBot
#
# Distributed under the terms of the MIT license.
#
__version__='$Id$'
#
import re, sys, pickle
import os.path
import time
import urllib
import wikipedia as pywikibot
# Module-level cache for downloaded bot lists (population code is in the
# commented-out helpers below; presumably keyed per site -- verify).
cache = {}
#def refresh_all(new = False, sysop=False):
# if new:
# import config
# pywikibot.output('Downloading All bot user lists for your accounts in user-config.py');
# for family in config.usernames:
# for lang in config.usernames[ family ]:
# refresh(pywikibot.getSite( code = lang, fam = family ), sysop=sysop )
# for family in config.sysopnames:
# for lang in config.sysopnames[ family ]:
# refresh(pywikibot.getSite( code = lang, fam = family ), sysop=sysop )
#
# else:
# import dircache, time
# filenames = dircache.listdir(pywikibot.config.datafilepath('botlists'))
# botlist_filenameR = re.compile('botlist-([a-z\-:]+).dat')
# for filename in filenames:
# match = botlist_filenameR.match(filename)
# if match:
# arr = match.group(1).split('-')
# family = arr[0]
# lang = '-'.join(arr[1:])
# refresh(pywikibot.getSite(code = lang, fam = family))
#
#def main():
# all = False
# new = False
# sysop = False
# for arg in pywikibot.handleArgs():
# if arg == '-all' or arg == '-update':
# all = True
# elif arg == '-new':
# new = True
# elif arg == '-sysop':
# sysop = True
# if all:
# refresh_all(sysop=sysop)
# elif new:
# refresh_all(new, sysop=sysop)
# else:
# refresh(pywikibot.getSite(), sysop=sysop)
#
# botlist = get(pywikibot.getSite())
# pywikibot.output(u'%i pages in the bot user list.' % len(botlist))
# for pageName in botlist:
# pywikibot.output( pageName, toStdout = True )
#
#if __name__ == "__main__":
# try:
# main()
# finally:
# pywikibot.stopme()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
34934,
1895,
284,
262,
2524,
338,
10214,
2836,
1351,
13,
198,
198,
464,
2163,
14976,
3419,
21333,
262,
1459,
10214,
2836,
1351,
290,
16031,
198,
270,
284,
11... | 2.16895 | 1,095 |
import numpy as np
# Root folder of the SLAM experiment logs and the specific run to analyse.
path = '/Users/benjaminramtoula/Documents/Cours/POLYMTL/MISTLAB/SLAM/decentralized_slam_project/logs/'
dir = '2019-08-22_11-43-20_0.13_20/'
# Merged event timeline: one entry per communication event, with keyframe
# counts and bytes exchanged per message type (f_m_q = find-matches query,
# f_m_a = find-matches answer, r_s_q = receive-separators query).
timestamps_combined = []
nb_kf_used = []
data_exchanged_f_m_q = []
data_exchanged_f_m_a = []
data_exchanged_r_s_q = []
# Parse the find-matches *query* log: unix timestamps (lines starting with
# '15') and the number of descriptor values sent per query.
with open(path+dir+'find_matches_query.txt', 'r') as infile:
    lines = infile.readlines()
    f_m_q_timestamps = filter(lambda x: x.startswith('15'), lines)
    f_m_q_timestamps = [float(t[:-2]) for t in f_m_q_timestamps]
    f_m_q_number_of_values_in_descriptors = filter(
        lambda x: x.startswith('number_of_values'), lines)
    f_m_q_number_of_values_in_descriptors = [
        int(v.split()[1]) for v in f_m_q_number_of_values_in_descriptors]
# First pass over the find-matches *answer* log: timestamps and keyframe
# counts.
with open(path+dir+'find_matches_answer.txt', 'r') as infile:
    lines = infile.readlines()
    f_m_a_timestamps = filter(lambda x: x.startswith('15'), lines)
    f_m_a_timestamps = [float(t[:-2]) for t in f_m_a_timestamps]
    f_m_a_number_of_kf_ids_computing_robot = filter(
        lambda x: x.startswith('number_of_kf_ids_computing_robot'), lines)
    f_m_a_number_of_kf_ids_computing_robot = [
        int(v.split()[1]) for v in f_m_a_number_of_kf_ids_computing_robot]
# NOTE(review): the answer log is re-read below; this second pass overwrites
# the results of the first and additionally extracts the descriptor and
# 3D-keypoint size lists, so the first pass appears redundant.
with open(path+dir+'find_matches_answer.txt', 'r') as infile:
    lines = infile.readlines()
    f_m_a_timestamps = filter(lambda x: x.startswith('15'), lines)
    f_m_a_timestamps = [float(t[:-2]) for t in f_m_a_timestamps]
    f_m_a_number_of_kf_ids_computing_robot = filter(
        lambda x: x.startswith('number_of_kf_ids_computing_robot'), lines)
    f_m_a_number_of_kf_ids_computing_robot = [
        int(v.split()[1]) for v in f_m_a_number_of_kf_ids_computing_robot]
    f_m_a_sizes_of_descriptors = filter(
        lambda x: x.startswith('sizes_of_descriptors'), lines)
    f_m_a_sizes_of_descriptors = [
        [int(v2) for v2 in v.split()[1:]] for v in f_m_a_sizes_of_descriptors]
    f_m_a_sizes_of_kpts3D = filter(
        lambda x: x.startswith('sizes_of_kpts3D'), lines)
    f_m_a_sizes_of_kpts3D = [
        [int(v2) for v2 in v.split()[1:]] for v in f_m_a_sizes_of_kpts3D]
# Parse the receive-separators query log.
with open(path+dir+'receive_separators_query.txt', 'r') as infile:
    lines = infile.readlines()
    r_s_q_timestamps = filter(lambda x: x.startswith('15'), lines)
    r_s_q_timestamps = [float(t[:-2]) for t in r_s_q_timestamps]
    r_s_q_number_of_kf_ids_from = filter(
        lambda x: x.startswith('number_of_kf_ids_from'), lines)
    r_s_q_number_of_kf_ids_from = [
        int(v.split()[1]) for v in r_s_q_number_of_kf_ids_from]
# Merge the three event streams in timestamp order.  Each iteration consumes
# the earliest pending event, appends one entry to the combined timeline and
# one per byte-count series, and pushes np.inf onto a list once it empties so
# the head comparisons keep working as end-of-stream sentinels.
while (not f_m_q_timestamps[0] == np.inf) or (not r_s_q_timestamps[0] == np.inf) or (not f_m_a_timestamps[0] == np.inf):
    if (f_m_q_timestamps[0] < r_s_q_timestamps[0]) and (f_m_q_timestamps[0] < f_m_a_timestamps[0]):
        # Next event is a find-matches query.
        if not f_m_q_number_of_values_in_descriptors[0] or f_m_q_number_of_values_in_descriptors[0] == 0:
            # Empty query: drop it without recording an event.
            f_m_q_timestamps.pop(0)
            f_m_q_number_of_values_in_descriptors.pop(0)
            if not f_m_q_timestamps:
                f_m_q_timestamps.append(np.inf)
            continue
        timestamps_combined.append(f_m_q_timestamps[0])
        # 128 descriptor values per keyframe; 8 bytes per value.
        nb_kf_used.append(int(f_m_q_number_of_values_in_descriptors[0]/128))
        data_exchanged_f_m_q.append(f_m_q_number_of_values_in_descriptors[0]*8)
        data_exchanged_f_m_a.append(0)
        data_exchanged_r_s_q.append(0)
        f_m_q_timestamps.pop(0)
        f_m_q_number_of_values_in_descriptors.pop(0)
        if not f_m_q_timestamps:
            f_m_q_timestamps.append(np.inf)
    elif (r_s_q_timestamps[0] < f_m_a_timestamps[0]):
        # Next event is a receive-separators query.
        if not r_s_q_number_of_kf_ids_from[0]:
            r_s_q_timestamps.pop(0)
            r_s_q_number_of_kf_ids_from.pop(0)
            if not r_s_q_timestamps:
                r_s_q_timestamps.append(np.inf)
            continue
        timestamps_combined.append(r_s_q_timestamps[0])
        # Keyframe count is unchanged by this message type.
        nb_kf_used.append(nb_kf_used[-1])
        # 2-byte header + (8 + 344*3) bytes per keyframe id.
        data_exchanged_r_s_q.append(2+(8+344*3)*r_s_q_number_of_kf_ids_from[0])
        data_exchanged_f_m_a.append(0)
        data_exchanged_f_m_q.append(0)
        r_s_q_timestamps.pop(0)
        r_s_q_number_of_kf_ids_from.pop(0)
        if not r_s_q_timestamps:
            r_s_q_timestamps.append(np.inf)
    elif f_m_a_timestamps[0] < np.inf:
        # Next event is a find-matches answer.
        if f_m_a_number_of_kf_ids_computing_robot[0] == 0 or not f_m_a_number_of_kf_ids_computing_robot[0] or not f_m_a_sizes_of_descriptors[0]:
            f_m_a_timestamps.pop(0)
            f_m_a_sizes_of_kpts3D.pop(0)
            f_m_a_sizes_of_descriptors.pop(0)
            f_m_a_number_of_kf_ids_computing_robot.pop(0)
            if not f_m_a_timestamps:
                f_m_a_timestamps.append(np.inf)
            continue
        timestamps_combined.append(f_m_a_timestamps[0])
        nb_kf_used.append(nb_kf_used[-1])
        # print(f_m_a_sizes_of_kpts3D[0])
        # print(f_m_a_number_of_kf_ids_computing_robot[0])
        data_exchanged_f_m_a.append(
            f_m_a_number_of_kf_ids_computing_robot[0]*(344+44*np.mean(f_m_a_sizes_of_kpts3D[0])+np.mean(f_m_a_sizes_of_descriptors[0])))
        data_exchanged_f_m_q.append(0)
        data_exchanged_r_s_q.append(0)
        # NOTE(review): f_m_a_number_of_kf_ids_computing_robot is NOT popped
        # here (only in the skip branch above), so it drifts out of sync with
        # the other f_m_a lists -- confirm whether this is intentional.
        f_m_a_timestamps.pop(0)
        f_m_a_sizes_of_kpts3D.pop(0)
        f_m_a_sizes_of_descriptors.pop(0)
        if not f_m_a_timestamps:
            f_m_a_timestamps.append(np.inf)
    else:
        break
# all_data_exchanged = data_exchanged_f_m_a + \
#     data_exchanged_f_m_q+data_exchanged_r_s_q
# Element-wise totals per event, then a cumulative sum over the timeline.
all_data_exchanged_f_m = [sum(x) for x in zip(
    data_exchanged_f_m_a, data_exchanged_f_m_q)]
all_data_exchanged = [sum(x) for x in zip(
    all_data_exchanged_f_m, data_exchanged_r_s_q)]
total_data_exchanged = np.cumsum(all_data_exchanged)
# import numpy as np
# NOTE(review): mid-file imports kept as-is; csv appears unused here.
import matplotlib.pyplot as plt
import csv
# results = np.loadtxt(open(
#     "collected_data/parameter_effects/netvlad_data.csv", "rt"), delimiter=",", skiprows=0)
# Stacked area chart: cumulative MB exchanged per message type, as a function
# of the cumulative number of keyframes seen (bytes / 2**20 -> MiB).
fig, ax = plt.subplots()
ax.fill_between(np.cumsum(nb_kf_used), 0, np.cumsum(data_exchanged_f_m_q)/(2**20),
                facecolor='#46237A', label='Sharing Netvlad descriptors')
ax.fill_between(np.cumsum(nb_kf_used), np.cumsum(data_exchanged_f_m_q)/(2**20), np.cumsum(all_data_exchanged_f_m)/(2**20),
                facecolor='#E84354', label='Answers to find matches')
ax.fill_between(np.cumsum(nb_kf_used), np.cumsum(all_data_exchanged_f_m)/(2**20), total_data_exchanged/(2**20),
                facecolor='#256EFF', label='Separators sent back')
# ax.fill_between(results[:, 0], results[:, 1] - results[:, 3], results[:, 1], where=results[:, 1] >= results[:, 2],
#                 facecolor='#E84354', label='Rejected')
ax.set_ylabel('Amount of data exchanged [MB]')
ax.set_xlabel('Number of keyframes seen by first robot')
ax.legend(loc='upper left')
ax.grid(True)
# ax.set_ylim(0, 120)
# ax.set_xlim(0.10, 0.15)
plt.show()
print()
| [
11748,
299,
32152,
355,
45941,
198,
198,
6978,
796,
31051,
14490,
14,
11722,
13337,
859,
83,
2852,
64,
14,
38354,
14,
34,
4662,
14,
45472,
56,
13752,
43,
14,
44,
8808,
48780,
14,
8634,
2390,
14,
12501,
298,
1373,
1143,
62,
82,
2543,... | 1.87476 | 3,649 |
import logging
from django.conf import settings
from django.core.mail import get_connection
from django_rq import job
# Queue name and e-mail backend, both overridable via Django settings.
RQ_EMAIL_DEFAULT_QUEUE = getattr(settings, 'RQ_EMAIL_DEFAULT_QUEUE', 'default')
RQ_EMAIL_BACKEND = getattr(settings, 'RQ_EMAIL_BACKEND',
                           'django.core.mail.backends.smtp.EmailBackend')
# Module-level logger for this tasks module.
logger = logging.getLogger(__name__)
@job(RQ_EMAIL_DEFAULT_QUEUE)
| [
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
4529,
1330,
651,
62,
38659,
198,
6738,
42625,
14208,
62,
81,
80,
1330,
1693,
628,
198,
49,
48,
62,
27630,
4146,
62,
7206,
38865,... | 2.325581 | 172 |
from Model.job import Job
from Model.website import Website
from Driver.WebDriver import WebDriver | [
6738,
9104,
13,
21858,
1330,
15768,
198,
6738,
9104,
13,
732,
12485,
1330,
15887,
198,
6738,
12434,
13,
13908,
32103,
1330,
5313,
32103
] | 4.26087 | 23 |
# -*- coding: utf-8 -*-
######################
# TÜRKÇE NOT DEFTERİ #
######################
# @author : Şükrü Erdem Gök
# @date : 28/06/2020
# @os : Windows 10
# @version : Python 3.8
# @description: Kodlar dışında yorumları ve uygulamanı görünen kısmını tamamen türkçe yaptım. Umarım işinize yarar.
# GÖK DEFTER
# Kütüphaneler
# PyQt5
from PyQt5.QtGui import QFont, QIcon
from PyQt5.QtWidgets import QMainWindow, QVBoxLayout, QPlainTextEdit, QToolBar, QAction, QApplication, QWidget, \
QStatusBar, QFontDialog, QColorDialog, QFileDialog, QMessageBox
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtPrintSupport import QPrintDialog
# Resimleri kullanabilmek için:
from os import path
# Pencere kapatıldığında programın kapanması için:
from sys import argv, exit
# Tarayıcıyı açmak için webbrowser kütüphanesinin open fonksiyonunu kullandım
from webbrowser import open as wbopen
# Window class'ı
# Class'ın contructor'ı
# Hata veren method
# Font seçme dialoğunu açan ve seçilen fontu ayarlayan method
# Renk seçme dialoğunu açan ve seçilen rengi yazı rengi olarak ayarlayan method
# Renk seçme dialoğunu açan ve seçilen rengi arka plan rengi olarak ayarlayan method
# Yazılı olan metni internette arayan method
# Yazılı olan metni link olarak açan method
# Yeni dosya oluşturan veya bir dosyayı açan method
# Dosyayı kaydeden method
# Dosya üzerinde yapılan değişiklikleri kaydeden method
# Dosya yazdırılmak istenirse bu method çalışacak
# Bir dosya açıldığında başlığı o dosyanın adı olarak değiştiren method
# Eğer satır sonu sözcük kaydırma açıksa kapatan, kapalıysa açan method
# Telegram'ı açan method
# Github'ı açan method
if __name__ == '__main__':
    # Create the Qt application, set its display name, show the main
    # notepad window, and hand control to the Qt event loop.
    app = QApplication(argv)
    app.setApplicationName("Gök Defter")
    window = MainWindow()
    # exec_() blocks until the app quits; its return code goes to sys.exit.
    exit(app.exec_())
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
14468,
4242,
2235,
198,
2,
309,
127,
250,
49,
42,
127,
229,
36,
5626,
23449,
5781,
128,
108,
1303,
198,
14468,
4242,
2235,
198,
198,
2,
2488,
9800,
1058,
25370,
252,
... | 2.29803 | 812 |
from src.utils.utils import *
import src.config.Consts as cs
def sum_one_hot_columns_on_rows(df, col_to_aggregate):
    """
    Aggregate categorical information of past loans into one row per client.

    :param df: previous loans application dataframe
    :param col_to_aggregate: categorical columns to one-hot encode and sum
    :return: dataframe of summed one-hot columns plus SK_ID_CURR
    """
    encoded = one_hot_encoding(df, col_to_aggregate, ['SK_ID_CURR'], drop_first=False)
    # Sum every dummy column per client, keeping SK_ID_CURR as the key.
    dummy_columns = list(encoded.columns)
    dummy_columns.remove('SK_ID_CURR')
    return encoded.groupby(['SK_ID_CURR'])[dummy_columns].sum().reset_index()
def sum_amount_previous_loan(df):
    """
    Sum previous loan amounts (demanded and obtained) per client.

    :param df: previous loans application dataframe
    :return: dataframe with SK_ID_CURR plus PREV_AMT_APP_SUM & PREV_AMT_CRED_SUM
    """
    # Select the two columns with a list: ``groupby(...)['A', 'B']`` (tuple
    # indexing) was deprecated and is rejected by pandas >= 2.0.
    df_out = df.groupby(['SK_ID_CURR'])[['AMT_APPLICATION', 'AMT_CREDIT']].sum().reset_index(). \
        rename(columns={'AMT_APPLICATION': 'PREV_AMT_APP_SUM', 'AMT_CREDIT': 'PREV_AMT_CRED_SUM'})
    return df_out
def number_past_loans(df):
    """
    Count the number of past loans per client.

    :param df: previous loans application dataframe
    :return: dataframe with SK_ID_CURR plus NUMBER_PREVIOUS_LOANS
    """
    counts = (
        df.groupby(['SK_ID_CURR'])['SK_ID_PREV']
        .count()
        .reset_index()
    )
    return counts.rename(columns={'SK_ID_PREV': 'NUMBER_PREVIOUS_LOANS'})
def max_past_annuity(df):
    """
    Largest annuity paid on any previous loan, per client.

    :param df: previous loans application dataframe
    :return: dataframe with SK_ID_CURR plus MAX_PAST_ANNUITY
    """
    per_client_max = (
        df.groupby(['SK_ID_CURR'])['AMT_ANNUITY']
        .max()
        .reset_index()
    )
    return per_client_max.rename(columns={'AMT_ANNUITY': 'MAX_PAST_ANNUITY'})
def min_max_past_loans_subscription(df):
    """
    Earliest and latest previous-loan decision times relative to the current
    application, reported as positive day counts.

    :param df: previous loans application dataframe
    :return: dataframe with SK_ID_CURR plus TIME_SINCE_FIRST_LOAN & TIME_SINCE_LAST_LOAN
    """
    spans = (
        df.groupby(['SK_ID_CURR'])['DAYS_DECISION']
        .agg(['min', 'max'])
        .reset_index()
        .rename(columns={'min': 'TIME_SINCE_FIRST_LOAN', 'max': 'TIME_SINCE_LAST_LOAN'})
    )
    # DAYS_DECISION counts days in the past (negative); flip to durations.
    for duration_col in ('TIME_SINCE_FIRST_LOAN', 'TIME_SINCE_LAST_LOAN'):
        spans[duration_col] = spans[duration_col].abs()
    return spans
def min_max_past_loans_duration(df):
    """
    Shortest and longest previous-loan durations (payment counts) per client.

    :param df: previous loans application dataframe
    :return: dataframe with SK_ID_CURR plus SHORTEST_PAST_LOAN & LONGEST_PAST_LOAN
    """
    durations = (
        df.groupby(['SK_ID_CURR'])['CNT_PAYMENT']
        .agg(['min', 'max'])
        .reset_index()
    )
    return durations.rename(columns={'min': 'SHORTEST_PAST_LOAN', 'max': 'LONGEST_PAST_LOAN'})
| [
6738,
12351,
13,
26791,
13,
26791,
1330,
1635,
198,
11748,
12351,
13,
11250,
13,
3103,
6448,
355,
50115,
628,
198,
4299,
2160,
62,
505,
62,
8940,
62,
28665,
82,
62,
261,
62,
8516,
7,
7568,
11,
951,
62,
1462,
62,
9460,
49373,
2599,
... | 2.436142 | 1,151 |
'''
Experiment: EEPROM (AT24C02) read/write demo
Version: v1.0
Date: 2020.12
Author: 01Studio
Description: write one byte to the EEPROM, then read it back
'''
from at24c02 import AT24C02
import time

EE = AT24C02(i2c_num=1) # on the "Columbus" board, pins B8/B9 are I2C bus 1
EE.write(1,8) # write the value 8 to address 1 (any byte value may be used)
time.sleep_ms(5) # the EEPROM needs a short write-cycle delay before reading
print(EE.read(1)) # read back address 1; should equal the value just written
| [
7061,
6,
198,
22522,
252,
165,
103,
234,
28938,
235,
163,
100,
108,
171,
120,
248,
6500,
4805,
2662,
171,
120,
230,
1404,
1731,
34,
2999,
171,
120,
231,
198,
48304,
17312,
105,
171,
120,
248,
85,
16,
13,
15,
198,
33768,
98,
17312,... | 0.964413 | 281 |
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import os
from neutron_lib import constants as lib_constants
from neutron_lib.exceptions import l3 as l3_exc
from neutron_lib.utils import runtime
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_utils import excutils
from neutron._i18n import _
from neutron.agent.l3 import fip_rule_priority_allocator as frpa
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_info
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.common import utils as common_utils
from neutron.ipam import utils as ipam_utils
# Module-level logger.
LOG = logging.getLogger(__name__)
# Name prefixes for floating-IP namespaces and their interface devices.
FIP_NS_PREFIX = 'fip-'
FIP_EXT_DEV_PREFIX = 'fg-'
FIP_2_ROUTER_DEV_PREFIX = 'fpr-'
ROUTER_2_FIP_DEV_PREFIX = namespaces.ROUTER_2_FIP_DEV_PREFIX
# Route Table index for FIPs
FIP_RT_TBL = 16
# Rule priority range for FIPs
FIP_PR_START = 32768
FIP_PR_END = FIP_PR_START + 40000
# Fixed rule priority for Fast Path Exit rules
FAST_PATH_EXIT_PR = 80000
| [
2,
15069,
357,
66,
8,
1853,
4946,
25896,
5693,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
... | 3.048736 | 554 |
# -*- coding: utf-8 -*-
"""NIM REST API Python Client -- Team component"""
from __future__ import absolute_import
import json
from netease_im import util
from netease_im.components import base
from netease_im.util import is_str_type
__author__ = "Manson Li"
__email__ = "manson.li3307@gmail.com"
class TeamComponent(base.BaseComponent):
    """Component dealing with all team related matters"""
    def create(self, **kwargs):
        """
        Create a team (group chat).
        """
        util.require_keys(kwargs, ['tname', 'owner', 'members', 'msg', 'magree', 'joinmode'], False)
        # 'members' must be a JSON-array string of accids, e.g. ["zhangsan"]
        if not is_str_type(kwargs['members']):
            kwargs['members'] = json.dumps(kwargs['members'])
        return self.post_request('/team/create.action', data=kwargs)
    def add(self, **kwargs):
        """
        Invite members into a team.
        """
        util.require_keys(kwargs, ['tid', 'owner', 'members', 'msg', 'magree'], False)
        # 'members' must be a JSON-array string of accids, e.g. ["zhangsan"]
        if not is_str_type(kwargs['members']):
            kwargs['members'] = json.dumps(kwargs['members'])
        return self.post_request('/team/add.action', data=kwargs)
    def kick(self, **kwargs):
        """
        Kick members out of a team.
        """
        util.require_keys(kwargs, ['tid', 'owner'], False)
        if 'member' not in kwargs and 'members' not in kwargs:
            raise ValueError("either 'member' or 'members' must be set")
        if 'members' in kwargs and not is_str_type(kwargs['members']):
            kwargs['members'] = json.dumps(kwargs['members'])
        return self.post_request('/team/kick.action', data=kwargs)
    def remove(self, **kwargs):
        """
        Disband (delete) a team.
        """
        util.require_keys(kwargs, ['tid', 'owner'], False)
        return self.post_request('/team/remove.action', data=kwargs)
    def update(self, **kwargs):
        """
        Edit a team's profile information.
        """
        util.require_keys(kwargs, ['tid', 'owner'], False)
        return self.post_request('/team/update.action', data=kwargs)
    def query(self, **kwargs):
        """
        Query team information and member lists.
        """
        util.require_keys(kwargs, ['tids', 'ope'], False)
        # 'tids' must be a JSON-array string of team ids
        if not is_str_type(kwargs['tids']):
            kwargs['tids'] = json.dumps(kwargs['tids'])
        return self.post_request('/team/query.action', data=kwargs)
    def query_detail(self, **kwargs):
        """
        Get detailed information about one team.
        """
        util.require_keys(kwargs, 'tid', False)
        return self.post_request('/team/queryDetail.action', data=kwargs)
    def get_mark_read_info(self, **kwargs):
        """
        Get the read-receipt details of an acknowledged team message.
        """
        util.require_keys(kwargs, ['tid', 'msgid', 'fromAccid'], False)
        return self.post_request('/team/getMarkReadInfo.action', data=kwargs)
    def change_owner(self, **kwargs):
        """
        Transfer team ownership to another member.
        """
        util.require_keys(kwargs, ['tid', 'owner', 'newowner', 'leave'], False)
        return self.post_request('/team/changeOwner.action', data=kwargs)
    def add_manager(self, **kwargs):
        """
        Appoint team administrators.
        """
        util.require_keys(kwargs, ['tid', 'owner', 'members'], False)
        if not is_str_type(kwargs['members']):
            kwargs['members'] = json.dumps(kwargs['members'])
        return self.post_request('/team/addManager.action', data=kwargs)
    def remove_manager(self, **kwargs):
        """
        Dismiss team administrators.
        """
        util.require_keys(kwargs, ['tid', 'owner', 'members'], False)
        if not is_str_type(kwargs['members']):
            kwargs['members'] = json.dumps(kwargs['members'])
        return self.post_request('/team/removeManager.action', data=kwargs)
    def join_teams(self, **kwargs):
        """
        List the teams a given user has joined.
        """
        util.require_keys(kwargs, ['accid'], False)
        return self.post_request('/team/joinTeams.action', data=kwargs)
    def update_team_nick(self, **kwargs):
        """
        Change a member's nickname inside a team.
        """
        util.require_keys(kwargs, ['tid', 'owner', 'accid', 'nick'], False)
        return self.post_request('/team/updateTeamNick.action', data=kwargs)
    def mute_team(self, **kwargs):
        """
        Toggle message notifications for a team.
        """
        util.require_keys(kwargs, ['tid', 'accid', 'ope'], False)
        return self.post_request('/team/muteTeam.action', data=kwargs)
    def mute_tlist(self, **kwargs):
        """
        Mute or unmute a single team member.
        """
        util.require_keys(kwargs, ['tid', 'owner', 'accid', 'mute'], False)
        return self.post_request('/team/muteTlist.action', data=kwargs)
    def leave(self, **kwargs):
        """
        Leave a team voluntarily.
        """
        util.require_keys(kwargs, ['tid', 'accid'], False)
        return self.post_request('/team/leave.action', data=kwargs)
    def mute_tlist_all(self, **kwargs):
        """
        Mute the whole team at once.
        """
        util.require_keys(kwargs, ['tid', 'owner'], False)
        if 'mute' not in kwargs and 'muteType' not in kwargs:
            raise ValueError("either 'mute' or 'muteType' must be set")
        return self.post_request('/team/muteTlistAll.action', data=kwargs)
    def list_team_mute(self, **kwargs):
        """
        Get the list of muted members in a team.
        """
        util.require_keys(kwargs, ['tid', 'owner'], False)
        return self.post_request('/team/listTeamMute.action', data=kwargs)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
45,
3955,
30617,
7824,
11361,
20985,
1377,
4816,
7515,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
33918,
198,
198,
6738,... | 1.94756 | 2,746 |
from rest_framework import serializers, viewsets
from .models import Note
class NoteSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer to define the API representation for Notes.

    NOTE(review): no ``Meta`` class (model/fields) is declared here --
    confirm whether it is defined elsewhere or missing.
    """
class NoteViewSet(viewsets.ModelViewSet):
    """ViewSet to define the view behavior for Notes."""
    # Serializer used for request/response bodies.
    serializer_class = NoteSerializer
    # Empty base queryset: exposes no Note objects by default.
    queryset = Note.objects.none()
6738,
1334,
62,
30604,
1330,
11389,
11341,
11,
5009,
1039,
198,
6738,
764,
27530,
1330,
5740,
198,
198,
4871,
5740,
32634,
7509,
7,
46911,
11341,
13,
38197,
25614,
17633,
32634,
7509,
2599,
198,
220,
220,
220,
37227,
32634,
7509,
284,
8... | 3.621359 | 103 |
import os
| [
11748,
28686,
198
] | 3.333333 | 3 |
'''
Defines classes for the pattern of browsing a number of dyanmic views.
@author: Peter Parente <parente@cs.unc.edu>
@copyright: Copyright (c) 2008 Peter Parente
@license: BSD License
All rights reserved. This program and the accompanying materials are made
available under the terms of The BSD License which accompanies this
distribution, and is available at
U{http://www.opensource.org/licenses/bsd-license.php}
'''
import Base, Output, System, Config
class LinkedBrowsing(Base.Task):
    '''
    Pattern to support the linear navigation through a list of control patterns.
    @ivar curr: Index of the active view
    @type curr: number
    @ivar views: All the views
    @type views: list of L{Control.Base}
    '''
    def Shutdown(self):
        '''Calls shutdown on all views, then on this task itself.'''
        for v in self.views:
            v.Shutdown()
        super(LinkedBrowsing, self).Shutdown()
    def GetSize(self):
        '''
        @return: Total number of views
        @rtype: number
        '''
        return len(self.views)
    Size = property(GetSize)
    def AddView(self, view):
        '''
        Add a view to this task. Connect the added view to a previous view to receive
        update messages if that view exists.
        @param view: Some control
        @type view: L{Control.Base.Control}
        '''
        if self.Size > 0:
            # connect this view to the previous for change notifications
            self.views[-1].AddChangeListener(view)
        self.views.append(view)
    def OnDoThat(self, message):
        '''
        Handles the "do that" gesture by reporting the task done (via
        L{OnImDone}) when the message is a button press.
        @param message: Input message that triggered this event handler
        @type message: L{Input.Messages.InboundMessage}
        '''
        if message.Press:
            self.OnImDone(message)
    def OnActivate(self, message, auto_focus):
        '''
        Handle a request to activate this task. Ensure the model is ready before
        proceeding, then move focus to the current view.
        @param message: Input message that triggered this event handler
        @type message: L{Input.Messages.InboundMessage}
        @param auto_focus: Did this object receive the focus automatically?
        @type auto_focus: boolean
        @return: True if the task is ready for interaction, false if not
        @rtype: boolean
        '''
        if Base.Task.OnActivate(self, message, auto_focus):
            self.ChangeFocus(self.views[self.curr], None, auto_focus)
            return True
        else:
            return False
    def OnPrevSubTask(self, message):
        '''
        Handle a request to activate the previous view, wrapping around to the
        last view (with a wrap sound) when already at the first.
        @param message: Input message that triggered this event handler
        @type message: L{Input.Messages.InboundMessage}
        '''
        if self.curr-1 < 0:
            # about to wrap past the first view: play the wrap sound
            p1 = Output.Packet(self, message, Output.CONTEXT)
            p1.AddMessage(sound=Output.ISound(self).Action('wrap'))
            self.Output(self, p1)
        self.curr = (self.curr-1) % self.Size
        if not self.ChangeFocus(self.views[self.curr], message, False):
            # focus change refused: report the view as inactive
            p2 = self.views[self.curr].OutDeadLong(message)
            self.Output(self, p2)
    def OnNextSubTask(self, message):
        '''
        Handle a request to activate the next view, wrapping around to the
        first view (with a wrap sound) when already at the last.
        @param message: Input message that triggered this event handler
        @type message: L{Input.Messages.InboundMessage}
        '''
        if self.curr+1 >= self.Size:
            # about to wrap past the last view: play the wrap sound
            p1 = Output.Packet(self, message, Output.CONTEXT)
            p1.AddMessage(sound=Output.ISound(self).Action('wrap'))
            self.Output(self, p1)
        self.curr = (self.curr+1) % self.Size
        if not self.ChangeFocus(self.views[self.curr], message, False):
            # focus change refused: report the view as inactive
            p2 = self.views[self.curr].OutDeadLong(message)
            self.Output(self, p2)
    def OutIntroduction(self, message, auto_focus):
        '''
        Outputs the control type and the number of views.
        @param message: Message that caused this event handler to fire
        @type message: L{Input.Messages.InboundMessage}
        @param auto_focus: Did this object receive the focus automatically?
        @type auto_focus: boolean
        @return: Output to be played
        @rtype: L{Output.Messages.OutboundPacket}
        '''
        p1 = super(LinkedBrowsing, self).OutIntroduction(message, auto_focus)
        p2 = Output.Packet(self, message)
        p2.AddMessage(speech='%s, %d views' % (self.Name, self.Size),
                      person=Output.SUMMARY)
        return (p1, p2)
| [
7061,
6,
198,
7469,
1127,
6097,
329,
262,
3912,
286,
23182,
257,
1271,
286,
288,
4121,
9383,
5009,
13,
198,
198,
31,
9800,
25,
5613,
16774,
68,
1279,
8000,
68,
31,
6359,
13,
19524,
13,
15532,
29,
198,
31,
22163,
4766,
25,
15069,
3... | 2.743252 | 1,593 |
from abc import ABC, abstractmethod
from datetime import datetime
from functools import cached_property
from typing import Mapping
from uuid import uuid4
from dateutil.tz import tzutc
from app.utilities.json import json_dumps
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
1257,
310,
10141,
1330,
39986,
62,
26745,
198,
6738,
19720,
1330,
337,
5912,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
198,
198,
6738,
... | 3.523077 | 65 |
"""
A zero-indexed array arr consisting of n integers is given.
The dominator of array arr is the value that occurs in more than half of the elements of arr.
For example, consider array arr such that arr = [3,4,3,2,3,1,3,3]
The dominator of arr is 3 because it occurs in 5 out of 8 elements of arr and 5 is more than a half of 8.
"""
from typing import List
def dominator(array: List[int]) -> int:
    """Return the dominator of ``array``: the value that occurs in more than
    half of its elements, or -1 when no such value exists.

    Uses the Boyer-Moore majority-vote algorithm: a first O(n) scan finds the
    only possible candidate, and a second scan verifies it actually exceeds
    half of the elements. This replaces the original O(n^2) approach that
    called ``array.count`` once per element.

    Args:
        array (List[int]): an array

    Examples:
        >>> assert dominator([3,4,3,2,3,1,3,3]) == 3
        >>> assert dominator([1, 2]) == -1
        >>> assert dominator([]) == -1
    """
    candidate = None  # the only value that could occur more than n/2 times
    balance = 0
    for value in array:  # type: int
        if balance == 0:
            candidate = value
            balance = 1
        elif value == candidate:
            balance += 1
        else:
            balance -= 1
    # verification pass: a candidate always exists for non-empty input, but it
    # is only a dominator if it truly occurs in more than half the elements
    if candidate is not None and array.count(candidate) > len(array) / 2:
        return candidate
    return -1
if __name__ == "__main__":
    # Demo: 3 occurs 5 times out of 8 elements (> half), so this prints 3.
    print(dominator([3, 4, 3, 2, 3, 1, 3, 3]))
| [
37811,
198,
32,
6632,
12,
9630,
276,
7177,
5240,
17747,
286,
299,
37014,
318,
1813,
13,
198,
198,
464,
7462,
1352,
286,
7177,
5240,
318,
262,
1988,
326,
8833,
287,
517,
621,
2063,
286,
262,
4847,
286,
5240,
13,
198,
1890,
1672,
11,
... | 2.634228 | 298 |
import collections
import numpy
from xgboost.callback import print_evaluation
import Crawler
import SentAnalysis
import DatabaseService
from flask import Flask, request, Response
| [
11748,
17268,
198,
11748,
299,
32152,
198,
6738,
2124,
70,
39521,
13,
47423,
1330,
3601,
62,
18206,
2288,
198,
11748,
20177,
1754,
198,
11748,
11352,
32750,
198,
11748,
24047,
16177,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
18261,
628... | 4.357143 | 42 |
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from sqlalchemy.sql.schema import MetaData
# Shared Flask-SQLAlchemy handle for the application.
# The MetaData naming convention gives every index and constraint a
# deterministic, predictable name (instead of a backend-generated one), which
# makes constraints addressable by name in later schema migrations.
db = SQLAlchemy(
    metadata=MetaData(
        naming_convention={
            "ix": 'ix_%(column_0_label)s',  # indexes
            "uq": "uq_%(table_name)s_%(column_0_name)s",  # unique constraints
            "ck": "ck_%(table_name)s_%(column_0_name)s",  # check constraints
            "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",  # foreign keys
            "pk": "pk_%(table_name)s"  # primary keys
        }
    )
)
| [
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
6875,
62,
35226,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
2776,
198,
6738,
44161,
282,
26599,
13,
25410... | 1.978261 | 276 |
import requests,sys,time
import time,datetime
| [
11748,
7007,
11,
17597,
11,
2435,
198,
11748,
640,
11,
19608,
8079,
628,
198
] | 3.428571 | 14 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import datetime
import os
import re
import shutil
import subprocess
import sys
import time
import urllib.parse
import xbmc
import xbmcgui
import xbmcplugin
import xbmcaddon
import xbmcvfs
# Kodi add-on identity; must match the id declared in the add-on manifest.
__PLUGIN_ID__ = "plugin.picture.sane-scanner"
_PLUGIN_NAME = "Kodi Sane Scanner"
# Scratch locations for scanned images and PDF previews.
_TMP_FOLDER = "/tmp/"
_IMG_FILE = "kodi-sane-scanner-img"
_PDF_PREVIEW_FILE = "kodi-sane-scanner-pdf"
# Argument presets handed to the scanner command line, indexed by menu choice.
_SCANNER_MODES = [
    ["--mode", "Lineart"],
    ["--mode", "Gray"],
    ["--mode", "Color"]
]
# NOTE(review): triple-N typo in the name (_SCANNNER_); renaming would touch
# every call site, so it is only flagged here.
_SCANNNER_RESOLUTIONS = [
    ["--resolution", "150"],
    ["--resolution", "200"],
    ["--resolution", "300"],
    ["--resolution", "600"]
]
# DPI values offered when archiving (plain strings, no CLI flag).
_ARCHIVE_RESOLUTIONS = [
    "150",
    "200",
    "300",
    "600"
]
# Scan-area presets: empty = device default, then 216x279mm (US Letter),
# 210x297mm (A4), 148x210mm (A5), 105x148mm (A6).
_SCANNER_DIMENSIONS = [
    [],
    ["-l", "0", "-t", "0", "-x", "216mm", "-y", "279mm"],
    ["-l", "0", "-t", "0", "-x", "210mm", "-y", "297mm"],
    ["-l", "0", "-t", "0", "-x", "148mm", "-y", "210mm"],
    ["-l", "0", "-t", "0", "-x", "105mm", "-y", "148mm"],
]
# Output image formats supported for single-page scans.
_SCANNER_FORMAT = [
    "png",
    "jpeg"
]
# Handle to this add-on's settings and its installation directory.
settings = xbmcaddon.Addon(id=__PLUGIN_ID__)
addon_dir = xbmcvfs.translatePath(settings.getAddonInfo('path'))
# Menu entries are accumulated here by the browse/build helpers.
_menu = []
if __name__ == '__main__':
    # Kodi invokes this script either with a symbolic command name or with a
    # numeric addon handle followed by the plugin URL and a query string.
    command = sys.argv[1]
    if command == "find_scanner":
        find_scanner()
    elif command == "find_printer":
        find_printer()
    else:
        addon_handle = int(command)
        path = urllib.parse.urlparse(sys.argv[0]).path
        url_params = urllib.parse.parse_qs(sys.argv[2][1:])
        # "exec" in the query string means an action was chosen; otherwise
        # render the directory listing for the given path
        handler = execute if "exec" in url_params else browse
        handler(path, url_params)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
4423,
346,
198,
11748,
850,
14681,
198,
11748,
250... | 2.064151 | 795 |
import urllib.request,json
from .models import Quote
# Endpoint that returns one random programming quote as a JSON document.
get_quotes_url = 'http://quotes.stormconsultancy.co.uk/random.json'
| [
11748,
2956,
297,
571,
13,
25927,
11,
17752,
198,
6738,
764,
27530,
1330,
19879,
628,
198,
198,
1136,
62,
421,
6421,
62,
6371,
796,
705,
4023,
1378,
421,
6421,
13,
12135,
5936,
586,
3883,
13,
1073,
13,
2724,
14,
25120,
13,
17752,
6,... | 2.116883 | 77 |
# Copyright 2016-2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lcm.pub.config.config import MSB_BASE_URL
# Service registration metadata for the NS lifecycle-management component.
SERVICE_TYPE = 'NetworkService'
SERVICE_ROLE = 'NetworkService'
# REST resource URI templates rooted at the Microservice Bus base URL; each
# '%s' placeholder is filled with the relevant identifier at call time.
NS_INSTANCE_BASE_URI = MSB_BASE_URL + '/api/nslcm/v1/ns_instances/%s'
NS_OCC_BASE_URI = MSB_BASE_URL + '/api/nslcm/v1/ns_lcm_op_occs/%s'
SUBSCRIPTION_ROOT_URI = MSB_BASE_URL + "/api/nslcm/v1/subscriptions/%s"
| [
2,
15069,
1584,
12,
5539,
1168,
9328,
10501,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
19... | 2.93871 | 310 |
import re
import string
import logging
from tcutils.util import get_random_name, retry, is_v6
result_file = '/tmp/ping'
class Ping:
    ''' Helper to generate ping traffic from inside a sender VM.
        Mandatory args:
        host : Dest IP
        sender_vm_fixture : Sender VMs fixture handle
        Supports ping with IPv4 and IPv6
        If c option is not passed then ping will run continuously
        If multiple ping traffic sessions needs to be running together,
        user needs to instantiate as many ping objects
        Ex :
        c=10
    '''
    def start(self, wait=True):
        '''
        Launch ping as a background daemon on the sender VM.
        stdout goes to self.result_file, stderr to self.log_file, and the
        daemon pid is recorded in self.pid_file so stop() can signal it.
        if c is not passed as argument to ping, 'wait' must be False
        '''
        cmd = '%s %s %s 2>%s 1>%s' % (self.ping_cmd, self.args_string,
            self.host, self.log_file, self.result_file)
        self.logger.info('Starting %s on %s, args: %s' % (self.ping_cmd,
            self.sender_vm_fixture.vm_name, self.args_string))
        self.logger.debug('%s cmd : %s' % (self.ping_cmd, cmd))
        # run detached so the test can do other work while traffic flows
        self.sender_vm_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True,
            as_daemon=True, pidfile=self.pid_file)
        if wait:
            self.wait_till_ping_completes()
    # end start

    def stop(self):
        '''
        Stops the running instance of ping
        Returns a dict of structure :
        { 'sent' : xyz,
          'received' : xyz,
          'loss' : xyz in percent,
          'time' : xyz in ms
          'rtt_min' : xyz in ms,
          'rtt_avg' : xyz,
          'rtt_max' : xyz,
          'rtt_mdev' : xyz
        }
        '''
        # kill -2 (SIGINT) makes ping print its final summary before exiting
        cmd = 'cat %s | xargs kill -2 ' % (self.pid_file)
        self.logger.debug('Ensuring ping instance with result file %s '
                          'on %s is stopped' % (self.result_file,
                          self.sender_vm_fixture.vm_name))
        self.sender_vm_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True)
        (stats, log) = self.parse_result_file()
        # NOTE(review): delete_log_files() is not visible in this file --
        # presumably defined elsewhere on this class; confirm.
        self.delete_log_files()
        return (stats, log)
    # end stop

    def get_stats(self):
        '''
        Get the ping stats without killing the ping
        log file output format when SIGQUIT(-3) is used for ping:
        67/67 packets, 0% loss, min/avg/ewma/max = 0.171/0.217/0.208/0.312 ms
        77/77 packets, 0% loss, min/avg/ewma/max = 0.171/0.221/0.232/0.312 ms
        Returns a dict of structure :
        { 'sent' : xyz,
          'received' : xyz,
          'loss' : xyz in percent,
        }
        '''
        # kill -3 (SIGQUIT) makes ping emit an interim status line and keep running
        cmd = 'cat %s | xargs kill -3 ' % (self.pid_file)
        self.sender_vm_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True)
        result_data = {'sent': None, 'received': None, 'loss': None}
        search1 = '''(\S+)\/(\S+) packets, (\S+)% loss'''
        # only the last status line matters -- it has the running totals
        cmds = ['cat %s| tail -1' %(self.log_file)]
        result = self.sender_vm_fixture.run_cmd_on_vm(cmds, timeout=300)
        result_content = result[cmds[0]]
        if result_content:
            reg_result = re.search(search1, result_content)
            if reg_result:
                result_data['sent'] = reg_result.group(1)
                result_data['received'] = reg_result.group(2)
                result_data['loss'] = reg_result.group(3)
        if 'None' in result_data.values():
            self.logger.warn('Parsing of ping had problems. Got stats: %s'
                'Please check debug logs' %(result_data))
            self.logger.debug(result_content)
        else:
            self.logger.debug('ping stats: %s' % (result_data))
        return result_data
    # end get_stats

    def parse_result_file(self, result_file=None):
        ''' parse output similar to below and return a dict
        64 bytes from netmatters.juniper.net (66.129.230.17): icmp_seq=1 ttl=50 time=231 ms
        64 bytes from netmatters.juniper.net (66.129.230.17): icmp_seq=2 ttl=50 time=213 ms
        64 bytes from netmatters.juniper.net (66.129.230.17): icmp_seq=3 ttl=50 time=213 ms
        ^C
        --- juniper.net ping statistics ---
        4 packets transmitted, 3 received, 25% packet loss, time 3003ms
        rtt min/avg/max/mdev = 213.115/219.307/231.394/8.564 ms
        '''
        result_file = result_file or self.result_file
        reg_result = None
        rtt_result = None
        result_data = {'sent': None, 'received': None, 'loss': None,
            'time':None, 'rtt_min':None, 'rtt_avg':None, 'rtt_max':None,
            'rtt_mdev':None}
        # search1 matches the summary counters, search2 the rtt statistics
        search1 = '''(\S+) packets transmitted, (\S+) received, (\S+)% packet loss, time (\S+)ms'''
        search2 = '''rtt min/avg/max/mdev = (\S+)\/(\S+)\/(\S+)\/(\S+) '''
        cmds = ['cat %s' %(result_file),
                'cat %s' %(self.log_file)]
        result = self.sender_vm_fixture.run_cmd_on_vm(cmds, timeout=300)
        result_content = result[cmds[0]]
        result_log = result[cmds[1]]
        if result_content:
            reg_result = re.search(search1, result_content)
            rtt_result = re.search(search2, result_content)
        if reg_result:
            result_data['sent'] = reg_result.group(1)
            result_data['received'] = reg_result.group(2)
            result_data['loss'] = reg_result.group(3)
            result_data['time'] = reg_result.group(4)
        if rtt_result:
            result_data['rtt_min'] = rtt_result.group(1)
            result_data['rtt_avg'] = rtt_result.group(2)
            result_data['rtt_max'] = rtt_result.group(3)
            result_data['rtt_mdev'] = rtt_result.group(4)
        if 'None' in result_data.values():
            self.logger.warn('Parsing of ping had problems. Got stats: %s'
                'Please check debug logs' %(result_data))
            self.logger.debug(result_content)
        else:
            self.logger.debug('ping stats: %s' % (result_data))
        return (result_data, result_log)
    # end parse_result_file

    def get_cmd_args(self, **kwargs):
        ''' convert { 'k1': val, 'k2':val2 } to
        "-k1 val -k2 val2"
        All keys are of type string
        All values are string or boolean
        '''
        ret_val = ''
        for (k,v) in kwargs.items():
            key = '-%s' % (k)
            if type(v) == bool:
                if v:
                    # boolean True becomes a bare flag with no value
                    v = ''
                else:
                    # i.e. dont set this arg
                    continue
            ret_val += ' %s %s ' % (key,v)
        # end for
        return ret_val
    # end get_cmd_args

    # end _check_if_ping_still_running
    # NOTE(review): the decorator below has no function attached -- judging by
    # the trailing marker, wait_till_ping_completes was lost from this file;
    # as written the class body does not parse. Restore it from upstream.
    @retry(delay=5, tries=50)
    # end wait_till_ping_completes
| [
11748,
302,
198,
11748,
4731,
198,
11748,
18931,
198,
6738,
256,
8968,
4487,
13,
22602,
1330,
651,
62,
25120,
62,
3672,
11,
1005,
563,
11,
318,
62,
85,
21,
198,
198,
20274,
62,
7753,
796,
31051,
22065,
14,
13886,
6,
198,
198,
4871,
... | 1.999693 | 3,262 |
import origen, subprocess, builtins, types, inspect, re, pathlib
from .. import origen_sphinx_extension as ose
def insert_header(app, docname, source):
    '''
    Insert content at the beginning of the docs.
    Currently inserts:

    * Any |shorthand| ``include`` RST files
    '''
    ext = ose.sphinx_ext(app, 'origen.web.shorthand')
    doc = pathlib.Path(app.env.doc2path(docname))
    # only plain RST sources get the shared includes, and only when the
    # shorthand extension is actually loaded
    if '.rst' not in doc.suffixes or not ext:
        return False
    includes = ext.all_include_rsts()
    # never inject the shared files into themselves
    if any(i.match(str(doc)) for i in includes):
        return False
    depth = len(doc.relative_to(origen.web.source_dir).parents) - 1
    prefix = "../" * depth
    directives = []
    for inc in includes:
        rel = prefix + str(inc.relative_to(origen.web.source_dir))
        directives.append(
            f".. include:: {rel}\n   :start-after: start-content\n\n")
    source[0] = "\n".join(directives) + source[0]
    return True
# Setup taken from here: https://www.ericholscher.com/blog/2016/jul/25/integrating-jinja-rst-sphinx/
@origen.helpers.continue_on_exception(ose.logger)
@origen.helpers.continue_on_exception(ose.logger)
def process_docstring(app, what, name, obj, options, lines):
    ''' Runs the template engine on docstrings, allowing for jinja syntax inside docstrings. '''
    app.emit("origen-preprocess-docstring", what, name, obj, options, lines)
    joined = "\n".join(lines)
    try:
        rendered = jinja_render_string(app, joined)
    except Exception as e:
        # Not every exception carries a .message attribute, so fall back to repr
        detail = getattr(e, 'message', repr(e))
        suffix = ': ' + detail if detail else ''
        raise type(e)(
            f"Exception occurred processing the docstring for {name} of doc-type '{what}' (from {app.env.docname}) {suffix}"
        ) from e
    # replace the docstring lines in place, as autodoc expects
    rendered += "\n"
    lines.clear()
    lines += rendered.split("\n")
| [
11748,
1796,
268,
11,
850,
14681,
11,
3170,
1040,
11,
3858,
11,
10104,
11,
302,
11,
3108,
8019,
198,
6738,
11485,
1330,
1796,
268,
62,
82,
746,
28413,
62,
2302,
3004,
355,
267,
325,
628,
198,
198,
4299,
7550,
62,
25677,
7,
1324,
1... | 2.238462 | 910 |
from graph_depth_first import __version__
import pytest
from graph_depth_first.graph_depth import *
@pytest.fixture
# Node can be successfully added to the graph
# An edge can be successfully added to the graph
# A collection of all nodes can be properly retrieved from the graph
# All appropriate neighbors can be retrieved from the graph
# Neighbors are returned with the weight between nodes included
# The proper size is returned, representing the number of nodes in the graph
# A graph with only one node and edge can be properly returned
# An empty graph properly returns null
| [
6738,
4823,
62,
18053,
62,
11085,
1330,
11593,
9641,
834,
198,
11748,
12972,
9288,
198,
198,
6738,
4823,
62,
18053,
62,
11085,
13,
34960,
62,
18053,
1330,
1635,
628,
628,
198,
198,
31,
9078,
9288,
13,
69,
9602,
198,
198,
2,
19081,
4... | 4.156463 | 147 |
##############################################################
#
# script uses local X axis to compute element Length and snapshot it along a curve
# 21.02.2016
# Sergey Solohin (Neill3d) 2016
# e-mail to: s@neill3d.com
# www.neill3d.com
#
# Github repo - https://github.com/Neill3d/MoPlugs
# Licensed under BSD 3-clause
# https://github.com/Neill3d/MoPlugs/blob/master/LICENSE
#
############################################################
# select source, then destination. Script creates relation constraint between all joints elements
from pyfbsdk import *
import math
#
| [
198,
29113,
14468,
7804,
4242,
2235,
198,
2,
198,
2,
4226,
3544,
1957,
1395,
16488,
284,
24061,
5002,
22313,
290,
27479,
340,
1863,
257,
12133,
198,
2,
2310,
13,
2999,
13,
5304,
198,
2,
36106,
4294,
1219,
259,
357,
26538,
18,
67,
8,... | 3.309392 | 181 |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from FUNCS import FNS
from LEGS import LegVar, LegFun
from BMAP3D import MapVar, MapFun
# ---------------------------------------------------------------------------------------------------------------------
# Leg Module - practice locomotion with CPG only, ERG only, and CPG + ERG
if __name__ == '__main__':
    length = 10000  # number of animation frames
    size = 5
    num = 2 * size  # leg model size handed to LegVar
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    # ----------------------------------------------------------------------------------------------------------------
    # initialize variables
    origin = np.array((20, 50, 45))
    limit = np.array((100, 100, 120))
    Var = MapVar(ax, limit, origin, size)
    eye_data = FNS().eye_init()
    axial_data = FNS().column_init()
    # one rotation record per side (left, right) for upper and lower limbs
    uplimb_rot = np.array((FNS().uplimb_init(), FNS().uplimb_init()))
    lowlimb_rot = np.array((FNS().lowlimb_init(), FNS().lowlimb_init()))
    append_data = np.array((uplimb_rot, lowlimb_rot))
    Map = MapFun(eye_data, axial_data, append_data, Var)
    mode = (0, 0)
    shift = Map.CoM_shift(mode)
    # NOTE(review): this rebinding shadows the imported LegVar class, so a
    # second LegVar could not be constructed afterwards -- confirm intent.
    LegVar = LegVar(num)
    Leg = LegFun(LegVar)
    # muscle insertion points taken from element 5 of each leg chain
    insert = Map.left_leg_cpt(shift)[5], Map.right_leg_cpt(shift)[5]
    LegVar.spc.mus_insert = FNS().arrform(insert, 'append')
    # ----------------------------------------------------------------------------------------------------------------
    # NOTE(review): `pract` (the per-frame callback) is not defined in this
    # file as shown -- confirm it exists before running.
    ani = animation.FuncAnimation(fig, pract, frames=length, interval=100, blit=True)
    #ax.set_title('Locomotion by subcortical CPG')
    #ax.set_title('Locomotion by cortical CPG')
    ax.set_title('Locomotion by cortical and subcortical CPGs')
    plt.show()
# -------------------------------------------------------------------------------------------------------------------- | [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
11227,
341,
355,
11034,
198,
198,
6738,
29397,
7902,
1330,
376,
8035,
198,
6738,
20978,
50,
1330,
3564,
1985... | 3.164384 | 584 |
# Module-level turn state for the deterministic die; presumably mutated by
# solve(), which is defined elsewhere -- TODO confirm.
dice_state = 0

# https://adventofcode.com/2021/day/21
if __name__ == '__main__':
    # Puzzle input: player 1 starts on square 2, player 2 on square 5.
    res = solve(2, 5)
    print(res)
| [
67,
501,
62,
5219,
796,
657,
628,
198,
2,
3740,
1378,
324,
1151,
1659,
8189,
13,
785,
14,
1238,
2481,
14,
820,
14,
2481,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
581,
796,
8494,
7,
... | 2.2 | 55 |
import io
import json
import re
| [
11748,
33245,
198,
11748,
33918,
198,
11748,
302,
198
] | 3.555556 | 9 |
from time import strftime, strptime
from datetime import date, timedelta, datetime
from handler.BaseHandler import *
from model.Accounts import *
from util import *
| [
6738,
640,
1330,
965,
31387,
11,
965,
457,
524,
198,
6738,
4818,
8079,
1330,
3128,
11,
28805,
12514,
11,
4818,
8079,
198,
198,
6738,
21360,
13,
14881,
25060,
1330,
1635,
198,
6738,
2746,
13,
30116,
82,
1330,
1635,
198,
6738,
7736,
133... | 3.733333 | 45 |
from collections import Counter
from .utils import to_unicode
''' from .value_checks import (is_a_date, is_a_number, is_a_nothing,
is_a_latitude, is_a_longitude, is_a_coord_pair, is_a_country, is_a_city,
is_a_state, is_a_address, is_a_text, is_a_label, is_a_zip, is_a_street,
is_a_phone, is_a_url, is_a_email, is_a_time, is_a_currency, is_a_percent) '''
# currently understands
# category
# datetime
# time
# number
# label
# text
# id
# email
# url
# address
# street
# city
# state
# zipcode
# country
# phone
# latitude
# longitude
# coordinate_pair
# coming soon
# name
# ordinal??? -- can obtain from categorical/int info...
from .utils import prep_value
| [
6738,
17268,
1330,
15034,
198,
6738,
764,
26791,
1330,
284,
62,
46903,
1098,
198,
7061,
6,
422,
764,
8367,
62,
42116,
1330,
357,
271,
62,
64,
62,
4475,
11,
318,
62,
64,
62,
17618,
11,
318,
62,
64,
62,
22366,
11,
198,
220,
220,
2... | 2.57197 | 264 |
"""
What are the two primary categories for tree traversals?
A - 1- Depth-First
2- Breadth-First
B - 1- Height-First
2- Width-First
C - 1- Level-First
2- Branch-First
D - 1- Leaf-First
2- Branch-First
answer is :
""" | [
37811,
198,
198,
2061,
389,
262,
734,
4165,
9376,
329,
5509,
33038,
874,
30,
198,
198,
32,
532,
352,
12,
36350,
12,
5962,
198,
220,
362,
12,
28731,
400,
12,
5962,
198,
198,
33,
532,
352,
12,
27280,
12,
5962,
220,
220,
198,
220,
... | 2.568421 | 95 |
# 1100. Find K-Length Substrings With No Repeated Characters
# NOTE(review): `collections` is imported but unused in this visible snippet,
# and the Solution class is defined elsewhere -- this line is a smoke test.
import collections

print(Solution().numKLenSubstrNoRepeats("havefunonleetcode", 5))
2,
36566,
13,
9938,
509,
12,
24539,
3834,
37336,
2080,
1400,
30558,
515,
26813,
198,
11748,
17268,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198,
4798,
7,
46344,
22446,
22510,
42,
30659,
7004,
2536,
2949,
47541,... | 2.962264 | 53 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVersionResult',
'AwaitableGetVersionResult',
'get_version',
'get_version_output',
]
@pulumi.output_type
# pylint: disable=using-constant-test
def get_version(app_id: Optional[str] = None,
                service_id: Optional[str] = None,
                version_id: Optional[str] = None,
                view: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVersionResult:
    """
    Gets the specified Version resource. By default, only a BASIC_VIEW will be returned. Specify the FULL_VIEW parameter to get the full resource.

    :param app_id: ID of the application the version belongs to.
    :param service_id: ID of the service the version belongs to.
    :param version_id: ID of the version to fetch.
    :param view: Response view; BASIC_VIEW by default, FULL_VIEW for the full resource.
    :param opts: Options controlling how the invoke is performed.
    """
    __args__ = dict()
    __args__['appId'] = app_id
    __args__['serviceId'] = service_id
    __args__['versionId'] = version_id
    __args__['view'] = view
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the provider; .value is the raw result dict
    # typed as GetVersionResult.
    __ret__ = pulumi.runtime.invoke('google-native:appengine/v1:getVersion', __args__, opts=opts, typ=GetVersionResult).value

    return AwaitableGetVersionResult(
        api_config=__ret__.api_config,
        automatic_scaling=__ret__.automatic_scaling,
        basic_scaling=__ret__.basic_scaling,
        beta_settings=__ret__.beta_settings,
        build_env_variables=__ret__.build_env_variables,
        create_time=__ret__.create_time,
        created_by=__ret__.created_by,
        default_expiration=__ret__.default_expiration,
        deployment=__ret__.deployment,
        disk_usage_bytes=__ret__.disk_usage_bytes,
        endpoints_api_service=__ret__.endpoints_api_service,
        entrypoint=__ret__.entrypoint,
        env=__ret__.env,
        env_variables=__ret__.env_variables,
        error_handlers=__ret__.error_handlers,
        handlers=__ret__.handlers,
        health_check=__ret__.health_check,
        inbound_services=__ret__.inbound_services,
        instance_class=__ret__.instance_class,
        libraries=__ret__.libraries,
        liveness_check=__ret__.liveness_check,
        manual_scaling=__ret__.manual_scaling,
        name=__ret__.name,
        network=__ret__.network,
        nobuild_files_regex=__ret__.nobuild_files_regex,
        readiness_check=__ret__.readiness_check,
        resources=__ret__.resources,
        runtime=__ret__.runtime,
        runtime_api_version=__ret__.runtime_api_version,
        runtime_channel=__ret__.runtime_channel,
        runtime_main_executable_path=__ret__.runtime_main_executable_path,
        service_account=__ret__.service_account,
        serving_status=__ret__.serving_status,
        threadsafe=__ret__.threadsafe,
        version_url=__ret__.version_url,
        vm=__ret__.vm,
        vpc_access_connector=__ret__.vpc_access_connector)
# The decorator generates the implementation by lifting get_version into the
# Output world, so the `...` body below is intentional, not a stub.
@_utilities.lift_output_func(get_version)
def get_version_output(app_id: Optional[pulumi.Input[str]] = None,
                       service_id: Optional[pulumi.Input[str]] = None,
                       version_id: Optional[pulumi.Input[str]] = None,
                       view: Optional[pulumi.Input[Optional[str]]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVersionResult]:
    """
    Gets the specified Version resource. By default, only a BASIC_VIEW will be returned. Specify the FULL_VIEW parameter to get the full resource.
    """
    ...
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
26144,
35986,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760,
644,
345,
389,
1804,
0,
17202,
... | 2.363636 | 1,562 |
import torch
import numpy as np
import time | [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640
] | 3.909091 | 11 |
import create_word_lists
| [
11748,
2251,
62,
4775,
62,
20713,
628,
628
] | 3.5 | 8 |
# Server is setup here
from flask import (
Flask,
render_template,
redirect,
request,
jsonify,
make_response,
Response,
)
from flask_bootstrap import Bootstrap
import psycopg2
import logging
from odm360.log import start_logger, stream_logger
from odm360.camera360rig import do_request
from odm360 import dbase
from odm360.states import states
from odm360.utils import cleanopts
# Postgres DSN. NOTE(review): credentials are hard-coded here; move them to
# configuration/environment before deploying anywhere non-local.
db = "dbname=odm360 user=odm360 host=localhost password=zanzibar"
conn = psycopg2.connect(db)
cur = conn.cursor()
# make sure devices is empty
dbase.truncate_table(cur, "devices")
logger = start_logger("True", "False")
# if there is an active project, put status on zero (waiting for cams) at the beginning no matter what
cur_project = dbase.query_project_active(cur)
if len(cur_project) == 1:
    dbase.update_project_active(cur, states["ready"])
app = Flask(__name__)
# silence werkzeug's per-request log lines; the app streams its own log
log = logging.getLogger("werkzeug")
log.setLevel(logging.ERROR)
app.logger.disabled = True
bootstrap = Bootstrap(app)
@app.route("/", methods=["GET", "POST"])
@app.route("/project", methods=["GET", "POST"])
def project_page():
    """
    Project page: GET renders the project form, POST creates a new project
    and makes it the active one.
    """
    if request.method != "POST":
        return render_template("project.html")
    # config = current_app.config['config']
    # FIXME: put inputs into the database and remove config stuff below
    form = cleanopts(request.form)
    name = form["project_name"]
    interval = int(form["dt"])
    # persist the new project with its camera count and capture interval
    dbase.insert_project(cur, name, n_cams=int(form["n_cams"]), dt=interval)
    # remove the current project selection and make a fresh table
    dbase.create_table_project_active(cur, drop=True)
    # mark the freshly created project as the active one
    project_id = dbase.query_projects(cur, project_name=name)[0][0]
    dbase.insert_project_active(cur, project_id=project_id)
    logger.info(
        f'Created a new project name: "{name}" cams: {form["n_cams"]} interval: {interval} secs.'
    )
    return redirect("/")
@app.route("/logs")
def logs_page():
    """
    Render the page for downloading or deleting the raw gnss data logs.
    """
    return render_template("logs.html")
@app.route("/settings")
def settings_page():
    """
    Render the settings page for managing the raw gnss data.
    """
    return render_template("settings.html")
@app.route("/cams")
@app.route("/file_page")
@app.route("/log_stream", methods=["GET"])
def stream():
    """Stream the application log to the client as a server-sent event feed."""
    # largely taken from https://towardsdatascience.com/how-to-add-on-screen-logging-to-your-flask-application-and-deploy-it-on-aws-elastic-beanstalk-aa55907730f
    body = stream_logger()
    return Response(body, mimetype="text/plain", content_type="text/event-stream")
@app.route("/_cameras")
@app.route("/_cam_summary")
@app.route("/picam", methods=["GET", "POST"])
# NOTE(review): the three @app.route decorators directly above have no
# function attached (a decorator before `if` does not parse), and run() is
# not defined in this file -- both look like lost code; restore from upstream.
if __name__ == "__main__":
    run(app)
| [
2,
9652,
318,
9058,
994,
198,
6738,
42903,
1330,
357,
198,
220,
220,
220,
46947,
11,
198,
220,
220,
220,
8543,
62,
28243,
11,
198,
220,
220,
220,
18941,
11,
198,
220,
220,
220,
2581,
11,
198,
220,
220,
220,
33918,
1958,
11,
198,
... | 2.664966 | 1,176 |
import numpy as np
import datetime
from keras.models import Model
from keras.layers import Input, Activation, Reshape, BatchNormalization, MaxPool2D, Conv2D, Add, Dropout, Flatten, Dense
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.applications import xception
# Training hyper-parameters.
SEED = 1
ITERATIONS = 10001
BATCH_SIZE = 8
# Input tensor layout: height x width x channels (single-channel 256x256).
IMG_SHAPE = (256, 256, 1)
IMG_HEIGHT, IMG_WIDTH, IMG_CHAN = IMG_SHAPE

# NOTE(review): train() is not defined in this file as shown -- presumably
# lost; confirm before running.
if __name__ == "__main__":
    train()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4818,
8079,
198,
198,
6738,
41927,
292,
13,
27530,
1330,
9104,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
23412,
11,
13144,
341,
11,
1874,
71,
1758,
11,
347,
963,
26447,
1634,
11,
5436,
27201... | 2.752874 | 174 |
from __future__ import print_function
# basic functions
import argparse
import os
import random
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.switch_backend('agg')
# torch functions
import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import grad as torch_grad
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
# local functions
from network_nobn_nosn import *
from resnet import *
from utils import poolSet, inceptionScore
#--------------------------------------------------------------------
# input arguments
parser = argparse.ArgumentParser(description='EPT')
parser.add_argument('--divergence', '-div', type=str, default='KL', help='Pearson | KL | JS')
parser.add_argument('--dataset', required=True, help='mnist | fashionmnist | cifar10')
parser.add_argument('--dataroot', required=True, help='path to dataset')
parser.add_argument('--gpuDevice', type=str, default='2', help='CUDA_VISIBLE_DEVICES')
parser.add_argument('--workers', type=int, default=0, help='number of data loading workers')
parser.add_argument('--batchSize', type=int, default=100, help='input batch size')
parser.add_argument('--imageSize', type=int, default=32, help='input image size')
parser.add_argument('--nz', type=int, default=128, help='size of the latent vector')
parser.add_argument('--ngf', type=int, default=128)
parser.add_argument('--ndf', type=int, default=128)
parser.add_argument('--nLoop', type=int, default=10000, help='maximum Outer Loops')
parser.add_argument('--nDiter', type=int, default=1, help='number of D update')
parser.add_argument('--nPiter', type=int, default=20, help='number of particle update')
parser.add_argument('--nProj', type=int, default=20, help='number of G projection')
parser.add_argument('--nPool', type=int, default=20, help='times of batch size for particle pool')
parser.add_argument('--nBatch', type=int, default=1, help='times of batch size for particle pool')
parser.add_argument('--period', type=int, default=100, help='period of saving ckpts')
parser.add_argument('--coef_gp', type=float, default=5, help='coef for the gradient penalty')
parser.add_argument('--eta', type=float, default=0.5, help='learning rate for particle update')
parser.add_argument('--lrg', type=float, default=0.0001, help='learning rate for G, default=0.0001')
parser.add_argument('--lrd', type=float, default=0.0001, help='learning rate for D, default=0.0001')
parser.add_argument('--decay_g', type=bool, default=True, help='lr_g decay')
parser.add_argument('--decay_d', type=bool, default=True, help='lr_d decay')
parser.add_argument('--net', required=True, default='resnet', help='resnet')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help='path to netG (to continue training)')
parser.add_argument('--netD', default='', help='path to netD (to continue training)')
parser.add_argument('--resume', type=bool, default=False, help='resume from checkpoint')
parser.add_argument('--resume_loop', type=int, default=0)
parser.add_argument('--start_save', type=int, default=1000)
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('--increase_nProj', type=bool, default=False, help='increase the projection times')
opt = parser.parse_args()
print(opt)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpuDevice
try:
os.makedirs('./results')
except OSError:
pass
try:
os.makedirs('./loss')
except OSError:
pass
try:
os.makedirs(os.path.join('./results', opt.dataset))
except OSError:
pass
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print('Random Seed: ', opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
train_transforms = transforms.Compose([
transforms.Resize(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
if opt.dataset == 'mnist':
dataset = dset.MNIST(root=opt.dataroot, download=True,
transform=train_transforms)
nc = 1
nclass = 10
elif opt.dataset == 'fashionmnist':
dataset = dset.FashionMNIST(root=opt.dataroot, download=True,
transform=train_transforms)
nc = 1
nclass = 10
elif opt.dataset == 'cifar10':
dataset = dset.CIFAR10(root=opt.dataroot, download=True,
transform=train_transforms)
nc = 3
nclass = 10
else:
raise NameError
assert dataset
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
device = torch.device('cuda:0')
device_cpu = torch.device('cpu')
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
eta = float(opt.eta)
nrow = int(math.sqrt(opt.batchSize))
# nets
if opt.net == "resnet":
netG = G_resnet(nc, ngf, nz)
netD = D_resnet(nc, ndf)
elif opt.net == "dcgan":
netG = G_dcgan(nc, ngf, nz)
netD = D_dcgan(nc, ndf)
elif opt.net == "dcgan_sn":
netG = G_dcgan_sn(nc, ngf, nz)
netD = D_dcgan_sn(nc, ndf)
netG.apply(weights_init)
netG.to(device)
netD.apply(weights_init)
netD.to(device)
print('#-----------GAN initializd-----------#')
if opt.resume:
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
state = torch.load('./checkpoint/UPT-%s-%s-%s-%s-ckpt-gp.t7' % (opt.divergence, opt.dataset, str(opt.resume_loop), str(opt.eta)))
netG.load_state_dict(state['netG'])
netD.load_state_dict(state['netD'])
start_loop = state['loop'] + 1
is_score = state['is_score']
best_is = state['best_is']
loss_G = state['loss_G']
print('#-----------Resumed from checkpoint-----------#')
else:
start_loop = 0
is_score = []
best_is = 0.0
netIncept = PreActResNet18(nc)
netIncept.to(device)
netIncept = torch.nn.DataParallel(netIncept)
if torch.cuda.is_available() and not opt.cuda:
checkpoint = torch.load('./checkpoint/resnet18-%s-ckpt.t7' % opt.dataset)
netIncept.load_state_dict(checkpoint['net'])
else:
checkpoint = torch.load('./checkpoint/resnet18-%s-ckpt.t7' % opt.dataset, map_location=lambda storage, loc: storage)
netIncept.load_state_dict(checkpoint['net'])
print('#------------Classifier load finished------------#')
poolSize = opt.batchSize * opt.nPool
z_b = torch.FloatTensor(opt.batchSize, nz).to(device)
img_real = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize).to(device)
img_fake = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize).to(device)
p_z = torch.FloatTensor(poolSize, nz).to(device_cpu)
p_img = torch.FloatTensor(poolSize, nc, opt.imageSize, opt.imageSize).to(device_cpu)
show_z_b = torch.FloatTensor(opt.batchSize, nz).to(device)
eval_z_b = torch.FloatTensor(250, nz).to(device)
# set optimizer
optim_D = optim.RMSprop(netD.parameters(), lr=opt.lrd)
optim_G = optim.RMSprop(netG.parameters(), lr=opt.lrg)
if opt.dataset == 'mnist':
scheduler_D = MultiStepLR(optim_D, milestones=[400, 800, 1200], gamma=0.5)
scheduler_G = MultiStepLR(optim_G, milestones=[400, 800, 1200], gamma=0.5)
elif opt.dataset == 'fashionmnist':
scheduler_D = MultiStepLR(optim_D, milestones=[400, 800, 1200], gamma=0.5)
scheduler_G = MultiStepLR(optim_G, milestones=[400, 800, 1200], gamma=0.5)
elif opt.dataset == 'cifar10':
scheduler_D = MultiStepLR(optim_D, milestones=[800, 1600, 2400], gamma=0.5)
scheduler_G = MultiStepLR(optim_G, milestones=[800, 1600, 2400], gamma=0.5)
# set criterion
criterion_G = nn.MSELoss()
#--------------------------- main function ---------------------------#
show_z_b.normal_()
dataloader_iter = iter(dataloader)
real_show, _ = next(dataloader_iter)
vutils.save_image(real_show / 2 + 0.5, './results/%s/real-%s-gp.png' % (opt.dataset, opt.dataset), nrow=nrow, padding=0)
LOSS_DR = []
LOSS_GP = []
GRAD_NORM = []
LOSS_PROJ = []
for loop in range(start_loop, start_loop + opt.nLoop):
# input_pool
netD.train()
netG.eval()
p_z.normal_()
with torch.no_grad():
for i in range(opt.nPool):
p_img[opt.batchSize*i : opt.batchSize*(i+1)] = netG(p_z[opt.batchSize*i : opt.batchSize*(i+1)].cuda()).detach()
for t in range(opt.nPiter):
LOSS_dr = []
LOSS_gp = []
Grad_norm = []
for _ in range(opt.nDiter):
# Update D
netD.zero_grad()
try:
real_img, _ = next(dataloader_iter)
except:
dataloader_iter = iter(dataloader)
real_img, _ = next(dataloader_iter)
img_real = real_img.to(device).clone()
z_b_idx = random.sample(range(poolSize), opt.batchSize)
img_fake.copy_(p_img[z_b_idx])
img_real.requires_grad_(True)
if img_real.grad is not None:
img_real.grad.zero_()
D_img_real = netD(img_real)
loss_dr = (D_img_real ** 2).mean() - 2 * netD(img_fake).mean()
loss_gp = opt.coef_gp * gradient_penalty(img_real, D_img_real)
loss_dr_gp = loss_dr + loss_gp
loss_dr_gp.backward()
optim_D.step()
if opt.decay_d:
scheduler_D.step()
LOSS_dr.append(loss_dr.detach().cpu().item())
LOSS_gp.append(loss_gp.detach().cpu().item())
# update particle pool
p_img_t = p_img.clone().to(device)
p_img_t.requires_grad_(True)
if p_img_t.grad is not None:
p_img_t.grad.zero_()
fake_D_score = netD(p_img_t)
# set s(x)
if opt.divergence == 'Pearson':
s = torch.ones_like(fake_D_score.detach())
elif opt.divergence == 'KL':
s = 1 / fake_D_score.detach()
elif opt.divergence == 'JS':
s = 1 / (1 + fake_D_score.detach()) / fake_D_score.detach()
else:
raise ValueError("The divergence is not found.")
s.unsqueeze_(1).unsqueeze_(2).unsqueeze_(3).expand_as(p_img_t)
fake_D_score.backward(torch.ones(len(p_img_t)).to(device))
p_img = torch.clamp(p_img - eta * s.cpu() * p_img_t.grad.cpu(), -1, 1)
Grad_norm.append(p_img_t.grad.norm(p=2).detach().cpu().item())
LOSS_DR.append(np.mean(LOSS_dr))
LOSS_GP.append(np.mean(LOSS_gp))
GRAD_NORM.append(np.mean(Grad_norm))
# update G
netG.train()
netD.eval()
poolset = poolSet(p_z, p_img)
poolloader = torch.utils.data.DataLoader(poolset, batch_size=opt.nBatch*opt.batchSize, shuffle=True, num_workers=opt.workers)
loss_G = []
for _ in range(opt.nProj):
loss_G_t = []
for _, data_ in enumerate(poolloader, 0):
netG.zero_grad()
input_, target_ = data_
pred_ = netG(input_.to(device))
loss = criterion_G(pred_, target_.to(device))
loss.backward()
optim_G.step()
if opt.decay_g:
scheduler_G.step()
loss_G_t.append(loss.detach().cpu().item())
loss_G.append(np.mean(loss_G_t))
LOSS_PROJ.append(np.mean(loss_G))
vutils.save_image(target_ / 2 + 0.5, './results/%s/particle-%s-%s-%s-%s-gp.png'
% (opt.dataset, str(loop).zfill(4), opt.divergence, opt.dataset, str(opt.eta)), nrow=nrow, padding=0)
print('Loop(%s/%s)%d: dr: %.4f | gp: %.4f | norm: %.4f | proj: %.4f'
% (opt.divergence, opt.dataset, loop, LOSS_DR[-1], LOSS_GP[-1], GRAD_NORM[-1], LOSS_PROJ[-1]))
#-----------------------------------------------------------------
if loop % opt.period == 0:
fig = plt.figure()
plt.style.use('ggplot')
plt.plot(loss_G, label=opt.divergence)
plt.xlabel('Loop')
plt.ylabel('Inner Projection Loss')
plt.legend()
fig.savefig('./loss/inner_projection-%s-%s-%s-gp.png'
% (opt.divergence, opt.dataset, str(opt.eta)))
plt.close()
fig = plt.figure(figsize=(20, 20))
plt.style.use('ggplot')
plt.subplot(411)
plt.plot(LOSS_DR, label=opt.divergence)
plt.xlabel('Loop')
plt.ylabel('DR Loss')
plt.subplot(412)
plt.plot(LOSS_GP, label=opt.divergence)
plt.xlabel('Loop')
plt.ylabel('GP Loss')
plt.subplot(413)
plt.plot(GRAD_NORM, label=opt.divergence)
plt.xlabel('Loop')
plt.ylabel('Gradient Norm')
plt.subplot(414)
plt.plot(LOSS_PROJ, label=opt.divergence)
plt.xlabel('Loop')
plt.ylabel('Projection Loss')
fig.savefig('./loss/loss-%s-%s-%s-gp.png'
% (opt.divergence, opt.dataset, str(opt.eta)))
plt.close()
# show image
netG.eval()
fake_img = netG(show_z_b)
vutils.save_image(fake_img.detach().cpu() / 2 + 0.5, './results/%s/fake-%s-%s-%s-%s-gp.png'
% (opt.dataset, str(loop).zfill(4), opt.divergence, opt.dataset, str(opt.eta)), nrow=nrow, padding=0)
# inception score
is_score.append(inceptionScore(netIncept, netG, device, nz, nclass))
print('[%d] Inception Score is: %.4f' % (loop, is_score[-1]))
best_is = max(is_score[-1], best_is)
fig = plt.figure()
plt.style.use('ggplot')
plt.plot(opt.period * (np.arange(loop//opt.period + 1)), is_score, label=opt.divergence)
plt.xlabel('Loop')
plt.ylabel('Inception Score')
plt.legend()
fig.savefig('loss/IS-%s-%s-%s-gp.png' % (opt.divergence, opt.dataset, str(opt.eta)))
plt.close()
if best_is == is_score[-1]:
print('Save the best Inception Score: %.4f' % is_score[-1])
else:
pass
if loop > opt.start_save and loop % 100 == 0:
state = {
'netG': netG.state_dict(),
'netD': netD.state_dict(),
'is_score': is_score,
'loss_G': loss_G,
'loop': loop,
'best_is': best_is
}
torch.save(state, './checkpoint/UPT-%s-%s-%s-%s-ckpt-gp.t7' % (opt.divergence, opt.dataset, str(loop), str(opt.eta)))
# save IS
if loop % 500 == 0:
dataframe = pd.DataFrame({'IS-%s' % opt.divergence: is_score})
dataframe.to_csv('loss/IS-%s-%s-%s-gp.csv' % (opt.divergence, opt.dataset, str(opt.eta)), sep=',')
torch.cuda.empty_cache()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
2,
4096,
5499,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
1174... | 2.189739 | 6,783 |
""" You have a bomb to defuse, and your time is running out! Your informer will
provide you with a circular array code of length of n and a key k. To decrypt
the code, you must replace every number. All the numbers are replaced
simultaneously.
If k > 0, replace the ith number with the sum of the next k numbers.
If k < 0, replace the ith number with the sum of the previous k numbers.
If k == 0, replace the ith number with 0. As code is circular, the
next element of code[n-1] is code[0], and the previous element of code[0] is
code[n-1].
Given the circular array code and an integer key k, return the
decrypted code to defuse the bomb!
Example 1: Input: code = [5,7,1,4], k = 3 Output: [12,10,16,13]
Explanation: Each number is replaced by the sum of the
next 3 numbers. The decrypted code is [7+1+4, 1+4+5, 4+5+7, 5+7+1]. Notice
that the numbers wrap around.
"""
from typing import List
| [
37811,
220,
220,
921,
423,
257,
5194,
284,
825,
1904,
11,
290,
534,
640,
318,
2491,
503,
0,
3406,
4175,
263,
481,
198,
220,
220,
2148,
345,
351,
257,
18620,
7177,
2438,
286,
4129,
286,
299,
290,
257,
1994,
479,
13,
1675,
42797,
19... | 2.945122 | 328 |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# In[2]:
df=pd.read_csv("./Dataset/Mall_Customers.csv")
# In[3]:
df.head()
# In[4]:
df.info()
# In[5]:
df.describe()
# In[6]:
df.rename(columns={'Annual Income (k$)':'Income','Spending Score (1-100)':'SpendScore'},inplace=True)
# In[7]:
df.head()
# In[8]:
sns.pairplot(df)
# In[9]:
df=df.drop(['CustomerID'],axis=1)
# In[10]:
df.head()
# In[11]:
sns.heatmap(df.corr())
# In[12]:
plt.figure(figsize=(7,7))
size=df['Gender'].value_counts()
label=['Female','Male']
color=['Pink','Blue']
explode=[0,0.1]
plt.pie(size,explode=explode,labels=label,colors=color,shadow=True)
plt.legend()
plt.show()
# In[13]:
plt.figure(figsize=(10,5))
sns.countplot(df['Age'])
plt.xticks(rotation=90)
# In[14]:
sns.boxplot(df['Gender'],df['SpendScore'])
# In[15]:
plt.figure(figsize=(15,5))
sns.countplot(df['Income'])
# In[16]:
plt.bar(df['Income'],df['SpendScore'])
plt.title('Spendscore over income',fontsize=20)
plt.xlabel('Income')
plt.ylabel('Spendscore')
# # Density Based Spacial Clustering of Applications with noise (DBSCAN)
# We are going to use the DBSCAN for algorithm for the purpose of clustering. It is an unsupervised machine learning algorithm. It is used for clusters of high density. It automatically predicts the outliers and removes it. It is better than hierarchical and k-means clustering algorithm. It makes the clusters based on the parameters like epsilon,min points and noise.It separately predicts the core points, border points and outliers efficiently.
# In[17]:
df.head()
# In[18]:
x=df.iloc[:,[2,3]].values
# In[19]:
x.shape
# In[20]:
from sklearn.cluster import DBSCAN
db=DBSCAN(eps=3,min_samples=4,metric='euclidean')
# In[21]:
model=db.fit(x)
# In[22]:
label=model.labels_
# In[23]:
label
# In[24]:
from sklearn import metrics
sample_cores=np.zeros_like(label,dtype=bool)
sample_cores[db.core_sample_indices_]=True
n_clusters=len(set(label))- (1 if -1 in label else 0)
print('No of clusters:',n_clusters)
# In[25]:
y_means = db.fit_predict(x)
plt.figure(figsize=(7,5))
plt.scatter(x[y_means == 0, 0], x[y_means == 0, 1], s = 50, c = 'pink')
plt.scatter(x[y_means == 1, 0], x[y_means == 1, 1], s = 50, c = 'yellow')
plt.scatter(x[y_means == 2, 0], x[y_means == 2, 1], s = 50, c = 'cyan')
plt.scatter(x[y_means == 3, 0], x[y_means == 3, 1], s = 50, c = 'magenta')
plt.scatter(x[y_means == 4, 0], x[y_means == 4, 1], s = 50, c = 'orange')
plt.scatter(x[y_means == 5, 0], x[y_means == 5, 1], s = 50, c = 'blue')
plt.scatter(x[y_means == 6, 0], x[y_means == 6, 1], s = 50, c = 'red')
plt.scatter(x[y_means == 7, 0], x[y_means == 7, 1], s = 50, c = 'black')
plt.scatter(x[y_means == 8, 0], x[y_means == 8, 1], s = 50, c = 'violet')
plt.xlabel('Annual Income in (1k)')
plt.ylabel('Spending Score from 1-100')
plt.title('Clusters of data')
plt.show()
# # HIERARCHICAL CLUSTERING
# In[26]:
import scipy.cluster.hierarchy as sch
dendrogram = sch.dendrogram(sch.linkage(x, method = 'ward'))
plt.title('Dendrogam', fontsize = 20)
plt.xlabel('Customers')
plt.ylabel('Ecuclidean Distance')
plt.show()
# In[27]:
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters = 9, affinity = 'euclidean', linkage = 'ward')
y_hc = hc.fit_predict(x)
plt.scatter(x[y_hc == 0, 0], x[y_hc == 0, 1], s = 50, c = 'pink')
plt.scatter(x[y_hc == 1, 0], x[y_hc == 1, 1], s = 50, c = 'yellow')
plt.scatter(x[y_hc == 2, 0], x[y_hc == 2, 1], s = 50, c = 'cyan')
plt.scatter(x[y_hc == 3, 0], x[y_hc == 3, 1], s = 50, c = 'magenta')
plt.scatter(x[y_hc == 4, 0], x[y_hc == 4, 1], s = 50, c = 'orange')
plt.scatter(x[y_hc == 5, 0], x[y_hc == 5, 1], s = 50, c = 'blue')
plt.scatter(x[y_hc == 6, 0], x[y_hc == 6, 1], s = 50, c = 'red')
plt.scatter(x[y_hc == 7, 0], x[y_hc == 7, 1], s = 50, c = 'black')
plt.scatter(x[y_hc == 8, 0], x[y_hc == 8, 1], s = 50, c = 'violet')
plt.title('Hierarchial Clustering', fontsize = 20)
plt.xlabel('Annual Income')
plt.ylabel('Spending Score')
plt.legend()
plt.grid()
plt.show()
# In[ ]:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
16,
5974,
628,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
90... | 2.202944 | 1,902 |
import json
import sys
f = open('/path/to/json', 'r')
h = json.load(f)
'''
for i in h["UEN_DATAGOV"]["BODY"][0]["DATA"]:
if i["ENTITY_NAME"][0] == "LEGALESE PTE. LTD.":
print i["ENTITY_NAME"][0]
print i["UEN"][0]
'''
# note that my parser prints the key values into a single-element array
chunkSize = 4550
with o as h["UEN_DATAGOV"]["BODY"][0]["DATA"]:
for i in xrange(0, len(o), chunkSize):
with open('uen' + '_' + str(i//chunkSize) + '.json', 'w') as outfile:
json.dump(o[i:i+chunkSize], outfile)
| [
11748,
33918,
198,
11748,
25064,
198,
198,
69,
796,
1280,
10786,
14,
6978,
14,
1462,
14,
17752,
3256,
705,
81,
11537,
198,
71,
796,
33918,
13,
2220,
7,
69,
8,
198,
198,
7061,
6,
198,
1640,
1312,
287,
289,
14692,
52,
1677,
62,
35,
... | 2.076046 | 263 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import scrapy
from scrapy.pipelines.images import ImagesPipeline | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
2896,
500,
534,
2378,
31108,
994,
198,
2,
198,
2,
2094,
470,
6044,
284,
751,
534,
11523,
284,
262,
7283,
3620,
62,
47,
4061,
3698,
1268,
1546,
4634,
198,
2,
... | 2.815217 | 92 |
import os
import pandas as pd
from feature_extraction.date_utils import date_features
from feature_extraction.coords_features import coord_features
from feature_extraction.other_features import raw_features, categorical_features
from feature_extraction.path_utils import project_root
import xgboost as xgb
import joblib
raw_data = pd.read_csv(os.path.join(project_root(), 'data', 'raw', 'ubaar-competition', 'train.csv'),
encoding="utf-8", index_col="ID")
all_features_cols = pd.read_csv(os.path.join(project_root(), 'data', 'processed', 'ubaar_features.csv'),
encoding="utf-8", index_col="ID").columns
model = joblib.load(os.path.join(project_root(), 'data', 'processed', 'model.bin'))
num_cols = ['sourceLatitude', 'sourceLongitude', 'destinationLatitude', 'destinationLongitude',
'distanceKM', 'taxiDurationMin', 'weight', 'price']
num_cols_dict = {col: float for col in num_cols}
| [
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
3895,
62,
2302,
7861,
13,
4475,
62,
26791,
1330,
3128,
62,
40890,
198,
6738,
3895,
62,
2302,
7861,
13,
1073,
3669,
62,
40890,
1330,
6349,
62,
40890,
198,
6738,
3895,
62,
... | 2.682584 | 356 |
from flask import Blueprint, render_template, session
view = Blueprint('view', __name__, template_folder='templates')
@view.route('/')
print('view working') | [
6738,
42903,
1330,
39932,
11,
8543,
62,
28243,
11,
6246,
198,
198,
1177,
796,
39932,
10786,
1177,
3256,
11593,
3672,
834,
11,
11055,
62,
43551,
11639,
11498,
17041,
11537,
198,
198,
31,
1177,
13,
38629,
10786,
14,
11537,
198,
220,
220,
... | 3.192308 | 52 |
import pprint
import random
from collections import defaultdict, deque
from enum import Enum, auto
from typing import *
import numpy as np
V = TypeVar("V")
D = TypeVar("D")
Solution = Dict[V, D]
Constraint = Callable[[Solution], bool]
if __name__ == "__main__":
kwargs = {
"pruning_type": PruningType.AC3,
"variable_ordering": VariableOrdering.FAIL_FIRST,
"max_solutions": 100,
}
csps = [map_coloring(**kwargs), n_queens(n=8, **kwargs)]
for csp in csps:
csp.solve()
# csp.min_conflicts(100000)
test_solutions(csp)
| [
11748,
279,
4798,
198,
11748,
4738,
198,
6738,
17268,
1330,
4277,
11600,
11,
390,
4188,
198,
6738,
33829,
1330,
2039,
388,
11,
8295,
198,
6738,
19720,
1330,
1635,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
53,
796,
5994,
19852,
... | 2.3 | 260 |
# Copyright (c) 2017-2020 Wenyi Tang.
# Author: Wenyi Tang
# Email: wenyitang@outlook.com
# Update: 2020 - 5 - 28
import logging
_logger = logging.getLogger("VSR.RWSR")
_logger.info("LICENSE: RealSR is implemented by Xiaozhong Ji. "
"@xiaozhongji https://github.com/jixiaozhong/RealSR")
| [
2,
220,
15069,
357,
66,
8,
2177,
12,
42334,
370,
28558,
72,
18816,
13,
198,
2,
220,
6434,
25,
370,
28558,
72,
18816,
198,
2,
220,
9570,
25,
266,
28558,
270,
648,
31,
448,
5460,
13,
785,
198,
2,
220,
10133,
25,
12131,
532,
642,
... | 2.401575 | 127 |
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
def no_ticks(ax, axis="both"):
"""
Remove ticks and labels from one or both axis.
ax : matplotlib ax object.
axis : "both", "x", "y"
"""
from numpy import ndarray
try:
for axi in ax:
set(axi)
except:
set(ax)
def cmap_colors(n_colors, alpha=1.0, cmap="viridis"):
"""
Get colors from matplotlib colormap.
n_colors : number of colors to draw.
alpha : alpha value.
cmap : colormap to choose from. Default is viridis.
"""
from matplotlib.colors import rgb2hex
cmap = plt.cm.get_cmap(name=cmap, lut=n_colors)
colors = [
(cmap(i)[0], cmap(i)[1], cmap(i)[2], alpha) for i in range(n_colors)
]
# Set corresponding alpha value and return an array.
return colors
def ax_colorbar(fig, ax, im, contours=False):
"""
Add a colorbar to the image.
"""
from mpl_toolkits.axes_grid1 import make_axes_locatable
cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.05)
cax.tick_params(axis="y", which="minor", bottom=False)
if contours:
norm = matplotlib.colors.Normalize(
vmin=im.cvalues.min(), vmax=im.cvalues.max()
)
sm = plt.cm.ScalarMappable(norm=norm, cmap=im.cmap)
sm.set_array([])
fig.colorbar(sm, cax=cax, orientation="vertical")
else:
fig.colorbar(im, cax=cax, orientation="vertical")
return cax
def fig_colorbar(fig, cax, im, contours=False):
"""
Add a colorbar to the figure.
"""
cax.tick_params(axis="y", which="minor", bottom=False)
if contours:
norm = matplotlib.colors.Normalize(
vmin=im.cvalues.min(), vmax=im.cvalues.max()
)
sm = plt.cm.ScalarMappable(norm=norm, cmap=im.cmap)
sm.set_array([])
fig.colorbar(sm, cax=cax, orientation="vertical")
else:
fig.colorbar(im, cax=cax, orientation="vertical")
return cax
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
628,
198,
4299,
645,
62,
83,
3378,
7,
897,
11,
16488,
2625,
16885,
1,
2599,
198,
220,
220,
220,
37227,
198,
... | 2.164354 | 937 |
print((
ld('kitten','kitten'), # 0
ld('kitten','sitten'), # 1
ld('kitten','sittes'), # 2
ld('kitten','sityteng'), # 3
ld('kitten','sittYing'), # 4
ld('rosettacode','raisethysword'), # 8
ld('kitten','kittenaaaaaaaaaaaaaaaaa'), # 17
ld('kittenaaaaaaaaaaaaaaaaa','kitten') # 17
))
print((
ld('kitten','kitten',3), # True
ld('kitten','sitten',3), # True
ld('kitten','sittes',3), # True
ld('kitten','sityteng',3), # True
ld('kitten','sittYing',3), # False
ld('rosettacode','raisethysword',3), # False
ld('kitten','kittenaaaaaaaaaaaaaaaaa',3), # False
ld('kittenaaaaaaaaaaaaaaaaa','kitten',3) # False
))
| [
198,
4798,
19510,
198,
220,
220,
220,
300,
67,
10786,
74,
2621,
41707,
74,
2621,
33809,
1303,
657,
198,
220,
220,
220,
300,
67,
10786,
74,
2621,
41707,
82,
2621,
33809,
1303,
352,
198,
220,
220,
220,
300,
67,
10786,
74,
2621,
41707,... | 2.155844 | 308 |
import cv2 as cv
faceCascade = cv.CascadeClassifier('haarcascade/haarcascade_frontalface_default.xml') #most accurate
eyeCascade = cv.CascadeClassifier('haarcascade/haarcascade_eye.xml')
video = cv.VideoCapture(0)
while(True):
ret, frame = video.read()
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.3, 5)
eyes = eyeCascade.detectMultiScale(gray, 1.3, 5)
for(x, y, w, h) in faces:
cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
roi_gray = gray[y:y+w, x: x+w]
roi_color = frame[y: y+h, x: x+w]
faceText = cv.FONT_HERSHEY_SIMPLEX
cv.putText(frame,'face', (x, y), faceText, 1, (0, 255, 0), 1)
for(x, y, w, h) in eyes:
cv.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
roi_gray = gray[y:y+w, x: x+w]
roi_color = frame[y: y+h, x: x+w]
eyeText = cv.FONT_HERSHEY_SIMPLEX
cv.putText(frame,'eye', (x, y), eyeText, 1, (0, 0, 255), 1)
cv.imshow("Face - Eye - Detection", frame)
if cv.waitKey(1) == ord('q'):
cv.destroyAllWindows()
break
video.release()
| [
11748,
269,
85,
17,
355,
269,
85,
198,
198,
2550,
34,
28966,
796,
269,
85,
13,
34,
28966,
9487,
7483,
10786,
3099,
5605,
28966,
14,
3099,
5605,
28966,
62,
8534,
1604,
558,
62,
12286,
13,
19875,
11537,
1303,
1712,
7187,
198,
25379,
3... | 1.920195 | 614 |
from braid import *
from sgraph import *
from typing import List
from numpy import random
| [
6738,
275,
7086,
1330,
1635,
198,
6738,
264,
34960,
1330,
1635,
198,
6738,
19720,
1330,
7343,
198,
6738,
299,
32152,
1330,
4738,
628,
628
] | 3.875 | 24 |
##########################################################################
## Package Version
##########################################################################
from .version import get_version, __version_info__
__version__ = get_version(short=True) | [
29113,
29113,
7804,
2235,
198,
2235,
15717,
10628,
198,
29113,
29113,
7804,
2235,
198,
6738,
764,
9641,
1330,
651,
62,
9641,
11,
11593,
9641,
62,
10951,
834,
198,
834,
9641,
834,
796,
651,
62,
9641,
7,
19509,
28,
17821,
8
] | 6.425 | 40 |
# coding: utf-8
from . import views
from django.conf.urls import url
urlpatterns = [
url(
r'^create/(?P<app_label>\w+)/(?P<model>\w+)/(?P<obj_id>\d+)/$',
views.ExampleCreateView.as_view(),
name='create'
),
url(
r'^update/(?P<pk>\d+)/$',
views.ExampleUpdateView.as_view(),
name='update'
),
]
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
764,
1330,
5009,
198,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
198,
220,
220,
220,
220,
220,
... | 1.889474 | 190 |
# Random forest classification with PCA
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Read Data
from sklearn.datasets import load_iris
dataset = load_iris()
# Choose which features to use
x = dataset["data"] # It has 4 features - with PCA we will reduce it to 3 for 3D visualisation
y = dataset["target"] # Output value
# Split data into train and test dataset
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 42)
# PCA to reduce dimensions to 3 (from 4)
from sklearn.decomposition import PCA
pca = PCA(n_components = 3, random_state = 42)
x_train = pca.fit_transform(x_train)
x_test = pca.transform(x_test)
# Data Preprocessing
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
x_train = sc_x.fit_transform(x_train)
x_test = sc_x.transform(x_test)
# Train Model
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(random_state = 42)
classifier.fit(x_train, y_train)
# Predict Results
y_pred = classifier.predict(x_test)
# Measure accuracy
from sklearn.metrics import accuracy_score
acc = accuracy_score(y_test, y_pred)
# Merge output values with features into pandas dataframe
# It is just used to make a plotting part clearer to read
x_test = sc_x.inverse_transform(x_test) # Return back to non-scaled values
pred_df = pd.DataFrame({'x0': x_test[:,0], 'x1': x_test[:,1],'x2': x_test[:,2], 'y': y_pred})
# Visualise Results
from mpl_toolkits.mplot3d import axes3d
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('X0')
ax.set_ylabel('X1')
ax.set_zlabel('X2')
ax.set_title('Predicted Datapoints')
temp_df = pred_df[pred_df['y'] == 0] # Take only rows from dataset containing y = 0
ax.scatter(temp_df['x0'], temp_df['x1'], temp_df['x2'], color = 'r')
temp_df = pred_df[pred_df['y'] == 1]
ax.scatter(temp_df['x0'], temp_df['x1'], temp_df['x2'], color = 'g')
temp_df = pred_df[pred_df['y'] == 2]
ax.scatter(temp_df['x0'], temp_df['x1'], temp_df['x2'], color = 'b')
| [
2,
14534,
8222,
17923,
351,
4217,
32,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
220,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
2,
4149,
6060,
198,
6738,
1341,
35720,
13,
... | 2.686375 | 778 |
from datetime import date
from django.db import models
| [
6738,
4818,
8079,
1330,
3128,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
628,
628
] | 3.588235 | 17 |
from pm4pymdl.visualization import mvp
| [
6738,
9114,
19,
79,
4948,
25404,
13,
41464,
1634,
1330,
285,
36133,
198
] | 3 | 13 |
"""
Created on Dec 22, 2013
@author: root
"""
import Utils
from Utils import print_pypoly_warning
| [
37811,
198,
41972,
319,
4280,
2534,
11,
2211,
198,
198,
31,
9800,
25,
6808,
198,
37811,
198,
11748,
7273,
4487,
198,
6738,
7273,
4487,
1330,
3601,
62,
79,
4464,
3366,
62,
43917,
628
] | 3.030303 | 33 |
from django.conf.urls import url
from django.conf import settings
from . import views
from django.conf.urls.static import static
urlpatterns = [
url(r'^$', views.home_page, name='homePage'),
url(r'^category/', views.category, name='showCategory'),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) | [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
764,
1330,
5009,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12708,
1330,
9037,
198,
198,
6371,
33279,
82,
... | 2.903226 | 124 |
from django.db import models
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
2,
13610,
534,
4981,
994,
13,
628
] | 3.625 | 16 |
"""
Dismiss a javascript alert.
"""
from screenpy.abilities import BrowseTheWeb
from screenpy.actor import Actor
from screenpy.pacing import aside, beat
class DismissAlert:
"""Dismiss an alert.
Abilities Required:
|BrowseTheWeb|
Examples::
the_actor.attempts_to(DismissAlert())
"""
def describe(self) -> str:
"""Describe the Action in present tense."""
return "Dismiss the alert."
@beat("{} dismisses the alert.")
def perform_as(self, the_actor: Actor) -> None:
"""Direct the Actor to dismiss the alert."""
browser = the_actor.uses_ability_to(BrowseTheWeb).browser
alert = browser.switch_to.alert
aside(f'... the alert says "{alert.text}"')
alert.dismiss()
| [
37811,
198,
35,
1042,
747,
257,
44575,
7995,
13,
198,
37811,
198,
198,
6738,
3159,
9078,
13,
5738,
1330,
44775,
464,
13908,
198,
6738,
3159,
9078,
13,
11218,
1330,
27274,
198,
6738,
3159,
9078,
13,
79,
4092,
1330,
7263,
11,
4405,
628,... | 2.619863 | 292 |
#
# working file for testing git support . should be a unit test
#
import sys
import os
from src import check_path
check_path()
from shared.util_git import pull, push, isbehind
if __name__ == "__main__":
test_status()
| [
2,
198,
2,
1762,
2393,
329,
4856,
17606,
1104,
220,
764,
220,
815,
307,
257,
4326,
1332,
198,
2,
198,
11748,
25064,
198,
11748,
28686,
198,
198,
6738,
12351,
1330,
2198,
62,
6978,
198,
9122,
62,
6978,
3419,
198,
198,
6738,
4888,
13,... | 3.094595 | 74 |
"""
Files for every custom exceptions.
"""
class GameNotFoundError(Exception):
"""
Exception to raise when the required game is not found.
"""
| [
37811,
198,
25876,
329,
790,
2183,
13269,
13,
198,
37811,
628,
198,
4871,
3776,
3673,
21077,
12331,
7,
16922,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
35528,
284,
5298,
618,
262,
2672,
983,
318,
407,
1043,
13,
198,
220,
... | 3.413043 | 46 |
from typing import Dict
from botocore.paginate import Paginator
| [
6738,
19720,
1330,
360,
713,
198,
6738,
10214,
420,
382,
13,
79,
363,
4559,
1330,
31525,
20900,
628,
198
] | 3.473684 | 19 |
# -*- coding: utf-8 -*-
LATEST = "2"
f1 = "events.json"
v1 = {
"name": {"type": "string", "minlength": 1, "required": True},
"url": {"type": "string", "minlength": 1, "required": True},
"city": {"type": "string", "minlength": 1, "required": True},
"state": {"type": "string", "required": True, "nullable": True},
"country": {"type": "string", "minlength": 1, "required": True},
"cfp_open": {"type": "boolean", "required": True},
"cfp_end_date": {"is_date": True, "type": "string", "required": True},
"start_date": {"is_date": True, "type": "string", "required": True},
"end_date": {"is_date": True, "type": "string", "required": True},
"source": {"type": "string", "minlength": 1, "required": True},
"tags": {"type": "list", "minlength": 1, "required": True},
"kind": {"type": "string", "allowed": ["conference", "meetup"], "required": True},
"by": {"type": "string", "allowed": ["human", "bot"], "required": True},
}
f2 = "events_v2.json"
v2 = {
"name": {"type": "string", "minlength": 1, "required": True},
"url": {"type": "string", "minlength": 1, "required": True},
"city": {"type": "string", "required": True, "nullable": True},
"state": {"type": "string", "required": True, "nullable": True},
"country": {"type": "string", "required": True, "nullable": True},
"location": {"type": "string", "required": True, "nullable": True},
"cfp_open": {"type": "boolean", "required": True},
"cfp_end_date": {"is_date": True, "type": "string", "required": True},
"start_date": {"is_date": True, "type": "string", "required": True},
"end_date": {"is_date": True, "type": "string", "required": True},
"source": {"type": "string", "minlength": 1, "required": True},
"tags": {"type": "list", "minlength": 1, "required": True},
"kind": {"type": "string", "allowed": ["conference", "meetup"], "required": True},
"by": {"type": "string", "allowed": ["human", "bot"], "required": True},
}
latest = eval(f"v{LATEST}")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
43,
1404,
6465,
796,
366,
17,
1,
198,
198,
69,
16,
796,
366,
31534,
13,
17752,
1,
198,
85,
16,
796,
1391,
198,
220,
220,
220,
366,
3672,
1298,
19779,
4906,
12... | 2.660502 | 757 |
#
# PySNMP MIB module HP-ICF-IP-LOCKDOWN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HP-ICF-IP-LOCKDOWN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:34:15 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint")
hpSwitch, = mibBuilder.importSymbols("HP-ICF-OID", "hpSwitch")
ifIndex, InterfaceIndex = mibBuilder.importSymbols("IF-MIB", "ifIndex", "InterfaceIndex")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
VlanIndex, = mibBuilder.importSymbols("Q-BRIDGE-MIB", "VlanIndex")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
iso, TimeTicks, Gauge32, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, IpAddress, Counter64, Integer32, Unsigned32, MibIdentifier, Counter32, ObjectIdentity, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "TimeTicks", "Gauge32", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "IpAddress", "Counter64", "Integer32", "Unsigned32", "MibIdentifier", "Counter32", "ObjectIdentity", "NotificationType")
DisplayString, MacAddress, TruthValue, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "MacAddress", "TruthValue", "TextualConvention")
hpicfIpLockdown = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39))
hpicfIpLockdown.setRevisions(('2008-03-16 05:24', '2006-06-08 23:47',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hpicfIpLockdown.setRevisionsDescriptions(("Added hpicfIpLockErrantNotify, it's objects and groups. Obsoleted hpicfIpLockTrapsCntl in favor of hpicfIpLockTrapsCtrl and added a hpicfIpLockObsoleteGroup.", 'Initial revision.',))
if mibBuilder.loadTexts: hpicfIpLockdown.setLastUpdated('200803160524Z')
if mibBuilder.loadTexts: hpicfIpLockdown.setOrganization('HP Networking')
if mibBuilder.loadTexts: hpicfIpLockdown.setContactInfo('Hewlett-Packard Company 8000 Foothills Blvd. Roseville, CA 95747')
if mibBuilder.loadTexts: hpicfIpLockdown.setDescription('This MIB module contains HP proprietary objects for managing Dynamic IP Lockdown.')
hpicfIpLockTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 0))
hpicfIpLockTrapsObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 0, 1))
hpicfIpLockOutOfResourceSource = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 0, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("dhcpsnooping", 1), ("iplockdown", 2)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpicfIpLockOutOfResourceSource.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockOutOfResourceSource.setDescription('The identifier of the reason for out of hardware resource condition')
hpicfIpLockOutOfResources = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 0, 1, 2)).setObjects(("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockAddrPort"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockAddrMacAddress"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockAddrIpAddress"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockAddrVlan"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockOutOfResourceSource"))
if mibBuilder.loadTexts: hpicfIpLockOutOfResources.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockOutOfResources.setDescription("This trap indicates that unexpected running out of hardware resources to program a Dynamic IP Lockdown rule. This notification trap is controlled by the state of 'hpicfIpLockTrapCtrl' object. Implementation of this trap is optional.")
hpicfIpLockErrantNotify = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 0, 1, 3)).setObjects(("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifyCount"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifyPort"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifySrcIpType"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifySrcIpAddress"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifyDstIpType"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifyDstIpAddress"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifyMacAddress"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifyPktCount"))
if mibBuilder.loadTexts: hpicfIpLockErrantNotify.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockErrantNotify.setDescription("This notification indicates a host was denied access to the switch based on Dynamic Lockdown Protection rules. This notification trap is controlled by the state of the 'hpicfIpLockTrapCtrl' object. Implementation of this trap is optional.")
hpicfIpLockErrantNotifyObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 0, 1, 4))
hpicfIpLockNotifyCount = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 0, 1, 4, 1), Counter32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpicfIpLockNotifyCount.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockNotifyCount.setDescription("A count of 'hpicfIpLockErrantNotify' sent from the Dynamic Ip Lockdown Protection entity to the SNMP entity since boot.")
hpicfIpLockNotifyPort = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 0, 1, 4, 2), InterfaceIndex()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpicfIpLockNotifyPort.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockNotifyPort.setDescription("The port for which this 'hpicfIpLockErrantNotify' applies.")
hpicfIpLockNotifySrcIpType = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 0, 1, 4, 3), InetAddressType()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpicfIpLockNotifySrcIpType.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockNotifySrcIpType.setDescription("The type of IP address contained in 'hpicfIpLockNotifySrcIpAddress'. The only values expected are ipv4 or ipv6.")
hpicfIpLockNotifySrcIpAddress = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 0, 1, 4, 4), InetAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpicfIpLockNotifySrcIpAddress.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockNotifySrcIpAddress.setDescription("The source IP address for which this 'hpicfIpLockErrantNotify' applies.")
hpicfIpLockNotifyDstIpType = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 0, 1, 4, 5), InetAddressType()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpicfIpLockNotifyDstIpType.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockNotifyDstIpType.setDescription("The type of IP address contained in 'hpicfIpLockNotifyDstIpAddress'. The only values expected are ipv4 or ipv6.")
hpicfIpLockNotifyDstIpAddress = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 0, 1, 4, 6), InetAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpicfIpLockNotifyDstIpAddress.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockNotifyDstIpAddress.setDescription("The destination IP address for which this 'hpicfIpLockErrantNotify' applies.")
hpicfIpLockNotifyMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 0, 1, 4, 7), MacAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpicfIpLockNotifyMacAddress.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockNotifyMacAddress.setDescription("The source MAC address for which this 'hpicfIpLockErrantNotify' applies.")
hpicfIpLockNotifyPktCount = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 0, 1, 4, 8), Counter32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpicfIpLockNotifyPktCount.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockNotifyPktCount.setDescription('This object indicates the number of packets received from this host which were dropped.')
hpicfIpLockObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1))
hpicfIpLockConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 1))
hpicfIpLockEnable = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 1, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfIpLockEnable.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockEnable.setDescription('The administrative status of the Dynamic IP Lockdown feature.')
hpicfIpLockPortTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 1, 2), )
if mibBuilder.loadTexts: hpicfIpLockPortTable.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockPortTable.setDescription('Per-interface configuration for Dynamic IP Lockdown.')
hpicfIpLockTrapCntl = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 1, 3), Bits().clone(namedValues=NamedValues(("outOfResource", 0)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfIpLockTrapCntl.setStatus('obsolete')
if mibBuilder.loadTexts: hpicfIpLockTrapCntl.setDescription("********* THIS OBJECT IS OBSOLETED ********** This object has been obsoleted in favor of 'hpicfIpLockTrapCtrl'. Controls generation of SNMP traps for events defined in this MIB. The set bit means 'enabled'. - OutOfResource(0) The state of this bit specifies whether the notification trap is allowed to be send when one runs out of resources programming a dynamic IP Lockdown rule..")
hpicfIpLockTrapCtrl = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 1, 4), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfIpLockTrapCtrl.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockTrapCtrl.setDescription('Controls generation of SNMP notifications for traps defined in this MIB.')
hpicfIpLockPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hpicfIpLockPortEntry.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockPortEntry.setDescription('Dynamic IP Lockdown configuration information for a single port.')
hpicfIpLockPortEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 1, 2, 1, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfIpLockPortEnable.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockPortEnable.setDescription('This object indicates whether this port is enabled for Dynamic IP Lockdown.')
hpicfIpLockStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 2))
hpicfIpLockPortStatusTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 2, 1), )
if mibBuilder.loadTexts: hpicfIpLockPortStatusTable.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockPortStatusTable.setDescription('Per-interface status for Dynamic IP Lockdown.')
hpicfIpLockPortStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hpicfIpLockPortStatusEntry.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockPortStatusEntry.setDescription('Dynamic IP Lockdown status information for a single port.')
hpicfIpLockPortOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 2, 1, 1, 1), Bits().clone(namedValues=NamedValues(("active", 0), ("noDsnoop", 1), ("trustedPort", 2), ("noSnoopingVlan", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfIpLockPortOperStatus.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockPortOperStatus.setDescription("This object indicates the various states of the current operating mode of Dynamic IP Lockdown on this port. When no bits are set, the status of this feature shall be 'disabled'. Each status is described below: active - Dynamic IP Lockdown is active on this port. noDsnoop - Dynamic IP Lockdown is enabled on this port, but DHCP Snooping is not globally enabled. trustedPort - Dynamic IP Lockdown is enabled on this port, but is not active because the port is a DHCP Snooping trusted port. noSnoopingVlan - Dynamic IP Lockdown is enabled on this port, but is not active because the port is not a member of any VLAN with DHCP Snooping enabled.")
hpicfIpLockAddrTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 2, 2), )
if mibBuilder.loadTexts: hpicfIpLockAddrTable.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockAddrTable.setDescription('Table of source address bindings on ports where Dynamic IP Lockdown is active that are currently permitted.')
hpicfIpLockAddrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 2, 2, 1), ).setIndexNames((0, "HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockAddrPort"), (0, "HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockAddrType"), (0, "HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockAddrIpAddress"))
if mibBuilder.loadTexts: hpicfIpLockAddrEntry.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockAddrEntry.setDescription('An entry in the table containing a single permitted source address binding.')
hpicfIpLockAddrPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 2, 2, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfIpLockAddrPort.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockAddrPort.setDescription('The port that this address binding is permitted on.')
hpicfIpLockAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 2, 2, 1, 2), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfIpLockAddrType.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockAddrType.setDescription('The type of IP address contained in hpicfIpLockAddrIpAddress. The only values expected are ipv4 or ipv6.')
hpicfIpLockAddrIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 2, 2, 1, 3), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfIpLockAddrIpAddress.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockAddrIpAddress.setDescription('A source IP address permitted on this port. The type of address contained in this object is indicated by hpicfIpLockAddrType.')
hpicfIpLockAddrVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 2, 2, 1, 4), VlanIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfIpLockAddrVlan.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockAddrVlan.setDescription('The VLAN ID on which this source address is permitted on this port.')
hpicfIpLockAddrMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 2, 2, 1, 5), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfIpLockAddrMacAddress.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockAddrMacAddress.setDescription('The source MAC address that is permitted for this source IP address on this port.')
hpicfIpLockResourceAvailable = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 1, 2, 2, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfIpLockResourceAvailable.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockResourceAvailable.setDescription('TRUE indicates that resources were available to add binding. FALSE indicates that resources were not available')
hpicfIpLockConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 2))
hpicfIpLockGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 2, 1))
hpicfIpLockBaseGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 2, 1, 1)).setObjects(("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockEnable"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockPortEnable"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockPortOperStatus"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockAddrPort"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockAddrType"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockAddrIpAddress"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockAddrVlan"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockAddrMacAddress"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockResourceAvailable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfIpLockBaseGroup = hpicfIpLockBaseGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockBaseGroup.setDescription('A collection of objects for configuring and monitoring the base Dynamic IP Lockdown functionality.')
hpicfIpLockTrapsGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 2, 1, 2)).setObjects(("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockOutOfResources"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockErrantNotify"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfIpLockTrapsGroup = hpicfIpLockTrapsGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockTrapsGroup.setDescription('A collection of trap objects for Dynamic IP Lockdown.')
hpicfIpLockTrapObjectsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 2, 1, 3)).setObjects(("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockOutOfResourceSource"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifyCount"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifyPort"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifySrcIpType"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifySrcIpAddress"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifyDstIpType"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifyDstIpAddress"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifyMacAddress"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockNotifyPktCount"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockTrapCtrl"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfIpLockTrapObjectsGroup = hpicfIpLockTrapObjectsGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockTrapObjectsGroup.setDescription('A collection of objects for receiving notification information in regards to the Dynamic IP Lockdown functionality.')
hpicfIpLockObsoleteGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 2, 1, 4)).setObjects(("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockTrapCntl"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfIpLockObsoleteGroup = hpicfIpLockObsoleteGroup.setStatus('obsolete')
if mibBuilder.loadTexts: hpicfIpLockObsoleteGroup.setDescription('These objects are obsolete and are no longer used.')
hpicfIpLockCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 2, 2))
hpicfIpLockCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 2, 2, 1)).setObjects(("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockBaseGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfIpLockCompliance = hpicfIpLockCompliance.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockCompliance.setDescription('The compliance statement for HP switches that support Dynamic IP Lockdown.')
hpicfIpLockTrapCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 39, 2, 2, 2)).setObjects(("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockTrapObjectsGroup"), ("HP-ICF-IP-LOCKDOWN-MIB", "hpicfIpLockTrapsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfIpLockTrapCompliance = hpicfIpLockTrapCompliance.setStatus('current')
if mibBuilder.loadTexts: hpicfIpLockTrapCompliance.setDescription('The compliance statement for HP switches that support Dynamic IP Lockdown Notify group .')
mibBuilder.exportSymbols("HP-ICF-IP-LOCKDOWN-MIB", hpicfIpLockConformance=hpicfIpLockConformance, hpicfIpLockTrapCtrl=hpicfIpLockTrapCtrl, hpicfIpLockNotifyDstIpAddress=hpicfIpLockNotifyDstIpAddress, hpicfIpLockNotifyPktCount=hpicfIpLockNotifyPktCount, hpicfIpLockAddrType=hpicfIpLockAddrType, hpicfIpLockdown=hpicfIpLockdown, hpicfIpLockErrantNotifyObjects=hpicfIpLockErrantNotifyObjects, hpicfIpLockAddrEntry=hpicfIpLockAddrEntry, hpicfIpLockNotifyCount=hpicfIpLockNotifyCount, hpicfIpLockAddrVlan=hpicfIpLockAddrVlan, hpicfIpLockAddrPort=hpicfIpLockAddrPort, hpicfIpLockPortStatusEntry=hpicfIpLockPortStatusEntry, hpicfIpLockResourceAvailable=hpicfIpLockResourceAvailable, hpicfIpLockPortStatusTable=hpicfIpLockPortStatusTable, hpicfIpLockCompliance=hpicfIpLockCompliance, hpicfIpLockNotifyPort=hpicfIpLockNotifyPort, hpicfIpLockPortEntry=hpicfIpLockPortEntry, hpicfIpLockEnable=hpicfIpLockEnable, hpicfIpLockObjects=hpicfIpLockObjects, hpicfIpLockTrapsGroup=hpicfIpLockTrapsGroup, hpicfIpLockTrapCompliance=hpicfIpLockTrapCompliance, hpicfIpLockAddrMacAddress=hpicfIpLockAddrMacAddress, hpicfIpLockTrapsObjects=hpicfIpLockTrapsObjects, hpicfIpLockNotifySrcIpAddress=hpicfIpLockNotifySrcIpAddress, hpicfIpLockGroups=hpicfIpLockGroups, hpicfIpLockNotifySrcIpType=hpicfIpLockNotifySrcIpType, hpicfIpLockOutOfResources=hpicfIpLockOutOfResources, hpicfIpLockTraps=hpicfIpLockTraps, hpicfIpLockStatus=hpicfIpLockStatus, hpicfIpLockPortEnable=hpicfIpLockPortEnable, hpicfIpLockAddrIpAddress=hpicfIpLockAddrIpAddress, hpicfIpLockErrantNotify=hpicfIpLockErrantNotify, hpicfIpLockNotifyDstIpType=hpicfIpLockNotifyDstIpType, hpicfIpLockTrapCntl=hpicfIpLockTrapCntl, hpicfIpLockCompliances=hpicfIpLockCompliances, hpicfIpLockObsoleteGroup=hpicfIpLockObsoleteGroup, hpicfIpLockConfig=hpicfIpLockConfig, hpicfIpLockBaseGroup=hpicfIpLockBaseGroup, hpicfIpLockPortOperStatus=hpicfIpLockPortOperStatus, hpicfIpLockPortTable=hpicfIpLockPortTable, hpicfIpLockAddrTable=hpicfIpLockAddrTable, 
PYSNMP_MODULE_ID=hpicfIpLockdown, hpicfIpLockTrapObjectsGroup=hpicfIpLockTrapObjectsGroup, hpicfIpLockNotifyMacAddress=hpicfIpLockNotifyMacAddress, hpicfIpLockOutOfResourceSource=hpicfIpLockOutOfResourceSource)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
6574,
12,
2149,
37,
12,
4061,
12,
36840,
41925,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
... | 2.596815 | 8,351 |
from ....import_utils import *
from ....models_dict import MODEL_REQUIREMENTS
if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-audio-tfhub-yamnet']):
import tensorflow as tf
import tensorflow_hub as hub
from ..base import BaseAudio2Vec
from ....base import catch_vector_errors
from ....doc_utils import ModelDefinition
from datetime import date
YamnetModelDefinition = ModelDefinition(
model_id="audio/yamnet",
model_name="Yamnet",
vector_length=1024,
description="""
YAMNet is an audio event classifier that takes audio waveform as input and makes independent predictions for each
of 521 audio events from the AudioSet ontology. The model uses the MobileNet v1 architecture and was trained using
the AudioSet corpus. This model was originally released in the TensorFlow Model Garden, where we have the model
source code, the original model checkpoint, and more detailed documentation.
This model can be used:
- as a stand-alone audio event classifier that provides a reasonable baseline across a wide variety of audio events.
- as a high-level feature extractor: the 1024-D embedding output of YAMNet can be used as the input features of another shallow model which can then be trained on a small amount of data for a particular task. This allows quickly creating specialized audio classifiers without requiring a lot of labeled data and without having to train a large model end-to-end.
- as a warm start: the YAMNet model parameters can be used to initialize part of a larger model which allows faster fine-tuning and model exploration.
""",
release_date=date(2020,3,11),
limitations="""
YAMNet's classifier outputs have not been calibrated across classes, so you cannot directly treat
the outputs as probabilities. For any given task, you will very likely need to perform a calibration with task-specific data
which lets you assign proper per-class score thresholds and scaling.
YAMNet has been trained on millions of YouTube videos and although these are very diverse, there can still be a domain mismatch
between the average YouTube video and the audio inputs expected for any given task. You should expect to do some amount of
fine-tuning and calibration to make YAMNet usable in any system that you build.""",
repo="https://tfhub.dev/google/yamnet/1",
installation="pip install vectorhub[encoders-audio-tfhub]",
example="""
#pip install vectorhub[encoders-audio-tfhub]
from vectorhub.encoders.audio.tfhub import Yamnet2Vec
model = Yamnet2Vec()
sample = model.read('https://vecsearch-bucket.s3.us-east-2.amazonaws.com/voices/common_voice_en_2.wav')
model.encode(sample)
"""
)
__doc__ = YamnetModelDefinition.create_docs() | [
6738,
19424,
11748,
62,
26791,
1330,
1635,
198,
6738,
19424,
27530,
62,
11600,
1330,
19164,
3698,
62,
2200,
49128,
28957,
198,
361,
318,
62,
439,
62,
45841,
1387,
62,
37050,
7,
33365,
3698,
62,
2200,
49128,
28957,
17816,
12685,
375,
364... | 3.519595 | 791 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017 "Shopify inc." All rights reserved.
# Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
from __future__ import unicode_literals
import xml.etree.ElementTree as et
import pytest
import six
import tests.utils
@pytest.fixture
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
2177,
366,
29917,
1958,
753,
526,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
17168,
12,
7635,
5964,
326,
... | 3.23 | 100 |
"""create openvas_vuln table
Revision ID: 506c8e35ba7c
Revises: 13b7c3d4c802
Create Date: 2017-07-21 12:19:35.711173
"""
from sqlalchemy.dialects import postgresql
from alembic import op
import sqlalchemy as sa
import datetime
# revision identifiers, used by Alembic.
revision = '506c8e35ba7c'
down_revision = '13b7c3d4c802'
branch_labels = None
depends_on = None
| [
37811,
17953,
1280,
11017,
62,
85,
377,
77,
3084,
198,
198,
18009,
1166,
4522,
25,
2026,
21,
66,
23,
68,
2327,
7012,
22,
66,
198,
18009,
2696,
25,
1511,
65,
22,
66,
18,
67,
19,
66,
30863,
198,
16447,
7536,
25,
2177,
12,
2998,
12... | 2.483221 | 149 |
# Generated by Django 3.2.4 on 2021-07-09 04:35
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
19,
319,
33448,
12,
2998,
12,
2931,
8702,
25,
2327,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
import torch
import numpy as np
import collections
from itertools import repeat
import random
def flatten(x):
'''
flatten high dimensional tensor x into an array
:param x:
:return: 1 dimensional tensor
'''
dims = x.size()[1:] #remove the first dimension as it is batch dimension
num_features = 1
for s in dims: num_features *= s
return x.contiguous().view(-1, num_features)
#from spotlight
#from spotlight
#from spotlight
#from spotlight
#convert ids to torch Tensor
#use to detach some module, prevent updating gradients.
#get number of parameters in a model
######## TO MAKE POSITION EMBEDDDING ##########################
def make_positions(tensor, padding_idx, left_pad):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1.
Padding symbols are ignored, but it is necessary to specify whether padding
is added on the left side (left_pad=True) or right side (left_pad=False).
"""
max_pos = padding_idx + 1 + tensor.size(1)
if not hasattr(make_positions, 'range_buf'):
make_positions.range_buf = tensor.new()
make_positions.range_buf = make_positions.range_buf.type_as(tensor)
if make_positions.range_buf.numel() < max_pos:
torch.arange(padding_idx + 1, max_pos, out=make_positions.range_buf)
mask = tensor.ne(padding_idx)
positions = make_positions.range_buf[:tensor.size(1)].expand_as(tensor)
if left_pad:
positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
return tensor.clone().masked_scatter_(mask, positions[mask])
_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
_quadruple = _ntuple(4)
| [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
17268,
198,
6738,
340,
861,
10141,
1330,
9585,
198,
11748,
4738,
198,
198,
4299,
27172,
268,
7,
87,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
27172,
268,
102... | 2.743961 | 621 |