content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import pytest
from PyQt5 import QtWidgets
from pymodaq.daq_utils.plotting.viewerND.viewerND_main import ViewerND
@pytest.fixture
| [
11748,
12972,
9288,
198,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
54,
312,
11407,
198,
6738,
12972,
4666,
30188,
13,
48539,
62,
26791,
13,
29487,
889,
13,
1177,
263,
8575,
13,
1177,
263,
8575,
62,
12417,
1330,
3582,
263,
8575,
628,
... | 2.64 | 50 |
import os
import shutil
from .file import create_config_file, load_config_file, load_css_file, \
load_data_file, load_js_file, load_template_file
from .util import generate_id
| [
11748,
28686,
198,
11748,
4423,
346,
198,
6738,
764,
7753,
1330,
2251,
62,
11250,
62,
7753,
11,
3440,
62,
11250,
62,
7753,
11,
3440,
62,
25471,
62,
7753,
11,
3467,
198,
220,
220,
220,
3440,
62,
7890,
62,
7753,
11,
3440,
62,
8457,
... | 2.897059 | 68 |
import numpy as np
import cv2
import cv2.cv as cv
import facetracker
import traceback
from SimpleCV import *
from video import create_capture
from common import clock, draw_str
import pdb
help_message = '''
USAGE: facedetect.py [--cascade <cascade_fn>] [--nested-cascade <cascade_fn>] [<video_source>]
'''
try_me = 10
if __name__ == '__main__':
import sys, getopt
print help_message
args, video_src = getopt.getopt(sys.argv[1:], '', ['face=', 'con=', 'tri='])
try: video_src = video_src[0]
except: video_src = 0
args = dict(args)
face_fn = args.get('--con', r"../external/FaceTracker/model/face2.tracker")
con_fn = args.get('--con', r"../external/FaceTracker/model/face.con")
tri_fn = args.get('--tri', r"../external/FaceTracker/model/face.tri")
tracker = facetracker.FaceTracker(face_fn)
# Tracker init variables
conns = facetracker.LoadCon(con_fn)
trigs = facetracker.LoadTri(tri_fn)
cam = create_capture(video_src, width=800,height=600)
tracker.setWindowSizes((11,9,7))
try:
while True:
t = clock()
ret, img = cam.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
cv2.imshow('facedetect', gray)
if tracker.update(gray):
draw_str(img, (20, 40), 'pos: %.1f, %.1f' % tracker.getPosition())
draw_str(img, (20, 60), 'scale: %.1f ' % tracker.getScale())
draw_str(img, (20, 80), 'orientation: %.1f, %.1f, %.1f' % tracker.getOrientation())
tracker.getScale()
tracker.getOrientation()
# img = tracker.draw(img, conns, trigs)
else:
tracker.setWindowSizes((11, 9,7))
dt = clock() - t
# Draw eyes ONLY MY CODE
faceShape= tracker.get2DShape()[0]
MAX_SIZE = 132 / 2
left_eye_x, left_eye_y = faceShape[36:42],faceShape[36+MAX_SIZE:42+MAX_SIZE]
MIN_X = MIN_Y = float("+inf")
MAX_X = MAX_Y = float("-inf")
for i in range(len(left_eye_x)):
x = int(round(left_eye_x[i]))
y = int(round(left_eye_y[i]))
# Debug to check if eye is found
# draw_str(img, (x, y), '*')
if(x > MAX_X):
MAX_X = x
if(x < MIN_X):
MIN_X = x
if(y > MAX_Y):
MAX_Y = y
if(y < MIN_Y):
MIN_Y = y
MAX_Y += 10
MAX_X += 10
MIN_Y -= 10
MIN_X -= 10
# Get LEFT_EYE INTO Image
subset_img = img[MIN_Y:MAX_Y, MIN_X:MAX_X]
# Transpose suc that (y,x) -> (x,y)
_img = Image(subset_img.transpose(1,0,2))
_eye_left=_img.colorDistance(SimpleCV.Color.BLACK).dilate(3)
# _eye_left=_eye_left.stretch(120,140).invert()
# invert the image so the pupil is white,
# Blobs track the white!
blobs = _eye_left.invert().findBlobs(minsize=1,threshval=100)
pdb.set_trace()
if(len(blobs)>0): # if we got a blob
blobs[0].show() # the zeroth blob is the largest blob - draw it
# write the blob's centroid to the image
# _img.dl().text(locationStr,(0,0),color=Color.RED)
# save the image
_img.save("eye_only_1.png")
# and show us the result.
# END MY CODE
# draw_str(img, (20, 20), 'time: %.1f ms' % (dt*1000))
# cv2.rectangle(img, (MIN_X, MIN_Y), (MAX_X, MAX_Y), (255,0,0), 2)
cv2.imshow('facedetect', img)
if 0xFF & cv2.waitKey(5) == 27:
break
except Exception, err:
print traceback.format_exc()
cv2.destroyAllWindows()
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
269,
85,
17,
201,
198,
11748,
269,
85,
17,
13,
33967,
355,
269,
85,
201,
198,
11748,
1777,
21879,
10735,
201,
198,
11748,
12854,
1891,
201,
198,
6738,
17427,
33538,
1330,
1635,
201,
198,... | 1.788353 | 2,301 |
#!/usr/bin/env python
# coding: utf-8
# StimulationDevice class demonstration
# =====================================
#
# Assumptions:
# - This is intended to function as a generic device class for multiprimary stimulators.
# - devices are additive
# - calibration is stationary
# - expect values as W/m2/nm
# In[1]:
import sys
sys.path.insert(0, '../')
import random
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from silentsub.device import StimulationDevice
from silentsub.CIE import get_CIES026
from silentsub import colorfunc
sns.set_context('notebook')
sns.set_style('whitegrid')
# Load data with pandas -- this is our starting point
# ---------------------------------------------------
# In[2]:
spds = pd.read_csv('../data/S2_corrected_oo_spectra.csv', index_col=['led','intensity'])
spds.index.rename(['Primary', 'Setting'], inplace=True)
spds.columns = pd.Int64Index(spds.columns.astype(int))
spds.columns.name = 'Wavelength'
spds = spds.sort_index()
spds
# Instantiate `StimulationDevice` class
# -------------------------------------
# In[3]:
# list of colors for the primaries
colors = ['blueviolet', 'royalblue', 'darkblue', 'blue', 'cyan',
'green', 'lime', 'orange', 'red', 'darkred']
# instantiate the class
device = StimulationDevice(
resolutions=[4095]*10,
colors=colors,
spds=spds,
spd_binwidth=1
)
# Plot the SPDs
# -------------
# In[4]:
_ = device.plot_spds()
# Plot the gamut of the device on CIE 1931 horseshoe
# --------------------------------------------------
# In[5]:
_ = device.plot_gamut()
# Predict output for a specific primary at a given setting
# --------------------------------------------------------
# In[6]:
primary_spd = device.predict_primary_spd(
primary=7,
setting=.5,
name='Primary 7 (half power)'
)
primary_spd.plot(legend=True, ylabel='W/m$^2$/nm', color=device.colors[7]);
# Predict output for random device settings
# -----------------------------------------
# In[7]:
settings = [random.randrange(s) for s in device.resolutions] # Using a list of integers
device_spd = device.predict_multiprimary_spd(settings, 'Random SPD')
device_spd.plot(legend=True, ylabel='W/m$^2$/nm');
print(f'Predicted output for device settings: {settings}')
# In[8]:
weights = device.settings_to_weights(settings) # Convert settings to float
device_spd = device.predict_multiprimary_spd(weights, 'Random SPD')
device_spd.plot(legend=True, ylabel='W/m$^2$/nm');
print(f'Predicted output for device settings: {weights}')
# Predict *a*-opic irradiances for a list of device settings and plot with nice colours
# --------------------------------------------------------------------------------------
# In[9]:
device_ao = device.predict_multiprimary_aopic(settings)
ao_colors = list(device.aopic_colors.values())
device_ao.plot(kind='bar', color=ao_colors, ylabel='W/m$^2$');
# Convert settings to weights and weights to settings
# ---------------------------------------------------
# In[10]:
device.settings_to_weights(settings)
# In[11]:
device.weights_to_settings(weights)
# Find a spectrum based on xy chromaticity coordinates and luminance
# ------------------------------------------------------------------
# In[12]:
xy = [.3127, .3290] # D65
luminance = 600. # Lux
res = device.find_settings_xyY(
xy=xy,
luminance=luminance,
tollerance=1e-6,
plot_solution=True,
verbose=True
)
# In[13]:
import numpy as np
bg = device.predict_multiprimary_spd(
[.2 for val in range(10)],
'background',
nosum=True)
sss = get_CIES026()
mat = bg.T.dot(sss)
pinv_mat = np.linalg.pinv(mat)
mod = np.dot(pinv_mat.T, np.array([0, 0, 0, 0, 0]))
device.predict_multiprimary_spd([.5 for val in range(10)] + mod, 'mod').plot(legend=True);
device.predict_multiprimary_spd([.5 for val in range(10)], 'notmod').plot(legend=True);
# In[ ]:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
41669,
1741,
24728,
1398,
13646,
198,
2,
46111,
1421,
198,
2,
220,
198,
2,
2195,
388,
8544,
25,
198,
2,
532,
770,
318,
5292,
284,
216... | 2.911524 | 1,345 |
"""
Author: Andres Andreu < andres at neurofuzzsecurity dot com >
Company: neuroFuzz, LLC
Date: 12/31/2015
Last Modified: 06/17/2016
functions to facilitate TCP port scanning via raw sockets
BSD 3-Clause License
Copyright (c) 2015-2016, Andres Andreu, neuroFuzz LLC
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may
be used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
*** Take note:
If you use this for criminal purposes and get caught you are on
your own and I am not liable. I wrote this for legitimate
pen-testing and auditing purposes.
***
Be kewl and give credit where it is due if you use this. Also,
send me feedback as I don't have the bandwidth to test for every
condition - Dre
"""
import socket
from nftk_raw_packet import *
def create_raw_socket(is_target_local=False):
''' create a raw socket with a short timeout '''
try:
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_TCP)
except socket.error , msg:
print '%s - %s' % (str(msg[0]), msg[1])
sys.exit()
'''
if LAN based then aggressive timeout
is possible
'''
if is_target_local:
s.settimeout(.2)
else:
# TODO if remote what timeout ????
s.settimeout(5)
# tell kernel not to put in headers since we are providing it
s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
return s
| [
37811,
198,
220,
220,
220,
6434,
25,
843,
411,
10948,
84,
1279,
290,
411,
379,
7669,
69,
4715,
12961,
16605,
401,
1875,
198,
220,
220,
220,
5834,
25,
7669,
37,
4715,
11,
11419,
198,
220,
220,
220,
7536,
25,
1105,
14,
3132,
14,
462... | 2.925553 | 994 |
from copy import deepcopy
import immutables
import pytest
from skepticoin.humans import computer
from skepticoin.params import SASHIMI_PER_COIN, MAX_COINBASE_RANDOM_DATA_SIZE
from skepticoin.coinstate import CoinState
from skepticoin.consensus import (
construct_minable_summary_genesis,
# construct_minable_summary,
construct_coinbase_transaction,
construct_pow_evidence,
get_block_subsidy,
get_transaction_fee,
validate_coinbase_transaction_by_itself,
validate_block_header_by_itself,
validate_block_by_itself,
# validate_non_coinbase_transaction_in_coinstate,
ValidateBlockError,
ValidateBlockHeaderError,
ValidateTransactionError,
ValidatePOWError,
)
from skepticoin.signing import SECP256k1PublicKey, SECP256k1Signature
from skepticoin.datatypes import Transaction, OutputReference, Input, Output, Block
example_public_key = SECP256k1PublicKey(b'x' * 64)
def test_construct_minable_summary_with_transactions():
# TODO
'''
coinstate = CoinState.empty()
construct_minable_summary(coinstate, [
construct_coinbase_transaction(0, [], immutables.Map(), b"Political statement goes here", example_public_key)
], 1231006505, 0)
'''
def test_validate_non_coinbase_transaction_in_coinstate_invalid_output_reference():
# I started on the below, but the amount of setup is getting excessive... perhaps it's going to be easier to
# express these tests when more mechanisms of _creation_ are available? we'll see
'''
previous_transaction_hash = b'a' * 32
unspent_transaction_outs = immutables.Map()
transaction = Transaction(
inputs=[Input(
OutputReference(previous_transaction_hash, 1),
SECP256k1Signature(b'y' * 64),
)],
outputs=[Output(30, public_key)]
)
with pytest.raises(ValidateTransactionError, match=r".*does not exist.*") as e:
validate_non_coinbase_transaction_in_coinstate
'''
| [
6738,
4866,
1330,
2769,
30073,
198,
11748,
2296,
315,
2977,
198,
11748,
12972,
9288,
198,
198,
6738,
11200,
3713,
259,
13,
40205,
1330,
3644,
198,
6738,
11200,
3713,
259,
13,
37266,
1330,
311,
11211,
3955,
40,
62,
18973,
62,
8220,
1268,... | 2.73461 | 731 |
#!/usr/bin/env python
from pygame import midi
import time
import os
"""
this code is for reading a generic midi keyboard
references I used to make this code:
https://stackoverflow.com/questions/1554896/getting-input-from-midi-devices-live-python
https://www.pygame.org/docs/ref/midi.html
https://stackoverflow.com/questions/64818410/pygame-read-midi-input
"""
midi.init()
print(midi.get_count())
for id in range(midi.get_count()):
interf, name, is_input, is_output, is_opened = midi.get_device_info(id)
#s = "id: {}".format(id)
s = interf.decode("utf-8") + " " + name.decode("utf-8")
if is_input == 1:
s += " input"
if is_output == 1:
s += " output"
if is_opened == 1:
s += " (opened)"
print(id, s)
print("Enter the number above corresponding to the input you want to monitor")
input_id = int(input())
input_device = midi.Input(input_id)
while True:
while not input_device.poll():
time.sleep(0.05)
data = input_device.read(1)
# print(data)
print(data[0][0])
event_type, channel, value, _ = data[0][0]
if event_type == 144: #button pressed
print("button {} pressed, velocity: {}".format(channel, value))
elif event_type == 128: #button released
print("button {} released".format(channel))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
12972,
6057,
1330,
3095,
72,
198,
11748,
640,
198,
11748,
28686,
628,
198,
37811,
198,
5661,
2438,
318,
329,
3555,
257,
14276,
3095,
72,
10586,
198,
198,
5420,
4972,
314,
973,
28... | 2.713043 | 460 |
import sys
import os
import shutil
is_exe_file = getattr(sys, 'frozen', False)
if is_exe_file:
# If the application is run as a bundle, the pyInstaller bootloader
# extends the sys module by a flag frozen=True and sets the app
# path into variable _MEIPASS'.
APPLICATION_PATH = sys._MEIPASS
else:
APPLICATION_PATH = os.path.dirname(os.path.abspath(__file__))
# TEMPLATES_LOCATION = os.path.join(sys.path[0], 'templates')
DEST_PATH = os.getcwd()
DEV_FILE_NAME = "dev.py"
UTEST_FILE_NAME = "dev_unittest.py"
os.chdir(APPLICATION_PATH)
if __name__ == "__main__":
counts = len(sys.argv)
data = ""
if counts > 1:
# get file name to be created
dev_file_name = sys.argv[1]
if is_exe_file:
src_dir = os.path.abspath(os.path.join(APPLICATION_PATH, "../.."))
else:
src_dir = APPLICATION_PATH
dst_dev_file_name = "{}.py".format(dev_file_name)
dst_tst_file_name = "{}_ut.py".format(dev_file_name)
# copy template file to the local path
try:
shutil.copy2(os.path.join(src_dir, 'templates', DEV_FILE_NAME),
os.path.join(DEST_PATH, dst_dev_file_name))
shutil.copy2(os.path.join(src_dir, 'templates', UTEST_FILE_NAME),
os.path.join(DEST_PATH, dst_tst_file_name))
except Exception as e:
print("copy failed: %s" % e)
sys.exit(-1)
# modify the content of the dev.py
try:
with open(os.path.join(DEST_PATH, dst_tst_file_name), 'r+') as file:
for line in file.readlines():
line = line.format(dev_file_name)
data += line
with open(os.path.join(DEST_PATH, dst_tst_file_name), 'r+') as file:
file.writelines(data)
except Exception as e:
print("convert error: %s" % e)
sys.exit(-2)
else:
print("you should input the dev file's name!")
sys.exit(1) | [
11748,
25064,
201,
198,
11748,
28686,
201,
198,
11748,
4423,
346,
201,
198,
201,
198,
271,
62,
13499,
62,
7753,
796,
651,
35226,
7,
17597,
11,
705,
69,
42005,
3256,
10352,
8,
201,
198,
201,
198,
361,
318,
62,
13499,
62,
7753,
25,
... | 1.978011 | 1,046 |
"""Syscall Class."""
from typing import Callable
from ..models import MipsProgram
class Syscall:
"""Syscall Class, callable."""
def __init__(self, function: Callable[[MipsProgram], None], number: int):
"""Create Syscall."""
self.function = function
if function.__doc__:
self.description = function.__doc__.split("\n")[0]
else:
self.description = ""
name = self.function.__name__
if name.startswith("_"):
name = name[1:]
self.name = name
self.number = number
def __call__(self, program: MipsProgram):
"""Callable Instruction."""
self.function(program)
def __repr__(self) -> str:
"""Return Representation string."""
return f"Syscall({self.number}, {self.name})"
| [
37811,
44387,
13345,
5016,
526,
15931,
198,
6738,
19720,
1330,
4889,
540,
198,
198,
6738,
11485,
27530,
1330,
337,
2419,
15167,
628,
198,
4871,
311,
893,
13345,
25,
198,
220,
220,
220,
37227,
44387,
13345,
5016,
11,
869,
540,
526,
15931... | 2.355491 | 346 |
# Improve /data/raw/deaths_and_episodes_S01_S06.csv
import pandas as pd
import re
if __name__ == '__main__':
main()
| [
2,
20580,
1220,
7890,
14,
1831,
14,
22595,
82,
62,
392,
62,
538,
8052,
62,
50,
486,
62,
50,
3312,
13,
40664,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
302,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
... | 2.372549 | 51 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__modify__ = "GiriNeko"
__Email___ = "king@ineko.cc"
__author__ = "XiaoLin"
__email__ = "lolilin@outlook.com"
__license__ = "MIT"
__version__ = "1.0"
__status__ = "Production"
import os, re, requests, configparser, json, signal, logging as log, coloredlogs
from mutagen.mp3 import MP3, HeaderNotFoundError
from mutagen.id3 import ID3, APIC, TPE1, TIT2, TALB, error
from mutagen.flac import Picture, FLAC, FLACNoHeaderError
from datetime import datetime
from operator import itemgetter
from PIL import Image
from concurrent.futures import ThreadPoolExecutor, wait
CONFIG = configparser.ConfigParser()
CONFIG.read('config.ini')
SERVER = CONFIG['General']['server']
requests = requests.Session()
coloredlogs.install(level=CONFIG['General']['logLevel'], fmt="%(asctime)s %(name)s[%(process)d] %(levelname)s %(message)s")
def format_string(string):
"""
Replace illegal character with ' '
"""
return re.sub(r'[\\/:*?"<>|\t]', ' ', string)
signal.signal(signal.SIGINT, quit)
signal.signal(signal.SIGTERM, quit)
log.info("CloudWoman Version {}".format(__version__))
# Create target directories if don't exist
dirName = './Data'
if not os.path.exists(dirName):
os.mkdir(dirName)
dirName = './../MUSIC/CloudWoman'
if not os.path.exists(dirName):
os.mkdir(dirName)
dirName = './../MUSIC/CloudWoman/MUSIC'
if not os.path.exists(dirName):
os.mkdir(dirName)
if CONFIG['PlayList']['genListForFolder']:
files = os.listdir("./../MUSIC")
for dir_name in files:
if os.path.isdir(os.path.join("./../MUSIC", dir_name)):
if dir_name == "CloudWoman":
continue
playlist_file = open("./../MUSIC/{}.m3u".format(dir_name), 'w', encoding='utf8')
playlist_file.writelines("#EXTM3U\n")
folder = os.listdir(os.path.join("./../MUSIC", dir_name))
for track in folder:
if not os.path.isdir(os.path.join("./../MUSIC", dir_name,track)):
if trackendswith('flac') or trackendswith('mp3'):
playlist_file.writelines('\n{}/{}'.format(dir_name,track))
playlist_file.close()
log.info("Successfully generated playlist for folder: {}".format(dir_name))
if CONFIG['General']['enableLogin']:
login = requests.get(SERVER + "login/cellphone?phone={}&password={}".format(CONFIG['General']['cellphone'],CONFIG['General']['password'])).json()
log.debug(json.dumps(login))
if not login['code'] == 200:
log.error("Login failed: " + login['msg'])
exit()
UID = login['profile']['userId']
log.info("Login success")
else:
UID = CONFIG['General']['UID']
##此處為獲取播放列表
#
#playlist = requests.get(SERVER + "user/playlist?uid=" + str(UID)).json()
#Playlist函數指定(空)不備選列表)
##這邊採用了‘浙江共青團’的用戶,歌單少,好排除
playlist = requests.get(SERVER + "user/playlist?uid=" + str(1677641391)).json()
log.debug(json.dumps(playlist))
#此處似乎是添加播放列表
for extraList in CONFIG['PlayList']['extraList'].split(','):
#for extraList in CONFIG['extraList'].split(','):
tmp = requests.get(SERVER + "playlist/detail?id=" + extraList.replace(" ", "")).json()
log.debug(json.dumps(tmp))
if tmp['code'] == 200:
playlist['playlist'].append({
'name': tmp['playlist']['name'],
'id': tmp['playlist']['id']
})
log.info("Successfully get all tracks from playlist {}".format(tmp['playlist']['name']))
del tmp, extraList
excludeList = []
for tmp in CONFIG['PlayList']['excludeList'].split(','):
if not re.search(r"\w+",tmp) is None:
excludeList.append(int(tmp.replace(" ", "")))
playlist['playlist'] = [x for x in playlist['playlist'] if x['id'] not in excludeList]
log.info("The list of playlists we're going to download:")
for list in playlist['playlist']:
log.info("{} ({})".format(list['name'],list['id']))
del list, excludeList
for list in playlist['playlist']:
playlist_name = list['name']
playlist_tracks = requests.get(SERVER + "playlist/detail?id=" + str(list['id'])).json()['playlist']['tracks']
log.debug(json.dumps(playlist_tracks))
log.info('Downloading playlist: ' + playlist_name)
playlist_file = playlist_file_path = dirName + '/../' + format_string(playlist_name) + '.m3u'
if os.path.exists(playlist_file):
os.remove(playlist_file)
playlist_file = open(playlist_file, 'w', encoding='utf8')
playlist_file.writelines("#EXTM3U\n")
i = 0
track_error = {}
for track in playlist_tracks:
i += 1
log.info('{}: {}'.format(i, track['name']))
track_name = format_string(track['name'])
if is_downloaded(track['name']):
log.info('Music file already download')
if os.path.isfile(os.path.join(dirName,str(track['name']) + '.mp3')):
playlist_file.writelines("\n" + 'MUSIC/' + str(track['name']) + '.mp3')
else:
playlist_file.writelines("\n" + 'MUSIC/' + str(track['name']) + '.flac')
playlist_file.flush()
continue
status = check_retry_limit(track['id'],track_error)
if status == 1:
log.error('CANNOT download music: ' + track['name'])
continue
elif status == 2:
log.warning('Retring redownload music: ' + track['name'])
# download song
track_url = requests.get(SERVER + 'song/url?br={}&id='.format(CONFIG['General']['bitRate']) + str(track['id'])).json()
log.debug(json.dumps(track))
if (not track_url is None) and 'data' in track_url:
track_url = track_url['data'][0]['url']
if track_url is None or not validate_url(track_url):
log.warning('Song <<{}>> is not available due to copyright issue!'.format(track_name))
continue
try:
track_file_name = '{}.{}'.format(str(track['name']),os.path.splitext(track_url)[-1][1:])
#replace some file name
track_file_name = track_file_name.replace("/"," ")
track_file_name = track_file_name.replace("?"," ")
track_file_name = track_file_name.replace(":"," ")
track_file_name = track_file_name.replace("|"," ")
track_file_name = track_file_name.replace("<"," ")
track_file_name = track_file_name.replace(">"," ")
track_file_name = track_file_name.replace('"'," ")
track_file_path = os.path.join(dirName, track_file_name)
download_file(track_url, track_file_name, dirName)
playlist_file.writelines("\n" + 'MUSIC/' + track_file_name)
playlist_file.flush()
# download cover
cover_url = track['al']['picUrl']
if cover_url is None:
cover_url = 'http://p1.music.126.net/9A346Q9fbCSmylIkId7U3g==/109951163540324581.jpg'
cover_file_name = 'cover_{}.jpg'.format(track['id'])
cover_file_path = os.path.join(dirName, cover_file_name)
download_file(cover_url, cover_file_name, dirName, False)
except Exception as e:
log.error('Caused an error while downloading a file: ' + str(e))
playlist_tracks.append(track)
track_error[track['id']] = 1
continue
# resize cover
try:
img = Image.open(cover_file_path)
if img.size[0] > 640 or img.size[1] > 640:
img.thumbnail((640,640), Image.ANTIALIAS)
if img.format == 'PNG':
img = img.convert('RGB')
img.save(cover_file_path, quality=90)
except IOError:
log.warning('Can\'t open image:' + cover_file_path)
except Exception as e:
log.error('Caused an error while resizing cover: ' + str(e))
playlist_tracks.append(track)
track_error[track['id']] = 1
continue
try:
# add metadata for song
if os.path.splitext(track_url)[-1][1:] != 'flac':
# id3
try:
audio = MP3(track_file_path, ID3=ID3)
if audio.tags is None:
log.warning('No tags, trying to add one!')
try:
audio.add_tags()
audio.save()
except error as e:
log.error('Error occur when add tags:' + str(e))
# Modify ID3 tags
id3 = ID3(track_file_path)
# Remove old 'APIC' frame
# Because two 'APIC' may exist together with the different description
# For more information visit: http://mutagen.readthedocs.io/en/latest/user/id3.html
if id3.getall('APIC'):
id3.delall('APIC')
# add album cover
id3.add(APIC(encoding=0,mime='image/jpeg',type=3,data=open(cover_file_path, 'rb').read()))
artists = []
for artist in track['ar']:
artists.append(artist['name'])
# add artist name
id3.add(TPE1(text=artists))
# add song name
id3.add(TIT2(encoding=3,text=track['name']))
# add album name
id3.add(TALB(encoding=3,text=track['al']['name']))
id3.save(v2_version=3)
except HeaderNotFoundError:
log.error('Can\'t sync to MPEG frame, not an validate MP3 file!')
playlist_tracks.append(track)
track_error[track['id']] = 1
continue
else:
try:
audio = FLAC(track_file_path)
if audio.tags is None:
log.warning('No tags, trying to add one!')
try:
audio.add_tags()
audio.save()
except error as e:
log.error('Error occur when add tags:' + str(e))
audio['title'] = track['name']
artists = []
for artist in track['ar']:
artists.append(artist['name'])
audio['artist'] = artists
audio['album'] = track['al']['name']
image = Picture()
image.type = 3
image.desc = 'front cover'
image.mime = 'image/jpeg'
image.width = 640
image.height = 640
image.data = open(cover_file_path, 'rb').read()
audio.save()
audio.clear_pictures()
audio.add_picture(image)
audio.save()
except FLACNoHeaderError:
log.error('Can\'t sync to MPEG frame, not an validate FLAC file!')
playlist_tracks.append(track)
track_error[track['id']] = 1
continue
except Exception as e:
log.error('Caused an error while adding metadata: ' + str(e))
playlist_tracks.append(track)
track_error[track['id']] = 1
continue
# delete cover file
os.remove(cover_file_path)
try:
track_lyric_raw = requests.get(SERVER + 'lyric?id=' + str(track['id'])).json()
log.debug(json.dumps(track_lyric_raw))
if ('lrc' in track_lyric_raw) and not(track_lyric_raw['lrc']['lyric'] is None):
fix_track_name = str(track['name'])
fix_track_name = fix_track_name.replace("/"," ")
fix_track_name = fix_track_name.replace("?"," ")
fix_track_name = fix_track_name.replace(":"," ")
fix_track_name = fix_track_name.replace("|"," ")
fix_track_name = fix_track_name.replace("<"," ")
fix_track_name = fix_track_name.replace(">"," ")
fix_track_name = fix_track_name.replace('"'," ")
track_lyric_file = open(os.path.join(dirName, fix_track_name + '.lrc'), 'w', encoding='utf8')
if ('tlyric' in track_lyric_raw) and (track_lyric_raw['tlyric']['version'] != 0) and not(track_lyric_raw['tlyric']['lyric'] is None):
track_lyric = track_lyric_raw['lrc']['lyric'].split('\n')
track_lyric_trans = track_lyric_raw['tlyric']['lyric'].split('\n')
lyric = []
for a in track_lyric:
time = get_lyric_time(a)
if not time:
continue
data = {
'time': time,
'type': 0,
'content': re.sub(r"^\[\w+\:\w+\.\w+\]","",a)
}
lyric.append(data)
for a in track_lyric_trans:
time = get_lyric_time(a)
if not time:
continue
data = {
'time': time,
'type': 1,
'content': re.sub(r"^\[\w+\:\w+\.\w+\]","",a)
}
lyric.append(data)
lyric = sorted(lyric,key = itemgetter('time', 'type'))
for key, value in enumerate(lyric):
if (value['type'] == 0) or (key == 0) or (key == len(lyric) - 1):
continue
if (lyric[key - 1]['type'] == 1):
continue
if not (lyric[key - 1]['time'] == value['time']):
continue
lyric[key]['time'] = lyric[key + 1]['time']
for a in lyric:
track_lyric_file.writelines("{}{}\n".format(gen_lyric_time(a['time']),a['content']))
else:
track_lyric = track_lyric_raw['lrc']['lyric'].split('\n')
for a in track_lyric:
time = get_lyric_time(a)
if not time:
continue
track_lyric_file.writelines(gen_lyric_time(time) + re.sub(r"^\[\w+\:\w+\.\w+\]","",a) + "\n")
track_lyric_file.close()
except Exception as e:
log.error('Caused an error while generating lyrics: ' + str(e))
track_error[track['id']] = 1
playlist_tracks.append(track)
continue
downloaded_music(track['id'])
playlist_file.writelines("\n")
playlist_file.close()
os.chmod(playlist_file_path,0o777)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
4666,
1958,
834,
796,
366,
38,
14783,
45,
988,
78,
1,
198,
834,
15333,
17569,
796,
366,
3364,
31,
500,
72... | 1.883462 | 8,006 |
# Make sure our GEO data is loaded
from django.test import TestCase
from services.models import LebanonRegion, ServiceArea
| [
2,
6889,
1654,
674,
402,
4720,
1366,
318,
9639,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
2594,
13,
27530,
1330,
16849,
47371,
11,
4809,
30547,
628
] | 4.133333 | 30 |
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from . import affine_grid_cuda
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
15553,
198,
6738,
28034,
13,
2306,
519,
6335,
13,
8818,
1330,
1752,
62,
39799,
3379,
198,
198,
6738,
764,
1330,
1527,
500,
6... | 3.5625 | 48 |
if cur == first:
first = first.nextNode
else:
pre = first
while pre.nextNode != cur:
pre = pre.nextNode
pre.nextNode = cur.nextNode | [
361,
1090,
6624,
717,
25,
201,
198,
197,
11085,
796,
717,
13,
19545,
19667,
201,
198,
17772,
25,
201,
198,
197,
3866,
796,
717,
201,
198,
197,
4514,
662,
13,
19545,
19667,
14512,
1090,
25,
201,
198,
197,
197,
3866,
796,
662,
13,
1... | 2.433333 | 60 |
from ..layer_operation import LayerOperation
import tensorflow as tf
import re
| [
6738,
11485,
29289,
62,
27184,
1330,
34398,
32180,
201,
198,
11748,
11192,
273,
11125,
355,
48700,
201,
198,
11748,
302,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198
] | 3 | 30 |
import os
from glob import glob
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from tqdm import tqdm

# Interactive patch-extraction tool: for each generated ("fake") /
# ground-truth ("real") image pair, display the real image, let the user
# click patch centres, then save 128x128 patches cropped from both images
# at those positions, plus a log of the clicked coordinates.
ACTIVE = True  # select the "Active" output folder (else "Quiet")
STD = 0
ITERATION = 470000
MODEL_NAME = 'height_1024_val_False_without_error'
dir_image = './checkpoints/Over_{}_std/Image/Test/{}/{}'.format(STD, MODEL_NAME, ITERATION)
dir_patch_active = './checkpoints/Over_{}_std/Analysis/{}/{}/Patch/Active'.format(STD, MODEL_NAME, ITERATION)
dir_patch_quiet = './checkpoints/Over_{}_std/Analysis/{}/{}/Patch/Quiet'.format(STD, MODEL_NAME, ITERATION)
os.makedirs(dir_patch_active, exist_ok=True)
os.makedirs(dir_patch_quiet, exist_ok=True)
list_fake = sorted(glob(os.path.join(dir_image, '*_fake.png')))
list_real = sorted(glob(os.path.join(dir_image, '*_real.png')))
assert len(list_fake) == len(list_real)
dir_patch = dir_patch_active if ACTIVE else dir_patch_quiet
# Start the position log with a header line (truncates any previous log).
with open(os.path.join(dir_patch, 'positions.txt'), 'wt') as log:
    log.write('file_name, positions(W, H)\n')
# Indices (into the flattened 1024x1024 image) of pixels outside the central
# circle of radius 392; those pixels are masked to mid-grey (127) below.
list_index = list()
k = 0
for i in range(1024):
    for j in range(1024):
        if (i - 512) ** 2 + (j - 512) ** 2 > 392 ** 2:
            list_index.append(k)
        k += 1
for i in tqdm(range(len(list_fake))):
    # BUG FIX: the original used str.strip('_fake.png'), which strips any of
    # the characters "_fake.png" from BOTH ends and mangles names that begin
    # or end with those characters. Remove the exact suffix instead.
    name = os.path.split(list_fake[i])[-1]
    if name.endswith('_fake.png'):
        name = name[:-len('_fake.png')]
    fake_np = np.array(Image.open(list_fake[i]), dtype=np.uint8).flatten()
    real_np = np.array(Image.open(list_real[i]), dtype=np.uint8).flatten()
    fake_np[list_index] = 127
    real_np[list_index] = 127
    fake_image = fake_np.reshape((1024, 1024))
    real_image = real_np.reshape((1024, 1024))
    f, a = plt.subplots(figsize=(15, 15))
    a.imshow(real_image, cmap='gray')
    f.tight_layout()
    positions = list()
    # NOTE(review): my_click/my_close are not defined in this chunk;
    # presumably my_click appends clicked (x, y) pairs to `positions` --
    # confirm against the full file.
    cid = f.canvas.mpl_connect('button_press_event', my_click)
    plt.show()
    f.canvas.mpl_connect(cid, my_close)
    for j, position in enumerate(positions):
        # Crop a 128x128 window (63 px before, 65 px after the centre)
        # around each clicked (W, H) position from both images.
        fake_patch = Image.fromarray(np.array(fake_image)[position[1] - 63: position[1] + 65, position[0] - 63: position[0] + 65])
        real_patch = Image.fromarray(np.array(real_image)[position[1] - 63: position[1] + 65, position[0] - 63: position[0] + 65])
        fake_patch.save(os.path.join(dir_patch, name + '_patch_{}_fake.png'.format(j)))
        real_patch.save(os.path.join(dir_patch, name + '_patch_{}_real.png'.format(j)))
    # Append this image's clicked positions to the log.
    with open(os.path.join(dir_patch, 'positions.txt'), 'a') as log:
        log.write(', '.join([name, *list(map(lambda x: str(x), positions))]) + '\n')
| [
11748,
28686,
198,
6738,
15095,
1330,
15095,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
628,
198,
... | 2.330992 | 1,139 |
import logging
import json
from pathlib import Path
import urllib.request
import itertools as it
from zensols.actioncli import persisted
from zensols.dlqaclass import (
CorpusReader,
CorpusReaderFactory,
CorpusParserFactory,
)
logger = logging.getLogger(__name__)
CorpusParserFactory.register(SquadCorpusParser)
CorpusReaderFactory.register(SquadCorpusReader)
| [
11748,
18931,
198,
11748,
33918,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
2956,
297,
571,
13,
25927,
198,
11748,
340,
861,
10141,
355,
340,
198,
6738,
1976,
641,
10220,
13,
2673,
44506,
1330,
33264,
198,
6738,
1976,
641,
10220,
1... | 3.158333 | 120 |
"""Methods pertaining to loading and configuring CTA "L" station data."""
import logging
from pathlib import Path
from confluent_kafka import avro
from confluent_kafka.avro import ClientError
from .train import Train
from .turnstile import Turnstile
from .producer import Producer
logger = logging.getLogger(__name__)
class Station(Producer):
    """Kafka producer that emits train-arrival events for one CTA station."""

    # Avro schema for the message key (a single timestamp field).
    key_schema = avro.load(f"{Path(__file__).parents[0]}/schemas/arrival_key.json")

    # Avro schema for the arrival event payload produced in `run`.
    value_schema = avro.load(f"{Path(__file__).parents[0]}/schemas/arrival_value.json")

    def run(self, train: Train, direction: str, prev_station_id, prev_direction):
        """Produce a single arrival event for `train` at this station.

        `direction` is "a" or "b"; `prev_station_id`/`prev_direction`
        describe where the train arrived from (may be None).
        """
        logger.info(str(self))
        try:
            # NOTE(review): self.producer, self.topic_name, self.station_id,
            # self.color, time_millis() and delivery_report are presumably
            # provided by the Producer base / __init__ elided from this
            # chunk -- confirm against the full file.
            self.producer.produce(
                topic=self.topic_name,
                key={"timestamp": self.time_millis()},
                value={
                    "station_id": self.station_id,
                    "train_id": train.train_id,
                    "direction": direction,
                    "line": self.color.name,
                    "train_status": train.status.name,
                    "prev_station_id": prev_station_id,
                    "prev_direction": prev_direction
                },
                callback=self.delivery_report
            )
        except ClientError as err:
            # Schema/serialization failures from confluent_kafka.avro.
            print(f"Produce topic-name: {self.topic_name}. Error: {err}")
        except Exception as e:
            # Catch-all so a produce failure does not kill the simulation.
            print(f"noo- what is this? Error: {e}")

    def arrive_a(self, train, prev_station_id, prev_direction):
        """Denotes a train arrival at this station in the 'a' direction"""
        self.a_train = train
        self.run(train, "a", prev_station_id, prev_direction)

    def arrive_b(self, train, prev_station_id, prev_direction):
        """Denotes a train arrival at this station in the 'b' direction"""
        self.b_train = train
        self.run(train, "b", prev_station_id, prev_direction)

    def close(self):
        """Prepares the producer for exit by cleaning up the producer"""
        # Close the attached turnstile producer first, then the base class.
        self.turnstile.close()
        super(Station, self).close()
| [
37811,
46202,
27113,
284,
11046,
290,
4566,
870,
327,
5603,
366,
43,
1,
4429,
1366,
526,
15931,
198,
11748,
18931,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
1013,
28216,
62,
74,
1878,
4914,
1330,
1196,
305,
198,
6738,
1013,
... | 2.164502 | 1,155 |
# -*- coding: utf-8 -*-
import os
import logging
import gitlab
from settings import GIT_SETTINGS
from settings import MAIL_SETTINGS
from settings import LOG_SETTINGS
from settings import MAIL_NOTIFY_ENABLE
from custome_logging import BufferingSMTPHandler
from custome_logging import ConsoleHandler
if __name__ == "__main__":
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
18931,
198,
198,
11748,
17606,
23912,
198,
198,
6738,
6460,
1330,
402,
2043,
62,
28480,
51,
20754,
198,
6738,
6460,
1330,
8779,
4146,
62,
28480,
... | 3.061947 | 113 |
from globibot.lib.helpers import parsing as p
from collections import namedtuple
DiceRoll = namedtuple('DiceRoll', ['count', 'face_count', 'modifier'])
dice_types = { 'd4', 'd6', 'd8', 'd10', 'd12', 'd20' }
dice_type_grammar = p.one_of(p.string, *dice_types)
dice_grammar = (
p.maybe(p.integer) +
(dice_type_grammar >> extract_dice_type)
).named('Dice')
dice_modifier_grammar = (
(p.one_of(p.a, '+', '-') >> p.to_s) +
p.integer
).named('Modifier')
dice_roll_parser = (
dice_grammar +
p.maybe(dice_modifier_grammar)
) >> to_dice_roll
| [
6738,
15095,
571,
313,
13,
8019,
13,
16794,
364,
1330,
32096,
355,
279,
198,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
35,
501,
26869,
796,
3706,
83,
29291,
10786,
35,
501,
26869,
3256,
37250,
9127,
3256,
705,
2550,
62,
9127,... | 2.199219 | 256 |
#!/usr/bin/env python
# Block everyone you've muted, and vice-versa.
from argparse import ArgumentParser
import time
from tqdm import tqdm
from tweettools import get_client
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
9726,
2506,
345,
1053,
38952,
11,
290,
7927,
12,
690,
64,
13,
198,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
11748,
640,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
19... | 3.042857 | 70 |
from django.shortcuts import render
from django.shortcuts import get_object_or_404, HttpResponseRedirect
from django.urls import reverse, reverse_lazy
from django.db import transaction
from django.forms import inlineformset_factory
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from django.views.generic.detail import DetailView
from basketapp.models import Basket
from ordersapp.models import Order, OrderItem
from ordersapp.forms import OrderItemForm
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
651,
62,
15252,
62,
273,
62,
26429,
11,
367,
29281,
31077,
7738,
1060,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
11,
9575,
... | 3.425676 | 148 |
'''
Copyright (c) The Dojo Foundation 2011. All Rights Reserved.
Copyright (c) IBM Corporation 2008, 2011. All Rights Reserved.
'''
import weakref
class ServiceLauncherBase(object):
    """Abstract interface for launching and stopping session services.

    Concrete launchers override all three methods; every base
    implementation simply raises NotImplementedError.
    """

    def start_service(self, serviceName, token, serviceManager, appData):
        """Launch a new service for a session.

        Implementations raise ValueError for an unknown service, or any
        other Exception when the service fails to start.
        """
        raise NotImplementedError

    def stop_service(self, serviceName, serviceManager):
        """End a single service for a session. No return value expected."""
        raise NotImplementedError

    def stop_all_services(self, serviceManager):
        """End every service for a session. No return value expected."""
        raise NotImplementedError
7061,
6,
198,
15269,
357,
66,
8,
383,
2141,
7639,
5693,
2813,
13,
1439,
6923,
33876,
13,
198,
15269,
357,
66,
8,
19764,
10501,
3648,
11,
2813,
13,
1439,
6923,
33876,
13,
198,
7061,
6,
198,
11748,
4939,
5420,
198,
198,
4871,
4809,
... | 2.767647 | 340 |
from socket import error as socket_error
from django import forms
from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib import admin
from django.contrib import messages
from django.contrib.admin import SimpleListFilter
from django.contrib.auth.admin import GroupAdmin, UserAdmin
from django.contrib.auth.models import Group, User
from django.core.urlresolvers import reverse
from django.db.models import Count, Q
from django.forms import ValidationError
from django.http import HttpResponseRedirect
import autocomplete_light
from celery.task.sets import TaskSet
from functools import update_wrapper
from import_export.admin import ExportMixin
from import_export.fields import Field
from import_export.resources import ModelResource
from sorl.thumbnail.admin import AdminImageMixin
import mozillians.users.tasks
from mozillians.common.helpers import get_datetime
from mozillians.groups.models import GroupMembership, Skill
from mozillians.users.cron import index_all_profiles
from mozillians.users.models import (PUBLIC, Language, ExternalAccount, Vouch,
UserProfile, UsernameBlacklist)
# Group is re-registered further down with the stock GroupAdmin.
admin.site.unregister(Group)

# OR together privacy_<field>=PUBLIC for every privacy field, producing a Q
# that matches profiles with at least one publicly visible field.
Q_PUBLIC_PROFILES = Q()
for field in UserProfile.privacy_fields():
    key = 'privacy_%s' % field
    Q_PUBLIC_PROFILES |= Q(**{key: PUBLIC})
def subscribe_to_basket_action():
    """Build the admin action that subscribes selected profiles to Basket."""

    def subscribe_to_basket(modeladmin, request, queryset):
        """Subscribe to Basket or update details of already subscribed."""
        update_task = mozillians.users.tasks.update_basket_task
        subtasks = [update_task.subtask(args=[profile.id])
                    for profile in queryset]
        TaskSet(subtasks).apply_async()
        messages.success(request, 'Basket update started.')

    subscribe_to_basket.short_description = 'Subscribe to or Update Basket'
    return subscribe_to_basket
def unsubscribe_from_basket_action():
    """Build the admin action that unsubscribes selected profiles from Basket."""

    def unsubscribe_from_basket(modeladmin, request, queryset):
        """Unsubscribe from Basket."""
        unsubscribe_task = mozillians.users.tasks.unsubscribe_from_basket_task
        subtasks = [unsubscribe_task.subtask(
                        args=[profile.user.email, profile.basket_token])
                    for profile in queryset]
        TaskSet(subtasks).apply_async()
        messages.success(request, 'Basket update started.')

    unsubscribe_from_basket.short_description = 'Unsubscribe from Basket'
    return unsubscribe_from_basket
def update_vouch_flags_action():
    """Build the admin action that updates the can_vouch/is_vouched flags."""
    # NOTE(review): the inner action function `update_vouch_flags` is not
    # visible in this chunk -- presumably defined just above and elided from
    # view; confirm in the full file.
    update_vouch_flags.short_description = 'Update vouch flags'
    return update_vouch_flags
class SuperUserFilter(SimpleListFilter):
    """Admin list filter: whether the user can access the admin interface."""
    # NOTE(review): SimpleListFilter subclasses must define lookups() and
    # queryset(); neither is visible in this chunk -- confirm in full file.
    title = 'has access to admin interface'
    parameter_name = 'superuser'
class PublicProfileFilter(SimpleListFilter):
    """Admin list filter: whether any profile field is publicly visible."""
    # NOTE(review): lookups()/queryset() not visible in this chunk; likely
    # built on Q_PUBLIC_PROFILES defined above -- confirm in full file.
    title = 'public profile'
    parameter_name = 'public_profile'
class CompleteProfileFilter(SimpleListFilter):
    """Admin list filter: whether the profile is complete."""
    # NOTE(review): lookups()/queryset() not visible in this chunk.
    title = 'complete profile'
    parameter_name = 'complete_profile'
class DateJoinedFilter(SimpleListFilter):
    """Admin list filter on the user's join date."""
    # NOTE(review): lookups()/queryset() not visible in this chunk.
    title = 'date joined'
    parameter_name = 'date_joined'
class LastLoginFilter(SimpleListFilter):
    """Admin list filter on the user's last login time."""
    # NOTE(review): lookups()/queryset() not visible in this chunk.
    title = 'last login'
    parameter_name = 'last_login'
class AlternateEmailFilter(SimpleListFilter):
    """Admin list filter: users that have an alternate email address."""
    # NOTE(review): lookups()/queryset() not visible in this chunk.
    title = 'alternate email'
    parameter_name = 'alternate_email'
class LegacyVouchFilter(SimpleListFilter):
    """Admin list filter: profiles with a new-style vs legacy vouch."""
    # NOTE(review): lookups()/queryset() not visible in this chunk.
    title = 'vouch type'
    parameter_name = 'vouch_type'
class UsernameBlacklistAdmin(ExportMixin, admin.ModelAdmin):
    """Admin for blacklisted usernames, with export support via ExportMixin."""
    # Show the save buttons at the top of the change form as well.
    save_on_top = True
    search_fields = ['value']
    list_filter = ['is_regex']
    list_display = ['value', 'is_regex']
admin.site.register(UsernameBlacklist, UsernameBlacklistAdmin)
admin.site.register(Language, LanguageAdmin)
class UserProfileResource(ModelResource):
    """django-import-export UserProfile Resource."""
    # Flatten the related User's username and email into the export rows.
    username = Field(attribute='user__username')
    email = Field(attribute='user__email')
admin.site.register(UserProfile, UserProfileAdmin)
class NullProfileFilter(SimpleListFilter):
    """Admin list filter: users that have (or lack) a UserProfile."""
    # NOTE(review): lookups()/queryset() not visible in this chunk.
    title = 'has user profile'
    parameter_name = 'has_user_profile'
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Group, GroupAdmin)
admin.site.register(Vouch, VouchAdmin)
| [
6738,
17802,
1330,
4049,
355,
17802,
62,
18224,
198,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
19016,
198,
6738,
42625,
14208,
... | 3 | 1,557 |
from typing import List
import msgpack
import json
import pytest
from main import Example, ExampleChild, encode_example, decode_example, encode_example_proto, decode_example_proto
from random import randint, choice, random
from string import ascii_letters, digits
import pickle
single = gen_test_obj(1)
_100 = gen_test_obj(100)
_1k = gen_test_obj(1000)
_10k = gen_test_obj(10000)
_100k = gen_test_obj(100000)
test_sets = [single, _100, _1k, _10k, _100k]
test_set_labels = ["single", "100", "1k", "10k", "100k"]
libs = ["msgpack", "json", "pickle4", "pickle5", "proto"]
@pytest.mark.parametrize("lib", libs)
@pytest.mark.parametrize("test_data", test_sets, ids=test_set_labels)
@pytest.mark.parametrize("lib", libs)
@pytest.mark.parametrize("test_data", test_sets, ids=test_set_labels)
| [
6738,
19720,
1330,
7343,
198,
11748,
31456,
8002,
198,
11748,
33918,
198,
11748,
12972,
9288,
198,
6738,
1388,
1330,
17934,
11,
17934,
16424,
11,
37773,
62,
20688,
11,
36899,
62,
20688,
11,
37773,
62,
20688,
62,
1676,
1462,
11,
36899,
6... | 2.586885 | 305 |
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
import torch
from ncc import LOGGER
from ncc import tasks
from ncc.data.kd.teacher_out_dataset import (
TeacherOutDataset,
)
from ncc.utils import checkpoint_utils
from ncc.utils import utils
from ncc.utils.file_ops.yaml_io import load_yaml
from ncc.utils.logging import progress_bar
from ncc.utils.utils import move_to_cuda
if __name__ == '__main__':
cli_main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
198,
6738,
299,
535,
1330,
41605,
30373,
198,
6738,
299,
535,
1330,
... | 2.652695 | 167 |
#!/usr/bin/env python3
import collections
import datetime
import shutil
from typing import Any, Callable, List
# A mounted filesystem: the backing device and its mount point.
Partition = collections.namedtuple("Partition", [
    'device',
    'mountpoint'
])
# Snapshot of memory statistics. Units are presumably kB as in
# /proc/meminfo -- confirm against the elided parsing code.
Meminfo = collections.namedtuple('Meminfo', [
    'total',
    'free',
    'cached',
    'used',
])
# NOTE(review): main() is defined elsewhere in the original file; this
# chunk only shows the module prologue.
if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
17268,
198,
11748,
4818,
8079,
198,
11748,
4423,
346,
198,
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
7343,
628,
198,
7841,
653,
796,
17268,
13,
13190,
83,
29291,
7203... | 2.576389 | 144 |
from sublime import Region
from WrapAsYouType.tests.command_test_base import WrapAsYouTypeCommandTestBase
class TestWrapAsYouTypeCommandNormal(WrapAsYouTypeCommandTestBase):
"""Test WrapAsYouTypeCommand under "normal" editing behavior."""
    def test_cpp_block_comments(self):
        """Test WrapAsYouTypeCommand on edits to C++ block comments.

        Checks that inserting text into a /** ... */ comment re-wraps
        it at the ruler column, and that deleting text re-flows the
        remaining lines.
        """
        view = self._view
        self._set_up_cpp()
        # The wrap width comes from the first ruler (column 60).
        view.settings().set('rulers', [60])
        self._append(
            '#include <iostream>\n'
            '\n'
            'using namespace std;\n'
            '\n'
            '/**\n'
            ' * The "fibonacci" function returns the nth number in the\n'
            ' * Fibonacci sequence.\n'
            ' */\n'
            'int fibonacci(int n) {\n'
            '    // Base case\n'
            '    if (n == 0) {\n'
            '        return 0;\n'
            '    }\n'
            '\n'
            '    // Iterative implementation of "fibonacci"\n'
            '    int cur = 1;\n'
            '    int prev = 0;\n'
            '    for (int i = 1; i < n; i++) {\n'
            '        int next = cur + prev;\n'
            '        prev = cur;\n'
            '        cur = next;\n'
            '    }\n'
            '    return cur;\n'
            '}\n'
            '\n'
            'int main() {\n'
            '    cout << "The 8th Fibonacci number is " <<\n'
            '        fibonacci(8) << "\\n";\n'
            '    return 0;\n'
            '}\n')
        # Insertion at the end of a comment line should wrap within the
        # block comment, continuing lines with " * ".
        comment_start_point = view.find(r'/\*\*', 0).begin()
        point = view.find(r'Fibonacci sequence\.', 0).end()
        self._insert(point, ' The function assumes that n >= 0.')
        expected_text = (
            '/**\n'
            ' * The "fibonacci" function returns the nth number in the\n'
            ' * Fibonacci sequence. The function assumes that n >= 0.\n'
            ' */\n')
        actual_text = view.substr(
            Region(
                comment_start_point, comment_start_point + len(expected_text)))
        self.assertEqual(actual_text, expected_text)
        # Insertion in the middle of the comment should push the existing
        # trailing sentence through the re-wrap.
        point = view.find('The function assumes', 0).begin() - 1
        self._insert(
            point,
            'The Fibonacci sequence begins with 0 as the 0th number and 1 as '
            'the first number. Every subsequent number is equal to the sum '
            'of the two previous numbers.')
        expected_text = (
            '/**\n'
            ' * The "fibonacci" function returns the nth number in the\n'
            ' * Fibonacci sequence. The Fibonacci sequence begins with 0\n'
            ' * as the 0th number and 1 as the first number. Every\n'
            ' * subsequent number is equal to the sum of the two previous\n'
            ' * numbers. The function assumes that n >= 0.\n'
            ' */\n')
        actual_text = view.substr(
            Region(
                comment_start_point, comment_start_point + len(expected_text)))
        self.assertEqual(actual_text, expected_text)
        # Deleting the final sentence (34 characters) should re-flow the
        # remaining comment text.
        start_point = view.find('The function assumes', 0).begin() - 1
        end_point = start_point + 34
        self._backspace(Region(start_point, end_point))
        expected_text = (
            '/**\n'
            ' * The "fibonacci" function returns the nth number in the\n'
            ' * Fibonacci sequence. The Fibonacci sequence begins with 0\n'
            ' * as the 0th number and 1 as the first number. Every\n'
            ' * subsequent number is equal to the sum of the two\n'
            ' * previous numbers.\n'
            ' */\n')
        actual_text = view.substr(
            Region(
                comment_start_point, comment_start_point + len(expected_text)))
        self.assertEqual(actual_text, expected_text)
def test_cpp_line_comments(self):
"""Test WrapAsYouTypeCommand on edits to C++ line comments."""
view = self._view
self._set_up_cpp()
settings = view.settings()
settings.set('wrap_width', 60)
settings.set('rulers', [80])
self._append(
'#include <iostream>\n'
'\n'
'using namespace std;\n'
'\n'
'/**\n'
' * The "fibonacci" function returns the nth number in the\n'
' * Fibonacci sequence.\n'
' */\n'
'int fibonacci(int n) {\n'
' // Base case\n'
' if (n == 0) {\n'
' return 0;\n'
' }\n'
'\n'
' // Iterative implementation of "fibonacci"\n'
' int cur = 1;\n'
' int prev = 0;\n'
' for (int i = 1; i < n; i++) {\n'
' int next = cur + prev;\n'
' prev = cur;\n'
' cur = next;\n'
' }\n'
' return cur;\n'
'}\n'
'\n'
'int main() {\n'
' cout << "The 8th Fibonacci number is " <<\n'
' fibonacci(8) << "\\n";\n'
' return 0;\n'
'}\n')
comment_start_point = view.find('// Iterative', 0).begin() - 4
point = view.find('implementation of "fibonacci"', 0).end()
self._insert(
point,
'. We maintain two variables: "cur", the value of the current '
'number in the sequence, and "prev", the value of the previous '
'number.')
expected_text = (
' // Iterative implementation of "fibonacci". We maintain\n'
' // two variables: "cur", the value of the current number\n'
' // in the sequence, and "prev", the value of the\n'
' // previous number.\n'
' int cur = 1;\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
# Test paragraphs
point = view.find(r'previous number\.', 0).end()
self._insert(
point,
' Here\'s what happens at each iteration:\n'
'// - The variable "cur" gets set to be the value of prev '
'+ cur.\n'
'//\n'
'// - The variable "prev" gets set to be the old value of '
'"cur" - the value at the beginning of the iteration.')
expected_text = (
' // Iterative implementation of "fibonacci". We maintain\n'
' // two variables: "cur", the value of the current number\n'
' // in the sequence, and "prev", the value of the\n'
' // previous number. Here\'s what happens at each\n'
' // iteration:\n'
' // - The variable "cur" gets set to be the value of\n'
' // prev + cur.\n'
' //\n'
' // - The variable "prev" gets set to be the old\n'
' // value of "cur" - the value at the beginning of\n'
' // the iteration.\n'
' int cur = 1;\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
start_point = view.find(', and "prev",', 0).end() - 1
end_point = view.find(r'previous number\.', 0).end() - 1
self._backspace(Region(start_point, end_point))
expected_text = (
' // Iterative implementation of "fibonacci". We maintain\n'
' // two variables: "cur", the value of the current number\n'
' // in the sequence, and "prev". Here\'s what happens at\n'
' // each iteration:\n'
' // - The variable "cur" gets set to be the value of\n'
' // prev + cur.\n'
' //\n'
' // - The variable "prev" gets set to be the old\n'
' // value of "cur" - the value at the beginning of\n'
' // the iteration.\n'
' int cur = 1;\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
point = view.find(' - the value at the', 0).begin() + 3
self._delete(point, 10)
expected_text = (
' // Iterative implementation of "fibonacci". We maintain\n'
' // two variables: "cur", the value of the current number\n'
' // in the sequence, and "prev". Here\'s what happens at\n'
' // each iteration:\n'
' // - The variable "cur" gets set to be the value of\n'
' // prev + cur.\n'
' //\n'
' // - The variable "prev" gets set to be the old\n'
' // value of "cur" - at the beginning of the\n'
' // iteration.\n'
' int cur = 1;\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
# Make sure that inline comments don't wrap, because they don't extend
# to the beginning of the line
comment_start_point = view.find('int next =', 0).begin() - 8
self._insert(
comment_start_point + 30,
' // In order to make sure that we use the correct value of '
'"prev" in the addition, we must create a temporary value "next" '
'to store the result of the addition, before setting "prev" to be '
'"cur".')
expected_text = (
' int next = cur + prev; // In order to make sure that we '
'use the correct value of "prev" in the addition, we must create '
'a temporary value "next" to store the result of the addition, '
'before setting "prev" to be "cur".\n'
' prev = cur;\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
def test_python_block_comments(self):
"""Test WrapAsYouTypeCommand on edits to Python block comments."""
view = self._view
self._set_up_python()
# Attempt to trick WrapAsYouType with an irrelevant ruler
view.settings().set('rulers', [60])
self._append(
'def fibonacci(n):\n'
' """Return the nth number in the Fibonacci sequence."""\n'
' # Base case\n'
' if n == 0:\n'
' return 0\n'
'\n'
' # Iterative implementation of "fibonacci"\n'
' cur = 1\n'
' prev = 0\n'
' for i in range(1, n):\n'
' cur, prev = cur + prev, cur\n'
' return cur\n'
'\n'
'print(\'The 8th Fibonacci number is {:d}\'.format(fibonacci(8)))'
'\n')
comment_start_point = view.find(' """', 0).begin()
point = view.find(r'Fibonacci sequence\.', 0).end()
self._insert(point, '\nAssume that n >= 0.\n')
expected_text = (
' """Return the nth number in the Fibonacci sequence.\n'
' Assume that n >= 0.\n'
' """\n'
' # Base case\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
point = view.find('Assume that', 0).begin() - 1
self._insert(
point,
' The Fibonacci sequence begins with 0 as the 0th number and 1 as '
'the first number. Every subsequent number is equal to the sum '
'of the two previous numbers.')
expected_text = (
' """Return the nth number in the Fibonacci sequence.\n'
' The Fibonacci sequence begins with 0 as the 0th number and 1 '
'as the\n'
' first number. Every subsequent number is equal to the sum of '
'the two\n'
' previous numbers. Assume that n >= 0.\n'
' """\n'
' # Base case\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
start_point = view.find('Assume that', 0).begin() - 1
end_point = start_point + 20
self._backspace(Region(start_point, end_point))
expected_text = (
' """Return the nth number in the Fibonacci sequence.\n'
' The Fibonacci sequence begins with 0 as the 0th number and 1 '
'as the\n'
' first number. Every subsequent number is equal to the sum of '
'the two\n'
' previous numbers.\n'
' """\n'
' # Base case\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
# Make sure the first line doesn't wrap
point = view.find(r'Fibonacci sequence\.', 0).end()
self._insert(
point,
' The Fibonacci sequence is the sequence 1, 1, 2, 3, 5, 8, etc.')
expected_text = (
' """Return the nth number in the Fibonacci sequence. The '
'Fibonacci sequence is the sequence 1, 1, 2, 3, 5, 8, etc.\n'
' The Fibonacci sequence begins with 0 as the 0th number and 1 '
'as the\n'
' first number. Every subsequent number is equal to the sum of '
'the two\n'
' previous numbers.\n'
' """\n'
' # Base case\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
def test_python_line_comments(self):
"""Test WrapAsYouTypeCommand on edits to Python line comments."""
view = self._view
self._set_up_python()
view.settings().set('rulers', [60])
self._append(
'def fibonacci(n):\n'
' """Return the nth number in the Fibonacci sequence."""\n'
' # Base case\n'
' if n == 0:\n'
' return 0\n'
'\n'
' # Iterative implementation of "fibonacci"\n'
' cur = 1\n'
' prev = 0\n'
' for i in range(1, n):\n'
' cur, prev = cur + prev, cur\n'
' return cur\n'
'\n'
'print(\'The 8th Fibonacci number is {:d}\'.format(fibonacci(8)))'
'\n')
comment_start_point = view.find('# Iterative', 0).begin() - 4
point = view.find('implementation of "fibonacci"', 0).end()
self._insert(
point,
'. We maintain two variables: "cur", the value of the current '
'number in the sequence, and "prev", the value of the previous '
'number.')
expected_text = (
' # Iterative implementation of "fibonacci". We maintain two '
'variables:\n'
' # "cur", the value of the current number in the sequence, '
'and "prev", the\n'
' # value of the previous number.\n'
' cur = 1\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
# Test paragraphs
point = view.find(r'previous number\.', 0).end()
self._insert(
point,
' Here\'s what happens at each iteration:\n'
'# - The variable "cur" gets set to be the value of prev '
'+ cur.\n'
'#\n'
'# - The variable "prev" gets set to be the old value of '
'"cur" - the value at the beginning of the iteration.')
expected_text = (
' # Iterative implementation of "fibonacci". We maintain two '
'variables:\n'
' # "cur", the value of the current number in the sequence, '
'and "prev", the\n'
' # value of the previous number. Here\'s what happens at each '
'iteration:\n'
' # - The variable "cur" gets set to be the value of prev '
'+ cur.\n'
' #\n'
' # - The variable "prev" gets set to be the old value of '
'"cur" - the\n'
' # value at the beginning of the iteration.\n'
' cur = 1\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
start_point = view.find(', and "prev",', 0).end() - 1
end_point = view.find(r'previous number\.', 0).end() - 1
self._backspace(Region(start_point, end_point))
expected_text = (
' # Iterative implementation of "fibonacci". We maintain two '
'variables:\n'
' # "cur", the value of the current number in the sequence, '
'and "prev".\n'
' # Here\'s what happens at each iteration:\n'
' # - The variable "cur" gets set to be the value of prev '
'+ cur.\n'
' #\n'
' # - The variable "prev" gets set to be the old value of '
'"cur" - the\n'
' # value at the beginning of the iteration.\n'
' cur = 1\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
def test_explicit_line_breaks(self):
"""Test WrapAsYouTypeCommand with explicit line breaks.
Test that WrapAsYouTypeCommand refrains from deleting line
breaks entered in by the user as he performs forward editing.
"""
view = self._view
self._set_up_cpp()
settings = view.settings()
settings.set('rulers', [60])
self._append(
'#include <iostream>\n'
'\n'
'using namespace std;\n'
'\n'
'/**\n'
' * The "fibonacci" function returns the nth number in the\n'
' * Fibonacci sequence.\n'
' */\n'
'int fibonacci(int n) {\n'
' // Base case\n'
' if (n == 0) {\n'
' return 0;\n'
' }\n'
'\n'
' // Iterative implementation of "fibonacci"\n'
' int cur = 1;\n'
' int prev = 0;\n'
' for (int i = 1; i < n; i++) {\n'
' int next = cur + prev;\n'
' prev = cur;\n'
' cur = next;\n'
' }\n'
' return cur;\n'
'}\n'
'\n'
'int main() {\n'
' cout << "The 8th Fibonacci number is " <<\n'
' fibonacci(8) << "\\n";\n'
' return 0;\n'
'}\n')
comment_start_point = view.find(r'/\*\*', 0).begin()
point = view.find(r'Fibonacci sequence\.', 0).end()
self._insert(
point,
' The Fibonacci sequence begins with 0 as the 0th number and 1 as '
'the first number.\n'
'* Every subsequent number is equal to the sum '
'of the two previous numbers.\n'
'* The function assumes that n >= 0.')
expected_text = (
'/**\n'
' * The "fibonacci" function returns the nth number in the\n'
' * Fibonacci sequence. The Fibonacci sequence begins with 0\n'
' * as the 0th number and 1 as the first number.\n'
' * Every subsequent number is equal to the sum of the two\n'
' * previous numbers.\n'
' * The function assumes that n >= 0.\n'
' */\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
# This tests whether WrapAsYouType deletes the preceding newline
comment_start_point = view.find(' // Iterative', 0).begin()
settings.set('auto_indent', False)
point = view.find('Iterative implementation', 0).end()
self._delete(point, 1)
self._insert(point, '\n ')
expected_text = (
' // Iterative implementation\n'
' of "fibonacci"\n'
' int cur = 1;\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
point = view.find('of "fibonacci"', 0).begin()
self._backspace(Region(point - 4, point))
expected_text = (
' // Iterative implementation\n'
'of "fibonacci"\n'
' int cur = 1;\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
def test_comment_out_lines(self):
"""Test WrapAsYouTypeCommand when commenting out lines.
Test that WrapAsYouTypeCommand does not perform word wrapping
fixup when commenting out lines of code.
"""
view = self._view
self._set_up_cpp()
view.settings().set('rulers', [60])
self._append(
'#include <iostream>\n'
'\n'
'using namespace std;\n'
'\n'
'/**\n'
' * The "fibonacci" function returns the nth number in the\n'
' * Fibonacci sequence.\n'
' */\n'
'int fibonacci(int n) {\n'
' // Base case\n'
' if (n == 0) {\n'
' return 0;\n'
' }\n'
'\n'
' // Iterative implementation of "fibonacci"\n'
' int cur = 1;\n'
' int prev = 0;\n'
' for (int i = 1; i < n; i++) {\n'
' int next = cur + prev;\n'
' prev = cur;\n'
' cur = next;\n'
' }\n'
' return cur;\n'
'}\n'
'\n'
'int main() {\n'
' cout << "The 8th Fibonacci number is " <<\n'
' fibonacci(8) << "\\n";\n'
' return 0;\n'
'}\n')
block_start_point = view.find(r'for \(int i', 0).begin() - 4
point = view.find('int next', 0).begin()
self._insert(point, '//')
point = view.find('prev = cur;', 0).begin()
self._set_selection_point(point)
view.run_command('toggle_comment')
point = view.find('cur = next;', 0).begin()
self._insert(point, '//')
expected_text = (
' for (int i = 1; i < n; i++) {\n'
' //int next = cur + prev;\n'
' // prev = cur;\n'
' //cur = next;\n'
' }\n')
actual_text = view.substr(
Region(block_start_point, block_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
start_point = view.find('int next', 0).begin()
end_point = view.find('cur = next;', 0).end()
self._set_selection_region(Region(start_point, end_point))
view.run_command('toggle_comment')
expected_text = (
' for (int i = 1; i < n; i++) {\n'
' int next = cur + prev;\n'
' prev = cur;\n'
' cur = next;\n'
' }\n')
actual_text = view.substr(
Region(block_start_point, block_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
view.run_command('toggle_comment')
expected_text = (
' for (int i = 1; i < n; i++) {\n'
' // int next = cur + prev;\n'
' // prev = cur;\n'
' // cur = next;\n'
' }\n')
actual_text = view.substr(
Region(block_start_point, block_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
point = view.find('cur = next;', 0).begin() + 4
self._set_selection_point(point)
view.run_command('toggle_comment')
point = view.find('// int next', 0).begin()
self._delete(point, 3)
expected_text = (
' for (int i = 1; i < n; i++) {\n'
' int next = cur + prev;\n'
' // prev = cur;\n'
' cur = next;\n'
' }\n')
actual_text = view.substr(
Region(block_start_point, block_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
| [
6738,
41674,
1330,
17718,
198,
198,
6738,
41028,
1722,
1639,
6030,
13,
41989,
13,
21812,
62,
9288,
62,
8692,
1330,
41028,
1722,
1639,
6030,
21575,
14402,
14881,
628,
198,
4871,
6208,
54,
2416,
1722,
1639,
6030,
21575,
26447,
7,
54,
2416... | 1.945817 | 13,159 |
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import subprocess
from traceback import print_exc
__version__ = "2.8.3"
| [
2,
43907,
25,
7753,
12685,
7656,
28,
40477,
12,
23,
25,
3919,
316,
198,
6738,
11593,
37443,
834,
1330,
357,
46903,
1098,
62,
17201,
874,
11,
7297,
11,
4112,
62,
11748,
11,
3601,
62,
8818,
8,
198,
198,
11748,
850,
14681,
198,
6738,
... | 3.015873 | 63 |
import keras.backend as K
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
from attack_utils import gen_adv_loss
from keras.models import save_model
import time
import sys
FLAGS = flags.FLAGS
EVAL_FREQUENCY = 1000
BATCH_SIZE = 64
BATCH_EVAL_NUM = 100
def batch_eval(tf_inputs, tf_outputs, numpy_inputs):
"""
A helper function that computes a tensor on numpy inputs by batches.
From: https://github.com/openai/cleverhans/blob/master/cleverhans/utils_tf.py
"""
n = len(numpy_inputs)
assert n > 0
assert n == len(tf_inputs)
m = numpy_inputs[0].shape[0]
for i in range(1, n):
assert numpy_inputs[i].shape[0] == m
out = []
for _ in tf_outputs:
out.append([])
for start in range(0, m, BATCH_SIZE):
batch = start // BATCH_SIZE
# Compute batch start and end indices
start = batch * BATCH_SIZE
end = start + BATCH_SIZE
numpy_input_batches = [numpy_input[start:end]
for numpy_input in numpy_inputs]
cur_batch_size = numpy_input_batches[0].shape[0]
assert cur_batch_size <= BATCH_SIZE
for e in numpy_input_batches:
assert e.shape[0] == cur_batch_size
feed_dict = dict(zip(tf_inputs, numpy_input_batches))
feed_dict[K.learning_phase()] = 0
numpy_output_batches = K.get_session().run(tf_outputs,
feed_dict=feed_dict)
for e in numpy_output_batches:
assert e.shape[0] == cur_batch_size, e.shape
for out_elem, numpy_output_batch in zip(out, numpy_output_batches):
out_elem.append(numpy_output_batch)
out = [np.concatenate(x, axis=0) for x in out]
for e in out:
assert e.shape[0] == m, e.shape
return out
def tf_test_error_rate(model, x, X_test, y_test):
"""
Compute test error.
"""
assert len(X_test) == len(y_test)
# Predictions for the test set
eval_prediction = K.softmax(model(x))
predictions = batch_eval([x], [eval_prediction], [X_test])[0]
return error_rate(predictions, y_test)
def error_rate(predictions, labels):
"""
Return the error rate in percent.
"""
assert len(predictions) == len(labels)
preds = np.argmax(predictions, 1)
orig = np.argmax(labels, 1)
error_rate = 100.0 - (100.0 * np.sum(preds == orig) / predictions.shape[0])
return preds, orig, error_rate
| [
11748,
41927,
292,
13,
1891,
437,
355,
509,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
13,
29412,
13,
24254,
1330,
9701,
198,
6738,
1368,
62,
26791,
1330,
2429,
62,
32225... | 2.23139 | 1,115 |
from datetime import timedelta
from airflow.utils.dates import days_ago
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
default_args = {
'owner': 'Yilin',
'start_date': days_ago(2),
'depends_on_past': False,
'retries': 1,
'retry_delay': timedelta(minutes=1)
}
dag = DAG(
'shipment_dag',
default_args=default_args,
description='DAG for the shipment system',
schedule_interval=timedelta(minutes=5)
)
t1 = BashOperator(
task_id='state_sync',
bash_command='curl http://web:8000/state-sync',
dag=dag
) | [
6738,
4818,
8079,
1330,
28805,
12514,
198,
198,
6738,
45771,
13,
26791,
13,
19581,
1330,
1528,
62,
3839,
198,
6738,
45771,
1330,
360,
4760,
198,
6738,
45771,
13,
3575,
2024,
13,
41757,
62,
46616,
1330,
15743,
18843,
1352,
198,
198,
1228... | 2.690821 | 207 |
"Unit test to check Python bpo-45074."
import asyncio
import sys
import pytest
pytestmark = pytest.mark.asyncio
PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
@pytest.mark.xfail(sys.platform == "win32", reason="bpo-45074")
| [
1,
26453,
1332,
284,
2198,
11361,
275,
7501,
12,
17885,
4524,
526,
198,
198,
11748,
30351,
952,
198,
11748,
25064,
198,
198,
11748,
12972,
9288,
198,
198,
9078,
9288,
4102,
796,
12972,
9288,
13,
4102,
13,
292,
13361,
952,
198,
198,
47... | 2.595238 | 84 |
import multiprocessing
import random
import time
from threading import current_thread
import rx
from rx.scheduler import ThreadPoolScheduler
from rx import operators as ops
# calculate cpu count, using which will create a ThreadPoolScheduler
thread_count = multiprocessing.cpu_count()
thread_pool_scheduler = ThreadPoolScheduler(thread_count)
print("Cpu count is : {0}".format(thread_count))
# Task 1
rx.of(1,2,3,4,5).pipe(
ops.map(lambda a: adding_delay(a)),
ops.subscribe_on(thread_pool_scheduler)
).subscribe(
lambda s: print("From Task 1: {0}".format(s)),
lambda e: print(e),
lambda: print("Task 1 complete")
)
# Task 2
rx.range(1, 5).pipe(
ops.map(lambda a: adding_delay(a)),
ops.subscribe_on(thread_pool_scheduler)
).subscribe(
lambda s: print("From Task 2: {0}".format(s)),
lambda e: print(e),
lambda: print("Task 2 complete")
)
input("Press any key to exit\n") | [
11748,
18540,
305,
919,
278,
198,
11748,
4738,
198,
11748,
640,
198,
6738,
4704,
278,
1330,
1459,
62,
16663,
198,
11748,
374,
87,
198,
6738,
374,
87,
13,
1416,
704,
18173,
1330,
14122,
27201,
50,
1740,
18173,
198,
6738,
374,
87,
1330,... | 2.769231 | 325 |
from .estimators import EmpiricalMeanSL, GLMSL, StepwiseSL
from .stackers import SuperLearner
| [
6738,
764,
395,
320,
2024,
1330,
2295,
4063,
605,
5308,
272,
8634,
11,
10188,
5653,
43,
11,
5012,
3083,
8634,
198,
6738,
764,
25558,
364,
1330,
3115,
14961,
1008,
198
] | 3.133333 | 30 |
# -*- coding: utf-8 -*-
""" Print a json file with the participants information for their badges."""
import json
import logging as log
from optparse import make_option
from collections import OrderedDict
from django.core.management.base import BaseCommand, CommandError
from assopy import models as assopy_models
from ...utils import (get_profile_company,
get_all_order_tickets)
### Helpers
###
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
12578,
257,
33918,
2393,
351,
262,
6809,
1321,
329,
511,
37583,
526,
15931,
198,
198,
11748,
33918,
198,
11748,
18931,
355,
2604,
198,
6738,
220,
220,
2172,
29572,
... | 3.013986 | 143 |
""" A series of helper functions and constants related to rendering """
SCREEN_WIDTH = 10
SCREEN_HEIGHT = 10
CAMERA_WIDTH = SCREEN_WIDTH
CAMERA_HEIGHT = SCREEN_HEIGHT
WORLD_WIDTH = 30
WORLD_HEIGHT = 30
CHUNK_WIDTH = 10
CHUNK_HEIGHT = 10
# Max number of chunks from the player to load
CHUNK_LOAD_DISTANCE = 1 | [
37811,
317,
2168,
286,
31904,
5499,
290,
38491,
3519,
284,
14837,
37227,
198,
198,
6173,
2200,
1677,
62,
54,
2389,
4221,
796,
838,
198,
6173,
2200,
1677,
62,
13909,
9947,
796,
838,
198,
198,
34,
2390,
46461,
62,
54,
2389,
4221,
796,
... | 2.496 | 125 |
# -*- coding: utf-8 -*-
"""Wrapper to run JSPEC from the command line.
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkio
from pykern import pksubprocess
from pykern.pkdebug import pkdp, pkdc, pkdlog
from sirepo import simulation_db
from sirepo.template import sdds_util, template_common
import re
import shutil
import sirepo.sim_data
import sirepo.template.jspec as template
_SIM_DATA = sirepo.sim_data.get_class('jspec')
_X_FIELD = 's'
_FIELD_UNITS = {
'betx': 'm',
#'alfx': '',
'mux': 'rad/2π',
'dx': 'm',
#'dpx': '',
'bety': 'm',
#'alfy': '',
'muy': 'rad/2π',
'dx': 'm',
#'dpx': '',
}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
36918,
2848,
284,
1057,
449,
48451,
422,
262,
3141,
1627,
13,
198,
198,
25,
22163,
4766,
25,
15069,
357,
66,
8,
2177,
5325,
544,
18380,
11419,
13,
220,
1439,
69... | 2.355685 | 343 |
import smart_imports
smart_imports.all()
TEST_FREEDOM = float(666)
| [
198,
11748,
4451,
62,
320,
3742,
198,
198,
27004,
62,
320,
3742,
13,
439,
3419,
628,
198,
51,
6465,
62,
37,
2200,
1961,
2662,
796,
12178,
7,
27310,
8,
628
] | 2.4 | 30 |
"""Open API written in Python for making your own Smash Bros: Melee AI
Python3 only
Currently only works on Linux/OSX
"""
from melee.dolphin import Dolphin
from melee.logger import Logger
from melee.gamestate import GameState
from melee.enums import Stage, Menu, Character, Button, Action, ProjectileSubtype
from melee.controller import Controller, ControllerState
from melee import menuhelper, techskill, framedata, stages, dtmreader
import melee.version
| [
37811,
11505,
7824,
3194,
287,
11361,
329,
1642,
534,
898,
18214,
14266,
25,
21058,
9552,
198,
37906,
18,
691,
198,
21327,
691,
2499,
319,
7020,
14,
2640,
55,
198,
37811,
198,
6738,
16837,
13,
67,
27161,
1330,
44576,
198,
6738,
16837,
... | 4.071429 | 112 |
from posixpath import split
import threading
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.uic import loadUiType
from pytube import YouTube
from pytube import Playlist
from pytube.cli import on_progress
from threading import Thread
import os
from os import path
import sys
import urllib.request
Form_Class,_ = loadUiType(path.join(path.dirname(__file__),"DownLoad.ui")) # Form_Class اسم اختياري للتطبيق
if __name__ == '__main__':
main()
| [
6738,
1426,
844,
6978,
1330,
6626,
198,
11748,
4704,
278,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
8205,
72,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
48,
... | 2.420091 | 219 |
from tensorflow import expand_dims
from tensorflow import tile
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import standard_ops
# from tensorflow.python.util.tf_export import tf_export
from tf_export import tf_export
# from tensorflow.python import math_ops
# from tensorflow.contrib.eager import context
@tf_export('layers.GCN')
class GCN(base.Layer):
"""Densely-connected layer class.
This layer implements the operation:
`outputs = activation(inputs * kernel + bias)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Arguments:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such cases.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Properties:
units: Python integer, dimensionality of the output space.
edges_label_num: Python integer, dimensionality of the edge label space.
bias_label_num: Python integer, dimensionality of the bias label space.
activation: Activation function (callable).
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer instance (or name) for the kernel matrix.
bias_initializer: Initializer instance (or name) for the bias.
kernel_regularizer: Regularizer instance for the kernel matrix (callable)
bias_regularizer: Regularizer instance for the bias (callable).
activity_regularizer: Regularizer instance for the output (callable)
kernel_constraint: Constraint function for the kernel matrix.
bias_constraint: Constraint function for the bias.
kernel: Weight matrix (TensorFlow variable or tensor).
bias: Bias vector, if applicable (TensorFlow variable or tensor).
"""
@tf_export('layers.gcn')
def gcn(
inputs, units,
activation=None,
gate=True,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
gate_kernel_initializer=None,
gate_bias_initializer=init_ops.zeros_initializer(),
gate_kernel_regularizer=None,
gate_bias_regularizer=None,
gate_kernel_constraint=None,
gate_bias_constraint=None,
# activity_regularizer=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for the graph convolutional network.
This layer implements the operation:
`outputs = activation(inputs.labeled_graph_kernel + labeled_bias)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
a different matrix per label,
and `bias` is a bias vector created by the layer, a different bias per label
(only if `use_bias` is `True`).
Arguments:
inputs: List of Tensor inputs.
The inputs, the edges labels and the bias labels.
Labels are expected in the form of neighbors X vertices X labels tensors
with 0 or one representing the existence of a labeled edge between a vertice to its neighbor.
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor the same shape as `inputs` except the last dimension is of
size `units`.
Raises:
ValueError: if eager execution is enabled.
"""
layer = GCN(units,
activation=activation,
gate=True,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
gate_kernel_initializer=None,
gate_bias_initializer=init_ops.zeros_initializer(),
gate_kernel_regularizer=None,
gate_bias_regularizer=None,
gate_kernel_constraint=None,
gate_bias_constraint=None,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
# activity_regularizer=activity_regularizer,
trainable=trainable,
name=name,
# dtype=inputs[0].dtype.base_dtype,
_scope=name,
_reuse=reuse
)
return layer.apply(inputs)
| [
6738,
11192,
273,
11125,
1330,
4292,
62,
67,
12078,
201,
198,
6738,
11192,
273,
11125,
1330,
17763,
201,
198,
6738,
11192,
273,
11125,
13,
29412,
13,
30604,
1330,
39628,
201,
198,
6738,
11192,
273,
11125,
13,
29412,
13,
30604,
1330,
111... | 2.678715 | 2,988 |
import time
import logging
logging.basicConfig(level=logging.DEBUG)
from redis import StrictRedis
from rq import Queue
from apscheduler.schedulers.blocking import BlockingScheduler
import sys
sys.path.append('/d1lod')
from d1lod import jobs
conn = StrictRedis(host='redis', port='6379')
queues = {
'default': Queue('default', connection=conn),
'dataset': Queue('dataset', connection=conn),
'export': Queue('export', connection=conn)
}
sched = BlockingScheduler()
@sched.scheduled_job('interval', id='update', minutes=1)
@sched.scheduled_job('interval', id='stats', minutes=1)
@sched.scheduled_job('interval', id='export', hours=1)
# Wait a bit for Sesame to start
time.sleep(10)
# Queue the stats job first. This creates the graph before any other
# jobs are run.
queues['default'].enqueue(jobs.calculate_stats)
queues['default'].enqueue(jobs.update_graph)
# Start the scheduler
sched.start()
| [
11748,
640,
198,
11748,
18931,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
30531,
8,
198,
198,
6738,
2266,
271,
1330,
520,
2012,
7738,
271,
198,
6738,
374,
80,
1330,
4670,
518,
198,
6738,
257,
862,
1740,
18173,
1... | 2.810398 | 327 |
import os
import argparse
import soccer
from os import listdir
from os.path import isfile, join, exists
################################################################################
# run: python3 estimate_openpose.py --path_to_data ~/path/to/data/ --openpose_dir ~/path/to/openpose
################################################################################
# CMD Line arguments
parser = argparse.ArgumentParser(description='Estimate the poses')
# --path_to_data: where the images are
parser.add_argument('--path_to_data', default='~/path/to/data/', help='path')
# --openpose_dir: where the openpose directory is (./build/examples/openpose/openpose.bin)
parser.add_argument('--openpose_dir', default='~/path/to/openpose', help='path')
opt, _ = parser.parse_known_args()
# initialize SoccerVideo for every camera
db = soccer.SoccerVideo(opt.path_to_data)
# what exactly do those?
db.gather_detectron()
db.digest_metadata()
db.get_boxes_from_detectron()
db.dump_video('detections')
db.estimate_openposes(openpose_dir=opt.openpose_dir)
db.refine_poses(keypoint_thresh=7, score_thresh=0.4, neck_thresh=0.4)
db.dump_video('poses')
| [
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
11783,
198,
6738,
28686,
1330,
1351,
15908,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
11,
4654,
11,
7160,
628,
198,
29113,
29113,
14468,
198,
2,
1057,
25,
21015,
18,
8636,
62,
9654,
... | 3.247159 | 352 |
from __future__ import absolute_import, unicode_literals
import textwrap
from django.utils.safestring import SafeData
from django.test import TestCase
__all__ = ('FeinCMSTests',)
class FeinCMSTests(TestCase):
"""Tests the ``markupmirror.feincms`` module that provides integration
into FeinCMS as page content-type.
"""
def test_import(self):
"""The ``markupmirror.feincms`` app can only be installed when
FeinCMS is installed.
"""
# returns True if feincms is installed or False otherwise
# unregister previous imports
import sys
to_delete = [
module for module in sys.modules
if (module.startswith('markupmirror.feincms') or
module.startswith('feincms'))]
for module in to_delete:
del sys.modules[module]
self.assertFalse('markupmirror.feincms' in sys.modules)
self.assertFalse('feincms' in sys.modules)
# save original import
import builtins
original_import = builtins.__import__
# patch and test the import
builtins.__import__ = import_hook
# without FeinCMS, the import should fail
self.assertFalse(import_markupmirror_feincms())
# with FeinCMS installed, the import should work
# restore import
builtins.__import__ = original_import
self.assertTrue(import_markupmirror_feincms())
# restore normal import
from markupmirror.feincms import models
self.assertTrue('markupmirror.feincms' in sys.modules)
self.assertTrue('markupmirror.feincms.models' in sys.modules)
def test_markupmirror_content(self):
"""Tests registering and rendering a ``MarkupMirrorContent`` instance
with a FeinCMS ``Page``.
"""
from tests.models import Page
from markupmirror.markup.base import markup_pool
mmc_type = Page._feincms_content_types[0]
obj = mmc_type()
obj.content = "**markdown**"
# fake obj.save(): DB does not have page_page_markupmirrorcontent table
rendered = markup_pool[obj.content.markup_type](obj.content.raw)
obj.content_rendered = rendered
with self.assertTemplateUsed('content/markupmirror/default.html'):
self.assertIsInstance(obj.render(), SafeData)
self.assertEqual(
obj.render(),
textwrap.dedent(u"""\
<p><strong>markdown</strong></p>
"""))
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
28000,
1098,
62,
17201,
874,
198,
11748,
2420,
37150,
198,
198,
6738,
42625,
14208,
13,
26791,
13,
49585,
395,
1806,
1330,
19978,
6601,
198,
198,
6738,
42625,
14208,
13,
9288,
1330,
62... | 2.390728 | 1,057 |
from django.contrib import admin
from django.conf import settings
from .models import Scooter, ScooterActivity
admin.site.register(Scooter, ScooterAdmin)
admin.site.register(ScooterActivity, ScooterActivityAdmin) | [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
764,
27530,
1330,
1446,
25141,
11,
1446,
25141,
16516,
628,
198,
28482,
13,
15654,
13,
30238,
7,
3351,
25141,
11,
1446,
25141,
46... | 3.689655 | 58 |
'''Create a function/method that takes a string and return the word count. The string will be a sentence.
Input Format
A string
Constraints
No
Output Format
Word count
Sample Input 0
This is test
Sample Output 0
3
Sample Input 1
Test Demo
Sample Output 1
2
Sample Input 2
Prakash
Sample Output 2
1'''
#solution
print(len(input().split(' '))) | [
7061,
6,
16447,
257,
2163,
14,
24396,
326,
2753,
257,
4731,
290,
1441,
262,
1573,
954,
13,
383,
4731,
481,
307,
257,
6827,
13,
198,
198,
20560,
18980,
198,
198,
32,
4731,
198,
198,
3103,
2536,
6003,
198,
198,
2949,
198,
198,
26410,
... | 3.095652 | 115 |
from .objectJSON import ObjectJSON
| [
198,
6738,
764,
15252,
40386,
1330,
9515,
40386,
198
] | 4 | 9 |
"Malagasy config with language-specific information."
from pynini import *
from pynini.lib import byte
from config import utils
GRAPHEMES = union(byte.LOWER, "'", "-", "@",
"à", "â", "è", "é", "ê", "ë",
"ì", "ò", "ô", "ù", "n̈", "ñ")
INITIAL_PUNCTUATION = utils.DEFAULT_INITIAL_PUNCTUATION
FINAL_PUNCTUATION = utils.DEFAULT_FINAL_PUNCTUATION
NUMERALS = byte.DIGIT
# Malagasy's official orthography uses n with diaeresis, but many keyboards do
# not have this grapheme, so users often replace it with n with tilde. This rule
# normalizes the text towards the official orthography.
MG_VELAR_NASAL = cdrewrite(
cross("ñ", "n̈"),
"",
"",
byte.BYTES.closure())
# Malagasy speakers apparently use <@> as an abbreviation for <amin'ny>, meaning
# 'with the'. This rule transduces standalone <@> back into <amin'ny>.
MG_ABBREVIATION = cdrewrite(
cross("@", "amin'ny"),
union("[BOS]", byte.SPACE),
union("[EOS]", byte.SPACE),
byte.BYTES.closure())
LANGUAGE_SPECIFIC_PREPROCESSING = (MG_VELAR_NASAL @ MG_ABBREVIATION).optimize()
# These files are not in the repo. You will need to change these paths to match
# where you place the data files.
UD = ""
UM = ""
AC = "language_data/mg/ac/mg-wordbigrams.txt"
OSCAR = "language_data/mg/oscar/mg.txt"
OSCAR_DEDUP = "language_data/mg/oscar/mg_dedup.txt"
LCC = "language_data/mg/lcc/mlg_wikipedia_2014_30K/mlg_wikipedia_2014_30K-sentences.txt"
#LCC = "language_data/mg/lcc/mlg_web_2012_30K/mlg_web_2012_30K-sentences.txt"
| [
1,
15029,
363,
4107,
4566,
351,
3303,
12,
11423,
1321,
526,
198,
198,
6738,
279,
2047,
5362,
1330,
1635,
198,
6738,
279,
2047,
5362,
13,
8019,
1330,
18022,
198,
6738,
4566,
1330,
3384,
4487,
198,
198,
10761,
31300,
3620,
1546,
796,
64... | 2.440191 | 627 |
# -*- coding: UTF-8 -*-
# Copyright 2009-2018 Rumma & Ko Ltd
# License: BSD, see file LICENSE for more details.
"""
Defines Lino's **Python serializer and deserializer**. See
:doc:`Specification </specs/dpy>`.
"""
from __future__ import unicode_literals
from __future__ import print_function
# from future import standard_library
# standard_library.install_aliases()
from builtins import str
from builtins import object
import six
import logging
logger = logging.getLogger(__name__)
from pkg_resources import parse_version as PV
#from io import StringIO
import os
#from os.path import dirname
import imp
#from decimal import Decimal
from unipath import Path
# from lino import AFTER17
from django.conf import settings
from django.db import models
from django.utils import translation
from django.utils.module_loading import import_string
from django.utils.encoding import force_text
#from django.db import IntegrityError
from django.core.serializers import base
from django.core.exceptions import ValidationError
#from django.core.exceptions import ObjectDoesNotExist
#from lino.utils.mldbc.fields import BabelCharField, BabelTextField
#from lino.core.choicelists import ChoiceListField
from lino.core.utils import obj2str, full_model_name
SUFFIX = '.py'
def create_mti_child(parent_model, pk, child_model, **kw):
"""Similar to :func:`lino.utils.mti.insert_child`, but for usage in
Python dumps (generated by :manage:`dump2py`).
The difference is very tricky. The return value here is an
"almost normal" model instance, whose `save` and `full_clean`
methods have been hacked. These are the only methods that will be
called by :class:`Deserializer`. You should not use this instance
for anything else and throw it away when the save() has been
called.
"""
parent_link_field = child_model._meta.parents.get(parent_model, None)
if parent_link_field is None:
raise ValidationError("A %s cannot be parent for a %s" % (
parent_model.__name__, child_model.__name__))
pfields = {}
for f in parent_model._meta.fields:
if f.name in kw:
pfields[f.name] = kw.pop(f.name)
kw[parent_link_field.name + "_id"] = pk
# if ignored:
# raise Exception(
# "create_mti_child() %s %s from %s : "
# "ignored non-local fields %s" % (
# child_model.__name__,
# pk,
# parent_model.__name__,
# ignored))
child_obj = child_model(**kw)
if len(pfields):
parent_obj = parent_model.objects.get(pk=pk)
for k, v in pfields.items():
setattr(parent_obj, k, v)
parent_obj.full_clean()
parent_obj.save()
child_obj.save = save
child_obj.full_clean = full_clean
return child_obj
SUPPORT_EMPTY_FIXTURES = False # trying, but doesn't yet work
if SUPPORT_EMPTY_FIXTURES:
from django_site.utils import AttrDict
class FakeDeserializedObject(base.DeserializedObject):
"""Imitates DeserializedObject required by loaddata.
Unlike normal DeserializedObject, we *don't want* to bypass
pre_save and validation methods on the individual objects.
"""
def save(self, *args, **kw):
"""
"""
# print 'dpy.py',self.object
# logger.info("Loading %s...",self.name)
self.try_save(*args, **kw)
# if self.try_save(*args,**kw):
# self.deserializer.saved += 1
# else:
# self.deserializer.save_later.append(self)
def try_save(self, *args, **kw):
"""Try to save the specified Model instance `obj`. Return `True`
on success, `False` if this instance wasn't saved and should be
deferred.
"""
obj = self.object
try:
"""
"""
m = getattr(obj, 'before_dumpy_save', None)
if m is not None:
m(self.deserializer)
if not self.deserializer.quick:
try:
obj.full_clean()
except ValidationError as e:
# raise Exception("{0} : {1}".format(obj2str(obj), e))
raise # Exception("{0} : {1}".format(obj2str(obj), e))
obj.save(*args, **kw)
logger.debug("%s has been saved" % obj2str(obj))
self.deserializer.register_success()
return True
# except ValidationError,e:
# except ObjectDoesNotExist,e:
# except (ValidationError,ObjectDoesNotExist), e:
# except (ValidationError,ObjectDoesNotExist,IntegrityError), e:
except Exception as e:
if True:
if not settings.SITE.loading_from_dump:
# hand-written fixtures are expected to yield in savable
# order
logger.warning("Failed to save %s from manual fixture:" % obj2str(obj))
raise
deps = [f.remote_field.model for f in obj._meta.fields
if f.remote_field and f.remote_field.model]
if not deps:
logger.exception(e)
raise Exception(
"Failed to save independent %s." % obj2str(obj))
self.deserializer.register_failure(self, e)
return False
# except Exception,e:
# logger.exception(e)
# raise Exception("Failed to save %s. Abandoned." % obj2str(obj))
class Serializer(base.Serializer):
"""Serializes a QuerySet to a py stream.
Usage: ``manage.py dumpdata --format py``
DEPRECATED. The problem with this approach is that a serializer
creates -by definition- one single file. And Python needs
-understandably- to load a module completely into memory before it
can be executed. Use :manage:`dump2py` instead.
"""
internal_use_only = False
class FlushDeferredObjects(object):
"""
Indicator class object.
Fixture may yield a `FlushDeferredObjects`
to indicate that all deferred objects should get saved before going on.
"""
pass
class DpyLoader(LoaderBase):
"""Instantiated by :xfile:`restore.py`.
"""
class DpyDeserializer(LoaderBase):
"""The Django deserializer for :ref:`dpy`.
Note that this deserializer explicitly ignores fixtures whose
source file is located in the current directory because i the case
of `.py` files this can lead to side effects when importing them.
See e.g. :ticket:`1029`. We consider it an odd behaviour of
Django to search for fixtures also in the current directory (and
not, as `documented
<https://docs.djangoproject.com/en/1.11/howto/initial-data/#where-django-finds-fixture-files>`__,
in the `fixtures` subdirs of plugins and the optional
:setting:`FIXTURE_DIRS`).
"""
def Deserializer(fp, **options):
"""The Deserializer used when ``manage.py loaddata`` encounters a
`.py` fixture.
"""
d = DpyDeserializer()
return d.deserialize(fp, **options)
class Migrator(object):
"""The SITE's Migrator class is instantiated by `install_migrations`.
If :attr:`migration_class<lino.core.site.Site.migration_class>` is
`None` (the default), then this class will be
instantiated. Applications may define their own Migrator class
which should be a subclasss of this.
"""
def after_load(self, todo):
"""Declare a function to be called after all data has been loaded."""
assert callable(todo)
# al = self.globals_dict['AFTER_LOAD_HANDLERS']
self.loader.AFTER_LOAD_HANDLERS.append(todo)
def before_load(self, todo):
"""Declare a function to be called before loading dumped data."""
assert callable(todo)
self.loader.before_load_handlers.append(todo)
def install_migrations(self, loader):
"""
Install "migrators" into the given global namespace.
Python dumps are generated with one line near the end of their
:xfile:`restore.py` file which calls this method, passing it their
global namespace::
settings.SITE.install_migrations(globals())
A dumped fixture should always call this, even if there is no
version change and no data migration, because this also does
certain other things:
- set :attr:`loading_from_dump
<lino.core.site.Site.loading_from_dump>` to `True`
- remove any Permission and Site objects that might have been
generated by `post_syncdb` signal if these apps are installed.
"""
globals_dict = loader.globals_dict
self.loading_from_dump = True
# if self.is_installed('auth'):
# from django.contrib.auth.models import Permission
# Permission.objects.all().delete()
if self.is_installed('sites'):
from django.contrib.sites.models import Site
Site.objects.all().delete()
current_version = self.version
if current_version is None:
logger.info("Unversioned Site instance : no database migration")
return
if globals_dict['SOURCE_VERSION'] == current_version:
logger.info("Source version is %s : no migration needed",
current_version)
return
if self.migration_class is not None:
mc = import_string(self.migration_class)
migrator = mc(self, loader)
else:
migrator = self
while True:
from_version = globals_dict['SOURCE_VERSION']
funcname = 'migrate_from_' + from_version.replace('.', '_')
m = getattr(migrator, funcname, None)
if m is not None:
# logger.info("Found %s()", funcname)
to_version = m(globals_dict)
if not isinstance(to_version, six.string_types):
raise Exception("Oops, %s didn't return a string!" % m)
if PV(to_version) <= PV(from_version):
raise Exception(
"Oops, %s tries to migrate from version %s to %s ?!" %
(m, from_version, to_version))
msg = "Migrating from version %s to %s" % (
from_version, to_version)
if m.__doc__:
msg += ":\n" + m.__doc__
logger.info(msg)
globals_dict['SOURCE_VERSION'] = to_version
else:
if from_version != current_version:
logger.warning(
"No method for migrating from version %s to %s",
from_version, current_version)
break
def unused_load_fixture_from_module(m, **options):
"""No longer used in unit tests to manually load a given fixture
module.
"""
# filename = m.__file__[:-1]
# print filename
# assert filename.endswith('.py')
# fp = open(filename)
d = DpyDeserializer()
for o in d.deserialize_module(m, **options):
o.save()
# 20140506 Don't remember why the following was. But it disturbed
# in Lino `/tutorials/tables/index`.
# if d.saved != 1:
# logger.info("20140506 Loaded %d objects", d.saved)
# raise Exception("Failed to load Python fixture from module %s" %
# m.__name__)
# return d
# from functools import wraps
def override(globals_dict):
"""A decorator to be applied when redefining, in a
:meth:`migrate_from_VERSION` method, one of the
:func:`create_APP_MODEL` functions defined in the
:xfile:`restore.py` file of a dump.
"""
return override_decorator
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
15069,
3717,
12,
7908,
25463,
2611,
1222,
17634,
12052,
198,
2,
13789,
25,
347,
10305,
11,
766,
2393,
38559,
24290,
329,
517,
3307,
13,
198,
198,
37811,
198,
7469,
1127,... | 2.445013 | 4,692 |
from model.move import Move
| [
6738,
2746,
13,
21084,
1330,
10028,
628
] | 4.142857 | 7 |
from setuptools import find_packages, setup
setup(
name="tqp",
version="0.5.1",
description="An opinionated library for pub/sub over SQS and SNS",
url="https://github.com/4Catalyzer/tqp",
author="Giacomo Tagliabue",
author_email="giacomo@gmail.com",
license="MIT",
python_requires=">=3.6",
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3 :: Only",
],
keywords="pub sub pubsub flask",
packages=find_packages(),
install_requires=("boto3",),
extras_require={
"dev": [
"pytest",
"fourmat~=0.4.3",
"pre-commit",
"moto[server]",
"boto3",
]
},
)
| [
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
83,
80,
79,
1600,
198,
220,
220,
220,
2196,
2625,
15,
13,
20,
13,
16,
1600,
198,
220,
220,
220,
6764,
2625,
2025,
4459,
... | 2.27897 | 466 |
#!/usr/bin/env python
# __BEGIN_LICENSE__
# Copyright (C) 2008-2010 United States Government as represented by
# the Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# __END_LICENSE__
from django.contrib.auth.models import User
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
11593,
33,
43312,
62,
43,
2149,
24290,
834,
198,
2,
15069,
357,
34,
8,
3648,
12,
10333,
1578,
1829,
5070,
355,
7997,
416,
198,
2,
262,
22998,
286,
262,
2351,
15781,
261,
2306,
8... | 3.216495 | 97 |
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Experiments to accompany "Measuring Corpus Bias via First-Order Co-occurrence',
author='Daniel Smarda',
license='MIT',
)
| [
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
10677,
3256,
198,
220,
220,
220,
10392,
28,
19796,
62,
43789,
22784,
198,
220,
220,
220,
2196,
11639,
15,
13,
16,
13,
15,... | 2.912088 | 91 |
# coding=utf-8
# Copyright (C) 2020 NumS Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import doctest
import io
import warnings
from contextlib import redirect_stdout
import numpy as np
from nums.core import settings
from nums.numpy.numpy_utils import update_doc_string
warnings.filterwarnings("ignore", category=RuntimeWarning)
# pylint: disable=import-outside-toplevel, possibly-unused-variable, eval-used, reimported
if __name__ == "__main__":
# pylint: disable=import-error
from nums.core import application_manager
settings.system_name = "serial"
nps_app_inst = application_manager.instance()
# test_doctest_fallback(nps_app_inst)
# test_manual_cov(nps_app_inst)
test_manual(nps_app_inst)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
357,
34,
8,
12131,
31835,
50,
7712,
4816,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393... | 3.212276 | 391 |
#! python
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 25 13:30:52 2017
@author: Adam Deller, UNIVERSITY COLLEGE LONDON.
Tools for calculating the Stark effect in Rydberg helium
using the Numerov method.
Based on:
Stark structure of the Rydberg states of alkali-metal atoms
M. L. Zimmerman et al. Phys. Rev. A, 20 2251 (1979)
http://dx.doi.org/10.1103/PhysRevA.20.2251
Rydberg atom diamagnetism
M. M. KASH, PhD thesis, Massachusetts Institute of Technology.
Dept. of Physics. (1988)
http://hdl.handle.net/1721.1/14367
Cold atoms and molecules by Zeeman deceleration and Rydberg-
Stark deceleration
S. D. Hogan, Habilitationsschrift, ETH Zurich (2012)
http://dx.doi.org/10.3929/ethz-a-007577485
"""
from __future__ import print_function, division
from math import ceil, log, exp
import numpy as np
from numba import jit
from tqdm import trange
from .drake1999 import quantum_defects
import errno
import os
from datetime import datetime
from scipy.constants import h, hbar, c, alpha, m_e, e, epsilon_0, atomic_mass, pi, Rydberg, physical_constants
a_0 = physical_constants['Bohr radius'][0]
E_h = physical_constants['Hartree energy'][0]
# derived constants
mass_He = 4.002602 * atomic_mass
Z = 2
mu_me = (mass_He - m_e) / mass_He
mu_M = m_e / mass_He
En_h_He = E_h * mu_me
a_0_He = a_0 / mu_me
@jit
def get_nl_vals(nmin, nmax, m):
""" n and l vals for each matrix column, using range n_min to n_max.
"""
n_rng = np.arange(nmin, nmax + 1)
n_vals = np.array([], dtype='int32')
l_vals = np.array([], dtype='int32')
for n in n_rng:
l_rng = np.arange(np.abs(m), n)
n_vals = np.append(n_vals, np.array(np.zeros_like(l_rng) + n))
l_vals = np.append(l_vals, l_rng)
return n_vals, l_vals
@jit
def get_nlm_vals(nmin, nmax):
""" n, l and m vals for each matrix column, using range n_min to n_max.
"""
n_rng = np.arange(nmin, nmax + 1)
n_vals = np.array([], dtype='int32')
l_vals = np.array([], dtype='int32')
m_vals = np.array([], dtype='int32')
for n in n_rng:
l_rng = np.arange(0, n)
for l in l_rng:
m_rng = np.arange(-l, l+1)
n_vals = np.append(n_vals, np.array(np.zeros_like(m_rng) + n))
l_vals = np.append(l_vals, np.array(np.zeros_like(m_rng) + l))
m_vals = np.append(m_vals, m_rng)
return n_vals, l_vals, m_vals
@jit
def get_J_vals(S, L_vals, diff):
""" J = L + diff; unless L == 0, in which case J = S.
"""
J_vals = L_vals + diff
J_vals[L_vals == 0] = S
return J_vals
@jit
def get_triplet_nLJ(nmin, nmax, m):
""" n and L and J vals for each matrix column, using range n_min to n_max.
"""
n_rng = np.arange(nmin, nmax + 1, dtype='int32')
n_vals = np.array([], dtype='int32')
L_vals = np.array([], dtype='int32')
J_vals = np.array([], dtype='int32')
for n in n_rng:
l_rng = np.arange(m, n, dtype='int32')
for l in l_rng:
if l == 0:
n_vals = np.append(n_vals, n)
L_vals = np.append(L_vals, 0)
J_vals = np.append(J_vals, 1)
else:
n_vals = np.append(n_vals, np.zeros(3, dtype='int32') + n)
L_vals = np.append(L_vals, np.zeros(3, dtype='int32') + l)
J_vals = np.append(J_vals, np.arange(l - 1, l + 2, dtype='int32'))
return n_vals, L_vals, J_vals
@jit
def get_qd(S, n_vals, L_vals, J_vals):
""" Calculate quantum defects.
"""
iterations = int(10)
num_cols = len(n_vals)
qd = np.zeros(num_cols)
for i in range(num_cols):
n = n_vals[i]
L = L_vals[i]
J = J_vals[i]
if L in quantum_defects[S]:
if J in quantum_defects[S][L]:
delta = quantum_defects[S][L][J]
# calculate quantum defects
qd_i = delta[0]
for rep in range(iterations):
# repeat to get convergence
m = n - qd_i
defect = delta[0]
for j, d in enumerate(delta[1:]):
defect = defect + d*m**(-2.0*(j + 1))
qd_i = defect
qd[i] = defect
else:
qd[i] = np.nan
else:
qd[i] = 0.0
return qd
@jit
def En_0(neff):
""" Field-free energy. Ignores extra correction terms.
-- atomic units --
"""
energy = np.array([])
for n in neff:
en = -0.5 * n**-2.0
energy = np.append(energy, en)
return energy
@jit
def W_n(S, n_vals, L_vals, J_vals):
""" Field-free energy. Includes extra correction terms.
-- atomic units --
"""
neff = n_vals - get_qd(S, n_vals, L_vals, J_vals)
energy = np.array([])
for i, n in enumerate(n_vals):
en = -0.5 * (neff[i]**-2.0 - 3.0 * alpha**2.0 / (4.0 * n**4.0) + \
mu_M**2.0 * ((1.0 + (5.0 / 6.0) * (alpha * Z)**2.0)/ n**2.0))
energy = np.append(energy, en)
return energy
@jit
def E_zeeman(m_vals, B_z):
""" Energy shift due to the interaction of the orbital angular momentum of the Rydberg electron with the magnetic field.
-- atomic units --
"""
return m_vals * B_z * (1/2)
@jit
@jit
@jit
def wf_numerov(n, l, nmax, rmin, step):
""" Use the Numerov method to find the wavefunction for state n*, l, where
n* = n - delta.
nmax ensures that wavefunctions from different values of n can be aligned.
"""
l = float(l) # horrible hack. Otherwise jit fails if l = int(>81).
W1 = -0.5 * n**-2.0
W2 = (l + 0.5)**2.0
rmax = 2 * nmax * (nmax + 15)
r_in = n**2.0 - n * (n**2.0 - l*(l + 1.0))**0.5
step_sq = step**2.0
# ensure wf arrays will align using nmax
if n == nmax:
i = 0
r_sub2 = rmax
else:
i = int(ceil(log(rmax / (2 * n * (n + 15))) / step))
r_sub2 = rmax * exp(-i*step)
i += 1
# initialise
r_sub1 = rmax * exp(-i*step)
rvals = [r_sub2, r_sub1]
g_sub2 = 2.0 * r_sub2**2.0 * (-1.0 / r_sub2 - W1) + W2
g_sub1 = 2.0 * r_sub1**2.0 * (-1.0 / r_sub1 - W1) + W2
y_sub2 = 1e-10
y_sub1 = y_sub2 * (1.0 + step * g_sub2**0.5)
yvals = [y_sub2, y_sub1]
# Numerov method
i += 1
r = r_sub1
while r >= rmin:
## next step
r = rmax * exp(-i*step)
g = 2.0 * r**2.0 * (-1.0 / r - W1) + W2
y = (y_sub2 * (g_sub2 - (12.0 / step_sq)) + y_sub1 * \
(10.0 * g_sub1 + (24.0 / step_sq))) / ((12.0 / step_sq) - g)
## check for divergence
if r < r_in:
dy = abs((y - y_sub1) / y_sub1)
dr = (r**(-l-1) - r_sub1**(-l-1)) / r_sub1**(-l-1)
if dy > dr:
break
## store vals
rvals.append(r)
yvals.append(y)
## next iteration
r_sub1 = r
g_sub2 = g_sub1
g_sub1 = g
y_sub2 = y_sub1
y_sub1 = y
i += 1
rvals = np.array(rvals)
yvals = np.array(yvals)
# normalisation
yvals = yvals * (np.sum((yvals**2.0) * (rvals**2.0)))**-0.5
return rvals, yvals
@jit
def find_first(arr, val):
""" Index of the first occurence of val in arr.
"""
i = 0
while i < len(arr):
if val == arr[i]:
return i
i += 1
raise Exception('val not found in arr')
@jit
def find_last(arr, val):
""" Index of the last occurence of val in arr.
"""
i = len(arr) - 1
while i > 0:
if val == arr[i]:
return i
i -= 1
raise Exception('val not found in arr')
@jit
def wf_align(r1, y1, r2, y2):
""" Align two lists pairs (r, y) on r, assuming r array values overlap
except at head and tail, and that arrays are reverse sorted.
"""
if r1[0] != r2[0]:
# trim front end
if r1[0] > r2[0]:
idx = find_first(r1, r2[0])
r1 = r1[idx:]
y1 = y1[idx:]
else:
idx = find_first(r2, r1[0])
r2 = r2[idx:]
y2 = y2[idx:]
if r1[-1] != r2[-1]:
# trim back end
if r1[-1] < r2[-1]:
idx = find_last(r1, r2[-1])
r1 = r1[:idx + 1]
y1 = y1[:idx + 1]
else:
idx = find_last(r2, r1[-1])
r2 = r2[:idx + 1]
y2 = y2[:idx + 1]
if r1[0] == r2[0] and r1[-1] == r2[-1] and len(r1) == len(r2):
return r1, y1, r2, y2
else:
raise Exception("Failed to align wavefunctions.")
@jit
def wf_overlap(r1, y1, r2, y2, p=1.0):
""" Find the overlap between two radial wavefunctions (r, y).
"""
r1, y1, r2, y2 = wf_align(r1, y1, r2, y2)
return np.sum(y1 * y2 * r1**(2.0 + p))
def rad_overlap(n_eff_1, n_eff_2, l_1, l_2, rmin, step_params, p=1.0):
""" Radial overlap for state n1, l1 and n2 l2.
"""
nmax = max(n_eff_1, n_eff_2)
lmax = max(l_1, l_2)
numerov_step = chose_step(nmax, lmax, step_params)
r1, y1 = wf_numerov(n_eff_1, l_1, nmax, rmin, numerov_step)
r2, y2 = wf_numerov(n_eff_2, l_2, nmax, rmin, numerov_step)
return wf_overlap(r1, y1, r2, y2, p)
@jit
def ang_overlap_stark(l_1, l_2, m_1, m_2, field_orientation, dm_allow):
""" Angular overlap <l1, m| cos(theta) |l2, m>.
For Stark interaction
"""
dl = l_2 - l_1
dm = m_2 - m_1
l, m = int(l_1), int(m_1)
if field_orientation=='parallel':
if (dm == 0) and (dm in dm_allow):
if dl == +1:
return +(((l+1)**2-m**2)/((2*l+3)*(2*l+1)))**0.5
elif dl == -1:
return +((l**2-m**2)/((2*l+1)*(2*l-1)))**0.5
elif (dm == +1) and (dm in dm_allow):
if dl == +1:
return -((l+m+2)*(l+m+1)/(2*(2*l+3)*(2*l+1)))**0.5
elif dl == -1:
return +((l-m)*(l-m-1)/(2*(2*l+1)*(2*l-1)))**0.5
elif (dm == -1) and (dm in dm_allow):
if dl == +1:
return +((l-m+2)*(l-m+1)/(2*(2*l+3)*(2*l+1)))**0.5
elif dl == -1:
return -((l+m)*(l+m-1)/(2*(2*l+1)*(2*l-1)))**0.5
elif field_orientation=='crossed':
if dm == +1:
if dl == +1:
return +(0.5*(-1)**(m-2*l)) * (((l+m+1)*(l+m+2))/((2*l+1)*(2*l+3)))**0.5
elif dl == -1:
return -(0.5*(-1)**(-m+2*l)) * (((l-m-1)*(l-m)) /((2*l-1)*(2*l+1)))**0.5
elif dm == -1:
if dl == +1:
return +(0.5*(-1)**(m-2*l)) * (((l-m+1)*(l-m+2))/((2*l+1)*(2*l+3)))**0.5
elif dl == -1:
return -(0.5*(-1)**(-m+2*l)) * (((l+m-1)*(l+m)) /((2*l-1)*(2*l+1)))**0.5
return 0.0
@jit
def ang_overlap_diamagnetic(l_1, l_2, m_1, m_2):
""" Angular overlap <l1, m| sin^2(theta) |l2, m>.
For diamagnetic interaction
"""
dl = l_2 - l_1
dm = m_2 - m_1
lmin = min(l_1, l_2)
l, m = int(l_1), int(m_1)
if (dm == 0):
if (dl == 0):
return (2*(l**2+l-1+m**2))/((2*l-1)*(2*l+3))
elif (abs(dl) == 2):
return -(( (lmin+m+2)*(lmin+m+1)*(lmin-m+2)*(lmin-m+1) )/( (2*lmin+5)*((2*lmin+3)**2)*(2*lmin+1) ))**0.5
return 0.0
@jit
def stark_int(n_eff_1, n_eff_2, l_1, l_2, m_1, m_2, field_orientation, dm_allow, step_params=['flat',0.005], rmin=0.65):
""" Stark interaction between states |n1, l1, m> and |n2, l2, m>.
"""
dl = l_2 - l_1
dm = m_2 - m_1
if (abs(dl) == 1) and (abs(dm) <= 1):
# Stark interaction
return ang_overlap_stark(l_1, l_2, m_1, m_2, field_orientation, dm_allow) * \
rad_overlap(n_eff_1, n_eff_2, l_1, l_2, rmin, step_params, p=1.0)
else:
return 0.0
@jit
def diamagnetic_int(n_eff_1, n_eff_2, l_1, l_2, m_1, m_2, step_params=['flat',0.005], rmin=0.65):
""" Diamagnetic interaction between states |n1, l1, m> and |n2, l2, m>.
"""
dl = l_2 - l_1
dm = m_2 - m_1
if (abs(dl) in [0,2]) and (abs(dm) == 0):
# Diamagnetic interaction
return ang_overlap_diamagnetic(l_1, l_2, m_1, m_2) * \
rad_overlap(n_eff_1, n_eff_2, l_1, l_2, rmin, step_params, p=2.0)
else:
return 0.0
@jit
def stark_matrix(neff_vals, l_vals, m_vals, field_orientation, dm_allow=[0], step_params=['flat',0.005], disableTQDM=False):
""" Stark interaction matrix.
"""
num_cols = len(neff_vals)
mat_S = np.zeros([num_cols, num_cols])
for i in trange(num_cols, desc="Calculating Stark terms", disable=disableTQDM):
n_eff_1 = neff_vals[i]
l_1 = l_vals[i]
m_1 = m_vals[i]
for j in range(i + 1, num_cols):
n_eff_2 = neff_vals[j]
l_2 = l_vals[j]
m_2 = m_vals[j]
mat_S[i][j] = stark_int(n_eff_1, n_eff_2, l_1, l_2, m_1, m_2, field_orientation, dm_allow, step_params)
# assume matrix is symmetric
mat_S[j][i] = mat_S[i][j]
return mat_S
@jit
def stark_matrix_select_m(neff_vals, l_vals, m, field_orientation, dm_allow=[0], step_params=['flat',0.005], disableTQDM=False):
""" Stark interaction matrix.
"""
num_cols = len(neff_vals)
mat_I = np.zeros([num_cols, num_cols])
for i in trange(num_cols, desc="calculate Stark terms", disable=disableTQDM):
n_eff_1 = neff_vals[i]
l_1 = l_vals[i]
for j in range(i + 1, num_cols):
n_eff_2 = neff_vals[j]
l_2 = l_vals[j]
mat_I[i][j] = stark_int(n_eff_1, n_eff_2, l_1, l_2, m, m, field_orientation, dm_allow, step_params)
# assume matrix is symmetric
mat_I[j][i] = mat_I[i][j]
return mat_I
@jit
def diamagnetic_matrix(neff_vals, l_vals, m_vals, step_params=['flat',0.005], disableTQDM=False):
""" Diamagnetic interaction matrix.
"""
num_cols = len(neff_vals)
mat_D = np.zeros([num_cols, num_cols])
for i in trange(num_cols, desc="Calculating diamagnetic terms", disable=disableTQDM):
n_eff_1 = neff_vals[i]
l_1 = l_vals[i]
m_1 = m_vals[i]
for j in range(i, num_cols):
n_eff_2 = neff_vals[j]
l_2 = l_vals[j]
m_2 = m_vals[j]
mat_D[i][j] = diamagnetic_int(n_eff_1, n_eff_2, l_1, l_2, m_1, m_2, step_params)
# assume matrix is symmetric
mat_D[j][i] = mat_D[i][j]
return mat_D
@jit
def diamagnetic_matrix_select_m(neff_vals, l_vals, m, step_params=['flat',0.005], disableTQDM=False):
""" Diamagnetic interaction matrix.
"""
num_cols = len(neff_vals)
mat_D = np.zeros([num_cols, num_cols])
for i in trange(num_cols, desc="calculate Diamagnetic terms", disable=disableTQDM):
n_eff_1 = neff_vals[i]
l_1 = l_vals[i]
for j in range(i, num_cols):
n_eff_2 = neff_vals[j]
l_2 = l_vals[j]
mat_D[i][j] = diamagnetic_int(n_eff_1, n_eff_2, l_1, l_2, m, m, step_params)
# assume matrix is symmetric
mat_D[j][i] = mat_D[i][j]
return mat_D
def eig_sort(w, v):
""" sort eignenvalues and eigenvectors by eigenvalue.
"""
ids = np.argsort(w)
return w[ids], v[:, ids]
@jit
def stark_map(H_0, mat_S, field, H_Z=0, H_D=0, disableTQDM=False):
""" Calculate the eigenvalues for H_0 + H_S, where
- H_0 is the field-free Hamiltonian,
- H_S = D * F * mat_S
- D is the electric dipole moment,
- F is each value of the electric field (a.u.),
- mat_S is the Stark interaction matrix.
- H_Z is the Zeeman interaction Hamiltonian
- H_D is the Diamagnetic interaction Hamiltonian
return eig_val [array.shape(num_fields, num_states)]
"""
num_fields = len(field)
num_cols = np.shape(H_0)[0]
# initialise output arrays
eig_val = np.empty((num_fields, num_cols), dtype=float)
# loop over field values
for i in trange(num_fields, desc="diagonalise Hamiltonian", disable=disableTQDM):
F = field[i]
H_S = F * mat_S
# diagonalise, assuming matrix is Hermitian.
eig_val[i] = np.linalg.eigh(H_0 + H_Z + H_S + H_D)[0]
return eig_val
@jit
def stark_map_vec(H_0, mat_S, field, H_Z=0, H_D=0, disableTQDM=False):
""" Calculate the eigenvalues for H_0 + H_S, where
- H_0 is the field-free Hamiltonian,
- H_S = D * F * mat_S
- D is the electric dipole moment,
- F is each value of the electric field (a.u.),
- mat_S is the Stark interaction matrix.
- H_Z is the Zeeman interaction Hamiltonian
- H_D is the Diamagnetic interaction Hamiltonian
return eig_val [array.shape(num_fields, num_states)],
eig_vec [array.shape(num_fields, num_states, num_states)]
------------------------------------------------------------------
Note: A significant amount of memory may be required to hold the
array of eigenvectors.
------------------------------------------------------------------
"""
num_fields = len(field)
num_cols = np.shape(H_0)[0]
# initialise output arrays
eig_val = np.empty((num_fields, num_cols), dtype=float)
eig_vec = np.empty((num_fields, num_cols, num_cols), dtype=float)
# loop over field values
for i in trange(num_fields, desc="diagonalise Hamiltonian", disable=disableTQDM):
F = field[i]
H_S = F * mat_S
# diagonalise, assuming matrix is Hermitian.
eig_val[i], eig_vec[i] = np.linalg.eigh(H_0 + H_Z + H_S + H_D)
return eig_val, eig_vec
| [
2,
0,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
2365,
1679,
1511,
25,
1270,
25,
4309,
2177,
198,
198,
31,
9800,
25,
7244,
360,
12368,
11,
49677,
9050,
20444,
2538,
826... | 1.872176 | 9,427 |
from django.test import TestCase
from contact.models import Contact
from company.models import Company
from users.models import User
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
2800,
13,
27530,
1330,
14039,
198,
6738,
1664,
13,
27530,
1330,
5834,
198,
6738,
2985,
13,
27530,
1330,
11787,
628
] | 4.354839 | 31 |
# -*- coding: utf-8 -*-
# Copyright 2016 Open Permissions Platform Coalition
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
Implementations for the client credentials & JWT-bearer authorization grants
"""
import couch
from tornado.gen import coroutine, Return
from tornado.options import options
from perch import Repository, Service
from .scope import Scope
from .token import generate_token, decode_token
from .exceptions import InvalidGrantType, BadRequest, Unauthorized
_registry = {}
def get_grant(request, token=None):
"""Grant factory"""
if token is None:
key = request.grant_type
else:
decoded = decode_token(token)
key = decoded['grant_type']
try:
grant_type = _registry[key]
except KeyError:
raise InvalidGrantType(key)
return grant_type(request)
class ClientCredentials(BaseGrant):
"""
Implementation of the OAuth2 client credentials grant
See https://tools.ietf.org/html/rfc6749
"""
grant_type = 'client_credentials'
@coroutine
def validate_scope(self):
"""Vaildate that the client is authorized for the requested scope"""
yield self.requested_scope.validate(self.request.client)
@coroutine
def generate_token(self):
"""Verify the client is authorized and generate a token"""
self.validate_grant()
yield self.validate_scope()
token, expiry = generate_token(self.request.client,
self.requested_scope,
self.grant_type)
raise Return((token, expiry))
@coroutine
def verify_access(self, token):
"""Verify a token has access to a resource"""
decoded = decode_token(token)
scope = decoded['scope']
client = yield Service.get(decoded['client']['id'])
self.verify_scope(scope)
yield [self.verify_access_service(client),
self.verify_access_hosted_resource(client)]
ClientCredentials.register()
class AuthorizeDelegate(BaseGrant):
"""
Use the JWT Bearer authorization grant for authorizing a delegate
See https://tools.ietf.org/html/rfc7523
"""
grant_type = 'urn:ietf:params:oauth:grant-type:jwt-bearer'
@property
def assertion(self):
"""
The assertion should be a JSON Web Token authorizing the client
to request a token to act as a delegate on another client's behalf
"""
assertion = getattr(self, '_assertion', None)
if not assertion:
try:
assertion = self.request.body_arguments['assertion'][0]
except (KeyError, IndexError):
raise ValueError('A JSON Web Token must be included as an '
'"assertion" parameter')
self._assertion = assertion = decode_token(assertion)
return assertion
def validate_scope(self):
"""Vaildate that the client's scope is granted by the provided JWT"""
id_scope = 'delegate[{}]:{}'.format(
self.request.client_id,
str(self.requested_scope))
try:
url_scope = 'delegate[{}]:{}'.format(
self.request.client.location,
str(self.requested_scope))
except AttributeError:
url_scope = None
if str(self.assertion['scope']) not in (id_scope, url_scope):
raise Unauthorized('Requested scope does not match token')
@coroutine
def generate_token(self):
"""Generate a delegate token"""
self.validate_grant()
self.validate_scope()
# Assuming delegation always requires write access
# should change it to a param
client = yield Service.get(self.assertion['client']['id'])
has_access = client.authorized('w', self.request.client)
if not has_access:
raise Unauthorized('Client "{}" may not delegate to service "{}"'.format(
self.assertion['client']['id'],
self.request.client_id
))
token, expiry = generate_token(client,
self.requested_scope,
self.grant_type,
delegate_id=self.request.client_id)
raise Return((token, expiry))
@coroutine
def verify_access(self, token):
"""Verify a token has access to a resource"""
decoded = decode_token(token)
scope = decoded['scope']
self.verify_scope(scope)
try:
delegate = yield Service.get(decoded['sub'])
except couch.NotFound:
raise Unauthorized("Unknown delegate '{}'".format(decoded['sub']))
client = yield Service.get(decoded['client']['id'])
yield [self.verify_access_service(delegate),
self.verify_access_service(client),
self.verify_access_hosted_resource(client)]
AuthorizeDelegate.register()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
1584,
4946,
2448,
8481,
19193,
15135,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779... | 2.448061 | 2,243 |
import itertools
import torch
import torch.nn as nn
import numpy as np
class Anchors(nn.Module):
"""
adapted and modified from https://github.com/google/automl/blob/master/efficientdet/anchors.py by Zylo117
"""
def forward(self, image, dtype=torch.float32):
"""Generates multiscale anchor boxes.
Args:
image_size: integer number of input image size. The input image has the
same dimension for width and height. The image_size should be divided by
the largest feature stride 2^max_level.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
Returns:
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels.
Raises:
ValueError: input size must be the multiple of largest feature stride.
"""
image_shape = image.shape[2:]
if image_shape == self.last_shape and image.device in self.last_anchors:
return self.last_anchors[image.device]
if self.last_shape is None or self.last_shape != image_shape:
self.last_shape = image_shape
if dtype == torch.float16:
dtype = np.float16
else:
dtype = np.float32
boxes_all = []
for stride in self.strides:
boxes_level = []
for scale, ratio in itertools.product(self.scales, self.ratios):
if image_shape[1] % stride != 0:
raise ValueError('input size must be divided by the stride.')
base_anchor_size = self.anchor_scale * stride * scale
anchor_size_x_2 = base_anchor_size * ratio[0] / 2.0
anchor_size_y_2 = base_anchor_size * ratio[1] / 2.0
x = np.arange(stride / 2, image_shape[1], stride)
y = np.arange(stride / 2, image_shape[0], stride)
xv, yv = np.meshgrid(x, y)
xv = xv.reshape(-1)
yv = yv.reshape(-1)
# y1,x1,y2,x2
boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,
yv + anchor_size_y_2, xv + anchor_size_x_2))
boxes = np.swapaxes(boxes, 0, 1)
boxes_level.append(np.expand_dims(boxes, axis=1))
# concat anchors on the same level to the reshape NxAx4
boxes_level = np.concatenate(boxes_level, axis=1)
boxes_all.append(boxes_level.reshape([-1, 4]))
anchor_boxes = np.vstack(boxes_all)
anchor_boxes = torch.from_numpy(anchor_boxes.astype(dtype)).to(image.device)
anchor_boxes = anchor_boxes.unsqueeze(0)
# save it for later use to reduce overhead
self.last_anchors[image.device] = anchor_boxes
return anchor_boxes
| [
11748,
340,
861,
10141,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
299,
32152,
355,
45941,
628,
628,
198,
4871,
29253,
669,
7,
20471,
13,
26796,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1657... | 2.145804 | 1,406 |
SUPERUSUARIO = 1
ADMINISTRADOR = 2
USUARIO = 3 | [
40331,
1137,
2937,
52,
1503,
9399,
796,
352,
198,
2885,
23678,
1797,
5446,
2885,
1581,
796,
362,
198,
2937,
52,
1503,
9399,
796,
513
] | 1.916667 | 24 |
#!/usr/bin/env python3
import argparse
import gzip
import collections
import yaml
import vcf
import sys
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
308,
13344,
198,
11748,
17268,
198,
11748,
331,
43695,
198,
11748,
410,
12993,
198,
11748,
25064,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366... | 2.826923 | 52 |
import json, util
import numpy as np
from collections import defaultdict
if __name__ == "__main__":
print_results(util.find_data_directory())
| [
11748,
33918,
11,
7736,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
17268,
1330,
4277,
11600,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
62,
43420,
7,
22602,
13,
19796,
62,
789... | 3.23913 | 46 |
""" Cisco_IOS_XR_tunnel_l2tun_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR tunnel\-l2tun package operational data.
This module contains definitions
for the following management objects\:
l2tp\: L2TP operational data
l2tpv2\: l2tpv2
Copyright (c) 2013\-2017 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class DigestHash(Enum):
    """Digest hash types used by L2TP tunnel authentication.

    .. data:: md5 = 0

    	MD5

    .. data:: sha1 = 1

    	SHA1
    """

    # Numeric value / YANG identifier pairs mirror the model's enumeration.
    md5 = Enum.YLeaf(0, "md5")

    sha1 = Enum.YLeaf(1, "sha1")
class L2Tp(Entity):
"""
L2TP operational data
.. attribute:: counters
L2TP control messages counters
**type**\: :py:class:`Counters <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters>`
.. attribute:: tunnel_configurations
List of tunnel IDs
**type**\: :py:class:`TunnelConfigurations <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.TunnelConfigurations>`
.. attribute:: counter_hist_fail
Failure events leading to disconnection
**type**\: :py:class:`CounterHistFail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.CounterHistFail>`
.. attribute:: classes
List of L2TP class names
**type**\: :py:class:`Classes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Classes>`
.. attribute:: tunnels
List of tunnel IDs
**type**\: :py:class:`Tunnels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Tunnels>`
.. attribute:: sessions
List of session IDs
**type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Sessions>`
.. attribute:: session
L2TP control messages counters
**type**\: :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Session>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Counters(Entity):
"""
L2TP control messages counters
.. attribute:: control
L2TP control messages counters
**type**\: :py:class:`Control <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Control(Entity):
"""
L2TP control messages counters
.. attribute:: tunnel_xr
L2TP control tunnel messages counters
**type**\: :py:class:`TunnelXr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr>`
.. attribute:: tunnels
Table of tunnel IDs of control message counters
**type**\: :py:class:`Tunnels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class TunnelXr(Entity):
"""
L2TP control tunnel messages counters
.. attribute:: authentication
Tunnel authentication counters
**type**\: :py:class:`Authentication <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication>`
.. attribute:: global_
Tunnel counters
**type**\: :py:class:`Global <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Global>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Authentication(Entity):
"""
Tunnel authentication counters
.. attribute:: nonce_avp
Nonce AVP statistics
**type**\: :py:class:`NonceAvp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.NonceAvp>`
.. attribute:: common_digest
Common digest statistics
**type**\: :py:class:`CommonDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.CommonDigest>`
.. attribute:: primary_digest
Primary digest statistics
**type**\: :py:class:`PrimaryDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.PrimaryDigest>`
.. attribute:: secondary_digest
Secondary digest statistics
**type**\: :py:class:`SecondaryDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.SecondaryDigest>`
.. attribute:: integrity_check
Integrity check statistics
**type**\: :py:class:`IntegrityCheck <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.IntegrityCheck>`
.. attribute:: local_secret
Local secret statistics
**type**\: :py:class:`LocalSecret <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.LocalSecret>`
.. attribute:: challenge_avp
Challenge AVP statistics
**type**\: :py:class:`ChallengeAvp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.ChallengeAvp>`
.. attribute:: challenge_reponse
Challenge response statistics
**type**\: :py:class:`ChallengeReponse <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.ChallengeReponse>`
.. attribute:: overall_statistics
Overall statistics
**type**\: :py:class:`OverallStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.OverallStatistics>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class NonceAvp(Entity):
"""
Nonce AVP statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class CommonDigest(Entity):
"""
Common digest statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class PrimaryDigest(Entity):
"""
Primary digest statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class SecondaryDigest(Entity):
"""
Secondary digest statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class IntegrityCheck(Entity):
"""
Integrity check statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class LocalSecret(Entity):
"""
Local secret statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class ChallengeAvp(Entity):
"""
Challenge AVP statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class ChallengeReponse(Entity):
"""
Challenge response statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class OverallStatistics(Entity):
"""
Overall statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Global(Entity):
"""
Tunnel counters
.. attribute:: transmit
Transmit data
**type**\: :py:class:`Transmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Global.Transmit>`
.. attribute:: retransmit
Re transmit data
**type**\: :py:class:`Retransmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Global.Retransmit>`
.. attribute:: received
Received data
**type**\: :py:class:`Received <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Global.Received>`
.. attribute:: drop
Drop data
**type**\: :py:class:`Drop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Global.Drop>`
.. attribute:: total_transmit
Total transmit
**type**\: int
**range:** 0..4294967295
.. attribute:: total_retransmit
Total retransmit
**type**\: int
**range:** 0..4294967295
.. attribute:: total_received
Total received
**type**\: int
**range:** 0..4294967295
.. attribute:: total_drop
Total drop
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Transmit(Entity):
"""
Transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Retransmit(Entity):
"""
Re transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Received(Entity):
"""
Received data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Drop(Entity):
"""
Drop data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Tunnels(Entity):
"""
Table of tunnel IDs of control message counters
.. attribute:: tunnel
L2TP tunnel control message counters
**type**\: list of :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Tunnel(Entity):
"""
L2TP tunnel control message counters
.. attribute:: tunnel_id (key)
L2TP tunnel ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: brief
L2TP control message local and remote addresses
**type**\: :py:class:`Brief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel.Brief>`
.. attribute:: global_
Global data
**type**\: :py:class:`Global <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel.Global>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Brief(Entity):
"""
L2TP control message local and remote addresses
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
.. attribute:: local_address
Local IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: remote_address
Remote IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Global(Entity):
"""
Global data
.. attribute:: transmit
Transmit data
**type**\: :py:class:`Transmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel.Global.Transmit>`
.. attribute:: retransmit
Re transmit data
**type**\: :py:class:`Retransmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel.Global.Retransmit>`
.. attribute:: received
Received data
**type**\: :py:class:`Received <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel.Global.Received>`
.. attribute:: drop
Drop data
**type**\: :py:class:`Drop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel.Global.Drop>`
.. attribute:: total_transmit
Total transmit
**type**\: int
**range:** 0..4294967295
.. attribute:: total_retransmit
Total retransmit
**type**\: int
**range:** 0..4294967295
.. attribute:: total_received
Total received
**type**\: int
**range:** 0..4294967295
.. attribute:: total_drop
Total drop
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Transmit(Entity):
"""
Transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Retransmit(Entity):
"""
Re transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK model class (Cisco-IOS-XR-tunnel-l2tun-oper YANG): per-message-type counters for received L2TP control traffic. Do not edit by hand.
class Received(Entity):
"""
Received data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK model class: per-message-type counters for dropped L2TP control traffic (same leaf schema as Received). Do not edit by hand.
class Drop(Entity):
"""
Drop data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK list container: holds TunnelConfiguration entries keyed by local tunnel ID. Do not edit by hand.
class TunnelConfigurations(Entity):
"""
List of tunnel IDs
.. attribute:: tunnel_configuration
L2TP tunnel information
**type**\: list of :py:class:`TunnelConfiguration <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.TunnelConfigurations.TunnelConfiguration>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK list entry: one L2TP tunnel configuration, keyed by local_tunnel_id. Do not edit by hand.
class TunnelConfiguration(Entity):
"""
L2TP tunnel information
.. attribute:: local_tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: l2tp_class
L2Tp class data
**type**\: :py:class:`L2TpClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.TunnelConfigurations.TunnelConfiguration.L2TpClass>`
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK model class: operational view of an L2TP class (timeouts, retry limits, auth/digest flags). Do not edit by hand.
class L2TpClass(Entity):
"""
L2Tp class data
.. attribute:: ip_tos
IP TOS
**type**\: int
**range:** 0..255
.. attribute:: vrf_name
VRF name
**type**\: str
**length:** 0..256
.. attribute:: receive_window_size
Receive window size
**type**\: int
**range:** 0..65535
.. attribute:: class_name_xr
Class name
**type**\: str
**length:** 0..256
.. attribute:: digest_hash
Hash configured as MD5 or SHA1
**type**\: :py:class:`DigestHash <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.DigestHash>`
.. attribute:: password
Password
**type**\: str
**length:** 0..25
.. attribute:: encoded_password
Encoded password
**type**\: str
**length:** 0..256
.. attribute:: host_name
Host name
**type**\: str
**length:** 0..256
.. attribute:: accounting_method_list
Accounting List
**type**\: str
**length:** 0..256
.. attribute:: hello_timeout
Hello timeout value in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: setup_timeout
Timeout setup value in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: retransmit_minimum_timeout
Retransmit minimum timeout in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: retransmit_maximum_timeout
Retransmit maximum timeout in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: initial_retransmit_minimum_timeout
Initial timeout minimum in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: initial_retransmit_maximum_timeout
Initial timeout maximum in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: timeout_no_user
Timeout no user
**type**\: int
**range:** 0..4294967295
.. attribute:: retransmit_retries
Retransmit retries
**type**\: int
**range:** 0..4294967295
.. attribute:: initial_retransmit_retries
Initial retransmit retries
**type**\: int
**range:** 0..4294967295
.. attribute:: is_authentication_enabled
True if authentication is enabled
**type**\: bool
.. attribute:: is_hidden
True if class is hidden
**type**\: bool
.. attribute:: is_digest_enabled
True if digest authentication is enabled
**type**\: bool
.. attribute:: is_digest_check_enabled
True if digest check is enabled
**type**\: bool
.. attribute:: is_congestion_control_enabled
True if congestion control is enabled
**type**\: bool
.. attribute:: is_peer_address_checked
True if peer address is checked
**type**\: bool
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK model class: failure-event history leading to disconnection (tx/rx counter strings are hex-octet encoded per the pattern below). Do not edit by hand.
class CounterHistFail(Entity):
"""
Failure events leading to disconnection
.. attribute:: sess_down_tmout
sessions affected due to timeout
**type**\: int
**range:** 0..4294967295
.. attribute:: tx_counters
Send side counters
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
.. attribute:: rx_counters
Receive side counters
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
.. attribute:: pkt_timeout
timeout events by packet
**type**\: list of int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK list container: holds Class entries keyed by class name ('class_' avoids the Python keyword). Do not edit by hand.
class Classes(Entity):
"""
List of L2TP class names
.. attribute:: class_
L2TP class name
**type**\: list of :py:class:`Class <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Classes.Class>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK list entry: one named L2TP class (same leaf schema as L2TpClass plus the class_name key). Do not edit by hand.
class Class(Entity):
"""
L2TP class name
.. attribute:: class_name (key)
L2TP class name
**type**\: str
**length:** 1..31
.. attribute:: ip_tos
IP TOS
**type**\: int
**range:** 0..255
.. attribute:: vrf_name
VRF name
**type**\: str
**length:** 0..256
.. attribute:: receive_window_size
Receive window size
**type**\: int
**range:** 0..65535
.. attribute:: class_name_xr
Class name
**type**\: str
**length:** 0..256
.. attribute:: digest_hash
Hash configured as MD5 or SHA1
**type**\: :py:class:`DigestHash <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.DigestHash>`
.. attribute:: password
Password
**type**\: str
**length:** 0..25
.. attribute:: encoded_password
Encoded password
**type**\: str
**length:** 0..256
.. attribute:: host_name
Host name
**type**\: str
**length:** 0..256
.. attribute:: accounting_method_list
Accounting List
**type**\: str
**length:** 0..256
.. attribute:: hello_timeout
Hello timeout value in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: setup_timeout
Timeout setup value in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: retransmit_minimum_timeout
Retransmit minimum timeout in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: retransmit_maximum_timeout
Retransmit maximum timeout in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: initial_retransmit_minimum_timeout
Initial timeout minimum in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: initial_retransmit_maximum_timeout
Initial timeout maximum in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: timeout_no_user
Timeout no user
**type**\: int
**range:** 0..4294967295
.. attribute:: retransmit_retries
Retransmit retries
**type**\: int
**range:** 0..4294967295
.. attribute:: initial_retransmit_retries
Initial retransmit retries
**type**\: int
**range:** 0..4294967295
.. attribute:: is_authentication_enabled
True if authentication is enabled
**type**\: bool
.. attribute:: is_hidden
True if class is hidden
**type**\: bool
.. attribute:: is_digest_enabled
True if digest authentication is enabled
**type**\: bool
.. attribute:: is_digest_check_enabled
True if digest check is enabled
**type**\: bool
.. attribute:: is_congestion_control_enabled
True if congestion control is enabled
**type**\: bool
.. attribute:: is_peer_address_checked
True if peer address is checked
**type**\: bool
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK list container: holds Tunnel entries keyed by local tunnel ID. Do not edit by hand.
class Tunnels(Entity):
"""
List of tunnel IDs
.. attribute:: tunnel
L2TP tunnel information
**type**\: list of :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Tunnels.Tunnel>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK list entry: live state of one L2TP tunnel (addresses are IPv4 dotted-quad per the pattern below). Do not edit by hand.
class Tunnel(Entity):
"""
L2TP tunnel information
.. attribute:: local_tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: local_address
Local tunnel address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: remote_address
Remote tunnel address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: local_port
Local port
**type**\: int
**range:** 0..65535
.. attribute:: remote_port
Remote port
**type**\: int
**range:** 0..65535
.. attribute:: protocol
Protocol
**type**\: int
**range:** 0..255
.. attribute:: is_pmtu_enabled
True if tunnel PMTU checking is enabled
**type**\: bool
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
.. attribute:: local_tunnel_name
Local tunnel name
**type**\: str
**length:** 0..256
.. attribute:: remote_tunnel_name
Remote tunnel name
**type**\: str
**length:** 0..256
.. attribute:: class_name
L2TP class name
**type**\: str
**length:** 0..256
.. attribute:: active_sessions
Number of active sessions
**type**\: int
**range:** 0..4294967295
.. attribute:: sequence_ns
Sequence NS
**type**\: int
**range:** 0..65535
.. attribute:: sequence_nr
Sequence NR
**type**\: int
**range:** 0..65535
.. attribute:: local_window_size
Local window size
**type**\: int
**range:** 0..65535
.. attribute:: remote_window_size
Remote window size
**type**\: int
**range:** 0..65535
.. attribute:: retransmission_time
Retransmission time in seconds
**type**\: int
**range:** 0..65535
**units**\: second
.. attribute:: maximum_retransmission_time
Maximum retransmission time in seconds
**type**\: int
**range:** 0..65535
**units**\: second
.. attribute:: unsent_queue_size
Unsent queue size
**type**\: int
**range:** 0..65535
.. attribute:: unsent_maximum_queue_size
Unsent maximum queue size
**type**\: int
**range:** 0..65535
.. attribute:: resend_queue_size
Resend queue size
**type**\: int
**range:** 0..65535
.. attribute:: resend_maximum_queue_size
Resend maximum queue size
**type**\: int
**range:** 0..65535
.. attribute:: order_queue_size
Order queue size
**type**\: int
**range:** 0..65535
.. attribute:: packet_queue_check
Current number session packet queue check
**type**\: int
**range:** 0..65535
.. attribute:: digest_secrets
Control message authentication with digest secrets
**type**\: int
**range:** 0..65535
.. attribute:: resends
Total resends
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_acknowledgement_sent
Total zero length body acknowledgement
**type**\: int
**range:** 0..4294967295
.. attribute:: total_out_of_order_drop_packets
Total out of order dropped packets
**type**\: int
**range:** 0..4294967295
.. attribute:: total_out_of_order_reorder_packets
Total out of order reorder packets
**type**\: int
**range:** 0..4294967295
.. attribute:: total_peer_authentication_failures
Number of peer authentication failures
**type**\: int
**range:** 0..4294967295
.. attribute:: is_tunnel_up
True if tunnel is up
**type**\: bool
.. attribute:: is_congestion_control_enabled
True if congestion control is enabled else false
**type**\: bool
.. attribute:: retransmit_time
Retransmit time distribution in seconds
**type**\: list of int
**range:** 0..65535
**units**\: second
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK list container: holds Session entries keyed by local tunnel and session IDs. Do not edit by hand.
class Sessions(Entity):
"""
List of session IDs
.. attribute:: session
L2TP information for a particular session
**type**\: list of :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Sessions.Session>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK list entry: live state of one L2TP session (keys: local_tunnel_id + local_session_id; IPs are IPv4 dotted-quad per the patterns below). Do not edit by hand.
class Session(Entity):
"""
L2TP information for a particular session
.. attribute:: local_tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: local_session_id (key)
Local session ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: session_application_data
Session application data
**type**\: :py:class:`SessionApplicationData <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Sessions.Session.SessionApplicationData>`
.. attribute:: local_ip_address
Local session IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: remote_ip_address
Remote session IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: l2tp_sh_sess_udp_lport
l2tp sh sess udp lport
**type**\: int
**range:** 0..65535
.. attribute:: l2tp_sh_sess_udp_rport
l2tp sh sess udp rport
**type**\: int
**range:** 0..65535
.. attribute:: protocol
Protocol
**type**\: int
**range:** 0..255
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
.. attribute:: call_serial_number
Call serial number
**type**\: int
**range:** 0..4294967295
.. attribute:: local_tunnel_name
Local tunnel name
**type**\: str
**length:** 0..256
.. attribute:: remote_tunnel_name
Remote tunnel name
**type**\: str
**length:** 0..256
.. attribute:: remote_session_id
Remote session ID
**type**\: int
**range:** 0..4294967295
.. attribute:: l2tp_sh_sess_tie_breaker_enabled
l2tp sh sess tie breaker enabled
**type**\: int
**range:** 0..255
.. attribute:: l2tp_sh_sess_tie_breaker
l2tp sh sess tie breaker
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: is_session_manual
True if session is manual
**type**\: bool
.. attribute:: is_session_up
True if session is up
**type**\: bool
.. attribute:: is_udp_checksum_enabled
True if UDP checksum enabled
**type**\: bool
.. attribute:: is_sequencing_on
True if session sequence is on
**type**\: bool
.. attribute:: is_session_state_established
True if session state is established
**type**\: bool
.. attribute:: is_session_locally_initiated
True if session initiated locally
**type**\: bool
.. attribute:: is_conditional_debug_enabled
True if conditional debugging is enabled
**type**\: bool
.. attribute:: unique_id
Unique ID
**type**\: int
**range:** 0..4294967295
.. attribute:: interface_name
Interface name
**type**\: str
**length:** 0..256
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK model class: application-specific data attached to a session (xconnect or VPDN branch selected by l2tp_sh_sess_app_type). Do not edit by hand.
class SessionApplicationData(Entity):
"""
Session application data
.. attribute:: xconnect
Xconnect data
**type**\: :py:class:`Xconnect <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Sessions.Session.SessionApplicationData.Xconnect>`
.. attribute:: vpdn
VPDN data
**type**\: :py:class:`Vpdn <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Sessions.Session.SessionApplicationData.Vpdn>`
.. attribute:: l2tp_sh_sess_app_type
l2tp sh sess app type
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK model class: xconnect circuit state for a session. Do not edit by hand.
class Xconnect(Entity):
"""
Xconnect data
.. attribute:: circuit_name
Circuit name
**type**\: str
.. attribute:: sessionvc_id
Session VC ID
**type**\: int
**range:** 0..4294967295
.. attribute:: is_circuit_state_up
True if circuit state is up
**type**\: bool
.. attribute:: is_local_circuit_state_up
True if local circuit state is up
**type**\: bool
.. attribute:: is_remote_circuit_state_up
True if remote circuit state is up
**type**\: bool
.. attribute:: ipv6_protocol_tunneling
IPv6ProtocolTunneling
**type**\: bool
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK model class: VPDN data for a session (username and interface). Do not edit by hand.
class Vpdn(Entity):
"""
VPDN data
.. attribute:: username
Session username
**type**\: str
.. attribute:: interface_name
Interface name
**type**\: str
**pattern:** [a\-zA\-Z0\-9./\-]+
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK container: session-level counters; only child here is the Unavailable container. Do not edit by hand.
class Session(Entity):
"""
L2TP control messages counters
.. attribute:: unavailable
L2TP session unavailable information
**type**\: :py:class:`Unavailable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Session.Unavailable>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK model class: count of session IDs held in the "unavailable" hold database. Do not edit by hand.
class Unavailable(Entity):
"""
L2TP session unavailable information
.. attribute:: sessions_on_hold
Number of session ID in hold database
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK top-level container for L2TPv2 operational data; children mirror the L2Tp tree (counters, statistics, tunnels, sessions). Do not edit by hand.
class L2Tpv2(Entity):
"""
l2tpv2
.. attribute:: counters
L2TP control messages counters
**type**\: :py:class:`Counters <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters>`
.. attribute:: statistics
L2TP v2 statistics information
**type**\: :py:class:`Statistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Statistics>`
.. attribute:: tunnel
L2TPv2 tunnel
**type**\: :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Tunnel>`
.. attribute:: tunnel_configurations
List of tunnel IDs
**type**\: :py:class:`TunnelConfigurations <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.TunnelConfigurations>`
.. attribute:: counter_hist_fail
Failure events leading to disconnection
**type**\: :py:class:`CounterHistFail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.CounterHistFail>`
.. attribute:: classes
List of L2TP class names
**type**\: :py:class:`Classes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Classes>`
.. attribute:: tunnels
List of tunnel IDs
**type**\: :py:class:`Tunnels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Tunnels>`
.. attribute:: sessions
List of session IDs
**type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Sessions>`
.. attribute:: session
L2TP control messages counters
**type**\: :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Session>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK container: groups forwarding and control counters for L2TPv2. Do not edit by hand.
class Counters(Entity):
"""
L2TP control messages counters
.. attribute:: forwarding
L2TP forwarding messages counters
**type**\: :py:class:`Forwarding <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Forwarding>`
.. attribute:: control
L2TP control messages counters
**type**\: :py:class:`Control <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK container: per-session forwarding counters live under Sessions. Do not edit by hand.
class Forwarding(Entity):
"""
L2TP forwarding messages counters
.. attribute:: sessions
List of class and session IDs
**type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Forwarding.Sessions>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK list container: forwarding-counter Session entries keyed by tunnel_id + session_id. Do not edit by hand.
class Sessions(Entity):
"""
List of class and session IDs
.. attribute:: session
L2TP information for a particular session
**type**\: list of :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Forwarding.Sessions.Session>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK list entry: per-session forwarding packet/byte counters (64-bit). Do not edit by hand.
class Session(Entity):
"""
L2TP information for a particular session
.. attribute:: tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: session_id (key)
Local session ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: remote_session_id
Remote session ID
**type**\: int
**range:** 0..4294967295
.. attribute:: in_packets
Number of packets sent in
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: out_packets
Number of packets sent out
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: in_bytes
Number of bytes sent in
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: out_bytes
Number of bytes sent out
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK container: control-message counters, global (TunnelXr) and per-tunnel (Tunnels). Do not edit by hand.
class Control(Entity):
"""
L2TP control messages counters
.. attribute:: tunnel_xr
L2TP control tunnel messages counters
**type**\: :py:class:`TunnelXr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr>`
.. attribute:: tunnels
Table of tunnel IDs of control message counters
**type**\: :py:class:`Tunnels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK container: control-tunnel counters split into authentication and global groups. Do not edit by hand.
class TunnelXr(Entity):
"""
L2TP control tunnel messages counters
.. attribute:: authentication
Tunnel authentication counters
**type**\: :py:class:`Authentication <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication>`
.. attribute:: global_
Tunnel counters
**type**\: :py:class:`Global <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Global>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK container: tunnel authentication counters grouped per mechanism (nonce, digests, challenge, overall). Do not edit by hand.
class Authentication(Entity):
"""
Tunnel authentication counters
.. attribute:: nonce_avp
Nonce AVP statistics
**type**\: :py:class:`NonceAvp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.NonceAvp>`
.. attribute:: common_digest
Common digest statistics
**type**\: :py:class:`CommonDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.CommonDigest>`
.. attribute:: primary_digest
Primary digest statistics
**type**\: :py:class:`PrimaryDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.PrimaryDigest>`
.. attribute:: secondary_digest
Secondary digest statistics
**type**\: :py:class:`SecondaryDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.SecondaryDigest>`
.. attribute:: integrity_check
Integrity check statistics
**type**\: :py:class:`IntegrityCheck <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.IntegrityCheck>`
.. attribute:: local_secret
Local secret statistics
**type**\: :py:class:`LocalSecret <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.LocalSecret>`
.. attribute:: challenge_avp
Challenge AVP statistics
**type**\: :py:class:`ChallengeAvp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.ChallengeAvp>`
.. attribute:: challenge_reponse
Challenge response statistics
**type**\: :py:class:`ChallengeReponse <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.ChallengeReponse>`
.. attribute:: overall_statistics
Overall statistics
**type**\: :py:class:`OverallStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.OverallStatistics>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK counter class: Nonce AVP authentication statistics (shared schema with the other digest/challenge counter classes). Do not edit by hand.
class NonceAvp(Entity):
"""
Nonce AVP statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK counter class: common digest authentication statistics (same schema as NonceAvp). Do not edit by hand.
class CommonDigest(Entity):
"""
Common digest statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK counter class: primary digest authentication statistics (same schema as NonceAvp). Do not edit by hand.
class PrimaryDigest(Entity):
"""
Primary digest statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK counter class: secondary digest authentication statistics (same schema as NonceAvp). Do not edit by hand.
class SecondaryDigest(Entity):
"""
Secondary digest statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK counter class: integrity-check statistics (same schema as NonceAvp). Do not edit by hand.
class IntegrityCheck(Entity):
"""
Integrity check statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
# Generated YDK counter class: local secret statistics (same schema as NonceAvp). Do not edit by hand.
class LocalSecret(Entity):
"""
Local secret statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class ChallengeAvp(Entity):
"""
Challenge AVP statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class ChallengeReponse(Entity):
"""
Challenge response statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class OverallStatistics(Entity):
"""
Overall statistics
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Global(Entity):
"""
Tunnel counters
.. attribute:: transmit
Transmit data
**type**\: :py:class:`Transmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Global.Transmit>`
.. attribute:: retransmit
Re transmit data
**type**\: :py:class:`Retransmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Global.Retransmit>`
.. attribute:: received
Received data
**type**\: :py:class:`Received <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Global.Received>`
.. attribute:: drop
Drop data
**type**\: :py:class:`Drop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Global.Drop>`
.. attribute:: total_transmit
Total transmit
**type**\: int
**range:** 0..4294967295
.. attribute:: total_retransmit
Total retransmit
**type**\: int
**range:** 0..4294967295
.. attribute:: total_received
Total received
**type**\: int
**range:** 0..4294967295
.. attribute:: total_drop
Total drop
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Transmit(Entity):
"""
Transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Retransmit(Entity):
"""
Re transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Received(Entity):
"""
Received data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Drop(Entity):
"""
Drop data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Tunnels(Entity):
"""
Table of tunnel IDs of control message counters
.. attribute:: tunnel
L2TP tunnel control message counters
**type**\: list of :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels.Tunnel>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Tunnel(Entity):
"""
L2TP tunnel control message counters
.. attribute:: tunnel_id (key)
L2TP tunnel ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: brief
L2TP control message local and remote addresses
**type**\: :py:class:`Brief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels.Tunnel.Brief>`
.. attribute:: global_
Global data
**type**\: :py:class:`Global <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels.Tunnel.Global>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Brief(Entity):
"""
L2TP control message local and remote addresses
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
.. attribute:: local_address
Local IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: remote_address
Remote IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Global(Entity):
"""
Global data
.. attribute:: transmit
Transmit data
**type**\: :py:class:`Transmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels.Tunnel.Global.Transmit>`
.. attribute:: retransmit
Re transmit data
**type**\: :py:class:`Retransmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels.Tunnel.Global.Retransmit>`
.. attribute:: received
Received data
**type**\: :py:class:`Received <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels.Tunnel.Global.Received>`
.. attribute:: drop
Drop data
**type**\: :py:class:`Drop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels.Tunnel.Global.Drop>`
.. attribute:: total_transmit
Total transmit
**type**\: int
**range:** 0..4294967295
.. attribute:: total_retransmit
Total retransmit
**type**\: int
**range:** 0..4294967295
.. attribute:: total_received
Total received
**type**\: int
**range:** 0..4294967295
.. attribute:: total_drop
Total drop
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Transmit(Entity):
"""
Transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Retransmit(Entity):
"""
Re transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Received(Entity):
"""
Received data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Drop(Entity):
"""
Drop data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Statistics(Entity):
"""
L2TP v2 statistics information
.. attribute:: tunnels
Number of tunnels
**type**\: int
**range:** 0..4294967295
.. attribute:: sessions
Number of sessions
**type**\: int
**range:** 0..4294967295
.. attribute:: sent_packets
Number of packets sent
**type**\: int
**range:** 0..4294967295
.. attribute:: received_packets
Number of packets received
**type**\: int
**range:** 0..4294967295
.. attribute:: average_packet_processing_time
Average processing time for received packets (in micro seconds)
**type**\: int
**range:** 0..4294967295
**units**\: microsecond
.. attribute:: received_out_of_order_packets
Out of order packets received
**type**\: int
**range:** 0..4294967295
.. attribute:: reorder_packets
Reorder packets
**type**\: int
**range:** 0..4294967295
.. attribute:: reorder_deviation_packets
Reorder deviation
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_dropped_packets
Incoming packets dropped
**type**\: int
**range:** 0..4294967295
.. attribute:: buffered_packets
Buffered packets
**type**\: int
**range:** 0..4294967295
.. attribute:: netio_packets
Packets RX in netio
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Tunnel(Entity):
"""
L2TPv2 tunnel
.. attribute:: accounting
Tunnel accounting counters
**type**\: :py:class:`Accounting <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Tunnel.Accounting>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Accounting(Entity):
"""
Tunnel accounting counters
.. attribute:: statistics
Tunnel accounting statistics
**type**\: :py:class:`Statistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Tunnel.Accounting.Statistics>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Statistics(Entity):
"""
Tunnel accounting statistics
.. attribute:: records_sent_successfully
Accounting records sent successfully
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: start
Accounting start
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: stop
Accounting stop
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: reject
Accounting reject
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: transport_failures
Transport failures
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: positive_acknowledgement
Positive acknowledgement
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: negative_acknowledgement
Negative acknowledgement
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: records_checkpointed
Total records checkpointed
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: records_failed_to_checkpoint
Records fail to checkpoint
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: records_sent_from_queue
Records sent from queue
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: memory_failures
Memory failures
**type**\: int
**range:** 0..4294967295
.. attribute:: current_size
Current checkpoint size
**type**\: int
**range:** 0..4294967295
.. attribute:: records_recovered_from_checkpoint
Records recovered from checkpoint
**type**\: int
**range:** 0..4294967295
.. attribute:: records_fail_to_recover
Records fail to recover
**type**\: int
**range:** 0..4294967295
.. attribute:: queue_statistics_size
Queue statistics size
**type**\: int
**range:** \-2147483648..2147483647
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class TunnelConfigurations(Entity):
"""
List of tunnel IDs
.. attribute:: tunnel_configuration
L2TP tunnel information
**type**\: list of :py:class:`TunnelConfiguration <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.TunnelConfigurations.TunnelConfiguration>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class TunnelConfiguration(Entity):
"""
L2TP tunnel information
.. attribute:: local_tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: l2tp_class
L2Tp class data
**type**\: :py:class:`L2TpClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.TunnelConfigurations.TunnelConfiguration.L2TpClass>`
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class L2TpClass(Entity):
"""
L2Tp class data
.. attribute:: ip_tos
IP TOS
**type**\: int
**range:** 0..255
.. attribute:: vrf_name
VRF name
**type**\: str
**length:** 0..256
.. attribute:: receive_window_size
Receive window size
**type**\: int
**range:** 0..65535
.. attribute:: class_name_xr
Class name
**type**\: str
**length:** 0..256
.. attribute:: digest_hash
Hash configured as MD5 or SHA1
**type**\: :py:class:`DigestHash <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.DigestHash>`
.. attribute:: password
Password
**type**\: str
**length:** 0..25
.. attribute:: encoded_password
Encoded password
**type**\: str
**length:** 0..256
.. attribute:: host_name
Host name
**type**\: str
**length:** 0..256
.. attribute:: accounting_method_list
Accounting List
**type**\: str
**length:** 0..256
.. attribute:: hello_timeout
Hello timeout value in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: setup_timeout
Timeout setup value in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: retransmit_minimum_timeout
Retransmit minimum timeout in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: retransmit_maximum_timeout
Retransmit maximum timeout in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: initial_retransmit_minimum_timeout
Initial timeout minimum in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: initial_retransmit_maximum_timeout
Initial timeout maximum in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: timeout_no_user
Timeout no user
**type**\: int
**range:** 0..4294967295
.. attribute:: retransmit_retries
Retransmit retries
**type**\: int
**range:** 0..4294967295
.. attribute:: initial_retransmit_retries
Initial retransmit retries
**type**\: int
**range:** 0..4294967295
.. attribute:: is_authentication_enabled
True if authentication is enabled
**type**\: bool
.. attribute:: is_hidden
True if class is hidden
**type**\: bool
.. attribute:: is_digest_enabled
True if digest authentication is enabled
**type**\: bool
.. attribute:: is_digest_check_enabled
True if digest check is enabled
**type**\: bool
.. attribute:: is_congestion_control_enabled
True if congestion control is enabled
**type**\: bool
.. attribute:: is_peer_address_checked
True if peer address is checked
**type**\: bool
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class CounterHistFail(Entity):
"""
Failure events leading to disconnection
.. attribute:: sess_down_tmout
Sessions affected due to timeout
**type**\: int
**range:** 0..4294967295
.. attribute:: tx_counters
Send side counters
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
.. attribute:: rx_counters
Receive side counters
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
.. attribute:: pkt_timeout
timeout events by packet
**type**\: list of int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Classes(Entity):
"""
List of L2TP class names
.. attribute:: class_
L2TP class name
**type**\: list of :py:class:`Class <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Classes.Class>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Class(Entity):
"""
L2TP class name
.. attribute:: class_name (key)
L2TP class name
**type**\: str
**length:** 1..31
.. attribute:: ip_tos
IP TOS
**type**\: int
**range:** 0..255
.. attribute:: vrf_name
VRF name
**type**\: str
**length:** 0..256
.. attribute:: receive_window_size
Receive window size
**type**\: int
**range:** 0..65535
.. attribute:: class_name_xr
Class name
**type**\: str
**length:** 0..256
.. attribute:: digest_hash
Hash configured as MD5 or SHA1
**type**\: :py:class:`DigestHash <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.DigestHash>`
.. attribute:: password
Password
**type**\: str
**length:** 0..25
.. attribute:: encoded_password
Encoded password
**type**\: str
**length:** 0..256
.. attribute:: host_name
Host name
**type**\: str
**length:** 0..256
.. attribute:: accounting_method_list
Accounting List
**type**\: str
**length:** 0..256
.. attribute:: hello_timeout
Hello timeout value in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: setup_timeout
Timeout setup value in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: retransmit_minimum_timeout
Retransmit minimum timeout in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: retransmit_maximum_timeout
Retransmit maximum timeout in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: initial_retransmit_minimum_timeout
Initial timeout minimum in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: initial_retransmit_maximum_timeout
Initial timeout maximum in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: timeout_no_user
Timeout no user
**type**\: int
**range:** 0..4294967295
.. attribute:: retransmit_retries
Retransmit retries
**type**\: int
**range:** 0..4294967295
.. attribute:: initial_retransmit_retries
Initial retransmit retries
**type**\: int
**range:** 0..4294967295
.. attribute:: is_authentication_enabled
True if authentication is enabled
**type**\: bool
.. attribute:: is_hidden
True if class is hidden
**type**\: bool
.. attribute:: is_digest_enabled
True if digest authentication is enabled
**type**\: bool
.. attribute:: is_digest_check_enabled
True if digest check is enabled
**type**\: bool
.. attribute:: is_congestion_control_enabled
True if congestion control is enabled
**type**\: bool
.. attribute:: is_peer_address_checked
True if peer address is checked
**type**\: bool
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Tunnels(Entity):
"""
List of tunnel IDs
.. attribute:: tunnel
L2TP tunnel information
**type**\: list of :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Tunnels.Tunnel>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Tunnel(Entity):
"""
L2TP tunnel information
.. attribute:: local_tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: local_address
Local tunnel address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: remote_address
Remote tunnel address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: local_port
Local port
**type**\: int
**range:** 0..65535
.. attribute:: remote_port
Remote port
**type**\: int
**range:** 0..65535
.. attribute:: protocol
Protocol
**type**\: int
**range:** 0..255
.. attribute:: is_pmtu_enabled
True if tunnel PMTU checking is enabled
**type**\: bool
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
.. attribute:: local_tunnel_name
Local tunnel name
**type**\: str
**length:** 0..256
.. attribute:: remote_tunnel_name
Remote tunnel name
**type**\: str
**length:** 0..256
.. attribute:: class_name
L2TP class name
**type**\: str
**length:** 0..256
.. attribute:: active_sessions
Number of active sessions
**type**\: int
**range:** 0..4294967295
.. attribute:: sequence_ns
Sequence NS
**type**\: int
**range:** 0..65535
.. attribute:: sequence_nr
Sequence NR
**type**\: int
**range:** 0..65535
.. attribute:: local_window_size
Local window size
**type**\: int
**range:** 0..65535
.. attribute:: remote_window_size
Remote window size
**type**\: int
**range:** 0..65535
.. attribute:: retransmission_time
Retransmission time in seconds
**type**\: int
**range:** 0..65535
**units**\: second
.. attribute:: maximum_retransmission_time
Maximum retransmission time in seconds
**type**\: int
**range:** 0..65535
**units**\: second
.. attribute:: unsent_queue_size
Unsent queue size
**type**\: int
**range:** 0..65535
.. attribute:: unsent_maximum_queue_size
Unsent maximum queue size
**type**\: int
**range:** 0..65535
.. attribute:: resend_queue_size
Resend queue size
**type**\: int
**range:** 0..65535
.. attribute:: resend_maximum_queue_size
Resend maximum queue size
**type**\: int
**range:** 0..65535
.. attribute:: order_queue_size
Order queue size
**type**\: int
**range:** 0..65535
.. attribute:: packet_queue_check
Current number session packet queue check
**type**\: int
**range:** 0..65535
.. attribute:: digest_secrets
Control message authentication with digest secrets
**type**\: int
**range:** 0..65535
.. attribute:: resends
Total resends
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_acknowledgement_sent
Total zero length body acknowledgement
**type**\: int
**range:** 0..4294967295
.. attribute:: total_out_of_order_drop_packets
Total out of order dropped packets
**type**\: int
**range:** 0..4294967295
.. attribute:: total_out_of_order_reorder_packets
Total out of order reorder packets
**type**\: int
**range:** 0..4294967295
.. attribute:: total_peer_authentication_failures
Number of peer authentication failures
**type**\: int
**range:** 0..4294967295
.. attribute:: is_tunnel_up
True if tunnel is up
**type**\: bool
.. attribute:: is_congestion_control_enabled
True if congestion control is enabled else false
**type**\: bool
.. attribute:: retransmit_time
Retransmit time distribution in seconds
**type**\: list of int
**range:** 0..65535
**units**\: second
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Sessions(Entity):
"""
List of session IDs
.. attribute:: session
L2TP information for a particular session
**type**\: list of :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Sessions.Session>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Session(Entity):
"""
L2TP information for a particular session
.. attribute:: local_tunnel_id (key)
Local tunnel ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: local_session_id (key)
Local session ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: session_application_data
Session application data
**type**\: :py:class:`SessionApplicationData <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Sessions.Session.SessionApplicationData>`
.. attribute:: local_ip_address
Local session IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: remote_ip_address
Remote session IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: l2tp_sh_sess_udp_lport
l2tp sh sess udp lport
**type**\: int
**range:** 0..65535
.. attribute:: l2tp_sh_sess_udp_rport
l2tp sh sess udp rport
**type**\: int
**range:** 0..65535
.. attribute:: protocol
Protocol
**type**\: int
**range:** 0..255
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
.. attribute:: call_serial_number
Call serial number
**type**\: int
**range:** 0..4294967295
.. attribute:: local_tunnel_name
Local tunnel name
**type**\: str
**length:** 0..256
.. attribute:: remote_tunnel_name
Remote tunnel name
**type**\: str
**length:** 0..256
.. attribute:: remote_session_id
Remote session ID
**type**\: int
**range:** 0..4294967295
.. attribute:: l2tp_sh_sess_tie_breaker_enabled
l2tp sh sess tie breaker enabled
**type**\: int
**range:** 0..255
.. attribute:: l2tp_sh_sess_tie_breaker
l2tp sh sess tie breaker
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: is_session_manual
True if session is manual
**type**\: bool
.. attribute:: is_session_up
True if session is up
**type**\: bool
.. attribute:: is_udp_checksum_enabled
True if UDP checksum enabled
**type**\: bool
.. attribute:: is_sequencing_on
True if session sequence is on
**type**\: bool
.. attribute:: is_session_state_established
True if session state is established
**type**\: bool
.. attribute:: is_session_locally_initiated
True if session initiated locally
**type**\: bool
.. attribute:: is_conditional_debug_enabled
True if conditional debugging is enabled
**type**\: bool
.. attribute:: unique_id
Unique ID
**type**\: int
**range:** 0..4294967295
.. attribute:: interface_name
Interface name
**type**\: str
**length:** 0..256
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class SessionApplicationData(Entity):
"""
Session application data
.. attribute:: xconnect
Xconnect data
**type**\: :py:class:`Xconnect <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Sessions.Session.SessionApplicationData.Xconnect>`
.. attribute:: vpdn
VPDN data
**type**\: :py:class:`Vpdn <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Sessions.Session.SessionApplicationData.Vpdn>`
.. attribute:: l2tp_sh_sess_app_type
l2tp sh sess app type
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Xconnect(Entity):
"""
Xconnect data
.. attribute:: circuit_name
Circuit name
**type**\: str
.. attribute:: sessionvc_id
Session VC ID
**type**\: int
**range:** 0..4294967295
.. attribute:: is_circuit_state_up
True if circuit state is up
**type**\: bool
.. attribute:: is_local_circuit_state_up
True if local circuit state is up
**type**\: bool
.. attribute:: is_remote_circuit_state_up
True if remote circuit state is up
**type**\: bool
.. attribute:: ipv6_protocol_tunneling
IPv6ProtocolTunneling
**type**\: bool
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Vpdn(Entity):
"""
VPDN data
.. attribute:: username
Session username
**type**\: str
.. attribute:: interface_name
Interface name
**type**\: str
**pattern:** [a\-zA\-Z0\-9./\-]+
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Session(Entity):
"""
L2TP control messages counters
.. attribute:: unavailable
L2TP session unavailable information
**type**\: :py:class:`Unavailable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Session.Unavailable>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
class Unavailable(Entity):
"""
L2TP session unavailable information
.. attribute:: sessions_on_hold
Number of session ID in hold database
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
| [
37811,
28289,
62,
40,
2640,
62,
55,
49,
62,
28286,
4954,
62,
75,
17,
28286,
62,
3575,
220,
198,
198,
1212,
8265,
4909,
257,
4947,
286,
575,
15567,
17336,
198,
1640,
28289,
314,
2640,
41441,
55,
49,
13275,
41441,
75,
17,
28286,
5301,... | 1.391106 | 178,182 |
from django.utils.html import strip_tags
from django.core.mail import EmailMultiAlternatives
from markdown2 import markdown
from django.db import models
from email_service.constants import ATTACHMENT_TYPE_CHOICES
| [
6738,
42625,
14208,
13,
26791,
13,
6494,
1330,
10283,
62,
31499,
198,
6738,
42625,
14208,
13,
7295,
13,
4529,
1330,
9570,
29800,
23081,
2929,
198,
6738,
1317,
2902,
17,
1330,
1317,
2902,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
498... | 3.6 | 60 |
from mutagen.easyid3 import EasyID3
from mutagen.id3 import ID3, TORY, TYER, TPUB, APIC, USLT, COMM
from mutagen.mp4 import MP4, MP4Cover
from mutagen.flac import Picture, FLAC
from mutagen.oggvorbis import OggVorbis
from mutagen.oggopus import OggOpus
import urllib.request
import base64
from spotdl.metadata import EmbedderBase
from spotdl.metadata import BadMediaFileError
import logging
logger = logging.getLogger(__name__)
# Apple has specific tags - see mutagen docs -
# http://mutagen.readthedocs.io/en/latest/api/mp4.html
M4A_TAG_PRESET = {
"album": "\xa9alb",
"artist": "\xa9ART",
"date": "\xa9day",
"title": "\xa9nam",
"year": "\xa9day",
"originaldate": "purd",
"comment": "\xa9cmt",
"group": "\xa9grp",
"writer": "\xa9wrt",
"genre": "\xa9gen",
"tracknumber": "trkn",
"albumartist": "aART",
"discnumber": "disk",
"cpil": "cpil",
"albumart": "covr",
"copyright": "cprt",
"tempo": "tmpo",
"lyrics": "\xa9lyr",
"comment": "\xa9cmt",
"explicit": "rtng",
}
TAG_PRESET = {}
for key in M4A_TAG_PRESET.keys():
TAG_PRESET[key] = key
class EmbedderDefault(EmbedderBase):
"""
A class for applying metadata on media files.
Examples
--------
- Applying metadata on an already downloaded MP3 file:
>>> from spotdl.metadata_search import MetadataSearch
>>> provider = MetadataSearch("ncs spectre")
>>> metadata = provider.on_youtube()
>>> from spotdl.metadata.embedders import EmbedderDefault
>>> embedder = EmbedderDefault()
>>> embedder.as_mp3("media.mp3", metadata)
"""
supported_formats = ("mp3", "m4a", "flac", "ogg", "opus")
def as_mp3(self, path, metadata, cached_albumart=None):
"""
Apply metadata on MP3 media files.
Parameters
----------
path: `str`
Path to the media file.
metadata: `dict`
Metadata (standardized) to apply to the media file.
cached_albumart: `bool`
An albumart image binary. If passed, the albumart URL
present in the ``metadata`` won't be downloaded or used.
"""
logger.debug('Writing MP3 metadata to "{path}".'.format(path=path))
# EasyID3 is fun to use ;)
# For supported easyid3 tags:
# https://github.com/quodlibet/mutagen/blob/master/mutagen/easyid3.py
# Check out somewhere at end of above linked file
audiofile = EasyID3(path)
self._embed_basic_metadata(audiofile, metadata, "mp3", preset=TAG_PRESET)
audiofile["media"] = metadata["type"]
audiofile["author"] = metadata["artists"][0]["name"]
audiofile["lyricist"] = metadata["artists"][0]["name"]
audiofile["arranger"] = metadata["artists"][0]["name"]
audiofile["performer"] = metadata["artists"][0]["name"]
provider = metadata["provider"]
audiofile["website"] = metadata["external_urls"][provider]
audiofile["length"] = str(metadata["duration"])
if metadata["publisher"]:
audiofile["encodedby"] = metadata["publisher"]
if metadata["external_ids"]["isrc"]:
audiofile["isrc"] = metadata["external_ids"]["isrc"]
audiofile.save(v2_version=3)
# For supported id3 tags:
# https://github.com/quodlibet/mutagen/blob/master/mutagen/id3/_frames.py
# Each class in the linked source file represents an id3 tag
audiofile = ID3(path)
if metadata["year"]:
audiofile["TORY"] = TORY(encoding=3, text=metadata["year"])
audiofile["TYER"] = TYER(encoding=3, text=metadata["year"])
if metadata["publisher"]:
audiofile["TPUB"] = TPUB(encoding=3, text=metadata["publisher"])
provider = metadata["provider"]
audiofile["COMM"] = COMM(
encoding=3, text=metadata["external_urls"][provider]
)
if metadata["lyrics"]:
audiofile["USLT"] = USLT(
encoding=3, desc=u"Lyrics", text=metadata["lyrics"]
)
if cached_albumart is None:
cached_albumart = urllib.request.urlopen(
metadata["album"]["images"][0]["url"]
).read()
try:
audiofile["APIC"] = APIC(
encoding=3,
mime="image/jpeg",
type=3,
desc=u"Cover",
data=cached_albumart,
)
except IndexError:
pass
audiofile.save(v2_version=3)
def as_m4a(self, path, metadata, cached_albumart=None):
"""
Apply metadata on FLAC media files.
Parameters
----------
path: `str`
Path to the media file.
metadata: `dict`
Metadata (standardized) to apply to the media file.
cached_albumart: `bool`
An albumart image binary. If passed, the albumart URL
present in the ``metadata`` won't be downloaded or used.
"""
logger.debug('Writing M4A metadata to "{path}".'.format(path=path))
# For supported m4a tags:
# https://github.com/quodlibet/mutagen/blob/master/mutagen/mp4/__init__.py
# Look for the class named `MP4Tags` in the linked source file
audiofile = MP4(path)
self._embed_basic_metadata(audiofile, metadata, "m4a", preset=M4A_TAG_PRESET)
if metadata["year"]:
audiofile[M4A_TAG_PRESET["year"]] = metadata["year"]
provider = metadata["provider"]
audiofile[M4A_TAG_PRESET["comment"]] = metadata["external_urls"][provider]
if metadata["lyrics"]:
audiofile[M4A_TAG_PRESET["lyrics"]] = metadata["lyrics"]
# Explicit values: Dirty: 4, Clean: 2, None: 0
audiofile[M4A_TAG_PRESET["explicit"]] = (4,) if metadata["explicit"] else (2,)
try:
if cached_albumart is None:
cached_albumart = urllib.request.urlopen(
metadata["album"]["images"][0]["url"]
).read()
audiofile[M4A_TAG_PRESET["albumart"]] = [
MP4Cover(cached_albumart, imageformat=MP4Cover.FORMAT_JPEG)
]
except IndexError:
pass
audiofile.save()
def as_flac(self, path, metadata, cached_albumart=None):
"""
Apply metadata on MP3 media files.
Parameters
----------
path: `str`
Path to the media file.
metadata: `dict`
Metadata (standardized) to apply to the media file.
cached_albumart: `bool`
An albumart image binary. If passed, the albumart URL
present in the ``metadata`` won't be downloaded or used.
"""
logger.debug('Writing FLAC metadata to "{path}".'.format(path=path))
# For supported flac tags:
# https://github.com/quodlibet/mutagen/blob/master/mutagen/mp4/__init__.py
# Look for the class named `MP4Tags` in the linked source file
audiofile = FLAC(path)
self._embed_basic_metadata(audiofile, metadata, "flac")
self._embed_ogg_metadata(audiofile, metadata)
self._embed_mbp_picture(audiofile, "metadata", cached_albumart, "flac")
audiofile.save()
| [
6738,
4517,
11286,
13,
38171,
312,
18,
1330,
16789,
2389,
18,
198,
6738,
4517,
11286,
13,
312,
18,
1330,
4522,
18,
11,
309,
15513,
11,
24412,
1137,
11,
309,
5105,
33,
11,
3486,
2149,
11,
1294,
27734,
11,
22240,
198,
6738,
4517,
1128... | 2.232444 | 3,261 |
from output.models.ms_data.model_groups.mg_l004_xsd.mg_l004 import (
Doc,
Foo,
)
__all__ = [
"Doc",
"Foo",
]
| [
6738,
5072,
13,
27530,
13,
907,
62,
7890,
13,
19849,
62,
24432,
13,
11296,
62,
75,
22914,
62,
87,
21282,
13,
11296,
62,
75,
22914,
1330,
357,
198,
220,
220,
220,
14432,
11,
198,
220,
220,
220,
36080,
11,
198,
8,
198,
198,
834,
4... | 1.909091 | 66 |
import datetime
from haystack import indexes
from .models import Hashtag
| [
11748,
4818,
8079,
198,
6738,
27678,
25558,
1330,
39199,
198,
6738,
764,
27530,
1330,
21059,
12985,
220,
628
] | 4.166667 | 18 |
#!/usr/bin/env python
"""
_GetParentAndGrandParentInfo_
Figure out parentage information for a file in WMBS. This will return
information about a file's parent and it's grand parent such as the
lfn, id and whether or not the file is merged. This will also determine
whether or not the file is a redneck parent or redneck child.
"""
from __future__ import division
from WMCore.Database.DBFormatter import DBFormatter
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
62,
3855,
24546,
1870,
23581,
24546,
12360,
62,
198,
198,
11337,
503,
2560,
496,
1321,
329,
257,
2393,
287,
30376,
4462,
13,
220,
770,
481,
1441,
198,
17018,
546,
257,
2393... | 3.725664 | 113 |
from CommonCode.queryExecutor import QueryExecuter
from CommonCode.strings import Strings
from Enums.databaseTables import Tables
from Searcher.searcherHelper import SearcherHelper
from Searcher.sercherConfig import SearcherConfig
from protobuff.entity_pb2 import StatusEnum
from protobuff.workertype_pb2 import UNKNOWN_WORKER_TYPE, WorkerTypeEnum
| [
6738,
8070,
10669,
13,
22766,
23002,
38409,
1330,
43301,
23002,
11894,
198,
6738,
8070,
10669,
13,
37336,
1330,
4285,
654,
198,
6738,
2039,
5700,
13,
48806,
51,
2977,
1330,
33220,
198,
6738,
42016,
2044,
13,
325,
283,
2044,
47429,
1330,
... | 3.684211 | 95 |
if foo: # reiz: tp
...
...
if foo: # reiz: tp
with foo:
...
with bar:
...
if foo:
...
if foo:
...
...
...
if foo:
with foo:
...
if foo:
with foo:
...
with bar:
...
with baz:
...
| [
361,
22944,
25,
220,
1303,
302,
528,
25,
256,
79,
198,
220,
220,
220,
2644,
198,
220,
220,
220,
2644,
198,
198,
361,
22944,
25,
220,
1303,
302,
528,
25,
256,
79,
198,
220,
220,
220,
351,
22944,
25,
198,
220,
220,
220,
220,
220,
... | 1.652941 | 170 |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# ------------------------------------
from torch import nn
import models
# pylint: disable=no-member
class Decoder(nn.Module):
"""
Decoder network
"""
| [
2,
20368,
650,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
5964,
13,
198,
2,
20368,
650,
198,
6738,
28034,
1330,
299,
77,
198,
11748,
4981,
198,
198,
2,
279,
2645,
600,
25,
15560,
28,
3919,
12,
19... | 3.929577 | 71 |
from kedro.pipeline import node
from kedro_viz.data_access.repositories import (
GraphEdgesRepository,
GraphNodesRepository,
)
from kedro_viz.models.graph import GraphEdge, GraphNode
| [
6738,
479,
276,
305,
13,
79,
541,
4470,
1330,
10139,
198,
198,
6738,
479,
276,
305,
62,
85,
528,
13,
7890,
62,
15526,
13,
260,
1930,
270,
1749,
1330,
357,
198,
220,
220,
220,
29681,
7407,
3212,
6207,
13264,
11,
198,
220,
220,
220,... | 2.694444 | 72 |
#coding:utf-8
#
# PROGRAM/MODULE: saturnin-sdk
# FILE: examples/dummy/api.py
# DESCRIPTION: API for Dummy microservice
# CREATED: 18.12.2019
#
# The contents of this file are subject to the MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Copyright (c) 2019 Firebird Project (www.firebirdsql.org)
# All Rights Reserved.
#
# Contributor(s): Pavel Císař (original code)
# ______________________________________.
"""Saturnin SDK examples - API for Dummy microservice
This microservice does nothing, and is intended for testing of service management machinery.
It's possible to configure the service to fail (raise an exception) during `initialize()`,
`aquire_resources()`, `release_resources()`, `start_activities()` or `stop_activities()`.
"""
from __future__ import annotations
import uuid
from enum import Enum, auto
from functools import partial
from firebird.base.config import create_config, EnumOption, ListOption
from saturnin.base import VENDOR_UID, ComponentConfig, pkg_name, AgentDescriptor, ServiceDescriptor
# OID: iso.org.dod.internet.private.enterprise.firebird.butler.platform.saturnin.micro.dummy
SERVICE_OID: str = '1.3.6.1.4.1.53446.1.2.0.3.0'
SERVICE_UID: uuid.UUID = uuid.uuid5(uuid.NAMESPACE_OID, SERVICE_OID)
SERVICE_VERSION: str = '0.1.0'
# Configuration
class DummyConfig(ComponentConfig):
"""Text file reader microservice configuration.
"""
# Service description
SERVICE_AGENT: AgentDescriptor = \
AgentDescriptor(uid=SERVICE_UID,
name="saturnin.micro.dummy",
version=SERVICE_VERSION,
vendor_uid=VENDOR_UID,
classification="test/dummy")
SERVICE_DESCRIPTOR: ServiceDescriptor = \
ServiceDescriptor(agent=SERVICE_AGENT,
api=[],
description="Test dummy microservice",
facilities=[],
package=pkg_name(__name__),
factory=f'{pkg_name(__name__)}.service:MicroDummySvc',
config=partial(create_config, DummyConfig,
f'{SERVICE_AGENT.name}.service'))
| [
2,
66,
7656,
25,
40477,
12,
23,
198,
2,
198,
2,
46805,
14,
33365,
24212,
25,
3332,
700,
259,
12,
21282,
74,
198,
2,
45811,
25,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
6096,
14,
67,
13513,
14,
15042,
13,
9078,
198,
2,... | 2.73887 | 1,168 |
#!/usr/bin/env python
from pyrl.components import Energy, Name
from pyrl.components.action import SimpleAction, ponder
from pyrl.resources import Messages
from pyrl.world_helpers import ActionProcessor, act
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
279,
2417,
75,
13,
5589,
3906,
1330,
6682,
11,
6530,
198,
6738,
279,
2417,
75,
13,
5589,
3906,
13,
2673,
1330,
17427,
12502,
11,
37375,
198,
6738,
279,
2417,
75,
13,
37540,
133... | 3.409836 | 61 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
Pentestdb, a database for penetration test.
Copyright (c) 2015 alpha1e0
'''
import os
import sys
import logging
import types
import re
import urlparse
import urllib
import ConfigParser
from lxml import etree
import yaml
from colorama import Fore, Style
from appdirs import AppDirs
class WordList(object):
'''
字典文件迭代器
'''
class YamlConf(object):
'''
Yaml配置文件加载器
'''
class Output(object):
'''
终端输出功能
该类用于输出信息到控制台和文件
'''
_WIDTH = 80
_CHAR = "-"
def __init__(self, title=None, tofile=None):
'''
@params:
title: 输出的标题
tofile: 输出文件
'''
self._title = title
self._fileName = tofile
self._file = self._openFile(tofile)
@classmethod
def safeEncode(cls, msg, method=None):
'''
安全编码
如果msg中有不能编码的字节,自动处理为16进制
'''
if isinstance(msg, str):
return msg
elif isinstance(msg, unicode):
method = method.lower() if method else sys.stdin.encoding
try:
return msg.encode(method)
except UnicodeError:
resultList = []
for word in msg:
try:
encodedWord = word.encode(method)
except UnicodeError:
encodedWord = "\\x" + repr(word)[4:6] + "\\x" + repr(word)[6:8]
resultList.append(encodedWord)
return "".join(resultList)
else:
try:
msg = unicode(msg)
except UnicodeDecodeError:
msg = str(msg)
return cls.safeEncode(msg,method)
@classmethod
def R(cls, msg):
'''
字符串着色为红色
'''
return Fore.RED + msg + Style.RESET_ALL
@classmethod
def Y(cls, msg):
'''
字符串着色为橙色
'''
return Fore.YELLOW + msg + Style.RESET_ALL
@classmethod
def B(cls, msg):
'''
字符串着色为蓝色
'''
return Fore.BLUE + msg + Style.RESET_ALL
@classmethod
def G(cls, msg):
'''
字符串着色为绿色
'''
return Fore.GREEN + msg + Style.RESET_ALL
@classmethod
def raw(cls, msg):
'''
无颜色输出
'''
print cls.safeEncode(msg)
@classmethod
def red(cls, msg):
'''
打印红色信息
'''
cls.raw(cls.R(msg))
@classmethod
def yellow(cls, msg):
'''
打印橙色信息
'''
cls.raw(cls.Y(msg))
@classmethod
def blue(cls, msg):
'''
打印蓝色信息
'''
cls.raw(cls.B(msg))
@classmethod
def green(cls, msg):
'''
打印绿色信息
'''
cls.raw(cls.G(msg))
@classmethod
@classmethod
@classmethod
def write(self, data):
'''
写入数据到文件
'''
if self._file:
try:
self._file.write(data)
return True
except IOError:
raise PenError("write output file '{0}' failed".format(self._fileName))
else:
return False
def writeLine(self, line, parser=None):
'''
写入一行数据到文件
@params:
line: 待写入的数据
parser: 处理待写入数据的回调函数
'''
if self._file:
if parser and isinstance(parser, types.FunctionType):
line = parser(line)
try:
self._file.write(line + "\n")
return True
except IOError:
raise PenError("write output file '{0}' failed".format(self._fileName))
else:
return False
def _banner(self):
'''
生成banner信息
'''
fmt = "|{0:^" + "{0}".format(self._WIDTH+7) + "}|"
banner = "+" + self._CHAR * (self._WIDTH-2) + "+\n"
banner = banner + fmt.format(self.Y("PentestDB.") + " Tools and Resources for Web Penetration Test.") + "\n"
banner = banner + fmt.format(self.G("https://github.com/alpha1e0/pentestdb")) + "\n"
banner = banner + "+" + self._CHAR * (self._WIDTH-2) + "+\n"
return banner
class Log(object):
'''
Log class
support:critical, error, warning, info, debug, notset
Params:
logname: specify the logname
toConsole: whether outputing to console
tofile: whether to logging to file
'''
class URL(object):
'''
URL处理
'''
_urlPattern = re.compile(r"^((?:http(?:s)?\://)?(?:[-0-9a-zA-Z_]+\.)+(?:[-0-9a-zA-Z_]+)(?:\:\d+)?)[^:]*$")
_ipPattern = re.compile(r"^(?:http(s)?\://)?(\d+\.){3}(\d+)(?:\:\d+)?.*")
@classmethod
def check(cls, url):
'''
检查URL格式是否正确
'''
matchs = cls._urlPattern.match(url)
if not matchs:
return False
else:
return True
@classmethod
def isIP(cls, url):
'''
检查URL是否是ip类型的url
'''
matchs = cls._ipPattern.match(url)
if matchs:
return True
else:
return False
@classmethod
def _completeURL(cls, url):
'''
补全URL
如果URL不包含协议类型,则补全协议类型
'''
if "://" not in url:
url = "http://" + url
if not cls.check(url):
raise PenError("url format error")
return url
@classmethod
def format(cls, url):
'''
格式化url
@returns:
protocol/url/host/path/baseURL/params: baseURL类似于dirname
@examples:
http://www.aaa.com/path/index.php?a=1&b=2
protocol: http
uri: http://www.aaa.com/path/index.php
host: www.aaa.com
path: /path/index.php
baseURL: http://www.aaa.com/path/ baseURL依据URL末尾是否有"/"来判断,返回结果以"/"结束
params: {'a': '1', 'b': '2'}
'''
url = cls._completeURL(url)
parsed = urlparse.urlparse(url)
protocol = parsed[0]
host = parsed[1]
uri = parsed[0] + "://" + parsed[1] + parsed[2]
path = parsed[2]
if not path.endswith("/"):
sp = path.split("/")
baseURL = parsed[0] + "://" + parsed[1] + "/".join(sp[0:-1]) + "/"
else:
baseURL = uri
params = dict()
for param in parsed[4].split("&"):
if not param:
continue
sp = param.split("=")
try:
params[sp[0]] = urllib.unquote(sp[1])
except IndexError:
params[sp[0]] = ""
return Dict(protocol=protocol,uri=uri,host=host,path=path,baseURL=baseURL,params=params)
@classmethod
@classmethod
conf = Config()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
12,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
47,
298,
395,
9945,
11,
257,
6831,
329,
23271,
1332,
13,
198,
15269,
357,
66,
8,
1853,
17130,
16,
68... | 1.651533 | 4,141 |
# Author: Simon Liedtke <liedtke.simon@googlemail.com>
#
# This module was developed with funding provided by
# the Google Summer of Code (2013).
import fnmatch
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import pytest
from sunpy.database.commands import AddEntry, RemoveEntry, EditEntry,\
AddTag, RemoveTag, NoSuchEntryError, NonRemovableTagError,\
EmptyCommandStackError, CommandManager, CompositeOperation
from sunpy.database.tables import DatabaseEntry, Tag
@pytest.fixture
@pytest.fixture
| [
2,
6434,
25,
11288,
406,
798,
83,
365,
1279,
18511,
83,
365,
13,
14323,
261,
31,
13297,
4529,
13,
785,
29,
198,
2,
198,
2,
770,
8265,
373,
4166,
351,
4918,
2810,
416,
198,
2,
262,
3012,
10216,
286,
6127,
357,
6390,
737,
198,
198... | 3.289017 | 173 |
"""
Base class for Backlog all object
"""
from .. import exceptions
class BacklogBase:
    """
    Base class for Backlog API objects.

    Objects are identified by id and class name.  Subclasses are expected
    to provide ``_attr`` (iterable of ``(attribute_name, response_key)``
    pairs), a ``client`` exposing ``fetch_json``, and an ``id`` attribute
    (presumably set in a subclass ``__init__`` -- TODO confirm).
    """

    _endpoint = None
    # CRUD operations a concrete resource may expose.
    _crud_func = ('all', 'get', 'filter', 'create', 'update', 'delete')

    def from_json(self, response):
        """
        Populate this object from a JSON response.

        NOTE: the original decorated this with ``@property``; a property
        getter cannot take a ``response`` argument, and sibling methods
        invoke it as a regular method (``.from_json(x)``), so the
        decorator has been removed.

        :param dict response: dict type object
        :return: self, or None when *response* is not a dict
        """
        if not isinstance(response, dict):
            return None
        for key, res in self._attr:
            setattr(self, key, response.get(res, None))
        return self

    def _all(self):
        """
        Fetch every object from the endpoint.
        """
        res = self.client.fetch_json(self._endpoint, method='GET')
        return [self.__class__(self.client).from_json(x) for x in res]

    def _get(self, id_=None):
        """
        Fetch a single object.  ``self.id`` takes precedence over *id_*.
        """
        if self.id is not None:
            id_ = self.id
        res = self.client.fetch_json(f'{self._endpoint}/{id_}', method='GET')
        return self.__class__(self.client).from_json(res)

    def _filter(self, **params):
        """
        Fetch the objects matching the given query parameters.
        """
        res = self.client.fetch_json(self._endpoint, method='GET', query_params=params)
        return [self.__class__(self.client).from_json(x) for x in res]

    def _create(self, **params):
        """
        Create a new object with the given fields.
        """
        res = self.client.fetch_json(self._endpoint, method='POST', post_params=params)
        return self.__class__(self.client).from_json(res)

    def _update(self, id_=None, **params):
        """
        Update the object.  ``self.id`` takes precedence over *id_*.
        """
        if self.id is not None:
            id_ = self.id
        res = self.client.fetch_json(f'{self._endpoint}/{id_}', method='POST', post_params=params)
        return self.__class__(self.client).from_json(res)
| [
37811,
198,
14881,
1398,
329,
5157,
6404,
477,
2134,
198,
37811,
198,
198,
6738,
11485,
1330,
13269,
628,
198,
4871,
5157,
6404,
14881,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
7308,
1398,
318,
5174,
416,
4686,
290,
1398,
14... | 2.226371 | 857 |
from unittest import mock
from django.test import TestCase
from rest_framework.fields import CharField
from ..serializers import ContentField
class ContentFieldTestCase(TestCase):
    """
    Tests for the ContentField serializer field.

    NOTE(review): ``self.serializer_field`` and ``TestObj`` are expected
    to be provided elsewhere in this module -- confirm.
    """

    def test_existing_fields(self):
        """
        The returned value is a dict of the serialized values of the fields.
        """
        source = mock.MagicMock(
            field1='value of field 1',
            field2='value of field 2',
        )
        result = self.serializer_field.to_representation(source)
        expected = {
            'field1': 'value of field 1',
            'field2': 'value of field 2',
        }
        self.assertEqual(result, expected)

    def test_non_existing_fields(self):
        """
        An object lacking the specific fields serializes to an empty dict.
        """
        result = self.serializer_field.to_representation(TestObj())
        self.assertEqual(result, {})

    def test_None(self):
        """
        A None object serializes to an empty dict.
        """
        result = self.serializer_field.to_representation(None)
        self.assertEqual(result, {})
| [
6738,
555,
715,
395,
1330,
15290,
198,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
1334,
62,
30604,
13,
25747,
1330,
3178,
15878,
198,
198,
6738,
11485,
46911,
11341,
1330,
14041,
15878,
628,
198,
4871,
14041,
15878,
... | 2.241135 | 564 |
"""
List-comprehension expressions.
"""
l = [num for num in range(10) if num % 2 == 0]
print(l)  # [0, 2, 4, 6, 8]

# Equivalent written as an explicit loop
l = []
for num in range(10):
    if num % 2 == 0:
        l.append(num)

"""
Dict-comprehension expressions.
"""
d = {num: num + 10 for num in range(10)}
print(d)  # {0: 10, 1: 11, ..., 9: 19}

# Equivalent written as an explicit loop
d = {}
for num in range(10):
    d[num] = num + 10

"""
Generator expressions.
"""
g = (num for num in range(10))
# A generator object is returned, not a materialized sequence
print(g)  # <generator object <genexpr> at 0x...>
print('使用__next__')
print(next(g))
print('通过循环打印生成器中的值')
for value in g:
    print(value)
| [
7061,
6,
198,
26344,
245,
26193,
101,
37955,
22755,
238,
26193,
101,
164,
122,
122,
28156,
237,
198,
7061,
6,
198,
75,
796,
685,
72,
329,
1312,
287,
2837,
7,
940,
8,
611,
1312,
4064,
362,
6624,
657,
60,
198,
4798,
7,
75,
8,
220,... | 1.478992 | 357 |
import nextcord, random, datetime
from typing import List
from util.loaders.json import get_path
from util.constants import Emojis
""" TicTacToe """
"""Wordle"""
EMOJI_CODES = Emojis.EMOJI_CODES
cwd = get_path()

# Word lists: `popular_words` holds the possible answers (indexed by
# puzzle id); `all_words` is the full accepted-guess dictionary.
# Use context managers so the file handles are closed promptly -- the
# original `open(...).read()` calls leaked both handles.
with open(cwd + "/game/wordle/popular.txt") as _words_file:
    popular_words = _words_file.read().splitlines()
with open(cwd + "/game/wordle/sowpods.txt") as _words_file:
    all_words = set(word.strip() for word in _words_file)
def generate_colored_word(guess: str, answer: str) -> str:
    """
    Build a string of emoji codes in which each guessed letter is
    colored according to the Wordle key:

    - Same letter, same place: Green
    - Same letter, different place: Yellow
    - Different letter: Gray

    Args:
        guess (str): The guessed word to be colored
        answer (str): The puzzle's answer

    Returns:
        str: A string of emoji codes
    """
    tiles = [EMOJI_CODES["gray"][ch] for ch in guess]
    guess_pool = list(guess)
    answer_pool = list(answer)

    # First pass: exact-position matches turn green; consume both letters
    # so they cannot also count as yellow.
    for idx in range(len(guess_pool)):
        if guess_pool[idx] == answer_pool[idx]:
            tiles[idx] = EMOJI_CODES["green"][guess_pool[idx]]
            answer_pool[idx] = None
            guess_pool[idx] = None

    # Second pass: remaining letters present elsewhere turn yellow,
    # consuming one matching answer letter each time.
    for idx in range(len(guess_pool)):
        letter = guess_pool[idx]
        if letter is not None and letter in answer_pool:
            tiles[idx] = EMOJI_CODES["yellow"][letter]
            answer_pool[answer_pool.index(letter)] = None

    return "".join(tiles)
def generate_blanks() -> str:
    """
    Build the empty row shown for an unplayed guess.

    Returns:
        str: Five white-square emoji characters
    """
    return 5 * "\N{WHITE MEDIUM SQUARE}"
def generate_puzzle_embed(bot, user: nextcord.User, puzzle_id: int) -> nextcord.Embed:
    """
    Generate an embed for a new puzzle given the puzzle id and user.

    Args:
        bot: The bot (unused; kept for interface compatibility -- the
            original set a bot author line that was immediately
            overwritten by the player author line below)
        user (nextcord.User): The user who submitted the puzzle
        puzzle_id (int): The puzzle ID

    Returns:
        nextcord.Embed: The embed to be sent
    """
    # Board starts as six blank guess rows.  (The original re-assigned
    # the identical description a second time; that dead store is gone.)
    embed = nextcord.Embed(
        title="🎲 | **Play `Wordle` with me**",
        description="\n".join([generate_blanks()] * 6),
        color=nextcord.Color.blue(),
    )
    # The author line identifies the player; guesses from anyone else
    # are rejected (see process_message_as_guess).
    embed.set_author(name=user.name, icon_url=user.display_avatar.url)
    # The footer carries the puzzle id; update_embed parses it back out.
    embed.set_footer(
        text=f"ID: {puzzle_id} ︱ To play, use the command /play!\n"
        "To guess, reply to this message with a word."
    )
    return embed
def update_embed(embed: nextcord.Embed, guess: str) -> nextcord.Embed:
    """
    Apply a new guess to the game-board embed.

    Args:
        embed (nextcord.Embed): The embed to be updated
        guess (str): The guess made by the user

    Returns:
        nextcord.Embed: The updated embed
    """
    # The footer text is "ID: <puzzle_id> ..." -- recover the answer.
    puzzle_id = int(embed.footer.text.split()[1])
    answer = popular_words[puzzle_id]

    # Replace the first remaining blank row with the colored guess.
    blank_row = generate_blanks()
    embed.description = embed.description.replace(
        blank_row, generate_colored_word(guess, answer), 1
    )

    # Game-over handling: text after a double newline marks the end of
    # the game (see is_game_over).
    remaining = embed.description.count(blank_row)
    if guess == answer:
        praise = {
            0: "Phew!",
            1: "Great!",
            2: "Splendid!",
            3: "Impressive!",
            4: "Magnificent!",
            5: "Genius!",
        }[remaining]
        embed.description += "\n\n" + praise
    elif remaining == 0:
        embed.description += f"\n\nThe answer was {answer}!"
    return embed
def is_valid_word(word: str) -> bool:
    """
    Tell whether *word* is in the accepted-guess dictionary.

    Args:
        word (str): The word to validate

    Returns:
        bool: Whether the word is valid
    """
    valid = word in all_words
    return valid
def random_puzzle_id() -> int:
    """
    Pick a random puzzle ID.

    Returns:
        int: A random index into ``popular_words``
    """
    # randrange(n) is equivalent to randint(0, n - 1)
    return random.randrange(len(popular_words))
def daily_puzzle_id() -> int:
    """
    Compute the puzzle ID for today's daily puzzle.

    The ID is the number of days since 2022-01-01, wrapped around the
    number of available puzzles so every day maps to a valid index.

    Returns:
        int: The puzzle ID for the daily puzzle
    """
    epoch = datetime.date(2022, 1, 1)
    today = datetime.datetime.now().date()
    return (today - epoch).days % len(popular_words)
def is_game_over(embed: nextcord.Embed) -> bool:
    """
    Tell whether the game in *embed* has finished.

    A finished game always has a result line appended after a blank
    line (see update_embed), so the marker is simply a double newline
    in the description.

    Args:
        embed (nextcord.Embed): The embed to check

    Returns:
        bool: Whether the game is over
    """
    return embed.description.find("\n\n") >= 0
def generate_info_embed() -> nextcord.Embed:
    """
    Build the informational "About Wordle" embed.

    Returns:
        nextcord.Embed: The embed to be sent
    """
    join_url = "https://discord.com/api/oauth2/authorize?client_id=932265924541681727&permissions=11264&scope=bot%20applications.commands"
    discord_url = "https://discord.io/OpenSourceGames"
    youtube_url = "https://tiny.cc/DiscoHuge-YT"
    github_url = "https://github.com/abindent/Nextcord-Utility-Bot"

    # Assemble the description line-by-line; empty entries produce the
    # blank lines between sections.
    lines = [
        "Discord Wordle is a game of wordle-like puzzle solving.",
        "",
        "**You can start a game with**",
        "",
        f"{Emojis.sunny} `/playwordle <choose daily from the options>` - Play the puzzle of the day",
        f"{Emojis.game_die} `/playwordle <choose random from the options>` - Play a random puzzle",
        f"{Emojis.boxing_glove} `/playwordle <choose id from the options> <puzzle_id>` - Play a puzzle by ID",
        "",
        f"{Emojis.member_join} [Add this bot to your server]({join_url})",
        f"{Emojis.discord} [Join my Discord server]({discord_url})",
        f"{Emojis.youtube} [YouTube tutorial on the making of this bot]({youtube_url})",
        f"{Emojis.github} [View the source code on GitHub]({github_url})",
        "",
    ]
    return nextcord.Embed(title="About Wordle", description="\n".join(lines))
async def _reject_guess(message: nextcord.Message, reply_text: str) -> None:
    """Reply to an invalid guess and quietly remove it.

    The reply self-destructs after 5 seconds; deleting the user's message
    may fail (e.g. missing permissions), which is non-fatal.
    """
    await message.reply(reply_text, delete_after=5)
    try:
        await message.delete(delay=5)
    except Exception:
        pass


async def process_message_as_guess(
    bot: nextcord.Client, message: nextcord.Message
) -> bool:
    """
    Check if a new message is a reply to a Wordle game.
    If so, validate the guess and update the bot's message.

    (The repeated reply-then-delete boilerplate of the original has been
    factored into the private ``_reject_guess`` helper above.)

    Args:
        bot (nextcord.Client): The bot
        message (nextcord.Message): The new message to process

    Returns:
        bool: True if the message was processed as a guess, False otherwise
    """
    # Only consider replies whose resolved target is a real message.
    ref = message.reference
    if not ref or not isinstance(ref.resolved, nextcord.Message):
        return False

    parent = ref.resolved

    # The reply must target one of this bot's own messages ...
    if parent.author.id != bot.user.id:
        return False

    # ... and that message must carry a game embed.
    if not parent.embeds:
        return False

    embed = parent.embeds[0]
    guess = message.content.lower()

    # Only the player who started the game may guess.
    if (
        embed.author.name != message.author.name
        or embed.author.icon_url != message.author.display_avatar.url
    ):
        reply = "Start a new game with /play"
        if embed.author:
            reply = f"This game was started by {embed.author.name}. " + reply
        await _reject_guess(message, reply)
        return True

    if is_game_over(embed):
        await _reject_guess(
            message, "The game is already over. Start a new game with /play"
        )
        return True

    # A guess is exactly one word.
    if len(message.content.split()) > 1:
        await _reject_guess(message, "Please respond with a single 5-letter word.")
        return True

    if not is_valid_word(guess):
        await _reject_guess(message, "That is not a valid word")
        return True

    # Valid guess: update the board on the bot's message.
    embed = update_embed(embed, guess)
    await parent.edit(embed=embed)

    # Tidy up the user's guess message; failure is non-fatal.
    try:
        await message.delete()
    except Exception:
        pass
    return True
| [
11748,
1306,
66,
585,
11,
4738,
11,
4818,
8079,
198,
6738,
19720,
1330,
7343,
198,
6738,
7736,
13,
2220,
364,
13,
17752,
1330,
651,
62,
6978,
198,
6738,
7736,
13,
9979,
1187,
1330,
2295,
13210,
271,
198,
198,
37811,
309,
291,
51,
33... | 2.425116 | 3,679 |
# -*- coding: utf-8 -*-
"""
orm sql schema package.
"""
from pyrin.packaging.base import Package
class ORMSQLSchemaPackage(Package):
    """
    orm sql schema package class.

    NAME is this module's dotted path (``__name__``).
    """

    NAME = __name__
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
579,
44161,
32815,
5301,
13,
198,
37811,
198,
198,
6738,
279,
2417,
259,
13,
8002,
3039,
13,
8692,
1330,
15717,
628,
198,
4871,
6375,
5653,
48,
6561,
2395,
... | 2.5875 | 80 |
from math import sqrt

# Project Euler #10: print the sum of all primes below two million.
m = 2000000

# Sieve of Eratosthenes: sieve[i] stays 1 while i is still considered
# prime.  (The original trial-division loop was quadratic and also
# buggy: `nums.remove(nums.index(i*n))` removes the *value* found at
# the multiple's index, not the multiple itself.)
sieve = bytearray([1]) * m
sieve[0] = sieve[1] = 0
for n in range(2, int(sqrt(m)) + 1):
    if sieve[n]:
        # Mark every multiple of n, starting at n*n, as composite.
        sieve[n * n::n] = bytearray(len(range(n * n, m, n)))

# Sum the indices still marked prime.
s = sum(i for i, is_prime in enumerate(sieve) if is_prime)
print(s)
| [
6738,
10688,
1330,
19862,
17034,
198,
76,
796,
939,
2388,
198,
77,
5700,
796,
2837,
7,
17,
11,
76,
8,
198,
1050,
999,
796,
685,
17,
60,
198,
82,
796,
362,
198,
1640,
299,
287,
997,
82,
25,
198,
220,
220,
220,
6994,
796,
6407,
... | 1.716981 | 212 |
# -*- coding: utf-8 -*-
##############################################################################
#
# India-GST
#
# Merlin Tecsol Pvt. Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo addon manifest for the India-GST (Goods & Service Tax) module.
    'name': 'India-GST',
    'description': "Goods & Service-Tax.",
    'version': '1.0.1',
    'category': 'Accounting',
    'author': 'Merlin Tecsol Pvt. Ltd.',
    'website': 'http://www.merlintecsol.com',
    'summary': 'Indian GST Reports',
    'license': 'AGPL-3',
    # Addons that must be installed first.
    'depends': ['sale','purchase','account','report_xlsx'],
    # Data files loaded on install/upgrade, in order (views, base data,
    # report definitions, wizards).
    'data': [
        "views/gst_view.xml",
        "views/gst_sale_view.xml",
        "views/gst_purchase_view.xml",
        "data/tax_data.xml",
        "data/res.country.state.csv",
        "data/fiscal_data.xml",
        "report/gst_sales_invoice_pdf.xml",
        "report/gst_invoice_pdf.xml",
        "report/gst_invoice.xml",
        'report/gst_b2b.xml',
        'wizard/gstr_b2b_wizard.xml',
        'wizard/gstr_b2cl_wizard.xml',
        'report/gst_b2cl_report.xml',
        'wizard/gstr_b2cs_wizard.xml',
        'report/gst_b2cs_report.xml',
        'wizard/gstr_hsn_wizard.xml',
        'report/gst_hsn_report.xml',
        'wizard/gstr_export_wizard.xml',
        'report/gst_export_report.xml',
        'views/port_code.xml',
    ],
    'images': ['static/description/banner.png'],
}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
29113,
29113,
7804,
4242,
2235,
198,
2,
198,
2,
220,
220,
220,
3794,
12,
38,
2257,
198,
2,
198,
2,
220,
220,
220,
32918,
48257,
34453,
18367,
83,
13,
12052,
13,
198,... | 2.43535 | 843 |
import pyeccodes.accessors as _
| [
11748,
279,
5948,
535,
4147,
13,
15526,
669,
355,
4808,
628
] | 3 | 11 |
"""
Test utilities for LKPY tests.
"""
import os
import os.path
import logging
from contextlib import contextmanager
import numpy as np
from .. import matrix
import pytest
from lenskit.datasets import MovieLens, ML100K
# Module-level logger for these test utilities.
_log = logging.getLogger(__name__)

# Dataset handles shared across the test-suite.
# NOTE(review): constructing these at import time assumes the dataset
# directories exist -- confirm.
ml_test = MovieLens('ml-latest-small')
ml100k = ML100K()
def rand_csr(nrows=100, ncols=50, nnz=1000, values=True):
    "Generate a random CSR for testing."
    # Draw nnz distinct flat positions, then split them into (row, col)
    # coordinates; values are standard-normal when requested.
    flat = np.random.choice(np.arange(ncols * nrows, dtype=np.int32), nnz, False)
    row_idx = np.mod(flat, nrows, dtype=np.int32)
    col_idx = np.floor_divide(flat, nrows, dtype=np.int32)
    data = np.random.randn(nnz) if values else None
    return matrix.CSR.from_coo(row_idx, col_idx, data, (nrows, ncols))
@contextmanager
# Mark for tests that require the Numba JIT; skipped when the
# NUMBA_DISABLE_JIT environment variable is set.
wantjit = pytest.mark.skipif('NUMBA_DISABLE_JIT' in os.environ,
                             reason='JIT required')
| [
37811,
198,
14402,
20081,
329,
406,
42,
47,
56,
5254,
13,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
11748,
18931,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
... | 2.328982 | 383 |
from pprint import pprint
| [
6738,
279,
4798,
1330,
279,
4798,
628
] | 3.857143 | 7 |
"""Train and test bigram classifier"""
import dga_classifier.data as data
from keras.layers.core import Dense
from keras.models import Sequential
import sklearn
from sklearn import feature_extraction
from sklearn.cross_validation import train_test_split
def build_model(max_features):
    """Builds logistic regression model

    A single sigmoid unit over the bigram feature vector, trained with
    binary cross-entropy -- i.e. logistic regression in Keras form.
    """
    net = Sequential()
    net.add(Dense(1, activation='sigmoid', input_dim=max_features))
    net.compile(optimizer='adam', loss='binary_crossentropy')
    return net
def run(max_epoch=50, nfolds=10, batch_size=128):
    """Run train/test on logistic regression model

    Trains one bigram logistic-regression model per fold, with early
    stopping on held-out AUC, and collects per-fold predictions and
    metrics.

    max_epoch  -- upper bound on training epochs per fold
    nfolds     -- number of train/test splits to evaluate
    batch_size -- minibatch size passed to Keras fit()
    """
    indata = data.get_data()

    # Extract data and labels
    X = [x[1] for x in indata]
    labels = [x[0] for x in indata]

    # Create feature vectors: character-bigram counts over each domain.
    print "vectorizing data"
    ngram_vectorizer = feature_extraction.text.CountVectorizer(analyzer='char', ngram_range=(2, 2))
    count_vec = ngram_vectorizer.fit_transform(X)

    max_features = count_vec.shape[1]

    # Convert labels to 0-1 (anything not 'benign' counts as malicious)
    y = [0 if x == 'benign' else 1 for x in labels]

    final_data = []

    for fold in range(nfolds):
        print "fold %u/%u" % (fold+1, nfolds)
        X_train, X_test, y_train, y_test, _, label_test = train_test_split(count_vec, y,
                                                                           labels, test_size=0.2)

        print 'Build model...'
        model = build_model(max_features)

        print "Train..."
        # Carve a 5% holdout off the training split to drive early stopping.
        X_train, X_holdout, y_train, y_holdout = train_test_split(X_train, y_train, test_size=0.05)
        best_iter = -1
        best_auc = 0.0
        out_data = {}

        for ep in range(max_epoch):
            model.fit(X_train.todense(), y_train, batch_size=batch_size, nb_epoch=1)

            t_probs = model.predict_proba(X_holdout.todense())
            t_auc = sklearn.metrics.roc_auc_score(y_holdout, t_probs)

            print 'Epoch %d: auc = %f (best=%f)' % (ep, t_auc, best_auc)

            if t_auc > best_auc:
                # New best epoch: snapshot test-set predictions/metrics.
                best_auc = t_auc
                best_iter = ep

                probs = model.predict_proba(X_test.todense())

                out_data = {'y':y_test, 'labels': label_test, 'probs':probs, 'epochs': ep,
                            'confusion_matrix': sklearn.metrics.confusion_matrix(y_test, probs > .5)}

                print sklearn.metrics.confusion_matrix(y_test, probs > .5)
            else:
                # No longer improving...break and calc statistics
                if (ep-best_iter) > 5:
                    break

        final_data.append(out_data)

    return final_data
| [
37811,
44077,
290,
1332,
1263,
859,
1398,
7483,
37811,
198,
11748,
288,
4908,
62,
4871,
7483,
13,
7890,
355,
1366,
198,
6738,
41927,
292,
13,
75,
6962,
13,
7295,
1330,
360,
1072,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
19... | 2.108152 | 1,239 |
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torch import autograd
import time
import os
import fire
import random
import numpy as np
from nasmc.models import NonlinearSSM, NonlinearSSMProposal
from nasmc.datasets import NonlinearSSMDataset
from nasmc.filters import nonlinear_ssm_smc
if __name__ == '__main__':
    # Expose NASMCTrainer through a python-fire command-line interface.
    # NOTE(review): NASMCTrainer is expected to be defined elsewhere in
    # this module -- confirm.
    fire.Fire(NASMCTrainer)
| [
11748,
28034,
198,
198,
6738,
28034,
13,
26791,
13,
83,
22854,
3526,
1330,
21293,
34379,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
198,
6738,
28034,
1330,
1960,
519,
6335,
198,
198,
11748,
640,
198,
11748,
28686,
19... | 3.067164 | 134 |
from setuptools import setup
# Distribution metadata for the desktop-sorting utility.
setup(
    name='desktopsort',
    version='1.1',
    # NOTE(review): the package directory 'desksort' does not match the
    # distribution name 'desktopsort' -- confirm which spelling is intended.
    packages=['desksort'],
    url='',
    license='',
    author='MasterOfAllEvil',
    description=''
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
8906,
21841,
2840,
419,
3256,
198,
220,
220,
220,
2196,
11639,
16,
13,
16,
3256,
198,
220,
220,
220,
10392,
28,
17816,
8906,
591,
419,
6,
4357... | 2.384615 | 78 |
#!/usr/bin/python
# -*- coding= utf-8 -*-
import sys
import os
import time
import string
from threading import current_thread
from collections import deque as ThreadQueue
import traceback
from autocomplete import *
from scrollFrame import VerticalScrolledFrame
import math
# Cached platform name ('posix', 'nt', ...) for OS-specific behaviour.
OS_name = os.name

# Tool version identifiers; combined into the banner string below.
icom_version = 5
icom_subversion = 69
icom_version_description = 'ICOM Tools Version %d.%d\n'%(icom_version,icom_subversion)

# Short aliases for the Tkinter anchor/sticky constants.
# NOTE(review): `Tkinter` is not imported explicitly in this file --
# presumably it arrives via `from autocomplete import *`; confirm.
N=Tkinter.N
S=Tkinter.S
E=Tkinter.E
W=Tkinter.W
default_config_xml_content=r'''<?xml version="1.0" encoding="utf-8"?>
<icom>
<config>
<expert_mode>0</expert_mode>
<shell>cmd.exe</shell>
<textbackground>white</textbackground>
<textforeground>black</textforeground>
<textwidth>80</textwidth>
<entryheight>25</entryheight>
<listheight>6</listheight>
<poll_timer>3000</poll_timer>
<baudrate>115200</baudrate>
<send_encoding>utf-8</send_encoding>
<diaplay_encoding>utf-8</diaplay_encoding>
<auto_start_multicast_server>1</auto_start_multicast_server>
<auto_start_tcp_server>0</auto_start_tcp_server>
<auto_start_udp_server>0</auto_start_udp_server>
<tcp_srv_port>3000</tcp_srv_port>
<udp_srv_port>3000</udp_srv_port>
<multicast_ip>224.0.0.119</multicast_ip>
<multicast_port>30000</multicast_port>
<parse_key_word>61 74 </parse_key_word>
<realtime>0</realtime>
</config>
<groups>
<group name="AT" timeout="2000" tail="\r\n">
<string desc="modem off">at+cfun=0</string>
<string desc="modem on">at+cfun=1</string>
<string desc="show infomation">ati</string>
<string desc="show version">at^version?</string>
<string desc="sysinfoex query">at^sysinfoex</string>
<string desc="syscfgex query">at^syscfgex?</string>
<string desc="pin status query">at^cpin?</string>
<string desc="echo on">ate</string>
<string desc="show AT error string">at+cmee=2</string>
<string desc="ussd mode">at^ussdmode=0</string>
<string desc="ussd 123 query number">AT+CUSD=1,"123",15</string>
</group>
<group name="linux" timeout="500" tail="\r\n">
<string desc="busybox sh">busybox sh</string>
<string desc="ifconfig">ifconfig</string>
<string desc="list">ls</string>
<string desc="ps">ps</string>
<string desc="show current dir">pwd</string>
</group>
<group name="DOS" timeout="200" tail="\r\n">
<string desc="list dir">dir</string>
<string desc="list dir">cd</string>
<string desc="show netcard ip">ipconfig</string>
<string desc="show route">route print</string>
</group>
</groups>
</icom>'''
| [
2,
48443,
14629,
14,
8800,
14,
29412,
201,
198,
2,
532,
9,
12,
19617,
28,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
11748,
25064,
201,
198,
11748,
28686,
201,
198,
11748,
640,
201,
198,
11748,
4731,
201,
198,
201,
198,
6... | 2.373483 | 1,071 |
from aiogram import Dispatcher, Bot
from data import config
# Bot instance; HTML parse mode formats all outgoing messages as HTML.
bot = Bot(token=config.BOT_TOKEN, parse_mode="HTML")
# Dispatcher bound to the project-configured event loop.
dp = Dispatcher(bot, loop=config.loop)
| [
6738,
257,
72,
21857,
1330,
3167,
8071,
2044,
11,
18579,
198,
6738,
1366,
1330,
4566,
198,
198,
13645,
796,
18579,
7,
30001,
28,
11250,
13,
33,
2394,
62,
10468,
43959,
11,
21136,
62,
14171,
2625,
28656,
4943,
198,
26059,
796,
3167,
80... | 2.886792 | 53 |
print('=== IDENTIFICANDO VOGAIS =====')
words = ('Computador', 'Programaçao', 'Cadeira', 'Mousepad', 'Fone')
vogais = ('a', 'e', 'i', 'o', 'u')
for c in words:
print(f'\nNa palavra {c} temos', end=' ')
for v in c:
if v.lower() in 'aeiou':
print(v, end='')
| [
4798,
10786,
18604,
4522,
3525,
30643,
6981,
46,
569,
7730,
32,
1797,
29335,
11537,
198,
10879,
796,
19203,
5377,
1996,
7079,
3256,
705,
15167,
64,
16175,
5488,
3256,
705,
34,
671,
8704,
3256,
705,
39643,
15636,
3256,
705,
37,
505,
1153... | 2.028571 | 140 |
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read functionality for OGR XLSX driver.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2012, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import shutil
sys.path.append('../pymod')
import gdaltest
from osgeo import gdal
from osgeo import ogr
###############################################################################
# Check
###############################################################################
# Basic tests
###############################################################################
# Test OGR_XLSX_HEADERS = DISABLE
###############################################################################
# Test OGR_XLSX_FIELD_TYPES = STRING
###############################################################################
# Run test_ogrsf
###############################################################################
# Test write support
###############################################################################
# Test reading a file using inlineStr representation.
###############################################################################
# Test update support
###############################################################################
# Test number of columns > 26 (#5774)
###############################################################################
# Test Integer64
###############################################################################
# Test DateTime with milliseconds
###############################################################################
# Test reading sheet with more than 26 columns with holes (#6363)"
###############################################################################
# Test reading a sheet whose file is stored as "absolute" in
# workbook.xml.rels (#6733)
###############################################################################
# Test that data types are correctly picked up even if first row is missing data
###############################################################################
# Test that field names are picked up even if last field has no data
###############################################################################
# Test appending a layer to an existing document
###############################################################################
# Test Boolean
# Ordered list of test functions executed by the runner below.
# NOTE(review): the ogr_xlsx_* functions are expected to be defined
# earlier in this file -- confirm.
gdaltest_list = [
    ogr_xlsx_1,
    ogr_xlsx_2,
    ogr_xlsx_3,
    ogr_xlsx_4,
    ogr_xlsx_5,
    ogr_xlsx_6,
    ogr_xlsx_7,
    ogr_xlsx_8,
    ogr_xlsx_9,
    ogr_xlsx_10,
    ogr_xlsx_11,
    ogr_xlsx_12,
    ogr_xlsx_13,
    ogr_xlsx_14,
    ogr_xlsx_15,
    ogr_xlsx_boolean,
]

if __name__ == '__main__':
    # Register the suite, run every test, and exit with pass/fail status.
    gdaltest.setup_run('ogr_xlsx')
    gdaltest.run_tests(gdaltest_list)
    sys.exit(gdaltest.summarize())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
29113,
29113,
7804,
4242,
21017,
198,
2,
720,
7390,
3,
198,
2,
198,
2,
4935,
25,
220,
27044,
1847,
14,
49656,
6208,
26264,
198,
2,
32039,
25,
220,
6208,
1100,
11244,
329,
440,
10761,... | 4.105986 | 1,019 |
# Read an expression and check that its parentheses are balanced.
exp = input('Digite uma expressão: ')
pilha = []  # stack of pending '(' characters
for v in exp:
    if v == '(':
        pilha.append('(')
    elif v == ')':
        if len(pilha) > 0:
            pilha.pop()
        else:
            # Unmatched ')': flag it and stop scanning.
            pilha.append(')')
            break
# Balanced iff every '(' was matched and no stray ')' was seen.
# (Fixes the grammatical error 'O expressão' in the invalid-case message.)
print('A expressão é valida' if len(pilha) == 0 else 'A expressão não é valida')
| [
11201,
796,
965,
7,
15414,
10786,
19511,
578,
334,
2611,
4911,
28749,
25,
705,
4008,
198,
79,
346,
3099,
796,
17635,
198,
1640,
410,
287,
1033,
25,
198,
220,
220,
220,
611,
410,
6624,
29513,
10354,
198,
220,
220,
220,
220,
220,
220,... | 1.905882 | 170 |
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reformulation of comparison chain expressions.
Consult the Developer Manual for information. TODO: Add ability to sync
source code comments with Developer Manual sections.
"""
from nuitka.nodes.AssignNodes import (
StatementAssignmentVariable,
StatementReleaseVariable,
)
from nuitka.nodes.ComparisonNodes import makeComparisonExpression
from nuitka.nodes.ConditionalNodes import makeStatementConditional
from nuitka.nodes.OperatorNodesUnary import ExpressionOperationNot
from nuitka.nodes.OutlineNodes import ExpressionOutlineBody
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.VariableRefNodes import ExpressionTempVariableRef
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
from .TreeHelpers import (
buildNode,
getKind,
makeStatementsSequenceFromStatement,
)
| [
2,
220,
220,
220,
220,
15069,
33448,
11,
17356,
9075,
268,
11,
6920,
1462,
25,
5568,
13,
71,
323,
268,
31,
14816,
13,
785,
198,
2,
198,
2,
220,
220,
220,
220,
2142,
286,
366,
45,
5013,
4914,
1600,
281,
45780,
11361,
17050,
326,
... | 3.391213 | 478 |
"""Generate target."""
from itertools import chain
import json
from pathlib import Path
import shlex
import subprocess
from tempfile import NamedTemporaryFile
import typing as t
import yaml
from sausage.engines import JinjaEngine
from sausage.wildcards import (
get_wildcard_candidates, has_wildcard, replace_wildcards
)
class ContextRecipe(t.NamedTuple):
    """Recipe to build context for templates."""
    recipe: str  # path in src/ or command

    def with_replaced_wildcards(self, replacement: str) -> "ContextRecipe":
        """Construct ContextRecipe with replaced '%'s."""
        return ContextRecipe(replace_wildcards(self.recipe, replacement))

    def eval(self, root: Path) -> t.Any:
        """Evaluate context in root/src/.

        If ``recipe`` names a file under ``root/src`` it is parsed as
        JSON, then as YAML.  Otherwise (or when neither parse succeeds)
        the recipe is executed as a command in ``root/src``; the
        command's stdout is returned unless a ``$out`` placeholder file
        was used, in which case that file's contents are returned.

        :raises subprocess.CalledProcessError: if the command fails.
        """
        src = root/"src"
        path = src/self.recipe
        if path.is_file():
            text = path.read_text()
            try:
                return json.loads(text)
            except json.decoder.JSONDecodeError:
                # Not JSON -- try YAML.  The original attached this
                # ParserError handler to the *outer* try, where it could
                # never fire (json.loads only raises JSONDecodeError), so
                # YAML parse errors escaped instead of falling through to
                # command execution.  Nesting realises the intended
                # fallback chain: JSON -> YAML -> command.
                try:
                    return yaml.safe_load(text)
                except yaml.parser.ParserError:
                    pass
        with NamedTemporaryFile() as temp_file:
            out = Path(temp_file.name)
            tokens = [
                str(out) if a == "$out" else a
                for a in shlex.split(self.recipe)
            ]
            proc = subprocess.run(
                tokens,
                text=True,
                capture_output=True,
                check=True,
                cwd=src
            )
            return out.read_text() if str(out) in tokens else proc.stdout
class Target(t.NamedTuple):
    """A file to be generated from a template."""
    name: str
    template: Path
    context: t.Optional[ContextRecipe] = None
    # NOTE(review): this default is one shared dict (a NamedTuple class
    # attribute); it is never mutated here, but confirm callers do not
    # mutate it either.
    namespace: t.Dict[str, ContextRecipe] = {}

    def eval_context(self, root: Path) -> t.Any:
        """Evaluate the template context in root/src/."""
        result = self.context.eval(root) if self.context else {}
        if isinstance(result, dict):
            # Namespace entries only apply when the base context is a dict.
            for key, recipe in self.namespace.items():
                result[key] = recipe.eval(root)
        return result

    def generate(self, root: Path) -> str:
        """Render the template with the evaluated context."""
        engine = JinjaEngine(root)
        return engine.render(self.template, self.eval_context(root))

    def get_globs(self) -> t.Iterable[str]:
        """Yield the wildcard patterns this target uses, converting '%'
        into '*' so they can be globbed."""
        recipes = list(self.namespace.values())
        if self.context:
            recipes.append(self.context)
        for recipe in recipes:
            for token in shlex.split(recipe.recipe):
                if has_wildcard(token):
                    yield replace_wildcards(token, "*")

    def expand(self, root: Path) -> t.Iterator["Target"]:
        """Expand into concrete targets.

        '%' placeholders are substituted with every candidate value that
        satisfies *all* of the target's wildcard patterns.  A target
        without wildcards yields only itself.
        """
        if not has_wildcard(self.name):
            yield self
            return
        candidate_sets = (
            set(get_wildcard_candidates(root, pattern))
            for pattern in self.get_globs()
        )
        for replacement in set.intersection(*candidate_sets):
            yield Target(
                name=replace_wildcards(self.name, replacement),
                template=self.template,
                context=(
                    self.context.with_replaced_wildcards(replacement)
                    if self.context else None
                ),
                namespace={
                    key: recipe.with_replaced_wildcards(replacement)
                    for key, recipe in self.namespace.items()
                },
            )
| [
37811,
8645,
378,
2496,
526,
15931,
198,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
11748,
33918,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
427,
2588,
198,
11748,
850,
14681,
198,
6738,
20218,
7753,
1330,
34441,
12966,
5551,
8979,... | 2.149128 | 1,777 |
#!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""This module contains the class definition for the macro server door"""
__all__ = ["MacroProxy", "BaseInputHandler", "MSDoor"]
__docformat__ = 'restructuredtext'
import weakref
import collections
from taurus.core.util.log import Logger
from sardana import ElementType
from sardana.sardanaevent import EventType
from sardana.macroserver.msbase import MSObject
from sardana.macroserver.msparameter import Type
from sardana.macroserver.msexception import MacroServerException
class MSDoor(MSObject):
"""Sardana door object"""
macro_executor = property(get_macro_executor)
running_macro = property(get_running_macro)
last_macro = property(get_last_macro)
pylab_handler = property(get_pylab_handler, set_pylab_handler)
pylab = property(get_pylab)
pyplot_handler = property(get_pyplot_handler, set_pyplot_handler)
pyplot = property(get_pyplot)
input_handler = property(get_input_handler, set_input_handler)
report_logger = property(get_report_logger)
def report(self, msg, *args, **kwargs):
"""
Record a log message in the sardana report (if enabled) with default
level **INFO**. The msg is the message format string, and the args are
the arguments which are merged into msg using the string formatting
operator. (Note that this means that you can use keywords in the
format string, together with a single dictionary argument.)
*kwargs* are the same as :meth:`logging.Logger.debug` plus an optional
level kwargs which has default value **INFO**
Example::
self.report("this is an official report!")
:param msg: the message to be recorded
:type msg: :obj:`str`
:param args: list of arguments
:param kwargs: list of keyword arguments"""
return self.macro_server.report(msg, *args, **kwargs)
state = property(get_state, set_state)
status = property(get_status, set_status)
result = property(get_result, set_result)
macro_status = property(get_macro_status, set_macro_status)
record_data = property(get_record_data, set_record_data)
def get_env(self, key=None, macro_name=None):
"""Gets the environment with the context for this door matching the
given parameters:
- macro_name defines the context where to look for the environment. If
None, the global environment is used. If macro name is given the
environment in the context of that macro is given
- If key is None it returns the complete environment, otherwise
key must be a string containing the environment variable name.
:param key:
environment variable name [default: None, meaning all environment]
:type key: :obj:`str`
:param macro_name:
local context for a given macro [default: None, meaning no macro
context is used]
:type macro_name: :obj:`str`
:raises: UnknownEnv"""
return self.macro_server.environment_manager.getAllDoorEnv(self.name)
def __getattr__(self, name):
"""Get methods from macro server"""
return getattr(self.macro_server, name)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
29113,
29113,
7804,
4242,
2235,
198,
2235,
198,
2,
770,
2393,
318,
636,
286,
46997,
2271,
198,
2235,
198,
2,
2638,
1378,
2503,
13,
82,
446,
2271,
12,
13716,
82,
13,
2398,
14,
... | 3.010138 | 1,381 |
fruits = ['apple', 'banana', 'carrot', 'pear', 'cherries']
del fruits[0]
print(fruits)
| [
69,
50187,
796,
37250,
18040,
3256,
705,
3820,
2271,
3256,
705,
7718,
10599,
3256,
705,
431,
283,
3256,
705,
2044,
1678,
20520,
198,
12381,
15921,
58,
15,
60,
198,
4798,
7,
69,
50187,
8,
198
] | 2.485714 | 35 |
import sys
from panda3d.core import TextNode
from direct.showbase.ShowBase import ShowBase
from direct.gui.DirectGui import DirectLabel
from metagui.gui import SizeSpec
from metagui.gui import WholeScreen
from metagui.gui import HorizontalFrame
from metagui.gui import VerticalFrame
from metagui.gui import Empty
from metagui.gui import Element
from metagui.gui import spacer
from metagui.gui import filler
if __name__ == '__main__':
Application()
filler_style = dict(
text="filler",
text_pos=(0, -0.02),
text_align=TextNode.ACenter,
text_scale=0.07,
frameColor=(0,0.5,0,1),
)
gui = WholeScreen(
HorizontalFrame(
VerticalFrame(
Element(
DirectLabel,
kwargs=dict(
text="Foo",
text_pos=(0, -0.02),
text_align=TextNode.ACenter,
text_scale=0.07,
frameColor=(1,0,0,1),
),
size_spec=SizeSpec(h_min=0.3, h_weight=0.0, w_min=0.3, w_weight=0.0),
),
filler(filler_style),
weight=0.0,
),
Empty(),
VerticalFrame(
Element(
DirectLabel,
kwargs=dict(
text="Foo",
text_pos=(0, -0.02),
text_align=TextNode.ACenter,
text_scale=0.07,
frameColor=(1,0,0,1),
),
size_spec=SizeSpec(h_min=0.5, h_weight=0.0, w_min=0.5, w_weight=0.0),
),
filler(filler_style),
weight=0.0,
),
),
)
gui.create()
base.run()
| [
11748,
25064,
198,
198,
6738,
279,
5282,
18,
67,
13,
7295,
1330,
8255,
19667,
198,
6738,
1277,
13,
12860,
8692,
13,
15307,
14881,
1330,
5438,
14881,
198,
6738,
1277,
13,
48317,
13,
13470,
8205,
72,
1330,
4128,
33986,
198,
198,
6738,
1... | 1.676895 | 1,108 |
"""Generated message classes for vpcaccess version v1alpha1.
API for managing VPC access connectors.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'vpcaccess'
class Connector(_messages.Message):
r"""Definition of a Serverless VPC Access connector.
Enums:
StatusValueValuesEnum: Output only. Status of the VPC access connector.
Fields:
connectedProjects: Output only. List of projects using the connector.
id: Identifier for the connector, short form of the name. Example:
`access1`.
ipCidrRange: The range of internal addresses that follows RFC 4632
notation. Example: `10.132.0.0/28`.
machineType: Machine type of VM Instance underlying connector. Default is
e2-micro.
maxInstances: Maximum value of instances in autoscaling group underlying
the connector.
maxThroughput: Maximum throughput of the connector in Mbps. Default is
300, max is 1000.
minInstances: Minimum value of instances in autoscaling group underlying
the connector.
minThroughput: Minimum throughput of the connector in Mbps. Default and
min is 200.
name: The resource name in the format
`projects/*/locations/*/connectors/*`.
network: Name of a VPC network.
status: Output only. Status of the VPC access connector.
subnet: The subnet in which to house the VPC Access Connector.
"""
class StatusValueValuesEnum(_messages.Enum):
r"""Output only. Status of the VPC access connector.
Values:
STATUS_UNSPECIFIED: Invalid state.
READY: Connector is deployed and ready to receive traffic.
CREATING: An Insert operation is in progress.
DELETING: A Delete operation is in progress.
ERROR: Connector is in a bad state, manual deletion recommended.
UPDATING: The connector is being updated.
"""
STATUS_UNSPECIFIED = 0
READY = 1
CREATING = 2
DELETING = 3
ERROR = 4
UPDATING = 5
connectedProjects = _messages.StringField(1, repeated=True)
id = _messages.StringField(2)
ipCidrRange = _messages.StringField(3)
machineType = _messages.StringField(4)
maxInstances = _messages.IntegerField(5, variant=_messages.Variant.INT32)
maxThroughput = _messages.IntegerField(6, variant=_messages.Variant.INT32)
minInstances = _messages.IntegerField(7, variant=_messages.Variant.INT32)
minThroughput = _messages.IntegerField(8, variant=_messages.Variant.INT32)
name = _messages.StringField(9)
network = _messages.StringField(10)
status = _messages.EnumField('StatusValueValuesEnum', 11)
subnet = _messages.MessageField('Subnet', 12)
class HeartbeatConnectorRequest(_messages.Message):
r"""Heartbeat requests come in from each connector VM to report their IP and
serving state.
Fields:
heartbeatTime: Required. When this request was sent.
ipAddress: Required. The IP address of the VM.
lameduck: If the VM is in lameduck mode, meaning that it is in the process
of shutting down and should not be used for new connections.
"""
heartbeatTime = _messages.StringField(1)
ipAddress = _messages.StringField(2)
lameduck = _messages.BooleanField(3)
class HeartbeatConnectorResponse(_messages.Message):
r"""This is an empty placeholder (as opposed to using google.protobuf.Empty)
for fields to potentially be added in the future.
"""
class ListConnectorsResponse(_messages.Message):
r"""Response for listing Serverless VPC Access connectors.
Fields:
connectors: List of Serverless VPC Access connectors.
nextPageToken: Continuation token.
"""
connectors = _messages.MessageField('Connector', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListLocationsResponse(_messages.Message):
r"""The response message for Locations.ListLocations.
Fields:
locations: A list of locations that matches the specified filter in the
request.
nextPageToken: The standard List next-page token.
"""
locations = _messages.MessageField('Location', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListOperationsResponse(_messages.Message):
r"""The response message for Operations.ListOperations.
Fields:
nextPageToken: The standard List next-page token.
operations: A list of operations that matches the specified filter in the
request.
"""
nextPageToken = _messages.StringField(1)
operations = _messages.MessageField('Operation', 2, repeated=True)
class Location(_messages.Message):
r"""A resource that represents Google Cloud Platform location.
Messages:
LabelsValue: Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
MetadataValue: Service-specific metadata. For example the available
capacity at the given location.
Fields:
displayName: The friendly name for this location, typically a nearby city
name. For example, "Tokyo".
labels: Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
locationId: The canonical id for this location. For example: `"us-east1"`.
metadata: Service-specific metadata. For example the available capacity at
the given location.
name: Resource name for the location, which may vary between
implementations. For example: `"projects/example-project/locations/us-
east1"`
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""Service-specific metadata. For example the available capacity at the
given location.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
displayName = _messages.StringField(1)
labels = _messages.MessageField('LabelsValue', 2)
locationId = _messages.StringField(3)
metadata = _messages.MessageField('MetadataValue', 4)
name = _messages.StringField(5)
class Operation(_messages.Message):
r"""This resource represents a long-running operation that is the result of
a network API call.
Messages:
MetadataValue: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
ResponseValue: The normal response of the operation in case of success. If
the original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Fields:
done: If the value is `false`, it means the operation is still in
progress. If `true`, the operation is completed, and either `error` or
`response` is available.
error: The error result of the operation in case of failure or
cancellation.
metadata: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
name: The server-assigned name, which is only unique within the same
service that originally returns it. If you use the default HTTP mapping,
the `name` should be a resource name ending with
`operations/{unique_id}`.
response: The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""Service-specific metadata associated with the operation. It typically
contains progress information and common metadata such as create time.
Some services might not provide such metadata. Any method that returns a
long-running operation should document the metadata type, if any.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ResponseValue(_messages.Message):
r"""The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the response
is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Messages:
AdditionalProperty: An additional property for a ResponseValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ResponseValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
done = _messages.BooleanField(1)
error = _messages.MessageField('Status', 2)
metadata = _messages.MessageField('MetadataValue', 3)
name = _messages.StringField(4)
response = _messages.MessageField('ResponseValue', 5)
class OperationMetadata(_messages.Message):
r"""Metadata for google.longrunning.Operation.
Fields:
createTime: Output only. Time when the operation was created.
endTime: Output only. Time when the operation completed.
method: Output only. Method that initiated the operation e.g.
google.cloud.vpcaccess.v1.Connectors.CreateConnector.
target: Output only. Name of the resource that this operation is acting on
e.g. projects/my-project/locations/us-central1/connectors/v1.
"""
createTime = _messages.StringField(1)
endTime = _messages.StringField(2)
method = _messages.StringField(3)
target = _messages.StringField(4)
class OperationMetadataV1Alpha1(_messages.Message):
r"""Metadata for google.longrunning.Operation.
Fields:
endTime: Output only. Time when the operation completed.
insertTime: Output only. Time when the operation was created.
method: Output only. Method that initiated the operation e.g.
google.cloud.vpcaccess.v1alpha1.Connectors.CreateConnector.
target: Output only. Name of the resource that this operation is acting on
e.g. projects/my-project/locations/us-central1/connectors/v1.
"""
endTime = _messages.StringField(1)
insertTime = _messages.StringField(2)
method = _messages.StringField(3)
target = _messages.StringField(4)
class OperationMetadataV1Beta1(_messages.Message):
r"""Metadata for google.longrunning.Operation.
Fields:
createTime: Output only. Time when the operation was created.
endTime: Output only. Time when the operation completed.
method: Output only. Method that initiated the operation e.g.
google.cloud.vpcaccess.v1beta1.Connectors.CreateConnector.
target: Output only. Name of the resource that this operation is acting on
e.g. projects/my-project/locations/us-central1/connectors/v1.
"""
createTime = _messages.StringField(1)
endTime = _messages.StringField(2)
method = _messages.StringField(3)
target = _messages.StringField(4)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
class Status(_messages.Message):
r"""The `Status` type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by [gRPC](https://github.com/grpc). Each `Status` message contains
three pieces of data: error code, error message, and error details. You can
find out more about this error model and how to work with it in the [API
Design Guide](https://cloud.google.com/apis/design/errors).
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There is a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
r"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
class Subnet(_messages.Message):
r"""The subnet in which to house the connector.
Fields:
name: Subnet name (relative, not fully qualified). E.g. if the full subnet
selfLink is https://compute.googleapis.com/compute/v1/projects/{project}
/regions/{region}/subnetworks/{subnetName} the correct input for this
field would be {subnetName}
projectId: Project in which the subnet exists. If not set, this project is
assumed to be the project for which the connector create request was
issued.
"""
name = _messages.StringField(1)
projectId = _messages.StringField(2)
class VpcaccessProjectsLocationsConnectorsCreateRequest(_messages.Message):
r"""A VpcaccessProjectsLocationsConnectorsCreateRequest object.
Fields:
connector: A Connector resource to be passed as the request body.
parent: Required. The project and location in which the configuration
should be created, specified in the format `projects/*/locations/*`.
"""
connector = _messages.MessageField('Connector', 1)
parent = _messages.StringField(2, required=True)
class VpcaccessProjectsLocationsConnectorsDeleteRequest(_messages.Message):
r"""A VpcaccessProjectsLocationsConnectorsDeleteRequest object.
Fields:
name: Required. Name of a Serverless VPC Access connector to delete.
"""
name = _messages.StringField(1, required=True)
class VpcaccessProjectsLocationsConnectorsGetRequest(_messages.Message):
r"""A VpcaccessProjectsLocationsConnectorsGetRequest object.
Fields:
name: Required. Name of a Serverless VPC Access connector to get.
"""
name = _messages.StringField(1, required=True)
class VpcaccessProjectsLocationsConnectorsHeartbeatRequest(_messages.Message):
r"""A VpcaccessProjectsLocationsConnectorsHeartbeatRequest object.
Fields:
heartbeatConnectorRequest: A HeartbeatConnectorRequest resource to be
passed as the request body.
name: Required.
"""
heartbeatConnectorRequest = _messages.MessageField('HeartbeatConnectorRequest', 1)
name = _messages.StringField(2, required=True)
class VpcaccessProjectsLocationsConnectorsListRequest(_messages.Message):
r"""A VpcaccessProjectsLocationsConnectorsListRequest object.
Fields:
pageSize: Maximum number of functions to return per call.
pageToken: Continuation token.
parent: Required. The project and location from which the routes should be
listed.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class VpcaccessProjectsLocationsConnectorsPatchRequest(_messages.Message):
r"""A VpcaccessProjectsLocationsConnectorsPatchRequest object.
Fields:
connector: A Connector resource to be passed as the request body.
name: The resource name in the format
`projects/*/locations/*/connectors/*`.
updateMask: The fields to update on the entry group. If absent or empty,
all modifiable fields are updated.
"""
connector = _messages.MessageField('Connector', 1)
name = _messages.StringField(2, required=True)
updateMask = _messages.StringField(3)
class VpcaccessProjectsLocationsListRequest(_messages.Message):
r"""A VpcaccessProjectsLocationsListRequest object.
Fields:
filter: A filter to narrow down results to a preferred subset. The
filtering language accepts strings like "displayName=tokyo", and is
documented in more detail in [AIP-160](https://google.aip.dev/160).
name: The resource that owns the locations collection, if applicable.
pageSize: The maximum number of results to return. If not set, the service
selects a default.
pageToken: A page token received from the `next_page_token` field in the
response. Send that page token to receive the subsequent page.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
class VpcaccessProjectsLocationsOperationsGetRequest(_messages.Message):
r"""A VpcaccessProjectsLocationsOperationsGetRequest object.
Fields:
name: The name of the operation resource.
"""
name = _messages.StringField(1, required=True)
class VpcaccessProjectsLocationsOperationsListRequest(_messages.Message):
r"""A VpcaccessProjectsLocationsOperationsListRequest object.
Fields:
filter: The standard list filter.
name: The name of the operation's parent resource.
pageSize: The standard list page size.
pageToken: The standard list page token.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| [
37811,
8645,
515,
3275,
6097,
329,
410,
14751,
15526,
2196,
410,
16,
26591,
16,
13,
198,
198,
17614,
329,
11149,
569,
5662,
1895,
34472,
13,
198,
37811,
198,
2,
24550,
25,
770,
2393,
318,
1960,
519,
877,
515,
290,
815,
407,
307,
130... | 3.23017 | 7,312 |
# !/usr/bin/env python
# coding: utf-8
import json
from pathlib import Path
import joblib
import numpy as np
HERE = Path(__file__).parent
DATA_ROOT = HERE.joinpath('../../data')
SPLITS = ['train', 'val', 'test']
def list2vec(a_list):
"""convert a list representing part of a sharded dataset into one big numpy array"""
if type(a_list) == np.ndarray:
return a_list
elif type(a_list) == list:
if all([type(item) == list for item in a_list]):
a_list = [item for sublist in a_list for item in sublist]
if all([type(item) == str for item in a_list]):
a_list = np.asarray(a_list)
if all([type(item) == np.ndarray for item in a_list]):
a_list = np.concatenate(a_list)
return a_list
else:
raise TypeError('expected list or numpy array')
ALSO_ADD = ['set_sizes_by_stim_type', 'shard_train', 'shard_size']
GRID_SHAPE = (5, 5)
TRAIN_MASK = np.zeros(GRID_SHAPE).astype(np.int32)
TRAIN_MASK[:, :3] = 1
if __name__ == '__main__':
main()
| [
2,
5145,
14,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
11748,
33918,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
1693,
8019,
198,
11748,
299,
32152,
355,
45941,
198,
198,
39,
9338,
796,
10... | 2.261905 | 462 |