content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# ----------------------------------------------------------------------
# |
# | TypeAliasStatement.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2021-10-14 13:22:30
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2021
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
"""Contains the TypeAliasStatement object"""
import os
from typing import Callable, cast, Tuple, Union
import CommonEnvironment
from CommonEnvironment import Interface
from CommonEnvironmentEx.Package import InitRelativeImports
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
with InitRelativeImports():
from ..Common import Tokens as CommonTokens
from ...GrammarInfo import AST, DynamicPhrasesType, GrammarPhrase, ParserInfo
from ....Lexer.Phrases.DSL import (
CreatePhrase,
ExtractDynamic,
ExtractSequence,
ExtractToken,
)
from ....Parser.Parser import CreateParserRegions, GetParserInfo
from ....Parser.Statements.TypeAliasStatementParserInfo import (
TypeAliasStatementParserInfo,
TypeParserInfo,
)
# ----------------------------------------------------------------------
class TypeAliasStatement(GrammarPhrase):
"""\
Create a new type name.
'using' <name> '=' <type>
Examples:
using PositiveInt = Int<min_value=0>
"""
PHRASE_NAME = "Type Alias Statement"
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
@staticmethod
@Interface.override
| [
2,
16529,
23031,
201,
198,
2,
930,
201,
198,
2,
930,
220,
5994,
40489,
48682,
13,
9078,
201,
198,
2,
930,
201,
198,
2,
930,
220,
3271,
4373,
695,
1279,
9945,
31,
11006,
20644,
695,
13,
785,
29,
201,
198,
2,
930,
220,
220,
220,
... | 3.122478 | 694 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T E X A M P L E S
#
# www.pagebot.io
# Licensed under MIT conditions
#
# -----------------------------------------------------------------------------
#
# E20_UseBorders.py
#
from random import random
'''
# FIXME: shouldn't import DrawBot.
from drawBot import Variable
from drawBot.misc import DrawBotError
'''
from pagebot import getContext
from pagebot.constants import (A4, CENTER,TOP, BOTTOM, INLINE, OUTLINE, ONLINE,
EXPORT)
from pagebot.elements import *
from pagebot.document import Document
from pagebot.style import getRootStyle
from pagebot.toolbox.color import color, noColor
from pagebot.toolbox.units import pt
from pagebot.toolbox.transformer import path2FileName
ViewPadding = 64
PageSize = 500
GUTTER = 24 # Distance between the squares.
SQUARE = 3 * GUTTER # Size of the squares
DashWhite = 4
DashBlack = 4
LineType = ONLINE
FILENAME = path2FileName(__file__)
def draw(contextName):
"""Make a new document, using the rs as root style."""
exportPath = '%s/%s-%s.pdf' % (EXPORT, FILENAME, contextName)
context = getContext(contextName)
#W = H = 120 # Get the standard a4 width and height in points.
W = H = PageSize
# Hard coded SQUARE and GUTTE, just for simple demo, instead of filling
# padding an columns in the root style. Page size decides on the amount
# squares that is visible. Page padding is centered then.
sqx = int(W/(SQUARE + GUTTER)) # Whole amount of squares that fit on the page.
sqy = int(H/(SQUARE + GUTTER))
# Calculate centered paddings for the amount of fitting squares.
# Set values in the rootStyle, so we can compare with column calculated square position and sizes.
#rs['colH'] = rs['colW'] = SQUARE # Make default colW and colH square.
padX = (W - sqx*(SQUARE + GUTTER) + GUTTER)/2
my = (H - sqy*(SQUARE + GUTTER) + GUTTER)/2
doc = Document(title='Color Squares', w=W, h=H, context=context)
doc.view.padding = 0 # Don't show cropmarks in this example.
# Get list of pages with equal y, then equal x.
#page = doc[1][0] # Get the single page from te document.
page = doc.getPage(1) # Get page on pageNumber, first in row (this is only one now).
page.name = 'This demo page'
page.w = W
page.h = H
page.padding3D = padX # Set all 3 paddings to same value
page.gutter3D = GUTTER # Set all 3 gutters to same value
#newRect((0, 0), w=square, h=square, parent=page, fill=color(1, 0, 0), stroke=noColor)
for ix in range(sqx): # Run through the range of (0, 1, ...) number of horizontal squares
for iy in range(sqy): # Same with vertical squares
# Place squares in random colors
color1 = color(random()*0.5+0.5, 0.1, 0.6)
color2 = color(random()*0.5+0.5, 0.1, 0.6)
# Calculate the position for each square as combination
# of paddings and (ix, iy)
p = padX + ix * (SQUARE + GUTTER), my + iy * (SQUARE + GUTTER) # Make 2-dimensional point tuple.
# Create Rect object and place it in the page on position p
# Initialize the borders dicts on lineWidth == 0
e = newRect(xy=p, w=SQUARE, h=SQUARE, parent=page,
fill=color1, stroke=noColor, borders=1) # border=1 also works, identical.
#lineType = {-1:ONLINE, 0:INLINE, 1:ONLINE, 2:OUTLINE}[LineType]
e.borderLeft['line'] = ONLINE
e.borderLeft['stroke'] = color(0, 0, 0, 0.5)
e.borderLeft['dash'] = (DashWhite, DashBlack)
e.borderBottom['strokeWidth'] = pt((ix+1)*4)
e.borderBottom['line'] = ONLINE
e.borderBottom['stroke'] = color(0, 1, 0)
e.borderBottom['dash'] = (DashWhite, DashBlack)
e.borderTop['strokeWidth'] = pt((iy+1)*4)
e.borderTop['line'] = ONLINE
e.borderTop['stroke'] = color(1, 1, 0, 0.5)
e.borderRight['strokeWidth'] = pt((iy+1)*4)
e.borderRight['line'] = ONLINE
e.borderRight['stroke'] = color(0, 0, 1, 0.5)
page.solve()
doc.export(exportPath)
for contextName in ('DrawBot', 'Flat'):
draw(contextName)
'''
if __name__ == '__main__': # If running from DrawBot
Variable([
dict(name="LineType", ui="RadioGroup", args=dict(titles=[INLINE, ONLINE, OUTLINE],
isVertical=True)),
dict(name='DashWhite', ui='Slider', args=dict(minValue=0, value=8, maxValue=8)),
dict(name='DashBlack', ui='Slider', args=dict(minValue=0, value=0, maxValue=8)),
dict(name='PageSize', ui='Slider', args=dict(minValue=100, value=400, maxValue=800)),
], globals())
d = makeDocument()
d.export(EXPORT_PATH)
'''
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
16529,
32501,
198,
2,
198,
2,
220,
220,
220,
220,
350,
317,
402,
412,
347,
440,
309,
220,
412,
1395,
317,
337,
... | 2.523238 | 1,915 |
from . import linalg
from .advanced import *
from .arithmetic import *
from .functions import *
from .linear_algebra import *
| [
6738,
764,
1330,
300,
1292,
70,
198,
198,
6738,
764,
32225,
2903,
1330,
1635,
198,
6738,
764,
283,
29848,
1330,
1635,
198,
6738,
764,
12543,
2733,
1330,
1635,
198,
6738,
764,
29127,
62,
282,
29230,
1330,
1635,
198
] | 3.342105 | 38 |
from setuptools import setup,find_packages
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
]
with open('README.txt') as file:
long_description = file.read()
with open('requirements.txt') as reqs:
install_requires = reqs.read().splitlines()
print(find_packages())
setup(
name="entropytriangle",
version="1.0.2",
packages= find_packages(),
python_requires='>=3',
install_requires = install_requires,
author="Jaime de los Rios Mouvet",
author_email="jaime.delosriosmouvet@gmail.com",
classifiers=classifiers,
description="Calculation of the entropy triangles",
long_description=long_description,
keywords="Entropy Triangle Information Theory",
license="MIT",
url="https://github.com/Jaimedlrm/entropytriangle",
download_url="https://github.com/Jaimedlrm/entropytriangle.git",
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
19796,
62,
43789,
198,
198,
4871,
13350,
796,
685,
198,
220,
220,
220,
366,
41206,
12678,
7904,
513,
532,
12995,
1600,
198,
220,
220,
220,
366,
5317,
1631,
7591,
1240,
7904,
5800,
14,
25104,
16... | 2.890052 | 382 |
from multiprocessing import Process
import os
from CPAC.utils.utils import create_seeds_, create_group_log_template
from CPAC.utils import Configuration
import yaml
import time
from time import strftime
| [
6738,
18540,
305,
919,
278,
1330,
10854,
198,
11748,
28686,
198,
6738,
16932,
2246,
13,
26791,
13,
26791,
1330,
2251,
62,
325,
5379,
62,
11,
2251,
62,
8094,
62,
6404,
62,
28243,
198,
6738,
16932,
2246,
13,
26791,
1330,
28373,
198,
117... | 3.507937 | 63 |
from flask import Flask, render_template, request
import pymysql as asu
app = Flask(__name__)
@app.route('/')
if __name__ == '__main__':
app.run(debug = True) | [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
198,
11748,
279,
4948,
893,
13976,
355,
355,
84,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
198,
31,
1324,
13,
38629,
10786,
14,
11537,
198,
198,
361,
11593,
3672,... | 2.66129 | 62 |
# -*- coding: utf-8 -*-
"""UTILS MODULE
This module contains utility functions globally available to lenspack.
"""
import numpy as np
from astropy.units.core import Unit
from astropy.constants import G as G_newton
from astropy.constants import c as c_light
def round_up_to_odd(x):
"""Round up to the nearest odd integer."""
return (np.ceil(x) // 2 * 2 + 1).astype(int)
def convert_units(x, target):
"""Convert or attach units to a variable.
Parameters
----------
x : float
Quantity to convert.
target : str
Target units given as an acceptable astropy.units string (e.g. 'km').
Raises
------
Exception
If the conversion fails.
Examples
--------
>>> conv(5, 'kpc')
<Quantity 5. kpc>
>>> x = 4e14
>>> x = conv(x, 'solMass')
>>> conv(x, 'kg')
<Quantity 7.95390166e+44 kg>
"""
try:
x = x.to(Unit(target))
except AttributeError:
x = x * Unit(target)
except Exception as e:
raise
return x
def sigma_critical(zl, zs, cosmology):
"""Critical surface mass density between a lens and source galaxy(-ies).
Sigma_critical = [c^2 / (4 * pi * G)] * D_os / (D_ol * D_ls)
Angular diameter distances D are calculated in a universe specified by
an instance of astropy.cosmology.core.Cosmology.
Parameters
----------
zl : float
Redshift of the lens.
zs : array_like
Redshift(s) of the source galaxies.
cosmology : astropy.cosmology.core.Cosmology
Cosmological model.
Returns
-------
astropy.units.quantity.Quantity
Critical surface mass density between a lens (i.e. cluster or DM halo)
and each source redshift in units of solar masses per square parsec.
For sources at the redshift of the halo and below, Sigma_critical is
set to np.inf.
Examples
--------
...
TODO
----
Include the option for source redshift probability distributions.
"""
# Ensure vectorization
zs = np.atleast_1d(zs).astype(float)
assert (zs >= 0).all(), "Redshifts must be positive."
result = np.zeros_like(zs)
# Compute distances
d_ol = cosmology.angular_diameter_distance(zl)
d_os = cosmology.angular_diameter_distance(zs)
d_ls = cosmology.angular_diameter_distance_z1z2(zl, zs)
# Avoid division by zero
d_ls[d_ls == 0] = np.inf
# Compute Sigma_crit
factor = np.power(c_light, 2) / (4 * np.pi * G_newton)
result = factor * d_os / (d_ol * d_ls)
# Sources at lower z than the halo are not lensed
result[result <= 0] = np.inf
# Clean up
if len(zs) == 1:
result = result[0]
return convert_units(result, "solMass / pc2")
def bin2d(x, y, npix=10, v=None, w=None, extent=None, verbose=False):
"""Bin samples of a spatially varying quantity according to position.
The (weighted) average is taken of values falling into the same bin. This
function is relatively general, but it is mainly used within this package
to produce maps of the two components of shear from a galaxy catalog.
Parameters
----------
x, y : array_like
1D position arrays.
npix : int or list or tuple as (nx, ny), optional
Number of bins in the `x` and `y` directions. If an int N is given,
use (N, N). Binning defaults to (10, 10) if not provided.
v : array_like, optional
Values at positions (`x`, `y`). This can be given as many arrays
(v1, v2, ...) of len(`x`) to bin simultaneously. If None, the bin
count in each pixel is returned.
w : array_like, optional
Weights for `v` during averaging. If provided, the same weights are
applied to each input `v`.
extent : array_like, optional
Boundaries of the resulting grid, given as (xmin, xmax, ymin, ymax).
If None, bin edges are set as the min/max coordinate values of the
input position arrays.
verbose : boolean, optional
If True, print details of the binning.
Returns
-------
ndarray or tuple of ndarray
2D numpy arrays of values `v` binned into pixels. The number of
outputs matches the number of input `v` arrays.
Examples
--------
>>> # 100 values at random positions within the ranges -0.5 < x, y < 0.5
>>> # and binned within -1 < x, y < 1 to a (5, 5) grid.
>>> x = np.random.random(100) - 0.5
>>> y = np.random.random(100) - 0.5
>>> v = np.random.randn(100) * 5
>>> bin2d(x, y, v=v, npix=5, extent=(-1, 1, -1, 1))
array([[ 0. , 0. , 0. , 0. , 0. ],
[ 0. , 4.43560619, -2.33308373, 0.48447844, 0. ],
[ 0. , 1.94903524, -0.29253335, 1.3694618 , 0. ],
[ 0. , -1.0202718 , 0.37112266, -1.43062585, 0. ],
[ 0. , 0. , 0. , 0. , 0. ]])
"""
# Regroup extent if necessary
if extent is not None:
assert len(extent) == 4
extent = [extent[:2], extent[2:]]
if v is None:
# Return the simple bin count map
bincount, xbins, ybins = np.histogram2d(x, y, bins=npix, range=extent)
result = bincount.T
else:
# Prepare values to bin
v = np.atleast_1d(v)
if len(v.shape) == 1:
v = v.reshape(1, len(v))
# Prepare weights
if w is not None:
w = np.atleast_1d(w)
has_weights = True
else:
w = np.ones_like(x)
has_weights = False
# Compute weighted bin count map
wmap, xbins, ybins = np.histogram2d(x, y, bins=npix, range=extent,
weights=w)
# Handle division by zero (i.e., empty pixels)
wmap[wmap == 0] = np.inf
# Compute mean values per pixel
result = tuple((np.histogram2d(x, y, bins=npix, range=extent,
weights=(vv * w))[0] / wmap).T for vv in v)
# Clean up
if len(result) == 1:
result = result[0]
if verbose:
if v is not None:
print("Binning {} array{} with{} weights.".format(len(v),
['', 's'][(len(v) > 1)], ['out', ''][has_weights]))
else:
print("Returning bin count map.")
print("npix : {}".format(npix))
print("extent : {}".format([xbins[0], xbins[-1], ybins[0], ybins[-1]]))
print("(dx, dy) : ({}, {})".format(xbins[1] - xbins[0],
ybins[1] - ybins[0]))
return result
def radius2d(N, center=None, mode='exact'):
"""Distances from every pixel to a fixed center in a square matrix.
Parameters
----------
N : int
Number of pixels to a side.
center : array_like, optional
Incides of the central pixel, given as (x0, y0). If not given, the
center is taken to be (N / 2, N / 2) (though see `mode` description).
mode : {'exact', 'fft'}
How to treat the case when N is even. If 'exact', compute distances
from the true (fractional) central pixel location. If 'fft', use the
numpy.fft.fftfreq convention such that the central pixel location
is rounded up to the nearest integer.
Returns
-------
numpy array
2D matrix of distances.
Notes
-----
Non-integer center coordinates are not supported. If a `center` is
provided, `mode` is ignored.
Examples
--------
>>> radius2d(4, mode='exact')
array([[ 2.12132034, 1.58113883, 1.58113883, 2.12132034],
[ 1.58113883, 0.70710678, 0.70710678, 1.58113883],
[ 1.58113883, 0.70710678, 0.70710678, 1.58113883],
[ 2.12132034, 1.58113883, 1.58113883, 2.12132034]])
>>> radius2d(4, mode='fft')
array([[ 2.82842712, 2.23606798, 2. , 2.23606798],
[ 2.23606798, 1.41421356, 1. , 1.41421356],
[ 2. , 1. , 0. , 1. ],
[ 2.23606798, 1.41421356, 1. , 1.41421356]])
"""
# Verify inputs
N = int(N)
assert mode in ('exact', 'fft'), "Mode must be either 'exact' or 'fft'."
# Generate index grids
x, y = np.indices((N, N))
# Determine center
if center is not None:
x0, y0 = map(int, center)
else:
if mode == 'fft' and N % 2 == 0:
x0 = N / 2.
y0 = N / 2.
else:
x0 = (N - 1) / 2.
y0 = (N - 1) / 2.
# Compute radii
return np.hypot(x - x0, y - y0)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
3843,
45484,
33893,
198,
198,
1212,
8265,
4909,
10361,
5499,
18309,
1695,
284,
10317,
8002,
13,
198,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
... | 2.189354 | 3,945 |
import pytest
from pyspj.models import load_result, SimpleSPJResult, ContinuitySPJResult, ResultType
@pytest.mark.unittest
| [
11748,
12972,
9288,
198,
198,
6738,
279,
893,
79,
73,
13,
27530,
1330,
3440,
62,
20274,
11,
17427,
4303,
41,
23004,
11,
6389,
14834,
4303,
41,
23004,
11,
25414,
6030,
628,
198,
31,
9078,
9288,
13,
4102,
13,
403,
715,
395,
198
] | 3 | 42 |
from rl_games.common import object_factory
import rl_games.algos_torch
from rl_games.algos_torch import network_builder, pn_network_builder
from rl_games.algos_torch import models
NETWORK_REGISTRY = {}
MODEL_REGISTRY = {}
| [
6738,
374,
75,
62,
19966,
13,
11321,
1330,
2134,
62,
69,
9548,
198,
11748,
374,
75,
62,
19966,
13,
14016,
418,
62,
13165,
354,
198,
6738,
374,
75,
62,
19966,
13,
14016,
418,
62,
13165,
354,
1330,
3127,
62,
38272,
11,
279,
77,
62,
... | 2.743902 | 82 |
import math
from __builtin__ import round as _round
def clamp(x=0.0, min=0.0, max=1.0):
"""
Clamps the value x between min and max
:rtype: float
"""
pass
def gamma(c, g):
"""
Gamma color correction of c with a single scalar gamma value g
:rtype: float
"""
pass
def round(value, ndigits=0):
"""
round(number[, ndigits]) -> float
Round a number to a given precision in decimal digits (default 0 digits).
This always returns a floating point number. Precision may be negative.
This builtin function was overloaded in mathutils to work on complex numbers,
in that case rel and imaginary values are rounded separately
"""
pass
def linmap(min, max, x):
"""
Returns the value of a linear remapping function.
performs a linear interpolation between 0 and 1 in the interval min to max,
but does not clamp the range
:rtype: float
"""
pass
def blend(a, b, weight=0.5):
"""
blend(a, b[, weight=0.5]) :
Blends values a and b according to normalized weight w,
returns a for weight == 0.0 and b for weight = 1.0, a*(1.0-weight)+b*weight in between
:rtype: float
"""
pass
def imag(x):
"""
the imaginary part of x
"""
pass
def conjugate(x):
"""
the conjugate part of x
"""
pass
def hermite(x=0.0, v0=0.0, v1=0.0, s0=0.0, s1=0.0):
"""
As the MEL command : This command returns x point along on x hermite curve from the five given control arguments.
The first two arguments are the start and end points of the curve, respectively.
The next two arguments are the tangents of the curve at the start point and end point of the curve, respectively.
The fifth argument, parameter, specifies the point on the hermite curve that is returned by this function.
This parameter is the unitized distance along the curve from the start point to the end point.
A parameter value of 0.0 corresponds to the start point and x parameter value of 1.0 corresponds to the end point of the curve.
:rtype: float
"""
pass
def smoothstep(min, max, x):
"""
Returns the value of a smooth step function.
Returns 0 if x < min, 1 if x > max, and performs a smooth Hermite
interpolation between 0 and 1 in the interval min to max.
:rtype: float
"""
pass
def smoothmap(min, max, x):
"""
Returns the value of a smooth remapping function.
performs a smooth Hermite interpolation between 0 and 1 in the interval min to max,
but does not clamp the range
:rtype: float
"""
pass
def hermiteInterp(x=0.0, y0=0.0, y1=1.0, s0=0.0, s1=0.0):
"""
Hermite interpolation of x between points y0 and y1 of tangent slope s0 and s1
:rtype: float
"""
pass
def linstep(min, max, x):
"""
Returns the value of a linear step function.
Returns 0 if x < min, 1 if x > max, and performs a linear
interpolation between 0 and 1 in the interval min to max.
:rtype: float
"""
pass
def real(x):
"""
the real part of x
"""
pass
def setRange(x=0.0, oldmin=0.0, oldmax=1.0, newmin=0.0, newmax=1.0):
"""
Resets x range from x linear interpolation of oldmin to oldmax to x linear interpolation from newmin to newmax
:rtype: float
"""
pass
| [
11748,
10688,
198,
198,
6738,
11593,
18780,
259,
834,
1330,
2835,
355,
4808,
744,
198,
198,
4299,
29405,
7,
87,
28,
15,
13,
15,
11,
949,
28,
15,
13,
15,
11,
3509,
28,
16,
13,
15,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
... | 2.704762 | 1,260 |
import re
| [
11748,
302,
628
] | 3.666667 | 3 |
# Optimal Account Balancing
# given a list of transactions between a group of people, with each transaction
# as a tuple (x, y, z), meaining person x send person y amount z of money
# assume x != y and z > 0, id x and y might not be linear
# return the minimum number of transactions required to settle the debt
# Optimal Account Balancing
# given a list of transactions between a group of people, with each transaction
# as a tuple (x, y, z), meaining person x send person y amount z of money
# assume x != y and z > 0, id x and y might not be linear
# return the minimum number of transactions required to settle the debt
| [
2,
13123,
4402,
10781,
8528,
5077,
198,
2,
1813,
257,
1351,
286,
8945,
1022,
257,
1448,
286,
661,
11,
351,
1123,
8611,
198,
2,
355,
257,
46545,
357,
87,
11,
331,
11,
1976,
828,
502,
1397,
1048,
2124,
3758,
1048,
331,
2033,
1976,
2... | 4 | 156 |
import logging
import os
import shutil
from importlib.resources import read_text
from subprocess import check_call
import btemu.resources
if __name__ == '__main__':
main()
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
6738,
1330,
8019,
13,
37540,
1330,
1100,
62,
5239,
198,
6738,
850,
14681,
1330,
2198,
62,
13345,
198,
198,
11748,
275,
11498,
84,
13,
37540,
628,
628,
198,
361,
11593,
3672,
... | 3.290909 | 55 |
#coding=utf8
from uliweb import settings
from uliweb.utils.common import safe_str, import_attr
from uliweb.utils.storage import Storage
from uliweb.orm import do_, get_model
from uliweb.utils.sorteddict import SortedDict
from sqlalchemy import __version__ as sa_version, select, true, text, literal
import logging
DEBUG = False
__schemas__ = {}
__relations__ = None
__default_limit__ = 10
log = logging.getLogger(__name__)
__relations__ = Relations()
def get_relation_condition(key):
"""
Get relation condition
:param key: should be (schema_a, schema_b)
:return:
"""
global __relations__
return __relations__.get_condition(key)
def query(d):
"""
Query schema
:param d: dict options
:return:
"""
q = Query(d)
return q.run()
| [
2,
66,
7656,
28,
40477,
23,
198,
198,
6738,
334,
4528,
12384,
1330,
6460,
198,
6738,
334,
4528,
12384,
13,
26791,
13,
11321,
1330,
3338,
62,
2536,
11,
1330,
62,
35226,
198,
6738,
334,
4528,
12384,
13,
26791,
13,
35350,
1330,
20514,
... | 2.773519 | 287 |
"""
Stein Variational Gradient Descent for Deep ConvNet on GPU.
Current implementation is mainly using for-loops over model instances.
"""
import torch
import numpy as np
from time import time
from args import args, device
import h5py
import os
from models.model_det import DenseED
from models.Bayesian_model_NN import Bayesian_model_NN
from models.model_train import Bayesian_model_train
from utils.misc import mkdirs, logger
from utils.plot1 import plot_prediction_det1
from utils.plot import plot_prediction_det
from utils.mcs_data_upload import mcs_load_data
import json
import scipy.io as io
import sys
n_out_pixels_train = args.ntrain*128*128
n_out_pixels_test = args.ntest*128*128
dir = './models'
# Bayesian NN
Bayesian_model = torch.load('model_%d.pt'%args.ntrain)
KLE_val = 100
# load data
test_loader = mcs_load_data()
print('Loaded data!')
def test(epoch, logger, test_fixed=None):
"""Evaluate model during training.
Print predictions including 4 rows:
1. target
2. predictive mean
3. error of the above two
4. two sigma of predictive variance
Args:
test_fixed (Tensor): (2, N, *), `test_fixed[0]` is the fixed test input,
`test_fixed[1]` is the corresponding target
"""
Bayesian_model.eval()
mse_test, nlp_test = 0., 0.
mse_test_final = 0.
nlp_test_final = 0.
final_predict = []
mse_test, nlp_test = 0., 0.
final_target_UQ = []
final_predict_UQ = []
nlp_test_val = []
for batch_idx, (input,basis_patch,A_matrix, B_matrix,target_P, q_matrix) in enumerate(test_loader):
input_rr,output_basis,A1_transformed1,B1_transformed, target_pressure, q1_transformed \
= input.float(),basis_patch.float(),A_matrix.float(),B_matrix.float(), target_P.float(), q_matrix.float()
input_rr,output_basis,A1_transformed1,B1_transformed, target_pressure, q1_transformed \
= input_rr.to(device),output_basis.to(device),A1_transformed1.to(device),B1_transformed.to(device), target_pressure.to(device), q1_transformed.to(device)
#================================================================================
tocc = time()
output_basis = output_basis.view(144*args.batchs,1,15,15)
input_rr = input_rr.view(144*args.batchs,1,15,15)
A_app = []
for i in range(args.batchs):
A_torch = A1_transformed1[i,:,:]
A_torch1 = A_torch[:,0:2]
A_torch2 = A_torch[:,2]
A_torch1 = A_torch1.type(torch.LongTensor).to(device)
A_torch_final = torch.sparse.FloatTensor(A_torch1.t(), A_torch2, torch.Size([16384,16384]))
A_app.append(A_torch_final)
A1_transformed = torch.stack(A_app,dim=0).to(device)
#================================================================================
C = io.loadmat(dir+'/matlab_index_save_1.mat')
C = C['basis_save']
C = np.squeeze(C)
X = np.empty((C.shape[0], C[0].shape[0], C[0].shape[1]))
for i in range(X.shape[0]):
X[i] = C[i]
# -1 because of matlab and python
X1 = X.reshape(144,225)-1
#==============
#If tanining un-comment below part
#==============
X2 = np.zeros((144,225))
for i in range(144):
var2 = np.zeros((15,15))
ele = X1[i,0]
for varu in range(15):
var1 = ele+128*(varu)
for vm in range(15):
var2[varu,vm] = var1+vm
var3 = var2.reshape(1,225)
X2[i,:] = var3
X2 = torch.Tensor(X2)
mse, nlp, output, target = Bayesian_model.test_model(A1_transformed, B1_transformed,q1_transformed,input_rr, target_pressure,batch_idx, X2,
size_average=True, out=True)
y_noise_var = (- Bayesian_model.log_beta).exp().mean()
mse_test += mse.item()
nlp_test += nlp.item()
nlp1 = nlp.cpu().detach().numpy()
nlp_test_val.append(nlp1)
final_predict_UQ.append(output)
final_target_UQ.append(target)
ticc = time()
print('total time',ticc-tocc)
save_pred = np.array(final_predict_UQ)
save_tar = np.array(final_target_UQ)
mse_test_final += mse_test
nlp_test_final += nlp_test
nlp_test_val = np.array(nlp_test_val)
return mse_test_final, nlp_test_final, save_pred, save_tar
#==========================================================
#==========================================================
if __name__ == "__main__":
main()
| [
37811,
198,
7447,
259,
15965,
864,
17701,
1153,
2935,
1087,
329,
10766,
34872,
7934,
319,
11362,
13,
198,
11297,
7822,
318,
8384,
1262,
329,
12,
5439,
2840,
625,
2746,
10245,
13,
198,
37811,
198,
198,
11748,
28034,
198,
11748,
299,
3215... | 2.213115 | 2,074 |
from action_handlers.action_handler import ActionHandler, Action
from telegram.inlinekeyboardmarkup import InlineKeyboardMarkup
from telegram.inlinekeyboardbutton import InlineKeyboardButton
from telegram.ext import Filters
from telegram.parsemode import ParseMode
from telegram.error import BadRequest
import constants as const
import utils
import datetime
import logging
import counter
import math
import random
MODULE_ACTION_TYPE = const.TYPE_MANAGE_BILL
ACTION_GET_MANAGE_BILL = 0
ACTION_GET_MANAGE_BILL_KB = 1
ACTION_SHARE_BILL = 2
ACTION_CALCULATE_SPLIT = 3
ACTION_REFRESH_BILL = 4
ACTION_SEND_DEBTS_BILL_ADMIN = 5
ACTION_GET_CONFIRM_PAYMENTS_KB = 6
ACTION_CONFIRM_BILL_PAYMENT = 7
ACTION_SEND_DEBTS_BILL = 8
ACTION_SEND_BILL = 9
ACTION_SHARE_BILL_ITEM = 10
ACTION_SHARE_ALL_ITEMS = 11
ACTION_GET_SHARE_ITEMS_KB = 12
ACTION_GET_PAY_ITEMS_KB = 13
ACTION_PAY_DEBT = 14
ACTION_GET_INSPECT_BILL_KB = 15
ACTION_GET_FORCE_CONFIRM_PAYMENTS_KB = 16
ACTION_FORCE_CONFIRM_PAYMENT = 17
ACTION_ADD_SOMEONE = 18
ERROR_ITEMS_NOT_SHARED = "The bill cannot be split because the following items are not shared:\n{}"
REQUEST_CALC_SPLIT_CONFIRMATION = "You are about to calculate the splitting of the bill. Once this is done, no new person can be added to the bill anymore. Do you wish to continue? Reply /yes or /no."
ERROR_INVALID_CONTACT = "Sorry, invalid Contact or name sent. Name can only be 250 characters long. Please try again."
REQUEST_PAY_CONFIRMATION = "You are about to confirm <b>{}'s</b> payment of {}{:.2f}. This action is irreversible. Do you wish to continue? Reply /yes or /no."
REQUEST_FORCE_PAY_CONFIRMATION = "You are about to forcibly confirm <b>{}'s</b> payment of {}{:.2f}. This person has not indicated payment yet. This action is irreversible. Do you wish to continue? Reply /yes or /no."
REQUEST_CONTACT = "Please send me the <b>Contact</b> or name of the person. However, this person might <b>not</b> be able to indicate payment for this bill later on. You will have to force confirm his/her payment. To stop this, reply /no."
YES_WITH_QUOTES = "'yes'"
YES = 'yes'
NO_WITH_QUOTES = "'no'"
NO = 'no'
| [
6738,
2223,
62,
4993,
8116,
13,
2673,
62,
30281,
1330,
7561,
25060,
11,
7561,
198,
6738,
573,
30536,
13,
45145,
2539,
3526,
4102,
929,
1330,
554,
1370,
9218,
3526,
9704,
929,
198,
6738,
573,
30536,
13,
45145,
2539,
3526,
16539,
1330,
... | 2.986034 | 716 |
from datetime import datetime
import unittest
from mock import patch
from zoomus import components
def suite():
"""Define all the tests of the module."""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(RegisterV1TestCase))
return suite
if __name__ == '__main__':
unittest.main()
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
555,
715,
395,
198,
198,
6738,
15290,
1330,
8529,
198,
198,
6738,
19792,
385,
1330,
6805,
628,
198,
4299,
18389,
33529,
198,
220,
220,
220,
37227,
7469,
500,
477,
262,
5254,
286,
262,
82... | 2.891892 | 111 |
from peewee import (
SqliteDatabase,
Model,
CharField,
ForeignKeyField,
DateField,
DecimalField,
DateTimeField,
IntegerField,
TextField
)
import os
import click
from datetime import date, datetime, timedelta
from decimal import Decimal
from enum import IntEnum
db = SqliteDatabase(os.path.join(os.getcwd(), 'finance.db'))
@click.group()
@cli.command()
@click.argument('name')
@click.argument('code')
@click.argument('sign')
@cli.command()
@cli.command()
@click.argument('name')
@click.argument('currency_code')
@cli.command()
@click.argument('account_id', type=click.INT)
@click.argument('name')
@cli.command()
@cli.command()
@click.argument('account_id', type=click.INT)
@click.argument('year', type=click.INT, default=date.today().year)
@click.argument('month', type=click.INT, default=date.today().month)
@click.argument('day', type=click.INT, default=date.today().day)
@click.argument('balance')
@cli.command()
@click.argument('year', type=click.INT, default=date.today().year)
@click.argument('month', type=click.INT, default=date.today().month)
@click.argument('day', type=click.INT, default=date.today().day)
@cli.command()
@click.argument('year', type=click.INT, default=date.today().year)
@click.argument('month', type=click.INT, default=date.today().month)
@click.argument('day', type=click.INT, default=date.today().day)
@cli.command()
@click.argument('account_id', type=click.INT)
@click.argument('type', type=click.Choice([TransactionType.DEBIT.name, TransactionType.CREDIT.name, TransactionType.TRANSFER_OUT.name, TransactionType.TRANSFER_IN.name]))
@click.argument('amount')
@click.argument('comment')
@click.argument('year', type=click.INT, default=date.today().year)
@click.argument('month', type=click.INT, default=date.today().month)
@click.argument('day', type=click.INT, default=date.today().day)
@click.argument('hour', type=click.INT, default=datetime.now().hour)
@click.argument('minute', type=click.INT, default=datetime.now().minute)
@click.argument('second', type=click.INT, default=datetime.now().second)
@cli.command()
@click.argument('year', type=click.INT, default=date.today().year)
@click.argument('month', type=click.INT, default=date.today().month)
@click.argument('day', type=click.INT, default=date.today().day)
@cli.command()
@click.argument('transaction_id', click.INT)
@cli.command()
@click.argument('account_id', click.INT)
@cli.command()
@click.argument('entry_id', click.INT)
@cli.command()
@click.argument('year', type=click.INT, default=date.today().year)
@click.argument('month', type=click.INT, default=date.today().month)
@cli.command()
@click.argument('year', type=click.INT, default=date.today().year)
@click.argument('month', type=click.INT, default=date.today().month)
@click.argument('day', type=click.INT, default=date.today().day)
if __name__ == '__main__':
cli()
| [
6738,
613,
413,
1453,
1330,
357,
198,
220,
220,
220,
311,
13976,
578,
38105,
11,
198,
220,
220,
220,
9104,
11,
198,
220,
220,
220,
3178,
15878,
11,
198,
220,
220,
220,
8708,
9218,
15878,
11,
198,
220,
220,
220,
7536,
15878,
11,
19... | 2.912651 | 996 |
from queue import PriorityQueue
Coordinate = tuple[int, int]
map: list[list[int]] = []
PART_TWO = True
# For each position, we store the lowest cost path to get there.
lowest_cost: list[list[None | tuple[int, list[Coordinate]]]] = []
with open('2021-12-15.txt') as f:
for line in (l.strip() for l in f):
map_values = [int(x) for x in line]
if PART_TWO:
for i in range(1,5):
map_values += [(int(x)+i) for x in line]
map.append(map_values)
lowest_cost.append([None] * len(map_values))
if PART_TWO:
# Expand map 4 times below.
orig_map_len = len(map)
for i in range(1,5):
for y in range(orig_map_len):
map.append([(x+i) for x in map[y]])
lowest_cost.append([None] * len(map[0]))
# Deal with overflows: At most 9+4, so just subtract 9 as needed.
for y in range(len(map)):
for x in range(len(map[y])):
if map[y][x] > 9:
map[y][x] -= 9
# Priority queue always draws the current lowest cost path
work_queue: PriorityQueue[tuple[int,Coordinate, list[Coordinate]]] = PriorityQueue()
work_queue.put_nowait((0,(0,0),[(0,0)]))
NEIGHBORS = ((-1, 0), (1, 0), (0, 1), (0, -1))
max_y, max_x = len(map)-1, len(map[0])-1
while not work_queue.empty():
cost, (x, y), path = work_queue.get_nowait()
if lowest_cost[max_y][max_x] is not None:
if lowest_cost[max_y][max_x][0] < cost:
# Drain task if there is already a cheaper way to reach the end.
work_queue.task_done()
break
if lowest_cost[y][x] is not None and lowest_cost[y][x][0] < cost:
work_queue.task_done()
continue
lowest_cost[y][x] = (cost, path)
for dx, dy in NEIGHBORS:
nx, ny = x+dx, y+dy
# Skip out of bounds
if min(nx, ny) < 0 or ny > max_y or nx > max_x:
continue
new_cost = cost + map[ny][nx]
new_path = path + [(nx, ny)]
# Skip unless we're getting there cheaper.
if lowest_cost[ny][nx] is not None:
if lowest_cost[ny][nx][0] <= new_cost:
continue
# NOT THREAD SAFE: Per cell threading.Lock on lowest_cost cells would fix.
lowest_cost[ny][nx] = (new_cost, new_path)
work_queue.put_nowait((new_cost, (nx, ny), new_path))
work_queue.task_done()
print(lowest_cost[max_y][max_x])
print(lowest_cost[max_y][max_x][0])
| [
6738,
16834,
1330,
34416,
34991,
198,
198,
7222,
45480,
796,
46545,
58,
600,
11,
493,
60,
198,
198,
8899,
25,
1351,
58,
4868,
58,
600,
11907,
796,
17635,
198,
198,
30709,
62,
34551,
46,
796,
6407,
198,
198,
2,
1114,
1123,
2292,
11,
... | 2.110339 | 1,151 |
#!/usr/bin/python
import sys
import getopt
from gensim.models import Word2Vec
import pprint
from time import time
from pyonmttok import Tokenizer
#from smart_open import open
import json
from pyspark.sql import DataFrame
from pyspark.sql.types import StructType, StructField, StringType,ArrayType
from pyspark.sql.functions import col,udf,struct,collect_list
from pyspark import SparkContext, StorageLevel
from pyspark.sql import SparkSession
import csv
import os
import re
import logging
import site
from pyspark.sql.functions import col, lit, regexp_replace, trim, lower, concat, count
import numpy as np
import pandas as pd
import nltk
import uuid
def spark_session(appName="log-parser"):
"""
Function to create new spark session
"""
sc = SparkContext(appName="log-parser")
return SparkSession.builder.config(conf=sc._conf).getOrCreate()
@udf(returnType=StringType())
class MyCorpus(object):
"""An interator that yields sentences (lists of str)."""
if __name__ == "__main__":
main(sys.argv[1:]) # get everything after the script name
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
25064,
198,
11748,
651,
8738,
198,
6738,
308,
641,
320,
13,
27530,
1330,
9678,
17,
53,
721,
198,
11748,
279,
4798,
198,
6738,
640,
1330,
640,
198,
6738,
12972,
261,
76,
926,
482,
133... | 2.724706 | 425 |
#!/usr/bin/env python
# This software is Copyright (c) 2019 - Dhiru Kholia, Copyright (c) 2018 -
# axcheron, and it is hereby released under the MIT License.
#
# Key parts of this program are borrowed from the pyvmx-cracker project.
#
# See https://github.com/axcheron/pyvmx-cracker for details.
import os
import re
import sys
import base64
import argparse
from binascii import hexlify
PY3 = sys.version_info[0] == 3
if PY3:
from urllib.parse import unquote
else:
from urllib import unquote
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.stderr.write("Usage: %s [.vmx files]\n" % sys.argv[0])
sys.exit(-1)
for i in range(1, len(sys.argv)):
process_file(sys.argv[i])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
770,
3788,
318,
15069,
357,
66,
8,
13130,
532,
20529,
35406,
509,
3937,
544,
11,
15069,
357,
66,
8,
2864,
532,
198,
2,
7877,
2044,
261,
11,
290,
340,
318,
29376,
2716,
739,... | 2.491349 | 289 |
import torch
from .adabound import AdaBound, AdaBoundW
from .lars import LARSOptimizer
| [
11748,
28034,
198,
198,
6738,
764,
324,
397,
633,
1330,
47395,
49646,
11,
47395,
49646,
54,
198,
6738,
764,
75,
945,
1330,
47211,
15821,
457,
320,
7509,
628,
198
] | 3.103448 | 29 |
from print_running_function import print_running_function
import time
# Hackish method to import from another directory
# Useful while xendit-python isn't released yet to the public
import importlib.machinery
loader = importlib.machinery.SourceFileLoader("xendit", "../xendit/__init__.py")
xendit = loader.load_module("xendit")
| [
6738,
3601,
62,
20270,
62,
8818,
1330,
3601,
62,
20270,
62,
8818,
198,
198,
11748,
640,
198,
198,
2,
18281,
680,
2446,
284,
1330,
422,
1194,
8619,
198,
2,
49511,
981,
2124,
437,
270,
12,
29412,
2125,
470,
2716,
1865,
284,
262,
1171,... | 3.343137 | 102 |
from django.urls import path
from . import views
urlpatterns = [
path('category/<int:category_slug>/',
views.store, name='products_by_category'),
path('category/<int:category_slug>/<int:subcategory_slug>/',
views.store, name='products_by_subcategory'),
path('category/<int:category_slug>/<int:subcategory_slug>/<slug:product_slug>/',
views.product_detail, name='product_detail'),
path('search/', views.search, name='search'),
path('submit_review/<int:product_id>/',
views.submit_review, name='submit_review'),
path('brand_detail/<int:brand_id>/',
views.brand_detail, name='brand_detail'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
22872,
14,
27,
600,
25,
22872,
62,
6649,
1018,
29,
14,
3256,
198,
220,
220,
220,
220,
... | 2.477612 | 268 |
from typing import NoReturn
from .command_program import CommandProgram
from .utils import WindowsInstallationPackage, MacOSInstallationPackage, GNULinuxDistributionInstallationPackage
class TypescriptCommand(CommandProgram):
"""
Command to verify if ``tsc`` command is recognized by the operating system.
If its not verify, the class install it automatically if you want.
"""
def __init__(self, allow_install: bool, update_package_manager: bool = True) -> NoReturn:
"""
Constructor and initializer.
Parameters
----------
allow_install : bool
True if you want to automatically install the required package, False otherwise
update_package_manager : bool
allows this program to automatically update and upgrade all packages installed in the system (via the package manager used)
"""
windows = WindowsInstallationPackage(
choco_command="choco install typescript",
standard_command="npm install -g typescript",
update_package_manager=update_package_manager
)
macos = MacOSInstallationPackage(
standard_command="npm install -g typescript",
brew_command="brew install typescript",
update_package_manager=update_package_manager
)
linux = GNULinuxDistributionInstallationPackage(
standard_command="npm install -g typescript",
update_package_manager=update_package_manager
)
super().__init__("tsc --version", allow_install,
windows, macos, linux)
| [
6738,
19720,
1330,
1400,
13615,
198,
198,
6738,
764,
21812,
62,
23065,
1330,
9455,
15167,
198,
6738,
764,
26791,
1330,
3964,
30838,
27813,
11,
4100,
2640,
30838,
27813,
11,
15484,
6239,
259,
2821,
20344,
3890,
30838,
27813,
628,
198,
4871... | 2.764505 | 586 |
import torch
from torch import nn
from fastNLP.modules.utils import initial_parameter
def allowed_transitions(id2label, encoding_type='bio'):
"""
:param dict id2label: key是label的indices,value是str类型的tag或tag-label。value可以是只有tag的, 比如"B", "M"; 也可以是
"B-NN", "M-NN", tag和label之间一定要用"-"隔开。一般可以通过Vocabulary.get_id2word()id2label。
:param encoding_type: str, 支持"bio", "bmes"。
:return: List[Tuple(int, int)]], 内部的Tuple是(from_tag_id, to_tag_id)。 返回的结果考虑了start和end,比如"BIO"中,B、O可以
位于序列的开端,而I不行。所以返回的结果中会包含(start_idx, B_idx), (start_idx, O_idx), 但是不包含(start_idx, I_idx).
start_idx=len(id2label), end_idx=len(id2label)+1。
"""
num_tags = len(id2label)
start_idx = num_tags
end_idx = num_tags + 1
encoding_type = encoding_type.lower()
allowed_trans = []
id_label_lst = list(id2label.items()) + [(start_idx, 'start'), (end_idx, 'end')]
for from_id, from_label in id_label_lst:
if from_label in ['<pad>', '<unk>']:
continue
from_tag, from_label = split_tag_label(from_label)
for to_id, to_label in id_label_lst:
if to_label in ['<pad>', '<unk>']:
continue
to_tag, to_label = split_tag_label(to_label)
if is_transition_allowed(encoding_type, from_tag, from_label, to_tag, to_label):
allowed_trans.append((from_id, to_id))
return allowed_trans
def is_transition_allowed(encoding_type, from_tag, from_label, to_tag, to_label):
"""
:param encoding_type: str, 支持"BIO", "BMES"。
:param from_tag: str, 比如"B", "M"之类的标注tag. 还包括start, end等两种特殊tag
:param from_label: str, 比如"PER", "LOC"等label
:param to_tag: str, 比如"B", "M"之类的标注tag. 还包括start, end等两种特殊tag
:param to_label: str, 比如"PER", "LOC"等label
:return: bool,能否跃迁
"""
if to_tag=='start' or from_tag=='end':
return False
encoding_type = encoding_type.lower()
if encoding_type == 'bio':
"""
第一行是to_tag, 第一列是from_tag. y任意条件下可转,-只有在label相同时可转,n不可转
+-------+---+---+---+-------+-----+
| | B | I | O | start | end |
+-------+---+---+---+-------+-----+
| B | y | - | y | n | y |
+-------+---+---+---+-------+-----+
| I | y | - | y | n | y |
+-------+---+---+---+-------+-----+
| O | y | n | y | n | y |
+-------+---+---+---+-------+-----+
| start | y | n | y | n | n |
+-------+---+---+---+-------+-----+
| end | n | n | n | n | n |
+-------+---+---+---+-------+-----+
"""
if from_tag == 'start':
return to_tag in ('b', 'o')
elif from_tag in ['b', 'i']:
return any([to_tag in ['end', 'b', 'o'], to_tag=='i' and from_label==to_label])
elif from_tag == 'o':
return to_tag in ['end', 'b', 'o']
else:
raise ValueError("Unexpect tag {}. Expect only 'B', 'I', 'O'.".format(from_tag))
elif encoding_type == 'bmes':
"""
第一行是to_tag, 第一列是from_tag,y任意条件下可转,-只有在label相同时可转,n不可转
+-------+---+---+---+---+-------+-----+
| | B | M | E | S | start | end |
+-------+---+---+---+---+-------+-----+
| B | n | - | - | n | n | n |
+-------+---+---+---+---+-------+-----+
| M | n | - | - | n | n | n |
+-------+---+---+---+---+-------+-----+
| E | y | n | n | y | n | y |
+-------+---+---+---+---+-------+-----+
| S | y | n | n | y | n | y |
+-------+---+---+---+---+-------+-----+
| start | y | n | n | y | n | n |
+-------+---+---+---+---+-------+-----+
| end | n | n | n | n | n | n |
+-------+---+---+---+---+-------+-----+
"""
if from_tag == 'start':
return to_tag in ['b', 's']
elif from_tag == 'b':
return to_tag in ['m', 'e'] and from_label==to_label
elif from_tag == 'm':
return to_tag in ['m', 'e'] and from_label==to_label
elif from_tag in ['e', 's']:
return to_tag in ['b', 's', 'end']
else:
raise ValueError("Unexpect tag type {}. Expect only 'B', 'M', 'E', 'S'.".format(from_tag))
else:
raise ValueError("Only support BIO, BMES encoding type, got {}.".format(encoding_type))
class ConditionalRandomField(nn.Module):
"""
:param int num_tags: 标签的数量。
:param bool include_start_end_trans: 是否包含起始tag
:param list allowed_transitions: ``List[Tuple[from_tag_id(int), to_tag_id(int)]]``. 允许的跃迁,可以通过allowed_transitions()得到。
如果为None,则所有跃迁均为合法
:param str initial_method:
"""
def _normalizer_likelihood(self, logits, mask):
"""Computes the (batch_size,) denominator term for the log-likelihood, which is the
sum of the likelihoods across all possible state sequences.
:param logits:FloatTensor, max_len x batch_size x num_tags
:param mask:ByteTensor, max_len x batch_size
:return:FloatTensor, batch_size
"""
seq_len, batch_size, n_tags = logits.size()
alpha = logits[0]
if self.include_start_end_trans:
alpha += self.start_scores.view(1, -1)
for i in range(1, seq_len):
emit_score = logits[i].view(batch_size, 1, n_tags)
trans_score = self.trans_m.view(1, n_tags, n_tags)
tmp = alpha.view(batch_size, n_tags, 1) + emit_score + trans_score
alpha = log_sum_exp(tmp, 1) * mask[i].view(batch_size, 1) + alpha * (1 - mask[i]).view(batch_size, 1)
if self.include_start_end_trans:
alpha += self.end_scores.view(1, -1)
return log_sum_exp(alpha, 1)
def _glod_score(self, logits, tags, mask):
"""
Compute the score for the gold path.
:param logits: FloatTensor, max_len x batch_size x num_tags
:param tags: LongTensor, max_len x batch_size
:param mask: ByteTensor, max_len x batch_size
:return:FloatTensor, batch_size
"""
seq_len, batch_size, _ = logits.size()
batch_idx = torch.arange(batch_size, dtype=torch.long, device=logits.device)
seq_idx = torch.arange(seq_len, dtype=torch.long, device=logits.device)
# trans_socre [L-1, B]
trans_score = self.trans_m[tags[:seq_len-1], tags[1:]] * mask[1:, :]
# emit_score [L, B]
emit_score = logits[seq_idx.view(-1,1), batch_idx.view(1,-1), tags] * mask
# score [L-1, B]
score = trans_score + emit_score[:seq_len-1, :]
score = score.sum(0) + emit_score[-1] * mask[-1]
if self.include_start_end_trans:
st_scores = self.start_scores.view(1, -1).repeat(batch_size, 1)[batch_idx, tags[0]]
last_idx = mask.long().sum(0) - 1
ed_scores = self.end_scores.view(1, -1).repeat(batch_size, 1)[batch_idx, tags[last_idx, batch_idx]]
score += st_scores + ed_scores
# return [B,]
return score
def forward(self, feats, tags, mask):
"""
Calculate the neg log likelihood
:param feats:FloatTensor, batch_size x max_len x num_tags
:param tags:LongTensor, batch_size x max_len
:param mask:ByteTensor batch_size x max_len
:return:FloatTensor, batch_size
"""
feats = feats.transpose(0, 1)
tags = tags.transpose(0, 1).long()
mask = mask.transpose(0, 1).float()
all_path_score = self._normalizer_likelihood(feats, mask)
gold_path_score = self._glod_score(feats, tags, mask)
return all_path_score - gold_path_score
def viterbi_decode(self, data, mask, get_score=False, unpad=False):
"""Given a feats matrix, return best decode path and best score.
:param data:FloatTensor, batch_size x max_len x num_tags
:param mask:ByteTensor batch_size x max_len
:param get_score: bool, whether to output the decode score.
:param unpad: bool, 是否将结果unpad,
如果False, 返回的是batch_size x max_len的tensor,
如果True,返回的是List[List[int]], List[int]为每个sequence的label,已经unpadding了,即每个
List[int]的长度是这个sample的有效长度
:return: 如果get_score为False,返回结果根据unpadding变动
如果get_score为True, 返回 (paths, List[float], )。第一个仍然是解码后的路径(根据unpad变化),第二个List[Float]
为每个seqence的解码分数。
"""
batch_size, seq_len, n_tags = data.size()
data = data.transpose(0, 1).data # L, B, H
mask = mask.transpose(0, 1).data.float() # L, B
# dp
vpath = data.new_zeros((seq_len, batch_size, n_tags), dtype=torch.long)
vscore = data[0]
transitions = self._constrain.data.clone()
transitions[:n_tags, :n_tags] += self.trans_m.data
if self.include_start_end_trans:
transitions[n_tags, :n_tags] += self.start_scores.data
transitions[:n_tags, n_tags+1] += self.end_scores.data
vscore += transitions[n_tags, :n_tags]
trans_score = transitions[:n_tags, :n_tags].view(1, n_tags, n_tags).data
for i in range(1, seq_len):
prev_score = vscore.view(batch_size, n_tags, 1)
cur_score = data[i].view(batch_size, 1, n_tags)
score = prev_score + trans_score + cur_score
best_score, best_dst = score.max(1)
vpath[i] = best_dst
vscore = best_score * mask[i].view(batch_size, 1) + vscore * (1 - mask[i]).view(batch_size, 1)
vscore += transitions[:n_tags, n_tags+1].view(1, -1)
# backtrace
batch_idx = torch.arange(batch_size, dtype=torch.long, device=data.device)
seq_idx = torch.arange(seq_len, dtype=torch.long, device=data.device)
lens = (mask.long().sum(0) - 1)
# idxes [L, B], batched idx from seq_len-1 to 0
idxes = (lens.view(1,-1) - seq_idx.view(-1,1)) % seq_len
ans = data.new_empty((seq_len, batch_size), dtype=torch.long)
ans_score, last_tags = vscore.max(1)
ans[idxes[0], batch_idx] = last_tags
for i in range(seq_len - 1):
last_tags = vpath[idxes[i], batch_idx, last_tags]
ans[idxes[i+1], batch_idx] = last_tags
ans = ans.transpose(0, 1)
if unpad:
paths = []
for idx, seq_len in enumerate(lens):
paths.append(ans[idx, :seq_len+1].tolist())
else:
paths = ans
if get_score:
return paths, ans_score.tolist()
return paths
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
198,
6738,
3049,
45,
19930,
13,
18170,
13,
26791,
1330,
4238,
62,
17143,
2357,
628,
628,
198,
4299,
3142,
62,
7645,
1756,
7,
312,
17,
18242,
11,
21004,
62,
4906,
11639,
65,
952,
6,... | 1.85028 | 5,724 |
# %%
import sys
sys.path.insert(0, "..")
import time
import timeit
import numpy as np
import torch
import matplotlib.pyplot as plt
import cpab
import torch.autograd.profiler as profiler
import torch.utils.benchmark as benchmark
# %% SETUP
tess_size = 50
backend = "pytorch" # ["pytorch", "numpy"]
device = "gpu" # ["cpu", "gpu"]
zero_boundary = True
use_slow = False
outsize = 100
batch_size = 20
method = "closed_form"
T = cpab.Cpab(tess_size, backend, device, zero_boundary)
T.params.use_slow = use_slow
grid = T.uniform_meshgrid(outsize)
theta = T.sample_transformation(batch_size)
theta = T.identity(batch_size, epsilon=1.0)
# T.params.nSteps1 = 5
# T.params.nSteps2 = 5
grid_t = T.transform_grid(grid, theta, method)
# plt.plot(grid_t.cpu().T)
print(1)
# %% PYTORCH BENCHMARK
t0 = benchmark.Timer(
stmt="""
theta_grad = torch.autograd.Variable(theta, requires_grad=True)
grid_t = T.transform_grid(grid, theta_grad, method)
loss = torch.norm(grid_t)
loss.backward()
""",
globals={"T": T, "grid": grid, "theta": theta, "method": method}
)
# t0.timeit(1)
t0.blocked_autorange(min_run_time=0.5)
# %% CPROFILE
import cProfile
cProfile.run(
"""
theta_grad = torch.autograd.Variable(theta, requires_grad=True)
for i in range(1000):
grid_t = T.transform_grid(grid, theta_grad, method)
# loss = torch.norm(grid_t)
# loss.backward()
""",
sort="cumtime",
)
# %% YEP + PPROF
import yep
# torch.set_num_threads(1)
theta_grad = torch.autograd.Variable(theta, requires_grad=True)
yep.start("profile.prof")
for i in range(100):
grid_t = T.transform_grid(grid, theta_grad, method)
# loss = torch.norm(grid_t)
# loss.backward()
yep.stop()
# %% TIMEIT
repetitions = 1000
n = 10
timing = timeit.Timer(
lambda: T.transform_grid(grid, theta),
# setup="gc.enable()"
).repeat(repetitions, n)
print("Time: ", np.mean(timing) / n, "+-", np.std(timing) / np.sqrt(n))
# %% PYTORCH PROFILER
with profiler.profile(with_stack=True, profile_memory=True) as prof:
T.transform_grid(grid, theta, method)
print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=50))
# prof.export_chrome_trace("trace.json")
# %% snakeviz
# %prun -D program.prof T.transform_grid(grid, theta)
# %%
from itertools import product
results = []
num_threads_arr = [1] # [1, 2, 4]
backend_arr = ["pytorch"] # ["pytorch", "numpy"]
device_arr = ["cpu", "gpu"] # ["cpu", "gpu"]
method_arr = ["closed_form"] # ["closed_form", "numeric"]
use_slow_arr = [False] # [True, False]
zero_boundary_arr = [True] # [True, False]
tess_size_arr = [50]
outsize_arr = [1000]
batch_size_arr = [200]
for (
backend,
device,
method,
use_slow,
zero_boundary,
tess_size,
outsize,
batch_size,
) in product(
backend_arr,
device_arr,
method_arr,
use_slow_arr,
zero_boundary_arr,
tess_size_arr,
outsize_arr,
batch_size_arr,
):
# SETUP
T = cpab.Cpab(tess_size, backend, device, zero_boundary)
T.params.use_slow = use_slow
grid = T.uniform_meshgrid(outsize)
theta = T.identity(batch_size, epsilon=1)
label = "CPAB: backend, device, method, use_slow, zero_boundary, tess_size, outsize, batch_size"
# sub_label = f"[{backend}, {device}, {method}, {'slow' if use_slow else 'fast'}, {'zero_boundary' if zero_boundary else 'no_zero_boundary'}, {tess_size}, {outsize}, {batch_size}]"
sub_label = f"[{backend}, {device}, {method}, {use_slow}, {zero_boundary}, {tess_size}, {outsize}, {batch_size}]"
print(sub_label)
for num_threads in num_threads_arr:
repetitions = 1
# FORWARD
t0 = benchmark.Timer(
stmt=
"""
grid_t = T.transform_grid(grid, theta, method)
""",
globals={"T": T, "grid": grid, "theta": theta, "method": method},
num_threads=num_threads,
label=label,
sub_label=sub_label,
description="Forward",
)
# results.append(t0.timeit(repetitions))
results.append(t0.blocked_autorange(min_run_time=0.5))
# results.append(t0.adaptive_autorange())
# BACKWARD
t1 = benchmark.Timer(
stmt=
"""
theta_grad = torch.autograd.Variable(theta, requires_grad=True)
grid_t = T.transform_grid(grid, theta_grad, method)
loss = torch.norm(grid_t)
loss.backward()
""",
globals={"T": T, "grid": grid, "theta": theta, "method": method},
num_threads=num_threads,
label=label,
sub_label=sub_label,
description="Backward",
)
# results.append(t1.timeit(repetitions))
results.append(t1.blocked_autorange(min_run_time=0.5))
# results.append(t1.adaptive_autorange())
# %%
compare = benchmark.Compare(results)
compare.trim_significant_figures()
compare.colorize()
compare.print()
# %% RESULTS TO LATEX
import pandas as pd
df = [
pd.DataFrame({
'experiment': t.as_row_name.replace('[', '').replace(']', ''),
'description': t.task_spec.description,
'threads': t.task_spec.num_threads,
'time': t.raw_times,
'time_mean': np.mean(t.raw_times),
'time_std': np.std(t.raw_times),
})
for t in results
]
df = pd.concat(df, ignore_index=True)
header = ['Backend', 'Device', 'Method', 'Speed', 'Boundary', 'Tess Size', 'Grid Size', 'Batch Size']
parameters = pd.DataFrame(df["experiment"].str.split(',', expand=True).values, columns=header)
a = pd.concat([parameters, df], axis=1).drop(columns=['experiment'])
a.to_latex(index=False, escape=False)
# %% RESULTS TO PLOT
import seaborn as sns
import pandas as pd
df = [
pd.DataFrame({
'experiment': t.as_row_name,
'description': t.task_spec.description,
'threads': t.task_spec.num_threads,
'time': t.raw_times})
for t in results
]
df = pd.concat(df, ignore_index=True)
df['experiment_id'] = df.groupby('experiment', sort=False).ngroup().apply(str)
n = pd.unique(df.experiment_id)
exps = pd.unique(df.experiment)
caption = '\n'.join([k + ": " + exps[int(k)] for k in n])
header = ['Backend', 'Device', 'Method', 'Speed', 'Boundary', 'Tess Size', 'Grid Size', 'Batch Size']
cell_text = [e.replace('[','').replace(']','').split(', ') for e in exps]
vlen = np.vectorize(len)
w = np.max(vlen(cell_text + [header]), axis=0)
# %%
import matplotlib
with sns.axes_style("whitegrid"):
g = sns.catplot(
x="time", y="experiment_id",
hue="threads", col="description",
data=df, kind="box", ci=None, sharex=True,
fliersize=2, linewidth=1, width=0.75)
sns.despine(top=False, right=False, left=False, bottom=False)
plt.xticks(np.logspace(-10,-1, num=10))
# plt.figtext(0, -0.1, caption, wrap=True,
# verticalalignment='top', horizontalalignment='left', fontsize=10)
table = plt.table(
cellText=cell_text,
rowLabels=n,
colLabels=header,
colWidths = w,
cellLoc='center',
loc='bottom',
# fontsize=50
bbox=[-1.0,-0.5, 1.2, 0.35]
)
table.auto_set_font_size(False)
table.set_fontsize(8)
# table.auto_set_column_width(n)
# table.scale(1, 1)
for ax in g.axes[0]:
ax.set_xscale('log')
ax.grid(axis="x", which="minor", ls="--", c='gray', alpha=0.2)
plt.savefig('example.png')
# %%
| [
2,
43313,
198,
198,
11748,
25064,
198,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
366,
492,
4943,
198,
198,
11748,
640,
198,
11748,
640,
270,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
2603,
29487,
8019,
13,
... | 2.193132 | 3,407 |
# -*- coding: windows-1252 -*-
# dpp2607.py
#
# sends commands to DPP2607 ASIC using I2C
#
# Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Texas Instruments Incorporated nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
*** Note *** - this module is generated, changes will be lost!
Python Interface to DLP DPP2607
"""
import time
import struct
from enum import IntEnum
from logging import log, DEBUG
import i2c
COMPOUND_CMD_TIMEOUT = 2.0 # seconds
#####################################################
# Constants
#####################################################
X_0_TO_255_YCRCB = 0
X_16_TO_240_Y_112_TO_112_CRCB = 1
X_1_WHITE_AND_1_BLACK = 9
X_1_WHITE_AND_7_BLACK = 7
X_2_3_VGA_PORTRAIT = 4
X_3_2_VGA_LANDSCAPE = 5
X_4_2_2_YCR_CB_16_BIT = 8
X_4_2_2_YCR_CB_8_BIT = 9
X_90_DEGREE_ROTATION = 1
ACTIVE_HIGH = 1
ACTIVE_HIGH_PDM = 1
ACTIVE_HIGH_PULSE = 1
ACTIVE_LOW = 0
ACTIVE_LOW_PDM = 0
ACTIVE_LOW_PULSE = 0
ANSI_4X4_CHECKERBOARD = 0
BLACK = 0
BLUE = 4
BT_601 = 0
BT_656_I_F = 4
BT_709 = 1
COMPLETE = 1
CYAN = 6
DATA_SAMPLES_ON_FALLING_EDGE = 1
DATA_SAMPLES_ON_RISING_EDGE = 0
DIAGONAL_LINES = 10
DISABLED = 0
DLPC2601 = 130
DLPC2607 = 138
DSYS_PORTA_BIT_0 = 0
DSYS_PORTA_BIT_1 = 1
DSYS_PORTA_BIT_2 = 2
DSYS_PORTA_BIT_3 = 3
DSYS_PORTA_BIT_4 = 4
DSYS_PORTA_BIT_5 = 5
DSYS_PORTA_BIT_6 = 6
DSYS_PORTA_BIT_7 = 7
DSYS_PORTB_BIT_0 = 8
DSYS_PORTB_BIT_1 = 9
DSYS_PORTB_BIT_2 = 10
DSYS_PORTB_BIT_3 = 11
DSYS_PORTB_BIT_4 = 12
DSYS_PORTB_BIT_5 = 13
DSYS_PORTB_BIT_6 = 14
DSYS_PORTB_BIT_7 = 15
DSYS_PORTC_BIT_4 = 16
DSYS_PORTC_BIT_5 = 17
DSYS_PORTC_BIT_6 = 18
DSYS_PORTC_BIT_7 = 19
ENABLED = 1
ENABLED_ACTIVATES_CONTROL_BELOW = 1
ERROR_DETECTED = 0
EXTERNAL_VIDEO_PARALLEL_I_F = 0
FINE_CHECKERBOARD = 13
FLASH_BUSY = 1
GAMMA_CURVE_0 = 0
GAMMA_CURVE_1 = 1
GAMMA_CURVE_2 = 2
GAMMA_CURVE_3 = 3
GAMMA_CURVE_4 = 4
GAMMA_CURVE_5 = 5
GAMMA_CURVE_6 = 6
GREEN = 2
HORIZONTAL_GREY_RAMPS = 12
HORIZONTAL_LINES_1W_1B = 9
HORIZONTAL_LINES_1W_7B = 7
INITIALIZATION_COMPLETE = 0
INTERNAL_TEST_PATTERNS = 1
IN_PROGRESS = 0
MAGENTA = 5
NHD_LANDSCAPE = 27
NHD_PORTRAIT = 26
NOT_COMPLETE = 1
NO_ERRORS = 1
NO_ROTATION = 0
NO_TIMEOUTS = 0
NTSC_LANDSCAPE = 23
OFFSET__0 = 0
OFFSET__16 = 1
OPTICAL_TEST_IMAGE = 9
PAL_LANDSCAPE = 25
PARK_THE_DMD = 1
PIO_CYUSBI2C = 16
PIO_CYUSBSPI = 17
PIO_DEVASYS = 3
PIO_GENERICSERIAL = 7
PIO_MMKUSB = 9
PIO_SERIAL = 4
PIO_TESTER = 6
PIO_USB = 5
PIO_USBHID = 10
PIO_USBI2CPRO = 8
QVGA_LANDSCAPE = 1
QVGA_PORTRAIT = 0
QWVGA_LANDSCAPE = 3
QWVGA_PORTRAIT = 2
RED = 1
RGB565_16_BIT = 0
RGB565_8_BIT = 3
RGB666_16_BIT = 7
RGB666_18_BIT = 1
RGB666_8_BIT = 6
RGB888_16_BIT = 5
RGB888_24_BIT = 2
RGB888_8_BIT = 4
SEQUENCE_0 = 0
SEQUENCE_10 = 10
SEQUENCE_11 = 11
SEQUENCE_12 = 12
SEQUENCE_13 = 13
SEQUENCE_14 = 14
SEQUENCE_15 = 15
SEQUENCE_1 = 1
SEQUENCE_2 = 2
SEQUENCE_3 = 3
SEQUENCE_4 = 4
SEQUENCE_5 = 5
SEQUENCE_6 = 6
SEQUENCE_7 = 7
SEQUENCE_8 = 8
SEQUENCE_9 = 9
SET_AS_OFFSET_OFFSET__128 = 1
SET_AS_SIGNED_OFFSET__0 = 0
SOLID_BLACK = 1
SOLID_BLUE = 4
SOLID_GREEN = 3
SOLID_RED = 5
SOLID_WHITE = 2
SPLASH_IMAGE_0 = 0
SPLASH_IMAGE_1 = 1
SPLASH_IMAGE_2 = 2
SPLASH_IMAGE_3 = 3
SPLASH_SCREEN = 2
TIMEOUT_ERROR_HAS_OCCURRED = 1
UNPARK_THE_DMD = 0
VERTICAL_GREY_RAMPS = 11
VERTICAL_LINES_1W_1B = 8
VERTICAL_LINES_1W_7B = 6
VGA_LANDSCAPE = 7
VGA_PORTRAIT = 6
WHITE = 7
WVGA_720_LANDSCAPE = 9
WVGA_720_PORTRAIT = 8
WVGA_752_LANDSCAPE = 11
WVGA_752_PORTRAIT = 10
WVGA_800_LANDSCAPE = 13
WVGA_800_PORTRAIT = 12
WVGA_852_LANDSCAPE = 15
WVGA_852_PORTRAIT = 14
WVGA_853_LANDSCAPE = 17
WVGA_853_PORTRAIT = 16
WVGA_854_LANDSCAPE = 19
WVGA_854_OR_VGA_OUTPUT = 29
WVGA_854_PORTRAIT = 18
WVGA_864_LANDSCAPE = 21
WVGA_864_PORTRAIT = 20
YELLOW = 3
#####################################################
# Enumerations uses by function parameters
#####################################################
class DMDCurtainColor(IntEnum):
"""
DMD Curtain Color
"""
BLACK = 0x00
RED = 0x01
GREEN = 0x02
BLUE = 0x04
YELLOW = 0x03
MAGENTA = 0x05
CYAN = 0x06
WHITE = 0x07
class TestPatternVLines(IntEnum):
"""
Line Count
"""
X_1_WHITE_AND_7_BLACK = 0x06
X_1_WHITE_AND_1_BLACK = 0x08
class TestPatternHLines(IntEnum):
"""
Line Count
"""
X_1_WHITE_AND_7_BLACK = 0x07
X_1_WHITE_AND_1_BLACK = 0x09
class PolarityPixelClock(IntEnum):
"""
Pixel Clock Polarity
"""
DATA_SAMPLES_ON_RISING_EDGE = 0x00
DATA_SAMPLES_ON_FALLING_EDGE = 0x01
class DevLEDStatus(IntEnum):
"""
LED Timeout Status
"""
NO_TIMEOUTS = 0x00
TIMEOUT_ERROR_HAS_OCCURRED = 0x01
class PixFormat(IntEnum):
"""
Pixel Data Format
"""
RGB565_16_BIT_ = 0x00
RGB666_18_BIT_ = 0x01
RGB888_24_BIT_ = 0x02
RGB565_8_BIT_ = 0x03
RGB888_8_BIT_ = 0x04
RGB888_16_BIT_ = 0x05
RGB666_8_BIT_ = 0x06
RGB666_16_BIT_ = 0x07
X_4_2_2_YCR_CB_16_BIT_ = 0x08
X_4_2_2_YCR_CB_8_BIT_ = 0x09
class DMDPARK(IntEnum):
"""
DMD Park Control
"""
UNPARK_THE_DMD = 0x00
PARK_THE_DMD = 0x01
class Resolution(IntEnum):
"""
Resolution
"""
QVGA_PORTRAIT = 0x00
QVGA_LANDSCAPE = 0x01
QWVGA_PORTRAIT = 0x02
QWVGA_LANDSCAPE = 0x03
X_2_3_VGA_PORTRAIT = 0x04
X_3_2_VGA_LANDSCAPE = 0x05
VGA_PORTRAIT = 0x06
VGA_LANDSCAPE = 0x07
WVGA_720_PORTRAIT = 0x08
WVGA_720_LANDSCAPE = 0x09
WVGA_752_PORTRAIT = 0x0A
WVGA_752_LANDSCAPE = 0x0B
WVGA_800_PORTRAIT = 0x0C
WVGA_800_LANDSCAPE = 0x0D
WVGA_852_PORTRAIT = 0x0E
WVGA_852_LANDSCAPE = 0x0F
WVGA_853_PORTRAIT = 0x10
WVGA_853_LANDSCAPE = 0x11
WVGA_854_PORTRAIT = 0x12
WVGA_854_LANDSCAPE = 0x13
WVGA_864_PORTRAIT = 0x14
WVGA_864_LANDSCAPE = 0x15
NTSC_LANDSCAPE = 0x17
PAL_LANDSCAPE = 0x19
NHD_PORTRAIT = 0x1A
NHD_LANDSCAPE = 0x1B
WVGA_854_OR_VGA_OUTPUT = 0x1D
class CompoundStat(IntEnum):
"""
LED Calibration State
mDDR Built-In Self-Test State
"""
COMPLETE = 0x00
NOT_COMPLETE = 0x01
class TestPattern(IntEnum):
"""
Current Pattern
"""
ANSI_4X4_CHECKERBOARD = 0x00
SOLID_BLACK = 0x01
SOLID_WHITE = 0x02
SOLID_GREEN = 0x03
SOLID_BLUE = 0x04
SOLID_RED = 0x05
VERTICAL_LINES_1W_7B_ = 0x06
HORIZONTAL_LINES_1W_7B_ = 0x07
VERTICAL_LINES_1W_1B_ = 0x08
HORIZONTAL_LINES_1W_1B_ = 0x09
DIAGONAL_LINES = 0x0A
VERTICAL_GREY_RAMPS = 0x0B
HORIZONTAL_GREY_RAMPS = 0x0C
FINE_CHECKERBOARD = 0x0D
class RotationSetting(IntEnum):
    """
    Rotation Setting

    Image rotation control (see DPP2607_Read_ImageRotationSettings).
    """
    NO_ROTATION = 0x00
    X_90_DEGREE_ROTATION = 0x01
class PolarityDataEn(IntEnum):
    """
    DATAEN Signal Polarity

    Active level of the parallel-bus data-enable signal.
    """
    ACTIVE_LOW = 0x00
    ACTIVE_HIGH = 0x01
class TestPatternSolids(IntEnum):
    """
    Color

    Fill colors for the solid-color internal test patterns.
    """
    BLACK = 0x01
    WHITE = 0x02
    GREEN = 0x03
    BLUE = 0x04
    RED = 0x05
class SourceSel(IntEnum):
    """
    Input Source

    Video input source selection (see DPP2607_Read_VideoSourceSelection).
    Note 0x03 is unassigned in this set.
    """
    EXTERNAL_VIDEO_PARALLEL_I_F_ = 0x00
    INTERNAL_TEST_PATTERNS = 0x01
    SPLASH_SCREEN = 0x02
    BT_656_I_F = 0x04
class DevID(IntEnum):
    """
    Device ID

    Controller identification byte from the device-status word.
    """
    DLPC2601 = 0x82
    DLPC2607 = 0x8A
class DevInitStatus(IntEnum):
    """
    Auto-Initialization Status

    Reported in the device-status word (see DPP2607_Read_DeviceStatus).
    """
    IN_PROGRESS = 0x00
    INITIALIZATION_COMPLETE = 0x01
class CompoundLooks(IntEnum):
    """
    Selected Looks Sequence

    Identifies which of the 16 stored "looks" sequences is selected.
    """
    SEQUENCE_0 = 0x00
    SEQUENCE_1 = 0x01
    SEQUENCE_2 = 0x02
    SEQUENCE_3 = 0x03
    SEQUENCE_4 = 0x04
    SEQUENCE_5 = 0x05
    SEQUENCE_6 = 0x06
    SEQUENCE_7 = 0x07
    SEQUENCE_8 = 0x08
    SEQUENCE_9 = 0x09
    # Hex literals normalized to uppercase for consistency with every
    # other enum in this file (values unchanged).
    SEQUENCE_10 = 0x0A
    SEQUENCE_11 = 0x0B
    SEQUENCE_12 = 0x0C
    SEQUENCE_13 = 0x0D
    SEQUENCE_14 = 0x0E
    SEQUENCE_15 = 0x0F
class EnabledDisabled(IntEnum):
    """
    Blue LED State
    DMD Curtain Control
    DMD Long Side Flip
    DMD Short Side Flip
    Green LED State
    Red LED State

    Generic on/off flag shared by several controls.
    """
    DISABLED = 0x00
    ENABLED = 0x01
class Polarity(IntEnum):
    """
    HSYNC Signal Polarity
    VSYNC Signal Polarity

    Shared sync-pulse polarity selection for the parallel bus.
    """
    ACTIVE_LOW_PULSE = 0x00
    ACTIVE_HIGH_PULSE = 0x01
class DevFlashStatus(IntEnum):
    """
    Flash Initialization Status

    Reported in the device-status word (see DPP2607_Read_DeviceStatus).
    """
    INITIALIZATION_COMPLETE = 0x00
    FLASH_BUSY = 0x01
class CompoundSplash(IntEnum):
    """
    Splash Screen Select

    Splash image selection; 0x09 selects the optical test image.
    Values 0x04-0x08 are unassigned in this set.
    """
    SPLASH_IMAGE_0 = 0x00
    SPLASH_IMAGE_1 = 0x01
    SPLASH_IMAGE_2 = 0x02
    SPLASH_IMAGE_3 = 0x03
    OPTICAL_TEST_IMAGE = 0x09
#####################################################
# Support functions
#####################################################
def DPP2607_Open(*args):
    """Initialize the I2C interface.

    Extra positional arguments are accepted but ignored (kept for
    call-site compatibility).
    :rtype: None
    """
    log(DEBUG, "DPP2607_Open()")
    i2c.initialize()
def DPP2607_Close():
    """Shut down the I2C interface.

    :rtype: None
    """
    log(DEBUG, "DPP2607_Close()")
    i2c.terminate()
def DPP2607_GetIODebug():
    """Return the IO debugging status.

    :returns: (enable, log_path); log_path is always None here
    :rtype: tuple[bool, str|None]
    """
    enable = i2c.get_debug()
    return enable, None
def DPP2607_SetIODebug(enable, log_path=None):
    """Enable or disable logging of IO traffic.

    :type enable: bool
    :type log_path: str, accepted for API compatibility but unused
    :rtype: None
    """
    log(DEBUG, "DPP2607_SetIODebug(%s, %s)", enable, log_path)
    i2c.set_debug(enable)
def DPP2607_GetSlaveAddr():
    """Return the current I2C slave address (default: 0x36).

    :returns: slave_addr
    :rtype: int
    """
    return i2c.get_slave_address()
def DPP2607_SetSlaveAddr(slave_addr):
    """Set the I2C slave address (default: 0x36).

    The I2C link is torn down and reinitialized only when the address
    actually changes; setting the current address is a no-op.
    :type slave_addr: int
    :rtype: None
    """
    if slave_addr == i2c.get_slave_address():
        return
    log(DEBUG, "DPP2607_SetSlaveAddr(%s)", hex(slave_addr))
    i2c.terminate()
    i2c.initialize(slave_addr)
#####################################################
# ASIC Command Functions
#####################################################
def DPP2607_Read_CcaC1r1Coefficient():
    """Read the CCA C1R1 coefficient (register 0x5F, low 9 bits).

    :returns: ccac1r1
    :rtype: int
    """
    i2c.write([0x15, 0x5F])
    raw = i2c.read(4)
    ccac1r1 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC1r1Coefficient: ccac1r1=%r', ccac1r1)
    return ccac1r1
def DPP2607_Read_CcaC1r2Coefficient():
    """Read the CCA C1R2 coefficient (register 0x60, low 9 bits).

    :returns: ccac1r2
    :rtype: int
    """
    i2c.write([0x15, 0x60])
    raw = i2c.read(4)
    ccac1r2 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC1r2Coefficient: ccac1r2=%r', ccac1r2)
    return ccac1r2
def DPP2607_Read_CcaC1r3Coefficient():
    """Read the CCA C1R3 coefficient (register 0x61, low 9 bits).

    :returns: ccac1r3
    :rtype: int
    """
    i2c.write([0x15, 0x61])
    raw = i2c.read(4)
    ccac1r3 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC1r3Coefficient: ccac1r3=%r', ccac1r3)
    return ccac1r3
def DPP2607_Read_CcaC2r1Coefficient():
    """Read the CCA C2R1 coefficient (register 0x62, low 9 bits).

    :returns: ccac2r1
    :rtype: int
    """
    i2c.write([0x15, 0x62])
    raw = i2c.read(4)
    ccac2r1 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC2r1Coefficient: ccac2r1=%r', ccac2r1)
    return ccac2r1
def DPP2607_Read_CcaC2r2Coefficient():
    """Read the CCA C2R2 coefficient (register 0x63, low 9 bits).

    :returns: ccac2r2
    :rtype: int
    """
    i2c.write([0x15, 0x63])
    raw = i2c.read(4)
    ccac2r2 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC2r2Coefficient: ccac2r2=%r', ccac2r2)
    return ccac2r2
def DPP2607_Read_CcaC2r3Coefficient():
    """Read the CCA C2R3 coefficient (register 0x64, low 9 bits).

    :returns: ccac2r3
    :rtype: int
    """
    i2c.write([0x15, 0x64])
    raw = i2c.read(4)
    ccac2r3 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC2r3Coefficient: ccac2r3=%r', ccac2r3)
    return ccac2r3
def DPP2607_Read_CcaC3r1Coefficient():
    """Read the CCA C3R1 coefficient (register 0x65, low 9 bits).

    :returns: ccac3r1
    :rtype: int
    """
    i2c.write([0x15, 0x65])
    raw = i2c.read(4)
    ccac3r1 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC3r1Coefficient: ccac3r1=%r', ccac3r1)
    return ccac3r1
def DPP2607_Read_CcaC3r2Coefficient():
    """Read the CCA C3R2 coefficient (register 0x66, low 9 bits).

    :returns: ccac3r2
    :rtype: int
    """
    i2c.write([0x15, 0x66])
    raw = i2c.read(4)
    ccac3r2 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC3r2Coefficient: ccac3r2=%r', ccac3r2)
    return ccac3r2
def DPP2607_Read_CcaC3r3Coefficient():
    """Read the CCA C3R3 coefficient (register 0x67, low 9 bits).

    :returns: ccac3r3
    :rtype: int
    """
    i2c.write([0x15, 0x67])
    raw = i2c.read(4)
    ccac3r3 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC3r3Coefficient: ccac3r3=%r', ccac3r3)
    return ccac3r3
def DPP2607_Read_CcaC4r1Coefficient():
    """Read the CCA C4R1 coefficient (register 0x68, low 9 bits).

    :returns: ccac4r1
    :rtype: int
    """
    i2c.write([0x15, 0x68])
    raw = i2c.read(4)
    ccac4r1 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC4r1Coefficient: ccac4r1=%r', ccac4r1)
    return ccac4r1
def DPP2607_Read_CcaC4r2Coefficient():
    """Read the CCA C4R2 coefficient (register 0x69, low 9 bits).

    :returns: ccac4r2
    :rtype: int
    """
    i2c.write([0x15, 0x69])
    raw = i2c.read(4)
    ccac4r2 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC4r2Coefficient: ccac4r2=%r', ccac4r2)
    return ccac4r2
def DPP2607_Read_CcaC4r3Coefficient():
    """Read the CCA C4R3 coefficient (register 0x6A, low 9 bits).

    :returns: ccac4r3
    :rtype: int
    """
    i2c.write([0x15, 0x6A])
    raw = i2c.read(4)
    ccac4r3 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC4r3Coefficient: ccac4r3=%r', ccac4r3)
    return ccac4r3
def DPP2607_Read_CcaC5r1Coefficient():
    """Read the CCA C5R1 coefficient (register 0x6B, low 9 bits).

    :returns: ccac5r1
    :rtype: int
    """
    i2c.write([0x15, 0x6B])
    raw = i2c.read(4)
    ccac5r1 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC5r1Coefficient: ccac5r1=%r', ccac5r1)
    return ccac5r1
def DPP2607_Read_CcaC5r2Coefficient():
    """Read the CCA C5R2 coefficient (register 0x6C, low 9 bits).

    :returns: ccac5r2
    :rtype: int
    """
    i2c.write([0x15, 0x6C])
    raw = i2c.read(4)
    ccac5r2 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC5r2Coefficient: ccac5r2=%r', ccac5r2)
    return ccac5r2
def DPP2607_Read_CcaC5r3Coefficient():
    """Read the CCA C5R3 coefficient (register 0x6D, low 9 bits).

    :returns: ccac5r3
    :rtype: int
    """
    i2c.write([0x15, 0x6D])
    raw = i2c.read(4)
    ccac5r3 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC5r3Coefficient: ccac5r3=%r', ccac5r3)
    return ccac5r3
def DPP2607_Read_CcaC6r1Coefficient():
    """Read the CCA C6R1 coefficient (register 0x6E, low 9 bits).

    :returns: ccac6r1
    :rtype: int
    """
    i2c.write([0x15, 0x6E])
    raw = i2c.read(4)
    ccac6r1 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC6r1Coefficient: ccac6r1=%r', ccac6r1)
    return ccac6r1
def DPP2607_Read_CcaC6r2Coefficient():
    """Read the CCA C6R2 coefficient (register 0x6F, low 9 bits).

    :returns: ccac6r2
    :rtype: int
    """
    i2c.write([0x15, 0x6F])
    raw = i2c.read(4)
    ccac6r2 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC6r2Coefficient: ccac6r2=%r', ccac6r2)
    return ccac6r2
def DPP2607_Read_CcaC6r3Coefficient():
    """Read the CCA C6R3 coefficient (register 0x70, low 9 bits).

    :returns: ccac6r3
    :rtype: int
    """
    i2c.write([0x15, 0x70])
    raw = i2c.read(4)
    ccac6r3 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC6r3Coefficient: ccac6r3=%r', ccac6r3)
    return ccac6r3
def DPP2607_Read_CcaC7r1Coefficient():
    """Read the CCA C7R1 coefficient (register 0x71, low 9 bits).

    :returns: ccac7r1
    :rtype: int
    """
    i2c.write([0x15, 0x71])
    raw = i2c.read(4)
    ccac7r1 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC7r1Coefficient: ccac7r1=%r', ccac7r1)
    return ccac7r1
def DPP2607_Read_CcaC7r2Coefficient():
    """Read the CCA C7R2 coefficient (register 0x72, low 9 bits).

    :returns: ccac7r2
    :rtype: int
    """
    i2c.write([0x15, 0x72])
    raw = i2c.read(4)
    ccac7r2 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC7r2Coefficient: ccac7r2=%r', ccac7r2)
    return ccac7r2
def DPP2607_Read_CcaC7r3Coefficient():
    """Read the CCA C7R3 coefficient (register 0x73, low 9 bits).

    :returns: ccac7r3
    :rtype: int
    """
    i2c.write([0x15, 0x73])
    raw = i2c.read(4)
    ccac7r3 = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1FF
    log(DEBUG, 'DPP2607_Read_CcaC7r3Coefficient: ccac7r3=%r', ccac7r3)
    return ccac7r3
def DPP2607_Read_CcaFunctionEnable():
    """Read the CCA function-enable flag (register 0x5E, bit 0).

    :returns: cca_enable
    :rtype: int
    """
    i2c.write([0x15, 0x5E])
    raw = i2c.read(4)
    cca_enable = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1
    log(DEBUG, 'DPP2607_Read_CcaFunctionEnable: cca_enable=%r', cca_enable)
    return cca_enable
def DPP2607_Read_CommunicationStatus():
    """
    Reads: Communication Status.
    DPP2607_Read_CommunicationStatus(DWORD &&CompoundStatInvCmd, DWORD &&CompoundStatParCmd, DWORD &&CompoundStatMemRd, DWORD &&CompoundStatCmdPar, DWORD &&CompoundStatCmdAbt).
    :returns: compound_stat_inv_cmd, compound_stat_par_cmd, compound_stat_mem_rd, compound_stat_cmd_par, compound_stat_cmd_abt
    :rtype: tuple[int, int, int, int, int]
    """
    # Compound command: write to 0x3A then 0x38, wait for completion,
    # then read the result word from register 0x39.  The exact statement
    # order matters; do not reorder these writes.
    # NOTE(review): sub-command 0xC4 is the same one used by
    # DPP2607_Read_SystemStatus but different bits are decoded here —
    # confirm against the DLPC2607 programmer's guide.
    i2c.write([0x3A, 0x00, 0x00, 0x00, 0x01])
    i2c.write([0x38, 0x00, 0x00, 0x00, 0xC4])
    _poll_complete()
    i2c.write([0x15, 0x39])
    payload = i2c.read(4)
    value = struct.unpack(">I", str(bytearray(payload[0:4])))[0]
    # Bits 8..12 of the result word carry the five status flags.
    compound_stat_inv_cmd = (value >> 8) & 0x1
    compound_stat_par_cmd = (value >> 9) & 0x1
    compound_stat_mem_rd = (value >> 10) & 0x1
    compound_stat_cmd_par = (value >> 11) & 0x1
    compound_stat_cmd_abt = (value >> 12) & 0x1
    log(DEBUG, 'DPP2607_Read_CommunicationStatus: compound_stat_inv_cmd=%r, compound_stat_par_cmd=%r, compound_stat_mem_rd=%r, compound_stat_cmd_par=%r, compound_stat_cmd_abt=%r', compound_stat_inv_cmd, compound_stat_par_cmd, compound_stat_mem_rd, compound_stat_cmd_par, compound_stat_cmd_abt)
    return compound_stat_inv_cmd, compound_stat_par_cmd, compound_stat_mem_rd, compound_stat_cmd_par, compound_stat_cmd_abt
def DPP2607_Read_CropFirstLine():
    """Read the crop window's first active line (register 0x29, 11 bits).

    :returns: first_active_line
    :rtype: int
    """
    i2c.write([0x15, 0x29])
    raw = i2c.read(4)
    first_active_line = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x7FF
    log(DEBUG, 'DPP2607_Read_CropFirstLine: first_active_line=%r', first_active_line)
    return first_active_line
def DPP2607_Read_CropFirstPixel():
    """Read the crop window's first active pixel (register 0x2B, 11 bits).

    :returns: first_active_pixel
    :rtype: int
    """
    i2c.write([0x15, 0x2B])
    raw = i2c.read(4)
    first_active_pixel = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x7FF
    log(DEBUG, 'DPP2607_Read_CropFirstPixel: first_active_pixel=%r', first_active_pixel)
    return first_active_pixel
def DPP2607_Read_CropLastLine():
    """Read the crop window's last active line (register 0x2A, 11 bits).

    :returns: last_active_line
    :rtype: int
    """
    i2c.write([0x15, 0x2A])
    raw = i2c.read(4)
    last_active_line = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x7FF
    log(DEBUG, 'DPP2607_Read_CropLastLine: last_active_line=%r', last_active_line)
    return last_active_line
def DPP2607_Read_CropLastPixel():
    """Read the crop window's last active pixel (register 0x2C, 11 bits).

    :returns: last_active_pixel
    :rtype: int
    """
    i2c.write([0x15, 0x2C])
    raw = i2c.read(4)
    last_active_pixel = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x7FF
    log(DEBUG, 'DPP2607_Read_CropLastPixel: last_active_pixel=%r', last_active_pixel)
    return last_active_pixel
def DPP2607_Read_DeviceStatus():
    """Read and decode the device-status word (register 0x03).

    :returns: dev_id, dev_flash_status, dev_init_status, dev_led_status
    :rtype: tuple[DevID, DevFlashStatus, DevInitStatus, DevLEDStatus]
    """
    i2c.write([0x15, 0x03])
    raw = i2c.read(4)
    word = struct.unpack(">I", str(bytearray(raw[:4])))[0]
    # Device ID occupies the low byte; the three status flags sit at
    # bits 10, 11 and 12.
    dev_id = DevID(word & 0xFF)
    dev_flash_status = DevFlashStatus((word >> 10) & 0x1)
    dev_init_status = DevInitStatus((word >> 11) & 0x1)
    dev_led_status = DevLEDStatus((word >> 12) & 0x1)
    log(DEBUG, 'DPP2607_Read_DeviceStatus: dev_id=%r, dev_flash_status=%r, dev_init_status=%r, dev_led_status=%r', dev_id, dev_flash_status, dev_init_status, dev_led_status)
    return dev_id, dev_flash_status, dev_init_status, dev_led_status
def DPP2607_Read_DisplayCurtainControl():
    """Read curtain enable and curtain color (register 0xA6).

    :returns: dmd_curtain_ctl, dmd_curtain_color
    :rtype: tuple[EnabledDisabled, DMDCurtainColor]
    """
    i2c.write([0x15, 0xA6])
    raw = i2c.read(4)
    word = struct.unpack(">I", str(bytearray(raw[:4])))[0]
    # Enable nibble in bits 0-3, color nibble in bits 4-7.
    dmd_curtain_ctl = EnabledDisabled(word & 0xF)
    dmd_curtain_color = DMDCurtainColor((word >> 4) & 0xF)
    log(DEBUG, 'DPP2607_Read_DisplayCurtainControl: dmd_curtain_ctl=%r, dmd_curtain_color=%r', dmd_curtain_ctl, dmd_curtain_color)
    return dmd_curtain_ctl, dmd_curtain_color
def DPP2607_Read_DmdPark():
    """Read the DMD park state (register 0x2D, bit 0).

    :returns: dmdpark
    :rtype: DMDPARK
    """
    i2c.write([0x15, 0x2D])
    raw = i2c.read(4)
    dmdpark = DMDPARK(struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1)
    log(DEBUG, 'DPP2607_Read_DmdPark: dmdpark=%r', dmdpark)
    return dmdpark
def DPP2607_Read_EmbeddedSoftwareVersion():
    """
    Reads: Embedded Software Version.
    DPP2607_Read_EmbeddedSoftwareVersion(DWORD &&CompoundICPPatch, DWORD &&CompoundICPMinor, DWORD &&CompoundICPMajor).
    :returns: compound_icp_patch, compound_icp_minor, compound_icp_major
    :rtype: tuple[int, int, int]
    """
    # Compound command: write to 0x3A then 0x38 (sub-command 0x02),
    # wait for completion, then read register 0x39.  Keep this order.
    i2c.write([0x3A, 0x00, 0x00, 0x00, 0x01])
    i2c.write([0x38, 0x00, 0x00, 0x00, 0x02])
    _poll_complete()
    i2c.write([0x15, 0x39])
    payload = i2c.read(4)
    value = struct.unpack(">I", str(bytearray(payload[0:4])))[0]
    # Version word layout: patch in the low 16 bits, minor in bits
    # 16-23, major in bits 24-31.
    compound_icp_patch = (value >> 0) & 0xffff
    compound_icp_minor = (value >> 16) & 0xff
    compound_icp_major = (value >> 24) & 0xff
    log(DEBUG, 'DPP2607_Read_EmbeddedSoftwareVersion: compound_icp_patch=%r, compound_icp_minor=%r, compound_icp_major=%r', compound_icp_patch, compound_icp_minor, compound_icp_major)
    return compound_icp_patch, compound_icp_minor, compound_icp_major
def DPP2607_Read_ImageLongFlip():
    """Read the long-side image flip state (register 0x0F, bit 0).

    :returns: flip_long
    :rtype: EnabledDisabled
    """
    i2c.write([0x15, 0x0F])
    raw = i2c.read(4)
    flip_long = EnabledDisabled(struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1)
    log(DEBUG, 'DPP2607_Read_ImageLongFlip: flip_long=%r', flip_long)
    return flip_long
def DPP2607_Read_ImageRotationSettings():
    """Read the image rotation setting (register 0x0E, bit 0).

    :returns: rotation_setting
    :rtype: RotationSetting
    """
    i2c.write([0x15, 0x0E])
    raw = i2c.read(4)
    rotation_setting = RotationSetting(struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1)
    log(DEBUG, 'DPP2607_Read_ImageRotationSettings: rotation_setting=%r', rotation_setting)
    return rotation_setting
def DPP2607_Read_ImageShortFlip():
    """Read the short-side image flip state (register 0x10, bit 0).

    :returns: flip_short
    :rtype: EnabledDisabled
    """
    i2c.write([0x15, 0x10])
    raw = i2c.read(4)
    flip_short = EnabledDisabled(struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1)
    log(DEBUG, 'DPP2607_Read_ImageShortFlip: flip_short=%r', flip_short)
    return flip_short
def DPP2607_Read_InternalTestPattern():
    """Read the selected internal test pattern (register 0x11, low nibble).

    :returns: test_pattern
    :rtype: TestPattern
    """
    i2c.write([0x15, 0x11])
    raw = i2c.read(4)
    test_pattern = TestPattern(struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0xF)
    log(DEBUG, 'DPP2607_Read_InternalTestPattern: test_pattern=%r', test_pattern)
    return test_pattern
def DPP2607_Read_InterruptStatus():
    """
    Reads: Interrupt Status.
    DPP2607_Read_InterruptStatus(DWORD &&IntSeqAbort, DWORD &&IntDMDResetOverrun, DWORD &&IntDMDBlockError, DWORD &&IntDMDIFOverrun, DWORD &&IntFormatBufOverflow, DWORD &&IntFormatStarvation, DWORD &&IntFlashFIFOErr, DWORD &&IntFlashDMAErr, DWORD &&IntFormatMultErr, DWORD &&IntFormatCmdErr, DWORD &&IntFormatQueueWarn, DWORD &&IntDDROverflowBP, DWORD &&IntDDROverflowFB, DWORD &&IntScalerLineErr, DWORD &&IntScalerPixerr, DWORD &&IntLEDTimeout).
    :returns: int_seq_abort, int_dmd_reset_overrun, int_dmd_block_error, int_dmdif_overrun, int_format_buf_overflow, int_format_starvation, int_flash_fifo_err, int_flash_dma_err, int_format_mult_err, int_format_cmd_err, int_format_queue_warn, int_ddr_overflow_bp, int_ddr_overflow_fb, int_scaler_line_err, int_scaler_pixerr, int_led_timeout
    :rtype: tuple[int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int]
    """
    i2c.write([0x15, 0x00])
    payload = i2c.read(4)
    value = struct.unpack(">I", str(bytearray(payload[0:4])))[0]
    # One flag per interrupt bit; note bits 6, 16 and 17 are not decoded
    # here (unused in this extraction).
    int_seq_abort = (value >> 0) & 0x1
    int_dmd_reset_overrun = (value >> 1) & 0x1
    int_dmd_block_error = (value >> 2) & 0x1
    int_dmdif_overrun = (value >> 3) & 0x1
    int_format_buf_overflow = (value >> 4) & 0x1
    int_format_starvation = (value >> 5) & 0x1
    int_flash_fifo_err = (value >> 7) & 0x1
    int_flash_dma_err = (value >> 8) & 0x1
    int_format_mult_err = (value >> 9) & 0x1
    int_format_cmd_err = (value >> 10) & 0x1
    int_format_queue_warn = (value >> 11) & 0x1
    int_ddr_overflow_bp = (value >> 12) & 0x1
    int_ddr_overflow_fb = (value >> 13) & 0x1
    int_scaler_line_err = (value >> 14) & 0x1
    int_scaler_pixerr = (value >> 15) & 0x1
    int_led_timeout = (value >> 18) & 0x1
    log(DEBUG, 'DPP2607_Read_InterruptStatus: int_seq_abort=%r, int_dmd_reset_overrun=%r, int_dmd_block_error=%r, int_dmdif_overrun=%r, int_format_buf_overflow=%r, int_format_starvation=%r, int_flash_fifo_err=%r, int_flash_dma_err=%r, int_format_mult_err=%r, int_format_cmd_err=%r, int_format_queue_warn=%r, int_ddr_overflow_bp=%r, int_ddr_overflow_fb=%r, int_scaler_line_err=%r, int_scaler_pixerr=%r, int_led_timeout=%r', int_seq_abort, int_dmd_reset_overrun, int_dmd_block_error, int_dmdif_overrun, int_format_buf_overflow, int_format_starvation, int_flash_fifo_err, int_flash_dma_err, int_format_mult_err, int_format_cmd_err, int_format_queue_warn, int_ddr_overflow_bp, int_ddr_overflow_fb, int_scaler_line_err, int_scaler_pixerr, int_led_timeout)
    return int_seq_abort, int_dmd_reset_overrun, int_dmd_block_error, int_dmdif_overrun, int_format_buf_overflow, int_format_starvation, int_flash_fifo_err, int_flash_dma_err, int_format_mult_err, int_format_cmd_err, int_format_queue_warn, int_ddr_overflow_bp, int_ddr_overflow_fb, int_scaler_line_err, int_scaler_pixerr, int_led_timeout
def DPP2607_Read_LedCurrentBlue():
    """Read the blue LED current PWM value (register 0x14, 11 bits).

    :returns: pwm_blu
    :rtype: int
    """
    i2c.write([0x15, 0x14])
    raw = i2c.read(4)
    pwm_blu = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x7FF
    log(DEBUG, 'DPP2607_Read_LedCurrentBlue: pwm_blu=%r', pwm_blu)
    return pwm_blu
def DPP2607_Read_LedCurrentGreen():
    """Read the green LED current PWM value (register 0x13, 11 bits).

    :returns: pwm_grn
    :rtype: int
    """
    i2c.write([0x15, 0x13])
    raw = i2c.read(4)
    pwm_grn = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x7FF
    log(DEBUG, 'DPP2607_Read_LedCurrentGreen: pwm_grn=%r', pwm_grn)
    return pwm_grn
def DPP2607_Read_LedCurrentRed():
    """Read the red LED current PWM value (register 0x12, 11 bits).

    :returns: pwm_red
    :rtype: int
    """
    i2c.write([0x15, 0x12])
    raw = i2c.read(4)
    pwm_red = struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x7FF
    log(DEBUG, 'DPP2607_Read_LedCurrentRed: pwm_red=%r', pwm_red)
    return pwm_red
def DPP2607_Read_LedDriverEnable():
    """Read the per-color LED driver enables (register 0x16).

    :returns: led_enable_red, led_enable_grn, led_enable_blu
    :rtype: tuple[EnabledDisabled, EnabledDisabled, EnabledDisabled]
    """
    i2c.write([0x15, 0x16])
    raw = i2c.read(4)
    word = struct.unpack(">I", str(bytearray(raw[:4])))[0]
    # Red, green, blue enables in bits 0, 1 and 2 respectively.
    led_enable_red = EnabledDisabled(word & 0x1)
    led_enable_grn = EnabledDisabled((word >> 1) & 0x1)
    led_enable_blu = EnabledDisabled((word >> 2) & 0x1)
    log(DEBUG, 'DPP2607_Read_LedDriverEnable: led_enable_red=%r, led_enable_grn=%r, led_enable_blu=%r', led_enable_red, led_enable_grn, led_enable_blu)
    return led_enable_red, led_enable_grn, led_enable_blu
def DPP2607_Read_ParallelBusPolarityControl():
    """Read the parallel-bus signal polarities (register 0xAF).

    :returns: polarity_hsync, polarity_vsync, polarity_pixel_clock, polarity_data_en
    :rtype: tuple[Polarity, Polarity, PolarityPixelClock, PolarityDataEn]
    """
    i2c.write([0x15, 0xAF])
    raw = i2c.read(4)
    word = struct.unpack(">I", str(bytearray(raw[:4])))[0]
    # HSYNC, VSYNC, pixel clock and DATAEN polarities in bits 1-4
    # (bit 0 is not decoded here).
    polarity_hsync = Polarity((word >> 1) & 0x1)
    polarity_vsync = Polarity((word >> 2) & 0x1)
    polarity_pixel_clock = PolarityPixelClock((word >> 3) & 0x1)
    polarity_data_en = PolarityDataEn((word >> 4) & 0x1)
    log(DEBUG, 'DPP2607_Read_ParallelBusPolarityControl: polarity_hsync=%r, polarity_vsync=%r, polarity_pixel_clock=%r, polarity_data_en=%r', polarity_hsync, polarity_vsync, polarity_pixel_clock, polarity_data_en)
    return polarity_hsync, polarity_vsync, polarity_pixel_clock, polarity_data_en
def DPP2607_Read_SystemStatus():
    """
    Reads: System Status.
    DPP2607_Read_SystemStatus(DWORD &&CompoundStatInit, DWORD &&CompoundStatFlash, DWORD &&CompoundStatTemp, DWORD &&CompoundStatPAD, DWORD &&CompoundStatLED, DWORD &&CompoundStatBIST).
    :returns: compound_stat_init, compound_stat_flash, compound_stat_temp, compound_stat_pad, compound_stat_led, compound_stat_bist
    :rtype: tuple[int, int, int, int, CompoundStat, CompoundStat]
    """
    # Compound command: write to 0x3A then 0x38 (sub-command 0xC4),
    # wait for completion, then read register 0x39.  Keep this order.
    i2c.write([0x3A, 0x00, 0x00, 0x00, 0x01])
    i2c.write([0x38, 0x00, 0x00, 0x00, 0xC4])
    _poll_complete()
    i2c.write([0x15, 0x39])
    payload = i2c.read(4)
    value = struct.unpack(">I", str(bytearray(payload[0:4])))[0]
    # Raw flags in bits 0-3; LED-calibration and BIST states (decoded
    # as CompoundStat) in bits 5 and 6 — bit 4 is not decoded here.
    compound_stat_init = (value >> 0) & 0x1
    compound_stat_flash = (value >> 1) & 0x1
    compound_stat_temp = (value >> 2) & 0x1
    compound_stat_pad = (value >> 3) & 0x1
    compound_stat_led = CompoundStat((value >> 5) & 0x1)
    compound_stat_bist = CompoundStat((value >> 6) & 0x1)
    log(DEBUG, 'DPP2607_Read_SystemStatus: compound_stat_init=%r, compound_stat_flash=%r, compound_stat_temp=%r, compound_stat_pad=%r, compound_stat_led=%r, compound_stat_bist=%r', compound_stat_init, compound_stat_flash, compound_stat_temp, compound_stat_pad, compound_stat_led, compound_stat_bist)
    return compound_stat_init, compound_stat_flash, compound_stat_temp, compound_stat_pad, compound_stat_led, compound_stat_bist
def DPP2607_Read_SystemTemperature():
    """
    Reads: System Temperature.
    DPP2607_Read_SystemTemperature(DWORD &&CompoundTemp).
    :returns: compound_temp
    :rtype: int
    """
    # Compound command: write to 0x3A then 0x38 (sub-command 0xC5),
    # wait for completion, then read register 0x39.  Keep this order.
    i2c.write([0x3A, 0x00, 0x00, 0x00, 0x01])
    i2c.write([0x38, 0x00, 0x00, 0x00, 0xC5])
    _poll_complete()
    i2c.write([0x15, 0x39])
    payload = i2c.read(4)
    # 0xffffffffL is a Python 2 long literal (this module relies on
    # Py2 str/bytes semantics throughout); the mask keeps the full
    # 32-bit word.  NOTE(review): units/scale of the temperature value
    # are not defined here — confirm against the programmer's guide.
    compound_temp = (struct.unpack(">I", str(bytearray(payload[0:4])))[0] >> 0) & 0xffffffffL
    log(DEBUG, 'DPP2607_Read_SystemTemperature: compound_temp=%r', compound_temp)
    return compound_temp
def DPP2607_Read_VideoPixelFormat():
    """Read the input pixel format (register 0x0D, low nibble).

    :returns: pix_format
    :rtype: PixFormat
    """
    i2c.write([0x15, 0x0D])
    raw = i2c.read(4)
    pix_format = PixFormat(struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0xF)
    log(DEBUG, 'DPP2607_Read_VideoPixelFormat: pix_format=%r', pix_format)
    return pix_format
def DPP2607_Read_VideoResolution():
    """Read the input video resolution (register 0x0C, low 5 bits).

    :returns: resolution
    :rtype: Resolution
    """
    i2c.write([0x15, 0x0C])
    raw = i2c.read(4)
    resolution = Resolution(struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x1F)
    log(DEBUG, 'DPP2607_Read_VideoResolution: resolution=%r', resolution)
    return resolution
def DPP2607_Read_VideoSourceSelection():
    """Read the selected video input source (register 0x0B, low 3 bits).

    :returns: source_sel
    :rtype: SourceSel
    """
    i2c.write([0x15, 0x0B])
    raw = i2c.read(4)
    source_sel = SourceSel(struct.unpack(">I", str(bytearray(raw[:4])))[0] & 0x7)
    log(DEBUG, 'DPP2607_Read_VideoSourceSelection: source_sel=%r', source_sel)
    return source_sel
def DPP2607_Write_CcaC1r1Coefficient(ccac1r1):
    """Write the CCA C1R1 coefficient (register 0x5F, masked to 9 bits).

    :type ccac1r1: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC1r1Coefficient(%r)', ccac1r1)
    i2c.write([0x5F] + list(bytearray(struct.pack(">I", ccac1r1 & 0x1ff))))
def DPP2607_Write_CcaC1r2Coefficient(ccac1r2):
    """Write the CCA C1R2 coefficient (register 0x60, masked to 9 bits).

    :type ccac1r2: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC1r2Coefficient(%r)', ccac1r2)
    i2c.write([0x60] + list(bytearray(struct.pack(">I", ccac1r2 & 0x1ff))))
def DPP2607_Write_CcaC1r3Coefficient(ccac1r3):
    """Write the CCA C1R3 coefficient (register 0x61, masked to 9 bits).

    :type ccac1r3: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC1r3Coefficient(%r)', ccac1r3)
    i2c.write([0x61] + list(bytearray(struct.pack(">I", ccac1r3 & 0x1ff))))
def DPP2607_Write_CcaC2r1Coefficient(ccac2r1):
    """Write the CCA C2R1 coefficient (register 0x62, masked to 9 bits).

    :type ccac2r1: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC2r1Coefficient(%r)', ccac2r1)
    i2c.write([0x62] + list(bytearray(struct.pack(">I", ccac2r1 & 0x1ff))))
def DPP2607_Write_CcaC2r2Coefficient(ccac2r2):
    """Write the CCA C2R2 coefficient (register 0x63, masked to 9 bits).

    :type ccac2r2: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC2r2Coefficient(%r)', ccac2r2)
    i2c.write([0x63] + list(bytearray(struct.pack(">I", ccac2r2 & 0x1ff))))
def DPP2607_Write_CcaC2r3Coefficient(ccac2r3):
    """Write the CCA C2R3 coefficient (register 0x64, masked to 9 bits).

    :type ccac2r3: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC2r3Coefficient(%r)', ccac2r3)
    i2c.write([0x64] + list(bytearray(struct.pack(">I", ccac2r3 & 0x1ff))))
def DPP2607_Write_CcaC3r1Coefficient(ccac3r1):
    """Write the CCA C3R1 coefficient (register 0x65, masked to 9 bits).

    :type ccac3r1: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC3r1Coefficient(%r)', ccac3r1)
    i2c.write([0x65] + list(bytearray(struct.pack(">I", ccac3r1 & 0x1ff))))
def DPP2607_Write_CcaC3r2Coefficient(ccac3r2):
    """Write the CCA C3R2 coefficient (register 0x66, masked to 9 bits).

    :type ccac3r2: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC3r2Coefficient(%r)', ccac3r2)
    i2c.write([0x66] + list(bytearray(struct.pack(">I", ccac3r2 & 0x1ff))))
def DPP2607_Write_CcaC3r3Coefficient(ccac3r3):
    """Write the CCA C3R3 coefficient (register 0x67, masked to 9 bits).

    :type ccac3r3: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC3r3Coefficient(%r)', ccac3r3)
    i2c.write([0x67] + list(bytearray(struct.pack(">I", ccac3r3 & 0x1ff))))
def DPP2607_Write_CcaC4r1Coefficient(ccac4r1):
    """Write the CCA C4R1 coefficient (register 0x68, masked to 9 bits).

    :type ccac4r1: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC4r1Coefficient(%r)', ccac4r1)
    i2c.write([0x68] + list(bytearray(struct.pack(">I", ccac4r1 & 0x1ff))))
def DPP2607_Write_CcaC4r2Coefficient(ccac4r2):
    """Write the CCA C4R2 coefficient (register 0x69, masked to 9 bits).

    :type ccac4r2: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC4r2Coefficient(%r)', ccac4r2)
    i2c.write([0x69] + list(bytearray(struct.pack(">I", ccac4r2 & 0x1ff))))
def DPP2607_Write_CcaC4r3Coefficient(ccac4r3):
    """Write the CCA C4R3 coefficient (register 0x6A, masked to 9 bits).

    :type ccac4r3: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC4r3Coefficient(%r)', ccac4r3)
    i2c.write([0x6A] + list(bytearray(struct.pack(">I", ccac4r3 & 0x1ff))))
def DPP2607_Write_CcaC5r1Coefficient(ccac5r1):
    """Write the CCA C5R1 coefficient (register 0x6B, masked to 9 bits).

    :type ccac5r1: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC5r1Coefficient(%r)', ccac5r1)
    i2c.write([0x6B] + list(bytearray(struct.pack(">I", ccac5r1 & 0x1ff))))
def DPP2607_Write_CcaC5r2Coefficient(ccac5r2):
    """Write the CCA C5R2 coefficient (register 0x6C, masked to 9 bits).

    :type ccac5r2: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC5r2Coefficient(%r)', ccac5r2)
    i2c.write([0x6C] + list(bytearray(struct.pack(">I", ccac5r2 & 0x1ff))))
def DPP2607_Write_CcaC5r3Coefficient(ccac5r3):
    """Write the CCA C5R3 coefficient (register 0x6D, masked to 9 bits).

    :type ccac5r3: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC5r3Coefficient(%r)', ccac5r3)
    i2c.write([0x6D] + list(bytearray(struct.pack(">I", ccac5r3 & 0x1ff))))
def DPP2607_Write_CcaC6r1Coefficient(ccac6r1):
    """Write the CCA C6R1 coefficient (register 0x6E, masked to 9 bits).

    :type ccac6r1: int
    :rtype: None
    """
    log(DEBUG, 'DPP2607_Write_CcaC6r1Coefficient(%r)', ccac6r1)
    i2c.write([0x6E] + list(bytearray(struct.pack(">I", ccac6r1 & 0x1ff))))
def DPP2607_Write_CcaC6r2Coefficient(ccac6r2):
"""
Writes: CCA C6R2 Coefficient.
DPP2607_Write_CcaC6r2Coefficient(DWORD CCAC6R2).
:type ccac6r2: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_CcaC6r2Coefficient(%r)', ccac6r2)
payload = [0x6F]
payload.extend(list(bytearray(struct.pack(">I", ccac6r2 & 0x1ff))))
i2c.write(payload)
def DPP2607_Write_CcaC6r3Coefficient(ccac6r3):
"""
Writes: CCA C6R3 Coefficient.
DPP2607_Write_CcaC6r3Coefficient(DWORD CCAC6R3).
:type ccac6r3: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_CcaC6r3Coefficient(%r)', ccac6r3)
payload = [0x70]
payload.extend(list(bytearray(struct.pack(">I", ccac6r3 & 0x1ff))))
i2c.write(payload)
def DPP2607_Write_CcaC7r1Coefficient(ccac7r1):
"""
Writes: CCA C7R1 Coefficient.
DPP2607_Write_CcaC7r1Coefficient(DWORD CCAC7R1).
:type ccac7r1: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_CcaC7r1Coefficient(%r)', ccac7r1)
payload = [0x71]
payload.extend(list(bytearray(struct.pack(">I", ccac7r1 & 0x1ff))))
i2c.write(payload)
def DPP2607_Write_CcaC7r2Coefficient(ccac7r2):
"""
Writes: CCA C7R2 Coefficient.
DPP2607_Write_CcaC7r2Coefficient(DWORD CCAC7R2).
:type ccac7r2: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_CcaC7r2Coefficient(%r)', ccac7r2)
payload = [0x72]
payload.extend(list(bytearray(struct.pack(">I", ccac7r2 & 0x1ff))))
i2c.write(payload)
def DPP2607_Write_CcaC7r3Coefficient(ccac7r3):
"""
Writes: CCA C7R3 Coefficient.
DPP2607_Write_CcaC7r3Coefficient(DWORD CCAC7R3).
:type ccac7r3: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_CcaC7r3Coefficient(%r)', ccac7r3)
payload = [0x73]
payload.extend(list(bytearray(struct.pack(">I", ccac7r3 & 0x1ff))))
i2c.write(payload)
def DPP2607_Write_CcaFunctionEnable(cca_enable):
"""
Writes: CCA Function Enable.
DPP2607_Write_CcaFunctionEnable(DWORD CCAEnable).
:type cca_enable: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_CcaFunctionEnable(%r)', cca_enable)
payload = [0x5E]
payload.extend(list(bytearray(struct.pack(">I", cca_enable & 0x1))))
i2c.write(payload)
def DPP2607_Write_CheckerboardAnsiPattern():
"""
Writes: Checkerboard ANSI Pattern.
DPP2607_Write_CheckerboardAnsiPattern().
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_CheckerboardAnsiPattern()', )
payload = [0x11]
payload.extend([0, 0, 0, 13]) # test_pattern_ansi
i2c.write(payload)
def DPP2607_Write_CropFirstLine(first_active_line):
"""
Writes: Crop - First Line.
DPP2607_Write_CropFirstLine(DWORD FirstActiveLine).
:type first_active_line: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_CropFirstLine(%r)', first_active_line)
payload = [0x29]
payload.extend(list(bytearray(struct.pack(">I", first_active_line & 0x7ff))))
i2c.write(payload)
def DPP2607_Write_CropFirstPixel(first_active_pixel):
"""
Writes: Crop - First Pixel.
DPP2607_Write_CropFirstPixel(DWORD FirstActivePixel).
:type first_active_pixel: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_CropFirstPixel(%r)', first_active_pixel)
payload = [0x2B]
payload.extend(list(bytearray(struct.pack(">I", first_active_pixel & 0x7ff))))
i2c.write(payload)
def DPP2607_Write_CropLastLine(last_active_line):
"""
Writes: Crop - Last Line.
DPP2607_Write_CropLastLine(DWORD LastActiveLine).
:type last_active_line: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_CropLastLine(%r)', last_active_line)
payload = [0x2A]
payload.extend(list(bytearray(struct.pack(">I", last_active_line & 0x7ff))))
i2c.write(payload)
def DPP2607_Write_CropLastPixel(last_active_pixel):
"""
Writes: Crop - Last Pixel.
DPP2607_Write_CropLastPixel(DWORD LastActivePixel).
:type last_active_pixel: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_CropLastPixel(%r)', last_active_pixel)
payload = [0x2C]
payload.extend(list(bytearray(struct.pack(">I", last_active_pixel & 0x7ff))))
i2c.write(payload)
def DPP2607_Write_DiagonalLinesPattern():
"""
Writes: Diagonal Lines Pattern.
DPP2607_Write_DiagonalLinesPattern().
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_DiagonalLinesPattern()', )
payload = [0x11]
payload.extend([0, 0, 0, 10]) # test_pattern_d_lines
i2c.write(payload)
def DPP2607_Write_DisplayCurtainControl(dmd_curtain_ctl, dmd_curtain_color):
"""
Writes: Display Curtain Control.
DPP2607_Write_DisplayCurtainControl(DWORD DMDCurtainCtl, DWORD DMDCurtainColor).
:type dmd_curtain_ctl: EnabledDisabled
:type dmd_curtain_color: DMDCurtainColor
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_DisplayCurtainControl(%r, %r)', dmd_curtain_ctl, dmd_curtain_color)
payload = [0xA6]
value = 0
value |= (dmd_curtain_ctl & 0xf) << 0
value |= (dmd_curtain_color & 0xf) << 4
payload.extend(list(bytearray(struct.pack(">I", value))))
i2c.write(payload)
def DPP2607_Write_DmdPark(dmdpark):
"""
Writes: DMD PARK.
DPP2607_Write_DmdPark(DWORD DMDPARK).
:type dmdpark: DMDPARK
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_DmdPark(%r)', dmdpark)
payload = [0x2D]
payload.extend(list(bytearray(struct.pack(">I", dmdpark & 0x1))))
i2c.write(payload)
def DPP2607_Write_FineCheckerboardPattern():
"""
Writes: Fine Checkerboard Pattern.
DPP2607_Write_FineCheckerboardPattern().
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_FineCheckerboardPattern()', )
payload = [0x11]
payload.extend([0, 0, 0, 0]) # test_pattern_fine_checker
i2c.write(payload)
def DPP2607_Write_HorizontalGrayRampPattern():
"""
Writes: Horizontal Gray Ramp Pattern.
DPP2607_Write_HorizontalGrayRampPattern().
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_HorizontalGrayRampPattern()', )
payload = [0x11]
payload.extend([0, 0, 0, 12]) # test_pattern_gray_ramp_h
i2c.write(payload)
def DPP2607_Write_HorizontalLinesPattern(test_pattern_h_lines):
"""
Writes: Horizontal Lines Pattern.
DPP2607_Write_HorizontalLinesPattern(DWORD TestPatternHLines).
:type test_pattern_h_lines: TestPatternHLines
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_HorizontalLinesPattern(%r)', test_pattern_h_lines)
payload = [0x11]
payload.extend(list(bytearray(struct.pack(">I", test_pattern_h_lines & 0xf))))
i2c.write(payload)
def DPP2607_Write_ImageLongFlip(flip_long):
"""
Writes: Image Long Flip.
DPP2607_Write_ImageLongFlip(DWORD FlipLong).
:type flip_long: EnabledDisabled
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_ImageLongFlip(%r)', flip_long)
payload = [0x0F]
payload.extend(list(bytearray(struct.pack(">I", flip_long & 0x1))))
i2c.write(payload)
def DPP2607_Write_ImageRotationSettings(rotation_setting):
"""
Writes: Image Rotation Settings.
DPP2607_Write_ImageRotationSettings(DWORD RotationSetting).
:type rotation_setting: RotationSetting
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_ImageRotationSettings(%r)', rotation_setting)
payload = [0x0E]
payload.extend(list(bytearray(struct.pack(">I", rotation_setting & 0x1))))
i2c.write(payload)
def DPP2607_Write_ImageShortFlip(flip_short):
"""
Writes: Image Short Flip.
DPP2607_Write_ImageShortFlip(DWORD FlipShort).
:type flip_short: EnabledDisabled
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_ImageShortFlip(%r)', flip_short)
payload = [0x10]
payload.extend(list(bytearray(struct.pack(">I", flip_short & 0x1))))
i2c.write(payload)
def DPP2607_Write_InterruptStatus(int_seq_abort, int_dmd_reset_overrun, int_dmd_block_error, int_dmdif_overrun, int_format_buf_overflow, int_format_starvation, int_flash_fifo_err, int_flash_dma_err, int_format_mult_err, int_format_cmd_err, int_format_queue_warn, int_ddr_overflow_bp, int_ddr_overflow_fb, int_scaler_line_err, int_scaler_pixerr, int_led_timeout):
"""
Writes: Interrupt Status.
DPP2607_Write_InterruptStatus(DWORD IntSeqAbort, DWORD IntDMDResetOverrun, DWORD IntDMDBlockError, DWORD IntDMDIFOverrun, DWORD IntFormatBufOverflow, DWORD IntFormatStarvation, DWORD IntFlashFIFOErr, DWORD IntFlashDMAErr, DWORD IntFormatMultErr, DWORD IntFormatCmdErr, DWORD IntFormatQueueWarn, DWORD IntDDROverflowBP, DWORD IntDDROverflowFB, DWORD IntScalerLineErr, DWORD IntScalerPixerr, DWORD IntLEDTimeout).
:type int_seq_abort: int
:type int_dmd_reset_overrun: int
:type int_dmd_block_error: int
:type int_dmdif_overrun: int
:type int_format_buf_overflow: int
:type int_format_starvation: int
:type int_flash_fifo_err: int
:type int_flash_dma_err: int
:type int_format_mult_err: int
:type int_format_cmd_err: int
:type int_format_queue_warn: int
:type int_ddr_overflow_bp: int
:type int_ddr_overflow_fb: int
:type int_scaler_line_err: int
:type int_scaler_pixerr: int
:type int_led_timeout: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_InterruptStatus(%r, %r, %r, %r, %r, %r, %r, %r, %r, %r, %r, %r, %r, %r, %r, %r)', int_seq_abort, int_dmd_reset_overrun, int_dmd_block_error, int_dmdif_overrun, int_format_buf_overflow, int_format_starvation, int_flash_fifo_err, int_flash_dma_err, int_format_mult_err, int_format_cmd_err, int_format_queue_warn, int_ddr_overflow_bp, int_ddr_overflow_fb, int_scaler_line_err, int_scaler_pixerr, int_led_timeout)
payload = [0x00]
value = 0
value |= (int_seq_abort & 0x1) << 0
value |= (int_dmd_reset_overrun & 0x1) << 1
value |= (int_dmd_block_error & 0x1) << 2
value |= (int_dmdif_overrun & 0x1) << 3
value |= (int_format_buf_overflow & 0x1) << 4
value |= (int_format_starvation & 0x1) << 5
value |= (int_flash_fifo_err & 0x1) << 7
value |= (int_flash_dma_err & 0x1) << 8
value |= (int_format_mult_err & 0x1) << 9
value |= (int_format_cmd_err & 0x1) << 10
value |= (int_format_queue_warn & 0x1) << 11
value |= (int_ddr_overflow_bp & 0x1) << 12
value |= (int_ddr_overflow_fb & 0x1) << 13
value |= (int_scaler_line_err & 0x1) << 14
value |= (int_scaler_pixerr & 0x1) << 15
value |= (int_led_timeout & 0x1) << 18
payload.extend(list(bytearray(struct.pack(">I", value))))
i2c.write(payload)
def DPP2607_Write_LedCurrentBlue(pwm_blu):
"""
Writes: LED Current - Blue.
DPP2607_Write_LedCurrentBlue(DWORD PWMBlu).
:type pwm_blu: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_LedCurrentBlue(%r)', pwm_blu)
payload = [0x14]
payload.extend(list(bytearray(struct.pack(">I", pwm_blu & 0x7ff))))
i2c.write(payload)
def DPP2607_Write_LedCurrentGreen(pwm_grn):
"""
Writes: LED Current - Green.
DPP2607_Write_LedCurrentGreen(DWORD PWMGrn).
:type pwm_grn: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_LedCurrentGreen(%r)', pwm_grn)
payload = [0x13]
payload.extend(list(bytearray(struct.pack(">I", pwm_grn & 0x7ff))))
i2c.write(payload)
def DPP2607_Write_LedCurrentRed(pwm_red):
"""
Writes: LED Current - Red.
DPP2607_Write_LedCurrentRed(DWORD PWMRed).
:type pwm_red: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_LedCurrentRed(%r)', pwm_red)
payload = [0x12]
payload.extend(list(bytearray(struct.pack(">I", pwm_red & 0x7ff))))
i2c.write(payload)
def DPP2607_Write_LedDriverEnable(led_enable_red, led_enable_grn, led_enable_blu):
"""
Writes: LED Driver Enable.
DPP2607_Write_LedDriverEnable(DWORD LEDEnableRed, DWORD LEDEnableGrn, DWORD LEDEnableBlu).
:type led_enable_red: EnabledDisabled
:type led_enable_grn: EnabledDisabled
:type led_enable_blu: EnabledDisabled
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_LedDriverEnable(%r, %r, %r)', led_enable_red, led_enable_grn, led_enable_blu)
payload = [0x16]
value = 0
value |= (led_enable_red & 0x1) << 0
value |= (led_enable_grn & 0x1) << 1
value |= (led_enable_blu & 0x1) << 2
payload.extend(list(bytearray(struct.pack(">I", value))))
i2c.write(payload)
def DPP2607_Write_ParallelBusPolarityControl(polarity_hsync, polarity_vsync, polarity_pixel_clock, polarity_data_en):
"""
Writes: Parallel Bus Polarity Control.
DPP2607_Write_ParallelBusPolarityControl(DWORD PolarityHSYNC, DWORD PolarityVSYNC, DWORD PolarityPixelClock, DWORD PolarityDataEn).
:type polarity_hsync: Polarity
:type polarity_vsync: Polarity
:type polarity_pixel_clock: PolarityPixelClock
:type polarity_data_en: PolarityDataEn
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_ParallelBusPolarityControl(%r, %r, %r, %r)', polarity_hsync, polarity_vsync, polarity_pixel_clock, polarity_data_en)
payload = [0xAF]
value = 0
value |= (polarity_hsync & 0x1) << 1
value |= (polarity_vsync & 0x1) << 2
value |= (polarity_pixel_clock & 0x1) << 3
value |= (polarity_data_en & 0x1) << 4
payload.extend(list(bytearray(struct.pack(">I", value))))
i2c.write(payload)
def DPP2607_Write_PropagateLedCurrents(led_latch):
"""
Writes: Propagate LED Currents.
DPP2607_Write_PropagateLedCurrents(DWORD LEDLatch).
:type led_latch: int
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_PropagateLedCurrents(%r)', led_latch)
payload = [0x39]
payload.extend(list(bytearray(struct.pack(">I", led_latch))))
i2c.write(payload)
i2c.write([0x3A, 0x00, 0x00, 0x00, 0x01])
i2c.write([0x38, 0x00, 0x00, 0x00, 0xD3])
_poll_complete()
def DPP2607_Write_SequenceSelect(compound_looks):
"""
Writes: Sequence Select.
DPP2607_Write_SequenceSelect(DWORD CompoundLooks).
:type compound_looks: CompoundLooks
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_SequenceSelect(%r)', compound_looks)
payload = [0x39]
payload.extend(list(bytearray(struct.pack(">I", compound_looks))))
i2c.write(payload)
i2c.write([0x3A, 0x00, 0x00, 0x00, 0x01])
i2c.write([0x38, 0x00, 0x00, 0x00, 0xC1])
_poll_complete()
def DPP2607_Write_SetSplashScreen(compound_splash):
"""
Writes: Set Splash Screen.
DPP2607_Write_SetSplashScreen(DWORD CompoundSplash).
:type compound_splash: CompoundSplash
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_SetSplashScreen(%r)', compound_splash)
payload = [0x39]
payload.extend(list(bytearray(struct.pack(">I", compound_splash))))
i2c.write(payload)
i2c.write([0x3A, 0x00, 0x00, 0x00, 0x01])
i2c.write([0x38, 0x00, 0x00, 0x00, 0xBD])
_poll_complete()
def DPP2607_Write_SolidFieldPattern(test_pattern_solids):
"""
Writes: Solid Field Pattern.
DPP2607_Write_SolidFieldPattern(DWORD TestPatternSolids).
:type test_pattern_solids: TestPatternSolids
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_SolidFieldPattern(%r)', test_pattern_solids)
payload = [0x11]
payload.extend(list(bytearray(struct.pack(">I", test_pattern_solids & 0xf))))
i2c.write(payload)
def DPP2607_Write_SystemReset():
"""
Writes: System Reset.
DPP2607_Write_SystemReset().
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_SystemReset()', )
payload = [0x1F]
payload.extend([0, 0, 0, 1]) # dev_rst
i2c.write(payload)
def DPP2607_Write_VeritcalLinesPattern(test_pattern_v_lines):
"""
Writes: Veritcal Lines Pattern.
DPP2607_Write_VeritcalLinesPattern(DWORD TestPatternVLines).
:type test_pattern_v_lines: TestPatternVLines
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_VeritcalLinesPattern(%r)', test_pattern_v_lines)
payload = [0x11]
payload.extend(list(bytearray(struct.pack(">I", test_pattern_v_lines & 0xf))))
i2c.write(payload)
def DPP2607_Write_VerticalGrayRampPattern():
"""
Writes: Vertical Gray Ramp Pattern.
DPP2607_Write_VerticalGrayRampPattern().
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_VerticalGrayRampPattern()', )
payload = [0x11]
payload.extend([0, 0, 0, 11]) # test_pattern_gray_ramp_v
i2c.write(payload)
def DPP2607_Write_VideoPixelFormat(pix_format):
"""
Writes: Video Pixel Format.
DPP2607_Write_VideoPixelFormat(DWORD PixFormat).
:type pix_format: PixFormat
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_VideoPixelFormat(%r)', pix_format)
payload = [0x0D]
payload.extend(list(bytearray(struct.pack(">I", pix_format & 0xf))))
i2c.write(payload)
def DPP2607_Write_VideoResolution(resolution):
"""
Writes: Video Resolution.
DPP2607_Write_VideoResolution(DWORD Resolution).
:type resolution: Resolution
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_VideoResolution(%r)', resolution)
payload = [0x0C]
payload.extend(list(bytearray(struct.pack(">I", resolution & 0x1f))))
i2c.write(payload)
def DPP2607_Write_VideoSourceSelection(source_sel):
"""
Writes: Video Source Selection.
DPP2607_Write_VideoSourceSelection(DWORD SourceSel).
:type source_sel: SourceSel
:rtype: None
"""
log(DEBUG, 'DPP2607_Write_VideoSourceSelection(%r)', source_sel)
payload = [0x0B]
payload.extend(list(bytearray(struct.pack(">I", source_sel & 0x7))))
i2c.write(payload)
| [
2,
532,
9,
12,
19617,
25,
9168,
12,
1065,
4309,
532,
9,
12,
198,
198,
2,
288,
381,
21719,
22,
13,
9078,
198,
2,
198,
2,
12800,
9729,
284,
360,
10246,
21719,
22,
48445,
1262,
314,
17,
34,
198,
2,
198,
2,
15069,
357,
34,
8,
21... | 2.104105 | 28,846 |
"""
Tests for the command module
"""
import os
import subprocess
import pytest
import pathlib
from pyassert import assert_that
# from aws-iam-tester import cli
script_path = pathlib.Path(__file__).parent.absolute()
# Keep this method last to avoid disrupting other methods
| [
37811,
198,
51,
3558,
329,
262,
3141,
8265,
198,
37811,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
12972,
9288,
198,
11748,
3108,
8019,
198,
198,
6738,
12972,
30493,
1330,
6818,
62,
5562,
198,
2,
422,
3253,
82,
12,
1789,
1... | 3.506329 | 79 |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import os
import time
import datetime
import tempfile
from jira.client import JIRA
def main():
''' main(), shut up, pylint '''
popt = argparse.ArgumentParser(description='Tajo patch review tool')
popt.add_argument('-b', '--branch', action='store', dest='branch', required=True, help='Tracking branch to create diff against')
popt.add_argument('-j', '--jira', action='store', dest='jira', required=True, help='JIRA corresponding to the reviewboard')
popt.add_argument('-skip-rb', '--skip-reviewboard', action='store_true', dest='skip_reviewboard', required=False, help='Skip a review request to reviewboard.')
popt.add_argument('-s', '--summary', action='store', dest='summary', required=False, help='Summary for the reviewboard')
popt.add_argument('-d', '--description', action='store', dest='description', required=False, help='Description for reviewboard')
popt.add_argument('-c', '--change-description', action='store', dest='change_description', required=False, help='Description of what changed in this revision of the review request when updating an existing request')
popt.add_argument('-pa', '--patch-available', action='store_true', dest='patch_available', required=False, help='Transite the JIRA status to Patch Available. If its status is already Patch Available, it updates the status of the JIRA issue by transiting its status to Open and Patch Available sequentially.')
popt.add_argument('-r', '--rb', action='store', dest='reviewboard', required=False, help='Review board that needs to be updated')
popt.add_argument('-t', '--testing-done', action='store', dest='testing', required=False, help='Text for the Testing Done section of the reviewboard')
popt.add_argument('-db', '--debug', action='store_true', required=False, help='Enable debug mode')
opt = popt.parse_args()
# the patch name is determined here.
patch_file=tempfile.gettempdir() + "/" + opt.jira + ".patch"
if opt.reviewboard:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d_%H:%M:%S')
patch_file=tempfile.gettempdir() + "/" + opt.jira + '_' + st + '.patch'
# first check if rebase is needed
git_branch_hash="git rev-parse " + opt.branch
p_now=os.popen(git_branch_hash)
branch_now=p_now.read()
p_now.close()
git_common_ancestor="git merge-base " + opt.branch + " HEAD"
p_then=os.popen(git_common_ancestor)
branch_then=p_then.read()
p_then.close()
# get remote and branch name
remote_name=opt.branch.split("/")[0]
branch_name=opt.branch.split("/")[1]
if branch_now != branch_then:
print 'ERROR: Your current working branch is from an older version of ' + opt.branch + '. Please rebase first by using git pull --rebase'
sys.exit(1)
git_configure_reviewboard="git config reviewboard.url https://reviews.apache.org"
print "Configuring reviewboard url to https://reviews.apache.org"
p=os.popen(git_configure_reviewboard)
p.close()
# update the specified remote branch
git_remote_update="git fetch " + remote_name
print "Updating your remote branche " + opt.branch + " to pull the latest changes"
p=os.popen(git_remote_update)
p.close()
# get jira and issue instance
jira=get_jira()
issue = jira.issue(opt.jira)
if not opt.skip_reviewboard:
rb_command="post-review --publish --tracking-branch " + opt.branch + " --target-groups=Tajo --branch=" + branch_name + " --bugs-closed=" + opt.jira
if opt.reviewboard:
rb_command=rb_command + " -r " + opt.reviewboard
summary=issue.key + ": " + issue.fields.summary # default summary is 'TAJO-{NUM}: {JIRA TITLE}'
if opt.summary: # if a summary is given, this field is added or updated
summary=opt.summary
if not opt.reviewboard: # if a review request is created
rb_command=rb_command + " --summary '" + summary + "'"
description=issue.fields.description
if opt.description: # if a descriptin is give, this field is added
description = opt.description
if opt.reviewboard and opt.change_description:
rb_command=rb_command + " --change-description '" + opt.change_description + "'"
if not opt.reviewboard: # if a review request is created
rb_command=rb_command + " --description '" + description + "'"
if opt.testing:
rb_command=rb_command + " --testing-done=" + opt.testing
if opt.debug:
rb_command=rb_command + " --debug"
print rb_command
p=os.popen(rb_command)
rb_url=""
for line in p:
print line
if line.startswith('http'):
rb_url = line
elif line.startswith("There don't seem to be any diffs"):
print 'ERROR: Your reviewboard was not created/updated since there was no diff to upload. The reasons that can cause this issue are 1) Your diff is not checked into your local branch. Please check in the diff to the local branch and retry 2) You are not specifying the local branch name as part of the --branch option. Please specify the remote branch name obtained from git branch -r'
p.close()
sys.exit(1)
elif line.startswith("Your review request still exists, but the diff is not attached") and not opt.debug:
print 'ERROR: Your reviewboard was not created/updated. Please run the script with the --debug option to troubleshoot the problem'
p.close()
sys.exit(1)
p.close()
if opt.debug:
print 'rb url=',rb_url
git_command="git diff --no-prefix " + opt.branch + " > " + patch_file
if opt.debug:
print git_command
p=os.popen(git_command)
p.close()
print 'Creating diff against', opt.branch, 'and uploading patch to ',opt.jira
attachment=open(patch_file)
jira.add_attachment(issue,attachment)
attachment.close()
# Add comment about a request to reviewboard and its url.
if not opt.skip_reviewboard:
comment="Created a review request against branch " + branch_name + " in reviewboard "
if opt.reviewboard:
comment="Updated the review request against branch " + branch_name + " in reviewboard "
comment = comment + "\n" + rb_url
jira.add_comment(opt.jira, comment)
# Transition the jira status to Patch Available
if opt.patch_available:
if issue.fields.status.id == '10002': # If the jira status is already Patch Available (id - 10002)
jira.transition_issue(issue, '731') # Cancel (id - 731) the uploaded patch
issue = jira.issue(opt.jira)
jira.transition_issue(issue, '10002')
if __name__ == '__main__':
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
32... | 3.048872 | 2,394 |
"""
SC101 Baby Names Project
Adapted from Nick Parlante's Baby Names assignment by
Jerry Liao.
YOUR DESCRIPTION HERE
"""
import tkinter
import babynames
import babygraphicsgui as gui
FILENAMES = [
'data/full/baby-1900.txt', 'data/full/baby-1910.txt',
'data/full/baby-1920.txt', 'data/full/baby-1930.txt',
'data/full/baby-1940.txt', 'data/full/baby-1950.txt',
'data/full/baby-1960.txt', 'data/full/baby-1970.txt',
'data/full/baby-1980.txt', 'data/full/baby-1990.txt',
'data/full/baby-2000.txt', 'data/full/baby-2010.txt'
]
CANVAS_WIDTH = 1000
CANVAS_HEIGHT = 600
YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
GRAPH_MARGIN_SIZE = 20
COLORS = ['red', 'purple', 'green', 'blue']
TEXT_DX = 2
LINE_WIDTH = 2
MAX_RANK = 1000
def get_x_coordinate(width, year_index):
"""
Given the width of the canvas and the index of the current year
in the YEARS list, returns the x coordinate of the vertical
line associated with that year.
Input:
width (int): The width of the canvas
year_index (int): The index of the current year in the YEARS list
Returns:
x_coordinate (int): The x coordinate of the vertical line associated
with the specified year.
"""
average = (width - GRAPH_MARGIN_SIZE*2) // 12
x_coordinate = GRAPH_MARGIN_SIZE + ((year_index - 1900)/10) * average
return x_coordinate
def draw_fixed_lines(canvas):
"""
Erases all existing information on the given canvas and then
draws the fixed background lines on it.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
Returns:
This function does not return any value.
"""
canvas.delete('all') # delete all existing lines from the canvas
# Write your code below this line
#################################
# Create two horizontal lines to the canvas.
canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE)
canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE,
CANVAS_HEIGHT - GRAPH_MARGIN_SIZE)
# Add vertical lines to the canvas.
for i in range(len(YEARS)): # len(YEARS)-1
x = int(get_x_coordinate(width=CANVAS_WIDTH, year_index=YEARS[i]))
canvas.create_line(x, 0, x, CANVAS_HEIGHT)
# Add years caption to the canvas.
for i in range(len(YEARS)): # len(YEARS)-1
x = int(get_x_coordinate(width=CANVAS_WIDTH, year_index=YEARS[i]))
canvas.create_text(x, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE, text=YEARS[i], anchor=tkinter.NW, font='times 10')
def draw_names(canvas, name_data, lookup_names):
"""
Given a dict of baby name data and a list of name, plots
the historical trend of those names onto the canvas.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
name_data (dict): Dictionary holding baby name data
lookup_names (List[str]): A list of names whose data you want to plot
Returns:
This function does not return any value.
"""
draw_fixed_lines(canvas) # draw the fixed background grid
# Write your code below this line
#################################
y_begin = int(GRAPH_MARGIN_SIZE)
average = (CANVAS_HEIGHT - GRAPH_MARGIN_SIZE*2)/1000
for i in range(len(lookup_names)):
for j in range(len(YEARS)-1): # len(YEARS)-1
print("-----------------------")
print(len(YEARS))
ls = list(name_data[lookup_names[i]])
print(ls)
# Create a switch. When the rank of year isn't recorded, the switch will be changed into True.
missed = False
# Create a switch. When the rank of the next year isn't recorded, the switch will be changed into True.
next_missed = False
# Change switch into True, if the rank of the year isn't recorded.
if str(YEARS[j]) in ls:
print('yes, with record') # PROCESSED!
else:
print('no, data missed!')
missed = True
# Change switch into True, if the rank of the next year isn't recorded.
if str(YEARS[j+1]) in ls:
print('yes, next year with record') # PROCESSED!
else:
print('no, next year data missed!')
next_missed = True
# Adjust color of the line.
color_num = i
if color_num > len(COLORS)-1:
color_num = color_num % len(COLORS)
color = COLORS[color_num]
if missed == True: # The data of first year is missed.
print('###################')
print('This is missed-if')
# X coordinate of the year.
x = int(get_x_coordinate(width=CANVAS_WIDTH, year_index=YEARS[j]))
print('x: ')
print(x)
# X coordinate of the next year.
x_next = 0
if j == (len(YEARS) - 1):
pass
else:
x_next = int(get_x_coordinate(width=CANVAS_WIDTH, year_index=YEARS[j + 1]))
print('x_next: ')
print(x_next) # 100
if next_missed is True: # Data in the first year is missed, and that of next year is missed.
# Create a line on the canvas.
canvas.create_line(x, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE, x_next, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE, width=LINE_WIDTH, fill=color)
# Add name and rank to the canvas.
name_and_rank = str(lookup_names[i] + ' * ')
print(name_and_rank)
canvas.create_text(x, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE, text=name_and_rank, anchor=tkinter.SW, font='times 10', fill=color)
else: # Data in the first year is missed, but that of next year isn't missed.
# X coordinate of the year.
x = int(get_x_coordinate(width=CANVAS_WIDTH, year_index=YEARS[j]))
# X coordinate of the next year.
x_next = 0
if j == (len(YEARS) - 1):
pass
else:
x_next = int(get_x_coordinate(width=CANVAS_WIDTH, year_index=YEARS[j + 1]))
# Count the rank of the next year, and then compute y coordinate of the next year.
rank_of_next_year = 0
if j == (len(YEARS) - 1):
pass
else:
rank_of_next_year = int(name_data[lookup_names[i]][str(YEARS[j + 1])])
# Y coordinate of the next year.
y_next = int(y_begin + average * rank_of_next_year)
# Create the line on the canvas.
canvas.create_line(x, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE, x_next, y_next, width=LINE_WIDTH, fill=color)
# Add name and rank to the canvas.
name_and_rank = str(lookup_names[i] + ' * ')
canvas.create_text(x, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE, text=name_and_rank, anchor=tkinter.SW, font='times 10', fill=color)
else: # Data in the first year isn't missed.
if next_missed is True: # Data in the first year isn't missed, but that of next year is missed.
# X coordinate of the year.
x = int(get_x_coordinate(width=CANVAS_WIDTH, year_index=YEARS[j]))
# X coordinate of the next year.
x_next = 0
if j == (len(YEARS)-1):
pass
else:
x_next = int(get_x_coordinate(width=CANVAS_WIDTH, year_index=YEARS[j + 1]))
# Count the rank of the year, and then compute the y coordinate of the year.
rank_of_year = int(name_data[lookup_names[i]][str(YEARS[j])])
# Y coordinate of the year.
y = int(y_begin + average * rank_of_year)
# Adjust color of the line.
color_num = i
if color_num > len(COLORS) - 1:
color_num = color_num % len(COLORS)
color = COLORS[color_num]
# Add the line to the canvas.
canvas.create_line(x, y, x_next, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE, width=LINE_WIDTH, fill=color)
# Add name and rank to the canvas.
name_and_rank = str(lookup_names[i] + ' ' + name_data[lookup_names[i]][str(YEARS[j])])
canvas.create_text(x+TEXT_DX, y, text=name_and_rank, anchor=tkinter.NW, font='times 10', fill=color)
else: # Data in the first year isn't missed, and that of next year isn't missed.
# X coordinate of the year.
x = int(get_x_coordinate(width=CANVAS_WIDTH, year_index=YEARS[j]))
# X coordinate of the next year.
x_next = 0
if j == (len(YEARS)-1):
pass
else:
x_next = int(get_x_coordinate(width=CANVAS_WIDTH, year_index=YEARS[j + 1]))
# Count the rank of the year, and then compute the y coordinate of the year.
rank_of_year = int(name_data[lookup_names[i]][str(YEARS[j])])
# Y coordinate of the year.
y = int(y_begin + average * rank_of_year)
# Count the rank of the next year, and then compute y coordinate of the next year.
rank_of_next_year = 0
if j == (len(YEARS)-1):
pass
else:
rank_of_next_year = int(name_data[lookup_names[i]][str(YEARS[j+1])])
# Y coordinate of the next year.
y_next = int(y_begin + average * rank_of_next_year)
# Adjust color of the line.
color_num = i
if color_num > len(COLORS) - 1:
color_num = color_num % len(COLORS)
color = COLORS[color_num]
# Add the line to the canvas.
canvas.create_line(x, y, x_next, y_next, width=LINE_WIDTH, fill=color)
# Add name and rank to the canvas.
name_and_rank = str(lookup_names[i] + ' ' + name_data[lookup_names[i]][str(YEARS[j])])
canvas.create_text(x+TEXT_DX, y, text=name_and_rank, anchor=tkinter.NW, font='times 10', fill=color)
# main() code is provided, feel free to read through it but DO NOT MODIFY
if __name__ == '__main__':
main()
| [
37811,
198,
6173,
8784,
14801,
28531,
4935,
198,
48003,
276,
422,
8047,
2547,
75,
12427,
338,
14801,
28531,
16237,
416,
198,
43462,
406,
13481,
13,
198,
198,
56,
11698,
22196,
40165,
15698,
198,
37811,
198,
198,
11748,
256,
74,
3849,
19... | 1.998373 | 5,533 |
ITEMS_PER_PAGE = 10
| [
2043,
39201,
62,
18973,
62,
4537,
8264,
796,
838,
628
] | 2.1 | 10 |
import collections
wall, goal = 1, 3
width_g, height_g = 28, 28
| [
11748,
17268,
201,
198,
201,
198,
11930,
11,
3061,
796,
352,
11,
513,
201,
198,
10394,
62,
70,
11,
6001,
62,
70,
796,
2579,
11,
2579,
201,
198,
201,
198,
201,
198,
201,
198
] | 2.205882 | 34 |
"""distutils.command.sdist
Implements the Distutils 'sdist' command (create a source distribution)."""
__revision__ = "$Id: sdist.py 81261 2010-05-17 10:54:43Z tarek.ziade $"
import os
import string
import sys
from glob import glob
from warnings import warn
from distutils.core import Command
from distutils import dir_util, dep_util, file_util, archive_util
from distutils.text_file import TextFile
from distutils.errors import (DistutilsPlatformError, DistutilsOptionError,
DistutilsTemplateError)
from distutils.filelist import FileList
from distutils import log
from distutils.util import convert_path
def show_formats():
"""Print all possible values for the 'formats' option (used by
the "--help-formats" command-line option).
"""
from distutils.fancy_getopt import FancyGetopt
from distutils.archive_util import ARCHIVE_FORMATS
formats = []
for format in ARCHIVE_FORMATS.keys():
formats.append(("formats=" + format, None,
ARCHIVE_FORMATS[format][2]))
formats.sort()
FancyGetopt(formats).print_help(
"List of available source distribution formats:")
| [
37811,
17080,
26791,
13,
21812,
13,
82,
17080,
198,
198,
3546,
1154,
902,
262,
4307,
26791,
705,
82,
17080,
6,
3141,
357,
17953,
257,
2723,
6082,
21387,
15931,
198,
198,
834,
260,
10178,
834,
796,
17971,
7390,
25,
264,
17080,
13,
9078... | 2.857843 | 408 |
"""
Type and data-conversion test package.
Tests the following:
1) HDF5 to NumPy type mapping
2) Data conversion
"""
| [
198,
37811,
198,
220,
220,
220,
5994,
290,
1366,
12,
1102,
9641,
1332,
5301,
13,
628,
220,
220,
220,
30307,
262,
1708,
25,
628,
220,
220,
220,
352,
8,
5572,
37,
20,
284,
31835,
20519,
2099,
16855,
198,
220,
220,
220,
362,
8,
6060,... | 2.833333 | 48 |
n = int(raw_input("What would you like to use for n? "))
answer = 0
for x in range(1, n+1):
answer += x
print answer
| [
77,
796,
493,
7,
1831,
62,
15414,
7203,
2061,
561,
345,
588,
284,
779,
329,
299,
30,
366,
4008,
198,
41484,
796,
657,
198,
198,
1640,
2124,
287,
2837,
7,
16,
11,
299,
10,
16,
2599,
198,
220,
220,
220,
3280,
15853,
2124,
198,
198... | 2.5625 | 48 |
"""
picture.py
Author: Jackson Lake
Credit: HHS page Github Tutorial
Assignment:
Use the ggame library to "paint" a graphical picture of something (e.g. a house, a face or landscape).
Use at least:
1. Three different Color objects.
2. Ten different Sprite objects.
3. One (or more) RectangleAsset objects.
4. One (or more) CircleAsset objects.
5. One (or more) EllipseAsset objects.
6. One (or more) PolygonAsset objects.
See:
https://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Displaying-Graphics
for general information on how to use ggame.
See:
http://brythonserver.github.io/ggame/
for detailed information on ggame.
"""
from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset
# add your code here \/ \/ \/
from ggame import App, Color, LineStyle, Sprite
from ggame import RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset
red = Color(0xff0000, 1.0)
green = Color(0x00ff00, 1.0)
blue = Color(0x0000ff, 1.0)
black = Color(0x000000, 1.0)
grey = Color(0xCDC0B0, 1.0)
firebrick1 = Color(0xFF3030, 1.0)
purple = Color(0xBF3EFF, 1.0)
gold = Color(0xFFD700, 1.0)
fade1 = Color(0xff0000, 0.6)
fade2 = Color(0xff0000, 0.4)
fade3 = Color(0xff0000, 0.2)
white = Color(0xF8F8FF, 1.0)
violet = Color(0xd147c5, 1.0)
thinline = LineStyle(1, black)
thinner = LineStyle(.4, red)
head = RectangleAsset(120, 100, thinline, grey)
neck = RectangleAsset(40, 28, thinline, grey)
body = RectangleAsset(200, 200, thinline, grey)
leg1 = RectangleAsset(45, 90, thinline, grey)
leg2 = RectangleAsset(45, 90, thinline, grey)
eye1 = CircleAsset(15, thinline, firebrick1)
eye2 = CircleAsset(15, thinline, firebrick1)
shoulder1 = CircleAsset(20, thinline, grey)
shoulder2 = CircleAsset(20, thinline, grey)
arm1 = RectangleAsset(100, 40, thinline, grey)
arm2 = RectangleAsset(100, 40, thinline, grey)
antenna = EllipseAsset(5, 40, thinline, purple)
mouth = EllipseAsset(30, 8, thinline, gold)
lip = RectangleAsset(59, 1, thinline, black)
wave1 = CircleAsset(10, thinner, fade1)
wave2 = CircleAsset(25, thinner, fade2)
wave3 = CircleAsset(42, thinner, fade3)
emblem = CircleAsset(37, thinline)
design = PolygonAsset([(0,0), (20, 50), (40,0)], thinline, violet)
Sprite(antenna, (485, 65))
Sprite(head, (432, 100))
Sprite(neck, (470, 200))
Sprite(body, (400, 228))
Sprite(leg1, (400, 428))
Sprite(leg2, (555, 428))
Sprite(eye1, (440, 115))
Sprite(eye2, (510, 115))
Sprite(arm1, (600, 228))
Sprite(arm2, (300, 228))
Sprite(shoulder1, (580, 228))
Sprite(shoulder2, (380, 228))
Sprite(mouth, (460, 165))
Sprite(lip, (460, 173))
Sprite(wave1, (480, 60))
Sprite(wave2, (465, 43))
Sprite(wave3, (447, 26))
Sprite(emblem, (465, 260))
Sprite(design, (480, 275))
# add your code here /\ /\ /\
myapp = App()
myapp.run()
| [
37811,
198,
34053,
13,
9078,
198,
13838,
25,
6612,
6233,
198,
23690,
25,
45497,
2443,
38994,
36361,
198,
198,
8021,
16747,
25,
198,
198,
11041,
262,
308,
6057,
5888,
284,
366,
79,
2913,
1,
257,
27831,
4286,
286,
1223,
357,
68,
13,
7... | 2.588785 | 1,070 |
# Generated by Django 2.0 on 2019-05-05 09:12
import datetime
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
319,
13130,
12,
2713,
12,
2713,
7769,
25,
1065,
198,
198,
11748,
4818,
8079,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 3.088235 | 34 |
expected_output = {
"url": {
"ftp://user:password@172.16.4.253/router-dhcp": {
"read": "Dec 01 1997 12:01 AM",
"written": "Never",
"status": "Last read succeeded. Bindings have been loaded in RAM.",
"delay_in_secs": 300,
"timeout_in_secs": 300,
"failures": 0,
"successes": 1,
}
}
}
| [
40319,
62,
22915,
796,
1391,
198,
220,
220,
220,
366,
6371,
1298,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
366,
701,
79,
1378,
7220,
25,
28712,
31,
23628,
13,
1433,
13,
19,
13,
28592,
14,
472,
353,
12,
34985,
13155,
1298,
139... | 1.853081 | 211 |
expected_output = {
"vlans": {
"1": {
"vlan_id": "1",
"name": "default",
"state": "active",
"shutdown": False,
"mtu": 1500,
"said": 100001,
"trans1": 0,
"trans2": 0,
"type": "enet",
"interfaces": [
"GigabitEthernet1/0/1",
"GigabitEthernet1/0/2",
"GigabitEthernet1/0/3",
"GigabitEthernet1/0/5",
"GigabitEthernet1/0/6",
"GigabitEthernet1/0/12",
"GigabitEthernet1/0/13",
"GigabitEthernet1/0/14",
"GigabitEthernet1/0/15",
"GigabitEthernet1/0/16",
"GigabitEthernet1/0/17",
"GigabitEthernet1/0/18",
"GigabitEthernet1/0/19",
"GigabitEthernet1/0/20",
"GigabitEthernet1/0/21",
"GigabitEthernet1/0/22",
],
},
"2": {
"vlan_id": "2",
"name": "VLAN_0002",
"state": "active",
"shutdown": False,
"private_vlan": {"primary": True, "association": ["301", "302"]},
},
"301": {
"private_vlan": {
"primary": False,
"type": "community",
"ports": ["FastEthernet5/3", "FastEthernet5/25"],
}
},
"302": {"private_vlan": {"primary": False, "type": "community"}},
"10": {"private_vlan": {"primary": False, "type": "community"}},
"20": {
"vlan_id": "20",
"name": "VLAN-0020",
"shutdown": False,
"state": "active",
"remote_span_vlan": True,
"private_vlan": {"primary": True, "association": ["105"]},
},
"21": {"remote_span_vlan": True},
"24": {"remote_span_vlan": True},
"25": {"remote_span_vlan": True},
"26": {"remote_span_vlan": True},
"27": {"remote_span_vlan": True},
"105": {"private_vlan": {"primary": False, "type": "isolated"}},
"100": {
"vlan_id": "100",
"name": "V100",
"state": "suspend",
"shutdown": False,
"mtu": 1500,
"said": 100100,
"trans1": 0,
"trans2": 0,
"type": "enet",
"private_vlan": {"primary": True, "association": ["151"]},
},
"151": {"private_vlan": {"primary": False, "type": "non-operational"}},
"202": {"private_vlan": {"primary": False, "type": "community"}},
"303": {"private_vlan": {"primary": False, "type": "community"}},
"101": {
"vlan_id": "101",
"shutdown": False,
"name": "VLAN-0101",
"state": "active",
"mtu": 1500,
"said": 100101,
"trans1": 0,
"trans2": 0,
"type": "enet",
"private_vlan": {"primary": True, "association": ["402"]},
},
"402": {"private_vlan": {"primary": False, "type": "non-operational"}},
"102": {
"vlan_id": "102",
"shutdown": False,
"name": "VLAN_0102",
"state": "active",
"remote_span_vlan": True,
},
"103": {
"vlan_id": "103",
"shutdown": False,
"name": "VLAN-0103",
"state": "unsupport",
},
"104": {
"vlan_id": "104",
"name": "VLAN_0104",
"state": "shutdown",
"shutdown": True,
},
}
}
| [
40319,
62,
22915,
796,
1391,
198,
220,
220,
220,
366,
19279,
504,
1298,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
366,
16,
1298,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
366,
85,
9620,
62,
312,
1298,
... | 1.659555 | 2,203 |
from xarray_multiscale.reducers import windowed_mean, windowed_mode
import numpy as np
| [
6738,
2124,
18747,
62,
16680,
2304,
1000,
13,
445,
1229,
364,
1330,
4324,
276,
62,
32604,
11,
4324,
276,
62,
14171,
198,
11748,
299,
32152,
355,
45941,
198
] | 3.107143 | 28 |
import functools
import json
import operator
import os
from collections import Counter, namedtuple
from multiprocessing import Pool, cpu_count
from pprint import pprint
import requests
import xlrd
import xmltodict
from tqdm import tqdm
_alb_fields = [
"title",
"link",
"author",
"pubDate",
"description",
"creator",
"isbn",
"isbn13",
"itemId",
"priceSales",
"priceStandard",
"stockStatus",
"mileage",
"cover",
"categoryId",
"categoryName",
"publisher",
"customerReviewRank",
"salesPoint",
"first_category",
"second_category",
]
AladinBook = namedtuple(
"AladinBook",
_alb_fields,
)
# ['version', 'title', 'link', 'pubDate', 'imageUrl', 'totalResults', 'startIndex', 'itemsPerPage', 'query', 'searchCategoryId', 'searchCategoryName', 'item']
# print(repr(aladin_from_isbn13('')))
# print(aladin_from_isbn13(0)["item"][0].keys())
# pprint(AladinBook(**aladin_from_isbn13(0)["item"][0]))
CATEGORIES = aladin_categories() # {CID: (CNAME, 1thCID, 2thCID)}
# print(len(CATEGORIES))
LibraryBook = namedtuple(
"LibraryBook",
[
"no",
"ranking",
"bookname",
"authors",
"publisher",
"publication_year",
"isbn13",
"addition_symbol",
"vol",
"class_no",
"loan_count",
"bookImageURL",
],
)
PAGE_SIZE = 100
# def library_high_school(n):
# # 가장 인기많은 순서대로 n개를 가져온다.
# params = {
# "authKey": "API KEY",
# "from_age": 17,
# "to_age": 19,
# "format": "json",
# }
# res = []
# page_num = 1
# cont = True
# while cont:
# params["pageNo"] = page_num
# r = requests.get("http://data4library.kr/api/loanItemSrch", params=params)
# try:
# ds = r.json()["response"]["docs"]
# res.extend(ds)
# except:
# cont = False
# if len(res) >= n:
# cont = False
# page_num += 1
# print(r.json()["response"]["resultNum"])
# return [LibraryBook(**d["doc"]) for d in res[:n]]
# 동시성 사용 버전의 알라딘
# def library_to_aladin(lbs):
# with Pool(cpu_count()) as p:
# chuncksize = int(len(lbs)/cpu_count() + 0.5) # 반올림
# it = p.imap(aladin_from_isbn13, [lb.isbn13 for lb in lbs], chuncksize)
# r = []
# for x in it:
# if isinstance(x, AladinBook):
# r.append(x)
# return r
LIST_SIZE = 50 # 리스트 요청 시 페이지당 아이ㅣ템의 개수
# QUERY TYPES : ["ItemNewAll", "ItemNewSpecial"]
| [
11748,
1257,
310,
10141,
198,
11748,
33918,
198,
11748,
10088,
198,
11748,
28686,
198,
6738,
17268,
1330,
15034,
11,
3706,
83,
29291,
198,
6738,
18540,
305,
919,
278,
1330,
19850,
11,
42804,
62,
9127,
198,
6738,
279,
4798,
1330,
279,
47... | 1.944882 | 1,270 |
from podcast_dl.site_parser import parse_site, InvalidSite
import pytest
@pytest.mark.parametrize(
"site,name",
(
("http://talkpython.fm", "talkpython"),
("https://talkpython.fm", "talkpython"),
("http://pythonbytes.fm", "pythonbytes"),
("https://pythonbytes.fm", "pythonbytes"),
("https://talkpython.fm/episodes/rss", "talkpython"),
("https://changelog.com/podcast/", "changelog"),
("talkpython", "talkpython"),
("pythonbytes", "pythonbytes"),
("talkpython.fm", "talkpython"),
("www.talkpython.fm", "talkpython"),
("https://www.podcastinit.com/feed/mp3/", "podcastinit"),
("www.podcastinit.com/feed/mp3/", "podcastinit"),
),
)
| [
6738,
9905,
62,
25404,
13,
15654,
62,
48610,
1330,
21136,
62,
15654,
11,
17665,
29123,
198,
11748,
12972,
9288,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7,
198,
220,
220,
220,
366,
15654,
11,
3672,
1600,
198,
2... | 2.394822 | 309 |
import os
from time import sleep
import requests as r
import yaml
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_yaml(tp: str) -> dict:
"""
Convert YAML to python dict.
Open dev_settings if exist.
Other way open prod_settings.
:param tp: category of settings which will be converted
:return: dict with data
"""
try:
settings = os.path.join(BASE_DIR, "dev_settings.yml")
with open(settings, 'r') as s:
data = yaml.load(s, Loader=yaml.Loader)[tp]
except FileNotFoundError:
settings = os.path.join(BASE_DIR, "prod_settings.yml")
with open(settings, 'r') as s:
data = yaml.load(s, Loader=yaml.Loader)[tp]
return data
def hard_get(data: dict, set_name: str):
"""
Get settings value from a dict,
Use when the setting required.
:param data: dict with data
:param set_name: setting name
:return: setting value
:raise: ValueError if value does not exist
"""
try:
value = data[set_name]
return value
except KeyError:
raise ValueError(f"Provide value for {set_name.upper()}")
def soft_get(data: dict, set_name: str, tp: type):
"""
Get setting value from a dict, or set it by default,
Use when setting *not* required.
:param data: dict with data
:param set_name: setting name
:param tp: value type
:return: setting value
"""
try:
value = data[set_name]
if type(value) != tp:
value = default(set_name)
except KeyError:
value = default(set_name)
return value
API_SLEEP = soft_get(get_yaml('generator'), 'api_sleep', float)
ADORABLE_AVATAR = hard_get(get_yaml('project'), 'adorable_avatar')
def generate_adorable_avatar(username: str) -> str:
"""
Generate user Adorable_avatar using email, and save it.
Generally any string can be used
:param username: user username
:return: avatar uri
"""
if ADORABLE_AVATAR:
response = r.request('GET', rf'https://api.adorable.io/avatars/150/{username}')
sleep(API_SLEEP)
avatar = os.path.join(BASE_DIR, 'media', 'avatars', f'{username}.png')
with open(avatar, 'wb') as img:
img.write(response.content)
else:
avatar = None
return avatar
| [
11748,
28686,
198,
6738,
640,
1330,
3993,
198,
198,
11748,
7007,
355,
374,
198,
11748,
331,
43695,
198,
198,
33,
11159,
62,
34720,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
... | 2.413043 | 966 |
############################################
# imports
############################################
import fgclustering.utils as utils
import fgclustering.optimizer as optimizer
import fgclustering.plotting as plotting
############################################
# Forest-guided Clustering
############################################
def fgclustering(output, data, target_column, model,
max_K = 6, number_of_clusters = None, max_iter_clustering = 500,
bootstraps_JI = 300, discart_value_JI = 0.6,
bootstraps_p_value = 10000, thr_pvalue = 0.05, random_state = 42):
'''Run forest-guided clustering algirthm for Random Forest Classifier or Regressor. The optimal number of clusters
for a k-medoids clustering is computed, based on the distance matrix computed from the Random Forest proximity matrix.
Features are ranked and filtered based on statistical tests (ANOVA for continuous features, chi square for categorical features).
Feature distribution per cluster is shown in a heatmap and boxplots. Feature importance is plotted to show
the importance of each feature for each cluster, measured by variance and impurity of the feature within the cluster,
i.e. the higher the feature importance, the lower the feature variance/impurity within the cluster.
:param output: Filename to save plot.
:type output: str
:param data: Input data with feature matrix.
If target_column is a string it has to be a column in the data.
:type data: pandas.DataFrame
:param target_column: Name of target column or target values as numpy array.
:type target_column: str or numpy.ndarray
:param model: Trained Random Forest model.
:type model: sklearn.ensemble
:param max_K: Maximum number of clusters for cluster score computation, defaults to 6
:type max_K: int, optional
:param number_of_clusters: Number of clusters for the k-medoids clustering.
Leave None if number of clusters should be optimized, defaults to None
:type number_of_clusters: int, optional
:param max_iter_clustering: Number of iterations for k-medoids clustering, defaults to 500
:type max_iter_clustering: int, optional
:param bootstraps_JI: Number of bootstraps to compute the Jaccard Index, defaults to 300
:type bootstraps_JI: int, optional
:param discart_value_JI: Minimum Jaccard Index for cluster stability, defaults to 0.6
:type discart_value_JI: float, optional
:param bootstraps_p_value: Number of bootstraps to compute the p-value of feature importance, defaults to 10000
:type bootstraps_p_value: int, optional
:param thr_pvalue: P-value threshold for feature filtering, defaults to 0.05
:type thr_pvalue: float, optional
:param random_state: Seed number for random state, defaults to 42
:type random_state: int, optional
:return: Optimal number of clusters.
:rtype: int
'''
# check if random forest is regressor or classifier
is_regressor = 'RandomForestRegressor' in str(type(model))
is_classifier = 'RandomForestClassifier' in str(type(model))
if is_regressor is True:
method = "regression"
print("Interpreting RandomForestRegressor")
elif is_classifier is True:
method = "classifier"
print("Interpreting RandomForestClassifier")
else:
raise ValueError(f'Do not recognize {str(type(model))}. Can only work with sklearn RandomForestRegressor or RandomForestClassifier.')
if type(target_column)==str:
y = data.loc[:,target_column]
X = data.drop(columns=[target_column])
else:
y = target_column
X = data
distanceMatrix = 1 - utils.proximityMatrix(model, X.to_numpy())
if number_of_clusters is None:
k = optimizer.optimizeK(distanceMatrix, y.to_numpy(), max_K, bootstraps_JI, max_iter_clustering, discart_value_JI, method, random_state)
else:
k = number_of_clusters
print(f"Visualizing forest guided clustering for {k} clusters")
plotting.plot_forest_guided_clustering(output, X, y, method, distanceMatrix, k, thr_pvalue, bootstraps_p_value, random_state)
return k
| [
29113,
7804,
4242,
198,
2,
17944,
198,
29113,
7804,
4242,
198,
198,
11748,
277,
70,
565,
436,
1586,
13,
26791,
355,
3384,
4487,
198,
11748,
277,
70,
565,
436,
1586,
13,
40085,
7509,
355,
6436,
7509,
198,
11748,
277,
70,
565,
436,
15... | 2.997869 | 1,408 |
from cdhweb.pages.forms import SiteSearchForm
from cdhweb.pages.models import PageIntro
def page_intro(request):
"""Template context processor: if there is a PageIntro snippet
for this page, add it to the context for display."""
# wagtail stores link url without leading and trailing slashes,
# but requests to django view urls include them; strip them off to match
# NOTE: page intro modification time is NOT taken into account
# when generating Last-Modified headers and returning 304 Not Modified
page_intro = PageIntro.objects.filter(
page__link_url=request.path.strip("/")
).first()
if page_intro:
return {"page_intro": page_intro}
return {}
def site_search(request):
"""Template context processor: adds site search form to context."""
return {"site_search": SiteSearchForm()}
| [
6738,
22927,
71,
12384,
13,
31126,
13,
23914,
1330,
14413,
18243,
8479,
198,
6738,
22927,
71,
12384,
13,
31126,
13,
27530,
1330,
7873,
5317,
305,
628,
198,
4299,
2443,
62,
600,
305,
7,
25927,
2599,
198,
220,
220,
220,
37227,
30800,
47... | 3.211321 | 265 |
touched_files = danger.git.modified_files + danger.git.created_files
has_source_changes = any(map(lambda f: f.startswith("danger_python"), touched_files))
has_changelog_entry = "CHANGELOG.md" in touched_files
is_trivial = "#trivial" in danger.github.pr.title
if has_source_changes and not has_changelog_entry and not is_trivial:
warn("Please, add a CHANGELOG.md entry for non-trivial changes")
| [
83,
30075,
62,
16624,
796,
3514,
13,
18300,
13,
41771,
62,
16624,
1343,
3514,
13,
18300,
13,
25598,
62,
16624,
198,
10134,
62,
10459,
62,
36653,
796,
597,
7,
8899,
7,
50033,
277,
25,
277,
13,
9688,
2032,
342,
7203,
38537,
62,
29412,... | 2.85 | 140 |
# coding: utf-8
"""
VPlex REST API
A definition for the next-gen VPlex API # noqa: E501
OpenAPI spec version: 0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Engine(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'cluster_ip_seed': 'str',
'enclosure_id': 'str',
'engine_id': 'str',
'engine_family': 'str',
'health_indications': 'list[str]',
'health_state': 'str',
'marker_led': 'str',
'operational_status': 'str',
'part_number': 'str',
'revision_number': 'str',
'serial_number': 'str',
'top_level_assembly': 'str',
'wwn_seed': 'str',
'name': 'str'
}
attribute_map = {
'cluster_ip_seed': 'cluster_ip_seed',
'enclosure_id': 'enclosure_id',
'engine_id': 'engine_id',
'engine_family': 'engine_family',
'health_indications': 'health_indications',
'health_state': 'health_state',
'marker_led': 'marker_led',
'operational_status': 'operational_status',
'part_number': 'part_number',
'revision_number': 'revision_number',
'serial_number': 'serial_number',
'top_level_assembly': 'top_level_assembly',
'wwn_seed': 'wwn_seed',
'name': 'name'
}
def __init__(self, cluster_ip_seed=None, enclosure_id=None, engine_id=None, engine_family=None, health_indications=None, health_state=None, marker_led=None, operational_status=None, part_number=None, revision_number=None, serial_number=None, top_level_assembly=None, wwn_seed=None, name=None): # noqa: E501
"""Engine - a model defined in Swagger""" # noqa: E501
self._cluster_ip_seed = None
self._enclosure_id = None
self._engine_id = None
self._engine_family = None
self._health_indications = None
self._health_state = None
self._marker_led = None
self._operational_status = None
self._part_number = None
self._revision_number = None
self._serial_number = None
self._top_level_assembly = None
self._wwn_seed = None
self._name = None
self.discriminator = None
if cluster_ip_seed is not None:
self.cluster_ip_seed = cluster_ip_seed
if enclosure_id is not None:
self.enclosure_id = enclosure_id
if engine_id is not None:
self.engine_id = engine_id
if engine_family is not None:
self.engine_family = engine_family
if health_indications is not None:
self.health_indications = health_indications
if health_state is not None:
self.health_state = health_state
if marker_led is not None:
self.marker_led = marker_led
if operational_status is not None:
self.operational_status = operational_status
if part_number is not None:
self.part_number = part_number
if revision_number is not None:
self.revision_number = revision_number
if serial_number is not None:
self.serial_number = serial_number
if top_level_assembly is not None:
self.top_level_assembly = top_level_assembly
if wwn_seed is not None:
self.wwn_seed = wwn_seed
if name is not None:
self.name = name
@property
def cluster_ip_seed(self):
"""Gets the cluster_ip_seed of this Engine. # noqa: E501
:return: The cluster_ip_seed of this Engine. # noqa: E501
:rtype: str
"""
return self._cluster_ip_seed
@cluster_ip_seed.setter
def cluster_ip_seed(self, cluster_ip_seed):
"""Sets the cluster_ip_seed of this Engine.
:param cluster_ip_seed: The cluster_ip_seed of this Engine. # noqa: E501
:type: str
"""
self._cluster_ip_seed = cluster_ip_seed
@property
def enclosure_id(self):
"""Gets the enclosure_id of this Engine. # noqa: E501
:return: The enclosure_id of this Engine. # noqa: E501
:rtype: str
"""
return self._enclosure_id
@enclosure_id.setter
def enclosure_id(self, enclosure_id):
"""Sets the enclosure_id of this Engine.
:param enclosure_id: The enclosure_id of this Engine. # noqa: E501
:type: str
"""
self._enclosure_id = enclosure_id
@property
def engine_id(self):
"""Gets the engine_id of this Engine. # noqa: E501
:return: The engine_id of this Engine. # noqa: E501
:rtype: str
"""
return self._engine_id
@engine_id.setter
def engine_id(self, engine_id):
"""Sets the engine_id of this Engine.
:param engine_id: The engine_id of this Engine. # noqa: E501
:type: str
"""
self._engine_id = engine_id
@property
def engine_family(self):
"""Gets the engine_family of this Engine. # noqa: E501
:return: The engine_family of this Engine. # noqa: E501
:rtype: str
"""
return self._engine_family
@engine_family.setter
def engine_family(self, engine_family):
"""Sets the engine_family of this Engine.
:param engine_family: The engine_family of this Engine. # noqa: E501
:type: str
"""
self._engine_family = engine_family
@property
def health_indications(self):
"""Gets the health_indications of this Engine. # noqa: E501
:return: The health_indications of this Engine. # noqa: E501
:rtype: list[str]
"""
return self._health_indications
@health_indications.setter
def health_indications(self, health_indications):
"""Sets the health_indications of this Engine.
:param health_indications: The health_indications of this Engine. # noqa: E501
:type: list[str]
"""
self._health_indications = health_indications
@property
def health_state(self):
"""Gets the health_state of this Engine. # noqa: E501
:return: The health_state of this Engine. # noqa: E501
:rtype: str
"""
return self._health_state
@health_state.setter
def health_state(self, health_state):
"""Sets the health_state of this Engine.
:param health_state: The health_state of this Engine. # noqa: E501
:type: str
"""
self._health_state = health_state
@property
def marker_led(self):
"""Gets the marker_led of this Engine. # noqa: E501
:return: The marker_led of this Engine. # noqa: E501
:rtype: str
"""
return self._marker_led
@marker_led.setter
def marker_led(self, marker_led):
"""Sets the marker_led of this Engine.
:param marker_led: The marker_led of this Engine. # noqa: E501
:type: str
"""
self._marker_led = marker_led
@property
def operational_status(self):
"""Gets the operational_status of this Engine. # noqa: E501
:return: The operational_status of this Engine. # noqa: E501
:rtype: str
"""
return self._operational_status
@operational_status.setter
def operational_status(self, operational_status):
"""Sets the operational_status of this Engine.
:param operational_status: The operational_status of this Engine. # noqa: E501
:type: str
"""
self._operational_status = operational_status
@property
def part_number(self):
"""Gets the part_number of this Engine. # noqa: E501
:return: The part_number of this Engine. # noqa: E501
:rtype: str
"""
return self._part_number
@part_number.setter
def part_number(self, part_number):
"""Sets the part_number of this Engine.
:param part_number: The part_number of this Engine. # noqa: E501
:type: str
"""
self._part_number = part_number
@property
def revision_number(self):
"""Gets the revision_number of this Engine. # noqa: E501
:return: The revision_number of this Engine. # noqa: E501
:rtype: str
"""
return self._revision_number
@revision_number.setter
def revision_number(self, revision_number):
"""Sets the revision_number of this Engine.
:param revision_number: The revision_number of this Engine. # noqa: E501
:type: str
"""
self._revision_number = revision_number
@property
def serial_number(self):
"""Gets the serial_number of this Engine. # noqa: E501
:return: The serial_number of this Engine. # noqa: E501
:rtype: str
"""
return self._serial_number
@serial_number.setter
def serial_number(self, serial_number):
"""Sets the serial_number of this Engine.
:param serial_number: The serial_number of this Engine. # noqa: E501
:type: str
"""
self._serial_number = serial_number
@property
def top_level_assembly(self):
"""Gets the top_level_assembly of this Engine. # noqa: E501
:return: The top_level_assembly of this Engine. # noqa: E501
:rtype: str
"""
return self._top_level_assembly
@top_level_assembly.setter
def top_level_assembly(self, top_level_assembly):
"""Sets the top_level_assembly of this Engine.
:param top_level_assembly: The top_level_assembly of this Engine. # noqa: E501
:type: str
"""
self._top_level_assembly = top_level_assembly
@property
def wwn_seed(self):
"""Gets the wwn_seed of this Engine. # noqa: E501
:return: The wwn_seed of this Engine. # noqa: E501
:rtype: str
"""
return self._wwn_seed
@wwn_seed.setter
def wwn_seed(self, wwn_seed):
"""Sets the wwn_seed of this Engine.
:param wwn_seed: The wwn_seed of this Engine. # noqa: E501
:type: str
"""
self._wwn_seed = wwn_seed
@property
def name(self):
"""Gets the name of this Engine. # noqa: E501
:return: The name of this Engine. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Engine.
:param name: The name of this Engine. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Engine, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Engine):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
23342,
2588,
30617,
7824,
628,
220,
220,
220,
317,
6770,
329,
262,
1306,
12,
5235,
23342,
2588,
7824,
220,
1303,
645,
20402,
25,
412,
33548,
628,
220,
220,
220,
... | 2.239535 | 5,590 |
# A Dynamic Programming based on Python3
# program to count number of ways to
# cover a distance with 1, 2 and 3 steps
# driver program
dist = 4
print(printCountDP(dist))
| [
2,
317,
26977,
30297,
1912,
319,
11361,
18,
198,
2,
1430,
284,
954,
1271,
286,
2842,
284,
198,
2,
3002,
257,
5253,
351,
352,
11,
362,
290,
513,
4831,
628,
198,
198,
2,
4639,
1430,
198,
17080,
796,
604,
198,
4798,
7,
4798,
12332,
... | 3.55102 | 49 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import config
import sys, traceback
from datetime import datetime
from telegram import Bot, Update, User, Message
from telegram.ext import CommandHandler, Updater, MessageHandler, CallbackContext, Filters
from telegram.utils.request import Request
from telegram import KeyboardButton, ReplyKeyboardMarkup, ReplyKeyboardRemove
if __name__ == '__main__':
main() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
4566,
198,
11748,
25064,
11,
12854,
1891,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
573,
30536,
13... | 3.457627 | 118 |
#!/usr/bin/env python
from __future__ import division, print_function
import argparse
import numpy as np
from numpy import linspace, zeros, ones, sin, cos, arctan, pi
import os
def gen_blockmeshdict(foil="0012"):
    """
    Write a `blockMeshDict` for a NACA foil at specified angle of attack.

    :param foil: 4-digit NACA designation as a string, e.g. "0012".
        Digits are (max camber in % of chord, camber position in tenths
        of chord, thickness in % of chord — last two digits).

    The dictionary is written to "blockMeshDict" in the current working
    directory, overwriting any existing file.

    NOTE(review): no angle-of-attack parameter is actually implemented in
    this function — the foil is meshed at zero incidence; confirm the
    summary line above against the intended use.
    """
    # Foil geometry
    c = 1.0  # Geometric chord length
    NACA = [int(d) for d in foil]  # NACA 4-digit designation
    # Mesh dimensions
    scale = 1  # Scaling factor
    W = 0.5  # *Half* depth of foil (z-direction)
    D = 1.2  # Length of downstream section
    scalingX= 1.3  # A scaling factor in the x-direction, which is used to make the boundary similar to the leading edge of the airfoil
    scalingY= 2  # A scaling factor in the y-direction, which is used to make the boundary similar to the leading edge of the airfoil
    # Mesh resolution parameters
    Ni = 400  # Number of interpolation points along the foil
    # Nx = 200  # Number of mesh cells along the foil
    Nleading = 40  # Number of mesh cells along the leading foil
    Ntrailing = 40  # Number of mesh cells along the trailing foil
    ND = 20  # Number of cells in the downstream direction
    NT = 10  # Number of cells the transverse direction
    NW = 1  # Number of cells in the z-direction (along the foil axis)
    # Expansion rates
    ExpTransverse = 1  # Expansion rate in transverse direction
    ExpDownstream = 1  # Expansion rate in the downstream direction
    ExpLeading = 1  # Expansion rate in the leading foil
    ExpTrailing = 1  # Expansion rate in the trailing foil
    # ------------------------- END OF MESH PARAMETER REGION --------------------- #
    # Create a vector with x-coordinates, camber and thickness
    # Cosine spacing: clusters interpolation points near the leading and
    # trailing edges where curvature is largest.
    beta = linspace(0, pi, Ni)
    x = c*(0.5*(1 - cos(beta)))
    y_c = zeros(len(x))
    y_t = zeros(len(x))
    theta = zeros(len(x))
    # Values of m, p and t
    m = NACA[0]/100
    p = NACA[1]/10
    t = (NACA[2]*10 + NACA[3])/100
    # Calculate thickness
    # The upper expression will give the airfoil a finite thickness at the trailing
    # edge, witch might cause trouble. The lower expression is corrected to give
    # zero thickness at the trailing edge, but the foil is strictly speaking no
    # longer a proper NACA airfoil.
    #
    # See http://turbmodels.larc.nasa.gov/naca4412sep_val.html
    #     http://en.wikipedia.org/wiki/NACA_airfoil
    #y_t = (t*c/0.2) * (0.2969*(x/c)**0.5 - 0.1260*(x/c) - 0.3516*(x/c)**2 + 0.2843*(x/c)**3 - 0.1015*(x/c)**4)
    y_t = (t*c/0.2)*(0.2969*(x/c)**0.5 - 0.1260*(x/c) - 0.3516*(x/c)**2 \
        + 0.2843*(x/c)**3 - 0.1036*(x/c)**4)
    if p > 0:
        # Calculate camber
        y_c += (m*x/p**2)*(2*p - x/c)*(x < p*c)
        y_c += (m*(c-x)/(1 - p)**2)*(1 + x/c - 2*p)*(x >= p*c)
        # Calculate theta-value
        theta += arctan((m/p**2) * (2*p - 2*x/c))*(x < p*c)
        theta += arctan((m/(1 - p)**2) * (-2*x/c + 2*p))*(x >= p*c)
    # Calculate coordinates of upper surface
    Xu = x - y_t*sin(theta)
    Yu = y_c + y_t*cos(theta)
    # Calculate coordinates of lower surface
    Xl = x + y_t*sin(theta)
    Yl = y_c - y_t*cos(theta)
    # Converts a one-dimensional array to a column vector (The data type: np.matrix)
    # In order to be able to execute successfully functions (np.concatenate)
    Xu = np.matrix([Xu]).transpose()
    Yu = np.matrix([Yu]).transpose()
    Xl = np.matrix([Xl]).transpose()
    Yl = np.matrix([Yl]).transpose()
    if p > 0:
        # Find index i of max. camber
        C_max_idx = np.where(y_c == max(y_c))[0][0]
    else:
        # Otherwise use location of max. thickness
        C_max_idx = np.where(y_t == max(y_t))[0][0]
    # Block-edge point lists. Each ptsN is an (n, 3) matrix of x/y/z points;
    # the mirrored (negative-z) copies just flip the third column.
    # Edge 4-5 and 16-17
    pts1 = np.concatenate([Xu[1:C_max_idx],
                           Yu[1:C_max_idx],
                           W*ones(np.shape(Xu[1:C_max_idx]))], axis=1)
    pts5 = np.concatenate([pts1[:, 0], pts1[:, 1], -pts1[:, 2]], axis=1)
    # Edge 5-7 and 17-19
    pts2 = np.concatenate([Xu[C_max_idx + 1:Ni - 1],
                           Yu[C_max_idx + 1:Ni - 1],
                           W*ones(np.shape(Xu[C_max_idx + 1:Ni - 1]))], axis=1)
    pts6 = np.concatenate([pts2[:, 0], pts2[:, 1], -pts2[:, 2]], axis=1)
    # Edge 4-6 and 16-18
    pts3 = np.concatenate([Xl[1:C_max_idx],
                           Yl[1:C_max_idx],
                           W*ones(np.shape(Xl[1:C_max_idx]))], axis=1)
    pts7 = np.concatenate([pts3[:, 0], pts3[:, 1], -pts3[:, 2]], axis=1)
    # Edge 6-7 and 18-19
    pts4 = np.concatenate([Xl[C_max_idx + 1:Ni - 1],
                           Yl[C_max_idx + 1:Ni - 1],
                           W*ones(np.shape(Xl[C_max_idx + 1:Ni - 1]))], axis=1)
    pts8 = np.concatenate([pts4[:, 0], pts4[:, 1], -pts4[:, 2]], axis=1)
    # Edge 0-1 and 12-13
    # A scaling factor is used to make the boundary similar to the leading edge of the airfoil
    pts9_x = pts1[:,0] * scalingX
    pts9_x = pts9_x - (pts9_x[-1] - pts1[:,0][-1])
    pts9_y = pts1[:,1] * scalingY
    pts9 = np.concatenate([pts9_x,
                           pts9_y,
                           W*ones(np.shape(pts9_x))], axis=1)
    pts11 = np.concatenate([pts9[:, 0], pts9[:, 1], -pts9[:, 2]], axis=1)
    # Edge 0-9 and 12-21
    # A scaling factor is used to make the boundary similar to the leading edge of the airfoil
    pts10_x = pts3[:,0] * scalingX
    pts10_x = pts10_x - (pts10_x[-1] - pts3[:,0][-1])
    pts10_y = pts3[:,1] * scalingY
    pts10 = np.concatenate([pts10_x,
                            pts10_y,
                            W*ones(np.shape(pts10_x))], axis=1)
    pts12 = np.concatenate([pts10[:, 0], pts10[:, 1], -pts10[:, 2]], axis=1)
    # Move point of mesh "nose"
    NoseX = pts9_x[0]
    NoseY = pts9_y[0]
    # Calculate the location of the vertices on the positive y-axis and put them in a matrix
    vertices = zeros((12, 3))
    vertices[0, :] = [NoseX[0], NoseY[0], W]
    vertices[1, :] = [Xu[C_max_idx], pts9_y[-1] , W]
    vertices[2, :] = [Xu[-1], pts9_y[-1] , W]
    vertices[3, :] = [D, pts9_y[-1] , W]
    vertices[4, :] = [Xu[0], Yu[0], W]
    vertices[5, :] = [Xu[C_max_idx], Yu[C_max_idx], W]
    vertices[6, :] = [Xl[C_max_idx], Yl[C_max_idx], W]
    vertices[7, :] = [Xu[-1], Yu[-1], W]
    vertices[8, :] = [D, Yu[-1], W]
    vertices[9, :] = [Xl[C_max_idx], pts10_y[-1], W]
    vertices[10, :] = [Xu[-1], pts10_y[-1], W]
    vertices[11, :] = [D, pts10_y[-1], W]
    # Create vertices for other side (negative z-axis)
    vertices2 = vertices.copy()
    vertices2[:, 2] *= -1
    vertices = np.vstack((vertices, vertices2))
    # Open file
    # NOTE(review): a `with open(...)` block would guarantee the handle is
    # closed if any write below raises.
    f = open("blockMeshDict", "w")
    # Write file
    f.write("/*--------------------------------*- C++ -*----------------------------------*\\ \n")
    f.write("| ========= | | \n")
    f.write("| \\\\ / F ield | OpenFOAM: The Open Source CFD Toolbox | \n")
    f.write("| \\\\ / O peration | Version: 3.0.x | \n")
    f.write("| \\\\ / A nd | Web: www.OpenFOAM.com | \n")
    f.write("| \\\\/ M anipulation | | \n")
    f.write("\\*---------------------------------------------------------------------------*/ \n")
    f.write("FoamFile \n")
    f.write("{ \n")
    f.write(" version 2.0; \n")
    f.write(" format ascii; \n")
    f.write(" class dictionary; \n")
    f.write(" object blockMeshDict; \n")
    f.write("} \n")
    f.write("// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // \n")
    f.write("\n")
    f.write("convertToMeters %f; \n" % scale)
    f.write("\n")
    f.write("vertices \n")
    f.write("( \n")
    for vertex in vertices:
        f.write(" (%f %f %f)\n" % tuple(vertex))
    f.write("); \n")
    f.write("\n")
    # Six hex blocks: three above the foil, three below (see vertex numbering
    # in the `boundary` section comments).
    f.write("blocks \n")
    f.write("( \n")
    f.write(" hex (16 17 13 12 4 5 1 0) (%i %i %i) simpleGrading (%f %f 1) \n" % (Nleading, NT, NW, ExpLeading, ExpTransverse))
    f.write(" hex (17 19 14 13 5 7 2 1) (%i %i %i) simpleGrading (%f %f 1) \n" % (Ntrailing, NT, NW, ExpTrailing, ExpTransverse))
    f.write(" hex (19 20 15 14 7 8 3 2) (%i %i %i) simpleGrading (%f %f 1) \n" % (ND, NT, NW, ExpDownstream, ExpTransverse))
    f.write(" hex (4 6 9 0 16 18 21 12) (%i %i %i) simpleGrading (%f %f 1) \n" % (Nleading, NT, NW, ExpLeading, ExpTransverse))
    f.write(" hex (6 7 10 9 18 19 22 21) (%i %i %i) simpleGrading (%f %f 1) \n" % (Ntrailing, NT, NW, ExpTrailing, ExpTransverse))
    f.write(" hex (7 8 11 10 19 20 23 22) (%i %i %i) simpleGrading (%f %f 1) \n" % (ND, NT, NW, ExpDownstream, ExpTransverse))
    f.write("); \n")
    f.write("\n")
    # Curved block edges follow the foil surface (and its scaled outer copy)
    # via splines through the point lists computed above.
    f.write("edges \n")
    f.write("( \n")
    f.write(" spline 4 5 \n")
    f.write(" ( \n")
    for pt in np.array(pts1):
        f.write(" (%f %f %f) \n" % tuple(pt))
    f.write(" ) \n")
    f.write(" spline 5 7 \n")
    f.write(" ( \n")
    for pt in np.array(pts2):
        f.write(" (%f %f %f)\n" % tuple(pt))
    f.write(" ) \n")
    f.write(" spline 4 6 \n")
    f.write(" ( \n")
    for pt in np.array(pts3):
        f.write(" (%f %f %f)\n" % tuple(pt))
    f.write(" ) \n")
    f.write(" spline 6 7 \n")
    f.write(" ( \n")
    for pt in np.array(pts4):
        f.write(" (%f %f %f)\n" % tuple(pt))
    f.write(" ) \n")
    f.write(" spline 16 17 \n")
    f.write(" ( \n")
    for pt in np.array(pts5):
        f.write(" (%f %f %f)\n" % tuple(pt))
    f.write(" ) \n")
    f.write(" spline 17 19 \n")
    f.write(" ( \n")
    for pt in np.array(pts6):
        f.write(" (%f %f %f)\n" % tuple(pt))
    f.write(" ) \n")
    f.write(" spline 16 18 \n")
    f.write(" ( \n")
    for pt in np.array(pts7):
        f.write(" (%f %f %f)\n" % tuple(pt))
    f.write(" ) \n")
    f.write(" spline 18 19 \n")
    f.write(" ( \n")
    for pt in np.array(pts8):
        f.write(" (%f %f %f)\n" % tuple(pt))
    f.write(" ) \n")
    f.write(" spline 0 1 \n")
    f.write(" ( \n")
    for pt in np.array(pts9):
        f.write(" (%f %f %f)\n" % tuple(pt))
    f.write(" ) \n")
    f.write(" spline 12 13 \n")
    f.write(" ( \n")
    for pt in np.array(pts11):
        f.write(" (%f %f %f)\n" % tuple(pt))
    f.write(" ) \n")
    f.write(" spline 0 9 \n")
    f.write(" ( \n")
    for pt in np.array(pts10):
        f.write(" (%f %f %f)\n" % tuple(pt))
    f.write(" ) \n")
    f.write(" spline 12 21 \n")
    f.write(" ( \n")
    for pt in np.array(pts12):
        f.write(" (%f %f %f)\n" % tuple(pt))
    f.write(" ) \n")
    f.write("); \n")
    f.write("\n")
    f.write("boundary \n")
    f.write("( \n")
    f.write(" inlet \n")
    f.write(" { \n")
    f.write(" type patch; \n")
    f.write(" faces \n")
    f.write(" ( \n")
    f.write(" (1 0 12 13) \n")
    f.write(" (0 9 21 12) \n")
    f.write(" ); \n")
    f.write(" } \n")
    f.write("\n")
    f.write(" outlet \n")
    f.write(" { \n")
    f.write(" type patch; \n")
    f.write(" faces \n")
    f.write(" ( \n")
    f.write(" (11 8 20 23) \n")
    f.write(" (8 3 15 20) \n")
    f.write(" ); \n")
    f.write(" } \n")
    f.write("\n")
    f.write(" topAndBottom \n")
    f.write(" { \n")
    f.write(" type patch; \n")
    f.write(" faces \n")
    f.write(" ( \n")
    f.write(" (3 2 14 15) \n")
    f.write(" (2 1 13 14) \n")
    f.write(" (9 10 22 21) \n")
    f.write(" (10 11 23 22) \n")
    f.write(" ); \n")
    f.write(" } \n")
    f.write("\n")
    f.write(" airfoil \n")
    f.write(" { \n")
    f.write(" type wall; \n")
    f.write(" faces \n")
    f.write(" ( \n")
    f.write(" (5 4 16 17) \n")
    f.write(" (7 5 17 19) \n")
    f.write(" (4 6 18 16) \n")
    f.write(" (6 7 19 18) \n")
    f.write(" ); \n")
    f.write(" } \n")
    f.write("); \n")
    f.write(" \n")
    f.write("mergePatchPairs \n")
    f.write("( \n")
    f.write("); \n")
    f.write(" \n")
    f.write("// ************************************************************************* // \n")
    # Close file
    f.close()
if __name__ == "__main__":
    # CLI entry point: `python <script> <digits>` writes ./blockMeshDict.
    # Fixed: the description previously read "Plotting results", a
    # copy-paste from an unrelated script.
    parser = argparse.ArgumentParser(
        description="Generate a blockMeshDict for a NACA foil")
    parser.add_argument("foil", help="NACA foil digits")
    args = parser.parse_args()
    print("Generating blockMeshDict for a NACA {}".format(args.foil))
    gen_blockmeshdict(args.foil)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
198,
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
1330,
300,
1040,
10223,
11,
1976,
27498,... | 1.838306 | 7,440 |
import json,logging
logger = logging.getLogger(__name__)
| [
11748,
33918,
11,
6404,
2667,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
198
] | 2.809524 | 21 |
# There are two sorted arrays nums1 and nums2 of size m and n respectively.
# Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
# Example 1:
# nums1 = [1, 3]
# nums2 = [2]
# The median is 2.0
# Example 2:
# nums1 = [1, 2]
# nums2 = [3, 4]
# The median is (2 + 3)/2 = 2.5
| [
2,
1318,
389,
734,
23243,
26515,
997,
82,
16,
290,
997,
82,
17,
286,
2546,
285,
290,
299,
8148,
13,
198,
198,
2,
9938,
262,
14288,
286,
262,
734,
23243,
26515,
13,
383,
4045,
1057,
640,
13357,
815,
307,
440,
7,
6404,
357,
76,
10... | 2.563492 | 126 |
import execute
import random
import traceback
# input sanity checker: prints 1 if valid
# correct checker: prints 1 if valid
# Returns score, status_message
| [
11748,
12260,
198,
11748,
4738,
198,
11748,
12854,
1891,
198,
198,
2,
5128,
34182,
2198,
263,
25,
20842,
352,
611,
4938,
198,
2,
3376,
2198,
263,
25,
20842,
352,
611,
4938,
198,
198,
2,
16409,
4776,
11,
3722,
62,
20500,
198
] | 3.878049 | 41 |
#/usr/bin/python3/
#coding=utf-8
#================ 简介 ===================
# 脚本: 伪·红石比较器
# 作者: 北方重工NK1
# 时间: 2017年12月10日 13:37:11
# 描述: 匹配元素_作业帮
#================ 简介 ===================
import re
Checking_Points1=r'<dt>考点:</dt>([\s\S]*?)</dd>'
Checking_Points2=r'.+?\[(.*?)\].+?'
Checking_Points_biology=r'<dd>([\s\S]*)\\n'
QQmsg=r'http://www.zybang.com/question/rcswebview/'
print("The comparisoner has been launched.")
| [
2,
14,
14629,
14,
8800,
14,
29412,
18,
14,
198,
2,
66,
7656,
28,
40477,
12,
23,
198,
2,
4770,
13328,
106,
222,
20015,
233,
36658,
855,
198,
2,
220,
220,
220,
220,
5525,
226,
248,
17312,
105,
171,
120,
248,
220,
220,
220,
220,
... | 1.557692 | 312 |
#!python3
# Definition for singly-linked list.
if __name__ == "__main__":
    # NOTE(review): ListNode, Solution and compareLinkedList are not defined
    # in this chunk — presumably provided elsewhere in the file; confirm.
    # Looks like LeetCode "Add Two Numbers": lists hold digits in reverse
    # order (2->4->3 = 342, 5->6->4 = 465, expected sum 7->0->8 = 807) —
    # verify against the Solution implementation.
    l1 = ListNode(2)
    l1.next = ListNode(4)
    l1.next.next = ListNode(3)
    l2 = ListNode(5)
    l2.next = ListNode(6)
    l2.next.next = ListNode(4)
    lsum = ListNode(7)
    lsum.next = ListNode(0)
    lsum.next.next = ListNode(8)
    print(compareLinkedList(Solution().addTwoNumbers(l1, l2), lsum))
| [
2,
0,
29412,
18,
628,
198,
2,
30396,
329,
1702,
306,
12,
25614,
1351,
13,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
300,
16,
796,
7343,
19667,
7,
17,
8,
198,
220,
220,
220,
300,
... | 2.137363 | 182 |
#
# Open Source SAM-BA Programmer
# Copyright (C) Dean Camera, 2016.
#
# dean [at] fourwalledcubicle [dot] com
# www.fourwalledcubicle.com
#
#
# Released under a MIT license, see LICENCE.txt.
import abc
import logging
class TimeoutError(Exception):
    """Raised when a read operation runs out of time while waiting for
    additional data to arrive.

    NOTE(review): under Python 3 this shadows the builtin ``TimeoutError``
    within this module — confirm that is intentional.
    """
    pass
class TransportBase(object, metaclass=abc.ABCMeta):
    """Base class for SAM-BA transports. Derived instances should override all
    methods listed here.

    Fixed: the original set ``__metaclass__ = abc.ABCMeta``, which is
    Python 2 syntax and has no effect under Python 3, so the
    ``@abc.abstractmethod`` decorators were not enforced. Using the
    ``metaclass=`` keyword makes instantiating this base class (or an
    incomplete subclass) raise ``TypeError`` as intended.
    """

    # Shared logger for all transport implementations.
    LOG = logging.getLogger(__name__)

    @abc.abstractmethod
    def read(self, length):
        """Reads a given number of bytes from the transport.

        Args:
            length : Number of bytes to read. If `None`, a full line will be
                     read until a terminator is reached.

        Returns:
            Byte array of the received data.
        """
        pass

    @abc.abstractmethod
    def write(self, data):
        """Writes a given number of bytes to the transport.

        Args:
            data : Bytes to write.
        """
        pass
| [
2,
198,
2,
220,
220,
220,
220,
220,
4946,
8090,
28844,
12,
4339,
6118,
647,
198,
2,
220,
220,
220,
220,
15069,
357,
34,
8,
11325,
20432,
11,
1584,
13,
198,
2,
198,
2,
220,
34798,
685,
265,
60,
1440,
86,
4262,
66,
549,
1548,
68... | 2.372709 | 491 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# rebuild_frame.py
#
# Copyright 2016 Bruno S <bruno@oac.unc.edu.ar>
#
# This file is part of ProperImage (https://github.com/toros-astro/ProperImage)
# License: BSD-3-Clause
# Full Text: https://github.com/toros-astro/ProperImage/blob/master/LICENSE.txt
#
import os
import shlex
import subprocess
import sys
import numpy as np
import matplotlib.pyplot as plt
import sep
from astropy.io import fits
from properimage import simtools
from properimage import propercoadd as pc
from properimage import utils
# =============================================================================
# PSF measure test by propercoadd
# =============================================================================
N = 512
test_dir = os.path.abspath('./test/test_images/rebuild_psf2')
# Simulate a frame with a spatially varying PSF.
# NOTE(review): sim_varpsf argument semantics (400 sources? SN=5.) come from
# properimage — confirm against its documentation.
frame = utils.sim_varpsf(400, test_dir, SN=5.)
with pc.SingleImage(frame) as sim:
    a_fields, psf_basis = sim.get_variable_psf()
# Diagnostic plots of the PSF decomposition.
utils.plot_afields(a_fields, frame.shape, os.path.join(test_dir, 'a_fields.png'))
utils.plot_psfbasis(psf_basis, os.path.join(test_dir, 'psf_basis.png'), nbook=False)
plt.imshow(np.log10(frame), interpolation='none')
#plt.plot(cat['sources']['x'], cat['sources']['y'], '.k')
plt.colorbar()
plt.tight_layout()
plt.savefig(os.path.join(test_dir, 'test_frame.png'))
plt.close()
# Detect sources on the background-subtracted frame.
cat = sep.extract(frame - sep.Background(frame),
                  thresh=4.5*sep.Background(frame).globalrms)
xy = [(int(row['y']), int(row['x'])) for row in cat]
# NOTE(review): `weights` is computed but unused (the weights= argument
# below is commented out) — confirm whether it should be passed.
weights = 100000. * cat['flux']/max(cat['flux'])
m = simtools.delta_point(N*2, center=False, xy=xy)#, weights=weights)
# NOTE(review): `sim` is used here after its `with` block has exited —
# confirm SingleImage remains usable once the context manager closes.
x, y = sim.get_afield_domain() # np.mgrid[:frame.shape[0], :frame.shape[1]]
# Reconstruct the frame as sum_i a_i(x, y) * (delta-map conv psf_i).
rebuild = np.zeros_like(frame)
for i in range(len(psf_basis)):
    psf = psf_basis[i]
    a = a_fields[i]
    rebuild += a(x, y) * simtools.convol_gal_psf_fft(m, psf)
rebuild += 1000.
plt.imshow(np.log10(rebuild), interpolation='none')
plt.colorbar()
plt.tight_layout()
plt.savefig(os.path.join(test_dir, 'frame_rebuild.png'))
plt.close()
f = fits.PrimaryHDU(frame)
f.writeto(os.path.join(test_dir, 'test_frame.fits'), overwrite=True)
r = fits.PrimaryHDU(rebuild)
r.writeto(os.path.join(test_dir, 'frame_rebuild.fits'), overwrite=True)
# Least-squares scale factor between original and reconstruction.
scale = np.vdot(frame.flatten(), rebuild.flatten())
scale = scale/np.vdot(rebuild.flatten(), rebuild.flatten())
diff = frame - scale*rebuild
plt.imshow(np.log10(diff), interpolation='none')
plt.colorbar()
plt.tight_layout()
plt.savefig(os.path.join(test_dir, 'diff.png'))
plt.close()
diff = fits.PrimaryHDU(diff)
diff.writeto(os.path.join(test_dir, 'diff.fits'), overwrite=True)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
220,
17884,
62,
14535,
13,
9078,
198,
2,
198,
2,
220,
15069,
1584,
31045,
311,
1279,
1671,
36909,
31,
78... | 2.527938 | 1,038 |
# Generated by Django 2.2.13 on 2021-01-07 14:53
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1485,
319,
33448,
12,
486,
12,
2998,
1478,
25,
4310,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.822222 | 45 |
#!/usr/bin/env python
# Copyright (c) 2016, Francesco Pace
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
sys.path.append('../..')
import frameworks.spark.spark as spark_framework
import frameworks.spark.spark_jupyter as spark_jupyter
#################################
# Zoe Application customization #
#################################
APP_NAME = 'ibm-notebook'
SPARK_MASTER_MEMORY_LIMIT = 512 * (1024 ** 2) # 512MB
SPARK_WORKER_MEMORY_LIMIT = 12 * (1024 ** 3) # 12GB
NOTEBOOK_MEMORY_LIMIT = 4 * (1024 ** 3) # 4GB, contains also the Spark client
SPARK_WORKER_CORES = 6
SPARK_WORKER_COUNT = 2
DOCKER_REGISTRY = '172.17.131.201:5000' # Set to None to use images from the Docker Hub
SPARK_MASTER_IMAGE = 'iostackrepo/spark-master-ibm'
SPARK_WORKER_IMAGE = 'iostackrepo/spark-worker-ibm'
NOTEBOOK_IMAGE = 'iostackrepo/spark-jupyter-notebook-ibm'
#####################
# END CUSTOMIZATION #
#####################
if __name__ == "__main__":
    # Build the Zoe application description and dump it as JSON to stdout.
    # NOTE(review): create_app is not defined in this chunk — presumably
    # provided by one of the frameworks.spark modules imported above; confirm.
    app_dict = create_app(app_name=APP_NAME, notebook_memory_limit=NOTEBOOK_MEMORY_LIMIT,
                          spark_master_memory_limit=SPARK_MASTER_MEMORY_LIMIT,
                          spark_worker_memory_limit=SPARK_WORKER_MEMORY_LIMIT,
                          spark_worker_cores=SPARK_WORKER_CORES, spark_worker_count=SPARK_WORKER_COUNT,
                          docker_registry=DOCKER_REGISTRY, spark_master_image=SPARK_MASTER_IMAGE,
                          spark_worker_image=SPARK_WORKER_IMAGE, notebook_image=NOTEBOOK_IMAGE)
    json.dump(app_dict, sys.stdout, sort_keys=True, indent=4)
    sys.stdout.write('\n')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
357,
66,
8,
1584,
11,
27025,
1073,
44111,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,... | 2.565693 | 822 |
"""
SaltEventProcessor Plugin
#########################
Processor plugin to emit events on task execution progress,
used by Nornir Proxy Runner modules to track tasks flow.
SaltEventProcessor does not work outside of SaltStack environment.
SaltEventProcessor reference
============================
.. autofunction:: nornir_salt.plugins.processors.SaltEventProcessor.SaltEventProcessor
"""
import logging
import time
from nornir.core.inventory import Host
from nornir.core.task import AggregatedResult, MultiResult, Task
log = logging.getLogger(__name__)
try:
    # starting with salt 3003 need to use loader_context to reconstruct
    # __salt__ dunder within threads:
    # details: https://github.com/saltstack/salt/issues/59962
    try:
        from salt.loader_context import loader_context
    except ImportError:
        # after salt 3004 api was updated - https://github.com/saltstack/salt/pull/60595
        from salt.loader.context import loader_context
    HAS_LOADER_CONTEXT = True
except ImportError:
    # Salt too old (or not installed): events are emitted without a loader
    # context (see SaltEventProcessor._emit_event).
    HAS_LOADER_CONTEXT = False
class SaltEventProcessor:
    """
    SaltEventProcessor can emit event on SaltStack Event bus about task execution progress.

    :param __salt__: (obj) __salt__ dunder object
    :param loader: (obj) salt loader, required to use __salt__ dunder within threads
        for SaltStack version above 3003.
    :param loader_context: (obj) salt loader context
    :param proxy_id: (str) Proxy Minion ID to form event tags
    :param tftr: (str) timestamp formatter string, default is "%d-%b-%Y %H:%M:%S"
    :param identity: (dict) task identity dictionary of uuid4, jid, function_name keys

    NOTE(review): no ``__init__`` is visible in this chunk even though
    parameters are documented above; the attributes used below
    (``loader``, ``__salt__``, ``tftr``) are presumably assigned by an
    ``__init__`` defined elsewhere — confirm.
    """

    def _emit_event(self, tag, data):
        """
        Helper function to emit event on SaltStack Event BUS.

        :param tag: (str) event tag string
        :param data: (any) event data content
        """
        # Salt >= 3003 requires reconstructing the __salt__ dunder inside
        # threads via a loader context (see module-level import block above).
        if HAS_LOADER_CONTEXT and self.loader is not None:
            with loader_context(self.loader):
                self.__salt__["event.send"](tag=tag, data=data)
        else:
            self.__salt__["event.send"](tag=tag, data=data)

    def _timestamp(self):
        """
        Helper function to produce event data timestamp.
        """
        # Formats the current local time with the configured format string.
        return time.strftime(self.tftr)
| [
37811,
198,
43061,
9237,
18709,
273,
42636,
198,
14468,
7804,
2,
198,
198,
18709,
273,
13877,
284,
27588,
2995,
319,
4876,
9706,
4371,
11,
198,
1484,
416,
34121,
343,
38027,
21529,
13103,
284,
2610,
8861,
5202,
13,
198,
198,
43061,
9237... | 2.82 | 800 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

# Package metadata for UMBPrivateAPI.
__copyright__ = 'Copyright 2018 by Fadhiil Rachman'
__version__ = '1.0.1'
__license__ = 'MIT'
__author__ = 'Fadhiil Rachman'
__author_email__ = 'fadhiilrachman@gmail.com'
__url__ = 'https://github.com/fadhiilrachman/UMBPrivateAPI'

# Fixed: the original read `__all__ = ('__version__')`, which is just a
# string (no trailing comma), so `from ... import *` would iterate its
# characters and fail. A one-element tuple requires the comma.
__all__ = (
    '__version__',
)
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
834,
22163,
4766,
834,
220,
220,
220,
220,
220,
220,
796,
705,
15269,
2864,
416,
376,
324,
5303,
... | 2.016304 | 184 |
import pytrellis
"""
Database copy utilities
This is used where there are several tiles with different types but the same or similar bit databases - such as all the
CIB tiles, some IO tiles, etc.
"""
def dbcopy(family, device, source, dest, copy_muxes=True, copy_words=True, copy_enums=True, copy_conns=True):
    """
    Copy the bit database from one tile type to another.

    :param family: database family
    :param device: database device
    :param source: tiletype to copy from
    :param dest: tiletype to copy to
    :param copy_muxes: include muxes in copy
    :param copy_words: include settings words in copy
    :param copy_enums: include settings enums in copy
    :param copy_conns: include fixed connections in copy
    """
    src_db = pytrellis.get_tile_bitdata(
        pytrellis.TileLocator(family, device, source))
    dst_db = pytrellis.get_tile_bitdata(
        pytrellis.TileLocator(family, device, dest))

    if copy_muxes:
        # Copy every mux arc feeding each sink in the source tile.
        for sink in src_db.get_sinks():
            mux = src_db.get_mux_data_for_sink(sink)
            for driver in mux.get_sources():
                dst_db.add_mux_arc(mux.arcs[driver])

    if copy_words:
        # Copy configuration words (multi-bit settings).
        for word_name in src_db.get_settings_words():
            dst_db.add_setting_word(src_db.get_data_for_setword(word_name))

    if copy_enums:
        # Copy enumerated configuration settings.
        for enum_name in src_db.get_settings_enums():
            dst_db.add_setting_enum(src_db.get_data_for_enum(enum_name))

    if copy_conns:
        # Copy hard-wired (non-configurable) connections.
        for conn in src_db.get_fixed_conns():
            dst_db.add_fixed_conn(conn)
| [
11748,
12972,
83,
11252,
271,
198,
198,
37811,
198,
38105,
4866,
20081,
198,
198,
1212,
318,
973,
810,
612,
389,
1811,
19867,
351,
1180,
3858,
475,
262,
976,
393,
2092,
1643,
20083,
532,
884,
355,
477,
262,
198,
34,
9865,
19867,
11,
... | 2.312236 | 711 |
import numpy as np
from ligo.skymap import kde
import matplotlib
matplotlib.use('Agg')
from matplotlib.colors import to_rgb
from matplotlib import pyplot as plt
from mpl_toolkits.basemap import Basemap
#matplotlib.rc('text', usetex=True)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
300,
14031,
13,
15688,
8899,
1330,
479,
2934,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
6738,
2603,
29487,
8019,
13,
4033,
669,
1330,
284,
62,
81... | 2.75 | 88 |
"""count - itertools"""
# Apresentação do count
from itertools import count
# count() accepts a float step, but takes no stop limit — the loop below
# must break out explicitly.
contador = count(start=5, step=0.05)
for v in contador:
    print(round(v, 2)) # round to two decimal places
    if v > 10:
        break
# NOTE(review): separador() is not defined in this chunk — presumably
# defined/imported elsewhere in the file; confirm.
print(separador())
# Pair an unbounded counter with a finite list: zip stops at the shorter
# iterable, effectively enumerating the names.
contador = count()
nomes = ['Matheus','Júlia','Rafaela']
nomes = zip(contador,nomes)
for v in nomes:
    print(v[0], v[1])
37811,
9127,
532,
340,
861,
10141,
37811,
198,
2,
5949,
2028,
64,
16175,
28749,
466,
954,
198,
6738,
340,
861,
10141,
1330,
954,
198,
2,
31506,
5350,
299,
21356,
647,
78,
390,
279,
5957,
781,
315,
84,
12427,
401,
78,
2239,
11,
12422... | 2.413408 | 179 |
import logging
from time import sleep
from mongodb_consistent_backup.Common import DB, MongoUri, validate_hostname
from mongodb_consistent_backup.Errors import DBOperationError, Error, OperationError
from mongodb_consistent_backup.Replication import Replset
| [
11748,
18931,
198,
198,
6738,
640,
1330,
3993,
198,
198,
6738,
285,
506,
375,
65,
62,
5936,
7609,
62,
1891,
929,
13,
17227,
1330,
20137,
11,
42591,
52,
380,
11,
26571,
62,
4774,
3672,
198,
6738,
285,
506,
375,
65,
62,
5936,
7609,
... | 3.303797 | 79 |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 30 15:26:13 2016
@author: jtara1
General syntax for subreddits.txt:
: (colon character) denotes folder name
subreddit url or word denotes subreddit
For more examples see https://github.com/jtara1/RedditImageGrab/commit/8e4787ef9ac43ca694fc663be026f69a568bb622
Example of expected input and output:
subreddits.txt = "
pc-wallpapers:
https://www.reddit.com/r/wallpapers/
/r/BackgroundArt/
nature_pics:
http://www.reddit.com/r/EarthPorn/
:
Mountain
"
parse_subreddit_list('/MyPath/subreddits.txt', '/MyPath/') = [
('wallpapers', '/MyPath/pc-wallpaper/wallpapers'),
('BackgroundArt', '/MyPath/pc-wallpaper/BackgroundArt'),
('EarthPorn', '/MyPath/nature-pics/EarthPorn'),
('Mountain', '/MyPath/Mountain')
]
"""
import re
import os
from os import getcwd, mkdir
from .general_utility import get_subreddit_name
def parse_subreddit_list(file_path, base_path=getcwd()):
    """Gets list of subreddits from a file & returns folder for media from each subreddit

    :param file_path: path of text file to load subreddits from (relative or full path)
    :param base_path: base path that gets returned with each subreddit.
        NOTE: the default is evaluated once at import time (the cwd when the
        module was loaded) — kept as-is for backward compatibility.
    :return: list containing tuples of subreddit & its associated folder to get media saved to
    :rtype: list
    :raises IOError: if ``file_path`` cannot be opened (original error details
        are preserved instead of re-raising a bare ``IOError``)
    """
    folder_regex = re.compile('([a-zA-Z0-9_\- ]*):\n')
    subreddit_regex = re.compile('(?:https?://)?(?:www.)?reddit.com/r/([a-zA-Z0-9_]*)')
    subreddit_regex2 = re.compile('(?:/r/)?([a-zA-Z0-9_]*)')

    if not os.path.isdir(base_path):
        mkdir(base_path)

    output = []
    path = base_path
    # `with` guarantees the handle is closed (the original leaked it), and an
    # unopenable path propagates the original IOError to the caller.
    with open(file_path, 'r') as file:
        # iterate through the lines using regex to check if line is subreddit
        # or folder title
        for line in file:
            if line == '\n':
                continue
            folder_match = folder_regex.match(line)
            if folder_match:
                if folder_match.group(1) != '':
                    # strip the trailing ":\n" to get the folder name
                    path = os.path.join(base_path, line[:-2])
                    if not os.path.isdir(path):
                        mkdir(path)
                else:
                    # a bare ":" line resets back to the base path
                    path = base_path
                continue

            subreddit_match = subreddit_regex.match(line)
            if not subreddit_match:
                subreddit_match = subreddit_regex2.match(line)
                if not subreddit_match:
                    # file.tell() is unreliable while iterating a text file
                    # (raises OSError in Python 3), so report the line itself.
                    print('parse_subreddit_list Error: no match for %r, '
                          'skipping this iteration.' % line)
                    continue

            subreddit = get_subreddit_name(subreddit_match.group(1))
            final_path = os.path.join(path, subreddit)
            if not os.path.isdir(final_path):
                mkdir(final_path)
            output.append((subreddit, final_path))
    return output
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
2447,
1542,
1315,
25,
2075,
25,
1485,
1584,
198,
198,
31,
9800,
25,
474,
83,
3301,
16,
198,
198,
12218,
15582,
329,
46386,
13,
14116,
25... | 2.389313 | 1,179 |
from argparse import ArgumentParser
from jmm.scripts.generate_configuration import generate_configuration
from jmm.scripts.scrape import scrape
from jmm.scripts.valid_configuration import valid_configuration
from jmm.scripts.show_version import show_version
from jmm.scripts import command as COMMAND
if __name__ == '__main__':
    # Script entry point; main() is presumably defined/imported elsewhere in
    # this module (not visible in this chunk) — confirm.
    main() # pragma: no cover
| [
6738,
1822,
29572,
1330,
45751,
46677,
198,
198,
6738,
474,
3020,
13,
46521,
13,
8612,
378,
62,
11250,
3924,
1330,
7716,
62,
11250,
3924,
198,
6738,
474,
3020,
13,
46521,
13,
1416,
13484,
1330,
42778,
198,
6738,
474,
3020,
13,
46521,
... | 3.539216 | 102 |
# This file is for testing and verifying the ratio-to-true calculation
# It will be done in real-time rather than as part of test 5
# As it will be much quicker to debug for #54
import pyautogui
import sys
import time
from windowcapture import WindowCapture
import os
print('Press Ctrl-C to quit.')
if __name__ == "__main__":
    # NOTE(review): ConvertTest is not defined in this chunk — presumably
    # defined earlier in the file; confirm.
    ct = ConvertTest()
    ct.start()
| [
2,
770,
2393,
318,
329,
4856,
290,
45505,
262,
8064,
12,
1462,
12,
7942,
17952,
198,
2,
632,
481,
307,
1760,
287,
1103,
12,
2435,
2138,
621,
355,
636,
286,
1332,
642,
198,
2,
1081,
340,
481,
307,
881,
20061,
284,
14257,
329,
1303,... | 3.256637 | 113 |
#!/usr/bin/env python
# encoding: utf-8
r"""
Simple advection Riemann solvers
Basic advection Riemann solvers of the form (1d)
.. math::
q_t + A q_x = 0.
:Authors:
Kyle T. Mandli (2008-2-20): Initial version
"""
# ============================================================================
# Copyright (C) 2008 Kyle T. Mandli <mandli@amath.washington.edu>
#
# Distributed under the terms of the Berkeley Software Distribution (BSD)
# license
# http://www.opensource.org/licenses/
# ============================================================================
import numpy as np
def rp_advection_1d(q_l, q_r, aux_l, aux_r, aux_global):
    r"""Basic 1d advection riemann solver

    Solves :math:`q_t + u q_x = 0` where the constant speed ``u`` is read
    from ``aux_global["u"]``.  ``aux_l`` and ``aux_r`` are accepted for
    interface compatibility but unused.

    *aux_global* should contain -
     - *u* - (float) Determines advection speed

    Returns ``(wave, s, amdq, apdq)``.

    :Version: 1.0 (2008-2-20)
    """
    # Riemann solver constants: one equation, one wave family.
    meqn = 1
    mwaves = 1

    # One Riemann problem per cell interface.
    nrp = q_l.shape[0]
    u = aux_global["u"]

    # Single wave: the jump in q across each interface, propagating at u.
    wave = np.empty((nrp, meqn, mwaves))
    wave[:, 0, 0] = q_r[:, 0] - q_l[:, 0]
    s = np.empty((nrp, mwaves))
    s[:, 0] = u

    # Upwind fluctuation splitting: the whole flux difference goes right
    # (apdq) for u > 0, otherwise left (amdq).
    amdq = np.zeros((nrp, meqn))
    apdq = np.zeros((nrp, meqn))
    fluctuation = s[:, 0] * wave[:, 0, 0]
    if u > 0:
        apdq[:, 0] = fluctuation
    else:
        amdq[:, 0] = fluctuation
    return wave, s, amdq, apdq
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
81,
37811,
198,
26437,
512,
303,
596,
371,
26597,
1236,
1540,
690,
198,
198,
26416,
512,
303,
596,
371,
26597,
1236,
1540,
690,
286,
262,
1296,
... | 2.337705 | 610 |
# Generated by Django 3.1.3 on 2020-12-02 06:26
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
18,
319,
12131,
12,
1065,
12,
2999,
9130,
25,
2075,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from django.shortcuts import render
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
198,
2,
13610,
534,
5009,
994,
13,
628
] | 3.764706 | 17 |
import argparse
import json
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
torch.backends.cudnn.benchmark = True
from model import MultiTaskImageNetwork
from data import CARLA_Data
from class_converter import sub_classes
parser = argparse.ArgumentParser()
parser.add_argument('--id', type=str, default='aim_vis_abs', help='Unique experiment identifier.')
parser.add_argument('--device', type=str, default='cuda', help='Device to use')
parser.add_argument('--epochs', type=int, default=100, help='Number of train epochs.')
parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate.')
parser.add_argument('--val_every', type=int, default=5, help='Validation frequency (epochs).')
parser.add_argument('--batch_size', type=int, default=24, help='Batch size')
parser.add_argument('--ignore_sides', action='store_true', help='Ignores side cameras')
parser.add_argument('--ignore_rear', action='store_true', help='Ignores rear camera')
parser.add_argument('--classes', type=str, default='no_stop')
parser.add_argument('--seq_len', type=int, default=1, help='Input sequence length (factor of 10)')
parser.add_argument('--pred_len', type=int, default=4, help='number of timesteps to predict')
parser.add_argument('--logdir', type=str, default='log', help='Directory to log data to.')
parser.add_argument('--input_scale', type=int, default=1, help='Inverse input scale factor')
parser.add_argument('--input_crop', type=float, default=0.64, help='Input crop size')
args = parser.parse_args()
args.logdir = os.path.join(args.logdir, args.id)
class Engine(object):
"""Engine that runs training and inference.
Args
- cur_epoch (int): Current epoch.
- print_every (int): How frequently (# batches) to print loss.
- validate_every (int): How frequently (# epochs) to run validation.
"""
# Data
root_dir = '/is/rg/avg/kchitta/carla9-10_data/2021/apv3'
train_towns = ['Town01', 'Town02', 'Town03', 'Town04', 'Town05', 'Town06', 'Town07', 'Town10']
val_towns = ['Town01_long', 'Town02_long', 'Town03_long', 'Town04_long', 'Town05_long', 'Town06_long']
train_data, val_data = [], []
for town in train_towns:
train_data.append(os.path.join(root_dir, town))
train_data.append(os.path.join(root_dir, town+'_small'))
for town in val_towns:
val_data.append(os.path.join(root_dir, town))
class_converter = sub_classes[args.classes]
print("classes: ", class_converter)
train_set = CARLA_Data(root=train_data,
pred_len=args.pred_len,
class_converter=class_converter,
ignore_sides=args.ignore_sides,
ignore_rear=args.ignore_rear,
seq_len=args.seq_len,
input_scale=args.input_scale,
input_crop=args.input_crop)
val_set = CARLA_Data(root=val_data,
pred_len=args.pred_len,
class_converter=class_converter,
ignore_sides=args.ignore_sides,
ignore_rear=args.ignore_rear,
seq_len=args.seq_len,
input_scale=args.input_scale,
input_crop=args.input_crop)
dataloader_train = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)
dataloader_val = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
# Model
num_segmentation_classes = len(np.unique(class_converter))
num_cameras = 1
if not args.ignore_sides:
num_cameras += 2
if not args.ignore_rear:
num_cameras += 1
model = MultiTaskImageNetwork('cuda', num_segmentation_classes, args.pred_len, num_cameras)
optimizer = optim.AdamW(model.parameters(), lr=args.lr)
conf_log = {
"id": args.id,
"epochs": args.epochs,
"batch_size": args.batch_size,
"lr": args.lr,
"seq_len": args.seq_len,
"pred_len": args.pred_len,
"classes": class_converter,
"class_name": args.classes,
"num_cameras": num_cameras,
}
trainer = Engine(conf_log)
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print ('Total trainable parameters: ', params)
# Create logdir
if not os.path.isdir(args.logdir):
os.makedirs(args.logdir)
print('Created dir:', args.logdir)
elif os.path.isfile(os.path.join(args.logdir, 'recent.log')):
print('Loading checkpoint from ' + args.logdir)
with open(os.path.join(args.logdir, 'recent.log'), 'r') as f:
log_table = json.load(f)
# Load variables
trainer.cur_epoch = log_table['epoch']
if 'iter' in log_table: trainer.cur_iter = log_table['iter']
trainer.bestval = log_table['bestval']
trainer.train_loss = log_table['train_loss']
trainer.val_loss = log_table['val_loss']
# Load checkpoint
model.load_state_dict(torch.load(os.path.join(args.logdir, 'model.pth')))
optimizer.load_state_dict(torch.load(os.path.join(args.logdir, 'recent_optim.pth')))
# Log args
with open(os.path.join(args.logdir, 'args.txt'), 'w') as f:
json.dump(args.__dict__, f, indent=2)
for epoch in range(trainer.cur_epoch, args.epochs):
trainer.train()
if epoch % args.val_every == 0:
trainer.validate()
trainer.save() | [
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
6738,
28034,
13,
26791,
13,
... | 2.68797 | 1,862 |
import numpy as np
import sympy as sp
import mpi4py.MPI as mpi
import pylbm
X, Y, LA = sp.symbols('X, Y, LA')
rho, qx, qy, T = sp.symbols('rho, qx, qy, T')
def run(dx, Tf, generator="cython", sorder=None, withPlot=True):
"""
Parameters
----------
dx: double
spatial step
Tf: double
final time
generator: pylbm generator
sorder: list
storage order
withPlot: boolean
if True plot the solution otherwise just compute the solution
"""
# parameters
T0 = .5
Tin = -.5
xmin, xmax, ymin, ymax = 0., 1., 0., 1.
Ra = 2000
Pr = 0.71
Ma = 0.01
alpha = .005
la = 1. # velocity of the scheme
rhoo = 1.
g = 9.81
uo = 0.025
nu = np.sqrt(Pr*alpha*9.81*(T0-Tin)*(ymax-ymin)/Ra)
kappa = nu/Pr
eta = nu
#print nu, kappa
snu = 1./(.5+3*nu)
seta = 1./(.5+3*eta)
sq = 8*(2-snu)/(8-snu)
se = seta
sf = [0., 0., 0., seta, se, sq, sq, snu, snu]
#print sf
a = .5
skappa = 1./(.5+10*kappa/(4+a))
#skappa = 1./(.5+np.sqrt(3)/6)
se = 1./(.5+np.sqrt(3)/3)
snu = se
sT = [0., skappa, skappa, se, snu]
#print sT
dico = {
'box':{'x':[xmin, xmax], 'y':[ymin, ymax], 'label':[1, 2, 0, 0]},
'elements':[
pylbm.Parallelogram([xmin, ymin], [ .1, 0], [0, .8], label=0),
pylbm.Parallelogram([xmax, ymin], [-.1, 0], [0, .8], label=0),
],
'space_step':dx,
'scheme_velocity':la,
'schemes':[
{
'velocities': list(range(9)),
'conserved_moments': [rho, qx, qy],
'polynomials':[
1, X, Y,
3*(X**2+Y**2)-4,
sp.Rational(1, 2)*(9*(X**2+Y**2)**2-21*(X**2+Y**2)+8),
3*X*(X**2+Y**2)-5*X, 3*Y*(X**2+Y**2)-5*Y,
X**2-Y**2, X*Y
],
'relaxation_parameters':sf,
'equilibrium':[
rho, qx, qy,
-2*rho + 3*(qx**2+qy**2),
rho - 3*(qx**2+qy**2),
-qx, -qy,
qx**2 - qy**2, qx*qy
],
'source_terms':{qy: alpha*g*T},
'init':{rho: 1., qx: 0., qy: 0.},
},
{
'velocities': list(range(5)),
'conserved_moments':T,
'polynomials':[1, X, Y, 5*(X**2+Y**2) - 4, (X**2-Y**2)],
'equilibrium':[T, T*qx, T*qy, a*T, 0.],
'relaxation_parameters':sT,
'init':{T:(init_T, (T0,))},
},
],
'boundary_conditions':{
0:{'method':{0: pylbm.bc.BouzidiBounceBack, 1: pylbm.bc.BouzidiAntiBounceBack}, 'value':(bc, (T0,))},
1:{'method':{0: pylbm.bc.BouzidiBounceBack, 1: pylbm.bc.BouzidiAntiBounceBack}, 'value': (bc_in, (T0, Tin, ymax, rhoo, uo))},
2:{'method':{0: pylbm.bc.NeumannX, 1: pylbm.bc.NeumannX},},
},
'generator': generator,
}
sol = pylbm.Simulation(dico)
if withPlot:
# create the viewer to plot the solution
viewer = pylbm.viewer.matplotlib_viewer
fig = viewer.Fig()
ax = fig[0]
im = ax.image(sol.m[T].transpose(), cmap='jet', clim=[Tin, T0])
ax.title = 'solution at t = {0:f}'.format(sol.t)
ax.polygon([[xmin/dx, ymin/dx],[xmin/dx, (ymin+.8)/dx], [(xmin+.1)/dx, (ymin+.8)/dx], [(xmin+.1)/dx, ymin/dx]], 'k')
ax.polygon([[(xmax-.1)/dx, ymin/dx],[(xmax-.1)/dx, (ymin+.8)/dx], [xmax/dx, (ymin+.8)/dx], [xmax/dx, ymin/dx]], 'k')
fig.animate(update, interval=1)
fig.show()
else:
while sol.t < Tf:
sol.one_time_step()
return sol
if __name__ == '__main__':
dx = 1./256
Tf = 10.
run(dx, Tf)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
10558,
88,
355,
599,
198,
11748,
285,
14415,
19,
9078,
13,
7378,
40,
355,
285,
14415,
198,
198,
11748,
279,
2645,
20475,
198,
198,
55,
11,
575,
11,
9131,
796,
599,
13,
1837,
2022,
10220,
10... | 1.662075 | 2,323 |
import warnings
from typing import Tuple, Dict
import streamlit as st
from gutenTAG import GutenTAG
from timeeval_gui.timeseries_config import TimeSeriesConfig
from timeeval_gui.utils import get_base_oscillations, get_anomaly_types, get_anomaly_params, \
get_base_oscillation_parameters
from .page import Page
from ..files import Files
| [
11748,
14601,
198,
6738,
19720,
1330,
309,
29291,
11,
360,
713,
198,
198,
11748,
4269,
18250,
355,
336,
198,
6738,
308,
7809,
42197,
1330,
402,
7809,
42197,
198,
198,
6738,
640,
18206,
62,
48317,
13,
22355,
10640,
62,
11250,
1330,
3862,... | 3.355769 | 104 |
# Generated by Django 3.1.11 on 2021-05-17 02:12
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
1157,
319,
33448,
12,
2713,
12,
1558,
7816,
25,
1065,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.8 | 30 |
import time
import click
import scapy.all as scapy
from networktoolkit import networkscan
def gen_arp_response(target_ip, spoof_ip):
"""Generates an ARP Response packet
:param target_ip: ip address to send packet to
:param spoof_ip: ip address to spoof
:return: A scapy packet
"""
if scan_results := networkscan.get_clients(
target_ip, 10
): # checks to see if the target is reachable on the network
target = scan_results[0]
packet = scapy.ARP(
op=2, # ARP response (op=1 would be ARP request). We are spoofing a request packet
pdst=target_ip,
hwdst=target.mac_addr,
psrc=spoof_ip, # ip adddress we are spoofing (pretending to be)
)
return packet
def arpspoof(target_ip, spoof_ip, bi_directional=False, delay=1):
"""Spoof a given ip address by sending ARP Response packets
:param target_ip: ip address of target
:param spoof_ip: ip address to spoof
:param bi_directional: if True, also send ARP Responses to spoof_ip spoofing target_ip
:type bi_directional: bool
"""
packets = []
click.echo(f"[+] Generating ARP Response (dest={target_ip} spoofing={spoof_ip}")
packets.append(gen_arp_response(target_ip, spoof_ip))
if bi_directional:
click.echo(f"[+] Generating ARP Response (dest={spoof_ip} spoofing={target_ip}")
packets.append(gen_arp_response(spoof_ip, target_ip))
counter = 0
try:
while True:
counter += 1
for packet in packets:
scapy.send(packet, verbose=False)
click.echo(
f"Sent ARP Response to {packet.pdst} spoofing {packet.psrc} {counter} time{'s' if counter != 1 else ''}"
)
time.sleep(delay)
except KeyboardInterrupt:
click.echo(f"Detected keyboard interrupt. Exiting...")
@click.command()
@click.argument("target_ip")
@click.argument("spoof_ip")
@click.option("-b", "--bi_directional", is_flag=True, help="Spoof in both directions")
@click.option(
"-d",
"--delay",
default=1,
help="Delay between sending each set of packets (seconds)",
)
| [
11748,
640,
198,
198,
11748,
3904,
198,
11748,
629,
12826,
13,
439,
355,
629,
12826,
198,
198,
6738,
3127,
25981,
15813,
1330,
7686,
5171,
628,
198,
4299,
2429,
62,
5117,
62,
26209,
7,
16793,
62,
541,
11,
42078,
62,
541,
2599,
198,
... | 2.423673 | 904 |
from .models import Ticket, UserProfile, Order, TicketType, OrderType
# Retrieving user profile for the specified user
# Retrieving tickets for the specificied user profile
# Retrieving orders for the specified user profile
# Retrieving the total number of subalterns for a specific user profile
# Retrieving a list of all subalterns for a specific user profile
# Returns the type of the user:
# 0 - admin; 1 - user; 2 - helpdesk;
# Returns all exmployees from the database
# Returns the id of the ticket with the specified title | [
6738,
764,
27530,
1330,
24014,
11,
11787,
37046,
11,
8284,
11,
24014,
6030,
11,
8284,
6030,
198,
198,
2,
4990,
37418,
2836,
7034,
329,
262,
7368,
2836,
198,
198,
2,
4990,
37418,
8587,
329,
262,
2176,
798,
2836,
7034,
198,
198,
2,
49... | 3.977778 | 135 |
# Generated by Django 2.1.1 on 2018-11-08 16:04
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
16,
319,
2864,
12,
1157,
12,
2919,
1467,
25,
3023,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
from pagarme import transaction
from tests.resources.dictionaries import transaction_dictionary
import pytest
import time
| [
6738,
279,
32452,
1326,
1330,
8611,
198,
6738,
5254,
13,
37540,
13,
67,
2867,
3166,
1330,
8611,
62,
67,
14188,
198,
11748,
12972,
9288,
198,
11748,
640,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628
] | 3.810811 | 37 |
#Database setup file
#Useful when setting up for first time
import sqlite3
conn = sqlite3.connect("/home/contactbook/website/storage.db")
c = conn.cursor()
#Create tables for users and books
c.execute("DROP TABLE IF EXISTS users;")
c.execute("""
CREATE TABLE IF NOT EXISTS users(
id integer PRIMARY KEY,
username text NOT NULL,
password text NOT NULL,
pnumber text NOT NULL
);
""")
c.execute("DROP TABLE IF EXISTS books;")
c.execute("""
CREATE TABLE IF NOT EXISTS books(
id integer PRIMARY KEY,
time integer NOT NULL,
bookname text NOT NULL,
username text NOT NULL,
public text NOT NULL
);
""")
#Ensure that the tables are created
c.execute("SELECT name FROM sqlite_master WHERE type='table';")
r = c.fetchall()
print(r)
| [
2,
38105,
9058,
2393,
201,
198,
2,
11041,
913,
618,
4634,
510,
329,
717,
640,
201,
198,
201,
198,
11748,
44161,
578,
18,
201,
198,
201,
198,
37043,
796,
44161,
578,
18,
13,
8443,
7203,
14,
11195,
14,
32057,
2070,
14,
732,
12485,
1... | 2.691525 | 295 |
import os
import sqlite3
from datetime import datetime
from pygate_grpc.client import PowerGateClient
api = os.getenv('POWERGATE_API')
token = os.getenv('POWERGATE_TOKEN')
powergate = PowerGateClient(api, False)
# get final storage deals info
storage_deals = powergate.deals.storage_deal_records(
include_pending=False, include_final=True, token=token
)
total_deals = len(storage_deals)
print(str(total_deals) + " finalized storage deals found.")
if total_deals > 0:
abs_path = os.getcwd()
split = os.path.split(abs_path)
db_path = os.path.join(
split[0], "pipeline/deplatformr_open_images_workflow.sqlite")
workflow_db = sqlite3.connect(db_path)
cursor = workflow_db.cursor()
for deal in storage_deals:
try:
price = deal["dealInfo"]["pricePerEpoch"]
except:
price = 0
utc_date = datetime.utcfromtimestamp(int(deal["time"]))
cid = deal["rootCid"]
cursor.execute("SELECT name from packages where cid = ?", (cid,),)
filename = cursor.fetchone()
cursor.execute("INSERT OR IGNORE INTO deals (deal_id, payload_cid, piece_cid, timestamp, piece_size, miner_id, start_epoch, activation_epoch, duration, price, wallet, state) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)", (
deal["dealInfo"]["dealId"], deal["rootCid"], deal["dealInfo"]["pieceCid"], utc_date, deal["dealInfo"]["size"], deal["dealInfo"]["miner"], deal["dealInfo"]["startEpoch"], deal["dealInfo"]["activationEpoch"], deal["dealInfo"]["duration"], price, deal["address"], deal["dealInfo"]["stateName"]),)
workflow_db.commit()
workflow_db.close()
print("Database updated.") | [
11748,
28686,
198,
11748,
44161,
578,
18,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
12972,
10494,
62,
2164,
14751,
13,
16366,
1330,
4333,
22628,
11792,
198,
198,
15042,
796,
28686,
13,
1136,
24330,
10786,
47,
36048,
38,
6158,
... | 2.581395 | 645 |
from django.db.backends import BaseDatabaseIntrospection
from sqlanydb import ProgrammingError, OperationalError
import re
import sqlanydb
foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)")
| [
6738,
42625,
14208,
13,
9945,
13,
1891,
2412,
1330,
7308,
38105,
5317,
305,
31308,
198,
6738,
44161,
1092,
9945,
1330,
30297,
12331,
11,
6564,
864,
12331,
198,
11748,
302,
198,
11748,
44161,
1092,
9945,
628,
198,
38823,
62,
2539,
62,
26... | 2.628866 | 97 |
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import cross_validate
if __name__ == '__main__':
sklearn_reference_XOR() | [
6738,
1341,
35720,
13,
710,
1523,
62,
27349,
1330,
10373,
47,
9487,
7483,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
220,
1330,
3272,
62,
12102,
378,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220... | 3.1 | 50 |
# This is a standalone bundler. You can use this script by itself
# The output is a minimized JSON, which can be distributed
# Usage: py gbundle.py <inputPath>
# Output: bundle.json
# PY_INJECT
__main__()
| [
2,
770,
318,
257,
27669,
12207,
1754,
13,
921,
460,
779,
428,
4226,
416,
2346,
201,
198,
2,
383,
5072,
318,
257,
49491,
19449,
11,
543,
460,
307,
9387,
201,
198,
201,
198,
2,
29566,
25,
12972,
308,
65,
31249,
13,
9078,
1279,
15414... | 2.7875 | 80 |
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Optional
from python_on_whales.utils import DockerCamelModel
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
32233,
198,
198,
6738,
21015,
62,
261,
62,
1929,
2040,
13,
26791,
1330,
25716,
34,
17983,
17633,
628
] | 3.794872 | 39 |
##############################################################################
# Node Class
##############################################################################
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
19081,
5016,
198,
29113,
29113,
7804,
4242,
2235,
628
] | 10.75 | 16 |
# Copyright (c) 2009-2013 Simon van Heeringen <s.vanheeringen@ncmls.ru.nl>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
""" Configuration for GimmeMotifs """
import ConfigParser
import distutils.sysconfig
import os
### CONSTANTS ###
GM_VERSION = "0.8.5"
BG_TYPES = ["random", "random_genomic", "matched_genomic", "random_promoter"]
FA_VALID_BGS = ["random", "promoter", "user"]
BED_VALID_BGS = ["genomic_matched", "random", "promoter", "user"]
BG_RANK = {"user":1, "promoter":2, "genomic_matched":3, "random":4}
#if __name__ == "__main__":
# m = MotifConfig()
# print m.is_configured("meme")
| [
2,
15069,
357,
66,
8,
3717,
12,
6390,
11288,
5719,
679,
1586,
268,
1279,
82,
13,
10438,
258,
1586,
268,
31,
10782,
4029,
82,
13,
622,
13,
21283,
29,
198,
2,
198,
2,
770,
8265,
318,
1479,
3788,
13,
921,
460,
17678,
4163,
340,
290... | 2.774809 | 262 |
# -*- coding: utf-8 -*-
# @File : SimpleRewMsfModule.py
# @Date : 2019/1/11
# @Desc :
from Lib.ModuleAPI import *
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
8979,
220,
1058,
17427,
30003,
10128,
69,
26796,
13,
9078,
198,
2,
2488,
10430,
220,
1058,
13130,
14,
16,
14,
1157,
198,
2,
2488,
24564,
220,
1058,
198,
198,
... | 2.245283 | 53 |
import torch
import numpy as np
import logging, yaml, os, sys, argparse, time, math
from tqdm import tqdm
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from scipy.io import wavfile
from random import sample
from Modules import GlowTTS
from Datasets import Text_to_Token, Token_Stack, Mel_Stack, Mel_for_GE2E_Stack, Pitch_Stack
from Pattern_Generator import Pattern_Generate, Text_Filtering
from Speaker_Embedding.Modules import Encoder as Speaker_Embedding, Normalize
from Arg_Parser import Recursive_Parse
hp = Recursive_Parse(yaml.load(
open('Hyper_Parameters.yaml', encoding='utf-8'),
Loader=yaml.Loader
))
if not hp.Device is None:
os.environ['CUDA_VISIBLE_DEVICES']= hp.Device
if not torch.cuda.is_available():
device = torch.device('cpu')
else:
device = torch.device('cuda:0')
torch.backends.cudnn.benchmark = True
torch.cuda.set_device(0)
logging.basicConfig(
level=logging.INFO, stream=sys.stdout,
format= '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s'
)
if hp.Use_Mixed_Precision:
try:
from apex import amp
except:
logging.info('There is no apex modules in the environment. Mixed precision does not work.')
hp.Use_Mixed_Precision = False
if __name__ == '__main__':
argParser = argparse.ArgumentParser()
argParser.add_argument('-c', '--checkpoint', required= True)
args = argParser.parse_args()
labels = [
'Alpha',
'Bravo'
]
texts = [
'Birds of a feather flock together.',
'A creative artist works on his next composition because he was not satisfied with his previous one.'
]
scales = [1.0, 0.9]
speakers = [0, 1]
refereces = [
'./Wav_for_Inference/LJ.LJ050-0278.wav',
'./Wav_for_Inference/VCTK.p361_209.wav'
]
inferencer = Inferencer(checkpoint_path= args.checkpoint)
inferencer.Inference(
labels= labels,
texts= texts,
scales= scales,
speakers= speakers,
references= refereces,
inference_path= 'XXX'
) | [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
18931,
11,
331,
43695,
11,
28686,
11,
25064,
11,
1822,
29572,
11,
640,
11,
10688,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
2603,
29487,
8019,
198,
6759,
... | 2.446759 | 864 |
# Generated by Django 1.10.7 on 2017-07-03 21:23
from django.conf import settings
from django.db import migrations
from corehq.sql_db.operations import RawSQLMigration
migrator = RawSQLMigration(('corehq', 'sql_proxy_accessors', 'sql_templates'), {
'PL_PROXY_CLUSTER_NAME': settings.PL_PROXY_CLUSTER_NAME
})
| [
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
22,
319,
2177,
12,
2998,
12,
3070,
2310,
25,
1954,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
198,
198,
6738,
4755,
71,
80,
... | 2.709402 | 117 |
# coding: utf-8
"""
vpc
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RouteParameter(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'destination_cidr_block': 'str',
'target_type_code': 'str',
'target_no': 'str',
'target_name': 'str'
}
attribute_map = {
'destination_cidr_block': 'destinationCidrBlock',
'target_type_code': 'targetTypeCode',
'target_no': 'targetNo',
'target_name': 'targetName'
}
def __init__(self, destination_cidr_block=None, target_type_code=None, target_no=None, target_name=None): # noqa: E501
"""RouteParameter - a model defined in Swagger""" # noqa: E501
self._destination_cidr_block = None
self._target_type_code = None
self._target_no = None
self._target_name = None
self.discriminator = None
self.destination_cidr_block = destination_cidr_block
self.target_type_code = target_type_code
if target_no is not None:
self.target_no = target_no
if target_name is not None:
self.target_name = target_name
@property
def destination_cidr_block(self):
"""Gets the destination_cidr_block of this RouteParameter. # noqa: E501
목적지CIDR블록 # noqa: E501
:return: The destination_cidr_block of this RouteParameter. # noqa: E501
:rtype: str
"""
return self._destination_cidr_block
@destination_cidr_block.setter
def destination_cidr_block(self, destination_cidr_block):
"""Sets the destination_cidr_block of this RouteParameter.
목적지CIDR블록 # noqa: E501
:param destination_cidr_block: The destination_cidr_block of this RouteParameter. # noqa: E501
:type: str
"""
if destination_cidr_block is None:
raise ValueError("Invalid value for `destination_cidr_block`, must not be `None`") # noqa: E501
self._destination_cidr_block = destination_cidr_block
@property
def target_type_code(self):
"""Gets the target_type_code of this RouteParameter. # noqa: E501
목적지유형코드 # noqa: E501
:return: The target_type_code of this RouteParameter. # noqa: E501
:rtype: str
"""
return self._target_type_code
@target_type_code.setter
def target_type_code(self, target_type_code):
"""Sets the target_type_code of this RouteParameter.
목적지유형코드 # noqa: E501
:param target_type_code: The target_type_code of this RouteParameter. # noqa: E501
:type: str
"""
if target_type_code is None:
raise ValueError("Invalid value for `target_type_code`, must not be `None`") # noqa: E501
self._target_type_code = target_type_code
@property
def target_no(self):
"""Gets the target_no of this RouteParameter. # noqa: E501
목적지번호 # noqa: E501
:return: The target_no of this RouteParameter. # noqa: E501
:rtype: str
"""
return self._target_no
@target_no.setter
def target_no(self, target_no):
"""Sets the target_no of this RouteParameter.
목적지번호 # noqa: E501
:param target_no: The target_no of this RouteParameter. # noqa: E501
:type: str
"""
self._target_no = target_no
@property
def target_name(self):
"""Gets the target_name of this RouteParameter. # noqa: E501
목적지이름 # noqa: E501
:return: The target_name of this RouteParameter. # noqa: E501
:rtype: str
"""
return self._target_name
@target_name.setter
def target_name(self, target_name):
"""Sets the target_name of this RouteParameter.
목적지이름 # noqa: E501
:param target_name: The target_name of this RouteParameter. # noqa: E501
:type: str
"""
self._target_name = target_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RouteParameter):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
410,
14751,
628,
220,
220,
220,
220,
198,
220,
220,
220,
2980,
515,
416,
25,
3740,
1378,
12567,
13,
785,
14,
2032,
7928,
12,
15042,
14,
2032,
7928,
12,
8189,
5... | 2.12362 | 2,718 |
import sdl2
import sdl2.ext
| [
11748,
264,
25404,
17,
198,
11748,
264,
25404,
17,
13,
2302,
628,
628
] | 2.384615 | 13 |
import random
array = random.sample(range(1,100),10)
print(array)
array.sort()
print(array)
print(binary_search(array,array[3]) == 3)
print(binary_search_recursive(array,array[3],0,9) == 3) | [
11748,
4738,
198,
198,
18747,
796,
4738,
13,
39873,
7,
9521,
7,
16,
11,
3064,
828,
940,
8,
198,
4798,
7,
18747,
8,
198,
18747,
13,
30619,
3419,
198,
4798,
7,
18747,
8,
198,
4798,
7,
39491,
62,
12947,
7,
18747,
11,
18747,
58,
18,... | 2.594595 | 74 |
import pygame
| [
11748,
12972,
6057,
628
] | 3.75 | 4 |
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn')
plt.rc('font', size=24) # controls default text sizes
plt.rc('axes', titlesize=24) # fontsize of the axes title
plt.rc('axes', labelsize=24) # fontsize of the x and y labels
plt.rc('xtick', labelsize=24) # fontsize of the tick labels
plt.rc('ytick', labelsize=24) # fontsize of the tick labels
plt.rc('legend', fontsize=24) # legend fontsize
plt.rc('figure', titlesize=30) # fontsize of the figure title
| [
11748,
299,
32152,
355,
45941,
220,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
489,
83,
13,
7635,
13,
1904,
10786,
325,
397,
1211,
11537,
198,
489,
83,
13,
6015,
10786,
10331,
3256,
2546,
28,
1731,
8,
220,
22... | 2.570707 | 198 |
import requests
import base64
import zlib
import re
from urllib.parse import quote, unquote
import urllib3
from flask import Flask, redirect, request, Response
from uuid import uuid4
from textwrap import wrap
from lxml import etree
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
xdmod = Flask(__name__)
OOD_URL = "https://ood.example.org/pun/dev/xdmod"
XDMOD_URL = "https://xdmod.example.org"
EXCL_HEADERS = ['content-encoding', 'content-length', 'transfer-encoding', 'connection']
VERIFY=False
REPLACE_URI = [ r'/rest', r'/gui']
PRIVATE_KEY = "PRIVATE_KEY"
CERT = "CERT"
@xdmod.route("/<path:path>", methods=['GET', 'POST', 'DELETE', 'PUT', 'PATCH'])
@xdmod.route("/")
if __name__ == "__main__":
xdmod.run()
| [
11748,
7007,
198,
11748,
2779,
2414,
198,
11748,
1976,
8019,
198,
11748,
302,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
9577,
11,
555,
22708,
198,
11748,
2956,
297,
571,
18,
198,
6738,
42903,
1330,
46947,
11,
18941,
11,
2581,
11,
18... | 2.658363 | 281 |
# echo server - client
# source code from here -> https://realpython.com/python-sockets/
import socket
HOST = '127.0.0.1'
PORT = 65432
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
s.sendall(b'Hello from client script')
data = s.recv(1024)
print ('Received reply(echo) from server: ', repr(data)) | [
2,
9809,
4382,
532,
5456,
201,
198,
2,
2723,
2438,
422,
994,
4613,
3740,
1378,
5305,
29412,
13,
785,
14,
29412,
12,
82,
11603,
14,
201,
198,
201,
198,
11748,
17802,
201,
198,
201,
198,
39,
10892,
796,
705,
16799,
13,
15,
13,
15,
... | 2.47619 | 147 |
"""Defines the configuration for an ingest trigger"""
from __future__ import unicode_literals
import logging
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from ingest.triggers.ingest_trigger_condition import IngestTriggerCondition
from job.configuration.data.job_connection import JobConnection
from recipe.configuration.data.recipe_connection import LegacyRecipeConnection
from recipe.triggers.configuration.trigger_rule import RecipeTriggerRuleConfiguration
from storage.models import Workspace
from trigger.configuration.exceptions import InvalidTriggerRule
logger = logging.getLogger(__name__)
INGEST_TRIGGER_SCHEMA = {
"type": "object",
"required": ["data"],
"additionalProperties": False,
"properties": {
"version": {
"description": "Version of the ingest trigger schema",
"type": "string",
},
"condition": {
"description": "Condition for an ingested file to trigger an event",
"type": "object",
"additionalProperties": False,
"properties": {
"media_type": {
"description": "Media type required by an ingested file to trigger an event",
"type": "string",
},
"data_types": {
"description": "Data types required by an ingested file to trigger an event",
"type": "array",
"items": {"$ref": "#/definitions/data_type_tag"}
},
}
},
"data": {
"description": "The input data to pass to a triggered job/recipe",
"type": "object",
"required": ["input_data_name", "workspace_name"],
"additionalProperties": False,
"properties": {
"input_data_name": {
"description": "The name of the job/recipe input data to pass the ingested file to",
"type": "string",
},
"workspace_name": {
"description": "The name of the workspace to use for the triggered job/recipe",
"type": "string",
}
}
}
},
"definitions": {
"data_type_tag": {
"description": "A simple data type tag string",
"type": "string",
}
}
}
class IngestTriggerRuleConfiguration(RecipeTriggerRuleConfiguration):
    """A trigger rule that fires when ingested source files meet the defined conditions
    """

    def __init__(self, trigger_rule_type, configuration):
        """Creates an ingest trigger from the given configuration

        :param trigger_rule_type: The trigger rule type
        :type trigger_rule_type: str
        :param configuration: The ingest trigger configuration
        :type configuration: dict

        :raises trigger.configuration.exceptions.InvalidTriggerRule: If the configuration is invalid
        """

        super(IngestTriggerRuleConfiguration, self).__init__(trigger_rule_type, configuration)

        # Reject any configuration that does not satisfy the JSON schema
        try:
            validate(configuration, INGEST_TRIGGER_SCHEMA)
        except ValidationError as validation_error:
            raise InvalidTriggerRule(validation_error)

        self._populate_default_values()

        # Only schema version 1.0 is supported by this implementation
        schema_version = self._dict['version']
        if schema_version != '1.0':
            raise InvalidTriggerRule('%s is an unsupported version number' % schema_version)

    def get_condition(self):
        """Returns the condition for this ingest trigger rule

        :return: The trigger condition
        :rtype: :class:`ingest.triggers.ingest_trigger_condition.IngestTriggerCondition`
        """

        condition_dict = self._dict['condition']
        # An empty media type string (the default) is normalized to None
        required_media_type = condition_dict['media_type'] or None
        required_data_types = set(condition_dict['data_types'])
        return IngestTriggerCondition(required_media_type, required_data_types)

    def get_input_data_name(self):
        """Returns the name of the input data that the ingested file should be passed to

        :return: The input data name
        :rtype: str
        """

        data_dict = self._dict['data']
        return data_dict['input_data_name']

    def get_workspace_name(self):
        """Returns the name of the workspace to use for the triggered job/recipe

        :return: The workspace name
        :rtype: str
        """

        data_dict = self._dict['data']
        return data_dict['workspace_name']

    def validate(self):
        """See :meth:`trigger.configuration.trigger_rule.TriggerRuleConfiguration.validate`
        """

        name = self.get_workspace_name()
        # The configured workspace must exist in the database
        if not Workspace.objects.filter(name=name).count():
            raise InvalidTriggerRule('%s is an invalid workspace name' % name)

    def validate_trigger_for_job(self, job_interface):
        """See :meth:`job.triggers.configuration.trigger_rule.JobTriggerRuleConfiguration.validate_trigger_for_job`
        """

        media_type = self.get_condition().get_media_type()
        # NOTE(review): boolean arguments mirror JobConnection.add_input_file's
        # signature — confirm flag meanings against that API if modifying
        connection = JobConnection()
        connection.add_input_file(self.get_input_data_name(), False,
                                  [media_type] if media_type else None, False, False)
        connection.add_workspace()
        return job_interface.validate_connection(connection)

    def validate_trigger_for_recipe(self, recipe_definition):
        """See :meth:`recipe.triggers.configuration.trigger_rule.RecipeTriggerRuleConfiguration.validate_trigger_for_recipe`
        """

        media_type = self.get_condition().get_media_type()
        # NOTE(review): boolean arguments mirror LegacyRecipeConnection.add_input_file's
        # signature — confirm flag meanings against that API if modifying
        connection = LegacyRecipeConnection()
        connection.add_input_file(self.get_input_data_name(), False,
                                  [media_type] if media_type else None, False)
        connection.add_workspace()
        return recipe_definition.validate_connection(connection)

    def _populate_default_values(self):
        """Populates any missing default values in the configuration
        """

        self._dict.setdefault('version', '1.0')
        condition = self._dict.setdefault('condition', {})
        condition.setdefault('media_type', '')
        condition.setdefault('data_types', [])
| [
37811,
7469,
1127,
262,
8398,
329,
281,
26151,
7616,
37811,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
18931,
198,
198,
6738,
44804,
684,
2395,
2611,
1330,
26571,
198,
6738,
44804,
684,
2395,
2611,
... | 2.482811 | 2,618 |
import json
import re
from .base import Base
class Erlang(Base):
    """
    Parses simple Erlang data format, as output by RabbitMQ status.
    e.g.
    [{pid,296},
    {running_applications,
    [{rabbitmq_management_visualiser,"RabbitMQ Visualiser","3.5.1"}]}]
    """
    # [compiled pattern, replacement template] pair used to rewrite Erlang
    # binary strings (<<"...">>) into plain quoted strings.
    # NOTE(review): the greedy `.*` would span across multiple <<"...">>
    # tokens in one substituted span — presumably inputs carry at most one
    # per span; confirm against the substitution call site.
    RE_STRINGY = [re.compile(r'<<"(.*)">>'), r'"\g<1>"']
    # Parser context states: inside a structure, reading a key, reading a value
    CONTEXT_STRUCTURE = 0
    CONTEXT_KEY = 1
    CONTEXT_VALUE = 2
    # Delimiter characters. Erlang tuples {...} are treated as "arrays" and
    # Erlang lists [...] as key/value "objects" — naming is this parser's
    # own convention, inverted from JSON's.
    CHAR_ARR_S = '{'
    CHAR_ARR_E = '}'
    CHAR_OBJ_S = '['
    CHAR_OBJ_E = ']'
    CHAR_QUOTE = '"'
    CHAR_SEP = ','
    # Whitespace characters ignored between tokens
    CHAR_WSP = [' ', '\n']
    # Characters that close a structure
    CHAR_E = [CHAR_ARR_E, CHAR_OBJ_E]
    # Characters skipped while scanning for the next meaningful token
    CHAR_SKIP = CHAR_WSP + [CHAR_ARR_E]
@staticmethod
def dict_set(d, k, v):
"""
Set value within arbitrary-depth dict, referenced by key path.
Note this uses simple recursion, and will blow the stack if too
deep.
PARAMETERS:
d : dict
dict to update
k : list
reverse-ordered key (e.g. ['depth-3', 'depth-2', 'depth-1'])
v : type
value to set
"""
if len(k) == 1:
d[k[0]] = v
else:
k2 = k.pop()
if k2 not in d:
d[k2] = {}
Erlang.dict_set(d[k2], k, v)
@staticmethod
| [
11748,
33918,
198,
11748,
302,
198,
198,
6738,
764,
8692,
1330,
7308,
628,
198,
4871,
5256,
17204,
7,
14881,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
220,
220,
220,
220,
23042,
274,
2829,
5256,
17204,
1366,
5794,
11,
355,
... | 1.713436 | 841 |