content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import os
import unittest
import numpy as np
import math
import pytest
from shapely.geometry import Polygon
from geopyspark.tests.base_test_class import BaseTestClass
from geopyspark.geotrellis import rasterize, SpatialPartitionStrategy
if __name__ == "__main__":
unittest.main()
| [
11748,
28686,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
5485,
306,
13,
469,
15748,
1330,
12280,
14520,
198,
6738,
30324,
893,
20928,
13,
41989,
13,
86... | 3.074468 | 94 |
import numpy as np
import torch
import torch.nn.functional as F
eps = 1
def weight_reshape(preds, trues, weight_channel=-1, min_weight_val=0.16):
    """Weight the predictions by the weight map stored in one channel of trues.

    Assumes tensors of shape (batch_size, channels, h, w), with the weight
    map in ``trues[:, weight_channel]``.

    Args:
        preds: prediction tensor; its non-weight channels are scaled in
            place (slices below are views) by the clipped weight map.
        trues: target tensor carrying the weight map in ``weight_channel``.
        weight_channel: index of the weight channel (default: last).
        min_weight_val: weights are clipped from below to this value
            (previously documented but not implemented) so zero-weight
            pixels still contribute to the loss.

    Returns:
        (preds_vals, trues_vals): weighted predictions and targets, both
        without the weight channel.
    """
    # Basic slicing returns views, so the in-place multiply below also
    # updates `preds` — same side effect as the original channel loop.
    trues_vals = trues[:, 0:weight_channel, :, :]
    preds_vals = preds[:, 0:weight_channel, :, :]
    # Clip the weight map from below, as documented.
    weights = trues[:, weight_channel, :, :].clamp(min=min_weight_val)
    # Broadcast (batch, h, w) -> (batch, 1, h, w) across the value
    # channels instead of looping channel by channel.
    preds_vals.mul_(weights.unsqueeze(1))
    return preds_vals, trues_vals
def focal(preds, trues, alpha=1, gamma=2, reduce=True, logits=True):
    """Focal loss.

    After https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/65938

    Args:
        preds: predictions (raw logits when ``logits`` is True, otherwise
            probabilities in [0, 1]).
        trues: ground-truth targets in [0, 1].
        alpha: balancing factor applied to the loss.
        gamma: focusing exponent; larger values down-weight easy examples.
        reduce: return the mean when True, the unreduced loss otherwise.
        logits: whether ``preds`` are unnormalised logits.

    Returns:
        The (mean) focal loss tensor.
    """
    if logits:
        bce = F.binary_cross_entropy_with_logits(preds, trues)
    else:
        bce = F.binary_cross_entropy(preds, trues)
    # pt is the probability assigned to the true class; easy examples
    # (pt near 1) are down-weighted by the (1 - pt) ** gamma factor.
    pt = torch.exp(-bce)
    loss = alpha * (1 - pt) ** gamma * bce
    return torch.mean(loss) if reduce else loss
##########
def soft_dice_loss(outputs, targets, per_image=False):
    """Soft (differentiable) Dice loss, from cannab's SpaceNet-4 solution.

    Args:
        outputs: predicted masks, shape (batch, ...).
        targets: ground-truth masks, same shape as ``outputs``.
        per_image: when True the Dice score is computed per sample and
            averaged; when False all samples are pooled into one score.

    Returns:
        Scalar loss tensor: 1 - soft Dice coefficient.
    """
    eps = 1e-5
    # Pooling everything into a single "image" is expressed by flattening
    # with an effective batch size of 1.
    n = outputs.size()[0] if per_image else 1
    flat_targets = targets.contiguous().view(n, -1).float()
    flat_outputs = outputs.contiguous().view(n, -1)
    overlap = torch.sum(flat_outputs * flat_targets, dim=1)
    denom = torch.sum(flat_outputs, dim=1) + torch.sum(flat_targets, dim=1) + eps
    return (1 - (2 * overlap + eps) / denom).mean()
def dice_cannab_v0(im1, im2, empty_score=1.0):
    """
    From cannab sn4
    Computes the Dice coefficient, a measure of set similarity.
    Parameters
    ----------
    im1 : array-like, bool
        Any array of arbitrary size. If not boolean, will be converted.
    im2 : array-like, bool
        Any other array of identical size. If not boolean, will be converted.
    Returns
    -------
    dice : float
        Dice coefficient as a float on range [0,1].
        Maximum similarity = 1
        No similarity = 0
        Both are empty (sum eq to zero) = empty_score
    Raises
    ------
    ValueError
        If the two arrays do not have the same shape.
    Notes
    -----
    The order of inputs for `dice` is irrelevant. The result will be
    identical if `im1` and `im2` are switched.
    """
    # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `bool` is the documented replacement and behaves identically.
    im1 = np.asarray(im1).astype(bool)
    im2 = np.asarray(im2).astype(bool)
    if im1.shape != im2.shape:
        raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
    im_sum = im1.sum() + im2.sum()
    if im_sum == 0:
        # Both masks are empty: similarity is undefined, return sentinel.
        return empty_score
    # Compute Dice coefficient
    intersection = np.logical_and(im1, im2)
    return 2. * intersection.sum() / im_sum
def focal_cannab(outputs, targets, gamma=2, ignore_index=255):
    """Element-wise focal loss, from cannab's SpaceNet-4 solution.

    Args:
        outputs: predicted probabilities in [0, 1].
        targets: ground-truth labels; entries equal to ``ignore_index``
            are excluded from the loss.
        gamma: focusing exponent.
        ignore_index: sentinel label value to skip.

    Returns:
        Scalar mean focal loss over the non-ignored elements.
    """
    outputs = outputs.contiguous()
    targets = targets.contiguous()
    eps = 1e-8
    keep = targets.view(-1) != ignore_index
    kept_targets = targets.view(-1)[keep].float()
    kept_outputs = outputs.contiguous().view(-1)[keep]
    # Clamp away from {0, 1} so the log() below stays finite.
    kept_outputs = torch.clamp(kept_outputs, eps, 1. - eps)
    kept_targets = torch.clamp(kept_targets, eps, 1. - eps)
    # Probability assigned to the true class, per element.
    pt = (1 - kept_targets) * (1 - kept_outputs) + kept_targets * kept_outputs
    return (-(1. - pt) ** gamma * torch.log(pt)).mean()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
25386,
796,
352,
628,
198,
4299,
3463,
62,
3447,
1758,
7,
28764,
82,
11,
491,
947,
11,
3463,
62,
17620,
10779,
16,
11,
949,
62... | 2.466914 | 1,617 |
from statistics import mean
import numpy as np
import matplotlib.pyplot as plt
import random
# xs = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)
# ys = np.array([5, 4, 6, 5, 6, 7], dtype=np.float64)
# This will be used to create random data sets
# This is meant to tell us how good of a fit the line
# We made is
# Create our data set
# Build a synthetic data set.  NOTE(review): create_dataset,
# best_fit_slope_and_intercept and coefficient_of_determination are
# defined elsewhere in this module -- argument meanings (count, variance,
# step, correlation direction) are inferred; confirm against their defs.
xs, ys = create_dataset(1000, 100, 2, correlation='pos')
# Make the line of best fit
m, b = best_fit_slope_and_intercept(xs, ys)
# Evaluate y = m*x + b at every sample x.
regression_line = []
for x in xs:
    regression_line.append((m * x) + b)
# This is how we know how good of a fit the line actually is
# (the r^2 coefficient of determination).
r_squared = coefficient_of_determination(ys, regression_line)
print(r_squared)
# Show the graph
plt.scatter(xs, ys, s=10)
plt.plot(xs, regression_line)
plt.show()
| [
6738,
7869,
1330,
1612,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
4738,
198,
198,
2,
2124,
82,
796,
45941,
13,
18747,
26933,
16,
11,
362,
11,
513,
11,
604,
11,
642,... | 2.645051 | 293 |
# coding: utf-8
"""
Selling Partner API for Finances
The Selling Partner API for Finances helps you obtain financial information relevant to a seller's business. You can obtain financial events for a given order, financial event group, or date range without having to wait until a statement period closes. You can also obtain financial event groups for a given date range. # noqa: E501
OpenAPI spec version: v0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TaxWithheldComponent(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> swagger-declared type name.
    swagger_types = {
        'tax_collection_model': 'str',
        'taxes_withheld': 'ChargeComponentList'
    }
    # Python attribute name -> JSON key used in the API payload.
    attribute_map = {
        'tax_collection_model': 'TaxCollectionModel',
        'taxes_withheld': 'TaxesWithheld'
    }
    def __init__(self, tax_collection_model=None, taxes_withheld=None):  # noqa: E501
        """TaxWithheldComponent - a model defined in Swagger"""  # noqa: E501
        self._tax_collection_model = None
        self._taxes_withheld = None
        self.discriminator = None
        # Assign through the property setters only when a value was supplied.
        if tax_collection_model is not None:
            self.tax_collection_model = tax_collection_model
        if taxes_withheld is not None:
            self.taxes_withheld = taxes_withheld
    @property
    def tax_collection_model(self):
        """Gets the tax_collection_model of this TaxWithheldComponent.  # noqa: E501
        The tax collection model applied to the item. Possible values: * MarketplaceFacilitator - Tax is withheld and remitted to the taxing authority by Amazon on behalf of the seller. * Standard - Tax is paid to the seller and not remitted to the taxing authority by Amazon.  # noqa: E501
        :return: The tax_collection_model of this TaxWithheldComponent.  # noqa: E501
        :rtype: str
        """
        return self._tax_collection_model
    @tax_collection_model.setter
    def tax_collection_model(self, tax_collection_model):
        """Sets the tax_collection_model of this TaxWithheldComponent.
        The tax collection model applied to the item. Possible values: * MarketplaceFacilitator - Tax is withheld and remitted to the taxing authority by Amazon on behalf of the seller. * Standard - Tax is paid to the seller and not remitted to the taxing authority by Amazon.  # noqa: E501
        :param tax_collection_model: The tax_collection_model of this TaxWithheldComponent.  # noqa: E501
        :type: str
        """
        self._tax_collection_model = tax_collection_model
    @property
    def taxes_withheld(self):
        """Gets the taxes_withheld of this TaxWithheldComponent.  # noqa: E501
        :return: The taxes_withheld of this TaxWithheldComponent.  # noqa: E501
        :rtype: ChargeComponentList
        """
        return self._taxes_withheld
    @taxes_withheld.setter
    def taxes_withheld(self, taxes_withheld):
        """Sets the taxes_withheld of this TaxWithheldComponent.
        :param taxes_withheld: The taxes_withheld of this TaxWithheldComponent.  # noqa: E501
        :type: ChargeComponentList
        """
        self._taxes_withheld = taxes_withheld
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialise lists, nested models (anything exposing
        # to_dict), and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(TaxWithheldComponent, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TaxWithheldComponent):
            return False
        # Equality is attribute-dict equality between two instances.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
47919,
35532,
7824,
329,
4463,
1817,
628,
220,
220,
220,
383,
47919,
35532,
7824,
329,
4463,
1817,
5419,
345,
7330,
3176,
1321,
5981,
284,
257,
18583,
338,
1597,
1... | 2.499508 | 2,032 |
'''
Created on Oct 21, 2011
@author: bluec0re
'''
from __future__ import absolute_import, division
import os
import os.path
import sys
import platform
import zipfile
import functools
import tempfile
import wx
import re
import math
from wx import xrc
from cvsscalc import cvsscalc, strings
from wx.lib.masked import TimeCtrl
try:
import wx.lib.agw.pygauge as PG
except ImportError:
def find_file(path, mode='r'):
    """Find the file named path in the sys.path.

    Searches both plain directories and zip archives (eggs) appearing on
    sys.path.

    Args:
        path: relative file name to look for.
        mode: mode used when opening a plain file (zip members are read
            as stored).

    Returns:
        The file's content if found, None if not found.
        (The previous docstring claimed a path was returned; the code has
        always returned the content.)
    """
    for dirname in sys.path:
        # A sys.path entry that is itself a file is a zip archive / egg.
        if os.path.isfile(dirname):
            # Context manager closes the archive even when read() raises
            # or the member is absent (the original leaked the handle).
            with zipfile.ZipFile(dirname, mode='r') as zf:
                if path in zf.namelist():
                    return zf.read(path)
            continue
        possible = os.path.join(dirname, path)
        if os.path.isfile(possible):
            with open(possible, mode) as fp:
                return fp.read()
    return None
def main(infile=None):
    """main function

    Builds the wx application, optionally preloading a saved calculation,
    then enters the GUI event loop (blocks until the window closes).

    Keyword arguments:
    infile -- file like object to load (Default: None)
    """
    # NOTE(review): MyApp is defined elsewhere in this file; presumably a
    # wx.App subclass providing these helper methods -- confirm.
    app = MyApp()
    if infile:
        app.load_from_file(infile)
    app.update_choices()
    app.refresh_score()
    app.MainLoop()
if __name__ == '__main__':
main()
| [
7061,
6,
198,
41972,
319,
2556,
2310,
11,
2813,
198,
198,
31,
9800,
25,
4171,
66,
15,
260,
198,
7061,
6,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
198,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
11748,... | 2.201653 | 605 |
import logging
from django.conf import settings
from django.urls import reverse
from django.utils.translation import gettext, ngettext
from django.views.generic import TemplateView
from mtp_common.auth.api_client import get_api_session
from requests.exceptions import RequestException
from security.context_processors import initial_params
from security.searches import get_saved_searches, populate_new_result_counts
logger = logging.getLogger('mtp')
| [
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
11,
299,
1136,
5239,
198,
6738,
42625,
14208,
13,
3357... | 3.619048 | 126 |
import numpy as np
import scipy.linalg as spl
def planetodetic_to_pcpf(lon, lat,
                         h = 0.0,
                         a = 6378137.0,
                         b = 6356752.314245):
    """Convert planetodetic coordinates (longitude, latitude, and height
    above the ellipsoid) into planet-centered, planet-fixed
    coordinates.
    By default, this uses the WGS84 ellipsoid and a height of 0.
    Reference:
    [0] https://en.wikipedia.org/wiki/Geographic_coordinate_conversion#From_geodetic_to_ECEF_coordinates
    Args:
        lon       longitude (radians)
        lat       latitude (radians)
        h         altitude (m)
        a         ellipsoid semimajor axis / equatorial radius (m)
        b         ellipsoid semiminor axis / polar radius (m)
    Returns:
        A numpy array consisting of x, y, and z coordinates in a
        planet-centered, planet-fixed frame.
    """
    b2_over_a2 = b**2 / a**2
    # Compute prime vertical radius of curvature:
    #   N(lat) = a / sqrt(1 - e^2 * sin(lat)^2)    (see ref. [0])
    # BUG FIX: sin(lat) must be squared here; the unsquared form only
    # agreed with the reference at lat = 0 and +/-pi/2.  This now matches
    # the N used by pcpf_to_planetodetic() in this module.
    e2 = 1.0 - b2_over_a2
    N = a / np.sqrt(1.0 - e2 * np.sin(lat)**2)
    return np.array([ (N + h) * np.cos(lat) * np.cos(lon),
                      (N + h) * np.cos(lat) * np.sin(lon),
                      (b2_over_a2 * N + h) * np.sin(lat) ])
def compute_reduced_latitude(mu, f):
    """Compute the reduced (parametric) latitude from the best available
    guess about the planetodetic latitude.

    This is a helper for pcpf_to_planetodetic().

    Args:
        mu    planetodetic latitude estimate (rad)
        f     first flattening

    Returns:
        The new estimate of the reduced latitude (rad).
    """
    # tan(beta) = (1 - f) * tan(mu), with tan written as sin/cos.
    tangent = np.sin(mu) / np.cos(mu)
    return np.arctan((1.0 - f) * tangent)
def compute_planetodetic_latitude(s, rz, a, f, e2, beta):
    """Compute the planetodetic latitude from the reduced latitude.

    This is a helper for pcpf_to_planetodetic().

    Args:
        s       distance from the polar axis in the x/y plane (m)
        rz      distance along polar axis (m)
        a       equatorial radius (m)
        f       first flattening
        e2      square of the first eccentricity
        beta    reduced latitude (rad)

    Returns:
        The planetodetic latitude (rad).
    """
    sin_beta_cubed = np.sin(beta)**3
    cos_beta_cubed = np.cos(beta)**3
    # NOTE(review): the 1/(1 - e2) factor is applied twice to the sin^3
    # term here, exactly as in the original expression -- verify against
    # the reference iteration before changing.
    numerator = rz + (e2 * (1.0 - f) * a * sin_beta_cubed / (1.0 - e2)) / (1.0 - e2)
    denominator = s - e2 * a * cos_beta_cubed
    return np.arctan(numerator / denominator)
def pcpf_to_planetodetic(r_pcpf,
                         a     = 6378137.0,
                         f     = 0.003352810664775694,
                         tol   = 1e-4,
                         small = 1e-12):
    """Given PCPF position information, compute the planetodetic/geodetic
    longitude, latitude, and altitude above the geoid.
    Args:
        r_pcpf  position in PCPF coordinates (m)
        a       semimajor axis / equatorial radius (m)
        f       first flattening
        tol     tolerance for latitude iteration and convergence (rad)
        small   threshold below which a coordinate is treated as zero
    Returns:
        A tuple of longitude, planetodetic latitude, and altitude. The
        first two are in radians and the third is in meters.
    """
    # Compute square of first eccentricity
    e2 = 1.0 - (1.0 - f)**2
    # Compute longitude and radius from polar axis.  BUG FIX: arctan2()
    # preserves the quadrant; the previous arctan(y/x) was off by pi for
    # x < 0 and the `x < small` test sent every negative x to lon = 0.
    if abs(r_pcpf[0]) < small and abs(r_pcpf[1]) < small:
        lon = 0.0  # longitude is undefined on the polar axis
    else:
        lon = np.arctan2(r_pcpf[1], r_pcpf[0])
    s = np.sqrt(r_pcpf[0]**2 + r_pcpf[1]**2)
    if s < small:
        # On (or numerically at) the polar axis the reduced latitude is
        # +/- 90 degrees depending on hemisphere.
        if r_pcpf[2] > 0:
            beta = np.pi/2.0
        else:
            beta = -np.pi/2.0
    else:
        # Start by calculating an initial guess for the planetodetic latitude.
        beta = np.arctan( r_pcpf[2] / ((1.0 - f) * s) )
    last_lat = compute_planetodetic_latitude(s, r_pcpf[2], a, f, e2, beta)
    # Re-calculate it using our value for mu
    beta = compute_reduced_latitude(last_lat, f)
    lat = compute_planetodetic_latitude(s, r_pcpf[2], a, f, e2, beta)
    # Fixed-point iteration: alternate reduced/planetodetic latitude
    # updates until successive estimates agree to within tol.
    while np.abs(lat - last_lat) > tol:
        # Re-calculate reduced latitude
        beta = compute_reduced_latitude(lat, f)
        last_lat = lat
        lat = compute_planetodetic_latitude(s, r_pcpf[2], a, f, e2, beta)
    # Having converged, we now compute the altitude above the geoid
    N = a / np.sqrt(1.0 - e2 * np.sin(lat)**2) # radius of curvature
                                               # in the vertical prime
    h = s * np.cos(lat) + (r_pcpf[2] + e2 * N * np.sin(lat)) * np.sin(lat) - N
    return lon, lat, h
def compute_T_pcpf_to_enu(r_pcpf):
    """Find the attitude of an East--North--Up frame located at a given
    location in a planet-centered, planet-fixed frame relative to that
    frame.

    Args:
        r_pcpf   planet-centered, planet-fixed position (m)

    Returns:
        A 3x3 transformation matrix whose columns are the east, north,
        and up unit vectors expressed in PCPF coordinates.
    """
    z_axis = np.array([0.0, 0.0, 1.0])
    # Unit vector pointing radially outward at this location.
    up = r_pcpf / spl.norm(r_pcpf)
    # NOTE(review): east is constructed as up x z (not z x up); verify
    # the intended handedness against callers before changing anything.
    east = np.cross(up, z_axis)
    east = east / spl.norm(east)
    north = np.cross(up, east)
    north = north / spl.norm(north)
    return np.vstack((east, north, up)).T
def rotate_z(t):
    """Rotation about the z axis by an angle t.

    Args:
        t    angle (radians)

    Returns:
        3x3 orthonormal rotation matrix.
    """
    c, s = np.cos(t), np.sin(t)
    return np.array([[c,  -s,  0.0],
                     [s,   c,  0.0],
                     [0.0, 0.0, 1.0]])
def compute_T_inrtl_to_pcpf(dt,
                            w_pcpf = 2.0 * np.pi / (23 * 3600.0 + 56 * 60.0 + 4.091),
                            T_inrtl_to_pcpf0 = None):
    """Compute the approximate attitude of the planet-centered,
    planet-fixed frame relative to a planet-centered inertial frame.

    Args:
        dt                time since epoch (s)
        w_pcpf            angular rate of the planet about its axis
                          (r/s; defaults to the inverse of earth's
                          sidereal day length times 2*pi)
        T_inrtl_to_pcpf0  attitude matrix at epoch (defaults to
                          identity matrix)

    Returns:
        A 3x3 transformation matrix.
    """
    if dt == 0:
        # No elapsed time: the attitude is exactly the epoch attitude.
        return np.identity(3) if T_inrtl_to_pcpf0 is None else T_inrtl_to_pcpf0
    # Planet spin appears as a rotation about z by -w*dt (the negative
    # sign controls the direction of rotation); this inlines the
    # rotate_z() helper defined earlier in this module.
    angle = -w_pcpf * dt
    c, s = np.cos(angle), np.sin(angle)
    spin = np.array([[c,  -s,  0.0],
                     [s,   c,  0.0],
                     [0.0, 0.0, 1.0]])
    if T_inrtl_to_pcpf0 is None:
        return spin
    return spin.dot(T_inrtl_to_pcpf0)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
75,
1292,
70,
355,
4328,
198,
198,
4299,
5440,
375,
5139,
62,
1462,
62,
79,
13155,
69,
7,
14995,
11,
3042,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 2.018614 | 3,116 |
import socket
import time
if __name__ == "__main__":
main() | [
11748,
17802,
198,
11748,
640,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419
] | 2.782609 | 23 |
from django.db import models
import datetime
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
11748,
4818,
8079,
198,
198,
2,
13610,
534,
4981,
994,
13,
198
] | 3.65 | 20 |
import pickle
import pytest
from trafaretrecord import memoryslots
@pytest.mark.parametrize('type2test', [memoryslots, ])
@pytest.mark.parametrize('n', [10, 1000000])
@pytest.mark.parametrize('type2test', [memoryslots, ])
@pytest.mark.parametrize('type2test', [memoryslots, ])
| [
11748,
2298,
293,
198,
198,
11748,
12972,
9288,
198,
6738,
1291,
69,
8984,
22105,
1330,
4088,
6649,
1747,
628,
628,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
10786,
4906,
17,
9288,
3256,
685,
31673,
6649,
1747,
11... | 2.621622 | 111 |
from __future__ import annotations
import sys
import threading
from queue import Queue
from attr import attrs, attr
@attrs
kitchen = Cutlery(knives=100, forks=100)
if __name__ == "__main__":
main()
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
11748,
25064,
198,
11748,
4704,
278,
198,
198,
6738,
16834,
1330,
4670,
518,
198,
198,
6738,
708,
81,
1330,
708,
3808,
11,
708,
81,
628,
198,
31,
1078,
3808,
628,
198,
15813,
6607,
796,
37194... | 2.90411 | 73 |
import json
import shutil
import io
if __name__=="__main__":
main()
| [
11748,
33918,
198,
11748,
4423,
346,
198,
11748,
33245,
628,
198,
361,
11593,
3672,
834,
855,
1,
834,
12417,
834,
1298,
198,
220,
1388,
3419,
198
] | 2.769231 | 26 |
import os
import sys
import requests
import json
if __name__ == '__main__':
    # Make the repository root importable so `deship` resolves when this
    # script is run directly from its own subdirectory.
    root_path = os.path.split(os.path.abspath(os.getcwd()))[0]
    sys.path.append(root_path)
    from deship import config
    from deship import plinth

    config.init_config()
    phase = sys.argv[1]

    if phase == 'dv':
        # Development mode: write a seeds.json that points at the local
        # switch identity on 127.0.0.1.
        keyfile = config.telehash_conf_path + 'seed_id'
        seedfile = config.telehash_conf_path + 'seeds.json'
        with open(keyfile, 'r') as f:
            id_key = f.read()
        seed = plinth.SwitchID(key=id_key)
        seeds = []
        node = dict()
        node['hashname'] = seed.hash_name
        node['pubkey'] = seed.pub_key
        node['ip'] = '127.0.0.1'
        node['port'] = 8090
        seeds.append(node)
        ser = json.dumps(seeds)
        with open(seedfile, 'w+') as f:
            f.write(ser)
    else:
        # Any other phase: download the published seed list verbatim.
        r = requests.get('https://printf.kr/seeds.json')
        # BUG FIX: `r.json` is a bound method object; passing it to
        # f.write() raised TypeError.  Write the raw response body.
        data = r.text
        with open(config.telehash_conf_path + "seeds.json", 'w+') as f:
            f.write(data)
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
7007,
198,
11748,
33918,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
6808,
62,
6978,
796,
28686,
13,
6978,
13,
35312,
7,
418,
13,
6978,
13,
397,
... | 2.05 | 500 |
from copy import deepcopy
SHIP_BITS = ["U", "D", "L", "R", "C"]
WATERING_PATTERNS = {
"U":
"""
WXW
WUW
WWW
""",
"D":
"""
WWW
WDW
WXW
""",
"L":
"""
WWW
XLW
WWW
""",
"R":
"""
WWW
WRX
WWW
""",
"C":
"""
WXW
XCX
WXW
"""
}
if __name__ == "__main__":
    # Sample 6x6 board.  NOTE(review): E appears to mean empty/unknown,
    # U and W as in SHIP_BITS / water -- confirm against BimaruPuzzle.
    puzzle_grid = """EEEEEE
EEEEEU
EEEEEE
EEEEEE
EEEEEE
EEEEWE
"""
    # NOTE(review): BimaruPuzzle is defined elsewhere in this file; the
    # arguments look like (grid, row tallies, column tallies, fleet
    # ship sizes) -- confirm against its constructor.
    sample_puzzle_a = BimaruPuzzle(puzzle_grid,
                                   [3, 1, 3, 3, 1, 3],
                                   [2, 3, 1, 3, 0, 5],
                                   [4, 3, 2, 2, 1, 1, 1]
                                   )
    sample_puzzle_a.solve()
    print(sample_puzzle_a)
| [
6738,
4866,
1330,
2769,
30073,
198,
198,
49423,
62,
26094,
50,
796,
14631,
52,
1600,
366,
35,
1600,
366,
43,
1600,
366,
49,
1600,
366,
34,
8973,
198,
198,
54,
23261,
2751,
62,
47,
1404,
5781,
8035,
796,
1391,
198,
220,
220,
220,
3... | 1.342618 | 718 |
from django.urls import path
from .schema1 import schema as orm_schema
from ..methods.schema1 import schema as methods_schema
# Two GraphQL endpoints: one served from the ORM-derived schema, one from
# the method-based schema imported above.
urlpatterns = [
    path('graphql', orm_schema.as_django_view()),
    path('graphql-methods', methods_schema.as_django_view()),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
15952,
2611,
16,
1330,
32815,
355,
393,
76,
62,
15952,
2611,
198,
6738,
11485,
24396,
82,
13,
15952,
2611,
16,
1330,
32815,
355,
5050,
62,
15952,
2611,
628,
198,
6371,... | 2.670103 | 97 |
import sys
import itertools
from os import listdir
from os.path import isfile, join
# Lowercase alphabet used by the cipher routines.
abc='abcdefghijklmnopqrstuvwxyz'
# first argument = poem
# second argument = ciphertxt or msg
# Exit with status 2 on wrong usage (exactly two arguments required).
if len(sys.argv) != 3: sys.exit(2)
#print encrypt([0, 5, 13, 16, 19], sys.argv[1], sys.argv[2])
# NOTE(review): decrypt() is defined elsewhere in this file; presumably
# the inverse of the commented-out encrypt() call above -- confirm.
decrypt(sys.argv[1], sys.argv[2])
| [
11748,
25064,
198,
11748,
340,
861,
10141,
198,
6738,
28686,
1330,
1351,
15908,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
11,
4654,
198,
198,
39305,
11639,
39305,
4299,
456,
2926,
41582,
10295,
404,
80,
81,
301,
14795,
86,
5431,
89,
... | 2.488 | 125 |
from number import Number
| [
6738,
1271,
1330,
7913,
628
] | 5.4 | 5 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2018 Palo Alto Networks techbizdev, <techbizdev@paloaltonetworks.com>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import time
_MIN_VERSION_ERROR = '{0} version ({1}) < minimum version ({2})'
HAS_PANDEVICE = True
try:
import pandevice
from pandevice.base import PanDevice
from pandevice.firewall import Firewall
from pandevice.panorama import DeviceGroup, Template, TemplateStack
from pandevice.policies import PreRulebase, PostRulebase, Rulebase
from pandevice.device import Vsys
from pandevice.errors import PanDeviceError
except ImportError:
HAS_PANDEVICE = False
def get_connection(vsys=None, device_group=None,
                   vsys_dg=None, vsys_importable=None,
                   rulebase=None, template=None, template_stack=None,
                   with_classic_provider_spec=False, with_state=False,
                   argument_spec=None, required_one_of=None,
                   min_pandevice_version=None, min_panos_version=None,
                   error_on_shared=False,
                   panorama_error=None, firewall_error=None):
    """Returns a helper object that handles pandevice object tree init.
    The `vsys`, `device_group`, `vsys_dg`, `vsys_importable`, `rulebase`,
    `template`, and `template_stack` params can be any of the following types:
        * None - do not include this in the spec
        * True - use the default param name
        * string - use this string for the param name
    The `min_pandevice_version` and `min_panos_version` args expect a 3 element
    tuple of ints.  For example, `(0, 6, 0)` or `(8, 1, 0)`.
    If you are including template support (by defining either `template` and/or
    `template_stack`), and the thing the module is enabling the management of is
    an "importable", you should define either `vsys_importable` (whose default
    value is None) or `vsys` (whose default value is 'vsys1').
    Arguments:
        vsys: The vsys (default: 'vsys1').
        device_group: Panorama only - The device group (default: 'shared').
        vsys_dg: The param name if vsys and device_group are a shared param.
        vsys_importable: Either this or `vsys` should be specified.  For:
            - Interfaces
            - VLANs
            - Virtual Wires
            - Virtual Routers
        rulebase: This is a policy of some sort.
        template: Panorama - The template name.
        template_stack: Panorama - The template stack name.
        with_classic_provider_spec(bool): Include the ip_address, username,
            password, api_key, and port params in the base spec, and make the
            "provider" param optional.
        with_state(bool): Include the standard 'state' param.
        argument_spec(dict): The argument spec to mixin with the
            generated spec based on the given parameters.
        required_one_of(list): List of lists to extend into required_one_of.
        min_pandevice_version(tuple): Minimum pandevice version allowed.
        min_panos_version(tuple): Minimum PAN-OS version allowed.
        error_on_shared(bool): Don't allow "shared" vsys or device group.
        panorama_error(str): The error message if the device is Panorama.
        firewall_error(str): The error message if the device is a firewall.
    Returns:
        ConnectionHelper
    """
    helper = ConnectionHelper(
        min_pandevice_version, min_panos_version,
        error_on_shared, panorama_error, firewall_error)
    req = []
    # Base argument spec: every module gets a "provider" dict holding the
    # connection parameters.
    spec = {
        'provider': {
            'required': True,
            'type': 'dict',
            'required_one_of': [['password', 'api_key'], ],
            'options': {
                'ip_address': {'required': True},
                'username': {'default': 'admin'},
                'password': {'no_log': True},
                'api_key': {'no_log': True},
                'port': {'default': 443, 'type': 'int'},
                'serial_number': {'no_log': True},
            },
        },
    }
    if with_classic_provider_spec:
        # Classic modules also accept the connection params at the top
        # level, so "provider" itself becomes optional.
        spec['provider']['required'] = False
        spec['provider']['options']['ip_address']['required'] = False
        del(spec['provider']['required_one_of'])
        spec.update({
            'ip_address': {'required': False},
            'username': {'default': 'admin'},
            'password': {'no_log': True},
            'api_key': {'no_log': True},
            'port': {'default': 443, 'type': 'int'},
        })
        req.extend([
            ['provider', 'ip_address'],
            ['provider', 'password', 'api_key'],
        ])
    if with_state:
        spec['state'] = {
            'default': 'present',
            'choices': ['present', 'absent'],
        }
    # Each location param may be True (use the default name) or a string
    # (use that name) -- see the docstring.  vsys_dg excludes vsys and
    # device_group.
    if vsys_dg is not None:
        if isinstance(vsys_dg, bool):
            param = 'vsys_dg'
        else:
            param = vsys_dg
        spec[param] = {}
        helper.vsys_dg = param
    else:
        if vsys is not None:
            if isinstance(vsys, bool):
                param = 'vsys'
            else:
                param = vsys
            spec[param] = {'default': 'vsys1'}
            helper.vsys = param
        if device_group is not None:
            if isinstance(device_group, bool):
                param = 'device_group'
            else:
                param = device_group
            spec[param] = {'default': 'shared'}
            helper.device_group = param
    if vsys_importable is not None:
        # vsys and vsys_importable are mutually exclusive by design.
        if vsys is not None:
            raise KeyError('Define "vsys" or "vsys_importable", not both.')
        if isinstance(vsys_importable, bool):
            param = 'vsys'
        else:
            param = vsys_importable
        spec[param] = {}
        helper.vsys_importable = param
    if rulebase is not None:
        if isinstance(rulebase, bool):
            param = 'rulebase'
        else:
            param = rulebase
        spec[param] = {
            'default': None,
            'choices': ['pre-rulebase', 'rulebase', 'post-rulebase'],
        }
        helper.rulebase = param
    if template is not None:
        if isinstance(template, bool):
            param = 'template'
        else:
            param = template
        spec[param] = {}
        helper.template = param
    if template_stack is not None:
        if isinstance(template_stack, bool):
            param = 'template_stack'
        else:
            param = template_stack
        spec[param] = {}
        helper.template_stack = param
    if argument_spec is not None:
        # Caller-provided spec entries must not collide with the keys
        # generated above.
        for k in argument_spec.keys():
            if k in spec:
                raise KeyError('{0}: key used by connection helper.'.format(k))
            spec[k] = argument_spec[k]
    if required_one_of is not None:
        req.extend(required_one_of)
    # Done.
    helper.argument_spec = spec
    helper.required_one_of = req
    return helper
| [
2,
770,
2438,
318,
636,
286,
28038,
856,
11,
475,
318,
281,
4795,
7515,
13,
198,
2,
770,
1948,
2393,
39442,
11,
290,
428,
2393,
39442,
691,
11,
318,
347,
10305,
11971,
13,
198,
2,
3401,
5028,
345,
3551,
1262,
428,
39442,
11,
543,
... | 2.377222 | 3,600 |
# Takes 2 rasters: the first contains integer values defining groups of data (e.g., a rasterized shapefile)
# the second one contains values to group (e.g., slopes or elevation)
from lsdtt_xtensor_python import comparison_stats_from_2darrays as gpc
import numpy as np
# from matplotlib import pyplot as plt
#TODO add real data here
# First array is the index one
# Synthetic group index raster: default group 1, with bands of rows
# assigned to groups 5, 4 and 1.
index_array = np.ones((5000,5000), dtype = np.int32)
index_array[0:200,:] = 5
index_array[200:300,:] = 4
index_array[300:400,:] = 1
# Second array contains values
# (uniform random floats; the dataset to be grouped by index_array).
val_array = np.random.rand(5000,5000).astype(np.float32)
print("Getting values in CPP")
# The code takes the following arguments: the 2 arrays ofc, an optional value to ignore (e.g., NoDataValue), and the number of rows and cols
# NOTE(review): gpc comes from the compiled lsdtt_xtensor_python
# extension; 10000000 is presumably the ignore/NoData value -- confirm.
test = gpc(index_array,val_array,10000000,index_array.shape[0],index_array.shape[1])
print("Done")
| [
2,
33687,
362,
374,
7060,
25,
262,
717,
4909,
18253,
3815,
16215,
2628,
286,
1366,
357,
68,
13,
70,
1539,
257,
374,
1603,
1143,
5485,
7753,
8,
198,
2,
262,
1218,
530,
4909,
3815,
284,
1448,
357,
68,
13,
70,
1539,
35082,
393,
22910... | 3.071429 | 280 |
import sys
import re
import os
from pathlib import Path
import subprocess as sp
import numpy as np
import traceback
from . import cpu_info
from victor.constants import physconst
from victor import util
# Collection helpers
# execution steps
# input helpers
# main driver
| [
11748,
25064,
198,
11748,
302,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
850,
14681,
355,
599,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12854,
1891,
198,
198,
6738,
764,
1330,
42804,
62,
10951,
628,
198,
... | 3.671053 | 76 |
"""
Package for storing the Trajectories in a csv file
"""
from .ExtractTrajectories import ExtractTrajectories
| [
37811,
198,
27813,
329,
23069,
262,
4759,
752,
1749,
287,
257,
269,
21370,
2393,
198,
37811,
198,
198,
6738,
764,
11627,
974,
15721,
752,
1749,
1330,
29677,
15721,
752,
1749,
198
] | 3.645161 | 31 |
#!/usr/bin/env python
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import csv
import locale
locale.setlocale(locale.LC_ALL,'')
adcsumflag = False
if '-adcsum' in sys.argv:
adcsumflag = True
sys.argv.remove('-adcsum')
checktotalflag = False
check_ul_dl_flag = False
if '-checktotal' in sys.argv:
checktotalflag = True
index = sys.argv.index('-checktotal')
expected_total = int(sys.argv[index+1])
del(sys.argv[index])
del(sys.argv[index])
elif '-check_ul_dl' in sys.argv:
check_ul_dl_flag = True
index = sys.argv.index('-check_ul_dl')
expected_ul_total = int(sys.argv[index+1])
expected_dl_total = int(sys.argv[index+2])
del(sys.argv[index])
del(sys.argv[index])
del(sys.argv[index])
if len(sys.argv)<2:
print 'Usage: %s [-adcsum] [-checktotal <expected total pkt> | -check_ul_dl <expected ul pkt> <expected dl pkt> ] <cdr csv file name>'%(sys.argv[0])
sys.exit(1)
filename = sys.argv[1]
print filename
reader = csv.reader(open(filename, "rb"), delimiter=',', quoting=csv.QUOTE_NONE)
header = reader.next()
['#time', 'ue_ip', 'dl_pkt_cnt', 'dl_bytes', 'ul_pkt_cnt', 'ul_bytes', 'rule_id', 'rule_type', 'rule', 'action', 'sponsor_id', 'service_id', 'rate_group', 'tarriff_group', 'tarriff_time']
dl_pkt_cnt_col = header.index('dl_pkt_cnt')
ul_pkt_cnt_col = header.index('ul_pkt_cnt')
rate_group_col = header.index('rate_group')
rule_type_col = header.index('rule_type')
#line = reader.next()
#print line[dl_pkt_cnt_col],line[rate_group_col]
#print line[ul_pkt_cnt_col],line[rate_group_col]
dl_rate_group_count_dict = {}
ul_rate_group_count_dict = {}
for line in reader:
rule_type = line[rule_type_col]
rate_group = line[rate_group_col]
if adcsumflag and rule_type == 'ADC':
rate_group = 'ADC'
dl_pkt_cnt = int(line[dl_pkt_cnt_col])
ul_pkt_cnt = int(line[ul_pkt_cnt_col])
#print line[dl_pkt_cnt_col],line[rate_group_col]
#print line[ul_pkt_cnt_col],line[rate_group_col]
try:
dl_rate_group_count_dict[rate_group]+=dl_pkt_cnt
except KeyError:
dl_rate_group_count_dict.update({rate_group:dl_pkt_cnt})
try:
ul_rate_group_count_dict[rate_group]+=ul_pkt_cnt
except KeyError:
ul_rate_group_count_dict.update({rate_group:ul_pkt_cnt})
dl_totalcount = 0
ul_totalcount = 0
for key in dl_rate_group_count_dict:
#print 'Traffic type: %s, pkt count: %d'%(key, dl_rate_group_count_dict[key])
print 'Traffic type: %s, DL pkt count: %s'%(key, locale.format("%d", dl_rate_group_count_dict[key], grouping=True))
dl_totalcount+=dl_rate_group_count_dict[key]
for key in ul_rate_group_count_dict:
#print 'Traffic type: %s, pkt count: %d'%(key, ul_rate_group_count_dict[key])
print 'Traffic type: %s, UL pkt count: %s'%(key, locale.format("%d", ul_rate_group_count_dict[key], grouping=True))
ul_totalcount+=ul_rate_group_count_dict[key]
print 'Total pkt DL count: %s'%(locale.format("%d", dl_totalcount, grouping=True))
print 'Total pkt UL count: %s'%(locale.format("%d", ul_totalcount, grouping=True))
if check_ul_dl_flag:
if expected_dl_total == dl_totalcount and expected_ul_total == ul_totalcount:
verdict = 'pass'
result = 0
else:
verdict = 'fail'
result = 1
print('CDR total pkt count check: %s'%(verdict))
sys.exit(result)
if checktotalflag:
if expected_total == dl_totalcount:
verdict = 'pass'
result = 0
else:
verdict = 'fail'
result = 1
print('CDR total pkt count check: %s'%(verdict))
sys.exit(result)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
357,
66,
8,
2177,
8180,
10501,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
... | 2.477047 | 1,612 |
# Generated by Django 2.0.5 on 2018-05-31 10:15
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
20,
319,
2864,
12,
2713,
12,
3132,
838,
25,
1314,
201,
198,
201,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
201,
198,
201,
198
] | 2.567568 | 37 |
"""
Copyright (C) 2020 Nederlandse Organisatie voor Toegepast Natuur-
wetenschappelijk Onderzoek TNO / TNO, Netherlands Organisation for
applied scientific research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Maaike de Boer, Roos Bakker
@contact: maaike.deboer@tno.nl, roos.bakker@tno.nl
"""
import argparse
import sys
import juridecompose.dict_to_dataframe as dict_to_dataframe
import juridecompose.wetten_xml_to_dict as wetten_xml_to_dict
if __name__ == '__main__':
xml_to_dataframe()
| [
37811,
198,
220,
220,
220,
15069,
357,
34,
8,
12131,
399,
5702,
1044,
325,
7221,
271,
265,
494,
410,
2675,
1675,
68,
469,
30119,
14393,
84,
333,
12,
198,
220,
220,
220,
9583,
641,
354,
1324,
417,
45961,
440,
681,
10872,
988,
309,
... | 3.101796 | 334 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'todo2.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import image_rc
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
83,
24313,
17,
13,
9019,
6,
198,
2,
198,
2,
15622,
416,
25,
9485,
48,
83,
20,
12454,
2438,
17301,
642,
13,... | 2.752688 | 93 |
from module import (
Foo,
Bar,
)
print(Foo(), Bar)
| [
6738,
8265,
1330,
357,
198,
220,
220,
220,
36080,
11,
198,
220,
220,
220,
2409,
11,
198,
8,
198,
198,
4798,
7,
37,
2238,
22784,
2409,
8,
198
] | 2.142857 | 28 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import pytest
from google.cloud import datastore_v1
from google.cloud.datastore_v1 import enums
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore_v1.proto import entity_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
| [
2,
15069,
2864,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330... | 3.402778 | 288 |
from .core import AbstractModel
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function, Variable
from sru import SRU
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import time
from sklearn.utils import shuffle, resample
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import math
from collections import deque
import functools
| [
6738,
764,
7295,
1330,
27741,
17633,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
15553,
11,
35748,
198,
6738,
264,
622,
... | 3.595041 | 121 |
import httplib
try:
from xml.etree import ElementTree
except ImportError:
# Python < 2.5
from elementtree import ElementTree
# ARB expects request parameters in a particular order
REQUEST_KEY_ORDER = ("merchantAuthentication refId subscriptionId subscription name transactionKey "
"paymentSchedule interval length unit "
"startDate totalOccurrences trialOccurrences amount trialAmount "
"payment creditCard cardNumber expirationDate cardCode bankAccount "
"accountType routingNumber accountNumber nameOnAccount echeckType "
"bankName order invoiceNumber description customer id email "
"phoneNumber faxNumber billTo firstName lastName company address "
"city state zip country shipTo".split())
| [
11748,
1841,
489,
571,
198,
28311,
25,
198,
220,
220,
220,
422,
35555,
13,
316,
631,
1330,
11703,
27660,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
1303,
11361,
1279,
362,
13,
20,
198,
220,
220,
220,
422,
5002,
21048,
1330,
1... | 2.740385 | 312 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Decorators for using with IBMQProvider unit tests.
Environment variables used by the decorators:
* QISKIT_IBM_API_TOKEN: default API token to use.
* QISKIT_IBM_API_URL: default API url to use.
* QISKIT_IBM_HGP: default hub/group/project to use.
* QISKIT_IBM_PRIVATE_HGP: hub/group/project to use for private jobs.
* QISKIT_IBM_DEVICE: default device to use.
* QISKIT_IBM_USE_STAGING_CREDENTIALS: True if use staging credentials.
* QISKIT_IBM_STAGING_API_TOKEN: staging API token to use.
* QISKIT_IBM_STAGING_API_URL: staging API url to use.
* QISKIT_IBM_STAGING_HGP: staging hub/group/project to use.
* QISKIT_IBM_STAGING_DEVICE: staging device to use.
* QISKIT_IBM_STAGING_PRIVATE_HGP: staging hub/group/project to use for private jobs.
"""
import os
from functools import wraps
from unittest import SkipTest
from typing import Optional
from qiskit.test.testing_options import get_test_options
from qiskit.providers.ibmq import least_busy
from qiskit.providers.ibmq.ibmqfactory import IBMQFactory
from qiskit.providers.ibmq.credentials import Credentials, discover_credentials
from qiskit.providers.ibmq.accountprovider import AccountProvider
from qiskit.providers.ibmq import IBMQ
def requires_qe_access(func):
"""Decorator that signals that the test uses the online API.
It involves:
* determines if the test should be skipped by checking environment
variables.
* if the `QISKIT_IBM_USE_STAGING_CREDENTIALS` environment variable is
set, it reads the credentials from an alternative set of environment
variables.
* if the test is not skipped, it reads `qe_token` and `qe_url` from
environment variables or qiskitrc.
* if the test is not skipped, it appends `qe_token` and `qe_url` as
arguments to the test function.
Args:
func (callable): test function to be decorated.
Returns:
callable: the decorated function.
"""
@wraps(func)
return _wrapper
def requires_providers(func):
"""Decorator that signals the test uses the online API, via a public and premium provider.
This decorator delegates into the `requires_qe_access` decorator, but
instead of the credentials it appends a dictionary, containing the open access project
`public_provider` and a `premium_provider`, to the decorated function.
Args:
func (callable): Test function to be decorated.
Returns:
callable: The decorated function.
"""
@wraps(func)
@requires_qe_access
return _wrapper
def requires_provider(func):
"""Decorator that signals the test uses the online API, via a provider.
This decorator delegates into the `requires_qe_access` decorator, but
instead of the credentials it appends a `provider` argument to the decorated
function.
Args:
func (callable): test function to be decorated.
Returns:
callable: the decorated function.
"""
@wraps(func)
@requires_qe_access
return _wrapper
def requires_private_provider(func):
"""Decorator that signals the test requires a provider for private jobs.
This decorator appends a `provider` argument to the decorated function.
Args:
func (callable): test function to be decorated.
Returns:
callable: the decorated function.
"""
@wraps(func)
@requires_qe_access
return _wrapper
def requires_device(func):
"""Decorator that retrieves the appropriate backend to use for testing.
It involves:
* Enable the account using credentials obtained from the
`requires_qe_access` decorator.
* Use the backend specified by `QISKIT_IBM_STAGING_DEVICE` if
`QISKIT_IBM_USE_STAGING_CREDENTIALS` is set, otherwise use the backend
specified by `QISKIT_IBM_DEVICE`.
* if device environment variable is not set, use the least busy
real backend.
* appends arguments `backend` to the decorated function.
Args:
func (callable): test function to be decorated.
Returns:
callable: the decorated function.
"""
@wraps(func)
@requires_qe_access
return _wrapper
def requires_runtime_device(func):
"""Decorator that retrieves the appropriate backend to use for testing.
Args:
func (callable): test function to be decorated.
Returns:
callable: the decorated function.
"""
@wraps(func)
@requires_qe_access
return _wrapper
def _get_backend(qe_token, qe_url, backend_name):
"""Get the specified backend."""
_enable_account(qe_token, qe_url)
_backend = None
provider = _get_custom_provider(IBMQ) or list(IBMQ._providers.values())[0]
if backend_name:
# Put desired provider as the first in the list.
providers = [provider] + IBMQ.providers()
for provider in providers:
backends = provider.backends(name=backend_name)
if backends:
_backend = backends[0]
break
else:
_backend = least_busy(provider.backends(simulator=False, min_num_qubits=5))
if not _backend:
raise Exception("Unable to find a suitable backend.")
return _backend
def _get_credentials():
"""Finds the credentials for a specific test and options.
Returns:
Credentials: set of credentials
Raises:
Exception: When the credential could not be set and they are needed
for that set of options.
"""
if os.getenv("QISKIT_IBM_USE_STAGING_CREDENTIALS", ""):
# Special case: instead of using the standard credentials mechanism,
# load them from different environment variables. This assumes they
# will always be in place, as is used by the Travis setup.
return Credentials(
os.getenv("QISKIT_IBM_STAGING_API_TOKEN"), os.getenv("QISKIT_IBM_STAGING_API_URL")
)
# Attempt to read the standard credentials.
discovered_credentials, _ = discover_credentials()
if discovered_credentials:
# Decide which credentials to use for testing.
if len(discovered_credentials) > 1:
try:
# Attempt to use QE credentials.
return discovered_credentials[(None, None, None)]
except KeyError:
pass
# Use the first available credentials.
return list(discovered_credentials.values())[0]
raise Exception("Unable to locate valid credentials.")
def _get_custom_provider(ibmq_factory: IBMQFactory) -> Optional[AccountProvider]:
"""Find the provider for the specific hub/group/project, if any.
Args:
ibmq_factory: IBMQFactory instance with account already loaded.
Returns:
Custom provider or ``None`` if default is to be used.
"""
hgp = (
os.getenv("QISKIT_IBM_STAGING_HGP", None)
if os.getenv("QISKIT_IBM_USE_STAGING_CREDENTIALS", "")
else os.getenv("QISKIT_IBM_HGP", None)
)
if hgp:
hgp = hgp.split("/")
return ibmq_factory.get_provider(hub=hgp[0], group=hgp[1], project=hgp[2])
return None # No custom provider.
def _enable_account(qe_token: str, qe_url: str) -> None:
"""Enable the account if one is not already active.
Args:
qe_token: API token.
qe_url: API URL.
"""
active_account = IBMQ.active_account()
if active_account:
if active_account.get("token", "") == qe_token:
return
IBMQ.disable_account()
IBMQ.enable_account(qe_token, qe_url)
| [
2,
770,
2438,
318,
636,
286,
1195,
1984,
270,
13,
198,
2,
198,
2,
357,
34,
8,
15069,
19764,
2864,
11,
13130,
13,
198,
2,
198,
2,
770,
2438,
318,
11971,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
13,
921,
743,
198,
2,
7330... | 2.593193 | 3,144 |
# encoding: utf-8
from __future__ import (print_function, unicode_literals)
import datetime
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from os.path import join as j
logging.basicConfig(filename='test.log', level=logging.DEBUG)
stderr = logging.StreamHandler()
stderr.setLevel(logging.WARNING)
logging.getLogger().addHandler(stderr)
class SelfCleaningTestCase(unittest.TestCase):
"""TestCase subclass which cleans up self.tmpdir after each test"""
class TestBrunnhildeIntegration(SelfCleaningTestCase):
"""
Integration tests. sf (Siegfried) must be installed on user's system for tests to work.
"""
if __name__ == '__main__':
unittest.main()
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
6738,
11593,
37443,
834,
1330,
357,
4798,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
8,
198,
198,
11748,
4818,
8079,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
... | 3.075949 | 237 |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from datetime import datetime as dt, timedelta
# Define object for external CSS stylesheet
external_stylesheets = [
'https://getbootstrap.com/docs/3.3/getting-started/',
{
'href': 'https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css',
'rel': 'stylesheet',
'integrity': 'sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u',
'crossorigin': 'anonymous'
}
]
# Initialize app object and add external stylesheet
app = dash.Dash(__name__,
external_stylesheets=external_stylesheets)
# Define the layout
app.layout = html.Div(children=[
html.H2('Twitter Showcase'),
# Create a new Row in the UI for Inputs
html.Div([
html.Div([
html.Div([
html.Div([
html.P('Datumsbereich:')
],
className="control-label"
),
dcc.DatePickerRange(
id='my-date-picker-range',
min_date_allowed=(dt.now() - timedelta(weeks=52)).date(),
max_date_allowed=dt.today(),
initial_visible_month=dt.today(),
start_date=(dt.now() - timedelta(days=7)).date(),
end_date=dt.now().date(),
display_format='DD.MM.YYYY',
)
]
)
],
className="col-sm-4"
),
html.Div([
html.Div([
html.P('Hash-Tag oder Benutzer:')
],
className="control-label"
),
dcc.Input(
id='hashtag-input',
type='text',
value='@data2day'
)
],
className="col-sm-4"
),
html.Div([
html.Div([
html.P('Anzahl von Tweets:')
],
className="control-label"
),
dcc.Slider(
id='number-tweets-slider',
min=100,
max=2000,
value=500,
step=100,
marks={i: '{}'.format(i) for i in list(filter(lambda x: '{}'.format(x) if (x/100)%2 == 1 else '', [(100*(i+1)) for i in range(20)]))}
)
],
className="col-sm-4"
)
],
className="row"
),
# Create a new row for exemplary output
html.Div([
html.Div([
html.Div(id='output-container-date-picker-range')
],
className="col-sm-4"
),
html.Div([
html.Div(id='output-hashtag-input')
],
className="col-sm-4"
),
html.Div([
html.Div(id='ouput-number-tweets-slider')
],
className="col-sm-4"
)
],
className="row"
)
],
className="container-fluid"
)
# Create function based on input of date range slider
@app.callback(
Output('output-container-date-picker-range', 'children'),
[Input('my-date-picker-range', 'start_date'),
Input('my-date-picker-range', 'end_date')]
)
# Create function based on input of text input
@app.callback(
Output('output-hashtag-input', 'children'),
[Input('hashtag-input', 'value')]
)
# Create function based on input of integer slider
@app.callback(
dash.dependencies.Output('ouput-number-tweets-slider', 'children'),
[dash.dependencies.Input('number-tweets-slider', 'value')])
# Host the app via Flask
if __name__ == '__main__':
app.run_server(host='0.0.0.0', port=8052, debug=True)
| [
11748,
14470,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
198,
6738,
14470,
13,
45841,
3976,
1330,
23412,
11,
25235,
198,
6738,
4818,
8079,
1330,
4818,
8079,
355,
... | 1.85939 | 2,034 |
"""
These will need to be run in a clean virtulenv to give an understanding of a clean install of python.... is this enough?
From: http://stackoverflow.com/questions/1006169/how-do-i-look-inside-a-python-object
type()
dir()
id()
getattr()
hasattr()
globals()
locals()
callable()
"""
import sys
import pkgutil
# List of built in modules, awesome.
print
print "sys.builtin_module_names:"
print sys.builtin_module_names
print
print "pkgutil.iter_modules:"
print( pkgutil.iter_modules)
print
print "dir(__builtins__)"
print dir(__builtins__)
print
#This seems quite complete, not sure where it pulls from though!
#help('modules')
| [
37811,
198,
4711,
481,
761,
284,
307,
1057,
287,
257,
3424,
4118,
377,
24330,
284,
1577,
281,
4547,
286,
257,
3424,
2721,
286,
21015,
1106,
318,
428,
1576,
30,
198,
198,
4863,
25,
2638,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
5... | 3 | 211 |
"""Ticketr URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
import event.views
urlpatterns = [
url(r'^$', event.views.Home.as_view(), name='index'),
url(r'^create/', event.views.CreateEvent.as_view(), name='create-event'),
url(r'^my-events/', event.views.MyEvents.as_view(), name='my-events'),
url(r'^manage/(?P<event_id>[0-9]+)$', event.views.ManageEvent.as_view(), name='manage-event'),
url(r'^manage/orders/(?P<event_id>[0-9]+)$', event.views.ViewEventOrders.as_view(), name='view-event-orders'),
url(r'^manage/tickets/(?P<event_id>[0-9]+)$', event.views.ManageTickets.as_view(), name='manage-tickets'),
url(r'^manage/', event.views.ListEvents.as_view(), name='list-events'),
url(r'^organisers-profile/(?P<organiser_id>[0-9]+)$', event.views.OrganisersProfile.as_view(),
name='organisers-profile'),
url(r'^manage/create-ticket/(?P<event_id>[0-9]+)$', event.views.CreateTicket.as_view(), name='create-ticket'),
url(r'(?P<event_id>[0-9]+)$', event.views.ViewEvent.as_view(), name='view-event'),
url(r'^browse/',event.views.ListEvents.as_view(), name='browse-events'),
url(r'^organiser-profiles/',event.views.OrganiserProfiles.as_view(), name='organiser-profiles'),
]
| [
37811,
51,
9715,
81,
10289,
28373,
198,
198,
464,
4600,
6371,
33279,
82,
63,
1351,
11926,
32336,
284,
5009,
13,
1114,
517,
1321,
3387,
766,
25,
198,
220,
220,
220,
3740,
1378,
31628,
13,
28241,
648,
404,
305,
752,
13,
785,
14,
268,
... | 2.557065 | 736 |
#! /usr/bin/env python3
import os
import sys
import subprocess
#from subprocess import run,call
from tkinter import Tk, Canvas,Toplevel,NW,Scrollbar,RIGHT,Y,LEFT,BOTH,TOP
import copy
import configparser
# ***********************************************
# Determine model of Pi - 1,2,3,4
# ************************************************
## awk '/^Revision/ {sub("^1000", "", $3); print $3}' /proc/cpuinfo
# ***********************************************
# Read and process configuration data
# ************************************************
# read display.cfg
# ***********************************************
# HDMI Monitor Commands for DSI and HDMI
# ************************************************
# ***********************************************
# Touchscreen Backlight Commands
# ************************************************
# used to test on a machine without a backlight
# **************************
# Test Harness
# **************************
# dummy debug monitor
"""
class PiPresents(object):
def __init__(self):
self.mon=Mon()
self.ref0=None
self.ref1=None
# ********************
# SET UP THE GUI
# ********************
self.options={'fullscreen':True}
# set up the displays and create a canvas for each display
self.dm=DisplayManager()
self.pp_dir='/home/pi/pipresents'
status,message,self.root=self.dm.init(self.options,self.end)
if status !='normal':
self.mon.err(self,message)
sys.exit(111)
self.canvas0=None
self.canvas1=None
status,message,self.dsi_id,canvas_id=self.dm.id_of_canvas('DSI0')
if status == 'normal':
self.canvas0=canvas_id
self.canvas0.create_text(20,20,anchor=NW,text='F4 to close',font='arial 14',fill='yellow')
self.canvas0.create_text(20,40,anchor=NW,text='display id: ' + str(self.dsi_id),font='arial 14',fill='yellow')
self.canvas0.create_text(20,60,anchor=NW,text='canvas for display 0: ' + str(self.canvas0),font='arial 14',fill='yellow')
width0,height0=self.dm.canvas_dimensions(self.dsi_id)
self.canvas0.create_text(20,80,anchor=NW,text='Canvas width/height: '+str(width0)+' '+str(height0),font='arial 14',fill='yellow')
self.canvas0.create_text(20,100,anchor=NW,text='Display Rotation: '+ self.dm.real_display_orientation(self.dsi_id),font='arial 14',fill='yellow')
self.matrix0,ms0=self.dm.touch_matrix_for(self.dsi_id)
self.canvas0.create_text(20,120,anchor=NW,text=self.matrix_text(self.matrix0),font='arial 14',fill='yellow')
self.canvas0.create_text(width0/2,height0/2,text='*',font='arial 16',fill='yellow')
self.canvas0.bind('<Button-1>',self.click_pressed)
#self.canvas0.bind('<Motion>',self.click_pressed)
self.canvas0.bind("<F4>", self.end_event)
print ('set up DSI0 as Canvas0',self.dsi_id,canvas_id)
status,message,self.hdmi0_id,canvas_id=self.dm.id_of_canvas('HDMI0')
if status == 'normal':
self.canvas1=canvas_id
width1,height1=self.dm.canvas_dimensions(self.hdmi0_id)
self.canvas1.create_text(20,20,anchor=NW,text='F4 to close',font='arial 14',fill='yellow')
self.canvas1.create_text(20,40,anchor=NW,text='display id: ' + str(self.hdmi0_id),font='arial 14',fill='yellow')
self.canvas1.create_text(20,60,anchor=NW,text='canvas for display 1: ' + str(self.canvas1),font='arial 14',fill='yellow')
self.canvas1.create_text(20,80,anchor=NW,text='Canvas width/height: '+str(width1)+' '+str(height1),font='arial 14',fill='yellow')
self.canvas1.create_text(20,100,anchor=NW,text='Display Rotation: '+ self.dm.real_display_orientation(self.hdmi0_id),font='arial 14',fill='yellow')
self.matrix1,ms1=self.dm.touch_matrix_for(self.hdmi0_id)
self.canvas1.create_text(20,120,anchor=NW,text=self.matrix_text(self.matrix1),font='arial 14',fill='yellow')
self.canvas1.create_text(width1/2,height1/2,text='*',font='arial 14',fill='yellow')
# self.canvas1.bind('<Motion>',self.click_pressed)
self.canvas1.bind('<Button-1>',self.click_pressed)
self.canvas1.bind("<F4>", self.end_event)
print ('set up HDMI0 as Canvas1',self.hdmi0_id,canvas_id)
status,message,self.hdmi1_id,canvas_id=self.dm.id_of_canvas('HDMI1')
if status == 'normal':
# reuse canvas0 because cannot have DSI0 and HDMI0
self.canvas0=canvas_id
self.canvas0.create_text(20,20,anchor=NW,text='F4 to close',font='arial 14',fill='yellow')
self.canvas0.create_text(20,40,anchor=NW,text='display id: ' + str(self.hdmi1_id),font='arial 14',fill='yellow')
self.canvas0.create_text(20,60,anchor=NW,text='canvas for display 0: ' + str(self.canvas0),font='arial 14',fill='yellow')
width3,height3=self.dm.canvas_dimensions(self.hdmi1_id)
self.canvas0.create_text(20,80,anchor=NW,text='Canvas width/height: '+str(width3)+' '+str(height3),font='arial 14',fill='yellow')
self.canvas0.create_text(20,100,anchor=NW,text='Display Rotation: '+ self.dm.real_display_orientation(self.hdmi1_id),font='arial 14',fill='yellow')
self.matrix0,ms0=self.dm.touch_matrix_for(self.hdmi1_id)
self.canvas0.create_text(20,120,anchor=NW,text=self.matrix_text(self.matrix0),font='arial 14',fill='yellow')
self.canvas0.create_text(width3/2,height3/2,text='*',fill='yellow')
self.canvas0.bind('<Button-1>',self.click_pressed)
# self.canvas0.bind('<Motion>',self.click_pressed)
self.canvas0.bind("<F4>", self.end_event)
print ('set up HDMI1 as Canvas0',self.hdmi1_id,canvas_id)
# start Tkinters event loop
self.root.mainloop( )
def click_pressed(self,event):
x= event.x
y= event.y
widget=event.widget
#print ('click',widget,x,y)
if self.canvas0 != None:
if self.ref0 !=None:
self.canvas0.delete(self.ref0)
self.ref0=None
if widget== self.canvas0:
text0 = 'x,y: '+ str(x) + " " + str(y)
else:
text0 = 'Clicked on other display'
self.ref0=self.canvas0.create_text(100,300,anchor=NW,text=text0,fill='yellow',font='arial 20')
if self.canvas1 != None:
if self.ref1 !=None:
self.canvas1.delete(self.ref1)
self.ref1=None
if widget== self.canvas1:
text1 = 'x,y: '+ str(x) + " " + str(y)
else:
text1 = 'Clicked on other display'
self.ref1=self.canvas1.create_text(100,300,anchor=NW,text=text1,fill='yellow',font='arial 20')
#status,message=self.dm.do_backlight_command('backlight set 50')
# print (status,message)
def matrix_text(self,c):
# convert to string
cstr=['','','','','','','','','']
i=0
while i <9:
cstr[i]= '{:f}'.format(c[i])
i+=1
chunks=self.chunks(cstr,3)
cstr33=''
for chunk in chunks:
line = ' '.join(chunk)
cstr33 += '\n'+line
return cstr33
def chunks(self,lst, n):
#Yield successive n-sized chunks from lst
for i in range(0, len(lst), n):
yield lst[i:i + n]
def end_event(self,event):
self.end()
def end(self):
self.mon.log(self,"Pi Presents aborted: ")
if self.root is not None:
self.root.destroy()
self.mon.log(self,"Pi Presents exiting normally, bye")
sys.exit(100)
"""
if __name__ == '__main__':
disp=Display()
disp.init()
#pp=PiPresents()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
850,
14681,
198,
2,
6738,
850,
14681,
1330,
1057,
11,
13345,
198,
6738,
256,
74,
3849,
1330,
309,
74,
11,
1680,
11017,
11,
51,... | 2.08416 | 3,933 |
#Faça um Programa que leia três números e mostre o maior deles.
valor1 = float(input("Digite o primeiro valor: "))
valor2 = float(input("Digite o segundo valor: "))
valor3 = float(input("Digite o terceiro valor: "))
valores = [valor1, valor2, valor3]
print(f'O maior número é {max(valores)}')
| [
2,
50110,
50041,
23781,
6118,
64,
8358,
443,
544,
491,
25792,
82,
299,
21356,
647,
418,
304,
749,
260,
267,
17266,
1504,
390,
829,
13,
201,
198,
201,
198,
2100,
273,
16,
796,
12178,
7,
15414,
7203,
19511,
578,
267,
6994,
7058,
1188,... | 2.276119 | 134 |
# Part 1 of the Python Review lab.
| [
2,
2142,
352,
286,
262,
11361,
6602,
2248,
13,
198
] | 3.5 | 10 |
from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response
from patron.forms import *
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
@login_required
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
28243,
1330,
19390,
21947,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
62,
1462,
62... | 3.686047 | 86 |
#!/usr/bin/python
import sys, urllib2
import re
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit('^C caught, exiting...')
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
25064,
11,
2956,
297,
571,
17,
198,
11748,
302,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1949,
25,
198,
220,
220,
220,
220,
220,
220,
... | 2.30137 | 73 |
from django_redis import get_redis_connection
from rest_framework import serializers
class ImageCodeCheckSerializer(serializers.Serializer):
'''图片验证序列化器'''
image_code_id = serializers.UUIDField()
text = serializers.CharField(max_length=4, min_length=4) | [
6738,
42625,
14208,
62,
445,
271,
1330,
651,
62,
445,
271,
62,
38659,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
4871,
7412,
10669,
9787,
32634,
7509,
7,
46911,
11341,
13,
32634,
7509,
2599,
198,
220,
220,
220,
705,
7061,
32... | 2.588235 | 102 |
import csv
with open("random_num.csv",'r') as f:
reader = csv.reader(f)
data = list(reader)
result = []
for d in data[0]:
d_int = int(d)
result.append(3*d_int + 6)
with open("function_random_num.csv",'w') as output:
writer = csv.writer(output)
writer.writerow(result)
| [
11748,
269,
21370,
628,
198,
198,
4480,
1280,
7203,
25120,
62,
22510,
13,
40664,
1600,
6,
81,
11537,
355,
277,
25,
198,
220,
220,
220,
9173,
796,
269,
21370,
13,
46862,
7,
69,
8,
198,
220,
220,
220,
1366,
796,
1351,
7,
46862,
8,
... | 2.325581 | 129 |
import pprint as pp
import airflow.utils.dates
from airflow import DAG, macros
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime, timedelta
default_args = {
"owner": "airflow",
"start_date": airflow.utils.dates.days_ago(10),
}
with DAG(dag_id="data_dag", default_args=default_args, schedule_interval="@daily") as dag:
upload = DummyOperator(task_id="upload")
process = BashOperator(
task_id="process",
bash_command="echo 'processing'"
)
# This task will fail half the time based
# based on the day of the execution date modulo 2
# If day 16 % 2 = exit 0
# If day 17 % 2 = exit 1
fail = BashOperator(
task_id="fail",
bash_command="""
valid={{macros.ds_format(ds, "%Y-%m-%d", "%d")}}
if [ $(($valid % 2)) == 1 ]; then
exit 1
else
exit 0
fi
"""
)
upload >> process >> fail | [
11748,
279,
4798,
355,
9788,
198,
11748,
45771,
13,
26791,
13,
19581,
198,
6738,
45771,
1330,
360,
4760,
11,
34749,
198,
6738,
45771,
13,
3575,
2024,
13,
41757,
62,
46616,
1330,
15743,
18843,
1352,
198,
6738,
45771,
13,
3575,
2024,
13,
... | 2.188976 | 508 |
import http.client
from rdr_service.code_constants import PPI_SYSTEM
from rdr_service.concepts import Concept
from tests.helpers.unittest_base import BaseTestCase
| [
11748,
2638,
13,
16366,
198,
198,
6738,
374,
7109,
62,
15271,
13,
8189,
62,
9979,
1187,
1330,
350,
11901,
62,
23060,
25361,
198,
6738,
374,
7109,
62,
15271,
13,
43169,
82,
1330,
26097,
198,
6738,
5254,
13,
16794,
364,
13,
403,
715,
... | 3.3 | 50 |
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
@python_2_unicode_compatible
@python_2_unicode_compatible
@python_2_unicode_compatible
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
12685,
7656,
1330,
21015,
62,
17,
62,
46903,
1098,
62,
38532,
628,
198,
31,
29412,
... | 3.108434 | 83 |
import os, sys
import xml.etree.ElementTree as etree
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append( root + '/lvl2vhir' )
sys.path.append( root + '/lvlUtil' )
import LevelEditor
import EntityYaml
import FileIO
AI_BLOCKING_AREA_SUFFIX = '_AIBlockingArea'
#
# Model bounding box representation
#
# AI blocking area
if __name__ == "__main__":
main( sys.argv ) | [
11748,
28686,
11,
25064,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
2123,
631,
198,
198,
15763,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
834,
7... | 2.61039 | 154 |
__author__ = 'smuthunoori'
import kivy
kivy.require('1.7.0')
from kivy.uix.relativelayout import RelativeLayout
from kivy.graphics import Line
| [
834,
9800,
834,
796,
705,
5796,
1071,
403,
2675,
72,
6,
198,
198,
11748,
479,
452,
88,
198,
74,
452,
88,
13,
46115,
10786,
16,
13,
22,
13,
15,
11537,
198,
198,
6738,
479,
452,
88,
13,
84,
844,
13,
43762,
39786,
1330,
45344,
3251... | 2.578947 | 57 |
# -*- coding: UTF-8 -*-
from cefpython3 import cefpython
from threading import Thread, Lock
import signal
import Queue
import wx
import os
import HTMLParser
application_settings = {
"cache_path": "/tmp/cef/cache/",
"debug": False,
"log_severity": cefpython.LOGSEVERITY_WARNING,
"log_file": "/tmp/cef/debug.log",
"resources_dir_path": cefpython.GetModuleDirectory() + "/Resources",
"browser_subprocess_path": "%s/%s" % (cefpython.GetModuleDirectory(), "subprocess"),
"unique_request_context_per_browser": True,
"downloads_enabled": True,
"remote_debugging_port": 0,
"context_menu": {
"enabled": True,
"navigation": True,
"print": True,
"view_source": True,
"external_browser": True,
"devtools": True,
},
"ignore_certificate_errors": True,
}
browser_settings = {
"plugins_disabled": True,
"file_access_from_file_urls_allowed": True,
"universal_access_from_file_urls_allowed": True,
}
switch_settings = {
"locale_pak": cefpython.GetModuleDirectory() + "/Resources/en.lproj/locale.pak",
# "proxy-server": "socks5://127.0.0.1:7777",
# "proxy-server": "http://127.0.0.1:7777",
# "user-agent": "MyBrowser/1.0"
# "no-proxy-server": "",
# "enable-media-stream": "",
# "remote-debugging-port": "12345",
# "disable-gpu": "",
# "--invalid-switch": "" -> Invalid switch name
}
pid_file = os.path.join(application_settings['cache_path'], 'cefjs.pid')
with open(pid_file, 'w') as f:
if not os.path.exists(application_settings['cache_path']):
os.makedirs(application_settings['cache_path'])
f.write(str(os.getpid()))
status = 0
lock = Lock()
__all__ = ['loop', 'Session', 'set_app_settings', 'set_browser_settings', 'set_switch_settings', 'kill_self']
if __name__ == '__main__':
s_cls = Session
loop(s_cls)
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
6738,
269,
891,
29412,
18,
1330,
269,
891,
29412,
198,
6738,
4704,
278,
1330,
14122,
11,
13656,
198,
11748,
6737,
198,
11748,
4670,
518,
198,
11748,
266,
87,
198,
11748,
28... | 2.446034 | 769 |
from netmiko import Netmiko
from getpass import getpass
from netmiko import ConnectHandler
banner = '-' * 80
password = getpass()
cisco3 = {
'device_type': 'cisco_ios',
'host': 'cisco3.lasthop.io',
'username': 'pyclass',
'password': password,
}
nx0s1 = {
'device_type': 'cisco_nxos',
'host': 'nxos1.lasthop.io',
'username': 'pyclass',
'password': password,
}
# SHOW PROMPT
for net_device in (cisco3, nx0s1):
connect = ConnectHandler(**net_device)
prompt = connect.find_prompt()
print(prompt)
print(banner)
# SHOW VERSION
for net_device in (cisco3, nx0s1):
connect = ConnectHandler(**net_device)
show_ver = connect.send_command('show version')
print(show_ver)
print(banner)
# SHOW RUN
for net_device in (cisco3, nx0s1):
connect = ConnectHandler(**net_device)
show_run = connect.send_command('show run')
print(show_run)
write_file(net_device + ".txt", show_run)
| [
6738,
2010,
76,
12125,
1330,
3433,
76,
12125,
198,
6738,
651,
6603,
1330,
651,
6603,
198,
6738,
2010,
76,
12125,
1330,
8113,
25060,
198,
198,
3820,
1008,
796,
705,
19355,
1635,
4019,
198,
28712,
796,
651,
6603,
3419,
198,
198,
66,
486... | 2.417949 | 390 |
import threading
from ..database.DB import DbConnection
_lock = threading.Lock()
_apikeys = {}
# noinspection PyUnusedLocal
| [
11748,
4704,
278,
198,
198,
6738,
11485,
48806,
13,
11012,
1330,
360,
65,
32048,
198,
198,
62,
5354,
796,
4704,
278,
13,
25392,
3419,
198,
62,
499,
522,
893,
796,
23884,
628,
628,
198,
2,
645,
1040,
14978,
9485,
3118,
1484,
14565,
6... | 2.977778 | 45 |
#!/usr/bin/python
import pygame
import time
#import thread
import fmuglobals
if fmuglobals.RUN_ON_RASPBERRY_PI:
import RPi.GPIO as GPIO
"""
AnalogButtons
"""
#def on_click(self, btn):
# print 'GPIO ' + str(btn) + ' clicked'
# pygame.event.post(pygame.event.Event(pygame.KEYDOWN, key=btn['key'], unicode=None, mod=None))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
12972,
6057,
198,
11748,
640,
198,
2,
11748,
4704,
198,
11748,
277,
76,
1018,
75,
672,
874,
198,
198,
361,
277,
76,
1018,
75,
672,
874,
13,
49,
4944,
62,
1340,
62,
49,
1921,
47,
... | 2.319149 | 141 |
from operator import is_not
from functools import partial
| [
6738,
10088,
1330,
318,
62,
1662,
198,
6738,
1257,
310,
10141,
1330,
13027,
198
] | 4.142857 | 14 |
import divide_video
import find_and_blur_faces
import list_of_files
import time
from termcolor import colored
import welcome_message
import warnings
print(colored(welcome_message.welcome_msg,'green'))
# Helps not to display UserWarning
warnings.simplefilter("ignore", UserWarning)
# Information about the program start (start time)
message = f'Program started {time.strftime("%H:%M:%S", time.localtime())}'
print(colored(message,'blue'))
# Function responsible for obtaining a list of files (with the .mp4 extension),
# that have not yet been subjected to obliteration.
# Thanks to this, you can send new files to the input directory without deleting the previously used ones.
list_of_files = list_of_files.get_list_of_files()
# Display information regarding how many files will be exposed to the program.
message_how_many_files = f'{len(list_of_files)} new files to blur faces and slice into frames.'
print(colored(message_how_many_files,'yellow'))
print(list_of_files)
if len(list_of_files) > 0:
for file in list_of_files:
# Divide the movie into frames and save them in the specific folders
folder_path = divide_video.divide_video_into_frames(file)
# Using DSFD, we detect faces and then put a blur in their place
find_and_blur_faces.find_and_blur_faces(folder_path)
elif len(list_of_files) == 0:
print(colored("There is no new videos (mp4) to blur and slice.", 'red'))
# If there is no new videos, code 20.
exit(20)
# Information about the program end (end time)
message = f'Program ended {time.strftime("%H:%M:%S", time.localtime())}'
print(colored(message,'blue'))
| [
11748,
14083,
62,
15588,
198,
11748,
1064,
62,
392,
62,
2436,
333,
62,
32186,
198,
11748,
1351,
62,
1659,
62,
16624,
198,
11748,
640,
198,
6738,
3381,
8043,
1330,
16396,
198,
11748,
7062,
62,
20500,
198,
11748,
14601,
628,
198,
4798,
... | 3.128107 | 523 |
#!/usr/bin/env python3
from sys import stderr, exit
import random
from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
from hanoi_lib import ConfigGenerator, HanoiTowerProblem
from utils_lang import get_regex, get_std_move
# METADATA OF THIS TAL_SERVICE:
args_list = [
('v',str),
('start', str),
('final', str),
('format', str),
('num_tests',int),
('n_max',int),
]
ENV =Env(args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
# START CODING YOUR SERVICE:
# get seed
seed = ENV['seed']
if seed == 'random_seed':
seed = random.randint(100000, 999999)
else:
seed = int(seed)
TAc.print(LANG.render_feedback("print-service-seed", f"# service seed = {seed}"), "yellow", ["bold"])
# Init Hanoi Tower and configGenerator
hanoi = HanoiTowerProblem(ENV['v'])
gen = ConfigGenerator(seed)
# Execute all test
TAc.print(LANG.render_feedback("start-tests", f"# Start Tests"), "green", ["bold"])
for t in range(1, ENV['num_tests'] + 1):
for n in range(1, ENV['n_max'] + 1):
# RUN TEST --------------------
# get type of configurations
start, final, error = gen.getConfigs(ENV['start'], ENV['final'], n)
assert error == None
TAc.print(LANG.render_feedback("print-configs", f"{start}\n{final}"), "green", ["bold"])
# Get the correct solution
opt_sol = hanoi.getMovesList(start, final)
# Get user solution
user_sol = list()
regex, explain = get_regex(ENV['format'], ENV['lang'])
while True:
user_move, = TALinput(str, sep="\n", regex=regex, regex_explained=explain, exceptions={"end"}, TAc=TAc)
if user_move == 'end':
break
user_sol.append(get_std_move(user_move, ENV['format'], ENV['lang']))
# CHECK TEST ------------------
if user_sol == opt_sol:
TAc.print(LANG.render_feedback("success", f"# success"), "green", ["bold"])
else:
TAc.print(LANG.render_feedback("fail", f"# fail: wrong answer"), "red", ["bold"])
TAc.print(LANG.render_feedback("print-service-seed", f"# service seed: {seed}"), "red", ["bold"])
TAc.print(LANG.render_feedback("print-configs", f"{start}\n{final}"), "green", ["bold"])
break
TAc.print(LANG.render_feedback("end", f"Finish Tests"), "green", ["bold"])
exit(0)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
25064,
1330,
336,
1082,
81,
11,
8420,
198,
11748,
4738,
198,
198,
6738,
309,
1847,
15414,
82,
1330,
309,
1847,
15414,
198,
6738,
1963,
346,
9000,
1330,
2039,
85,
11,
16332,
... | 2.291391 | 1,057 |
# Generated by Django 3.2.7 on 2021-09-27 14:25
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
22,
319,
33448,
12,
2931,
12,
1983,
1478,
25,
1495,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
import vrep.vrep as vrep
from PIL import Image
import numpy as np
import time
import array
import cv2
__credits__ = 'nemilya'
OP_MODE = vrep.simx_opmode_oneshot_wait
PORT_NUM = 19997
if __name__ == '__main__':
run()
| [
11748,
410,
7856,
13,
85,
7856,
355,
410,
7856,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
11748,
7177,
198,
11748,
269,
85,
17,
628,
198,
834,
66,
20696,
834,
796,
705,
77,
368,
813,... | 2.494505 | 91 |
"""Unit test package for pypda."""
| [
37811,
26453,
1332,
5301,
329,
279,
4464,
6814,
526,
15931,
198
] | 3.181818 | 11 |
import math
import logging
import sys
import time
import traceback
from typing import Callable, Optional
import i3ipc
import common
import cycle_windows
import layout
import move_counter
import transformations
| [
11748,
10688,
198,
11748,
18931,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
12854,
1891,
198,
6738,
19720,
1330,
4889,
540,
11,
32233,
198,
198,
11748,
1312,
18,
541,
66,
198,
198,
11748,
2219,
198,
11748,
6772,
62,
28457,
198,
117... | 4.115385 | 52 |
import numpy
from tensorflow.keras.callbacks import Callback
| [
11748,
299,
32152,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
13345,
10146,
1330,
4889,
1891,
628
] | 3.444444 | 18 |
from setuptools import setup
template_patterns = ['templates/*.html',
'templates/*/*.html',
'templates/*/*/*.html',
]
package_name = 'django-bitcoin'
packages = ['django_bitcoin',
'django_bitcoin.management',
'django_bitcoin.management.commands',
'django_bitcoin.templatetags',
'django_bitcoin.templates',
'django_bitcoin.migrations',
'django_bitcoin.jsonrpc']
long_description = open("README.rst").read() + "\n" + open("CHANGES.rst").read()
setup(name='django-bitcoin',
version='0.2',
description='Bitcoin application integration for Django web framework',
long_description=long_description,
author='Jeremias Kangas',
url='https://github.com/kangasbros/django-bitcoin',
requires=["qrcode (>2.3.1)", "South (>0.7.4)"],
license="MIT",
# use_2to3 = True,
packages=packages,
package_data=dict((package_name, template_patterns) for package_name in packages),
)
| [
6738,
900,
37623,
10141,
1330,
9058,
628,
198,
28243,
62,
33279,
82,
796,
37250,
11498,
17041,
15211,
13,
6494,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
1149... | 2.223849 | 478 |
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# This code is subject to the (new) BSD license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Module images2ims
Use PIL to create a series of images.
"""
import os
try:
import numpy as np
except ImportError:
np = None
try:
import PIL
from PIL import Image
except ImportError:
PIL = None
def checkImages(images):
""" checkImages(images)
Check numpy images and correct intensity range etc.
The same for all movie formats.
"""
# Init results
images2 = []
for im in images:
if PIL and isinstance(im, PIL.Image.Image):
# We assume PIL images are allright
images2.append(im)
elif np and isinstance(im, np.ndarray):
# Check and convert dtype
if im.dtype == np.uint8:
images2.append(im) # Ok
elif im.dtype in [np.float32, np.float64]:
theMax = im.max()
if theMax > 128 and theMax < 300:
pass # assume 0:255
else:
im = im.copy()
im[im<0] = 0
im[im>1] = 1
im *= 255
images2.append( im.astype(np.uint8) )
else:
im = im.astype(np.uint8)
images2.append(im)
# Check size
if im.ndim == 2:
pass # ok
elif im.ndim == 3:
if im.shape[2] not in [3,4]:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('Invalid image type: ' + str(type(im)))
# Done
return images2
def writeIms(filename, images):
""" writeIms(filename, images)
Export movie to a series of image files. If the filenenumber
contains an asterix, a sequence number is introduced at its
location. Otherwise the sequence number is introduced right
before the final dot.
To enable easy creation of a new directory with image files,
it is made sure that the full path exists.
Images should be a list consisting of PIL images or numpy arrays.
The latter should be between 0 and 255 for integer types, and
between 0 and 1 for float types.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to write series of image files.")
# Check images
images = checkImages(images)
# Get dirname and filename
filename = os.path.abspath(filename)
dirname, filename = os.path.split(filename)
# Create dir(s) if we need to
if not os.path.isdir(dirname):
os.makedirs(dirname)
# Insert formatter
filename = _getFilenameWithFormatter(filename, len(images))
# Write
seq = 0
for frame in images:
seq += 1
# Get filename
fname = os.path.join(dirname, filename%seq)
# Write image
if np and isinstance(frame, np.ndarray):
frame = PIL.Image.fromarray(frame)
frame.save(fname)
def readIms(filename, asNumpy=True):
""" readIms(filename, asNumpy=True)
Read images from a series of images in a single directory. Returns a
list of numpy arrays, or, if asNumpy is false, a list if PIL images.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to read a series of image files.")
# Check Numpy
if asNumpy and np is None:
raise RuntimeError("Need Numpy to return numpy arrays.")
# Get dirname and filename
filename = os.path.abspath(filename)
dirname, filename = os.path.split(filename)
# Check dir exists
if not os.path.isdir(dirname):
raise IOError('Directory not found: '+str(dirname))
# Get two parts of the filename
part1, part2 = _getFilenameParts(filename)
# Init images
images = []
# Get all files in directory
for fname in os.listdir(dirname):
if fname.startswith(part1) and fname.endswith(part2):
# Get sequence number
nr = _getSequenceNumber(fname, part1, part2)
# Get Pil image and store copy (to prevent keeping the file)
im = PIL.Image.open(os.path.join(dirname, fname))
images.append((im.copy(), nr))
# Sort images
images.sort(key=lambda x:x[1])
images = [im[0] for im in images]
# Convert to numpy if needed
if asNumpy:
images2 = images
images = []
for im in images2:
# Make without palette
if im.mode == 'P':
im = im.convert()
# Make numpy array
a = np.asarray(im)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
# Add
images.append(a)
# Done
return images
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
220,
220,
15069,
357,
34,
8,
2321,
11,
978,
3876,
22864,
198,
2,
198,
2,
220,
220,
770,
2438,
318,
2426,
284,
262,
357,
3605,
8,
347,
10305,
5964,
25,
198,
2,
... | 2.361402 | 2,767 |
from __future__ import unicode_literals
from django.db import models, transaction
from .tracker import TrackHelper
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
8611,
198,
198,
6738,
764,
2213,
10735,
1330,
17762,
47429,
628,
628
] | 3.75 | 32 |
import numpy as np
# Chapter 2 Beginning with NumPy fundamentals
#
# Demonstrates the NumPy dtype constructors.
#
# Run from the commandline with
#
# python dtypeconstructors.py
print "In: dtype(float)"
print np.dtype(float)
#Out: dtype('float64')
print "In: dtype('f')"
print np.dtype('f')
#Out: dtype('float32')
print "In: dtype('d')"
print np.dtype('d')
#Out: dtype('float64')
print "In: dtype('f8')"
print np.dtype('f8')
#Out: dtype('float64')
print "In: dtype('Float64')"
print np.dtype('Float64')
#Out: dtype('float64')
| [
11748,
299,
32152,
355,
45941,
198,
198,
2,
7006,
362,
25976,
351,
31835,
20519,
33099,
198,
2,
198,
2,
7814,
2536,
689,
262,
31835,
20519,
288,
4906,
5678,
669,
13,
198,
2,
198,
2,
5660,
422,
262,
3141,
1370,
351,
220,
198,
2,
19... | 2.540284 | 211 |
from setuptools import setup
version = open('VERSION').read()
setup(
name='signalfx-instrumentation-redis',
version=version,
url='https://github.com/signalfx/python-redis/',
download_url='https://github.com/signalfx/python-redis/tarball/'+version,
license='Apache License 2.0',
author='SignalFx, Inc.',
author_email='signalfx-oss@splunk.com',
description='OpenTracing support for Redis',
long_description=open('README.rst').read(),
long_description_content_type="text/x-rst",
packages=['redis_opentracing'],
platforms='any',
install_requires=[
'future',
'redis',
'opentracing>=2.0,<2.4'
],
extras_require={
'tests': [
'flake8<3',
'flake8-quotes',
'mock<1.1.0',
'pytest>=2.7,<3',
'pytest-cov<2.6.0',
]
},
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
9641,
796,
1280,
10786,
43717,
27691,
961,
3419,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
12683,
1604,
87,
12,
259,
43872,
341,
12,
445,
271,
3256,
198,
220,
220,
220,
2196,
28,
... | 2.203455 | 521 |
#%%
import qgrid
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
# from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.naive_bayes import GaussianNB,ComplementNB,MultinomialNB,BernoulliNB
import numpy as np
#pd.options.display.max_cols=1000
pd.options.display.max_rows=100
missing_values = ["n/a", "na", "--","NA","?",""," ",-1,"NAN","NaN"]
df=pd.read_csv('kick.csv',na_values=missing_values)
num_rows=len(df)
#model=BernoulliNB() #0.89 accuracy
#0.82 acc
#model= ComplementNB()#GaussianNB()#DecisionTreeClassifier()
drop_cols=list()
print(df.isnull().sum().sum())
print("Dtypes Before Preprocessing",df.dtypes)
null_stats=df.isnull().sum()
#df=df.dropna(thresh=100,axis='rows')
######################################categorically encoding remaining columns###############
##################### & perform one-hot encoding ###########################################
#enc=OneHotEncoder(handle_unknown='ignore')
# for col in df.columns:
# if df[col].dtypes=='object':
# print(col)
# df[col]=df[col].fillna(df[col].value_counts().index[0])
# df[col]=df[col].astype('category')
# cat_df=df[col].cat.codes
# zip_df=pd.DataFrame(list(zip(df[col],cat_df)))
# df[col]=enc.fit_transform(zip_df)
# print(zip_df)
# print("@@@@@@@@@@@@@@@@@@")
# print(cat_df)
# print("******************")
#print(df['Make'])
#%%
for index,val in enumerate(null_stats):
if val > num_rows/2:
#print(val)
drop_cols.append(df.columns[index])
print("####################################")
print(drop_cols)
drop_cols.append('SubModel')
drop_cols.append('Trim')
drop_cols.append('Color')
drop_cols.append('VNST')
drop_cols.append('VehYear')
drop_cols.append('WheelTypeID')
drop_cols.append('VNZIP1')
drop_cols.append('BYRNO')
df=df.drop(drop_cols,axis=1)
#replace missing values of numeric columns by median/mean
df=df.fillna(df.median())
#drop rows with missing values in column with datatype as object/string
df=df.dropna(axis=0)
#%%
# df_new=enc.fit_transform(df)
len(df.columns)
#%%
# import qgrid
#%%
# qgrid.show_grid(df,show_toolbar=True, grid_options={'forceFitColumns': False, 'defaultColumnWidth': 200})
#%%
# grid=qgrid.QGridWidget(df=df)
# display(grid)
#%%
df_new=pd.get_dummies(df)
len(df_new.columns)
#%%
df_new
#%%
#df_new.to_csv('result.csv')
#%%
#%%
print(len(df_new.columns))
df1=df
df=df_new
df_bckup=df
#n=num_rows
n=100
############################# Model Building,Training and Testing Begins ###################################
print("Testing for different data sizes")
model=BernoulliNB()
while n <= 55000:
print("Total Dataset Size={} X {}".format(n,len(df.columns)))
df=df.sample(n)
X_train, X_test, y_train, y_test = train_test_split(df.iloc[:,1:], df['IsBadBuy'] ,stratify=df['IsBadBuy'],test_size=0.49,shuffle=True)
print(X_test.shape)
model.fit(X_train,y_train)
print(model.score(X_test,y_test))
n=n*2 #int(n/2)
df=df_bckup
#%%
from sklearn.model_selection import cross_val_score
from sklearn.metrics import precision_recall_fscore_support
import time
#%%
models=[]
scores=[]
time_taken=[]
models.append(GaussianNB())
models.append(BernoulliNB())
models.append(MultinomialNB())
models.append(ComplementNB())
quad_metrics=[]
for model in models:
s,t,m=train_model(model=model,df=df_new,n=5)
scores.append(s)
quad_metrics.append(m)
time_taken.append(t)
#%%
import matplotlib.pyplot as plt
import seaborn as sns
print("Errors=",scores)
precision=[row[0] for row in quad_metrics]
recall=[row[1] for row in quad_metrics]
print("Time in Seconds {}" .format(time_taken))
model_names=['Gaussian','Bernoulli','Multinomial','ComplementNB']
data=pd.DataFrame(list(zip(model_names,scores)),columns=['Naive Bayes Variants','error=1-accuracy'])
data1=pd.DataFrame(list(zip(model_names,time_taken)),columns=['Naive Bayes Variants','time_taken in seconds'])
data2=pd.DataFrame(list(zip(model_names,precision)),columns=['Naive Bayes Variants','precision'])
data3=pd.DataFrame(list(zip(model_names,recall)),columns=['Naive Bayes Variants','recall'])
sns.barplot(x="Naive Bayes Variants",y="error=1-accuracy",data=data)
plt.show()
sns.barplot(x="Naive Bayes Variants",y="time_taken in seconds",data=data1)
plt.show()
sns.barplot(x="Naive Bayes Variants",y="precision",data=data2)
plt.show()
sns.barplot(x="Naive Bayes Variants",y="recall",data=data3)
plt.show()
#%%
df_new.Make_MAZDA
#%%
#print(df1)
# correlation on original data and not pre-processed data
def get_redundant_pairs(data):
'''Get diagonal and lower triangular pairs of correlation matrix'''
pairs_to_drop = set()
cols = data.columns
print(cols)
for i in range(0, data.shape[1]):
for j in range(0, i+1):
pairs_to_drop.add((cols[i], cols[j]))
return pairs_to_drop
dfc=get_top_abs_correlations(df1,n=50)
print(dfc)
#%%
kde=KernelDensity(kernel='gaussian').fit(df_new.iloc[:,1:])
kde.score_samples(df_new.iloc[:,1:])
#df.iloc[:,1:]
#%%
import seaborn as sns
import matplotlib.pyplot as plt
sns.distplot(df_new.iloc[:,1:]) | [
198,
2,
16626,
198,
11748,
10662,
25928,
198,
6738,
1341,
35720,
13,
710,
394,
32289,
1330,
32169,
35,
6377,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
3272,
62,
2100,
62,
26675,
198,
6738,
1341,
35720,
13,
21048,
1330,
26423,
... | 2.478422 | 2,155 |
#Gemaakt door Joey Einerhand en Bas Körver
andereModuleBerekenen = True
while andereModuleBerekenen == True:
aantalPIs = int(input("Aantal PI's: "))
nummerPIs = []
moetVoldPIs = []
wegingPIs = []
wegingTotaal = 0
opnieuwBerekenen = True
print('')
n = 0
while (n < aantalPIs):
nummerPIs.append(input('Wat is het PI nummer van de ' + str(n + 1) + 'e PI?: '))
n += 1
print('')
n = 0
while (n < aantalPIs):
wegingPIs.append(int(input('Wat is de weging van PI #' + nummerPIs[n] + '?: ')))
n += 1
print('')
n = 0
while (n < aantalPIs):
moetVold = input('Moet PI' + nummerPIs[n] + ' Voldoende zijn? (J/N): ')
if (moetVold == 'J' or moetVold == 'j' or moetVold == 'ja' or moetVold == 'Ja' or moetVold == 'JA' or moetVold == 'jA'):
moetVoldPIs.append(1)
else:
moetVoldPIs.append(0)
n += 1
print('')
n = 0
while (n < aantalPIs):
wegingTotaal += wegingPIs[n]
n += 1
n = 0
while (opnieuwBerekenen == True):
aantalLln = int(input('Aantal leerlingen?: '))
lln = 0
while (aantalLln > 0):
cijfersPIs = []
gewogenGem = 0
eindCijfer = 0
lln += 1
print('')
print('Deze berekening geldt voor de ' + str(lln) + 'e leerling.')
n = 0
while (n < aantalPIs):
cijfersPIs.append(float(input('Wat is het cijfer van PI' + nummerPIs[n] + '?: ')))
n += 1
print('')
n = 0
while (n < aantalPIs):
gewogenGem += (cijfersPIs[n] * wegingPIs[n])
n += 1
n = 0
gewogenGem /= wegingTotaal
while (n < aantalPIs):
if (gewogenGem >= 5.0) and (cijfersPIs[n] < 5.5) and (moetVoldPIs[n] == 1):
eindCijfer = 5.0
n = aantalPIs
else:
eindCijfer = gewogenGem
n += 1
n = 0
print('Het eindcijfer is: ', round(eindCijfer, 1), sep = '')
aantalLln -= 1
print('')
opnieuwLln = input('Wil je voor meer leerlingen het eindcijfer voor deze module berekenen? (J/N): ')
if (opnieuwLln == 'J' or opnieuwLln == 'j' or opnieuwLln == 'ja' or opnieuwLln == 'Ja' or opnieuwLln == 'JA' or opnieuwLln == 'jA'):
print('')
print('Vul opnieuw het aantal leerlingen in voor wie je het eindcijfer wilt berekenen.')
print('')
else:
opnieuwBerekenen = False
print('')
opnieuwMod = input('Wil je voor een andere module eindcijfers berekenen? (J/N): ')
if (opnieuwMod == 'J' or opnieuwMod == 'j' or opnieuwMod == 'ja' or opnieuwMod == 'Ja' or opnieuwMod == 'JA' or opnieuwMod == 'jA'):
print('')
print("Vul opnieuw de informatie over de desbetreffende PI's in.")
print(format('','-^57'))
else:
andereModuleBerekenen = False
print('')
print('Bedankt voor het gebruiken van dit programma.\n', format('Tot ziens!', "^42"))
print('') | [
2,
38,
19687,
461,
83,
3420,
26154,
412,
7274,
4993,
551,
6455,
509,
30570,
332,
198,
198,
392,
567,
26796,
33,
567,
3464,
268,
796,
6407,
198,
4514,
290,
567,
26796,
33,
567,
3464,
268,
6624,
6407,
25,
198,
220,
220,
220,
257,
41... | 1.758127 | 1,815 |
__author__ = "hirusha-adi"
__version__ = "1.0"
import os
import sys
import time
import requests
from server import app
from utils.other import (__CREDITS__, __LOGO__, __MAIN_MENU__, __SLEEP__,
__LoadStuffMenu__, __ResizeMenu__)
from utils.resize import jpeg, jpg, png
if __name__ == "__main__":
while True:
try:
ENTIRE_PROGRAM()
except (KeyboardInterrupt, SystemExit):
sys.exit("\n\nExitting 'main.py'. Have a nice day!")
| [
834,
9800,
834,
796,
366,
71,
343,
46213,
12,
9189,
1,
198,
834,
9641,
834,
796,
366,
16,
13,
15,
1,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
198,
11748,
7007,
198,
198,
6738,
4382,
1330,
598,
198,
6738,
33... | 2.170306 | 229 |
import json
import re
import sys
keywords = ["[Gg]lobal [Hh]istory","[Ww]orld [Hh]istory"]
print "filename\tkeyword"
for line in sys.stdin:
filename = re.search(r'"doi": "([^"]*)"',line).groups()[0]
matches = []
for keyword in keywords:
if re.search(keyword,line):
matches.append(keyword)
print "%s\t%s" %(filename,"--".join(matches))
| [
11748,
33918,
198,
11748,
302,
198,
11748,
25064,
628,
198,
2539,
10879,
796,
14631,
58,
38,
70,
60,
75,
2572,
685,
39,
71,
60,
396,
652,
2430,
58,
54,
86,
60,
1764,
685,
39,
71,
60,
396,
652,
8973,
198,
198,
4798,
366,
34345,
5... | 2.19209 | 177 |
from typing import List | [
6738,
19720,
1330,
7343
] | 5.75 | 4 |
from simplecrypt import decrypt, encrypt
# NOTE(review): 'password' and 'message' are never used below -- confirm
# whether they are leftovers from an earlier version of this script.
password = "zfcxwqjnozidxno"
message = "this is a secret message"

# Encrypt the contents of private.pem, using the *text of public.pem as the
# passphrase* (simplecrypt.encrypt(password, data)), and write the result to
# private.pem.crypt. NOTE(review): the variable names are misleading -- this
# is symmetric passphrase encryption, not public-key cryptography.
with open("private.pem", "r") as private:
    with open("private.pem.crypt", "w") as crypted:
        with open("public.pem", "r") as publickey:
            ciphertext = encrypt(publickey.read(), private.read())
            crypted.write(str(ciphertext))
            print(ciphertext)

# Round-trip check: decrypt with the same passphrase and print the plaintext.
with open("public.pem", "r") as publickey:
    mystring = decrypt(publickey.read(), ciphertext).decode("utf8")
    print(mystring)
| [
6738,
2829,
29609,
1330,
42797,
11,
34117,
198,
198,
28712,
796,
366,
89,
16072,
87,
86,
80,
73,
3919,
89,
312,
87,
3919,
1,
198,
20500,
796,
366,
5661,
318,
257,
3200,
3275,
1,
198,
4480,
1280,
7203,
19734,
13,
79,
368,
1600,
366... | 2.596939 | 196 |
# Read the current salary and print it raised by 15% when it is at most
# R$ 1250.00, or by 10% when it is above that threshold.
salario = float(input('qual o valor do seu salario'))

if salario <= 1250.00:
    aumento = (salario * 15) / 100
    print('seu novo salario é {:2}'.format(salario + aumento))

if salario > 1250.00:
    aumento = (salario * 10) / 100
    print('seu novo salario é {:2}'.format(salario + aumento))
82,
28,
22468,
7,
15414,
10786,
13255,
267,
1188,
273,
466,
384,
84,
3664,
4982,
6,
4008,
198,
361,
264,
19841,
1105,
1120,
13,
405,
220,
1058,
198,
220,
220,
220,
257,
16193,
82,
9,
1314,
20679,
3064,
198,
220,
220,
220,
3601,
10... | 2.038462 | 104 |
# coding: latin-1
###############################################################################
# eVotUM - Electronic Voting System
#
# verifySignature-app.py
#
# Cripto-7.4.1 - Commmad line app to exemplify the usage of verifySignature
# function (see eccblind.py)
#
# Copyright (c) 2016 Universidade do Minho
# Developed by André Baptista - Devise Futures, Lda. (andre.baptista@devisefutures.com)
# Reviewed by Ricardo Barroso - Devise Futures, Lda. (ricardo.barroso@devisefutures.com)
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
###############################################################################
"""
Command line app that receives signer's public key from file and Data, Signature, Blind Components and
prComponents from STDIN and writes a message to STDOUT indicating if the signature is valid..
"""
import sys
from eVotUM.Cripto import eccblind
from eVotUM.Cripto import utils
# Entry point: parse CLI arguments and run the signature verification.
# parseArgs is presumably defined elsewhere in this file -- TODO confirm.
if __name__ == "__main__":
    parseArgs()
| [
2,
19617,
25,
3042,
259,
12,
16,
198,
29113,
29113,
7804,
4242,
21017,
198,
2,
304,
53,
313,
5883,
532,
19508,
30061,
4482,
198,
2,
198,
2,
11767,
11712,
1300,
12,
1324,
13,
9078,
198,
2,
198,
2,
327,
1968,
78,
12,
22,
13,
19,
... | 3.644796 | 442 |
import socket
| [
11748,
17802,
628
] | 5 | 3 |
'''
Created on 23 mars 2014
@author: Mohamed seghilani
'''
import math
import numpy as np
import sys
import matplotlib.pylab as plt
from opencavity.modesolver import AmpMask2D
class FresnelProp(object):
    """Fresnel-propagation engine for 1D and 2D paraxial (ABCD) optical systems.

    State held by an instance:
      - ``wavelength``: unit of length; every distance is expressed in it.
      - ``U1``, ``x1``, ``y1``: start field (complex array) and its plane.
      - ``U2``, ``x2``, ``y2``: propagated field and the observation plane.
      - ``M``: 2x2 real ABCD matrix describing the paraxial system.
      - ``Uyz``, ``zz``: multi-plane propagation chart (see ``yz_prop_chart``).
      - ``dim_flag``: '', '1D' or '2D' -- set when the start field is assigned.

    Typical usage::

        opSys = FresnelProp()                  # create a propagation system
        opSys.set_start_beam(U1, x)            # set the starting field
        opSys.set_ABCD(M1)                     # M1 is an ABCD system matrix
        opSys.propagate1D_ABCD(x2=30 * x)      # evaluate the Fresnel integral
        opSys.show_result_beam(what='intensity')
        opSys.show_result_beam(what='phase')

    Fixes relative to the previous revision:
      - ``set_ABCD``/``cascade_subsystem`` shape test had an operator-precedence
        bug (``&`` binds before ``==``) that accepted e.g. 2x3 matrices.
      - ``value == []`` tests raised/ambiguous for NumPy array arguments.
      - ``set_start_beam`` never stored an explicitly supplied 2D ``y1``.
      - ``propagate2D_ABCD`` produced an empty result when both ``x2`` and
        ``y2`` were left at their defaults.
      - large commented-out ``cpropagate1D_ABCD`` dead code removed.
    """

    def __init__(self, wavelength=1):
        """Create an empty propagation system.

        Args:
            wavelength (float): unit of length; e.g. with L=1000 a distance
                of 1000 means 1000*lambda (1 mm when lambda is 1 micron).
        """
        self.wavelength = wavelength
        self.k = 2 * math.pi / wavelength  # wave number
        self.M = []         # ABCD matrix (2x2 ndarray once assigned)
        self.x1 = []
        self.x2 = []
        self.y1 = []
        self.y2 = []
        self.U1 = []        # start field
        self.U2 = []        # result field
        self.Uyz = []       # result matrix for multi-plane propagation
        self.zz = []        # z axis matching the columns of Uyz
        self.X1 = []        # meshgrid caches (2D case)
        self.Y1 = []
        self.dim_flag = ''  # '', '1D' or '2D'

    @staticmethod
    def _is_empty(value):
        """Return True for None, [] or any zero-length sequence/array.

        Used instead of ``value == []``, which is ambiguous (and raises for
        multi-element arrays) when ``value`` is a NumPy array.
        """
        return value is None or len(value) == 0

    @staticmethod
    def _trapz(y, x):
        """Trapezoidal integration compatible with NumPy 1.x and 2.x."""
        integrate = getattr(np, "trapezoid", None)  # np.trapz removed in NumPy 2.0
        return integrate(y, x) if integrate is not None else np.trapz(y, x)

    def set_start_beam(self, U1, x1, y1=[]):
        """Assign the initial field used by the propagation functions.

        Args:
            U1 (complex 1D or 2D array): the initial field.
            x1 (1D float vector): abscissa of the start plane.
            y1 (1D float vector): ordinate of the start plane (2D case only);
                when omitted for a 2D field it is taken equal to ``x1``.

        Returns:
            None. Sets ``dim_flag`` to '1D' or '2D' from the shape of U1.
        """
        if np.size(U1.shape) == 1:
            self.dim_flag = '1D'
            self.U1 = U1
            self.x1 = x1
            self.y1 = y1
        else:
            self.dim_flag = '2D'
            self.U1 = U1
            self.x1 = x1
            # BUGFIX: 'y1 == []' raised for array inputs, and an explicitly
            # supplied y1 was previously never stored on the instance.
            if self._is_empty(y1):
                print("2D initial beam but y1 is missing..")
                print("y1 is set = x1.")
                self.y1 = x1
            else:
                self.y1 = y1
        return

    def get_start_beam(self):
        """Return the start field set by the user.

        Returns:
            (U1, x1) for a 1D system, (U1, x1, y1) for a 2D one.
            Exits with an error message when no field has been assigned.
        """
        if self.dim_flag == '1D':
            return self.U1, self.x1
        elif self.dim_flag == '2D':
            return self.U1, self.x1, self.y1
        else:
            print("Empty system!")
            sys.exit(1)

    def get_result_beam(self):
        """Return the propagation result.

        Returns:
            (U2, x2) for a 1D system, (U2, x2, y2) for a 2D one.
            Exits with an error message when no field has been assigned.
        """
        if self.dim_flag == '1D':
            return self.U2, self.x2
        elif self.dim_flag == '2D':
            return self.U2, self.x2, self.y2
        else:
            print("Empty system!")
            sys.exit(1)

    def apply_mask1D(self, Mask):
        """Multiply the start field element-wise by a phase/amplitude mask.

        Args:
            Mask (complex 1D array): must have the same length as ``self.U1``.

        Returns:
            None. ``self.U1`` is updated in place; exits on a size mismatch.
        """
        print("Applying 1D Mask...")
        if np.size(self.U1) == np.size(Mask):
            self.U1 = self.U1 * Mask
            print("Mask applied.")
        else:
            print("The phase mask and the initial field must have the same length!")
            sys.exit(1)
        return

    def set_ABCD(self, M):
        """Assign the 2x2 ABCD matrix of the paraxial system.

        Args:
            M (2x2 real ndarray): paraxial propagation matrix.

        BUGFIX: the previous test ``not M.shape[0]==2 & M.shape[1]==2`` bound
        '&' before '==' and silently accepted e.g. a 2x3 matrix.
        """
        if M.shape != (2, 2):
            print("ABCD matrix must be 2x2!")
            sys.exit(1)
        self.M = M
        return

    def cascade_subsystem(self, M2):
        """Left-multiply the current ABCD matrix by ``M2`` (``M2`` acts last).

        When no matrix is set yet, ``M2`` becomes the system matrix. Prefer
        building the full matrix once and calling ``set_ABCD`` instead.

        Note:
            A matrix with zero propagation distance (B == 0) must not be used
            on its own: the Fresnel kernel divides by B.
        """
        if M2.shape != (2, 2):
            print("ABCD matrix must be 2x2!")
            sys.exit(1)
        if self._is_empty(self.M):
            self.M = M2
        else:
            self.M = np.dot(M2, self.M)
        return

    def kernel1D_ABCD(self, x1, x2, A, B, C, D):
        """1D Fresnel propagation kernel (internal helper)."""
        return np.sqrt(1j / (self.wavelength * B)) * np.exp(
            (-1j * self.k / (2 * B)) * (A * x1 ** 2 + D * x2 ** 2 - 2 * x1 * x2))

    def kernel2D_ABCD(self, x1, x2, y1, y2, Ax, Bx, Cx, Dx, Ay=0, By=0, Cy=0, Dy=0):
        """2D Fresnel propagation kernel (internal helper).

        Zero-valued y-axis coefficients default to the x-axis ones, so an
        axially symmetric system only needs Ax..Dx.
        """
        if Ay == 0:
            Ay = Ax
        if By == 0:
            By = Bx
        if Cy == 0:
            Cy = Cx
        if Dy == 0:
            Dy = Dx
        return 1j / (self.wavelength * np.sqrt(Bx * By)) * np.exp(
            (-1j * self.k / 2) * ((Ax * x1 ** 2 + Dx * x2 ** 2 - 2 * x1 * x2) / Bx
                                  + (Ay * y1 ** 2 + Dy * y2 ** 2 - 2 * y1 * y2) / By))

    def propagate1D_ABCD(self, x2=[]):
        """Propagate the 1D start field once through the ABCD system.

        Args:
            x2 (1D float vector): observation-plane coordinates; defaults to
                the start plane ``x1``. Must satisfy the paraxial (Fresnel)
                condition.

        Returns:
            None. The result is stored in ``self.U2`` / ``self.x2``.
        """
        if self.dim_flag == '':
            print('the initial field is empty!')
            sys.exit(1)
        elif self.dim_flag == '2D':
            print('please use 2D propagation function.')
            sys.exit(1)
        if self._is_empty(self.M):
            print('ABCD matrix is not set!')
            sys.exit(1)
        self.x2 = self.x1 if self._is_empty(x2) else x2
        A = self.M[0, 0]; B = self.M[0, 1]; C = self.M[1, 0]; D = self.M[1, 1]
        self.U2 = np.zeros(np.size(self.x2), dtype=complex)
        phase = np.exp(-1j * self.k * B)  # constant plane-to-plane phase, hoisted
        for i in range(np.size(self.x2)):
            Mi = self.U1 * phase * self.kernel1D_ABCD(self.x1, self.x2[i], A, B, C, D)
            self.U2[i] = self._trapz(Mi, self.x1)
        return

    def propagate2D_ABCD(self, x2=[], y2=[]):
        """Propagate the 2D start field once through the ABCD system.

        Args:
            x2, y2 (1D float vectors): observation-plane coordinates; both
                default to the start plane. Must satisfy the paraxial
                (Fresnel) condition.

        Returns:
            None. The result is stored in ``self.U2``/``self.x2``/``self.y2``.

        BUGFIX: when both x2 and y2 were omitted, ``self.y2`` was previously
        set to the empty ``x2`` argument, yielding an empty result matrix.
        """
        if self.dim_flag == '':
            print('the initial field is empty!')
            sys.exit(1)
        elif self.dim_flag == '1D':
            print('please use 1D propagation function.')
            sys.exit(1)
        if self._is_empty(self.M):
            print('ABCD matrix is not set!')
            sys.exit(1)
        print('calculating Fresnel propagation...')
        self.x2 = self.x1 if self._is_empty(x2) else x2
        self.y2 = self.x2 if self._is_empty(y2) else y2
        Ax = self.M[0, 0]; Bx = self.M[0, 1]; Cx = self.M[1, 0]; Dx = self.M[1, 1]
        Ay, By, Cy, Dy = Ax, Bx, Cx, Dx  # axially symmetric system
        nx = np.size(self.x2)
        ny = np.size(self.y2)
        self.U2 = np.zeros((nx, ny), dtype=complex)
        # meshgrid and the constant phase are loop-invariant: hoisted out.
        self.X1, self.Y1 = np.meshgrid(self.x1, self.y1)
        phase = np.exp(-1j * self.k * np.sqrt(Bx * By))
        for i in range(nx):
            for j in range(ny):
                Mi = self.U1 * phase * self.kernel2D_ABCD(
                    self.X1, self.x2[i], self.Y1, self.y2[j],
                    Ax, Bx, Cx, Dx, Ay, By, Cy, Dy)
                inner = self._trapz(Mi, self.x1)
                self.U2[i, j] = self._trapz(inner, self.y1)
        return

    def cpropagate2D_ABCD(self, U1, x1, x2, y1, y2, Ax, Bx, Cx, Dx, Ay=0, By=0, Cy=0, Dy=0):
        """2D propagator delegating the double Fresnel integral to a C library.

        NOTE(review): loads ``libpropagator_c.dll`` from the current working
        directory (Windows-only); confirm the DLL ships alongside the package.
        """
        import ctypes
        import os
        if Ay == 0:
            Ay = Ax
        if By == 0:
            By = Bx
        if Cy == 0:
            Cy = Cx
        if Dy == 0:
            Dy = Dx
        chemin = os.getcwd() + "\\libpropagator_c.dll"
        mydll = ctypes.cdll.LoadLibrary(chemin)

        size_x = np.size(x1)
        size_y = np.size(y1)

        # Arrays of double* are passed to the C function as double**.
        U2_real = (ctypes.POINTER(ctypes.c_double) * size_x)()
        for i in range(size_x):
            U2_real[i] = (ctypes.c_double * size_x)()
        U2_imag = (ctypes.POINTER(ctypes.c_double) * size_x)()
        for i in range(size_x):
            U2_imag[i] = (ctypes.c_double * size_x)()
        U1_real_ptr = (ctypes.POINTER(ctypes.c_double) * size_x)()
        for i in range(size_x):
            U1_real_ptr[i] = (ctypes.c_double * size_x)()
        U1_imag_ptr = (ctypes.POINTER(ctypes.c_double) * size_x)()
        for i in range(size_x):
            U1_imag_ptr[i] = (ctypes.c_double * size_x)()

        # Split the complex field into real/imaginary parts for the C call.
        U1_real = np.array(np.real(U1))
        U1_imag = np.array(np.imag(U1))
        for i in range(size_x):
            for j in range(size_y):
                U1_real_ptr[i][j] = U1_real[i][j]
                U1_imag_ptr[i][j] = U1_imag[i][j]

        c_double_ptr = ctypes.POINTER(ctypes.c_double)
        x1 = x1.astype(np.double)
        x2 = x2.astype(np.double)
        y1 = y1.astype(np.double)
        y2 = y2.astype(np.double)
        x1_ptr = x1.ctypes.data_as(c_double_ptr)
        x2_ptr = x2.ctypes.data_as(c_double_ptr)
        y1_ptr = y1.ctypes.data_as(c_double_ptr)
        y2_ptr = y2.ctypes.data_as(c_double_ptr)

        wavelength_c = ctypes.c_float()
        Ac = ctypes.c_float()
        Bc = ctypes.c_float()
        Cc = ctypes.c_float()
        Dc = ctypes.c_float()
        wavelength_c.value = self.wavelength
        Ac.value = Ax; Bc.value = Bx; Cc.value = Cx; Dc.value = Dx

        mydll.propagate2D(wavelength_c, U1_real_ptr, U1_imag_ptr, U2_real, U2_imag,
                          x1_ptr, x2_ptr, y1_ptr, y2_ptr, size_x, size_y,
                          Ac, Bc, Cc, Dc)

        # Recombine the real/imaginary parts returned by the C routine.
        U2 = np.zeros((np.size(x2), np.size(y2)), dtype=complex)
        for i in range(size_x):
            for j in range(size_y):
                U2[i][j] = U2_real[i][j] + 1j * U2_imag[i][j]
        return U2

    def yz_prop_chart(self, Lmin, Lmax, nstep, x2=[]):
        """Propagate the 1D start field to ``nstep`` planes between Lmin and Lmax.

        Args:
            Lmin (float): first propagation distance (must be < Lmax).
            Lmax (float): last propagation distance.
            nstep (int): number of (linearly spaced) observation planes.
            x2 (1D float vector): observation-plane coordinates; defaults to
                the start plane.

        Returns:
            None. Results are stored column-wise in ``self.Uyz`` with the
            matching distances in ``self.zz``. Overwrites ``self.M`` with a
            free-space matrix for each plane.
        """
        if self.dim_flag == '':
            print('the initial field is empty!')
            sys.exit(1)
        elif self.dim_flag == '2D':
            print('please use 2D propagation function.')
            sys.exit(1)
        self.x2 = self.x1 if self._is_empty(x2) else x2
        dz = (Lmax - Lmin) / nstep
        if dz <= 0:
            print('Lmin must be < Lmax.')
            sys.exit(1)
        zz = np.zeros(nstep)  # all propagation distances
        self.Uyz = np.zeros((np.size(self.U1), nstep), dtype=complex)
        zi = Lmin
        for i in range(nstep):
            zi = zi + dz
            zz[i] = zi
            self.M = np.array([[1, zi], [0, 1]])  # free-space matrix to distance zi
            self.propagate1D_ABCD(self.x2)        # fills self.U2
            self.Uyz[:, i] = self.U2
        self.zz = zz
        return

    def _plot_field(self, field, xaxis, yaxis, what):
        """Open a figure showing amplitude/phase/intensity of ``field``.

        1D data (``yaxis`` is None) is drawn with plot(), 2D with pcolor().
        """
        plt.figure()
        if what == 'amplitude':
            data = np.abs(field)
        elif what == 'phase':
            data = np.angle(field)
        elif what == 'intensity':
            data = np.abs(field) ** 2
        else:
            print("what must be 'amplitude','intensity' or 'phase'")
            return
        if yaxis is None:
            plt.plot(xaxis, data)
        else:
            plt.pcolor(xaxis, yaxis, data)

    def show_result_beam(self, what='amplitude'):
        """Plot the propagated field ``U2``.

        Args:
            what (str): 'amplitude' (default), 'phase' or 'intensity'.
        """
        if self.dim_flag == '':
            print("the system is empty ")
            sys.exit(1)
        elif self.dim_flag == '1D':
            self._plot_field(self.U2, self.x2, None, what)
        else:
            self._plot_field(self.U2, self.x2, self.y2, what)
        return

    def show_start_beam(self, what='amplitude'):
        """Plot the start field ``U1`` assigned by the user.

        Args:
            what (str): 'amplitude' (default), 'phase' or 'intensity'.
        """
        if self.dim_flag == '':
            print("the system is empty ")
            sys.exit(1)
        elif self.dim_flag == '1D':
            self._plot_field(self.U1, self.x1, None, what)
        else:
            self._plot_field(self.U1, self.x1, self.y1, what)
        return

    def show_prop_yz(self, what='amplitude'):
        """Plot the multi-plane propagation chart built by ``yz_prop_chart``.

        Args:
            what (str): 'amplitude' (default), 'phase' or 'intensity'.
        """
        if self._is_empty(self.Uyz):
            print('the propagation chart is empty!')
            sys.exit(1)
        self._plot_field(self.Uyz, self.zz, self.x2, what)
        return
# Module is intended to be imported; nothing runs standalone.
if __name__ == '__main__':
    pass
| [
7061,
6,
198,
41972,
319,
2242,
48962,
1946,
198,
198,
31,
9800,
25,
27469,
384,
456,
346,
3216,
198,
7061,
6,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
11748,
2603,
29487,
8019,
13,
79,
2645,
397,... | 1.896207 | 15,608 |
# -*- coding: utf-8 -*-
"""
Common task with image data.
==================================
20.07.20.
Conversion to QImage and QPixmap
"""
__author__ = 'Johannes Schuck'
__email__ = 'johannes.schuck@gmail.com'
__all__ = ['to_q_image',
'to_q_pixmap']
from PySide2.QtGui import QImage, QPixmap
import numpy as np
def to_q_image(image: np.ndarray):
    """
    Converts an OpenCV / NumPy array to a QImage.

    Expects the image to be 8 bit. Handles grayscale (H, W), BGR (H, W, 3)
    and ARGB (H, W, 4) arrays.

    Parameters
    ----------
    image : np.ndarray
        Input Image

    Returns
    -------
    QImage
        The converted image. If image is None or has an unsupported
        dtype/shape, returns an empty QImage (the previous revision fell
        through and implicitly returned None for unsupported input).
    """
    if image is None:
        return QImage()
    if image.dtype == np.uint8:
        if len(image.shape) == 2:
            height, width = image.shape
            # NOTE(review): Format_Grayscale8 may be the better fit here;
            # Indexed8 needs a color table to display correctly -- confirm.
            return QImage(image.data, width, height, width, QImage.Format_Indexed8)
        if len(image.shape) == 3:
            height, width, ch = image.shape
            if ch == 3:
                return QImage(image.data, width, height, width * 3, QImage.Format_BGR888)
            if ch == 4:
                # BUGFIX: 4-channel ARGB rows are width * 4 bytes, not width * 3.
                return QImage(image.data, width, height, width * 4, QImage.Format_ARGB32)
    return QImage()
def to_q_pixmap(image: QPixmap):
    """
    Converts an OpenCV / NumPy array to a QPixmap.

    Expects the image to be 8 bit; grayscale, BGR and ARGB arrays are
    supported (the conversion itself is delegated to ``to_q_image``).

    Parameters
    ----------
    image : np.ndarray
        Input Image

    Returns
    -------
    QPixmap
        The converted QPixmap. If image is None, returns an empty QPixmap.
    """
    q_image = to_q_image(image)
    return QPixmap(q_image)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
17227,
4876,
351,
2939,
1366,
13,
198,
10052,
855,
198,
198,
1238,
13,
2998,
13,
1238,
13,
198,
3103,
9641,
284,
1195,
5159,
290,
1195,
47,
844,
8899,
198,... | 2.302865 | 733 |
#!/usr/bin/env python3
from setuptools import setup
from django_migrations_diff import get_version
# The long description shown on PyPI comes straight from the README.
with open('README.rst') as f:
    readme = f.read()

# Package metadata for the `mdiff` command-line tool.
setup(
    name='django-migrations-diff',
    version=get_version(),  # single-sourced from the package itself
    description='CLI for comparing Django migrations between two snapshots.',
    long_description=readme,
    author='Denis Krumko',
    author_email='dkrumko@gmail.com',
    url='https://github.com/deniskrumko/django-migrations-diff',
    license="MIT",
    # Installs `mdiff` as a console command pointing at the package's main().
    entry_points={
        'console_scripts': [
            'mdiff = django_migrations_diff.main:main',
        ],
    },
    packages=['django_migrations_diff'],
    python_requires=">=3.6",
    install_requires=['requests'],
    keywords='CLI, Django, Migrations, Diff',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Environment :: Console',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Software Development :: Libraries',
        'Topic :: Utilities',
    ],
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
6738,
42625,
14208,
62,
76,
3692,
602,
62,
26069,
1330,
651,
62,
9641,
198,
198,
4480,
1280,
10786,
15675,
11682,
13,
81,
301,
... | 2.582397 | 534 |
def correlation(self, method):
    '''This is called from reduce_run.py.

    Reduction strategy based on rank-order correlation: the hyperparameter
    columns are converted to a multilabel representation and correlated
    against reduction_metric; when the strongest correlation clears
    reduction_threshold, the corresponding parameter value is removed
    from the search space.
    '''

    import numpy as np

    # transform the data properly first
    from .reduce_utils import cols_to_multilabel
    data = cols_to_multilabel(self)

    # correlations of every multilabel column against the reduction metric
    metric_corr = data.corr(method)[self.reduction_metric]

    # the metric trivially correlates with itself; drop it, then any NaNs
    metric_corr = metric_corr.drop(self.reduction_metric).dropna()

    # nothing meaningful left to reason about -> keep the space untouched
    if len(metric_corr) <= 1:
        return self

    # order so the strongest candidate ends up last
    metric_corr = metric_corr.sort_values(ascending=self.minimize_loss)

    # bail out when even the strongest correlation is below threshold
    if abs(metric_corr[-1]) < self.reduction_threshold:
        return self

    # column label of the strongest correlation: "<param>~<dtype>~<value>"
    strongest = metric_corr.index[-1]
    label, dtype, value = strongest.split('~')

    # convert the value back to its original dtype
    try:
        value = np.array([value]).astype(dtype)[0]
    except Exception:
        print(f'could not convert {value} to type {dtype} for parameter {label}')

    # this is where we modify the parameter space accordingly
    self.param_object.remove_is(label, value)

    return self
| [
4299,
16096,
7,
944,
11,
2446,
2599,
628,
220,
220,
220,
705,
7061,
1212,
318,
1444,
422,
4646,
62,
5143,
13,
9078,
13,
628,
220,
220,
220,
2448,
23914,
257,
17514,
805,
4279,
1502,
16096,
198,
220,
220,
220,
1912,
7741,
13,
3274,
... | 2.921368 | 585 |
from call_forward_flask import app
from call_forward_flask.models import (
Senator,
State,
Zipcode,
)
from flask import (
Response,
redirect,
render_template,
request,
url_for,
)
from twilio.twiml.voice_response import VoiceResponse, Gather
@app.route('/')
def hello():
    """Render the landing page template."""
    landing_page = render_template('index.html')
    return landing_page
@app.route('/callcongress/welcome', methods=['POST'])
def callcongress():
    """Greet the caller and verify or collect their state information."""
    response = VoiceResponse()
    # Twilio includes the caller's state (when known) in the webhook request.
    from_state = request.values.get('FromState', None)

    if from_state:
        # State guess available: ask the caller to confirm with one digit.
        gather = Gather(
            num_digits=1,
            action='/callcongress/set-state',
            method='POST',
            # NOTE(review): 'from_state' is not a documented <Gather>
            # attribute; it is serialized into the TwiML verbatim -- confirm.
            from_state=from_state
        )
        gather.say("Thank you for calling congress! It looks like " +
                   "you\'re calling from {}. ".format(from_state) +
                   "If this is correct, please press 1. Press 2 if " +
                   "this is not your current state of residence.")
    else:
        # No state guess: collect a 5-digit zip code instead.
        gather = Gather(
            num_digits=5,
            action='/callcongress/state-lookup',
            method='POST'
        )
        gather.say("Thank you for calling Call Congress! If you wish to " +
                   "call your senators, please enter your 5-digit zip code.")

    response.append(gather)
    return Response(str(response), 200, mimetype="application/xml")
@app.route('/callcongress/state-lookup', methods=['GET', 'POST'])
def state_lookup():
    """Resolve the caller's state from the zip code they entered.

    Once the state is found, redirect to call_senators for forwarding.
    """
    digits = request.values.get('Digits', None)

    # NB: no error handling for a missing/erroneous zip code in this sample
    # application. You, gentle reader, should handle that edge case before
    # deploying this code.
    matching_zip = Zipcode.query.filter_by(zipcode=digits).first()
    return redirect(url_for('call_senators', state_id=matching_zip.state_id))
@app.route('/callcongress/collect-zip', methods=['GET', 'POST'])
def collect_zip():
    """Prompt the caller for their zip code when our state guess was wrong."""
    response = VoiceResponse()
    zip_gather = Gather(
        num_digits=5,
        action='/callcongress/state-lookup',
        method='POST'
    )
    zip_gather.say("If you wish to call your senators, please " +
                   "enter your 5-digit zip code.")
    response.append(zip_gather)
    return Response(str(response), 200, mimetype="application/xml")
@app.route('/callcongress/set-state', methods=['GET', 'POST'])
def set_state():
    """Set state for the senator call list.

    Digit 1 confirms the geolocated state; anything else (or a state we
    cannot find) falls back to asking for a zip code. On success, redirect
    to the call_senators route.
    """
    # The digit the caller pressed on the previous <Gather>.
    pressed = request.values.get('Digits', None)
    if pressed == '1':
        caller_state = request.values.get('CallerState')
        state_row = State.query.filter_by(name=caller_state).first()
        if state_row:
            return redirect(url_for('call_senators', state_id=int(state_row.id)))
    # Unconfirmed or unknown state: collect the zip code instead.
    return redirect(url_for('collect_zip'))
@app.route('/callcongress/call-senators/<state_id>', methods=['GET', 'POST'])
def call_senators(state_id):
    """Connect the caller to both of their senators, one after the other."""
    senators = State.query.get(state_id).senators.all()
    first, second = senators[0], senators[1]
    response = VoiceResponse()
    response.say(
        "Connecting you to {}. ".format(first.name) +
        "After the senator's office ends the call, you will " +
        "be re-directed to {}.".format(second.name)
    )
    # When the first call ends, Twilio requests the action URL, which
    # dials the second senator.
    response.dial(
        first.phone,
        action=url_for('call_second_senator', senator_id=second.id)
    )
    return Response(str(response), 200, mimetype="application/xml")
@app.route(
    '/callcongress/call-second-senator/<senator_id>',
    methods=['GET', 'POST']
)
def call_second_senator(senator_id):
    """Forward the caller to their second senator."""
    target = Senator.query.get(senator_id)
    response = VoiceResponse()
    response.say("Connecting you to {}.".format(target.name))
    # After this call finishes, thank the caller and hang up.
    response.dial(
        target.phone,
        action=url_for('end_call')
    )
    return Response(str(response), 200, mimetype="application/xml")
@app.route('/callcongress/goodbye', methods=['GET', 'POST'])
def end_call():
    """Thank the caller and hang up."""
    response = VoiceResponse()
    response.say("Thank you for using Call Congress! " +
                 "Your voice makes a difference. Goodbye.")
    response.hangup()
    return Response(str(response), 200, mimetype="application/xml")
| [
6738,
869,
62,
11813,
62,
2704,
2093,
1330,
598,
198,
6738,
869,
62,
11813,
62,
2704,
2093,
13,
27530,
1330,
357,
198,
220,
220,
220,
8962,
11,
198,
220,
220,
220,
1812,
11,
198,
220,
220,
220,
38636,
8189,
11,
198,
8,
198,
198,
... | 2.592091 | 1,846 |
"""
A module to show off hwo to recover from an error.
This module shows what happens when there is not a try-except statement
immediately around the error, but there is one up higher in the call stack.
Author: Walker M. White
Date: March 30, 2019
"""
def function_1(x,y):
    """
    Have function_2 do all the work

    Top of the demo call chain. Guards the call with try-except so that an
    error escaping function_2 would be replaced by float('inf') here.
    NOTE(review): function_2 below has its own except, so for a simple
    division error this handler never actually fires — confirm that is the
    intended demonstration.
    """
    print('Starting function_1')
    result = 0 # try-except is like if-else. Initialize a var for right scope
    try:
        print('Starting try')
        result = function_2(x,y)
        print('Completing try')
    except:
        print('Starting except')
        result = float('inf')
        print('Completing except')
    print('Completing function_1')
    return result
def function_2(x,y):
    """
    Have function_3 do all the work

    Also guards the call: if function_3 raises, the closest enclosing
    try-except is this one, so the error is absorbed here and replaced
    by float('inf') — the caller's handler never sees it.
    """
    print('Starting function_2')
    try:
        result = function_3(x,y)
    except:
        result = float('inf')
    print('Completing function_2')
    return result
def function_3(x,y):
    """Returns: x divided by y"""
    # Bottom of the call chain: this is where a ZeroDivisionError can occur.
    print('Starting function_3')
    quotient = x / y
    print('Completing function_3')
    return quotient
# Script Code
if __name__ == "__main__":
    # 1/0 raises ZeroDivisionError in function_3; it is caught inside
    # function_2, so this prints inf rather than crashing.
    print(function_1(1,0))
| [
37811,
198,
32,
8265,
284,
905,
572,
289,
21638,
284,
8551,
422,
281,
4049,
13,
198,
198,
1212,
8265,
2523,
644,
4325,
618,
612,
318,
407,
257,
1949,
12,
16341,
2643,
198,
320,
23802,
1088,
262,
4049,
11,
475,
612,
318,
530,
510,
... | 2.627494 | 451 |
import pandas as pd | [
198,
11748,
19798,
292,
355,
279,
67
] | 2.857143 | 7 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 12 14:25:34 2018
@author: akira
"""
from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
from Cython.Distutils import build_ext
# Extension definition for the Cython module SDN_MultipleTrial: links libm
# and enables fast-math plus OpenMP in both compile and link steps.
ext_modules=[ Extension("SDN_MultipleTrial",
              ["SDN_MultipleTrial.pyx"],
              libraries=["m"],
              extra_compile_args = ["-ffast-math","-fopenmp"],
              extra_link_args = ['-fopenmp'])
]
# Standard distutils entry point; build in place with:
#   python setup.py build_ext --inplace
setup(
  cmdclass = {"build_ext":build_ext},
  ext_modules = cythonize("SDN_MultipleTrial.pyx")
)
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
1526,
1105,
1478,
25,
1495,
25,
2682,
2864,
198,
198,
31,
9800,
25,
47594,
8704,
19... | 2.328063 | 253 |
#-*- coding: utf-8 -*-
import unittest
from django.template import TemplateSyntaxError
from django.test import TestCase as DjangoTestCase
from django_comments_xtd.templatetags.comments_xtd import render_markup_comment, formatter
@unittest.skipIf(not formatter, "This test case needs django-markup, docutils and markdown installed to be run")
@unittest.skipIf(formatter, "This test case needs django-markup or docutils or markdown not installed to be run")
| [
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
42625,
14208,
13,
28243,
1330,
37350,
13940,
41641,
12331,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
355,
37770,
1440... | 3.171233 | 146 |
#
# PySNMP MIB module ALCATEL-IND1-OSPF3-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ALCATEL-IND1-OSPF3-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:18:46 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# --- Symbol imports resolved at load time by pysnmp's mibBuilder (which is
# --- injected into this generated module's namespace by the MIB loader).
routingIND1Ospf3, = mibBuilder.importSymbols("ALCATEL-IND1-BASE", "routingIND1Ospf3")
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, ObjectIdentity, Counter64, Integer32, Bits, IpAddress, NotificationType, iso, MibIdentifier, ModuleIdentity, Gauge32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "ObjectIdentity", "Counter64", "Integer32", "Bits", "IpAddress", "NotificationType", "iso", "MibIdentifier", "ModuleIdentity", "Gauge32", "TimeTicks")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# --- Module identity (enterprise OID 6486 = Alcatel) and revision history.
alcatelIND1OSPF3MIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1))
alcatelIND1OSPF3MIB.setRevisions(('2007-04-03 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: alcatelIND1OSPF3MIB.setRevisionsDescriptions(('The latest version of this MIB Module.',))
if mibBuilder.loadTexts: alcatelIND1OSPF3MIB.setLastUpdated('200704030000Z')
if mibBuilder.loadTexts: alcatelIND1OSPF3MIB.setOrganization('Alcatel-Lucent')
if mibBuilder.loadTexts: alcatelIND1OSPF3MIB.setContactInfo('Please consult with Customer Service to ensure the most appropriate version of this document is used with the products in question: Alcatel-Lucent, Enterprise Solutions Division (Formerly Alcatel Internetworking, Incorporated) 26801 West Agoura Road Agoura Hills, CA 91301-5122 United States Of America Telephone: North America +1 800 995 2696 Latin America +1 877 919 9526 Europe +31 23 556 0100 Asia +65 394 7933 All Other +1 818 878 4507 Electronic Mail: support@ind.alcatel.com World Wide Web: http://alcatel-lucent.com/wps/portal/enterprise File Transfer Protocol: ftp://ftp.ind.alcatel.com/pub/products/mibs')
if mibBuilder.loadTexts: alcatelIND1OSPF3MIB.setDescription('This module describes an authoritative enterprise-specific Simple Network Management Protocol (SNMP) Management Information Base (MIB): This proprietary MIB contains management information for the configuration of OSPFv3 global configuration parameters. The right to make changes in specification and other information contained in this document without prior notice is reserved. No liability shall be assumed for any incidental, indirect, special, o r consequential damages whatsoever arising from or related to this document or the information contained herein. Vendors, end-users, and other interested parties are granted non-exclusive license to use this specification in connection with management of the products for which it is intended to be used. Copyright (C) 1995-2003 Alcatel-Lucent ALL RIGHTS RESERVED WORLDWIDE')
# --- Read-write scalar configuration objects under alaProtocolOspf3.
alcatelIND1OSPF3MIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1, 1))
alaProtocolOspf3 = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1, 1, 1))
alaOspf3OrigRouteTag = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1, 1, 1, 1), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alaOspf3OrigRouteTag.setStatus('current')
if mibBuilder.loadTexts: alaOspf3OrigRouteTag.setDescription('Route tag that is originated with ASEs')
alaOspf3TimerSpfDelay = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alaOspf3TimerSpfDelay.setStatus('current')
if mibBuilder.loadTexts: alaOspf3TimerSpfDelay.setDescription('Delay (in seconds) between topology change and SPF run')
alaOspf3TimerSpfHold = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alaOspf3TimerSpfHold.setStatus('current')
if mibBuilder.loadTexts: alaOspf3TimerSpfHold.setDescription('Delay (in seconds) between subsequent SPF executions')
alaOspf3RestartHelperSupport = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alaOspf3RestartHelperSupport.setStatus('current')
if mibBuilder.loadTexts: alaOspf3RestartHelperSupport.setDescription('This router can be a helper to another restarting router')
alaOspf3RestartStrictLsaChecking = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alaOspf3RestartStrictLsaChecking.setStatus('current')
if mibBuilder.loadTexts: alaOspf3RestartStrictLsaChecking.setDescription('Will changed LSA result in restart termination')
alaOspf3RestartInitiate = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alaOspf3RestartInitiate.setStatus('current')
if mibBuilder.loadTexts: alaOspf3RestartInitiate.setDescription('Start a graceful restart')
alaOspf3MTUCheck = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alaOspf3MTUCheck.setStatus('current')
if mibBuilder.loadTexts: alaOspf3MTUCheck.setDescription('Verify the MTU of a neighbor matches our own.')
# --- Conformance: one compliance statement over one object group.
alcatelIND1OSPF3MIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1, 2))
alcatelIND1OSPF3MIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1, 2, 1))
alcatelIND1OSPF3MIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1, 2, 2))
alcatelIND1OSPF3MIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1, 2, 1, 1)).setObjects(("ALCATEL-IND1-OSPF3-MIB", "alaOSPF3ConfigMIBGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    alcatelIND1OSPF3MIBCompliance = alcatelIND1OSPF3MIBCompliance.setStatus('current')
if mibBuilder.loadTexts: alcatelIND1OSPF3MIBCompliance.setDescription('The compliance statement for OSPFv3 and implementing the ALCATEL-IND1-OSPF3 MIB.')
alaOSPF3ConfigMIBGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 10, 13, 1, 2, 2, 1)).setObjects(("ALCATEL-IND1-OSPF3-MIB", "alaOspf3OrigRouteTag"), ("ALCATEL-IND1-OSPF3-MIB", "alaOspf3TimerSpfDelay"), ("ALCATEL-IND1-OSPF3-MIB", "alaOspf3TimerSpfHold"), ("ALCATEL-IND1-OSPF3-MIB", "alaOspf3RestartHelperSupport"), ("ALCATEL-IND1-OSPF3-MIB", "alaOspf3RestartStrictLsaChecking"), ("ALCATEL-IND1-OSPF3-MIB", "alaOspf3RestartInitiate"), ("ALCATEL-IND1-OSPF3-MIB", "alaOspf3MTUCheck"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    alaOSPF3ConfigMIBGroup = alaOSPF3ConfigMIBGroup.setStatus('current')
if mibBuilder.loadTexts: alaOSPF3ConfigMIBGroup.setDescription('A collection of objects to support management of OSPF3.')
# --- Export all defined symbols so other MIB modules can import them.
mibBuilder.exportSymbols("ALCATEL-IND1-OSPF3-MIB", alaOspf3TimerSpfHold=alaOspf3TimerSpfHold, alaOspf3OrigRouteTag=alaOspf3OrigRouteTag, alaProtocolOspf3=alaProtocolOspf3, alaOSPF3ConfigMIBGroup=alaOSPF3ConfigMIBGroup, alcatelIND1OSPF3MIB=alcatelIND1OSPF3MIB, alcatelIND1OSPF3MIBGroups=alcatelIND1OSPF3MIBGroups, alcatelIND1OSPF3MIBCompliance=alcatelIND1OSPF3MIBCompliance, alaOspf3TimerSpfDelay=alaOspf3TimerSpfDelay, alaOspf3RestartStrictLsaChecking=alaOspf3RestartStrictLsaChecking, alcatelIND1OSPF3MIBConformance=alcatelIND1OSPF3MIBConformance, alaOspf3MTUCheck=alaOspf3MTUCheck, alcatelIND1OSPF3MIBCompliances=alcatelIND1OSPF3MIBCompliances, PYSNMP_MODULE_ID=alcatelIND1OSPF3MIB, alaOspf3RestartHelperSupport=alaOspf3RestartHelperSupport, alaOspf3RestartInitiate=alaOspf3RestartInitiate, alcatelIND1OSPF3MIBObjects=alcatelIND1OSPF3MIBObjects)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
317,
5639,
1404,
3698,
12,
12115,
16,
12,
2640,
42668,
18,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,... | 2.637046 | 3,466 |
"""GLEDOPTO Soposh Dual White and color 5W GU10 300lm device."""
from zigpy.profiles import zll
from zigpy.profiles.zll import DeviceType
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import (
Basic,
Groups,
Identify,
LevelControl,
OnOff,
Scenes,
)
from zigpy.zcl.clusters.lighting import Color
from zigpy.zcl.clusters.lightlink import LightLink
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
class SoposhGU10(CustomDevice):
    """GLEDOPTO Soposh Dual White and color 5W GU10 300lm."""

    # What the stock firmware reports: endpoint 11 is the light itself,
    # endpoint 13 carries only the Touchlink (LightLink) commissioning cluster.
    signature = {
        ENDPOINTS: {
            11: {
                PROFILE_ID: zll.PROFILE_ID,
                DEVICE_TYPE: DeviceType.EXTENDED_COLOR_LIGHT,
                INPUT_CLUSTERS: [
                    Basic.cluster_id,
                    Identify.cluster_id,
                    Groups.cluster_id,
                    Scenes.cluster_id,
                    OnOff.cluster_id,
                    LevelControl.cluster_id,
                    Color.cluster_id,
                ],
                OUTPUT_CLUSTERS: [],
            },
            13: {
                PROFILE_ID: zll.PROFILE_ID,
                DEVICE_TYPE: DeviceType.EXTENDED_COLOR_LIGHT,
                INPUT_CLUSTERS: [LightLink.cluster_id],
                OUTPUT_CLUSTERS: [LightLink.cluster_id],
            },
        }
    }

    # Replacement exposes only endpoint 11; the LightLink-only endpoint 13
    # is dropped from the quirked device.
    replacement = {
        ENDPOINTS: {
            11: {
                PROFILE_ID: zll.PROFILE_ID,
                DEVICE_TYPE: DeviceType.EXTENDED_COLOR_LIGHT,
                INPUT_CLUSTERS: [
                    Basic.cluster_id,
                    Identify.cluster_id,
                    Groups.cluster_id,
                    Scenes.cluster_id,
                    OnOff.cluster_id,
                    LevelControl.cluster_id,
                    Color.cluster_id,
                ],
                OUTPUT_CLUSTERS: [],
            }
        }
    }
| [
37811,
8763,
1961,
3185,
10468,
35643,
3768,
20446,
2635,
290,
3124,
642,
54,
19348,
940,
5867,
75,
76,
3335,
526,
15931,
198,
6738,
1976,
328,
9078,
13,
5577,
2915,
1330,
1976,
297,
198,
6738,
1976,
328,
9078,
13,
5577,
2915,
13,
89,... | 1.709013 | 1,165 |
import deepdish as dd
@Foo.register('bar')
| [
11748,
2769,
67,
680,
355,
49427,
628,
198,
31,
37,
2238,
13,
30238,
10786,
5657,
11537,
198
] | 2.647059 | 17 |
import pytest
from hw.task_1_read_file import MagicNumberError, read_magic_number
@pytest.fixture
@pytest.mark.parametrize("opened_file", [
'2\nHi, fellows\nisdigit',
'2.4\nStar\nWars',
'1\n',
'2.999999\n3\n5',
], indirect=True)
@pytest.mark.parametrize("opened_file", [
'100\nBye, fellows\nis',
'3\nLOTR',
'-3\n',
'0.999999',
'1e4\n',
], indirect=True)
@pytest.mark.parametrize("opened_file", [
'\nBye, fellows\nis',
'2,1\nLOTR',
'0.9.3\n',
'python4.999999',
], indirect=True)
@pytest.mark.parametrize("opened_file", '\n', indirect=True)
| [
11748,
12972,
9288,
198,
6738,
289,
86,
13,
35943,
62,
16,
62,
961,
62,
7753,
1330,
6139,
15057,
12331,
11,
1100,
62,
32707,
62,
17618,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
38... | 2.016779 | 298 |
from setuptools import setup, find_packages
# with open("README.md", "r", encoding='UTF-8') as fh:
# long_description = fh.read()
# Package metadata for the DRLS distribution.
setup(
    name="DRLS",
    version="1.0.0",
    description="Tsinghua network tools",
    long_description="",
    # FIX: was "Apache 2.0 Licence" (misspelling of License).
    license="Apache 2.0 License",
    url="",
    author="dongbaishun",
    author_email="zcm18@mails.tsinghua.edu.cn",
    packages=find_packages(),
    include_package_data=True,
    platforms="any",
    install_requires=[],
    scripts=[],
    classifiers=[
        "Programming Language :: Python :: 3",
        # FIX: "License :: OSI Approved :: Apache 2.0 License" is not a valid
        # trove classifier; PyPI rejects uploads with unknown classifiers.
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    entry_points={
        # Installs a `DRLS` console command that runs DRLS/__main__.py:main().
        'console_scripts': [
            'DRLS=DRLS.__main__:main'
        ]
    }
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
2,
351,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
1600,
21004,
11639,
48504,
12,
23,
11537,
355,
277,
71,
25,
198,
2,
220,
220,
220,
220,
890,
62,
11213,
... | 2.366972 | 327 |
from download_videos import download_video as download
from process_audio import extract_audio as audio
from process_video import process_video as video
import sys, os
import pytube
import shutil
import re
import json
# Sanity check
if sys.version_info < (3, 0):
    raise Exception("Code must be run using python3")
# Already exists no calculate: if this video was processed before, just echo
# its cached metadata and exit without re-downloading anything.
video_id, yt = download.get_video_id(sys.argv[1])
if os.path.exists("./public/saves/" + video_id):
    with open("./public/saves/" + video_id + "/meta_data.json") as f:
        # Output protocol consumed by the Node side: "DONE <id> <metadata-json>".
        print("DONE {} {}".format(
            video_id,
            f.read()
        ))
    __import__("sys").exit(0)
# Not a url: mimic pytube's RegexMatchError for inputs that clearly are not
# YouTube links, before attempting any download.
if not re.match(r"^(http(s)?:\/\/)?((w){3}.)?youtu(be|.be)?(\.com)?\/.+", sys.argv[1]):
    raise Exception("pytube.exceptions.RegexMatchError")

# 1. Download the video
# Raises an exception if not a valid url
try:
    _, _, metadata = download.download_video(sys.argv[1])
except Exception as e:
    # Clean up any partially-created save directory before re-raising.
    if os.path.exists("./public/saves/" + video_id):
        shutil.rmtree("./public/saves/" + video_id)
    raise Exception(str(e))


def make_if_not_exist(path):
    """Create directory *path* (including parents) if it does not exist."""
    # FIX: this helper was called below but never defined or imported anywhere
    # in this script, so both calls raised NameError at runtime.
    os.makedirs(path, exist_ok=True)


# Intermediate: make some directories
make_if_not_exist("./public/saves/" + video_id + "/video_snippets")
make_if_not_exist("./public/saves/" + video_id + "/audio_snippets")
# 2. Extract breakpoints from video
try:
    breakpoints = audio.extract_audio(yt, video_id)
except:
    # NOTE(review): bare except assumes every failure means missing captions;
    # unrelated errors (disk, network) are masked behind the same message.
    if os.path.exists("./public/saves/" + video_id):
        shutil.rmtree("./public/saves/" + video_id)
    raise Exception("Video has no captions")
# 3. Slice and format the video into per-breakpoint snippets.
temp_vid = "./public/saves/" + video_id + "/temp.mp4"
video.set_cut_dir(video_id)
video.process_videos(temp_vid, breakpoints)
# 4. Thumbnail: grab one frame at t=5s via ffmpeg.
video.run_process(video.FFMPEG_DIR, ["-ss", "00:00:05", "-i", temp_vid, "-vframes", "1", "-q:v", "2",
                  "./public/saves/" + video_id + "/thumb.jpg"])
# os.remove(temp_vid)
# 5. Make the HTML template
# Build one audio + muted-video + text row per non-empty caption snippet.
# NOTE(review): audio snippets are numbered by `count` (skips empty captions)
# while video snippets use the raw enumerate index `i` — confirm the two
# numbering schemes are intentional and match the files written in steps 2-3.
transcriptions = ""
count = 0
for i, b in enumerate(breakpoints):
    # Each breakpoint dict appears to have a single key whose value is the
    # caption text — TODO confirm against extract_audio's output.
    text = b[list(b.keys())[0]]
    if text == "": continue
    transcriptions += """
        <audio id="audio-{id}" style="display: none">
            <source src="/saves/{video_id}/audio_snippets/{id}.mp3" type="audio/mpeg">
            Your browser does not support the audio element.
        </audio>
        <video width="50%" src="/saves/{video_id}/video_snippets/{id2}.mp4" muted loop></video>

        <table class="text-snippet" style="width: calc(100% + 160px); position: relative; left: -160px">
            <tr>
                <td style="width: 160px">
                    <!-- Toggle volume_up and pause -->
                    <button onclick="togglePlaybackSound({id}, this)" class="round-button inline-big-button" style="margin: auto"><i class="material-icons">
volume_up</i></button>
                    <button class="round-button inline-big-button" style="margin: auto"><i class="material-icons">
refresh</i></button>
                </td>
                <td style="width: calc(100% - 160px)">
                    <p>
                        {text}
                    </p>
                </td>
            </tr>
        </table>
""".format(video_id=video_id, id=count, text=text, id2=i)
    count += 1
# Full HTML page for this video: abridged player, speed controls, and the
# per-snippet transcription rows built above. Format placeholders: {title},
# {author}, {time}, {src}, {transcriptions}, {speeds}.
template = """
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="utf-8">

  <title>2XStudy</title>
  <meta name="description" content="An app to transcribe and abridge youtube videos for studying">
  <meta name="author" content="TheHumbleOnes">

  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <link rel="stylesheet" href="/css/main.css">
  <link rel="stylesheet" href="/css/page.css">
  <link href="https://fonts.googleapis.com/css?family=Jomolhari|Montserrat&display=swap" rel="stylesheet">
  <link href="https://fonts.googleapis.com/icon?family=Material+Icons" rel="stylesheet">
  <link href="https://fonts.googleapis.com/css?family=Inconsolata&display=swap" rel="stylesheet">
</head>

<body>
    <script>
let BREAKS = {speeds};
    </script>

    <div class="title-bar">
        <a href="/index.html"><img src="/img/logo.png" class="logo"></a>
        <ul class="nav">
            <li><a href="/index.html">Home</a></li>
            <li><a href="/about.html">About</a></li>
            <li><a href="https://github.com/Gavin-Song/2XStudy">Github</a></li>
        </ul>
    </div>

    <div class="page-container">
        <br><br><br><br>

        <!-- Absolutely placed -->
        <button onclick="window.location.href = '/index.html';" class="invisible-button inline-big-button back-button"><i class="material-icons">
arrow_back_ios
</i></button>
        <button class="round-button inline-big-button edit-button"><i class="material-icons">
edit</i></button>
        <button onclick="share()" class="round-button inline-big-button share-button"><i class="material-icons">
share</i></button>

        <!-- Normal stuff -->
        <br>
        <h1>{title}</h1>
        <small style="position: relative; top: -10px">{author} | {time}</small>
        <br><br>

        <h2>Abriged Video</h2><br>
        <video id="main-vid" width="100%" height="auto" src="{src}" type="video/mp4" controls>
            Your browser does not support the video tag.
        </video>
        <div id="video-speed-control">
            <button id="video-speed-0.25" onclick="videoSpeed(0.25)">0.25x</button>
            <button id="video-speed-0.5" onclick="videoSpeed(0.5)">0.5x</button>
            <button id="video-speed-0.75" onclick="videoSpeed(0.75)">0.75x</button>
            <button id="video-speed-1" class="active" onclick="videoSpeed(1)">1x</button>
            <button id="video-speed-1.25" onclick="videoSpeed(1.25)">1.25x</button>
            <button id="video-speed-1.5" onclick="videoSpeed(1.5)">1.5x</button>
            <button id="video-speed-1.75" onclick="videoSpeed(1.75)">1.75x</button>
            <button id="video-speed-2" onclick="videoSpeed(2)">2x</button>
            <button id="video-speed-2.5" onclick="videoSpeed(2.5)">2.5x</button>
            <button id="video-speed-3" onclick="videoSpeed(3)">3x</button>
        </div>

        <br><br><br>
        <h2>Transcription</h2>
        {transcriptions}

        <br><br><br><br>
    </div>

    <div class="modal" style="display: none" id="modal">
        <button onclick="document.getElementById('modal').style.display = 'none';"
            style="float: right" class="invisible-button inline-big-button">
            <i class="material-icons">close</i></button>
        <div id="qrcode"></div>
        <br>
        Or share this url
        <div class="monospace" id="url">
        </div>
    </div>

    <!-- NO JS ENABLED -->
    <noscript>
        <div class="modal" style="background-color: red" id="modal">
            <button onclick="document.getElementById('modal').style.display = 'none';"
                class="invisible-button inline-big-button">
                <i class="material-icons">close</i></button>
            Please enable javascript!
        </div>
    </noscript>

    <script src="/js/qrcode.js"></script>
    <script src="/js/share.js"></script>
    <script src="/js/video.js"></script>
</body>
</html>""".format(
    title=metadata["video_title"],
    author=metadata["video_author"],
    time=metadata["video_length"],
    src="/saves/" + video_id + "/temp.mp4",
    transcriptions=transcriptions,
    speeds=json.dumps(breakpoints)
)
# Write the rendered page and report completion (same "DONE" protocol as the
# cached-result fast path above).
with open("./public/saves/" + video_id + "/index.html", "w") as f:
    f.write(template)
print("DONE {} {}".format(video_id, json.dumps(metadata)))
6738,
4321,
62,
32861,
1330,
4321,
62,
15588,
355,
4321,
198,
6738,
1429,
62,
24051,
1330,
7925,
62,
24051,
355,
6597,
198,
6738,
1429,
62,
15588,
1330,
1429,
62,
15588,
355,
2008,
198,
198,
11748,
25064,
11,
28686,
198,
11748,
12972,
... | 2.294656 | 3,275 |
__author__ = 'antonellacalvia'
from .node import * | [
834,
9800,
834,
796,
705,
415,
505,
297,
330,
282,
8869,
6,
198,
198,
6738,
764,
17440,
1330,
1635
] | 2.684211 | 19 |
from django.core.management.base import BaseCommand, CommandError
from ...fetch.fetchers import UserIdFetcher, UserFetcher
from ...models import Account, User
class Command(BaseCommand):
    """For fetching data about an Account's Flickr user.

    Should create/update them in our DB, and associate the User with the Account.

    ./manage.py fetch_flickr_account_user --id=1
    """

    # NOTE(review): no add_arguments()/handle() is visible here — the --id
    # option and the UserIdFetcher/UserFetcher usage presumably live further
    # down in this file; confirm the command is actually wired up.
    help = "Fetches data for an Account's Flickr User"
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
11,
9455,
12331,
198,
198,
6738,
2644,
69,
7569,
13,
34045,
3533,
1330,
11787,
7390,
37,
316,
2044,
11,
11787,
37,
316,
2044,
198,
6738,
2644,
27530,
1330,
10781,
1... | 3.270073 | 137 |
# -*- coding: utf-8 -*-
from contextlib import contextmanager
import os
@contextmanager
def session_scope():
    """
    Provide a transactional scope around a series of operations using the
    connection of the running alembic migration.

    Commits on success, rolls back (and re-raises) on error, and always
    closes the session.

    :return: yields a SQLAlchemy session bound to alembic's connection
    """
    # Imported lazily: alembic's `op` proxy is only usable while a
    # migration is executing.
    from alembic import op
    from sqlalchemy.orm import sessionmaker
    session_cls = sessionmaker(bind=op.get_bind())
    session_ins = session_cls()
    try:
        yield session_ins
        session_ins.commit()
    except:
        session_ins.rollback()
        # FIX: the exception was previously swallowed here, so a failed
        # migration step looked successful. Re-raise after rollback, per the
        # canonical SQLAlchemy session_scope recipe.
        raise
    finally:
        session_ins.close()
def reflect_model(tablename):
    """
    Build a SQLAlchemy model on the fly from the current schema of the given
    table in the database alembic is connected to.

    :param tablename: name of the table to reflect
    :return: the automapped model class for that table
    """
    from alembic import op
    from sqlalchemy.ext.automap import automap_base
    base_cls = automap_base()
    # Reflect every table over alembic's live connection, then pick ours.
    base_cls.prepare(op.get_bind().engine, reflect=True)
    return base_cls.classes[tablename]
def bulk_insert_data(tablename, rows, multiinsert=True):
    """
    Insert rows into a table whose model is reconstructed from the live
    database schema.

    :param tablename: The table name of the rows will be inserted.
    :param rows: a list of dictionaries indicating rows
    :param multiinsert: passed through to ``op.bulk_insert``
    :return: None
    """
    # Validate shape up front (only the first row is sampled).
    if not isinstance(rows, list):
        raise TypeError('rows parameter is expected to be list type')
    if rows and not isinstance(rows[0], dict):
        raise TypeError("rows parameter is expected to be list of dict type")
    from alembic import op
    op.bulk_insert(reflect_model(tablename).__table__, rows, multiinsert)
def absolute_path(relative_path):
    """
    Resolve a path relative to the alembic migration folder.

    :param relative_path: the path relative to alembic migration folder
    :return: the absolute, normalized path
    :raises Exception: if the 'here' option is missing from alembic's config
    """
    from alembic import context
    current_path = context.config.get_main_option('here')
    if current_path:
        return os.path.normpath(os.path.join(current_path, relative_path))
    else:
        # FIX: old message contained a dangling %-style placeholder
        # ("%(here) config ...") that was never interpolated.
        raise Exception("'here' config in alembic cannot be found")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
198,
11748,
28686,
628,
198,
31,
22866,
37153,
198,
4299,
6246,
62,
29982,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220... | 2.876437 | 696 |
#!/usr/bin/env python3
import time
import automationhat
# Light the power LED only when an Automation HAT board is actually attached.
if automationhat.is_automation_hat():
    automationhat.light.power.write(1)
# Poll and print the digital and analog inputs forever, twice a second.
while True:
    print(automationhat.input.read())
    print(automationhat.analog.read())
    time.sleep(0.5)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
640,
198,
198,
11748,
22771,
5183,
628,
198,
361,
22771,
5183,
13,
271,
62,
2306,
296,
341,
62,
5183,
33529,
198,
220,
220,
220,
22771,
5183,
13,
2971,
13,
6477,
13,
... | 2.572917 | 96 |
import csv
# Print selected name columns of iladata.csv for rows whose flag columns
# (indices 5 and 6) are set to '1'.
with open('iladata.csv', 'r') as csvfile:
    for record in csv.reader(csvfile, delimiter=',', quotechar='|'):
        # Column 5 gates column 3; column 6 gates column 4.
        if record[5] == '1':
            print(record[3])
        if record[6] == '1':
            print(record[4])
11748,
269,
21370,
198,
198,
4480,
1280,
10786,
346,
14706,
13,
40664,
3256,
705,
81,
11537,
355,
269,
21370,
7753,
25,
198,
220,
220,
220,
269,
21370,
46862,
796,
269,
21370,
13,
46862,
7,
40664,
7753,
11,
46728,
2676,
28,
3256,
3256... | 1.907285 | 151 |
import json
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError
PAYPAL_SUPPORTED_CURRENCIES = [
'AUD', # Australian dollar
'BRL', # Brazilian real, [2]
'CAD', # Canadian dollar
'CZK', # Czech koruna
'DKK', # Danish krone
'EUR', # Euro
'HKD', # Hong Kong dollar
'HUF', # Hungarian forint [1]
'INR', # Indian rupee [3]
'ILS', # Israeli new shekel
'JPY', # Japanese yen [1]
'MYR', # Malaysian ringgit [2]
'MXN', # Mexican peso
'TWD', # New Taiwan dollar [1]
'NZD', # New Zealand dollar
'NOK', # Norwegian krone
'PHP', # Philippine peso
'PLN', # Polish złoty
'GBP', # Pound sterling
'RUB', # Russian ruble
'SGD', # Singapore dollar
'SEK', # Swedish krona
'CHF', # Swiss franc
'THB', # Thai baht
'USD', # United States dollar
# [1] This currency does not support decimals. If you pass a decimal amount, an error occurs.
# [2] This currency is supported as a payment currency and a currency balance for in-country PayPal accounts only.
# [3] This currency is supported as a payment currency and a currency balance for in-country PayPal India accounts only.
]
| [
11748,
33918,
198,
198,
6738,
16298,
2238,
1330,
4981,
11,
7032,
11,
40391,
11,
4808,
198,
6738,
16298,
2238,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
198,
4537,
48232,
1847,
62,
40331,
15490,
1961,
62,
34,
31302,
24181,
11015,
7... | 2.600423 | 473 |
from psutil import NoSuchProcess
import psutil | [
6738,
26692,
22602,
1330,
1400,
16678,
18709,
198,
198,
11748,
26692,
22602
] | 3.916667 | 12 |
#!/usr/bin/env python
import os
import subprocess
import sys
import math
import cmpt
# Usage: python shell.py ot-shell benchmark/ circuit
shell_binary = sys.argv[1]
bench_root = sys.argv[2]
target = sys.argv[3]

# Run from inside the benchmark's own directory.
os.chdir(bench_root + '/' + target)
shell_script = target + ".shell"
golden_report = target + ".output"
result_report = ".output"

# execute the shell to generate timing report
with open(shell_script) as script_in:
    subprocess.call([shell_binary, "-o", result_report], stdin=script_in)

# compare the output with the golden
cmpt.compare_timing(result_report, golden_report)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
10688,
198,
11748,
12067,
457,
198,
198,
2,
29566,
25,
21015,
7582,
13,
9078,
30972,
12,
29149,
18335,
14,
10349,
198... | 2.672222 | 180 |
def test_addition(session, dmg):
    """Should add entities to db"""
    fresh_user = dmg.User()
    session.add(fresh_user)
    session.commit()
    # First row inserted into an empty table gets primary key 1.
    assert fresh_user.id == 1

    fresh_role = dmg.Role(name='test role')
    session.add(fresh_role)
    session.commit()
    assert fresh_role.id == 1
def test_autoincrement(session, dmg):
    """Should auto-increment primary key"""
    earlier = dmg.User()
    later = dmg.User()
    session.add(earlier)
    session.add(later)
    session.commit()
    # Primary keys are handed out in insertion order.
    assert earlier.id == 1
    assert later.id == 2
def test_user_role_relationship(session, dmg):
    """Should properly read/write user-role relationships"""
    # Attach a role through the user's collection side.
    user1 = dmg.User()
    user1.roles.append(dmg.Role(name='test role for user1'))
    session.add(user1)
    session.commit()
    db_user1 = session.query(dmg.User).filter(dmg.User.id == user1.id).first()
    assert db_user1 is not None
    assert db_user1.roles is not None
    assert db_user1.roles[0].name == 'test role for user1'
    # Attach a second role from the role's side; both directions should
    # land in the UserRole association table.
    role2 = dmg.Role(name='test role2')
    role2.users.append(user1)
    session.add(role2)
    session.commit()
    roles_for_user_1 = session.query(dmg.UserRole).filter(dmg.UserRole.user_id == user1.id).all()
    assert len(roles_for_user_1) == 2
    # Ids 1 and 2 rely on this test running against a fresh database.
    assert roles_for_user_1[0].role_id == 1 and roles_for_user_1[0].role.name == 'test role for user1'
    assert roles_for_user_1[1].role_id == 2 and roles_for_user_1[1].role.name == 'test role2'
def test_role_permission_relationship(session, dmg):
    """Role/permission links should be readable and writable from either side."""
    # Link a role from the permission side of the relationship.
    perm = dmg.Permission(name='can_access_p1')
    perm.roles.append(dmg.Role(name='r1'))
    session.add(perm)
    session.commit()

    stored = session.query(dmg.Permission).filter(dmg.Permission.id == perm.id).first()
    assert stored is not None
    assert stored.roles is not None
    assert stored.roles[0].name == 'r1'

    # Link the same permission from the role side of the relationship.
    second_role = dmg.Role(name='r2')
    second_role.permissions.append(perm)
    session.add(second_role)
    session.commit()

    # Both links should be visible through the association table.
    links = session.query(dmg.RolePermission).filter(dmg.RolePermission.permission_id == perm.id).all()
    assert len(links) == 2
    assert links[0].role_id == 1 and links[0].role.name == 'r1'
    assert links[1].role_id == 2 and links[1].role.name == 'r2'
def test_unit_permission_relationship(session, dmg):
    """Unit/permission links should round-trip through the database."""
    perm = dmg.Permission(name='can_access_u1_and_u2')
    for unit in (dmg.Unit(name='u1'), dmg.Unit(name='u2')):
        perm.units.append(unit)
    session.add(perm)
    session.commit()

    stored = session.query(dmg.Permission).filter(dmg.Permission.id == perm.id).first()
    assert stored is not None
    assert stored.units is not None
    assert len(stored.units) == 2
def test_full_relationships_and_deletes(session, dmg):
    """Should properly handle chained user -> role -> permission relationships."""
    # NOTE(review): despite the name, no delete is exercised here — TODO add one.
    admin_role = dmg.Role(name='r1')
    for perm in (dmg.Permission(name='p1'), dmg.Permission(name='p2')):
        admin_role.permissions.append(perm)

    account = dmg.User(name='u1')
    account.roles.append(admin_role)
    session.add(account)
    session.commit()

    stored = session.query(dmg.User).filter(dmg.User.id == account.id).first()
    assert stored is not None
    assert len(stored.roles) == 1
    assert len(stored.roles[0].permissions) == 2
def test_hierarchy(session, dmg):
    """Roles should support parent/child hierarchies."""
    # Children are declared both via the parents= kwarg and via .children.append.
    root = dmg.Role(name='parent_role')
    child_a = dmg.Role(name='child1', parents=[root])
    child_b = dmg.Role(name='child2', parents=[root])
    root.children.append(dmg.Role(name='child3'))
    session.add_all([root, child_a, child_b])
    session.commit()

    stored_parent = session.query(dmg.Role).filter(dmg.Role.name == 'parent_role').first()
    assert stored_parent is not None
    assert len(stored_parent.children) == 3
    child_names = {role.name for role in stored_parent.children}
    assert child_names.issubset(['child1', 'child2', 'child3'])

    # The back-reference from child to parent should also be populated.
    stored_child = session.query(dmg.Role).filter(dmg.Role.name == 'child1').first()
    assert stored_child.parents[0].name == stored_parent.name
| [
4299,
1332,
62,
2860,
653,
7,
29891,
11,
41323,
2599,
198,
220,
220,
220,
37227,
19926,
751,
12066,
284,
20613,
37811,
198,
220,
220,
220,
2836,
796,
41323,
13,
12982,
3419,
198,
220,
220,
220,
6246,
13,
2860,
7,
7220,
8,
198,
220,
... | 2.580856 | 1,682 |
"""
Slixmpp: The Slick XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
from slixmpp.plugins.base import register_plugin
from slixmpp.plugins.xep_0047 import stanza
from slixmpp.plugins.xep_0047.stanza import Open, Close, Data
from slixmpp.plugins.xep_0047.stream import IBBytestream
from slixmpp.plugins.xep_0047.ibb import XEP_0047
register_plugin(XEP_0047)
# Retain some backwards compatibility
xep_0047 = XEP_0047
| [
37811,
198,
220,
220,
220,
3454,
844,
76,
381,
25,
383,
3454,
624,
1395,
7378,
47,
10074,
198,
220,
220,
220,
15069,
357,
34,
8,
2321,
32607,
2271,
417,
327,
13,
45954,
11,
18990,
449,
13,
51,
13,
40275,
198,
220,
220,
220,
770,
... | 2.686567 | 201 |
"""Testing utilities, made available for plugins."""
| [
37811,
44154,
20081,
11,
925,
1695,
329,
20652,
526,
15931,
198
] | 4.818182 | 11 |
'''
easyTest global configuration file.
'''
# Test-case and test-plan related settings
TESTCASE = {
    'runType': 'Browser', # Run mode (Remote, Browser)
    'xmlDir': 'script/config/', # Test-plan configuration folder (absolute paths supported)
    'testCaseDir': 'script/testCase/', # Test-case folder (must be relative to the project root)
    'filesDir': 'script/files/', # Folder holding files to be uploaded
}
# Parameterized-data related settings
PARAMETERIZATION = {
    'runTime': 1, # Runs per scenario (note: a value of 2 runs each scenario twice before moving on to the next scenario)
    'dataDir': '/script/data/',
}
# Local report / log related settings
REPORT = {
    'isScreenShot': True, # Screenshot switch: True = take screenshots, False = do not
    'isWriteLog': True, # Test-log switch: True = write test logs, False = do not
    'isWriteSysLog': True, # System-log switch: True = write system logs, False = do not
    'showFindElementLog': True, # Show element-lookup details in the log: True = show, False = hide
    'logLevel': 3, # Log verbosity: 1 = business level, 2 = includes assertions (default), 3 = code level
    'logDir': '/script/report/log/', # Local log folder; an absolute path also works
    'screenShotDir': '/script/report/image/', # Local screenshot folder
}
# Settings for sending reports to a server
SERVER = {
    'isRequest': True, # Switch for sending logs to the server: True = send, False = do not
    'requestURL': '', # URL the logs are sent to
}
# WebDriver related settings
DRIVER = {
    'implicitlyWait': 50, # Implicit wait when locating elements (seconds)
    'afterFindElementWait': 0.5, # Fixed wait after an element is found (seconds)
    'afterActionWait': 1, # Fixed wait after an action such as a click (seconds)
    'repeatFindTime': 10, # Number of repeated lookups when an element cannot be found
    'repeatDoTime': 10, # Number of retries after an action such as a click fails
    'waitForSERException': 1, # Retry interval when locating raises StaleElementReferenceException
    'waitForNAPException': 1, # Retry interval when closing an alert raises NoAlertPresentException
    'waitForWDException': 1, # Wait interval after an action fails
    'maximizeWindow': True, # Maximize the browser window on startup
    'createDriverFailed':10,# Number of retries when creating the browser fails
}
# Local browser settings (not for remote hosts)
BROWSER = {
    'fireFox': { # Local Firefox parameters
        'binary_location': '', # Path to the browser executable
    },
    'chrome': { # Local Chrome parameters
        'binary_location': '', # Path to the browser executable
    },
}
# Tag and attribute names used in the XML configuration files
XML_TAG = {
    'testPlan': {
        'connection': 'connection',
        'scene': 'scene',
        'sceneid': 'schemeId',
        'scriptId': 'scriptId',
        'enabled': 'enabled',
        'browser': 'browser',
        'paramPath': 'paramPath',
        'testCase': 'testCase',
        'hub': 'hub'
    },
    'testParam': {
        'param': 'param',
        'id': 'id',
        'description': 'description'
    }
}
# Template related settings
TEMPLATES = {
    'storeTemplateDir': 'SRC/template/', # Directory where the templates are stored
    'templateDir': [
        '测试方案模板.xml', # test-plan template
        '测试用例模板.py', # test-case template
        '参数化模板.xml' # parameterization template
    ]
}
| [
171,
119,
123,
7061,
6,
198,
38171,
14402,
17739,
101,
161,
109,
222,
165,
227,
235,
163,
121,
106,
23877,
229,
20015,
114,
198,
7061,
6,
198,
198,
2,
10545,
113,
233,
46237,
243,
18796,
101,
160,
122,
233,
20998,
232,
38184,
233,
... | 1.060502 | 2,033 |
from __future__ import absolute_import
from builtins import object
from math import *
from proteus import *
from proteus.default_p import *
from .NS_convergence import *
from proteus.mprans import Pres
# Physics definition for the pressure model of the split scheme.
name = "pressure"
LevelModelType = Pres.LevelModel
# Wire this pressure model to its companion models; the model indices
# (PRESSURE_model, V_model, PINC_model) presumably come from the
# NS_convergence star import — confirm there.
coefficients=Pres.Coefficients(modelIndex=PRESSURE_model,
                               fluidModelIndex=V_model,
                               pressureIncrementModelIndex=PINC_model,
                               useRotationalForm=False)
# Initial/boundary condition helpers (getIBC_p, getDBC_p, getFlux) are
# likewise supplied by the NS_convergence import.
initialConditions = {0:getIBC_p()}
dirichletConditions = {0:getDBC_p} # pressure bc are explicitly set
advectiveFluxBoundaryConditions = {0:getFlux}
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
3170,
1040,
1330,
2134,
198,
6738,
10688,
1330,
1635,
198,
6738,
5915,
385,
1330,
1635,
198,
6738,
5915,
385,
13,
12286,
62,
79,
1330,
1635,
198,
6738,
764,
8035,
62,
1102,
33... | 2.424812 | 266 |