content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
print(type(range(3)))
| [
4798,
7,
4906,
7,
9521,
7,
18,
22305,
198
] | 2.444444 | 9 |
#!/usr/bin/env python3
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from mylib.easy import T
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
9485,
24819,
17,
13,
48,
83,
14055,
1330,
1635,
198,
6738,
9485,
24819,
17,
13,
48,
83,
54,
312,
11407,
1330,
1635,
198,
198,
6738,
616,
8019,
13,
38171,
1330,
309,
628,
... | 2.568182 | 44 |
# https://leetcode.com/problems/jump-game/
nums = [2, 3, 1, 1, 4]
print(Solution().canJump(nums))
nums = [3, 2, 1, 0, 4]
print(Solution().canJump(nums)) | [
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
43327,
12,
6057,
14,
628,
198,
198,
77,
5700,
796,
685,
17,
11,
513,
11,
352,
11,
352,
11,
604,
60,
198,
4798,
7,
46344,
22446,
5171,
36046,
7,
77,
5700,
4008,
198,
... | 2.197183 | 71 |
# Time: O(n * t)
# Space: O(max(k, t))
import collections
# @param {integer[]} nums
# @param {integer} k
# @param {integer} t
# @return {boolean}
| [
2,
3862,
25,
220,
440,
7,
77,
1635,
256,
8,
198,
2,
4687,
25,
440,
7,
9806,
7,
74,
11,
256,
4008,
198,
198,
11748,
17268,
198,
220,
220,
220,
1303,
2488,
17143,
1391,
41433,
21737,
92,
997,
82,
198,
220,
220,
220,
1303,
2488,
... | 2.260274 | 73 |
import sys
n, k, m, *a = map(int, sys.stdin.read().split())
if __name__ == '__main__':
ans = main()
print(ans)
| [
11748,
25064,
201,
198,
201,
198,
77,
11,
479,
11,
285,
11,
1635,
64,
796,
3975,
7,
600,
11,
25064,
13,
19282,
259,
13,
961,
22446,
35312,
28955,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
1... | 2.081967 | 61 |
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2021
"""
Instrumentation for Sanic
https://sanicframework.org/en/
"""
try:
import sanic
import wrapt
import opentracing
from ..log import logger
from ..singletons import async_tracer, agent
from ..util.secrets import strip_secrets_from_query
from ..util.traceutils import extract_custom_headers
@wrapt.patch_function_wrapper('sanic.exceptions', 'SanicException.__init__')
if hasattr(sanic.response.BaseHTTPResponse, "send"):
@wrapt.patch_function_wrapper('sanic.response', 'BaseHTTPResponse.send')
else:
@wrapt.patch_function_wrapper('sanic.server', 'HttpProtocol.write_response')
@wrapt.patch_function_wrapper('sanic.server', 'HttpProtocol.stream_response')
@wrapt.patch_function_wrapper('sanic.app', 'Sanic.handle_request')
logger.debug("Instrumenting Sanic")
except ImportError:
pass
except AttributeError:
logger.debug("Not supported Sanic version")
| [
2,
357,
66,
8,
15069,
19764,
11421,
13,
33448,
198,
2,
357,
66,
8,
15069,
2262,
2271,
3457,
13,
33448,
198,
198,
37811,
198,
818,
43872,
341,
329,
2986,
291,
198,
5450,
1378,
12807,
291,
30604,
13,
2398,
14,
268,
14,
198,
37811,
1... | 2.774105 | 363 |
import torch
from torch import nn
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198
] | 3.777778 | 9 |
# flake8: noqa
import os
import sys
import multiprocessing
from time import sleep
from datetime import datetime, time
from logging import DEBUG
# 将repostory的目录i,作为根目录,添加到系统环境中。
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
if ROOT_PATH not in sys.path:
sys.path.append(ROOT_PATH)
print(f'append {ROOT_PATH} into sys.path')
from vnpy.event import EventEngine, EVENT_TIMER
from vnpy.trader.setting import SETTINGS
from vnpy.trader.engine import MainEngine
from vnpy.trader.utility import load_json
from vnpy.gateway.rpc import RpcGateway
from vnpy.app.cta_stock import CtaStockApp
#from vnpy.app.cta_crypto.base import EVENT_CTA_LOG
from vnpy.app.rpc_service import RpcServiceApp
#from vnpy.app.algo_broker import AlgoBrokerApp
from vnpy.app.account_recorder import AccountRecorderApp
from vnpy.trader.util_pid import update_pid
from vnpy.trader.util_monitor import OrderMonitor, TradeMonitor, PositionMonitor, AccountMonitor, LogMonitor
SETTINGS["log.active"] = True
SETTINGS["log.level"] = DEBUG
SETTINGS["log.console"] = True
SETTINGS["log.file"] = True
gateway_name = 'em02_gw'
gw_setting = load_json(f'connect_{gateway_name}.json')
import types
import traceback
def excepthook(exctype: type, value: Exception, tb: types.TracebackType) -> None:
"""
Raise exception under debug mode
"""
sys.__excepthook__(exctype, value, tb)
msg = "".join(traceback.format_exception(exctype, value, tb))
print(msg, file=sys.stderr)
if __name__ == "__main__":
sys.excepthook = excepthook
s = DaemonService()
s.start()
| [
2,
781,
539,
23,
25,
645,
20402,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
18540,
305,
919,
278,
198,
6738,
640,
1330,
3993,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
198,
6738,
18931,
1330,
16959,
198,
198,
2,
... | 2.498433 | 638 |
#!/usr/bin/env python2
# Futures
from __future__ import print_function, division
# Standard
import os
import multiprocessing
# Non-standard
import cv2
import numpy as np
import matplotlib.pyplot as plt
def list_images(input_dir):
""" Produces a list of all the filenames in the input directory. """
# List the file names
filenames = sorted(os.listdir(input_dir))
# Filter out our instructional file
# (putting it there was a great idea)
filenames = [f for f in filenames if "DROP_INPUT" not in f]
# And return them
return filenames
def calculate_luminance(image):
"""
Calculates the luminance or brightness or whatever of a single OpenCV image.
https://stackoverflow.com/questions/6442118/python-measuring-pixel-brightness
"""
# Get image dimensions
h = image.shape[0]
w = image.shape[1]
# Calculate for each pixel
brightness = []
for y in range(0, h, int(h/50)):
for x in range(0, w, int(w/50)):
r,g,b = image[y, x]
brightness.append(0.333*r + 0.333*g + 0.333*b)
# And return an average
return np.mean(brightness)
def worker_func(path):
"""
Worker function for calculate_luminances_files()
"""
image = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
return calculate_luminance(image)
def calculate_luminances_files(directory, filenames):
"""
Calculates the luminance of each image using a pool of processes.
"""
# Create a list of all the paths
paths = []
for filename in filenames:
path = os.path.join(directory, filename)
paths.append(path)
# Start a pool with a worker_func for each path
p = multiprocessing.Pool(multiprocessing.cpu_count())
luminances = p.map(worker_func, paths)
# And we're done!
return luminances
def set_luminance(image, init_luminance, final_luminance):
"""
Tries to approximate the luminance of a certain image to a given value.
"""
# Copy the image to a new array, so we don't screw it up
new_image = np.copy(image)
# Get image dimensions
h = new_image.shape[0]
w = new_image.shape[1]
# Calculate the luminance difference
lum_diff = (final_luminance - init_luminance)
# Adjust gamma
new_image = adjust_gamma(image, 1 + (4*lum_diff)/255)
# Re-calculate luminance
new_luminance = calculate_luminance(new_image)
# Calculate the luminance difference
lum_diff = (final_luminance - new_luminance)
# And give it a second pass for good measure
new_image = adjust_gamma(new_image, 1 + (4*lum_diff)/255)
# Re-calculate luminance
new_luminance = calculate_luminance(new_image)
# Calculate the luminance difference
lum_diff = (final_luminance - new_luminance)
# And a third because why not, we have the CPU to spare
new_image = adjust_gamma(new_image, 1 + (4*lum_diff)/255)
# And we're done!
return new_image
def adjust_gamma(image, gamma=1.2):
"""
Adjusts an image's gamma, returns an adjusted copy.
"""
# https://www.pyimagesearch.com/2015/10/05/opencv-gamma-correction/
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255 for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv2.LUT(image, table)
def curve_luminances(images, init_luminances, target_luminances):
"""
Given a luminance, tries to make luminance follow the curve.
"""
# Set luminances individually
equalized_images = []
for i, image in enumerate(images):
equalized_images.append(set_luminance(image, init_luminances[i], target_luminances[i]))
# And we're done!
return equalized_images
def worker_func_luminance(args):
"""
Worker function for curve_luminances_files()
"""
# Unpack arguments
in_path = args[0]
init_luminance = args[1]
target_luminance = args[2]
out_path = args[3]
# Do the roar
image = cv2.cvtColor(cv2.imread(in_path), cv2.COLOR_BGR2RGB)
equalized_image = set_luminance(image, init_luminance, target_luminance)
cv2.imwrite(out_path, cv2.cvtColor(equalized_image, cv2.COLOR_RGB2BGR))
def curve_luminances_files(filenames, init_luminances, target_luminances, input_dir, output_dir):
"""
Given a luminance, tries to make luminance follow the curve.
"""
# Generate the vector of arguments we'll need
args = []
equalized_image_filenames = []
for i, filename in enumerate(filenames):
# Generate the input and output paths, and the initial and target luminances
in_path = os.path.join(input_dir, filename)
out_filename = str(i)+".jpg"
out_path = os.path.join(output_dir, out_filename)
args.append([in_path, init_luminances[i], target_luminances[i], out_path])
equalized_image_filenames.append(out_filename)
# Start a pool with a worker_func for each path
p = multiprocessing.Pool(multiprocessing.cpu_count())
p.map(worker_func_luminance, args)
# And we're done!
return equalized_image_filenames
def fit_luminance_curve(luminances, degree=5):
"""
Interpolates and smoothes a new luminance curve.
"""
# Find a polynomial that approximates the curve,
# smoothing it
poly = np.polyfit(range(len(luminances)), luminances, degree)
# And return its counterdomain in the same range
return np.polyval(poly, range(len(luminances)))
def calculate_error(luminances, ref_curve):
"""
Calculates the average, std and total error.
"""
# Calculate the error
error = np.abs(np.array(luminances) - np.array(ref_curve))
# And return the metrics we want
return np.sum(error), np.mean(error), np.std(error)
def plot_luminance_curves(curves, filename, labels=None):
"""
Plots a luminance curve.
"""
# Create a new figure
plt.figure()
# Plot all curves, adding labels if they exist
for i, curve in enumerate(curves):
if labels==None:
plt.plot(curve)
else:
plt.plot(curve, label=labels[i])
plt.legend()
# Save the figure and close it
plt.savefig(filename)
plt.clf()
def deflicker_with_files(input_dir, output_dir):
"""
Instead of loading everything into memory and blowing up, this function
uses file-based methods. Thanks to caching and whatnot, the IO latency is
not enough to bottleneck the process, at least for now.
"""
print("Listing filenames")
original_filenames = list_images(input_dir)
print("Calculating luminances")
luminances = calculate_luminances_files(input_dir, original_filenames)
print("Initial luminances:")
print("Mean:", np.mean(luminances), "std:", np.std(luminances))
# TODO: Filter outliers
print("Fitting luminance curve")
fitted_curve = fit_luminance_curve(luminances)
print("Calculating error")
err_sum, err_mean, err_std = calculate_error(luminances, fitted_curve)
print("Total error:", err_sum, "avg", err_mean, "std", err_std)
print("Curving luminances")
equalized_image_filenames = curve_luminances_files(original_filenames, luminances, fitted_curve, input_dir, output_dir)
print("Calculating luminances")
new_luminances = calculate_luminances_files(output_dir, equalized_image_filenames)
print("New luminances:")
print("Mean:", np.mean(new_luminances), "std:", np.std(new_luminances))
print("Calculating error")
err_sum, err_mean, err_std = calculate_error(new_luminances, fitted_curve)
print("Total error:", err_sum, "avg", err_mean, "std", err_std)
print("Plotting curves")
plot_luminance_curves([luminances, fitted_curve, new_luminances], "curves.pdf", ["original", "fitted", "result"])
if __name__ == "__main__":
# Define the input and output directories
input_dir = "timelapse"
output_dir = "output"
# And call the main function
deflicker_with_files(input_dir, output_dir)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
198,
2,
24002,
942,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
7297,
198,
198,
2,
8997,
198,
11748,
28686,
198,
11748,
18540,
305,
919,
278,
198,
198,
2,
8504,
12,
... | 2.782142 | 2,699 |
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import configparser # An input for some functions we're testing.
import os.path # To find the integration test .ini files.
import pytest # To register tests with.
import unittest.mock # To mock the application, plug-in and container registry out.
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
import UM.Application # To mock the application out.
import UM.PluginRegistry # To mock the plug-in registry out.
import UM.Settings.ContainerRegistry # To mock the container registry out.
import UM.Settings.InstanceContainer # To intercept the serialised data from the read() function.
import LegacyProfileReader as LegacyProfileReaderModule # To get the directory of the module.
@pytest.fixture
test_prepareDefaultsData = [
{
"defaults":
{
"foo": "bar"
},
"cheese": "delicious"
},
{
"cat": "fluffy",
"dog": "floofy"
}
]
@pytest.mark.parametrize("input", test_prepareDefaultsData)
test_prepareLocalsData = [
( # Ordinary case.
{ # Parser data.
"profile":
{
"layer_height": "0.2",
"infill_density": "30"
}
},
{ # Defaults.
"layer_height": "0.1",
"infill_density": "20",
"line_width": "0.4"
}
),
( # Empty data.
{ # Parser data.
"profile":
{
}
},
{ # Defaults.
}
),
( # All defaults.
{ # Parser data.
"profile":
{
}
},
{ # Defaults.
"foo": "bar",
"boo": "far"
}
),
( # Multiple config sections.
{ # Parser data.
"some_other_name":
{
"foo": "bar"
},
"profile":
{
"foo": "baz" #Not the same as in some_other_name
}
},
{ # Defaults.
"foo": "bla"
}
)
]
@pytest.mark.parametrize("parser_data, defaults", test_prepareLocalsData)
test_prepareLocalsNoSectionErrorData = [
( # Section does not exist.
{ # Parser data.
"some_other_name":
{
"foo": "bar"
},
},
{ # Defaults.
"foo": "baz"
}
)
]
## Test cases where a key error is expected.
@pytest.mark.parametrize("parser_data, defaults", test_prepareLocalsNoSectionErrorData)
intercepted_data = ""
@pytest.mark.parametrize("file_name", ["normal_case.ini"])
| [
2,
15069,
357,
66,
8,
2864,
6172,
320,
3110,
347,
13,
53,
13,
198,
2,
4424,
64,
318,
2716,
739,
262,
2846,
286,
262,
17370,
6489,
85,
18,
393,
2440,
13,
198,
198,
11748,
4566,
48610,
1303,
1052,
5128,
329,
617,
5499,
356,
821,
4... | 2.024756 | 1,333 |
from django.urls import path
from .consumers import ChatConsumer
websocket_urlpatterns = [
path(r'ws/chat/<room_name>/', ChatConsumer),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
5936,
31260,
1330,
24101,
49106,
628,
198,
732,
1443,
5459,
62,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
7,
81,
6,
18504,
14,
17006,
14,
27,
3823,
62,
3672,
... | 2.769231 | 52 |
#!/usr/bin/env python
from wand.image import Image
from wand.drawing import Drawing
from wand.color import Color
import math
# http://www.imagemagick.org/Usage/fonts/
# original imagemagick command:
# convert -size 320x115 xc:lightblue -font Candice -pointsize 72 \
# -fill Navy -annotate 0x0+12+55 'Anthony' \
# -fill RoyalBlue -annotate 0x130+25+80 'Anthony' \
# font_slewed.jpg
w = 320
h = 115
with Image(width=w, height=h, background=Color('lightblue')) as img:
with Drawing() as draw:
text = 'Anthony'
draw.font = 'Candice'
draw.font_size = 72
draw.gravity = 'forget'
draw.fill_color = Color('navy')
draw.text(12, 55, text)
# NOTE: annotate option format of IM is {SlewX}x{SlewY}+{X}+{Y}
# But Wand doesn't have slew function,
# so we emulate it by rotate,translate,skew,scale
x = 25
y = 80
angle = 130
skewangle = math.fmod(angle, 360)
if (0 <= skewangle and skewangle < 90) or (270 <= skewangle and skewangle < 360):
skewangle = skewangle * -1.0
draw.translate(x, y) # 3.positioning
draw.skew(skewangle, 0) # 2.skewing
draw.scale(1.0, math.cos(math.radians(angle))) # 1.flipping & shlinking vertically
draw.fill_color = Color('royalblue')
draw.text(0, 0, text)
draw(img)
img.save(filename='sample05.png')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
11569,
13,
9060,
1330,
7412,
198,
6738,
11569,
13,
19334,
278,
1330,
40027,
198,
6738,
11569,
13,
8043,
1330,
5315,
198,
11748,
10688,
198,
198,
2,
2638,
1378,
2503,
13,
484... | 2.216049 | 648 |
import pytest
from zeus.exceptions import UnknownRevision
from zeus.utils.revisions import identify_revision
| [
11748,
12972,
9288,
198,
198,
6738,
1976,
27650,
13,
1069,
11755,
1330,
16185,
18009,
1166,
198,
6738,
1976,
27650,
13,
26791,
13,
18218,
3279,
1330,
5911,
62,
260,
10178,
628,
628
] | 3.645161 | 31 |
# Copyright 2021 Mathew Odden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
with open("README.md", "r", encoding="utf8") as fh:
long_desc = fh.read()
setup(
name="redstone",
version="0.5.0",
author="Mathew Odden",
author_email="mathewrodden@gmail.com",
url="https://github.com/IBM/redstone",
description="A Pythonic IBM Cloud SDK",
long_description=long_desc,
long_description_content_type="text/markdown",
packages=find_packages(),
install_requires=["requests[security]", "cryptography"],
extras_require={
"docs": ["sphinx>=3.1", "sphinx_rtd_theme"],
},
entry_points={
"console_scripts": [
"rs-crypto = redstone.crypto.__main__:main",
"rs-keyprotect = redstone.keyprotect.cli:main",
]
},
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
],
python_requires=">=3.6",
)
| [
2,
15069,
33448,
6550,
6391,
440,
4742,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921... | 2.77069 | 580 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pdb import set_trace
import sys
import argparse
import os
from teixml2lib.ualog import Log
import teixml2lib.file_utils as fut
__date__ = "'4-01-2021"
__version__ = "0.1.0"
__author__ = "Marta Materni"
logerr = Log("a")
if __name__ == "__main__":
logerr.open("log/writehtml.ERR.log", 1)
parser = argparse.ArgumentParser()
if len(sys.argv) == 1:
print("release: %s %s" % (__version__, __date__))
parser.print_help()
sys.exit()
try:
parser.add_argument('-i',
dest="html",
required=False,
metavar="",
default="",
help="-i <html>")
parser.add_argument('-o',
dest="ou",
required=True,
metavar="",
help="-o <file_out.html>")
parser.add_argument('-wa',
dest="wa",
required=False,
metavar="",
default="a",
help="[-wa w/a (w)rite a)ppend) default a")
args = parser.parse_args()
html_ou = args.ou
fut.make_dir_of_file(html_ou)
html = args.html
write_append = args.wa
with open(html_ou, write_append) as f:
f.write(html+os.linesep)
except Exception as e:
logerr.log("ERROR writehtml.py")
logerr.log(e)
sys.exit(1)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
279,
9945,
1330,
900,
62,
40546,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
6738,
57... | 1.692308 | 949 |
from django.db import models
from django.contrib.auth.models import User
#hello this is priyaa
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
198,
2,
31373,
428,
318,
1293,
3972,
64,
198,
2,
13610,
534,
4981,
994,
13,
628,
198,
197,
197,
198
] | 3.121951 | 41 |
"""Plots graphs of timings"""
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from matplotlib.lines import Line2D
def fps_plot(df, title):
"""plots graphs of timings"""
df = df[~df.groupname.str.contains("ffmpeg_unblocked_decoding_speed")]
df["groupname"] = df.groupname.str.split("_benchmark", expand=True)[0]
sns.set(font_scale=2)
g = sns.catplot(data=df, kind="bar",
x="groupname", y="fps", palette="dark", alpha=.6, height=5, aspect=5,
legend=True, legend_out=True)
plt.xlabel(title)
plt.savefig(title + ".png")
def combined_plot(df, title):
"""plots graphs of timings"""
df["groupname"] = df.groupname.str.split("_benchmark", expand=True)[0]
# This item is not needed given we have an unblocked graph
df = df[~df.groupname.str.contains("ffmpeg_unblocked_decoding_speed")].reset_index()
# Put the colors into a list, in order of increasing FPS (the values here are pre sorted)
# by using a global color mapping we ensure consistent colors between graph
palette = [GLOBAL_COLOR_MAPPINGS[group] for group in df.groupname.values]
sns.set(font_scale=3)
def barplot_err(x, y, xerr=None, yerr=None, data=None, **kwargs):
"""Plot a bar graph with hand defined symmetrical error bars"""
_data = []
for _i in data.index:
_data_i = pd.concat([data.loc[_i:_i]] * 3, ignore_index=True, sort=False)
_row = data.loc[_i]
if xerr is not None:
_data_i[x] = [_row[x] - _row[xerr], _row[x], _row[x] + _row[xerr]]
if yerr is not None:
_data_i[y] = [_row[y] - _row[yerr], _row[y], _row[y] + _row[yerr]]
_data.append(_data_i)
_data = pd.concat(_data, ignore_index=True, sort=False)
_ax = sns.barplot(x=x, y=y, data=_data, ci="sd", **kwargs)
return _ax
_, ax = plt.subplots(figsize=(40, 10))
_ax = barplot_err(x="groupname", y="time_for_all_frames", yerr="stddev_for_all_frames",
capsize=.2, data=df, ax=ax, palette=palette)
_ax.set_xticklabels([]) # remove labels on each bar
legend_markers = []
legend_labels = []
for _, row in df.iterrows():
print(row.name, row.time_for_all_frames - row.time_for_all_frames * 0.5)
_ax.text(row.name, row.time_for_all_frames - row.time_for_all_frames * 0.5,
f"{int(round(row.fps, 0))} FPS", color="black", ha="center", va="bottom")
# plot legend
rect = Line2D([], [], marker="s", markersize=30, linewidth=0, color=palette[_])
legend_markers.append(rect)
legend_labels.append(row.groupname)
print("labels", legend_labels)
_ax.legend(legend_markers, legend_labels, bbox_to_anchor=(1.01, 1), borderaxespad=0)
plt.xlabel(title)
plt.ylabel("Time to process 1000 frames (s)")
plt.tight_layout()
plt.savefig("tmp_" + title + ".png")
return _ax
for suffix in ["_video_1920x1080"]:
unblocked = load_df(f"benchmark_timings_unblocked{suffix}.csv")
io = load_df(f"benchmark_timings_iolimited{suffix}.csv")
cpu = load_df(f"benchmark_timings_cpulimited{suffix}.csv")
#unblocked = unblocked[~unblocked.groupname.str.contains("max_possible_fps")].reset_index()
tmp_palette = sns.color_palette("hls", len(unblocked.groupname))
unblocked["groupname"] = unblocked.groupname.str.split("_benchmark", expand=True)[0]
GLOBAL_COLOR_MAPPINGS = {group: color for group, color in zip(sorted(unblocked.groupname.values),
tmp_palette)}
print(GLOBAL_COLOR_MAPPINGS)
combined_plot(unblocked, f"Unblocked{suffix}")
combined_plot(io, f"IOLimited{suffix}")
combined_plot(cpu, f"CPULimited{suffix}")
| [
37811,
3646,
1747,
28770,
286,
4628,
654,
37811,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
6738,
2603,
29487,
8019,
13,
6615,
13... | 2.206239 | 1,731 |
#! /usr/bin/env python
from ..common.device_base import Device_Base
from .device_zwave_base import Device_ZWave_Base
paddle_events = {"DON", "DOF", "DIM", "BRT", "DFON", "DFOF"}
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
628,
198,
6738,
11485,
11321,
13,
25202,
62,
8692,
1330,
16232,
62,
14881,
198,
6738,
764,
25202,
62,
89,
19204,
62,
8692,
1330,
16232,
62,
57,
39709,
62,
14881,
198,
198,
79,
37382,
62... | 2.577465 | 71 |
# native imports
# project imports
# external imports
import sqlalchemy
import sqlalchemy.ext.declarative
Base = sqlalchemy.ext.declarative.declarative_base()
Session = sqlalchemy.orm.scoped_session(sqlalchemy.orm.sessionmaker())
| [
2,
6868,
17944,
198,
198,
2,
1628,
17944,
198,
198,
2,
7097,
17944,
198,
11748,
44161,
282,
26599,
198,
11748,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
198,
198,
14881,
796,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
... | 3.205479 | 73 |
from trainer import Trainer
from tester import Tester
from dataset import Dataset, Params
if __name__ == "__main__":
args = Params()
dataset = Dataset(args.dataset)
#
print("~~~~ Training ~~~~")
trainer = Trainer(dataset, args)
trainer.train()
print("~~~~ Select best epoch on validation set ~~~~")
dataset = Dataset(args.dataset)
checkpoint_file = "models/" + args.dataset + "/model.chkpnt"
tester = Tester(dataset, checkpoint_file, "dev")
tester.test()
tester = Tester(dataset, checkpoint_file, "test")
tester.test()
| [
6738,
21997,
1330,
31924,
198,
6738,
256,
7834,
1330,
309,
7834,
198,
6738,
27039,
1330,
16092,
292,
316,
11,
2547,
4105,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
26498,
796,
2547,
4105,
3... | 2.674419 | 215 |
import gym
from gym.utils import seeding
import numpy as np
import pandas as pd
| [
11748,
11550,
198,
6738,
11550,
13,
26791,
1330,
384,
8228,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198
] | 3.333333 | 24 |
import os
from helper.reddit import RedditConfig
import random
import json
| [
11748,
28686,
198,
6738,
31904,
13,
10748,
1330,
10750,
16934,
198,
11748,
4738,
198,
11748,
33918,
628
] | 4.470588 | 17 |
from django.test import TestCase
from django.contrib.auth import get_user_model
from socialapp import models
from rest_framework.test import APIClient
from socialapp import urls
class TestREST(TestCase):
@classmethod
"""
def testGetAuthorRemoveFriend(self):
client = APIClient()
temp1 = self.author3.get_absolute_url()
path = temp[:8] + "remove-friend/" + temp[8:]
response = client.get(path)
self.assertEqual(response.status_code, 200)
"""
"""
def testGetAuthorSendRequest(self):
client = APIClient()
temp = self.author3.get_absolute_url()
path = temp[:8] + "send-request/" + temp[8:]
response = client.get(path)
self.assertEqual(response.status_code, 200)
"""
"""
def testGetAuthorAcceptRequest(self):
client = APIClient()
temp1 = self.author1.get_absolute_url()
path = temp[:8] + "accept-request/" + temp[8:]
response = client.get(path)
self.assertEqual(response.status_code, 200)
"""
"""
def testGetAuthorDeclineRequest(self):
client = APIClient()
temp1 = self.author1.get_absolute_url()
path = temp[:8] + "decline-request/" + temp[8:]
response = client.get(path)
self.assertEqual(response.status_code, 200)
"""
"""
def testGetPostCreate(self):
client = APIClient()
path = "/Post/create/"
response = client.get(path)
self.assertEqual(response.status_code, 200)
"""
"""
def testPostPost(self):
client = APIClient()
path = "/Post/create/"
#author = self.author1
#title = "test"
#source = 'http://1.1'
#origin = 'http://1.1'
#contentType = 'PUBLIC'
#description = 'testpostpost'
#content = 'does it work?'
#unlisted = False
#published = '2019-03-19'
#visibility = 'PUBLIC'
responce = client.post(path, {'author': 'author1', 'title': 'test', 'source': 'http://1.1', 'origin': 'http://1.1',
'contentType': 'PUBLIC', 'description': 'testpostpost', 'content': 'does it work?', 'unlisted': 'False',
'published': '2019-03-19', 'visibility': 'PUBLIC'})
print('\n')
print(responce)
print('\n')
"""
#login = self.client.login(username='user1')
#print('\n')
#print(path)
#print('\n') | [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
1919,
1324,
1330,
4981,
198,
198,
6738,
1334,
62,
30604,
13,
9288,
1330,
3486,
2149,
75,
11... | 2.259813 | 1,070 |
from yowsup.layers import YowLayer, YowLayerEvent, YowProtocolLayer
from .keystream import KeyStream
from yowsup.common.tools import TimeTools
from .layer_crypt import YowCryptLayer
from yowsup.layers.network import YowNetworkLayer
from .autherror import AuthError
from .protocolentities import *
import base64
| [
6738,
331,
1666,
929,
13,
75,
6962,
1330,
575,
322,
49925,
11,
575,
322,
49925,
9237,
11,
575,
322,
19703,
4668,
49925,
198,
6738,
764,
2539,
5532,
1330,
7383,
12124,
198,
6738,
331,
1666,
929,
13,
11321,
13,
31391,
1330,
3862,
33637,... | 3.402174 | 92 |
from gym_aima.envs.aima_env import AIMAEnv
| [
6738,
11550,
62,
1385,
64,
13,
268,
14259,
13,
1385,
64,
62,
24330,
1330,
317,
3955,
32,
4834,
85,
198
] | 2.15 | 20 |
#!/usr/bin/env python
# compatibility with python 2/3:
from __future__ import print_function
from __future__ import division
import sys, os
import glob
import time
import datetime
import numpy as np
import astropy.io.fits as pyfits
import argparse
import pkg_resources
import appdirs
# Directory of this file
path = os.path.dirname(os.path.realpath(__file__))
# Global variables
#import ac_settings as ac_set
# Global variables
from .actin_files import ac_settings as ac_set
from .actin_files import ac_config
from .actin_files import ac_read_data
from .actin_files import ac_get_win
from .actin_files import ac_calc_ind
from .actin_files import ac_save
from .actin_files import ac_plot_time as ac_plot
from .actin_files import ac_tools
from matplotlib import pylab as plt
# initiate global variables:
ac_set.init()
# Configuration file:
config_file = os.path.join(path, "config_lines.txt")
# Version file:
version_file = os.path.join(path, "VERSION")
# Print preamble:
version = ac_set.preamble(version_file)
def actin_file(file, calc_index=None, rv_in=None, config_file=config_file, save_output=False, ln_plts=False, obj_name=None, targ_list=None, del_out=False, frac=True):
"""
Runs ACTIN for one fits file.
Accepts files of types: 'S2D', 'S1D', 'e2ds', 's1d', 's1d_*_rv', 'ADP', and 'rdb'.
Recognizes fits files from HARPS, HARPS-N and ESPRESSO instruments.
"""
print()
print("--------------------")
print("EXECUTING ACTIN_FILE")
print("--------------------")
if type(file) is list: file = file[0]
# Check if file is from known instrument
tel, instr = ac_tools.get_instr(file)
if instr == False: pass
elif instr in ac_set.instr: pass
else:
msg="*** ERROR:\nUnrecognized instrument. ACTIN only accepts HARPS, HARPS-N or ESPRESSO. To read from a different instrument convert data to rdb file with the headers: 'obj', 'obs_date', 'bjd', 'wave', 'flux', 'error_pixel' (optional)."
sys.exit(msg)
# Checking if object in targ_list is the same as the object in fits file
if targ_list:
check = ac_tools.check_targ(file, targets=targ_list)
if check is True: pass
elif check is False: return
# Read config file and retrieve lines information
if calc_index:
sel_lines = ac_config.read_conf(config_file, calc_index)
# Read data from file
data = ac_read_data.read_data(file, rv_in=rv_in, obj_name=obj_name)
if not data:
return
# Check output file for duplicates
if save_output is not False and data['file_type'] != "rdb":
dupl = ac_save.check_duplicate(data['obj'], data['obs_date'], data['instr'], data['file_type'], save_output)
if dupl is True: return
if calc_index:
# Check selected lines for spectral range and orders
test = ac_calc_ind.check_lines(data['wave'], sel_lines)
if not test:
print("*** ACTION: Ignoring measurement.")
return
# Calculate flux in the required lines
sel_lines = ac_calc_ind.calc_flux_lines(data, sel_lines, ln_plts=ln_plts, frac=frac)
# Calculate chosen indices
index = ac_calc_ind.calc_ind(sel_lines)
if not calc_index:
index = None
sel_lines = None
# Write output to rdb file in "out_dir"/"obj"
if save_output is not False:
rdb_file = ac_save.save_data(data, index, out_dir=save_output)
else: rdb_file = None
info = {}
info['config_file'] = config_file
info['file_type'] = data['file_type']
info['version'] = version
info['source_path'] = os.path.split(file)[0]
info['tel'] = data['tel']
info['instr'] = data['instr']
info['obj'] = data['obj']
options = {}
options['frac'] = frac
output = {}
output['data'] = data
output['index'] = index
output['sel_lines'] = sel_lines
output['info'] = info
output['options'] = options
output['rdb_file'] = rdb_file
return output
def actin(files, calc_index=None, rv_in=None, config_file=None, save_output=False, ln_plts=False, obj_name=None, targ_list=None, del_out=False, frac=True, test=False, save_plots=False):
    """
    Run 'actin_file' for one or multiple fits files, for one or multiple stars.

    Accepts fits files from HARPS, HARPS-N, and ESPRESSO instruments.
    Accepts files of types: 'S1D', 'S2D', 'e2ds', 's1d', 's1d_*_rv', 'ADP', and 'rdb'.

    Parameters
    ----------
    files : str or list
        Fits file(s) to process.
    calc_index : list or None
        Index ids to calculate (as named in the config file).
    rv_in : float, list or None
        RV used to calibrate the wavelength; if None it is read per file.
    config_file : str or None
        Path to the spectral-lines config file; None uses the installed one.
    save_output : str or False
        Output directory for the data tables, or False to skip saving.
    ln_plts : str or False
        Directory for line plots; 'same' reuses `save_output`.
    obj_name : str, list or None
        Override for the target name(s) taken from the fits headers.
    targ_list : list or None
        Restrict processing to these targets.
    del_out : bool
        If True, delete pre-existing output files first.
    frac : bool
        Use fractional pixels in the flux integration.
    test : str or False
        Run on the bundled test files ('e2ds', 's1d' or 'adp').
    save_plots : bool
        If True, save time-series plots next to the data output.
    """
    print()
    print("----------------")
    print(" STARTING ACTIN ")
    print("----------------")
    start_time = time.time()
    # Get config file from installation or input
    if config_file is None:
        cfg_file = get_config()
    else:
        cfg_file = config_file
    print()
    print("Using spectral lines from configuration file:")
    print(cfg_file)
    # test values can be 'S1D', 'S2D', 'e2ds', 's1d', 'adp', or 'rdb'
    # this needs to run before anything that uses 'files'
    # NOTE(review): 'path' is a module-level name not visible in this chunk —
    # presumably the package directory; confirm against the module header.
    if test:
        calc_index, files = ac_tools.test_actin(test, path, calc_index)
    if not files:
        raise Exception("*** ERROR: There are no files to read")
    # Make lists to be iterated below
    if isinstance(files, str): files = [files]
    if rv_in is None:
        rv_in = [rv_in]*len(files)
    elif not isinstance(rv_in, list):
        # BUG FIX: a scalar RV must be broadcast to every file; the original
        # built a one-element list, so rv_in[j] raised IndexError whenever
        # more than one file was given.
        rv_in = [float(rv_in)]*len(files)
    # Check if files exist
    ac_tools.check_files(files)
    # Remove output file
    if del_out:
        print()
        print("Executing ac_tools.remove_output:")
        print("Searching output files to delete...")
        #ac_tools.remove_output(files, save_output, targ_list)
        if obj_name:
            for f in files:
                _, instr = ac_tools.get_instr(f)
                file_type = ac_tools.get_file_type(f)
                if isinstance(obj_name, str):
                    star_name = obj_name
                    ac_tools.remove_output2(star_name, instr, file_type, save_output)
                elif isinstance(obj_name, (list, np.ndarray)):
                    for star_name in obj_name:
                        ac_tools.remove_output2(star_name, instr, file_type, save_output)
        elif not obj_name:
            for f in files:
                star_name = ac_tools.get_target(f)
                _, instr = ac_tools.get_instr(f)
                file_type = ac_tools.get_file_type(f)
                ac_tools.remove_output2(star_name, instr, file_type, save_output)
    # Option to make line plots directory the same as the data output dir
    if ln_plts == 'same':
        ln_plts = save_output
    total_files = len(files)
    # Organize files by path to star and file type
    files_list = ac_tools.files_by_star_and_ftype(files)
    n_files_t = 0
    # star directories
    for k in range(len(files_list)):
        # file types
        for i in range(len(files_list[k])):
            n_files = 0
            # files organized by dir and file type
            for j in range(len(files_list[k][i])):
                n_files += 1
                n_files_t += 1
                # Run actin file
                output = actin_file(files_list[k][i][j],
                                    calc_index,
                                    rv_in=rv_in[j],
                                    config_file=cfg_file,
                                    save_output=save_output,
                                    ln_plts=ln_plts,
                                    obj_name=obj_name,
                                    targ_list=targ_list,
                                    del_out=del_out,
                                    frac=frac)
                # POST-PROCESSING:
                if output:
                    # Dictionaries for each file type
                    sel_lines = output['sel_lines']
                    info = output['info']
                    options = output['options']
                    rdb_file = output['rdb_file']
                    # Save log and line info files
                    ac_save.save_log(info, options, n_files, out_dir=save_output)
                    ac_save.save_line_info(info, sel_lines, out_dir=save_output)
                    if save_plots:
                        # Save time-series plots
                        ac_plot.plt_time(info, out_dir=save_output, rmv_flgs=False, save_plt=True)
                        ac_plot.plt_time_mlty(info, out_dir=save_output, rmv_flgs=False, save_plt=True, hdrs=calc_index)
                    else: pass
    # Sanity check: every file must have produced exactly one ACTIN call
    if n_files_t != total_files:
        print()
        print("*** ERROR: Number of ACTIN calls different than number of files.")
        print("n_files_t:", n_files_t)
        print("total_files:", total_files)
    elapsed_time = (time.time() - start_time)/60
    # Summary:
    print("\n---------------------------------")
    print("Fractional pixels:\t{}".format(frac))
    print("Files analysed:\t\t{}".format(total_files))
    print("Save output:\t\t{}".format(save_output))
    print("Elapsed time:\t\t{:.4f} min".format(elapsed_time))
    return
def get_config():
    """
    Check for existence of the ACTIN config folder and config file, creating
    them if not present.

    Returns
    -------
    str
        Path to the user's 'config_lines.txt' file.
    """
    cfg_dir = appdirs.user_config_dir('ACTIN')
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` pair.
    os.makedirs(cfg_dir, exist_ok=True)
    cfg_file = os.path.join(cfg_dir, 'config_lines.txt')
    if not os.path.isfile(cfg_file):
        create_user_config(cfg_file)
    return cfg_file
def create_user_config(cfg_file):
    """Copy the packaged default 'config_lines.txt' to *cfg_file*."""
    import shutil
    # resource_stream gives a file-like object backed by the packaged file;
    # its .name is the on-disk path we copy from.
    resource = pkg_resources.resource_stream(__name__, 'config_lines.txt')
    shutil.copyfile(resource.name, cfg_file)
def main():
    """
    Command-line entry point: parse the terminal arguments and hand them to
    `actin`.
    """
    def str2bool(value):
        # Any casing of "true" maps to True; everything else to False.
        return str(value).lower() == 'true'

    arg_parser = argparse.ArgumentParser()

    # Input selection
    arg_parser.add_argument('--files', '-f', help='Read file(s)', nargs='+')
    arg_parser.add_argument('--calc_index', '-i', help="Index id to calculate as designated by 'ind_id' in config_index.txt.", nargs='+', default=None)
    arg_parser.add_argument('--rv_in', '-rv', help="RV value to calibrate wavelength. If False (default) try to read RV from CCF file.", nargs='+', default=None, type=float)
    arg_parser.add_argument('--config_file', '-cf', help='Path to config_file, or False (default) read config file from standard directory.', default=None)
    # Output control
    arg_parser.add_argument('--save_output', '-s', help='Path to output directory of data table, or False (default).', default=False)
    arg_parser.add_argument('--ln_plts', '-lp', help="Path to directory to save line plots. If 'same' saves line plots to same directory of data output. If 'show' only shows the plots. If 'False' (default) does not save or show line plots", default=False)
    arg_parser.add_argument('--obj_name', '-obj', help='Give target a name that overides the one from the fits files.', default=None)
    arg_parser.add_argument('--targ_list', '-tl', help='Give a list of stars to select from fits files.', nargs='+', default=None)
    # Behaviour flags
    arg_parser.add_argument('--del_out', '-del', help='Delete output data file if True.', default=False, type=str2bool)
    arg_parser.add_argument('--test', '-t', help='Tests actin using the provided fits files in the "test_files" directory. Options are "e2ds", "s1d", and "adp"', default=False)
    arg_parser.add_argument('--frac', '-frc', help='Turns fractional pixel on (True, default) or off (False).', default=True, type=str2bool)
    arg_parser.add_argument('--save_plots', '-sp', help="If True saves time-series and multi-plots to same directory as 'save_output'.", default=False, type=str2bool)

    opts = arg_parser.parse_args()

    actin(files=opts.files,
          calc_index=opts.calc_index,
          rv_in=opts.rv_in,
          config_file=opts.config_file,
          save_output=opts.save_output,
          ln_plts=opts.ln_plts,
          obj_name=opts.obj_name,
          targ_list=opts.targ_list,
          del_out=opts.del_out,
          test=opts.test,
          frac=opts.frac,
          save_plots=opts.save_plots)
if __name__ == "__main__":
    # Print the ACTIN banner/version information, then run the CLI parser.
    ac_set.preamble(version_file, verbose=True)
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
628,
198,
2,
17764,
351,
21015,
362,
14,
18,
25,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
11748,
25064,
11,
28686,
198,
11748,... | 2.301431 | 5,381 |
from collections.abc import Iterable
from operator import attrgetter
from sympy import sympify
from devito.symbolics import (retrieve_functions, retrieve_indexed, split_affine,
uxreplace)
from devito.tools import PartialOrderTuple, filter_sorted, flatten, as_tuple
from devito.types import Dimension, Eq, IgnoreDimSort
__all__ = ['dimension_sort', 'generate_implicit_exprs', 'lower_exprs']
def dimension_sort(expr):
    """
    Topologically sort the Dimensions in ``expr``, based on the order in which they
    appear within Indexeds.

    Parameters
    ----------
    expr : expr-like
        Expression carrying ``implicit_dims`` and Indexed accesses.

    Returns
    -------
    PartialOrderTuple
        The Dimensions of ``expr`` plus the ordering relations between them.
    """
    # NOTE(review): `handle_indexed` is defined elsewhere in this module (not
    # visible in this chunk); it maps an Indexed to a relation tuple.
    if isinstance(expr.implicit_dims, IgnoreDimSort):
        relations = set()
    else:
        relations = {handle_indexed(i) for i in retrieve_indexed(expr)}
    # Add in any implicit dimension (typical of scalar temporaries, or Step)
    relations.add(expr.implicit_dims)
    # Add in leftover free dimensions (not an Indexed' index)
    extra = set([i for i in expr.free_symbols if isinstance(i, Dimension)])
    # Add in pure data dimensions (e.g., those accessed only via explicit values,
    # such as A[3])
    indexeds = retrieve_indexed(expr, deep=True)
    extra.update(set().union(*[set(i.function.dimensions) for i in indexeds]))
    # Enforce determinism
    extra = filter_sorted(extra, key=attrgetter('name'))
    # Add in implicit relations for parent dimensions
    # -----------------------------------------------
    # 1) Note that (d.parent, d) is what we want, while (d, d.parent) would be
    # wrong; for example, in `((t, time), (t, x, y), (x, y))`, `x` could now
    # preceed `time`, while `t`, and therefore `time`, *must* appear before `x`,
    # as indicated by the second relation
    implicit_relations = {(d.parent, d) for d in extra if d.is_Derived}
    # 2) To handle cases such as `((time, xi), (x,))`, where `xi` a SubDimension
    # of `x`, besides `(x, xi)`, we also have to add `(time, x)` so that we
    # obtain the desired ordering `(time, x, xi)`. W/o `(time, x)`, the ordering
    # `(x, time, xi)` might be returned instead, which would be non-sense
    implicit_relations.update({tuple(d.root for d in i) for i in relations})
    ordering = PartialOrderTuple(extra, relations=(relations | implicit_relations))
    return ordering
def generate_implicit_exprs(expressions):
    """
    Create and add implicit expressions.

    Implicit expressions are those not explicitly defined by the user
    but instead are requisites of some specified functionality.

    Currently, implicit expressions stem from the following:

        * MultiSubDomains attached to input equations.

    Parameters
    ----------
    expressions : iterable of Eq
        The user-provided equations.

    Returns
    -------
    list of Eq
        The input equations, with the implicit equations of any MultiSubDomain
        inserted before the equation that uses it.
    """
    found = {}  # MultiSubDomain -> its (cached) implicit expressions
    processed = []
    for e in expressions:
        if e.subdomain:
            try:
                dims = [d.root for d in e.free_symbols if isinstance(d, Dimension)]
                sub_dims = [d.root for d in e.subdomain.dimensions]
                sub_dims.extend(e.subdomain.implicit_dimensions)
                # PERF FIX: build the lookup set once; the original constructed
                # `frozenset(sub_dims)` inside the comprehension condition,
                # i.e. once per element of `dims`.
                sub_dims_set = frozenset(sub_dims)
                dims = [d for d in dims if d not in sub_dims_set]
                dims.extend(e.subdomain.implicit_dimensions)
                if e.subdomain not in found:
                    grid = list(retrieve_functions(e, mode='unique'))[0].grid
                    found[e.subdomain] = [i.func(*i.args, implicit_dims=dims) for i in
                                          e.subdomain._create_implicit_exprs(grid)]
                processed.extend(found[e.subdomain])
                dims.extend(e.subdomain.dimensions)
                new_e = Eq(e.lhs, e.rhs, subdomain=e.subdomain, implicit_dims=dims)
                processed.append(new_e)
            except AttributeError:
                # Not a MultiSubDomain: the attribute accesses above are the
                # (intentional) duck-typing test, so fall back to the equation
                # as given
                processed.append(e)
        else:
            processed.append(e)
    return processed
def lower_exprs(expressions, **kwargs):
    """
    Lowering an expression consists of the following passes:

        * Indexify functions;
        * Align Indexeds with the computational domain;
        * Apply user-provided substitution (via ``kwargs['subs']``).

    Returns a list when given an iterable, otherwise the single lowered
    expression.

    Examples
    --------
    f(x - 2*h_x, y) -> f[xi + 2, yi + 4]  (assuming halo_size=4)
    """
    # Normalize subs
    subs = {k: sympify(v) for k, v in kwargs.get('subs', {}).items()}
    processed = []
    for expr in as_tuple(expressions):
        try:
            dimension_map = expr.subdomain.dimension_map
        except AttributeError:
            # Some Relationals may be pure SymPy objects, thus lacking the subdomain
            dimension_map = {}
        # Handle Functions (typical case)
        mapper = {f: f.indexify(lshift=True, subs=dimension_map)
                  for f in retrieve_functions(expr)}
        # Handle Indexeds (from index notation)
        for i in retrieve_indexed(expr):
            f = i.function
            # Introduce shifting to align with the computational domain
            # (note: recursive call lowers any nested expressions in the indices)
            indices = [(lower_exprs(a) + o) for a, o in
                       zip(i.indices, f._size_nodomain.left)]
            # Substitute spacing (spacing only used in own dimension)
            # (note: `i` is deliberately rebound here, shadowing the Indexed)
            indices = [i.xreplace({d.spacing: 1, -d.spacing: -1})
                       for i, d in zip(indices, f.dimensions)]
            # Apply substitutions, if necessary
            if dimension_map:
                indices = [j.xreplace(dimension_map) for j in indices]
            mapper[i] = f.indexed[indices]
        # Add dimensions map to the mapper in case dimensions are used
        # as an expression, i.e. Eq(u, x, subdomain=xleft)
        mapper.update(dimension_map)
        # Add the user-supplied substitutions
        mapper.update(subs)
        processed.append(uxreplace(expr, mapper))
    if isinstance(expressions, Iterable):
        return processed
    else:
        assert len(processed) == 1
        return processed.pop()
| [
6738,
17268,
13,
39305,
1330,
40806,
540,
198,
6738,
10088,
1330,
708,
81,
1136,
353,
198,
198,
6738,
10558,
88,
1330,
10558,
1958,
198,
198,
6738,
1614,
10094,
13,
1837,
2022,
19615,
1330,
357,
1186,
30227,
62,
12543,
2733,
11,
19818,
... | 2.413779 | 2,395 |
# https://docs.python.org/3/tutorial/modules.html#standard-modules
import sys
# Make the sibling "ImageManupilation" directory importable from this script
# (path is relative to the current working directory).
sys.path.append("../ImageManupilation")
| [
2,
3740,
1378,
31628,
13,
29412,
13,
2398,
14,
18,
14,
83,
44917,
14,
18170,
13,
6494,
2,
20307,
12,
18170,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7203,
40720,
5159,
5124,
929,
10520,
4943,
198
] | 3.105263 | 38 |
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
"""
TeX/LaTeX/PDFLaTeX/XeLaTeX support
Example::
def configure(conf):
conf.load('tex')
if not conf.env.LATEX:
conf.fatal('The program LaTex is required')
def build(bld):
bld(
features = 'tex',
type = 'latex', # pdflatex or xelatex
source = 'document.ltx', # mandatory, the source
outs = 'ps', # 'pdf' or 'ps pdf'
deps = 'crossreferencing.lst', # to give dependencies directly
prompt = 1, # 0 for the batch mode
)
To configure with a special program use::
$ PDFLATEX=luatex waf configure
"""
import os, re
from waflib import Utils, Task, Errors, Logs
from waflib.TaskGen import feature, before_method
re_bibunit = re.compile(r'\\(?P<type>putbib)\[(?P<file>[^\[\]]*)\]',re.M)
def bibunitscan(self):
    """
    Parse the inputs and try to find the *bibunit* dependencies

    :return: list of bibunit files
    :rtype: list of :py:class:`waflib.Node.Node`
    """
    found_nodes = []
    src = self.inputs[0]
    if not src:
        return found_nodes

    text = src.read()
    for match in re_bibunit.finditer(text):
        path = match.group('file')
        if not path:
            # empty capture: keep the original trace message
            Logs.debug('tex: could not find %s' % path)
            continue
        for ext in ('', '.bib'):
            # add another loop for the tex include paths?
            Logs.debug('tex: trying %s%s' % (path, ext))
            candidate = src.parent.find_resource(path + ext)
            if candidate:
                found_nodes.append(candidate)
            # no break, people are crazy

    Logs.debug("tex: found the following bibunit files: %s" % found_nodes)
    return found_nodes
exts_deps_tex = ['', '.ltx', '.tex', '.bib', '.pdf', '.png', '.eps', '.ps']
"""List of typical file extensions included in latex files"""
exts_tex = ['.ltx', '.tex']
"""List of typical file extensions that contain latex"""
re_tex = re.compile(r'\\(?P<type>include|bibliography|putbib|includegraphics|input|import|bringin|lstinputlisting)(\[[^\[\]]*\])?{(?P<file>[^{}]*)}',re.M)
"""Regexp for expressions that may include latex files"""
g_bibtex_re = re.compile('bibdata', re.M)
"""Regexp for bibtex files"""
class tex(Task.Task):
    """
    Compile a tex/latex file.

    .. inheritance-diagram:: waflib.Tools.tex.latex waflib.Tools.tex.xelatex waflib.Tools.tex.pdflatex
    """

    bibtex_fun, _ = Task.compile_fun('${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}', shell=False)
    bibtex_fun.__doc__ = """
    Execute the program **bibtex**
    """

    makeindex_fun, _ = Task.compile_fun('${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}', shell=False)
    makeindex_fun.__doc__ = """
    Execute the program **makeindex**
    """

    def exec_command(self, cmd, **kw):
        """
        Override :py:meth:`waflib.Task.Task.exec_command` to execute the command without buffering (latex may prompt for inputs)

        :return: the return code
        :rtype: int
        """
        bld = self.generator.bld
        try:
            if not kw.get('cwd', None):
                kw['cwd'] = bld.cwd
        except AttributeError:
            bld.cwd = kw['cwd'] = bld.variant_dir
        return Utils.subprocess.Popen(cmd, **kw).wait()

    def scan_aux(self, node):
        """
        A recursive regex-based scanner that finds included auxiliary files.
        """
        # NOTE(review): `parse_node` is not defined in this module as shown —
        # upstream waf defines it as a nested closure here that recursively
        # follows \\@input{...} references and appends to `nodes`. As written,
        # calling this method raises NameError; restore the closure from the
        # upstream source.
        nodes = [node]
        re_aux = re.compile(r'\\@input{(?P<file>[^{}]*)}', re.M)
        parse_node(node)
        return nodes

    def scan(self):
        """
        A recursive regex-based scanner that finds latex dependencies. It uses :py:attr:`waflib.Tools.tex.re_tex`

        Depending on your needs you might want:

        * to change re_tex::

                from waflib.Tools import tex
                tex.re_tex = myregex

        * or to change the method scan from the latex tasks::

                from waflib.Task import classes
                classes['latex'].scan = myscanfunction
        """
        # NOTE(review): as in scan_aux, the nested `parse_node` closure from
        # upstream waf is missing in this copy.
        node = self.inputs[0]
        nodes = []
        names = []
        seen = []
        if not node: return (nodes, names)
        parse_node(node)
        for x in nodes:
            x.parent.get_bld().mkdir()
        Logs.debug("tex: found the following : %s and names %s" % (nodes, names))
        return (nodes, names)

    def check_status(self, msg, retcode):
        """
        Check an exit status and raise an error with a particular message

        :param msg: message to display if the code is non-zero
        :type msg: string
        :param retcode: condition
        :type retcode: boolean
        """
        if retcode != 0:
            raise Errors.WafError("%r command exit status %r" % (msg, retcode))

    def bibfile(self):
        """
        Parse the *.aux* files to find bibfiles to process.
        If yes, execute :py:meth:`waflib.Tools.tex.tex.bibtex_fun`
        """
        for aux_node in self.aux_nodes:
            try:
                ct = aux_node.read()
            except (OSError, IOError) as e:
                # BUG FIX: the original format string had two placeholders but
                # was given a single, non-tuple argument, so reaching this
                # branch raised TypeError instead of logging.
                Logs.error('Error reading %s: %r' % (aux_node.abspath(), e))
                continue

            if g_bibtex_re.findall(ct):
                Logs.warn('calling bibtex')

                # run bibtex with a clean environment pointing at our inputs
                self.env.env = {}
                self.env.env.update(os.environ)
                self.env.env.update({'BIBINPUTS': self.TEXINPUTS, 'BSTINPUTS': self.TEXINPUTS})
                self.env.SRCFILE = aux_node.name[:-4]
                self.check_status('error when calling bibtex', self.bibtex_fun())

    def bibunits(self):
        """
        Parse the *.aux* file to find bibunit files. If there are bibunit files,
        execute :py:meth:`waflib.Tools.tex.tex.bibtex_fun`.
        """
        try:
            bibunits = bibunitscan(self)
        except OSError:
            Logs.error('error bibunitscan')
        else:
            if bibunits:
                fn = ['bu' + str(i) for i in range(1, len(bibunits) + 1)]
                if fn:
                    Logs.warn('calling bibtex on bibunits')

                for f in fn:
                    self.env.env = {'BIBINPUTS': self.TEXINPUTS, 'BSTINPUTS': self.TEXINPUTS}
                    self.env.SRCFILE = f
                    self.check_status('error when calling bibtex', self.bibtex_fun())

    def makeindex(self):
        """
        Look on the filesystem if there is a *.idx* file to process. If yes, execute
        :py:meth:`waflib.Tools.tex.tex.makeindex_fun`
        """
        try:
            idx_path = self.idx_node.abspath()
            os.stat(idx_path)
        except OSError:
            Logs.warn('index file %s absent, not calling makeindex' % idx_path)
        else:
            Logs.warn('calling makeindex')

            self.env.SRCFILE = self.idx_node.name
            self.env.env = {}
            self.check_status('error when calling makeindex %s' % idx_path, self.makeindex_fun())

    def bibtopic(self):
        """
        Additional .aux files from the bibtopic package
        """
        p = self.inputs[0].parent.get_bld()
        if os.path.exists(os.path.join(p.abspath(), 'btaux.aux')):
            self.aux_nodes += p.ant_glob('*[0-9].aux')

    def run(self):
        """
        Runs the TeX build process.

        It may require multiple passes, depending on the usage of cross-references,
        bibliographies, content susceptible of needing such passes.
        The appropriate TeX compiler is called until the *.aux* files stop changing.

        Makeindex and bibtex are called if necessary.
        """
        env = self.env

        if not env['PROMPT_LATEX']:
            env.append_value('LATEXFLAGS', '-interaction=batchmode')
            env.append_value('PDFLATEXFLAGS', '-interaction=batchmode')
            env.append_value('XELATEXFLAGS', '-interaction=batchmode')

        fun = self.texfun

        node = self.inputs[0]
        srcfile = node.abspath()

        texinputs = self.env.TEXINPUTS or ''
        self.TEXINPUTS = node.parent.get_bld().abspath() + os.pathsep + node.parent.get_src().abspath() + os.pathsep + texinputs + os.pathsep

        # important, set the cwd for everybody
        self.cwd = self.inputs[0].parent.get_bld().abspath()

        Logs.warn('first pass on %s' % self.__class__.__name__)

        self.env.env = {}
        self.env.env.update(os.environ)
        self.env.env.update({'TEXINPUTS': self.TEXINPUTS})
        self.env.SRCFILE = srcfile
        self.check_status('error when calling latex', fun())

        self.aux_nodes = self.scan_aux(node.change_ext('.aux'))
        self.idx_node = node.change_ext('.idx')

        self.bibtopic()
        self.bibfile()
        self.bibunits()
        self.makeindex()

        # re-run the compiler until the .aux files stabilize
        # (renamed from `hash`, which shadowed the builtin)
        cur_hash = ''
        for i in range(10):
            # prevent against infinite loops - one never knows
            # watch the contents of file.aux and stop if file.aux does not change anymore
            prev_hash = cur_hash
            try:
                hashes = [Utils.h_file(x.abspath()) for x in self.aux_nodes]
                cur_hash = Utils.h_list(hashes)
            except (OSError, IOError):
                Logs.error('could not read aux.h')
                pass
            if cur_hash and cur_hash == prev_hash:
                break

            # run the command
            Logs.warn('calling %s' % self.__class__.__name__)

            self.env.env = {}
            self.env.env.update(os.environ)
            self.env.env.update({'TEXINPUTS': self.TEXINPUTS})
            self.env.SRCFILE = srcfile
            self.check_status('error when calling %s' % self.__class__.__name__, fun())
@feature('tex')
@before_method('process_source')
def apply_tex(self):
    """
    Create :py:class:`waflib.Tools.tex.tex` objects, and dvips/dvipdf/pdf2ps tasks if necessary (outs='ps', etc).
    """
    # Default to pdflatex when no (or an unknown) type is requested
    if not getattr(self, 'type', None) in ['latex', 'pdflatex', 'xelatex']:
        self.type = 'pdflatex'
    tree = self.bld
    outs = Utils.to_list(getattr(self, 'outs', []))
    # prompt for incomplete files (else the batchmode is used)
    self.env['PROMPT_LATEX'] = getattr(self, 'prompt', 1)
    # Resolve user-declared manual dependencies ('deps' attribute) to nodes
    deps_lst = []
    if getattr(self, 'deps', None):
        deps = self.to_list(self.deps)
        for filename in deps:
            n = self.path.find_resource(filename)
            if not n:
                self.bld.fatal('Could not find %r for %r' % (filename, self))
            if not n in deps_lst:
                deps_lst.append(n)
    # One compile task per source document
    for node in self.to_nodes(self.source):
        if self.type == 'latex':
            task = self.create_task('latex', node, node.change_ext('.dvi'))
        elif self.type == 'pdflatex':
            task = self.create_task('pdflatex', node, node.change_ext('.pdf'))
        elif self.type == 'xelatex':
            task = self.create_task('xelatex', node, node.change_ext('.pdf'))
        task.env = self.env
        # add the manual dependencies
        if deps_lst:
            try:
                lst = tree.node_deps[task.uid()]
                for n in deps_lst:
                    if not n in lst:
                        lst.append(n)
            except KeyError:
                tree.node_deps[task.uid()] = deps_lst
        # Build a TEXINPUTS value covering the source dir, build dir and any
        # pre-existing user setting, for the post-processing tasks below
        v = dict(os.environ)
        p = node.parent.abspath() + os.pathsep + self.path.abspath() + os.pathsep + self.path.get_bld().abspath() + os.pathsep + v.get('TEXINPUTS', '') + os.pathsep
        v['TEXINPUTS'] = p
        # Conversion tasks depending on the requested output formats
        if self.type == 'latex':
            if 'ps' in outs:
                tsk = self.create_task('dvips', task.outputs, node.change_ext('.ps'))
                tsk.env.env = dict(v)
            if 'pdf' in outs:
                tsk = self.create_task('dvipdf', task.outputs, node.change_ext('.pdf'))
                tsk.env.env = dict(v)
        elif self.type == 'pdflatex':
            if 'ps' in outs:
                self.create_task('pdf2ps', task.outputs, node.change_ext('.ps'))
    # Sources are fully consumed here; prevent process_source from re-adding them
    self.source = []
def configure(self):
    """
    Try to find the programs tex, latex and others. Do not raise any error if they
    are not found.
    """
    env = self.env
    programs = 'tex latex pdflatex xelatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps'
    for name in programs.split():
        try:
            self.find_program(name, var=name.upper())
        except self.errors.ConfigurationError:
            # missing tools are tolerated; the build will fail only if used
            pass
    env['DVIPSFLAGS'] = '-Ppdf'
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
2,
5658,
15196,
88,
11,
4793,
12,
10333,
357,
5350,
8,
198,
198,
37811,
198,
49568,
14,
14772,
49568,
14,
5760,
3697,
64,
49568,
14,
55,
68,
... | 2.37194 | 4,412 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-25 21:34
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
319,
2177,
12,
3023,
12,
1495,
2310,
25,
2682,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738... | 2.8 | 55 |
# -*- coding: utf-8 -*-
import numpy as np
import treecode.energy_and_momentum as EM
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
5509,
8189,
13,
22554,
62,
392,
62,
32542,
298,
388,
355,
17228,
201,
198,
201,
198,
201,
198,
201,
198
... | 2.133333 | 45 |
# -*- coding: utf-8 -*-
import argparse
from yolo import YOLO
from cache import detect_cam ,detect_video
FLAGS = None

if __name__ == '__main__':
    # argument_default=SUPPRESS keeps unspecified options out of the
    # namespace; every option below still carries an explicit default, so
    # all of them are present after parsing.
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    '''
    Command line options
    '''
    parser.add_argument(
        "--camera", nargs='?', type=int, required=False, default=0,
        help = "Camera mode, will ignore all positional arguments, please input your camera number"
    )
    parser.add_argument(
        "--width", nargs='?', type=int, default=2560,
        help = "[Optional] Camera width"
    )
    parser.add_argument(
        "--height", nargs='?', type=int, default=960,
        help = "[Optional] Camera height"
    )
    parser.add_argument(
        "--output", nargs='?', type=str, default="",
        help = "[Optional] Video output path"
    )
    parser.add_argument(
        "--input", nargs='?', type=str, required=False, default='./path2your_video',
        help = "Video input path"
    )
    FLAGS = parser.parse_args()

    # BUG FIX: the two modes were independent `if` statements, so a video run
    # also started camera mode afterwards, and the final "must specify"
    # message was attached only to the camera check. The elif chain makes the
    # modes mutually exclusive: an explicit --input selects video mode,
    # otherwise the (always-present, default 0) camera is used.
    if "input" in FLAGS and FLAGS.input != './path2your_video':
        print("Video mode")
        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)
    elif "camera" in FLAGS:
        print("Camera mode")
        print(FLAGS.camera, FLAGS.width, FLAGS.height, FLAGS.output)
        detect_cam(YOLO(**vars(FLAGS)), FLAGS.camera, FLAGS.width, FLAGS.height, FLAGS.output)
    else:
        print("Must specify at least video_input_path. See usage with --help.")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
1822,
29572,
198,
6738,
331,
14057,
1330,
575,
3535,
46,
198,
6738,
12940,
1330,
4886,
62,
20991,
837,
15255,
478,
62,
15588,
198,
198,
38948,
50,
796,
6045,... | 2.644737 | 532 |
from agbot.core.model.context import VerticalContext
from .tp_ui import UiTestPoint
| [
6738,
556,
13645,
13,
7295,
13,
19849,
13,
22866,
1330,
38937,
21947,
198,
6738,
764,
34788,
62,
9019,
1330,
471,
72,
14402,
12727,
628
] | 3.541667 | 24 |
from typing import Callable, Dict, List
# CLI
# CLI help texts
OUTPUT_DIR_HELP: str = "The path to the output directory."
PRETTY_HELP: str = "Pretty-print the SVG code."
SOURCE_HELP: str = "The source of the emoji to obtain."
SOURCES: List[str] = ["Twemoji", "OpenMoji"]

# Download URL templates ({code} is the emoji codepoint string)
TWEMOJI_URL: str = (
    "https://raw.githubusercontent.com/twitter/twemoji/v13.0.2/assets/svg/{code}.svg"
)
OPENMOJI_URL: str = (
    "https://raw.githubusercontent.com/hfg-gmuend/openmoji/13.0.0/color/svg/{code}.svg"
)
# Per-source URL template and codepoint case normalizer, keyed in SOURCES order
URLS: Dict[str, str] = dict(zip(SOURCES, (TWEMOJI_URL, OPENMOJI_URL)))
CODE_CASES: Dict[str, Callable[[str], str]] = dict(
    zip(SOURCES, (str.lower, str.upper))
)

# SVG XML namespace
NAMESPACE_URI: str = "http://www.w3.org/2000/svg"
| [
6738,
19720,
1330,
4889,
540,
11,
360,
713,
11,
7343,
198,
198,
2,
43749,
198,
2606,
7250,
3843,
62,
34720,
62,
39,
3698,
47,
25,
965,
796,
366,
464,
3108,
284,
262,
5072,
8619,
526,
198,
47,
26087,
9936,
62,
39,
3698,
47,
25,
9... | 2.187879 | 330 |
from typing import Union, List, Tuple
import math
from abc import ABC, abstractmethod
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from utils.gen_args import Arguments
from memory.common import ReplayBufferAbstract
from agents.resnet_head import IMPALAResnet
| [
6738,
19720,
1330,
4479,
11,
7343,
11,
309,
29291,
198,
11748,
10688,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
... | 3.613636 | 88 |
# Store the version of the package
__version__ = "2021.08.01"
| [
2,
9363,
262,
2196,
286,
262,
5301,
198,
834,
9641,
834,
796,
366,
1238,
2481,
13,
2919,
13,
486,
1,
198
] | 2.952381 | 21 |
# Name of the application database
DATABASE = 'db_edot'

# mysql connection settings
# NOTE(review): empty root password — development-only credentials; do not
# ship as-is.
HOST = 'localhost'
PORT = 3306
USER = 'root'
PASSWD = ''
SQLDB = 'db_edot'

from flask import Flask, render_template, request, g

app = Flask(__name__)

# DB support
import MySQLdb

# returns a database connection for MySQL
# set this line to define database connection
# NOTE(review): `connect_to_database_mysql` is defined elsewhere in the
# original file (not visible in this chunk).
DBFUNC = connect_to_database_mysql

# Table names used throughout the app
tbl_user = "tbl_user"
tbl_product = "tbl_product"
tbl_orderlines = "tbl_orderlines"
tbl_order = "tbl_order"
tbl_category = "tbl_category"
"""
def add_testdata():
db = DBFUNC(SQLDB)
print "Adding testdata"
cursor = db.cursor()
cursor.execute("insert into " + tbl_category + "(name) values ('Fine Gravel');")
cursor.execute("insert into " + tbl_category + "(name) values ('Lag Gravel');")
cursor.execute("insert into " + tbl_category + "(name) values ('Plateau Gravel');")
cursor.execute("insert into " + tbl_category + "(name) values ('Pea Gravel');")
cursor.execute("insert into " + tbl_category + "(name) values ('Crushed Stone');")
cursor.execute("insert into " + tbl_product + "(name, description, image_url, price, cat_id) values ('Gravel 2mm', 'Two millimeter fine gravel', '/images/fine1.png', '29.50', (SELECT id from tbl_category WHERE name='Fine Gravel'));")
cursor.execute("insert into " + tbl_product + "(name, description, image_url, price, cat_id) values ('Gravel 4mm', 'Four millimeter fine gravel', '/images/fine2.png', '99.90', (SELECT id from tbl_category WHERE name='Fine Gravel'));")
cursor.execute("insert into " + tbl_product + "(name, description, image_url, price, cat_id) values ('Granite', 'A common type of felsic intrusive igneous rock that is granular and phaneritic in texture.', '/images/granite.png', '995.90', (SELECT id from tbl_category WHERE name='Crushed Stone'));")
cursor.execute("insert into " + tbl_product + "(name, description, image_url, price, cat_id) values ('Limestone', 'A sedimentary rock composed largely of the minerals calcite and aragonite.', '/images/limestone.png', '1050.0', (SELECT id from tbl_category WHERE name='Crushed Stone'));")
cursor.execute("insert into " + tbl_product + "(name, description, image_url, price, cat_id) values ('Dolomite', 'An anhydrous carbonate mineral composed of calcium magnesium carbonate.', '/images/rock.png', '1250.0', (SELECT id from tbl_category WHERE name='Crushed Stone'));")
db.commit()
db.close()
"""
main()
| [
35,
1404,
6242,
11159,
796,
705,
9945,
62,
276,
313,
6,
198,
198,
2,
48761,
198,
39,
10892,
796,
705,
36750,
6,
198,
15490,
796,
513,
20548,
198,
29904,
796,
705,
15763,
6,
198,
47924,
22332,
796,
10148,
198,
17861,
11012,
796,
705,... | 2.971814 | 816 |
import os
import importlib

# The plugin's name is the directory this file lives in:
# .../plugins/<pluginName>/admin.py -> "<pluginName>"
pluginName = os.path.abspath(__file__).split(os.path.sep)[-2]

# Import plugins.<pluginName>.models dynamically. importlib.import_module
# replaces the original exec() of a built "import ... as models" string —
# same effect, without executing generated source text.
models = importlib.import_module('.'.join(['plugins', pluginName, 'models']))

from corpusslayer.adminModelRegister import registerForMe
from django.contrib import admin

# Register your models here.
registerForMe(admin, models)
| [
11748,
28686,
198,
33803,
5376,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
834,
7753,
834,
737,
35312,
7,
418,
13,
6978,
13,
325,
79,
38381,
12,
17,
60,
198,
11748,
1370,
796,
705,
11748,
705,
10,
10786,
2637,
13,
22179,
7,
1781... | 3.192982 | 114 |
import unittest
from seven_segment import possible_numbers, seven_segment
# class Tests(unittest.TestCase):
# TESTS = {
# "Basics": [
# {
# "input": [['B', 'C', 'b', 'c'],
# ['A']],
# "answer": 2,
# },
# {
# "input": [['B', 'C', 'a', 'f', 'g', 'c', 'd'],
# ['A', 'G', 'D', 'e']],
# "answer": 6,
# },
# {
# "input": [['A', 'B', 'C', 'D', 'E', 'F', 'a', 'b', 'c', 'd', 'e', 'f'],
# ['G', 'g']],
# "answer": 4,
# },
# {
# "input": [['B', 'C', 'a', 'f', 'g', 'c', 'd'],
# ['A', 'G', 'D', 'F', 'b', 'e']],
# "answer": 20,
# },
# {
# "input": [['A', 'B', 'C', 'b', 'c', 'f', 'g'],
# ['G', 'd']],
# "answer": 1,
# },
# {
# "input": [[],
# ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'a', 'b', 'c', 'd', 'e', 'f', 'g']],
# "answer": 100,
# },
# ],
# # "Randoms": make_test(10),
# }
# def test_Basics(self):
# for i in self.TESTS['Basics']:
# assert seven_segment(*i['input']) == i['answer'], i['input']
# # def test_Extra(self):
# # for i in self.TESTS['Extra']:
# # assert seven_segment(i['input']) == i['answer'], i['input']
# if __name__ == "__main__": # pragma: no cover
# unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
3598,
62,
325,
5154,
1330,
1744,
62,
77,
17024,
11,
3598,
62,
325,
5154,
628,
198,
198,
2,
1398,
30307,
7,
403,
715,
395,
13,
14402,
20448,
2599,
198,
2,
220,
220,
220,
220,
309,
1546,
4694,
... | 1.516791 | 1,072 |
from datetime import date
from enum import Enum
from typing import List, Tuple
import matplotlib.pyplot as plt
import numpy as np
from dateutil.relativedelta import relativedelta
from . import query
| [
6738,
4818,
8079,
1330,
3128,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
7343,
11,
309,
29291,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3128,
22602... | 3.5 | 58 |
from compas.geometry import Point
from compas.geometry import Line
from compas.geometry import NurbsCurve
from compas.artists import Artist
from compas.colors import Color
# Build a straight segment in the XY plane and represent it as a NURBS curve.
line = Line(Point(0, 0, 0), Point(3, 3, 0))
curve = NurbsCurve.from_line(line)

# ==============================================================================
# Visualisation
# ==============================================================================

# Reset the scene, draw the curve in green, then draw its control points.
Artist.clear()

Artist(curve).draw(color=Color.green())

for point in curve.points:  # curve.points: the curve's control points
    Artist(point).draw()

Artist.redraw()
| [
6738,
552,
292,
13,
469,
15748,
1330,
6252,
198,
6738,
552,
292,
13,
469,
15748,
1330,
6910,
198,
6738,
552,
292,
13,
469,
15748,
1330,
49281,
1443,
26628,
303,
198,
6738,
552,
292,
13,
433,
1023,
1330,
18902,
198,
6738,
552,
292,
1... | 3.802721 | 147 |
# -*- coding:utf-8 -*-
try:
import simplejson as json
except ImportError:
import json
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
28311,
25,
198,
220,
220,
220,
1330,
2829,
17752,
355,
33918,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
1330,
33918,
628
] | 2.638889 | 36 |
from django.test import TestCase
from .factories import CommentFactory
from ..models import Activity
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
764,
22584,
1749,
1330,
18957,
22810,
198,
6738,
11485,
27530,
1330,
24641,
628
] | 4.291667 | 24 |
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.thread as vthread
if __name__ == '__main__':
    # mkTest() is defined earlier in this module (outside this chunk).
    test = mkTest()
    # Emit the generated Verilog to 'tmp.v' and echo it for inspection.
    verilog = test.to_verilog('tmp.v')
    print(verilog)

    # Run the testbench in the veriloggen simulator and print the result.
    sim = simulation.Simulator(test)
    rslt = sim.run()
    print(rslt)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
25064,
198,
11748,
28686,
198,
198,
2,
262,
1306,
1627,
460,
307,
4615,
706,
9988,
198,
17597,
13,
6978,
13,
28463,
7,
15,... | 2.641791 | 201 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import time
import torch
import numpy as np
from shared import SharedTensor
import util
def parse_args():
    '''
    Parse input arguments.

    Returns:
        argparse.Namespace: the parsed command-line options.
    '''
    parser = argparse.ArgumentParser(
        description='Train private or public multinomial logistic regression.')
    parser.add_argument('--dataset', default='mnist', type=str,
                        help="dataset to use.",
                        choices=['mnist', 'synth', 'covtype'])
    parser.add_argument('--plaintext', action="store_true",
                        help="use a non-private algorithm")
    parser.add_argument('--width', default=100, type=float,
                        help="width of uniform distribution for secret shares")
    # Fix: the seed was parsed with type=float, but an RNG seed is integral and
    # torch.manual_seed() rejects floats -- TODO confirm no caller relies on a
    # float seed.
    parser.add_argument('--seed', default=2019, type=int,
                        help="Seed the torch RNG.")
    parser.add_argument('--iter', default=10000, type=int,
                        help="Iterations of SGD.")
    parser.add_argument('--batchsize', default=8, type=int,
                        help="Batch size for SGD.")
    parser.add_argument('--cuda', action="store_true",
                        help="Run on CUDA device.")
    parser.add_argument('--data_path', default="/tmp", type=str,
                        help="Path to cache downloaded MNIST data.")
    parser.add_argument('--n_classes', default=2, type=int,
                        help="Number of classes.")
    return parser.parse_args()
if __name__ == "__main__":
    main()  # entry point; main() is defined elsewhere in this module
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
... | 2.40225 | 711 |
import numpy as np
if __name__ == "__main__":
    # synchronize() is defined above in this module (outside this chunk).
    synclist = synchronize(["AbcdEfG", "cdEFG"])
    print([s.get_text() for s in synclist])
| [
11748,
299,
32152,
355,
45941,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
6171,
565,
396,
796,
18305,
1096,
7,
14692,
4826,
10210,
36,
69,
38,
1600,
366,
10210,
25425,
38,
8973,
8,
19... | 2.306452 | 62 |
# *******************************************************************************
#
# Copyright (c) 2019-2021 David Briant. All rights reserved.
#
# *******************************************************************************
from coppertop.pipe import *
from coppertop.core import Missing
from coppertop.std import check, equal
from bones.core.types import index, count, num, pystr, N, T, T1, T2, T3, pyint, pyfloat
from bones.metatypes import BType, BTAtom, S, weaken, cacheAndUpdate, fitsWithin as _fitsWithin
tFred = BTAtom.ensure('fred')
tJoe = BTAtom.ensure('joe')
tSally = BTAtom.ensure('sally')
@coppertop(style=binary2)
if __name__ == '__main__':
main()
print('pass')
| [
2,
41906,
17174,
46068,
8162,
198,
2,
198,
2,
220,
220,
220,
15069,
357,
66,
8,
13130,
12,
1238,
2481,
3271,
25866,
415,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
41906,
17174,
46068,
8162,
628,
198,
6738,
763,
381,
861,
404,
13,... | 3.145374 | 227 |
import logging
import logging.config
import datetime
import GetOldTweets3 as Got
from typing import List
from trend_analyze.src.convert_to_model import ConvertTM
from trend_analyze.src.scraping_tweet import TwitterScraper
from trend_analyze.config import *
from trend_analyze.src.model import *
class TwitterFetcher:
"""
TwitterFetcher can collect data without Api's limitation. But it may collect incomplete information or can only
collect parts of data. If you want to collect complete data, use ApiTwitterFetcher instead of TwitterFetcher
"""
def fetch_user_info_from_name(self, username: str) -> User:
"""
get incomplete user information with username
[!!] This function is unstable. This cannot work well sometimes by query_user_info().
:param username: screen name except first '@'
:type username: str
:return: User
"""
try:
user = self.ts.user_info(username)
return user
except Exception as e:
self.logger.error(e)
return User()
def fetch_tweet(self, username: str = "", max_tweet: int = 0,
q: str = "", since: int = 0, until: int = 0) -> List[Tweet]:
"""
collect tweets with GetOldPython3
[!!] this method may take a lot of time, if you don't specify max tweet count.
:param username: screen name except '@'
:type username: str
:param max_tweet: max tweet count
:type max_tweet: int
:param q: search word
:type q: str
:param since: relative since date (e.g. today => 0, yesterday => 1, a week ago => 7)
:type since: int
:param until: relative until date (e.g. today => 0, yesterday => 1, a week ago => 7)
:type until: int
:return: list[Tweet]:
"""
if since < until:
self.logger.error("Invalid Argument: specify until date before since date")
return []
try:
tc = Got.manager.TweetCriteria()
now = datetime.now()
if username:
tc.setUsername(username)
if max_tweet:
tc.setMaxTweets(max_tweet)
if q:
tc.setQuerySearch(q)
if since:
since_date = (now - datetime.timedelta(days=since)).strftime("%Y-%m-%d")
tc.setSince(since=since_date)
if until:
until_date = (now - datetime.timedelta(days=until)).strftime("%Y-%m-%d")
tc.setUntil(until=until_date)
tweets = list()
g_append = tweets.append
tmp = Got.manager.TweetManager.getTweets(tc)
for g_tweet in tmp:
m_t = self.ctm.from_gti_tweet(g_tweet)
m_t.is_official = False
g_append(m_t)
return tweets
except Exception as e:
self.logger.error(e)
return []
    def fetch_user_relations(self, username: str) -> List[UserRelation]:
        """
        returns list consisted of UserRelation without using api

        :param username: target user's name (screen name without '@')
        :return: List[UserRelation]
        """
        # Resolve the (possibly partial) user record first, then combine it with
        # the scraped follower/following name lists into relation models.
        user = self.fetch_user_info_from_name(username)
        return self.ctm.build_user_relation(user, self.ts.follower_list(username), self.ts.following_list(username))
| [
11748,
18931,
198,
11748,
18931,
13,
11250,
198,
11748,
4818,
8079,
198,
198,
11748,
3497,
19620,
32665,
1039,
18,
355,
11853,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
5182,
62,
38200,
2736,
13,
10677,
13,
1102,
1851,
62,
1462,
62,... | 2.233553 | 1,520 |
# Evaluate two small arithmetic expressions and print the results.
a = 123 * 456
b = (34 * 99) + 22

for result in (a, b):
    print(result)
| [
64,
28,
10163,
9,
29228,
198,
65,
16193,
2682,
9,
2079,
47762,
1828,
198,
198,
4798,
7,
64,
8,
198,
4798,
7,
65,
8,
198
] | 1.68 | 25 |
import logging
import json
import os
from typing import Optional
from retrying import retry
# Module-level logger named after this module; INFO and above are emitted.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def resolve_project_id(project_id=None) -> Optional[int]:
    """
    Resolve the Scrapinghub project id from the following sources, in order
    of precedence:
        - the explicit ``project_id`` argument
        - the PROJECT_ID environment variable
        - the SHUB_JOBKEY environment variable (set on ScrapyCloud jobs)
        - the default target in scrapinghub.yml (read via the shub package)

    This allows code that needs HS or dash API access to run correctly both
    locally and on ScrapingHub.

    :return: the project id as an int, or None when nothing is configured.
    """
    if project_id:
        return int(project_id)
    # read from environment
    env_project = os.environ.get("PROJECT_ID")
    if env_project:
        return int(env_project)
    # for ScrapyCloud jobs the job key is "<project>/<spider>/<job>"
    jobkey = os.environ.get("SHUB_JOBKEY")
    if jobkey:
        return int(jobkey.split("/")[0])
    # read from scrapinghub.yml
    try:
        from shub.config import load_shub_config  # pylint: disable=import-error
    except ImportError:
        logger.warning("Install shub package if want to access scrapinghub.yml")
    else:
        yml_project = load_shub_config().get_project_id("default")
        if yml_project:
            return int(yml_project)
    logger.warning("Project id not found. Use either PROJECT_ID env. variable or scrapinghub.yml default target.")
MINS_IN_A_DAY = 24 * 60  # maximum number of retry attempts (one per minute)
ONE_MIN_IN_S = 60        # wait_fixed is in milliseconds, hence * 1000 below

# Retry decorator for flaky dash/HS calls: retry once per minute for up to a
# day. just_log_exception is defined elsewhere in this module -- presumably it
# logs the exception and returns True so retrying continues; confirm there.
dash_retry_decorator = retry(
    retry_on_exception=just_log_exception, wait_fixed=ONE_MIN_IN_S * 1000, stop_max_attempt_number=MINS_IN_A_DAY
)
| [
11748,
18931,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
1005,
14992,
1330,
1005,
563,
628,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
6404,
1362,
13,
2617,
... | 2.598662 | 598 |
# -*- coding: utf-8 -*-
"""
CentralReport - Webservices module
Contains all entities used with webservices.
https://github.com/CentralReport/
"""
class Answer:
    """
    This entity contains the result of a webservice
    """
    # NOTE(review): no attributes or methods are visible in this chunk -- the
    # class body appears truncated here; confirm against the full module.
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
220,
220,
220,
5694,
19100,
532,
47736,
712,
1063,
8265,
198,
220,
220,
220,
220,
220,
220,
220,
49850,
477,
12066,
973,
351,
2639,
712,
1063,
13,
628... | 2.72043 | 93 |
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""You may copy this file as the starting point of your own model."""
import tensorflow.keras as ke
import os.path
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense
from openfl.federated import KerasTaskRunner
class KerasCNN(KerasTaskRunner):
    """A basic convolutional neural network model."""

    def __init__(self, **kwargs):
        """
        Initialize.

        Args:
            **kwargs: Additional parameters to pass to the function
                (forwarded to both the base KerasTaskRunner and build_model)
        """
        super().__init__(**kwargs)

        path = 'model/saved_model'
        if os.path.isdir(path):
            # Load the previously trained model
            self.model = self.load_native(filepath=path)
            self.logger.info('Loading the previously saved model : model/saved_model')
        else:
            # Build the model
            self.model = self.build_model(
                self.feature_shape, self.data_loader.num_classes, **kwargs)
        # Register the model's tensors with the federation framework.
        self.initialize_tensorkeys_for_functions()

        self.model.summary(print_fn=self.logger.info)

        if self.data_loader is not None:
            self.logger.info(f'Train Set Size : {self.get_train_data_size()}')
            self.logger.info(f'Valid Set Size : {self.get_valid_data_size()}')

    def build_model(self,
                    input_shape,
                    num_classes,
                    conv_kernel_size=(4, 4),
                    conv_strides=(2, 2),
                    conv1_channels_out=16,
                    conv2_channels_out=32,
                    final_dense_inputsize=100,
                    **kwargs):
        """
        Define the model architecture.

        Args:
            input_shape (numpy.ndarray): The shape of the data
            num_classes (int): The number of classes of the dataset

        Returns:
            tensorflow.python.keras.engine.sequential.Sequential: The model defined in Keras
        """
        # Two strided conv layers followed by a small dense classifier head.
        model = Sequential()
        model.add(Conv2D(conv1_channels_out,
                         kernel_size=conv_kernel_size,
                         strides=conv_strides,
                         activation='relu',
                         input_shape=input_shape))
        model.add(Conv2D(conv2_channels_out,
                         kernel_size=conv_kernel_size,
                         strides=conv_strides,
                         activation='relu'))
        model.add(Flatten())
        model.add(Dense(final_dense_inputsize, activation='relu'))
        model.add(Dense(num_classes, activation='softmax'))
        model.compile(loss=ke.losses.categorical_crossentropy,
                      optimizer=ke.optimizers.Adam(),
                      metrics=['accuracy'])

        # initialize the optimizer variables
        # (self.sess is presumably a TF1-style session provided by the base
        # KerasTaskRunner -- confirm against the openfl base class.)
        opt_vars = model.optimizer.variables()

        for v in opt_vars:
            v.initializer.run(session=self.sess)

        return model
| [
2,
15069,
357,
34,
8,
12131,
12,
1238,
2481,
8180,
10501,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
37811,
1639,
743,
4866,
428,
2393,
355,
262,
3599,
966,
286,
534,
898,
2746,
526,
15931,... | 2.031821 | 1,477 |
__________________________________________________________________________________________________
sample 28 ms submission
__________________________________________________________________________________________________
sample 13016 kb submission
__________________________________________________________________________________________________
| [
27193,
10221,
834,
198,
39873,
2579,
13845,
14498,
198,
27193,
10221,
834,
198,
39873,
1511,
27037,
47823,
14498,
198,
27193,
10221,
834,
198
] | 15.130435 | 23 |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import GenericRepr, Snapshot
snapshots = Snapshot()
snapshots['test_definition_parser_raises_error_schema_str_contains_multiple_types 1'] = GenericRepr("<ExceptionInfo ValueError('MyType class was defined with __schema__ containing more than one GraphQL definition (found: ObjectTypeDefinitionNode, ObjectTypeDefinitionNode)') tblen=2>")
snapshots['test_definition_parser_raises_error_when_schema_str_has_invalid_syntax 1'] = GenericRepr('<ExceptionInfo GraphQLSyntaxError("Syntax Error: Unexpected Name \'typo\'.", locations=[SourceLocation(line=1, column=1)]) tblen=6>')
snapshots['test_definition_parser_raises_error_when_schema_type_is_invalid 1'] = GenericRepr("<ExceptionInfo TypeError('MyType class was defined with __schema__ of invalid type: bool') tblen=2>")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
11495,
1477,
24879,
25,
410,
16,
532,
3740,
1378,
42469,
13,
4743,
14,
89,
34,
19,
88,
52,
66,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
19... | 3.114187 | 289 |
import os
from .exceptions import TooManyLoops, MissingDataError
from .lib import (BUILD37, BUILD38, get_data_directory, sorted_nicely,
BUILD37_ACCESSIONS, BUILD38_ACCESSIONS, ACCESSION_LENGTHS,
RCRS_ACCESSION, MITOCHONDRIA_NAMES)
| [
11748,
28686,
198,
198,
6738,
764,
1069,
11755,
1330,
14190,
7085,
27654,
2840,
11,
25639,
6601,
12331,
198,
6738,
764,
8019,
1330,
357,
19499,
26761,
2718,
11,
20571,
26761,
2548,
11,
651,
62,
7890,
62,
34945,
11,
23243,
62,
44460,
306... | 2.279661 | 118 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 7 13:01:20 2021
@author: Hendrik Timm
"""
# plotting with matplotlib
import pickle
from matplotlib import pyplot as plt
import numpy as np
# create graphs for every extracted feature
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
2556,
220,
767,
1511,
25,
486,
25,
1238,
33448,
198,
198,
31,
9800,
25,
14666,
126... | 2.36 | 125 |
lpu = "LPU"  # placeholder: medical facility identifier -- presumably an LPU code; verify against the caller
speciality = "SPECIALLITY"  # placeholder: doctor speciality to query
token = "TOKEN"  # placeholder: bot/API auth token -- do not commit a real secret here
chat_id = "CHAT_NAME"  # placeholder: target chat, if you are planning to send notifications
| [
75,
19944,
796,
366,
43,
5105,
1,
201,
198,
20887,
414,
796,
366,
48451,
40,
7036,
9050,
1,
201,
198,
30001,
796,
366,
10468,
43959,
1,
201,
198,
17006,
62,
312,
796,
366,
31542,
62,
20608,
1,
220,
1303,
611,
334,
302,
1410,
278,
... | 2.22449 | 49 |
import torch
import torch.nn
import os.path as osp
from baseline.utils.parser import get_opts
from baseline.nnutils.stream_modules import ActionClassification
from baseline.data.ucf101 import UCF101Temporal, split
from baseline.logger import Logger
from tqdm import tqdm
import pdb
from tensorboardX import SummaryWriter
import collections
from sklearn.metrics import average_precision_score, recall_score
import numpy as np
from time import gmtime, strftime
if __name__ == "__main__":
    opts = get_opts()  # parse command-line options (baseline.utils.parser)
    main(opts)  # main() is defined elsewhere in this module
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
6738,
14805,
13,
26791,
13,
48610,
1330,
651,
62,
404,
912,
198,
6738,
14805,
13,
77,
14930,
4487,
13,
5532,
62,
18170,
1330,
7561,
9487,
26... | 3.273292 | 161 |
import RPi.GPIO as GPIO # Import GPIO Module
from time import sleep # Import sleep Module for timing
BUTTON_PIN = 21  # BCM pin wired to the push button
LED_PIN = 20     # BCM pin driving the LED

GPIO.setmode(GPIO.BCM)  # Configures pin numbering to Broadcom reference
GPIO.setwarnings(False)  # Disable Warnings

GPIO.setup(LED_PIN, GPIO.OUT) #Set our GPIO pin to output
GPIO.output(LED_PIN, False) #Set output to off
GPIO.setup(BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)  # Set GPIO to input with a pull-down resistor
GPIO.add_event_detect(BUTTON_PIN, GPIO.RISING, bouncetime=200)  # debounce: ignore edges within 200 ms

try:
    main()  # main() is defined elsewhere in this script (not visible in this chunk)
except KeyboardInterrupt:
    pass  # Ctrl-C is the normal way to stop; fall through to cleanup
finally:
    GPIO.output(LED_PIN, True) # Turn LED on
    sleep(0.5)
    GPIO.cleanup()  # release all GPIO channels before exit
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
220,
1303,
17267,
50143,
19937,
198,
6738,
640,
1330,
3993,
220,
1303,
17267,
3993,
19937,
329,
10576,
198,
198,
47526,
11357,
62,
44032,
796,
2310,
198,
30465,
62,
44032,
796,
1160,
198,
1... | 2.636719 | 256 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pendulum
import pytest
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.task_group import TaskGroup
from airflow.www.views import dag_edges, task_group_to_dict
EXPECTED_JSON = {
'id': None,
'value': {
'label': None,
'labelStyle': 'fill:#000;',
'style': 'fill:CornflowerBlue',
'rx': 5,
'ry': 5,
'clusterLabelPos': 'top',
},
'tooltip': '',
'children': [
{
'id': 'group234',
'value': {
'label': 'group234',
'labelStyle': 'fill:#000;',
'style': 'fill:CornflowerBlue',
'rx': 5,
'ry': 5,
'clusterLabelPos': 'top',
},
'tooltip': '',
'children': [
{
'id': 'group234.group34',
'value': {
'label': 'group34',
'labelStyle': 'fill:#000;',
'style': 'fill:CornflowerBlue',
'rx': 5,
'ry': 5,
'clusterLabelPos': 'top',
},
'tooltip': '',
'children': [
{
'id': 'group234.group34.task3',
'value': {
'label': 'task3',
'labelStyle': 'fill:#000;',
'style': 'fill:#e8f7e4;',
'rx': 5,
'ry': 5,
},
},
{
'id': 'group234.group34.task4',
'value': {
'label': 'task4',
'labelStyle': 'fill:#000;',
'style': 'fill:#e8f7e4;',
'rx': 5,
'ry': 5,
},
},
{
'id': 'group234.group34.downstream_join_id',
'value': {
'label': '',
'labelStyle': 'fill:#000;',
'style': 'fill:CornflowerBlue;',
'shape': 'circle',
},
},
],
},
{
'id': 'group234.task2',
'value': {
'label': 'task2',
'labelStyle': 'fill:#000;',
'style': 'fill:#e8f7e4;',
'rx': 5,
'ry': 5,
},
},
{
'id': 'group234.upstream_join_id',
'value': {
'label': '',
'labelStyle': 'fill:#000;',
'style': 'fill:CornflowerBlue;',
'shape': 'circle',
},
},
],
},
{
'id': 'task1',
'value': {
'label': 'task1',
'labelStyle': 'fill:#000;',
'style': 'fill:#e8f7e4;',
'rx': 5,
'ry': 5,
},
},
{
'id': 'task5',
'value': {
'label': 'task5',
'labelStyle': 'fill:#000;',
'style': 'fill:#e8f7e4;',
'rx': 5,
'ry': 5,
},
},
],
}
def test_build_task_group():
    """
    This is an alternative syntax to use TaskGroup. It should result in the same TaskGroup
    as using context manager.
    """
    execution_date = pendulum.parse("20200101")
    dag = DAG("test_build_task_group", start_date=execution_date)
    task1 = DummyOperator(task_id="task1", dag=dag)
    # Groups and tasks are wired explicitly via task_group=/parent_group=
    # instead of `with TaskGroup(...)` blocks.
    group234 = TaskGroup("group234", dag=dag)
    _ = DummyOperator(task_id="task2", dag=dag, task_group=group234)
    group34 = TaskGroup("group34", dag=dag, parent_group=group234)
    _ = DummyOperator(task_id="task3", dag=dag, task_group=group34)
    _ = DummyOperator(task_id="task4", dag=dag, task_group=group34)
    task5 = DummyOperator(task_id="task5", dag=dag)

    task1 >> group234
    group34 >> task5

    # The serialized structure must match the context-manager version exactly.
    assert task_group_to_dict(dag.task_group) == EXPECTED_JSON
def test_build_task_group_with_prefix():
    """
    Tests that prefix_group_id turns on/off prefixing of task_id with group_id.
    """
    execution_date = pendulum.parse("20200101")
    with DAG("test_build_task_group_with_prefix", start_date=execution_date) as dag:
        task1 = DummyOperator(task_id="task1")
        # group234 does NOT prefix its children; nested group34 does; the
        # innermost group4 again does not.
        with TaskGroup("group234", prefix_group_id=False) as group234:
            task2 = DummyOperator(task_id="task2")

            with TaskGroup("group34") as group34:
                task3 = DummyOperator(task_id="task3")

                with TaskGroup("group4", prefix_group_id=False) as group4:
                    task4 = DummyOperator(task_id="task4")

        task5 = DummyOperator(task_id="task5")
        task1 >> group234
        group34 >> task5

    # Only groups created with prefix_group_id=True contribute to child ids.
    assert task2.task_id == "task2"
    assert group34.group_id == "group34"
    assert task3.task_id == "group34.task3"
    assert group4.group_id == "group34.group4"
    assert task4.task_id == "task4"
    assert task5.task_id == "task5"
    assert group234.get_child_by_label("task2") == task2
    assert group234.get_child_by_label("group34") == group34
    assert group4.get_child_by_label("task4") == task4

    # extract_node_id is a helper defined elsewhere in this test module
    # (outside this chunk); it reduces the rendered graph to ids/labels.
    assert extract_node_id(task_group_to_dict(dag.task_group), include_label=True) == {
        'id': None,
        'label': None,
        'children': [
            {
                'id': 'group234',
                'label': 'group234',
                'children': [
                    {
                        'id': 'group34',
                        'label': 'group34',
                        'children': [
                            {
                                'id': 'group34.group4',
                                'label': 'group4',
                                'children': [{'id': 'task4', 'label': 'task4'}],
                            },
                            {'id': 'group34.task3', 'label': 'task3'},
                            {'id': 'group34.downstream_join_id', 'label': ''},
                        ],
                    },
                    {'id': 'task2', 'label': 'task2'},
                    {'id': 'group234.upstream_join_id', 'label': ''},
                ],
            },
            {'id': 'task1', 'label': 'task1'},
            {'id': 'task5', 'label': 'task5'},
        ],
    }
def test_build_task_group_with_task_decorator():
"""
Test that TaskGroup can be used with the @task decorator.
"""
from airflow.operators.python import task
@task
@task
@task
@task
@task
execution_date = pendulum.parse("20200101")
with DAG("test_build_task_group_with_task_decorator", start_date=execution_date) as dag:
tsk_1 = task_1()
with TaskGroup("group234") as group234:
tsk_2 = task_2()
tsk_3 = task_3()
tsk_4 = task_4(tsk_2, tsk_3)
tsk_5 = task_5()
tsk_1 >> group234 >> tsk_5
# pylint: disable=no-member
assert tsk_1.operator in tsk_2.operator.upstream_list
assert tsk_1.operator in tsk_3.operator.upstream_list
assert tsk_5.operator in tsk_4.operator.downstream_list
# pylint: enable=no-member
assert extract_node_id(task_group_to_dict(dag.task_group)) == {
'id': None,
'children': [
{
'id': 'group234',
'children': [
{'id': 'group234.task_2'},
{'id': 'group234.task_3'},
{'id': 'group234.task_4'},
{'id': 'group234.upstream_join_id'},
{'id': 'group234.downstream_join_id'},
],
},
{'id': 'task_1'},
{'id': 'task_5'},
],
}
edges = dag_edges(dag)
assert sorted((e["source_id"], e["target_id"]) for e in edges) == [
('group234.downstream_join_id', 'task_5'),
('group234.task_2', 'group234.task_4'),
('group234.task_3', 'group234.task_4'),
('group234.task_4', 'group234.downstream_join_id'),
('group234.upstream_join_id', 'group234.task_2'),
('group234.upstream_join_id', 'group234.task_3'),
('task_1', 'group234.upstream_join_id'),
]
def test_sub_dag_task_group():
    """
    Tests dag.sub_dag() updates task_group correctly.
    """
    execution_date = pendulum.parse("20200101")
    with DAG("test_test_task_group_sub_dag", start_date=execution_date) as dag:
        task1 = DummyOperator(task_id="task1")
        with TaskGroup("group234") as group234:
            _ = DummyOperator(task_id="task2")

            with TaskGroup("group34") as group34:
                _ = DummyOperator(task_id="task3")
                _ = DummyOperator(task_id="task4")

        with TaskGroup("group6") as group6:
            _ = DummyOperator(task_id="task6")

        task7 = DummyOperator(task_id="task7")
        task5 = DummyOperator(task_id="task5")

        task1 >> group234
        group34 >> task5
        group234 >> group6
        group234 >> task7

    # Keep only task5's upstream closure: group6/task6/task7 must drop out.
    subdag = dag.sub_dag(task_regex="task5", include_upstream=True, include_downstream=False)

    # extract_node_id is a helper defined elsewhere in this test module.
    assert extract_node_id(task_group_to_dict(subdag.task_group)) == {
        'id': None,
        'children': [
            {
                'id': 'group234',
                'children': [
                    {
                        'id': 'group234.group34',
                        'children': [
                            {'id': 'group234.group34.task3'},
                            {'id': 'group234.group34.task4'},
                            {'id': 'group234.group34.downstream_join_id'},
                        ],
                    },
                    {'id': 'group234.upstream_join_id'},
                ],
            },
            {'id': 'task1'},
            {'id': 'task5'},
        ],
    }

    edges = dag_edges(subdag)
    assert sorted((e["source_id"], e["target_id"]) for e in edges) == [
        ('group234.group34.downstream_join_id', 'task5'),
        ('group234.group34.task3', 'group234.group34.downstream_join_id'),
        ('group234.group34.task4', 'group234.group34.downstream_join_id'),
        ('group234.upstream_join_id', 'group234.group34.task3'),
        ('group234.upstream_join_id', 'group234.group34.task4'),
        ('task1', 'group234.upstream_join_id'),
    ]

    subdag_task_groups = subdag.task_group.get_task_group_dict()
    assert subdag_task_groups.keys() == {None, "group234", "group234.group34"}

    # Every surviving group/task may only reference ids that survived the cut.
    included_group_ids = {"group234", "group234.group34"}
    included_task_ids = {'group234.group34.task3', 'group234.group34.task4', 'task1', 'task5'}

    for task_group in subdag_task_groups.values():
        assert task_group.upstream_group_ids.issubset(included_group_ids)
        assert task_group.downstream_group_ids.issubset(included_group_ids)
        assert task_group.upstream_task_ids.issubset(included_task_ids)
        assert task_group.downstream_task_ids.issubset(included_task_ids)

    for task in subdag.task_group:
        assert task.upstream_task_ids.issubset(included_task_ids)
        assert task.downstream_task_ids.issubset(included_task_ids)
| [
2,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
... | 1.800829 | 6,999 |
import collections
import time
from aspen.network_engines import CooperativeEngine
from aspen.sockets import packet
from aspen.sockets.loop import Die
import tornado.ioloop
import tornado.httpserver
import tornado.wsgi
class TornadoBuffer(collections.deque):
    """Model a buffer of items.

    There are two of these for each Socket, one for incoming message payloads
    and one for outgoing message objects.

    Here's what the flow looks like:

        wire => [msg, msg, msg, msg, msg, msg, msg, msg] => resource
        wire <= [msg, msg, msg, msg, msg, msg, msg, msg] <= resource

    """

    def __init__(self, name, socket=None):
        """Takes a string and maybe a socket.

        If given a socket, we will try to play nice with its loop.

        """
        # It feels like it's going to take deeper rewiring to get *.sock files
        # working with Tornado callbacks.
        raise NotImplementedError("Sorry, for now please use a different "
                                  "networking library in order to use *.sock "
                                  "files.")

        # NOTE(review): everything below the raise above is unreachable dead
        # code, apparently kept from a pre-Tornado implementation.
        collections.deque.__init__(self)
        self._socket = socket
        self._name = name

    # deque aliases: items go in at the left and come out at the right (FIFO).
    put = collections.deque.appendleft
    get = collections.deque.pop
    empty = lambda d: not bool(d)  # truthiness check, bound as a method

    # flush
    # =====
    # Used for outgoing buffer.

    def flush(self):
        """Return an iterable of bytestrings or None.
        """
        if not self.empty():
            return self.__flusher()
        return None

    def __flusher(self):
        """Yield strings.

        We unload bytestrings as fast as we can until we run out of time or
        bytestrings. On my MacBook Pro I am seeing between 500 and 1000
        messages dumped in 2ms--without any WSGI/HTTP/TCP overhead. We always
        yield at least one bytestring to avoid deadlock.

        This generator is instantiated in self.flush.

        """
        if not self.empty():
            yield packet.frame(self.get())
        timeout = time.time() + (0.007) # We have 7ms to dump bytestrings. Go!
        while not self.empty() and time.time() < timeout:
            yield packet.frame(self.get())

    # next
    # ====
    # Used for incoming buffer.

    def next(self):
        """Return the next item from the queue.

        The first time this is called, we lazily instantiate the generator at
        self._blocked. Subsequent calls are directed directly to that
        generator's next method.

        """
        self._blocked = self._blocked()
        # NOTE(review): self._next is not defined anywhere in this class --
        # presumably this was meant to delegate to the generator's next();
        # confirm against the full file before relying on this path.
        self.next = self._next
        return self.next()

    def _blocked(self):
        """Yield items from self forever.

        This generator is lazily instantiated in self.next. It is designed to
        cooperate with ThreadedLoop. XXX Oh yeah?

        """
        if self._socket is None:    # We're on a Channel.
            while 1:
                yield self.get()
        else:                       # We're on a Socket.
            while not self._socket.loop.please_stop:
                out = self.get()
                if out is Die:
                    break # will result in a StopIteration
                yield out
| [
11748,
17268,
198,
11748,
640,
198,
198,
6738,
355,
3617,
13,
27349,
62,
1516,
1127,
1330,
43457,
13798,
198,
6738,
355,
3617,
13,
82,
11603,
1330,
19638,
198,
6738,
355,
3617,
13,
82,
11603,
13,
26268,
1330,
6733,
198,
11748,
33718,
... | 2.445469 | 1,302 |
"""
A custom Keras layer to generate anchor boxes.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division
import numpy as np
import keras.backend as K
from keras.engine.topology import InputSpec
from keras.engine.topology import Layer
from bounding_box_utils.bounding_box_utils import convert_coordinates
class AnchorBoxes(Layer):
"""
A Keras layer to create an output tensor containing anchor box coordinates and variances based on the input tensor
and the passed arguments.
A set of 2D anchor boxes of different aspect ratios is created for each spatial unit of the input tensor. The number
of anchor boxes created per unit depends on the arguments `aspect_ratios` and `two_boxes_for_ar1`, in the default
case it is 4. The boxes are parameterized by the coordinate tuple `(xmin, ymin, xmax, ymax)`.
The logic implemented by this layer is identical to the logic of function `generate_anchor_boxes_for_layer`
in the module `ssd_input_encoder.py`.
The purpose of having this layer in the network is to make the model self-sufficient at inference time.
Since the model is predicting offsets to the anchor boxes (rather than predicting absolute box coordinates directly)
, one needs to know the anchor box coordinates in order to construct the final prediction boxes from the predicted
offsets.
If the model's output tensor did not contain the anchor box coordinates, the necessary information to convert the
predicted offsets back to absolute coordinates would be missing in the model output. The reason why it is necessary
to predict offsets to the anchor boxes rather than to predict absolute box coordinates directly is explained in
`README.md`.
Input shape:
4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'`
or `(batch, height, width, channels)` if `dim_ordering = 'tf'`.
Output shape:
5D tensor of shape `(batch, height, width, n_boxes, 8)`.
The last axis contains the four anchor box coordinates and the four variance values for each box.
"""
def __init__(self,
img_height,
img_width,
this_scale,
next_scale,
aspect_ratios=(0.5, 1.0, 2.0),
two_boxes_for_ar1=True,
this_steps=None,
this_offsets=None,
clip_boxes=False,
variances=(0.1, 0.1, 0.2, 0.2),
coords='centroids',
normalize_coords=False,
**kwargs):
"""
All arguments need to be set to the same values as in the box encoding process, otherwise the behavior is
undefined.
Some of these arguments are explained in more detail in the documentation of the `SSDBoxEncoder` class.
Arguments:
img_height (int): The height of the input images.
img_width (int): The width of the input images.
this_scale (float): A float in (0, 1], the scaling factor for the size of the generated anchor boxes
as a fraction of the shorter side of the input image.
next_scale (float): A float in (0, 1], the next larger scaling factor. Only relevant if
`self.two_boxes_for_ar1 == True`.
aspect_ratios (tuple/list, optional): The tuple/list of aspect ratios for which default boxes are to be
generated for this layer.
two_boxes_for_ar1 (bool, optional): Only relevant if `aspect_ratios` contains 1.
If `True`, two default boxes will be generated for aspect ratio 1. The first will be generated
using the scaling factor for the respective layer, the second one will be generated using
geometric mean of said scaling factor and next bigger scaling factor.
clip_boxes (bool, optional): If `True`, clips the anchor box coordinates to stay within image boundaries.
variances (tuple/list, optional): A list of 4 floats >0. The anchor box offset for each coordinate will be
divided by its respective variance value.
coords (str, optional): The box coordinate format to be used internally in the model (i.e. this is not the
input format of the ground truth labels).
Can be either 'centroids' for the format `(cx, cy, w, h)` (box center coordinates, width, and height),
'corners' for the format `(xmin, ymin, xmax, ymax)`,
or 'minmax' for the format `(xmin, xmax, ymin, ymax)`.
normalize_coords (bool, optional): Set to `True` if the model uses relative instead of absolute coordinates,
i.e. if the model predicts box coordinates within [0,1] instead of absolute coordinates.
"""
############################################################################
# Get a few exceptions out of the way.
############################################################################
if K.backend() != 'tensorflow':
raise TypeError(
"This layer only supports TensorFlow at the moment, "
"but you are using the {} backend.".format(K.backend()))
if not (isinstance(img_height, int) and isinstance(img_width, int)):
raise ValueError('`img_height` and `img_width` must be float')
elif not (img_height > 0 and img_width > 0):
raise ValueError('`img_height` and `img_width` must be greater than 0')
else:
self.img_height = img_height
self.img_width = img_width
if not (isinstance(this_scale, float) and isinstance(next_scale, float)):
raise ValueError('`this_scale` and `next_scale` must be float')
elif not ((0 < this_scale) and (0 < next_scale)):
raise ValueError(
"`this_scale` and `next_scale` must be > 0"
"but `this_scale` == {}, `next_scale` == {}".format(this_scale, next_scale))
else:
self.this_scale = this_scale
self.next_scale = next_scale
if not (isinstance(aspect_ratios, (list, tuple)) and aspect_ratios):
raise ValueError("Aspect ratios must be a list or tuple and not empty")
# NOTE 当 aspect_ratios 为 () 或 [], np.any(np.array(aspect_ratios)) <=0 为 False, 所以必须有上面的判断
elif np.any(np.array(aspect_ratios) <= 0):
raise ValueError("All aspect ratios must be greater than zero.")
else:
self.aspect_ratios = aspect_ratios
if not (isinstance(variances, (list, tuple)) and len(variances) == 4):
# We need one variance value for each of the four box coordinates
raise ValueError("4 variance values must be passed, but {} values were received.".format(len(variances)))
else:
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError("All variances must be >0, but the variances given are {}".format(variances))
else:
self.variances = variances
if coords not in ('minmax', 'centroids', 'corners'):
raise ValueError("Unexpected value for `coords`. Supported values are 'minmax', 'corners' and 'centroids'.")
else:
self.coords = coords
if this_steps is not None:
if not ((isinstance(this_steps, (list, tuple)) and (len(this_steps) == 2)) or
isinstance(this_steps, (int, float))):
raise ValueError("This steps must be a 2-int/float list/tuple or a int/float")
else:
self.this_steps = this_steps
else:
self.this_steps = this_steps
if this_offsets is not None:
if not ((isinstance(this_offsets, (list, tuple)) and (len(this_offsets) == 2)) or
isinstance(this_offsets, (int, float))):
raise ValueError("This steps must be a 2-int/float list/tuple or a int/float")
else:
self.this_offsets = this_offsets
else:
self.this_offsets = this_offsets
if not (isinstance(two_boxes_for_ar1, bool)):
raise ValueError('`two_boxes_for_ar1` must be bool')
else:
self.two_boxes_for_ar1 = two_boxes_for_ar1
if not (isinstance(clip_boxes, bool)):
raise ValueError('`clip_boxes` must be bool')
else:
self.clip_boxes = clip_boxes
if not (isinstance(normalize_coords, bool)):
raise ValueError('`normalize_coords` must be bool')
else:
self.normalize_coords = normalize_coords
# Compute the number of boxes per cell
if (1 in aspect_ratios) and two_boxes_for_ar1:
self.n_boxes = len(aspect_ratios) + 1
else:
self.n_boxes = len(aspect_ratios)
super(AnchorBoxes, self).__init__(**kwargs)
    def call(self, x, mask=None):
        """
        Return an anchor box tensor based on the shape of the input tensor.

        The logic implemented here is identical to the logic of function
        `generate_anchor_boxes_for_layer` in the module `ssd_box_encode_decode_utils.py`.

        Note that this tensor does not participate in any graph computations at runtime.
        It is being created as a constant once during graph creation and is just being
        output along with the rest of the model output during runtime. Because of this,
        all logic is implemented as Numpy array operations and it is sufficient to convert
        the resulting Numpy array into a Keras tensor at the very end before outputting it.

        Arguments:
            x (tensor): 4D tensor of shape
                `(batch, channels, height, width)` if `dim_ordering = 'th'`
                or `(batch, height, width, channels)` if `dim_ordering = 'tf'`.
                The input for this layer must be the output of the localization predictor layer.
            mask: Unused here; presumably only present for Keras layer API
                compatibility -- TODO confirm.
        """
        # Compute box width and height for each aspect ratio.
        # The shorter side of the image will be used to compute `w` and `h` using `scale` and `aspect_ratios`.
        size = min(self.img_height, self.img_width)
        # Compute the box widths and heights for all aspect ratios.
        wh_list = []
        for aspect_ratio in self.aspect_ratios:
            if aspect_ratio == 1:
                # Compute the regular anchor box for aspect ratio 1.
                box_height = box_width = self.this_scale * size
                wh_list.append((box_width, box_height))
                if self.two_boxes_for_ar1:
                    # Compute one slightly larger version using the geometric mean of this
                    # scale value and the next -- i.e. aspect ratio 1 contributes two boxes.
                    box_height = box_width = np.sqrt(self.this_scale * self.next_scale) * size
                    wh_list.append((box_width, box_height))
            else:
                box_height = self.this_scale * size / np.sqrt(aspect_ratio)
                box_width = self.this_scale * size * np.sqrt(aspect_ratio)
                wh_list.append((box_width, box_height))
        # Shape is (n_boxes, 2).
        wh_list = np.array(wh_list)

        # We need the shape of the input tensor.
        if K.image_dim_ordering() == 'tf':
            # FIXME: `K.int_shape` is used instead of `x._keras_shape` (commented-out
            # alternative below); both should give the static shape here.
            batch_size, feature_map_height, feature_map_width, feature_map_channels = K.int_shape(x)
            # batch_size, feature_map_height, feature_map_width, feature_map_channels = x._keras_shape
        else:
            # Not yet relevant since TensorFlow is the only supported backend right now,
            # but it can't harm to have this in here for the future.
            # NOTE(review): this branch unpacks in channels-last order even though 'th'
            # ordering is channels-first (see the commented-out line) -- this would need
            # fixing if a channels-first backend were ever supported.
            batch_size, feature_map_height, feature_map_width, feature_map_channels = K.int_shape(x)
            # batch_size, feature_map_channels, feature_map_height, feature_map_width = x._keras_shape

        ##################################################################################
        # Compute the grid of box center points. They are identical for all aspect ratios.
        ##################################################################################
        # 1. Compute the step sizes,
        #    i.e. how far apart the anchor box center points will be vertically and horizontally.
        if self.this_steps is None:
            # Example: with img_height = img_width = 512 and a feature map of
            # 512 / 2^3 = 64 cells, step_height = step_width = 512 / 64 = 8: each
            # feature-map cell covers an 8x8 pixel patch of the original image and each
            # step moves by one cell.
            step_height = self.img_height / feature_map_height
            step_width = self.img_width / feature_map_width
        else:
            if isinstance(self.this_steps, (list, tuple)):
                step_height = self.this_steps[0]
                step_width = self.this_steps[1]
            # Equivalent to `elif isinstance(self.this_steps, (int, float)):`
            # (the type was already validated in `__init__`).
            else:
                step_height = self.this_steps
                step_width = self.this_steps
        # 2. Compute the offsets, i.e.
        #    at what pixel values the first anchor box center point will be from the top and from the left of the image.
        if self.this_offsets is None:
            offset_height = 0.5
            offset_width = 0.5
        else:
            if isinstance(self.this_offsets, (list, tuple)):
                offset_height = self.this_offsets[0]
                offset_width = self.this_offsets[1]
            # Equivalent to `elif isinstance(self.this_offsets, (int, float)):`.
            else:
                offset_height = self.this_offsets
                offset_width = self.this_offsets
        # 3. Now that we have the offsets and step sizes, compute the grid of anchor box center points.
        #    np.linspace(start, stop, num) includes `stop` by default; see
        #    https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html
        # Example: np.linspace(0.5 * 8, 63.5 * 8, 64) -> array([4, 12, ..., 500, 508]).
        cy = np.linspace(offset_height * step_height, (offset_height + feature_map_height - 1) * step_height,
                         feature_map_height)
        cx = np.linspace(offset_width * step_width, (offset_width + feature_map_width - 1) * step_width,
                         feature_map_width)
        # cx_grid repeats cx along the rows and cy_grid repeats cy along the columns;
        # both have shape (feature_map_height, feature_map_width).
        cx_grid, cy_grid = np.meshgrid(cx, cy)
        # This is necessary for np.tile() to do what we want further down.
        # Shape becomes (feature_map_height, feature_map_width, 1).
        cx_grid = np.expand_dims(cx_grid, -1)
        cy_grid = np.expand_dims(cy_grid, -1)

        # Create a 4D tensor template of shape `(feature_map_height, feature_map_width, n_boxes, 4)`
        # where the last dimension will contain `(cx, cy, w, h)`.
        boxes_tensor = np.zeros((feature_map_height, feature_map_width, self.n_boxes, 4))
        # np.tile() yields arrays of shape (feature_map_height, feature_map_width, n_boxes).
        # Set cx
        boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, self.n_boxes))
        # Set cy
        boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, self.n_boxes))
        # Set w
        boxes_tensor[:, :, :, 2] = wh_list[:, 0]
        # Set h
        boxes_tensor[:, :, :, 3] = wh_list[:, 1]

        # Convert `(cx, cy, w, h)` to `(xmin, ymin, xmax, ymax)`;
        # the corner format is what the clipping step below operates on.
        boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='centroids2corners')
        # If `clip_boxes` is enabled, clip the coordinates to lie within the image boundaries.
        if self.clip_boxes:
            x_coords = boxes_tensor[:, :, :, [0, 2]]
            x_coords[x_coords >= self.img_width] = self.img_width - 1
            x_coords[x_coords < 0] = 0
            # In-place fancy-indexed assignment -- fine on Numpy arrays (this is exactly
            # the kind of mutation that plain TF tensors would not allow).
            boxes_tensor[:, :, :, [0, 2]] = x_coords
            y_coords = boxes_tensor[:, :, :, [1, 3]]
            y_coords[y_coords >= self.img_height] = self.img_height - 1
            y_coords[y_coords < 0] = 0
            boxes_tensor[:, :, :, [1, 3]] = y_coords

        # If `normalize_coords` is enabled, normalize the coordinates to be within [0,1].
        if self.normalize_coords:
            boxes_tensor[:, :, :, [0, 2]] /= self.img_width
            boxes_tensor[:, :, :, [1, 3]] /= self.img_height

        # TODO: Implement box limiting directly for `(cx, cy, w, h)`
        #       so that we don't have to unnecessarily convert back and forth.
        if self.coords == 'centroids':
            # Convert `(xmin, ymin, xmax, ymax)` back to `(cx, cy, w, h)`.
            boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='corners2centroids',
                                               border_pixels='half')
        elif self.coords == 'minmax':
            # Convert `(xmin, ymin, xmax, ymax)` to `(xmin, xmax, ymin, ymax)`.
            boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='corners2minmax',
                                               border_pixels='half')

        # Create a tensor to contain the variances and append it to `boxes_tensor`.
        # This tensor has the same shape as `boxes_tensor`
        # and simply contains the same 4 variance values for every position in the last axis.
        # Has shape `(feature_map_height, feature_map_width, n_boxes, 4)`.
        variances_tensor = np.zeros_like(boxes_tensor)
        # Long live broadcasting.
        variances_tensor += self.variances
        # Now `boxes_tensor` becomes a tensor of shape `(feature_map_height, feature_map_width, n_boxes, 8)`.
        boxes_tensor = np.concatenate((boxes_tensor, variances_tensor), axis=-1)

        # Now prepend one dimension to `boxes_tensor` to account for the batch size, and
        # tile it along that batch dimension.
        # The result will be a 5D tensor of shape `(batch_size, feature_map_height, feature_map_width, n_boxes, 8)`.
        boxes_tensor = np.expand_dims(boxes_tensor, axis=0)
        boxes_tensor = K.tile(K.constant(boxes_tensor, dtype='float32'), (K.shape(x)[0], 1, 1, 1, 1))

        return boxes_tensor
| [
37811,
198,
32,
2183,
17337,
292,
7679,
284,
7716,
18021,
10559,
13,
198,
198,
15269,
357,
34,
8,
2864,
13762,
2290,
25754,
23502,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
1... | 2.300725 | 8,280 |
#!/usr/bin/env python
"""Show FlashBlade Protection Status"""

# usage:
# ./flashBladeProtectionStatus.py -v mycluster \
#                                 -u myuser \
#                                 -d mydomain.net \
#                                 -f flashblad01

# import pyhesity wrapper module
from pyhesity import *

### command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, required=True)       # Cohesity cluster name or IP
parser.add_argument('-u', '--username', type=str, required=True)  # Cohesity Username
parser.add_argument('-d', '--domain', type=str, default='local')  # Cohesity User Domain
parser.add_argument('-f', '--flashbladesource', type=str, required=True)
args = parser.parse_args()

vip = args.vip
username = args.username
domain = args.domain
flashbladesource = args.flashbladesource

# authenticate against the Cohesity cluster
apiauth(vip, username, domain)

# locate the registered FlashBlade source by (case-insensitive) name
sources = api('get', 'protectionSources?environments=kFlashBlade')
matches = [source for source in sources
           if source['protectionSource']['name'].lower() == flashbladesource.lower()]
if not matches:
    print('FlashBlade %s not registered in Cohesity' % flashbladesource)
    exit(1)
flashblade = matches[0]
parentId = flashblade['protectionSource']['id']

# collect the names of the volumes that already have protection configured
protectedvolumes = api('get', 'protectionSources/protectedObjects?id=%s&environment=kFlashBlade' % parentId)
protectedVolumeNames = {v['protectionSource']['name'] for v in protectedvolumes}

# report every volume and write the unprotected ones to a text file
outfile = '%s-unprotected.txt' % flashbladesource
with open(outfile, 'w') as report:
    for volume in flashblade['nodes']:
        volumename = volume['protectionSource']['name']
        if volumename in protectedVolumeNames:
            print('  PROTECTED: %s' % volumename)
        else:
            print('UNPROTECTED: %s' % volumename)
            report.write('%s\n' % volumename)
print('\nunprotected volumes saved to %s' % outfile)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
15307,
9973,
47520,
9985,
12678,
37811,
198,
198,
2,
8748,
25,
198,
2,
24457,
34167,
47520,
19703,
3213,
19580,
13,
9078,
532,
85,
616,
565,
5819,
3467,
198,
2,
220,
220,
220,
... | 2.654372 | 732 |
""" Meaningful composition of shader programs exposed as a library """
__author__ = 'Stephan Gerhard'
from .shaders import Shader
from .lib import get_shader_code
import pyglet.gl as gl
# load the vary-line-width-shader
| [
37811,
30563,
913,
11742,
286,
33030,
4056,
7362,
355,
257,
5888,
37227,
198,
834,
9800,
834,
796,
705,
8600,
7637,
13573,
10424,
6,
198,
198,
6738,
764,
1477,
9972,
1330,
911,
5067,
198,
6738,
764,
8019,
1330,
651,
62,
1477,
5067,
62... | 3.363636 | 66 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-09-24 15:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
20,
319,
2177,
12,
2931,
12,
1731,
1315,
25,
2998,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.73913 | 69 |
import argparse
import re
import pickle

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# import stopwords

# Train a TF-IDF + random-forest classifier that separates survey papers from
# non-surveys (label derived from "a survey" appearing in the title), then rank
# the unlabeled papers by their predicted survey probability.
parser = argparse.ArgumentParser(description='')
parser.add_argument('--db-path', help='')
args = parser.parse_args()

# Load the pickled paper database; fall back to an empty one on any failure.
try:
    print(args.db_path)
    db = pickle.load(open(args.db_path, 'rb'))
except Exception as e:
    print('error loading existing database:')
    print(e)
    print('starting from an empty database')
    db = {}
print(len(db.keys()))

# Normalize titles: lowercase, strip, drop newlines, collapse repeated spaces.
docs = [{'title': re.sub(' +', ' ', entry['title'].lower().strip().replace('\n', '')),
         'summary': entry['summary']} for _, entry in db.items()]

survey_summaries = [d['summary'] for d in docs if 'a survey' in d['title']]
other_summaries = [d['summary'] for d in docs if 'a survey' not in d['title']]
other_titles = [d['title'] for d in docs if 'a survey' not in d['title']]

# Build a balanced training corpus: all surveys plus an equal number of others.
vectorizer = TfidfVectorizer()
corpus = survey_summaries + other_summaries[:len(survey_summaries)]
labels = np.array([1] * len(survey_summaries) + [0] * len(survey_summaries))
tfidf_matrix = vectorizer.fit_transform(corpus)
feature_names = vectorizer.get_feature_names()
print(feature_names[0:20])
print(tfidf_matrix.shape)

X_train, X_test, y_train, y_test = train_test_split(tfidf_matrix.toarray(), labels, test_size=0.2, random_state=42)
clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
print(clf.feature_importances_)

# Score the remaining papers and show the most survey-like titles and the
# most important TF-IDF features.
unlabeled_matrix = vectorizer.transform(other_summaries)
proba = clf.predict_proba(unlabeled_matrix)
top_indices = np.argsort(proba[:, 1])[::-1]
print([other_titles[idx] for idx in top_indices[:20]])
top_feat_indices = np.argsort(clf.feature_importances_)[::-1]
print([feature_names[idx] for idx in top_feat_indices[0:50]])
| [
11748,
1822,
29572,
198,
11748,
302,
198,
11748,
2298,
293,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
1330,
309,
69,
312,
69,
38469,
7509,
198,
6738,
1341,
35720,
13,
1072,
1... | 2.511688 | 770 |
import importlib
import torch
import torch.nn as nn
from pretraining_model import Transformer, TransformerWithLMHead
| [
11748,
1330,
8019,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
2181,
24674,
62,
19849,
1330,
3602,
16354,
11,
3602,
16354,
3152,
31288,
13847,
628
] | 3.83871 | 31 |
from .effnet import efficientnet_lite_b0, efficientnet_lite_b1, efficientnet_lite_b2, efficientnet_lite_b3, efficientnet_lite_b4 | [
6738,
764,
14822,
3262,
1330,
6942,
3262,
62,
36890,
62,
65,
15,
11,
6942,
3262,
62,
36890,
62,
65,
16,
11,
6942,
3262,
62,
36890,
62,
65,
17,
11,
6942,
3262,
62,
36890,
62,
65,
18,
11,
6942,
3262,
62,
36890,
62,
65,
19
] | 2.909091 | 44 |
#!/usr/local/bin/python3
# Real-time webcam demo: captures frames, detects faces with OpenCV, classifies
# facial expressions with a Keras model and publishes results over OSC/UDP.
from keras.models import model_from_json
import numpy as np
import cv2
import argparse
import os
# Silence TensorFlow's C++ INFO/WARNING log spam.
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import math
from pythonosc import udp_client

# CLI parsing is currently disabled; the capture source and FPS are hard-coded below.
#parser = argparse.ArgumentParser()
#parser.add_argument("source")
#parser.add_argument("fps")
#args = parser.parse_args()

# Device 0 = default webcam (the commented expression allowed a file path instead).
cap = cv2.VideoCapture(0)#os.path.abspath(args.source) if not args.source == 'webcam' else 0)
# Haar cascade for frontal face detection; expects the XML file next to this script.
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
font = cv2.FONT_HERSHEY_SIMPLEX

cap.set(cv2.CAP_PROP_FPS, 30)#int(args.fps))
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# Pick the DNN face-detector backend: Caffe or TensorFlow model files.
DNN = "TF"
if DNN == "CAFFE":
    modelFile = "res10_300x300_ssd_iter_140000_fp16.caffemodel"
    configFile = "deploy.prototxt"
    net = cv2.dnn.readNetFromCaffe(configFile, modelFile)
else:
    modelFile = "opencv_face_detector_uint8.pb"
    configFile = "opencv_face_detector.pbtxt"
    net = cv2.dnn.readNetFromTensorflow(modelFile, configFile)

if __name__ == '__main__':
    # OSC endpoint the classification results are sent to.
    ip = "127.0.0.1"
    port = 12345
    client = udp_client.SimpleUDPClient(ip, port)
    # NOTE(review): `FacialExpressionModel` and `start_app` are not defined in
    # this excerpt -- they are presumably defined elsewhere in the original
    # module; verify before running this file standalone.
    model = FacialExpressionModel("model.json", "weights.h5")
    start_app(model)
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
18,
198,
198,
6738,
41927,
292,
13,
27530,
1330,
2746,
62,
6738,
62,
17752,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,... | 2.350195 | 514 |
import os
import argparse
import keras2onnx
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, Input
from tensorflow.keras.layers import LSTM
from tensorflow.keras.datasets import imdb

# Train an LSTM sentiment classifier on the IMDB dataset and export it to ONNX.

# Training settings
parser = argparse.ArgumentParser(description='Keras IMDB LSTM Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                    help='input batch size for training (default: 64)')
# BUG FIX: the help text used to claim "default: 5" although the default is 7.
parser.add_argument('--epochs', type=int, default=7, metavar='N',
                    help='number of epochs to train (default: 7)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                    help='learning rate (default: 0.01)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--vocab-size', type=int, default=2000,
                    help='Max size of the vocabulary (default: 2000)')
parser.add_argument('--max-len', type=int, default=250,
                    help='Sequence max length (default: 250)')
parser.add_argument('--output-path', type=str, default="onnx_models/lstm_imdb.onnx",
                    help='Output path to store the onnx file')
parser.add_argument('--output-metric', type=str, default="",
                    help='Output file path to store the metric value obtained in test set')
args = parser.parse_args()

# Load the IMDB reviews capped to the requested vocabulary size and length.
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=args.vocab_size, maxlen=args.max_len)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

# Pad every review to a fixed length so batches have uniform shape.
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=args.max_len)
x_test = sequence.pad_sequences(x_test, maxlen=args.max_len)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

print('Build model...')
model = Sequential()
# BUG FIX: `shape=(x_train.shape[-1])` was just a parenthesized int because of
# the missing comma; Keras `Input` expects a shape *tuple*, i.e. `(length,)`.
model.add(Input(shape=(x_train.shape[-1],)))
model.add(Embedding(args.vocab_size, 32))
model.add(LSTM(32))
model.add(Dense(1, activation='sigmoid'))

# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

print('Train...')
model.fit(x_train, y_train,
          batch_size=args.batch_size,
          epochs=args.epochs,
          validation_data=(x_test, y_test))
loss, acc = model.evaluate(x_test, y_test,
                           batch_size=args.batch_size)
print("Evaluation result: Loss:", loss, " Accuracy:", acc)

# In case of providing output metric file, store the test accuracy value
if args.output_metric != "":
    with open(args.output_metric, 'w') as ofile:
        ofile.write(str(acc))

# Convert to ONNX
onnx_model = keras2onnx.convert_keras(model, "lstm_imdb", debug_mode=1)

# Save ONNX to file
keras2onnx.save_model(onnx_model, args.output_path)
| [
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
41927,
292,
17,
261,
77,
87,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
3866,
36948,
1330,
8379,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
27530,
1330,
24604,
1843,
198,
... | 2.445946 | 1,258 |
# coding: utf-8
# Copyright(C) 2019 FireEye, Inc. All Rights Reserved.
#
# FLASHMINGO!
# This finds suspicious constants in the constant pool(s)
import os
import logging
import logging.handlers
class Plugin:
    """Base class for FLASHMINGO plugins.

    Every plugin operates on a SWFObject that is passed in as an argument.
    """

    def _init_logging(self):
        """Set up the logging scheme shared by all plugins.

        Configures basic console logging plus a size-capped rotating log file
        (`plugin.log` next to this module) and returns the 'main' logger.
        """
        logging.basicConfig(
            format='%(asctime)s [%(levelname)s] %(message)s',
            level=logging.DEBUG)
        formatter = logging.Formatter('%(asctime)-12s [%(levelname)s] %(message)s')
        plugin_dir = os.path.dirname(os.path.realpath(__file__))
        rotating_handler = logging.handlers.RotatingFileHandler(
            os.path.join(plugin_dir, 'plugin.log'),
            maxBytes=5 * 1024 * 1024,
            backupCount=5)
        rotating_handler.setLevel(logging.DEBUG)
        rotating_handler.setFormatter(formatter)
        main_logger = logging.getLogger('main')
        main_logger.addHandler(rotating_handler)
        return main_logger

    def _find_suspicious_constants(self):
        """Return the constants from the SWF constant pool that look fishy.

        Known magic numbers (e.g. the `MZ`/`PE` markers of embedded executables)
        plus any user-supplied constants are searched for in the pool; matches
        are returned in pool order.
        """
        watch_list = [0x4550, 0x5a4d, 0x905a4d, 0x90905a4d] + list(self.user_constants)
        return [value for value in self.swf.constants if value in watch_list]
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
15069,
7,
34,
8,
13130,
3764,
24876,
11,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
9977,
11211,
44,
2751,
46,
0,
198,
2,
770,
7228,
13678,
38491,
287,
262,
6937,
5933,
7,
82,
8,
... | 2.257703 | 714 |
"""
1D interpolation by Lagrange method.
Bruce Wernick
10 June 2021
"""
def lagrange1d(X, Y):
    """1D Lagrange interpolation.

    Given sample points X and values Y (equal length), return a function f(x)
    that evaluates the unique polynomial of degree len(X)-1 passing through
    all the points.

    BUG FIX: the previous version returned `f` without ever defining it
    (the nested evaluator was missing), raising NameError on any call.
    """
    n = len(X)

    def f(x):
        # Classic Lagrange form: sum_i Y[i] * prod_{j != i} (x - X[j]) / (X[i] - X[j])
        total = 0.0
        for i in range(n):
            term = Y[i]
            for j in range(n):
                if j != i:
                    term *= (x - X[j]) / (X[i] - X[j])
            total += term
        return total

    return f

# ---------------------------------------------------------------------

if __name__ == '__main__':
    x = [1,2,3,4,5]
    y = [2,4,6,8,10]
    fx = lagrange1d(x, y)
    print(fx(2.5))
| [
37811,
201,
198,
16,
35,
39555,
341,
416,
21003,
9521,
2446,
13,
201,
198,
38509,
370,
1142,
624,
201,
198,
940,
2795,
33448,
201,
198,
37811,
201,
198,
201,
198,
4299,
19470,
9521,
16,
67,
7,
55,
11,
575,
2599,
201,
198,
220,
366... | 2.333333 | 150 |
#!usr/bin/env python3
# -*- coding: utf-8 -*-
# NOTE(review): the shebang above is missing its leading '/' -- harmless when
# run via `python3 script.py`, broken if executed directly; verify intent.
# @Time     : 2021/2/5
# @Author   : Shufei Lei
# @Software : PyCharm
# Scraper entry point for a mafengwo.cn travel-note page.
import hashlib
import requests
import execjs
import re

# Browser-like User-Agent so the request is less likely to be rejected as a bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4144.2 Safari/537.36'
}

# Target travel-note URL to fetch.
url = 'http://www.mafengwo.cn/i/21452824.html'

# NOTE(review): `get_html` is not defined in this excerpt -- presumably defined
# elsewhere in the original module (likely using `requests`, `execjs` and
# `hashlib` to pass the site's JS anti-scraping check); verify before use.
get_html(url)
| [
2,
0,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
220,
1058,
33448,
14,
17,
14,
20,
198,
2,
2488,
13838,
220,
220,
1058,
911,
3046,
2... | 2.239766 | 171 |
# Attribute-name constants attached to parsed HSD nodes.
HSDATTR_PROC = "processed"  # marks a node as already processed -- TODO confirm exact semantics
HSDATTR_EQUAL = "equal"     # presumably flags 'name = value' style assignment -- verify against parser
HSDATTR_FILE = "file"       # source file the node came from
HSDATTR_LINE = "lines"      # source line(s) the node came from
class HSDException(Exception):
    """Base class for all exceptions raised by the HSD packages."""
    pass
class HSDQueryError(HSDException):
    """Base class for errors detected by the HSDQuery object.

    Attributes (presumably set by the code that raises the error, since this
    class defines no __init__ of its own -- verify at the raise sites):
        filename: Name of the file where the error occurred (or empty string).
        line: Line where the error occurred (or None).
        tagname: Name of the tag with the error (or empty string).
    """
class HSDParserError(HSDException):
    """Base class for errors raised while parsing HSD input."""
    pass
def unquote(txt):
    """Return *txt* with one pair of enclosing quotes removed, if present.

    The string is unquoted only when it is at least two characters long and
    starts and ends with the same quote character (single or double);
    otherwise it is returned unchanged.
    """
    quoted = len(txt) >= 2 and txt[0] == txt[-1] and txt[0] in "\"'"
    return txt[1:-1] if quoted else txt
def splitbycharset(txt, charset):
    """Split *txt* at the first occurrence of any character from *charset*.

    Args:
        txt: Text to split.
        charset: Characters to look for (specified as a string).

    Returns:
        A tuple ``(char, before, after)`` where ``char`` is the first
        character from ``charset`` found in ``txt``, and ``before``/``after``
        are the substrings on either side of it. If none of the characters
        occurs in the text, ``char`` and ``after`` are empty strings and
        ``before`` is the entire string.
    """
    for position, character in enumerate(txt):
        if character in charset:
            return character, txt[:position], txt[position + 1:]
    return '', txt, ''
| [
7998,
35,
1404,
5446,
62,
4805,
4503,
796,
366,
14681,
276,
1,
198,
7998,
35,
1404,
5446,
62,
36,
10917,
1847,
796,
366,
40496,
1,
198,
7998,
35,
1404,
5446,
62,
25664,
796,
366,
7753,
1,
198,
7998,
35,
1404,
5446,
62,
24027,
796,... | 2.56748 | 615 |
import re
from sqlalchemy import (
Column,
DateTime,
String,
Integer,
JSON,
Text,
Float,
ForeignKey,
insert
)
from app.Database import Base, Session
| [
11748,
302,
198,
6738,
44161,
282,
26599,
1330,
357,
198,
220,
220,
220,
29201,
11,
198,
220,
220,
220,
7536,
7575,
11,
198,
220,
220,
220,
10903,
11,
198,
220,
220,
220,
34142,
11,
198,
220,
220,
220,
19449,
11,
198,
220,
220,
22... | 2.447368 | 76 |
from rest_framework import generics
from .models import Todo
from .serializers import TodoSerializer
| [
6738,
1334,
62,
30604,
1330,
1152,
873,
198,
198,
6738,
764,
27530,
1330,
309,
24313,
198,
6738,
764,
46911,
11341,
1330,
309,
24313,
32634,
7509,
628,
198
] | 3.851852 | 27 |
# Package metadata.
__version__ = '1.6.0'
__author__ = 'Aashutosh Rathi <aashutoshrathi@gmail.com>'
# Explicit public API: empty, so `from <package> import *` exports nothing.
__all__ = []
| [
834,
9641,
834,
796,
705,
16,
13,
21,
13,
15,
6,
198,
834,
9800,
834,
796,
705,
32,
1077,
315,
3768,
26494,
72,
1279,
64,
1077,
315,
3768,
81,
44202,
31,
14816,
13,
785,
29,
6,
198,
834,
439,
834,
796,
17635,
198
] | 2.162791 | 43 |
# http://docs.opencv.org/trunk/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.html
# Compare the first contours found in two (here identical) grayscale images
# using Hu-moment based matching; a result of 0.0 means identical shapes.
import cv2
import numpy as np

img1 = cv2.imread('a.jpg',0)
img2 = cv2.imread('a.jpg',0)

ret, thresh = cv2.threshold(img1, 127, 255,0)
ret, thresh2 = cv2.threshold(img2, 127, 255,0)
contours,hierarchy = cv2.findContours(thresh,2,1)
cnt1 = contours[0]
contours,hierarchy = cv2.findContours(thresh2,2,1)
cnt2 = contours[0]

ret = cv2.matchShapes(cnt1,cnt2,1,0.0)
# BUG FIX: `print ret` is Python 2 statement syntax and a SyntaxError under
# Python 3; the parenthesized call works on both interpreter versions.
print(ret)
| [
2,
2638,
1378,
31628,
13,
9654,
33967,
13,
2398,
14,
2213,
2954,
14,
15390,
14,
9078,
62,
83,
44917,
82,
14,
9078,
62,
9600,
36942,
14,
9078,
62,
3642,
4662,
14,
9078,
62,
3642,
4662,
62,
3549,
62,
12543,
2733,
14,
9078,
62,
3642,... | 2.082988 | 241 |
# Test scaffolding for `mergeSTR`: fixture-path constants plus per-caller
# integration tests that are currently commented out (see TODOs below).
import argparse
import os, sys
import numpy as np
import pytest

# Make the mergeSTR module importable from the repository root.
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','..'))
from mergeSTR import *

# Fixture directories, all resolved relative to this file's location.
TESTDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_files")
COMMDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "common")
DUMPDIR = os.path.join(COMMDIR, "dump")  # presumably scratch/output space for tests -- verify
VCFDIR = os.path.join(COMMDIR, "sample_vcfs")  # shared sample VCF inputs
MRGVCFDIR = os.path.join(VCFDIR, "mergeSTR_vcfs")  # VCFs specific to mergeSTR tests

# Set up base argparser
# Set up dummy class

# Test right files or directory - GangSTR
# TODO fails bc no contig line in VCFs

# Test right files or directory - advntr
#def test_AdVNTRRightFile():
#    args = base_argparse()
#    fname1 = os.path.join(MRGVCFDIR, "test_file_advntr1.vcf.gz")
#    fname2 = os.path.join(MRGVCFDIR, "test_file_advntr2.vcf.gz")
#    args.vcftype = "advntr"
#    args.vcfs = fname1 + "," + fname2
#    assert main(args)==0
#    args.vcftype = "auto"
#    assert main(args)==0
#    args.update_sample_from_file = True
#    assert main(args)==0
#    args.verbose = True
#    assert main(args)==0

# Test right files or directory - hipstr

# Test right files or directory - ExpansionHunter
# TODO fails bc no contig line in VCFs
#def test_ExpansionHunterRightFile():
#    args = base_argparse()
#    fname1 = os.path.join(MRGVCFDIR, "test_file_eh1.vcf.gz")
#    fname2 = os.path.join(MRGVCFDIR, "test_file_eh2.vcf.gz")
#    args.vcftype = "eh"
#    args.vcfs = fname1 + "," + fname2
#    assert main(args)==0
#    args.vcftype = "auto"
#    assert main(args)==0
#    args.update_sample_from_file = True
#    assert main(args)==0
#    args.verbose = True
#    assert main(args)==0

# Test right files or directory - popstr

# test VCFs with different ref genome contigs return 1
| [
11748,
1822,
29572,
198,
11748,
28686,
11,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
69... | 2.363757 | 756 |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, wen.guan@cern.ch, 2017
import logging
from pilot.eventservice.esprocess.esprocess import ESProcess
from pilot.eventservice.esprocess.eshook import ESHook
logger = logging.getLogger(__name__)
"""
ES manager to setup and run ESProcess.
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
7... | 3.141104 | 163 |
"""
The LogWidget receives and saves logging output from the ScriptEditor.
"""
from PySide import QtCore, QtGui
from FabricEngine.FabricUI import DFG, Actions
| [
37811,
198,
464,
5972,
38300,
11583,
290,
16031,
18931,
5072,
422,
262,
12327,
17171,
13,
198,
37811,
198,
198,
6738,
9485,
24819,
1330,
33734,
14055,
11,
33734,
8205,
72,
198,
6738,
37759,
13798,
13,
43957,
1173,
10080,
1330,
360,
30386,... | 3.72093 | 43 |
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import Lasso

# Fit a Lasso regression predicting listing price from Airbnb listing features.

# Categorical columns that must be one-hot encoded before fitting.
CATEGORICAL_FEATURES = ['city', 'country', 'apartment_type', 'bedrooms', 'baths', 'amenities', 'is_superhost']

# Load the listings and separate the features from the regression target.
data = pd.read_csv('airbnb_data.csv')
features = data.drop(['price', 'listing_url', 'image_url', 'title', 'district'], axis=1)
target = data['price']

# Basic imputation for columns with missing values.
features['rating'].fillna(features['rating'].mean(), inplace=True)
features['reviews'].fillna(1, inplace=True)
features['baths'].fillna('1 bath', inplace=True)

# BUG FIX: `processed_data` was used below but never defined, so the script
# crashed with a NameError. One-hot encode the categorical columns to build it.
processed_data = pd.get_dummies(
    features, columns=[col for col in CATEGORICAL_FEATURES if col in features.columns])

X_train, X_test, y_train, y_test = train_test_split(processed_data, target, test_size=0.3)
reg = Lasso()
reg.fit(X_train, y_train)
pred = reg.predict(X_test)
print('Regression score of the model', reg.score(X_test, y_test))
print('Mean absolute error for the model', mean_absolute_error(y_test, pred))
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
1881,
21352,
27195,
12342,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
8997,
3351,
36213,
198,
6738,
1341,
35720,
13,
... | 2.952381 | 357 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A source and a sink for reading from and writing to text files."""
from __future__ import absolute_import
import logging
from builtins import object
from builtins import range
from functools import partial
from past.builtins import long
from apache_beam.coders import coders
from apache_beam.io import filebasedsink
from apache_beam.io import filebasedsource
from apache_beam.io import iobase
from apache_beam.io.filebasedsource import ReadAllFiles
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Write
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
__all__ = ['ReadFromText', 'ReadFromTextWithFilename', 'ReadAllFromText',
'WriteToText']
class _TextSource(filebasedsource.FileBasedSource):
r"""A source for reading text files.
Parses a text file as newline-delimited elements. Supports newline delimiters
'\n' and '\r\n.
This implementation only supports reading text encoded using UTF-8 or
ASCII.
"""
DEFAULT_READ_BUFFER_SIZE = 8192
def __init__(self,
file_pattern,
min_bundle_size,
compression_type,
strip_trailing_newlines,
coder,
buffer_size=DEFAULT_READ_BUFFER_SIZE,
validate=True,
skip_header_lines=0,
header_processor_fns=(None, None)):
"""Initialize a _TextSource
Args:
header_processor_fns (tuple): a tuple of a `header_matcher` function
and a `header_processor` function. The `header_matcher` should
return `True` for all lines at the start of the file that are part
of the file header and `False` otherwise. These header lines will
not be yielded when reading records and instead passed into
`header_processor` to be handled. If `skip_header_lines` and a
`header_matcher` are both provided, the value of `skip_header_lines`
lines will be skipped and the header will be processed from
there.
Raises:
ValueError: if skip_lines is negative.
Please refer to documentation in class `ReadFromText` for the rest
of the arguments.
"""
super(_TextSource, self).__init__(file_pattern, min_bundle_size,
compression_type=compression_type,
validate=validate)
self._strip_trailing_newlines = strip_trailing_newlines
self._compression_type = compression_type
self._coder = coder
self._buffer_size = buffer_size
if skip_header_lines < 0:
raise ValueError('Cannot skip negative number of header lines: %d'
% skip_header_lines)
elif skip_header_lines > 10:
logging.warning(
'Skipping %d header lines. Skipping large number of header '
'lines might significantly slow down processing.')
self._skip_header_lines = skip_header_lines
self._header_matcher, self._header_processor = header_processor_fns
def _skip_lines(self, file_to_read, read_buffer, num_lines):
"""Skip num_lines from file_to_read, return num_lines+1 start position."""
if file_to_read.tell() > 0:
file_to_read.seek(0)
position = 0
for _ in range(num_lines):
_, num_bytes_to_next_record = self._read_record(file_to_read, read_buffer)
if num_bytes_to_next_record < 0:
# We reached end of file. It is OK to just break here
# because subsequent _read_record will return same result.
break
position += num_bytes_to_next_record
return position
class _TextSink(filebasedsink.FileBasedSink):
"""A sink to a GCS or local text file or files."""
def __init__(self,
file_path_prefix,
file_name_suffix='',
append_trailing_newlines=True,
num_shards=0,
shard_name_template=None,
coder=coders.ToStringCoder(),
compression_type=CompressionTypes.AUTO,
header=None):
"""Initialize a _TextSink.
Args:
file_path_prefix: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards), and
end in a common extension, if given by file_name_suffix. In most cases,
only this argument is specified and num_shards, shard_name_template, and
file_name_suffix use default values.
file_name_suffix: Suffix for the files written.
append_trailing_newlines: indicate whether this sink should write an
additional newline char after writing each element.
num_shards: The number of files (shards) used for output. If not set, the
service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template: A template string containing placeholders for
the shard number and shard count. When constructing a filename for a
particular shard number, the upper-case letters 'S' and 'N' are
replaced with the 0-padded shard number and shard count respectively.
This argument can be '' in which case it behaves as if num_shards was
set to 1 and only one file will be generated. The default pattern used
is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.
coder: Coder used to encode each line.
compression_type: Used to handle compressed output files. Typical value
is CompressionTypes.AUTO, in which case the final file path's
extension (as determined by file_path_prefix, file_name_suffix,
num_shards and shard_name_template) will be used to detect the
compression.
header: String to write at beginning of file as a header. If not None and
append_trailing_newlines is set, '\n' will be added.
Returns:
A _TextSink object usable for writing.
"""
super(_TextSink, self).__init__(
file_path_prefix,
file_name_suffix=file_name_suffix,
num_shards=num_shards,
shard_name_template=shard_name_template,
coder=coder,
mime_type='text/plain',
compression_type=compression_type)
self._append_trailing_newlines = append_trailing_newlines
self._header = header
def write_encoded_record(self, file_handle, encoded_value):
"""Writes a single encoded record."""
file_handle.write(encoded_value)
if self._append_trailing_newlines:
file_handle.write(b'\n')
class ReadAllFromText(PTransform):
"""A ``PTransform`` for reading a ``PCollection`` of text files.
Reads a ``PCollection`` of text files or file patterns and and produces a
``PCollection`` of strings.
Parses a text file as newline-delimited elements, by default assuming
UTF-8 encoding. Supports newline delimiters '\\n' and '\\r\\n'.
This implementation only supports reading text encoded using UTF-8 or ASCII.
This does not support other encodings such as UTF-16 or UTF-32.
"""
DEFAULT_DESIRED_BUNDLE_SIZE = 64 * 1024 * 1024 # 64MB
def __init__(
self,
min_bundle_size=0,
desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(),
skip_header_lines=0,
**kwargs):
"""Initialize the ``ReadAllFromText`` transform.
Args:
min_bundle_size: Minimum size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
desired_bundle_size: Desired size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
compression_type: Used to handle compressed input files. Typical value
is ``CompressionTypes.AUTO``, in which case the underlying file_path's
extension will be used to detect the compression.
strip_trailing_newlines: Indicates whether this source should remove
the newline char in each line it reads before decoding that line.
validate: flag to verify that the files exist during the pipeline
creation time.
skip_header_lines: Number of header lines to skip. Same number is skipped
from each source file. Must be 0 or higher. Large number of skipped
lines might impact performance.
coder: Coder used to decode each line.
"""
super(ReadAllFromText, self).__init__(**kwargs)
source_from_file = partial(
_create_text_source, min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines, coder=coder,
skip_header_lines=skip_header_lines)
self._desired_bundle_size = desired_bundle_size
self._min_bundle_size = min_bundle_size
self._compression_type = compression_type
self._read_all_files = ReadAllFiles(
True, compression_type, desired_bundle_size, min_bundle_size,
source_from_file)
class ReadFromText(PTransform):
r"""A :class:`~apache_beam.transforms.ptransform.PTransform` for reading text
files.
Parses a text file as newline-delimited elements, by default assuming
``UTF-8`` encoding. Supports newline delimiters ``\n`` and ``\r\n``.
This implementation only supports reading text encoded using ``UTF-8`` or
``ASCII``.
This does not support other encodings such as ``UTF-16`` or ``UTF-32``.
"""
_source_class = _TextSource
def __init__(
self,
file_pattern=None,
min_bundle_size=0,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(),
validate=True,
skip_header_lines=0,
**kwargs):
"""Initialize the :class:`ReadFromText` transform.
Args:
file_pattern (str): The file path to read from as a local file path or a
GCS ``gs://`` path. The path can contain glob characters
(``*``, ``?``, and ``[...]`` sets).
min_bundle_size (int): Minimum size of bundles that should be generated
when splitting this source into bundles. See
:class:`~apache_beam.io.filebasedsource.FileBasedSource` for more
details.
compression_type (str): Used to handle compressed input files.
Typical value is :attr:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
underlying file_path's extension will be used to detect the compression.
strip_trailing_newlines (bool): Indicates whether this source should
remove the newline char in each line it reads before decoding that line.
validate (bool): flag to verify that the files exist during the pipeline
creation time.
skip_header_lines (int): Number of header lines to skip. Same number is
skipped from each source file. Must be 0 or higher. Large number of
skipped lines might impact performance.
coder (~apache_beam.coders.coders.Coder): Coder used to decode each line.
"""
super(ReadFromText, self).__init__(**kwargs)
self._source = self._source_class(
file_pattern, min_bundle_size, compression_type,
strip_trailing_newlines, coder, validate=validate,
skip_header_lines=skip_header_lines)
class ReadFromTextWithFilename(ReadFromText):
r"""A :class:`~apache_beam.io.textio.ReadFromText` for reading text
files returning the name of the file and the content of the file.
This class extend ReadFromText class just setting a different
_source_class attribute.
"""
_source_class = _TextSourceWithFilename
class WriteToText(PTransform):
"""A :class:`~apache_beam.transforms.ptransform.PTransform` for writing to
text files."""
def __init__(
self,
file_path_prefix,
file_name_suffix='',
append_trailing_newlines=True,
num_shards=0,
shard_name_template=None,
coder=coders.ToStringCoder(),
compression_type=CompressionTypes.AUTO,
header=None):
r"""Initialize a :class:`WriteToText` transform.
Args:
file_path_prefix (str): The file path to write to. The files written will
begin with this prefix, followed by a shard identifier (see
**num_shards**), and end in a common extension, if given by
**file_name_suffix**. In most cases, only this argument is specified and
**num_shards**, **shard_name_template**, and **file_name_suffix** use
default values.
file_name_suffix (str): Suffix for the files written.
append_trailing_newlines (bool): indicate whether this sink should write
an additional newline char after writing each element.
num_shards (int): The number of files (shards) used for output.
If not set, the service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template (str): A template string containing placeholders for
the shard number and shard count. Currently only ``''`` and
``'-SSSSS-of-NNNNN'`` are patterns accepted by the service.
When constructing a filename for a particular shard number, the
upper-case letters ``S`` and ``N`` are replaced with the ``0``-padded
shard number and shard count respectively. This argument can be ``''``
in which case it behaves as if num_shards was set to 1 and only one file
will be generated. The default pattern used is ``'-SSSSS-of-NNNNN'``.
coder (~apache_beam.coders.coders.Coder): Coder used to encode each line.
compression_type (str): Used to handle compressed output files.
Typical value is :class:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
final file path's extension (as determined by **file_path_prefix**,
**file_name_suffix**, **num_shards** and **shard_name_template**) will
be used to detect the compression.
header (str): String to write at beginning of file as a header.
If not :data:`None` and **append_trailing_newlines** is set, ``\n`` will
be added.
"""
self._sink = _TextSink(file_path_prefix, file_name_suffix,
append_trailing_newlines, num_shards,
shard_name_template, coder, compression_type, header)
| [
2,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
9387,
351,
198,
2,
428,
670,
329,
3224,
1321,
5115,
6634,
9238,
13,
198,
2,
383,
... | 2.771964 | 5,600 |
import os
import shutil
import sys
import tempfile
import unittest
import configbetter
if __name__ == '__main__':
unittest.main()
| [
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
25064,
198,
11748,
20218,
7753,
198,
11748,
555,
715,
395,
198,
198,
11748,
4566,
27903,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
... | 2.897959 | 49 |
"""
This module contains the 'create_account' node menu.
"""
from textwrap import dedent
def create_account(caller):
"""Create a new account.
This node simply prompts the user to enter a username.
The input is redirected to 'create_username'.
"""
text = "Enter your new account's name."
options = (
{
"key": "_default",
"desc": "Enter your new username.",
"goto": "create_username",
},
)
return text, options
| [
171,
119,
123,
37811,
198,
1212,
8265,
4909,
262,
705,
17953,
62,
23317,
6,
10139,
6859,
13,
198,
198,
37811,
198,
198,
6738,
2420,
37150,
1330,
4648,
298,
198,
198,
4299,
2251,
62,
23317,
7,
13345,
263,
2599,
198,
220,
220,
220,
37... | 2.558974 | 195 |
from IntCode import intcode_day7
from time import sleep
memory_puzzle = [3, 8, 1001, 8, 10, 8, 105, 1, 0, 0, 21, 34, 47, 72, 93, 110, 191, 272, 353, 434, 99999, 3, 9, 102, 3, 9, 9, 1001, 9, 3, 9, 4, 9, 99, 3, 9, 102, 4, 9, 9, 1001, 9, 4, 9, 4, 9, 99, 3, 9, 101, 3, 9, 9, 1002, 9, 3, 9, 1001, 9, 2, 9, 1002, 9, 2, 9, 101, 4, 9, 9, 4, 9, 99, 3, 9, 1002, 9, 3, 9, 101, 5, 9, 9, 102, 4, 9, 9, 1001, 9, 4, 9, 4, 9, 99, 3, 9, 101, 3, 9, 9, 102, 4, 9, 9, 1001, 9, 3, 9, 4, 9, 99, 3, 9, 101, 2, 9, 9, 4, 9, 3, 9, 1001, 9, 2, 9, 4, 9, 3, 9, 101, 2, 9, 9, 4, 9, 3, 9, 1002, 9, 2, 9, 4, 9, 3, 9, 102, 2, 9, 9, 4, 9, 3, 9, 1002, 9, 2, 9, 4, 9, 3, 9, 102, 2, 9, 9, 4, 9, 3, 9, 101, 1, 9, 9, 4, 9, 3, 9, 1002, 9, 2, 9, 4, 9, 3, 9, 1002, 9, 2, 9, 4, 9, 99, 3, 9, 102, 2, 9, 9, 4, 9, 3, 9, 1002, 9, 2, 9, 4, 9, 3, 9, 102, 2, 9, 9, 4, 9, 3, 9, 1002, 9, 2, 9, 4, 9, 3, 9, 1001, 9, 1, 9, 4, 9, 3, 9, 1001, 9, 2, 9, 4, 9, 3, 9, 102, 2, 9, 9, 4, 9, 3, 9, 101, 1, 9, 9, 4, 9, 3, 9, 1001, 9, 1, 9, 4, 9, 3, 9, 101, 2, 9, 9, 4, 9, 99, 3, 9, 1001, 9, 1, 9, 4, 9, 3, 9, 1001, 9, 2, 9, 4, 9, 3, 9, 101, 2, 9, 9, 4, 9, 3, 9, 101, 2, 9, 9, 4, 9, 3, 9, 1001, 9, 1, 9, 4, 9, 3, 9, 1001, 9, 1, 9, 4, 9, 3, 9, 1001, 9, 1, 9, 4, 9, 3, 9, 102, 2, 9, 9, 4, 9, 3, 9, 101, 2, 9, 9, 4, 9, 3, 9, 1001, 9, 2, 9, 4, 9, 99, 3, 9, 1002, 9, 2, 9, 4, 9, 3, 9, 1001, 9, 2, 9, 4, 9, 3, 9, 102, 2, 9, 9, 4, 9, 3, 9, 1002, 9, 2, 9, 4, 9, 3, 9, 102, 2, 9, 9, 4, 9, 3, 9, 101, 2, 9, 9, 4, 9, 3, 9, 1002, 9, 2, 9, 4, 9, 3, 9, 1001, 9, 2, 9, 4, 9, 3, 9, 102, 2, 9, 9, 4, 9, 3, 9, 1002, 9, 2, 9, 4, 9, 99, 3, 9, 101, 1, 9, 9, 4, 9, 3, 9, 101, 1, 9, 9, 4, 9, 3, 9, 101, 2, 9, 9, 4, 9, 3, 9, 102, 2, 9, 9, 4, 9, 3, 9, 1001, 9, 2, 9, 4, 9, 3, 9, 101, 1, 9, 9, 4, 9, 3, 9, 102, 2, 9, 9, 4, 9, 3, 9, 1001, 9, 1, 9, 4, 9, 3, 9, 101, 1, 9, 9, 4, 9, 3, 9, 1002, 9, 2, 9, 4, 9, 99]
# Expected 139629729
test1 = [3, 26, 1001, 26, -4, 26, 3, 27, 1002, 27, 2, 27, 1, 27, 26, 27, 4, 27, 1001, 28, -1, 28, 1005, 28, 6, 99, 0, 0, 5]
# Expected 18216
test2 = [3, 52, 1001, 52, -5, 52, 3, 53, 1, 52, 56, 54, 1007, 54, 5, 55, 1005, 55, 26, 1001, 54, -5, 54, 1105, 1, 12, 1, 53, 54, 53, 1008, 54, 0, 55, 1001, 55, 1, 55, 2, 53, 55, 53, 4, 53, 1001, 56, -1, 56, 1005, 56, 6, 99, 0, 0, 0, 0, 10]
memory = memory_puzzle[:]
# All combinations of Phases
outputs = []
condition = True
for c1 in range(0, 5):
phases_1 = [5, 6, 7, 8, 9]
p1 = phases_1[c1]
phases_1.pop(c1)
for c2 in range(0, 4):
phases_2 = phases_1[:]
p2 = phases_2[c2]
phases_2.pop(c2)
for c3 in range(0, 3):
phases_3 = phases_2[:]
p3 = phases_3[c3]
phases_3.pop(c3)
for c4 in range(0, 2):
phases_4 = phases_3[:]
p4 = phases_4[c4]
phases_4.pop(c4)
for c5 in range(0, 1):
phases_5 = phases_4[:]
p5 = phases_5[c5]
phases_5.pop(c5)
# Inicial value for the Amplifiers
outputE = 0
MemA, MemB, MemC, MemD, MemE = memory[:], memory[:], memory[:], memory[:], memory[:]
com_posA = com_posB = com_posC = com_posD = com_posE = 0
condition = True
# First run (giving the Amplifiers their phases)
MemA, outputA, _, com_posA = intcode_day7(MemA, com_posA, p1, outputE)
MemB, outputB, _, com_posB = intcode_day7(MemB, com_posB, p2, outputA)
MemC, outputC, _, com_posC = intcode_day7(MemC, com_posC, p3, outputB)
MemD, outputD, _, com_posD = intcode_day7(MemD, com_posD, p4, outputC)
MemE, outputE, _, com_posE = intcode_day7(MemE, com_posE, p5, outputD)
# Rerunning giving them only their inputs (previous Amp'2 output)
while condition:
MemA, outputA, _, com_posA = intcode_day7(MemA, com_posA, outputE)
MemB, outputB, _, com_posB = intcode_day7(MemB, com_posB, outputA)
MemC, outputC, _, com_posC = intcode_day7(MemC, com_posC, outputB)
MemD, outputD, _, com_posD = intcode_day7(MemD, com_posD, outputC)
MemE, outputE, condition, com_posE = intcode_day7(MemE, com_posE, outputD)
outputs += [outputE]
print(max(outputs))
| [
6738,
2558,
10669,
1330,
493,
8189,
62,
820,
22,
198,
6738,
640,
1330,
3993,
198,
198,
31673,
62,
79,
9625,
796,
685,
18,
11,
807,
11,
1802,
16,
11,
807,
11,
838,
11,
807,
11,
13343,
11,
352,
11,
657,
11,
657,
11,
2310,
11,
49... | 1.709865 | 2,585 |
__all__ = ["make_node", "CM_Node"]
import bpy
import bpy.types
import bpy_types
import bmesh
import bmesh.ops
import math
import mathutils
import pyconspack as cpk
from array import array
from pyconspack import Conspack
from mathutils import Matrix
import io_scene_consmodel.consmodel as consmodel
from io_scene_consmodel.util import (matrix_to_vec, AttrPack, defencode)
# Nodes
# make_node
# Conspack regs
defencode(CM_Node, "node")
defencode(CM_Mesh, "mesh")
defencode(CM_Camera, "camera")
defencode(CM_LightPoint, "light-point")
defencode(CM_Material, "material-simple")
| [
834,
439,
834,
796,
14631,
15883,
62,
17440,
1600,
366,
24187,
62,
19667,
8973,
198,
198,
11748,
275,
9078,
198,
11748,
275,
9078,
13,
19199,
198,
11748,
275,
9078,
62,
19199,
198,
198,
11748,
275,
76,
5069,
198,
11748,
275,
76,
5069,... | 2.831731 | 208 |
import os
from urllib.error import HTTPError
from urllib.request import urlretrieve
class AbstractRemoteFile:
"""
AbstractRemoteFile provide infrastructure for RemoteFile where
only the method fetch() needs to be defined for a concreate implementation.
"""
@property
def local(self):
"""Return true if the file is available locally on the File System"""
return os.path.exists(self._file_path)
def fetch(self):
"""Perform the action needed to fetch the content and store it locally"""
pass
@property
def path(self):
"""Return the actual local file path"""
if not self.local:
self.fetch()
return self._file_path
class GoogleDriveFile(AbstractRemoteFile):
"""
Helper file to manage caching and retrieving of file available on Google Drive
"""
def __init__(self, local_path=None, google_id=None, local_base=None):
"""
Provide the information regarding where the file should be located
and where to fetch it if missing.
:param local_path: relative or absolute path
:param google_id: Resource ID from google
:param local_base: Absolute path when local_path is relative
"""
super().__init__(local_path, local_base)
self._gid = google_id
class HttpFile(AbstractRemoteFile):
"""
Helper file to manage caching and retrieving of file available on HTTP servers
"""
def __init__(self, local_path=None, remote_url=None, local_base=None):
"""
Provide the information regarding where the file should be located
and where to fetch it if missing.
:param local_path: relative or absolute path
:param remote_url: http(s):// url to fetch the file from
:param local_base: Absolute path when local_path is relative
"""
super().__init__(local_path, local_base)
self._url = remote_url
__all__ = [
"AbstractRemoteFile",
"GoogleDriveFile",
"HttpFile",
]
| [
11748,
28686,
198,
6738,
2956,
297,
571,
13,
18224,
1330,
14626,
12331,
198,
6738,
2956,
297,
571,
13,
25927,
1330,
19016,
1186,
30227,
628,
628,
198,
198,
4871,
27741,
36510,
8979,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2... | 2.811034 | 725 |
from sys import stdin
number = stdin.read()
print sum(range(min(int(number), 1), max(int(number), 1) + 1))
| [
6738,
25064,
1330,
14367,
259,
198,
17618,
796,
14367,
259,
13,
961,
3419,
198,
4798,
2160,
7,
9521,
7,
1084,
7,
600,
7,
17618,
828,
352,
828,
3509,
7,
600,
7,
17618,
828,
352,
8,
1343,
352,
4008,
198
] | 2.74359 | 39 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Xiaobo Yang
@Contact : hal_42@zju.edu.cn
@Time : 2021/11/8 21:16
@File : decorators.py
@Software: PyCharm
@Desc :
"""
from warnings import warn
from .timer import Timer
from .color_print import *
__all__ = ['deprecated', 'func_runtime_timer']
def deprecated(func):
"""Show deprecation warning of a function."""
return wrapper
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
13838,
220,
1058,
22450,
20391,
10998,
198,
31,
17829,
1058,
10284,
62,
3682,
31,
89,
14396,
13,
15532,... | 2.634615 | 156 |
import json, os
import boto3
from awake import wol
battlestation_mac_address = os.environ['DESKTOP_MAC_ADDR']
sqs = boto3.client('sqs')
utility_q_url = os.environ['UTILITY_Q_URL']
while True:
response = sqs.receive_message(
QueueUrl=utility_q_url,
MaxNumberOfMessages=10,
MessageAttributeNames=[
'All'
],
WaitTimeSeconds=5
)
messages_to_delete = []
if 'Messages' in response:
for message in response['Messages']:
body = json.loads(message['Body'])
print('Received message with body: {}'.format(body))
if body['target'] != 'RaspberryPi':
print('Not intended for this system, ignoring...')
continue
else:
print('Intended for this system, processing...')
if body['intent'] == 'SmartHomeAction' and body['action'] == 'TurnOn' and body['entity'] == 'Battlestation':
print("Turning on Battlestation...")
wol.send_magic_packet(battlestation_mac_address)
messages_to_delete.append({'Id': message['MessageId'], 'ReceiptHandle': message['ReceiptHandle']})
if messages_to_delete:
sqs.delete_message_batch(
QueueUrl=utility_q_url,
Entries=messages_to_delete
)
| [
11748,
33918,
11,
28686,
198,
198,
11748,
275,
2069,
18,
198,
6738,
21693,
1330,
266,
349,
198,
198,
65,
1078,
75,
27364,
62,
20285,
62,
21975,
796,
28686,
13,
268,
2268,
17816,
30910,
42,
35222,
62,
44721,
62,
2885,
7707,
20520,
198,... | 2.15235 | 617 |
from django import forms
from django.forms import BaseFormSet
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
23914,
1330,
7308,
8479,
7248,
628
] | 3.9375 | 16 |
import random
| [
198,
11748,
4738,
628
] | 4 | 4 |
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
from typing import TYPE_CHECKING, Optional, Iterable, Tuple, List, Dict, cast
import struct
import math
from enum import IntEnum
from itertools import repeat
from ezdxf.lldxf import const
from ezdxf.tools.binarydata import bytes_to_hexstr, ByteStream, BitStream
from ezdxf import colors
from ezdxf.math import Vec3, Matrix44, Z_AXIS, ConstructionCircle, ConstructionArc
from ezdxf.entities import factory
import logging
if TYPE_CHECKING:
from ezdxf.eztypes import (
Tags, TagWriter, Drawing, Polymesh, Polyface, Polyline, Hatch,
)
logger = logging.getLogger('ezdxf')
CHUNK_SIZE = 127
| [
2,
15069,
357,
66,
8,
12131,
11,
1869,
39193,
4270,
4224,
72,
198,
2,
13789,
25,
17168,
13789,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
11,
32233,
11,
40806,
540,
11,
309,
29291,
11,
7343,
11,
360,
713,
11,
3350,
198,
11748,
... | 3.009132 | 219 |
from __future__ import absolute_import
import tensorflow as tf
import matplotlib.pyplot as plt
import os
from dataset import Dataset
from Yolo import Yolo
import ConfigParser
if __name__ == '__main__':
tf.app.run() | [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
628,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
28686,
198,
6738,
27039,
1330,
16092,
292,
316,
198,
6738,
575,
14... | 3.140845 | 71 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Starting point for writing scripts to integrate TFLM with external IDEs.
This script can be used to output a tree containing only the sources and headers
needed to use TFLM for a specific configuration (e.g. target and
optimized_kernel_implementation). This should serve as a starting
point to integrate TFLM with external IDEs.
The goal is for this script to be an interface that is maintained by the TFLM
team and any additional scripting needed for integration with a particular IDE
should be written external to the TFLM repository and built to work on top of
the output tree generated with this script.
We will add more documentation for a desired end-to-end integration workflow as
we get further along in our prototyping. See this github issue for more details:
https://github.com/tensorflow/tensorflow/issues/47413
"""
import argparse
import fileinput
import os
import shutil
import subprocess
# For examples, we are explicitly making a deicision to not have any source
# specialization based on the TARGET and OPTIMIZED_KERNEL_DIR. The thinking
# here is that any target-specific sources should not be part of the TFLM
# tree. Rather, this function will return an examples directory structure for
# x86 and it will be the responsibility of the target-specific examples
# repository to provide all the additional sources (and remove the unnecessary
# sources) for the examples to run on that specific target.
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Starting script for TFLM project generation")
parser.add_argument(
"output_dir", help="Output directory for generated TFLM tree")
parser.add_argument(
"--makefile_options",
default="",
help="Additional TFLM Makefile options. For example: "
"--makefile_options=\"TARGET=<target> "
"OPTIMIZED_KERNEL_DIR=<optimized_kernel_dir> "
"TARGET_ARCH=corex-m4\"")
parser.add_argument(
"--examples",
"-e",
action="append",
help="Examples to add to the output tree. For example: "
"-e hello_world -e micro_speech")
args = parser.parse_args()
_create_tflm_tree(args.output_dir, args.makefile_options)
if args.examples is not None:
_create_examples_tree(args.output_dir, args.examples)
| [
2,
15069,
33448,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,... | 3.574699 | 830 |
import os
import glob
import torch
import numpy as np
import torchaudio
from torch.nn import functional as F
import sippyart
PROJ_DIR = sippyart.__path__[0] + "/"
PARENT_DIR = PROJ_DIR + "../"
# TODO sync_n sample rates
| [
11748,
28686,
198,
11748,
15095,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
28034,
24051,
198,
6738,
28034,
13,
20471,
1330,
10345,
355,
376,
198,
198,
11748,
264,
41214,
433,
198,
198,
31190,
41,
62,
34720,
... | 2.747126 | 87 |
'''Configuration file for gentea.py with basemap FAP_teacup_base'''
database = {
'user' : 'app_user',
'passwd' : 'ecohdb',
'host' : 'XXX.XXX.XX.200',
'port' : '1521',
'sid' : 'ecohdb'
}
input_image = (r'../work/FAP_teacup_base.png')
# Both of these are optional. They default to size 11 'Arial.ttf'
#this doesn't actually do anything.
#font_size = 22
font_size = 40
font_face = 'Arial.ttf'
# Optional; defaults to (1,1). Positions the timestamp on the image.
timestamp_location = (281, 332) # Upper-left of the "inner" image
# This function dynamically converts capacities into relative teacup sizes.
# Edit freely (or hand-assign values to the `teacup_size` attributes).
sizer = lambda capacity: (capacity)/25333.3
# This is the core of the config file.
# Each output attribute is essentially a json object (python dict) containing
# the necessary information to grab a value from the database and draw it.
output_attributes = [
#MTELFBCO
{
'attribute_type' : 'teacup',
#'upper_left' : (570, 696),
#'upper_left' : (250, 820),
#'upper_left' : (300, 900),
#'upper_left' : (340, 900),
'upper_left' : (1090, 700),
'site_datatype_id' : 101013,
'teacup_size' : 1, # Mt. Elbert is too small to work on the other scale
'capacity' : 11143,
'label' : 'Mt. Elbert Forebay'
},
{
'attribute_type' : 'line',
'color' : 'red',
'thickness' : 2,
#'point_a' : (783, 755),
#'point_a' : (325, 905),
#'point_a' : (645, 940),
#'point_a' : (685, 940),
'point_a' : (1095, 710),
#'point_b' : (888, 828)
'point_b' : (900, 825)
},
{
'attribute_type' : 'point',
'color' : 'red',
'size' : 9,
#'center' : (888, 828)
'center' : (900, 825)
},
# PUERESCO
{
'attribute_type' : 'teacup',
#'upper_left' : (2092, 1806),
'upper_left' : (2295, 1806),
'site_datatype_id' : 100450,
'teacup_size' : sizer,
'capacity' : 349940,
'label' : 'Pueblo'
},
{
'attribute_type' : 'line',
'color' : 'red',
'thickness' : 2,
#'point_a' : (2482, 1866),
'point_a' : (2610, 1807),
'point_b' : (2628, 1757)
},
{
'attribute_type' : 'point',
'color' : 'red',
'size' : 9,
'center' : (2628, 1757)
},
# RUERESCO
{
'attribute_type' : 'teacup',
#'upper_left' : (504, 296),
#'upper_left' : (504, 310),
'upper_left' : (220, 720),
'site_datatype_id' : 100460,
'teacup_size' : sizer,
'capacity' : 102373,
'label' : 'Ruedi'
},
{
'attribute_type' : 'line',
'color' : 'red',
'thickness' : 2,
#'point_a' : (532, 416),
#'point_a' : (515, 330),
'point_a' : (320, 720),
#'point_b' : (425, 545)
'point_b' : (405, 550)
},
{
'attribute_type' : 'point',
'color' : 'red',
'size' : 9,
#'center' : (425, 545)
'center' : (405, 550)
},
# TURQLACO
{
'attribute_type' : 'teacup',
#'upper_left' : (912, 260),
'upper_left' : (1000, 280),
'site_datatype_id' : 101012,
'teacup_size' : sizer,
'capacity' : 129398.0,
'label' : 'Turquoise'
},
{
'attribute_type' : 'line',
'color' : 'red',
'thickness' : 2,
#'point_a' : (988, 440),
#'point_a' : (1050, 380),
'point_a' : (1010, 300),
#'point_b' : (900, 652)
'point_b' : (890, 645)
},
{
'attribute_type' : 'point',
'color' : 'red',
'size' : 9,
#'center' : (900, 652)
'center' : (890, 645)
},
# TWIRESCO
{
'attribute_type' : 'teacup',
#'upper_left' : (616, 1144),
'upper_left' : (660, 1144),
'site_datatype_id' : 101014,
'teacup_size' : sizer,
'capacity' : 141000.0,
'label' : 'Twin Lakes'
},
{
'attribute_type' : 'line',
'color' : 'red',
'thickness' : 2,
#'point_a' : (687, 1131),
'point_a' : (800, 1145),
#'point_b' : (900, 855)
'point_b' : (900, 860)
},
{
'attribute_type' : 'point',
'color' : 'red',
'size' : 9,
#'center' : (900, 855)
'center' : (900, 860)
}
]
if __name__ == '__main__':
# Convenience for experimenting with sizes
exec(r"print '\n'.join('%s\t%s' % (a['capacity'], sizer(a['capacity'])) "
r"for a in output_attributes if a['attribute_type'] == 'teacup')")
| [
7061,
6,
38149,
2393,
329,
25049,
18213,
13,
9078,
351,
1615,
368,
499,
376,
2969,
62,
660,
330,
929,
62,
8692,
7061,
6,
198,
198,
48806,
796,
1391,
198,
220,
220,
220,
705,
7220,
6,
1058,
705,
1324,
62,
7220,
3256,
198,
220,
220,... | 1.876626 | 2,537 |
import time
from image_recognition import ImageRecognition
from text_recognition import TextRecognition
| [
11748,
640,
198,
198,
6738,
2939,
62,
26243,
653,
1330,
7412,
6690,
2360,
653,
198,
6738,
2420,
62,
26243,
653,
1330,
8255,
6690,
2360,
653,
628
] | 4.076923 | 26 |
from operator import attrgetter
if __name__ == '__main__':
users = [User(23), User(3), User(99)]
print(users)
sorted_users_lambda = sorted(users, key=lambda u: u.user_id)
print(sorted_users_lambda)
sorted_users_attr = sorted(users, key=attrgetter('user_id'))
print(sorted_users_attr) | [
6738,
10088,
1330,
708,
81,
1136,
353,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
2985,
796,
685,
12982,
7,
1954,
828,
11787,
7,
18,
828,
11787,
7,
2079,
15437,
628,
220,
220,
220,
... | 2.544715 | 123 |