content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from PIL import Image
import random
im = Image.open("/tmp/out.png")
px = im.load()
rng = random.Random()
COMMANDS = 18
size = im.size
position = (0, 0)
direction = (1, 0)
matrix_1 = ((0, 1), (-1, 0))
matrix_2 = ((0, -1), (1, 0))
stack = []
inbuf = ["b", "e", "e", "s", "i", "n", "c", "u", "r", "s", "e"]
while True:
command, param = randomize(px[position], position, COMMANDS)
print(command, position, direction, stack, Command(command))
if command == 4:
# read input
if len(inbuf) > 0:
stack.append(ord(inbuf.pop(0)))
else:
rotate(matrix_1)
elif command == 5:
# add
if len(stack) >= 2:
stack.append(stack.pop() + stack.pop())
else:
rotate(matrix_2)
elif command == 3:
# mod
if len(stack) >= 2:
stack.append(stack.pop() % stack.pop())
else:
rotate(matrix_2)
elif command == 0:
# div
if len(stack) >= 2:
stack.append(stack.pop() // stack.pop())
else:
rotate(matrix_2)
elif command == 6: # push
stack.append(param)
elif command == 8:
# rot
if len(stack) > 0:
stack = [stack.pop()] + stack
elif command == 9:
# unrot
if len(stack) > 0:
stack.append(stack.pop(0))
elif command == 10:
stack.extend([stack.pop()] * 2)
elif command == 7:
# mul
if len(stack) >= 2:
stack.append(stack.pop() * stack.pop())
else:
rotate(matrix_2)
elif command == 11:
rotate(matrix_1)
elif command == 12:
rotate(matrix_2)
elif command == 13: # setdir
arg = param
lowbits = (arg & 0b111) - 3
highbits = ((arg >> 3) & 0b111) - 3
direction = highbits, lowbits
elif command == 2: # setdir if zero
if len(stack) > 0 and stack[-1] == 0:
arg = param
lowbits = (arg & 0b111) - 3
highbits = ((arg >> 3) & 0b111) - 3
direction = highbits, lowbits
elif command == 16:
inbuf.append(chr(stack.pop()))
elif command == 17:
if len(stack) >= 2:
a, b = stack.pop(), stack.pop()
stack.append(a)
stack.append(b)
elif command == 1:
if len(stack) > 0: stack.pop()
elif command == 15:
break
position = (position[0] + direction[0], position[1] + direction[1])
while position[0] < 0 or position[1] < 0 or position[0] >= size[0] or position[1] >= size[1]:
position = (position[0] % size[0], position[1] % size[1])
print(stack, inbuf) | [
6738,
350,
4146,
1330,
7412,
198,
11748,
4738,
198,
198,
320,
796,
7412,
13,
9654,
7203,
14,
22065,
14,
448,
13,
11134,
4943,
198,
8416,
796,
545,
13,
2220,
3419,
198,
198,
81,
782,
796,
4738,
13,
29531,
3419,
198,
198,
9858,
10725,... | 1.994007 | 1,335 |
from dpd.utils import download_file
def download_lodes_data(data, st, part_or_seg, type_, year):
"""
Download LODES OD file. APIS documentation from here: https://lehd.ces.census.gov/data/lodes/LODES7/LODESTechDoc7.4.pdf
Args:
data (str): one of "od", "rac", or "wac"
e.g. "od"
st (str): lowercase, 2-letter postal code for a chosen state
e.g. "ca"
part_or_seg (str): If data is od, part of the state file, can have a value of either “main” or “aux”. Complimentary parts of the state file, the main part includes jobs with both workplace and residence in the state and the aux part includes jobs with the workplace in the state and the residence outside of the state. If data is rac or wac, segment of the workforce, can have the values of “S000”, “SA01”, “SA02”, “SA03”, “SE01”, “SE02”, “SE03”, “SI01”, “SI02”, or “SI03”. These correspond to the same segments of the workforce as are listed in the OD file structure.
e.g. "main"
type_ (str): Job Type, can have a value of “JT00” for All Jobs, “JT01” for Primary Jobs, “JT02” for All Private Jobs, “JT03” for Private Primary Jobs, “JT04” for All Federal Jobs, or “JT05” for Federal Primary Jobs.
e.g. "JT00"
year (str): Year of job data. Can have the value of 2002-2015 for most states.
e.g. "2017"
Returns:
str: the local filename of the downloaded file
"""
data_values = ["od", "rac", "wac"]
if data not in data_values:
raise ValueError("data must be one of " + str(data_values))
if data == "od":
part_values = ["main", "aux"]
if part_or_seg not in part_values:
raise ValueError(
"part_or_seg must be one of "
+ str(part_values)
+ "when data is "
+ data
)
elif data in ["rac", "wac"]:
seg_values = [
"S000",
"SA01",
"SA02",
"SA03",
"SE01",
"SE02",
"SE03",
"SI01",
"SI02",
"SI03",
]
if part_or_seg not in seg_values:
raise ValueError(
"part_or_seg must be one of " + str(seg_values) + "when data is " + data
)
type_values = ["JT00", "JT01", "JT02", "JT03", "JT04", "JT05"]
if type_ not in type_values:
raise ValueError("type_ must be one of " + str(type_values))
url = (
"https://lehd.ces.census.gov/data/lodes/LODES7/%s/%s/%s_%s_%s_%s_%s.csv.gz"
% (st, data, st, data, part_or_seg, type_, year)
)
return download_file(url)
def download_lodes_xwalk(st):
"""
Download LODES Crosswalk file. APIS documentation from here: https://lehd.ces.census.gov/data/lodes/LODES7/LODESTechDoc7.4.pdf
Args:
st (str): lowercase, 2-letter postal code for a chosen state
e.g. "ca"
Returns:
str: the local filename of the downloaded file
"""
url = "https://lehd.ces.census.gov/data/lodes/LODES7/%s/%s_xwalk.csv.gz" % (st, st)
return download_file(url)
| [
6738,
288,
30094,
13,
26791,
1330,
4321,
62,
7753,
628,
198,
4299,
4321,
62,
75,
4147,
62,
7890,
7,
7890,
11,
336,
11,
636,
62,
273,
62,
325,
70,
11,
2099,
62,
11,
614,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
10472,... | 2.102754 | 1,489 |
#
# Copyright (c) 2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import sys
from ovirtcli.format.format import Formatter
from ovirtsdk.xml import params
from ovirtsdk.infrastructure.common import Base
from ovirtsdk.infrastructure import brokers
import types
from ovirtsdk.xml.params import ApiSummary
class TextFormatter(Formatter):
"""Text formatter."""
name = 'text'
# list of complex types that should be treated as
# primitives (e.g should be wrapped to string at runtime)
complex_type_exceptions = [datetime.datetime]
# context.terminal.stdout.write('\n')
| [
2,
198,
2,
15069,
357,
66,
8,
3050,
2297,
10983,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
26... | 3.361765 | 340 |
'''
NAME
argumentos_at.py
VERSION
[1.0]
AUTHOR
Daianna Gonzalez Padilla <daianna@lcg.unam.mx>
DESCRIPTION
This programs gets a file with one or more dna sequences and returns an output file with the AT content
of each sequence, from the command line
CATEGORY
DNA sequence analysis
USAGE
argumentos_at.py -i input_file_path -o output_file_path -r sig_figs
ARGUMENTS
-i, --input INPUT
File with gene sequences
-o, --output OUTPUT
Path for the output file
-r, --round ROUND
Number of digits to round
INPUT
The file with the DNA sequences given by the user
OUTPUT
A file with the AT content of each sequence given in the input file
EXAMPLES
Example 1: gets a file with seq_1 = "ATCGTACGATCGATCGATCGCTAGACGTATCG"
seq_2 = "actgatcgacgatcgatcgatcacgact"
seq_3 = "ACTGAC-ACTGT-ACTGTA----CATGTG"
seq_4 = "ATTCTGNNNNNNNNNNNNNGTC"
and returns a new file with
AT content for seq_1 is 50.0
AT content for seq_2 is 50.0
AT content for seq_3 is 56.5217
GITHUB LINK
https://github.com/daianna21/python_class/blob/master/scripts/argumentos_at.py
'''
import argparse
import os
import re
# Create the parser
my_parser = argparse.ArgumentParser(description="Script that calculates AT content using command line arguments")
# Add the arguments, all are necessary
# Add an argument to request the input file
my_parser.add_argument("-i", "--input",
type=str,
help="File with gene sequences",
required=True)
# Add an argument to save the output in a new file
my_parser.add_argument("-o", "--output",
help="Path for the output file",
required=True)
# Add an argument for sig figs and change it to numeric
my_parser.add_argument("-r", "--round",
help="Number of digits to round",
type=int,
required=True)
# Function to calculate AT content of a dna sequence
# Execute the parse_args() method
args = my_parser.parse_args()
#Define the input and output files, and the sig figs of output
input_file = args.input
output_file = args.output
r= args.round
#Function to validate the given paths for input and output files
#Call the function with the arguments given
valid_path(args.input, args.output, args.round)
| [
7061,
6,
198,
20608,
198,
220,
220,
220,
4578,
418,
62,
265,
13,
9078,
198,
198,
43717,
198,
220,
220,
220,
685,
16,
13,
15,
60,
198,
198,
32,
24318,
1581,
198,
220,
220,
220,
9637,
666,
2616,
24416,
15744,
5049,
1279,
6814,
666,
... | 2.296064 | 1,118 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-09-28 03:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1959,
319,
12131,
12,
2931,
12,
2078,
7643,
25,
2682,
201,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201... | 2.686047 | 86 |
from django.contrib import admin
from . import models
admin.site.register(models.Product, ProductAdmin)
admin.site.register(models.Review, ReviewAdmin)
admin.site.register(models.Order, OrderAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
1330,
4981,
198,
198,
28482,
13,
15654,
13,
30238,
7,
27530,
13,
15667,
11,
8721,
46787,
8,
198,
28482,
13,
15654,
13,
30238,
7,
27530,
13,
14832,
11,
6602,
46787,
8,
1... | 3.491228 | 57 |
#!/usr/bin/env python3
import argparse
import yaml
import pathlib
import decimal
import datetime
import os
decimal.getcontext().prec = 10
parser = argparse.ArgumentParser()
parser.add_argument('--data', help='path to data directory', required=True)
args = parser.parse_args()
script_path = os.path.dirname(os.path.realpath(__file__))
config_path = script_path + '/../config'
# Configuration
config = {}
with open(config_path + '/tax.yaml') as f:
config['tax'] = yaml.safe_load(f.read())
# Find current tax year
today = datetime.date.today()
config['current_tax'] = next(x for x in config['tax'] if x['start_date'] <= today and x['end_date'] >= today)
# Data
total_sales = decimal.Decimal(0.00)
total_payments = decimal.Decimal(0.00)
data_directory = str(args.data)
data_path = pathlib.Path(data_directory)
invoice_files = list(data_path.glob('data/invoices/*.yaml'))
for invoice_file in invoice_files:
fp = invoice_file.open()
invoice_data = yaml.safe_load(fp.read())
fp.close()
if invoice_data['issue_date'] >= config['current_tax']['start_date'] and invoice_data['issue_date'] <= config['current_tax']['end_date'] and invoice_data['issue_date'] <= today:
print(invoice_data['number'])
total_sales += decimal.Decimal(invoice_data['total'])
print(invoice_data['total'])
# Subtract any payments from accounts receivable
if 'payments' in invoice_data:
for payment in invoice_data['payments']:
print(payment['amount'])
total_payments += decimal.Decimal(payment['amount'])
print()
print("Total sales: %.2f" % total_sales)
print("Total payments: %.2f" % total_payments)
# Calculate tax and national insurance
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
331,
43695,
198,
11748,
3108,
8019,
198,
11748,
32465,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
198,
12501,
4402,
13,
1136,
22866,
22446,
... | 2.673879 | 647 |
"""
Wanna-transfer
--------------
Wanna-transfer is a python based tool to
efficient upload and download large files to and from the cloud.
It is easy to setup
```````````````````
And run it:
.. code:: bash
$ pip install wanna-transfer
$ wanna -h
Links
`````
* `development
<https://github.com/Multiplicom/wanna-transfer>`_
"""
import re
import codecs
import os.path
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
requires = ["docopt==0.6.2", "boto3~=1.9", "configparser==3.5.0"]
test_requires = [
'mock==2.0.0'
]
setup_options = dict(
name="wanna-transfer",
version=find_version("wanna", "__init__.py"),
description="High level transfer to the cloud",
long_description=__doc__,
author="Piotr Pawlaczek",
author_email="info@pawlaczek.pl",
url="http://github.com/Multiplicom/wanna-transfer",
entry_points={"console_scripts": ["wanna = wanna.entry_points.wannacli:main"]},
packages=find_packages(exclude=["tests*"]),
install_requires=requires,
test_requires=test_requires,
zip_safe=False,
license="BSD",
classifiers=list(
(
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Natural Language :: English",
"Environment :: Console",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: PyPy",
)
),
)
setup(**setup_options)
| [
37811,
198,
54,
7697,
12,
39437,
198,
26171,
198,
54,
7697,
12,
39437,
318,
257,
21015,
1912,
2891,
284,
198,
16814,
9516,
290,
4321,
1588,
3696,
284,
290,
422,
262,
6279,
13,
198,
198,
1026,
318,
2562,
284,
9058,
198,
33153,
33153,
... | 2.520305 | 788 |
"""Extract all tables from an html file, printing and saving each to csv file."""
import pandas as pd
import sys
df_list = pd.read_html(sys.argv[1])
df = pd.DataFrame((df_list[0]))
for index, row in df.iterrows():
print row['Element'],"::", row['Cov.'], "::", row['Cov..1']
| [
37811,
11627,
974,
477,
8893,
422,
281,
27711,
2393,
11,
13570,
290,
8914,
1123,
284,
269,
21370,
2393,
526,
15931,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
25064,
198,
198,
7568,
62,
4868,
796,
279,
67,
13,
961,
62,
6... | 2.611111 | 108 |
# -*- coding: utf-8 -*-
from numpy import array, linspace, pi
import numpy as np
from scipy.optimize import curve_fit, root_scalar
def get_BH(self):
"""
Return the B(H) curve of the material (by default do nothing).
Parameters
----------
self : ModelBH
a ModelBH object
Returns
-------
BH: numpy.ndarray
B(H) values (two colums matrix: H and B(H))
"""
return None
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
299,
32152,
1330,
7177,
11,
300,
1040,
10223,
11,
31028,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330,
12133,
62,
11147,
11,
... | 2.420455 | 176 |
#!/usr/bin/python
import sys
import os
import build_include
""" parameters -- tag: the git tag or branch to use, fast: use git pull versus git clone
zero parameters means build local source without pulling from git"""
if len(sys.argv) > 1:
tag = sys.argv[1]
else:
tag = False
if len(sys.argv) > 2:
fast = sys.argv[2]
else:
fast = "true"
fast = ( fast == "true" )
android_path = build_include.build_apk(tag, not fast)
import build_settings
# old path for adb
adb_path = build_settings.android_sdk_path + "/tools/adb"
if not os.path.exists(adb_path):
adb_path = build_settings.android_sdk_path + "/platform-tools/adb"
if not os.path.exists(adb_path):
raise Exception("adb not found")
build_include.shell(adb_path + " install -r " + android_path + "/bin/MITApp-debug.apk", False)
print "Built project to: " + android_path
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
1382,
62,
17256,
198,
198,
37811,
220,
220,
10007,
1377,
7621,
25,
262,
17606,
7621,
393,
8478,
284,
779,
11,
3049,
25,
779,
17606,
2834,
905... | 2.623494 | 332 |
#!/usr/bin/env python3
"""Regenerate the frontend OpenAPI bindings to the backend.
Requirements:
- npm/node installed and in PATH
- java 8+ installed and in PATH
"""
import os
from pathlib import Path
import shutil
from typing import List
EXIT_SUCCESS = 0
if __name__ == "__main__":
main()
| [
171,
119,
123,
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
8081,
877,
378,
262,
2166,
437,
4946,
17614,
34111,
284,
262,
30203,
13,
198,
198,
42249,
25,
198,
220,
220,
220,
532,
30599,
14,
17440,
6589,
290,
287... | 2.924528 | 106 |
import numpy as np
import networkx as nx
import itertools as it
import pandas as pd
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import DiscreteFactor
import pylab as plt
from utils.utils import IsingModel, factor2Df, sampling
from collections import Counter
from pgmpy.models import BayesianModel
from pgmpy.inference import VariableElimination
from pgmpy.factors.discrete import TabularCPD
| [
11748,
299,
32152,
355,
45941,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
340,
861,
10141,
355,
340,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
23241,
3149,
88,
13,
27530,
1330,
2940,
709,
17633,
198,
6738,
23241,
3149,
88,... | 3.33871 | 124 |
# https://edabit.com/challenge/76ibd8jZxvhAwDskb
#
# A city skyline can be represented as a 2-D list with 1s representing buildings. In the example below, the height of
# the tallest building is 4 (second-most right column).
#
# [[0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 1, 0],
# [0, 0, 1, 0, 1, 0],
# [0, 1, 1, 1, 1, 0],
# [1, 1, 1, 1, 1, 1]]
#
# Create a function that takes a skyline (2-D list of 0's and 1's) and returns the height of the tallest skyscraper.
# Examples
#
# tallest_skyscraper([
# [0, 0, 0, 0],
# [0, 1, 0, 0],
# [0, 1, 1, 0],
# [1, 1, 1, 1]
# ]) ➞ 3
#
# tallest_skyscraper([
# [0, 1, 0, 0],
# [0, 1, 0, 0],
# [0, 1, 1, 0],
# [1, 1, 1, 1]
# ]) ➞ 4
#
# tallest_skyscraper([
# [0, 0, 0, 0],
# [0, 0, 0, 0],
# [1, 1, 1, 0],
# [1, 1, 1, 1]
# ]) ➞ 2
print(tallest_skyscraper([
[0, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 1, 0, 0],
[1, 1, 1, 1, 0]
]))
print(tallest_skyscraper([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 1, 1, 1]
]))
print(tallest_skyscraper([
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 1, 1, 0],
[1, 1, 1, 1]
]))
| [
2,
3740,
1378,
276,
29968,
13,
785,
14,
36747,
3540,
14,
4304,
571,
67,
23,
73,
57,
87,
85,
71,
23155,
35,
8135,
65,
198,
2,
198,
2,
317,
1748,
47566,
460,
307,
7997,
355,
257,
362,
12,
35,
1351,
351,
352,
82,
10200,
6832,
13,... | 1.891608 | 572 |
from __future__ import annotations
import asyncio
import json
import math
import time
from asyncio import Queue
import dataclasses
from dataclasses import dataclass
from typing import Dict, Type, Set, TypeVar, Generic, List, Any
import numpy as np
from coniql.util import doc_field
from .plugin import Plugin
from ._types import NumberMeta, Channel, NumberType, NumberDisplay, Range, Time, \
ChannelStatus, ChannelQuality, DisplayForm, ArrayWrapper, Function, \
NamedMeta, ObjectMeta, FunctionMeta, NamedValue
# How long to keep Sim alive after the last listener has gone
SIM_DESTROY_TIMEOUT = 10
# Map of channel_id func to its Sim class
CHANNEL_CLASSES: Dict[str, Type['SimChannel']] = {}
# Map of channel_id func to its callable function
FUNCTION_CLASSES: Dict[str, Type['SimFunction']] = {}
@register_channel("sine")
class SineSimChannel(SimChannel):
"""Create a simulated float sine value
Args:
min_value: The minimum output value
max_value: The maximum output value
steps: The number of steps taken to produce a complete sine wave
update_seconds: The time between each step
warning_percent: Percentage of the full range, outside this is warning
alarm_percent: Percentage of the full range, outside this is alarm
"""
@register_channel("sinewave")
class SineWaveSimChannel(SimChannel):
"""Create a simulated float waveform
Args:
period_seconds: The time between repetitions on the sinewave in time
sample_wavelength: The wavelength of the output sinewave
size: The size of the output waveform (min 10 elements)
update_seconds: The time between each step
min_value: The minimum output value
max_value: The maximum output value
warning_percent: Percentage of the full range, outside this is warning
alarm_percent: Percentage of the full range, outside this is alarm
"""
T = TypeVar('T')
R = TypeVar('R')
@register_function("hello")
class Hello(SimFunction):
"""Say hello to someone"""
@dataclass
@dataclass
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
30351,
952,
198,
11748,
33918,
198,
11748,
10688,
198,
11748,
640,
198,
6738,
30351,
952,
1330,
4670,
518,
198,
11748,
4818,
330,
28958,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
... | 3.149096 | 664 |
import utils
def main():
"""Main."""
code_b64 = 'Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg'
code_b64 += 'aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq'
code_b64 += 'dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg'
code_b64 += 'YnkK'
code = bytearray(code_b64.decode('base64'))
oracle = utils.gen_ECB_oracle(code, 20)
print '> Oracle using ECB:', utils.detect_ECB(oracle)
for i in xrange(1, 11):
try:
decrypted = utils.decrypt_oracle_ECB(oracle, 16, code, 20)
break
except KeyError:
print 'try {:d} failed, trying again'.format(i)
if decrypted:
print '> Plaintext:\n', decrypted
print '> p14 ok'
else:
print '> p14 failed -- unable to decipher after several tries'
if __name__ == '__main__':
main()
| [
11748,
3384,
4487,
198,
198,
4299,
1388,
33529,
198,
220,
220,
220,
37227,
13383,
526,
15931,
198,
220,
220,
220,
2438,
62,
65,
2414,
796,
705,
37280,
24,
36299,
38,
2290,
41,
88,
33,
79,
8482,
33,
660,
4090,
16,
43,
73,
10206,
53... | 1.847458 | 472 |
import time
| [
11748,
640,
198
] | 4 | 3 |
import sys
sys.path.append("..")
from common import *
data = fnl(parse)[0]
school = data
print(data)
days = 256
rate = 7
track = [0 for i in range(rate+2)]
for fish in range(len(school)):
track[school[fish]] += 1
pprint(track)
for day in range(days):
day0 = track[0]
track = track[1:]
track.append(0)
if(day0 >= 1):
track[8] += day0
track[6] += day0
print(sum(track)) | [
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7203,
492,
4943,
198,
6738,
2219,
1330,
1635,
198,
198,
7890,
796,
277,
21283,
7,
29572,
38381,
15,
60,
198,
14347,
796,
1366,
198,
4798,
7,
7890,
8,
198,
198,
12545,
796,
17759,
198,
4... | 2.256831 | 183 |
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = "http://py4e-data.dr-chuck.net/comments_1481945.html"
html = urlopen(url, context=ctx).read().decode()
soup = BeautifulSoup(html, "html.parser")
# Retrieve all the anchor tags
s = 0 # Sum of all numbers
tags = soup('span')
for tag in tags:
s += int(tag.contents[0])
print(f"Sum is {s}")
| [
6738,
2956,
297,
571,
13,
25927,
1330,
19016,
9654,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
264,
6649,
198,
198,
2,
41032,
25952,
10703,
8563,
198,
49464,
796,
264,
6649,
13,
17953,
62,
12286,
62,
22866,
3419,
198,... | 2.733696 | 184 |
from __future__ import absolute_import
import toml
from roundhouse import Serializer
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
284,
4029,
198,
198,
6738,
2835,
4803,
1330,
23283,
7509,
628
] | 4 | 22 |
#!/usr/bin/env python
import numpy as np
import epitome as epi
def slidewin_intranet_corr(data, idx, net, n_steps, win_step, win_len):
"""
data -- voxels x timepoints
idx -- voxel network labels (integers)
net -- interger value representing network of interest
n_steps -- number of windows to take
win_step -- step length of window (50% of full length?)
win_len -- length of window
Returns the mean + std across all windowed samples of the timeseries
supplied in data.
Gives a measure of intranetwork correlation variability over time. This
can be used to see if / when networks become coherent, and allows us to
compare this across networks.
"""
idx = np.where(np.array(idx) == net)[0]
net_data = data[idx, :]
mean = np.zeros(n_steps)
std = np.zeros(n_steps)
for step in np.arange(n_steps-1):
win_start = step*win_step
win_stop = step*win_step + win_len
data_slice = net_data[:, win_start:win_stop]
corr = np.corrcoef(data_slice)
for x in np.arange(corr.shape[0]):
corr[x,x] = np.nan
mean[step] = np.nanmean(corr)
std[step] = np.nanstd(corr)
return mean, std
# def unused():
# """
# Noone loves these.
# """
#calculate pc spectra
# pc_a_spec = calculate_spectra(pc_a, samp)
# pc_b_spec = calculate_spectra(pc_b, samp)
# # calculate pc derivatives
# pc_a_diff = np.diff(pc_a, n=1)
# pc_b_diff = np.diff(pc_b, n=1)
# #calculate pc envalope
# pc_a_env = np.abs(signal.hilbert(pc_a_diff))
# pc_b_env = np.abs(signal.hilbert(pc_b_diff))
# pc1_a, pc2_a, exp_a = return_top_2_pcs(tmp_data[0:6, :])
# pc1_b, pc2_b, exp_b = return_top_2_pcs(tmp_data[0:6, :])
# plot PCs
# plot_timeseries(np.vstack((pc_a, pc_b)), 2, 2,
# 'roi-pc-timeseries-subject-' + str(i))
# compare_spectra(pc_a_spec, pc_b_spec,
# 'roi-pc-spectra-subject-' + str(i))
# plot_timeseries(np.vstack((pc_a_diff, pc_b_diff)), 2, 2,
# 'roi-pc-diff-timeseries-subject-' + str(i))
# plot_timeseries(np.vstack((pc_a_diff, pc_b_diff)), 2, 2,
# 'roi-pc-env-timeseries-subject-' + str(i),
# np.vstack((pc_a_env, pc_b_env)))
# # plot phase portrait of both pcs
# plot_phase_portrait(pc_a.T, pc_b.T, exp_a, exp_b,
# 'roi-pc_phase-portrait-subject-' + str(i))
# # plot phase portrait of the derivative of both pcs
# plot_phase_portrait(pc_a_diff.T, pc_b_diff.T, exp_a, exp_b,
# 'roi-pc-diff_phase-portrait-subject-' + str(i))
# # plot phase portrait of the envalope of both pcs
# plot_phase_portrait(pc_a_env.T, pc_b_env.T, exp_a, exp_b,
# 'roi-pc-env_phase-portrait-subject-' + str(i))
# # plot phase portrait of pc_A + lag
# plot_delay_embedded_phase_portraits(pc_a.T, lags,
# 'roi-pc-a_delay-embedded-portrait-subject-' + str(i))
# # plot phase portrait of pc_B + lag
# plot_delay_embedded_phase_portraits(pc_b.T, lags,
# 'roi-pc-b_delay-embedded-portrait-subject-' + str(i))
# # plot phase portrait top 2 pcs from network a
# plot_phase_portrait(pc1_a.T, pc2_a.T, exp_a, exp_a,
# 'roi-2pcs-a-subject-' + str(i))
# # plot phase portrait top 2 pcs from network a
# plot_phase_portrait(pc1_b.T, pc2_b.T, exp_b, exp_b,
# 'roi-2pcs-b-subject-' + str(i))
#corrs[i] = np.corrcoef(pc_a, pc_b)[0][1]
# return 'sadness'
# def plot_timeseries(data, n_rois, n_net, title, envs=None):
# """
# This function needs to be fixed to work with the network indicies.
# """
# # plot timeseries
# for i in np.arange(n_rois):
# plt.subplot(n_rois, 1, i+1)
# plot_data = data[i, :]
# if envs != None:
# plot_envs = envs[i, :]
# if i < n_rois/n_net:
# plt.plot(plot_data, linewidth=1, color='red')
# if envs != None:
# plt.plot(plot_envs, linewidth=1, color='black')
# plt.axis('off')
# else:
# plt.plot(plot_data, linewidth=1, color='blue')
# if envs != None:
# plt.plot(plot_envs, linewidth=1, color='black')
# plt.axis('off')
# plt.suptitle(title)
# plt.savefig(title + '.pdf')
# plt.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
21240,
462,
355,
2462,
72,
198,
198,
4299,
27803,
413,
259,
62,
600,
2596,
316,
62,
10215,
81,
7,
7890,
11,
4686,
87,
11,
2010,
11,
299,
... | 1.976471 | 2,295 |
"""Organizations managers."""
from django.db import models
from readthedocs.core.utils.extend import SettingsOverrideObject
from .constants import ADMIN_ACCESS, READ_ONLY_ACCESS
class TeamManagerBase(models.Manager):
"""Manager to control team's access."""
class TeamMemberManager(models.Manager):
"""Manager for queries on team members."""
def sorted(self):
"""
Return sorted list of members and invites.
Return list of members and invites sorted by members first, and null
members (invites) last.
"""
return (
self.get_queryset().annotate(
null_member=models.Count('member'),
).order_by('-null_member', 'member')
)
| [
37811,
26121,
4582,
11663,
526,
15931,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
6738,
1100,
83,
704,
420,
82,
13,
7295,
13,
26791,
13,
2302,
437,
1330,
16163,
37961,
10267,
198,
198,
6738,
764,
9979,
1187,
1330,
5... | 2.67029 | 276 |
import unified_planning
from unified_planning.shortcuts import *
from unified_planning.test import TestCase, main, skipIfEngineNotAvailable
from unified_planning.test.examples import get_example_problems
from up_skdecide.domain import DomainImpl as SkDecideDomain
from skdecide.hub.solver.iw import IW
| [
11748,
22706,
62,
11578,
768,
198,
6738,
22706,
62,
11578,
768,
13,
19509,
23779,
1330,
1635,
198,
6738,
22706,
62,
11578,
768,
13,
9288,
1330,
6208,
20448,
11,
1388,
11,
14267,
1532,
13798,
3673,
10493,
198,
6738,
22706,
62,
11578,
768... | 3.465909 | 88 |
'''
Created by auto_sdk on 2015.01.28
'''
from aliyun.api.base import RestApi
| [
7061,
6,
201,
198,
41972,
416,
8295,
62,
21282,
74,
319,
1853,
13,
486,
13,
2078,
201,
198,
7061,
6,
201,
198,
6738,
435,
7745,
403,
13,
15042,
13,
8692,
1330,
8324,
32,
14415,
201,
198
] | 2.277778 | 36 |
# Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rosidl_cli.extensions import Extension
from rosidl_cli.extensions import load_extensions
class GenerateCommandExtension(Extension):
"""
The extension point for source code generation.
The following methods must be defined:
* `generate`
"""
def generate(
self,
package_name,
interface_files,
include_paths,
output_path
):
"""
Generate source code.
Paths to interface definition files are relative paths optionally
prefixed by an absolute path followed by a colon ':', in which case
path resolution is to be performed against that absolute path.
:param package_name: name of the package to generate source code for
:param interface_files: list of paths to interface definition files
:param include_paths: list of paths to include dependency interface
definition files from.
:param output_path: path to directory to hold generated source code files
"""
raise NotImplementedError()
def load_type_extensions(**kwargs):
"""Load extensions for type representation source code generation."""
return load_extensions('rosidl_cli.command.generate.type_extensions', **kwargs)
def load_typesupport_extensions(**kwargs):
"""Load extensions for type support source code generation."""
return load_extensions('rosidl_cli.command.generate.typesupport_extensions', **kwargs)
| [
2,
15069,
33448,
4946,
8090,
47061,
5693,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.22763 | 637 |
# Copyright 2020 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_control.mjcf.skin."""
import os
from absl.testing import absltest
from dm_control.mjcf import skin
from dm_control.utils import io as resources
# Directory holding the test assets, resolved relative to this module.
ASSETS_DIR = os.path.join(os.path.dirname(__file__), 'test_assets')
# Sample .skn file exercised by the skin tests.
SKIN_FILE_PATH = os.path.join(ASSETS_DIR, 'skins/test_skin.skn')
if __name__ == '__main__':
    absltest.main()
| [
2,
15069,
12131,
383,
288,
76,
62,
13716,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13... | 3.422297 | 296 |
resize = dummyfunc
| [
198,
411,
1096,
796,
31548,
20786,
628
] | 3 | 7 |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
from unittest.mock import patch, mock_open
from openvino.tools.mo.front.tf.loader import load_tf_graph_def
from openvino.tools.mo.utils.summarize_graph import summarize_graph
# Minimal GraphDef in text (pbtxt) form used as a test fixture: a float
# "Placeholder" node with shape 1x227x227x3 feeding an "Output/Identity" node.
pbtxt = 'node{name:"Placeholder"op:"Placeholder"attr{key:"dtype"value{type:DT_FLOAT}}attr{key:"shape"value{shape{dim' + \
'{size:1}dim{size:227}dim{size:227}dim{size:3}}}}}node{name:"Output/Identity"op:"Identity"input:"Placeholder' + \
'"attr{key:"T"value{type:DT_FLOAT}}}'
| [
2,
15069,
357,
34,
8,
2864,
12,
1238,
2481,
8180,
10501,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
11,
15290,
62,... | 2.554054 | 222 |
def count_dominating_pairs(values):
    """
    Count index pairs (j, k) with j < k where values[j] | values[k] does not
    exceed max(values[j], values[k]).

    Since a | b >= max(a, b) for non-negative integers, the condition holds
    exactly when the bit pattern of one element is a subset of the other's.

    :param values: sequence of non-negative integers
    :return: number of qualifying pairs
    """
    total = 0
    n = len(values)
    for j in range(n - 1):
        for k in range(j + 1, n):
            if values[j] | values[k] <= max(values[j], values[k]):
                total += 1
    return total


if __name__ == "__main__":
    # Ported from Python 2 (raw_input/xrange/print statement) to Python 3.
    # Guarded so importing this module does not consume stdin.
    t = int(input().strip())
    for _ in range(t):
        input()  # element count n — implied by the split on the next line
        data = [int(token) for token in input().strip().split()]
        print(count_dominating_pairs(data))
| [
83,
796,
493,
7,
1831,
62,
15414,
22446,
36311,
28955,
198,
1640,
1312,
287,
2124,
9521,
7,
83,
2599,
198,
220,
220,
220,
299,
796,
493,
7,
1831,
62,
15414,
22446,
36311,
28955,
198,
220,
220,
220,
1366,
796,
685,
600,
7,
73,
8,
... | 1.987261 | 157 |
import os
import sys
# Add path to python source to path.
sys.path.append(os.path.join(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))), "python"))
import SmoothParticleNets as spn
import itertools
import numpy as np
import torch
import torch.autograd
from gradcheck import gradcheck
from test_convsdf import quaternionMult, quaternionConjugate
from regular_grid_interpolater import RegularGridInterpolator
try:
import pytest_args
except ImportError:
print("Make sure to compile SmoothParticleNets before running tests.")
raise
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # Paired flags writing to one destination: '--no-cpu' / '--no-cuda'
    # disable a device, the positive form re-enables it.
    # NOTE(review): with default=True on the store_true actions, the bare
    # '--cpu' / '--cuda' flags are no-ops — confirm this is intentional.
    parser.add_argument('--cpu', dest='cpu', action="store_true", default=True)
    parser.add_argument('--no-cpu', dest='cpu', action="store_false")
    parser.add_argument('--cuda', dest='cuda',
                        action="store_true", default=True)
    parser.add_argument('--no-cuda', dest='cuda', action="store_false")
    args = parser.parse_args()
    # Run the image-projection test on the selected device(s).
    test_imageprojection(cpu=args.cpu, cuda=args.cuda)
| [
11748,
28686,
198,
11748,
25064,
198,
2,
3060,
3108,
284,
21015,
2723,
284,
3108,
13,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
198,
220,
220,
220,
28686,
13,
6978,
13,
159... | 2.795276 | 381 |
def solve(commands):
    """
    Follow submarine movement commands and return horizontal * depth.

    :param commands: iterable of lines of the form "<direction> <amount>",
        where direction is 'forward', 'down' or 'up'
    :return: final horizontal position multiplied by final depth
    """
    horizontal = 0
    depth = 0
    for line in commands:
        parts = line.strip().split()
        if not parts:
            continue  # tolerate blank lines (the original crashed on them)
        direction, amount = parts[0], int(parts[1])
        if direction == 'forward':
            horizontal += amount
        elif direction == 'down':
            depth += amount
        elif direction == 'up':
            depth -= amount
    return horizontal * depth


if __name__ == "__main__":
    # Context manager closes the file deterministically; the original
    # left the handle open for the lifetime of the process.
    with open('input2.txt', "r") as data:
        print(solve(data))
| [
7890,
28,
9654,
10786,
15414,
17,
13,
14116,
40264,
81,
4943,
201,
198,
5031,
28,
4868,
3419,
201,
198,
71,
11,
67,
28,
15,
11,
15,
201,
198,
1640,
2124,
287,
1366,
25,
201,
198,
220,
220,
220,
8591,
13,
33295,
7,
87,
13,
36311,... | 1.666667 | 180 |
"""
Go fast with multiprocessing
============================
The streaming interfaces with iterables allow efficient batch processing as shown :doc:`here <ex4_timepicker_batch>`.
But still only one core/thread will be utilized. We will change that will multiprocessing.
Following example shows a batch feature extraction procedure using multiple CPU cores.
"""
import os
import time
import multiprocessing
from typing import Dict, Iterable
from itertools import cycle
import __main__
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import vallenae as vae
# Locate the example data next to this script; fall back to the current
# working directory when __file__ is undefined (e.g. interactive sessions).
HERE = os.path.dirname(__file__) if "__file__" in locals() else os.getcwd()
TRADB = os.path.join(HERE, "steel_plate/sample_plain.tradb")
#%%
# Prepare streaming reads
# -----------------------
tradb = vae.io.TraDatabase(TRADB)  # handle for reading transient records
#%%
# Our sample tradb only contains four data sets. That is not enough data for demonstrating batch processing.
# Therefore, we will simulate more data by looping over the data sets with following generator/iterable:
#%%
# Define feature extraction function
# ----------------------------------
# Following function will be applied to all data sets and returns computed features:
# Fix to use pickle serialization in sphinx gallery
setattr(__main__, feature_extraction.__name__, feature_extraction)
#%%
# Compute with single thread/core
# -------------------------------
# .. note::
#
# The examples are executed on the CI / readthedocs server with limited resources.
# Therefore, the shown computation times and speedups are below the capability of modern machines.
#
# Run computation in a single thread and get the time:
# Convert a time.perf_counter() start timestamp into elapsed milliseconds.
time_elapsed_ms = lambda t0: 1e3 * (time.perf_counter() - t0)
time_start = time.perf_counter()
# Baseline: run the feature extraction sequentially on one core.
for tra in tra_generator():
    results = feature_extraction(tra)
    # do something with the results
time_single_thread = time_elapsed_ms(time_start)
print(f"Time single thread: {time_single_thread:.2f} ms")
#%%
# Compute with multiple processes/cores
# -------------------------------------
# First get number of available cores in your machine:
print(f"Available CPU cores: {os.cpu_count()}")
#%%
# But how can we utilize those cores? The common answer for most programming languages is multithreading.
# Threads run in the same process and heap, so data can be shared between them (with care).
# Sadly, Python uses a global interpreter lock (GIL) that locks heap memory, because Python objects are not thread-safe.
# Therefore, threads are blocking each other and no speedups are gained by using multiple threads.
#
# The solution for Python is multiprocessing to work around the GIL. Every process has its own heap and GIL.
# Multiprocessing will introduce overhead for interprocess communication and data serialization/deserialization.
# To reduce the overhead, data is sent in bigger chunks.
#%%
# Run computation on 4 cores with chunks of 128 data sets and get the time / speedup:
# Fan the same workload out to 4 worker processes; chunksize batches data
# sets per IPC round-trip to amortize serialization overhead.
with multiprocessing.Pool(4) as pool:
    time_start = time.perf_counter()
    for results in pool.imap(feature_extraction, tra_generator(), chunksize=128):
        pass  # do something with the results
    time_multiprocessing = time_elapsed_ms(time_start)
print(f"Time multiprocessing: {time_multiprocessing:.2f} ms")
print(f"Speedup: {(time_single_thread / time_multiprocessing):.2f}")
#%%
# Variation of the chunksize
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
# Following results show how the chunksize impacts the overall performance.
# The speedup is measured for different chunksizes and plotted against the chunksize:
chunksizes = (10, 40, 60, 80, 100, 120, 140, 160, 200)
speedup_chunksizes = []
with multiprocessing.Pool(4) as pool:
    for chunksize in chunksizes:
        time_start = time.perf_counter()
        for results in pool.imap(feature_extraction, tra_generator(), chunksize=chunksize):
            pass  # do something with the results
        # Speedup relative to the single-thread baseline measured above.
        speedup_chunksizes.append(time_single_thread / time_elapsed_ms(time_start))
plt.figure(tight_layout=True, figsize=(6, 3))
plt.plot(chunksizes, speedup_chunksizes)
plt.xlabel("Chunksize")
plt.ylabel("Speedup")
plt.show()
| [
37811,
198,
5247,
3049,
351,
18540,
305,
919,
278,
198,
4770,
25609,
198,
198,
464,
11305,
20314,
351,
11629,
2977,
1249,
6942,
15458,
7587,
355,
3402,
1058,
15390,
25,
63,
1456,
1279,
1069,
19,
62,
2435,
79,
15799,
62,
43501,
29,
446... | 3.362969 | 1,226 |
#!/usr/bin/env python3
import collections
try: # python 3
from collections import abc
except ImportError: # python 2
import collections as abc
import concurrent.futures
from datetime import datetime
import gc
import inspect
import logging
from logging import Logger, LogRecord
import os
# import slack
import sys
from types import FrameType
from typing import Deque, Optional, cast
from loguru import logger
from machine_learning_with_python.models.loggers import LoggerModel, LoggerPatch
LOGGERS = __name__
class InterceptHandler(logging.Handler):
    """
    Intercept all logging calls (with standard logging) into our Loguru Sink

    See: https://github.com/Delgan/loguru#entirely-compatible-with-standard-logging
    """

    # Numeric stdlib logging levels mapped to Loguru level names.
    loglevel_mapping = {
        50: "CRITICAL",
        40: "ERROR",
        30: "WARNING",
        20: "INFO",
        10: "DEBUG",
        0: "NOTSET",
    }
# """ Logging handler intercepting existing handlers to redirect them to loguru """
class LoopDetector(logging.Filter):
    """
    Log filter which looks for repeating WARNING and ERROR log lines, which can
    often indicate that a module is spinning on a error or stuck waiting for a
    condition.

    When a repeating line is found, a summary message is printed and a message
    optionally sent to Slack.
    """

    # Number of recent log lines kept for repetition detection.
    LINE_HISTORY_SIZE = 50
    # Occurrences within the history window that count as "repeating".
    LINE_REPETITION_THRESHOLD = 5
# SOURCE: https://github.com/jupiterbjy/CUIAudioPlayer/blob/dev_master/CUIAudioPlayer/LoggingConfigurator.py
def get_caller_stack_name(depth=1):
    """
    Return the function name found ``depth`` frames above this call.

    :param depth: which enclosing scope to inspect; 1 (the default) names the
        direct caller, 0 names this function itself.
    """
    # inspect.stack() entries are (frame, filename, lineno, function, ...)
    # records; index 3 is the function name.
    frames = inspect.stack()
    return frames[depth][3]
# SOURCE: https://github.com/jupiterbjy/CUIAudioPlayer/blob/dev_master/CUIAudioPlayer/LoggingConfigurator.py
# https://stackoverflow.com/questions/52715425
# SMOKE-TESTS
# SMOKE-TESTS
if __name__ == "__main__":
    # logging_tree prints the configured handler/filter hierarchy for debugging.
    from logging_tree import printout
    LOGGER = get_logger(__name__, provider="Logger")
    logger.info("TESTING TESTING 1-2-3")
    printout()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
17268,
198,
198,
28311,
25,
220,
1303,
21015,
513,
198,
220,
220,
220,
422,
17268,
1330,
450,
66,
198,
16341,
17267,
12331,
25,
220,
1303,
21015,
362,
198,
220,
220,
... | 2.889831 | 708 |
from django.contrib import admin
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.urls import reverse
from .calendar import EventCalendar
import datetime, calendar
from .models import (
TimeOfDay,
Scheduler,
SchedulerException,
SchedulerRecurringPattern,
Activity,
SchedulerDay,
SchedulerMonth)
from .forms import SchedDayForm
from .mixins import AdminCommonMixin, CalendarActionMixin
@admin.register(TimeOfDay)
@admin.register(Scheduler)
@admin.register(SchedulerException)
@admin.register(SchedulerRecurringPattern)
@admin.register(Activity)
# @admin.register(SchedulerDay)
# class SchedulerDayAdmin(CalendarActionMixin, admin.ModelAdmin):
# date_hierarchy = 'created'
# change_list_template = 'admin/schedules/scheduler_day.html'
# #form = SchedDayForm
# @admin.register(SchedulerMonth)
# class SchedulerMonthAdmin(admin.ModelAdmin):
# change_list_template = 'admin/schedules/scheduler_month.html'
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
42625,
14208,
13,
26791,
13,
49585,
395,
1806,
1330,
1317,
62,
21230,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
19... | 2.979228 | 337 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RaggedTensor supported value types."""
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_test_ops as test_ops
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensorSpec
from tensorflow.python.platform import googletest
from tensorflow.python.util import dispatch
class WrappedTensor(composite_tensor.CompositeTensor):
"""A class used to test extending RaggedTensor value type support.
Simply wraps a `tf.Tensor` value.
"""
@property
@property
@property
class WrappedTensorOpDispatcher(dispatch.GlobalOpDispatcher):
"""Global op dispatcher for WrappedTensor."""
# For these ops, just return plain Tensors (not WrappedTensors).
OPS_THAT_RETURN_UNTRACED_RESULTS = (array_ops.shape, array_ops.shape_v2,
check_ops.assert_rank_at_least)
WrappedTensorOpDispatcher().register()
ragged_tensor._add_supported_value_type(WrappedTensor)
# pylint: disable=g-complex-comprehension
@test_util.run_all_in_graph_and_eager_modes
@test_util.run_all_in_graph_and_eager_modes
if __name__ == '__main__':
googletest.main()
| [
2,
15069,
12131,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,... | 3.264354 | 836 |
import os
import hashlib
| [
11748,
28686,
198,
11748,
12234,
8019,
628
] | 3.714286 | 7 |
# Poppler, assuming it's been installed to the (Linux) system.
{
'targets': [{
'target_name': 'poppler',
'type': 'none',
'direct_dependent_settings': {
'libraries': [
'-lpoppler-cpp',
],
'include_dirs': [
'/usr/include/poppler/cpp',
],
},
}],
}
| [
2,
7695,
381,
1754,
11,
13148,
340,
338,
587,
6589,
284,
262,
357,
19314,
8,
1080,
13,
198,
90,
198,
220,
220,
220,
705,
83,
853,
1039,
10354,
685,
90,
198,
220,
220,
220,
220,
220,
220,
220,
705,
16793,
62,
3672,
10354,
705,
75... | 1.738095 | 210 |
from .app import app
app.run()
| [
6738,
764,
1324,
1330,
598,
198,
198,
1324,
13,
5143,
3419,
198
] | 2.666667 | 12 |
import os
import numpy as np
from numba import njit
from evrepr.data.file_reader import FileReader
@njit
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
997,
7012,
1330,
299,
45051,
198,
198,
6738,
819,
260,
1050,
13,
7890,
13,
7753,
62,
46862,
1330,
9220,
33634,
628,
198,
31,
77,
45051,
628,
198
] | 2.972973 | 37 |
"""Vision Transformer in PyTorch.
Reference:
[1] Dosovitskiy, Alexey, et al. "An image is worth 16x16 words: Transformers for image recognition at scale."
arXiv preprint arXiv:2010.11929 (2020)
Code adapted from https://github.com/lucidrains/vit-pytorch/blob/main/vit_pytorch/vit.py
"""
import torch
from torch import nn
from super_gradients.training.models import SgModule
from super_gradients.training.utils import get_param
from einops import repeat
class PatchEmbed(nn.Module):
    """
    Split a 2D image into patches and embed them.

    Implemented with Conv layers, which is faster than rearranging the pixels
    and applying a Linear projection.
    """
class FeedForward(nn.Module):
    """Transformer feed-forward block with a residual connection."""
class Attention(nn.Module):
    """Self-attention layer with a residual connection."""
| [
37811,
44206,
3602,
16354,
287,
9485,
15884,
354,
13,
198,
26687,
25,
198,
58,
16,
60,
43976,
709,
896,
4106,
88,
11,
4422,
2959,
11,
2123,
435,
13,
366,
2025,
2939,
318,
2861,
1467,
87,
1433,
2456,
25,
39185,
329,
2939,
9465,
379,
... | 3.007663 | 261 |
import pygame
import pygame.display
import pygame.mixer
from pygame import gfxdraw
import numpy as np
import pysprint_car
import pysprint_tracks
import random
import json
import os
#New awesome imports from shazz :D
from managers.sample_manager import SampleManager
from managers.texture_manager import TextureManager
from screens.highscores_screen import HighscoresScreen
from screens.laprecords_screen import LapRecordsScreen
from screens.credits_screen import CreditsScreen
from screens.splash_screen import SplashScreen
from screens.loading_screen import LoadingScreen
from managers.font_manager import FontManager
from pathlib import Path
from loguru import logger
from screens.colors import *
#Set working directory where the file is located to launch from OS X buldled App
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
pygame.init()
pygame.joystick.init()
version = "0.38"
display_width = 640
display_height = 400
pysprint_car.display_width = 640
pysprint_car.display_height = 400
pysprint_tracks.display_width = 640
pysprint_tracks.display_height = 400
with open(".highscores.json") as high_scores_file:
high_scores = json.load(high_scores_file)
with open(".bestlaps.json") as best_laps_file:
best_laps = json.load(best_laps_file)
race_laps = 4
pysprint_car.race_laps = race_laps
flags = 0
game_display = pygame.display.set_mode((display_width, display_height), flags)
pygame.display.set_caption('PySprint v{}'.format(version))
icon = pygame.image.load('Assets/SuperSprintIcon.png')
pygame.display.set_icon(icon)
clock = pygame.time.Clock()
pysprint_car.game_display = game_display
pysprint_tracks.game_display = game_display
# Create sample managers
FADEOUT_DURATION = 1000
SampleManager.create_manager("sfx", "configuration/atarist_sfx.json")
smp_manager = SampleManager.create_manager("music", "configuration/atarist_music.json")
tex_manager = TextureManager.create_manager("sprites", "configuration/atarist_tex.json")
font_manager = FontManager.create_manager("fonts", "configuration/atarist_fonts.json")
cars = []
tracks = {}
FPS = 30
DEBUG_BUMP = False
DEBUG_CRASH = False
DEBUG_FLAG = False
DISPLAY_FPS = True
DEBUG_FPS = False
DEBUG_FPS_DETAILED = False
DEBUG_AI = False
DISABLE_DRONES = False
DISABLE_LOGGING = True
DEBUG_SELECT_ITEM = False
if DISABLE_LOGGING:
logger.remove()
#Flag Events
GREENFLAG = pygame.USEREVENT
WHITEFLAG = GREENFLAG + 1
CHECKEREDFLAG = WHITEFLAG + 1
JOYSTICK_BUTTON_PRESSED = -2
blue_engine = (72, 146)
green_engine = (390, 284)
yellow_engine = (390, 146)
red_engine = (72, 284)
blue_customization = (12, 202)
green_customization = (330, 340)
yellow_customization = (330, 202)
red_customization = (12, 340)
blue_thumb = (51, 120)
green_thumb = (369, 258)
yellow_thumb = (369, 120)
red_thumb = (51, 258)
press_start_blue = (30,6)
press_start_green = (510,6)
press_start_red = (190,6)
press_start_yellow = (350,6)
score_top_left_blue = (1,0)
score_top_left_green = (161,0)
score_top_left_red = (321,0)
score_top_left_yellow = (481,0)
attract_mode_display_duration = 5000
#Sound Assets
podium_tunes = [ sample for name, sample in smp_manager.samples.items() if name.startswith('podium_tune') ]
# fonts
small_font = font_manager.get_truetype_font("small_font")
shadow_font = font_manager.get_truetype_font("shadow_font")
big_font = font_manager.get_truetype_font("big_font")
big_shadow_font = font_manager.get_truetype_font("big_shadow_font")
# ---------------------------------------------------------------------------------------------
# TODO: move to pysprint_car
# ---------------------------------------------------------------------------------------------
#Graphic assets
pysprint_car.transparency = tex_manager.get_texture("transparency")
pysprint_car.vector_surf = pygame.Surface((display_width,display_height))
pysprint_car.vector_surf.fill((0,0,0))
pysprint_car.vector_surf.set_colorkey((0,0,0))
# ---------------------------------------------------------------------------------------------
# Screens
start_race_screen = tex_manager.get_texture("start_race_screen")
race_podium_screen = tex_manager.get_texture("race_podium_screen")
checkered_background = tex_manager.get_texture("checkered_background")
item_screen = tex_manager.get_texture("item_screen")
blue_selection_wheel = tex_manager.get_texture("blue_selection_wheel")
yellow_selection_wheel = tex_manager.get_texture("yellow_selection_wheel")
red_selection_wheel = tex_manager.get_texture("red_selection_wheel")
green_selection_wheel = tex_manager.get_texture("green_selection_wheel")
# ---------------------------------------------------------------------------------------------
# TODO: move to pysprint_tracks
# ---------------------------------------------------------------------------------------------
#Traffic Cone
pysprint_tracks.traffic_cone = tex_manager.get_texture("traffic_cone")
pysprint_tracks.traffic_cone_shade = tex_manager.get_texture("traffic_cone_shade")
pysprint_tracks.traffic_cone_mask = tex_manager.get_mask("traffic_cone")
#Tornado Frames:
pysprint_tracks.tornado_frames = tex_manager.get_textures(f"tornado_frame")
pysprint_tracks.tornado_frames_masks = tex_manager.get_masks(f"tornado_frame")
#Poles Frames:
pysprint_tracks.poles_frames = tex_manager.get_textures(f"pole_frame")
pysprint_tracks.poles_frames_masks = tex_manager.get_masks(f"pole_frame")
#Spills
pysprint_tracks.oil_spill_image = tex_manager.get_texture("oil_spill")
pysprint_tracks.oil_spill_mask = tex_manager.get_mask("oil_spill")
pysprint_tracks.water_spill_image = tex_manager.get_texture("water_spill")
pysprint_tracks.water_spill_mask = tex_manager.get_mask("water_spill")
pysprint_tracks.grease_spill_image = tex_manager.get_texture("grease_spill")
pysprint_tracks.grease_spill_mask = tex_manager.get_mask("grease_spill")
#Wrenches
pysprint_tracks.wrench_image = tex_manager.get_texture("wrench")
pysprint_tracks.wrench_mask = tex_manager.get_mask("wrench")
#Bonus Frames:
pysprint_tracks.bonus_frames = tex_manager.get_textures(f"bonus_frame")
pysprint_tracks.bonus_frames_masks = tex_manager.get_masks(f"bonus_frame")
pysprint_tracks.bonus_shade_frames = tex_manager.get_textures(f"bonus_frame_shade")
# For the Background
pysprint_tracks.road_gate_frames = tex_manager.get_textures(f"gate")
pysprint_tracks.road_gate_shade_frames = tex_manager.get_textures(f"gate_shade")
pysprint_tracks.road_gate_mask_frames = tex_manager.get_textures(f"gate_mask")
# ---------------------------------------------------------------------------------------------
crowd_flags = tex_manager.get_textures(f"gate_crowd_flag")
wrench_count_sprites = tex_manager.get_textures(f"wrench_count")
hammer_frames = tex_manager.get_textures(f"hammer")
saw_frames = tex_manager.get_textures(f"saw")
head_scratch_frames = tex_manager.get_textures(f"head_scratch")
blow_frames = tex_manager.get_textures(f"blow")
# podiums
first_car_blue = tex_manager.get_texture("podium_first_blue_car")
first_car_red = tex_manager.get_texture("podium_first_red_car")
first_car_green = tex_manager.get_texture("podium_first_green_car")
first_car_yellow = tex_manager.get_texture("podium_first_yellow_car")
first_car_blue_drone = tex_manager.get_texture("podium_first_blue_drone")
first_car_red_drone = tex_manager.get_texture("podium_first_red_drone")
first_car_green_drone = tex_manager.get_texture("podium_first_green_drone")
first_car_yellow_drone = tex_manager.get_texture("podium_first_yellow_drone")
second_car_blue = tex_manager.get_texture("podium_second_car_blue")
second_car_red = tex_manager.get_texture("podium_second_car_red")
second_car_green = tex_manager.get_texture("podium_second_car_green")
second_car_yellow = tex_manager.get_texture("podium_second_car_yellow")
second_car_blue_drone = tex_manager.get_texture("podium_second_car_blue_drone")
second_car_red_drone = tex_manager.get_texture("podium_second_car_red_drone")
second_car_green_drone = tex_manager.get_texture("podium_second_car_green_drone")
second_car_yellow_drone = tex_manager.get_texture("podium_second_car_yellow_drone")
third_car_blue = tex_manager.get_texture("podium_third_car_blue")
third_car_red = tex_manager.get_texture("podium_third_car_red")
third_car_green = tex_manager.get_texture("podium_third_car_green")
third_car_yellow = tex_manager.get_texture("podium_third_car_yellow")
third_car_blue_drone = tex_manager.get_texture("podium_third_car_blue_drone")
third_car_red_drone = tex_manager.get_texture("podium_third_car_red_drone")
third_car_green_drone = tex_manager.get_texture("podium_third_car_green_drone")
third_car_yellow_drone = tex_manager.get_texture("podium_third_car_yellow_drone")
fourth_car_blue = tex_manager.get_texture("podium_fourth_car_blue")
fourth_car_red = tex_manager.get_texture("podium_fourth_car_red")
fourth_car_green = tex_manager.get_texture("podium_fourth_car_green")
fourth_car_yellow = tex_manager.get_texture("podium_fourth_car_yellow")
fourth_car_blue_drone = tex_manager.get_texture("podium_fourth_car_blue_drone")
fourth_car_red_drone = tex_manager.get_texture("podium_fourth_car_red_drone")
fourth_car_green_drone = tex_manager.get_texture("podium_fourth_car_green_drone")
fourth_car_yellow_drone = tex_manager.get_texture("podium_fourth_car_yellow_drone")
engine_idle = tex_manager.get_textures(f"engine_idle")
prepare_to_race = tex_manager.get_textures(f"prepare_to_race")
transition_dots = tex_manager.get_textures(f"transition_dots")
green_flag_frames = tex_manager.get_textures(f"green_flag")
white_flag_frames = tex_manager.get_textures(f"white_flag")
checkered_flag_frames = tex_manager.get_textures(f"checkered_flag")
# choper
yellow_helicopter_frames = tex_manager.get_textures(f"yellow_horizontal_helicopter")
blue_helicopter_frames = tex_manager.get_textures(f"blue_horizontal_helicopter")
green_helicopter_frames = tex_manager.get_textures(f"green_horizontal_helicopter")
red_helicopter_frames = tex_manager.get_textures(f"red_horizontal_helicopter")
yellow_vertical_helicopter_frames = tex_manager.get_textures(f"yellow_vertical_helicopter")
blue_vertical_helicopter_frames = tex_manager.get_textures(f"blue_vertical_helicopter")
green_vertical_helicopter_frames = tex_manager.get_textures(f"green_vertical_helicopter")
red_vertical_helicopter_frames = tex_manager.get_textures(f"red_vertical_helicopter")
dust_cloud_frames = tex_manager.get_textures(f"dust_cloud")
explosion_frames = tex_manager.get_textures(f"explosion")
# cars
car_sprites_masks = tex_manager.get_masks(f"blue_drone")
blue_drone_sprites = tex_manager.get_textures(f"blue_drone")
blue_car_sprites = tex_manager.get_textures(f"blue_car")
red_drone_sprites = tex_manager.get_textures(f"red_drone")
red_car_sprites = tex_manager.get_textures(f"red_car")
green_drone_sprites = tex_manager.get_textures(f"green_drone")
green_car_sprites = tex_manager.get_textures(f"green_car")
yellow_drone_sprites = tex_manager.get_textures(f"yellow_drone")
yellow_car_sprites = tex_manager.get_textures(f"yellow_car")
scrolling_font = font_manager.get_bitmap_font("scrolling")
keyboard_1 = {}
keyboard_1['ACCELERATE'] = pygame.K_RCTRL
keyboard_1['LEFT'] = pygame.K_LEFT
keyboard_1['RIGHT'] = pygame.K_RIGHT
keyboard_1['METHOD'] = "KEYBOARD 1"
keyboard_2 = {}
keyboard_2['ACCELERATE'] = pygame.K_LCTRL
keyboard_2['LEFT'] = pygame.K_x
keyboard_2['RIGHT'] = pygame.K_c
keyboard_2['METHOD'] = "KEYBOARD 2"
joystick_1 = {
'METHOD': 'JOYSTICK 1'
}
joystick_2 = {
'METHOD': 'JOYSTICK 2'
}
joystick_3 = {
'METHOD': 'JOYSTICK 3'
}
joystick_4 = {
'METHOD': 'JOYSTICK 4'
}
control_methods = [keyboard_1, keyboard_2, joystick_1, joystick_2,
joystick_3, joystick_4]
highscores_screen = HighscoresScreen(display=game_display, high_scores=high_scores)
laprecords_screen = LapRecordsScreen(display=game_display, best_laps=best_laps)
credits_screen = CreditsScreen(display=game_display)
splash_screen = SplashScreen(display=game_display)
loading_screen = LoadingScreen(display=game_display, fps=FPS, display_width=display_width, display_height=display_width)
def wait_action(screen_to_update = None, use_timer: bool = True):
    """Wait any input (keyboard, joystick or timer) to go to the next screen"""
    done = False
    pressed = False
    pygame.display.update()
    started_at = pygame.time.get_ticks()
    while not done:
        # Advance automatically once the attract-mode delay has elapsed.
        if use_timer and pygame.time.get_ticks() - started_at >= attract_mode_display_duration:
            done = True
        # A key press (or closing the window) also ends the wait.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
                pressed = pygame.K_ESCAPE
            if event.type == pygame.KEYDOWN:
                done = True
                pressed = event.key
        # As does any joystick button.
        if any_joystick_button_pressed():
            done = True
            pressed = JOYSTICK_BUTTON_PRESSED
        # Keep animating the current screen while waiting; it may also
        # request the transition itself.
        if screen_to_update is not None:
            if screen_to_update.update():
                done = True
        clock.tick(FPS)
    return pressed
game_loop()
| [
11748,
12972,
6057,
198,
11748,
12972,
6057,
13,
13812,
198,
11748,
12972,
6057,
13,
19816,
263,
198,
6738,
12972,
6057,
1330,
308,
21373,
19334,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
279,
893,
4798,
62,
7718,
198,
11748,
279,
... | 2.561446 | 5,395 |
# -*- test-case-name: twisted.web.test.test_static -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Static resources for L{twisted.web}.
"""
from __future__ import division, absolute_import
import os
import warnings
import itertools
import time
import errno
import mimetypes
from zope.interface import implementer
from twisted.web import server
from twisted.web import resource
from twisted.web import http
from twisted.web.util import redirectTo
from twisted.python.compat import networkString, intToBytes, nativeString, _PY3
from twisted.python.compat import escape
from twisted.python import components, filepath, log
from twisted.internet import abstract, interfaces
from twisted.python.util import InsensitiveDict
from twisted.python.runtime import platformType
from twisted.python.url import URL
from incremental import Version
from twisted.python.deprecate import deprecated
if _PY3:
from urllib.parse import quote, unquote
else:
from urllib import quote, unquote
dangerousPathError = resource.NoResource("Invalid request URL.")
class Data(resource.Resource):
"""
This is a static, in-memory resource.
"""
render_HEAD = render_GET
@deprecated(Version("Twisted", 16, 0, 0))
def addSlash(request):
    """
    Add a trailing slash to C{request}'s URI. Deprecated, do not use.

    @param request: The incoming request to add the ending slash to.
    @type request: An object conforming to L{twisted.web.iweb.IRequest}

    @return: A URI with a trailing slash, with query and fragment preserved.
    @rtype: L{bytes}
    """
    return _addSlash(request)
def _addSlash(request):
    """
    Return C{request}'s URI with a trailing slash added.

    @param request: The incoming request to add the ending slash to.
    @type request: An object conforming to L{twisted.web.iweb.IRequest}

    @return: A URI with a trailing slash, with query and fragment preserved.
    @rtype: L{bytes}
    """
    parsed = URL.fromText(request.uri.decode('ascii'))
    # Appending an empty path segment produces the trailing slash while the
    # query string and fragment survive the round trip.
    segments = list(parsed.path)
    segments.append(u"")
    with_slash = parsed.replace(path=segments)
    return with_slash.asText().encode('ascii')
class Registry(components.Componentized):
"""
I am a Componentized object that will be made available to internal Twisted
file-based dynamic web content such as .rpy and .epy scripts.
"""
def loadMimeTypes(mimetype_locations=None, init=mimetypes.init):
    """
    Produce a mapping of extensions (with leading dot) to MIME types.

    This works by calling the C{init} function of the L{mimetypes} module,
    which has the side effect of modifying that module's global MIME type
    cache, and then layering a set of Twisted-specific overrides on top.

    Multiple file locations containing mime-types can be passed as a list;
    they are sourced in order, later files overriding earlier ones, but only
    where a new entry explicitly overrides the current one.

    @param mimetype_locations: Optional. List of paths to C{mime.types} style
        files that should be used.
    @type mimetype_locations: iterable of paths or L{None}
    @param init: The init function to call. Defaults to the global C{init}
        function of the C{mimetypes} module. For internal use (testing) only.
    @type init: callable
    """
    init(mimetype_locations)
    overrides = {
        '.conf': 'text/plain',
        '.diff': 'text/plain',
        '.flac': 'audio/x-flac',
        '.java': 'text/plain',
        '.oz': 'text/x-oz',
        '.swf': 'application/x-shockwave-flash',
        '.wml': 'text/vnd.wap.wml',
        '.xul': 'application/vnd.mozilla.xul+xml',
        '.patch': 'text/plain',
    }
    for extension, mimeType in overrides.items():
        mimetypes.types_map[extension] = mimeType
    return mimetypes.types_map
class File(resource.Resource, filepath.FilePath):
"""
File is a resource that represents a plain non-interpreted file
(although it can look for an extension like .rpy or .cgi and hand the
file to a processor for interpretation if you wish). Its constructor
takes a file path.
Alternatively, you can give a directory path to the constructor. In this
case the resource will represent that directory, and its children will
be files underneath that directory. This provides access to an entire
filesystem tree with a single Resource.
If you map the URL 'http://server/FILE' to a resource created as
File('/tmp'), then http://server/FILE/ will return an HTML-formatted
listing of the /tmp/ directory, and http://server/FILE/foo/bar.html will
return the contents of /tmp/foo/bar.html .
@cvar childNotFound: L{Resource} used to render 404 Not Found error pages.
@cvar forbidden: L{Resource} used to render 403 Forbidden error pages.
"""
contentTypes = loadMimeTypes()
contentEncodings = {
".gz" : "gzip",
".bz2": "bzip2"
}
processors = {}
indexNames = ["index", "index.html", "index.htm", "index.rpy"]
type = None
    def __init__(self, path, defaultType="text/html", ignoredExts=(), registry=None, allowExt=0):
        """
        Create a file with the given path.

        @param path: The filename of the file from which this L{File} will
            serve data.
        @type path: C{str}
        @param defaultType: A I{major/minor}-style MIME type specifier
            indicating the I{Content-Type} with which this L{File}'s data
            will be served if a MIME type cannot be determined based on
            C{path}'s extension.
        @type defaultType: C{str}
        @param ignoredExts: A sequence giving the extensions of paths in the
            filesystem which will be ignored for the purposes of child
            lookup. For example, if C{ignoredExts} is C{(".bar",)} and
            C{path} is a directory containing a file named C{"foo.bar"}, a
            request for the C{"foo"} child of this resource will succeed
            with a L{File} pointing to C{"foo.bar"}.
        @param registry: The registry object being used to handle this
            request. If L{None}, one will be created.
        @type registry: L{Registry}
        @param allowExt: Ignored parameter, only present for backwards
            compatibility. Do not pass a value for this parameter.
        """
        resource.Resource.__init__(self)
        filepath.FilePath.__init__(self, path)
        self.defaultType = defaultType
        # Backwards compatibility: older callers passed a boolean for
        # ignoredExts (or used allowExt). Accept it, but warn.
        if ignoredExts in (0, 1) or allowExt:
            warnings.warn("ignoredExts should receive a list, not a boolean")
            if ignoredExts or allowExt:
                # A truthy boolean meant "ignore every extension".
                self.ignoredExts = [b'*']
            else:
                self.ignoredExts = []
        else:
            self.ignoredExts = list(ignoredExts)
        self.registry = registry or Registry()
def ignoreExt(self, ext):
"""Ignore the given extension.
Serve file.ext if file is requested
"""
self.ignoredExts.append(ext)
childNotFound = resource.NoResource("File not found.")
forbidden = resource.ForbiddenResource()
    def getChild(self, path, request):
        """
        If this L{File}'s path refers to a directory, return a L{File}
        referring to the file named C{path} in that directory.

        If C{path} is the empty string, return a L{DirectoryLister} instead.
        """
        # Re-stat: the filesystem may have changed since the last request.
        # reraise=False because a vanished file is handled by isdir() below.
        self.restat(reraise=False)

        if not self.isdir():
            return self.childNotFound

        if path:
            try:
                fpath = self.child(path)
            except filepath.InsecurePath:
                # The requested name would escape this directory.
                return self.childNotFound
        else:
            # Empty segment: probe for an index file, else list the directory.
            fpath = self.childSearchPreauth(*self.indexNames)
            if fpath is None:
                return self.directoryListing()

        if not fpath.exists():
            # Fall back to "foo.ext" when "foo" was requested and ext is in
            # ignoredExts.
            fpath = fpath.siblingExtensionSearch(*self.ignoredExts)
            if fpath is None:
                return self.childNotFound

        if platformType == "win32":
            # don't want .RPY to be different than .rpy, since that would allow
            # source disclosure.
            processor = InsensitiveDict(self.processors).get(fpath.splitext()[1])
        else:
            processor = self.processors.get(fpath.splitext()[1])
        if processor:
            # Hand processor-backed files (e.g. .rpy) to their processor.
            return resource.IResource(processor(fpath.path, self.registry))
        return self.createSimilarFile(fpath.path)
    # methods to allow subclasses to e.g. decrypt files on the fly:
    def openForReading(self):
        """
        Open the underlying file and return the file object.

        Subclasses may override this to transform content on the fly.
        """
        return self.open()
    def getFileSize(self):
        """
        Return the file size in bytes.

        Subclasses may override this if the served size differs from the
        on-disk size.
        """
        return self.getsize()
def _parseRangeHeader(self, range):
"""
Parse the value of a Range header into (start, stop) pairs.
In a given pair, either of start or stop can be None, signifying that
no value was provided, but not both.
@return: A list C{[(start, stop)]} of pairs of length at least one.
@raise ValueError: if the header is syntactically invalid or if the
Bytes-Unit is anything other than 'bytes'.
"""
try:
kind, value = range.split(b'=', 1)
except ValueError:
raise ValueError("Missing '=' separator")
kind = kind.strip()
if kind != b'bytes':
raise ValueError("Unsupported Bytes-Unit: %r" % (kind,))
unparsedRanges = list(filter(None, map(bytes.strip, value.split(b','))))
parsedRanges = []
for byteRange in unparsedRanges:
try:
start, end = byteRange.split(b'-', 1)
except ValueError:
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
if start:
try:
start = int(start)
except ValueError:
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
else:
start = None
if end:
try:
end = int(end)
except ValueError:
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
else:
end = None
if start is not None:
if end is not None and start > end:
# Start must be less than or equal to end or it is invalid.
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
elif end is None:
# One or both of start and end must be specified. Omitting
# both is invalid.
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
parsedRanges.append((start, end))
return parsedRanges
def _rangeToOffsetAndSize(self, start, end):
"""
Convert a start and end from a Range header to an offset and size.
This method checks that the resulting range overlaps with the resource
being served (and so has the value of C{getFileSize()} as an indirect
input).
Either but not both of start or end can be L{None}:
- Omitted start means that the end value is actually a start value
relative to the end of the resource.
- Omitted end means the end of the resource should be the end of
the range.
End is interpreted as inclusive, as per RFC 2616.
If this range doesn't overlap with any of this resource, C{(0, 0)} is
returned, which is not otherwise a value return value.
@param start: The start value from the header, or L{None} if one was
not present.
@param end: The end value from the header, or L{None} if one was not
present.
@return: C{(offset, size)} where offset is how far into this resource
this resource the range begins and size is how long the range is,
or C{(0, 0)} if the range does not overlap this resource.
"""
size = self.getFileSize()
if start is None:
start = size - end
end = size
elif end is None:
end = size
elif end < size:
end += 1
elif end > size:
end = size
if start >= size:
start = end = 0
return start, (end - start)
def _contentRange(self, offset, size):
"""
Return a string suitable for the value of a Content-Range header for a
range with the given offset and size.
The offset and size are not sanity checked in any way.
@param offset: How far into this resource the range begins.
@param size: How long the range is.
@return: The value as appropriate for the value of a Content-Range
header.
"""
return networkString('bytes %d-%d/%d' % (
offset, offset + size - 1, self.getFileSize()))
    def _doSingleRangeRequest(self, request, startAndEnd):
        """
        Set up the response for Range headers that specify a single range.

        This method checks if the request is satisfiable and sets the response
        code and Content-Range header appropriately. The return value
        indicates which part of the resource to return.

        @param request: The Request object.
        @param startAndEnd: A 2-tuple of start of the byte range as specified by
            the header and the end of the byte range as specified by the header.
            At most one of the start and end may be L{None}.
        @return: A 2-tuple of the offset and size of the range to return.
            offset == size == 0 indicates that the request is not satisfiable.
        """
        start, end = startAndEnd
        offset, size = self._rangeToOffsetAndSize(start, end)
        if offset == size == 0:
            # This range doesn't overlap with any of this resource, so the
            # request is unsatisfiable.
            request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE)
            # A 416 carries "bytes */<size>" so the client learns the extent.
            request.setHeader(
                b'content-range', networkString('bytes */%d' % (self.getFileSize(),)))
        else:
            request.setResponseCode(http.PARTIAL_CONTENT)
            request.setHeader(
                b'content-range', self._contentRange(offset, size))
        return offset, size
    def _doMultipleRangeRequest(self, request, byteRanges):
        """
        Set up the response for Range headers that specify multiple ranges.

        This method checks if the request is satisfiable and sets the response
        code and Content-Type and Content-Length headers appropriately. The
        return value, which is a little complicated, indicates which parts of
        the resource to return and the boundaries that should separate the
        parts.

        In detail, the return value is a list C{rangeInfo} of 3-tuples
        C{(partSeparator, partOffset, partSize)}. The
        response to this request should be, for each element of C{rangeInfo},
        C{partSeparator} followed by C{partSize} bytes of the resource
        starting at C{partOffset}. Each C{partSeparator} includes the
        MIME-style boundary and the part-specific Content-type and
        Content-range headers. It is convenient to return the separator as a
        concrete string from this method, because this method needs to compute
        the number of bytes that will make up the response to be able to set
        the Content-Length header of the response accurately.

        @param request: The Request object.
        @param byteRanges: A list of C{(start, end)} values as specified by
            the header. For each range, at most one of C{start} and C{end}
            may be L{None}.
        @return: See above.
        """
        matchingRangeFound = False
        rangeInfo = []
        contentLength = 0
        # Boundary derived from the current time and pid.
        boundary = networkString("%x%x" % (int(time.time()*1000000), os.getpid()))
        if self.type:
            contentType = self.type
        else:
            contentType = b'bytes' # It's what Apache does...
        for start, end in byteRanges:
            partOffset, partSize = self._rangeToOffsetAndSize(start, end)
            if partOffset == partSize == 0:
                # This sub-range does not overlap the resource; skip it.
                continue
            contentLength += partSize
            matchingRangeFound = True
            partContentRange = self._contentRange(partOffset, partSize)
            partSeparator = networkString((
                "\r\n"
                "--%s\r\n"
                "Content-type: %s\r\n"
                "Content-range: %s\r\n"
                "\r\n") % (nativeString(boundary), nativeString(contentType), nativeString(partContentRange)))
            contentLength += len(partSeparator)
            rangeInfo.append((partSeparator, partOffset, partSize))
        if not matchingRangeFound:
            # No requested range overlaps the resource: 416 with empty body.
            request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE)
            request.setHeader(
                b'content-length', b'0')
            request.setHeader(
                b'content-range', networkString('bytes */%d' % (self.getFileSize(),)))
            # NOTE(review): this branch returns a 2-tuple while the success
            # path returns a bare list — confirm callers tolerate both shapes.
            return [], b''
        finalBoundary = b"\r\n--" + boundary + b"--\r\n"
        rangeInfo.append((finalBoundary, 0, 0))
        request.setResponseCode(http.PARTIAL_CONTENT)
        request.setHeader(
            b'content-type', networkString('multipart/byteranges; boundary="%s"' % (nativeString(boundary),)))
        request.setHeader(
            b'content-length', intToBytes(contentLength + len(finalBoundary)))
        return rangeInfo
def _setContentHeaders(self, request, size=None):
"""
Set the Content-length and Content-type headers for this request.
This method is not appropriate for requests for multiple byte ranges;
L{_doMultipleRangeRequest} will set these headers in that case.
@param request: The L{twisted.web.http.Request} object.
@param size: The size of the response. If not specified, default to
C{self.getFileSize()}.
"""
if size is None:
size = self.getFileSize()
request.setHeader(b'content-length', intToBytes(size))
if self.type:
request.setHeader(b'content-type', networkString(self.type))
if self.encoding:
request.setHeader(b'content-encoding', networkString(self.encoding))
    def makeProducer(self, request, fileForReading):
        """
        Make a L{StaticProducer} that will produce the body of this response.

        This method will also set the response code and Content-* headers.

        @param request: The L{twisted.web.http.Request} object.
        @param fileForReading: The file object containing the resource.
        @return: A L{StaticProducer}. Calling C{.start()} on this will begin
            producing the response.
        """
        byteRange = request.getHeader(b'range')
        if byteRange is None:
            # No Range header: serve the whole file with a 200.
            self._setContentHeaders(request)
            request.setResponseCode(http.OK)
            return NoRangeStaticProducer(request, fileForReading)
        try:
            parsedRanges = self._parseRangeHeader(byteRange)
        except ValueError:
            # A malformed Range header is ignored rather than rejected:
            # serve the whole resource with a 200.
            log.msg("Ignoring malformed Range header %r" % (byteRange.decode(),))
            self._setContentHeaders(request)
            request.setResponseCode(http.OK)
            return NoRangeStaticProducer(request, fileForReading)

        if len(parsedRanges) == 1:
            offset, size = self._doSingleRangeRequest(
                request, parsedRanges[0])
            self._setContentHeaders(request, size)
            return SingleRangeStaticProducer(
                request, fileForReading, offset, size)
        else:
            # Multiple ranges: multipart/byteranges response.
            # _doMultipleRangeRequest sets the headers itself.
            rangeInfo = self._doMultipleRangeRequest(request, parsedRanges)
            return MultipleRangeStaticProducer(
                request, fileForReading, rangeInfo)
    def render_GET(self, request):
        """
        Begin sending the contents of this L{File} (or a subset of the
        contents, based on the 'range' header) to the given request.
        """
        self.restat(False)

        if self.type is None:
            # Determine Content-Type and encoding from the extension once.
            self.type, self.encoding = getTypeAndEncoding(self.basename(),
                                                          self.contentTypes,
                                                          self.contentEncodings,
                                                          self.defaultType)

        if not self.exists():
            return self.childNotFound.render(request)

        if self.isdir():
            # NOTE(review): self.redirect is defined outside this chunk —
            # presumably redirects to the trailing-slash URL; confirm.
            return self.redirect(request)

        request.setHeader(b'accept-ranges', b'bytes')

        try:
            fileForReading = self.openForReading()
        except IOError as e:
            if e.errno == errno.EACCES:
                return self.forbidden.render(request)
            else:
                raise

        if request.setLastModified(self.getModificationTime()) is http.CACHED:
            # `setLastModified` also sets the response code for us, so if the
            # request is cached, we close the file now that we've made sure that
            # the request would otherwise succeed and return an empty body.
            fileForReading.close()
            return b''

        if request.method == b'HEAD':
            # Set the content headers here, rather than making a producer.
            self._setContentHeaders(request)
            # We've opened the file to make sure it's accessible, so close it
            # now that we don't need it.
            fileForReading.close()
            return b''

        producer = self.makeProducer(request, fileForReading)
        producer.start()
        # and make sure the connection doesn't get closed
        return server.NOT_DONE_YET

    # HEAD requests take the early-return path above.
    render_HEAD = render_GET
@implementer(interfaces.IPullProducer)
class StaticProducer(object):
    """
    Superclass for classes that implement the business of producing.

    @ivar request: The L{IRequest} to write the contents of the file to.
    @ivar fileObject: The file the contents of which to write to the request.
    """
    # Chunk size matches the reactor transport buffer size.
    bufferSize = abstract.FileDescriptor.bufferSize

    def __init__(self, request, fileObject):
        """
        Initialize the instance.

        @param request: The request to write file contents to.
        @param fileObject: The open file to read from.
        """
        self.request = request
        self.fileObject = fileObject

    def stopProducing(self):
        """
        Stop producing data.

        L{twisted.internet.interfaces.IProducer.stopProducing}
        is called when our consumer has died, and subclasses also call this
        method when they are done producing data.
        """
        self.fileObject.close()
        # Drop the request reference so no further writes occur.
        self.request = None
class NoRangeStaticProducer(StaticProducer):
    """
    A L{StaticProducer} that writes the entire file to the request.
    """
    # NOTE(review): no production methods (e.g. resumeProducing) are visible
    # in this chunk; the class body may be truncated here.
class SingleRangeStaticProducer(StaticProducer):
    """
    A L{StaticProducer} that writes a single chunk of a file to the request.
    """
    def __init__(self, request, fileObject, offset, size):
        """
        Initialize the instance.

        @param request: See L{StaticProducer}.
        @param fileObject: See L{StaticProducer}.
        @param offset: The offset into the file of the chunk to be written.
        @param size: The size of the chunk to write.
        """
        StaticProducer.__init__(self, request, fileObject)
        # Where in the file this producer starts, and how many bytes it emits.
        self.offset = offset
        self.size = size
class MultipleRangeStaticProducer(StaticProducer):
    """
    A L{StaticProducer} that writes several chunks of a file to the request.
    """
    def __init__(self, request, fileObject, rangeInfo):
        """
        Initialize the instance.

        @param request: See L{StaticProducer}.
        @param fileObject: See L{StaticProducer}.
        @param rangeInfo: A list of tuples C{[(boundary, offset, size)]}
            where:
             - C{boundary} will be written to the request first.
             - C{offset} the offset into the file of chunk to write.
             - C{size} the size of the chunk to write.
        """
        StaticProducer.__init__(self, request, fileObject)
        # Consumed in order; see _doMultipleRangeRequest for its structure.
        self.rangeInfo = rangeInfo
class ASISProcessor(resource.Resource):
    """
    Serve files exactly as responses without generating a status-line or any
    headers. Inspired by Apache's mod_asis.
    """
    # NOTE(review): no methods are visible in this chunk; the class body may
    # be truncated here.
def formatFileSize(size):
    """
    Format the given file size in bytes as a short human-readable string
    (B, K, M or G), truncating toward zero.
    """
    # Largest unit first; fall through to plain bytes.
    units = [(1024 ** 3, 'G'), (1024 ** 2, 'M'), (1024, 'K')]
    for threshold, suffix in units:
        if size >= threshold:
            return '%i%s' % (size / threshold, suffix)
    return '%iB' % size
class DirectoryLister(resource.Resource):
"""
Print the content of a directory.
@ivar template: page template used to render the content of the directory.
It must contain the format keys B{header} and B{tableContent}.
@type template: C{str}
@ivar linePattern: template used to render one line in the listing table.
It must contain the format keys B{class}, B{href}, B{text}, B{size},
B{type} and B{encoding}.
@type linePattern: C{str}
@ivar contentEncodings: a mapping of extensions to encoding types.
@type contentEncodings: C{dict}
@ivar defaultType: default type used when no mimetype is detected.
@type defaultType: C{str}
@ivar dirs: filtered content of C{path}, if the whole content should not be
displayed (default to L{None}, which means the actual content of
C{path} is printed).
@type dirs: L{None} or C{list}
@ivar path: directory which content should be listed.
@type path: C{str}
"""
template = """<html>
<head>
<title>%(header)s</title>
<style>
.even-dir { background-color: #efe0ef }
.even { background-color: #eee }
.odd-dir {background-color: #f0d0ef }
.odd { background-color: #dedede }
.icon { text-align: center }
.listing {
margin-left: auto;
margin-right: auto;
width: 50%%;
padding: 0.1em;
}
body { border: 0; padding: 0; margin: 0; background-color: #efefef; }
h1 {padding: 0.1em; background-color: #777; color: white; border-bottom: thin white dashed;}
</style>
</head>
<body>
<h1>%(header)s</h1>
<table>
<thead>
<tr>
<th>Filename</th>
<th>Size</th>
<th>Content type</th>
<th>Content encoding</th>
</tr>
</thead>
<tbody>
%(tableContent)s
</tbody>
</table>
</body>
</html>
"""
linePattern = """<tr class="%(class)s">
<td><a href="%(href)s">%(text)s</a></td>
<td>%(size)s</td>
<td>%(type)s</td>
<td>%(encoding)s</td>
</tr>
"""
    def _getFilesAndDirectories(self, directory):
        """
        Helper returning files and directories in given directory listing, with
        attributes to be used to build a table content with
        C{self.linePattern}.

        @return: tuple of (directories, files)
        @rtype: C{tuple} of C{list}
        """
        files = []
        dirs = []
        for path in directory:
            if _PY3:
                # Directory listings may yield bytes; normalise to text.
                if isinstance(path, bytes):
                    path = path.decode("utf8")
            # URL-quote for the href, HTML-escape for the visible text.
            url = quote(path, "/")
            escapedPath = escape(path)
            childPath = filepath.FilePath(self.path).child(path)
            if childPath.isdir():
                dirs.append({'text': escapedPath + "/", 'href': url + "/",
                             'size': '', 'type': '[Directory]',
                             'encoding': ''})
            else:
                mimetype, encoding = getTypeAndEncoding(path, self.contentTypes,
                                                        self.contentEncodings,
                                                        self.defaultType)
                try:
                    size = childPath.getsize()
                except OSError:
                    # Entry vanished between listing and stat; skip it.
                    continue
                files.append({
                    'text': escapedPath, "href": url,
                    'type': '[%s]' % mimetype,
                    'encoding': (encoding and '[%s]' % encoding or ''),
                    'size': formatFileSize(size)})
        return dirs, files
def _buildTableContent(self, elements):
"""
Build a table content using C{self.linePattern} and giving elements odd
and even classes.
"""
tableContent = []
rowClasses = itertools.cycle(['odd', 'even'])
for element, rowClass in zip(elements, rowClasses):
element["class"] = rowClass
tableContent.append(self.linePattern % element)
return tableContent
    def render(self, request):
        """
        Render a listing of the content of C{self.path}.
        """
        request.setHeader(b"content-type", b"text/html; charset=utf-8")
        if self.dirs is None:
            # No pre-filtered content supplied: list the directory itself.
            directory = os.listdir(self.path)
            directory.sort()
        else:
            directory = self.dirs

        dirs, files = self._getFilesAndDirectories(directory)

        # Directories are listed before files.
        tableContent = "".join(self._buildTableContent(dirs + files))

        header = "Directory listing for %s" % (
            escape(unquote(nativeString(request.uri))),)

        done = self.template % {"header": header, "tableContent": tableContent}
        if _PY3:
            done = done.encode("utf8")
        return done
__str__ = __repr__
| [
2,
532,
9,
12,
1332,
12,
7442,
12,
3672,
25,
19074,
13,
12384,
13,
9288,
13,
9288,
62,
12708,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
40006,
24936,
46779,
13,
198,
2,
4091,
38559,
24290,
329,
3307,
13,
198,
198,
37811,
198,
45442... | 2.38828 | 12,236 |
import collections
from typing import Union
import numpy as np
from sklearn.utils.validation import _deprecate_positional_args
from sklearn.datasets import load_digits as sklearn_load_digits
def _mg_eq(xt, xtau, a=0.2, b=0.1, n=10):
"""
Mackey-Glass time delay diffential equation, at values x(t) and x(t-tau).
"""
return -b*xt + a*xtau / (1+xtau**n)
def _mg_rk4(xt, xtau, a, b, n, h=1.0):
    """
    Runge-Kutta method (RK4) for Mackey-Glass timeseries discretization.

    Advances x(t) by one step of size ``h`` using the classical four-stage
    scheme; ``xtau`` (the delayed value x(t - tau)) is held constant over
    the step.
    """
    k1 = h * _mg_eq(xt, xtau, a, b, n)
    k2 = h * _mg_eq(xt + 0.5*k1, xtau, a, b, n)
    k3 = h * _mg_eq(xt + 0.5*k2, xtau, a, b, n)
    k4 = h * _mg_eq(xt + k3, xtau, a, b, n)
    # Weighted average of the four slope estimates (1/6, 1/3, 1/3, 1/6).
    return xt + k1/6 + k2/3 + k3/3 + k4/6
@_deprecate_positional_args
def mackey_glass(n_timesteps: int,
                 n_future: int = 1,
                 tau: int = 17,
                 a: float = 0.2,
                 b: float = 0.1,
                 n: int = 10,
                 x0: float = 1.2,
                 h: float = 1.0,
                 seed: Union[int, np.random.RandomState] = 5555):
    """Mackey-Glass timeseries [#]_ [#]_, computed from the Mackey-Glass
    delayed differential equation:

    .. math::

        \\frac{x}{t} = \\frac{ax(t-\\tau)}{1+x(t-\\tau)^n} - bx(t)

    Parameters
    ----------
    n_timesteps : int
        Number of timesteps to compute.
    n_future : int, optional
        Distance (in timesteps) between an input sample and its target
        sample. By default, equal to 1.
    tau : int, optional
        Time delay :math:`\\tau` of Mackey-Glass equation.
        By defaults, equal to 17. Other values can
        change the chaotic behaviour of the timeseries.
    a : float, optional
        :math:`a` parameter of the equation.
        By default, equal to 0.2.
    b : float, optional
        :math:`b` parameter of the equation.
        By default, equal to 0.1.
    n : int, optional
        :math:`n` parameter of the equation.
        By default, equal to 10.
    x0 : float, optional
        Initial condition of the timeseries.
        By default, equal to 1.2.
    h : float, optional
        Time delta for the Runge-Kuta method. Can be assimilated
        to the number of discrete point computed per timestep.
        By default, equal to 1.0.
    seed : int or RandomState
        Random state seed for reproducibility.

    Returns
    -------
    X, y : np.ndarray, np.ndarray
        Input samples and target samples (the same series shifted
        ``n_future`` steps ahead), each of shape ``(n_timesteps, 1)``.

    Note
    ----
    As Mackey-Glass is defined by delayed time differential equations,
    the first timesteps of the timeseries can't be initialized at 0
    (otherwise, the first steps of computation involving these
    not-computed-yet-timesteps would yield inconsistent results).
    A random number generator is therefore used to produce random
    initial timesteps based on the value of the initial condition
    passed as parameter. A default seed is hard-coded to ensure
    reproducibility in any case.

    References
    ----------
    .. [#] M. C. Mackey and L. Glass, 'Oscillation and chaos in physiological
           control systems', Science, vol. 197, no. 4300, pp. 287-289, Jul. 1977,
           doi: 10.1126/science.267326.
    .. [#] `Mackey-Glass equations
           <https://en.wikipedia.org/wiki/Mackey-Glass_equations>`_
           on Wikipedia.
    """
    # A random state is needed: the discretization scheme requires randomly
    # generated initial steps based on the initial condition.
    if isinstance(seed, np.random.RandomState):
        rs = seed
    elif seed is not None:
        rs = np.random.RandomState(seed)
    else:
        rs = np.random.RandomState(5555)

    # Random first steps, jittered around the initial condition, standing in
    # for the not-yet-computed delayed values x(t - tau).
    history_length = int(np.floor(tau/h))
    history = collections.deque(x0 * np.ones(history_length)
                                + 0.2 * (rs.rand(history_length) - 0.5))
    xt = x0

    # Compute n_future extra points so that every input sample has a target.
    # (Previously the last target was an uninitialized 0.0 and the n_future
    # parameter was silently ignored.)
    total_timesteps = n_timesteps + n_future
    X = np.zeros(total_timesteps)

    for i in range(total_timesteps):
        X[i] = xt
        if tau == 0:
            xtau = 0.0
        else:
            xtau = history.popleft()
            history.append(xt)
        # Bug fix: forward h to the integrator — it previously fell back to
        # its default step of 1.0, ignoring the caller's h.
        xt = _mg_rk4(xt, xtau, a=a, b=b, n=n, h=h)

    y = X[n_future:].reshape(-1, 1)
    X = X[:n_timesteps].reshape(-1, 1)

    return X, y
@_deprecate_positional_args
| [
11748,
17268,
198,
6738,
19720,
1330,
4479,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1341,
35720,
13,
26791,
13,
12102,
341,
1330,
4808,
10378,
8344,
378,
62,
1930,
1859,
62,
22046,
198,
6738,
1341,
35720,
13,
19608,
292,
103... | 2.157363 | 2,078 |
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
from flask_mail import Mail
from flask_migrate import Migrate
#: Flask-SQLAlchemy extension instance
db = SQLAlchemy()

#: Flask-Bcrypt extension instance
bcrypt = Bcrypt()

#: Flask-Login extension instance
login_manager = LoginManager()

#: Flask-Migrate extension instance
migrate = Migrate()

#: Flask-Mail extension instance
mail = Mail()
| [
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
6738,
42903,
62,
38235,
1330,
23093,
13511,
198,
6738,
42903,
62,
15630,
6012,
1330,
347,
29609,
198,
6738,
42903,
62,
4529,
1330,
11099,
198,
6738,
42903,
62,
76,
42175,
... | 3.5 | 110 |
import sys
import boto3
import click
from botocore.exceptions import ClientError
# Map of full CloudFormation stack status names to the short codes shown in
# listings (e.g. 'CREATE_COMPLETE' -> 'C').
status_map = {
    'CREATE_IN_PROGRESS': 'CP',
    'CREATE_FAILED': 'CF',
    'CREATE_COMPLETE': 'C',
    'ROLLBACK_IN_PROGRESS': 'RP',
    'ROLLBACK_FAILED': 'RF',
    'ROLLBACK_COMPLETE': 'R',
    'DELETE_IN_PROGRESS': 'DP',
    'DELETE_FAILED': 'DF',
    'DELETE_COMPLETE': 'D',
    'UPDATE_IN_PROGRESS': 'UP',
    'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS': 'UCP',
    'UPDATE_COMPLETE': 'U',
    'UPDATE_ROLLBACK_IN_PROGRESS': 'URP',
    'UPDATE_ROLLBACK_FAILED': 'URF',
    'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS': 'URCP',
    'UPDATE_ROLLBACK_COMPLETE': 'UR',
    'REVIEW_IN_PROGRESS': 'RevP'
}
def get_stacks(client, filters):
    """
    Fetch every StackSummary matching *filters*, following pagination.

    On an AWS client error, print the error message and exit with status 1.
    """
    try:
        response = client.list_stacks(StackStatusFilter=filters)
        summaries = response['StackSummaries']
        # Keep requesting pages until the service stops returning NextToken.
        while 'NextToken' in response:
            response = client.list_stacks(
                NextToken=response['NextToken'], StackStatusFilter=filters)
            summaries += response['StackSummaries']
        return summaries
    except ClientError as e:
        click.echo(e.response["Error"]["Message"])
        sys.exit(1)
def get_filters(filter):
    """
    Translate a user-supplied filter string into a list of stack status
    codes drawn from ``status_map``.

    No filter (or any 'COMPLETE' filter) yields every non-deleted status; a
    leading '!' excludes matching statuses (still omitting deleted ones);
    anything else selects the statuses containing the string.
    """
    nonDeleted = [code for code in status_map if 'DELETE_' not in code]
    if filter is None or 'COMPLETE' in filter.upper():
        return nonDeleted
    if filter.startswith('!'):
        excluded = filter[1:].upper()
        return [code for code in nonDeleted if excluded not in code]
    return [code for code in status_map if filter.upper() in code]
@click.command(short_help='List stacks')
@click.option('-f', '--filter', help='filter stacks by status code')
# NOTE(review): list_codes (the eager --codes callback) and format_listing
# are defined outside this chunk.
@click.option('-c', '--codes', is_flag=True, callback=list_codes, expose_value=False,
              is_eager=True, help='List status codes')
@click.option('-p', '--profile', default=None, help='AWS profile')
@click.option('-r', '--region', default=None, help='AWS region')
@click.argument('name', default='')
def ls(name, filter, profile, region):
    """List stacks
    Deleted stacks are filtered out by default
    \b
    cfn-tools ls
    cfn-tools ls FUZZY_SEARCH
    \b
    cfn-tools --codes
    cfn-tools ls FUZZY_SEARCH -f update
    cfn-tools ls FUZZY_SEARCH -f \!update
    """
    session = boto3.session.Session(profile_name=profile, region_name=region)
    client = session.client('cloudformation')
    filters = get_filters(filter)
    stacks = get_stacks(client, filters)
    # name defaults to '' (never None); the empty string matches every
    # stack name, so this is effectively an optional substring filter.
    if name is not None:
        stacks = [k for k in stacks if name in k['StackName']]
    for s in stacks:
        # Show the most recent lifecycle timestamp available for each stack.
        if 'LastUpdatedTime' in s:
            format_listing(s, 'LastUpdatedTime')
        elif 'DeletionTime' in s:
            format_listing(s, 'DeletionTime')
        else:
            format_listing(s, 'CreationTime')
| [
11748,
25064,
198,
11748,
275,
2069,
18,
198,
11748,
3904,
198,
198,
6738,
10214,
420,
382,
13,
1069,
11755,
1330,
20985,
12331,
628,
198,
13376,
62,
8899,
796,
1391,
198,
220,
220,
220,
705,
43387,
6158,
62,
1268,
62,
4805,
49656,
75... | 2.328822 | 1,256 |
# -*- coding: utf-8 -*-
# Copyright 2016, RadsiantBlue Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask
from flask import request
import json
import signal
import sys
import time
import mongo
import loop
import common
app = Flask(__name__)
mong = mongo.Mongo()
mongExists=mong.env_found()
loopThread = loop.LoopingThread(interval=20,mong=mong)
adminStats = AdminStats()
@app.route("/",methods=['GET'])
@app.route("/admin/stats", methods=['GET'])
@app.route("/hello")
@app.route('/test', methods=['GET','POST'])
if __name__ =="__main__":
signal.signal(signal.SIGINT, signal_handler)
print('Press Ctrl+C')
loopThread.start()
app.run()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
1584,
11,
5325,
82,
3014,
14573,
21852,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
... | 3.124675 | 385 |
from unittest import TestCase
from src.configurations import SimpleConfig, FullConfig
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
12351,
13,
11250,
20074,
1330,
17427,
16934,
11,
6462,
16934,
628,
198
] | 4.045455 | 22 |
from .arduino import *
| [
6738,
764,
446,
84,
2879,
1330,
1635,
198
] | 2.875 | 8 |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.api.core import *
class StickerPack(Object):
"""Attributes:
ID: ``0x12b299d4``
Args:
emoticon: ``str``
documents: List of ``int`` ``64-bit``
"""
ID = 0x12b299d4
@staticmethod
| [
2,
9485,
39529,
532,
50203,
19308,
2964,
1462,
7824,
20985,
10074,
329,
11361,
198,
2,
15069,
357,
34,
8,
2177,
12,
7908,
6035,
309,
14064,
82,
1279,
5450,
1378,
12567,
13,
785,
14,
12381,
452,
8132,
29,
198,
2,
198,
2,
770,
2393,
... | 3.153392 | 339 |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 9 17:41:20 2018
@author: David
"""
import numpy as np
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian2DKernel,convolve
#Data Loading========
data1 = np.loadtxt("Halo.txt")
data1=np.transpose(data1)
prints=0#Define si se hacen los prints de calculo y plot
dataX=data1[0]
dataY=data1[1]
dataZ=data1[2]
dataVx=data1[3]
dataVy=data1[4]
dataVz=data1[5]
dataM=data1[6]
Mseg=[]
X, Y, Z,VX, VY, VZ, MASS = segregar_corte_z(dataX, dataY, dataZ, dataVx, dataVy, dataVz, dataM, 80,10)
L_box = 1200
n_side = 150
l_side = L_box/n_side
vx_grid = np.zeros([n_side, n_side])
vy_grid = np.zeros([n_side, n_side])
for i in range (n_side):
print('calculo ',i)
for j in range (n_side):
min_x = i * l_side
min_y = j * l_side
ii = (X>min_x) & (X<min_x + l_side) & (Y>min_y) & (Y<min_y+l_side)
tmp_vx = VX[ii]
tmp_vy = VY[ii]
tmp_m = MASS[ii]
masa_total = np.sum(tmp_m) + 1E-10
vx_grid[i,j] = np.sum(tmp_m * tmp_vx) / masa_total
vy_grid[i,j] = np.sum(tmp_m * tmp_vy) / masa_total
#==========================================
Divergencia=definir_divergencia(vx_grid,vy_grid)
Dlim=220
Dmin=Dlim
Dmax=Dlim
PC=puntos_criticos(Divergencia,Dmax,Dmin)
#====================
gauss=Gaussian2DKernel(1.5)
new=convolve(PC,gauss,boundary='extend')
Gplot=plt.figure(figsize=(10,10))
axGp=plt.axes()
plt.imshow(new.T)
for i in range(n_side):
for j in range(n_side):
xi = i
yi = j
v_div=200
xf = vx_grid[i,j]/v_div
yf = vy_grid[i,j]/v_div
axGp.arrow(xi,yi,xf,yf,head_width=0.5,head_length=0.1,fc='k',ec='k', alpha=0.5 )
plt.savefig("gauss.png")
fileGauss=open("ScalarGauss.txt","w")
for i in range (n_side):
for j in range (n_side):
fileGauss.write(str(new[i,j])+" ")
fileGauss.write("\n")
fileGauss.close()
fileVX=open("VectorVx.txt","w")
for i in range (n_side):
for j in range (n_side):
fileVX.write(str(vx_grid[i,j])+" ")
fileVX.write("\n")
fileVX.close()
fileVY=open("VectorVy.txt","w")
for i in range (n_side):
for j in range (n_side):
fileVY.write(str(vy_grid[i,j])+" ")
fileVY.write("\n")
fileVY.close()
#=======================================
Gmin=np.amin(new)
Gmax=np.max(new)
L=Gmax-Gmin
thresh=0.5
TH=thresh*L+Gmin
REG=definir_Regiones(new,TH)
#=======================================
Rplot=plt.figure(figsize=(10,10))
axRp=plt.axes()
plt.imshow(REG.T)
for i in range(n_side):
for j in range(n_side):
xi = i
yi = j
v_div=200
xf = vx_grid[i,j]/v_div
yf = vy_grid[i,j]/v_div
axRp.arrow(xi,yi,xf,yf,head_width=0.5,head_length=0.1,fc='k',ec='k', alpha=0.5 )
plt.savefig('Regiones.png')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
2758,
220,
860,
1596,
25,
3901,
25,
1238,
2864,
198,
198,
31,
9800,
25,
3271,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
1... | 1.90737 | 1,479 |
tupla1 = (1,2,3,4,5)
print(tupla1)
tupla1 = list(tupla1) # Convertendo a tupla para lista
tupla1[1] = 3000
print(tupla1) | [
28047,
489,
64,
16,
796,
357,
16,
11,
17,
11,
18,
11,
19,
11,
20,
8,
198,
4798,
7,
28047,
489,
64,
16,
8,
198,
28047,
489,
64,
16,
796,
1351,
7,
28047,
489,
64,
16,
8,
1303,
38240,
31110,
257,
12777,
489,
64,
31215,
1351,
64... | 1.846154 | 65 |
#!/usr/bin/env python3
# coding: utf-8
import multiprocessing
from sshconnector import sshconnector
import pymysql
import asyncio
db = pymysql.connect("localhost", "root", "root", "hosts")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
18540,
305,
919,
278,
198,
6738,
26678,
8443,
273,
1330,
26678,
8443,
273,
198,
11748,
279,
4948,
893,
13976,
198,
11748,
30351,
... | 2.852941 | 68 |
#!/usr/bin/python3
import unittest
if __name__ == '__main__':
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
555,
715,
395,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
403,
715,
395,
13,
12417,
3419,
198
] | 2.314286 | 35 |
import sys
import ruamel.yaml as yaml
TRAFFIC = sys.argv[1]
file = "../kubernetes-manifests/loadgenerator.yaml"
with open(file, "r") as stream:
d = list(yaml.safe_load_all(stream))
d[0]['spec']['template']['spec']['containers'][0]['env'][0]['value'] = TRAFFIC
with open(file, "w") as stream:
yaml.dump_all(
d,
stream,
default_flow_style=False
) | [
11748,
25064,
198,
198,
11748,
7422,
17983,
13,
88,
43695,
355,
331,
43695,
198,
198,
51,
3861,
5777,
2149,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
198,
7753,
796,
366,
40720,
74,
18478,
3262,
274,
12,
805,
361,
3558,
14,
2220,
... | 2.115385 | 182 |
slu = Solution()
print(slu.findLengthOfLCIS([1, 3, 5, 4, 7]))
| [
198,
198,
82,
2290,
796,
28186,
3419,
198,
4798,
7,
82,
2290,
13,
19796,
24539,
5189,
5639,
1797,
26933,
16,
11,
513,
11,
642,
11,
604,
11,
767,
60,
4008,
198
] | 2.064516 | 31 |
Spam('Key 1', 'Value 1')
Spam('Key 2', 'Value 2')
| [
198,
4561,
321,
10786,
9218,
352,
3256,
705,
11395,
352,
11537,
198,
4561,
321,
10786,
9218,
362,
3256,
705,
11395,
362,
11537,
198
] | 2.217391 | 23 |
from app.crud import Mapper
from app.models.broadcast_read_user import PityBroadcastReadUser
from app.utils.decorator import dao
from app.utils.logger import Log
@dao(PityBroadcastReadUser, Log("BroadcastReadDao"))
| [
6738,
598,
13,
6098,
463,
1330,
337,
11463,
198,
6738,
598,
13,
27530,
13,
36654,
2701,
62,
961,
62,
7220,
1330,
350,
414,
30507,
2701,
5569,
12982,
198,
6738,
598,
13,
26791,
13,
12501,
273,
1352,
1330,
288,
5488,
198,
6738,
598,
1... | 3.056338 | 71 |
import torch.nn as nn
from torchvision.models.resnet import model_urls
from torchvision.models.utils import load_state_dict_from_url
from ..layers import Sequential, Conv2d, BatchNorm2d, FC, MaxPool2d, GlobalAvgPool2D, GroupNorm, \
ResNetBottleneckBlock, ResNetBasicBlock
from ..utils import batch_set_tensor
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def resnet18(pretrained=False, progress=True, **kwargs):
"""
ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
:param pretrained:
If True, returns a model pre-trained on ImageNet.
:param progress:
If True, displays a progress bar of the download to stderr.
"""
return _resnet('resnet18', ResNetBasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
"""
ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
:param pretrained:
If True, returns a model pre-trained on ImageNet.
:param progress:
If True, displays a progress bar of the download to stderr.
"""
return _resnet('resnet34', ResNetBasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', ResNetBottleneckBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
"""
ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
:param pretrained:
If True, returns a model pre-trained on ImageNet.
:param progress:
If True, displays a progress bar of the download to stderr.
"""
return _resnet('resnet101', ResNetBottleneckBlock, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
"""
ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
:param pretrained:
If True, returns a model pre-trained on ImageNet.
:param progress:
If True, displays a progress bar of the download to stderr.
"""
return _resnet('resnet152', ResNetBottleneckBlock, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
"""
ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
:param pretrained:
If True, returns a model pre-trained on ImageNet.
:param progress:
If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', ResNetBottleneckBlock, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
"""
ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
:param pretrained:
If True, returns a model pre-trained on ImageNet.
:param progress:
If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', ResNetBottleneckBlock, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
"""
Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
:param pretrained:
If True, returns a model pre-trained on ImageNet.
:param progress:
If True, displays a progress bar of the download to stderr.
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', ResNetBottleneckBlock, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
"""
Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
:param pretrained:
If True, returns a model pre-trained on ImageNet.
:param progress:
If True, displays a progress bar of the download to stderr.
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', ResNetBottleneckBlock, [3, 4, 23, 3],
pretrained, progress, **kwargs)
| [
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28034,
10178,
13,
27530,
13,
411,
3262,
1330,
2746,
62,
6371,
82,
198,
6738,
28034,
10178,
13,
27530,
13,
26791,
1330,
3440,
62,
5219,
62,
11600,
62,
6738,
62,
6371,
198,
6738,
11485,
... | 2.523686 | 2,322 |
from server import app, talisman
import pytest
# Create test client without https redirect
# (normally taken care of by running in debug)
@pytest.fixture
# Add a function to test routes with optional location
| [
6738,
4382,
1330,
598,
11,
3305,
23845,
198,
11748,
12972,
9288,
628,
198,
2,
13610,
1332,
5456,
1231,
3740,
18941,
198,
2,
357,
27237,
453,
2077,
1337,
286,
416,
2491,
287,
14257,
8,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
2,
... | 3.963636 | 55 |
import os
p = open(os.environ['subdir'] + '/lensing.bands','r').readlines()
i = 0
for l in p:
import re
res = re.split('\s+',l)
f_in = os.environ['subdir'] + '/' + res[0] + '/' + res[1] + '/SCIENCE/coadd_' + res[0] + '_good/coadd.reg'
from glob import glob
command = 'cp ' + f_in + ' ' + os.environ['subdir'] + '/' + res[0] + '/' + res[1] + '/SCIENCE/handmasking.reg'
print command
os.system(command)
if glob(f_in):
i += 1
print i
| [
11748,
28686,
198,
79,
796,
1280,
7,
418,
13,
268,
2268,
17816,
7266,
15908,
20520,
1343,
31051,
75,
26426,
13,
21397,
41707,
81,
27691,
961,
6615,
3419,
198,
72,
796,
657,
198,
1640,
300,
287,
279,
25,
198,
220,
220,
220,
1330,
302... | 2.164384 | 219 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-10-15 05:23
from __future__ import unicode_literals
from django.db import migrations, models
import fileapp.models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1314,
319,
2864,
12,
940,
12,
1314,
8870,
25,
1954,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.84127 | 63 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core import urlresolvers
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import messages
from horizon import tables
from horizon.utils import memoized
from designatedashboard import api
LOG = logging.getLogger(__name__)
EDITABLE_RECORD_TYPES = (
"A",
"AAAA",
"CNAME",
"MX",
"PTR",
"SPF",
"SRV",
"SSHFP",
"TXT",
)
class CreateDomain(tables.LinkAction):
'''Link action for navigating to the CreateDomain view.'''
name = "create_domain"
verbose_name = _("Create Domain")
url = "horizon:project:dns_domains:create_domain"
classes = ("ajax-modal", "btn-create")
policy_rules = (("dns", "create_domain"),)
@memoized.memoized_method
class EditDomain(tables.LinkAction):
'''Link action for navigating to the UpdateDomain view.'''
name = "edit_domain"
verbose_name = _("Edit Domain")
url = "horizon:project:dns_domains:update_domain"
classes = ("ajax-modal", "btn-edit")
policy_rules = (("dns", "update_domain"),)
class ManageRecords(tables.LinkAction):
'''Link action for navigating to the ManageRecords view.'''
name = "manage_records"
verbose_name = _("Manage Records")
url = "horizon:project:dns_domains:records"
classes = ("btn-edit")
policy_rules = (("dns", "get_records"),)
class DeleteDomain(tables.BatchAction):
'''Batch action for deleting domains.'''
name = "delete"
action_present = _("Delete")
action_past = _("Deleted")
data_type_singular = _("Domain")
data_type_plural = _("Domains")
classes = ('btn-danger', 'btn-delete')
policy_rules = (("dns", "delete_domain"),)
class CreateRecord(tables.LinkAction):
'''Link action for navigating to the CreateRecord view.'''
name = "create_record"
verbose_name = _("Create Record")
classes = ("ajax-modal", "btn-create")
policy_rules = (("dns", "create_record"),)
class EditRecord(tables.LinkAction):
'''Link action for navigating to the UpdateRecord view.'''
name = "edit_record"
verbose_name = _("Edit Record")
classes = ("ajax-modal", "btn-edit")
policy_rules = (("dns", "update_record"),)
class DeleteRecord(tables.DeleteAction):
'''Link action for navigating to the UpdateRecord view.'''
data_type_singular = _("Record")
policy_rules = (("dns", "delete_record"),)
class BatchDeleteRecord(tables.BatchAction):
'''Batch action for deleting domain records.'''
name = "delete"
action_present = _("Delete")
action_past = _("Deleted")
data_type_singular = _("Record")
classes = ('btn-danger', 'btn-delete')
policy_rules = (("dns", "delete_record"),)
class DomainsTable(tables.DataTable):
'''Data table for displaying domain summary information.'''
name = tables.Column("name",
verbose_name=_("Name"),
link=("horizon:project:dns_domains:domain_detail"))
email = tables.Column("email",
verbose_name=_("Email"))
ttl = tables.Column("ttl",
verbose_name=_("TTL"))
serial = tables.Column("serial",
verbose_name=_("Serial"))
def record__details_link(record):
'''Returns a link to the view for updating DNS records.'''
return urlresolvers.reverse(
"horizon:project:dns_domains:view_record",
args=(record.domain_id, record.id))
class RecordsTable(tables.DataTable):
'''Data table for displaying summary information for a domains records.'''
name = tables.Column("name",
verbose_name=_("Name"),
link=record__details_link,
)
type = tables.Column("type",
verbose_name=_("Type")
)
data = tables.Column("data",
verbose_name=_("Data")
)
priority = tables.Column("priority",
verbose_name=_("Priority"),
)
ttl = tables.Column("ttl",
verbose_name=_("TTL")
)
| [
2,
15069,
2211,
30446,
15503,
12,
11869,
446,
7712,
5834,
11,
406,
13,
47,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
... | 2.4742 | 1,938 |
import discord
from discord.ext import commands
from discord.utils import get | [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
36446,
13,
26791,
1330,
651
] | 4.8125 | 16 |
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse_urlencode,
)
from ..utils import ExtractorError
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
302,
198,
198,
6738,
764,
11321,
1330,
14151,
11627,
40450,
198,
6738,
11485,
5589,
265,
1330,
357,
198,
220,
220,
220,
... | 2.958904 | 73 |
# Generated by Django 3.1.7 on 2021-03-31 17:16
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
22,
319,
33448,
12,
3070,
12,
3132,
1596,
25,
1433,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor, NearestNeighbors
from sklearn.metrics import balanced_accuracy_score, mean_absolute_error
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from .plot import multiline
from tqdm.autonotebook import tqdm
| [
6738,
1341,
35720,
13,
710,
394,
32289,
1330,
509,
46445,
32289,
9487,
7483,
11,
509,
46445,
32289,
8081,
44292,
11,
3169,
12423,
46445,
32289,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
12974,
62,
4134,
23843,
62,
26675,
11,
1612,
... | 3.225806 | 93 |
"""
Name: Runs a Google Analytics API query
Developer: Matt Clarke
Date: June 8, 2020
Description: Passes a payload to the Google Analytics reporting API and returns the data.
"""
import math
import pandas as pd
def show_message(verbose, message):
"""Show a message if verbose mode is True.
Args:
verbose (bool): True to display messages
message (str): Message to display.
Returns:
Print message if verbose mode is True.
"""
if verbose:
print(message)
def run_query(service: object,
view_id: str,
payload: dict,
output: str = 'df',
verbose=False):
"""Runs a query against the Google Analytics reporting API and returns the results data.
Args:
service (object): Authenticated Google Analytics service connection
view_id (int): Google Analytics view ID to query
payload (dict): Payload of query parameters to pass to Google Analytics in Python dictionary
output (str): String containing the format to return (df or raw)
verbose (bool): Turn on verbose messages.
Returns:
Pandas dataframe or raw array
"""
required_payload = {'ids': 'ga:' + view_id}
final_payload = {**required_payload, **payload}
try:
results = get_results(service, final_payload)
show_message(verbose, results)
if output == 'df':
return results_to_pandas(results)
else:
return results
except Exception as e:
print('Query failed:', str(e))
def get_profile_info(results):
"""Return the profileInfo object from a Google Analytics API request. This contains various parameters, including
the profile ID, the query parameters, the link used in the API call, the number of results and the pagination.
:param results: Google Analytics API results set
:return: Python dictionary containing profileInfo data
"""
if results['profileInfo']:
return results['profileInfo']
def get_total_results(results):
"""Return the totalResults object from a Google Analytics API request.
:param results: Google Analytics API results set
:return: Number of results
"""
if results['totalResults']:
return results['totalResults']
def get_items_per_page(results):
"""Return the itemsPerPage object from a Google Analytics API request.
:param results: Google Analytics API results set
:return: Number of items per page (default is 1000 if not set, max is 10000)
"""
if results['itemsPerPage']:
return results['itemsPerPage']
def get_total_pages(results):
"""Return the total number of pages.
:param results: Google Analytics API results set
:return: Number of results
"""
if results['totalResults']:
return math.ceil(results['totalResults'] / results['itemsPerPage'])
def get_totals(results):
"""Return the totalsForAllResults object from a Google Analytics API request.
:param results: Google Analytics API results set
:return: Python dictionary containing totalsForAllResults data
"""
if results['totalsForAllResults']:
return results['totalsForAllResults']
def get_rows(results):
"""Return the rows object from a Google Analytics API request.
:param results: Google Analytics API results set
:return: Python dictionary containing rows data
"""
if results['rows']:
return results['rows']
def get_column_headers(results):
"""Return the columnHeaders object from a Google Analytics API request.
:param results: Google Analytics API results set
:return: Python dictionary containing columnHeaders data
"""
if results['columnHeaders']:
return results['columnHeaders']
def set_dtypes(df):
"""Sets the correct data type for each column returned in the dataframe.
:param df: Pandas dataframe from Google Analytics API query
:return: Pandas dataframe with correct dtypes assigned to columns
"""
integer_dtype = ['sessionCount', 'daysSinceLastSession', 'userBucket', 'users', 'newUsers', '1dayUsers',
'7dayUsers', '14dayUsers', '28dayUsers', '30dayUsers', 'sessionDurationBucket', 'sessions',
'bounces', 'uniqueDimensionCombinations', 'hits', 'organicSearches', 'impressions', 'adclicks',
'goal1Starts', 'goal2Starts', 'goal3Starts', 'goal4Starts', 'goal5Starts', 'goal6Starts',
'goal7Starts', 'goal8Starts', 'goal9Starts', 'goal10Starts', 'goal11Starts', 'goal12Starts',
'goal13Starts', 'goal14Starts', 'goal15Starts', 'goal16Starts', 'goal17Starts', 'goal18Starts',
'goal19Starts', 'goal20Starts', 'goal1Completions', 'goal2Completions', 'goal3Completions',
'goal4Completions', 'goal5Completions', 'goal6Completions', 'goal7Completions', 'goal8Completions',
'goal9Completions', 'goal10Completions', 'goal11Completions', 'goal12Completions',
'goal13Completions', 'goal14Completions', 'goal15Completions', 'goal16Completions',
'goal17Completions', 'goal18Completions', 'goal19Completions', 'goal20Completions',
'goalCompletionsAll', 'goalStartsAll', 'goal1Abandons', 'goal2Abandons', 'goal3Abandons',
'goal4Abandons', 'goal5Abandons', 'goal6Abandons', 'goal7Abandons', 'goal8Abandons',
'goal9Abandons', 'goal10Abandons', 'goal11Abandons', 'goal12Abandons', 'goal13Abandons',
'goal14Abandons', 'goal15Abandons', 'goal16Abandons', 'goal17Abandons', 'goal18Abandons',
'goal19Abandons', 'goal20Abandons', 'goalAbandonsAll', 'pageDepth', 'entrances', 'pageviews',
'uniquePageviews', 'exits', 'searchResultViews', 'searchUniques', 'searchSessions', 'searchDepth',
'searchRefinements', 'searchExits', 'pageLoadTime', 'pageLoadSample', 'domainLookupTime',
'pageDownloadTime', 'redirectionTime', 'serverConnectionTime', 'serverResponseTime',
'speedMetricsSample', 'domInteractiveTime', 'domContentLoadedTime', 'domLatencyMetricsSample',
'screenviews', 'uniqueScreenviews', 'sessionsWithEvent', 'sessionsToTransaction',
'daysToTransaction', 'transactions', 'itemQuantity', 'uniquePurchases', 'internalPromotionClicks',
'internalPromotionViews', 'productAddsToCart', 'productCheckouts', 'productDetailViews',
'productListClicks', 'productListViews', 'productRefunds', 'productRemovesFromCart',
'quantityAddedToCart', 'quantityCheckedOut', 'quantityRefunded', 'quantityRemovedFromCart',
'totalRefunds', 'socialInteractions', 'uniqueSocialInteractions', 'userTimingValue',
'userTimingSample', 'exceptions', 'fatalExceptions', 'dimension1', 'dimension2', 'dimension3',
'dimension4', 'dimension5', 'dimension6', 'dimension7', 'dimension8', 'dimension9', 'dimension10',
'dimension11', 'dimension12', 'dimension13', 'dimension14', 'dimension15', 'dimension16',
'dimension17', 'dimension18', 'dimension19', 'dimension20', 'customMetric1', 'customMetric2',
'customMetric3', 'customMetric4', 'customMetric5', 'customMetric6', 'customMetric7',
'customMetric8', 'customMetric9', 'customMetric10', 'customMetric11', 'customMetric12',
'customMetric13', 'customMetric14', 'customMetric15', 'customMetric16', 'customMetric17',
'customMetric18', 'customMetric19', 'customMetric20', 'year', 'month', 'week', 'day', 'hour',
'minute', 'nthMonth', 'nthWeek', 'nthDay', 'nthMinute', 'dayOfWeek', 'isoWeek', 'isoYear',
'isoYearIsoWeek', 'nthHour', 'dcmFloodlightQuantity', 'dcmClicks', 'dcmImpressions',
'adsenseAdUnitsViewed', 'adsenseAdsViewed', 'adsenseAdsClicks', 'adsensePageImpressions',
'adsenseExits', 'totalPublisherImpressions', 'totalPublisherMonetizedPageviews',
'totalPublisherClicks', 'backfillImpressions', 'backfillMonetizedPageviews', 'backfillClicks',
'dfpImpressions', 'dfpMonetizedPageviews', 'dfpClicks', 'cohortNthDay', 'cohortNthMonth',
'cohortNthWeek', 'cohortActiveUsers', 'cohortTotalUsers', 'cohortTotalUsersWithLifetimeCriteria',
'dbmClicks', 'dbmConversions', 'dbmImpressions', 'dsCost', 'dsImpressions'
]
float_dtype = ['percentNewSessions', 'sessionsPerUser', 'bounceRate', 'adCost', 'CPM', 'CPC', 'CTR',
'costPerTransaction', 'costPerGoalConversion', 'RPC', 'ROAS', 'goal1Value', 'goal2Value',
'goal3Value', 'goal4Value', 'goal5Value', 'goal6Value', 'goal7Value', 'goal8Value', 'goal9Value',
'goal10Value', 'goal11Value', 'goal12Value', 'goal13Value', 'goal14Value', 'goal15Value',
'goal16Value', 'goal17Value', 'goal18Value', 'goal19Value', 'goal20Value', 'goalValueAll',
'goalValuePerSession', 'goal1ConversionRate', 'goal2ConversionRate', 'goal3ConversionRate',
'goal4ConversionRate', 'goal5ConversionRate', 'goal6ConversionRate', 'goal7ConversionRate',
'goal8ConversionRate', 'goal9ConversionRate', 'goal10ConversionRate', 'goal11ConversionRate',
'goal12ConversionRate', 'goal13ConversionRate', 'goal14ConversionRate', 'goal15ConversionRate',
'goal16ConversionRate', 'goal17ConversionRate', 'goal18ConversionRate', 'goal19ConversionRate',
'goal20ConversionRate', 'goalConversionRateAll', 'goal1AbandonRate', 'goal2AbandonRate',
'goal3AbandonRate', 'goal4AbandonRate', 'goal5AbandonRate', 'goal6AbandonRate', 'goal7AbandonRate',
'goal8AbandonRate', 'goal9AbandonRate', 'goal10AbandonRate', 'goal11AbandonRate',
'goal12AbandonRate', 'goal13AbandonRate', 'goal14AbandonRate', 'goal15AbandonRate',
'goal16AbandonRate', 'goal17AbandonRate', 'goal18AbandonRate', 'goal19AbandonRate',
'goal20AbandonRate', 'goalAbandonRateAll', 'latitude', 'longitude', 'pageValue', 'entranceRate',
'pageviewsPerSession', 'exitRate', 'avgSearchResultViews', 'percentSessionsWithSearch',
'avgSearchDepth', 'percentSearchRefinements', 'searchExitRate', 'searchGoalConversionRateAll',
'goalValueAllPerSearch', 'searchGoal1ConversionRate', 'searchGoal2ConversionRate',
'searchGoal3ConversionRate', 'searchGoal4ConversionRate', 'searchGoal5ConversionRate',
'searchGoal6ConversionRate', 'searchGoal7ConversionRate', 'searchGoal8ConversionRate',
'searchGoal9ConversionRate', 'searchGoal10ConversionRate', 'searchGoal11ConversionRate',
'searchGoal12ConversionRate', 'searchGoal13ConversionRate', 'searchGoal14ConversionRate',
'searchGoal15ConversionRate', 'searchGoal16ConversionRate', 'searchGoal17ConversionRate',
'searchGoal18ConversionRate', 'searchGoal19ConversionRate', 'searchGoal20ConversionRate',
'avgPageLoadTime', 'avgDomainLookupTime', 'avgPageDownloadTime', 'avgRedirectionTime',
'avgServerConnectionTime', 'avgServerResponseTime', 'avgDomInteractiveTime',
'avgDomContentLoadedTime', 'avgDomLatencyMetricsSample', 'screenviewsPerSession',
'avgScreenviewDuration', 'eventValue', 'eventsPerSessionWithEvent', 'transactionsPerSession',
'transactionRevenue', 'revenuePerTransaction', 'transactionRevenuePerSession', 'transactionShipping',
'transactionTax', 'totalValue', 'revenuePerItem', 'itemRevenue', 'itemsPerPurchase',
'localTransactionRevenue', 'localTransactionShipping', 'localTransactionTax', 'localItemRevenue',
'buyToDetailRate', 'cartToDetailRate', 'internalPromotionCTR', 'localProductRefundAmount',
'localRefundAmount', 'productListCTR', 'productRefundAmount', 'productRevenuePerPurchase',
'refundAmount', 'revenuePerUser', 'transactionsPerUser', 'socialInteractionsPerSession',
'avgUserTimingValue', 'exceptionsPerScreenview', 'fatalExceptionsPerScreenview',
'dcmFloodlightRevenue', 'dcmCPC', 'dcmCTR', 'dcmCost', 'dcmROAS', 'dcmRPC', 'adsenseRevenue',
'adsenseCTR', 'adsenseECPM', 'adsenseViewableImpressionPercent', 'adsenseCoverage',
'totalPublisherCoverage', 'totalPublisherImpressionsPerSession', 'totalPublisherECPM',
'totalPublisherViewableImpressionsPercent', 'totalPublisherCTR', 'totalPublisherRevenue',
'totalPublisherRevenuePer1000Sessions', 'adxImpressions', 'adxMonetizedPageviews', 'adxClicks',
'adxCoverage', 'adxImpressionsPerSession', 'adxViewableImpressionsPercent', 'adxCTR', 'adxRevenue',
'adxRevenuePer1000Sessions', 'adxECPM', 'backfillCoverage', 'backfillImpressionsPerSession',
'backfillViewableImpressionsPercent', 'backfillCTR', 'backfillRevenue', 'backfillECPM',
'backfillRevenuePer1000Sessions', 'dfpCoverage', 'dfpImpressionsPerSession',
'dfpViewableImpressionsPercent', 'dfpCTR', 'dfpRevenue', 'dfpRevenuePer1000Sessions', 'dfpECPM',
'cohortAppviewsPerUser', 'cohortAppviewsPerUserWithLifetimeCriteria', 'cohortGoalCompletionsPerUser',
'cohortGoalCompletionsPerUserWithLifetimeCriteria', 'cohortPageviewsPerUser',
'cohortPageviewsPerUserWithLifetimeCriteria', 'cohortRetentionRate', 'cohortRevenuePerUser',
'cohortRevenuePerUserWithLifetimeCriteria', 'cohortSessionDurationPerUser',
'cohortSessionDurationPerUserWithLifetimeCriteria', 'cohortSessionsPerUser',
'cohortSessionsPerUserWithLifetimeCriteria', 'dbmCPA', 'dbmCPC', 'dbmCPM', 'dbmCTR', 'dbmCost',
'dbmROAS', 'dsCPC', 'dsCTR', 'dsProfit', 'dsReturnOnAdSpend', 'dsRevenuePerClick'
]
string_dtype = ['userType', 'userDefinedValue', 'referralPath', 'fullReferrer', 'campaign', 'source', 'medium',
'sourceMedium', 'keyword', 'adContent', 'socialNetwork', 'hasSocialSourceReferral',
'campaignCode', 'adGroup', 'adSlot', 'adDistributionNetwork', 'adMatchType',
'adKeywordMatchType', 'adMatchedQuery', 'adPlacementDomain', 'adPlacementUrl', 'adFormat',
'adTargetingType', 'adTargetingOption', 'adDisplayUrl', 'adDestinationUrl',
'adwordsCustomerID', 'adwordsCampaignID', 'adwordsAdGroupID', 'adwordsCreativeID',
'adwordsCriteriaID', 'adQueryWordCount', 'isTrueViewVideoAd', 'goalCompletionLocation',
'goalPreviousStep1', 'goalPreviousStep2', 'goalPreviousStep3', 'browser', 'browserVersion',
'operatingSystem', 'operatingSystemVersion', 'mobileDeviceBranding', 'mobileDeviceModel',
'mobileDeviceInputSelector', 'mobileDeviceInfo', 'mobileDeviceMarketingName', 'deviceCategory',
'browserSize', 'dataSource', 'continent', 'subContinent', 'country', 'region', 'metro', 'city',
'networkDomain', 'cityId', 'continentId', 'countryIsoCode', 'metroId', 'regionId', 'regionIsoCode',
'subContinentCode', 'flashVersion', 'javaEnabled', 'language', 'screenColors',
'sourcePropertyDisplayName', 'sourcePropertyTrackingId', 'screenResolution', 'hostname', 'pagePath',
'pagePathLevel1', 'pagePathLevel2', 'pagePathLevel3', 'pagePathLevel4', 'pageTitle',
'landingPagePath', 'secondPagePath', 'exitPagePath', 'previousPagePath', 'searchUsed',
'searchKeyword', 'searchKeywordRefinement', 'searchCategory', 'searchStartPage',
'searchDestinationPage', 'searchAfterDestinationPage', 'appInstallerId', 'appVersion', 'appName',
'appId', 'screenName', 'screenDepth', 'landingScreenName', 'exitScreenName', 'eventCategory',
'eventAction', 'eventLabel', 'transactionId', 'affiliation', 'productSku', 'productName',
'productCategory', 'currencyCode', 'checkoutOptions', 'internalPromotionCreative',
'internalPromotionId', 'internalPromotionName', 'internalPromotionPosition', 'orderCouponCode',
'productBrand', 'productCategoryHeirarchy', 'productCouponCode', 'productListName',
'productListPosition', 'productVariant', 'shoppingStage', 'socialInteractionNetwork',
'socialInteractionAction', 'socialInteractionNetworkAction', 'socialInteractionTarget',
'socialEngagementType', 'userTimingCategory', 'userTimingLabel', 'userTimingVariable',
'exceptionDescription', 'experimentId', 'experimentVariant', 'experimentCombination',
'experimentName', 'customVarName1', 'customVarName2', 'customVarName3', 'customVarName4',
'customVarName5', 'customVarName6', 'customVarName7', 'customVarName8', 'customVarName9',
'customVarName10', 'customVarName11', 'customVarName12', 'customVarName13', 'customVarName14',
'customVarName15', 'customVarName16', 'customVarName17', 'customVarName18', 'customVarName19',
'customVarName20', 'customVarValue1', 'customVarValue2', 'customVarValue3', 'customVarValue4',
'customVarValue5', 'customVarValue6', 'customVarValue7', 'customVarValue8', 'customVarValue9',
'customVarValue10', 'customVarValue11', 'customVarValue12', 'customVarValue13', 'customVarValue14',
'customVarValue15', 'customVarValue16', 'customVarValue17', 'customVarValue18', 'customVarValue19',
'customVarValue20', 'dayOfWeekName', 'dateHour', 'dateHourMinute', 'yearMonth', 'yearWeek',
'dcmClickAd', 'dcmClickAdId', 'dcmClickAdType', 'dcmClickAdTypeId', 'ga:dcmClickAdvertiser',
'dcmClickAdvertiserId', 'dcmClickCampaign', 'dcmClickCampaignId', 'dcmClickCreative',
'dcmClickCreativeId', 'dcmClickRenderingId', 'dcmClickCreativeType', 'dcmClickCreativeTypeId',
'dcmClickCreativeVersion', 'dcmClickSite', 'dcmClickSiteId', 'dcmClickSitePlacement',
'dcmClickSitePlacementId', 'dcmClickSpotId', 'dcmFloodlightActivity',
'dcmFloodlightActivityAndGroup', 'dcmFloodlightActivityGroup', 'dcmFloodlightActivityGroupId',
'dcmFloodlightActivityId', 'dcmFloodlightAdvertiserId', 'dcmFloodlightSpotId', 'dcmLastEventAd',
'dcmLastEventAdId', 'dcmLastEventAdType', 'dcmLastEventAdTypeId', 'dcmLastEventAdvertiser',
'dcmLastEventAdvertiserId', 'dcmLastEventAttributionType', 'dcmLastEventCampaign',
'dcmLastEventCampaignId', 'dcmLastEventCreative', 'dcmLastEventCreativeId',
'dcmLastEventRenderingId', 'dcmLastEventCreativeType', 'dcmLastEventCreativeTypeId',
'dcmLastEventCreativeVersion', 'dcmLastEventSite', 'dcmLastEventSiteId',
'dcmLastEventSitePlacement', 'dcmLastEventSitePlacementId', 'dcmLastEventSpotId', 'userAgeBracket',
'userGender', 'interestOtherCategory', 'interestAffinityCategory', 'interestInMarketCategory',
'dfpLineItemId', 'dfpLineItemName', 'acquisitionCampaign', 'acquisitionMedium', 'acquisitionSource',
'acquisitionSourceMedium', 'acquisitionTrafficChannel', 'cohort', 'channelGrouping',
'dbmClickAdvertiser', 'dbmClickAdvertiserId', 'dbmClickCreativeId', 'dbmClickExchange',
'dbmClickExchangeId', 'dbmClickInsertionOrder', 'dbmClickInsertionOrderId', 'dbmClickLineItem',
'dbmClickLineItemId', 'dbmClickSite', 'dbmClickSiteId', 'dbmLastEventAdvertiser',
'dbmLastEventAdvertiserId', 'dbmLastEventCreativeId', 'dbmLastEventExchange',
'dbmLastEventExchangeId', 'dbmLastEventInsertionOrder', 'dbmLastEventInsertionOrderId',
'dbmLastEventLineItem', 'dbmLastEventLineItemId', 'dbmLastEventSite', 'dbmLastEventSiteId',
'dsAdGroup', 'dsAdGroupId', 'dsAdvertiser', 'dsAdvertiserId', 'dsAgency', 'dsAgencyId',
'dsCampaign', 'dsCampaignId', 'dsEngineAccount', 'dsEngineAccountId', 'dsKeyword', 'dsKeywordId'
]
date_dtype = ['date']
time_dtype = ['time', 'avgSessionDuration', 'timeOnPage', 'avgTimeOnPage', 'searchDuration', 'timeOnScreen']
for column in df.columns:
if column in integer_dtype:
df[column] = df[column].astype(int)
elif column in float_dtype:
df[column] = df[column].astype(float)
elif column in date_dtype:
df[column] = pd.to_datetime(df[column])
elif column in string_dtype:
df[column] = df[column].astype(str)
elif column in time_dtype:
df[column] = df[column].astype(str)
else:
df[column] = df[column]
return df
def results_to_pandas(results):
    """Return a Google Analytics result set as a Pandas DataFrame.

    :param results: Google Analytics API results set (dict with
        'columnHeaders' and 'rows' keys)
    :return: Pandas DataFrame with dtypes applied via set_dtypes(), or
        None when the result set contains no rows (preserves the original
        implicit-None behaviour)
    """
    # Strip the 'ga:' prefix so the column names match the dtype lookup
    # tables used by set_dtypes(). Using .get() avoids a NameError that the
    # previous version hit when 'columnHeaders' was missing/empty while
    # 'rows' was present (headings was then referenced before assignment).
    headings = [
        header['name'].replace('ga:', '')
        for header in results.get('columnHeaders') or []
    ]
    rows = results.get('rows')
    if rows:
        df = pd.DataFrame(rows, columns=headings)
        return set_dtypes(df)
    return None
def get_results(service, final_payload):
    """Passes a payload to the API using the service object and returns all available results by merging paginated
    data together into a single DataSet.
    :param service: Google Analytics service object
    :param final_payload: Final payload to pass to API
    :return: Original result object with rows data manipulated to contains rows from all pages
    """
    # Run the query and determine the number of items and pages
    results = service.data().ga().get(**final_payload).execute()
    total_results = get_total_results(results)
    total_pages = get_total_pages(results)
    items_per_page = get_items_per_page(results)
    # Return multiple pages of results
    if total_pages and total_pages > 1:
        start_index = 0
        all_rows = []
        # NOTE(review): the loop starts from index 0, so the first page
        # (already fetched above) is requested a second time. Harmless for
        # correctness because results['rows'] is replaced wholesale below,
        # but it costs one redundant API call. Also, the "<=" bound can
        # request one extra (empty) page when total_results is an exact
        # multiple of items_per_page -- confirm against the API's behaviour.
        while start_index <= total_results:
            # Determine start_index and add to payload. The GA API uses
            # 1-based start indices, hence "+ 1"; the leading unary "+"
            # before start_index is a no-op.
            start_index_payload = {'start_index': + start_index + 1}
            final_payload = {**final_payload, **start_index_payload}
            # Fetch results and append rows
            next_results = service.data().ga().get(**final_payload).execute()
            next_rows = get_rows(next_results)
            all_rows = all_rows + next_rows
            # Update start_index
            start_index = (items_per_page + start_index)
        # Replace rows in initial results with all rows
        results['rows'] = all_rows
        return results
    # Return a single page of results
    else:
        return results
| [
37811,
198,
5376,
25,
44743,
257,
3012,
30437,
7824,
12405,
198,
45351,
25,
4705,
19635,
198,
10430,
25,
2795,
807,
11,
12131,
198,
11828,
25,
6251,
274,
257,
21437,
284,
262,
3012,
30437,
6447,
7824,
290,
5860,
262,
1366,
13,
198,
37... | 2.395468 | 9,973 |
#!/usr/bin/env python3
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import matplotlib
import numpy as np
import os
import unittest
from sumo.geometry.rot3 import Rot3
import sumo.metrics.utils as utils
from sumo.semantic.project_scene import ProjectScene
matplotlib.use("TkAgg")
"""
Test Evaluator utils functions
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
15269,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
198,
1212,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
43,
2149,
24290,
2393,... | 3.239726 | 146 |
# Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""multi-agent environments."""
import functools
import itertools
from typing import Any, Dict, Sequence
from brax import jumpy as jp
from brax import math
from brax.experimental.composer import component_editor
from brax.experimental.composer import reward_functions
from brax.experimental.composer.composer_utils import merge_desc
from brax.experimental.composer.observers import SimObserver as so
import numpy as np
MAX_DIST = 20
MIN_DIST = 0.5
def get_n_agents_desc(agents: Sequence[str],
                      agents_params: Sequence[str] = None,
                      init_r: float = 2):
  """Build an environment description with n agents placed on a circle.

  Args:
    agents: component names, one per agent.
    agents_params: optional per-agent component parameters (same length as
      `agents`); falsy entries add no `component_params`.
    init_r: radius of the circle on which the agents start.

  Returns:
    dict(components=..., edges=...) suitable for merge_desc(); every
    unordered agent pair gets an edge with a 'root_vec' extra observer.
  """
  # Evenly spaced angles; the final value (2*pi) duplicates angle 0 and is
  # dropped via angles[:-1] below.
  angles = np.linspace(0, 2 * np.pi, len(agents) + 1)
  agents_params = agents_params or ([None] * len(agents))
  components = {}
  edges = {}
  for i, (angle, agent,
          agent_params) in enumerate(zip(angles[:-1], agents, agents_params)):
    pos = (np.cos(angle) * init_r, np.sin(angle) * init_r, 0)
    components[f'agent{i}'] = dict(component=agent, pos=pos)
    if agent_params:
      components[f'agent{i}'].update(dict(component_params=agent_params))
  # itertools.combinations over the (unique) dict keys never yields a pair
  # with k1 == k2, so the previous equality check was dead code and has
  # been removed.
  for k1, k2 in itertools.combinations(list(components), 2):
    k1, k2 = sorted([k1, k2])  # ensure the edge name is order-independent
    edge_name = component_editor.concat_comps(k1, k2)
    edges[edge_name] = dict(
        extra_observers=[dict(observer_type='root_vec', indices=(0, 1))])
  return dict(components=components, edges=edges)
def add_follow(env_desc: Dict[str, Any], leader_vel: float = 3.0):
  """Add follow task.

  The first agent (in sorted component-name order) becomes the leader and
  is rewarded for matching the target root velocity (leader_vel, 0) in the
  x/y plane; every other agent gets a distance-based reward on its edge to
  the leader (capped at MAX_DIST).

  Args:
    env_desc: environment description to extend in place.
    leader_vel: target x-velocity for the leader.

  Returns:
    The same env_desc with the task's rewards merged in.
  """
  agent_groups = {}
  components = {}
  edges = {}
  agents = sorted(env_desc['components'])
  leader, followers = agents[0], agents[1:]
  # leader aims to run at a specific velocity
  components[leader] = dict(
      reward_fns=dict(
          goal=dict(
              reward_type='root_goal',
              sdcomp='vel',
              indices=(0, 1),
              offset=leader_vel + 2,
              target_goal=(leader_vel, 0))))
  agent_groups[leader] = dict(reward_agents=(leader,))
  # follower follows
  for agent in followers:
    edge_name = component_editor.concat_comps(agent, leader)
    edges[edge_name] = dict(
        reward_fns=dict(
            dist=dict(
                reward_type='root_dist', max_dist=MAX_DIST, offset=MAX_DIST +
                1)))
    agent_groups[agent] = dict(reward_names=(('dist', agent, leader),))
  merge_desc(
      env_desc,
      dict(agent_groups=agent_groups, components=components, edges=edges))
  return env_desc
def add_chase(env_desc: Dict[str, Any]):
  """Add chase task.

  The first agent (sorted component-name order) is the prey; the rest are
  predators. Each predator/prey edge carries a 'chase' reward for the
  predator and an 'escape' reward (scale=-1, i.e. sign-flipped distance
  term) for the prey; every agent additionally gets a velocity ('run')
  bonus.

  Returns:
    The same env_desc with the task's rewards merged in.
  """
  agents = sorted(env_desc['components'])
  agent_groups = {agent: {'reward_names': ()} for agent in agents}
  components = {agent: {'reward_fns': {}} for agent in agents}
  edges = {}
  prey, predators = agents[0], agents[1:]
  for agent in predators:
    edge_name = component_editor.concat_comps(agent, prey)
    edges[edge_name] = dict(
        reward_fns=dict(
            # predators aim to chase the prey
            chase=dict(
                reward_type='root_dist',
                offset=MAX_DIST + 1,
                min_dist=MIN_DIST,
                done_bonus=1000 * MAX_DIST),
            # prey aims to run away from all predators
            escape=dict(
                reward_type='root_dist',
                scale=-1,
                max_dist=MAX_DIST,
                done_bonus=1000 * MAX_DIST,
            ),
        ))
    agent_groups[prey]['reward_names'] += (('escape', agent, prey),)
    agent_groups[agent]['reward_names'] += (('chase', agent, prey),)
  for agent in agents:
    # add velocity bonus for each agent
    # (get_run_reward is defined elsewhere in this module)
    components[agent]['reward_fns'].update(dict(run=get_run_reward()))
    agent_groups[agent]['reward_names'] += (('run', agent),)
  merge_desc(
      env_desc,
      dict(agent_groups=agent_groups, edges=edges, components=components))
  return env_desc
def get_ring_components(name: str = 'ring',
                        num_segments: int = 4,
                        radius: float = 3.0,
                        thickness: float = None,
                        offset: Sequence[float] = None):
  """Draw a ring with capsules.

  Builds `num_segments` frozen, non-colliding capsule components laid out
  on a circle of `radius` centered at `offset` (default: origin).

  Args:
    name: prefix for the generated component keys ('<name>0', '<name>1', ...).
    num_segments: number of capsules approximating the ring.
    radius: ring radius.
    thickness: capsule radius; defaults to radius / 40.
    offset: 3-vector center of the ring.

  Returns:
    dict of component descriptions keyed by '<name><i>'.
  """
  offset = offset or [0, 0, 0]
  offset = jp.array(offset)
  thickness = thickness or radius / 40.
  components = {}
  # The last angle (2*pi) duplicates the first and is skipped below.
  angles = np.linspace(0, np.pi * 2, num_segments + 1)
  for i, angle in enumerate(angles[:-1]):
    k = f'{name}{i}'
    # 2 * ring_length = 2 * r * tan(pi/n) is the side length of the regular
    # n-gon circumscribing the circle, so adjacent capsules meet end-to-end.
    ring_length = radius * np.tan(np.pi / num_segments)
    components[k] = dict(
        component='singleton',
        component_params=dict(
            size=[thickness, ring_length * 2],
            collider_type='capsule',
            no_obs=True),
        pos=offset + jp.array(
            (radius * np.cos(angle), radius * np.sin(angle), -ring_length)),
        quat=math.euler_to_quat(jp.array([90, angle / jp.pi * 180, 0])),
        quat_origin=(0, 0, ring_length),
        frozen=True,
        collide=False)
  return components
def add_sumo(
    env_desc: Dict[str, Any],
    centering_scale: float = 1.,
    control_scale: float = 0.1,
    draw_scale: float = 0.,
    knocking_scale: float = 1.,
    opp_scale: float = 1.,
    ring_size: float = 3.,
    win_bonus: float = 1.,
):
  """Add a sumo task.

  The first agent (sorted component-name order) is the yokozuna (champion);
  the remaining agents are komusubis (challengers). Each challenger/champion
  edge defines win/lose rewards driven by the opponents' root positions
  relative to a ring of radius `ring_size`, plus a reward for moving toward
  the opponent. Every agent also receives a control penalty and a constant
  per-step draw penalty, and a capsule ring is added to the scene.

  Args:
    env_desc: environment description to extend in place.
    centering_scale: scale of the lose-penalty (stay-near-center) terms.
    control_scale: scale of the per-agent control penalty.
    draw_scale: magnitude of the constant draw penalty.
    knocking_scale: scale of the win-bonus (push-out) terms.
    opp_scale: scale of the move-toward-opponent terms.
    ring_size: radius of the sumo ring.
    win_bonus: done-bonus on a win (negated on a loss).

  Returns:
    The same env_desc with the task's rewards merged in.
  """
  agents = sorted(env_desc['components'])
  agent_groups = {agent: {'reward_names': ()} for agent in agents}
  components = {agent: {'reward_fns': {}} for agent in agents}
  edges = {}
  yokozuna, komusubis = agents[0], agents[1:]
  for agent in komusubis:
    edge_name = component_editor.concat_comps(agent, yokozuna)
    # NOTE(review): in the lambdas below, x and y are assumed to be the two
    # edge endpoints' component descriptions -- confirm the ordering against
    # the reward_functions implementation.
    edges[edge_name] = dict(
        reward_fns=dict(
            # komusubis wants to push out yokozuna
            komu_win_bonus=dict(
                reward_type=reward_functions.exp_norm_reward,
                obs=lambda x, y: so('body', 'pos', y['root'], indices=(0, 1)),
                max_dist=ring_size,
                done_bonus=win_bonus,
                scale=-knocking_scale,
            ),
            komu_lose_penalty=dict(
                reward_type=reward_functions.exp_norm_reward,
                obs=lambda x, y: so('body', 'pos', x['root'], indices=(0, 1)),
                max_dist=ring_size,
                done_bonus=-win_bonus,
                scale=centering_scale,
            ),
            # yokozuna wants to push out komusubis
            yoko_win_bonus=dict(
                reward_type=reward_functions.exp_norm_reward,
                obs=lambda x, y: so('body', 'pos', x['root'], indices=(0, 1)),
                max_dist=ring_size,
                done_bonus=win_bonus,
                scale=-knocking_scale,
            ),
            # each agent aims to be close to the center
            yoko_lose_penalty=dict(
                reward_type=reward_functions.exp_norm_reward,
                obs=lambda x, y: so('body', 'pos', y['root'], indices=(0, 1)),
                max_dist=ring_size,
                done_bonus=-win_bonus,
                scale=centering_scale,
            ),
            # move to opponent's direction
            komu_move_to_yoko=dict(
                reward_type=reward_functions.direction_reward,
                vel0=lambda x, y: so('body', 'vel', x['root'], indices=(0, 1)),
                vel1=lambda x, y: so('body', 'vel', y['root'], indices=(0, 1)),
                pos0=lambda x, y: so('body', 'pos', x['root'], indices=(0, 1)),
                pos1=lambda x, y: so('body', 'pos', y['root'], indices=(0, 1)),
                scale=opp_scale,
            ),
            yoko_move_to_komu=dict(
                reward_type=reward_functions.direction_reward,
                vel0=lambda x, y: so('body', 'vel', y['root'], indices=(0, 1)),
                vel1=lambda x, y: so('body', 'vel', x['root'], indices=(0, 1)),
                pos0=lambda x, y: so('body', 'pos', y['root'], indices=(0, 1)),
                pos1=lambda x, y: so('body', 'pos', x['root'], indices=(0, 1)),
                scale=opp_scale,
            ),
        ))
    agent_groups[agent]['reward_names'] += (('komu_win_bonus', agent, yokozuna),
                                            ('komu_lose_penalty', agent,
                                             yokozuna), ('komu_move_to_yoko',
                                                         agent, yokozuna))
    agent_groups[yokozuna]['reward_names'] += (('yoko_win_bonus', agent,
                                                yokozuna), ('yoko_lose_penalty',
                                                            agent, yokozuna),
                                               ('yoko_move_to_komu', yokozuna,
                                                agent))
  for agent in agents:
    # Per-agent penalties shared by every participant.
    components[agent]['reward_fns'].update(
        dict(
            control_penalty=dict(
                reward_type=reward_functions.control_reward,
                scale=control_scale,
            ),
            draw_penalty=dict(
                reward_type=reward_functions.constant_reward,
                value=-draw_scale,
            ),
        ))
    agent_groups[agent]['reward_names'] += (('control_penalty', agent),
                                            ('draw_penalty', agent))
  # add sumo ring
  components.update(get_ring_components(radius=ring_size, num_segments=20))
  merge_desc(
      env_desc,
      dict(agent_groups=agent_groups, edges=edges, components=components))
  return env_desc
def add_squidgame(env_desc: Dict[str, Any],
                  ring_size: float = 3.0,
                  run_scale: float = 0):
  """Add a simplified squid game task.

  The first agent (sorted component-name order) is the defender; the rest
  are attackers. The defender gets a 'chase' distance reward toward each
  attacker; with run_scale > 0 every agent also gets a velocity bonus.
  Square/triangle/circle rings are added to sketch the playing field.

  Args:
    env_desc: environment description to extend in place.
    ring_size: base size used for all the field shapes.
    run_scale: scale of the optional per-agent velocity bonus.

  Returns:
    The same env_desc with the task's rewards merged in.
  """
  # TODO: finish reward functions
  agents = sorted(env_desc['components'])
  agent_groups = {agent: {'reward_names': ()} for agent in agents}
  components = {agent: {'reward_fns': {}} for agent in agents}
  edges = {}
  defender, attackers = agents[0], agents[1:]
  for agent in attackers:
    edge_name = component_editor.concat_comps(agent, defender)
    edges[edge_name] = dict(
        reward_fns=dict(
            # defenders aim to chase the attackers
            chase=dict(reward_type='root_dist', offset=2 * ring_size + 0.5),))
    agent_groups[defender]['reward_names'] += (('chase', agent, defender),)
  for agent in agents:
    if run_scale > 0:
      # add velocity bonus for each agent
      # NOTE(review): this *replaces* the placeholder {'reward_fns': {}}
      # instead of updating it -- equivalent here because nothing was added
      # to components[agent] beforehand.
      components[agent] = dict(
          reward_fns=dict(run=get_run_reward(scale=run_scale)))
      agent_groups[agent]['reward_names'] += (('run', agent),)
  # add rings
  components.update(
      get_ring_components(
          name='square',
          offset=(ring_size, 0, 0),
          radius=ring_size,
          thickness=ring_size / 40.,
          num_segments=4))
  components.update(
      get_ring_components(
          name='defender_circle',
          offset=(ring_size * 2, 0, 0),
          radius=ring_size / 5,
          thickness=ring_size / 40.,
          num_segments=10))
  components.update(
      get_ring_components(
          name='triangle',
          offset=(-ring_size / np.sqrt(3), 0, 0),
          radius=ring_size / np.sqrt(3),
          thickness=ring_size / 40.,
          num_segments=3))
  components.update(
      get_ring_components(
          name='attacker_circle',
          offset=(-ring_size * np.sqrt(3), 0, 0),
          radius=ring_size / 5,
          thickness=ring_size / 40.,
          num_segments=10))
  merge_desc(
      env_desc,
      dict(agent_groups=agent_groups, edges=edges, components=components))
  return env_desc
# Registry mapping task name -> builder that augments an env_desc.
TASK_MAP = dict(
    follow=add_follow, chase=add_chase, sumo=add_sumo, squidgame=add_squidgame)
def create_desc(main_agent: str = 'ant',
                other_agent: str = 'ant',
                main_agent_params: Dict[str, Any] = None,
                other_agent_params: Dict[str, Any] = None,
                num_agents: int = 2,
                task: str = 'follow',
                init_r: float = 2.,
                **kwargs):
  """Create an environment description for a multi-agent task.

  One `main_agent` plus (num_agents - 1) copies of `other_agent` are placed
  on a circle of radius `init_r`; the task builder from TASK_MAP then adds
  the task-specific rewards. Remaining kwargs go to the task builder.
  """
  num_others = num_agents - 1
  agent_list = [main_agent] + [other_agent] * num_others
  if not (main_agent_params or other_agent_params):
    agents_params = None
  else:
    agents_params = [main_agent_params] + [other_agent_params] * num_others
  env_desc = get_n_agents_desc(
      agents=agent_list, agents_params=agents_params, init_r=init_r)
  return TASK_MAP[task](env_desc=env_desc, **kwargs)
ENV_DESCS = {k: functools.partial(create_desc, task=k) for k in TASK_MAP}
| [
2,
15069,
33448,
383,
9718,
87,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
... | 2.092161 | 6,174 |
# Module imports Input, Output and AttributeList to be used in widgets
# pylint: disable=unused-import
import sys
import os
import types
from operator import attrgetter
from AnyQt.QtWidgets import (
QWidget,
QDialog,
QVBoxLayout,
QSizePolicy,
QApplication,
QStyle,
QShortcut,
QSplitter,
QSplitterHandle,
QPushButton,
QStatusBar,
QProgressBar,
QAction,
)
from AnyQt.QtCore import Qt, QByteArray, QSettings, QUrl, pyqtSignal as Signal
from AnyQt.QtGui import QIcon, QKeySequence, QDesktopServices
from Orange.data import FileFormat
from Orange.widgets import settings, gui
# OutputSignal and InputSignal are imported for compatibility, but shouldn't
# be used; use Input and Output instead
from Orange.canvas.registry import (
description as widget_description,
WidgetDescription,
OutputSignal,
InputSignal,
)
from Orange.widgets.report import Report
from Orange.widgets.gui import OWComponent
from Orange.widgets.io import ClipboardFormat
from Orange.widgets.settings import SettingsHandler
from Orange.widgets.utils import saveplot, getdeepattr
from Orange.widgets.utils.progressbar import ProgressBarMixin
from Orange.widgets.utils.messages import (
WidgetMessagesMixin,
UnboundMsg,
MessagesWidget,
)
from Orange.widgets.utils.signals import (
WidgetSignalsMixin,
Input,
Output,
AttributeList,
)
from Orange.widgets.utils.overlay import MessageOverlayWidget, OverlayWidget
from Orange.widgets.utils.buttons import SimpleButton
# Msg is imported and renamed, so widgets can import it from this module rather
# than the one with the mixin (Orange.widgets.utils.messages). Assignment is
# used instead of "import ... as", otherwise PyCharm does not suggest import
Msg = UnboundMsg
class WidgetMetaClass(type(QDialog)):
    """Meta class for widgets. If the class definition does not have a
    specific settings handler, the meta class provides a default one
    that does not handle contexts. Then it scans for any attributes
    of class settings.Setting: the setting is stored in the handler and
    the value of the attribute is replaced with the default."""
    # NOTE(review): no __new__/__init__ is visible in this chunk, so as
    # shown the metaclass adds no behaviour beyond type(QDialog); the
    # docstring describes logic presumably defined elsewhere (or stripped).
# noinspection PyMethodParameters
# pylint: disable=bad-classmethod-argument
# pylint: disable=too-many-instance-attributes
class OWWidget(
QDialog,
OWComponent,
Report,
ProgressBarMixin,
WidgetMessagesMixin,
WidgetSignalsMixin,
metaclass=WidgetMetaClass,
):
"""Base widget class"""
# Global widget count
widget_id = 0
# Widget Meta Description
# -----------------------
#: Widget name (:class:`str`) as presented in the Canvas
name = None
id = None
category = None
version = None
#: Short widget description (:class:`str` optional), displayed in
#: canvas help tooltips.
description = ""
#: Widget icon path relative to the defining module
icon = "icons/Unknown.png"
#: Widget priority used for sorting within a category
#: (default ``sys.maxsize``).
priority = sys.maxsize
help = None
help_ref = None
url = None
keywords = []
background = None
replaces = None
#: A list of published input definitions
inputs = []
#: A list of published output definitions
outputs = []
# Default widget GUI layout settings
# ----------------------------------
#: Should the widget have basic layout
#: (If this flag is false then the `want_main_area` and
#: `want_control_area` are ignored).
want_basic_layout = True
#: Should the widget construct a `mainArea` (this is a resizable
#: area to the right of the `controlArea`).
want_main_area = True
#: Should the widget construct a `controlArea`.
want_control_area = True
#: Orientation of the buttonsArea box; valid only if
#: `want_control_area` is `True`. Possible values are Qt.Horizontal,
#: Qt.Vertical and None for no buttons area
buttons_area_orientation = Qt.Horizontal
#: Specify whether the default message bar widget should be created
#: and placed into the default layout. If False then clients are
#: responsible for displaying messages within the widget in an
#: appropriate manner.
want_message_bar = True
#: Widget painted by `Save graph` button
graph_name = None
graph_writers = FileFormat.img_writers
save_position = True
#: If false the widget will receive fixed size constraint
#: (derived from it's layout). Use for widgets which have simple
#: static size contents.
resizing_enabled = True
blockingStateChanged = Signal(bool)
processingStateChanged = Signal(int)
# Signals have to be class attributes and cannot be inherited,
# say from a mixin. This has something to do with the way PyQt binds them
progressBarValueChanged = Signal(float)
messageActivated = Signal(Msg)
messageDeactivated = Signal(Msg)
settingsHandler = None
""":type: SettingsHandler"""
#: Version of the settings representation
#: Subclasses should increase this number when they make breaking
#: changes to settings representation (a settings that used to store
#: int now stores string) and handle migrations in migrate and
#: migrate_context settings.
settings_version = 1
savedWidgetGeometry = settings.Setting(None)
controlAreaVisible = settings.Setting(True, schema_only=True)
#: A list of advice messages (:class:`Message`) to display to the user.
#: When a widget is first shown a message from this list is selected
#: for display. If a user accepts (clicks 'Ok. Got it') the choice is
#: recorded and the message is never shown again (closing the message
#: will not mark it as seen). Messages can be displayed again by pressing
#: Shift + F1
#:
#: :type: list of :class:`Message`
UserAdviceMessages = []
contextAboutToBeOpened = Signal(object)
contextOpened = Signal()
contextClosed = Signal()
# pylint: disable=protected-access
# pylint: disable=super-init-not-called
    def __init__(self, *args, **kwargs):
        """__init__s are called in __new__; don't call them from here"""
        # Intentionally empty: per the docstring, actual initialization
        # happens in __new__ (not visible in this chunk); this merely
        # swallows the constructor arguments.
    # NOTE(review): two stacked @classmethod decorators on a method that
    # takes `self` -- almost certainly an artifact of code stripped between
    # them; as written the method would not work as an instance method.
    @classmethod
    @classmethod
    def set_basic_layout(self):
        """Provide the basic widget layout
        Which parts are created is regulated by class attributes
        `want_main_area`, `want_control_area`, `want_message_bar` and
        `buttons_area_orientation`, the presence of method `send_report`
        and attribute `graph_name`.
        """
        self.setLayout(QVBoxLayout())
        self.layout().setContentsMargins(2, 2, 2, 2)
        if not self.resizing_enabled:
            self.layout().setSizeConstraint(QVBoxLayout.SetFixedSize)
        # A graph implies a main area even if the subclass did not ask for one.
        self.want_main_area = self.want_main_area or self.graph_name
        self._create_default_buttons()
        self._insert_splitter()
        if self.want_control_area:
            self._insert_control_area()
        if self.want_main_area:
            self._insert_main_area()
        if self.want_message_bar:
            # Use a OverlayWidget for status bar positioning.
            c = OverlayWidget(self, alignment=Qt.AlignBottom)
            c.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
            c.setWidget(self)
            c.setLayout(QVBoxLayout())
            c.layout().setContentsMargins(0, 0, 0, 0)
            sb = QStatusBar()
            sb.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Maximum)
            sb.setSizeGripEnabled(self.resizing_enabled)
            c.layout().addWidget(sb)
            # NOTE(review): `help` shadows the builtin of the same name.
            help = self.__help_action
            help_button = SimpleButton(
                icon=QIcon(gui.resource_filename("icons/help.svg")),
                toolTip="Show widget help",
                visible=help.isVisible(),
            )
            # NOTE(review): dangling decorator -- the decorated function body
            # is missing here; as shown, the next line makes this a syntax
            # error (a decorator must be followed by a def/class).
            @help.changed.connect
            help_button.clicked.connect(help.trigger)
            sb.addWidget(help_button)
            if self.graph_name is not None:
                b = SimpleButton(
                    icon=QIcon(gui.resource_filename("icons/chart.svg")),
                    toolTip="Save Image",
                )
                b.clicked.connect(self.save_graph)
                sb.addWidget(b)
            if hasattr(self, "send_report"):
                b = SimpleButton(
                    icon=QIcon(gui.resource_filename("icons/report.svg")),
                    toolTip="Report",
                )
                b.clicked.connect(self.show_report)
                sb.addWidget(b)
            self.message_bar = MessagesWidget(self)
            self.message_bar.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
            pb = QProgressBar(maximumWidth=120, minimum=0, maximum=100)
            pb.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Ignored)
            pb.setAttribute(Qt.WA_LayoutUsesWidgetRect)
            pb.setAttribute(Qt.WA_MacMiniSize)
            pb.hide()
            sb.addPermanentWidget(pb)
            sb.addPermanentWidget(self.message_bar)
            # NOTE(review): `statechanged` is not defined anywhere in this
            # chunk -- its definition appears to have been stripped.
            self.processingStateChanged.connect(statechanged)
            self.blockingStateChanged.connect(statechanged)
            # NOTE(review): another dangling decorator with its function body
            # missing -- a syntax error as shown.
            @self.progressBarValueChanged.connect
            # Reserve the bottom margins for the status bar
            margins = self.layout().contentsMargins()
            margins.setBottom(sb.sizeHint().height())
            self.setContentsMargins(margins)
def save_graph(self):
"""Save the graph with the name given in class attribute `graph_name`.
The method is called by the *Save graph* button, which is created
automatically if the `graph_name` is defined.
"""
graph_obj = getdeepattr(self, self.graph_name, None)
if graph_obj is None:
return
saveplot.save_plot(graph_obj, self.graph_writers)
# when widget is resized, save the new width and height
    def resizeEvent(self, event):
        """Overloaded to save the geometry (width and height) when the widget
        is resized.
        """
        QDialog.resizeEvent(self, event)
        # Don't store geometry if the widget is not visible
        # (the widget receives a resizeEvent (with the default sizeHint)
        # before first showEvent and we must not overwrite the
        # savedGeometry with it)
        if self.save_position and self.isVisible():
            self.__updateSavedGeometry()
    def moveEvent(self, event):
        """Overloaded to save the geometry when the widget is moved
        """
        QDialog.moveEvent(self, event)
        # Same visibility guard as resizeEvent: only persist geometry for a
        # widget that is actually shown.
        if self.save_position and self.isVisible():
            self.__updateSavedGeometry()
    def hideEvent(self, event):
        """Overloaded to save the geometry when the widget is hidden
        """
        # No isVisible() guard here (unlike move/resize/closeEvent): the
        # widget is in the process of being hidden, so the check would
        # defeat the purpose. Geometry is saved before the base handler runs.
        if self.save_position:
            self.__updateSavedGeometry()
        QDialog.hideEvent(self, event)
    def closeEvent(self, event):
        """Overloaded to save the geometry when the widget is closed
        """
        # Save before delegating so the last on-screen geometry wins.
        if self.save_position and self.isVisible():
            self.__updateSavedGeometry()
        QDialog.closeEvent(self, event)
    def showEvent(self, event):
        """Overloaded to restore the geometry when the widget is shown
        """
        QDialog.showEvent(self, event)
        # __was_restored makes the restoration run only on the first show.
        if self.save_position and not self.__was_restored:
            # Restore saved geometry on (first) show
            if self.__splitter is not None:
                self.__splitter.setControlAreaVisible(self.controlAreaVisible)
            self.__restoreWidgetGeometry()
            self.__was_restored = True
        # Presumably shows a one-time user tip; defined elsewhere (not in
        # this chunk).
        self.__quicktipOnce()
    def wheelEvent(self, event):
        """Silently accept the wheel event.
        This is to ensure combo boxes and other controls that have focus
        don't receive this event unless the cursor is over them.
        """
        # Accepting marks the event handled so it is not propagated further.
        event.accept()
    def reshow(self):
        """Put the widget on top of all windows
        """
        # Show first, then raise above sibling windows, then give it focus.
        self.show()
        self.raise_()
        self.activateWindow()
    def openContext(self, *a):
        """Open a new context corresponding to the given data.
        The settings handler first checks the stored context for a
        suitable match. If one is found, it becomes the current contexts and
        the widgets settings are initialized accordingly. If no suitable
        context exists, a new context is created and data is copied from
        the widget's settings into the new context.
        Widgets that have context settings must call this method after
        reinitializing the user interface (e.g. combo boxes) with the new
        data.
        The arguments given to this method are passed to the context handler.
        Their type depends upon the handler. For instance,
        `DomainContextHandler` expects `Orange.data.Table` or
        `Orange.data.Domain`.
        """
        # `a` is the tuple of positional arguments; the signal carries the
        # whole tuple as a single payload, while the handler gets it unpacked.
        self.contextAboutToBeOpened.emit(a)
        self.settingsHandler.open_context(self, *a)
        self.contextOpened.emit()
    def closeContext(self):
        """Save the current settings and close the current context.
        Widgets that have context settings must call this method before
        reinitializing the user interface (e.g. combo boxes) with the new
        data.
        """
        # Counterpart of openContext(); notifies listeners afterwards.
        self.settingsHandler.close_context(self)
        self.contextClosed.emit()
    def retrieveSpecificSettings(self):
        """
        Retrieve data that is not registered as setting.
        This method is called by
        `Orange.widgets.settings.ContextHandler.settings_to_widget`.
        Widgets may define it to retrieve any data that is not stored in widget
        attributes. See :obj:`Orange.widgets.data.owcolor.OWColor` for an
        example.
        """
        # Intentionally a no-op hook; subclasses override as needed.
        pass
    def storeSpecificSettings(self):
        """
        Store data that is not registered as setting.
        This method is called by
        `Orange.widgets.settings.ContextHandler.settings_from_widget`.
        Widgets may define it to store any data that is not stored in widget
        attributes. See :obj:`Orange.widgets.data.owcolor.OWColor` for an
        example.
        """
        # Intentionally a no-op hook; subclasses override as needed.
        pass
    def saveSettings(self):
        """
        Writes widget instance's settings to class defaults. Usually called
        when the widget is deleted.
        """
        # Persisting goes through the class-level settings handler.
        self.settingsHandler.update_defaults(self)
    def onDeleteWidget(self):
        """
        Invoked by the canvas to notify the widget it has been deleted
        from the workflow.
        If possible, subclasses should gracefully cancel any currently
        executing tasks.
        """
        # Intentionally a no-op hook; subclasses override as needed.
        pass
    def handleNewSignals(self):
        """
        Invoked by the workflow signal propagation manager after all
        signals handlers have been called.
        Reimplement this method in order to coalesce updates from
        multiple updated inputs.
        """
        # Intentionally a no-op hook; subclasses override as needed.
        pass
#: Widget's status message has changed.
statusMessageChanged = Signal(str)
def setStatusMessage(self, text):
"""
Set widget's status message.
This is a short status string to be displayed inline next to
the instantiated widget icon in the canvas.
"""
if self.__statusMessage != text:
self.__statusMessage = text
self.statusMessageChanged.emit(text)
    def statusMessage(self):
        """
        Return the widget's status message.
        """
        # Plain accessor for the value last set via setStatusMessage().
        return self.__statusMessage
def keyPressEvent(self, e):
"""Handle default key actions or pass the event to the inherited method
"""
if (int(e.modifiers()), e.key()) in OWWidget.defaultKeyActions:
OWWidget.defaultKeyActions[int(e.modifiers()), e.key()](self)
else:
QDialog.keyPressEvent(self, e)
defaultKeyActions = {}
if sys.platform == "darwin":
defaultKeyActions = {
(Qt.ControlModifier, Qt.Key_M): lambda self: self.showMaximized
if self.isMinimized()
else self.showMinimized(),
(Qt.ControlModifier, Qt.Key_W): lambda self: self.setVisible(
not self.isVisible()
),
}
def setBlocking(self, state=True):
"""
Set blocking flag for this widget.
While this flag is set this widget and all its descendants
will not receive any new signals from the workflow signal manager.
This is useful for instance if the widget does it's work in a
separate thread or schedules processing from the event queue.
In this case it can set the blocking flag in it's processNewSignals
method schedule the task and return immediately. After the task
has completed the widget can clear the flag and send the updated
outputs.
.. note::
Failure to clear this flag will block dependent nodes forever.
"""
if self.__blocking != state:
self.__blocking = state
self.blockingStateChanged.emit(state)
    def isBlocking(self):
        """Is this widget blocking signal processing."""
        # True while setBlocking(True) is in effect.
        return self.__blocking
    def resetSettings(self):
        """Reset the widget settings to default"""
        # Delegates to the class-level settings handler.
        self.settingsHandler.reset_settings(self)
    def workflowEnv(self):
        """
        Return (a view to) the workflow runtime environment.
        Returns
        -------
        env : types.MappingProxyType
        """
        # __env is expected to be populated by the canvas framework; its
        # assignment is not visible in this chunk.
        return self.__env
    def workflowEnvChanged(self, key, value, oldvalue):
        """
        A workflow environment variable `key` has changed to value.
        Called by the canvas framework to notify widget of a change
        in the workflow runtime environment.
        The default implementation does nothing.
        """
        # Intentionally a no-op hook; subclasses override as needed.
        pass
    @classmethod
    def migrate_settings(cls, settings, version):
        """Fix settings to work with the current version of widgets.

        The default implementation is a no-op; widget subclasses override
        this to rewrite ``settings`` in place.

        Parameters
        ----------
        settings : dict
            dict of name - value mappings
        version : Optional[int]
            version of the saved settings
            or None if settings were created before migrations
        """
    @classmethod
    def migrate_context(cls, context, version):
        """Fix contexts to work with the current version of widgets.

        The default implementation is a no-op; widget subclasses override
        this to rewrite ``context`` in place.

        Parameters
        ----------
        context : Context
            Context object
        version : Optional[int]
            version of the saved context
            or None if context was created before migrations
        """
    class Message(object):
        """
        A user message.

        :param str text: Message text
        :param str persistent_id:
            A persistent message id.
        :param icon: Message icon
        :type icon: QIcon or QStyle.StandardPixmap
        :param str moreurl:
            An url to open when a user clicks a 'Learn more' button.

        .. seealso:: :const:`OWWidget.UserAdviceMessages`
        """
        #: QStyle.SP_MessageBox* pixmap enums repeated for easier access
        Question = QStyle.SP_MessageBoxQuestion
        Information = QStyle.SP_MessageBoxInformation
        Warning = QStyle.SP_MessageBoxWarning
        Critical = QStyle.SP_MessageBoxCritical
#: Input/Output flags.
#: -------------------
#:
#: The input/output is the default for its type.
#: When there are multiple IO signals with the same type the
#: one with the default flag takes precedence when adding a new
#: link in the canvas.
Default = widget_description.Default
NonDefault = widget_description.NonDefault
#: Single input signal (default)
Single = widget_description.Single
#: Multiple outputs can be linked to this signal.
#: Signal handlers with this flag have (object, id: object) -> None signature.
Multiple = widget_description.Multiple
#: Applies to user interaction only.
#: Only connected if specifically requested (in a dedicated "Links" dialog)
#: or it is the only possible connection.
Explicit = widget_description.Explicit
#: Dynamic output type.
#: Specifies that the instances on the output will in general be
#: subtypes of the declared type and that the output can be connected
#: to any input signal which can accept a subtype of the declared output
#: type.
Dynamic = widget_description.Dynamic
| [
2,
19937,
17944,
23412,
11,
25235,
290,
3460,
4163,
8053,
284,
307,
973,
287,
40803,
198,
2,
279,
2645,
600,
25,
15560,
28,
403,
1484,
12,
11748,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
3858,
198,
6738,
10088,
1330,
708... | 2.669692 | 7,602 |
# Copyright (C) 2019 Simon Biggs
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from pathlib import Path
from pymedphys._imports import imageio
from pymedphys._imports import numpy as np
from pymedphys._imports import plt, scipy
from .optical_density import calc_net_od
# Suffix expected on calibration image file names (dose expressed in cGy).
DEFAULT_CAL_STRING_END = " cGy.tif"
def calc_calibration_points(
    prescans, postscans, alignments=None, figures=False, pixel_trim=0
):
    """Returns calibration points based on dictionaries of prescans and postscans.

    The key of the dictionaries of images is to represent the dose calibration
    point. If the key cannot be converted into a float that image will be
    ignored (with a warning).

    Parameters
    ----------
    prescans, postscans : dict
        Must share an identical set of keys.
    alignments : dict, optional
        Per-key alignments passed through to ``calc_net_od``; when None,
        every alignment starts as None and is filled in from the alignment
        returned by ``calc_net_od``.
    figures : bool, optional
        When True, display each net optical density image.
    pixel_trim : int, optional
        Number of pixels trimmed from every edge of the net OD image.

    Returns
    -------
    tuple
        ``(calibration_points, alignments)`` where ``calibration_points``
        maps dose (float) to the median net OD.
    """
    keys = prescans.keys()
    assert keys == postscans.keys()

    calibration_points = {}

    if alignments is None:
        alignments = {key: None for key in keys}

    for key in keys:
        try:
            dose_value = float(key)
        except ValueError:
            # BUG FIX: the warning previously contained a literal "{}"
            # because .format(key) was never applied to the message.
            warnings.warn(
                "{} does not appear to be a calibration image key. This will "
                "be skipped.".format(key)
            )
            continue

        net_od, alignment = calc_net_od(
            prescans[key], postscans[key], alignment=alignments[key]
        )

        if pixel_trim != 0:
            trim_ref = (slice(pixel_trim, -pixel_trim), slice(pixel_trim, -pixel_trim))
            net_od = net_od[trim_ref]

        if figures:
            plt.figure()
            plt.imshow(net_od)
            plt.show()

        calibration_points[dose_value] = np.median(net_od)
        alignments[key] = alignment

    return calibration_points, alignments
| [
2,
15069,
357,
34,
8,
13130,
11288,
4403,
14542,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 2.641184 | 811 |
import os
class Config:
    """Extendable configuration class.

    This is also used for flask application config.
    """

    # Database connection settings.  The host name "db" is presumably the
    # name of a database service (e.g. in a compose setup) — TODO confirm.
    DATABASE = "hakoblog"
    DATABASE_HOST = "db"
    DATABASE_USER = "root"
    DATABASE_PASS = ""
    # Flask testing flag.
    TESTING = False
    # The single global blog user's name.
    GLOBAL_USER_NAME = "hakobe"
# BUG FIX: `config` is not defined in this module (NameError at import time);
# the class defined above is `Config`.  If a lowercase `config` factory
# exists elsewhere in the project, revert this — TODO confirm.
CONFIG = Config()
| [
11748,
28686,
628,
198,
4871,
17056,
25,
198,
220,
220,
220,
37227,
11627,
437,
540,
4566,
2288,
1398,
13,
198,
220,
220,
220,
770,
318,
635,
973,
329,
42903,
3586,
4566,
13,
198,
220,
220,
220,
37227,
628,
220,
220,
220,
360,
1404,... | 2.4 | 125 |
"""
PLOTING DIRECTIONALITY INDEX
@author: PUNEET DHEER
"""
from matplotlib.widgets import Button
import numpy as np
import matplotlib.pyplot as plt
from itertools import combinations
fig,ax = plt.subplots()
plt.subplots_adjust(bottom = 0.3)
dataDI = DI
index = np.arange(0, dataDI.shape[0],1)
pos_DI= dataDI.copy()
neg_DI = dataDI.copy()
pos_DI[pos_DI <= 0] = np.nan
neg_DI[neg_DI > 0] = np.nan
plt.bar( index, pos_DI[:,0], label = 'X -> Y', color ='b', alpha = 0.6)
plt.bar( index, neg_DI[:,0], label = 'Y -> X', color ='r', alpha = 0.6 )
ax.legend(loc='upper left', ncol=2);
ax.set_title("DI between: %d[X] and %d[Y]" % (0, 1))
ax.set_xlabel("Window No.")
ax.set_ylabel("Directionality Index")
callback = D_Index()
axprev = plt.axes([0.55, 0.05, 0.15, 0.075]) #plt.axes((left, bottom, width, height))
axnext = plt.axes([0.73, 0.05, 0.15, 0.075])
bnext = Button(axnext, 'NEXT')
bnext.on_clicked(callback.Next)
bprev = Button(axprev, 'PREVIOUS')
bprev.on_clicked(callback.Prev) | [
37811,
201,
198,
6489,
2394,
2751,
42242,
2849,
1847,
9050,
24413,
6369,
201,
198,
201,
198,
31,
9800,
25,
350,
41884,
2767,
360,
13909,
1137,
201,
198,
37811,
201,
198,
201,
198,
6738,
2603,
29487,
8019,
13,
28029,
11407,
1330,
20969,
... | 2.118236 | 499 |
from libavg_charts.aid_lines.cursor_aid_line import CursorAidLine
| [
6738,
9195,
615,
70,
62,
354,
5889,
13,
1698,
62,
6615,
13,
66,
21471,
62,
1698,
62,
1370,
1330,
327,
21471,
44245,
13949,
628
] | 2.791667 | 24 |
import os.path
from typing import List
import xarray as xr
from test.cli.helpers import CliTest, CliDataTest, TEST_ZARR_DIR
from xcube.core.verify import assert_cube
| [
11748,
28686,
13,
6978,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
2124,
18747,
355,
2124,
81,
198,
198,
6738,
1332,
13,
44506,
13,
16794,
364,
1330,
1012,
72,
14402,
11,
1012,
72,
6601,
14402,
11,
43001,
62,
57,
26465,
62,
34720,... | 2.982456 | 57 |
from __future__ import unicode_literals
from django.apps import AppConfig
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.619048 | 21 |
from treadmill.infra.setup import base_provision
from treadmill.infra import configuration, constants, exceptions
from treadmill.api import ipa
| [
6738,
49246,
13,
10745,
430,
13,
40406,
1330,
2779,
62,
1676,
10178,
198,
6738,
49246,
13,
10745,
430,
1330,
8398,
11,
38491,
11,
13269,
198,
6738,
49246,
13,
15042,
1330,
20966,
64,
628
] | 4.393939 | 33 |
import json

# Generate traits JSON from IPFS metadata:
# merge the per-token metadata files (ids 1..10000) into one assets list.
json_data = {'assets': []}
# Directory holding one JSON metadata file per token id.
# Renamed from `dir`, which shadowed the builtin of the same name.
# NOTE(review): the literal backslash separator only resolves on Windows
# ("\d" happens to survive because it is not a recognised escape) — confirm.
metadata_dir = 'ipfs\doe_metadata_QmcxJeVYRhyevvwQgsBfSWiY7QVmyNx1rQinzXbc1ZYut5'
for i in range(1, 10001):
    with open(f'{metadata_dir}/{i}', 'r') as f:
        tmp_data = json.loads(f.read())
    json_data['assets'].append(tmp_data)
with open('json/ipfs_doe_nft_metadata.json', 'w') as f:
    json.dump(json_data, f)
| [
11748,
33918,
198,
198,
2,
2980,
378,
12796,
19449,
422,
6101,
10652,
20150,
198,
17752,
62,
7890,
796,
1391,
6,
19668,
10354,
17635,
92,
198,
15908,
796,
705,
541,
9501,
59,
67,
2577,
62,
38993,
62,
48,
23209,
87,
40932,
53,
38162,
... | 2.01 | 200 |
"""Describes a DiscreteTimeStateTransitionModel"""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import reparameterization
from covid.impl.util import batch_gather, transition_coords
from covid.impl.discrete_markov import (
discrete_markov_simulation,
discrete_markov_log_prob,
)
# Short module-level aliases used throughout this file.
tla = tf.linalg
tfd = tfp.distributions
| [
37811,
24564,
22090,
257,
8444,
8374,
7575,
9012,
8291,
653,
17633,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
11192,
273,
11125,
62,
1676,
65,
1799,
355,
256,
46428,
198,
6738,
11192... | 3.019108 | 157 |
"""
description: this file helps to load raw file and gennerate batch x,y
author:luchi
date:22/11/2016
"""
import numpy as np
import cPickle as pkl
# Path to the pickled raw data file loaded by this module.
dataset_path='data/subj0.pkl'
#return batch dataset
| [
37811,
198,
11213,
25,
428,
2393,
5419,
284,
3440,
8246,
2393,
290,
2429,
1008,
378,
15458,
2124,
11,
88,
198,
9800,
25,
75,
22200,
198,
4475,
25,
1828,
14,
1157,
14,
5304,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
... | 2.772152 | 79 |
# Copyright 2016 Leon Poon and Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from xml.dom import XMLNS_NAMESPACE, Node
| [
2,
15069,
1584,
10592,
350,
2049,
290,
25767,
669,
201,
198,
2,
201,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
201,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
2... | 3.372449 | 196 |
from functools import lru_cache
from pydantic import BaseSettings
@lru_cache
| [
6738,
1257,
310,
10141,
1330,
300,
622,
62,
23870,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
26232,
628,
198,
198,
31,
75,
622,
62,
23870,
198
] | 3 | 27 |
from django.contrib import admin
from .models import *
# Register the hotel-domain models with the Django admin site
# (default ModelAdmin options for each).
admin.site.register(Hotel)
admin.site.register(Owner)
admin.site.register(Room)
admin.site.register(Comment)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
1635,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
28482,
13,
15654,
13,
30238,
7,
21352,
417,
8,
198,
28482,
13,
15654,
13,
30238,
7,
42419,
8,
198,
28482... | 3.233333 | 60 |
from test_include import *
import numpy as np
# Build a synthetic dataset and a single past query for model fitting.
data = generate_dataset()

past_queries = []
past_queries.append(Query([0.2, 0.2, 0.6, 0.6], 0.4, 1))

test_queries, freqmat = generate_test_queries(data)

quickSel = Crumbs()
quickSel.assign_optimal_freq(past_queries)
# BUG FIX: on Python 3 `map` returns a lazy iterator; np.array() of a map
# object produces a 0-d object array and the (30, 30) reshape below fails.
# Materialise the answers as a list first.
crumbs_answers = list(map(lambda t: quickSel.answer(t), test_queries))

import matplotlib.pyplot as plt

a = np.array(crumbs_answers)
a.shape = (30, 30)
viz_freqmap(a)
plt.show()
| [
6738,
1332,
62,
17256,
1330,
1635,
198,
11748,
299,
32152,
355,
45941,
628,
198,
7890,
796,
7716,
62,
19608,
292,
316,
3419,
198,
198,
30119,
62,
421,
10640,
796,
17635,
198,
30119,
62,
421,
10640,
13,
33295,
7,
20746,
26933,
15,
13,
... | 2.360215 | 186 |
#!/usr/bin/env python3
import os
import sys
import fileinput
import itertools
# Define input and output files
fileToClean = '/Users/tdh1/Desktop/MTCdata/2-17-2017-Hurco04.txt'  # input file
fileToSave = '/Users/tdh1/Desktop/MTCdata/2-17-2017-Hurco04-Clean.txt'  # output file
fileToTemp = '/Users/tdh1/Desktop/MTCdata/Temp.txt'  # temp file

# Pass 1: strip leading whitespace from every line and join any line that
# does NOT start a new "2017..." timestamp record onto the previous line
# (by removing the previous line's trailing newline).
with open(fileToClean, 'r') as tempFile:
    dirtyFile = tempFile.readlines()

for index in range(len(dirtyFile)):
    dirtyFile[index] = dirtyFile[index].lstrip()  # remove leading whitespace
    if dirtyFile[index][0:4] != '2017':
        # The original tested several partial-timestamp prefixes here, but
        # every branch performed this exact statement, so the whole chain
        # collapses to one unconditional join.
        # NOTE(review): at index 0 this touches dirtyFile[-1] (the last
        # line), exactly as the original did — confirm the first line
        # always begins with a timestamp.
        dirtyFile[index - 1] = dirtyFile[index - 1].rstrip('\n')

with open(fileToTemp, 'w') as tempFile:
    tempFile.writelines(dirtyFile)

# Pass 2: a joined line may still contain embedded timestamps; re-split the
# records by inserting a newline before every "2017".  The temp file must be
# written and re-read so readlines() sees the new line boundaries.
with open(fileToTemp, 'r') as tempFile:
    dirtyFile = tempFile.readlines()

for index in range(len(dirtyFile)):
    dirtyFile[index] = dirtyFile[index].replace('2017', '\n2017')

with open(fileToTemp, 'w') as tempFile:
    tempFile.writelines(dirtyFile)

# Pass 3: drop the blank lines produced by the re-split and save the result.
with open(fileToTemp, 'r') as tempFile:
    dirtyFile = tempFile.readlines()

cleanFile = [line for line in dirtyFile if line != '\n']

with open(fileToSave, 'w') as tempFile:
    tempFile.writelines(cleanFile)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
2393,
15414,
198,
11748,
340,
861,
10141,
198,
198,
2,
2896,
500,
5128,
290,
5072,
3696,
198,
7753,
2514,
32657,
796,
31051,
14490,
14,
... | 2.743259 | 853 |
import re
| [
11748,
302,
198
] | 3.333333 | 3 |
# Read a number and print its factorial together with the expansion
# (e.g. "5!= 5x4x3x2x1").
num = int(input('Digite um valor p/ saber seu fatorial: '))
c = num
d = num
print('{}!= {}x'.format(num, num), end='')
# BUG FIX: the original looped `while c != 1`, which never terminates for
# num <= 0 (c only ever decreases).  `while c > 1` is equivalent for
# positive input and terminates for zero/negative input.
# NOTE(review): 0! should be 1; this still prints 0 for input 0 — confirm
# the desired handling of non-positive input.
while c > 1:
    c -= 1
    num += (c * num) - num  # i.e. num = num * c
    if c != 1:
        print('{}x'.format(c), end='')
    else:
        print('{}'.format(c), end='')
print('\nO fatorial de {} é {}.'.format(d, num))
| [
22510,
796,
493,
7,
15414,
10786,
19511,
578,
23781,
1188,
273,
279,
14,
17463,
263,
384,
84,
277,
21592,
25,
705,
4008,
198,
66,
796,
997,
198,
67,
796,
997,
198,
4798,
10786,
90,
92,
0,
28,
23884,
87,
4458,
18982,
7,
22510,
11,
... | 2.058065 | 155 |
#!/usr/bin/env python
# encoding: utf-8
"""
Parses and updates indexes files.
Created by Karl Dubost on 2018-03-25.
Copyright (c) 2018 Grange. All rights reserved.
see LICENSE.TXT
"""
import datetime
import logging
import os
import string
import sys
import lxml.html
from lxml import etree
from ymir.utils import helper
from ymir.ymir import createindexmarkup
# Local filesystem root of the site and derived template location.
ROOT = '/Users/karl/Sites/la-grange.net'
CODEPATH = os.path.dirname(sys.argv[0])
TEMPLATEDIR = CODEPATH + "/../templates/"
# Timestamp of the current run.
DATENOW = datetime.datetime.today()
def create_monthly_index(entry_index, month_index_path, date_obj,
                         first_time=False):
    """Create a monthly index when it doesn't exist.

    Parameters
    ----------
    entry_index : str
        Markup of the first entry the index is seeded with.
    month_index_path : str
        Destination path of the index.  Currently unused here — presumably
        the caller writes the returned markup itself; TODO confirm.
    date_obj : datetime
        Date from which the index's ISO/human dates are derived.
    first_time : bool
        Unused placeholder, kept for interface compatibility.

    Returns
    -------
    str
        The rendered month index markup.
    """
    msg = "Do not forget to update /map with your tiny hands"
    # Lazy %-style logging args instead of eager interpolation.
    logging.info("%s", msg)
    # Generate the html
    month_markup = month_index(entry_index, date_obj)
    return month_markup
def month_index(entry_index, date_obj):
    """Render the markup of a month index from its on-disk template."""
    # TODO: refactor the templating parts
    template_path = f'{ROOT}/2019/12/04/month_index_tmpl.html'
    with open(template_path, 'r') as source:
        template = string.Template(source.read())

    iso_date = helper.convert_date(date_obj, 'iso')
    human_date = helper.convert_date(date_obj, 'humain')
    # The month name is the second word of the human-readable date.
    month_name = human_date.split(' ')[1]

    return template.substitute({
        'isodateshort': iso_date,
        'month': month_name,
        'year': iso_date[:4],
        'humandate': human_date,
        'firstentry': entry_index,
    })
def update_monthly_index(new_entry_html, month_index_path):
    """Update the HTML monthly index with the new entry.

    Parameters
    ----------
    new_entry_html : str
        Entry markup of the form ``<li>etc…</li>``.
    month_index_path : str
        Path such as ``/2020/08/01/something.html``.

    Returns
    -------
    list of dict or None
        The updated list of entry dicts, or None when the index file
        could not be read.
    """
    try:
        parsed_month = lxml.html.parse(month_index_path)
    except OSError as err:
        # BUG FIX: ``logging.ERROR`` is the integer level constant and is
        # not callable, so the original raised TypeError inside this
        # handler.  Use the logging.error function (with lazy args).
        logging.error("Monthly Index not found: %s", err)
    else:
        month_index = parsed_month.getroot()
        # Get a list of dictionaries for entries
        entries = entries_as_dict(month_index)
        # Convert html entry to dict
        new_entry_xml = helper.make_xml(new_entry_html)
        new_entry = to_entry_dict(new_entry_xml)
        # Add the new entry to the list of entries
        update_entries(entries, new_entry)
        return entries
def update_entries(entries, new_entry):
    """Add ``new_entry`` to ``entries`` in place and sort by creation date.

    1. If an entry with the same ``created`` timestamp is already present,
       the new entry is not added.  (The original docstring said "URL",
       but the code compares ``created``.)
    2. The list is sorted chronologically on the ``created`` key.

    Returns the same (now sorted) list object.
    """
    if not any(d['created'] == new_entry['created'] for d in entries):
        entries.append(new_entry)
    # BUG FIX: the original rebound a local with sorted(), so the caller's
    # list was never actually sorted (update_monthly_index ignores the
    # return value).  Sort in place instead.
    entries.sort(key=lambda k: k['created'])
    return entries
def entries_as_dict(month_index):
    """Convert the month index XML into a list of entry dictionaries."""
    # XPath selecting every entry <li> of the month-index section.
    find_entries = etree.ETXPath("//section[@id='month-index']/ul/li")
    return [to_entry_dict(node) for node in find_entries(month_index)]
def to_entry_dict(entry_index_xml):
    """Convert one XML entry index node into a dictionary."""
    def first(expression):
        # Evaluate an XPath expression on the node, returning its first hit.
        return etree.ETXPath(expression)(entry_index_xml)[0]

    return {
        'created': first("time/@datetime"),
        'iso_short_date': first("time/text()"),
        'path': first("a/@href"),
        'title': first("a/text()"),
    }
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
37811,
198,
47,
945,
274,
290,
5992,
39199,
3696,
13,
198,
198,
41972,
416,
15415,
10322,
455,
319,
2864,
12,
3070,
12,
1495,
13,
198,
15269,
3... | 2.465417 | 1,547 |
#!/usr/bin/python3
from gpiozero import RGBLED, Button, LED, DigitalOutputDevice
from time import sleep
from subprocess import getoutput
from signal import pause
from os import system
'''
Script zum aktivieren verschiedener Ein- und Ausgabegeraete
-Rotary Encoder zur Lautstaerkeregelung
-RGB LED zur Anzeige/Visualisierung des Lautstaerkepegels
-Taster zum Pausieren der Wiedergabe und Herunterfahren des Pi
-Schalter/Taster und Relais zum abschalten des LCD
-Timer zum Abschalten des LCD
-Startup Lautstaerke
-UVLO selbsthaltung
Relays sollten über Transistor angesteuert werden:
https://www.elektronik-kompendium.de/public/schaerer/powsw3.htmhttps://www.elektronik-kompendium.de/public/schaerer/powsw3.htm
'''
'''
Einstellungen in diesem Script:
Pins und Betriebssystem(os) muss eingestellt werden.
Befehle werden dann automatisch angepasst.
Bei moOde muss Rotary Encoder ueber WebUI aktiviert werden (Standartpins 4,5(GPIO23,24))
Einstellungen an Pi:
Prinzipielle Einstellungen:
Service in systemd für dieses Script einrichten
Kontrollieren ob in /boot/config.txt folgende Parameter gesetzt sind:
disable_splash=1
hdmi_drive=2
dtparam=audio=off
max_usb_current=1
UVLO:
Das Relay zum Halten der Verbindung LiPo-UVLO wird über einen Transistor (GPIO9) angesteuert.
Die Einstellung dazu muss in /boot/config.txt hinzugefuegt werden:
dtoverlay=gpio-poweroff,gpiopin=9,active_low
LCD:
Das Relay zum Ein-/Ausschalten des Touchscreens wird über einen Transistor (GPIO11) angesteuert.
Einstellungen fuer 5Inch 15:9 Touch Display. Am Display selbst sollte 16:9 eingestellt werden
Der Touchscreen wird in /boot/config.txt konfiguriert:
hdmi_force_hotplug=1
hdmi_group=2
hdmi_mode=87
config_hdmi_boost=7
hdmi_cvt 800 480 60 6
display_hdmi_rotate=1
Touch eingabe wird in /etc/X11/xorg.conf.d/40-libinput.conf gedreht mit:
Option "CalibrationMatrix" "0 1 0 -1 0 1 0 0 1"
'''
# All buttons are pulled up except the rotary button.
# FIX: the variable was named `os`, which shadows the stdlib module name
# (the file does `from os import system` above); renamed to `target_os`.
target_os = "moode"
# target_os = "volumio"  # all volumio commands still need to be tested
'''
Pin assignment for the attached I/O.
GPIO22,23,24 are normally used for the rotary encoder.
Do not use these GPIOs:
GPIO0,GPIO1
GPIO0,1,26 are not on the proto HAT.
GPIO2,3,18,19,20,21 are used by I2C/JustboomAmp/HifiberryDAC/HifiberryAMP2
GPIO2,3,18,19,20,21,4 are used by HifiberryAMP2
'''
laser_BTN_P = 7  # push button to switch the LCD on and off
laser_LED_P = 8
missile_BTN_P = None  # not connected to GPIO; switches the device on
missile_LED_P = None  # possibly wired to the UVLO instead of a GPIO
lcd_RELAY_P = 11  # sclk; relay+transistor switching the LCD; screen is on while the pin is high
lcd_time = 600  # seconds after which the LCD is switched off automatically
uvlo_relay = 9  # miso; relay+transistor interrupting the UVLO measurement; configured in /boot/config
rot_clk_P = 23  # rotary encoder pins; middle pin to GND
rot_data_P = 24  # swap CLK and Data to reverse the turning direction
rot_BTN_P = 22  # pulled-down button to pause playback and shut down the Pi
rot_RGB_P_red = 14  # RGB LED visualising the volume level
rot_RGB_P_green = 27
rot_RGB_P_blue = 17  # the blue LED is not used for anything important yet
vol_red = 80  # volume from which the LED lights red
vol_green = 40  # volume up to which the LED lights green
holdtime = 5  # duration after which a button counts as held

try:
    laser_BTN = Button(laser_BTN_P, pull_up=True, bounce_time=None, hold_time=holdtime, hold_repeat=True)
    laser_LED = LED(laser_LED_P, active_high=True, initial_value=False)
    rot_BTN = Button(rot_BTN_P, pull_up=False, bounce_time=None, hold_time=holdtime, hold_repeat=False)
    rot_RGB = RGBLED(rot_RGB_P_red, rot_RGB_P_green, rot_RGB_P_blue, active_high=False, initial_value=(0,0,0))
#    missile_LED = LED(missile_LED_P, active_high=True, initial_value=True)
    lcd_RELAY = DigitalOutputDevice(lcd_RELAY_P, initial_value=False)

    vol_mid = vol_green + (vol_red - vol_green)/2

    if target_os == "moode":  # set a startup volume
        startupvol = str(20)
        getoutput("/var/www/vol.sh " + startupvol)

    # Let the rotary LED light up/blink to signal a completed startup.
    rot_RGB.blink(on_time=1, off_time=0.3, fade_in_time=0.0, fade_out_time=0.2, on_color=(0, 0, 0), off_color=(1, 0, 0), n=1, background=False)
    rot_RGB.blink(on_time=0.3, off_time=0.3, fade_in_time=0.2, fade_out_time=0.2, on_color=(0, 1, 1), off_color=(1, 0, 1), n=1, background=False)
    rot_RGB.blink(on_time=0.3, off_time=0.3, fade_in_time=0.2, fade_out_time=0.2, on_color=(1, 1, 0), off_color=(0, 0, 1), n=1, background=False)
    rot_RGB.blink(on_time=0.3, off_time=0.3, fade_in_time=0.2, fade_out_time=0.2, on_color=(1, 1, 1), off_color=(0, 1, 0), n=1, background=False)

    # NOTE(review): the callbacks toggle/held/toggle_lcd/rotation/vol_color
    # are not defined in this part of the file — confirm they exist above.
    rot_BTN.when_pressed = toggle
    rot_BTN.when_held = held
    laser_BTN.when_pressed = toggle_lcd
    if target_os == "volumio":
        rot_clk = Button(rot_clk_P, pull_up=True)
        rot_data = Button(rot_data_P, pull_up=True, hold_repeat=True)
        rot_clk.when_pressed = rotation  # must be re-enabled for volumio

    while True:
        rot_BTN.wait_for_release()
        vol_color()
        sleep(0.2)
finally:
    print("fertig")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
6738,
27809,
952,
22570,
1330,
25228,
30465,
11,
20969,
11,
12365,
11,
10231,
26410,
24728,
198,
6738,
640,
1330,
3993,
198,
6738,
850,
14681,
1330,
651,
22915,
198,
6738,
6737,
1330,
14985... | 2.272881 | 2,371 |
from django.utils.translation import ugettext_lazy as _
from django.forms import ModelForm, ValidationError
from .workflow import Account
from .constants import BANK_CODE_RANGE
| [
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198,
6738,
42625,
14208,
13,
23914,
1330,
9104,
8479,
11,
3254,
24765,
12331,
198,
6738,
764,
1818,
11125,
1330,
10781,
198,
6738,
764,
9979,
118... | 3.442308 | 52 |
"""Selection classes.
Represents an enumeration using a widget.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from collections import OrderedDict
from threading import Lock
from .widget import DOMWidget, register
from traitlets import (
Unicode, Bool, Any, Dict, TraitError, CaselessStrEnum, Tuple, List
)
from ipython_genutils.py3compat import unicode_type
class _Selection(DOMWidget):
    """Base class for Selection widgets

    ``options`` can be specified as a list or dict. If given as a list,
    it will be transformed to a dict of the form ``{str(value):value}``.

    When programmatically setting the value, a reverse lookup is performed
    among the options to set the value of ``selected_label`` accordingly. The
    reverse lookup uses the equality operator by default, but an other
    predicate may be provided via the ``equals`` argument. For example, when
    dealing with numpy arrays, one may set equals=np.array_equal.
    """

    # Currently selected option value.
    value = Any(help="Selected value")
    # Label (option key) of the selected value; synced with the frontend.
    selected_label = Unicode(help="The label of the selected value", sync=True)
    options = Any(help="""List of (key, value) tuples or dict of values that the
        user can select.

    The keys of this list are the strings that will be displayed in the UI,
    representing the actual Python choices.

    The keys of this list are also available as _options_labels.
    """)

    # Derived views of ``options``, rebuilt by _options_changed.
    _options_dict = Dict()
    _options_labels = Tuple(sync=True)
    _options_values = Tuple()

    disabled = Bool(False, help="Enable or disable user changes", sync=True)
    description = Unicode(help="Description of the value this widget represents", sync=True)

    def _options_changed(self, name, old, new):
        """Handles when the options tuple has been changed.

        Setting options implies setting option labels from the keys of the dict.
        """
        # Non-blocking acquire: re-entrant notifications triggered by the
        # assignments below are silently dropped.
        # NOTE(review): options_lock / _make_options / _value_in_options are
        # presumably set up elsewhere in the class (e.g. __init__) — confirm.
        if self.options_lock.acquire(False):
            try:
                self.options = new

                options = self._make_options(new)
                self._options_dict = {i[0]: i[1] for i in options}
                self._options_labels = [i[0] for i in options]
                self._options_values = [i[1] for i in options]
                self._value_in_options()
            finally:
                self.options_lock.release()

    def _value_changed(self, name, old, new):
        """Called when value has been changed"""
        if self.value_lock.acquire(False):
            try:
                # Reverse dictionary lookup for the value name
                for k, v in self._options_dict.items():
                    if self.equals(new, v):
                        # set the selected value name
                        self.selected_label = k
                        return
                # undo the change, and raise KeyError
                self.value = old
                raise KeyError(new)
            finally:
                self.value_lock.release()

    def _selected_label_changed(self, name, old, new):
        """Called when the value name has been changed (typically by the frontend)."""
        if self.value_lock.acquire(False):
            try:
                self.value = self._options_dict[new]
            finally:
                self.value_lock.release()
class _MultipleSelection(_Selection):
    """Base class for MultipleSelection widgets.

    As with ``_Selection``, ``options`` can be specified as a list or dict. If
    given as a list, it will be transformed to a dict of the form
    ``{str(value): value}``.

    Despite their names, ``value`` (and ``selected_label``) will be tuples, even
    if only a single option is selected.
    """

    value = Tuple(help="Selected values")
    selected_labels = Tuple(help="The labels of the selected options",
                            sync=True)

    # BUG FIX: this handler was erroneously decorated with @property, which
    # turned it into a broken descriptor (a property getter cannot take
    # name/old/new) and prevented it from ever acting as a change handler.
    def _value_changed(self, name, old, new):
        """Called when value has been changed"""
        if self.value_lock.acquire(False):
            try:
                # Map each selected value back to its label.
                self.selected_labels = [
                    self._options_labels[self._options_values.index(v)]
                    for v in new
                ]
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt /
                # SystemExit are no longer swallowed; unknown values make
                # .index() raise ValueError, which we surface as KeyError.
                self.value = old
                raise KeyError(new)
            finally:
                self.value_lock.release()

    def _selected_labels_changed(self, name, old, new):
        """Called when the selected label has been changed (typically by the
        frontend)."""
        if self.value_lock.acquire(False):
            try:
                self.value = [self._options_dict[name] for name in new]
            finally:
                self.value_lock.release()
@register('IPython.ToggleButtons')
class ToggleButtons(_Selection):
    """Group of toggle buttons that represent an enumeration. Only one toggle
    button can be toggled at any point in time."""
    _view_name = Unicode('ToggleButtonsView', sync=True)

    # Per-button tooltips and icon names, synced with the frontend view.
    tooltips = List(Unicode(), sync=True)
    icons = List(Unicode(), sync=True)

    button_style = CaselessStrEnum(
        values=['primary', 'success', 'info', 'warning', 'danger', ''],
        default_value='', allow_none=True, sync=True, help="""Use a
        predefined styling for the buttons.""")
@register('IPython.Dropdown')
class Dropdown(_Selection):
    """Allows you to select a single item from a dropdown."""
    _view_name = Unicode('DropdownView', sync=True)

    button_style = CaselessStrEnum(
        values=['primary', 'success', 'info', 'warning', 'danger', ''],
        default_value='', allow_none=True, sync=True, help="""Use a
        predefined styling for the buttons.""")
@register('IPython.RadioButtons')
class RadioButtons(_Selection):
    """Group of radio buttons that represent an enumeration. Only one radio
    button can be toggled at any point in time."""
    _view_name = Unicode('RadioButtonsView', sync=True)
@register('IPython.Select')
class Select(_Selection):
    """Listbox that only allows one item to be selected at any given time."""
    _view_name = Unicode('SelectView', sync=True)
@register('IPython.SelectMultiple')
class SelectMultiple(_MultipleSelection):
    """Listbox that allows many items to be selected at any given time.

    Despite their names, inherited from ``_Selection``, the currently chosen
    option values, ``value``, or their labels, ``selected_labels`` must both be
    updated with a list-like object."""
    _view_name = Unicode('SelectMultipleView', sync=True)
| [
37811,
4653,
1564,
6097,
13,
198,
198,
6207,
6629,
281,
27056,
341,
1262,
257,
26295,
13,
198,
37811,
198,
198,
2,
15069,
357,
66,
8,
449,
929,
88,
353,
7712,
4816,
13,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
40499,
347,
103... | 2.560956 | 2,551 |
"""Tests for views for REST APIs for channels"""
# pylint: disable=unused-argument
import pytest
from django.urls import reverse
from rest_framework import status
from open_discussions.constants import NOT_AUTHENTICATED_ERROR_TYPE
from open_discussions.factories import UserFactory
# Every test in this module replays recorded HTTP via betamax and requires
# the mocked channel fixture.
pytestmark = [pytest.mark.betamax, pytest.mark.usefixtures("mock_channel_exists")]
def test_list_subscribers(staff_client, staff_api, public_channel):
    """
    The correct list of subscriber usernames is returned.
    """
    users = UserFactory.create_batch(2)
    for user in users:
        staff_api.add_subscriber(user.username, public_channel.name)
    url = reverse("subscriber-list", kwargs={"channel_name": public_channel.name})
    resp = staff_client.get(url)
    assert resp.status_code == status.HTTP_200_OK
    # Response order is not asserted; only membership of each subscriber.
    for user in users:
        assert {"subscriber_name": user.username} in resp.json()
@pytest.mark.parametrize("attempts", [1, 2])
def test_add_subscriber(staff_client, user, public_channel, attempts):
    """
    Adds a subscriber to a channel as a staff user.

    With attempts=2 the POST is repeated to verify the endpoint is
    idempotent (still returns 201 with the same payload).
    """
    url = reverse("subscriber-list", kwargs={"channel_name": public_channel.name})
    for _ in range(attempts):
        resp = staff_client.post(
            url, data={"subscriber_name": user.username}, format="json"
        )
        assert resp.status_code == status.HTTP_201_CREATED
        assert resp.json() == {"subscriber_name": user.username}
def test_add_subscriber_mod(client, public_channel, staff_api, reddit_factories):
    """
    Adds a subscriber to a channel as a moderator.
    """
    moderator = reddit_factories.user("new_mod_user")
    new_subscriber = reddit_factories.user("new_sub_user")
    staff_api.add_moderator(moderator.username, public_channel.name)
    # Act as the moderator (not staff) for the request below.
    client.force_login(moderator)
    url = reverse("subscriber-list", kwargs={"channel_name": public_channel.name})
    resp = client.post(
        url, data={"subscriber_name": new_subscriber.username}, format="json"
    )
    assert resp.status_code == status.HTTP_201_CREATED
    assert resp.json() == {"subscriber_name": new_subscriber.username}
def test_add_subscriber_forbidden(staff_client, private_channel, user):
    """
    If a user gets a 403 from praw we should return a 403 status.
    """
    url = reverse("subscriber-list", kwargs={"channel_name": private_channel.name})
    resp = staff_client.post(
        url, data={"subscriber_name": user.username}, format="json"
    )
    assert resp.status_code == status.HTTP_403_FORBIDDEN
def test_add_subscriber_anonymous(client, user, public_channel):
    """
    Anonymous requests to add a subscriber are rejected with a 403.
    """
    list_url = reverse("subscriber-list", kwargs={"channel_name": public_channel.name})
    resp = client.post(
        list_url, data={"subscriber_name": user.username}, format="json"
    )
    assert resp.status_code == status.HTTP_403_FORBIDDEN
    assert resp.data["error_type"] == NOT_AUTHENTICATED_ERROR_TYPE
def test_detail_subscriber(user_client, private_channel_and_contributor):
    """
    The subscriber detail endpoint returns the subscriber's username.
    """
    channel, contributor = private_channel_and_contributor
    detail_url = reverse(
        "subscriber-detail",
        kwargs={"channel_name": channel.name, "subscriber_name": contributor.username},
    )
    resp = user_client.get(detail_url)
    assert resp.status_code == status.HTTP_200_OK
    assert resp.json() == {"subscriber_name": contributor.username}
def test_detail_subscriber_missing(user_client, private_channel, user):
    """
    Requesting detail for a user who is not subscribed yields a 404.
    """
    detail_url = reverse(
        "subscriber-detail",
        kwargs={"channel_name": private_channel.name, "subscriber_name": user.username},
    )
    resp = user_client.get(detail_url)
    assert resp.status_code == status.HTTP_404_NOT_FOUND
def test_detail_subscriber_anonymous(client, user, public_channel):
    """Anonymous users cannot view subscriber information."""
    detail_url = reverse(
        "subscriber-detail",
        kwargs={"channel_name": public_channel.name, "subscriber_name": user.username},
    )
    resp = client.get(detail_url)
    assert resp.status_code == status.HTTP_403_FORBIDDEN
    assert resp.data["error_type"] == NOT_AUTHENTICATED_ERROR_TYPE
@pytest.mark.parametrize("attempts", [1, 2])
def test_remove_subscriber(staff_client, staff_api, user, public_channel, attempts):
    """
    Deleting a subscriber succeeds, and repeating the delete also succeeds.
    """
    staff_api.add_subscriber(user.username, public_channel.name)
    detail_url = reverse(
        "subscriber-detail",
        kwargs={"channel_name": public_channel.name, "subscriber_name": user.username},
    )
    for _attempt in range(attempts):
        resp = staff_client.delete(detail_url)
        assert resp.status_code == status.HTTP_204_NO_CONTENT
def test_remove_subscriber_anonymous(client, user, public_channel):
    """Anonymous users cannot remove subscribers."""
    detail_url = reverse(
        "subscriber-detail",
        kwargs={"channel_name": public_channel.name, "subscriber_name": user.username},
    )
    resp = client.delete(detail_url)
    assert resp.status_code == status.HTTP_403_FORBIDDEN
    assert resp.data["error_type"] == NOT_AUTHENTICATED_ERROR_TYPE
| [
37811,
51,
3558,
329,
5009,
329,
30617,
23113,
329,
9619,
37811,
198,
2,
279,
2645,
600,
25,
15560,
28,
403,
1484,
12,
49140,
198,
11748,
12972,
9288,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
1334,
62,
30604,
1330... | 2.707304 | 1,903 |
import numpy as np
from fedot.core.chains.chain import Chain
from fedot.core.chains.node import PrimaryNode, SecondaryNode
from fedot.core.data.data_split import train_test_data_setup
from test.unit.models.test_split_train_test import get_roc_auc_value, get_synthetic_input_data
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
11672,
313,
13,
7295,
13,
38861,
13,
7983,
1330,
21853,
198,
6738,
11672,
313,
13,
7295,
13,
38861,
13,
17440,
1330,
21087,
19667,
11,
29521,
19667,
198,
6738,
11672,
313,
13,
7295,
13,
7... | 3.168539 | 89 |
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from Platforms.Web.index import WebIndex
from Platforms.Discord.main_discord import PhaazebotDiscord
import json
import discord
from aiohttp.web import Response, Request
from Utils.Classes.webrequestcontent import WebRequestContent
from Platforms.Discord.utils import getDiscordServerUsers, getDiscordServerUserAmount
from Platforms.Discord.levels import Calc as LevelCalc
from Platforms.Web.Processing.Api.errors import apiMissingData
from Platforms.Web.Processing.Api.Discord.errors import apiDiscordGuildUnknown
DEFAULT_LIMIT:int = 50 # entries returned when the request supplies no `limit`
MAX_LIMIT:int = 100 # hard upper bound enforced on the `limit` query value
async def apiDiscordLevelsGet(cls:"WebIndex", WebRequest:Request) -> Response:
	"""
	Default url: /api/discord/levels/get

	Returns a JSON payload with the stored level entries for one guild:
	`result` (list of level dicts), `total`, `limit`, `offset`, `detailed`.
	Requires `guild_id`; optional query values are limit, offset, member_id,
	detailed, nickname, name_contains, order and edited (see inline notes).
	Responds with an error payload when `guild_id` is missing or the bot is
	not in the requested guild.
	"""
	Data:WebRequestContent = WebRequestContent(WebRequest)
	await Data.load()
	# get required stuff
	guild_id:str = Data.getStr("guild_id", "", must_be_digit=True)
	limit:int = Data.getInt("limit", DEFAULT_LIMIT, min_x=1, max_x=MAX_LIMIT)
	offset:int = Data.getInt("offset", 0, min_x=0)
	member_id:str = Data.getStr("member_id", "", must_be_digit=True)
	detailed:bool = Data.getBool("detailed", False) # with names, avatar hash etc.
	nickname:bool = Data.getBool("nickname", False) # usernames or nicknames?
	name_contains:str = Data.getStr("name_contains", "")
	order:str = Data.getStr("order", "").lower() # order by
	edited:int = Data.getInt("edited", 0, min_x=0, max_x=2) # 0 = all, 1 = only nonedited, 2 = only edited
	# checks
	if not guild_id:
		return await apiMissingData(cls, WebRequest, msg="missing or invalid 'guild_id'")
	# format
	# Map the public `order` value onto a SQL ORDER BY clause; anything
	# unrecognised falls back to ordering by rank, then exp.
	# NOTE(review): `order` is interpolated into SQL downstream — presumably
	# safe because only these fixed strings are ever passed; confirm in
	# getDiscordServerUsers.
	if order == "id":
		order = "ORDER BY `id`"
	elif order == "member_id":
		order = "ORDER BY `member_id`"
	elif order == "currency":
		order = "ORDER BY `currency`"
	else:
		order = "ORDER BY `rank`, `exp`"
	PhaazeDiscord:"PhaazebotDiscord" = cls.Web.BASE.Discord
	Guild:discord.Guild = discord.utils.get(PhaazeDiscord.guilds, id=int(guild_id))
	if not Guild:
		return await apiDiscordGuildUnknown(cls, WebRequest)
	# get levels
	res_levels:list = await getDiscordServerUsers(PhaazeDiscord, guild_id=guild_id, member_id=member_id, limit=limit, offset=offset, order_str=order, edited=edited, name_contains=name_contains)
	return_list:list = list()
	for LevelUser in res_levels:
		level_user:dict = LevelUser.toJSON()
		if detailed:
			# Enrich each entry with live member data; Mem is None when the
			# user has left the guild (or is not cached).
			Mem:discord.Member = Guild.get_member(int(LevelUser.member_id))
			level_user["avatar"] = Mem.avatar if Mem else None
			level_user["level"] = LevelCalc.getLevel(LevelUser.exp)
			if not Mem:
				level_user["username"] = "[N/A]"
			else:
				if nickname and Mem.nick:
					level_user["username"] = Mem.nick
				else:
					level_user["username"] = Mem.name
		return_list.append(level_user)
	return cls.response(
		text=json.dumps( dict(
			result=return_list,
			total=await getDiscordServerUserAmount(PhaazeDiscord, guild_id),
			limit=limit,
			offset=offset,
			detailed=detailed,
			status=200)
		),
		content_type="application/json",
		status=200
	)
| [
6738,
19720,
1330,
41876,
62,
50084,
2751,
198,
361,
41876,
62,
50084,
2751,
25,
198,
197,
6738,
19193,
82,
13,
13908,
13,
9630,
1330,
5313,
15732,
198,
197,
6738,
19193,
82,
13,
15642,
585,
13,
12417,
62,
15410,
585,
1330,
1380,
64,
... | 2.672939 | 1,116 |
from manimlib.imports import *
from accalib.electrical_circuits import BatteryLampCircuit, BatteryLampCircuitAC
from accalib.particles import Electron
from accalib.lines import DottedLine
from accalib.tools import rule_of_thirds_guide
| [
6738,
582,
320,
8019,
13,
320,
3742,
1330,
1635,
198,
6738,
697,
282,
571,
13,
9509,
8143,
62,
21170,
15379,
1330,
23490,
43,
696,
31560,
5013,
11,
23490,
43,
696,
31560,
5013,
2246,
198,
6738,
697,
282,
571,
13,
3911,
2983,
1330,
5... | 3.210526 | 76 |
from .workflows.run_workflow import run_workflow

# Public API of this package: only run_workflow is exported on star-import.
__all__ = ['run_workflow']
| [
6738,
764,
1818,
44041,
13,
5143,
62,
1818,
11125,
1330,
1057,
62,
1818,
11125,
198,
198,
834,
439,
834,
796,
37250,
5143,
62,
1818,
11125,
20520,
198
] | 2.851852 | 27 |
import cv2
import numpy as np
import cmapy
import matplotlib.pyplot as plt

# Vegetation-index visualisation for one frame: computes the excess-green
# (ExG), excess-red (ExR) and ExG-ExR indices, then displays several
# colour-mapped renderings.

# NOTE: np.float / np.int were removed in NumPy 1.24+; the builtin `float`
# and `int` give the same dtypes (float64 / platform int).
img = cv2.imread('/home/pi/opencv/video0007_frame0007gt_R128x128.png').astype(float) # BGR, float

# BUGFIX: cv2.imread returns channels in BGR order (0 = blue, 1 = green,
# 2 = red); the original read blue from channel 2 and red from channel 0,
# swapping the two. ExG is symmetric in red+blue, but ExR was computed from
# the blue channel.
blue = img[:, :, 0]
green = img[:, :, 1]
red = img[:, :, 2]

exg = 2*green - red - blue # excess-green index
print("max exg", exg.max())
print("mean exg", exg.mean())
print("min exg", exg.min())
img = np.where(exg < 0, 0, exg).astype('uint8') # clip negatives

exr = 1.4*red - green # excess-red index
exr = np.where(exr < 0, 0, exr).astype('uint8')

exgr = exg - exr # ExG - ExR suppresses reddish background
print("max exgr", exgr.max())
print("mean exgr", exgr.mean())
print("min exgr", exgr.min())
exgr = np.where(exgr < 25, 0, exgr).astype('uint8') # threshold weak responses

img = img.astype(np.uint8) # convert back to uint8
exgr = exgr.astype(np.uint8) # convert back to uint8
exr = exr.astype(np.uint8) # convert back to uint8

# Contrast-limited adaptive histogram equalisation before colour mapping.
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
out = clahe.apply(exgr)
im_color = cv2.applyColorMap(out, cv2.COLORMAP_INFERNO)

img_c = cv2.applyColorMap(exgr, cmapy.cmap('Reds')).astype(int)
_, R, NIR = cv2.split(img_c)
img_c = img_c.astype(np.uint8) # convert back to uint8
img_c = cv2.applyColorMap(img_c, cv2.COLORMAP_INFERNO)

#cv2.imwrite('new-image.png', exgr) # save the image
cv2.imshow('exr', exr)
cv2.imshow('img', img)
cv2.imshow('exgr', exgr)
cv2.imshow("colormap", im_color)
cv2.imshow("img_c", img_c)
cv2.waitKey()
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
8899,
88,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
220,
628,
198,
9600,
796,
220,
269,
85,
17,
13,
320,
961,
10786,
14,
11195,
14,
14415,
... | 2.174475 | 619 |
"""
Front end API for the fuzzi_moss library.
"""
from .fuzz_decorator import fuzz, set_fuzzer
from .fuzz_weaver import fuzz_clazz, defuzz_class, fuzz_module, defuzz_all_classes
from .config import pydysofu_random
from .core_fuzzers import fuzzer_invocations, fuzzer_invocations_count, reset_invocation_counters
| [
37811,
198,
25886,
886,
7824,
329,
262,
26080,
72,
62,
76,
793,
5888,
13,
198,
37811,
198,
198,
6738,
764,
69,
4715,
62,
12501,
273,
1352,
1330,
26080,
11,
900,
62,
69,
4715,
263,
198,
6738,
764,
69,
4715,
62,
732,
8770,
1330,
260... | 2.95283 | 106 |
#!/usr/bin/env python
# Minimal Tkinter demo: a label plus a button that removes the label from
# the layout.

import Tkinter as Tk

root = Tk.Tk()

label = Tk.Label(root, text="Label")
label.pack(side='top')


def label_unpack():
    """Remove the label from the window layout (button callback)."""
    # BUGFIX: label_unpack was referenced below but never defined, so the
    # script raised NameError before the window could open. pack_forget()
    # is the pack-geometry equivalent of "unpack".
    label.pack_forget()


button = Tk.Button(root, text="unpack", command=label_unpack)
button.pack(side='top')

root.mainloop()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
309,
74,
3849,
355,
309,
74,
198,
15763,
28,
51,
74,
13,
51,
74,
3419,
198,
198,
18242,
28,
51,
74,
13,
33986,
7,
15763,
11,
2420,
2625,
33986,
4943,
198,
18242,
13,
... | 2.438202 | 89 |
import tensorlayerx as tlx
from gammagl.layers.conv import MessagePassing
class APPNPConv(MessagePassing):
    '''
    Approximate personalized propagation of neural predictions (APPNP).

    A message-passing convolution layer. NOTE(review): only the class shell
    with its docstring is visible here — presumably the propagation logic is
    implemented via the inherited MessagePassing interface; confirm against
    the full class definition.
    '''
| [
11748,
11192,
273,
29289,
87,
355,
256,
75,
87,
198,
6738,
308,
6475,
363,
75,
13,
75,
6962,
13,
42946,
1330,
16000,
14478,
278,
198,
198,
4871,
3486,
13137,
47,
3103,
85,
7,
12837,
14478,
278,
2599,
198,
220,
220,
220,
705,
7061,
... | 3.016129 | 62 |