content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from .logit import Logit
from .logit_embedding import LogitWithEmbedding
from .crnn import CRNN
from .crnn_attention import AttentionCRNN
from .naive_bayes import naiveBayes
from .rocchio import Rocchio
from .knn import KNN
from .svc import SVC_
from .linear_svc import LinearSVC_
from .random_forest import randomForest
from .extra_trees import ExtraTrees
from .lightgbm import LightGBM
from .xgboost import XGBoost
from .bagging import Bagging
MODELS = [
'naive_bayes',
'rocchio', 'knn',
'logit', 'logit-embedding',
'svc', 'linear_svc',
'random-forest', 'extra_trees', 'lightgbm', 'xgboost',
'bagging',
'crnn', 'crnn-attention',
] | [
6738,
764,
6404,
270,
1330,
5972,
270,
198,
6738,
764,
6404,
270,
62,
20521,
12083,
1330,
5972,
270,
3152,
31567,
6048,
278,
198,
6738,
764,
6098,
20471,
1330,
8740,
6144,
198,
6738,
764,
6098,
20471,
62,
1078,
1463,
1330,
47406,
9419,
... | 2.665323 | 248 |
import sys
inputFile = sys.argv[1]
# Read file and turn into string
with open(inputFile, 'rb') as file:
data = file.read()
# break bit stream down to consituent parts
encoded = (bin(int.from_bytes(data, byteorder="big")))[2:] # remove 0b
garbage = encoded[:8]
if garbage == "10000000": # just use canonical to decode
encoded = encoded[8:] # Remove garbage byte
canonical_bits = bitsToBytes(encoded)
huffman_decompress = CanonicalHuffmanDecoder(string=canonical_bits)
decompressed = huffman_decompress.decompress()
else:
encoded = encoded[8:]
# Seperate offset_byte_arr
length1 = int(encoded[:24],2)
encoded = encoded[24:]
offset_byte_arr = encoded[:length1*8]
encoded = encoded[length1*8:]
# Seperate length_byte_arr
length2 = int(encoded[:24],2)
encoded = encoded[24:]
length_byte_arr = encoded[:length2*8]
encoded = encoded[length2*8:]
# Seperate compressed
compressed = encoded
# Decompress chars using CanonicalHuffmanDecoder
compressed = bitsToBytes(compressed)
huffman_decompress = CanonicalHuffmanDecoder(string=compressed)
chars = huffman_decompress.decompress()
chars_list = []
for i in chars:
chars_list.append(i)
offsets = []
lengths = []
for i in range(0, len(offset_byte_arr), 16):
byte_pair = offset_byte_arr[i:i+16]
offsets.append(int(byte_pair, 2))
for i in range(0, len(length_byte_arr), 8):
byte = length_byte_arr[i:i+8]
lengths.append(int(byte, 2))
# Construct dictionary from 3 lists
LZ77Dictionary = list(zip(offsets, lengths, chars_list))
# Decode LZ77Dictionary
decompressed = LZ77Decoder(LZ77Dictionary)
# Marks end-of-file if at the end
if decompressed[-1] == '-': # In latex file doesn't finish with a '-'
decompressed = decompressed[:-1]
# Write compressed version to new file
#outputFile = inputFile + ".tex"
outputFile = inputFile[:-3] + "-decoded.tex"
with open(outputFile, "w", newline='\n') as output_file: #newline='\n' removes carriage return
output_file.write(decompressed) | [
11748,
25064,
220,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
15414,
8979,
796,
25064,
13,
853,
85,
58,
16,
60,
201,
198,
2,
4149,
2393,
290,... | 2.421355 | 871 |
from maps.plugins import OverlayLayer
from maps.utils.layers import register_overlay_layer
from rest_framework.reverse import reverse
register_overlay_layer('Species observations', SpeciesObservations)
register_overlay_layer('Species observations by interview', SpeciesObservationsFilteredByInterview)
| [
6738,
8739,
13,
37390,
1330,
3827,
10724,
49925,
198,
6738,
8739,
13,
26791,
13,
75,
6962,
1330,
7881,
62,
2502,
10724,
62,
29289,
198,
198,
6738,
1334,
62,
30604,
13,
50188,
1330,
9575,
198,
198,
30238,
62,
2502,
10724,
62,
29289,
10... | 4.013158 | 76 |
import math
res = str(math.factorial(100))
digits = [int(c) for c in res]
print(sum(digits)) | [
11748,
10688,
198,
411,
796,
965,
7,
11018,
13,
22584,
5132,
7,
3064,
4008,
198,
12894,
896,
796,
685,
600,
7,
66,
8,
329,
269,
287,
581,
60,
198,
4798,
7,
16345,
7,
12894,
896,
4008
] | 2.555556 | 36 |
# Generated by Django 3.2.12 on 2022-02-11 15:57
from django.db import migrations, models
import wagtail.core.blocks
import wagtail.core.fields
import wagtailmarkdown.blocks
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
1065,
319,
33160,
12,
2999,
12,
1157,
1315,
25,
3553,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
266,
363,
13199,
13,
7295,
13,
27372,
198,
11748,
266,
... | 2.983051 | 59 |
from collections import OrderedDict
import csv
import json
import os
JSON_REPORTS_DIR = 'genomes_final'
CSV_REPORTS_DIR = 'csv_genome_reports'
# CSV_MAP: an OrderedDict defining CSV header and corresponding JSON data.
# The values in this OrderedDict are functions that generate the needed value,
# given a Variant dict from the JSON-formatted report's 'Variants' array.
CSV_MAP = OrderedDict([
('variant', lambda v: v['Variant Name']),
('gene_symbol', lambda v:
v['Gene Symbol'] if 'Gene Symbol' in v else ''),
('summary', lambda v: v['Summary'] if v['Summary'] else ''),
('clinical_importance', lambda v: v['Clinical Importance']),
('evidence', lambda v: v['Evidence']),
('impact', lambda v: v['Impact']),
('frequency', lambda v:
v['Allele Frequency'] if v['Allele Frequency'] != '?' else ''),
('category', lambda v: ';'.join(v['Condition Tags'])),
('inheritance', lambda v: v['Inheritance']),
('zygosity', lambda v: v['Status']),
('PMIDs', lambda v:
';'.join(v['PMID List']) if 'PMID List' in v else ''),
# AFAICT there always - or almost always - only one dbSNP. -mpball
('dbSNP_ID', lambda v:
v['dbSNP IDs'][0] if 'dbSNP IDs' in v else ''),
('penetrance_score', lambda v:
v['Scores']['Penetrance'] if v['Scores']['Penetrance'] else ''),
('build_37_chromosome', lambda v:
v['Build 37 Chromosome'] if 'Build 37 Chromosome' in v else ''),
('build_37_position', lambda v:
v['Build 37 Position'] if 'Build 37 Position' in v else ''),
('build_37_variant_allele', lambda v:
v['Build 37 Variant Allele'] if 'Build 37 Variant Allele' in v
else ''),
('getev_report_url', lambda v: v['GET-Evidence Report URL']),
])
if __name__ == '__main__':
if not os.path.isdir(CSV_REPORTS_DIR):
os.mkdir(CSV_REPORTS_DIR)
write_csv_reports(JSON_REPORTS_DIR, CSV_REPORTS_DIR)
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
269,
21370,
198,
11748,
33918,
198,
11748,
28686,
198,
198,
40386,
62,
35316,
33002,
62,
34720,
796,
705,
5235,
2586,
62,
20311,
6,
198,
7902,
53,
62,
35316,
33002,
62,
34720,
796,
... | 2.565508 | 748 |
sortname = {
'bubblesort': f'Bubble Sort O(n\N{SUPERSCRIPT TWO})', 'insertionsort': f'Insertion Sort O(n\N{SUPERSCRIPT TWO})',
'selectionsort': f'Selection Sort O(n\N{SUPERSCRIPT TWO})', 'mergesort': 'Merge Sort O(n log n)',
'quicksort': 'Quick Sort O(n log n)', 'heapsort': 'Heap Sort O(n log n)'
} | [
30619,
3672,
796,
1391,
201,
198,
220,
220,
220,
705,
46176,
7689,
419,
10354,
277,
6,
33,
549,
903,
33947,
440,
7,
77,
59,
45,
90,
40331,
4877,
36584,
51,
35288,
30072,
3256,
705,
28463,
507,
419,
10354,
277,
6,
44402,
295,
33947,
... | 2.282609 | 138 |
"""
"""
from .api import \
parse, is_pattern, build, expand, match, get, has, update, remove, apply, \
register, transform, assemble, ANY, is_inverted, get_multi, update_multi, \
remove_multi, match_multi, quote
| [
37811,
198,
37811,
198,
6738,
764,
15042,
1330,
3467,
198,
220,
220,
220,
21136,
11,
318,
62,
33279,
11,
1382,
11,
4292,
11,
2872,
11,
651,
11,
468,
11,
4296,
11,
4781,
11,
4174,
11,
3467,
198,
220,
220,
220,
7881,
11,
6121,
11,
... | 2.947368 | 76 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628
] | 3.333333 | 6 |
# Generated by Django 2.1.5 on 2019-01-30 05:06
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
20,
319,
13130,
12,
486,
12,
1270,
8870,
25,
3312,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
# from django.contrib.auth.models import Group
from brumadinho.models import Geolocation, VisitedLocation, FoundPeople
from rest_framework import serializers
| [
2,
422,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
4912,
198,
6738,
865,
388,
17072,
8873,
13,
27530,
1330,
2269,
349,
5040,
11,
6911,
863,
14749,
11,
4062,
8061,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
628,
198
... | 3.809524 | 42 |
# https://www.hackerrank.com/challenges/any-or-all/problem
if __name__ == '__main__':
N = int(input())
usr_list = input().split()
positive_condition = list(map(lambda x: x[0] != "-", usr_list))
palindromic_condition = list(map(lambda x: "".join(reversed(x)) == x, usr_list))
lenght = len(positive_condition)
positive = True
palindromic = False
lenght = len(positive_condition)
for i in range(lenght):
if not(positive_condition[i]):
positive = False
break
if palindromic_condition[i]:
palindromic = True
print(palindromic and positive)
| [
198,
2,
3740,
1378,
2503,
13,
31153,
8056,
962,
13,
785,
14,
36747,
34120,
14,
1092,
12,
273,
12,
439,
14,
45573,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
220,
198,
220,
220,
220,
399... | 1.758157 | 521 |
# SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: CC0-1.0
import contextlib
import logging
import os
import socket
from typing import Iterator
import pytest
from pytest_embedded import Dut
@contextlib.contextmanager
@pytest.mark.esp32
@pytest.mark.ip101
@pytest.mark.parametrize('config', [
'ip101',
], indirect=True)
@pytest.mark.flaky(reruns=3, reruns_delay=5)
@pytest.mark.esp32
@pytest.mark.lan8720
@pytest.mark.parametrize('config', [
'lan8720',
], indirect=True)
@pytest.mark.flaky(reruns=3, reruns_delay=5)
| [
2,
30628,
55,
12,
8979,
15269,
8206,
25,
33160,
20386,
601,
361,
11998,
357,
2484,
272,
20380,
8,
7375,
42513,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
12624,
15,
12,
16,
13,
15,
198,
198,
11748,
4732,
8019,
198,
11748,
... | 2.58296 | 223 |
# -*- coding: utf-8 -*-
from engine import scene
class MenuMain(scene.Scene):
""" The main menu scene. """
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
3113,
1330,
3715,
198,
198,
4871,
21860,
13383,
7,
29734,
13,
36542,
2599,
198,
220,
220,
220,
37227,
383,
1388,
6859,
3715,
13,
37227,
198,
220,
220,
220,
... | 2.469388 | 49 |
import inspect
import os
import numpy as np
import cv2 as cv
import torch
from redrawing.data_interfaces.bodypose import BodyPose
from redrawing.data_interfaces.image import Image
from redrawing.components.stage import Stage
import redrawing.third_models.lightweight_human_modules as lhm
from ..third_models.lightweight_human_modules.models.with_mobilenet import PoseEstimationWithMobileNet
from ..third_models.lightweight_human_modules.keypoints import extract_keypoints, group_keypoints
from ..third_models.lightweight_human_modules.load_state import load_state
from ..third_models.lightweight_human_modules.pose import Pose, track_poses
from ..third_models.lightweight_human_modules.image_tools import normalize, pad_width
| [
11748,
10104,
198,
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
11748,
28034,
198,
198,
6738,
2266,
1831,
278,
13,
7890,
62,
3849,
32186,
13,
2618,
3455,
1330,
12290,
47,
577,
198,... | 3.480952 | 210 |
import math
import skimage
from skimage import io, transform, viewer, color, data, filters, feature, morphology, exposure
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from scipy import fftpack
from matplotlib.colors import LogNorm
from scipy import ndimage
from scipy.signal import argrelextrema
'''
This method would be a collection of the other code, but to compare the photos instead of the scans
As well as trying to see if any lines are detected, this will also compare how the brighness of the lightbox effects things
'''
brightness1 = np.load('photo1b1_scan.npy')
brightness2 = np.load('photo1b2_scan.npy')
brightness3 = np.load('photo1b3_scan.npy')
#print(np.shape(brightness1), np.shape(brightness2), np.shape(brightness3))
#this results in (3024, 4032) (3024, 4032) (3024, 4032)
'''
All of the pictures are currently landscape, and the tests have been looking for vertical lines, so shape must be rotated
'''
rotated1 = transform.rotate(brightness1, 90)
rotated2 = transform.rotate(brightness2, 90)
rotated3 = transform.rotate(brightness3, 90)
'''
Images must be cropped to hopefully remove all of the background in the image that isn't the sample
'''
cropped1 = rotated1[200:-800,800:-1000]
cropped2 = rotated2[200:-800,800:-1000]
cropped3 = rotated3[200:-800,800:-1000]
'''
This was used just to check the crop was to the correct dimensions
fig, ax = plt.subplots(ncols=3, nrows=1)
ax[0].imshow(cropped1, cmap='gray')
ax[0].set(xlabel='', ylabel = '', title = 'Brightness 1')
ax[1].imshow(cropped2, cmap='gray')
ax[1].set(xlabel='', ylabel = '', title = 'Brightness 2')
ax[2].imshow(cropped3, cmap='gray')
ax[2].set(xlabel='', ylabel = '', title = 'Brightness 3')
plt.show()
'''
'''
Next, experimenting with increasing the contrast
'''
c_min1, c_max1 = np.percentile(cropped1, (1,99)) #these are the paramters for the contrast
contrasted1 = exposure.rescale_intensity(cropped1, in_range=(c_min1, c_max1))#returns the image with increased contrast
c_min2, c_max2 = np.percentile(cropped2, (1,99))
contrasted2 = exposure.rescale_intensity(cropped2, in_range=(c_min1, c_max2))
c_min3, c_max3 = np.percentile(cropped3,(1,99))
contrasted3 = exposure.rescale_intensity(cropped3,in_range=(c_min3, c_max3))
'''
Used to plot the increased contrast version of the images
fig, ax = plt.subplots(ncols=3, nrows=1)
ax[0].imshow(contrasted1, cmap='gray')
ax[0].set(xlabel='', ylabel = '', title = 'Brightness 1')
ax[1].imshow(contrasted2, cmap='gray')
ax[1].set(xlabel='', ylabel = '', title = 'Brightness 2')
ax[2].imshow(contrasted3, cmap='gray')
ax[2].set(xlabel='', ylabel = '', title = 'Brightness 3')
plt.show()
'''
'''
Take the Fourier transform of each of the images
'''
fourier1 = fftpack.fft2(contrasted1)
fourier2 = fftpack.fft2(contrasted2)
fourier3 = fftpack.fft2(contrasted3)
'''
Plot the Fourier transform against the original image, and then the detected lines
fig, ax = plt.subplots(ncols=3,nrows=2,figsize =(8,2.5))
ax[0][0].imshow(cropped1, cmap='gray')
ax[0][0].set(xlabel='', ylabel = '', title = 'Brightness 1')
ax[0][1].imshow(cropped2, cmap='gray')
ax[0][1].set(xlabel='', ylabel = '', title = 'Brightness 2')
ax[0][2].imshow(cropped3, cmap='gray')
ax[0][2].set(xlabel='', ylabel = '', title = 'Brightness 3')
ax[1][0].plot(np.arange(0,np.size(cropped1[0]),1),np.abs(cropped1[1000]) )
ax[1][0].set(xlabel='pixel number', ylabel='FT',
title='Fourier Transform of 1000th row of pixels')
ax[1][0].grid()
ax[1][1].plot(np.arange(0,np.size(cropped2[0]),1),np.abs(cropped2[1000]) )
ax[1][1].set(xlabel='pixel number', ylabel='FT',
title='Fourier Transform of 1000th row of pixels')
ax[1][1].grid()
ax[1][2].plot(np.arange(0,np.size(cropped3[0]),1),np.abs(cropped3[1000]) )
ax[1][2].set(xlabel='Pixel Number', ylabel = 'FT',
title = 'Fourier Transform of 1000th row of pixels')
ax[1][2].grid()
plt.show()
'''
'''
Use argrelextrema to find lines within the 1000th row of pixels
'''
max_positions1 = argrelextrema(fourier1[1000], np.greater)
max_positions2 = argrelextrema(fourier2[1000], np.greater)
max_positions3 = argrelextrema(fourier3[1000], np.greater)
'''
Plot the detected lines from the max points of the FT
fix, ax = plt.subplots(ncols=3, nrows=2, figsize=(8,2.5))
ax[0][0].imshow(cropped1, cmap='gray')
ax[0][0].set(xlabel='', ylabel = '', title = 'Original Sample')
ax[0][1].imshow(cropped2, cmap='gray')
ax[0][1].set(xlabel='', ylabel = '', title = 'Higher Contrast Sample')
ax[0][2].imshow(cropped3, cmap='gray')
ax[0][2].set(xlabel='', ylabel = '', title = 'Sample - WTH Transform')
ax[1][0].imshow(cropped1,cmap='gray')
ax[1][0].vlines(max_positions1,color = 'yellow', ymin=0, ymax=1000, linewidth = 1)
ax[1][0].set(xlabel='', ylabel = '', title = 'Detected Lines')
ax[1][1].imshow(cropped2,cmap='gray')
ax[1][1].vlines(max_positions2,color = 'yellow', ymin=0, ymax=1000, linewidth=1)
ax[1][1].set(xlabel='', ylabel = '', title = 'Detected lines')
ax[1][2].imshow(cropped3,cmap='gray')
ax[1][2].vlines(max_positions3,color = 'yellow', ymin=0, ymax=1000, linewidth = 1)
ax[1][2].set(xlabel='', ylabel = '', title = 'Detected lines')
plt.show()
''' | [
11748,
10688,
201,
198,
11748,
1341,
9060,
201,
198,
6738,
1341,
9060,
1330,
33245,
11,
6121,
11,
19091,
11,
3124,
11,
1366,
11,
16628,
11,
3895,
11,
46320,
11,
7111,
201,
198,
6738,
3108,
8019,
1330,
10644,
201,
198,
11748,
2603,
294... | 2.40027 | 2,221 |
import torch
from process_data.files_utils import init_folders
import shutil
from models.encoders.pointnet import PointNet, PointNetDual
from models.model_gm import PointGMM
from custom_types import *
from tqdm import tqdm
import os
import pickle
import options
DEBUG = False
| [
11748,
28034,
198,
6738,
1429,
62,
7890,
13,
16624,
62,
26791,
1330,
2315,
62,
11379,
364,
198,
11748,
4423,
346,
198,
6738,
4981,
13,
12685,
375,
364,
13,
4122,
3262,
1330,
6252,
7934,
11,
6252,
7934,
36248,
198,
6738,
4981,
13,
1984... | 3.439024 | 82 |
from conans import ConanFile, CMake
| [
6738,
369,
504,
1330,
31634,
8979,
11,
327,
12050,
628,
198
] | 3.454545 | 11 |
from typing import Optional
import abc
import logging
| [
6738,
19720,
1330,
32233,
198,
11748,
450,
66,
198,
11748,
18931,
628
] | 4.583333 | 12 |
import signal
import sys
from math import factorial
import sheduling
from itertools import *
cellsCount = 6 * 2 * len(sheduling.times)
maxFitness = -1000000000
theBestShedule = None
i = 0
# оптимизация. Не используем утро, вечер( крайние отрезки времени) и субботу
cells = list(filter(isBestCell, range(cellsCount)))
rcells = list(filter(isBestCell, range(cellsCount - 1, -1, -1)))
print(cells)
print(rcells)
shedule = sheduling.GlobalShedule()
length1 = permCount(len(cells), len(sheduling.lessons1))
length2 = permCount(len(rcells), len(sheduling.lessons2))
length = int(length1 * length2)
for lessonIndexes1 in permutations(cells, len(sheduling.lessons1)):
for lessonIndexes2 in permutations(rcells, len(sheduling.lessons2)):
# print("Iter ", i)
i += 1
shedule.for248 = convert(sheduling.lessons1, lessonIndexes1)
shedule.for247 = convert(sheduling.lessons2, lessonIndexes2)
if shedule.isValid():
fitness = shedule.fitness()
# print("Check ", i, "/", length, " shedule with fitness ", fitness)
if fitness > maxFitness:
print("New best shedule ", i, "/", length, " shedule with fitness ", fitness)
theBestShedule = shedule
maxFitness = fitness
save()
print(theBestShedule)
print(theBestShedule.fitness())
| [
11748,
6737,
201,
198,
11748,
25064,
201,
198,
6738,
10688,
1330,
1109,
5132,
201,
198,
201,
198,
11748,
14999,
16619,
201,
198,
6738,
340,
861,
10141,
1330,
1635,
201,
198,
201,
198,
46342,
12332,
796,
718,
1635,
362,
1635,
18896,
7,
... | 2.153729 | 657 |
from os.path import join
from poket.core import create_profile, Configmap
from poket.conf import settings
from os.path import join
| [
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
6738,
279,
482,
316,
13,
7295,
1330,
2251,
62,
13317,
11,
17056,
8899,
198,
6738,
279,
482,
316,
13,
10414,
1330,
6460,
198,
6738,
28686,
13,
6978,
1330,
4654,
628
] | 3.5 | 38 |
"""Let's learn Python"""
import string
class StrManipulator(object):
"""First Class"""
def process_string(self, unsan_str):
"""Process string into useful list"""
table = str.maketrans({key: None for key in string.punctuation})
return unsan_str.translate(table).lower().split()
def char2num(self, char):
"""Given char into encoded number string"""
if isinstance(char, str) and len(char) == 1:
if ord(char)-97 < 10:
return '0' + str(ord(char)-97)
else:
return str(ord(char)-97)
def str2num(self, word):
"""Given string into encoded number string"""
enc_str = ""
i = 0
while i < len(word):
enc_str += self.char2num(word[i])
i += 1
return enc_str
| [
37811,
5756,
338,
2193,
11361,
37811,
198,
11748,
4731,
198,
198,
4871,
4285,
5124,
541,
8927,
7,
15252,
2599,
198,
220,
220,
220,
37227,
5962,
5016,
37811,
628,
220,
220,
220,
825,
1429,
62,
8841,
7,
944,
11,
5576,
272,
62,
2536,
2... | 2.200535 | 374 |
import argparse
import random
import RPi.GPIO as GPIO
import time
parser = argparse.ArgumentParser()
parser.add_argument('k', type=int, help='GPIO number')
args = parser.parse_args()
main(args.k)
| [
11748,
1822,
29572,
198,
11748,
4738,
198,
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
198,
11748,
640,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
48610,
13,
2860,
62,
49140,
10786,
74,
3256,
2099,
28,
600,... | 3.030769 | 65 |
import pygame, random
from pathlib import PurePath
from GameModules.ColorPalette import ColorPalette
from GameObjects.Sprite import Sprite
class PipeSet:
"""
Represents an obstacle in the game
"""
def __init__(self):
"""
Initializes a new instance of the pipe class
"""
self.canvas_height, self.canvas_width = pygame.display.get_surface().get_size()
self.color_palette = ColorPalette()
self.__x_coordinate = self.canvas_width
self.__pipe_width = 60
self.__passable_space_height = self.__pipe_width
self.offset = self.__pipe_width // 2 # Divide the pipe's width then floor (//) the result
# Sprites
self.__pipe_head = Sprite(str(PurePath("res/Images/pipe_head.png")))
self.__pipe_body = Sprite(str(PurePath("res/Images/pipe_body.png")))
self.top_pipe_height = 0
self.bottom_pipe_height = 0
# Pipe parameters
self.pipe_head_dimensions = (self.__pipe_width, self.offset)
self.bottom_pipe_body_dimensions = None
self.top_pipe_body_dimensions = None
self.bottom_pipe_y_coordinate = None
self.construct()
@property
@property
def to_canvas(self, canvas):
"""
Draws the bird onto the specified canvas or surface \n
:param canvas: The surface wherein the pipe set is to be drawn on
"""
pipe_x_coordinate = self.__x_coordinate - self.offset
# Parameters for the top pipe
top_pipe_body_location = (pipe_x_coordinate, -self.offset)
top_pipe_head_location = (pipe_x_coordinate, (self.top_pipe_height - self.offset))
# Parameters for the bottom pipe
bottom_pipe_body_location = (pipe_x_coordinate, self.bottom_pipe_y_coordinate)
bottom_pipe_head_location = (pipe_x_coordinate, self.bottom_pipe_y_coordinate)
# Draw the top pipe
self.__pipe_body.to_canvas(canvas=canvas, location=top_pipe_body_location,
dimensions=self.top_pipe_body_dimensions)
self.__pipe_head.to_canvas(canvas=canvas, location=top_pipe_head_location,
dimensions=self.pipe_head_dimensions)
# Draw the bottom pipe
self.__pipe_body.to_canvas(canvas=canvas, location=bottom_pipe_body_location,
dimensions=self.bottom_pipe_body_dimensions)
self.__pipe_head.to_canvas(canvas=canvas, location=bottom_pipe_head_location,
dimensions=self.pipe_head_dimensions)
def construct(self):
"""
Calculates the height and gap between the two pipes in the pipe set
"""
max_height = abs((self.canvas_width // 2) - self.__passable_space_height * 4)
self.bottom_pipe_height = random.choice(range(max_height, self.canvas_height))
self.top_pipe_height = random.choice(range(self.__passable_space_height, max_height))
self.bottom_pipe_body_dimensions = (self.__pipe_width, self.bottom_pipe_height * 4)
self.top_pipe_body_dimensions = (self.__pipe_width, self.top_pipe_height)
self.bottom_pipe_y_coordinate = self.bottom_pipe_height + self.offset
def scroll(self, scroll_speed):
"""
Gradually move the pipe towards the bird and off the screen
"""
self.__x_coordinate -= scroll_speed
def collide(self, bird):
"""
Determines whether the bird had collided with either of the two pipes in the pipe set \n
:param bird: The bird object that the player controls \n
:return:True if the bird had collided with any of the pipes in the set
"""
bird_in_contact = ((self.x_coordinate - self.pipe_width) - bird.x_coordinate) <= 0
mid_point_y = ((self.top_pipe_height + self.bottom_pipe_height) // 2) - self.pipe_width
if (bird_in_contact and (bird.y_coordinate <= self.top_pipe_height and bird.y_coordinate < mid_point_y)) or \
(bird_in_contact and (bird.y_coordinate >= self.bottom_pipe_height and bird.y_coordinate > mid_point_y)):
return True
else:
return False
def is_cleared(self, bird):
"""
Determines whether the bird has successfully passed through the pipe \n
:return: True if the bird did not hit any of the pipes in the pipe set
"""
mid_point_x = (self.__x_coordinate + bird.x_coordinate) // 2
mid_point_y = (self.top_pipe_height + self.bottom_pipe_height) // 2
is_in_between_pipes = ((bird.y_coordinate >= mid_point_y) or (bird.y_coordinate <= mid_point_y))
# if the bird passes through the are in-between the two pipes, it has cleared the obstacle
if bird.x_coordinate == mid_point_x and is_in_between_pipes:
return True
return False
| [
11748,
12972,
6057,
11,
4738,
198,
6738,
3108,
8019,
1330,
17129,
15235,
198,
6738,
3776,
5841,
5028,
13,
10258,
11531,
5857,
1330,
5315,
11531,
5857,
198,
6738,
3776,
10267,
82,
13,
38454,
578,
1330,
33132,
628,
198,
4871,
36039,
7248,
... | 2.393418 | 2,036 |
import sys
import util
import countries_code_index
import countries_code_individual
if __name__ == "__main__":
run(sys.argv[1])
| [
11748,
25064,
201,
198,
11748,
7736,
201,
198,
11748,
2678,
62,
8189,
62,
9630,
201,
198,
11748,
2678,
62,
8189,
62,
43129,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
201,
198,
220,
220,
220,
1057,
... | 2.692308 | 52 |
from abc import ABC
import numpy
| [
6738,
450,
66,
1330,
9738,
198,
198,
11748,
299,
32152,
628,
628
] | 3.083333 | 12 |
import numpy as np
import nrefocus
from scipy.ndimage import gaussian_filter
import pytest
import qpimage
if __name__ == "__main__":
# Run all tests
_loc = locals()
for _key in list(_loc.keys()):
if _key.startswith("test_") and hasattr(_loc[_key], "__call__"):
_loc[_key]()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
299,
5420,
10901,
198,
6738,
629,
541,
88,
13,
358,
9060,
1330,
31986,
31562,
62,
24455,
198,
198,
11748,
12972,
9288,
198,
198,
11748,
10662,
79,
9060,
628,
628,
198,
361,
11593,
3672,
834,
... | 2.381679 | 131 |
from yacs.config import CfgNode as CN
| [
6738,
331,
16436,
13,
11250,
1330,
327,
40616,
19667,
355,
31171,
628
] | 3.25 | 12 |
import networkx as nx
import matplotlib.pyplot as plt
class Node():
"""This is the base class for a nx node
It has basic electrical properties like
Voltage (All nodes have voltage)
Charge (Pins can collect charge, like capacitors)
Nodes compute functions as well, which based on the
edges the node is connected to,
"""
pass
class Wire():
"""This is the base class for a nx edge
It has basic electrical properties like
Voltage
Current
"""
pass
class Pin(Node):
"""A pin is a node
"""
pass
class ComponentCore(Node):
"""The "core" of a component is a node. It is surrounded by other nodes
which are pins.
"""
pass
class Component(nx.Graph):
"""A component is a graph
"""
# An "empty" component is a graph with 1 node (ComponentCore), itself
pass
class Sheet(nx.Graph):
"""A sheet is just a graph with a draw function and some other stuff
"""
pass
class Schematic(nx.Graph):
"""A schematic is a graph which contains many sheets, etc.
"""
pass
n = Node()
G = nx.Graph()
G.add_node(n)
print(G.nodes)
# G.add_nodes_from([2, 3])
# G.add_edge(1, 2)
# G.add_edge(1, 3)
# G.add_edge(2, 3)
# nx.draw(G, with_labels=True, node_size=1500, node_color="skyblue",
# node_shape="s", alpha=0.5, linewidths=40)
# plt.show()
| [
11748,
3127,
87,
355,
299,
87,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
4871,
19081,
33529,
198,
220,
220,
220,
37227,
1212,
318,
262,
2779,
1398,
329,
257,
299,
87,
10139,
628,
220,
220,
220,
632,
468... | 2.597353 | 529 |
from flask_api import FlaskAPI
from flask import request
from functools import partial, reduce
from mapping import ResponseMapper
from constants import PROJECT_NAME, APIS
from constants import CACHE_NAME, CACHE_DB, SYNC_INTERVAL
import requests_cache as cache
import requests
import json
import pdb
import os
import re
dbreak = pdb.set_trace
# initialize the request caching
cache.install_cache(
cache_name = CACHE_NAME,
backend = CACHE_DB,
expire_after = SYNC_INTERVAL
)
class FlexApp(FlaskAPI):
'''
App for hosting an API.
'''
class HTTPExchange(object):
'''
A wrapper around the flask request and response objects that constitute
the http exchange for a given endpoint. This is passed to the mapping
methods. Only accepts the response object since it contains the request
object.
'''
@property
@property
@property
@property
def set_passthroughs(app, mapper):
'''
Passthrough routing methods for the PokeAPI endpoints.
All make corresponding HTTP requests and pass a response object
to the defined mapper. Import an instance of JsonMapper and define
json mappings using the @maps(...) decorator to specify
a json transformation.
'''
request_handlers = { method: make_handler(method) for method in ['GET', 'POST', 'PUT', 'DELETE'] }
def dispatch(endpoint, **params):
'''
Executes the request handler for the current request (as supplied by the
flask request class), obtains the response, then maps it using the
mapper object passed to set_passthroughs
'''
full_uri = make_uri(endpoint, params)
api_request_handler = request_handlers[request.method]
response = api_request_handler(full_uri, request)
exchange = HTTPExchange(response, params=params)
return mapper.map(endpoint, exchange)
def assign_routing_rules_for_api(api_data):
'''
@param api_data: A dictionary with two key-value pairs:
- base_uri: The base uri that prefixes all endpoints
- endpoints: A dictionary of endpoint names to endpoints.
Any parameters specified using flask syntax
'''
base_uri = api_data["base_uri"]
uri_list = api_data["endpoints"].items()
for name, endpoint in uri_list:
full_uri = base_uri + endpoint
f = partial(dispatch, full_uri)
f.__name__ = name
app.add_url_rule(endpoint, view_func=f, methods=request_handlers.keys())
for api_info in APIS.values():
assign_routing_rules_for_api(api_info)
def make_uri(uri, params):
'''
Replaces parameters in the uri with parameters in the
params dict. The uri endpoint should specify parameters
using the flask syntax <type:name>, such as <string:color>
'''
for name, value in params.items():
pattern = '<[a-zA-Z]*:name>'.replace('name', name)
uri = re.sub(pattern, str(value), uri, count=1)
return uri
| [
6738,
42903,
62,
15042,
1330,
46947,
17614,
198,
6738,
42903,
1330,
2581,
198,
6738,
1257,
310,
10141,
1330,
13027,
11,
4646,
198,
6738,
16855,
1330,
18261,
44,
11463,
198,
6738,
38491,
1330,
21965,
23680,
62,
20608,
11,
3486,
1797,
198,
... | 2.671616 | 1,145 |
import userStory03
import userStory04
(listOfPeople,listOfFamilies)= project3.gedComParse('error.ged')
project3.present(listOfPeople,listOfFamilies)
# (listOfPeople1,listOfFamilies1)= project3.gedComParse('correct.ged')
# project3.present(listOfPeople1,listOfFamilies1)
userStory03.userStory03(listOfPeople)
userStory04.userStory04(listOfFamilies)
userStory19.us_19(listOfPeople, listOfFamilies)
userStory20.us_20(listOfPeople, listOfFamilies)
| [
11748,
2836,
11605,
3070,
201,
198,
11748,
2836,
11605,
3023,
201,
198,
201,
198,
7,
4868,
5189,
8061,
11,
4868,
5189,
37,
321,
3922,
47505,
1628,
18,
13,
2004,
5377,
10044,
325,
10786,
18224,
13,
2004,
11537,
201,
198,
16302,
18,
13,... | 2.58427 | 178 |
#
#
# File to test current configuration of Pyloric project.
#
# To execute this type of file, type 'nC.bat -python XXX.py' (Windows)
# or 'nC.sh -python XXX.py' (Linux/Mac). Note: you may have to update the
# NC_HOME and NC_MAX_MEMORY variables in nC.bat/nC.sh
#
# Author: Padraig Gleeson
#
# This file has been developed as part of the neuroConstruct project
# This work has been funded by the Medical Research Council and the
# Wellcome Trust
#
#
import sys
import os
try:
from java.io import File
except ImportError:
print "Note: this file should be run using nC.bat -python XXX.py' or 'nC.sh -python XXX.py'"
print "See http://www.neuroconstruct.org/docs/python.html for more details"
quit()
sys.path.append(os.environ["NC_HOME"]+"/pythonNeuroML/nCUtils")
import ncutils as nc
projFile = File(os.getcwd(), "../lobster_pyloric_2004.ncx")
############## Main settings ##################
simConfigs = []
simConfigs.append("Test3STGcells")
simDt = 0.01
simulators = ["NEURON"]
numConcurrentSims = 4
varTimestepNeuron = False
plotSims = True
plotVoltageOnly = True
runInBackground = True
analyseSims = True
verbose = False
#############################################
if __name__ == "__main__":
testAll()
| [
2,
201,
198,
2,
201,
198,
2,
220,
220,
9220,
284,
1332,
1459,
8398,
286,
350,
2645,
8146,
1628,
13,
201,
198,
2,
201,
198,
2,
220,
220,
1675,
12260,
428,
2099,
286,
2393,
11,
2099,
705,
77,
34,
13,
8664,
532,
29412,
27713,
13,
... | 2.344426 | 601 |
'''
MIT License
Copyright (c) 2021 Caio Alexandre
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
'''
import enum
import asyncio
from typing import Optional
import discord
from discord.ext import commands, flags
from discord.ext.menus import Button
from eris import Eris
from utils.context import ErisContext
from utils.menus import MenuBase
from utils.time import FutureTime
PUNISHMENTS_CHANNEL_ID = 798013309617176587
class ReasonMenu(MenuBase):
'''Menu para selecionar o motivo da punição.'''
class Mod(commands.Cog, name='Moderação'):
'''Comandos relacionados a moderação do servidor.'''
@commands.Cog.listener()
# TODO: Hackban.
# TODO: Banir mais de uma pessoa de uma vez.
@flags.command(aliases=['b'])
@flags.add_flag('--quiet', '-q', action='store_true')
@flags.command(aliases=['k'])
@flags.add_flag('--quiet', '-q', action='store_true')
@flags.command(aliases=['c'])
@flags.add_flag('--user', type=discord.User, nargs='+')
@flags.add_flag('--contains', type=str, nargs='+')
@flags.add_flag('--starts', type=str, nargs='+')
@flags.add_flag('--ends', type=str, nargs='+')
@flags.add_flag('--emoji', action='store_true')
@flags.add_flag('--bot', action='store_const', const=lambda m: m.author.bot)
@flags.add_flag('--embeds', action='store_const', const=lambda m: len(m.embeds))
@flags.add_flag('--files', action='store_const', const=lambda m: len(m.attachments))
@flags.add_flag('--reactions', action='store_const', const=lambda m: len(m.reactions))
@flags.add_flag('--after', type=int)
@flags.add_flag('--before', type=int)
@commands.Cog.listener()
| [
7061,
6,
198,
36393,
13789,
198,
198,
15269,
357,
66,
8,
33448,
6488,
952,
21000,
260,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
1659,
428,
3788,
290,
3917,
10314,
3696,
... | 2.955255 | 961 |
import re
import time
from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, cast
import wrapt # type: ignore
from robot.libraries.BuiltIn import BuiltIn # type: ignore
from robot.utils import timestr_to_secs # type: ignore
from .utils import AssertionOperator, is_truthy, logger
NumericalOperators = [
AssertionOperator["=="],
AssertionOperator["!="],
AssertionOperator[">="],
AssertionOperator[">"],
AssertionOperator["<="],
AssertionOperator["<"],
]
SequenceOperators = [
AssertionOperator["*="],
AssertionOperator["validate"],
AssertionOperator["=="],
AssertionOperator["!="],
]
handlers: Dict[AssertionOperator, Tuple[Callable, str]] = {
AssertionOperator["=="]: (lambda a, b: a == b, "should be"),
AssertionOperator["!="]: (lambda a, b: a != b, "should not be"),
AssertionOperator["<"]: (lambda a, b: a < b, "should be less than"),
AssertionOperator[">"]: (lambda a, b: a > b, "should be greater than"),
AssertionOperator["<="]: (lambda a, b: a <= b, "should be less than or equal"),
AssertionOperator[">="]: (lambda a, b: a >= b, "should be greater than or equal"),
AssertionOperator["*="]: (lambda a, b: b in a, "should contain"),
AssertionOperator["matches"]: (lambda a, b: re.search(b, a), "should match"),
AssertionOperator["^="]: (
lambda a, b: re.search(f"^{re.escape(b)}", a),
"should start with",
),
AssertionOperator["$="]: (
lambda a, b: re.search(f"{re.escape(b)}$", a),
"should end with",
),
AssertionOperator["validate"]: (
lambda a, b: BuiltIn().evaluate(b, namespace={"value": a}),
"should validate to true with",
),
}
T = TypeVar("T")
@wrapt.decorator
| [
11748,
302,
198,
11748,
640,
198,
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
360,
713,
11,
7343,
11,
32233,
11,
309,
29291,
11,
5994,
19852,
11,
3350,
198,
198,
11748,
7917,
457,
220,
1303,
2099,
25,
8856,
198,
6738,
9379,
13,
75,
... | 2.477401 | 708 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the ModelFactory class that can resolve model class names and create/load
appropriate models
"""
import logging
from typing import Union, Type
from .helpers import register_model, ModelType
from .model import ModelConfig, AbstractModel, AbstractModelFactory
from .tagger_models import TaggerModelFactory
from .text_models import TextModelFactory
logger = logging.getLogger(__name__)
class ModelFactory:
"""Auto class that identifies appropriate text/tagger model from text_models.py/tagger_models.py
to load one based on the inputted configs or from the loaded configs file.
The .create_model_from_config() methods allows to load the appropriate model when a ModelConfig
is passed. The .create_model_from_path() method uses AbstractModel's load method to load a
dumped config, which is then used to load appropriate model and return it through a metadata
dictionary object.
"""
@classmethod
def create_model_from_config(
cls,
model_config: Union[dict, ModelConfig]
) -> Type[AbstractModel]:
"""
Instantiates and returns a valid model from the specified model configs
Args:
model_config (Union[dict, ModelConfig]): Model configs inputted either as dict or an
instance of ModelConfig
Returns:
model (Type[AbstractModel]): A text/tagger model instance
Raises:
ValueError: When the configs are invalid
"""
is_valid_config = model_config and isinstance(model_config, (ModelConfig, dict))
if not is_valid_config:
msg = f"Need a valid model config to create a text/tagger model in ModelFactory. " \
f"Found model_config={model_config} of type({type(model_config)})"
raise ValueError(msg)
model_config = cls._resolve_model_config(model_config)
model_type = cls._get_model_type(model_config)
model_class = cls._get_model_factory(model_type).get_model_cls(model_config)
return model_class(model_config)
@classmethod
def create_model_from_path(cls, path: str) -> Union[None, Type[AbstractModel]]:
"""
Loads and returns a model from the specified path
Args:
path (str): A pickle file path from where a model can be loaded
Returns:
model (Union[None, Type[AbstractModel]]): Returns None when the specified path is not
found or if the model loaded from the specified path is a NoneType. If found a valid
config and a valid model, the model is load by calling .load() method and returned
Raises:
ValueError: When the path is invalid
"""
if not (path and isinstance(path, str)):
msg = f"Need a valid path to load a text/tagger model in ModelFactory. " \
f"Found path={path} of type({type(path)})"
raise ValueError(msg)
if not path.endswith(".pkl"):
msg = "Model Path must end with .pkl for ModelFactory to be able to identify the model"
raise ValueError(msg)
try:
# if loading from path, determine the ABCModel type & return after doing XxxModel.load()
model_config = AbstractModel.load_model_config(path)
model_config = cls._resolve_model_config(model_config)
model_type = cls._get_model_type(model_config)
model_class = cls._get_model_factory(model_type).get_model_cls(model_config)
return model_class.load(path)
except FileNotFoundError:
# sometimes a model (and its config file) might not be dumped, eg. in role classifiers
# or even if dumped, can be of NoneType enclosed in a dictionary
msg = f"No model file found while trying to load model from path: {path}. It might " \
f"be the case that the classifier didn't need a model due to one or no classes."
logger.warning(msg)
return None
@staticmethod
def _resolve_model_config(model_config: Union[dict, ModelConfig]) -> ModelConfig:
"""
Resolves and returns model configs.
Args:
model_config (Union[dict, ModelConfig]): If inputted as a dict, a new instance of
ModelConfig is created with that dict and returned.
Returns:
model_config (ModelConfig): An instance of ModelConfig
Raises:
ValueError: When the model config is of an invalid type
"""
# format configs
if isinstance(model_config, dict):
model_config = ModelConfig(**model_config)
# validate configs
if not isinstance(model_config, ModelConfig):
msg = f"Expected input config to be either a valid dictionary or an instance of " \
f"ModelConfig class, but found of type {type(model_config)}"
raise ValueError(msg)
return model_config
@staticmethod
def _get_model_type(model_config: ModelConfig) -> ModelType:
"""
Returns model type from the model config and validates if it is a valid type or not.
Args:
model_config (ModelConfig): An instance of ModelConfig
Returns:
model_type (ModelType): The model type obtained from configs
Raises:
ValueError: When the model type is invalid
"""
model_type = model_config.model_type
try:
return ModelType(model_type)
except ValueError as e:
msg = f"Invalid model configuration: Unknown model type {model_type}. " \
f"Known types are: {[v.value for v in ModelType.__members__.values()]}"
raise ValueError(msg) from e
@staticmethod
def _get_model_factory(model_type: ModelType) -> Type[AbstractModelFactory]:
"""
Returns a factory based on the provided model type
Args:
model_type (ModelType): An object of ModelType specifying the type of model to create
Returns:
model_factory (Type[AbstractModelFactory]): A model factory for specified model_type
"""
return {
ModelType.TEXT_MODEL: TextModelFactory,
ModelType.TAGGER_MODEL: TaggerModelFactory,
}[model_type]
@staticmethod
ModelFactory.register_models()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
66,
8,
1853,
28289,
11998,
11,
3457,
13,
290,
1854,
13,
220,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,... | 2.642563 | 2,669 |
import os
import sys
import time
import numpy as np
from .network import res2num_blocks
from .model import StyleGANModel
from ..base_executor import ExecutorBase
from ...utils import image_utils, utils
from ...utils.decorator import tpu_decorator
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
764,
27349,
1330,
581,
17,
22510,
62,
27372,
198,
6738,
764,
19849,
1330,
17738,
45028,
17633,
198,
6738,
11485,
8692,
62,
18558,
38409,
1330... | 3.528571 | 70 |
# vi: set shiftwidth=4 tabstop=4 expandtab:
import datetime
import re
import math
target_area_re = r"^target area: x=([-0-9]+)..([-0-9]+), y=([-0-9]+)..([-0-9]+)$"
# Analysis of the x position
#############################
# step is [0, 1, 2, 3, ..., n
# x-velocity is [vx, vx-1, vx-2, vx-3, ..., vx - n, ...,
# until it reaches 0 then it stays 0
# x-position is [0, vx, 2*vx-1, 3*vx-(1+2), ..., n*vx - sum(1..n-1)
# until it reaches the speed is 0 then position does not change
# x(n) = n*vx - sum(1..n-1)
# = n*vx - n*(n-1)/2
# = n * (vx - (n-1)/2)
# Analysis of the y position
#############################
# step is [0, 1, 2, 3, ..., n
# y-velocity is [vy, vy-1, vy-2, vy-3, ..., vy - n, ...,
# until it reaches 0 (at the top) then same in reverse
# y-position is [0, vy, 2*vy-1, 3*vy-(1+2), ..., n*vy - sum(1..n-1)
# until it reaches the top then same in reverse
# y(n) = n*vy - sum(1..n-1)
# = n*vy - n*(n-1)/2
# = n * (vy - (n-1)/2)
# = -n²/2 + n(2vy+1)/2
#
# Based on the properties of the roots of a second degree polynom:
# - actual maximum reached on n = vy + 1/2 which is halfway between
# integers corresponding to the actual integer maximum (vy & vy+1)
# - position is 0 for:
# * n = 0
# * n = 2*vy + 1
# We have the following values around the roots:
# * y(0) = 0
# * y(1) = vy
# * ...
# * y(2vy) = vy
# * y(2vy+1) = 0
# * y(2vy+2) = - vy - 1
# Hence, integer, non-zero values cannot be reached if in the ]-vy-1, vy[ interval
# Going the other way round from a reached position y:
# - if y > 0: 0 < vy <= y
# - if y < 0:
# * if vy > 0, then y <= -vy - 1 < 0 (on the way down after the maximum)
# * if vy < 0, then y <= vy < 0 (direct shoot downward)
# Finding whether a point or an area is reachable
def yield_divisors_using_divisions(n):
"""Yields distinct divisors of n.
This uses sucessive divisions so it can be slower than
yield_divisors_using_primes_factorisation on big inputs but it is easier
to understand, the upper bound of O(sqrt(n)) is guarantee and faster on
small inputs."""
assert n > 0
yield 1
if n > 1:
yield n
for i in range(2, int(math.sqrt(n)) + 1):
if n % i == 0:
j = n // i
yield i
if i != j:
yield j
if __name__ == "__main__":
begin = datetime.datetime.now()
run_tests()
get_solutions()
end = datetime.datetime.now()
print(end - begin)
| [
2,
25357,
25,
900,
6482,
10394,
28,
19,
7400,
11338,
28,
19,
4292,
8658,
25,
198,
11748,
4818,
8079,
198,
11748,
302,
198,
11748,
10688,
198,
198,
16793,
62,
20337,
62,
260,
796,
374,
1,
61,
16793,
1989,
25,
2124,
16193,
58,
12,
1... | 2.109864 | 1,247 |
import sys
import os
import configparser
from clint.textui import puts, indent, colored
class config(object):
""" This class represents the config file.
:param string path_file: Path to config file
"""
## Check the validity of the config file
def check(self):
""" Check every options in configuration file """
## General section
# Check if content_folder is set
if not self.content_folder:
print(" \033[91m::\033[0m \"content_folder\" must be given in pyame.conf (general section)")
sys.exit(0)
if self.content_folder == "resources" or self.content_folder == self.archive_path:
print(" \033[91m::\033[0m \"content_folder\" cant be \"resources\", or \"%s\". (general section)" % archive_path)
sys.exit(0)
# Check if template_name is set
if not self.template_name:
print(" \033[93m::\033[0m \"template_name\" must be given in pyame.conf (general section)")
sys.exit(0)
# Check if website_url is set
if not self.website_url:
self.website_url = "/"
if not self.tpl_path or not self.static_path:
print(" \033[91m::\033[0m \"tpl_path\", \"static_path\" must be given in pyame.conf (general section)")
sys.exit(0)
# Others section
# Check if archive is set
if self.archive != "true" and self.archive != "false" or not self.archive:
print(" \033[91m::\033[0m \"archive\" must be \"true\" or \"false\" in pyame.conf (others section)")
sys.exit(0)
# Create defaults files
# Check if content_folder exist, if not, create it.
if not os.path.exists(self.content_folder):
print(" \033[93m::\033[0m \"content_folder\" you have given not exist. It will be automatically create")
os.makedirs(self.content_folder)
# Check if template_name exit
self.template_path = "%s/%s" % (self.tpl_path, self.template_name)
if not os.path.exists(self.template_path) or not os.path.exists("%s/index.html" % self.template_path) or not os.path.exists("%s/view.html" % self.template_path):
print(" \033[91m::\033[0m \"template_name\" you have given not exist.\n \033[93m::\033[0m These files: index.html, view.html must be in template folder.")
sys.exit(0)
# Remote section
# Check remote section
if self.remote != "true" and self.remote != "false" or not self.remote:
print(" \033[91m::\033[0m \"remote\" must be \"true\" or \"false\" in pyame.conf (remote section)")
sys.exit(0)
if self.remote == "true":
if self.remote_host == "":
print(" \033[91m::\033[0m \"remote_host\" must be given in pyame.conf (remote section)")
sys.exit(0)
if self.remote_user == "":
print(" \033[91m::\033[0m \"remote_user\" must be given in pyame.conf (remote section)")
sys.exit(0)
if self.remote_path == "":
print(" \033[91m::\033[0m \"remote_path\" must be given in pyame.conf (remote section)")
sys.exit(0)
def string_to_list(self, elements):
""" Transform a String elements into a List
:param string elements: The string elements extracted from the config file
:rtype: list
"""
elements_list = []
for element in elements.split(','):
elements_list.append(element.replace('"', '').strip())
return elements_list
| [
11748,
25064,
198,
11748,
28686,
198,
11748,
4566,
48610,
198,
198,
6738,
537,
600,
13,
5239,
9019,
1330,
7584,
11,
33793,
11,
16396,
198,
198,
4871,
4566,
7,
15252,
2599,
198,
220,
220,
220,
37227,
220,
220,
220,
770,
1398,
6870,
262... | 2.313826 | 1,555 |
#!/bin/env python
"""
Build and possibly publish product images. It doesn't login to any registry when publishing
but it assumes a `docker login` has been performed before.
Usage: build_product_images.py --help
Example:
build_product_images.py --product zookeeper,kafka -image_version 0.1 --push
This whill build an image for each Apache Zookeeper and APache Kafka version configured in conf.py
"""
import conf
import argparse
import subprocess
import sys
import re
def build_image_args(version):
"""
Returns a list of --build-arg command line arguments that are used by the
docker build command.
Arguments:
- version: Can be a str, in which case it's considered the PRODUCT
or a dict.
"""
result = []
if isinstance(version, dict):
for k, v in version.items():
result.extend(['--build-arg', f'{k.upper()}={v}'])
elif isinstance(version, str):
result=['--build-arg', f'PRODUCT={version}']
else:
raise ValueError(f'Unsupported version object: {version}')
return result
def build_image_tags(image_name, image_version, product_version):
"""
Returns a list of --tag command line arguments that are used by the
docker build command.
Each image is tagged with three tags as follows:
1. <product>-<dependency1>-<dependency2>...-<image>
2. <product>-<dependency1>-<dependency2>...-<platform>
3. <product>-<platform>
"""
result = []
platform_version = re.search(r'^\d+', image_version)[0]
if isinstance(product_version, dict):
dep_versions = "-".join([f'{key}{value}' for key, value in product_version.items() if key != "product"])
image_tag = "-".join([product_version['product'], dep_versions, f'stackable{image_version}'])
platform_tag = "-".join([product_version['product'], dep_versions, f'stackable{platform_version}'])
latest_tag = "-".join([product_version['product'], f'stackable{platform_version}'])
result.extend([
'-t', f'{image_name}:{image_tag}',
'-t', f'{image_name}:{platform_tag}',
'-t', f'{image_name}:{latest_tag}'
])
elif isinstance(product_version, str):
result.extend([
'-t', f'{image_name}:{product_version}-stackable{image_version}',
'-t', f'{image_name}:{product_version}-stackable{platform_version}'])
else:
raise ValueError(f'Unsupported version object: {product_version}')
return result
def build_and_publish_image(args, products):
"""
Returns a list of commands that need to be run in order to build and
publish product images.
"""
commands = []
for p in products:
for v in p['versions']:
image_name=f'{args.registry}/stackable/{p["name"]}'
tags = build_image_tags(image_name, args.image_version, v)
build_args = build_image_args(v)
commands.append(['docker', 'build', *build_args, *tags, '-f', p["name"] + '/Dockerfile', '.'])
if args.push:
commands.append(['docker', 'push', '--all-tags', image_name])
return commands
def run_commands(dry, commands):
"""
Runs the commands to build and publish images. In dry-run mode it only
lists the command on stdout.
"""
for cmd in commands:
if dry:
subprocess.run(['echo', *cmd])
else:
ret = subprocess.run(cmd)
if ret.returncode != 0:
sys.exit(1)
if __name__ == "__main__":
main()
| [
2,
48443,
8800,
14,
24330,
21015,
198,
37811,
198,
15580,
290,
5457,
7715,
1720,
4263,
13,
632,
1595,
470,
17594,
284,
597,
20478,
618,
12407,
198,
4360,
340,
18533,
257,
4600,
45986,
17594,
63,
468,
587,
6157,
878,
13,
198,
198,
2835... | 2.462759 | 1,450 |
import pytest
from itables import show
from itables.sample_dfs import (
get_countries,
get_dict_of_test_dfs,
get_dict_of_test_series,
get_indicators,
get_population,
)
@pytest.mark.parametrize("df_name,df", get_dict_of_test_dfs().items())
@pytest.mark.parametrize("series_name,series", get_dict_of_test_series().items())
| [
11748,
12972,
9288,
198,
198,
6738,
340,
2977,
1330,
905,
198,
6738,
340,
2977,
13,
39873,
62,
7568,
82,
1330,
357,
198,
220,
220,
220,
651,
62,
9127,
1678,
11,
198,
220,
220,
220,
651,
62,
11600,
62,
1659,
62,
9288,
62,
7568,
82,... | 2.423611 | 144 |
import json
import pandas as pd
import numpy as np
import os
import string
import tensorflow as tf
class DataIngestion:
"""Class for ingesting data from the dataset directory""" | [
11748,
33918,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
4731,
198,
11748,
11192,
273,
11125,
355,
48700,
628,
198,
4871,
6060,
27682,
395,
295,
25,
198,
197,
37811,
9487,
32... | 3.673469 | 49 |
# Copyright (c) 2022 Massachusetts Institute of Technology
# SPDX-License-Identifier: MIT
import collections.abc as abc
import enum
import random
import string
import sys
from dataclasses import dataclass, field as dataclass_field
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, TypeVar, Union
import hypothesis.strategies as st
import pytest
from hypothesis import given, settings
from omegaconf import II, OmegaConf, ValidationError
from omegaconf.errors import (
ConfigIndexError,
ConfigTypeError,
ConfigValueError,
KeyValidationError,
)
from typing_extensions import Final, Literal
from hydra_zen import builds, instantiate, mutable_value
from hydra_zen._compatibility import Version, _get_version
from hydra_zen.structured_configs._utils import (
field,
is_interpolated_string,
safe_name,
sanitized_type,
)
from hydra_zen.typing import Builds
from tests import everything_except
T = TypeVar("T")
current_module: str = sys.modules[__name__].__name__
@pytest.mark.parametrize(
"obj, expected_name",
[
(1, "1"),
(dict, "dict"),
(C, "C"),
(C(), "C as a repr"),
("moo", "'moo'"),
(None, "None"),
(f, "f"),
],
)
@given(
st.from_type(type)
) # this draws any type that has a strategy registered with hypothesis!
NoneType = type(None)
@pytest.mark.parametrize(
"in_type, expected_type",
[
(int, int), # supported primitives
(float, float),
(str, str),
(bool, bool),
(Color, Color),
(C, Any), # unsupported primitives
(type(None), Any),
(set, Any),
(list, Any),
(tuple, Any),
(dict, Any),
(callable, Any),
(frozenset, Any),
(T, Any),
(Literal[1, 2], Any), # unsupported generics
(Type[int], Any),
(Builds, Any),
(Builds[int], Any),
(Type[Builds[int]], Any),
(Set, Any),
(Set[int], Any),
(Final[int], Any),
(Callable, Any),
(Callable[[int], int], Any),
(abc.Callable, Any),
(abc.Mapping, Any),
(Union[str, int], Any),
(Optional[frozenset], Any),
(Union[NoneType, frozenset], Any),
(Union[NoneType, int], Optional[int]), # supported Optional
(Optional[Color], Optional[Color]),
(Optional[List[Color]], Optional[List[Color]]),
(Optional[List[List[int]]], Optional[List[Any]]),
(List[int], List[int]), # supported containers
(List[frozenset], List[Any]),
(List[List[int]], List[Any]),
(List[T], List[Any]),
(Dict[str, float], Dict[str, float]),
(Dict[C, int], Dict[Any, int]),
(Dict[str, C], Dict[str, Any]),
(Dict[C, C], Dict[Any, Any]),
(Dict[str, List[int]], Dict[str, Any]),
(Tuple[str], Tuple[str]),
(Tuple[str, ...], Tuple[str, ...]),
(Tuple[str, str, str], Tuple[str, str, str]),
(Tuple[List[int]], Tuple[Any]),
(Union[NoneType, Tuple[int, int]], Optional[Tuple[int, int]]),
(Union[Tuple[int, int], NoneType], Optional[Tuple[int, int]]),
],
)
@pytest.mark.parametrize(
"func, value", [(f_list, [1]), (f_dict, dict(a=1)), (f_tuple, (1,))]
)
# II renders a string in omegaconf's interpolated-field format
@given(st.text(alphabet=string.ascii_lowercase, min_size=1).map(II))
@settings(deadline=None)
@given(everything_except(str))
@given(st.text(alphabet=string.printable))
@given(
major=st.integers(0, 100),
minor=st.integers(0, 100),
patch=st.integers(0, 100),
# tests Hydra-style and OmegaConf-style dev/rc-style
# release strings. See:
# https://pypi.org/project/hydra-core/#history
# https://pypi.org/project/omegaconf/#history
patch_suffix=st.just("")
| st.integers(0, 100).map(lambda x: f"rc{x}")
| st.integers(0, 100).map(lambda x: f".dev{x}"),
)
| [
2,
15069,
357,
66,
8,
33160,
10140,
5136,
286,
8987,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
11748,
17268,
13,
39305,
355,
450,
66,
198,
11748,
33829,
198,
11748,
4738,
198,
11748,
4731,
198,
11748,
25064,
198,
... | 2.263849 | 1,751 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install keyboard
import keyboard
keyboard.add_hotkey('shift', lambda: keyboard.write('on'))
keyboard.add_hotkey('shift', lambda: keyboard.write('off'), trigger_on_release=True)
# Block forever.
keyboard.wait()
# OR, wait Escape:
# keyboard.wait('esc')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
705,
541,
21879,
1077,
6,
628,
198,
2,
7347,
2721,
10586,
198,
11748,
10586,
198,
2539,... | 2.806723 | 119 |
from __future__ import division
from scitbx.math import clustering
from scitbx.array_family import flex
if __name__ == '__main__':
run()
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
629,
270,
65,
87,
13,
11018,
1330,
32966,
1586,
198,
6738,
629,
270,
65,
87,
13,
18747,
62,
17989,
1330,
7059,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
... | 3.043478 | 46 |
s=input("Enter a string:")
swap_case(s) #Calling the function
| [
82,
28,
15414,
7203,
17469,
257,
4731,
25,
4943,
198,
2032,
499,
62,
7442,
7,
82,
8,
220,
220,
220,
220,
1303,
48593,
262,
2163,
198,
220,
220,
220,
220
] | 2.333333 | 30 |
#!/usr/bin/env python3
import argparse
from textblob import TextBlob
from textblob.en.sentiments import NaiveBayesAnalyzer
import nltk
def initialize_model():
'''Initializes a textblob naive bayes sentiment analysis model.
'''
nltk.download('movie_reviews')
nltk.download('punkt')
# prevent exposing the learner
global analyzer
analyzer = NaiveBayesAnalyzer()
analyzer.train()
def predict(text):
'''Returns the polarity value after performing inference on a text.
Uses a trained textblob sentiment analysis model.
If the difference between positive and negative probabilities is too small, the result is considered neutral.
Args:
text: Text to perform inference on.
'''
MIN_DELTA = 0.1
preds = analyzer.analyze(text)
p_pos = preds[1]
p_neg = preds[2]
diff = p_pos - p_neg
if diff >= MIN_DELTA:
return 'positive'
elif diff <= -MIN_DELTA:
return 'negative'
else:
return 'neutral'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='process arguments required for performing inference with trained model on input text')
parser.add_argument('-t', '--text', help='string: text to perform inference on', type=str, action='store', required=True)
args = parser.parse_args()
initialize_model()
polarity = predict(args.text)
print(polarity)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
1822,
29572,
198,
198,
6738,
2420,
2436,
672,
1330,
8255,
3629,
672,
198,
6738,
2420,
2436,
672,
13,
268,
13,
34086,
6800,
1330,
11013,
425,
15262,
274,
37702,
9107,
19... | 3.058962 | 424 |
#!/usr/bin/env python
import os
import subprocess
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)
def remove_file(filepath):
"""Remove a file if it exists."""
try:
os.remove(os.path.join(PROJECT_DIRECTORY, filepath))
except FileNotFoundError:
pass
def execute(*args, suppress_exception=False, cwd=None):
"""Execute a command and return is stdout as a string."""
proc = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
)
out, err = proc.communicate()
out = out.decode("utf-8")
err = err.decode("utf-8")
if err and not suppress_exception:
raise Exception(err)
else:
return out
def init_git():
"""Init git project if it's not initialized yet."""
if not os.path.exists(os.path.join(PROJECT_DIRECTORY, ".git")):
execute("git", "init", cwd=PROJECT_DIRECTORY)
def install_pre_commit_hooks():
"""Install a pre-commit hook."""
execute("pre-commit", "install")
if __name__ == "__main__":
if "{{ cookiecutter.create_author_file }}" != "y":
remove_file("AUTHORS.md")
remove_file("docs/authors.md")
if "Not open source" == "{{ cookiecutter.open_source_license }}":
remove_file("LICENSE")
try:
init_git()
except Exception as e:
print(e)
if "{{ cookiecutter.install_precommit_hooks }}" == "y":
install_pre_commit_hooks()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
198,
31190,
23680,
62,
17931,
23988,
15513,
796,
28686,
13,
6978,
13,
5305,
6978,
7,
418,
13,
6978,
13,
66,
2799,
343,
8,
628,
198,
4299,
4... | 2.397993 | 598 |
""" Copyright (c) 2017-2021 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
"""
import neoml.PythonWrapper as PythonWrapper
from .Dnn import Layer
from neoml.Utils import check_input_layers
class Lrn(Layer):
    r"""Lrn layer performs local response normalization with the following formula:
    :math:`LRN(x)[obj][ch] = x[obj][ch] / ((bias + alpha * sqrSum[obj][ch] / windowSize) ^ beta)`
    where :math:`obj` is index of the object, :math:`ch` is index of the channel,
    :math:`window_size`, :math:`bias`, :math:`alpha` and :math:`beta` are layer settings
    and :math:`sqrSum[obj][ch] = \sum_{i=\max(0, ch - \lfloor\frac{windowSize - 1}{2}\rfloor)}^{\min(C - 1, ch + \lceil\frac{windowSize - 1}{2}\rceil)}x[obj][i]^2`

    :param input_layer: The input layer and the number of its output. If no number
        is specified, the first output will be connected.
    :type input_layer: object, tuple(object, int)
    :param window_size: The size of the window used in normalization
    :type window_size: int, default=1
    :param bias: value added to the scaled sum of squares
    :type bias: float, default=1.
    :param alpha: scale value of sum of squares
    :type alpha: float, default=1e-4
    :param beta: exponent of the formula
    :type beta: float, default=0.75

    .. rubric:: Layer inputs:

    (1) the set of objects, of dimensions:

        - **BatchLength** * **BatchWidth** * **ListSize** * **Height** * **Width** * **Depth** - the number of objects
        - **Channels** - the size of the object

    .. rubric:: Layer outputs:

    (1) the result of the layer, of the dimensions of the input.
    """

    @property
    def window_size(self):
        """Gets the window size.
        """
        return self._internal.get_window_size()

    @window_size.setter
    def window_size(self, window_size):
        """Sets the window size.
        """
        # Cast to int so any integer-like value is accepted by the C++ wrapper.
        self._internal.set_window_size(int(window_size))

    @property
    def bias(self):
        """Gets the bias.
        """
        return self._internal.get_bias()

    @bias.setter
    def bias(self, bias):
        """Sets the bias.
        """
        # float() cast for consistency with the int() cast in window_size.
        self._internal.set_bias(float(bias))

    @property
    def alpha(self):
        """Gets the alpha.
        """
        return self._internal.get_alpha()

    @alpha.setter
    def alpha(self, alpha):
        """Sets the alpha.
        """
        self._internal.set_alpha(float(alpha))

    @property
    def beta(self):
        """Gets the beta.
        """
        return self._internal.get_beta()

    @beta.setter
    def beta(self, beta):
        """Sets the beta.
        """
        self._internal.set_beta(float(beta))
| [
37811,
15069,
357,
66,
8,
2177,
12,
1238,
2481,
9564,
17513,
56,
19174,
11419,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846... | 2.692565 | 1,197 |
# 2015-05-22
# rgba_value is a four-tuple
import time
time1 = time.clock()
# for event queue, we are using a priority queue
# we may have site events and circle events with same priority
from core.fortune.SweepLine import *
from core.fortune.arc_tree.ArcTree import *
# from core.priority_queue.PriorityQueue import *
from core.priority_queue.EventQueue import *
from core.fortune.TruncatedBisectorEdgeDict import *
from core.fortune.events.SiteEvent import *
# planar straight-line graph
# vertices have distinct locations
# edges do not cross
"""
class Graph:
def __init__(self):
pass
class Edge:
def __init__(self):
pass
def getEndpoints(self):
pass
class Vertex:
def __init__(self):
pass
def getLocation(self):
pass
def setLocation(self, location):
pass
class SweepLine:
def __init__(self):
pass
"""
# input: a set of n points
# output: a dcel with n bounded faces
# running time: O(n * log(n))
# note: points must have distinct locations
# diagram = Order_1_voronoi_diagram([(0, 0), (0, 1), (0, 2), (0, 3)])
"""
subdivision = diagram.get_subdivision()
print subdivision
# event_queue = PriorityQueue()
event_queue = EventQueue()
sweep_line = SweepLine()
arc_tree = ArcTree(sweep_line)
"""
# Core state for the Fortune sweep: the sweep line, the beach-line arc tree,
# discovered Voronoi vertices, the event queue and the bisector-edge registry.
sweep_line = SweepLine()
# NOTE(review): the meaning of the constant 3 passed to ArcTree is not visible
# in this chunk -- confirm against the ArcTree constructor.
tree = ArcTree(3, sweep_line)
vertex_list = []
# event_queue = PriorityQueue()
event_queue = EventQueue()
truncated_bisector_edge_dict = TruncatedBisectorEdgeDict()
# create voronoi diagram
# 1. process events
# 2. apply bounding box
# 3. retrieve cells
# point_list = [(0, 0), (0, 1), (0, 2), (0, 3)]
# point_list = [(0, 0), (1, 1), (0, 2)]
# point_list = [(0, 4), (1, 1), (2, 3)]
import random
# Generate random sites; the set discards duplicate locations so the final
# point_list contains 1000 distinct points.
point_set = set([])
# while len(point_set) < 500:
# while len(point_set) < 1000:
# while len(point_set) < 1024:
# while len(point_set) < 10000:
while len(point_set) < 1000:
    # print len(point_set)
    """
    x = random.randint(0, 512)
    y = random.randint(0, 512)
    """
    """
    x = random.randint(128, 384)
    y = random.randint(128, 384)
    """
    """
    x = random.randint(128, 896)
    y = random.randint(128, 384)
    """
    """
    x = random.randint(128, 896)
    y = random.randint(128, 896)
    """
    """
    x = random.uniform(128, 896)
    y = random.uniform(128, 896)
    """
    """
    x = random.uniform(128, 2944)
    y = random.uniform(128, 2944)
    """
    # Active range: integer coordinates in [128, 396] on both axes.
    x = random.randint(128, 396)
    y = random.randint(128, 396)
    """
    x = random.random() * 100
    y = random.random() * 100
    """
    location = (x, y)
    # print location
    point_set = point_set | set([location])
point_list = list(point_set)
# this collection of points could lead to jumping event priority values
# point_list = [(100, 200), (200, 400), (300, 200), (400, 400), (500, 200)]
# point_list = [(200, 400), (300, 200), (400, 400)]
# point_list = [(100, 200), (200, 400), (300, 200), (400, 400)]
# point_list = [(100, 200), (200, 400), (300, 200), (400, 400)]
# point_list = [(200, 400), (300, 250), (400, 400)]
# point_list = [(300, 200), (400, 200), (500, 200), (400, 100)]
# point_list = [(300, 200), (400, 200), (400, 100)]
# point_list = [(400, 200), (500, 200), (400, 100)]
# point_list = [(97, 96), (47, 93), (67, 93), (76, 88), (34, 91)]
# point_list = [(10, 62), (30, 79), (60, 48), (39, 27), (16, 64), (84, 92), (93, 4), (44, 73), (61, 68), (38, 69)]
# point_list = [(336, 177), (298, 366), (307, 268), (242, 156), (299, 366), (314, 203), (370, 203)]
# point_list = [(136, 177), (298, 366), (307, 268)]
# point_list = [(359, 228), (179, 172), (291, 179), (170, 314), (178, 163), (305, 155), (230, 271)]
# point_list = [(234, 383), (297, 320), (181, 218), (363, 159), (344, 251), (247, 226), (228, 249), (284, 241), (358, 154), (152, 383)]
# point_list = [(263, 308), (146, 169), (227, 193), (216, 264), (220, 379), (326, 246), (129, 170), (175, 302), (154, 354), (323, 164), (181, 301), (366, 182), (169, 160), (131, 173), (292, 334), (322, 161), (356, 272), (130, 291), (295, 380), (296, 133), (261, 351), (291, 365), (372, 213), (236, 172), (160, 333), (286, 200), (363, 378), (183, 149), (252, 353), (297, 273), (325, 182), (155, 137), (362, 380), (200, 239), (248, 233), (341, 345), (250, 270), (144, 159), (163, 374), (178, 130), (215, 182), (173, 368), (305, 184), (277, 319), (156, 366), (230, 374), (221, 334), (308, 242), (236, 316), (343, 376)]
# point_list = [(336, 198), (270, 330), (250, 219), (298, 324), (321, 346), (273, 382), (223, 143), (365, 335), (309, 382), (358, 340), (220, 303), (251, 225), (332, 382), (356, 323), (192, 228), (290, 275), (265, 300), (266, 162), (356, 253), (200, 131), (129, 153), (204, 200), (327, 158), (128, 240), (383, 280), (225, 382), (304, 282), (372, 313), (187, 219), (297, 143), (317, 156), (254, 317), (134, 128), (286, 266), (321, 182), (183, 364), (222, 326), (271, 187), (376, 128), (333, 301), (255, 237), (192, 244), (321, 301), (273, 331), (264, 136), (145, 185), (197, 177), (287, 190), (168, 368), (358, 312)]
# point_list = [(449, 226), (574, 161), (428, 268), (643, 205), (816, 233), (524, 186), (648, 128), (665, 230), (691, 258), (207, 138), (760, 346), (754, 144), (499, 290), (680, 274), (256, 233), (650, 189), (779, 349), (287, 255), (871, 361), (166, 231), (864, 206), (583, 326), (720, 132), (691, 209), (545, 273), (162, 202), (450, 333), (577, 166), (488, 263), (774, 284), (840, 283), (471, 141), (497, 369), (218, 313), (432, 289), (701, 307), (456, 353), (869, 146), (280, 218), (519, 135), (352, 203), (680, 157), (155, 318), (217, 195), (730, 365), (149, 245), (264, 313), (322, 304), (521, 320), (638, 282), (788, 377), (603, 219), (672, 243), (130, 350), (192, 359), (493, 251), (210, 199), (584, 254), (786, 157), (205, 135), (179, 172), (743, 223), (520, 305), (152, 149), (797, 164), (781, 170), (402, 384), (297, 320), (379, 347), (227, 214), (182, 241), (304, 349), (309, 131), (352, 320), (886, 229), (468, 261), (422, 369), (626, 373), (760, 191), (604, 164), (358, 177), (702, 188), (135, 236), (858, 150), (770, 170), (864, 230), (704, 151), (257, 156), (459, 383), (872, 363), (257, 372), (187, 326), (622, 307), (136, 277), (504, 144), (824, 239), (450, 330), (260, 314), (696, 354), (607, 357), (807, 257), (715, 303), (246, 129), (166, 254), (264, 325), (300, 131), (628, 186), (261, 191), (887, 305), (347, 367), (370, 357), (475, 281), (635, 172), (480, 210), (693, 329), (713, 240), (307, 249), (247, 175), (208, 212), (523, 143), (533, 204), (326, 314), (658, 139), (656, 272), (192, 373), (234, 292), (302, 325), (847, 340), (755, 305), (758, 345), (268, 376), (664, 377), (266, 254), (139, 308), (253, 216), (634, 263), (574, 356), (516, 159), (543, 305), (333, 138), (225, 213), (440, 242), (224, 175), (671, 361), (131, 352), (602, 221), (456, 181), (544, 332), (191, 209), (627, 258), (304, 218), (205, 307), (342, 327), (820, 367), (572, 140), (721, 206), (804, 331), (602, 242), (577, 290), (593, 363), (495, 283), (762, 221), (175, 213), (542, 247), (147, 152), 
(701, 174), (882, 285), (399, 173), (853, 357), (380, 347), (671, 163), (476, 301), (235, 307), (379, 381), (529, 243), (889, 159), (335, 384), (205, 347), (179, 355), (283, 286), (526, 169), (421, 226), (341, 344), (785, 303), (799, 252), (885, 217), (786, 205), (857, 228), (591, 212), (894, 322), (160, 269), (393, 252), (754, 186), (745, 220), (505, 194), (778, 277), (847, 297), (529, 378), (880, 174), (192, 374), (686, 273), (737, 299), (338, 175), (413, 202), (300, 294), (890, 194), (767, 310), (200, 252), (759, 352), (600, 277), (270, 200), (510, 205), (630, 318), (698, 373), (408, 189), (773, 357), (853, 251), (620, 205), (870, 380), (279, 200), (522, 286), (174, 367), (769, 380), (761, 309), (234, 157), (764, 236), (367, 272), (561, 209), (739, 365), (717, 351), (392, 162), (572, 367), (379, 344), (426, 147), (450, 180), (825, 204), (510, 376), (533, 361), (896, 247), (421, 160), (283, 210), (780, 213), (539, 215), (498, 323), (187, 206), (459, 165), (393, 136), (197, 295), (772, 206), (808, 212), (848, 128), (530, 319), (290, 357), (860, 313), (304, 210), (234, 137), (690, 256), (766, 129), (248, 352), (248, 381), (840, 343), (873, 162), (564, 276), (748, 230), (274, 177), (455, 313), (547, 329), (576, 229), (525, 157), (434, 255), (442, 272), (888, 320), (601, 342), (795, 370), (739, 221), (432, 294), (628, 279), (359, 245), (819, 208), (745, 287), (291, 200), (532, 169), (675, 263), (304, 243), (895, 241), (497, 297), (508, 146), (727, 325), (172, 321), (640, 282), (277, 242), (161, 302), (727, 201), (494, 251), (430, 289), (744, 242), (624, 181), (640, 291), (694, 382), (857, 150), (505, 184), (533, 364), (351, 139), (655, 149), (642, 377), (160, 372), (684, 307), (815, 277), (415, 198), (675, 190), (570, 226), (229, 150), (840, 339), (133, 351), (270, 174), (138, 317), (173, 187), (672, 258), (287, 257), (530, 279), (246, 226), (817, 252), (550, 166), (776, 138), (704, 164), (887, 169), (833, 243), (695, 224), (889, 372), (570, 203), (677, 220), (830, 
361), (179, 267), (169, 295), (287, 326), (314, 235), (347, 247), (193, 174), (388, 175), (620, 249), (147, 174), (815, 289), (863, 379), (518, 249), (797, 295), (380, 353), (821, 305), (592, 138), (248, 224), (477, 229), (794, 217), (628, 272), (626, 185), (142, 292), (863, 303), (690, 346), (515, 264), (783, 244), (611, 329), (741, 282), (816, 175), (432, 275), (589, 213), (782, 136), (549, 328), (393, 154), (318, 250), (448, 255), (307, 287), (382, 201), (213, 161), (804, 143), (378, 214), (654, 273), (598, 309), (707, 383), (789, 196), (312, 232), (271, 298), (706, 185), (561, 215), (390, 251), (678, 241), (150, 142), (527, 154), (218, 244), (564, 314), (422, 378), (855, 255), (181, 212), (131, 173), (129, 318), (847, 149), (482, 235), (743, 199), (581, 248), (379, 269), (445, 316), (323, 198), (845, 254), (886, 380), (894, 281), (383, 138), (564, 215), (181, 335), (628, 154), (540, 169), (605, 365), (202, 364), (336, 215), (716, 294), (582, 244), (213, 383), (568, 301), (859, 293), (666, 336), (543, 277), (863, 160), (727, 306), (502, 344), (700, 196), (148, 310), (152, 177), (860, 348), (819, 216), (244, 235), (798, 197), (548, 311), (188, 358), (547, 348), (741, 328), (291, 151), (457, 132), (826, 336), (879, 239), (848, 177), (634, 245), (212, 300), (442, 375), (806, 243), (863, 302), (661, 232), (435, 345), (629, 177), (691, 288), (638, 249), (755, 321), (419, 132), (619, 146), (306, 150), (888, 381), (432, 362), (870, 326), (419, 365), (140, 359), (172, 192), (742, 128), (274, 166), (544, 138), (303, 242), (277, 363), (445, 217), (590, 343), (859, 242), (773, 291), (380, 356), (622, 223), (742, 301), (288, 146), (130, 159), (357, 184), (436, 298), (446, 282), (553, 172), (614, 331), (392, 145), (424, 357), (424, 186), (825, 311), (371, 264), (419, 328), (252, 223), (182, 212), (339, 358), (753, 277), (482, 153), (694, 192), (620, 370), (135, 269), (835, 276), (828, 245), (892, 340), (824, 205), (350, 313), (185, 244), (262, 382), (603, 191), (423, 137), 
(704, 157), (778, 258)]
# point_list = [(892, 389), (244, 813), (800, 672), (866, 732), (856, 134), (261, 690), (639, 271), (790, 306), (149, 528), (880, 302), (792, 415), (379, 189), (213, 190), (545, 343), (171, 556), (153, 472), (380, 456), (542, 412), (311, 660), (732, 417), (432, 239), (705, 586), (565, 684), (743, 477), (186, 876), (377, 540), (883, 245), (795, 619), (632, 157), (319, 658), (216, 235), (881, 459), (873, 393), (482, 737), (436, 743), (210, 502), (265, 415), (178, 341), (223, 732), (639, 605), (173, 386), (523, 837), (833, 762), (132, 399), (874, 646), (517, 594), (453, 793), (541, 588), (367, 242), (234, 533), (520, 411), (747, 761), (186, 207), (174, 700), (447, 549), (437, 378), (297, 292), (781, 132), (821, 522), (511, 706), (886, 230), (492, 701), (663, 315), (425, 870), (774, 197), (185, 186), (388, 454), (238, 381), (338, 144), (755, 816), (170, 714), (308, 574), (881, 716), (661, 254), (709, 729), (803, 178), (651, 539), (529, 332), (697, 138), (497, 516), (885, 378), (254, 558), (643, 417), (579, 405), (882, 796), (481, 220), (177, 218), (778, 450), (853, 679), (139, 140), (323, 581), (276, 460), (437, 189), (188, 885), (264, 243), (168, 381), (574, 435), (322, 869), (618, 246), (222, 877)]
print "point list:", point_list
site_list = point_list
# print "locations determined"
# print site_list
# site_event_pairs = [(SiteEvent(p), p[1]) for p in point_list]
# Sites sharing the maximal y coordinate sit on the initial sweep line and
# become "leading arcs"; everything else is handled through site events.
y_values = [x[1] for x in point_list]
max_y_value = max(y_values)
# print "max y-value:", max_y_value
y_maximal_point_list = [x for x in point_list if x[1] == max_y_value]
y_not_maximal_point_list = [x for x in point_list if x[1] != max_y_value]
# Copy before sorting so y_maximal_point_list itself is left untouched.
y_maximal_point_list_sorted_by_x = y_maximal_point_list[ : ]
y_maximal_point_list_sorted_by_x.sort(key = lambda x: x[0])
# print y_maximal_point_list_sorted_by_x
leading_arc_site_list = y_maximal_point_list_sorted_by_x
non_leading_arc_site_list = y_not_maximal_point_list
# Pair each site event with its y coordinate (used later as queue priority).
leading_arc_site_event_pair_list = [(SiteEvent(s), s[1]) for s in leading_arc_site_list]
non_leading_arc_site_event_pair_list = [(SiteEvent(s), s[1]) for s in non_leading_arc_site_list]
# print "leading arc site list:", leading_arc_site_list
"""
initial_point_list_sorted_by_x = y_maximal_point_list_sorted_by_x
trailing_point_list = point_with_y_component_not_maximal_list
initial_site_event_pairs_sorted_by_x = [(SiteEvent(p), p[1]) for p in initial_point_list_sorted_by_x]
trailing_site_event_pairs = [(SiteEvent(p), p[1]) for p in trailing_point_list]
"""
# The sweep starts at the topmost site; arcs whose focus lies on the initial
# sweep line must be inserted directly rather than via queued events.
initial_y_value = max_y_value
sweep_line.setY(initial_y_value)
# handle leading arcs
for site_event_pair in leading_arc_site_event_pair_list:
    event, y = site_event_pair
    event.handle(tree, vertex_list, event_queue, truncated_bisector_edge_dict, sweep_line, initial_y_value)
# print tree.toIntervalStringList()
# raise Exception()
# create initial truncated bisector edges
# Each pair of x-adjacent leading arcs shares one bisector edge, so n leading
# arcs produce n - 1 initial edges (and 0 when there is a single leading arc).
num_leading_arcs = len(leading_arc_site_list)
num_initial_truncated_bisector_edges = max(0, num_leading_arcs - 1)
for i in range(num_initial_truncated_bisector_edges):
    site_a = leading_arc_site_list[i]
    site_b = leading_arc_site_list[i + 1]
    truncated_bisector_edge_dict.addTruncatedBisectorEdge(site_a, site_b)
# print site_event_pairs
# for site_event_pair in site_event_pairs:
# handle non-leading arcs
# Queue the remaining sites as site events. The queue is a min-heap, so y is
# negated to pop largest-y events first; the False flag marks the event as not
# being for a split residue arc.
for site_event_pair in non_leading_arc_site_event_pair_list:
    event, y = site_event_pair
    # print event.getLocation(), -1 * y
    # print "y-value for site event:", y
    # working with a min-heap
    # using negative y values for priorities
    event_queue.insert((-1 * y, False), event)
# Bounding extents of the input sites.
# NOTE(review): only max_y_value appears to be used further down -- confirm.
x_values = [x[0] for x in point_list]
y_values = [x[1] for x in point_list]
min_x_value = min(x_values)
max_x_value = max(x_values)
min_y_value = min(y_values)
max_y_value = max(y_values)
circle_event_parameter_value_list_tuple_list = []
# process events with largest y values first
# Main sweep: pop events in priority order (min-heap over negated y), advance
# the sweep line to each event's y, and let the event mutate the beach line,
# the vertex list, the queue and the edge registry.
while not event_queue.isEmpty():
    # print "lowest value that serves as a priority:", event_queue.min().getKey()
    # print event_queue.queue
    entry = event_queue.removeMin()
    event = entry.getValue()
    # priority = entry.getKey()
    key = entry.getKey()
    priority, is_for_split_residue_arc = key
    """
    if event.isCircleEvent() == True:
        # raise Exception(priority, event.getArc().getFocus())
        print "circle event:", priority, event.getArc().getFocus()
        print "circle event originated from a site event:", event.getCausingEventIsSiteType()
        arc = event.getArc()
        left_arc = tree.getArcLeftNeighbor(arc)
        right_arc = tree.getArcRightNeighbor(arc)
        print left_arc.getFocus(), arc.getFocus(), right_arc.getFocus()
        l_y = tree.getSweepLine().getY()
        print CircleEvent.getIntersectionHelper(l_y, left_arc, arc, right_arc)
        print CircleEvent.getLargestEmptyCircleLowestExtent(l_y, left_arc, arc, right_arc)
        circle_event_parameter_value_list_tuple_list.append((event, (left_arc, arc, right_arc, l_y)))
    elif event.isSiteEvent() == True:
        print "site event:", priority, event.getLocation()
    """
    # print priority
    # print "y-value for priority:", getYValueForPriority(priority)
    """
    if sweep_line.getY() != None and not (getYValueForPriority(priority) <= (sweep_line.getY() + tree._getTolerance())):
        print event_queue.toString()
        print getYValueForPriority(priority), sweep_line.getY()
        raise Exception("jumping event priority values")
    """
    # NOTE(review): getYValueForPriority is not defined in this chunk --
    # presumably it undoes the negation applied on insertion; confirm.
    sweep_line.setY(getYValueForPriority(priority))
    # print "sweep-line y:", sweep_line.getY()
    # handle event
    # print event_queue.toString()
    initial_y_value = max_y_value
    event.handle(tree, vertex_list, event_queue, truncated_bisector_edge_dict, sweep_line, initial_y_value)
    # print event_queue.toString()
# print event_queue.toString()
# unique_site_list = list(set(site_list))
# Collect the finished diagram: edges whose both endpoints are settled,
# edges with at least one indeterminate (unbounded) endpoint, and the set
# of distinct Voronoi vertex locations.
edge_list = truncated_bisector_edge_dict.getAllEdgesWithEndpointsThatAreNotIndeterminate()
edge_location_pair_list = [(x.getFirstVertex().getLocation(), x.getSecondVertex().getLocation()) for x in edge_list]
unique_edge_location_pair_list = list(set(edge_location_pair_list))
non_finite_length_edge_list = truncated_bisector_edge_dict.getAllEdgesWithAnEndpointThatIsIndeterminate()
non_finite_length_edge_location_pair_list = [(x.getFirstVertex().getLocation(), x.getSecondVertex().getLocation()) for x in non_finite_length_edge_list]
unique_non_finite_length_edge_location_pair_list = list(set(non_finite_length_edge_location_pair_list))
vertex_location_list = [x.getLocation() for x in vertex_list]
unique_vertex_location_list = list(set(vertex_location_list))
def isSizeZeroEdge(edge_location_pair):
    """Return True if an edge has zero length.

    :param edge_location_pair: pair (location1, location2) of the edge's
        endpoint coordinates, e.g. ((x1, y1), (x2, y2)).
    :return: True when both endpoints share the same location.
    """
    # The previous docstring was a leftover block of dead code operating on
    # edge objects; this version documents the actual tuple-based contract.
    location1, location2 = edge_location_pair
    return location1 == location2
unique_non_size_zero_edge_location_pair_list = [x for x in unique_edge_location_pair_list if isSizeZeroEdge(x) == False]
site_points = []
for site in site_list:
# print site
# plot
# note that we output sites in same order in which they were described,
# and not necessarily order in which they were added
site_points.append(site)
from PIL import Image, ImageDraw
# im = Image.new("RGB", (512, 512), "white")
# im = Image.new("RGB", (1024, 512), "white")
# im = Image.new("RGB", (1024, 1024), "white")
# im = Image.new("RGB", (3072, 3072), "white")
im = Image.new("RGB", (512, 512), "white")
# im = Image.new("RGB", (1024, 1024), "white")
draw = ImageDraw.Draw(im)
plotPoints(draw, site_points, (128, 128, 128))
points = []
"""
print len(circle_event_parameter_value_list_tuple_list)
# raise Exception()
for circle_event_parameter_value_list_tuple in circle_event_parameter_value_list_tuple_list:
circle_event, parameter_value_list = circle_event_parameter_value_list_tuple
left_arc, arc, right_arc, l_y = parameter_value_list
drawCircleEvent(draw, left_arc, arc, right_arc, l_y, (64, 64, 64))
highlightSiteForArc(draw, arc, (255, 0, 0))
"""
# for vertex in vertex_list:
# for vertex in unique_vertex_list:
# Draw Voronoi vertices in black on top of the (already plotted) sites.
# NOTE(review): plotPoints / plotLineSegments are not defined in this chunk --
# confirm they are provided earlier in the full file.
for vertex_location in unique_vertex_location_list:
    # print vertex.toString()
    # print vertex_location
    # plot
    # points.append(vertex.getLocation())
    points.append(vertex_location)
plotPoints(draw, points, (0, 0, 0))
segments = []
# for edge in edge_list:
# for edge in non_size_zero_edge_list:
for edge_location_pair in unique_non_size_zero_edge_location_pair_list:
    # print edge.toString()
    # print edge_location_pair
    # plot
    """
    vertex1 = edge.getFirstVertex()
    vertex2 = edge.getSecondVertex()
    location1 = vertex1.getLocation()
    location2 = vertex2.getLocation()
    segment = (location1, location2)
    """
    segment = edge_location_pair
    segments.append(segment)
# Edges with an indeterminate endpoint are currently ignored by the plot.
for edge_location_pair in unique_non_finite_length_edge_location_pair_list:
    # print edge_location_pair
    pass
plotLineSegments(draw, segments, (0, 0, 0))
im.save("diagram.png")
# deal with graph that may have edges that have indeterminate endpoints
# settle indeterminate endpoints
# overlay subdivision implied by graph with edges with settled endpoints
# with subdivision consisting of bounding box
# make sure that we have enough faces identified
# come up with a trapezoidal map
# process queries
time2 = time.clock()
time_diff = time2 - time1
print time_diff
| [
2,
1853,
12,
2713,
12,
1828,
201,
198,
201,
198,
2,
48670,
7012,
62,
8367,
318,
257,
1440,
12,
83,
29291,
201,
198,
201,
198,
11748,
640,
201,
198,
201,
198,
2435,
16,
796,
640,
13,
15750,
3419,
201,
198,
201,
198,
2,
329,
1785,... | 2.385685 | 8,844 |
# Generated by Django 2.0.5 on 2018-12-04 07:33
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
20,
319,
2864,
12,
1065,
12,
3023,
8753,
25,
2091,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import time
import edgeiq
import pyfrc
from networktables import NetworkTables
import logging
import numpy as np
# Constant for the default confidence (0 being 0% sure and 1 being 100% sure)
default_conf_thres = .75

# TODO: Order the predictions in terms of priority (proximity?)

# NOTE(review): main() is not defined in this visible chunk -- presumably it
# is defined earlier in the full file; confirm before relying on this script.
if __name__ == "__main__":
    main()
11748,
640,
198,
11748,
5743,
25011,
198,
11748,
12972,
69,
6015,
198,
6738,
3127,
83,
2977,
1330,
7311,
51,
2977,
198,
11748,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
20217,
329,
262,
4277,
6628,
357,
15,
852,
657,
4,
... | 3.34375 | 96 |
#!/usr/bin/env python
"""
This script is a trick to setup a fake Django environment, since this reusable
app will be developed and tested outside any specifiv Django project.
Via ``settings.configure`` you will be able to set all necessary settings
for your app and run the tests as if you were calling ``./manage.py test``.
"""
import sys
import django
from django.conf import settings
import test_settings
# Configure Django with the fake settings module once, then initialize the
# app registry (required from Django 1.7 onward).
if not settings.configured:
    settings.configure(**test_settings.__dict__)

# NOTE(review): substring check matches only versions containing '1.7'
# (e.g. not 1.8+); confirm this is intentional for the supported versions.
if '1.7' in django.get_version():
    django.setup()

# Imported after settings are configured, as django_nose touches settings.
from django_nose import NoseTestSuiteRunner


class NoseTestRunner(NoseTestSuiteRunner):
    """Custom test runner that uses nose and coverage"""


# NOTE(review): runtests() is not defined in this visible chunk -- confirm it
# exists earlier in the full file.
if __name__ == '__main__':
    runtests(*sys.argv[1:])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
1212,
4226,
318,
257,
6908,
284,
9058,
257,
8390,
37770,
2858,
11,
1201,
428,
42339,
198,
1324,
481,
307,
4166,
290,
6789,
2354,
597,
1020,
361,
452,
37770,
1628,
13,
198,
... | 3.279476 | 229 |
# PC part inventory, declared up front as a single list literal.
lista_pc = [
    "Processador Intel Core i9 11900K 11ª geração",
    "Placa de vídeo NVIDIA GeForce RTX 2080TI 11 GB",
    "Memória 2x 32GB DDR4 3000Mhz",
    "HD 4TB Seagate 7200 RPM",
    "SSD 2TB NVME SN850 Leitura 7000MB/s - Gravação 5300MB/s",
    "Cooler Watercooler ML240L V2 RGB",
    "Placa Mãe Asus Z590-P",
    "Fonte Cooler Master 800W Gold",
    "Gabinete Cooler Master COSMOS C700M",
]

print("Lista PC: {}".format(lista_pc))

# Remove the video card (index 1) and then, after the shift, the case
# (originally index 8, index 7 once the first deletion has happened).
for indice in (1, 7):
    del lista_pc[indice]

print("")
print("Lista PC (sem placa de vídeo e sem gabinete): {}".format(lista_pc))
| [
4868,
64,
62,
14751,
796,
17635,
198,
198,
4868,
64,
62,
14751,
13,
33295,
5855,
18709,
7079,
8180,
7231,
1312,
24,
15136,
405,
42,
1367,
126,
103,
308,
8607,
16175,
28749,
4943,
198,
198,
4868,
64,
62,
14751,
13,
33295,
5855,
3646,
... | 2.387543 | 289 |
#!/usr/bin/python
"""
Script used to compile and deploy a tagged configuration on a deployment server.
This script is intended to be called by SVN post-commit hook script.
"""
__version__ = "1.0.3"
__author__ = "Michel Jouvin <jouvin@lal.in2p3.fr>"
import sys
import os
import re
import shutil
from subprocess import *
import StringIO
import pysvn
import logging
import logging.handlers
import syslog
import socket
from optparse import OptionParser
import ConfigParser
# Initializations
this_script = os.path.abspath(sys.argv[0])
verbosity = 0
lock_created = False
client = None
logger = None
tag = None
java_root = '/usr/java'
lock_file = '/var/lock/quattor-deploy'
script_parent_dir = os.path.dirname(os.path.dirname(this_script))
config_file_default = '/etc/quattor-deploy.conf'
config_sections = { 'build-tag':'build-tag', 'scdb':'scdb' }
config_defaults = StringIO.StringIO("""
# Options commented out are configuration options available for which no
# sensible default value can be defined.
[build-tag]
# If not starting with /, relative to directory specified by option svn_cache
ant_cmd: external/ant/bin/ant
# ant options (passed through env variable ANT_OPTS)
#ant_opts: -Xmx2048M
# ant stdout: allow to redirect ant output to a file for debugging purposes (default is PIPE)
# Default changed to a file because of problem in subprocess module if the
# output is very large (Python 2.4) leading to the parent/child communication
# to hang.
ant_stdout: /tmp/ant-deploy-notify.log
#ant_stdout: PIPE
# ant target to do the deployment. Default should be appropriate.
ant_target: deploy.and.notify
# If not starting with /, relative to /usr/java.
java_version: latest
# If not starting with /, relative to parent of this directory script
svn_cache: svncache
# Number of retries for SVN switch to new tag in case of error
switch_retry_count: 1
# Verbosity level
verbose: 0
[scdb]
# URL associated with the repository root. Required parameter without default value.
#repository_url: http://svn.example.com/scdb
# Branch where to create SCDB deployment tags
# Keep consistent with quattor.build.properties if not using default values.
tags_branch: /tags
# Branch corresponding to SCDB trunk (only branch allowed to deploy)
# Keep consistent with quattor.build.properties if not using default values.
trunk_branch: /trunk
""")
def check_pid(pid):
    """Check for the existence of a unix pid (signal 0 does nothing).

    :param pid: process id to probe.
    :return: True if a process with this pid exists, False otherwise.

    os.kill(pid, 0) raises OSError with errno ESRCH when no such process
    exists. EPERM means the process exists but belongs to another user and
    cannot be signalled -- the original code wrongly reported False in that
    case, which could let two script instances run concurrently.
    """
    import errno  # local import: the module header is not visible here and may not import errno
    try:
        os.kill(pid, 0)
    except OSError as detail:
        # EPERM: process exists but we lack permission to signal it.
        return detail.errno == errno.EPERM
    else:
        return True
# Configure loggers and handlers
logging_source = 'build-tag'
logger = logging.getLogger(logging_source)
logger.setLevel(logging.DEBUG)
#fmt=logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
fmt=logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
syslog_handler = logging.handlers.SysLogHandler('/dev/log')
syslog_handler.setLevel(logging.WARNING)
logger.addHandler(syslog_handler)
# SVN requires the response to be valid XML.
terminal_handler = logging.StreamHandler()
terminal_handler.setLevel(logging.DEBUG)
terminal_handler.setFormatter(fmt)
logger.addHandler(terminal_handler)
parser = OptionParser()
parser.add_option('--config', dest='config_file', action='store', default=config_file_default, help='Name of the configuration file to use')
parser.add_option('-v', '--debug', '--verbose', dest='verbosity', action='count', default=0, help='Increase verbosity level for debugging (on stderr)')
parser.add_option('--version', dest='version', action='store_true', default=False, help='Display various information about this script')
options, args = parser.parse_args()
if options.version:
debug (0,"Version %s written by %s" % (__version__,__author__))
debug (0,__doc__)
sys.exit(0)
if len(args) < 1:
abort("tag to deploy must be specified")
if options.verbosity:
verbosity = options.verbosity
tag = args[0]
# Read configuration file.
# The file must exists as there is no sensible default value for several options.
config = ConfigParser.ConfigParser()
config.readfp(config_defaults)
try:
config.readfp(open(options.config_file))
except IOError, (errno,errmsg):
if errno == 2:
abort(1,'Configuration file (%s) is missing.' % (options.config_file))
else:
abort('Error opening configuration file (%s): %s (errno=%s)' % (options.config_file,errmsg,errno))
if not config.has_section(config_sections['scdb']):
abort('[%s] section is missing in configuration file (%s)' % (config_sections['scdb'],options.config_file))
# Use verbose option from config file only if it specified a greater level of verbosity
# that the one specified on the command line.
try:
section = config_sections['build-tag']
config_verbose = config.getint(section,'verbose')
except ValueError:
abort("Invalid value specified for 'verbose' (section %): must be an integer >=0" % (section))
if config_verbose > verbosity:
verbosity = config_verbose
# Get mandatory options without default values
try:
# Section [scdb]
section = config_sections['scdb']
option_name = 'repository_url'
repository_url = config.get(config_sections['scdb'],option_name)
except ConfigParser.NoOptionError:
abort("Option %s (section %s) is required but not defined" % (option_name,section))
# Remove trailing / as a SVN doesn't like a // in the url
if re.search('/$',repository_url):
repository_url = repository_url.rstrip('/')
debug(1,"Trailing / stripped from 'repository_url'. New value: %s" % (repository_url))
# Get options with default values
try:
# Section [scdb]
section = config_sections['scdb']
option_name = 'tags_branch'
tags_branch = config.get(section,option_name)
# Section [build-tag]
section = config_sections['build-tag']
option_name = 'ant_cmd'
ant_cmd = config.get(section,option_name)
option_name = 'ant_target'
ant_target = config.get(section,option_name)
option_name = 'java_version'
java_version = config.get(section,option_name)
option_name = 'svn_cache'
svn_cache = config.get(section,option_name)
option_name = 'switch_retry_count'
switch_retry_count = config.getint(section,option_name)
option_name = 'ant_stdout'
ant_stdout_file = config.get(section,option_name)
except ValueError:
abort("Option % (section %s) not defined: internal error (default value should exist)." % (option_name,section))
# Ensure the branch names start with a / and has no trailing / (some SVN versions don't like //)
if not re.match('/',tags_branch):
tags_branch = '/' + tags_branch
debug(1,"Leading / added to 'tags_branch'. New value: %s" % (tags_branch))
# Remove trailing / as a SVN doesn't like a // in the url
if re.search('/$',tags_branch):
tags_branch = tags_branch.rstrip('/')
debug(1,"Trailing / stripped from 'tags_branch'. New value: %s" % (tags_branch))
# Optional ant_opts
section = config_sections['build-tag']
try:
option_name = 'ant_opts'
ant_opts = config.get(section,option_name)
except:
ant_opts = None
if not re.match('^/',java_version):
java_version = java_root + '/' + java_version
if not re.match('^/',svn_cache):
svn_cache = script_parent_dir + '/' + svn_cache
# Checks availability of required applications
# ant existence must be tested after the check out as it is part of SCDB.
if not os.path.exists(java_version):
abort("Specified Java version (%s) does not exist. Use 'java_version' to specify another version" % (java_version))
# Ensure there is not another instance of the script already running.
# Unfortunatly, as there is no way to open a new lock file without cloberring, this is
# a 2-step operation with a very small race condition if another script instance is doing the
# same test between both steps. But this is probably not a real issue as svncache will be locked
# by one of the instance and the other one will fail.
already_running = True
try:
    lock_fd = open(lock_file,'r')
except IOError, detail:
    # errno 2 == ENOENT: no lock file present, so no other instance.
    if detail.errno == 2:
        already_running = False
    else:
        abort('Failed to open lock file (%s): %s' % (lock_file,detail))
if already_running:
    # The lock file holds the pid of the owning instance; a stale file whose
    # pid no longer exists is silently taken over below.
    pidstr = lock_fd.readline().rstrip()
    lock_fd.close()
    try:
        pid=int(pidstr)
    except ValueError, detail:
        abort("Lock file (%s) found but doesn't contain a valid pid (%s)" % (lock_file,pidstr))
    if check_pid(pid):
        abort("%s already running (pid=%d). Retry later..." % (this_script,pid))
# Claim (or reclaim) the lock by writing our own pid.
try:
    lock_fd = open(lock_file,'w')
    lock_fd.write(str(os.getpid()))
    lock_fd.close()
except IOError, detail:
    abort('Failed to open lock file (%s): %s' % (lock_file,detail))
# Switch SVN cache to new tag
tag_url = repository_url + tags_branch + '/' + tag
debug(0, "Processing tag %s..." % (tag_url))
debug(0, "SVN cache: %s" % (svn_cache))
client = pysvn.Client()
client.exception_style = 0
# If svn_cache exists, check it is valid, else delete it.
if os.path.isdir(svn_cache) and os.access(svn_cache,os.W_OK):
    try:
        debug(1,'Checking %s is a valid SVN working copy' % (svn_cache))
        wc_info = client.info(svn_cache)
    except pysvn.ClientError, e:
        warning("%s is not a valid SVN working copy. Deleting and checking out again..." % (svn_cache))
        shutil.rmtree(svn_cache)
# If svn_cache doesn't exist, do a checkout
if not os.path.isdir(svn_cache):
    try:
        debug(0,"Checking out %s into %s" % (tag_url,svn_cache))
        client.checkout(path=svn_cache,url=tag_url)
    except pysvn.ClientError, e:
        # A failed checkout is tolerated: the switch below may complete it.
        debug(1,'Error during checkout of %s. Trying to continue' % tag_url)
# Switch to new tag.
# Do also after an initial checkout as it may allow to complete a failed check out.
# Retry switch in case of errors as specified by switch_retry_count
switch_failed = True
i = 1
while switch_failed and i <= switch_retry_count:
    if i > 1 and switch_failed:
        debug(1,'Switch to tag %s failed. Retrying (%d/%d)...' % (tag,i,switch_retry_count))
    else:
        debug(0,'Switching to tag %s (url=%s)' % (tag,tag_url))
    switch_failed = False
    try:
        client.switch(path=svn_cache,url=tag_url)
    except pysvn.ClientError, e:
        switch_failed = True
        # NOTE(review): 'last_errror' looks like a typo and is never read; the
        # abort below relies on the loop-local 'e' instead -- confirm intent.
        last_errror = e
    i += 1
if switch_failed:
    abort('Failed to switch SVN cache to new tag: %s' % (e))
# Compile and deploy the tag with ant.
# Bug fix: the original tested re.match(ant_cmd,'^/') with the pattern and
# string arguments swapped (compare the correct re.match('^/',java_version)
# used earlier for java_version), so an absolute ant_cmd was always wrongly
# prefixed with svn_cache. Only relative paths must be resolved against it.
if not re.match('^/',ant_cmd):
    ant_cmd = svn_cache + '/' + ant_cmd
if not os.path.exists(ant_cmd) or not os.access(ant_cmd,os.X_OK):
    abort("ant (%s) not found. Use option 'ant_cmd' to specify another location." % (ant_cmd))
deploy_cmd = [ ant_cmd ]
deploy_cmd.append(ant_target)
# Minimal environment for ant: JAVA_HOME is mandatory, ANT_OPTS optional.
ant_env = {}
ant_env['JAVA_HOME'] = java_version
if ant_opts:
    debug(1,'Defining ANT_OPTS as "%s"' % (ant_opts))
    ant_env['ANT_OPTS'] = ant_opts
if ant_stdout_file == 'PIPE':
    ant_stdout = PIPE
else:
    # Unfortunatly flag 'r+' doesn't create the file if it doesn't exist. It is then
    # necessary to reopen the file for reading.
    ant_stdout = file(ant_stdout_file, 'w')
debug(0,"Executing command: '%s'" % (' '.join(deploy_cmd)))
try:
    # NOTE(review): wait() before communicate() can deadlock with stdout=PIPE
    # on very large output -- the module's own comments mention this; kept
    # as-is because the default redirects output to a file.
    proc = Popen(deploy_cmd, shell=False, cwd=svn_cache, env=ant_env, stdout=ant_stdout, stderr=STDOUT)
    retcode = proc.wait()
    output = proc.communicate()[0]
    if not ant_stdout == PIPE:
        # Do not send back ant output if redirected to a file as it can be very large and tends to cause
        # problems with Python subprocess module.
        try:
            ant_stdout.close()
            #ant_stdout = file(ant_stdout_file, 'r')
            #output = ant_stdout.read()
            output = "See %s on %s" % (ant_stdout_file,socket.getfqdn())
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # still propagate; behaviour is otherwise identical.
            debug(1,'Error reading ant output file(%s)' % (ant_stdout_file))
    if retcode < 0:
        abort('ant command aborted by signal %d. Command output:\n%s' % (-retcode, output))
    elif retcode > 0:
        abort('Error during ant command (status=%d). Script output:\n%s' % (retcode,output))
    else:
        debug(1,'Tag %s deployed successfully. Script output:\n%s' % (tag,output))
except OSError as detail:
    abort('Failed to execute ant command: %s' % (detail))

# Remove lock file (errno 2 == ENOENT: already gone, which is fine).
try:
    os.remove(lock_file)
except OSError as detail:
    if detail.errno != 2:
        warning('Failed to delete lock file (%s): %s' % (lock_file,detail))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
37811,
198,
7391,
973,
284,
17632,
290,
6061,
257,
30509,
8398,
319,
257,
14833,
4382,
13,
198,
1212,
4226,
318,
5292,
284,
307,
1444,
416,
20546,
45,
1281,
12,
41509,
8011,
4226,
13,
... | 2.893234 | 4,168 |
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Jens Krüger <jens.krueger@frm2.tum.de>
#
# *****************************************************************************
from numpy import array
from nicos.clients.gui.panels import Panel
from nicos.clients.gui.utils import loadUi, waitCursor
from nicos.core.errors import NicosError
from nicos.core.utils import ADMIN
from nicos.guisupport.livewidget import LiveWidget1D
from nicos.guisupport.plots import GRCOLORS, MaskedPlotCurve
from nicos.guisupport.qt import QDialogButtonBox, QDoubleValidator, QLabel, \
QMessageBox, QSize, QSizePolicy, Qt, QVBoxLayout, QWidget, pyqtSlot
from nicos.guisupport.widget import NicosWidget
from nicos.utils import findResource
from nicos_mlz.toftof.devices.calculations import ResolutionAnalysis
COLOR_BLACK = GRCOLORS['black']
COLOR_RED = GRCOLORS['red']
COLOR_GREEN = GRCOLORS['green']
COLOR_BLUE = GRCOLORS['blue']
ANGSTROM = '\u212b'
DELTA = '\u0394'
LAMBDA = '\u03bb'
MICRO = '\xb5'
MINUSONE = '\u207b\xb9'
| [
2,
220,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
41906,
17174,
4557,
35625,
198,
2,
45593,
2640,
11,
262,
7311,
276,
42410,
6779,
4482,
286,
262,
10373,
57,
198,
2,
15069,
357,
66,
8,
3717,
12,
1238,
1828,
... | 3.28547 | 585 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This is some demo code showing how a BRSKI proxy would
find a registrar in an ANIMA network using GRASP. This version
also shows how the proxy could advertise itself by flooding
to on-link nodes seeking a proxy. The actual BRSKI transactions
are not included.
"""
import sys
#sys.path.insert(0, '..') # in case grasp.py is one level up
sys.path.insert(0, 'graspy/')
import grasp
import threading
import time
import socket
try:
socket.IPPROTO_IPV6
except:
socket.IPPROTO_IPV6 = 41
import ipaddress
###################################
# Utility routine for debugging:
# Print out the GRASP objective registry
# and flood cache
###################################
###################################
# Function to flood an objective
###################################
###################################
# Main thread starts here
###################################
grasp.tprint("==========================")
grasp.tprint("ASA Procksy is starting up.")
grasp.tprint("==========================")
grasp.tprint("Procksy is a demonstration Autonomic Service Agent.")
grasp.tprint("It mimics a BRSKI Join Assistant (proxy) by")
grasp.tprint("looking for a registrar and then by announcing")
grasp.tprint("the methods it supports, with associated locators,")
grasp.tprint("as flooded GRASP objectives.")
grasp.tprint("Then it pretends to generate BRSKI traffic.")
grasp.tprint("This version uses floods to find a registrar,")
grasp.tprint("per draft-ietf-anima-bootstrapping-keyinfra-12")
#grasp.tprint('modulo an error in the "AN_proxy" definition')
grasp.tprint("On Windows or Linux, there should soon be")
grasp.tprint("a nice window that displays the process.")
grasp.tprint("==========================")
#grasp.test_mode = True # tell everybody it's a test, will print extra diagnostics
time.sleep(1) # time to read the text
####################################
# Register this ASA
####################################
# The ASA name is arbitrary - it just needs to be
# unique in the GRASP instance.
grasp.skip_dialogue(False,False,True)
_err,_asa_nonce = grasp.register_asa("Procksy")
if not _err:
grasp.tprint("ASA Procksy registered OK")
else:
grasp.tprint("ASA registration failure:",grasp.etext[_err])
exit()
####################################
# Construct a GRASP objective
####################################
# This is an empty GRASP objective to find the registrar
# It's only used for get_flood so doesn't need to be filled in
reg_obj = grasp.objective("AN_join_registrar")
reg_obj.synch = True
####################################
# Create ports for the proxy's communication
# with pledges
####################################
# For this demo, we just make up some numbers:
t_port = 11800 + grasp._prng.randint(0,5) #slightly random for demo
u_port = 11900 + grasp._prng.randint(0,5) #slightly random for demo
proxy_address = grasp.unspec_address # This is the unspecified address,
# which signals link-local address to API
proxy_ttl = 180000 #milliseconds to live of the announcement
####################################
# Construct a correponding asa_locator
####################################
proxy_locator = grasp.asa_locator(proxy_address,0,False)
proxy_locator.is_ipaddress = True
####################################
# Construct the GRASP objective to announce the proxy
####################################
proxy_obj = grasp.objective("AN_proxy")
proxy_obj.synch = True
proxy_obj.value = ""
# proxy_obj.loop_count not set, the API forces it to 1 for link-local use
####################################
# Register the GRASP objective
####################################
grasp.skip_dialogue(False,False,True)
_err = grasp.register_obj(_asa_nonce, proxy_obj)
if not _err:
grasp.tprint("Objective", proxy_obj.name,"registered OK")
else:
grasp.tprint("Objective registration failure:", grasp.etext[_err])
exit() # demo code doesn't handle registration errors
####################################
# Start pretty printing
####################################
#grasp.init_bubble_text("BRSKI Join Proxy")
grasp.tprint("Proxy starting now")
###################################
# Now find the registrar and pick one or two methods
###################################
while True:
registrar1 = None
registrar2 = None
_err, _results = grasp.get_flood(_asa_nonce, reg_obj)
if not _err:
# _results contains the returned locators if any
for x in _results:
# use whatever logic you want to decide which results to use.
# For the demo code, we just pick one or two at random:
grasp.tprint("Got", reg_obj.name, "at",
x.source.locator, x.source.protocol, x.source.port)
#"Got AN_join_registrar at None 6 7017"
grasp.tprint("\n\nGot", reg_obj.name, "at", x.source.locator, x.source.protocol, x.source.port, "\n\n")
#Got AN_join_registrar at 2002:ac14::3 6 80
#if (not registrar1) and grasp._prng.randint(0,2):
if (not registrar1):
registrar1 = x.source
#elif grasp._prng.randint(0,2):
else:
if x.source != registrar1:
registrar2 = x.source
grasp.tprint(registrar1, registrar2)
#<grasp.asa_locator object at 0x7fd37cc16390> None
else:
grasp.tprint("get_flood failed", grasp.etext[_err])
###################################
# Flood the chosen ones to neighbors
###################################
if registrar1:
#grasp.tprint("Floodout1")
floodout(registrar1)
if registrar2:
#grasp.tprint("Floodout2")
floodout(registrar2)
grasp.tprint(registrar1,registrar2) # <grasp.asa_locator object at 0x02F27370> None
###################################
# Listen for a pledge with timeout
###################################
# Here, do the socket calls etc. to listen
# for a BRSKI request from a pledge.
# But for the demo, we just pretend...
time.sleep(5)
# simulate no request from pledge
'''
if grasp._prng.randint(0,2) == 0:
grasp.tprint("No pledge contacted proxy")
else:
###################################
# BRSKI request received, now proxy it
###################################
# Here, do the socket calls etc. to talk
# to the registrar.
# But for the demo, we just pretend...
try:
grasp.tprint("Pretending to contact registrar")
# (socket calls etc)
# simulate a random failure with a divide-by-zero
_= 1/grasp._prng.randint(0,3)
except:
# Socket failure, we should mark this registrar as expired.
grasp.tprint("Communication failed, expiring that registrar")
'''
###################################
# Wait and loop back to find another registrar
# and wait for another pledge.
###################################
else:
grasp.tprint("No registrar found, waiting to try again")
time.sleep(18) # wait chosen to avoid synchronicity with Reggie
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
1212,
318,
617,
13605,
2438,
4478,
703,
257,
347,
6998,
37845,
15741,
561,
198,
19796,
257,
4214,
20040... | 2.846597 | 2,601 |
# Copyright 2018 Changan Wang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
#from scipy.misc import imread, imsave, imshow, imresize
import tensorflow as tf
import numpy as np
from net import xdet_body_v2
from utility import train_helper
from utility import eval_helper
from utility import metrics
from dataset import dataset_factory
from preprocessing import preprocessing_factory
from preprocessing import anchor_manipulator
from preprocessing import common_preprocessing
# hardware related configuration
tf.app.flags.DEFINE_integer(
'num_readers', 16,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 48,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'num_cpu_threads', 0,
'The number of cpu cores used to train.')
tf.app.flags.DEFINE_float(
'gpu_memory_fraction', 1., 'GPU memory fraction to use.')
# scaffold related configuration
tf.app.flags.DEFINE_string(
'data_dir', '../PASCAL/VOC_TF/VOC2007TEST_TF/',
'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_string(
'dataset_name', 'pascalvoc_2007', 'The name of the dataset to load.')
tf.app.flags.DEFINE_integer(
'num_classes', 21, 'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'test', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'model_dir', './logs_v2/',
'The directory where the model will be stored.')
tf.app.flags.DEFINE_string(
'debug_dir', './Debug_v2/',
'The directory where the debug files will be stored.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
'save_summary_steps', 10,
'The frequency with which summaries are saved, in seconds.')
# model related configuration
tf.app.flags.DEFINE_integer(
'train_image_size', 304,
'The size of the input image for the model to use.')
tf.app.flags.DEFINE_integer(
'resnet_size', 50,
'The size of the ResNet model to use.')
tf.app.flags.DEFINE_string(
'data_format', 'channels_first', # 'channels_first' or 'channels_last'
'A flag to override the data format used in the model. channels_first '
'provides a performance boost on GPU but is not always compatible '
'with CPU. If left unspecified, the data format will be chosen '
'automatically based on whether TensorFlow was built for CPU or GPU.')
tf.app.flags.DEFINE_float(
'weight_decay', 0.0002, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'negative_ratio', 3., 'Negative ratio in the loss function.')
tf.app.flags.DEFINE_float(
'match_threshold', 0.6, 'Matching threshold in the loss function.')#0.6
tf.app.flags.DEFINE_float(
'neg_threshold', 0.4, 'Matching threshold for the negtive examples in the loss function.')#0.45
tf.app.flags.DEFINE_float(
'select_threshold', 0.01, 'Class-specific confidence score threshold for selecting a box.')
tf.app.flags.DEFINE_float(
'nms_threshold', 0.4, 'Matching threshold in NMS algorithm.')
tf.app.flags.DEFINE_integer(
'nms_topk_percls', 200, 'Number of object for each class to keep after NMS.')
tf.app.flags.DEFINE_integer(
'nms_topk', 200, 'Number of total object to keep after NMS.')
# checkpoint related configuration
tf.app.flags.DEFINE_string(
'checkpoint_path', './model/resnet50',#None,
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'model_scope', 'xdet_resnet',
'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_boolean(
'run_on_cloud', True,
'Wether we will train on cloud (checkpoint will be found in the "data_dir/cloud_checkpoint_path").')
tf.app.flags.DEFINE_string(
'cloud_checkpoint_path', 'resnet50',
'The path to a checkpoint from which to fine-tune.')
FLAGS = tf.app.flags.FLAGS
from dataset import dataset_common
label2name_table = gain_translate_table()
def modified_smooth_l1(bbox_pred, bbox_targets, bbox_inside_weights = 1., bbox_outside_weights = 1., sigma = 1.):
"""
ResultLoss = outside_weights * SmoothL1(inside_weights * (bbox_pred - bbox_targets))
SmoothL1(x) = 0.5 * (sigma * x)^2, if |x| < 1 / sigma^2
|x| - 0.5 / sigma^2, otherwise
"""
sigma2 = sigma * sigma
inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))
smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)
smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)
smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)
smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),
tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))
outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)
return outside_mul
if not FLAGS.run_on_cloud:
from scipy.misc import imread, imsave, imshow, imresize
from utility import draw_toolbox
#[feature_h, feature_w, num_anchors, 4]
# only support batch_size 1
def xdet_model_fn(features, labels, mode, params):
"""Our model_fn for ResNet to be used with our Estimator."""
num_anchors_list = labels['num_anchors_list']
num_feature_layers = len(num_anchors_list)
shape = labels['targets'][-1]
if mode != tf.estimator.ModeKeys.TRAIN:
org_image = labels['targets'][-2]
isdifficult = labels['targets'][-3]
bbox_img = labels['targets'][-4]
gbboxes_raw = labels['targets'][-5]
glabels_raw = labels['targets'][-6]
glabels = labels['targets'][:num_feature_layers][0]
gtargets = labels['targets'][num_feature_layers : 2 * num_feature_layers][0]
gscores = labels['targets'][2 * num_feature_layers : 3 * num_feature_layers][0]
with tf.variable_scope(params['model_scope'], default_name = None, values = [features], reuse=tf.AUTO_REUSE):
backbone = xdet_body_v2.xdet_resnet_v2(params['resnet_size'], params['data_format'])
body_cls_output, body_regress_output = backbone(inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))
cls_pred, location_pred = xdet_body_v2.xdet_head(body_cls_output, body_regress_output, params['num_classes'], num_anchors_list[0], (mode == tf.estimator.ModeKeys.TRAIN), data_format=params['data_format'])
if params['data_format'] == 'channels_first':
cls_pred = tf.transpose(cls_pred, [0, 2, 3, 1])
location_pred = tf.transpose(location_pred, [0, 2, 3, 1])
#org_image = tf.transpose(org_image, [0, 2, 3, 1])
# batch size is 1
shape = tf.squeeze(shape, axis = 0)
glabels = tf.squeeze(glabels, axis = 0)
gtargets = tf.squeeze(gtargets, axis = 0)
gscores = tf.squeeze(gscores, axis = 0)
cls_pred = tf.squeeze(cls_pred, axis = 0)
location_pred = tf.squeeze(location_pred, axis = 0)
if mode != tf.estimator.ModeKeys.TRAIN:
org_image = tf.squeeze(org_image, axis = 0)
isdifficult = tf.squeeze(isdifficult, axis = 0)
gbboxes_raw = tf.squeeze(gbboxes_raw, axis = 0)
glabels_raw = tf.squeeze(glabels_raw, axis = 0)
bbox_img = tf.squeeze(bbox_img, axis = 0)
bboxes_pred = labels['decode_fn'](location_pred)#(tf.reshape(location_pred, location_pred.get_shape().as_list()[:-1] + [-1, 4]))#(location_pred)#
eval_ops, save_image_op = bboxes_eval(org_image, shape, bbox_img, cls_pred, bboxes_pred, glabels_raw, gbboxes_raw, isdifficult, params['num_classes'])
_ = tf.identity(save_image_op, name='save_image_with_bboxes_op')
cls_pred = tf.reshape(cls_pred, [-1, params['num_classes']])
location_pred = tf.reshape(location_pred, [-1, 4])
glabels = tf.reshape(glabels, [-1])
gscores = tf.reshape(gscores, [-1])
gtargets = tf.reshape(gtargets, [-1, 4])
# raw mask for positive > 0.5, and for negetive < 0.3
# each positive examples has one label
positive_mask = glabels > 0#tf.logical_and(glabels > 0, gscores > params['match_threshold'])
fpositive_mask = tf.cast(positive_mask, tf.float32)
n_positives = tf.reduce_sum(fpositive_mask)
# negtive examples are those max_overlap is still lower than neg_threshold, note that some positive may also has lower jaccard
# note those gscores is 0 is either be ignored during anchors encode or anchors have 0 overlap with all ground truth
#negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(tf.logical_or(positive_mask, glabels < 0)), gscores < params['neg_threshold']), gscores > 0.)
negtive_mask = tf.logical_and(tf.equal(glabels, 0), gscores > 0.)
#negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(positive_mask), gscores < params['neg_threshold']), gscores > 0.)
#negtive_mask = tf.logical_and(gscores < params['neg_threshold'], tf.logical_not(positive_mask))
fnegtive_mask = tf.cast(negtive_mask, tf.float32)
n_negtives = tf.reduce_sum(fnegtive_mask)
n_neg_to_select = tf.cast(params['negative_ratio'] * n_positives, tf.int32)
n_neg_to_select = tf.minimum(n_neg_to_select, tf.cast(n_negtives, tf.int32))
# hard negative mining for classification
predictions_for_bg = tf.nn.softmax(cls_pred)[:, 0]
prob_for_negtives = tf.where(negtive_mask,
0. - predictions_for_bg,
# ignore all the positives
0. - tf.ones_like(predictions_for_bg))
topk_prob_for_bg, _ = tf.nn.top_k(prob_for_negtives, k=n_neg_to_select)
selected_neg_mask = prob_for_negtives > topk_prob_for_bg[-1]
# # random select negtive examples for classification
# selected_neg_mask = tf.random_uniform(tf.shape(gscores), minval=0, maxval=1.) < tf.where(
# tf.greater(n_negtives, 0),
# tf.divide(tf.cast(n_neg_to_select, tf.float32), n_negtives),
# tf.zeros_like(tf.cast(n_neg_to_select, tf.float32)),
# name='rand_select_negtive')
# include both selected negtive and all positive examples
final_mask = tf.stop_gradient(tf.logical_or(tf.logical_and(negtive_mask, selected_neg_mask), positive_mask))
total_examples = tf.reduce_sum(tf.cast(final_mask, tf.float32))
# add mask for glabels and cls_pred here
glabels = tf.boolean_mask(tf.clip_by_value(glabels, 0, FLAGS.num_classes), tf.stop_gradient(final_mask))
cls_pred = tf.boolean_mask(cls_pred, tf.stop_gradient(final_mask))
location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))
gtargets = tf.boolean_mask(gtargets, tf.stop_gradient(positive_mask))
# Calculate loss, which includes softmax cross entropy and L2 regularization.
cross_entropy = tf.cond(n_positives > 0., lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred), lambda: 0.)
#cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name='cross_entropy_loss')
tf.summary.scalar('cross_entropy_loss', cross_entropy)
loc_loss = tf.cond(n_positives > 0., lambda: modified_smooth_l1(location_pred, tf.stop_gradient(gtargets), sigma=1.), lambda: tf.zeros_like(location_pred))
#loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets))
loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1))
loc_loss = tf.identity(loc_loss, name='location_loss')
tf.summary.scalar('location_loss', loc_loss)
tf.losses.add_loss(loc_loss)
with tf.control_dependencies([save_image_op]):
# Add weight decay to the loss. We exclude the batch norm variables because
# doing so leads to a small improvement in accuracy.
loss = 1.2 * (cross_entropy + loc_loss) + params['weight_decay'] * tf.add_n(
[tf.nn.l2_loss(v) for v in tf.trainable_variables()
if 'batch_normalization' not in v.name])
total_loss = tf.identity(loss, name='total_loss')
predictions = {
'classes': tf.argmax(cls_pred, axis=-1),
'probabilities': tf.reduce_max(tf.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1),
'bboxes_predict': tf.reshape(bboxes_pred, [-1, 4]),
'saved_image_index': save_image_op }
summary_hook = tf.train.SummarySaverHook(
save_secs=FLAGS.save_summary_steps,
output_dir=FLAGS.model_dir,
summary_op=tf.summary.merge_all())
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions,
evaluation_hooks = [summary_hook],
loss=loss, eval_metric_ops=eval_ops)#=eval_ops)
else:
raise ValueError('This script only support predict mode!')
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| [
2,
15069,
2864,
22597,
272,
15233,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7... | 2.412068 | 5,817 |
import os
from run import TEST_CASES_DIR, run_test_case
FEATURE_DIR = os.path.join(TEST_CASES_DIR, "02_feature_door")
run_test_case(os.path.join(FEATURE_DIR, "01_01_test_door.py"))
run_test_case(os.path.join(FEATURE_DIR, "01_02_test_door.py"))
| [
11748,
28686,
198,
6738,
1057,
1330,
43001,
62,
34,
1921,
1546,
62,
34720,
11,
1057,
62,
9288,
62,
7442,
628,
198,
15112,
40086,
62,
34720,
796,
28686,
13,
6978,
13,
22179,
7,
51,
6465,
62,
34,
1921,
1546,
62,
34720,
11,
366,
2999,
... | 2.296296 | 108 |
# -*- coding: utf-8 -*-
import numpy as np
from scipy.misc import derivative
import logging
import coloredlogs
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG', logger=logger,fmt='%(asctime)s,%(msecs)03d %(programname)s, %(name)s[%(process)d] %(levelname)s %(message)s')
class approx_DL():
"""
Approximate luminosity_distance relation,
agrees with astropy.FlatLambdaCDM(H0=70, Om0=0.3, Ob0=None) better than 1%
"""
def symmetrize(a):
"""
Symmmetrize matrix
"""
return a + a.T - np.diag(a.diagonal())
def random_X_bzl(size, numBands=5, redshiftMax=3.0):
"""Create random (but reasonable) input space for photo-z GP """
X = np.zeros((size, 3))
X[:, 0] = np.random.randint(low=0, high=numBands-1, size=size)
X[:, 1] = np.random.uniform(low=0.1, high=redshiftMax, size=size)
X[:, 2] = np.random.uniform(low=0.5, high=10.0, size=size)
return X
def random_filtercoefs(numBands, numCoefs):
"""Create random (but reasonable) coefficients describing
numBands photometric filters as sum of gaussians"""
fcoefs_amp\
= np.random.uniform(low=0., high=1., size=numBands*numCoefs)\
.reshape((numBands, numCoefs))
fcoefs_mu\
= np.random.uniform(low=3e3, high=1e4, size=numBands*numCoefs)\
.reshape((numBands, numCoefs))
fcoefs_sig\
= np.random.uniform(low=30, high=500, size=numBands*numCoefs)\
.reshape((numBands, numCoefs))
return fcoefs_amp, fcoefs_mu, fcoefs_sig
def random_linecoefs(numLines):
"""Create random (but reasonable) coefficients describing lines in SEDs"""
lines_mu = np.random.uniform(low=1e3, high=1e4, size=numLines)
lines_sig = np.random.uniform(low=5, high=50, size=numLines)
return lines_mu, lines_sig
def random_hyperparams():
"""Create random (but reasonable) hyperparameters for photo-z GP"""
alpha_T, var_C, var_L = np.random.uniform(low=0.5, high=2.0, size=3)
alpha_C, alpha_L = np.random.uniform(low=10.0, high=1000.0, size=2)
return var_C, var_L, alpha_C, alpha_L, alpha_T
def dirichlet(alphas, rsize=1):
"""
Draw samples from a Dirichlet distribution.
"""
gammabs = np.array([np.random.gamma(alpha, size=rsize)
for alpha in alphas])
fbs = gammabs / gammabs.sum(axis=0)
return fbs.T
def approx_flux_likelihood(
f_obs, # nf
f_obs_var, # nf
f_mod, # nz, nt, nf
ell_hat=0, # 1 or nz, nt
ell_var=0, # 1 or nz, nt
f_mod_covar=None, # nz, nt, nf (, nf)
marginalizeEll=True,
normalized=False,
renormalize=True,
returnChi2=False,
returnEllML=False):
"""
Approximate flux likelihood, with scaling of both the mean and variance.
This approximates the true likelihood with an iterative scheme.
"""
assert len(f_obs.shape) == 1
assert len(f_obs_var.shape) == 1
assert len(f_mod.shape) == 3
nz, nt, nf = f_mod.shape
if f_mod_covar is not None:
assert len(f_mod_covar.shape) == 3
if f_mod_covar is None or len(f_mod_covar.shape) == 3:
f_obs_r = f_obs[None, None, :]
ellML = 0
niter = 1 if f_mod_covar is None else 2
for i in range(niter):
if f_mod_covar is not None:
var = f_obs_var[None, None, :] + ellML**2 * f_mod_covar
else:
var = f_obs_var[None, None, :]
invvar = 1/var # nz * nt * nf
# np.where(f_obs_r/var < 1e-6, 0.0, var**-1.0) # nz * nt * nf
FOT = np.sum(f_mod * f_obs_r * invvar, axis=2)
FTT = np.sum(f_mod**2 * invvar, axis=2)
FOO = np.sum(f_obs_r**2 * invvar, axis=2)
if np.all(ell_var > 0):
FOT += ell_hat / ell_var # nz * nt
FTT += 1. / ell_var # nz * nt
FOO += ell_hat**2 / ell_var # nz * nt
log_sigma_det = np.sum(np.log(var), axis=2)
ellbk = 1*ellML
ellML = (FOT / FTT)[:, :, None]
if returnEllML:
return ellML
chi2 = FOO - FOT**2.0 / FTT # nz * nt
if returnChi2:
return chi2
logDenom = 0.
if normalized:
logDenom = logDenom + log_sigma_det + nf * np.log(2*np.pi)
if np.all(ell_var > 0):
logDenom = logDenom + np.log(2*np.pi * ell_var)
if marginalizeEll:
logDenom = logDenom + np.log(FTT)
if np.all(ell_var > 0):
logDenom = logDenom - np.log(2*np.pi)
like = -0.5*chi2 - 0.5*logDenom # nz * nt
if renormalize:
like -= like.max()
return np.exp(like) # nz * nt
def CIlevel(redshiftGrid, PDF, fraction, numlevels=200):
"""
Computes confidence interval from PDF.
"""
evidence = np.trapz(PDF, redshiftGrid)
for level in np.linspace(0, PDF.max(), num=numlevels):
ind = np.where(PDF <= level)
resint = np.trapz(PDF[ind], redshiftGrid[ind])
if resint >= fraction*evidence:
return level
def kldiv(p, q):
"""Kullback-Leibler divergence D(P || Q) for discrete distributions"""
return np.sum(np.where(p != 0, p * np.log(p / q), 0))
def computeMetrics(ztrue, redshiftGrid, PDF, confIntervals):
"""
Compute various metrics on the PDF
"""
zmean = np.average(redshiftGrid, weights=PDF)
zmap = redshiftGrid[np.argmax(PDF)]
zstdzmean = np.sqrt(np.average((redshiftGrid-zmean)**2, weights=PDF))
zstdzmap = np.sqrt(np.average((redshiftGrid-zmap)**2, weights=PDF))
pdfAtZ = np.interp(ztrue, redshiftGrid, PDF)
cumPdfAtZ = np.interp(ztrue, redshiftGrid, PDF.cumsum())
confidencelevels = [
CIlevel(redshiftGrid, PDF, 1.0 - confI) for confI in confIntervals
]
return [ztrue, zmean, zstdzmean, zmap, zstdzmap, pdfAtZ, cumPdfAtZ]\
+ confidencelevels
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
44374,
1330,
27255,
198,
198,
11748,
18931,
198,
11748,
16396,
6404,
82,
198,
198,
6404,
1362,
796,
1893... | 2.059965 | 2,835 |
import shutil
from pathlib import Path
from typing import Dict
import pytest
from ethpm_types.manifest import PackageManifest
@pytest.fixture
@pytest.fixture
| [
11748,
4423,
346,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
360,
713,
198,
198,
11748,
12972,
9288,
198,
6738,
4555,
4426,
62,
19199,
13,
805,
8409,
1330,
15717,
5124,
8409,
628,
198,
31,
9078,
9288,
13,
69,
9602,
62... | 3.211538 | 52 |
from nate.svonet.graph_svo import generate_ticks, find_max_burst
import networkx as nx
import stop_words as sw
import copy
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.ticker import MaxNLocator
import numpy as np
from multiprocessing import Process, Queue
from os import cpu_count
| [
6738,
299,
378,
13,
21370,
36823,
13,
34960,
62,
82,
13038,
1330,
7716,
62,
83,
3378,
11,
1064,
62,
9806,
62,
31961,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
2245,
62,
10879,
355,
1509,
198,
11748,
4866,
198,
11748,
19798,
29... | 3.221239 | 113 |
from __future__ import print_function, absolute_import
from __future__ import unicode_literals
import argparse
import logging
import logging.config
import sys
from opentargets_validator.helpers import file_or_resource, URLZSource
from opentargets_validator.validator import validate
if __name__ == '__main__':
sys.exit(main())
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
18931,
13,
11250,
198,
11748,
25064,
198,
198,
... | 3.383838 | 99 |
# -*- coding: utf-8 -*-
'''
Retrieve Pillar data by doing a SQLite3 query
sqlite3 is included in the stdlib since python2.5.
This module is a concrete implementation of the sql_base ext_pillar for SQLite3.
:maturity: new
:platform: all
Configuring the sqlite3 ext_pillar
=====================================
Use the 'sqlite3' key under ext_pillar for configuration of queries.
SQLite3 database connection configuration requires the following values
configured in the master config:
Note, timeout is in seconds.
.. code-block:: yaml
pillar.sqlite3.database: /var/lib/salt/pillar.db
pillar.sqlite3.timeout: 5.0
Complete example
=====================================
.. code-block:: yaml
pillar:
sqlite3:
database: '/var/lib/salt/pillar.db'
timeout: 5.0
ext_pillar:
- sqlite3:
fromdb:
query: 'SELECT col1,col2,col3,col4,col5,col6,col7
FROM some_random_table
WHERE minion_pattern LIKE %s'
depth: 5
as_list: True
with_lists: [1,3]
'''
from __future__ import absolute_import
# Import python libs
from contextlib import contextmanager
import logging
import sqlite3
# Import Salt libs
from salt.pillar.sql_base import SqlBaseExtPillar
# Set up logging
log = logging.getLogger(__name__)
class SQLite3ExtPillar(SqlBaseExtPillar):
    '''
    Concrete sql_base ext_pillar for SQLite3: receives and processes the
    database rows produced by the configured queries.
    '''

    @classmethod
    def _get_options(cls):
        '''
        Return the options used for the SQLite3 connection.

        Values come from ``pillar:sqlite3`` in the master config; any key
        not configured there falls back to the documented default.
        (Fix: the first parameter of this ``classmethod`` was previously
        misleadingly named ``self``.)
        '''
        defaults = {'database': '/var/lib/salt/pillar.db',
                    'timeout': 5.0}
        _options = {}
        # __opts__ is injected into the module by the Salt loader at runtime.
        _opts = __opts__.get('pillar', {}).get('sqlite3', {})
        for attr in defaults:
            if attr not in _opts:
                log.debug('Using default for SQLite3 pillar {0}'.format(attr))
                _options[attr] = defaults[attr]
                continue
            _options[attr] = _opts[attr]
        return _options

    @contextmanager
    def _get_cursor(self):
        '''
        Yield a SQLite3 cursor, closing the connection afterwards.

        NOTE: sqlite3 errors raised while the cursor is in use are logged
        and *suppressed* (the generator does not re-raise), matching the
        behavior of the other sql_base pillar implementations.
        '''
        _options = self._get_options()
        conn = sqlite3.connect(_options.get('database'),
                               timeout=float(_options.get('timeout')))
        cursor = conn.cursor()
        try:
            yield cursor
        except sqlite3.Error as err:
            log.exception('Error in ext_pillar SQLite3: {0}'.format(err.args))
        finally:
            conn.close()
def ext_pillar(minion_id,
               pillar,
               *args,
               **kwargs):
    '''
    Salt ext_pillar entry point: execute the configured queries against
    SQLite3, merge the rows and return the result as a dict.

    minion_id -- ID of the minion the pillar is compiled for
    pillar    -- pillar data compiled so far (passed through to the fetcher)
    '''
    return SQLite3ExtPillar().fetch(minion_id, pillar, *args, **kwargs)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
9781,
30227,
39179,
1366,
416,
1804,
257,
16363,
578,
18,
12405,
198,
198,
25410,
578,
18,
318,
3017,
287,
262,
14367,
8019,
1201,
21015,
17,
13,
20,
13,... | 2.260064 | 1,242 |
from sklearn.feature_selection import chi2
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import VarianceThreshold

# Reference note: which univariate scoring functions fit which task.
'''
For regression: f_regression, mutual_info_regression
For classification: chi2, f_classif, mutual_info_classif
'''

# Reference note on VarianceThreshold's default.
'''
threshold=0.0 means keeps only those features with non-zero variance
'''
| [
6738,
1341,
35720,
13,
30053,
62,
49283,
1330,
33166,
17,
198,
6738,
1341,
35720,
13,
30053,
62,
49283,
1330,
9683,
42,
13014,
198,
6738,
1341,
35720,
13,
30053,
62,
49283,
1330,
15965,
590,
817,
10126,
628,
198,
7061,
6,
198,
220,
22... | 3.3 | 110 |
import boto3
import subprocess

# By default, since this code is run from the Organizations account, that
# account is trusted. Use this variable to add another account as trusted,
# such as an Automation or Security account from which CI/CD pipelines will
# be run. If you don't need or have a dedicated account, just use the
# Organization Account ID.
trusted_account = '<ENTER THE ACCOUNT ID YOU WILL RUN CI/CD PIPELINES FROM>'

if __name__ == '__main__':
    # Session credentials of the current user; used below to assume roles
    # in every member account.
    org_session = boto3.Session()
    accounts = get_org_accounts(org_session)  # helper defined elsewhere in this project

    # Current AWS Organizations (management) account ID.
    aws_org_account = boto3.client('sts').get_caller_identity()['Account']

    # To bootstrap every region, obtain the region list via EC2 and
    # un-comment the region loop further down.
    #ec2 = org_session.client('ec2', region_name='us-east-1')
    #regions = ec2.describe_regions()['Regions']

    for account in accounts:
        try:
            account = account.strip()
            # Assume the cross-account admin role in the member account.
            # If you have Control Tower enabled and the accounts enrolled,
            # use 'ControlTowerExecution'; under normal conditions this is
            # 'OrganizationAccountAccessRole'.
            session, resp = assume_role(org_session, account, 'OrganizationAccountAccessRole')

            # Temporary credentials of the assumed role, exported as
            # environment variables for the `cdk` CLI invocation below.
            aws_access_key_id = str(resp['Credentials']['AccessKeyId'])
            aws_secret_access_key = str(resp['Credentials']['SecretAccessKey'])
            aws_session_token = str(resp['Credentials']['SessionToken'])

            # Bootstrap a single region. To bootstrap all regions, comment
            # out the next line and un-comment the `for region ...` loop.
            region_name = 'us-east-1'
            #for region in regions:
            #region_name = region['RegionName']

            # Build the shell command:
            #   export the credentials, then
            #   cdk bootstrap aws://<account>/<region> ... --trust <CI/CD account>
            # (f-string replaces the original hard-to-read `+` concatenation.)
            command = (
                f"export AWS_ACCESS_KEY_ID={aws_access_key_id};"
                f"export AWS_SECRET_ACCESS_KEY={aws_secret_access_key};"
                f"export AWS_SESSION_TOKEN={aws_session_token}; "
                f"cdk bootstrap aws://{account}/{region_name} "
                f"--cloudformation-execution-policies arn:aws:iam::aws:policy/AdministratorAccess "
                f"--trust {trusted_account}"
            )
            # shell=True is required: the command relies on `export` and `;`
            # chaining. All interpolated values come from AWS APIs, not
            # untrusted user input.
            aws_cli = subprocess.run(command, shell=True)
        except Exception as e:
            # Best-effort: report the failure for this account and continue
            # with the next one. (Removed a redundant `pass` after print.)
            print(e)
pass | [
11748,
275,
2069,
18,
198,
11748,
850,
14681,
198,
198,
2,
2750,
4277,
1201,
2438,
318,
1057,
422,
262,
41846,
1848,
11,
262,
1848,
318,
13467,
13,
5765,
428,
7885,
284,
751,
1194,
1848,
355,
13467,
884,
355,
281,
17406,
341,
393,
4... | 3.106622 | 891 |
"""
Pythagorean triplet version 1.2.10.20
Copyright (c) 2020 Shahibur Rahaman
Licensed under MIT
"""
print("--------------------------------------------------------")
print("")
hypo = float(input("Enter the length of hypotenuse of triangle: "))
base = float(input("Enter the length of base of triangle: "))
height = float(input("Enter the length of height of triangle: "))
hy2 = hypo * hypo
ba2 = base * base
he2 = height * height
print("")
if hypo < base + height:
if hy2 == ba2 + he2:
print(f"The dimensions {hypo}, {base} and {height} forms a pythagorean triplet")
else:
print(f"The dimensions {hypo}, {base} and {height} are not a pythagorean triplet")
else:
print("The given dimensions do not form a triangle.")
print("")
print("--------------------------------------------------------") | [
37811,
198,
47,
5272,
363,
29456,
15055,
83,
2196,
352,
13,
17,
13,
940,
13,
1238,
198,
198,
15269,
357,
66,
8,
12131,
18381,
38616,
18655,
10546,
198,
26656,
15385,
739,
17168,
198,
37811,
198,
198,
4798,
7203,
43801,
4943,
198,
4798... | 3.402542 | 236 |
""" This script exports the tellor query registry to json
"""
from telliot.queries.query_registry import query_registry
from telliot.queries.query_registry import QueryRegistry
exported = query_registry.json(indent=2)
print(exported)
with open("query_registry_export.json", "w") as f:
f.write(exported)
imported = QueryRegistry.parse_raw(exported)
| [
37811,
770,
4226,
15319,
262,
1560,
273,
12405,
20478,
284,
33918,
198,
37811,
198,
6738,
1560,
5151,
13,
421,
10640,
13,
22766,
62,
2301,
4592,
1330,
12405,
62,
2301,
4592,
198,
6738,
1560,
5151,
13,
421,
10640,
13,
22766,
62,
2301,
... | 3.051724 | 116 |
from typing import Union
from genomics_data_index.api.query.SamplesQuery import SamplesQuery
from genomics_data_index.api.query.impl.cluster.ClusterScoreMethod import ClusterScoreMethod
from genomics_data_index.storage.SampleSet import SampleSet
class ClusterScoreMRCAJaccard(ClusterScoreMethod):
    """
    A method which assigns cluster scores based on all the samples under the
    most recent common ancestor (MRCA) of the passed set of samples.

    NOTE(review): only the docstring is visible in this chunk; the scoring
    implementation (presumably a Jaccard similarity between the input sample
    set and the MRCA's sample set, per the class name) is defined elsewhere —
    confirm against the full file.
    """
| [
6738,
19720,
1330,
4479,
198,
198,
6738,
2429,
31994,
62,
7890,
62,
9630,
13,
15042,
13,
22766,
13,
50,
12629,
20746,
1330,
3409,
2374,
20746,
198,
6738,
2429,
31994,
62,
7890,
62,
9630,
13,
15042,
13,
22766,
13,
23928,
13,
565,
5819,... | 3.5 | 132 |
from __future__ import division, print_function, absolute_import
import copy
import os.path as osp
from collections import defaultdict
import numpy as np
import tarfile
import zipfile
import torch
import operator
from torchreid.utils import read_image, download_url, mkdir_if_missing
class Dataset:
    """An abstract class representing a Dataset.

    This is the base class for ``ImageDataset`` and ``VideoDataset``.

    Args:
        train (list): contains tuples of (img_path(s), pid, camid).
        query (list): contains tuples of (img_path(s), pid, camid).
        gallery (list): contains tuples of (img_path(s), pid, camid).
        transform: transform function.
        mode (str): 'train', 'query' or 'gallery'.
        combineall (bool): combines train, query and gallery in a dataset for training.
        verbose (bool): show information.
    """

    # Person IDs regarded as useless (e.g. background, false detections);
    # subclasses override as needed.
    _junk_pids = []

    def __add__(self, other):
        """Adds two datasets together (only the train set).

        Object and camera IDs of ``other`` are shifted past this dataset's
        per-``dataset_id`` ID ranges so the merged train set has no clashes.
        """
        updated_train = copy.deepcopy(self.train)

        for record in other.train:
            # Records may carry an optional 4th element: the dataset id.
            dataset_id = record[3] if len(record) > 3 else 0

            num_train_pids = 0
            if dataset_id in self.num_train_pids:
                num_train_pids = self.num_train_pids[dataset_id]
            old_obj_id = record[1]
            new_obj_id = old_obj_id + num_train_pids

            num_train_cams = 0
            if dataset_id in self.num_train_cams:
                num_train_cams = self.num_train_cams[dataset_id]
            old_cam_id = record[2]
            new_cam_id = old_cam_id + num_train_cams

            updated_record = record[:1] + (new_obj_id, new_cam_id) + record[3:]
            updated_train.append(updated_record)

        ###################################
        # Things to do beforehand:
        # 1. set verbose=False to avoid unnecessary print
        # 2. set combineall=False because combineall would have been applied
        #    if it was True for a specific dataset, setting it to True will
        #    create new IDs that should have been included
        ###################################
        # Image datasets store a path string in the first field; video
        # datasets store a sequence of paths.
        first_field = updated_train[0][0]
        if isinstance(first_field, str):
            return ImageDataset(
                updated_train,
                self.query,
                self.gallery,
                transform=self.transform,
                mode=self.mode,
                combineall=False,
                verbose=False
            )
        else:
            return VideoDataset(
                updated_train,
                self.query,
                self.gallery,
                transform=self.transform,
                mode=self.mode,
                combineall=False,
                verbose=False,
                seq_len=self.seq_len,
                sample_method=self.sample_method
            )

    def __radd__(self, other):
        """Supports sum([dataset1, dataset2, dataset3])."""
        # sum() starts from 0, so 0 + dataset must yield the dataset itself.
        if other == 0:
            return self
        else:
            return self.__add__(other)

    @staticmethod
    def parse_data(data):
        """Parses data list and returns the number of person IDs
        and the number of camera views, each as a {dataset_id: count} dict.

        Args:
            data (list): contains tuples of (img_path(s), pid, camid)
        """
        pids, cams = defaultdict(set), defaultdict(set)
        for record in data:
            dataset_id = record[3] if len(record) > 3 else 0
            pids[dataset_id].add(record[1])
            cams[dataset_id].add(record[2])
        num_pids = {dataset_id: len(dataset_pids) for dataset_id, dataset_pids in pids.items()}
        num_cams = {dataset_id: len(dataset_cams) for dataset_id, dataset_cams in cams.items()}
        return num_pids, num_cams

    def get_num_pids(self, data):
        """Returns the number of training person identities."""
        return self.parse_data(data)[0]

    def get_num_cams(self, data):
        """Returns the number of training cameras."""
        return self.parse_data(data)[1]

    def show_summary(self):
        """Shows dataset statistics. Subclasses are expected to override.

        (Fix: was decorated with ``@staticmethod`` while taking ``self``,
        which would have broken any ``instance.show_summary()`` call.)
        """
        pass

    def combine_all(self):
        """Combines train, query and gallery in a dataset for training.

        (Fix: was decorated with two stacked ``@staticmethod`` decorators
        while using ``self`` throughout — an instance method.)
        """
        combined = copy.deepcopy(self.train)

        # NOTE(review): _get_obj_ids and _relabel are not defined in this
        # chunk; presumably implemented elsewhere in the class hierarchy.
        new_obj_ids = self._get_obj_ids(self.query, self._junk_pids)
        new_obj_ids = self._get_obj_ids(self.gallery, self._junk_pids, new_obj_ids)

        # Map each (dataset_id, obj_id) to a dense 0..N-1 label.
        id2label_map = dict()
        for dataset_id, dataset_ids in new_obj_ids.items():
            id2label_map[dataset_id] = {obj_id: i for i, obj_id in enumerate(set(dataset_ids))}

        combined += self._relabel(self.query, self._junk_pids, id2label_map, self.num_train_pids)
        combined += self._relabel(self.gallery, self._junk_pids, id2label_map, self.num_train_pids)

        self.train = combined
        self.num_train_pids = self.get_num_pids(self.train)

    def download_dataset(self, dataset_dir, dataset_url):
        """Downloads and extracts dataset.

        Args:
            dataset_dir (str): dataset directory.
            dataset_url (str): url to download dataset.
        """
        if osp.exists(dataset_dir):
            return

        if dataset_url is None:
            raise RuntimeError(
                '{} dataset needs to be manually '
                'prepared, please follow the '
                'document to prepare this dataset'.format(
                    self.__class__.__name__
                )
            )

        print('Creating directory "{}"'.format(dataset_dir))
        mkdir_if_missing(dataset_dir)
        fpath = osp.join(dataset_dir, osp.basename(dataset_url))

        print(
            'Downloading {} dataset to "{}"'.format(
                self.__class__.__name__, dataset_dir
            )
        )
        download_url(dataset_url, fpath)

        print('Extracting "{}"'.format(fpath))
        # Try tar first; if the archive is not a tarball, fall back to zip.
        # (Fix: a bare `except:` here also swallowed KeyboardInterrupt etc.)
        try:
            tar = tarfile.open(fpath)
            tar.extractall(path=dataset_dir)
            tar.close()
        except tarfile.TarError:
            zip_ref = zipfile.ZipFile(fpath, 'r')
            zip_ref.extractall(dataset_dir)
            zip_ref.close()

        print('{} dataset is ready'.format(self.__class__.__name__))

    @staticmethod
    def check_before_run(required_files):
        """Checks if required files exist before going deeper.

        Args:
            required_files (str or list): string file name(s).
        """
        if isinstance(required_files, str):
            required_files = [required_files]

        for fpath in required_files:
            if not osp.exists(fpath):
                raise RuntimeError('"{}" is not found'.format(fpath))
# (Fix: removed a stray ``@staticmethod`` decorator that was applied to the
# class itself, turning ``ImageDataset`` into a staticmethod object and
# breaking instantiation and isinstance/subclass checks.)
class ImageDataset(Dataset):
    """A base class representing ImageDataset.

    All other image datasets should subclass it.

    ``__getitem__`` returns an image given index.
    It will return ``img``, ``pid``, ``camid`` and ``img_path``
    where ``img`` has shape (channel, height, width). As a result,
    data in each batch has shape (batch_size, channel, height, width).
    """
class VideoDataset(Dataset):
    """A base class representing VideoDataset.

    All other video datasets should subclass it.

    ``__getitem__`` returns an image given index.
    It will return ``imgs``, ``pid`` and ``camid``
    where ``imgs`` has shape (seq_len, channel, height, width). As a result,
    data in each batch has shape (batch_size, seq_len, channel, height, width).
    """
| [
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
11,
4112,
62,
11748,
198,
198,
11748,
4866,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
... | 2.219912 | 3,415 |
'''
Description:
Given a positive integer, return its corresponding column title as appear in an Excel sheet.
For example:
1 -> A
2 -> B
3 -> C
...
26 -> Z
27 -> AA
28 -> AB
...
Example 1:
Input: 1
Output: "A"
Example 2:
Input: 28
Output: "AB"
Example 3:
Input: 701
Output: "ZY"
'''
# n : the number of input
## Time Complexity: O( log n )
#
# The core procedure is a variant of base conversion from decimal to base 26.
# Thus, the depth of recursive call is of O( log n )
## Space Complexity: O( log n)
# The major overhead in space is to maintain call stack for recursive call, which is of O( log n )
if __name__ == '__main__':
    # NOTE(review): test_bench is not defined in this chunk; it is
    # presumably defined alongside the solution elsewhere in the file.
    test_bench()
7061,
6,
198,
198,
11828,
25,
198,
198,
15056,
257,
3967,
18253,
11,
1441,
663,
11188,
5721,
3670,
355,
1656,
287,
281,
24134,
9629,
13,
198,
198,
1890,
1672,
25,
628,
220,
220,
220,
352,
4613,
317,
198,
220,
220,
220,
362,
4613,
... | 2.818548 | 248 |
from __future__ import annotations
import logging
from abc import abstractmethod
from typing import cast
import docker
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
18931,
198,
6738,
450,
66,
1330,
12531,
24396,
198,
6738,
19720,
1330,
3350,
198,
198,
11748,
36253,
628,
198
] | 4.392857 | 28 |
## UNDERSTANDING DISSIPATION
from __future__ import print_function
path = '/home/mkloewer/python/swm/'
import os; os.chdir(path) # change working directory
import numpy as np
from scipy import sparse
from scipy.integrate import cumtrapz
import matplotlib.pyplot as plt
import time as tictoc
from netCDF4 import Dataset
import glob
from cmocean import cm

# import functions — the exec'd scripts inject the model parameters,
# operators (e.g. Q1, Q2), output helpers (e.g. h2mat) and rhs into globals.
exec(open(path+'swm_param.py').read())
exec(open(path+'swm_operators.py').read())
exec(open(path+'swm_output.py').read())
exec(open(path+'stoch/swm_rhs.py').read())

## OPTIONS
# Indices of the two run folders to load (data/run0003 and data/run0010).
runfolder = [3,10]

## read data
runpath = path+'data/run%04i' % runfolder[0]
ncu = Dataset(runpath+'/u.nc')
ncv = Dataset(runpath+'/v.nc')
nch = Dataset(runpath+'/h.nc')

# Snapshot 1000 steps before the end of the first run, flattened to 1D.
# NOTE(review): u1, v1, h1 and param1 are read but never used below.
u1 = ncu['u'][-1000,:,:].flatten()
v1 = ncv['v'][-1000,:,:].flatten()
h1 = nch['h'][-1000,:,:].flatten()
print('netCDF data read.')

# close netcdfs
ncu.close()
ncv.close()
nch.close()

param1 = np.load(runpath+'/param.npy').all()
param1['output'] = 0

##
runpath = path+'data/run%04i' % runfolder[1]
ncu = Dataset(runpath+'/u.nc')
ncv = Dataset(runpath+'/v.nc')
nch = Dataset(runpath+'/h.nc')

# Final snapshot of the second run.
u2 = ncu['u'][-1,:,:].flatten()
v2 = ncv['v'][-1,:,:].flatten()
h2 = nch['h'][-1,:,:].flatten()
print('netCDF data read.')

# close netcdfs
ncu.close()
ncv.close()
nch.close()

param2 = np.load(runpath+'/param.npy').all()
param2['output'] = 0

##
# Q1/Q2 come from the exec'd operator script and read `param` via the
# module globals; evaluate both forms on the second run's fields.
global param
param = param2
q = Q1(u2,v2,h2+param['H'])
q2 = Q2(u2,v2,h2+param['H'])

## plotting
fig,(ax1,ax2) = plt.subplots(1,2,sharex=True,sharey=True,figsize=(12,6))
plt.tight_layout(rect=[0,0,1,0.95])

# Rescale both fields to the colorbar units of 2e-16 m^-2 s^-2
# (Q1 additionally multiplied by 100 — see the panel title).
pq = q*2e16*100
pq2 = q2*2e16
levs = np.linspace(-abs(pq2).max()*0.02,abs(pq2).max()*0.02,64)

param = param2
c = ax1.contourf(param2['x_T'],param2['y_T'],h2mat(pq),levs,extend='both',cmap='RdBu_r')
c = ax2.contourf(param2['x_T'],param2['y_T'],h2mat(pq2),levs,extend='both',cmap='RdBu_r')

ax1.set_xticks([])
ax1.set_yticks([])

ax1.set_xlabel('x')
ax2.set_xlabel('x')
ax1.set_ylabel('y')

# Zoom into a sub-region of the domain (coordinates in meters).
ax1.set_xlim(571e3,1824e3)
ax1.set_ylim(1051e3,2131e3)

ax1.set_title(r'$100*\nabla\mathbf{u} \cdot \nabla(\nabla^2\mathbf{u})$')
ax2.set_title(r'$\mathbf{u} \cdot \nabla^4\mathbf{u}$')

cbar = plt.colorbar(c,ax=(ax1,ax2),ticks=[-1,-0.5,0,0.5,1])
cbar.set_label(r'[$2 \cdot 10^{-16}m^{-2}s^{-2}$]')

plt.show()
2235,
35219,
2257,
6981,
2751,
13954,
50,
4061,
6234,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6978,
796,
31051,
11195,
14,
28015,
5439,
413,
263,
14,
29412,
14,
2032,
76,
14,
6,
198,
11748,
28686,
26,
28686,
13,
354,
... | 2.016859 | 1,127 |
import tensorflow as tf
from ..libml.data_augmentations import weak_augment, medium_augment, strong_augment
def ict(x, u, height, width):
    """
    Prepare augmented batches for interpolation-consistency training.

    Args:
        x: tensor, labeled batch of images of shape [batch, height, width, channels]
        u: tensor, unlabeled batch of images of shape [batch, height, width, channels]
        height: int, height of images
        width: int, width of images
    Returns:
        The medium-augmented labeled batch, the weakly-augmented unlabeled
        batch (teacher input), and the medium-augmented unlabeled batch
        (student input).
    """
    labeled_aug = medium_augment(x, height, width)
    teacher_input = weak_augment(u, height, width)
    student_input = medium_augment(u, height, width)
    return labeled_aug, teacher_input, student_input
@tf.function
def ssl_loss_ict(labels_x, logits_x, labels_teacher, logits_student):
    """
    Computes two losses based on the labeled and unlabeled data:
    loss_x is the labeled cross-entropy loss and ict_loss the unlabeled
    consistency (MSE) loss.

    Args:
        labels_x: tensor, contains labels corresponding to logits_x of shape [batch, num_classes]
        logits_x: tensor, contains the logits of an batch of images of shape [batch, num_classes]
        labels_teacher: tensor, (soft) targets from the teacher model of shape [batch, num_classes]
        logits_student: tensor, logits of student model of shape [batch, num_classes]
    Returns:
        Two floating point numbers, the first representing the labeled CE
        loss and the second the MSE consistency loss between the teacher
        targets and the student's softmax predictions.
    """
    # Standard supervised cross-entropy on the labeled batch.
    x_loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels_x, logits=logits_x)
    x_loss = tf.reduce_mean(x_loss)
    # ICT consistency: per-sample MSE over classes, then batch mean.
    ict_loss = tf.reduce_mean((labels_teacher - tf.nn.softmax(logits_student)) ** 2, -1)
    ict_loss = tf.reduce_mean(ict_loss)
    return x_loss, ict_loss
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
11485,
8019,
4029,
13,
7890,
62,
559,
5154,
602,
1330,
4939,
62,
559,
5154,
11,
7090,
62,
559,
5154,
11,
1913,
62,
559,
5154,
628,
198,
4299,
220,
713,
7,
87,
11,
334,
11,
6001... | 2.606944 | 720 |
import os
import logging

# API version is configurable via env so the same suite can target v1/v2.
api_version = os.getenv("MENDER_API_VERSION", "v1")

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
#logging.getLogger("paramiko").setLevel(logging.DEBUG)
logging.info("Setting api_version as: " + api_version)

# Project-local API client modules (imported after logging is configured,
# so their import-time logging is captured at DEBUG level).
import authentication
import admission
import deployments
import artifacts
import inventory

# Shared, module-level client instances; admission/deployments/inventory
# authenticate through the single shared `auth` object.
auth = authentication.Authentication()
adm = admission.Admission(auth)
deploy = deployments.Deployments(auth)
image = artifacts.Artifacts()
inv = inventory.Inventory(auth)
| [
11748,
28686,
198,
11748,
18931,
198,
198,
15042,
62,
9641,
796,
28686,
13,
1136,
24330,
7203,
44,
10619,
1137,
62,
17614,
62,
43717,
1600,
366,
85,
16,
4943,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
3419,
198,
198,
64... | 3.292994 | 157 |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.awt
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from .selection import Selection as Selection_84d609fa
from .x_text_listener import XTextListener as XTextListener_b04d0b97
class XTextComponent(XInterface_8f010a43):
    """
    gives access to the text of a component and makes it possible to register event listeners.

    See Also:
        `API XTextComponent <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1awt_1_1XTextComponent.html>`_
    """
    # UNO namespace / type metadata consumed by the ooouno runtime.
    __ooo_ns__: str = 'com.sun.star.awt'
    __ooo_full_ns__: str = 'com.sun.star.awt.XTextComponent'
    __ooo_type_name__: str = 'interface'
    __pyunointerface__: str = 'com.sun.star.awt.XTextComponent'

    @abstractmethod
    def addTextListener(self, l: 'XTextListener_b04d0b97') -> None:
        """
        registers a text event listener.
        """
    @abstractmethod
    def getMaxTextLen(self) -> int:
        """
        returns the currently set maximum text length.
        """
    @abstractmethod
    def getSelectedText(self) -> str:
        """
        returns the currently selected text.
        """
    @abstractmethod
    def getSelection(self) -> 'Selection_84d609fa':
        """
        returns the current user selection.
        """
    @abstractmethod
    def getText(self) -> str:
        """
        returns the text of the component.
        """
    @abstractmethod
    def insertText(self, Sel: 'Selection_84d609fa', Text: str) -> None:
        """
        inserts text at the specified position (the given selection).
        """
    @abstractmethod
    def isEditable(self) -> bool:
        """
        returns if the text is editable by the user.
        """
    @abstractmethod
    def removeTextListener(self, l: 'XTextListener_b04d0b97') -> None:
        """
        unregisters a text event listener.
        """
    @abstractmethod
    def setEditable(self, bEditable: bool) -> None:
        """
        makes the text editable for the user or read-only.
        """
    @abstractmethod
    def setMaxTextLen(self, nLen: int) -> None:
        """
        sets the maximum text length.
        """
    @abstractmethod
    def setSelection(self, aSelection: 'Selection_84d609fa') -> None:
        """
        sets the user selection.
        """
    @abstractmethod
    def setText(self, aText: str) -> None:
        """
        sets the text of the component.
        """
__all__ = ['XTextComponent']
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
198,
2,
15069,
33160,
1058,
33,
6532,
12,
22405,
12,
12041,
25,
19935,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
4943,
198,
2,
345,
743,... | 2.576217 | 1,253 |
from sklearn.ensemble import RandomForestRegressor
# NOTE(review): `sklearn.datasets.base` is a private path and `load_boston`
# was removed in scikit-learn 1.2 — confirm the pinned sklearn version.
from sklearn.datasets.base import load_boston
from sklearn.model_selection import GridSearchCV

from skpro.parametric import ParametricEstimator
from skpro.parametric.estimators import Constant

# Probabilistic model: RandomForest point prediction combined with a
# constant standard deviation equal to mean(y).
model = ParametricEstimator(
    point=RandomForestRegressor(),
    std=Constant('mean(y)')
)

# Initiate GridSearch meta-estimator
# ('point__' routes the parameter to the nested point estimator.)
parameters = {'point__max_depth': [None, 5, 10, 15]}
clf = GridSearchCV(model, parameters)

# Optimize hyperparameters
X, y = load_boston(return_X_y=True)
clf.fit(X, y)

print('Best score is %f for parameter: %s' % (clf.best_score_, clf.best_params_))
# >>> Best score is -4.058729 for parameter: {'point__max_depth': 15}
6738,
1341,
35720,
13,
1072,
11306,
1330,
14534,
34605,
8081,
44292,
198,
6738,
1341,
35720,
13,
19608,
292,
1039,
13,
8692,
1330,
3440,
62,
65,
5744,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
24846,
18243,
33538,
198,
198,
67... | 2.900415 | 241 |
# -*- coding: utf-8 -*-

"""
@date: 2020/12/28 10:35 AM
@file: key_word.py
@author: zj
@description: shared key-name and file-name constants.
"""

# Dictionary key under which model output is stored.
KEY_OUTPUT = 'output'
# Dictionary key under which the loss value is stored.
KEY_LOSS = 'loss'
# Field separator used in the CSV files named below.
KEY_SEP = ',,'
# Default dataset file name.
KEY_DATASET = 'data.csv'
# Default class-list file name.
KEY_CLASSES = 'cls.csv'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
31,
4475,
25,
12131,
14,
1065,
14,
2078,
220,
41468,
39355,
230,
940,
25,
2327,
198,
31,
7753,
25,
1994,
62,
4775,
13,
9078,
198,
31,
9800,
25,
1976... | 1.93578 | 109 |
import pygame
import random

# Window size [width, height] and the main drawing surface.
screen_size = [360, 600]
screen = pygame.display.set_mode(screen_size)

score = 0
# NOTE(review): `green` and the `random` import are unused in this chunk;
# presumably used by helper functions defined elsewhere in the file.
green = (0, 255, 0)

pygame.font.init()

# NOTE(review): load, get_rand_offset, set_s_position and display_score are
# not defined in this chunk; presumably defined elsewhere in the file.
background = load('./img/space_open_space_planets_135213_360x640.jpg')
kill = load('./img/meteorBrown_med1.png')
user = load('./img/playerShip1_orange.png')

# Player x position and the y positions of the three meteor columns.
user_x = 160
c_positions = [0-get_rand_offset(), 0-get_rand_offset(), 0]

keep_alive = True
clock = pygame.time.Clock()
while keep_alive:
    pygame.event.get()
    keys = pygame.key.get_pressed()
    # Keyboard handling: left/right move the ship within [0, 300].
    if keys[pygame.K_RIGHT] and user_x < 300:
        user_x = user_x + 10
    elif keys[pygame.K_LEFT] and user_x > 0:
        user_x = user_x - 10
    elif keys [pygame.K_UP] and user_x > 0:
        user_x = user_x - 10
        # NOTE(review): pressing UP moves the ship and then exits the game
        # via quit() — confirm this is intended.
        quit('thanks for playing')
    elif keys [pygame.K_DOWN] and user_x > 0 :
        user_x = user_x - 100

    # Advance each meteor column at its fixed x position
    # (set_s_position presumably updates c_positions — confirm).
    set_s_position(0, 40)
    set_s_position(1, 160)
    set_s_position(2, 280)

    # Draw background, meteors, score and the player ship.
    screen.blit(background, [0, 0])
    screen.blit(kill, [40, c_positions[0]])
    screen.blit(kill, [160, c_positions[1]])
    screen.blit(kill, [280, c_positions[2]])
    display_score(score)
    screen.blit(user, [user_x, 500])

    # Collision checks: a meteor past y=500 while the ship occupies the
    # matching column costs 50 points.
    if c_positions[0] > 500 and user_x < 100:
        print('crash 1', user_x)
        score = score - 50
    if c_positions[2] > 500 and user_x > 200:
        print('crash 3', user_x)
        score = score - 50
    if c_positions[1] > 500 and user_x > 100 and user_x < 200:
        print('crash 2', user_x)
        score = score - 50

    pygame.display.update()
    clock.tick(60)
| [
11748,
12972,
6057,
198,
11748,
4738,
198,
198,
9612,
62,
7857,
796,
685,
15277,
11,
10053,
60,
198,
9612,
796,
12972,
6057,
13,
13812,
13,
2617,
62,
14171,
7,
9612,
62,
7857,
8,
198,
26675,
796,
657,
198,
14809,
796,
357,
15,
11,
... | 2.171589 | 711 |
from pathlib import Path
from rdflib import Graph, Namespace
from rdflib.namespace import TIME

import sys

# Make the package root importable when the tests are run directly.
sys.path.append(str(Path(__file__).parent.parent))

from timefuncs import is_after

# RDF namespaces used by the tests: function IRIs and the `after` fixtures.
TFUN = Namespace("https://w3id.org/timefuncs/")
AFTER = Namespace("https://w3id.org/timefuncs/testdata/after/")

# Directory containing this test module (and its fixture data).
tests_dir = Path(__file__).parent
| [
6738,
3108,
8019,
1330,
10644,
198,
6738,
374,
67,
2704,
571,
1330,
29681,
11,
28531,
10223,
198,
6738,
374,
67,
2704,
571,
13,
14933,
10223,
1330,
20460,
198,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7,
2536,
7,
15235,
7,
... | 2.840336 | 119 |
from changequantification import IntensityDifferenceMetric
from petstandarduptakevaluecomputation import PETStandardUptakeValueComputation
| [
6738,
1487,
40972,
2649,
1330,
2558,
6377,
28813,
1945,
9171,
1173,
198,
6738,
4273,
20307,
37623,
539,
8367,
785,
1996,
341,
1330,
32043,
23615,
52,
457,
539,
11395,
5377,
1996,
341,
198
] | 4.34375 | 32 |
import torch
| [
11748,
28034,
628
] | 4.666667 | 3 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
from __future__ import absolute_import
import logging
from django.core.cache import cache
from django.core.urlresolvers import reverse
from sentry import http
from sentry import tagstore
from sentry.api.fields.actor import Actor
from sentry.incidents.logic import get_incident_aggregates
from sentry.incidents.models import IncidentStatus
from sentry.utils import json
from sentry.utils.assets import get_asset_url
from sentry.utils.dates import to_timestamp
from sentry.utils.http import absolute_uri
from sentry.models import (
GroupStatus,
GroupAssignee,
OrganizationMember,
Project,
User,
Identity,
Integration,
Team,
ReleaseProject,
)
logger = logging.getLogger("sentry.integrations.slack")
# Attachment colors used for issues with no actions take
ACTIONED_ISSUE_COLOR = "#EDEEEF"
RESOLVED_COLOR = "#0cbd4d"
LEVEL_TO_COLOR = {
"debug": "#fbe14f",
"info": "#2788ce",
"warning": "#f18500",
"error": "#E03E2F",
"fatal": "#d20f2a",
}
MEMBER_PREFIX = "@"
CHANNEL_PREFIX = "#"
strip_channel_chars = "".join([MEMBER_PREFIX, CHANNEL_PREFIX])
# Different list types in slack that we'll use to resolve a channel name. Format is
# (<list_name>, <result_name>, <prefix>).
LIST_TYPES = [
("channels", "channels", CHANNEL_PREFIX),
("groups", "groups", CHANNEL_PREFIX),
("users", "members", MEMBER_PREFIX),
]
def get_channel_id(organization, integration_id, name):
    """
    Fetches the internal slack id of a channel.
    :param organization: The organization that is using this integration
    :param integration_id: The integration id of this slack integration
    :param name: The name of the channel
    :return: a (prefix, id) tuple for the first match across channels,
        groups and users, or None when the integration or name is not found
    """
    name = strip_channel_name(name)
    try:
        integration = Integration.objects.get(
            provider="slack", organizations=organization, id=integration_id
        )
    except Integration.DoesNotExist:
        return None

    token_payload = {"token": integration.metadata["access_token"]}

    # Look for channel ID
    payload = dict(token_payload, **{"exclude_archived": False, "exclude_members": True})

    session = http.build_session()
    for list_type, result_name, prefix in LIST_TYPES:
        # Slack limits the response of `<list_type>.list` to 1000 channels, paginate if
        # needed
        cursor = ""
        while cursor is not None:
            items = session.get(
                "https://slack.com/api/%s.list" % list_type,
                params=dict(payload, **{"cursor": cursor}),
            )
            items = items.json()
            if not items.get("ok"):
                logger.info(
                    "rule.slack.%s_list_failed" % list_type, extra={"error": items.get("error")}
                )
                return None

            # Slack signals the last page with an EMPTY string next_cursor.
            # (Fix: the previous code kept "" — which is not None — and
            # re-requested the first page forever whenever the name was
            # absent from this list type. Normalize any falsy cursor to
            # None so the loop terminates.)
            cursor = items.get("response_metadata", {}).get("next_cursor", None) or None

            item_id = {c["name"]: c["id"] for c in items[result_name]}.get(name)
            if item_id:
                return prefix, item_id
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
23870,
1330,
12940,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
198,
6738,
1908,
563,
... | 2.495443 | 1,207 |
#! /usr/bin/env python
import os
import numpy as np
from astropy.io import fits
from astropy import units as u
import sys
import string
import matplotlib.pylab as plot
import nrm_analysis
from nrm_analysis.fringefitting.LG_Model import NRM_Model
from nrm_analysis.misctools import utils
from nrm_analysis import nrm_core, InstrumentData
from nrm_analysis import find_affine2d_parameters as FAP
from pathlib import Path
from nrm_analysis.misctools.utils import Affine2d
# Compact, readable numpy printing for the diagnostics below.
np.set_printoptions(precision=4, linewidth=160)
def examine_residuals(ff, trim=36):
    """ input: FringeFitter instance after fringes are fit

    Prints standard deviations and variances of the independent closure
    phases (CP) and closure amplitudes (CA), fringe-amplitude statistics,
    and the normalized fit residuals with `trim` pixels cut from each edge.
    """
    print("\nExamine_residuals, standard deviations & variances of *independent* CP's and CAs:")
    print(" Closure phase mean {:+.4f} std dev {:.2e} var {:.2e}".format(ff.nrm.redundant_cps.mean(),
                                              np.sqrt(utils.cp_var(ff.nrm.N,ff.nrm.redundant_cps)),
                                              utils.cp_var(ff.nrm.N, ff.nrm.redundant_cps)))
    print(" Closure amp mean {:+.4f} std dev {:.2e} var {:.2e}".format(ff.nrm.redundant_cas.mean(),
                                              np.sqrt(utils.cp_var(ff.nrm.N,ff.nrm.redundant_cas)),
                                              utils.cp_var(ff.nrm.N, ff.nrm.redundant_cas)))
    print(" Fringe amp mean {:+.4f} std dev {:.2e} var {:.2e}".format(ff.nrm.fringeamp.mean(),
                                                                      ff.nrm.fringeamp.std(),
                                                                      ff.nrm.fringeamp.var()))
    # Switch to terse scientific notation for the residual array dump.
    np.set_printoptions(precision=3, formatter={'float': lambda x: '{:+.1e}'.format(x)}, linewidth=80)
    print(" Normalized residuals trimmed by {:d} pixels from each edge".format(trim))
    print((ff.nrm.residual/ff.datapeak)[trim:-trim,trim:-trim])
    print(" Normalized residuals max and min: {:.2e}, {:.2e}".format( ff.nrm.residual.max() / ff.datapeak,
                                                                      ff.nrm.residual.min() / ff.datapeak))
    # Restore the project's default numpy print options.
    utils.default_printoptions()
def analyze_data(fitsfn=None, fitsimdir=None, affine2d=None,
                 psf_offset_find_rotation = (0.0,0.0),
                 psf_offset_ff = None,
                 rotsearch_d=None,
                 set_pistons=None,
                 oversample=3):
    """
    Fit fringes in `fitsfn` with implaneia's FringeFitter and report
    residual statistics.

    Parameters affine2d, psf_offset_find_rotation, psf_offset_ff,
    rotsearch_d and set_pistons are accepted (kept for interface
    compatibility) but not used by this routine beyond being returned.

    returns: affine2d (measured or input),
             psf_offset_find_rotation (input),
             psf_offset_ff (input or found),
             fringe pistons/r (found)
    """
    print("analyze_data: input file", fitsfn)
    print("analyze_data: oversample", oversample)

    # NOTE(review): `data` is read but unused below.
    data = fits.getdata(fitsfn)
    fobj = fits.open(fitsfn)
    print(fobj[0].header['FILTER'])

    niriss = InstrumentData.NIRISS(fobj[0].header['FILTER'], bpexist=False)
    ff_t = nrm_core.FringeFitter(niriss,
                                 datadir=fitsimdir,
                                 savedir=fitsimdir,
                                 oversample=oversample,
                                 oifprefix="ov{:d}_".format(oversample),
                                 interactive=False)
    ff_t.fit_fringes(fitsfn)

    # (Fix: removed a leftover debugging `print(fitsfn); sys.exit()` here
    # that aborted the process and made everything below unreachable.)
    examine_residuals(ff_t)

    np.set_printoptions(formatter={'float': lambda x: '{:+.2e}'.format(x)}, linewidth=80)
    print("analyze_data: fringepistons/rad", ff_t.nrm.fringepistons)
    utils.default_printoptions()
    return affine2d, psf_offset_find_rotation, ff_t.nrm.psf_offset, ff_t.nrm.fringepistons
def main(fitsimdir, ifn, oversample=3):
    """
    Run the ImPlaneIA fringe-fitting analysis on a single input file.

    fitsimdir: string: dir containing data file (created if missing)
    ifn: str: input data file name, 2d cal or 3d calint MAST header fits file
    oversample: int: pixel oversampling factor forwarded to analyze_data
    """
    # Bug fix: both parameters were previously ignored -- fitsimdir was
    # clobbered with a hard-coded home-directory path and ifn was never used
    # (the file name was hard-coded).  Honor the caller's arguments instead;
    # the __main__ caller passes the same values, so behavior is unchanged.
    if not os.path.exists(fitsimdir):
        os.makedirs(fitsimdir)
    df = os.path.join(fitsimdir, ifn)
    np.set_printoptions(formatter={'float': lambda x: '{:+.2e}'.format(x)}, linewidth=80)
    print("__main__: analyzing", ifn)
    aff, psf_offset_r, psf_offset_ff, fringepistons = analyze_data(df, fitsimdir, oversample=oversample)
    print("implaneia output in: ", fitsimdir, "\n")
    plot.show()
if __name__ == "__main__":
    # Hard-coded local run configuration: data directory, input file name and
    # oversampling factor are fixed here rather than taken from the CLI.
    main(fitsimdir=os.path.expanduser('~')+"/data/implaneia/niriss_development/2dinput/",
         ifn='niscal_mir.fits',
         oversample=5
         )
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
6468,
28338,
13,
952,
1330,
11414,
198,
6738,
6468,
28338,
1330,
4991,
355,
334,
198,
11748,
25064,
198,
11748,
4731,
... | 1.941815 | 2,303 |
import random
import numpy as np
import tensorflow as tf
import Constants as Constants
from datasets.COCO.COCO_instance import COCOInstanceDataset
from datasets.Util import Reader
| [
11748,
4738,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
11748,
4757,
1187,
355,
4757,
1187,
198,
6738,
40522,
13,
34,
4503,
46,
13,
34,
4503,
46,
62,
39098,
1330,
327,
4503,
46,
33384... | 3.388889 | 54 |
# Copyright 2008-2020 pydicom authors. See LICENSE file for details.
"""Unit tests for pydicom.data_manager"""
import json
import os
from os.path import basename
from pathlib import Path
import shutil
import pytest
from pydicom.data import (
get_charset_files, get_testdata_files, get_palette_files, fetch_data_files
)
from pydicom.data.data_manager import (
DATA_ROOT, get_testdata_file, external_data_sources
)
from pydicom.data import download
from pydicom.data.download import (
get_data_dir, calculate_file_hash, get_cached_filehash
)
# True when the optional 'pydicom-data' package is registered as an
# external data source; several test classes below key off this flag.
EXT_PYDICOM = 'pydicom-data' in external_data_sources()
@pytest.fixture
def download_failure():
    """Simulate a download failure.

    Turns on the module-level network-outage flag for the duration of the
    test and guarantees it is turned off again afterwards.
    """
    download._SIMULATE_NETWORK_OUTAGE = True
    try:
        yield
    finally:
        # Robustness fix: reset the flag even if the test body raises, so a
        # failing test cannot leave later tests with a simulated outage.
        download._SIMULATE_NETWORK_OUTAGE = False
@pytest.mark.skipif(not EXT_PYDICOM, reason="pydicom-data not installed")
class TestExternalDataSource:
    """Tests for the external data sources."""
    # NOTE(review): several tests below read self.dpath (apparently the
    # pydicom-data cache directory), but no fixture assigning it is visible
    # in this excerpt -- presumably an autouse setup fixture exists elsewhere
    # in the file; verify.
    def as_posix(self, path):
        """Return `path` as a posix path"""
        return Path(path).as_posix()
    def test_get_testdata_file_local(self):
        """Test that local data path retrieved OK."""
        fname = "CT_small.dcm"
        fpath = self.as_posix(get_testdata_file(fname))
        assert "pydicom/data/test_files" in fpath
    def test_get_testdata_file_external(self):
        """Test that external data source preferred over cache."""
        fname = "693_UNCI.dcm"
        fpath = self.as_posix(get_testdata_file(fname))
        assert "data_store/data" in fpath
    def test_get_testdata_file_external_hash_mismatch(self):
        """Test that the external source is not used when hash is not OK."""
        # Corrupt the external copy so its hash no longer matches the
        # recorded reference hash; retrieval must fall back to the cache.
        p = self.dpath / "693_UNCI.dcm"
        with open(p, 'wb') as f:
            f.write(b"\x00\x01")
        ext_hash = calculate_file_hash(p)
        ref_hash = get_cached_filehash(p.name)
        assert ext_hash != ref_hash
        fpath = self.as_posix(get_testdata_file(p.name))
        assert ".pydicom/data" in fpath
    def test_get_testdata_file_external_hash_match(self):
        """Test that external source is used when hash is OK."""
        fname = "693_UNCI.dcm"
        p = self.dpath / fname
        ext_hash = calculate_file_hash(p)
        ref_hash = get_cached_filehash(p.name)
        assert ext_hash == ref_hash
        fpath = self.as_posix(get_testdata_file(fname))
        assert "data_store/data" in fpath
    def test_get_testdata_file_external_ignore_hash(self):
        """Test that non-pydicom-data external source ignores hash check."""
        # Register a second source under a name other than 'pydicom-data';
        # hash validation only applies to the 'pydicom-data' source itself.
        external_data_sources()['mylib'] = external_data_sources()[
            'pydicom-data']
        p = self.dpath / "693_UNCI.dcm"
        with open(p, 'wb') as f:
            f.write(b"\x00\x01")
        ext_hash = calculate_file_hash(p)
        ref_hash = get_cached_filehash(p.name)
        assert ext_hash != ref_hash
        fpath = self.as_posix(get_testdata_file(p.name))
        assert "data_store/data" in fpath
    def test_get_testdata_file_missing(self):
        """Test no such file available."""
        fname = "MY_MISSING_FILE.dcm"
        assert get_testdata_file(fname) is None
    def test_get_testdata_files_local(self):
        """Test that local data paths retrieved OK."""
        fname = "CT_small*"
        paths = get_testdata_files(fname)
        assert 1 == len(paths)
        assert "pydicom/data/test_files" in self.as_posix(paths[0])
    def test_get_testdata_files_local_external_and_cache(self):
        """Test that local, external and cache paths retrieved OK."""
        fname = "693*"
        paths = get_testdata_files(fname)
        assert 7 == len(paths)
        # Local preferred first
        assert "pydicom/data/test_files" in self.as_posix(paths[0])
        # External source preferred second
        assert "data_store/data" in self.as_posix(paths[1])
        # Cache source preferred last
        assert ".pydicom/data" in self.as_posix(paths[4])
    def test_get_testdata_files_hash_match(self):
        """Test that the external source is used when the hash matches."""
        p = self.dpath / "693_UNCI.dcm"
        ext_hash = calculate_file_hash(p)
        ref_hash = get_cached_filehash(p.name)
        assert ext_hash == ref_hash
        # Both the external copy and the cached copy are returned.
        fpaths = get_testdata_files("693_UNCI*")
        fpaths = [self.as_posix(p) for p in fpaths]
        assert 2 == len(fpaths)
        assert "data_store/data" in fpaths[0]
        assert ".pydicom/data" in fpaths[1]
    def test_get_testdata_files_hash_mismatch(self):
        """Test that the external source is not used when hash is not OK."""
        # Corrupt the external copy; only the cached copy should be returned.
        p = self.dpath / "693_UNCI.dcm"
        with open(p, 'wb') as f:
            f.write(b"\x00\x01")
        ext_hash = calculate_file_hash(p)
        ref_hash = get_cached_filehash(p.name)
        assert ext_hash != ref_hash
        fpaths = get_testdata_files("693_UNCI*")
        fpaths = [self.as_posix(p) for p in fpaths]
        assert 1 == len(fpaths)
        assert ".pydicom/data" in fpaths[0]
    def test_get_testdata_files_external_ignore_hash(self):
        """Test that non-pydicom-data external source ignores hash check."""
        external_data_sources()['mylib'] = external_data_sources()[
            'pydicom-data']
        p = self.dpath / "693_UNCI.dcm"
        with open(p, 'wb') as f:
            f.write(b"\x00\x01")
        ext_hash = calculate_file_hash(p)
        ref_hash = get_cached_filehash(p.name)
        assert ext_hash != ref_hash
        fpaths = get_testdata_files("693_UNCI*")
        fpaths = [self.as_posix(p) for p in fpaths]
        assert 2 == len(fpaths)
        assert "data_store/data" in fpaths[0]
        assert ".pydicom/data" in fpaths[1]
@pytest.mark.skipif(EXT_PYDICOM, reason="pydicom-data installed")
class TestDownload:
    """Tests for the download module."""
    def test_get_testdata_file_network_outage(self, download_failure):
        """A simulated outage makes get_testdata_file warn and return None."""
        expected = (
            "A download failure occurred while attempting to "
            "retrieve 693_UNCI.dcm"
        )
        with pytest.warns(UserWarning, match=expected):
            result = get_testdata_file("693_UNCI.dcm")
        assert result is None
    def test_get_testdata_files_network_outage(self, download_failure):
        """A simulated outage makes get_testdata_files warn and match nothing."""
        expected = (
            "One or more download failures occurred, the list of matching "
            "file paths may be incomplete"
        )
        with pytest.warns(UserWarning, match=expected):
            assert get_testdata_files("693_UN*") == []
def test_fetch_data_files():
    """Test fetch_data_files()."""
    # Drop one known file from the cache, then verify a fetch restores it.
    target = get_data_dir() / "693_J2KR.dcm"
    if target.exists():
        target.unlink()
    assert not target.exists()
    fetch_data_files()
    assert target.exists()
def test_fetch_data_files_download_failure(download_failure):
    """Test fetch_data_files() with download failures."""
    expected = "An error occurred downloading the following files:"
    with pytest.raises(RuntimeError, match=expected):
        fetch_data_files()
def test_hashes():
    """Test for duplicates in hashes.json."""
    # Windows filenames are case-insensitive, so two entries may not differ
    # by case alone -- compare everything lowercased.
    hashes_path = Path(DATA_ROOT) / "hashes.json"
    with open(hashes_path, "r") as fp:
        lowered = [key.lower() for key in json.load(fp)]
    assert len(lowered) == len(set(lowered))
def test_urls():
    """Test for duplicates in urls.json."""
    # Windows filenames are case-insensitive, so two entries may not differ
    # by case alone -- compare everything lowercased.
    urls_path = Path(DATA_ROOT) / "urls.json"
    with open(urls_path, "r") as fp:
        lowered = [key.lower() for key in json.load(fp)]
    assert len(lowered) == len(set(lowered))
| [
2,
15069,
3648,
12,
42334,
279,
5173,
291,
296,
7035,
13,
4091,
38559,
24290,
2393,
329,
3307,
13,
198,
37811,
26453,
5254,
329,
279,
5173,
291,
296,
13,
7890,
62,
37153,
37811,
198,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
28... | 2.287154 | 3,472 |
# Generated by Django 3.2.7 on 2021-12-04 01:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
22,
319,
33448,
12,
1065,
12,
3023,
5534,
25,
1314,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14... | 3.019231 | 52 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import functools
import hashlib
import imghdr
import httpx
from qcloud_cos import CosConfig, CosS3Client
from epicteller.core.config import Config
# Tencent Cloud COS client configuration; region and credentials come from
# the application's Config object rather than being hard-coded here.
config = CosConfig(
    Region=Config.COS_REGION,
    SecretId=Config.COS_SECRET_ID,
    SecretKey=Config.COS_SECRET_KEY,
)
cos_client = CosS3Client(config)
# Avatar URLs are the 'xl' rendition of the generic URL builder.
# NOTE(review): get_full_url is not defined in this excerpt -- presumably it
# is defined elsewhere in this module; verify before relying on this alias.
get_avatar_url = functools.partial(get_full_url, size='xl')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
2779,
2414,
198,
11748,
1257,
310,
10141,
198,
11748,
12234,
8019,
198,
11748,
545,
456,
7109,
198,
198,
11748,
... | 2.54386 | 171 |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import json
import logging
import ctypes as ct
from .._constants import VSCODE_CREDENTIALS_SECTION
try:
    import ctypes.wintypes as wt
except (IOError, ValueError):
    # Non-Windows platforms: the wintypes import fails; the module-level
    # ctypes setup below will then raise NameError, so this module is
    # effectively Windows-only.
    pass
_LOGGER = logging.getLogger(__name__)
# Fields of a Win32 CREDENTIAL structure that this module reads/writes.
SUPPORTED_CREDKEYS = set(("Type", "TargetName", "Persist", "UserName", "Comment", "CredentialBlob"))
_PBYTE = ct.POINTER(ct.c_byte)
# NOTE(review): _CREDENTIAL (a ctypes.Structure mirroring the Win32
# CREDENTIAL struct) is not defined in this excerpt -- confirm it is
# declared earlier in the file.
_PCREDENTIAL = ct.POINTER(_CREDENTIAL)
# advapi32 exposes the Windows Credential Manager API (CredReadW/CredFree).
_advapi = ct.WinDLL("advapi32")  # type: ignore
_advapi.CredReadW.argtypes = [wt.LPCWSTR, wt.DWORD, wt.DWORD, ct.POINTER(_PCREDENTIAL)]
_advapi.CredReadW.restype = wt.BOOL
_advapi.CredFree.argtypes = [_PCREDENTIAL]
| [
2,
20368,
650,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
198,
2,
20368,
650,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
269,
19199,
355,
269,
83,
198,
6738,
1148... | 2.67128 | 289 |
import datetime
import pytz
import json
import string
from urllib.request import Request, urlopen
from autonomus.models import Tag, Event, Link,User
from dateutil import parser
from autonomus.controllers import tags_controller, events_controller
import requests
import re
from autonomus.utils import sms
from concurrent.futures import ThreadPoolExecutor
# NOTE(review): 50000 worker threads is extremely high for a thread pool --
# confirm this is intentional and not a typo (e.g. 50).
executor = ThreadPoolExecutor(50000)
# SECURITY: bearer tokens are hard-coded in source.  Move them to
# configuration/environment variables and rotate the exposed credentials.
meetUpHeaders = {
    'Authorization': 'Bearer 48bafed7ddb40e635bf562959a48d0ba',
    'Content-Type': 'application/json'
}
eventBrideHeaders = {
    'Authorization': 'Bearer UQX3SCD7LBHRUIATU5BC',
    'Content-Type': 'application/json'
}
| [
11748,
4818,
8079,
198,
198,
11748,
12972,
22877,
198,
11748,
33918,
198,
11748,
4731,
198,
6738,
2956,
297,
571,
13,
25927,
1330,
19390,
11,
19016,
9654,
198,
6738,
11827,
385,
13,
27530,
1330,
17467,
11,
8558,
11,
7502,
11,
12982,
198... | 2.95 | 220 |
# from e2e_perf_logger import *
#
# memo_dic = E2EPerfLogger().read_dict_from_csv()
# E2EPerfLogger().log_all_perf(memo_dic)
#
# def printa(text):
# print(text + "-a")
#
#
# class ABC():
# def __init__(self):
# self.func = printa
#
# def wow(self):
# self.func("WOW")
#
# a = ABC()
# a.wow()
# from workloads.relay_workloads import create_relay_workload
# from tvm import relay
#
# a = relay.var("data", shape=(10,10))
# b = relay.var("data", shape=(10,10))
# workload = relay.add(a, b)
# mod, params = create_relay_workload(workload)
#
# from tvm.relay.op.contrib.tensorrt import partition_for_tensorrt
# mod, config = partition_for_tensorrt(mod, params)
# print(mod)
# from deap import base
# toolbox = base.Toolbox()
# toolbox.mate()
# print("imported")
# class BackendOp(object):
# def __init__(self, backend, pattern, constraint):
#
# BackendOp(backend='tensorrt', pattern=is_op('nn.conv2d').has_attr({"TOpPattern": K_ELEMWISE}), constraint = is_op('nn.conv2d').has_attr({"data_layout": "NHWC"}))
# from collections import defaultdict
# a = defaultdict(dict)
# a[1][2] = 0
# print(a)
# a.add(1)
# a.add(2)
# for i in a:
# print(i)
# b = frozenbitarray('1110')
# c = frozenbitarray('1000')
# print(a|b)
# dic = {}
# dic[a] = 1
# print(dic[b])
#
# # c = ['0', '0', '0']
# # c[2] = '1'
# # print("".join(c))
# print('0'*10)
# from tvm.relay.dataflow_pattern import *
# from tvm import relay
# import logging
# import sys
# import Enum
#
# class Color(Enum):
# black = 0
#
# class Color():
# def __init__(self):
#
# print(Color.black)
# class StreamToLogger(object):
# """
# Fake file-like stream object that redirects writes to a logger instance.
# """
# def __init__(self, logger, level):
# self.logger = logger
# self.level = level
# self.linebuf = ''
#
# def write(self, buf):
# for line in buf.rstrip().splitlines():
# self.logger.log(self.level, line.rstrip())
#
# def flush(self):
# pass
#
# logging.basicConfig(filename='example.log', level=logging.WARNING, format='%(asctime)s:[%(levelname)s] %(message)s')
# log = logging.getLogger('logger')
# sys.stdout = StreamToLogger(log,logging.WARNING)
# sys.stderr = StreamToLogger(log,logging.CRITICAL)
# print('Test to standard out')
# raise Exception('Test to standard error')
# logging.debug('This message should go to the log file')
# logging.info('So should this')
# logging.warning('And this, too')
# raise ValueError("What if it happens")
# logging.error('And non-ASCII stuff, too, like Øresund and Malmö')
# dshape = (1, 16, 64, 64)
# x = relay.var("x", shape=dshape)
# pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
# upsampled = relay.nn.upsampling(pooled, scale_h=2, scale_w=2, layout="NCHW")
# out = relay.Tuple((upsampled, x))
# print(out)
# print(out.fields)
# data = relay.var("data", relay.TensorType((1, 64, 56, 56), "float32"))
# conv_weight = relay.var("2_weight", relay.TensorType((64, 64, 1, 1), "float32"))
# conv2d = relay.nn.conv2d(
# data=data, weight=conv_weight, kernel_size=(1, 1), channels=64, padding=(0, 0)
# )
#
# is_tuple_get_item()
# print(repr(add))
# print(pat)
# print(match)
# from tvm.relay.transform.pattern_manager.op_type import OpType
# for op_type in OpType:
# print(op_type)
# from subprocess import Popen, PIPE, STDOUT, DEVNULL
# import time
#
# start_time = time.time()
# cmd = ['python3', 'tmp_measure_network.py', "nasrnn", "cuda"]
# p = Popen(cmd, stdout=DEVNULL, stderr=PIPE)
# p.wait()
# out, err = p.communicate()
# res = err.decode("utf-8").partition("##result:")
# assert(len(res)==3)
# numbers = res[2].split()
# mean_perf, std_perf = float(numbers[0]), float(numbers[1])
# print(f"time elapsed: {time.time()-start_time}")
#
# # import tvm
# # from tvm import relay
# #
# # def _traverse_expr(node, node_dict):
# # if node in node_dict:
# # return
# # # if isinstance(node, relay.op.op.Op):
# # # return
# # if isinstance(node, tvm.ir.op.Op):
# # return
# #
# # # print("{} : {}".format(node, type(node)))
# # node_dict[node] = len(node_dict)
# # print(node.backend)
# #
# # data = relay.var("data", shape=(10, 10))
# # expr = relay.nn.relu(data)
# # relay.analysis.update_backend(expr, "wow")
# # relay.analysis.update_backend(data, "wow2")
# # node_dict = {}
# # relay.analysis.post_order_visit(expr, lambda node: _traverse_expr(node, node_dict)) | [
198,
198,
2,
422,
304,
17,
68,
62,
525,
69,
62,
6404,
1362,
1330,
1635,
198,
2,
198,
2,
16155,
62,
67,
291,
796,
412,
17,
36,
5990,
69,
11187,
1362,
22446,
961,
62,
11600,
62,
6738,
62,
40664,
3419,
198,
2,
412,
17,
36,
5990,
... | 2.380522 | 1,879 |
from GUI.Shapes.Shape import Shape
| [
6738,
25757,
13,
2484,
7916,
13,
33383,
1330,
25959,
628
] | 3.6 | 10 |
from django.contrib import admin
from django_admin_listfilter_dropdown.filters import RelatedDropdownFilter
from mptt.admin import MPTTModelAdmin
from categories.models import Category, RecommendedFor
class CategoryAdmin(MPTTModelAdmin):
    """ Class representing category admin
    Attributes:
        list_display (tuple): display fields tuple
        list_filter (tuple): filter fields tuple
        search_fields (tuple): search fields tuple
    """
    list_display = ('name', 'parent', 'slug')
    list_filter = (
        # Dropdown widget keeps the parent filter usable when there are
        # many categories (the default list renders every value inline).
        ('parent', RelatedDropdownFilter),
    )
    search_fields = ('name', 'slug')
admin.site.register(Category, CategoryAdmin)
# NOTE(review): RecommendedForAdmin is not defined or imported in this
# excerpt -- confirm it exists elsewhere, otherwise this line raises
# NameError at import time.
admin.site.register(RecommendedFor, RecommendedForAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
62,
28482,
62,
4868,
24455,
62,
14781,
2902,
13,
10379,
1010,
1330,
19809,
26932,
2902,
22417,
198,
6738,
285,
457,
83,
13,
28482,
1330,
4904,
15751,
17633,
46787,
... | 3.140351 | 228 |
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle, TA_CENTER
| [
6738,
989,
23912,
13,
8019,
13,
47720,
1330,
651,
36674,
21466,
3347,
316,
11,
2547,
6111,
21466,
11,
21664,
62,
43960,
1137,
628
] | 3.521739 | 23 |
from pygsmmodule.imei.imei import ImeiSupport
| [
6738,
12972,
14542,
3020,
375,
2261,
13,
45519,
13,
45519,
1330,
314,
1326,
72,
15514,
628,
628,
628
] | 2.833333 | 18 |
# Copyright (c) 2018 Science and Technology Facilities Council
# All rights reserved.
# Modifications made as part of the fparser project are distributed
# under the following license:
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Test Fortran 2003 rule R1102 : This file tests the support for the
program statement.
'''
import pytest
from fparser.two.utils import NoMatchError
from fparser.two.Fortran2003 import Program_Stmt
def test_valid(f2003_create):
    ''' Test that valid code is parsed correctly. '''
    stmt = Program_Stmt("program a")
    assert isinstance(stmt, Program_Stmt)
    assert repr(stmt) == "Program_Stmt('PROGRAM', Name('a'))"
    assert str(stmt) == 'PROGRAM a'
def test_invalid(f2003_create):
    ''' Test that exceptions are raised for invalid code. '''
    bad_inputs = ("", " ", "prog", "program", "programa", "a program",
                  "a program a", "program a a")
    for text in bad_inputs:
        with pytest.raises(NoMatchError) as excinfo:
            _ = Program_Stmt(text)
        assert "Program_Stmt: '{0}'".format(text) in str(excinfo.value)
| [
2,
15069,
357,
66,
8,
2864,
5800,
290,
8987,
48939,
4281,
198,
198,
2,
1439,
2489,
10395,
13,
198,
198,
2,
3401,
6637,
925,
355,
636,
286,
262,
277,
48610,
1628,
389,
9387,
198,
2,
739,
262,
1708,
5964,
25,
198,
198,
2,
2297,
39... | 3.284404 | 763 |
from pathlib import Path
import pytest
from .. import file_funcs
@pytest.mark.file_funcs
class TestFileFuncs:
"""Tests all funcs in file_funcs"""
@pytest.mark.skip(reason="New hire work")
def test_delete_files(self, tmp_path: Path):
"""Tests the delete files decorator
Files should be removed both at the start and end of the func
"""
file_path = tmp_path / "delete_files_test.txt"
@file_funcs.delete_files([file_path])
with file_path.open(mode="w+") as f:
f.write("test")
func()
assert not file_path.exists()
@pytest.mark.parametrize("paths", [["test_file_utils.txt",
"test_file_utils2.txt"],
["test_file_utils3.txt"],
["test_dir"],
["test_dir_2",
"test_dir_3"]])
def test_delete_paths(self, paths: list, tmp_path: Path):
"""Tests that files are deleted properly"""
paths = [tmp_path / x for x in paths]
# for each path
for path in paths:
# If it's a file
if path.suffix == ".txt":
with path.open(mode="w+") as f:
f.write("test")
# If it's a dir
else:
path.mkdir()
file_funcs.delete_paths(paths)
for path in paths:
assert not path.exists()
    @pytest.mark.skip(reason="New hires work")
    def test_delete_paths_asserts(self):
        """Tests that the assert statements enforce correct argument typing"""
        # TODO(new hire): call delete_paths with wrongly-typed arguments and
        # check that its assertions fire.
        pass
    @pytest.mark.skip(reason="New hires work")
    def test_delete_paths_sudo(self):
        """Tests that sudo rm -rf is called when needed"""
        # TODO(new hire): exercise the privileged-removal fallback path of
        # delete_paths.
        pass
    @pytest.mark.skip(reason="New hires work")
    def test_delete_paths_not_exists(self):
        """Ensures delete_paths succeeds with nonexistent paths"""
        # TODO(new hire): call delete_paths on paths that were never created
        # and verify no exception is raised.
        pass
def test_clean_paths(self, tmp_path: Path):
"""Ensures clean_paths removes and recreates dirs"""
# Directories
dir_paths = [tmp_path / "test1", tmp_path / "test2" / "test3"]
# Files that exist within those directories
file_paths = [x / "test.txt" for x in dir_paths]
for path in dir_paths:
path.mkdir(parents=True)
# Write a temporary file that should get removed
for path in file_paths:
with path.open(mode="w+") as f:
f.write("test")
assert path.exists()
file_funcs.clean_paths(dir_paths)
# Make sure directories still exist
for path in dir_paths:
assert path.exists()
# Make sure directories are empty
for path in file_paths:
assert not path.exists()
@pytest.mark.skip(reason="New hires")
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
11485,
1330,
2393,
62,
12543,
6359,
628,
198,
31,
9078,
9288,
13,
4102,
13,
7753,
62,
12543,
6359,
198,
4871,
6208,
8979,
24629,
6359,
25,
198,
220,
220,
220... | 2.058405 | 1,404 |
#!/usr/bin/env python
import asyncio
import aiohttp
import logging
import time
import ujson
import websockets
import hummingbot.connector.exchange.binance.binance_constants as CONSTANTS
from binance.client import Client as BinanceClient
from typing import (
AsyncIterable,
Dict,
Optional,
Tuple,
)
from hummingbot.connector.exchange.binance import binance_utils
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource
from hummingbot.core.utils.async_utils import safe_ensure_future
from hummingbot.logger import HummingbotLogger
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
30351,
952,
198,
11748,
257,
952,
4023,
198,
11748,
18931,
198,
11748,
640,
198,
11748,
334,
17752,
198,
11748,
2639,
11603,
198,
198,
11748,
41465,
13645,
13,
8443,
273,
13,... | 3.141509 | 212 |
import math
def hexdump(data: memoryview,
            bytes_per_line: int = 16,
            bytes_per_chunk: int = 4) -> list:
    """
    Render `data` as a classic hex dump.

    Each returned string is one line of the dump: an 8-digit hex offset,
    the hex bytes grouped into chunks of `bytes_per_chunk`, and a printable
    ASCII column (non-printable bytes shown as '.').
    """
    # Allow flexible dump geometry while keeping the limits sane.
    assert 1 <= bytes_per_line <= 256, "bytes_per_line must be within 1-256"
    assert 1 <= bytes_per_chunk <= 256, "bytes_per_chunk must be within 1-256"
    chunks_per_line = math.ceil(bytes_per_line / bytes_per_chunk)
    # Two hex chars per byte plus one separating space between chunks.
    hex_width = bytes_per_line * 2 + chunks_per_line - 1
    lines = []
    for offset in range(0, len(data), bytes_per_line):
        row = data[offset:offset + bytes_per_line]
        # Hex column: chunked groups joined by single spaces.
        groups = [
            "".join("%02X" % b for b in row[k:k + bytes_per_chunk])
            for k in range(0, len(row), bytes_per_chunk)
        ]
        hex_part = " ".join(groups).ljust(hex_width)
        # ASCII column: printable range 0x20-0x7E, dots elsewhere.
        text_part = "".join(
            chr(b) if 0x20 <= b < 0x7f else '.' for b in row
        ).ljust(bytes_per_line)
        lines.append("%08X: %s |%s|" % (offset, hex_part, text_part))
    return lines
| [
11748,
10688,
198,
198,
4299,
17910,
39455,
7,
7890,
25,
4088,
1177,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
9881,
62,
525,
62,
1370,
25,
493,
796,
1467,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 2.301563 | 640 |