content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import os
import re
import sys
import numpy as np
from collections import Counter
import collections
import h5py
import pandas as pd
pathname = os.path.abspath('C:\\Users\\Vicky\\Desktop\\Courses\\Avi\\erisk collections_2017_2018\\erisk collections\\2017\\2test')
os.chdir(pathname)
#############Read Output ###############################################
y_train = pd.read_excel('y_train.xlsx', sheetname='sheet1')
y_test = pd.read_excel('y_test.xlsx', sheetname='sheet1')
############# Read First Dataset ######################################
with h5py.File('datatrain.h5', 'r') as hf:
data = hf['datatrain'][:]
with h5py.File('datatest.h5', 'r') as hf:
data1 = hf['datatest'][:]
x_train = data
x_test =data1
############# Read Second Dataset ######################################
with h5py.File('datatrain2.h5', 'r') as hf:
n1data = hf['datatrain'][:]
with h5py.File('datatest2.h5', 'r') as hf:
n1data1 = hf['datatest'][:]
with h5py.File('datatest_basic2017_n_w_e_tp.h5', 'r') as hf:
n1data1_features = hf['datatest'][:]
x_train=n1data
for i in range (0,len(x_train),1):
x_train[i][28917] = x_train[i][28917]*len(x_train)
#y_train.ravel()
x_test=n1data1
for i in range (0,len(x_test),1):
x_test[i][28917] = x_test[i][28917]*len(x_train)
##########################################################
###################Results NN#############################
##1st Dataset
accuracy_score = 0.885286783042394
confusion_matrix = [342 7]
[ 39 13]
classification_report
precision recall f1-score support
0 0.90 0.98 0.94 349
1 0.65 0.25 0.36 52
micro avg 0.89 0.89 0.89 401
macro avg 0.77 0.61 0.65 401
weighted avg 0.87 0.89 0.86 401
#2nd Dataset
accuracy_score=0.9152119700748129
confusion_matrix = [340 9]
[ 25 27]
classification_report
precision recall f1-score support
0 0.93 0.97 0.95 349
1 0.75 0.52 0.61 52
micro avg 0.92 0.92 0.92 401
macro avg 0.84 0.75 0.78 401
weighted avg 0.91 0.92 0.91 401 | [
11748,
28686,
201,
198,
11748,
302,
201,
198,
11748,
25064,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
17268,
1330,
15034,
201,
198,
11748,
17268,
201,
198,
11748,
289,
20,
9078,
201,
198,
11748,
19798,
292,
355,
279,
67,
... | 1.954839 | 1,240 |
color =["red", "green", "purple", "orange", "blue", "yellow"]
pick = input("What is your favorite color ? ")
if pick in color:
# Storing the rank of the color picked
rank = color.index(pick) + 1
# Check if the color is in your list
if rank == 1:
print("That is my favorite color.")
elif rank == 2:
print("That is my 2nd favorite color.")
elif rank == 3:
print("That is my 3rd favorite color.")
elif rank > 3:
print("That is my "+str(rank) +"th favorite color.")
else:
print("I do not care too much for that color.")
| [
8043,
796,
14692,
445,
1600,
366,
14809,
1600,
366,
14225,
1154,
1600,
366,
43745,
1600,
366,
17585,
1600,
366,
36022,
8973,
198,
198,
27729,
796,
5128,
7203,
2061,
318,
534,
4004,
3124,
5633,
366,
8,
198,
198,
361,
2298,
287,
3124,
2... | 2.495968 | 248 |
"""This module is used to profile getrecommendations() and bestreds.BestrefsScript()."""
from crds.python23 import pickle
import crds
from crds.tests.test_config import run_and_profile
from crds import data_file
if __name__ == "__main__":
run_and_profile("HST pickle/unpickle", "pickle_unpickle('hst.pmap', 'data/j8bt06o6q_raw.fits')", globals())
| [
37811,
1212,
8265,
318,
973,
284,
7034,
651,
47335,
437,
602,
3419,
290,
1266,
445,
82,
13,
13014,
5420,
82,
7391,
3419,
526,
15931,
198,
198,
6738,
1067,
9310,
13,
29412,
1954,
1330,
2298,
293,
198,
198,
11748,
1067,
9310,
198,
198,
... | 2.717557 | 131 |
import requests
from colorama import Fore, init
from threading import Thread
import random, time, names, string, json, os
from threading import Lock
from random import choice
s_print_lock = Lock()
def s_print(*a, **b):
"""Thread safe print function"""
with s_print_lock:
print(*a, **b)
def get_proxy(proxy_list):
'''
(list) -> dict
Given a proxy list <proxy_list>, a proxy is selected and returned.
'''
# Choose a random proxy
proxy = random.choice(proxy_list)
m = proxy.strip().split(':')
if len(m) == 4:
base = f"{':'.join(m[:2])}" # ip:port
if len(m) == 4:
proxies = {
'http': f"http://{':'.join(m[-2:])}@{base}" + '/',
'https': f"http://{':'.join(m[-2:])}@{base}" + '/'
}
else:
# Set up the proxy to be used
proxies = {
"http": str(proxy),
"https": str(proxy)
}
# Return the proxy
return proxies
def read_from_txt(path):
'''
(None) -> list of str
Loads up all sites from the sitelist.txt file in the root directory.
Returns the sites as a list
'''
# Initialize variables
raw_lines = []
lines = []
# Load data from the txt file
try:
f = open(path, "r")
raw_lines = f.readlines()
f.close()
# Raise an error if the file couldn't be found
except Exception:
log('e', Fore.RED + "Couldn't locate <" + path + ">.")
if (len(raw_lines) == 0):
log('e', Fore.RED + "No data in <" + path + ">.")
# Parse the data
for line in raw_lines:
lines.append(line.strip("\n"))
# Return the data
return lines
ua = [
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0',
'Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20120101 Firefox/29.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.6 (KHTML, like Gecko) Chrome/16.0.897.0 Safari/535.6',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.54 Safari/535.2',
]
if __name__ == '__main__':
init(autoreset=True)
main()
| [
11748,
7007,
198,
6738,
3124,
1689,
1330,
4558,
11,
2315,
198,
6738,
4704,
278,
1330,
14122,
198,
11748,
4738,
11,
640,
11,
3891,
11,
4731,
11,
33918,
11,
28686,
198,
6738,
4704,
278,
1330,
13656,
198,
6738,
4738,
1330,
3572,
628,
198... | 2.184473 | 1,404 |
# https://leetcode.com/problems/number-of-students-unable-to-eat-lunch
| [
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
17618,
12,
1659,
12,
19149,
658,
12,
403,
540,
12,
1462,
12,
4098,
12,
75,
3316,
628
] | 2.482759 | 29 |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array
from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, connect, \
shaped_data, connect_front
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
| [
2,
15069,
357,
34,
8,
2864,
12,
1238,
1828,
8180,
10501,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
6738,
1280,
85,
2879,
13,
31... | 3.055901 | 161 |
##############################################
# The MIT License (MIT)
# Copyright (c) 2014 Kevin Walchko
# see LICENSE for full details
##############################################
# this directory contains things for testing and fake sources
# import pygecko.test.fake_camera as cv2
# from pygecko.test.process import GeckoSimpleProcess
| [
29113,
7804,
4242,
2235,
198,
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
15069,
357,
66,
8,
1946,
7939,
6445,
354,
7204,
198,
2,
766,
38559,
24290,
329,
1336,
3307,
198,
29113,
7804,
4242,
2235,
198,
2,
428,
8619,
4909,
1243,
329,... | 4.275 | 80 |
"""
Interactive fitting of peaks in noisy 2D images.
Copyright (c) 2013, rhambach.
This file is part of the TEMimage package and released
under the MIT-Licence. See LICENCE file for details.
"""
import numpy as np
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
import matplotlib.pyplot as plt
import point_browser as pb
from matplotlib.widgets import Button, RadioButtons, Slider
class FindCenters(pb.PointBrowser):
"""
Semi-automatic fitting of bright points in TEM image
dragging ... (opt) if True, dragging is allowed for sliders
"""
def RefineCenters(self,event):
" refine positions by fitting 2D Gaussian in neighborhood of local max "
from scipy.optimize import leastsq
from sys import stdout
#print "Refine()";
NN = self.nbhd_size;
Nx,Ny = self.image.shape;
dx,dy = np.mgrid[-NN:NN+1,-NN:NN+1];
# refine each point separately
self.points = self.points.astype(float); # allow subpixel precision
for ip in range(len(self.points)):
P = self.points[ip];
x,y = np.round(P);
# get neighborhood (skip border)
xmin,xmax = dx[[0,-1],0]+x; # first and last element in dx
ymin,ymax = dy[0,[0,-1]]+y; # " dy
if xmin<0 or ymin<0 or xmax>=Nx or ymax>=Ny: continue
nbhd = self.image[xmin:xmax+1,ymin:ymax+1];
assert nbhd.shape == (2*NN+1,2*NN+1)
# calculate center of mass
p0 = (0.,0.,self.image[tuple(P)],0.,NN/2); # initial guess
residuals = lambda param: (nbhd - gauss(*param)).flat; # residuals
p,ierr = leastsq(lambda p: (nbhd - gauss(*p)).flat, p0);# least-squares fit
self.points[ip] = (x+p[0],y+p[1]); # correct position of point
# DEBUG: plot fits for each point
if self.verbosity > 0:
print "Refining Points... %d %%\r" % (100*ip/len(self.points-1)),
if self.verbosity > 3:
print "IN: ",p0
print "OUT: ",p
if self.verbosity > 10:
plt.figure();
ix = nbhd.shape[0]/2;
plt.plot(dy[ix],nbhd[ix], 'k',label='image');
plt.plot(dy[ix],gauss(*p0)[ix],'g',label='first guess');
plt.plot(dy[ix],gauss(*p)[ix], 'r',label='final fit');
plt.plot(dx[:,ix],nbhd[:,ix], 'k--');
plt.plot(dx[:,ix],gauss(*p0)[:,ix],'g--');
plt.plot(dx[:,ix],gauss(*p)[:,ix], 'r--');
plt.legend();
plt.show();
if self.verbosity > 0: print "Refining Points. Finished.";
stdout.flush();
self._update_points();
def find_local_maxima(self, data, neighborhood_size):
"""
find local maxima within neighborhood
idea from http://stackoverflow.com/questions/9111711
(get-coordinates-of-local-maxima-in-2d-array-above-certain-value)
"""
# find local maxima in image (width specified by neighborhood_size)
data_max = filters.maximum_filter(data,neighborhood_size);
maxima = (data == data_max);
assert np.sum(maxima) > 0; # we should always find local maxima
# remove connected pixels (plateaus)
labeled, num_objects = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
maxima *= 0;
for dx,dy in slices:
maxima[(dx.start+dx.stop-1)/2, (dy.start+dy.stop-1)/2] = 1
# calculate difference between local maxima and lowest
# pixel in neighborhood (will be used in select_local_maxima)
data_min = filters.minimum_filter(data,neighborhood_size);
diff = data_max - data_min;
self._maxima = maxima;
self._diff = diff;
return maxima,diff
def refine_local_maxima(self,N):
" select highest N local maxima using thresholding "
maxima = self._maxima; diff = self._diff;
# select highest local maxima using thresholding
if np.sum(maxima) > N:
# calc treshold from sorted list of differences for local maxima
thresh = np.sort(diff[maxima].flat)[-N];
# keep only maxima with diff>thresh
maxima = np.logical_and(maxima, diff>thresh);
# TODO: refine fit by local 2D Gauss-Fit
# return list of x,y positions of local maxima
return np.asarray(np.where(maxima)).T;
# --- self-test -------------------------------------------------------------
if __name__ == '__main__':
import tifffile as tiff
# read test image
image = tiff.imread("tests/graphene_flower_filtered.tif");
FH = FindCenters(image,verbosity=3);
plt.show();
| [
37811,
198,
220,
21365,
15830,
286,
25740,
287,
31210,
362,
35,
4263,
13,
628,
220,
15069,
357,
66,
8,
2211,
11,
374,
2763,
19496,
13,
220,
198,
220,
220,
220,
770,
2393,
318,
636,
286,
262,
309,
3620,
9060,
5301,
290,
2716,
198,
... | 2.403877 | 1,857 |
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'variables': {
'use_libjpeg_turbo%': '<(use_libjpeg_turbo)',
'conditions': [
['use_libjpeg_turbo==1', {
'libjpeg_include_dir%': [ '<(DEPTH)/third_party/libjpeg_turbo', ],
}, {
'libjpeg_include_dir%': [ '<(DEPTH)/third_party/libjpeg', ],
}],
],
},
'includes': ['../build/common.gypi'],
'targets': [
{
'target_name': 'common_video',
'type': 'static_library',
'include_dirs': [
'<(webrtc_root)/modules/interface/',
'interface',
'jpeg/include',
'libyuv/include',
],
'direct_dependent_settings': {
'include_dirs': [
'interface',
'jpeg/include',
'libyuv/include',
],
},
'conditions': [
['build_libjpeg==1', {
'dependencies': ['<(libjpeg_gyp_path):libjpeg',],
}, {
# Need to add a directory normally exported by libjpeg.gyp.
'include_dirs': ['<(libjpeg_include_dir)'],
}],
['build_libyuv==1', {
'dependencies': ['<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',],
}, {
# Need to add a directory normally exported by libyuv.gyp.
'include_dirs': ['<(libyuv_dir)/include',],
}],
],
'sources': [
'interface/i420_video_frame.h',
'i420_video_frame.cc',
'jpeg/include/jpeg.h',
'jpeg/data_manager.cc',
'jpeg/data_manager.h',
'jpeg/jpeg.cc',
'libyuv/include/webrtc_libyuv.h',
'libyuv/include/scaler.h',
'libyuv/webrtc_libyuv.cc',
'libyuv/scaler.cc',
'plane.h',
'plane.cc',
],
},
], # targets
'conditions': [
['include_tests==1', {
'targets': [
{
'target_name': 'common_video_unittests',
'type': 'executable',
'dependencies': [
'common_video',
'<(DEPTH)/testing/gtest.gyp:gtest',
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
'<(webrtc_root)/test/test.gyp:test_support_main',
],
'sources': [
'i420_video_frame_unittest.cc',
'jpeg/jpeg_unittest.cc',
'libyuv/libyuv_unittest.cc',
'libyuv/scaler_unittest.cc',
'plane_unittest.cc',
],
},
], # targets
}], # include_tests
],
} | [
2,
15069,
357,
66,
8,
2321,
383,
5313,
49,
4825,
1628,
7035,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
198,
2,
326,
460,
307,
1043,
287,
262,
38559,
... | 1.958071 | 1,431 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
foodon_lookup = {}
siren_lookup = {}
nameless = '03301032 03301034 03301067 03301068 03301076 03301078 03301107 03301131 03301149 03301152 03301160 03301163 03301168 03301173 03301183 03301192 03301199 03301206 03301212 03301213 03301221 03301229 03301253 03301287 03301288 03301305 03301308 03301309 03301323 03301324 03301332 03301350 03301378 03301398 03301410 03301412 03301418 03301436 03301437 03301467 03301511 03301524 03301554 03301563 03301575 03301587 03301600 03301614 03301626 03301627 03301649 03301670 03301740 03301744 03301753 03301754 03301757 03301772 03301824 03301825 03301840 03301849 03301867 03301908 03301909 03301913 03301928 03301932 03301947 03301962 03301971 03302034 03302035 03302047 03302061 03302067 03302069 03302071 03302081 03302105 03302121 03302122 03302123 03302143 03302147 03302170 03302187 03302190 03302210 03302218 03302219 03302228 03302243 03302274 03302291 03302384 03302386 03302404 03302406 03302408 03302416 03302420 03302425 03302454 03302456 03302467 03302477 03302513 03302518 03302532 03302548 03302566 03302573 03302574 03302577 03302596 03302620 03302630 03302652 03302657 03302671 03302703 03302726 03302780 03302787 03302805 03302881 03302927 03302935 03302948 03302954 03302987 03302988 03302997 03303050 03303062 03303076 03303079 03303095 03303098 03303100 03303152 03303159 03303175 03303179 03303186 03303213 03303216 03303347 03303361 03303363 03303377 03303390 03303395 03303397 03303398 03303400 03303415 03303511 03303554 03303707 03303763 03303770 03303826 03303830 03303843 03303885 03303894 03303919 03303921 03303930 03303938 03303946 03303951 03304000 03304005 03304006 03304018 03304034 03304035 03304041 03304065 03304076 03304077 03304084 03304091 03304099 03304104 03304110 03304152 03304153 03304173 03304198 03304202 03304206 03304231 03304233 03304243 03304244 03304256 03304257 03304259 03304293 03304338 03304339 03304372 03304373 03304442 03304443 03304448 03304464 03304495 03304497 03304502 03304538 03304539 03304547 03304561 03304570 03304571 
03304596 03304599 03304603 03304607 03304628 03304637 03304641 03304642 03304650 03304660 03304674 03304688 03304693 03304699 03304707 03304750 03304766 03304814 03304820 03304829 03304831 03304835 03304839 03304851 03304854 03304862 03304865 03304870 03304873 03304877 03304879 03304881 03304886 03304888 03304900 03304902 03304912 03304913 03304928 03304944 03304946 03304953 03304957 03304960 03304996 03305013 03305015 03305034 03305063 03305067 03305086 03305100 03305126 03305128 03305165 03305168 03305191 03305223 03305231 03305238 03305285 03305286 03305318 03305319 03305334 03305367 03305368 03305386 03305393 03305433 03305445 03305452 03305455 03305495 03305510 03305518 03305538 03305557 03305564 03305595 03305605 03305618 03305629 03305691 03305708 03305714 03305746 03305749 03305754 03305757 03305772 03305782 03305797 03305804 03305819 03305850 03305862 03305869 03305874 03305987 03305991 03306008 03306016 03306026 03306056 03306076 03306082 03306109 03306113 03306126 03306129 03306131 03306133 03306140 03306141 03306150 03306158 03306163 03306165 03306177 03306181 03306188 03306191 03306208 03306220 03306233 03306239 03306247 03306261 03306274 03306308 03306324 03306325 03306329 03306351 03306352 03306361 03306362 03306372 03306375 03306391 03306393 03306395 03306400 03306411 03306420 03306451 03306457 03306458 03306460 03306461 03306463 03306465 03306475 03306476 03306477 03306487 03306506 03306515 03306521 03306529 03306534 03306535 03306537 03306541 03306542 03306547 03306548 03306553 03306566 03306601 03306638 03306645 03306649 03306652 03306653 03306686 03306694 03306698 03306714 03306729 03306740 03306743 03306755 03306759 03306767 03306770 03306802 03306822 03306835 03306841 03306846 03306891 03306916 03306936 03306953 03306996 03306997 03307019 03307072 03307094 03307161 03307167 03307179 03307184 03307188 03307197 03307213 03307218 03307221 03307232 03307244 03307273 03307281 03307306 03307308 03307311 03307336 03307338 03307340 03307343 03307349 
03307370 03307376 03307397 03307398 03307399 03307432 03307474 03307501 03307543 03307552 03307580 03307607 03307609 03307638 03307643 03307657 03307673 03307688 03307690 03307710 03307716 03307722 03307727 03307728 03307744 03307747 03307767 03307770 03307771 03307781 03307787 03307821 03307847 03307862 03307865 03307870 03307873 03307924 03307931 03307986 03307987 03307996 03307997 03307999 03308000 03308007 03308012 03308014 03308019 03308022 03308023 03308029 03308031 03308032 03308050 03308057 03308070 03308084 03308090 03308093 03308094 03308107 03308108 03308120 03308121 03308146 03308149 03308151 03308164 03308171 03308172 03308173 03308176 03308177 03308183 03308185 03308188 03308197 03308198 03308200 03308205 03308216 03308217 03308227 03308228 03308230 03308232 03308256 03308262 03308263 03308274 03308275 03308276 03308283 03308289 03308291 03308292 03308324 03308332 03308338 03308366 03308368 03308378 03308387 03308420 03308422 03308427 03308428 03308429 03308430 03308431 03308433 03308442 03308444 03308447 03308451 03308452 03308459 03308465 03308493 03308494 03308499 03308514 03308518 03308540 03308552 03308556 03308559 03308575 03308582 03308583 03308585 03308587 03308593 03308609 03308619 03308624 03308644 03308649 03308689 03308691 03308700 03308735 03308758 03308759 03308775 03308777 03308785 03308787 03308789 03308792 03308796 03308804 03308833 03308837 03308841 03308842 03308844 03308858 03308870 03308871 03308873 03308880 03308883 03308887 03308895 03308897 03308902 03308904 03308905 03308911 03308913 03308919 03308939 03308952 03308955 03308956 03308960 03308967 03308971 03308972 03308976 03308977 03308979 03308980 03308982 03308983 03308984 03308985 03308987 03308988 03308989 03308990 03308992 03308998 03309000 03309002 03309019 03309056 03309058 03309062 03309069 03309070 03309084 03309093 03309110 03309112 03309113 03309114 03309117 03309118 03309120 03309133 03309141 03309148 03309155 03309156 03309161 03309162 03309163 03309166 03309167 
03309170 03309172 03309174 03309177 03309178 03309180 03309182 03309184 03309186 03309190 03309191 03309193 03309200 03309201 03309202 03309206 03309211 03309213 03309215 03309234 03309235 03309238 03309245 03309257 03309259 03309260 03309261 03309264 03309266 03309267 03309268 03309269 03309272 03309273 03309275 03309276 03309281 03309288 03309293 03309298 03309306 03309309 03309322 03309327 03309334 03309339 03309342 03309355 03309424 03309430 03309431 03309432 03309433 03309435 03309438 03309449 03309463 03309465 03309466 03309467 03309471 03309472 03309473 03309479 03309486 03309487 03309493 03309494 03309501 03309516 03309519 03309520 03309528 03309530 03309532 03309568 03309574 03309583 03309584 03309616 03309617 03309629 03309630 03309633 03309640 03309642 03309645 03309655 03309685 03309686 03309692 03309693 03309697 03309705 03309706 03309710 03309713 03309716 03309717 03309719 03309720 03309721 03309722 03309726 03309727 03309728 03309729 03309731 03309747 03309749 03309752 03309753 03309754 03309756 03309761 03309784 03309864 03309865 03309877 03309898 03309900 03309901 03309910 03309911 03309913 03309914 03309916 03309919 03309926 03309936 03309953 03309968 03309975 03309979 03309980 03309985 03309988 03310027 03310028 03310035 03310036 03310040 03310043 03310046 03310048 03310055 03310068 03310076 03310079 03310080 03310085 03310094 03310095 03310104 03310115 03310119 03310120 03310121 03310122 03310123 03310136 03310141 03310142 03310148 03310158 03310159 03310170 03310175 03310180 03310181 03310183 03310193 03310199 03310203 03310217 03310218 03310229 03310230 03310233 03310236 03310237 03310252 03310267 03310271 03310281 03310298 03310299 03310300 03310303 03310322 03310342 03310346 03310354 03310357 03310377 03310384 03310413 03310419 03310425 03310428 03310441 03310449 03310450 03310451 03310465 03310487 03310493 03310545 03310550 03310552 03310556 03310562 03310570 03310573 03310622 03310629 03310658 03310682 03310697 03310721 03310730 03310731 
03310732 03310733 03310739 03310747 03310799 03310821 03310823 03310830 03310832 03310849 03310858 03310864 03310866 03310872 03310876 03310877 03310893 03310896 03310903 03310925 03310928 03310942 03310943 03310967 03310980 03311000 03311008 03311025 03311031 03311047 03311048 03311049 03311055 03311103 03311104 03311117 03311135 03311144 03311161 03311222 03311257 03311266 03311267 03311273 03311278 03311281 03311286 03311291 03311303 03311340 03311347 03311348 03311356 03311374 03311380 03311386 03311392 03311405 03311421 03311437 03311438 03311439 03311440 03311457 03311464 03311471 03311478 03311479 03311481 03311491 03311492 03311502 03311503 03311529 03311535 03311553 03311558 03311566 03311576 03311577 03311599 03311603 03311619 03311641 03311651 03311656 03311671 03311672 03311725 03311748 03311752 03311757 03311758 03311759 03311763 03311767 03311768 03311773 03311780 03311790 03311800 03311837 03311839 03311848 03311857 03311859 03311860 03311867 03311870 03311872 03311886 03312038 03312063 03312067 03315054 03315087 03315099 03315135 03315223 03315252 03315282 03315292 03315296 03315315 03315331 03315342 03315351 03315356 03315361 03315427 03315451 03315473 03315477 03315494 03315518 03315553 03315574 03315643 03315730 03315754 03315844 03315864 03315867 03315870 03315888 03315906 03315971 03315972 03315973 03316008 03316018 03316095 03316101 03316107 03316142 03316144 03316145 03316183 03316203 03316217 03316232 03316264 03316277 03316297 03316327 03316330 03316331 03316343 03316370 03316374 03316383 03316389 03316405 03316417 03316423 03316427 03316482 03316510 03316516 03316520 03316535 03316542 03316551 03316570 03316614 03316635 03316687 03316688 03316696 03316741 03316752 03316783 03316860 03316863 03316901 03316971 03316992 03317053 03317058 03317069 03317145 03317146 03317238 03317327 03317332 03317370 03317389 03317453 03317470 03317472 03317475 03317494 03317508 03317548 03317557 03317584 03317621 03317636 03317644 03317654 03317663 03317675 
03411028 03411105 03411207 03411246 03411321 03411424 03411526 03411605 03411613 03411619 03411631 03411779 03411847 03411953 03412245 03412282 03460122 03460124 03460143 03460147 03460153 03460163 03460171 03460184 03460185 03460186 03460191 03460205 03460210 03460212 03460260 03460279 03460282 03460296 03460297 03460302 03460309 03460318 03460333 03460338 03460340 03460341 03460342 03460345 03460362 03460380 03460390 03480004 03490004 03530003'.split(' ')
# Read dictionary
regquote = re.compile(r'".+')
with open('foodon-labels.tsv', "r") as lookup_handle:
for line in lookup_handle:
URL, label = line.strip().split('\t')
#<http://purl.obolibrary.org/obo/FOODON_03304344> "tempura batter"@en
#<http://www.ebi.ac.uk/ancestro/ancestro_0309>
#URL = URL.replace('<http://purl.obolibrary.org/obo/','')
URL = URL[1:-1] # chop < and >
label = label[1:]
print URL, re.sub(regquote,'',label)
foodon_lookup[URL] = re.sub(regquote,'',label) # chop everything from remaining quote onwards
with open('imports/siren_labels.txt', "r") as lookup_handle:
for line in lookup_handle:
(id, label) = line.strip().split('\t')
siren_lookup[id] = label
with (open('imports/siren_augment.owl', 'w')) as output_handle:
with open('imports/siren_augment.owl.old.txt', "r") as ins:
for line in ins:
# this substitutes line's URI reference with textual value
if line[0] == '*': # and not "' (" in line:
terms = re.split('(http:\/\/[a-z.]+\/[a-z]+\/[A-Za-z]+_[0-9]+)', line)
if len(terms) == 3:
if terms[1] in foodon_lookup:
label = foodon_lookup[terms[1]].replace('<','<').replace('>','>')
terms[1] = "'" + label + "' (" + terms[1] + ")"
# print 'textualizing' , terms[1]
else:
print 'couldnt find description foodon_lookup :', terms[1]
line = ''.join(terms)
if line[0:14] == ' <owl:Class':
terms = re.split('([0-9]+)', line) # extract FoodOn term ID.
if len(terms) == 3 and terms[1] in nameless:
if 'FOODON_' + terms[1] in siren_lookup:
line = line + '\n\t\t' + '<rdfs:label xml:lang="en">' + siren_lookup['FOODON_' + terms[1]] + '</rdfs:label>'
#print 'adding label for ',terms[1]
else:
print 'couldnt find label in siren_lookup :', terms[1]
output_handle.write(line)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
302,
198,
198,
19425,
261,
62,
5460,
929,
796,
23884,
198,
82,
24080,
62,
5460,
929,
796,
23884,
198,
198,
7402,
5321... | 2.384452 | 5,184 |
#encoding=utf-8
## SOLVED 2014/04/10
## 134043
# The first two consecutive numbers to have two distinct prime factors are:
# 14 = 2 × 7
# 15 = 3 × 5
# The first three consecutive numbers to have three distinct prime factors are:
# 644 = 2² × 7 × 23
# 645 = 3 × 5 × 43
# 646 = 2 × 17 × 19.
# Find the first four consecutive integers to have four distinct prime factors.
# What is the first of these numbers?
import helpers.prime as prime
FACTOR_COUNT = 4
def prime_factor_count(n):
"""Returns the number of distinct prime factors of a number."""
return len(prime.multiset_prime_factors(n))
| [
2,
12685,
7656,
28,
40477,
12,
23,
198,
2235,
36817,
53,
1961,
1946,
14,
3023,
14,
940,
198,
2235,
1511,
1821,
3559,
198,
198,
2,
383,
717,
734,
12785,
3146,
284,
423,
734,
7310,
6994,
5087,
389,
25,
198,
198,
2,
1478,
796,
362,
... | 3.102564 | 195 |
#!/usr/bin/env python
'''
A solution to a ROSALIND bioinformatics problem.
Problem Title: Consensus and Profile
Rosalind ID: SUBS
Rosalind #: 009
URL: http://rosalind.info/problems/subs/
'''
from numpy import zeros
from scripts import ReadFASTA
# Data is in FASTA form
dna_list = ReadFASTA('data/rosalind_cons.txt')
# Setup an array and count into the array
M = zeros((4,len(dna_list[0][1])), dtype = int)
snp_dict = {'A':0, 'C':1, 'G':2, 'T':3}
for dna in dna_list:
for index, snp in enumerate(dna[1]):
M[snp_dict[snp]][index] += 1
# Determine the consensus string
consensus = ''
to_snp = {0:'A', 1:'C', 2:'G', 3:'T'}
for i in range(0,len(dna_list[0][1])):
maxval = [-1,-1]
for j in range(0,4):
if maxval[1] < M[j][i]:
maxval = [j, M[j][i]]
consensus += to_snp[maxval[0]]
# Format the count properly
consensus = [consensus, 'A:', 'C:', 'G:', 'T:']
for index, col in enumerate(M):
for val in col:
consensus[index+1] += ' '+str(val)
# Print and write the output
print '\n'.join(consensus)
with open('output/010_CONS.txt', 'w') as output_data:
output_data.write('\n'.join(consensus))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
198,
32,
4610,
284,
257,
48263,
1847,
12115,
13401,
259,
18982,
873,
1917,
13,
198,
198,
40781,
11851,
25,
3515,
7314,
290,
13118,
198,
35740,
282,
521,
4522,
25,
13558,
4462,... | 2.26378 | 508 |
#!/usr/bin/env python
"""Copies data from RAPID netCDF output to a CF-compliant netCDF file.
Remarks:
A new netCDF file is created with data from RAPID [1] simulation model
output. The result follows CF conventions [2] with additional metadata
prescribed by the NODC timeSeries Orthogonal template [3] for time series
at discrete point feature locations.
This script was created for the National Flood Interoperability Experiment,
and so metadata in the result reflects that.
Requires:
netcdf4-python - https://github.com/Unidata/netcdf4-python
Inputs:
Lookup CSV table with COMID, Lat, Lon, and Elev_m columns. Columns must
be in that order and these must be the first four columns. The order of
COMIDs in the table must match the order of features in the netCDF file.
RAPID output netCDF file. File must be named *YYYYMMDDTHHMMZ.nc, e.g.,
rapid_20150124T0000Z.nc. The ISO datetime indicates the first time
coordinate in the file. An example CDL text representation of the file
header is shown below. The file must be located in the 'input' folder.
Input files are moved to the 'archive' upon completion.
///////////////////////////////////////////////////
netcdf result_2014100520141101 {
dimensions:
Time = UNLIMITED ; // (224 currently)
COMID = 61818 ;
variables:
float Qout(Time, COMID) ;
///////////////////////////////////////////////////
Outputs:
CF-compliant netCDF file of RAPID results, named with original filename
with "_CF" appended to the filename. File is written to 'output' folder.
Input netCDF file is archived or deleted, based on 'archive' config
parameter.
Usage:
Option 1: Run standalone. Script will use logger.
Option 2: Run from another script.
First, import the script, e.g., import make_CF_RAPID_output as cf.
If you want to use this script's logger (optional):
1. Call init_logger with the full path of a log filename to get a
logger designed for use with this script.
2. Call main() with the logger as the first argument.
If you don't want to use the logger, just call main().
References:
[1] http://rapid-hub.org/
[2] http://cfconventions.org/
[3] http://www.nodc.noaa.gov/data/formats/netcdf/v1.1/
"""
import ConfigParser
import csv
from datetime import datetime, timedelta
from glob import glob
import inspect
import os
import re
import shutil
from netCDF4 import Dataset
import numpy as np
def csv_to_list(csv_file, delimiter=','):
"""
Reads in a CSV file and returns the contents as list,
where every row is stored as a sublist, and each element
in the sublist represents 1 cell in the table.
"""
with open(csv_file, 'rb') as csv_con:
reader = csv.reader(csv_con, delimiter=delimiter)
return list(reader)
def get_this_file():
"""Returns full filename of this script.
Remarks: Inspect sometimes only gives filename without path if run from
command prompt or as a Windows scheduled task with a Start in location
specified.
"""
f = inspect.stack()[0][1]
if not os.path.isfile(f):
f = os.path.realpath(__file__)
return f
def get_this_path():
"""Returns path to this script."""
return os.path.dirname(get_this_file())
def log(message, severity):
"""Logs, prints, or raises a message.
Arguments:
message -- message to report
severity -- string of one of these values:
CRITICAL|ERROR|WARNING|INFO|DEBUG
"""
print_me = ['WARNING', 'INFO', 'DEBUG']
if severity in print_me:
print severity, message
else:
raise Exception(message)
def validate_raw_nc(nc):
"""Checks that raw netCDF file has the right dimensions and variables.
Arguments:
nc -- netCDF dataset object representing raw RAPID output
Returns:
name of ID dimension,
length of time dimension,
name of flow variable
Remarks: Raises exception if file doesn't validate.
"""
dims = nc.dimensions
if 'COMID' in dims:
id_dim_name = 'COMID'
elif 'FEATUREID' in dims:
id_dim_name = 'FEATUREID'
else:
msg = 'Could not find ID dimension. Looked for COMID and FEATUREID.'
raise Exception(msg)
id_len = len(dims[id_dim_name])
if 'Time' not in dims:
msg = 'Could not find time dimension. Looked for Time.'
raise Exception(msg)
time_len = len(dims['Time'])
variables = nc.variables
id_var_name = None
if 'COMID' in dims:
id_var_name = 'COMID'
elif 'FEATUREID' in dims:
id_var_name = 'FEATUREID'
if id_var_name is not None and id_var_name != id_dim_name:
msg = ('ID dimension name (' + id_dim_name + ') does not equal ID ' +
'variable name (' + id_var_name + ').')
log(msg, 'WARNING')
if 'Qout' in variables:
q_var_name = 'Qout'
elif 'm3_riv' in variables:
q_var_name = 'm3_riv'
else:
msg = 'Could not find flow variable. Looked for Qout and m3_riv.'
raise Exception(msg)
return id_dim_name, id_len, time_len, q_var_name
def initialize_output(filename, id_dim_name, time_len,
id_len, time_step_seconds):
"""Creates netCDF file with CF dimensions and variables, but no data.
Arguments:
filename -- full path and filename for output netCDF file
id_dim_name -- name of Id dimension and variable, e.g., COMID
time_len -- (integer) length of time dimension (number of time steps)
id_len -- (integer) length of Id dimension (number of time series)
time_step_seconds -- (integer) number of seconds per time step
"""
cf_nc = Dataset(filename, 'w', format='NETCDF3_CLASSIC')
# Create global attributes
log(' globals', 'DEBUG')
cf_nc.featureType = 'timeSeries'
cf_nc.Metadata_Conventions = 'Unidata Dataset Discovery v1.0'
cf_nc.Conventions = 'CF-1.6'
cf_nc.cdm_data_type = 'Station'
cf_nc.nodc_template_version = (
'NODC_NetCDF_TimeSeries_Orthogonal_Template_v1.1')
cf_nc.standard_name_vocabulary = ('NetCDF Climate and Forecast (CF) ' +
'Metadata Convention Standard Name ' +
'Table v28')
cf_nc.title = 'RAPID Result'
cf_nc.summary = ("Results of RAPID river routing simulation. Each river " +
"reach (i.e., feature) is represented by a point " +
"feature at its midpoint, and is identified by the " +
"reach's unique NHDPlus COMID identifier.")
cf_nc.time_coverage_resolution = 'point'
cf_nc.geospatial_lat_min = 0.0
cf_nc.geospatial_lat_max = 0.0
cf_nc.geospatial_lat_units = 'degrees_north'
cf_nc.geospatial_lat_resolution = 'midpoint of stream feature'
cf_nc.geospatial_lon_min = 0.0
cf_nc.geospatial_lon_max = 0.0
cf_nc.geospatial_lon_units = 'degrees_east'
cf_nc.geospatial_lon_resolution = 'midpoint of stream feature'
cf_nc.geospatial_vertical_min = 0.0
cf_nc.geospatial_vertical_max = 0.0
cf_nc.geospatial_vertical_units = 'm'
cf_nc.geospatial_vertical_resolution = 'midpoint of stream feature'
cf_nc.geospatial_vertical_positive = 'up'
cf_nc.project = 'National Flood Interoperability Experiment'
cf_nc.processing_level = 'Raw simulation result'
cf_nc.keywords_vocabulary = ('NASA/Global Change Master Directory ' +
'(GCMD) Earth Science Keywords. Version ' +
'8.0.0.0.0')
cf_nc.keywords = 'DISCHARGE/FLOW'
cf_nc.comment = 'Result time step (seconds): ' + str(time_step_seconds)
timestamp = datetime.utcnow().isoformat() + 'Z'
cf_nc.date_created = timestamp
cf_nc.history = (timestamp + '; added time, lat, lon, z, crs variables; ' +
'added metadata to conform to NODC_NetCDF_TimeSeries_' +
'Orthogonal_Template_v1.1')
# Create dimensions
log(' dimming', 'DEBUG')
cf_nc.createDimension('time', time_len)
cf_nc.createDimension(id_dim_name, id_len)
# Create variables
log(' timeSeries_var', 'DEBUG')
timeSeries_var = cf_nc.createVariable(id_dim_name, 'i4', (id_dim_name,))
timeSeries_var.long_name = (
'Unique NHDPlus COMID identifier for each river reach feature')
timeSeries_var.cf_role = 'timeseries_id'
log(' time_var', 'DEBUG')
time_var = cf_nc.createVariable('time', 'i4', ('time',))
time_var.long_name = 'time'
time_var.standard_name = 'time'
time_var.units = 'seconds since 1970-01-01 00:00:00 0:00'
time_var.axis = 'T'
log(' lat_var', 'DEBUG')
lat_var = cf_nc.createVariable('lat', 'f8', (id_dim_name,),
fill_value=-9999.0)
lat_var.long_name = 'latitude'
lat_var.standard_name = 'latitude'
lat_var.units = 'degrees_north'
lat_var.axis = 'Y'
log(' lon_var', 'DEBUG')
lon_var = cf_nc.createVariable('lon', 'f8', (id_dim_name,),
fill_value=-9999.0)
lon_var.long_name = 'longitude'
lon_var.standard_name = 'longitude'
lon_var.units = 'degrees_east'
lon_var.axis = 'X'
log(' z_var', 'DEBUG')
z_var = cf_nc.createVariable('z', 'f8', (id_dim_name,),
fill_value=-9999.0)
z_var.long_name = ('Elevation referenced to the North American ' +
'Vertical Datum of 1988 (NAVD88)')
z_var.standard_name = 'surface_altitude'
z_var.units = 'm'
z_var.axis = 'Z'
z_var.positive = 'up'
log(' crs_var', 'DEBUG')
crs_var = cf_nc.createVariable('crs', 'i4')
crs_var.grid_mapping_name = 'latitude_longitude'
crs_var.epsg_code = 'EPSG:4269' # NAD83, which is what NHD uses.
crs_var.semi_major_axis = 6378137.0
crs_var.inverse_flattening = 298.257222101
return cf_nc
def write_comid_lat_lon_z(cf_nc, lookup_filename, id_var_name):
"""Add latitude, longitude, and z values for each netCDF feature
Arguments:
cf_nc -- netCDF Dataset object to be modified
lookup_filename -- full path and filename for lookup table
id_var_name -- name of Id variable
Remarks:
Lookup table is a CSV file with COMID, Lat, Lon, and Elev_m columns.
Columns must be in that order and these must be the first four columns.
"""
#get list of COMIDS
lookup_table = csv_to_list(lookup_filename)
lookup_comids = np.array([int(float(row[0])) for row in lookup_table[1:]])
# Get relevant arrays while we update them
nc_comids = cf_nc.variables[id_var_name][:]
lats = cf_nc.variables['lat'][:]
lons = cf_nc.variables['lon'][:]
zs = cf_nc.variables['z'][:]
lat_min = None
lat_max = None
lon_min = None
lon_max = None
z_min = None
z_max = None
# Process each row in the lookup table
for nc_index, nc_comid in enumerate(nc_comids):
try:
lookup_index = np.where(lookup_comids == nc_comid)[0][0] + 1
except Exception:
log('COMID %s misssing in comid_lat_lon_z file' % nc_comid,
'ERROR')
lat = float(lookup_table[lookup_index][1])
lats[nc_index] = lat
if (lat_min) is None or lat < lat_min:
lat_min = lat
if (lat_max) is None or lat > lat_max:
lat_max = lat
lon = float(lookup_table[lookup_index][2])
lons[nc_index] = lon
if (lon_min) is None or lon < lon_min:
lon_min = lon
if (lon_max) is None or lon > lon_max:
lon_max = lon
z = float(lookup_table[lookup_index][3])
zs[nc_index] = z
if (z_min) is None or z < z_min:
z_min = z
if (z_max) is None or z > z_max:
z_max = z
# Overwrite netCDF variable values
cf_nc.variables['lat'][:] = lats
cf_nc.variables['lon'][:] = lons
cf_nc.variables['z'][:] = zs
# Update metadata
if lat_min is not None:
cf_nc.geospatial_lat_min = lat_min
if lat_max is not None:
cf_nc.geospatial_lat_max = lat_max
if lon_min is not None:
cf_nc.geospatial_lon_min = lon_min
if lon_max is not None:
cf_nc.geospatial_lon_max = lon_max
if z_min is not None:
cf_nc.geospatial_vertical_min = z_min
if z_max is not None:
cf_nc.geospatial_vertical_max = z_max
def convert_ecmwf_rapid_output_to_cf_compliant(start_date,
start_folder=None,
time_step=6*3600, #time step in seconds
output_id_dim_name='COMID', #name of ID dimension in output file, typically COMID or FEATUREID
output_flow_var_name='Qout' #name of streamflow variable in output file, typically Qout or m3_riv
):
"""
Copies data from RAPID netCDF output to a CF-compliant netCDF file.
"""
if start_folder:
path = start_folder
else:
path = get_this_path()
# Get files to process
inputs = glob(os.path.join(path,"Qout*.nc"))
if len(inputs) == 0:
log('No files to process', 'INFO')
return
rapid_input_directory = os.path.join(path, "rapid_input")
#make sure comid_lat_lon_z file exists before proceeding
try:
comid_lat_lon_z_lookup_filename = os.path.join(rapid_input_directory,
[filename for filename in os.listdir(rapid_input_directory) \
if re.search(r'comid_lat_lon_z.*?\.csv', filename, re.IGNORECASE)][0])
except IndexError:
comid_lat_lon_z_lookup_filename = ""
pass
if comid_lat_lon_z_lookup_filename:
for rapid_nc_filename in inputs:
try:
cf_nc_filename = '%s_CF.nc' % os.path.splitext(rapid_nc_filename)[0]
log('Processing %s' % rapid_nc_filename, 'INFO')
log('New file %s' % cf_nc_filename, 'INFO')
time_start_conversion = datetime.utcnow()
# Validate the raw netCDF file
rapid_nc = Dataset(rapid_nc_filename)
log('validating input netCDF file', 'DEBUG')
input_id_dim_name, id_len, time_len, input_flow_var_name = (
validate_raw_nc(rapid_nc))
# Initialize the output file (create dimensions and variables)
log('initializing output', 'DEBUG')
cf_nc = initialize_output(cf_nc_filename, output_id_dim_name,
time_len, id_len, time_step)
# Populate time values
log('writing times', 'DEBUG')
total_seconds = time_step * time_len
end_date = (start_date +
timedelta(seconds=(total_seconds - time_step)))
d1970 = datetime(1970, 1, 1)
secs_start = int((start_date - d1970).total_seconds())
secs_end = secs_start + total_seconds
cf_nc.variables['time'][:] = np.arange(
secs_start, secs_end, time_step)
cf_nc.time_coverage_start = start_date.isoformat() + 'Z'
cf_nc.time_coverage_end = end_date.isoformat() + 'Z'
# Populate comid, lat, lon, z
log('writing comid lat lon z', 'DEBUG')
lookup_start = datetime.now()
cf_nc.variables[output_id_dim_name][:] = rapid_nc.variables[input_id_dim_name][:]
write_comid_lat_lon_z(cf_nc, comid_lat_lon_z_lookup_filename, output_id_dim_name)
duration = str((datetime.now() - lookup_start).total_seconds())
log('Lookup Duration (s): ' + duration, 'DEBUG')
# Create a variable for streamflow. This is big, and slows down
# previous steps if we do it earlier.
log('Creating streamflow variable', 'DEBUG')
q_var = cf_nc.createVariable(
output_flow_var_name, 'f4', (output_id_dim_name, 'time'))
q_var.long_name = 'Discharge'
q_var.units = 'm^3/s'
q_var.coordinates = 'time lat lon z'
q_var.grid_mapping = 'crs'
q_var.source = ('Generated by the Routing Application for Parallel ' +
'computatIon of Discharge (RAPID) river routing model.')
q_var.references = 'http://rapid-hub.org/'
q_var.comment = ('lat, lon, and z values taken at midpoint of river ' +
'reach feature')
log('Copying streamflow values', 'DEBUG')
q_var[:] = rapid_nc.variables[input_flow_var_name][:].transpose()
rapid_nc.close()
cf_nc.close()
#delete original RAPID output
try:
os.remove(rapid_nc_filename)
except OSError:
pass
#replace original with nc compliant file
shutil.move(cf_nc_filename, rapid_nc_filename)
log('Time to process %s' % (datetime.utcnow()-time_start_conversion), 'INFO')
except Exception, e:
#delete cf RAPID output
try:
os.remove(cf_nc_filename)
except OSError:
pass
log('Error in main function %s' % e, 'WARNING')
raise
else:
log("No comid_lat_lon_z file found. Skipping ...", "INFO")
log('Files processed: ' + str(len(inputs)), 'INFO')
if __name__ == "__main__":
convert_ecmwf_rapid_output_to_cf_compliant(start_date=datetime(1980,1,1),
start_folder='/Users/Alan/Documents/RESEARCH/RAPID/input/nfie_texas_gulf_region/rapid_updated'
) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
37811,
13379,
444,
1366,
422,
371,
2969,
2389,
2010,
34,
8068,
5072,
284,
257,
18551,
12,
23855,
3014,
2010,
34,
8068,
2393,
13,
201,
198,
201,
198,
8413,
5558,
25,
201,
198,
22... | 2.091722 | 8,951 |
# Copyright [2020] [Toyota Research Institute]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests related to Splicing files"""
import os
import unittest
import numpy as np
from beep.utils import MaccorSplice
TEST_DIR = os.path.dirname(__file__)
TEST_FILE_DIR = os.path.join(TEST_DIR, "test_files")
| [
2,
15069,
685,
42334,
60,
685,
48236,
4265,
4992,
5136,
60,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13... | 3.491379 | 232 |
if __name__ == '__main__':
victor = Pessoa(nome='Victor')
vinicius = Pessoa(victor,nome='Vinicius')
print(Pessoa.cumprimentar(vinicius))
print(id(vinicius))
print(vinicius.nome)
print(vinicius.idade)
for filho in vinicius.filhos:
print(filho.nome)
| [
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
2210,
273,
796,
350,
408,
12162,
7,
77,
462,
11639,
21944,
273,
11537,
198,
220,
220,
220,
410,
47277,
3754,
796,
350,
408,
12162,
7,
32433,
273... | 2.028169 | 142 |
"""Example to create a Panel of Ophyd Signals from an object"""
import sys
import numpy as np
from qtpy.QtWidgets import QApplication
import typhos
from ophyd import Component as Cpt
from ophyd import Device, Signal
from typhos.utils import SignalRO
class Sample(Device):
"""Simulated Device"""
readback = Cpt(SignalRO, value=1)
setpoint = Cpt(Signal, value=2)
waveform = Cpt(SignalRO, value=np.random.randn(100, ))
image = Cpt(SignalRO, value=np.abs(np.random.randn(100, 100)) * 455)
# Create my device without a prefix
sample = Sample('', name='sample')
if __name__ == '__main__':
# Create my application
app = QApplication(sys.argv)
typhos.use_stylesheet()
# Create my panel
panel = typhos.TyphosSignalPanel.from_device(sample)
panel.sortBy = panel.byName
# Execute
panel.show()
app.exec_()
| [
37811,
16281,
284,
2251,
257,
18810,
286,
440,
746,
5173,
5865,
874,
422,
281,
2134,
37811,
198,
11748,
25064,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
10662,
83,
9078,
13,
48,
83,
54,
312,
11407,
1330,
1195,
23416,
198,
19... | 2.712025 | 316 |
import tensorly as tl
from tensorly.decomposition import parafac, partial_tucker
import numpy as np
import torch
import torch.nn as nn
from typing import *
def cp_decomposition_conv_layer(layer, rank):
# rank = max(layer.weight.data.numpy().shape) // 3
"""Gets a conv layer and a target rank,
returns a nn.Sequential object with the decomposition"""
# Perform CP decomposition on the layer weight tensorly.
last, first, vertical, horizontal = parafac(layer.weight.data, rank=rank, init="svd").factors
pointwise_s_to_r_layer = torch.nn.Conv2d(
in_channels=first.shape[0],
out_channels=first.shape[1],
kernel_size=1,
stride=1,
padding=0,
dilation=layer.dilation,
bias=False,
)
depthwise_vertical_layer = torch.nn.Conv2d(
in_channels=vertical.shape[1],
out_channels=vertical.shape[1],
kernel_size=(vertical.shape[0], 1),
stride=1,
padding=(layer.padding[0], 0),
dilation=layer.dilation,
groups=vertical.shape[1],
bias=False,
)
depthwise_horizontal_layer = torch.nn.Conv2d(
in_channels=horizontal.shape[1],
out_channels=horizontal.shape[1],
kernel_size=(1, horizontal.shape[0]),
stride=layer.stride,
padding=(0, layer.padding[0]),
dilation=layer.dilation,
groups=horizontal.shape[1],
bias=False,
)
pointwise_r_to_t_layer = torch.nn.Conv2d(
in_channels=last.shape[1],
out_channels=last.shape[0],
kernel_size=1,
stride=1,
padding=0,
dilation=layer.dilation,
bias=True,
)
if layer.bias is not None:
pointwise_r_to_t_layer.bias.data = layer.bias.data
depthwise_horizontal_layer.weight.data = (
torch.transpose(horizontal, 1, 0).unsqueeze(1).unsqueeze(1)
)
depthwise_vertical_layer.weight.data = (
torch.transpose(vertical, 1, 0).unsqueeze(1).unsqueeze(-1)
)
pointwise_s_to_r_layer.weight.data = torch.transpose(first, 1, 0).unsqueeze(-1).unsqueeze(-1)
pointwise_r_to_t_layer.weight.data = last.unsqueeze(-1).unsqueeze(-1)
new_layers = [
pointwise_s_to_r_layer,
depthwise_vertical_layer,
depthwise_horizontal_layer,
pointwise_r_to_t_layer,
]
return nn.Sequential(*new_layers)
def tucker_decomposition_conv_layer(
layer: nn.Module,
normed_rank: List[int] = [0.5, 0.5],
) -> nn.Module:
"""Gets a conv layer,
returns a nn.Sequential object with the Tucker decomposition.
The ranks are estimated with a Python implementation of VBMF
https://github.com/CasvandenBogaard/VBMF
"""
if hasattr(layer, "rank"):
normed_rank = getattr(layer, "rank")
rank = [
int(r * layer.weight.shape[i]) for i, r in enumerate(normed_rank)
] # output channel * normalized rank
rank = [max(r, 2) for r in rank]
core, [last, first] = partial_tucker(
layer.weight.data,
modes=[0, 1],
n_iter_max=2000000,
rank=rank,
init="svd",
)
# A pointwise convolution that reduces the channels from S to R3
first_layer = nn.Conv2d(
in_channels=first.shape[0],
out_channels=first.shape[1],
kernel_size=1,
stride=1,
padding=0,
dilation=layer.dilation,
bias=False,
)
# A regular 2D convolution layer with R3 input channels
# and R3 output channels
core_layer = nn.Conv2d(
in_channels=core.shape[1],
out_channels=core.shape[0],
kernel_size=layer.kernel_size,
stride=layer.stride,
padding=layer.padding,
dilation=layer.dilation,
bias=False,
)
# A pointwise convolution that increases the channels from R4 to T
last_layer = nn.Conv2d(
in_channels=last.shape[1],
out_channels=last.shape[0],
kernel_size=1,
stride=1,
padding=0,
dilation=layer.dilation,
bias=True,
)
if hasattr(layer, "bias") and layer.bias is not None:
last_layer.bias.data = layer.bias.data
first_layer.weight.data = torch.transpose(first, 1, 0).unsqueeze(-1).unsqueeze(-1)
last_layer.weight.data = last.unsqueeze(-1).unsqueeze(-1)
core_layer.weight.data = core
new_layers = [first_layer, core_layer, last_layer]
return nn.Sequential(*new_layers)
| [
11748,
11192,
273,
306,
355,
256,
75,
198,
6738,
11192,
273,
306,
13,
12501,
296,
9150,
1330,
1582,
1878,
330,
11,
13027,
62,
83,
12603,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77... | 2.170768 | 2,032 |
# -*- coding:utf-8 -*-
import wave
import requests
import time
import base64
import numpy as np
from pyaudio import PyAudio, paInt16
import time
from playsound import playsound
import os
import sys
framerate = 16000 #
num_samples = 2000
channels = 1
sampwidth = 2 #
FILEPATH = 'speech.wav'
base_url = "https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id=%s&client_secret=%s"
APIKey = "xquGU6uUM5EUMmnjbWGkkGUG"
SecretKey = "nfhYce3srBPwc6VQGbYL6KhGv3Cuwoo7"
HOST = base_url % (APIKey, SecretKey)
if __name__ == '__main__':
while True:
print('************ 您请说:')
my_record() # 进行录音
TOKEN = getToken(HOST)
speech = get_audio(FILEPATH)
result = speech2text(speech, TOKEN, int(1537))
if type(result) == str:
print('rec result:'+result)
else:
print('未听到您说话^-^') | [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
11748,
6769,
198,
11748,
7007,
198,
11748,
640,
198,
11748,
2779,
2414,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
12972,
24051,
1330,
9485,
21206,
11,
14187,
5317,
1433,
... | 2.104762 | 420 |
from glob import glob
import os
import sys
from setuptools import setup
name = "pypdc"
version="0.0.8"
description = "Python asymptotic Partial Directed Coherence and Directed Coherence estimation package for brain connectivity analysis."
authors = {
"Sameshima": ("Koichi Sameshima", "ksameshi@usp.br"),
"Brito": ("Carlos Stein Naves de Brito", "c.brito@ucl.ac.uk"),
"Baldo" : ("Heitor Baldo", "hbaldo@usp.br")
}
platforms = ["Linux", "Mac OSX", "Windows", "Unix"]
keywords = [
"Brain Connectivity",
"PDC", "iPDC",
"Granger Causality",
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering :: Bio-Informatics",
]
packages = ["pypdc"]
with open("README.rst", "r") as fh:
long_description = fh.read()
if __name__ == "__main__":
setup(
name=name,
version=version,
author=authors["Sameshima"][0],
author_email=authors["Sameshima"][1],
description=description,
keywords=keywords,
platforms=platforms,
classifiers=classifiers,
packages=packages,
zip_safe=False,
) | [
6738,
15095,
1330,
15095,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
900,
37623,
10141,
1330,
9058,
628,
198,
3672,
796,
366,
79,
4464,
17896,
1,
198,
9641,
2625,
15,
13,
15,
13,
23,
1,
198,
11213,
796,
366,
37906,
355,
4948,
... | 2.666116 | 605 |
#!/usr/bin/env python3
# Copyright 2021 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Utils for accessing Android devices."""
import json
import re
from typing import Sequence
from .benchmark_definition import (execute_cmd_and_get_output, DeviceInfo,
PlatformType)
def get_android_device_model(verbose: bool = False) -> str:
"""Returns the Android device model."""
model = execute_cmd_and_get_output(
["adb", "shell", "getprop", "ro.product.model"], verbose=verbose)
model = re.sub(r"\W+", "-", model)
return model
def get_android_cpu_abi(verbose: bool = False) -> str:
"""Returns the CPU ABI for the Android device."""
return execute_cmd_and_get_output(
["adb", "shell", "getprop", "ro.product.cpu.abi"], verbose=verbose)
def get_android_cpu_features(verbose: bool = False) -> Sequence[str]:
"""Returns the CPU features for the Android device."""
cpuinfo = execute_cmd_and_get_output(["adb", "shell", "cat", "/proc/cpuinfo"],
verbose=verbose)
features = []
for line in cpuinfo.splitlines():
if line.startswith("Features"):
_, features = line.split(":")
return features.strip().split()
return features
def get_android_gpu_name(verbose: bool = False) -> str:
"""Returns the GPU name for the Android device."""
vkjson = execute_cmd_and_get_output(["adb", "shell", "cmd", "gpu", "vkjson"],
verbose=verbose)
vkjson = json.loads(vkjson)
name = vkjson["devices"][0]["properties"]["deviceName"]
# Perform some canonicalization:
# - Adreno GPUs have raw names like "Adreno (TM) 650".
name = name.replace("(TM)", "")
# Replace all consecutive non-word characters with a single hypen.
name = re.sub(r"\W+", "-", name)
return name
def get_android_device_info(verbose: bool = False) -> DeviceInfo:
"""Returns device info for the Android device."""
return DeviceInfo(PlatformType.ANDROID, get_android_device_model(verbose),
get_android_cpu_abi(verbose),
get_android_cpu_features(verbose),
get_android_gpu_name(verbose))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
33448,
383,
314,
11587,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
410,
17,
13,
15,
351,
27140,
15996,
1475,
11755,
13,
198,
2,
4091,
3740,
1378,
297,
14761,... | 2.616423 | 889 |
"""
Create a profile from an ASCII CTD datafile
===========================================
Use the TAMOC ambient module to create profiles in netCDF format for use by
TAMOC from data in text files downloaded from a CTD. This file demonstrates
working with the data from the R/V Brooks McCall at Station BM 54 on May 30,
2010, stored in the file /Raw_Data/ctd_BM54.cnv.
Notes
-----
Much of the input data in the script (e.g., columns to extract, column names,
lat and lon location data, date and time, etc.) is read by the user manually
from the header file of the CTD text file. These data are then hand-coded in
the script text. While it would be straightforward to automate this process
for a given format of CTD files, this step is left to the user to customize to
their own data sets.
Requires
--------
This script read data from the text file::
./Profiles/Raw_Data/ctd_BM54.dat
Returns
-------
This script generates a `ambient.Profile` object, whose netCDF file is written
to the file::
./Profiles/Profiles/BM54.nc
"""
# S. Socolofsky, July 2013, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from tamoc import ambient
from tamoc import seawater
from netCDF4 import date2num, num2date
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import os
if __name__ == '__main__':
# Get the path to the input file
__location__ = os.path.realpath(os.path.join(os.getcwd(),
os.path.dirname(__file__),
'../../tamoc/data'))
dat_file = os.path.join(__location__,'ctd_BM54.cnv')
# Load in the data using numpy.loadtxt
raw = np.loadtxt(dat_file, comments = '#', skiprows = 175,
usecols = (0, 1, 3, 8, 9, 10, 12))
# Describe the organization of the data in raw.
var_names = ['temperature', 'pressure', 'wetlab_fluorescence', 'z',
'salinity', 'density', 'oxygen']
var_units = ['deg C', 'db', 'mg/m^3', 'm', 'psu', 'kg/m^3', 'mg/l']
z_col = 3
# Clean the profile to remove reversals in the depth coordinate
data = ambient.extract_profile(raw, z_col, 50.0)
# Convert the profile data to standard units in TAMOC
profile, units = ambient.convert_units(data, var_units)
# Create an empty netCDF4-classic dataset to store this CTD data
__location__ = os.path.realpath(os.path.join(os.getcwd(),
os.path.dirname(__file__),
'../../test/output'))
nc_file = os.path.join(__location__,'BM54.nc')
summary = 'Dataset created by profile_from_ctd in the ./bin directory' \
+ ' of TAMOC'
source = 'R/V Brooks McCall, station BM54'
sea_name = 'Gulf of Mexico'
p_lat = 28.0 + 43.945 / 60.0
p_lon = 360 - (88.0 + 22.607 / 60.0)
p_time = date2num(datetime(2010, 5, 30, 18, 22, 12),
units = 'seconds since 1970-01-01 00:00:00 0:00',
calendar = 'julian')
nc = ambient.create_nc_db(nc_file, summary, source, sea_name, p_lat,
p_lon, p_time)
# Insert the CTD data into the netCDF dataset
comments = ['measured'] * len(var_names)
nc = ambient.fill_nc_db(nc, profile, var_names, units, comments, z_col)
# Create an ambient.Profile object for this dataset
bm54 = ambient.Profile(nc, chem_names=['oxygen'], err=0.00001)
# Close the netCDF dataset
bm54.nc.close()
# Since the netCDF file is now fully stored on the hard drive in the
# correct format, we can initialize an ambient.Profile object directly
# from the netCDF file
bm54 = ambient.Profile(nc_file, chem_names='all')
# Plot the density profile using the interpolation function
z = np.linspace(bm54.nc.variables['z'].valid_min,
bm54.nc.variables['z'].valid_max, 250)
rho = np.zeros(z.shape)
T = np.zeros(z.shape)
S = np.zeros(z.shape)
C = np.zeros(z.shape)
O2 = np.zeros(z.shape)
tsp = bm54.get_values(z, ['temperature', 'salinity', 'pressure'])
for i in range(len(z)):
rho[i] = seawater.density(tsp[i,0], tsp[i,1], tsp[i,2])
T[i], S[i], C[i], O2[i] = bm54.get_values(z[i], ['temperature',
'salinity', 'wetlab_fluorescence', 'oxygen'])
plt.figure(1)
plt.clf()
plt.show()
ax1 = plt.subplot(121)
ax1.plot(rho, z)
ax1.set_xlabel('Density (kg/m^3)')
ax1.set_ylabel('Depth (m)')
ax1.invert_yaxis()
ax1.set_title('Computed data')
# Compare to the measured profile
z_m = bm54.nc.variables['z'][:]
rho_m = bm54.nc.variables['density'][:]
ax2 = plt.subplot(1,2,2)
ax2.plot(rho_m, z_m)
ax2.set_xlabel('Density (kg/m^3)')
ax2.invert_yaxis()
ax2.set_title('Measured data')
plt.draw()
plt.figure(2)
plt.clf()
plt.show()
ax1 = plt.subplot(131)
ax1.plot(C*1.e6, z, '-', label='Fluorescence (g/m^3)')
ax1.set_xlabel('CTD component values')
ax1.set_ylabel('Depth (m)')
ax1.set_ylim([800, 1500])
ax1.set_xlim([0, 40])
ax1.invert_yaxis()
ax1.locator_params(tight=True, nbins=6)
ax1.legend(loc='upper right', prop={'size':10})
ax1.grid(True)
ax2 = plt.subplot(132)
ax2.plot(T - 273.15, z, '-', label='Temperature (deg C)')
ax2.plot(O2*1.e3, z, '--', label='Oxygen (g/m^3)')
ax2.set_xlabel('CTD component values')
ax2.set_ylabel('Depth (m)')
ax2.set_ylim([800, 1500])
ax2.set_xlim([0, 8])
ax2.invert_yaxis()
ax2.locator_params(tight=True, nbins=6)
ax2.legend(loc='upper right', prop={'size':10})
ax2.grid(True)
ax3 = plt.subplot(133)
ax3.plot(S, z, '-', label='Salinity (psu)')
ax3.set_xlabel('CTD component values')
ax3.set_ylabel('Depth (m)')
ax3.set_ylim([800, 1500])
ax3.set_xlim([34.5, 35])
ax3.invert_yaxis()
ax3.locator_params(tight=True, nbins=6)
ax3.legend(loc='upper right', prop={'size':10})
ax3.grid(True)
plt.draw()
# Close the netCDF dataset
bm54.nc.close()
| [
37811,
198,
16447,
257,
7034,
422,
281,
37101,
16356,
35,
1366,
7753,
198,
10052,
2559,
18604,
198,
198,
11041,
262,
33112,
4503,
25237,
8265,
284,
2251,
16545,
287,
2010,
34,
8068,
5794,
329,
779,
416,
220,
198,
51,
2390,
4503,
422,
... | 2.183461 | 2,878 |
import os
import json
import unittest
from pathlib import Path
from typing import Dict, Any
import jsonschema.exceptions
from schema_entry.entrypoint import EntryPoint
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
555,
715,
395,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
360,
713,
11,
4377,
198,
11748,
44804,
684,
2395,
2611,
13,
1069,
11755,
198,
198,
6738,
32815,
62,
13000,
13,
13000... | 3.604167 | 48 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for operators with no arguments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import googletest
if __name__ == "__main__":
googletest.main()
| [
2,
15069,
2177,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.903448 | 290 |
# FileName: Flask-Blog > blog > config.py
import os, secrets
| [
2,
9220,
5376,
25,
46947,
12,
42383,
1875,
4130,
1875,
4566,
13,
9078,
201,
198,
11748,
28686,
11,
13141,
201,
198,
201,
198
] | 2.826087 | 23 |
# -*- coding: utf-8 -*-
"""
QR code that be scanned to allow login
"""
import qrcode
import tempfile
import webbrowser
from decimal import Decimal
from qrcode.image.svg import SvgImage
from .constants import JIKE_URI_SCHEME_FMT, RENDER2BROWSER_HTML_TEMPLATE
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
48,
49,
2438,
326,
307,
28660,
284,
1249,
17594,
198,
37811,
198,
198,
11748,
10662,
6015,
1098,
198,
11748,
20218,
7753,
198,
11748,
3992,
40259,
198,
... | 2.676768 | 99 |
from decorators import debug, do_twice
# debug(do_twice(greet()))
@debug
@do_twice
greet("Eva") | [
6738,
11705,
2024,
1330,
14257,
11,
466,
62,
4246,
501,
198,
198,
2,
14257,
7,
4598,
62,
4246,
501,
7,
70,
2871,
3419,
4008,
198,
31,
24442,
198,
31,
4598,
62,
4246,
501,
628,
198,
70,
2871,
7203,
44239,
4943
] | 2.45 | 40 |
import json
from collections import defaultdict
import numpy as np
from tqdm import tqdm
from stanza.nlp.corenlp import CoreNLPClient
import itertools
from jiwer import wer
from utils import get_cnet_best_pass
import csv
from math import exp
from functools import reduce
import time
client = None
def cnet_best_n_paths(confusion_network,n,paths):
"""Prints n best paths in list format with each element as a pair of string and log probability.Takes actual probability as input"""
confusion_network=[list(sorted(i,key=lambda x:x[1],reverse=True)) for i in confusion_network]
if confusion_network:
if paths:
new_addition=[[[l[0]],l[1]] for l in confusion_network[0][:n]]
paths=list(itertools.product(paths,new_addition))
paths=[[reduce(lambda x,y:x[0]+y[0],path),reduce(lambda x,y:x[1]+y[1],path)] for path in paths]
paths=list(sorted(paths,key=lambda x:x[1],reverse=True))[:n]
return cnet_best_n_paths(confusion_network[1:],n,paths)
else:
paths=confusion_network[0][:n] #[['<s>', 0.9999000049998333], ['!null', 0.0]]
paths=[[[l[0]],l[1]] for l in paths] #[[['<s>'], 0.9999000049998333], [['!null'], 0.0]]
return cnet_best_n_paths(confusion_network[1:],n,paths)
else:
return paths
| [
11748,
33918,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
336,
35819,
13,
21283,
79,
13,
7295,
21283,
79,
1330,
7231,
45,
19930,
11792,
198,
11748,... | 2.314136 | 573 |
#!python
from queue import LinkedQueue
if __name__ == '__main__':
test_binary_search_tree()
| [
2,
0,
29412,
198,
6738,
16834,
1330,
7502,
276,
34991,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1332,
62,
39491,
62,
12947,
62,
21048,
3419,
198
] | 2.75 | 36 |
import asyncio
import urllib.parse
from dataclasses import dataclass
import httpx
@dataclass(eq=False)
| [
11748,
30351,
952,
198,
11748,
2956,
297,
571,
13,
29572,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
11748,
2638,
87,
628,
198,
31,
19608,
330,
31172,
7,
27363,
28,
25101,
8,
628,
628,
628,
198
] | 2.871795 | 39 |
if __name__ == "__main__":
tests()
| [
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
5254,
3419,
198
] | 2.210526 | 19 |
# -*- coding: utf-8 -*-
import numpy as np
from .signal_interpolate import signal_interpolate
from .signal_formatpeaks import _signal_formatpeaks_sanitize
def signal_period(peaks, sampling_rate=1000, desired_length=None,
interpolation_order="cubic"):
"""Calculate signal period from a series of peaks.
Parameters
----------
peaks : list, array, DataFrame, Series or dict
The samples at which the peaks occur. If an array is passed in, it is
assumed that it was obtained with `signal_findpeaks()`. If a DataFrame
is passed in, it is assumed it is of the same length as the input
signal in which occurrences of R-peaks are marked as "1", with such
containers obtained with e.g., ecg_findpeaks() or rsp_findpeaks().
sampling_rate : int
The sampling frequency of the signal that contains peaks (in Hz, i.e.,
samples/second). Defaults to 1000.
desired_length : int
By default, the returned signal rate has the same number of elements as
the raw signal. If set to an integer, the returned signal rate will be
interpolated between peaks over `desired_length` samples. Has no
effect if a DataFrame is passed in as the `signal` argument. Defaults
to None.
interpolation_order : str
Order used to interpolate the rate between peaks. See
`signal_interpolate()`.
Returns
-------
array
A vector containing the period.
See Also
--------
signal_findpeaks, signal_fixpeaks, signal_plot
Examples
--------
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=10, sampling_rate=1000,
>>> frequency=1)
>>> info = nk.signal_findpeaks(signal)
>>>
>>> rate = nk.signal_rate(peaks=info["Peaks"])
>>> nk.signal_plot(rate)
"""
peaks, desired_length = _signal_formatpeaks_sanitize(peaks, desired_length)
# Sanity checks.
if len(peaks) <= 3:
print("NeuroKit warning: _signal_formatpeaks(): too few peaks detected"
" to compute the rate. Returning empty vector.")
return np.full(desired_length, np.nan)
# Calculate period in sec, based on peak to peak difference and make sure
# that rate has the same number of elements as peaks (important for
# interpolation later) by prepending the mean of all periods.
period = np.ediff1d(peaks, to_begin=0) / sampling_rate
period[0] = np.mean(period[1:])
# Interpolate all statistics to desired length.
if desired_length != np.size(peaks):
period = signal_interpolate(peaks, period,
desired_length=desired_length,
method=interpolation_order)
return period
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
764,
12683,
282,
62,
3849,
16104,
378,
1330,
6737,
62,
3849,
16104,
378,
198,
6738,
764,
12683,
282,
62,
18982,
431,
4730,... | 2.597415 | 1,083 |
# Import global settings to make it easier to extend settings.
from django.conf.global_settings import * # pylint: disable=W0614,W0401
#============================================================================
# Generic Django project settings
#============================================================================
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SITE_ID = 1
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'UTC'
USE_TZ = True
USE_I18N = True
USE_L10N = True
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', 'English'),
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'c)lzq@kp6ta$=2m5cvzbg7_66j7m+__kv+ay_b34uyg**pf@+('
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework_swagger',
'wagtail.wagtailcore',
'wagtail.wagtailadmin',
'wagtail.wagtaildocs',
'wagtail.wagtailsnippets',
'wagtail.wagtailusers',
'wagtail.wagtailimages',
'wagtail.wagtailembeds',
'wagtail.wagtailsearch',
'wagtail.wagtailredirects',
'wagtail.wagtailforms',
'compressor',
'taggit',
'modelcluster',
'morgan.apps.cms',
)
#============================================================================
# Calculation of directories relative to the project module location
#============================================================================
import os
import sys
import morgan as project_module
PROJECT_DIR = os.path.dirname(os.path.realpath(project_module.__file__))
PYTHON_BIN = os.path.dirname(sys.executable)
ve_path = os.path.dirname(os.path.dirname(os.path.dirname(PROJECT_DIR)))
# Assume that the presence of 'activate_this.py' in the python bin/
# directory means that we're running in a virtual environment.
if os.path.exists(os.path.join(PYTHON_BIN, 'activate_this.py')):
# We're running with a virtualenv python executable.
VAR_ROOT = os.path.join(os.path.dirname(PYTHON_BIN), 'var')
elif ve_path and os.path.exists(os.path.join(ve_path, 'bin',
'activate_this.py')):
# We're running in [virtualenv_root]/src/[project_name].
VAR_ROOT = os.path.join(ve_path, 'var')
else:
# Set the variable root to the local configuration location (which is
# ignored by the repository).
VAR_ROOT = os.path.join(PROJECT_DIR, 'settings', 'local')
if not os.path.exists(VAR_ROOT):
os.mkdir(VAR_ROOT)
#============================================================================
# Project URLS and media settings
#============================================================================
WAGTAIL_SITE_NAME = 'Morgan County'
ROOT_URLCONF = 'morgan.urls'
LOGIN_URL = 'wagtailadmin_login'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = 'wagtailadmin_home'
STATIC_URL = '/static/'
MEDIA_URL = '/uploads/'
STATIC_ROOT = os.path.join(VAR_ROOT, 'static')
MEDIA_ROOT = os.path.join(VAR_ROOT, 'uploads')
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
#============================================================================
# Templates
#============================================================================
TEMPLATE_DIRS = (
os.path.join(PROJECT_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
WAGTAILSEARCH_RESULTS_TEMPLATE = 'cms/search.html'
#============================================================================
# Middleware
#============================================================================
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
#============================================================================
# Auth / security
#============================================================================
ALLOWED_HOSTS = []
AUTHENTICATION_BACKENDS += ()
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
##============================================================================
# API settings
#=============================================================================
REST_FRAMEWORK = {
'PAGINATE_BY': 10,
'PAGINATE_BY_PARAM': 'page_size',
'MAX_PAGINATE_BY': 100,
'DEFAULT_AUTHENTICATION_CLASSES': [],
'DEFAULT_PAGINATION_SERIALIZER_CLASS':
'rest_framework_ember.pagination.EmberPaginationSerializer',
'DEFAULT_PARSER_CLASSES': (
'rest_framework_ember.parsers.EmberJSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework_ember.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
),
}
#============================================================================
# Miscellaneous project settings
#============================================================================
#============================================================================
# Third party app settings
#============================================================================
| [
2,
17267,
3298,
6460,
284,
787,
340,
4577,
284,
9117,
6460,
13,
198,
6738,
42625,
14208,
13,
10414,
13,
20541,
62,
33692,
1330,
1635,
220,
220,
1303,
279,
2645,
600,
25,
15560,
28,
54,
3312,
1415,
11,
54,
3023,
486,
198,
198,
2,
2... | 2.967697 | 2,167 |
from django.contrib import admin
from .models import *
admin.site.register(Car)
admin.site.register(DriverLicense)
admin.site.register(Driver)
admin.site.register(Ownership)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
1635,
198,
198,
28482,
13,
15654,
13,
30238,
7,
9914,
8,
198,
28482,
13,
15654,
13,
30238,
7,
32103,
34156,
8,
198,
28482,
13,
15654,
13,
30238,
7,
32103,
... | 3.259259 | 54 |
"""Sphinx ReadTheDocs theme.
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""
import os
__version__ = '0.2.0'
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
return cur_dir
| [
37811,
50,
746,
28413,
4149,
464,
23579,
82,
7505,
13,
198,
198,
4863,
3740,
1378,
12567,
13,
785,
14,
29038,
12,
305,
24677,
14,
82,
746,
28413,
12,
18769,
26418,
12,
43810,
13,
198,
198,
37811,
198,
11748,
28686,
198,
198,
834,
96... | 2.511628 | 129 |
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model definition for the Segmentation Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from dataloader import mode_keys
from modeling import base_model
from modeling import losses
from modeling.architecture import factory
from modeling.architecture import nn_ops
class SegmentationModel(base_model.BaseModel):
"""Segmentation model function."""
def metric_fn(logits, masks, valid_masks, model_loss=None):
"""Customized eval metric function.
Args:
logits: A float-type tensor of shape [B, h, w, C], that is - batch size,
model output height, model output width, and number of classes,
representing the logits.
masks: An integer-type tensor of shape [B, H, W, 1] representing the
groundtruth classes for each pixel. H and W can be different (typically
larger) than h, w.
valid_masks: An boolean-type tensor of shape [B, H, W, 1], True where the
`masks` is valid, and False where it is to be disregarded.
model_loss: A float-type tensor containing the model loss.
Returns:
A dictionary, where the keys are metric names and the values are scalar
tensors representing the resirctive metrics.
"""
masks = tf.cast(tf.squeeze(masks, axis=3), tf.int32)
valid_masks = tf.squeeze(valid_masks, axis=3)
masks = tf.where(valid_masks, masks, tf.zeros_like(masks))
logits = tf.image.resize_bilinear(
logits, tf.shape(masks)[1:3], align_corners=False)
predictions = tf.argmax(logits, axis=3, output_type=tf.int32)
_, _, _, num_classes = logits.get_shape().as_list()
masks = tf.reshape(masks, shape=[-1])
predictions = tf.reshape(predictions, shape=[-1])
valid_masks = tf.reshape(valid_masks, shape=[-1])
miou = tf.metrics.mean_iou(
masks, predictions, num_classes, weights=valid_masks)
model_metrics = {'miou': miou}
if model_loss is not None:
model_metrics['model_loss'] = tf.metrics.mean(model_loss)
one_hot_predictions = tf.one_hot(predictions, num_classes)
one_hot_predictions = tf.reshape(one_hot_predictions, [-1, num_classes])
one_hot_labels = tf.one_hot(masks, num_classes)
one_hot_labels = tf.reshape(one_hot_labels, [-1, num_classes])
for c in range(num_classes):
tp, tp_op = tf.metrics.true_positives(
one_hot_labels[:, c],
one_hot_predictions[:, c],
weights=valid_masks)
fp, fp_op = tf.metrics.false_positives(
one_hot_labels[:, c],
one_hot_predictions[:, c],
weights=valid_masks)
fn, fn_op = tf.metrics.false_negatives(
one_hot_labels[:, c],
one_hot_predictions[:, c],
weights=valid_masks)
tp_fp_fn_op = tf.group(tp_op, fp_op, fn_op)
iou = tf.where(
tf.greater(tp + fn, 0.0), tp / (tp + fn + fp),
tf.constant(-1, dtype=tf.float32))
model_metrics['eval/iou_class_%d' % c] = (iou, tp_fp_fn_op)
return model_metrics
| [
2,
406,
600,
355,
25,
21015,
17,
11,
21015,
18,
198,
2,
15069,
13130,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
1... | 2.764087 | 1,331 |
"""
Implements Multi-fidelity GP Bandit Optimisaiton.
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=super-on-old-class
from argparse import Namespace
from copy import deepcopy
import time
import numpy as np
# Local imports
from mf_func import MFOptFunction
from mf_gp import all_mf_gp_args, MFGPFitter
from mf_gpb_utils import acquisitions, fidelity_choosers
from mf_gpb_utils import is_an_opt_fidel_query, latin_hc_sampling
from utils.optimisers import direct_ft_maximise, random_maximise
from utils.option_handler import get_option_specs, load_options
from utils.reporters import get_reporter
mf_gp_bandit_args = [
get_option_specs('capital_type', False, 'given',
('The type of capital to be used. If \'given\', it will use the cost specified. '
'Could be one of given, cputime, or realtime')),
get_option_specs('max_iters', False, 1e5,
'The maximum number of iterations, regardless of capital.'),
get_option_specs('gamma_0', False, '1',
('The multiplier in front of the default threshold value for switching. Should be',
'a scalar or the string \'adapt\'.')),
get_option_specs('acq', False, 'mf_gp_ucb',
'Which acquisition to use. Should be one of mf_gp_ucb, gp_ucb or gp_ei'),
get_option_specs('acq_opt_criterion', False, 'rand',
'Which optimiser to use when maximising the acquisition function.'),
get_option_specs('acq_opt_max_evals', False, -1,
'Number of evaluations when maximising acquisition. If negative uses default value.'),
get_option_specs('gpb_init', False, 'random_lower_fidels',
'How to initialise. Should be either random_lower_fidels or random.'),
get_option_specs('gpb_init_capital', False, -1.0,
('The amount of capital to be used for initialisation. If negative, will use',
'init_capital_frac fraction of the capital for optimisation.')),
get_option_specs('gpb_init_capital_frac', False, 0.1,
'The percentage of the capital to use for initialisation.'),
# The following are perhaps not so important.
get_option_specs('shrink_kernel_with_time', False, 1,
'If True, shrinks the kernel with time so that we don\'t get stuck.'),
get_option_specs('perturb_thresh', False, 1e-4,
('If the next point chosen is too close to an exisiting point by this times the '
'diameter, then we will perturb the point a little bit before querying. This is '
'mainly to avoid numerical stability issues.')),
get_option_specs('build_new_gp_every', False, 20,
'Updates the GP via a suitable procedure every this many iterations.'),
get_option_specs('report_results_every', False, 20,
'Report results every this many iterations.'),
get_option_specs('monitor_progress_every', False, 9,
('Performs some simple sanity checks to make sure we are not stuck every this many',
' iterations.')),
get_option_specs('monitor_domain_kernel_shrink', False, 0.9,
('If the optimum has not increased in a while, shrinks the kernel smoothness by this',
' much to increase variance.')),
get_option_specs('monitor_mf_thresh_increase', False, 1.5,
('If we have not queried at the highest fidelity in a while, increases the leading',
'constant by this much')),
get_option_specs('track_every_time_step', False, 0,
('If 1, it tracks every time step.')),
# TODO: implement code for next_pt_std_thresh
get_option_specs('next_pt_std_thresh', False, 0.005,
('If the std of the queried point queries below this times the kernel scale ',
'frequently we will reduce the bandwidth range')),
]
# All of them including what is needed for fitting GP.
all_mf_gp_bandit_args = all_mf_gp_args + mf_gp_bandit_args
# The MFGPBandit Class
# ========================================================================================
class MFGPBandit(object):
""" MFGPBandit Class. """
# pylint: disable=attribute-defined-outside-init
# Methods needed for construction -------------------------------------------------
def __init__(self, mf_opt_func, options=None, reporter=None):
""" Constructor. """
self.reporter = get_reporter(reporter)
if options is None:
options = load_options(all_mf_gp_bandit_args, reporter=reporter)
self.options = options
# Set up mfgp and mfof attributes
self.mfof = mf_opt_func # mfof refers to an MFOptFunction object.
self.mfgp = None
# Other set up
self._set_up()
def _set_up(self):
""" Some additional set up routines. """
# Check for legal parameter values
self._check_options_vals('capital_type', ['given', 'cputime', 'realtime'])
self._check_options_vals('acq', ['mf_gp_ucb', 'gp_ucb', 'gp_ei', 'mf_gp_ucb_finite',
'mf_sko'])
self._check_options_vals('acq_opt_criterion', ['rand', 'direct'])
if isinstance(self.options.gpb_init, str):
self._check_options_vals('gpb_init', ['random', 'random_lower_fidels'])
# Set up some book keeping parameters
self.available_capital = 0.0
self.time_step = 0
self.num_opt_fidel_queries = 0
# Copy some stuff over from mfof
copyable_params = ['fidel_dim', 'domain_dim']
for param in copyable_params:
setattr(self, param, getattr(self.mfof, param))
# Set up acquisition optimisation
self._set_up_acq_opt()
# set up variables for monitoring
self.monit_kernel_shrink_factor = 1
self.monit_thresh_coeff = 1
# Set initial history
self.history = Namespace(query_fidels=np.zeros((0, self.fidel_dim)),
query_points=np.zeros((0, self.domain_dim)),
query_vals=np.zeros(0),
query_costs=np.zeros(0),
curr_opt_vals=np.zeros(0),
query_at_opt_fidel=np.zeros(0).astype(bool),
)
@classmethod
def _check_arg_vals(cls, arg_val, arg_name, allowed_vals):
""" Checks if arg_val is in allowed_vals. """
if arg_val not in allowed_vals:
err_str = '%s should be one of %s.'%(arg_name,
' '.join([str(x) for x in allowed_vals]))
raise ValueError(err_str)
def _check_options_vals(self, option_name, allowed_vals):
""" Checks if the option option_name has taken a an allowed value. """
return self._check_arg_vals(getattr(self.options, option_name),
option_name, allowed_vals)
# Methods for setting up optimisation of acquisition ----------------------------------
def _set_up_acq_opt(self):
""" Sets up acquisition optimisation. """
# First set up function to get maximum evaluations.
if isinstance(self.options.acq_opt_max_evals, int):
if self.options.acq_opt_max_evals > 0:
self.get_acq_opt_max_evals = lambda t: self.options.acq_opt_max_evals
else:
self.get_acq_opt_max_evals = None
else:
# In this case, the user likely passed a function here.
self.get_acq_opt_max_evals = self.options.acq_opt_max_evals
# Now based on the optimisation criterion, do additional set up
if self.options.acq_opt_criterion == 'direct':
self._set_up_acq_opt_direct()
elif self.options.acq_opt_criterion == 'rand':
self._set_up_acq_opt_rand()
else:
raise NotImplementedError('Not implemented acq opt for %s yet!'%(
self.options.acq_opt_criterion))
def _set_up_acq_opt_direct(self):
""" Sets up acquisition optimisation with direct. """
def _direct_wrap(*args):
""" A wrapper so as to only return optimal value. """
_, opt_pt, _ = direct_ft_maximise(*args)
return opt_pt
direct_lower_bounds = [0] * self.domain_dim
direct_upper_bounds = [1] * self.domain_dim
self.acq_optimise = lambda obj, max_evals: _direct_wrap(obj,
direct_lower_bounds, direct_upper_bounds, max_evals)
# Set up function for obtaining number of function evaluations.
if self.get_acq_opt_max_evals is None:
lead_const = 15 * min(5, self.domain_dim)**2
self.get_acq_opt_max_evals = lambda t: lead_const * np.sqrt(min(t, 1000))
# Acquisition function should be evaluated via single evaluations.
self.acq_query_type = 'single'
def _set_up_acq_opt_rand(self):
""" Sets up acquisition optimisation with direct. """
def _random_max_wrap(*args):
""" A wrapper so as to only return optimal value. """
_, opt_pt = random_maximise(*args)
return opt_pt
rand_bounds = np.array([[0, 1]] * self.domain_dim)
self.acq_optimise = lambda obj, max_evals: _random_max_wrap(obj,
rand_bounds, max_evals)
if self.get_acq_opt_max_evals is None:
lead_const = 7 * min(5, self.domain_dim)**2
self.get_acq_opt_max_evals = lambda t: np.clip(
lead_const * np.sqrt(min(t, 1000)), 1000, 2e4)
# Acquisition function should be evaluated via multiple evaluations
self.acq_query_type = 'multiple'
# Book keeping methods ------------------------------------------------------------
def _update_history(self, pts_fidel, pts_domain, pts_val, pts_cost, at_opt_fidel):
""" Adds a query point to the history and discounts the capital etc. """
pts_fidel = pts_fidel.reshape(-1, self.fidel_dim)
pts_domain = pts_domain.reshape(-1, self.domain_dim)
pts_val = pts_val if hasattr(pts_val, '__len__') else [pts_val]
pts_cost = pts_cost if hasattr(pts_cost, '__len__') else [pts_cost]
# Append to history
self.history.query_fidels = np.append(self.history.query_fidels, pts_fidel, axis=0)
self.history.query_points = np.append(self.history.query_points, pts_domain, axis=0)
self.history.query_vals = np.append(self.history.query_vals, pts_val, axis=0)
self.history.query_costs = np.append(self.history.query_costs, pts_cost, axis=0)
self.history.curr_opt_vals = np.append(self.history.curr_opt_vals, self.gpb_opt_val)
self.history.query_at_opt_fidel = np.append(self.history.query_at_opt_fidel,
at_opt_fidel)
def _get_min_distance_to_opt_fidel(self):
""" Computes the minimum distance to the optimal fidelity. """
dists_to_of = np.linalg.norm(self.history.query_fidels - self.mfof.opt_fidel, axis=1)
return dists_to_of.min()
def _report_current_results(self):
""" Writes the current results to the reporter. """
cost_frac = self.spent_capital / self.available_capital
report_str = ' '.join(['%s-%03d::'%(self.options.acq, self.time_step),
'cost: %0.3f,'%(cost_frac),
'#hf_queries: %03d,'%(self.num_opt_fidel_queries),
'optval: %0.4f'%(self.gpb_opt_val)
])
if self.num_opt_fidel_queries == 0:
report_str = report_str + '. min-to-of: %0.4f'%(
self._get_min_distance_to_opt_fidel())
self.reporter.writeln(report_str)
# Methods for managing the GP -----------------------------------------------------
def _build_new_gp(self):
""" Builds the GP with the data in history and stores in self.mfgp. """
if hasattr(self.mfof, 'init_mfgp') and self.mfof.init_mfgp is not None:
self.mfgp = deepcopy(self.mfof.init_mfgp)
self.mfgp.add_mf_data(self.history.query_fidels, self.history.query_points,
self.history.query_vals)
mfgp_prefix_str = 'Using given gp: '
else:
# Set domain bandwidth bounds
if self.options.shrink_kernel_with_time:
bw_ub = max(0.2, 2/(1+self.time_step)**0.25)
domain_bw_log_bounds = [[0.05, bw_ub]] * self.domain_dim
self.options.domain_bandwidth_log_bounds = np.array(domain_bw_log_bounds)
else:
self.options.domain_bandwidth_log_bounds = np.array([[0, 4]] * self.domain_dim)
# Set fidelity bandwidth bounds
self.options.fidel_bandwidth_log_bounds = np.array([[0, 4]] * self.fidel_dim)
# Call the gp fitter
mfgp_fitter = MFGPFitter(self.history.query_fidels, self.history.query_points,
self.history.query_vals, options=self.options, reporter=self.reporter)
self.mfgp, _ = mfgp_fitter.fit_gp()
mfgp_prefix_str = 'Fitting GP (t=%d): '%(self.time_step) # increase bandwidths
mfgp_str = ' -- %s%s.'%(mfgp_prefix_str, str(self.mfgp))
self.reporter.writeln(mfgp_str)
def _add_data_to_mfgp(self, fidel_pt, domain_pt, val_pt):
""" Adds data to self.mfgp. """
self.mfgp.add_mf_data(fidel_pt.reshape((-1, self.fidel_dim)),
domain_pt.reshape((-1, self.domain_dim)),
np.array(val_pt).ravel())
# Methods needed for initialisation -----------------------------------------------
def perform_initial_queries(self):
""" Performs an initial set of queries to initialise optimisation. """
if not isinstance(self.options.gpb_init, str):
raise NotImplementedError('Not implemented taking given initialisation yet.')
# First determine the initial budget.
gpb_init_capital = (self.options.gpb_init_capital if self.options.gpb_init_capital > 0
else self.options.gpb_init_capital_frac * self.available_capital)
if self.options.acq in ['gp_ucb', 'gp_ei']:
num_sf_init_pts = np.ceil(float(gpb_init_capital)/self.mfof.opt_fidel_cost)
fidel_init_pts = np.repeat(self.mfof.opt_fidel.reshape(1, -1), num_sf_init_pts,
axis=0)
elif self.options.acq in ['mf_gp_ucb', 'mf_gp_ucb_finite', 'mf_sko']:
fidel_init_pts = self._mf_method_random_initial_fidels_random(gpb_init_capital)
num_init_pts = len(fidel_init_pts)
domain_init_pts = latin_hc_sampling(self.domain_dim, num_init_pts)
for i in range(num_init_pts):
self.query(fidel_init_pts[i], domain_init_pts[i])
if self.spent_capital >= gpb_init_capital:
break
self.reporter.writeln('Initialised %s with %d queries, %d at opt_fidel.'%(
self.options.acq, len(self.history.query_vals), self.num_opt_fidel_queries))
def _mf_method_random_initial_fidels_interweaved(self):
"""Gets initial fidelities for a multi-fidelity method. """
rand_fidels = self.mfof.get_candidate_fidelities()
np.random.shuffle(rand_fidels)
num_rand_fidels = len(rand_fidels)
opt_fidels = np.repeat(self.mfof.opt_fidel.reshape(1, -1), num_rand_fidels, axis=0)
fidel_init_pts = np.empty((2*num_rand_fidels, self.fidel_dim), dtype=np.float64)
fidel_init_pts[0::2] = rand_fidels
fidel_init_pts[1::2] = opt_fidels
return fidel_init_pts
def _mf_method_random_initial_fidels_random(self, gpb_init_capital):
"""Gets initial fidelities for a multi-fidelity method. """
cand_fidels = self.mfof.get_candidate_fidelities()
cand_costs = self.mfof.cost(cand_fidels)
not_too_expensive_fidel_idxs = cand_costs <= (gpb_init_capital / 3.0)
fidel_init_pts = cand_fidels[not_too_expensive_fidel_idxs, :]
np.random.shuffle(fidel_init_pts)
return np.array(fidel_init_pts)
def initialise_capital(self):
""" Initialises capital. """
self.spent_capital = 0.0
if self.options.capital_type == 'cputime':
self.cpu_time_stamp = time.clock()
elif self.options.capital_type == 'realtime':
self.real_time_stamp = time.time()
def optimise_initialise(self):
""" Initialisation for optimisation. """
self.gpb_opt_pt = None
self.gpb_opt_val = -np.inf
self.initialise_capital() # Initialise costs
self.perform_initial_queries() # perform initial queries
self._build_new_gp()
# Methods needed for monitoring -------------------------------------------------
def _monitor_progress(self):
""" Monitors progress. """
# self._monitor_opt_val()
self._monitor_opt_fidel_queries()
def _monitor_opt_val(self):
""" Monitors progress of the optimum value. """
# Is the optimum increasing over time.
if (self.history.curr_opt_vals[-self.options.monitor_progress_every] * 1.01 >
self.gpb_opt_val):
recent_queries = self.history.query_points[-self.options.monitor_progress_every:, :]
recent_queries_mean = recent_queries.mean(axis=0)
dispersion = np.linalg.norm(recent_queries - recent_queries_mean, ord=2, axis=1)
dispersion = dispersion.mean() / np.sqrt(self.domain_dim)
lower_dispersion = 0.05
upper_dispersion = 0.125
if dispersion < lower_dispersion:
self.monit_kernel_shrink_factor *= self.options.monitor_domain_kernel_shrink
elif dispersion > upper_dispersion:
self.monit_kernel_shrink_factor /= self.options.monitor_domain_kernel_shrink
if not lower_dispersion < dispersion < upper_dispersion:
self.mfgp.domain_kernel.change_smoothness(self.monit_kernel_shrink_factor)
self.mfgp.build_posterior()
self.reporter.writeln('%s--monitor: Kernel shrink set to %0.4f.'%(' '*10,
self.monit_kernel_shrink_factor))
def _monitor_opt_fidel_queries(self):
""" Monitors if we querying at higher fidelities too much or too little. """
# Are we querying at higher fidelities too much or too little.
if self.options.acq in ['mf_gp_ucb', 'mf_gp_ucb_finite']:
of_start_query = max(0, (len(self.history.query_vals) -
2*self.options.monitor_progress_every))
of_recent_query_idxs = range(of_start_query, len(self.history.query_vals))
recent_query_at_opt_fidel = self.history.query_at_opt_fidel[of_recent_query_idxs]
recent_query_at_opt_fidel_mean = recent_query_at_opt_fidel.mean()
if not 0.25 <= recent_query_at_opt_fidel_mean <= 0.75:
if recent_query_at_opt_fidel_mean < 0.25:
self.monit_thresh_coeff *= self.options.monitor_mf_thresh_increase
else:
self.monit_thresh_coeff /= self.options.monitor_mf_thresh_increase
self.reporter.writeln(('%s-- monitor: Changing thresh_coeff to %0.3f, ' +
'recent-query-frac: %0.3f.')%(
' '*10, self.monit_thresh_coeff,
recent_query_at_opt_fidel_mean))
# Methods needed for optimisation -------------------------------------------------
def _terminate_now(self):
""" Returns true if we should terminate now. """
if self.time_step >= self.options.max_iters:
return True
return self.spent_capital >= self.available_capital
def add_capital(self, capital):
  """ Increases the total capital available to the optimiser by `capital`. """
  self.available_capital = self.available_capital + capital
def _determine_next_query_point(self):
  """ Picks the next domain point to query by maximising the acquisition,
      and returns it along with acquisition-specific book-keeping values. """
  ucb_family = ['mf_gp_ucb', 'gp_ucb', 'mf_gp_ucb_finite']
  # Build the acquisition objective ------
  if self.options.acq in ucb_family:
    def _acq_objective(x):
      """ Returns the UCB value at x, discarding the beta term. """
      ucb_val, _ = acquisitions.mf_gp_ucb(self.acq_query_type, x, self.mfgp,
                                          self.mfof.opt_fidel, self.time_step)
      return ucb_val
  elif self.options.acq in ['gp_ei', 'mf_sko']:
    def _acq_objective(x):
      """ Returns the expected improvement at x. """
      return acquisitions.gp_ei(self.acq_query_type, x, self.mfgp,
                                self.mfof.opt_fidel, self.gpb_opt_val)
  else:
    raise NotImplementedError('Only implemented %s yet!.'%(self.options.acq))
  # Maximise the acquisition -----
  next_pt = self.acq_optimise(_acq_objective, self.get_acq_opt_max_evals(self.time_step))
  # Record acquisition quantities needed by the fidelity chooser -----
  acq_params = Namespace()
  if self.options.acq in ucb_family:
    max_acq_val, beta_th = acquisitions.mf_gp_ucb_single(next_pt, self.mfgp,
                                                         self.mfof.opt_fidel,
                                                         self.time_step)
    acq_params.beta_th = beta_th
    acq_params.thresh_coeff = self.monit_thresh_coeff
  else:
    max_acq_val = acquisitions.gp_ei_single(next_pt, self.mfgp, self.mfof.opt_fidel,
                                            self.gpb_opt_val)
  acq_params.max_acq_val = max_acq_val
  return next_pt, acq_params
def _determine_next_fidel(self, next_pt, acq_params):
""" Determines the next fidelity. """
if self.options.acq in ['mf_gp_ucb', 'mf_gp_ucb_finite']:
next_fidel = fidelity_choosers.mf_gp_ucb(next_pt, self.mfgp, self.mfof, acq_params)
elif self.options.acq in ['mf_sko']:
next_fidel = fidelity_choosers.mf_sko(self.mfof, next_pt, self.mfgp, acq_params)
elif self.options.acq in ['gp_ucb', 'gp_ei']:
next_fidel = deepcopy(self.mfof.opt_fidel)
return next_fidel
@classmethod
def _process_next_fidel_and_pt(cls, next_fidel, next_pt):
""" Processes next point and fidel. Will do certiain things such as perturb it if its
too close to an existing point. """
return next_fidel, next_pt
def _update_capital(self, fidel_pt):
""" Updates the capital according to the cost of the current query. """
if self.options.capital_type == 'given':
pt_cost = self.mfof.cost_single(fidel_pt)
elif self.options.capital_type == 'cputime':
new_cpu_time_stamp = time.clock()
pt_cost = new_cpu_time_stamp - self.cpu_time_stamp
self.cpu_time_stamp = new_cpu_time_stamp
elif self.options.capital_type == 'realtime':
new_real_time_stamp = time.time()
pt_cost = new_real_time_stamp - self.real_time_stamp
self.real_time_stamp = new_real_time_stamp
self.spent_capital += pt_cost
return pt_cost
# The actual querying happens here
def query(self, fidel_pt, domain_pt):
  """ Evaluates the function at (fidel_pt, domain_pt), charges the query cost,
      and records the result. Updates the incumbent optimum point/value but
      does *not* update the GP. """
  query_val = self.mfof.eval_single(fidel_pt, domain_pt)
  query_cost = self._update_capital(fidel_pt)
  # Only a query (numerically) at the optimal fidelity can improve the incumbent.
  is_numerically_at_opt = np.linalg.norm(fidel_pt - self.mfof.opt_fidel) < 1e-5
  if is_numerically_at_opt and query_val > self.gpb_opt_val:
    self.gpb_opt_val = query_val
    self.gpb_opt_pt = domain_pt
  # Book-keeping: append to history and count optimal-fidelity queries.
  at_opt_fidel = is_an_opt_fidel_query(fidel_pt, self.mfof.opt_fidel)
  self._update_history(fidel_pt, domain_pt, query_val, query_cost, at_opt_fidel)
  if at_opt_fidel:
    self.num_opt_fidel_queries += 1
  return query_val, query_cost
def _time_keeping(self, reset=0):
""" Used to keep time by _track_time_step. """
curr_keep_time = time.time()
curr_keep_clock = time.clock()
if reset:
self.last_keep_time = curr_keep_time
self.last_keep_clock = curr_keep_clock
else:
time_diff = curr_keep_time - self.last_keep_time
clock_diff = curr_keep_clock - self.last_keep_clock
self.last_keep_time = curr_keep_time
self.last_keep_clock = curr_keep_clock
return round(time_diff, 3), round(clock_diff, 3)
def _track_time_step(self, msg=''):
""" Used to track time step. """
if not self.options.track_every_time_step:
return
if not msg:
self._time_keeping(0)
self.reporter.writeln('')
else:
self.reporter.write('%s: t%s, '%(msg, self._time_keeping()))
# Main optimisation function ------------------------------------------------------
def optimise(self, max_capital):
  """ Runs the sequential optimisation loop until the budget is exhausted.

  Args:
    max_capital: Additional capital made available for this run.
  Returns:
    (gpb_opt_pt, gpb_opt_val, history): the best point found, its value, and a
    namespace recording all previous queries.
  """
  self.add_capital(max_capital)
  self.optimise_initialise()
  # Main loop --------------------------
  while not self._terminate_now():
    self.time_step += 1
    # Periodic maintenance: rebuild the GP and monitor optimiser health.
    if self.time_step % self.options.build_new_gp_every == 0:
      self._build_new_gp()
    if self.time_step % self.options.monitor_progress_every == 0:
      self._monitor_progress()
    # Choose where (point) and how accurately (fidelity) to evaluate next.
    self._track_time_step()
    chosen_pt, acq_params = self._determine_next_query_point()
    self._track_time_step('#%d, next point'%(self.time_step))
    chosen_fidel = self._determine_next_fidel(chosen_pt, acq_params)
    chosen_fidel, chosen_pt = self._process_next_fidel_and_pt(chosen_fidel, chosen_pt)
    self._track_time_step('next fidel')
    # Evaluate, then fold the new observation back into the GP.
    chosen_val, _ = self.query(chosen_fidel, chosen_pt)
    self._track_time_step('querying')
    self._add_data_to_mfgp(chosen_fidel, chosen_pt, chosen_val)
    self._track_time_step('gp-update')
    if self.time_step % self.options.report_results_every == 0:
      self._report_current_results()
  return self.gpb_opt_pt, self.gpb_opt_val, self.history
# MFGPBandit Class ends here ========================================================
# APIs for MF GP Bandit optimisation -----------------------------------------------------
# Optimisation from a mf_Func.MFOptFunction object
def mfgpb_from_mfoptfunc(mf_opt_func, max_capital, acq=None, options=None,
                         reporter='default'):
  """ Runs MF-GP-Bandit optimisation on an mf_func.MFOptFunction object.

  When an acquisition name is supplied without options, default options are
  loaded first and the requested acquisition is written into them.
  """
  if acq is not None:
    if options is None:
      reporter = get_reporter(reporter)
      options = load_options(all_mf_gp_bandit_args, reporter=reporter)
    options.acq = acq
  bandit = MFGPBandit(mf_opt_func, options, reporter)
  return bandit.optimise(max_capital)
# Main API
def mfgpb(mf_func, fidel_cost_func, fidel_bounds, domain_bounds, opt_fidel, max_capital,
          acq=None, options=None, reporter=None, vectorised=True, true_opt_pt=None,
          true_opt_val=None):
  # pylint: disable=too-many-arguments
  """ Executes GP Bandit (Bayesian Optimisation) on a multi-fidelity function.

  Input Arguments:
    - mf_func: The multi-fidelity function to be optimised.
    - fidel_cost_func: Gives the evaluation cost at each fidelity.
    - fidel_bounds, domain_bounds: Bounds for the fidelity space and the domain.
    - opt_fidel: The fidelity at which mf_func is to be optimised.
    - max_capital: The total capital (budget) for the optimisation.
    - options: A namespace of additional options.
    - reporter: A reporter object for writing outputs.
    - vectorised: If True, mf_func and fidel_cost_func accept matrix inputs;
      otherwise they accept one point at a time.
    - true_opt_pt, true_opt_val: The true optimum point/value when known,
      mostly for experiments with synthetic problems.
  Returns: (gpb_opt_pt, gpb_opt_val, history)
    - gpb_opt_pt, gpb_opt_val: The optimum point found and its value.
    - history: A namespace recording all previous queries.
  """
  wrapped_problem = MFOptFunction(mf_func, fidel_cost_func, fidel_bounds,
                                  domain_bounds, opt_fidel, vectorised,
                                  true_opt_pt, true_opt_val)
  return mfgpb_from_mfoptfunc(wrapped_problem, max_capital, acq, options, reporter)
| [
37811,
198,
220,
1846,
1154,
902,
15237,
12,
69,
23091,
14714,
10243,
270,
30011,
271,
4548,
261,
13,
198,
220,
1377,
479,
392,
292,
14814,
31,
6359,
13,
11215,
84,
13,
15532,
198,
37811,
198,
2,
279,
2645,
600,
25,
15560,
28,
11748... | 2.41858 | 11,109 |
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import scoped_session, sessionmaker
from settings import DATABASE
TEST_DB = dict(DATABASE)
engine = create_engine(URL(**TEST_DB))
Session = scoped_session(sessionmaker(bind=engine))
def delete_rows(db_engine, base_obj):
    """Delete all rows from every table without dropping the schema.

    Tables are cleared in reverse dependency order (children before parents)
    so foreign-key constraints are not violated. The transaction context
    manager commits on a clean exit and rolls back on error, so the previous
    explicit ``trans.commit()`` was redundant.
    """
    connection = db_engine.connect()
    try:
        with connection.begin():
            for table in reversed(base_obj.metadata.sorted_tables):
                connection.execute(table.delete())
    finally:
        # Always release the connection, even when a delete fails.
        connection.close()
| [
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
18392,
13,
6371,
1330,
10289,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
629,
19458,
62,
29891,
11,
6246,
10297,
198,
198,
6738,
6460,
1330,
360,
1404,
... | 2.942584 | 209 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##################################################
#
# make_dict.py
#
# This module takes a text file, marked up with
# units (e.g. w for word, m for morpheme) and ids
# and converted to IPA, and produces a
# .dict file for processing by PocketSphinx.
#
##################################################
from __future__ import print_function, unicode_literals
from __future__ import division, absolute_import
import logging
import argparse
import pystache
from readalongs.g2p.util import load_xml, save_txt
# Python 2/3 compatibility: on Python 3 the builtin `unicode` no longer
# exists, so alias it to `str`. Catch only NameError -- the previous bare
# `except:` would also have hidden unrelated failures.
try:
    unicode()
except NameError:
    unicode = str
DICT_TEMPLATE = '''{{#items}}
{{id}}\t{{pronunciation}}
{{/items}}
'''
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Make a pronunciation dictionary from a G2P'd XML file")
parser.add_argument('input', type=str, help='Input XML')
parser.add_argument('output', type=str, help='Output .dict file')
parser.add_argument('--unit', type=str, default='m',
help='XML tag of the unit of analysis '
'(e.g. "w" for word, "m" for morpheme)')
args = parser.parse_args()
go(args.input, args.output, args.unit)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
29113,
14468,
2235,
198,
2,
198,
2,
787,
62,
11600,
13,
9078,
198,
2,
198,
2,
770,
8265,
2753,
257,
2420,
... | 2.765376 | 439 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 13:05:23 2019
@author: TempestGuerra
"""
import numpy as np
import math as mt
from HerfunChebNodesWeights import chebpolym, cheblb | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
5979,
678,
1511,
25,
2713,
25,
1954,
13130,
198,
198,
31,
9800,
25,
34367,
8205,
2... | 2.594937 | 79 |
from .connection import database, init_db
| [
6738,
764,
38659,
1330,
6831,
11,
2315,
62,
9945,
198
] | 4.2 | 10 |
"""Tests for create_data.py."""
import json
import shutil
import tempfile
import unittest
from glob import glob
from os import path
import tensorflow as tf
from opensubtitles import create_data
_TRAIN_FILE = "\n".join([
"matt: AAAA", # words followed by colons are stripped.
"[skip]", # text in brackets is removed.
"BBBB",
"", "", "" # empty lines are ignored.
"CCCC",
"(all laughing)",
"c3po:",
"- DDDD (boom!)",
"123", # line length will be below the test --min_length.
"12345", # line length will be above the test --min_length.
])
_TEST_FILE = """
aaaa
bbbb
cccc
dddd
"""
if __name__ == "__main__":
unittest.main()
| [
37811,
51,
3558,
329,
2251,
62,
7890,
13,
9078,
526,
15931,
198,
198,
11748,
33918,
198,
11748,
4423,
346,
198,
11748,
20218,
7753,
198,
11748,
555,
715,
395,
198,
6738,
15095,
1330,
15095,
198,
6738,
28686,
1330,
3108,
198,
198,
11748,... | 2.583969 | 262 |
import logging
import sys
from metriql2metabase.dbt_metabase import MetabaseClient
from metriql2metabase.models.metabase import MetabaseModel, MetabaseColumn, MetabaseMetric
| [
11748,
18931,
198,
11748,
25064,
198,
198,
6738,
1138,
380,
13976,
17,
4164,
5754,
13,
9945,
83,
62,
4164,
5754,
1330,
3395,
5754,
11792,
198,
6738,
1138,
380,
13976,
17,
4164,
5754,
13,
27530,
13,
4164,
5754,
1330,
3395,
5754,
17633,
... | 3.45098 | 51 |
"""
# Clover平台变量机制实现。
# author : taoyanli0808
# date : 2020-05-27
# version: 1.2
# -------------------- Clover平台变量机制 --------------------
# clover平台变量分为4种类型,平台内置变量、自定义变量、触发变量与运行时变量
# 1、平台内置变量
# clover平台内置变量目前有response、request、keyword、variable、exception、
# validator、extractor共7个,详见各模块说明文档。
# 2、自定义变量
# 自定义变量(default)通过平台“配置管理-变量配置”页面进行添加,每个自定义
# 变量关联到团队与项目,同一团队下相同项目不能存在同名变量。自定义变量可以采用
# 字母、数字和下划线进行命名,但不可与平台内置变量重复。
# 3、触发变量
# 触发变量为通过页面或接口(包含Jenkins等插件)运行平台用例时用户提交的变量。
# 触发变量的优先级高于自定义变量,低于运行时变量。通常可以将域名设置为变量形
# 式,例如调试时使用自定义变量host指向测试环境http://test.52clover.cn,
# 当运行时采用触发变量重新指定host为http://www.52clover.cn覆盖自定义变量。
# 4、运行时变量
# 运行时变量通常为提取器提取的接口上下文变量,在用例执行生命周期内有效。
# 最常见的运行时变量使用场景为提取接口响应数据传递给下一个接口,使用提取器提取
# 接口响应数据保存为变量形式,下一个接口直接使用变量提取值。
# 5、变量优先级
# 平台内置变量 > 运行时变量 > 触发变量 > 自定义变量
"""
import re
from typing import Text
from clover.core import RESERVED
from clover.core.logger import Logger
from clover.core.request import Request
from clover.core.extractor import Extractor
from clover.models import query_to_dict
from clover.environment.models import VariableModel
| [
37811,
198,
2,
41154,
33176,
111,
20998,
108,
20998,
246,
34932,
237,
17312,
118,
26344,
114,
22522,
252,
163,
236,
108,
16764,
198,
2,
1772,
1058,
20486,
726,
272,
4528,
15,
28362,
198,
2,
3128,
220,
220,
1058,
12131,
12,
2713,
12,
... | 0.920484 | 1,157 |
#!/usr/bin/env python3
from Utils import *
if __name__ == "__main__":
with open("./p059_cipher.txt", "r") as f:
contents = f.read()
contents = contents[:-1].split(",")
contents = [int(x) for x in contents]
print(contents)
testContents = contents[:120]
candidates = []
# Your code here!
for a in range(ord('a'), ord('z') + 1):
print("progress", chr(a))
for b in range(ord('a'), ord('z') + 1):
for c in range(ord('a'), ord('z') + 1):
key = [a, b, c]
# decrypted = []
# for i, v in enumerate(testContents):
# decryptedChar = v ^ key[i % 3]
# if not (decryptedChar >= ord('a') and decryptedChar <= ord('z'
decrypted = "".join([chr(v ^ key[i % 3]) for i, v in enumerate(contents)])
numNormalChars = sum([1 for x in decrypted if isNormalCharacter(x)])
if numNormalChars / len(decrypted) > 0.95:
print(decrypted)
print("Solution", sum([ord(x) for x in decrypted]))
# if decrypted.find("the") != -1 and decrypted.find("and") != -1:
# print(decrypted)
checks = [isNormalCharacter(c) for c in decrypted]
if not False in checks:
print(decrypted)
print("Candidates:", len(candidates))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
7273,
4487,
1330,
1635,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
351,
1280,
7,
1911,
14,
79,
46712,
62,
66,
10803,
13,
... | 1.983425 | 724 |
import io
import os.path
import tarfile
import tempfile
from abc import ABC, abstractmethod
from typing import List
import docker
import docker.errors
from docker.models.containers import Container
from docker.models.volumes import Volume
from docker.types import Mount
| [
11748,
33245,
198,
11748,
28686,
13,
6978,
198,
11748,
13422,
7753,
198,
11748,
20218,
7753,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
36253,
198,
11748,
36253,
13,
48277,
198,
6738,
... | 4.014493 | 69 |
#!/usr/bin/env python
from tools.multiclass_shared import prepare_data
[traindat, label_traindat, testdat, label_testdat] = prepare_data(False)
parameter_list = [[traindat,testdat,label_traindat,label_testdat,2.1,1e-5],[traindat,testdat,label_traindat,label_testdat,2.2,1e-5]]
if __name__=='__main__':
print('MulticlassLogisticRegression')
classifier_multiclasslogisticregression_modular(*parameter_list[0])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
4899,
13,
16680,
291,
31172,
62,
28710,
1330,
8335,
62,
7890,
628,
198,
198,
58,
27432,
19608,
11,
6167,
62,
27432,
19608,
11,
1332,
19608,
11,
6167,
62,
9288,
19608,
60,
796,
... | 2.694805 | 154 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
from rpython.rlib import jit
file_expr_opts = ["-e", "--eval",
"-f", "--load",
"-t", "--require",
"-l", "--lib",
"-p",
"-r", "--script",
"-u", "--require-script",
"-k",
"-m", "--main",
"-g", "--eval-json",
"-gg"]
inter_opts = ["-i", "--repl",
"-n", "--no-lib",
"-v", "--version"]
# Configuration options accepted by the launcher.
# Fix: the "-A", "--addon" pair was listed twice (copy-paste duplicate);
# keep a single entry.
conf_opts = ["-c", "--no-compiled",
             "-q", "--no-init-file",
             "-I",
             "-X", "--collects",
             "-G", "--config",
             "-A", "--addon",
             "-U", "--no-user-path",
             "-R", "--compiled",
             "-C", "--cross",
             "-N", "--name",
             "-M", "--compile-any",
             "-j", "--no-jit",
             "-d", "--no-delay",
             "-b", "--binary",
             "-W", "--warn",
             "-O", "--stdout",
             "-L", "--syslog",
             "--kernel",
             "--save-callgraph"]
meta_opts = ["--make-linklet-zos",
"--load-regexp",
"--verbose",
"--jit",
"-h", "--help"]
dev_opts = ["--dev",
"--load-linklets",
"--load-as-linklets",
"--eval-linklet",
"--run-as-linklet",
"--just-init"]
all_opts = file_expr_opts + inter_opts + conf_opts + meta_opts + dev_opts
INIT = -1
RETURN_OK = 0
MISSING_ARG = 5
JUST_EXIT = 3
RET_JIT = 2
BAD_ARG = 5
config = {
'repl' : False,
'no-lib' : False,
'version' : False,
'stop' : False,
'just_kernel' : False,
'verbose' : False,
'just-init' : False,
'dev-mode' : False,
'use-compiled' : True,
'compile-machine-independent' : False,
'load-regexp' : False,
'make-zos' : False
}
# EOF
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
6738,
374,
29412,
13,
81,
8019,
1330,
474,
270,
198,
198,
7753,
62,
31937,
62,
404,
912,
796,
14631,
12,
... | 1.642079 | 1,193 |
from io import BytesIO
from io import StringIO
import re
from urllib.parse import urljoin
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from bs4 import BeautifulSoup
from common import cache_request
BASE_URL = 'https://www.ok.gov/elections/About_Us/County_Election_Boards/'
# covers county edge case where next line starts with *
re_county_section = re.compile(r'(?<=COUNTY\n).*?(?=\n\n|\n\*)', flags=re.MULTILINE + re.DOTALL)
re_phone_fax_section = re.compile(r'(?<=PHONE\n).*?(?=\n\n)', flags=re.MULTILINE + re.DOTALL)
re_mailing_section = re.compile(r'(?<=MAILING ADDRESS\n).*?(?=\n\n)', flags=re.MULTILINE + re.DOTALL)
re_number_space = re.compile(r'[\d]+\s*')
# Oklahoma uses pdfminer since its pdf doesn't work with PyPDF2
if __name__ == "__main__":
print(fetch_data())
| [
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
33245,
1330,
10903,
9399,
198,
11748,
302,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
22179,
198,
6738,
37124,
1084,
263,
13,
12315,
3849,
79,
1330,
12960,
26198,
13511,
11,
14340,
... | 2.520325 | 369 |
#! usr/bin/env python
# -*- coding: utf-8 -*-
"""
爬取 星座屋 星座运势
http://tools.2345.com/naonao/
"""
import re
from functools import reduce
import requests
from bs4 import BeautifulSoup
from everyday_wechat.utils.common import SPIDER_HEADERS
__all__ = ['get_2345_horoscope', 'get_today_horoscope']
XZW_BASE_URL_TODAY = "http://tools.2345.com/naonao/"
XZW_BASE_URL_TOMORROW = " "
CONSTELLATION_DICT = {
"白羊座": "baiyang",
"金牛座": "jinniu",
"双子座": "shuangzi",
"巨蟹座": "juxie",
"狮子座": "shizi",
"处女座": "chunv",
"天秤座": "tiancheng",
"天蝎座": "tianxie",
"射手座": "sheshou",
"摩羯座": "moxie",
"水瓶座": "shuiping",
"双鱼座": "shuangyu",
}
def get_2345_horoscope(name, is_tomorrow=False):
'''
获取2345网(http://tools.2345.com/naonao/)的星座运势
:param name: 星座名称
:return:
'''
if not name in CONSTELLATION_DICT:
print('星座输入有误')
return
try:
if is_tomorrow :
print('不可查询明日运势')
return
req_url = XZW_BASE_URL_TODAY
resp = requests.get(req_url, headers=SPIDER_HEADERS)
if resp.status_code == 200:
html = resp.text
lucky_num = ""
lucky_color = ""
detail_horoscope = ""
good_partner = ""
lucky_thing = ""
soup = BeautifulSoup(html,"html.parser")
day_all_constellation_info = soup.find_all('ul', class_='constellation-list')[0]
for day_per_constellation_info in day_all_constellation_info.find_all('li'):
if(day_per_constellation_info.find("a").get_text() == name):
result_str_list = day_per_constellation_info.find("div", class_="list-right").get_text().split()
detail_horoscope = result_str_list[0]
lucky_color = result_str_list[1][5:]
lucky_num = result_str_list[2][5:]
good_partner = result_str_list[3][5:]
lucky_thing = result_str_list[4][5:]
break
if is_tomorrow:
detail_horoscope = detail_horoscope.replace('今天', '明天')
return_text = '{name}{_date}运势:\n【幸运颜色】{color}\n【幸运数字】{num}\n【幸运物品】{_thing}\n【契合星座】{_partner}\n【综合运势】{horoscope}'.format(
_date='明日' if is_tomorrow else '今日',
name=name,
color=lucky_color,
num=lucky_num,
_thing=lucky_thing,
_partner=good_partner,
horoscope=detail_horoscope
)
return return_text
except Exception as exception:
print(str(exception))
get_today_horoscope = get_2345_horoscope
if __name__ == '__main__':
# print (get_constellation(3, 10))
# print(get_xzw_text("03-18"))
is_tomorrow = False
print(get_2345_horoscope("水瓶座", is_tomorrow))
| [
2,
0,
514,
81,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
13328,
230,
105,
20998,
244,
10545,
246,
253,
41753,
100,
161,
109,
233,
10545,
246,
253,
... | 1.722754 | 1,670 |
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField, ArrayField
import uuid
@receiver(post_save, sender=User)
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
12683,
874,
1330,
1281,
62,
21928,
198,
6738,
42625,
14208,
13,
6381,
17147,
1330,
9733,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
67... | 3.319149 | 94 |
# -*- coding: utf-8 -*-
"""
.. module:: sqlite3_db
:platform: Unix
:synopsis: I/O for sqlite3 database
.. moduleauthor:: Ryan Long <ryanlong1004@gmail.com>
"""
import os
import sqlite3
class Sqlite3:
"""Gateway for I/O to sqlite 3 database
..todo:: Add context manager so we can close the db
"""
def __init__(self):
"""Creates connection to sqlite3 database"""
self.conn = sqlite3.connect(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../../data/yify.db3"
)
)
def create_table(self):
"""Creates the library table if it does not exist"""
c = self.conn.cursor()
c.execute(
"""
CREATE TABLE IF NOT EXISTS library
(
id TEXT NOT NULL,
title TEXT NOT NULL,
year INT NOT NULL,
format TEXT NOT NULL,
summary TEXT NOT NULL,
runtime INT NOT NULL,
rating TEXT NOT NULL,
link TEXT NOT NULL,
published TEXT NOT NULL,
timestamp DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL,
PRIMARY KEY(id)
)
"""
)
self.conn.commit()
def insert_records(self, records: list):
"""Inserts list of records into the library table"""
c = self.conn.cursor()
c.executemany(
"""
INSERT OR IGNORE INTO library
(id, title, year, format, summary, runtime, rating, link, published)
VALUES
(?,?,?,?,?,?,?,?,?)""",
records,
)
self.conn.commit()
if __name__ == "__main__":
pass
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
492,
8265,
3712,
44161,
578,
18,
62,
9945,
198,
220,
220,
1058,
24254,
25,
33501,
198,
220,
220,
1058,
28869,
24608,
25,
314,
14,
46,
329,
44161,
578,
18,
... | 2.003484 | 861 |
from mongoengine import fields
from mongoengine import DynamicDocument
from .sequence import Sequence
| [
6738,
285,
25162,
18392,
1330,
7032,
198,
6738,
285,
25162,
18392,
1330,
26977,
24941,
198,
6738,
764,
43167,
1330,
45835,
628
] | 4.904762 | 21 |
print([abs(float(num)) for num in input().split()])
| [
4798,
26933,
8937,
7,
22468,
7,
22510,
4008,
329,
997,
287,
5128,
22446,
35312,
3419,
12962,
201,
198
] | 2.944444 | 18 |
from .base import GnuRecipe
| [
6738,
764,
8692,
1330,
18509,
84,
37523,
628
] | 3.625 | 8 |
#############
# Imports #
#############
import globalvars
import modules.conf as conf
import hashlib
import os
import shutil
import subprocess
from urllib.request import urlretrieve
###############
# Functions #
###############
##########
# Main #
##########
if __name__ == "__main__":
main()
| [
7804,
4242,
2,
198,
1303,
1846,
3742,
1303,
198,
7804,
4242,
2,
198,
198,
11748,
3298,
85,
945,
198,
11748,
13103,
13,
10414,
355,
1013,
198,
11748,
12234,
8019,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
850,
14681,
198,
6... | 3.234043 | 94 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views.generic import CreateView
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
... | 3.024691 | 81 |
from django.db import models
from django.utils.translation import gettext_lazy as _
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
62,
75,
12582,
355,
4808,
628
] | 3.541667 | 24 |
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, reverse
from django import forms
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
# Create your views here.
# @login_required
# @csrf_exempt #解决403错误
# def login_redirect(request):
# # pass
# return render(request, 'backweb/index')
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
11,
367,
29281,
31077,
7738,
1060,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
18941,
11,
9575,
198,
6738,
42625,
14208,
13... | 2.848101 | 158 |
from django.db import models
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628
] | 3.75 | 8 |
# getHrs.py
# Creator: Robert Toribio
# Date: 10/28/2021
import statsapi
from datetime import date
import re
if __name__ == "__main__":
main()
| [
2,
651,
39,
3808,
13,
9078,
198,
2,
21038,
25,
5199,
4022,
571,
952,
198,
2,
7536,
25,
838,
14,
2078,
14,
1238,
2481,
628,
198,
11748,
9756,
15042,
198,
6738,
4818,
8079,
1330,
3128,
198,
11748,
302,
628,
628,
220,
220,
220,
220,
... | 2.104651 | 86 |
# 061: Redo CHALLENGE 51 -- read the first term and the common difference of an
# arithmetic progression (PA) and show its first 10 terms using a while loop.
print('=-=' * 15)
print(f'{"PROGRESSÃO ARITMÉTICA":^40}')
print('=-=' * 15)
n = int(input('Digite o 1º termo da PA: '))
r = int(input('Digite a razão: '))
c = n  # current term of the progression
z = 0  # number of terms printed so far
# Bug fix: `while z <= 10` printed 11 terms; the challenge asks for exactly 10.
while z < 10:
    print(f'\033[33m{c}\033[m', end=" -> ")
    c += r
    z += 1
print('FIM!')
2,
3312,
16,
25,
6524,
64,
50041,
267,
22196,
8579,
9399,
6885,
11,
22096,
78,
267,
6994,
7058,
3381,
78,
304,
257,
374,
1031,
28749,
390,
334,
2611,
8147,
11,
749,
25192,
78,
28686,
838,
6994,
72,
4951,
3381,
418,
12379,
4371,
2874... | 2.164021 | 189 |
"""Provide version and author details."""
__title__ = "steelconnection"
__description__ = "Simplify access to the Riverbed SteelConnect REST API."
__version__ = "0.95.0"
__author__ = "Greg Mueller"
__author_email__ = "steelconnection@grelleum.com"
__copyright__ = "Copyright 2018 Greg Mueller"
__license__ = "MIT"
__url__ = "https://github.com/grelleum/SteelConnection"
__documentation__ = "https://steelconnection.readthedocs.io/"
| [
37811,
15946,
485,
2196,
290,
1772,
3307,
526,
15931,
628,
198,
834,
7839,
834,
796,
366,
44822,
38659,
1,
198,
834,
11213,
834,
796,
366,
8890,
489,
1958,
1895,
284,
262,
5866,
3077,
7851,
13313,
30617,
7824,
526,
198,
834,
9641,
834... | 3.144928 | 138 |
import unittest
import logging
from log_analyzer import *
import os
import imp
if __name__ == "__main__":
unittest.main() | [
11748,
555,
715,
395,
201,
198,
11748,
18931,
201,
198,
6738,
2604,
62,
38200,
9107,
1330,
1635,
201,
198,
11748,
28686,
201,
198,
11748,
848,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
... | 2.537037 | 54 |
from Robotic_Servos import *
import time
import sys
import pandas as pd
print("--------------------------------------------------------------------")
print("README")
print("The is the calibration function, help you to calibarate the gripper")
print("for different device. Stongly reconmmend you calibarate the gripper")
print("every time you set up a new finger tip or device.\n")
print("Please read the calibrating file carefully in advance, otherwise the")
print("gripper may break your device.\n")
print("The calibration may take you around 3 mins")
print("Please check the Dynamixel ID and the output port number in advance")
print("If you are ready, please take your gripper on your hand, and plug in")
print("the cable(power and USB).\n")
print("--------------------------------------------------------------------")
input("Press 'Enter' for confirmation")
id = input("Please enter the ID of your Dynamixel and press 'Enter' for confirmation\
, e.g. 3\nYour input:")
id = int(id)
com = input("Please enter the com port number from your PC and press 'Enter' for confirmation,e.g. 5\nYour input:")
port_num = "COM%s" % com
print("\nInitilizing and activating your servo......")
port = openport(port_num)
packet = openpacket()
servo = Robotis_Servo(port, packet, id)
limits = {}
time.sleep(1)
print("Initilizing and activating finished, please follow the instruction to finish the rest\
of the calibration\n")
print("--------------------------------------------------------------------")
print("\nPlease gentelly push the finger to your expected close position")
ref = input("Please enter 'yes'(lower case) and press 'Enter' for confirming the position\
\nYour input:")
if ref == "yes":
print("Don't move the finger, writing......")
close_limit = servo.read_current_pos()
time.sleep(1)
limits['close_limit'] = int(close_limit)
else:
print("Invalid input, existing")
sys.exit(0)
print("The close limit is successfully stored\n")
print("Please gentelly pull the finger to your expected open position")
ref = input("please enter 'yes'(lower case) and press 'Enter' for confirming the position\
\nYour input:")
ref = str(ref)
if ref == "yes":
print("Don't move the finger, writing......")
open_limit= servo.read_current_pos()
time.sleep(1)
limits['open_limit'] = int(open_limit)
else:
print("Invalid input, existing")
sys.exit(0)
print("The open limit is successfully stored\n")
df = pd.DataFrame([limits], columns=['close_limit', "open_limit"])
df.to_csv("./calibaration.csv", index=False)
print("\nThe calibration is successful, thanks for your cooration:)")
input("Press enter to exit")
| [
6738,
3851,
6210,
62,
11838,
418,
1330,
1635,
201,
198,
11748,
640,
201,
198,
11748,
25064,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
201,
198,
4798,
7203,
10097,
650,
4943,
201,
198,
4798,
7203,
15675,
11682,
4943,
201,
19... | 3.14043 | 883 |
import typing as t
import warnings
from collections.abc import Mapping as ABCMapping
from functools import wraps
from flask import current_app
from flask import jsonify
from flask import Response
from flask.views import MethodViewType
from marshmallow import ValidationError as MarshmallowValidationError
from webargs.flaskparser import FlaskParser as BaseFlaskParser
from webargs.multidictproxy import MultiDictProxy
from .exceptions import _ValidationError
from .helpers import _sentinel
from .schemas import EmptySchema
from .schemas import Schema
from .types import DecoratedType
from .types import DictSchemaType
from .types import HTTPAuthType
from .types import OpenAPISchemaType
from .types import RequestType
from .types import ResponseReturnValueType
from .types import SchemaType
BODY_LOCATIONS = ['json', 'files', 'form', 'form_and_files', 'json_or_form']
SUPPORTED_LOCATIONS = BODY_LOCATIONS + ['query', 'headers', 'cookies', 'querystring']
class FlaskParser(BaseFlaskParser):
"""Overwrite the default `webargs.FlaskParser.handle_error`.
Update the default status code and the error description from related
configuration variables.
"""
parser: FlaskParser = FlaskParser()
use_args: t.Callable = parser.use_args
@parser.location_loader('form_and_files')
@parser.location_loader('files')
class APIScaffold:
    """A base class for [`APIFlask`][apiflask.app.APIFlask] and
    [`APIBlueprint`][apiflask.blueprint.APIBlueprint].

    This class contains the route shortcut decorators (i.e. `get`, `post`, etc.) and
    API-related decorators (i.e. `auth_required`, `input`, `output`, `doc`).

    *Version added: 1.0*

    NOTE(review): several method bodies below end with `return decorator`
    although no `decorator` closure is defined anywhere in this chunk -- the
    inner decorator implementations appear to have been stripped during
    extraction and must be restored before this class is usable.
    """

    def get(self, rule: str, **options: t.Any):
        """Shortcut for `app.route()` or `app.route(methods=['GET'])`."""
        # `_method_route` is not visible in this chunk; presumably defined on
        # the concrete subclass / elsewhere in the original module -- confirm.
        return self._method_route('GET', rule, options)

    def post(self, rule: str, **options: t.Any):
        """Shortcut for `app.route(methods=['POST'])`."""
        return self._method_route('POST', rule, options)

    def put(self, rule: str, **options: t.Any):
        """Shortcut for `app.route(methods=['PUT'])`."""
        return self._method_route('PUT', rule, options)

    def patch(self, rule: str, **options: t.Any):
        """Shortcut for `app.route(methods=['PATCH'])`."""
        return self._method_route('PATCH', rule, options)

    def delete(self, rule: str, **options: t.Any):
        """Shortcut for `app.route(methods=['DELETE'])`."""
        return self._method_route('DELETE', rule, options)

    def auth_required(
        self,
        auth: HTTPAuthType,
        role: t.Optional[str] = None,
        roles: t.Optional[list] = None,
        optional: t.Optional[str] = None
    ) -> t.Callable[[DecoratedType], DecoratedType]:
        """Protect a view with provided authentication settings.

        > Be sure to put it under the routes decorators (i.e., `app.route`, `app.get`,
        `app.post`, etc.).

        Examples:

        ```python
        from apiflask import APIFlask, HTTPTokenAuth, auth_required

        app = APIFlask(__name__)
        auth = HTTPTokenAuth()

        @app.get('/')
        @app.auth_required(auth)
        def hello():
            return 'Hello'
        ```

        Arguments:
            auth: The `auth` object, an instance of
                [`HTTPBasicAuth`][apiflask.security.HTTPBasicAuth]
                or [`HTTPTokenAuth`][apiflask.security.HTTPTokenAuth].
            role: Deprecated since 1.0, use `roles` instead.
            roles: The selected roles to allow to visit this view, accepts a list of role names.
                See [Flask-HTTPAuth's documentation][_role]{target:_blank} for more details.

                [_role]: https://flask-httpauth.readthedocs.io/en/latest/#user-roles
            optional: Set to `True` to allow the view to execute even the authentication
                information is not included with the request, in which case the attribute
                `auth.current_user` will be `None`.

        *Version changed: 1.0.0*

        - The `role` parameter is deprecated.

        *Version changed: 0.12.0*

        - Move to `APIFlask` and `APIBlueprint` classes.

        *Version changed: 0.4.0*

        - Add parameter `roles`.
        """
        # Normalize the deprecated single `role` and the new `roles` list into
        # one internal `_roles` value.
        _roles = None
        if role is not None:
            warnings.warn(
                'The `role` parameter is deprecated and will be removed in 1.1, '
                'use `roles` and always pass a list instead.',
                DeprecationWarning,
                stacklevel=3,
            )
            _roles = [role]
        elif roles is not None:
            _roles = roles
        # NOTE(review): `decorator` is undefined here (body stripped).
        return decorator

    def input(
        self,
        schema: SchemaType,
        location: str = 'json',
        schema_name: t.Optional[str] = None,
        example: t.Optional[t.Any] = None,
        examples: t.Optional[t.Dict[str, t.Any]] = None,
        **kwargs: t.Any
    ) -> t.Callable[[DecoratedType], DecoratedType]:
        """Add input settings for view functions.

        > Be sure to put it under the routes decorators (i.e., `app.route`, `app.get`,
        `app.post`, etc.).

        If the validation passed, the data will inject into view
        function as a positional argument in the form of `dict`. Otherwise,
        an error response with the detail of the validation result will be returned.

        Examples:

        ```python
        from apiflask import APIFlask, input

        app = APIFlask(__name__)

        @app.get('/')
        @app.input(PetInSchema)
        def hello(parsed_and_validated_input_data):
            print(parsed_and_validated_input_data)
            return 'Hello'
        ```

        Arguments:
            schema: The marshmallow schema of the input data.
            location: The location of the input data, one of `'json'` (default),
                `'files'`, `'form'`, `'cookies'`, `'headers'`, `'query'`
                (same as `'querystring'`).
            schema_name: The schema name for dict schema, only needed when you pass
                a schema dict (e.g., `{'name': String(required=True)}`) for `json`
                location.
            example: The example data in dict for request body, you should use either
                `example` or `examples`, not both.
            examples: Multiple examples for request body, you should pass a dict
                that contains multiple examples. Example:

                ```python
                {
                    'example foo': {  # example name
                        'summary': 'an example of foo',  # summary field is optional
                        'value': {'name': 'foo', 'id': 1}  # example value
                    },
                    'example bar': {
                        'summary': 'an example of bar',
                        'value': {'name': 'bar', 'id': 2}
                    },
                }
                ```

        *Version changed: 1.0*

        - Ensure only one input body location was used.
        - Add `form_and_files` and `json_or_form` (from webargs) location.
        - Rewrite `files` to act as `form_and_files`.
        - Use correct request content type for `form` and `files`.

        *Version changed: 0.12.0*

        - Move to APIFlask and APIBlueprint classes.

        *Version changed: 0.4.0*

        - Add parameter `examples`.
        """
        # Accept a plain mapping as a shorthand schema definition.
        # NOTE(review): `_generate_schema_from_mapping` is not visible in this
        # chunk; presumably a module-level helper -- confirm.
        if isinstance(schema, ABCMapping):
            schema = _generate_schema_from_mapping(schema, schema_name)
        # Accept a schema class and instantiate it lazily.
        if isinstance(schema, type):  # pragma: no cover
            schema = schema()
        # NOTE(review): `decorator` is undefined here (body stripped).
        return decorator

    def output(
        self,
        schema: SchemaType,
        status_code: int = 200,
        description: t.Optional[str] = None,
        schema_name: t.Optional[str] = None,
        example: t.Optional[t.Any] = None,
        examples: t.Optional[t.Dict[str, t.Any]] = None,
        links: t.Optional[t.Dict[str, t.Any]] = None,
    ) -> t.Callable[[DecoratedType], DecoratedType]:
        """Add output settings for view functions.

        > Be sure to put it under the routes decorators (i.e., `app.route`, `app.get`,
        `app.post`, etc.).

        The decorator will format the return value of your view function with
        provided marshmallow schema. You can return a dict or an object (such
        as a model class instance of ORMs). APIFlask will handle the formatting
        and turn your return value into a JSON response.

        P.S. The output data will not be validated; it's a design choice of marshmallow.
        marshmallow 4.0 may be support the output validation.

        Examples:

        ```python
        from apiflask import APIFlask, output

        app = APIFlask(__name__)

        @app.get('/')
        @app.output(PetOutSchema)
        def hello():
            return the_dict_or_object_match_petout_schema
        ```

        Arguments:
            schema: The schemas of the output data.
            status_code: The status code of the response, defaults to `200`.
            description: The description of the response.
            schema_name: The schema name for dict schema, only needed when you pass
                a schema dict (e.g., `{'name': String()}`).
            example: The example data in dict for response body, you should use either
                `example` or `examples`, not both.
            examples: Multiple examples for response body, you should pass a dict
                that contains multiple examples. Example:

                ```python
                {
                    'example foo': {  # example name
                        'summary': 'an example of foo',  # summary field is optional
                        'value': {'name': 'foo', 'id': 1}  # example value
                    },
                    'example bar': {
                        'summary': 'an example of bar',
                        'value': {'name': 'bar', 'id': 2}
                    },
                }
                ```
            links: The `links` of response. It accepts a dict which maps a link name to
                a link object. Example:

                ```python
                {
                    'getAddressByUserId': {
                        'operationId': 'getUserAddress',
                        'parameters': {
                            'userId': '$request.path.id'
                        }
                    }
                }
                ```

                See the [docs](https://apiflask.com/openapi/#response-links) for more details
                about setting response links.

        *Version changed: 0.12.0*

        - Move to APIFlask and APIBlueprint classes.

        *Version changed: 0.10.0*

        - Add `links` parameter.

        *Version changed: 0.9.0*

        - Add base response customization support.

        *Version changed: 0.6.0*

        - Support decorating async views.

        *Version changed: 0.5.2*

        - Return the `Response` object directly.

        *Version changed: 0.4.0*

        - Add parameter `examples`.
        """
        # `{}` is the documented shorthand for "no response body".
        if schema == {}:
            schema = EmptySchema
        if isinstance(schema, ABCMapping):
            schema = _generate_schema_from_mapping(schema, schema_name)
        if isinstance(schema, type):  # pragma: no cover
            schema = schema()
        # An empty schema implies an empty body, so use 204 No Content.
        if isinstance(schema, EmptySchema):
            status_code = 204
        # NOTE(review): `decorator` is undefined here (body stripped).
        return decorator

    def doc(
        self,
        summary: t.Optional[str] = None,
        description: t.Optional[str] = None,
        tag: t.Optional[str] = None,
        tags: t.Optional[t.List[str]] = None,
        responses: t.Optional[t.Union[t.List[int], t.Dict[int, str]]] = None,
        deprecated: t.Optional[bool] = None,
        hide: t.Optional[bool] = None,
        operation_id: t.Optional[str] = None,
        security: t.Optional[t.Union[str, t.List[t.Union[str, t.Dict[str, list]]]]] = None,
    ) -> t.Callable[[DecoratedType], DecoratedType]:
        """Set up the OpenAPI Spec for view functions.

        > Be sure to put it under the routes decorators (i.e., `app.route`, `app.get`,
        `app.post`, etc.).

        Examples:

        ```python
        from apiflask import APIFlask, doc

        app = APIFlask(__name__)

        @app.get('/')
        @app.doc(summary='Say hello', tags=['Foo'])
        def hello():
            return 'Hello'
        ```

        Arguments:
            summary: The summary of this endpoint. If not set, the name of the view function
                will be used. If your view function is named with `get_pet`, then the summary
                will be "Get Pet". If the view function has a docstring, then the first
                line of the docstring will be used. The precedence will be:

                ```
                @app.doc(summary='blah') > the first line of docstring > the view function name
                ```

            description: The description of this endpoint. If not set, the lines after the empty
                line of the docstring will be used.
            tag: Deprecated since 1.0, use `tags` instead.
            tags: A list of tag names of this endpoint, map the tags you passed in the `app.tags`
                attribute. If `app.tags` is not set, the blueprint name will be used as tag name.
            responses: The other responses for this view function, accepts a dict in a format
                of `{404: 'Not Found'}` or a list of status code (`[404, 418]`). If pass a dict,
                and a response with the same status code is already exist, the existing
                description will be overwritten.
            deprecated: Flag this endpoint as deprecated in API docs.
            hide: Hide this endpoint in API docs.
            operation_id: The `operationId` of this endpoint. Set config `AUTO_OPERATION_ID` to
                `True` to enable the auto-generating of operationId (in the format of
                `{method}_{endpoint}`).
            security: The `security` used for this endpoint. Match the security info specified in
                the `SECURITY_SCHEMES` configuration. If you don't need specify the scopes, just
                pass a security name (equals to `[{'foo': []}]`) or a list of security names (equals
                to `[{'foo': []}, {'bar': []}]`).

        *Version changed: 1.0*

        - Add `security` parameter to support customizing security info.
        - The `role` parameter is deprecated.

        *Version changed: 0.12.0*

        - Move to `APIFlask` and `APIBlueprint` classes.

        *Version changed: 0.10.0*

        - Add parameter `operation_id`.

        *Version changed: 0.5.0*

        - Change the default value of parameters `hide` and `deprecated` from `False` to `None`.

        *Version changed: 0.4.0*

        - Add parameter `tag`.

        *Version changed: 0.3.0*

        - Change the default value of `deprecated` from `None` to `False`.
        - Rename parameter `tags` to `tag`.

        *Version added: 0.2.0*
        """
        # Normalize the deprecated single `tag` and the new `tags` list into
        # one internal `_tags` value.
        _tags = None
        if tag is not None:
            warnings.warn(
                'The `tag` parameter is deprecated and will be removed in 1.1, '
                'use `tags` and always pass a list instead.',
                DeprecationWarning,
                stacklevel=2,
            )
            _tags = [tag]
        elif tags is not None:
            _tags = tags
        # NOTE(review): `decorator` is undefined here (body stripped).
        return decorator
| [
11748,
19720,
355,
256,
198,
11748,
14601,
198,
6738,
17268,
13,
39305,
1330,
337,
5912,
355,
9738,
44,
5912,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
198,
6738,
42903,
1330,
1459,
62,
1324,
198,
6738,
42903,
1330,
33918,
1958,
19... | 2.254045 | 6,861 |
import math
import unittest
import numpy as np
import knee.evaluation as evaluation
if __name__ == '__main__':
    # Discover and run this module's test cases.
    # NOTE(review): no TestCase subclasses are visible in this chunk; the
    # test classes appear to have been stripped during extraction.
    unittest.main()
11748,
10688,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10329,
13,
18206,
2288,
355,
12660,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12... | 3.069767 | 43 |
import argparse
from .openpose.lib.utils.common import Human
from typing import Sequence
import torch
import torch.nn as nn
from . import converter
from .openpose.lib.network.rtpose_vgg import get_model
from .openpose.evaluate.coco_eval import get_outputs
from .openpose.lib.utils.paf_to_pose import paf_to_pose_cpp
from .openpose.lib.config import cfg, update_config
# Command-line interface: experiment config file, trained weights, and
# free-form config overrides consumed by `update_config` below.
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', help='experiment configure file name',
                    default='./compoelem/detect/openpose/experiments/vgg19_368x368_sgd.yaml', type=str)
parser.add_argument('--weight', type=str,
                    default='./compoelem/detect/openpose/pose_model.pth')
parser.add_argument('opts',
                    help="Modify config options using the command-line",
                    default=None,
                    nargs=argparse.REMAINDER)
args = parser.parse_args()

# args = {
#     "cfg": './compoelem/detect/openpose/experiments/vgg19_368x368_sgd.yaml',
#     "opts": [],
#     "weight": './compoelem/detect/openpose/pose_model.pth',
# }

# update config file
update_config(cfg, args)

# Build the VGG19-based OpenPose network, load trained weights from disk,
# wrap it for multi-GPU execution and switch to inference mode.
# NOTE(review): this all runs at import time and assumes the weight file
# exists at the given path -- confirm callers expect that.
model = get_model('vgg19')
model.load_state_dict(torch.load(args.weight))
model = nn.DataParallel(model)
model.float()
model.eval()
11748,
1822,
29572,
198,
6738,
764,
9654,
3455,
13,
8019,
13,
26791,
13,
11321,
1330,
5524,
198,
6738,
19720,
1330,
45835,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
764,
1330,
38394,
198,
6738,
764,
... | 2.433269 | 517 |
from __future__ import print_function
############################################################################################
#
# The MIT License (MIT)
#
# Intel AI DevJam IDC Demo Classification Server
# Copyright (C) 2018 Adam Milton-Barker (AdamMiltonBarker.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Title: IDC Classification DevCloud Trainer
# Description: Trains a custom Inception V3 model for classification of invasive ductal carcinoma (IDC).
# Acknowledgements: Uses code from chesterkuo imageclassify-movidius (https://github.com/chesterkuo/imageclassify-movidius)
# Uses data from paultimothymooney Predict IDC in Breast Cancer Histology Images (Kaggle)
# Config: Configuration can be found in required/confs.json
# Last Modified: 2018-08-07
#
# Usage:
#
# $ python3.5 Trainer.py DataSort
# $ python3.5 Trainer.py Train
#
############################################################################################
print("")
print("")
print("!! Welcome to the IDC Classification DevCloud Trainer, please wait while the program initiates !!")
print("")

import os, sys

print("-- Running on Python "+sys.version)
print("")

import time, math, random, json, glob
import tools.inception_preprocessing
from sys import argv
from datetime import datetime
import tensorflow as tf
import numpy as np
from builtins import range
from tools.inception_v3 import inception_v3, inception_v3_arg_scope
from tools.DataSort import DataSort
from tensorflow.contrib.framework.python.ops.variables import get_or_create_global_step
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.framework import graph_util

slim = tf.contrib.slim

print("-- Imported Required Modules")
print("")

# Pin TensorFlow / OpenMP threading for a 12-core node and silence
# TensorFlow's C++ logging ('3' = errors only).
config = tf.ConfigProto(intra_op_parallelism_threads=12, inter_op_parallelism_threads=2, allow_soft_placement=True, device_count = {'CPU': 12})
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["OMP_NUM_THREADS"] = "12"
os.environ["KMP_BLOCKTIME"] = "30"
os.environ["KMP_SETTINGS"] = "1"
os.environ["KMP_AFFINITY"]= "granularity=fine,verbose,compact,1,0"

print("-- Setup Environment Settings")
print("")

# NOTE(review): the `Trainer` class definition is missing from this chunk, so
# `Trainer = Trainer()` raises NameError as written, and `main` (called below)
# is likewise undefined here. Both appear stripped during extraction.
Trainer = Trainer()
DataSort = DataSort()

if __name__ == "__main__":
    main(sys.argv[1:])
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
29113,
29113,
14468,
7804,
4242,
198,
2,
198,
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
220,
198,
2,
8180,
9552,
6245,
30380,
4522,
34,
34588,
40984,
9652,
198,
2,
15069,
357,
3... | 3.299599 | 998 |
from stapy.sta.time import Time
from stapy.sta.abstract_entity import AbstractEntity
# TODO resultQuality DQ_Element
| [
6738,
336,
12826,
13,
38031,
13,
2435,
1330,
3862,
198,
6738,
336,
12826,
13,
38031,
13,
397,
8709,
62,
26858,
1330,
27741,
32398,
198,
198,
2,
16926,
46,
1255,
35013,
360,
48,
62,
20180,
198
] | 3.371429 | 35 |
import d2bot.visualizer as visualizer
import d2bot.core.game_env as game_env
import d2bot.simulator as simulator
from d2bot.torch.a3c.ActorCritic import ActorCritic
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import random
import time
import sys
from tensorboardX import SummaryWriter
# TensorBoard writer for training curves.
writer = SummaryWriter()

'''
model from https://github.com/ikostrikov/pytorch-a3c
'''

if __name__ == '__main__':
    # NOTE(review): `test` and `test_without_gui` are not defined in this
    # chunk (stripped during extraction), and the original nesting of
    # `test_without_gui()` is uncertain because indentation was lost --
    # it may belong in an else-branch. Confirm against the original file.
    if len(sys.argv) == 2:
        if sys.argv[1] == "visible":
            test()
    test_without_gui()
| [
11748,
288,
17,
13645,
13,
41464,
7509,
355,
5874,
7509,
198,
11748,
288,
17,
13645,
13,
7295,
13,
6057,
62,
24330,
355,
983,
62,
24330,
198,
11748,
288,
17,
13645,
13,
14323,
8927,
355,
35375,
198,
198,
6738,
288,
17,
13645,
13,
13... | 2.632479 | 234 |
import numpy as np
import dash
from dash.dependencies import Input, Output
from plotly.graph_objs import *
import dash_core_components as dcc
import dash_html_components as html
app = dash.Dash()

# function to plot
# default ranges for x0 & x1
xranges = [[0,1], [-np.pi, np.pi]]

# dropdown to pick which x to plot against
xchooser = dcc.Dropdown(
    id='xchooser',
    options=[{'label':'x0', 'value':'0'},{'label':'x1', 'value':'1'}],
    value='0')

# the user can also modify the ranges manually
minsetter = dcc.Input(id='minsetter', type='number', value=xranges[0][0])
maxsetter = dcc.Input(id='maxsetter', type='number', value=xranges[0][1])

# Page layout: axis chooser + manual range inputs on the left, plot on the right.
app.layout = html.Div([
    html.Div(xchooser, style={'width':'15%'}),
    html.Div(['Min: ',minsetter,'Max: ',maxsetter]),
    html.Div([dcc.Graph(id='trend_plot')], style={'width':'80%','float':'right'})
])

# NOTE(review): the callback functions these decorators should wrap are
# missing from this chunk (stripped during extraction); stacked
# `@app.callback` lines with no following `def` are a syntax error as
# written. Restore the callback bodies before running.
@app.callback(Output('minsetter','value'),[Input('xchooser','value')])
@app.callback(Output('maxsetter','value'),[Input('xchooser','value')])
@app.callback(Output('trend_plot','figure'),
              [Input('xchooser','value'),Input('minsetter','value'),Input('maxsetter','value')])
if __name__ == '__main__':
    app.run_server()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
14470,
198,
6738,
14470,
13,
45841,
3976,
1330,
23412,
11,
25235,
198,
6738,
7110,
306,
13,
34960,
62,
672,
8457,
1330,
1635,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
... | 2.55042 | 476 |
'''
Count the annotation classes and the number of instances per class.
2020-04-21
'''
import operator
import sys
import argparse
import matplotlib.pyplot as plt

plt.rcParams['font.sans-serif'] = ['SimHei']  # use SimHei so CJK labels render correctly
path = 'D:\wsm\pycharm_pjs\data-analysis\scripts\\traffic_anno.txt'  # path of the annotation txt file
results_files_path = 'D:\wsm\pycharm_pjs\data-analysis\scripts\\'  # output directory for results
image_num = 13910  # image count, entered manually (produced by selfimage_annotation.py)
draw_plot = True

gt_counter_per_class = {}

'''
从txt统计类别信息到字典gt_counter_per_class
'''
# Accumulate per-class instance counts from the txt file into
# gt_counter_per_class. Each line is "<image>,<class>,<class>,...", so the
# first comma-separated field (the image) is dropped.
with open(path,'r') as f:
    for line in f.readlines():
        class_list = line.strip().split(',')[1:]
        print(class_list)
        for class_name in class_list:
            if class_name in gt_counter_per_class:
                gt_counter_per_class[class_name] += 1
            else:
                gt_counter_per_class[class_name] = 1

print(gt_counter_per_class)

# class list and number of classes
gt_classes = list(gt_counter_per_class.keys())
#print(gt_classes)
n_classes = len(gt_classes)

"""
Plot - adjust axes
"""
"""
Draw plot using Matplotlib
"""
"""
Plot the total number of occurences of each class in the ground-truth
"""
if draw_plot:
    true_p_bar = gt_counter_per_class
    window_title = "Ground-Truth Info"
    plot_title = "Ground-Truth\n"
    plot_title += "(" + str(image_num) + " pictures and " + str(n_classes) + " classes)"
    x_label = "Number of objects per class"
    output_path = results_files_path + "/traffic_anno Ground-Truth Info.png"
    to_show = False
    plot_color = 'forestgreen'
    # NOTE(review): `draw_plot_func` is not defined in this chunk; its
    # definition appears to have been stripped during extraction.
    draw_plot_func(
        gt_counter_per_class,
        n_classes,
        window_title,
        plot_title,
        x_label,
        output_path,
        to_show,
        plot_color,
        '',
    )
7061,
6,
201,
198,
163,
119,
253,
164,
106,
94,
43718,
229,
37345,
101,
163,
109,
119,
26344,
104,
161,
240,
234,
28938,
226,
163,
109,
119,
26344,
104,
46763,
108,
34932,
237,
201,
198,
42334,
13,
19,
13,
2481,
201,
198,
7061,
6,... | 1.92395 | 881 |
import math
max = 2000000
primes = [2]
for i in xrange(3,max,2):
prime = True
limit = math.ceil(math.sqrt(i)) # tip from SO
for j in xrange(3, int(limit)+1, 2):
if prime:
if i % j == 0:
prime = False
if prime:
primes.append(i)
sum = 0
for prime in primes:
sum += prime;
print sum;
| [
11748,
10688,
198,
198,
9806,
796,
939,
2388,
198,
198,
1050,
999,
796,
685,
17,
60,
198,
198,
1640,
1312,
287,
2124,
9521,
7,
18,
11,
9806,
11,
17,
2599,
198,
197,
35505,
796,
6407,
198,
197,
32374,
796,
10688,
13,
344,
346,
7,
... | 2.20438 | 137 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Demonstrate common `os` / `os.path` operations on Windows paths.

Fixes from the original: Python 2 `print` statements converted to Python 3,
and the final `os.path.split` call ended its string literal with a single
backslash, which escaped the closing quote and made the whole file a
SyntaxError. Raw strings are used for the Windows paths where possible.
"""

import os

# Which OS flavor we are running on ('posix' or 'nt').
print(os.name)
# Current working directory.
print(os.getcwd())
# All file names under a directory.
print(os.listdir(r'E:\workspace\Python-oldboy'))
# Run a shell command (launch the calculator).
# os.system("calc")
# Delete a file.
# os.remove(r'E:\workspace\Python-oldboy\my_study\sys_study')
# Tell a regular file from a directory.
print(os.path.isfile(r'E:\workspace\Python-oldboy\my_study\sys_study\oneos.py'))
print(os.path.isdir(r'E:\workspace\Python-oldboy\my_study\sys_study'))
# Split a full path into (directory, file name).
print(os.path.split(r'E:\workspace\Python-oldboy\my_study\sys_study\oneos.py'))
# -> ('E:\\workspace\\Python-oldboy\\my_study\\sys_study', 'oneos.py')
print(os.path.split(r'E:\workspace\Python-oldboy\my_study\sys_study'))
# -> ('E:\\workspace\\Python-oldboy\\my_study', 'sys_study')
# A raw string cannot end with a backslash either, so double it explicitly.
print(os.path.split('E:\\workspace\\Python-oldboy\\my_study\\'))
# -> ('E:\\workspace\\Python-oldboy\\my_study', '')
# Note: when the path ends with a separator the second element is empty;
# otherwise the last component is returned as the file/directory name.
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
201,
198,
201,
198,
11748,
28686,
201,
198,
201,
198,
2,
162,
253,
98,
40367,
233,
162,
241,
235,
43291,
163,
111,
119,
... | 1.578584 | 579 |
"""aws_codeartifact_poetry.helpers.cmd unit tests."""
import os
import subprocess
import sys
from unittest.mock import MagicMock, patch
import pytest
from _pytest.logging import LogCaptureFixture
from aws_codeartifact_poetry.helpers.catch_exceptions import CLIError
from aws_codeartifact_poetry.helpers.cmd import exec_cmd
from aws_codeartifact_poetry.helpers.logging import setup_logging
@pytest.fixture(autouse=True)
def enable_logging():
    """Configure package logging once for every test in this module."""
    setup_logging('aws_codeartifact_poetry', 'INFO', None)
@patch('subprocess.run')
def test_exec_cmd(mock_run: MagicMock, caplog: LogCaptureFixture):
    """A successful command (exit code 0) runs without raising and is logged."""
    mock_run.return_value.returncode = 0
    command, cwd = ['ls', '-la'], '.'

    exec_cmd(command, cwd)

    mock_run.assert_called_once_with(
        command,
        stderr=sys.stderr,
        stdout=sys.stdout,
        cwd=cwd,
        encoding='utf-8',
        env=os.environ.copy(),
    )
    assert 'Running command: ls -la' in caplog.text
@patch('subprocess.run')
def test_exec_cmd_non_zero_exit_code(mock_run: MagicMock, caplog: LogCaptureFixture):
    """A failing command (exit code 1) raises CLIError."""
    mock_run.return_value.returncode = 1
    command, cwd = ['ls', '-la'], '.'

    with pytest.raises(CLIError) as ex:
        exec_cmd(command, cwd)

    mock_run.assert_called_once_with(
        command,
        stderr=sys.stderr,
        stdout=sys.stdout,
        cwd=cwd,
        encoding='utf-8',
        env=os.environ.copy(),
    )
    assert 'Running command: ls -la' in caplog.text
    assert 'Error executing command: ls -la' in str(ex)
@patch('subprocess.run')
def test_exec_cmd_return_stdout(mock_run: MagicMock, caplog: LogCaptureFixture):
    """With return_stdout=True the captured stdout is returned as a string."""
    mock_run.return_value.returncode = 0
    mock_run.return_value.stdout = 'https://testing.com'
    command, cwd = ['dgx-deploy', 'spa', 'deploy'], '.'

    result = exec_cmd(command, cwd, True)

    mock_run.assert_called_once_with(
        command,
        stderr=sys.stderr,
        stdout=subprocess.PIPE,
        cwd=cwd,
        encoding='utf-8',
        env=os.environ.copy(),
    )
    assert 'Running command: dgx-deploy spa deploy' in caplog.text
    assert result == 'https://testing.com'
@patch('subprocess.run')
def test_exec_cmd_return_stdout_non_zero_exit_code(mock_run: MagicMock, caplog: LogCaptureFixture):
    """With return_stdout=True a failing command raises CLIError carrying stdout/stderr."""
    mock_run.return_value.returncode = 1
    mock_run.return_value.stderr = 'error on upload files'
    mock_run.return_value.stdout = 'uploading file.txt'
    command, cwd = ['dgx-deploy', 'spa', 'deploy'], '.'

    with pytest.raises(CLIError) as ex:
        exec_cmd(command, cwd, True)

    mock_run.assert_called_once_with(
        command,
        stderr=sys.stderr,
        stdout=subprocess.PIPE,
        cwd=cwd,
        encoding='utf-8',
        env=os.environ.copy(),
    )
    assert 'Running command: dgx-deploy spa deploy' in caplog.text
    assert "Error executing command: dgx-deploy spa deploy" in str(ex)
    assert "exit_code: 1" in str(ex)
    assert "stdout: 'uploading file.txt'" in str(ex)
    assert "stderr: 'error on upload files'" in str(ex)
@patch('subprocess.run')
def test_exec_cmd_return_stderr(mock_run: MagicMock, caplog: LogCaptureFixture):
    """With return_stderr=True the captured stderr is returned as a string."""
    mock_run.return_value.returncode = 0
    mock_run.return_value.stderr = 'some error'
    command, cwd = ['mkdir', 'dir'], '.'

    result = exec_cmd(command, cwd, return_stderr=True)

    mock_run.assert_called_once_with(
        command,
        stderr=subprocess.PIPE,
        stdout=sys.stdout,
        cwd=cwd,
        encoding='utf-8',
        env=os.environ.copy(),
    )
    assert 'Running command: mkdir dir' in caplog.text
    assert result == 'some error'
@patch('subprocess.run')
def test_exec_cmd_return_stdout_return_stderr(mock_run: MagicMock, caplog: LogCaptureFixture):
    """With both flags set, stdout and stderr come back as a tuple."""
    mock_run.return_value.returncode = 0
    mock_run.return_value.stderr = 'fake stderr message'
    mock_run.return_value.stdout = 'fake stdout message'
    command, cwd = ['npm', 'install'], '.'

    stdout, stderr = exec_cmd(command, cwd, True, True)

    mock_run.assert_called_once_with(
        command,
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
        cwd=cwd,
        encoding='utf-8',
        env=os.environ.copy(),
    )
    assert 'Running command: npm install' in caplog.text
    assert stdout == 'fake stdout message'
    assert stderr == 'fake stderr message'
@patch('subprocess.run')
def test_exec_cmd_combine_outputs(mock_run: MagicMock, caplog: LogCaptureFixture):
    """With combine_outputs=True stderr is merged into stdout and (exit_code, output) is returned."""
    mock_run.return_value.returncode = 0
    mock_run.return_value.stdout = 'fake stdout message and fake stderr message'
    command, cwd = ['nx', 'run', 'build'], '.'

    exit_code, output = exec_cmd(command, cwd, combine_outputs=True)

    mock_run.assert_called_once_with(
        command,
        stderr=subprocess.STDOUT,
        stdout=subprocess.PIPE,
        cwd=cwd,
        encoding='utf-8',
        env=os.environ.copy(),
    )
    assert 'Running command: nx run build' in caplog.text
    assert output == 'fake stdout message and fake stderr message'
    assert exit_code == 0
@patch('subprocess.run')
def test_exec_cmd_combine_outputs_with_error(mock_run: MagicMock, caplog: LogCaptureFixture):
    """With combine_outputs=True a failure is reported via the exit code, not an exception."""
    mock_run.return_value.returncode = 1
    mock_run.return_value.stdout = 'error message'
    command, cwd = ['nx', 'run', 'test'], '.'

    exit_code, output = exec_cmd(command, cwd, combine_outputs=True)

    mock_run.assert_called_once_with(
        command,
        stderr=subprocess.STDOUT,
        stdout=subprocess.PIPE,
        cwd=cwd,
        encoding='utf-8',
        env=os.environ.copy(),
    )
    assert 'Running command: nx run test' in caplog.text
    assert exit_code == 1
    assert output == 'error message'
@patch('subprocess.run')
def test_exec_cmd_with_custom_env_vars(mock_run: MagicMock):
    """Entries in env_vars are merged on top of the current environment."""
    mock_run.return_value.returncode = 0
    command, cwd = ['npm', 'install'], '.'

    exec_cmd(command, cwd, env_vars={'NODE_ENV': 'ci'})

    expected_env = {**os.environ.copy(), 'NODE_ENV': 'ci'}
    mock_run.assert_called_once_with(
        command,
        stderr=sys.stderr,
        stdout=sys.stdout,
        cwd=cwd,
        encoding='utf-8',
        env=expected_env,
    )
@patch('subprocess.run')
def test_exec_cmd_with_hide_secrets(mock_run: MagicMock, caplog: LogCaptureFixture):
    """Values listed in hide_secrets are masked as `****` in the log line."""
    mock_run.return_value.returncode = 0
    secret_value = 'my-secret-value'
    command, cwd = ['login', secret_value], '.'

    exec_cmd(command, cwd, hide_secrets=[secret_value])

    assert 'Running command: login ****' in caplog.text
    mock_run.assert_called_once_with(
        command,
        stderr=sys.stderr,
        stdout=sys.stdout,
        cwd=cwd,
        encoding='utf-8',
        env=os.environ.copy(),
    )
| [
37811,
8356,
62,
8189,
433,
29660,
62,
7501,
11973,
13,
16794,
364,
13,
28758,
4326,
5254,
526,
15931,
198,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
11,
... | 1.921282 | 4,929 |
import os
import mimetypes
import fnmatch
import yaml
from genesis.shell import ShellProxy, ProcessQuery
from genesis.utils import expand, is_windows
from genesis.config import load_yaml
from genesis.scm import get_scm
# TODO: simplify here
| [
11748,
28686,
198,
11748,
17007,
2963,
12272,
198,
11748,
24714,
15699,
198,
198,
11748,
331,
43695,
198,
198,
6738,
48861,
13,
29149,
1330,
17537,
44148,
11,
10854,
20746,
198,
6738,
48861,
13,
26791,
1330,
4292,
11,
318,
62,
28457,
198,... | 3.507042 | 71 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
2829,
17752,
355,
33918,
198,
198,
6738,
435,
541,
323,
13,
64,
404,
13,
15042,
13,
26209,
13,
2348,
541,
323,... | 2.692308 | 52 |
#!/usr/bin/python
from flask import Flask, render_template
from flask_socketio import SocketIO
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
socketio = SocketIO(app, cors_allowed_origins="*")
@app.route('/')
api.add_resource(Flag, '/flag')
api.add_resource(InternalAPI, '/api/internal')
@socketio.on('my event')
if __name__ == '__main__':
socketio.run(app, port=5000, host='localhost')
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
198,
6738,
42903,
62,
44971,
952,
1330,
47068,
9399,
198,
6738,
42903,
62,
2118,
913,
1330,
20857,
11,
5949,
72,
628,
198,
1324,
796,
46947... | 2.806452 | 155 |
import math
import numpy as np
| [
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
628,
198
] | 3.3 | 10 |
from docs_snippets.concepts.io_management.load_from_config import execute_with_config
| [
6738,
34165,
62,
16184,
3974,
1039,
13,
43169,
82,
13,
952,
62,
27604,
13,
2220,
62,
6738,
62,
11250,
1330,
12260,
62,
4480,
62,
11250,
628
] | 3.346154 | 26 |
from typing import Any
from app import models, schemas
from app.api import deps
from app.core.celery_app import celery_app
from app.utils.utils import send_test_email
from fastapi import APIRouter, Depends
from pydantic.networks import EmailStr
router = APIRouter()
@router.post("/test-celery/", response_model=schemas.Msg, status_code=201)
def test_celery(
    msg: schemas.Msg,
    current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
    """
    Test Celery worker.

    Requires an active superuser. Enqueues the `app.worker.test_celery`
    task with the posted message and acknowledges immediately (201).
    """
    celery_app.send_task("app.worker.test_celery", args=[msg.msg])
    return {"msg": "Word received"}
@router.post("/test-email/", response_model=schemas.Msg, status_code=201)
def test_email(
    email_to: EmailStr,
    current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
    """
    Test emails.

    Requires an active superuser. Sends a test message to `email_to`
    via `send_test_email` within the request and returns a confirmation (201).
    """
    send_test_email(email_to=email_to)
    return {"msg": "Test email sent"}
| [
6738,
19720,
1330,
4377,
198,
198,
6738,
598,
1330,
4981,
11,
3897,
5356,
198,
6738,
598,
13,
15042,
1330,
390,
862,
198,
6738,
598,
13,
7295,
13,
7015,
88,
62,
1324,
1330,
18725,
1924,
62,
1324,
198,
6738,
598,
13,
26791,
13,
26791... | 2.585165 | 364 |
'''
Simple Client Counter for VLC VLM
'''
| [
7061,
6,
198,
26437,
20985,
15034,
329,
569,
5639,
569,
31288,
198,
7061,
6,
628
] | 2.866667 | 15 |
import importlib
import os
import io
import logging
from setuptools import setup, find_packages
from typing import List
import pathlib
PARENT = pathlib.Path(__file__).parent
logger = logging.getLogger(__name__)
# Kept manually in sync with dask_pipes.__version__
# noinspection PyUnresolvedReferences
spec = importlib.util.spec_from_file_location("dask_pipes.version",
os.path.join("dask_pipes", 'version.py'))
# noinspection PyUnresolvedReferences
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
version = mod.version
try:
with io.open('README.md', encoding='utf-8') as f:
long_description = f.read()
except FileNotFoundError:
long_description = ''
if __name__ == "__main__":
do_setup()
| [
11748,
1330,
8019,
198,
11748,
28686,
198,
11748,
33245,
198,
11748,
18931,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
19720,
1330,
7343,
198,
11748,
3108,
8019,
198,
198,
27082,
3525,
796,
3108,
8019,
13,
... | 2.60596 | 302 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dummy conftest.py for energy_demand.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
https://pytest.org/latest/plugins.html
"""
from __future__ import absolute_import, division, print_function
from pytest import fixture
@fixture(scope='function')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
360,
13513,
369,
701,
395,
13,
9078,
329,
2568,
62,
28550,
13,
628,
220,
220,
220,
1002,
... | 2.91129 | 124 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 26 18:33:09 2022
@author: marco
"""
import pandas as pd
import numpy as np
import random
import math
import os
from scipy.linalg import pinv as inv
from sklearn.linear_model import Lasso
from sklearn.linear_model import LassoCV
import matplotlib.pyplot as plt
os.chdir('C://Users//marco//Desktop//Projects//Bootstrapping')
cwd = os.getcwd()
print("Current working directory: {0}".format(cwd))
import warnings # `do not disturbe` mode
warnings.filterwarnings('ignore')
df = pd.read_excel('Data.xlsx', index_col=0)
############## Block Bootstrapping
# Loop for block bootstrapping
df=df.to_numpy()
df1 = block_bs(df=df,B=10000000,k=3)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3300,
2365,
2608,
1248,
25,
2091,
25,
2931,
33160,
201,
198,
201,
198,
31,
9800,
25,
1667,
1073,
201,
198,
37811,
201,
198,
201,
198,... | 2.359375 | 320 |
import random
import json
for i in range(10):
# print("\n{\"Temperature\":{},\"SOC\":{}}\n".format(48.00, 58.00));
print(json.dumps({'Temperature':random.randint(10,100), 'SOC':random.randint(10,100)}))
| [
11748,
4738,
201,
198,
11748,
33918,
201,
198,
201,
198,
1640,
1312,
287,
2837,
7,
940,
2599,
201,
198,
220,
220,
220,
1303,
3601,
7203,
59,
77,
90,
7879,
42492,
30478,
90,
5512,
7879,
50,
4503,
30478,
90,
11709,
59,
77,
1911,
18982... | 2.235294 | 102 |
import sys
sys.setrecursionlimit(10**6)
N, Q = map(int, input().split())
ab = [[int(i) for i in input().split()] for _ in range(N - 1)]
cd = [[int(i) for i in input().split()] for _ in range(Q)]
G = [[] for _ in range(N)]
for a, b in ab:
G[a - 1].append(b - 1)
G[b - 1].append(a - 1)
#Euler tour
vs = [-1] * (len(G) * 2 - 1) #DFSの訪問順
depth = [-1] * (len(G) * 2 - 1) #根からの深さ
id = [-1] * len(G) #各頂点がvsに初めて登場するindex
k = 0
dfs(0, -1, 0)
for c, d in cd:
print('Road' if (depth[id[c - 1]] - depth[id[d - 1]]) % 2 else 'Town')
| [
11748,
25064,
198,
198,
17597,
13,
2617,
8344,
24197,
32374,
7,
940,
1174,
21,
8,
198,
198,
45,
11,
1195,
796,
3975,
7,
600,
11,
5128,
22446,
35312,
28955,
198,
397,
796,
16410,
600,
7,
72,
8,
329,
1312,
287,
5128,
22446,
35312,
3... | 1.862543 | 291 |
from amuse.test import amusetest
import pickle
from amuse.support.exceptions import AmuseException
from amuse.units import core
from amuse.units import si
from amuse.units import nbody_system
from amuse.units import generic_unit_system
from amuse.units.quantities import zero
from amuse.units.units import *
from amuse.units.constants import *
from amuse.datamodel import Particles, parameters
import subprocess
import pickle
import sys
import os
| [
6738,
26072,
13,
9288,
1330,
716,
385,
316,
395,
198,
198,
11748,
2298,
293,
198,
198,
6738,
26072,
13,
11284,
13,
1069,
11755,
1330,
1703,
1904,
16922,
198,
198,
6738,
26072,
13,
41667,
1330,
4755,
198,
6738,
26072,
13,
41667,
1330,
... | 2.560748 | 214 |
# -*- coding: utf-8 -*-
"""
File Name: model
Description : 模型层
Author : mick.yi
date: 2019/4/1
"""
import keras
from keras import layers, Input, Model
import tensorflow as tf
from east.layers.base_net import resnet50
from east.layers.losses import balanced_cross_entropy, iou_loss, angle_loss
from east.layers.rbox import dist_to_box
def merge_block(f_pre, f_cur, out_channels, index):
"""
east网络特征合并块
:param f_pre:
:param f_cur:
:param out_channels:输出通道数
:param index:block index
:return:
"""
# 上采样
up_sample = layers.UpSampling2D(size=2, name="east_up_sample_f{}".format(index - 1))(f_pre)
# 合并
merge = layers.Concatenate(name='east_merge_{}'.format(index))([up_sample, f_cur])
# 1*1 降维
x = layers.Conv2D(out_channels, (1, 1), padding='same', name='east_reduce_channel_conv_{}'.format(index))(merge)
x = layers.BatchNormalization(name='east_reduce_channel_bn_{}'.format(index))(x)
x = layers.Activation(activation='relu', name='east_reduce_channel_relu_{}'.format(index))(x)
# 3*3 提取特征
x = layers.Conv2D(out_channels, (3, 3), padding='same', name='east_extract_feature_conv_{}'.format(index))(x)
x = layers.BatchNormalization(name='east_extract_feature_bn_{}'.format(index))(x)
x = layers.Activation(activation='relu', name='east_extract_feature_relu_{}'.format(index))(x)
return x
def east(features):
"""
east网络头
:param features: 特征列表: f1, f2, f3, f4分别代表32,16,8,4倍下采样的特征
:return:
"""
f1, f2, f3, f4 = features
# 特征合并分支
h2 = merge_block(f1, f2, 128, 2)
h3 = merge_block(h2, f3, 64, 3)
h4 = merge_block(h3, f4, 32, 4)
# 提取g4特征
x = layers.Conv2D(32, (3, 3), padding='same', name='east_g4_conv')(h4)
x = layers.BatchNormalization(name='east_g4_bn')(x)
x = layers.Activation(activation='relu', name='east_g4_relu')(x)
# 预测得分
predict_score = layers.Conv2D(1, (1, 1), name='predict_score_map')(x)
# 预测距离
predict_geo_dist = layers.Conv2D(4, (1, 1), activation='relu', name='predict_geo_dist')(x) # 距离必须大于零
# 预测角度
predict_geo_angle = layers.Conv2D(1, (1, 1), name='predict_geo_angle')(x)
return predict_score, predict_geo_dist, predict_geo_angle
def compile(keras_model, config, loss_names=[]):
"""
编译模型,增加损失函数,L2正则化以
:param keras_model:
:param config:
:param loss_names: 损失函数列表
:return:
"""
# 优化目标
optimizer = keras.optimizers.SGD(
lr=config.LEARNING_RATE, momentum=config.LEARNING_MOMENTUM,
clipnorm=config.GRADIENT_CLIP_NORM)
# 增加损失函数,首先清除之前的,防止重复
keras_model._losses = []
keras_model._per_input_losses = {}
for name in loss_names:
layer = keras_model.get_layer(name)
if layer is None or layer.output in keras_model.losses:
continue
loss = (tf.reduce_mean(layer.output, keepdims=True)
* config.LOSS_WEIGHTS.get(name, 1.))
keras_model.add_loss(loss)
# 增加L2正则化
# 跳过批标准化层的 gamma 和 beta 权重
reg_losses = [
keras.regularizers.l2(config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
for w in keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
keras_model.add_loss(tf.add_n(reg_losses))
# 编译
keras_model.compile(
optimizer=optimizer,
loss=[None] * len(keras_model.outputs)) # 使用虚拟损失
# 为每个损失函数增加度量
for name in loss_names:
if name in keras_model.metrics_names:
continue
layer = keras_model.get_layer(name)
if layer is None:
continue
keras_model.metrics_names.append(name)
loss = (
tf.reduce_mean(layer.output, keepdims=True)
* config.LOSS_WEIGHTS.get(name, 1.))
keras_model.metrics_tensors.append(loss)
def add_metrics(keras_model, metric_name_list, metric_tensor_list):
"""
增加度量
:param keras_model: 模型
:param metric_name_list: 度量名称列表
:param metric_tensor_list: 度量张量列表
:return: 无
"""
for name, tensor in zip(metric_name_list, metric_tensor_list):
keras_model.metrics_names.append(name)
keras_model.metrics_tensors.append(tf.reduce_mean(tensor, keepdims=True))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
220,
220,
9220,
6530,
171,
120,
248,
220,
220,
220,
220,
2746,
201,
198,
220,
220,
12489,
1058,
10545,
101,
94,
161,
252,
233,
161,
109,
224,
2... | 1.803873 | 2,427 |
from scipy.special import comb
from scipy.special import beta as betafunc
import numpy as np
| [
6738,
629,
541,
88,
13,
20887,
1330,
1974,
198,
6738,
629,
541,
88,
13,
20887,
1330,
12159,
355,
731,
1878,
19524,
198,
11748,
299,
32152,
355,
45941,
198
] | 3.321429 | 28 |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
try:
from secret_key import *
except ImportError:
SETTINGS_DIR=os.path.abspath(os.path.dirname(__file__))
generate_secret_key(os.path.join(SETTINGS_DIR, 'secret_key.py'))
from secret_key import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'operators',
'vnfs',
'oocran',
'pools',
'bbus',
'ns',
'ues',
'vims',
'scripts',
'alerts',
'schedulers',
'keys',
'images',
'scenarios',
'bootstrapform',
'django_celery_beat',
'django_celery_results',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'oocran.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [''],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'oocran.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
#DATABASES = {
#'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'oocran',
# 'HOST': '127.0.0.1',
# 'PORT': '3306',
# 'USER': 'oocran',
# 'PASSWORD': 'oocran',
#}}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'repositories')
MEDIA_URL = '/resources/'
LOGIN_REDIRECT_URL = '/operators'
# Celery
BROKER_URL = "amqp://oocran:oocran@localhost:5672/oocran"
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_RESULT_BACKEND = 'django-db'
CELERY_RESULT_BACKEND = 'django-cache'
# InfluxDB
INFLUXDB = {
'default': {
'host': '127.0.0.1',
'port': '8086',
'username': 'admin',
'password': 'admin',
}
}
# Grafana
GRAFANA = 'localhost:3000' | [
11748,
28686,
628,
198,
2,
10934,
13532,
2641,
262,
1628,
588,
428,
25,
28686,
13,
6978,
13,
22179,
7,
33,
11159,
62,
34720,
11,
2644,
8,
198,
33,
11159,
62,
34720,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
1590... | 2.227273 | 1,804 |
import re
import sys
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
# alternatives: seaborn, plotnine,
from PyQt5.QtCore import Qt,pyqtSignal
from PyQt5.QtWidgets import QFormLayout
from Orange.widgets.widget import OWWidget, Input
from Orange.widgets import gui
from Orange.data import Table, Domain
from Orange.widgets.utils.itemmodels import DomainModel
| [
11748,
302,
198,
11748,
25064,
198,
6738,
2603,
29487,
8019,
13,
26875,
1330,
11291,
198,
6738,
2603,
29487,
8019,
13,
1891,
2412,
13,
1891,
437,
62,
39568,
19,
9460,
1330,
11291,
6090,
11017,
48,
5603,
1130,
355,
11291,
6090,
11017,
19... | 3.282443 | 131 |
####################################################################################################################################
####################################################################################################################################
####
#### MIT License
####
#### ParaMonte: plain powerful parallel Monte Carlo library.
####
#### Copyright (C) 2012-present, The Computational Data Science Lab
####
#### This file is part of the ParaMonte library.
####
#### Permission is hereby granted, free of charge, to any person obtaining a
#### copy of this software and associated documentation files (the "Software"),
#### to deal in the Software without restriction, including without limitation
#### the rights to use, copy, modify, merge, publish, distribute, sublicense,
#### and/or sell copies of the Software, and to permit persons to whom the
#### Software is furnished to do so, subject to the following conditions:
####
#### The above copyright notice and this permission notice shall be
#### included in all copies or substantial portions of the Software.
####
#### THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#### EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#### MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#### IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
#### DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
#### OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
#### OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
####
#### ACKNOWLEDGMENT
####
#### ParaMonte is an honor-ware and its currency is acknowledgment and citations.
#### As per the ParaMonte library license agreement terms, if you use any parts of
#### this library for any purposes, kindly acknowledge the use of ParaMonte in your
#### work (education/research/industry/development/...) by citing the ParaMonte
#### library as described on this page:
####
#### https://github.com/cdslaborg/paramonte/blob/master/ACKNOWLEDGMENT.md
####
####################################################################################################################################
####################################################################################################################################
import numpy as np
import typing as tp
import pandas as pd
import weakref as wref
import _paramonte as pm
import _pmutils as pmutils
from paramonte.vis.Target import Target
from paramonte.vis._BasePlot import BasePlot
Struct = pmutils.Struct
newline = pmutils.newline
####################################################################################################################################
#### LineScatterPlot class
####################################################################################################################################
class LineScatterPlot(BasePlot):
"""
This is the LineScatterPlot class for generating instances
of line or scatter plots or the combination of the two in
two or three dimensions based on the visualization tools
of the ``matplotlib`` and ``seaborn`` Python libraries.
**Usage**
First generate an object of this class by optionally
passing the following parameters described below. Then call
the ``make()`` method. The generated object is also callable
with the same input parameters as the object's constructor.
**Parameters**
plotType
A string indicating the name of the plot to be constructed.
dataFrame (optional)
A pandas dataFrame whose data will be plotted.
methodName (optional)
The name of the ParaMonte sample requesting the BasePlot.
reportEnabled (optional)
A boolean whose value indicates whether guidelines should be
printed in the standard output.
resetPlot (optional)
A function that resets the properties of the plot as desired
from outside. If provided, a pointer to this function will be
saved for future internal usage.
**Attributes**
xcolumns
An attribute that determines the columns of dataFrame
to be visualized as the X-axis. It can have three forms:
1. A list of column indices in dataFrame.
2. A list of column names in dataFrame.columns.
3. A ``range(start,stop,step)`` of column indices.
Examples:
1. ``xcolumns = [0,1,4,3]``
2. ``xcolumns = ["SampleLogFunc","SampleVariable1"]``
3. ``xcolumns = range(17,7,-2)``
The default behavior includes all columns of the dataFrame.
ycolumns
An attribute that determines the columns of dataFrame
to be visualized as the Y-axis. It can have three forms:
1. A list of column indices in dataFrame.
2. A list of column names in dataFrame.columns.
3. A ``range(start,stop,step)`` of column indices.
Examples:
1. ``ycolumns = [0,1,4,3]``
2. ``ycolumns = ["SampleLogFunc","SampleVariable1"]``
3. ``ycolumns = range(17,7,-2)``
The default behavior includes all columns of the dataFrame.
zcolumns (exists only in 3D plot objects)
An attribute that determines the columns of dataFrame
to be visualized as the Z-axis. It can have three forms:
1. A list of column indices in dataFrame.
2. A list of column names in dataFrame.columns.
3. A ``range(start,stop,step)`` of column indices.
Examples:
1. ``zcolumns = [0,1,4,3]``
2. ``zcolumns = ["SampleLogFunc","SampleVariable1"]``
3. ``zcolumns = range(17,7,-2)``
The default behavior includes all columns of the dataFrame.
ccolumns
An attribute that determines the columns of dataFrame
to be used for color mapping. It can have three forms:
1. A list of column indices in dataFrame.
2. A list of column names in dataFrame.columns.
3. A ``range(start,stop,step)`` of column indices.
Examples:
1. ``ccolumns = [0,1,4,3]``
2. ``ccolumns = ["SampleLogFunc","SampleVariable1"]``
3. ``ccolumns = range(17,7,-2)``
If ``ccolumns`` is set to ``None``, then no color-mapping
will be made. If it is set to an empty list ``[]``, then
the values from the ``rows`` attribute will be used for
color-mapping.
rows
An attribute that determines the rows of dataFrame to be
visualized. It can be either:
1. A ``range(start,stop,step)``, or,
2. A list of row indices in dataFrame.index.
Examples:
1. ``rows = range(17,7,-2)``
2. ``rows = [i for i in range(7,17)]``
The default behavior includes all rows of the dataFrame.
plot (exists only for line or lineScatter plots in 2D and 3D)
A structure with two attributes:
enabled
A boolean indicating whether a call to the ``plot()``
function of the matplotlib library should be made
or not.
kws
A structure whose components are directly passed as
keyword arguments to the ``plot()`` function.
Example usage:
.. code-block:: python
plot.enabled = True
plot.kws.linewidth = 1
**NOTE**
If a desired property is missing among the ``kws``
attributes, simply add the field and its value to
the component.
scatter (exists only for scatter / lineScatter plots in 2D and 3D)
A structure with two attributes:
enabled
A boolean indicating whether a call to the
``scatter()`` function of the matplotlib library
should be made or not.
kws
A structure whose components are directly passed as
keyword arguments to the ``scatter()`` function.
Example usage:
.. code-block:: python
scatter.enabled = True
scatter.kws.s = 2
**NOTE**
If a desired property is missing among the ``kws``
attributes, simply add the field and its value to
the component.
lineCollection (exists only for 2D / 3D line / lineScatter plots)
A structure with two attributes:
enabled
A boolean indicating whether a call to the
``LineCollection()`` class of the matplotlib
library should be made or not. This will result
in line plots that are color-mapped.
kws
A structure whose components are directly passed as
keyword arguments to the ``LineCollection()`` class.
Example usage:
.. code-block:: python
lineCollection.enabled = True
lineCollection.kws.linewidth = 1
**NOTE**
If a desired property is missing among the ``kws``
attributes, simply add the field and its value to
the component.
set
A structure with two attributes:
enabled
A boolean indicating whether a call to the ``set()``
function of the seaborn library should be made or not.
kws
A structure whose components are directly passed as
keyword arguments to the ``set()`` function.
Example usage:
.. code-block:: python
set.kws.style = "darkgrid"
**NOTE**
If a desired property is missing among the ``kws``
attributes, simply add the field and its value to
the component.
axes (available only in 1D and 2D plots)
A structure with one attribute:
kws
A structure whose components are directly passed as
keyword arguments to the ``gca()`` function of the
matplotlib library.
Example usage:
.. code-block:: python
axes.kws.faceColor = "w"
**NOTE**
If a desired property is missing among the ``kws``
attributes, simply add the field and its value to
the component.
axes3d (available only in 3D plots)
A structure with one attribute:
kws
A structure whose components are directly passed as
keyword arguments to the ``Axes3D()`` function of the
matplotlib library.
Example usage:
.. code-block:: python
axes3d.kws.faceColor = "w"
**NOTE**
If a desired property is missing among the ``kws``
attributes, simply add the field and its value to
the component.
figure
A structure with two attributes:
enabled
A boolean indicating whether a call to the ``figure()``
function of the matplotlib library should be made or not.
If a call is made, a new figure will be generated.
Otherwise, the current active figure will be used.
kws
A structure whose components are directly passed as
keyword arguments to the ``figure()`` function.
Example usage:
.. code-block:: python
figure.kws.faceColor = "w"
**NOTE**
If a desired property is missing among the ``kws``
attributes, simply add the field and its value to
the component.
colorbar (exists only for plots that require colorbar)
A structure with two attributes:
enabled
A boolean indicating whether a call to the ``colorbar()``
function of the matplotlib library should be made or not.
If a call is made, a new figure will be generated.
Otherwise, the current active figure will be used.
kws
A structure whose components are directly passed as
keyword arguments to the ``colorbar()`` function of
the matplotlib library.
**NOTE**
If a desired property is missing among the ``kws``
attributes, simply add the field and its value to
the component.
A colorbar will be added to a plot only if a color-mappings
is requested in the plot.
legend (may not exist for some types of plots)
A structure with two attributes:
enabled
A boolean indicating whether a call to the ``legend()``
function of the matplotlib library should be made or not.
If a call is made, a new figure will be generated.
Otherwise, the current active figure will be used.
kws
A structure whose components are directly passed as
keyword arguments to the ``legend()`` function.
Example usage:
.. code-block:: python
legend.kws.labels = ["Variable1", "Variable2"]
**NOTE**
If a desired property is missing among the ``kws``
attributes, simply add the field and its value to
the component.
A legend will be added to a plot only if no color-mappings are
requested in the plot.
currentFig
A structure whose attributes are the outputs of various plotting
tools used to make the current figure. These include the handle
to the current figure, the handle to the current axes in the plot,
the handle to the colorbar (if any exists), and other Python
plotting tools used to make to generate the figure.
target (available only in 1D and 2D plot objects)
A callable object of the ParaMonte library's ``Target`` class
which can be used to add target point or lines to the current
active plot.
**Returns**
An object of class ``LineScatterPlot``.
---------------------------------------------------------------------------
"""
################################################################################################################################
#### __init__
################################################################################################################################
################################################################################################################################
#### _reset
################################################################################################################################
################################################################################################################################
#### __call__
################################################################################################################################
def __call__( self
, reself : tp.Optional[ bool ] = False
, **kwargs
):
"""
Call the ``make()`` method of the current
instance of the class.
**Parameters**
Any arguments that can be passed to the
``make()`` method of the plot object.
**Returns**
Any return value from the ``make()``
method of the plot object.
"""
return self.make(reself, **kwargs)
################################################################################################################################
#### make
################################################################################################################################
def make( self
, reself : tp.Optional[ bool ] = False
, **kwargs
):
"""
Generate a line/scatter plot from the
selected columns of the object's dataframe.
**Parameters**
reself
A logical variable. If ``True``, an instance of
the object will be returned to the calling routine
upon exit. The default value is ``False``.
**Returns**
The object self if ``reself = True`` otherwise, ``None``.
However, this method causes side-effects by manipulating
the existing attributes of the object.
"""
for key in kwargs.keys():
if hasattr(self,key):
setattr(self, key, kwargs[key])
elif key=="dataFrame":
setattr( self, "_dfref", wref.ref(kwargs[key]) )
else:
raise Exception ( "Unrecognized input '"+key+"' class attribute detected." + newline
+ self._getDocString()
)
# set what to plot
cEnabled = self.ccolumns is not None
from collections.abc import Iterable
if self.ccolumns is not None and not isinstance(self.ccolumns, Iterable): self.ccolumns = [self.ccolumns]
# if no colormap, then
if self._type.isLine and not cEnabled: self.plot.enabled = True
############################################################################################################################
#### scatter plot properties
############################################################################################################################
if self._type.isScatter:
if isinstance(self.scatter.kws,Struct):
if "s" not in vars(self.scatter.kws).keys(): self.scatter.kws.s = 2
if "c" not in vars(self.scatter.kws).keys(): self.scatter.kws.c = None
if "cmap" not in vars(self.scatter.kws).keys() or self.scatter.kws.cmap is None: self.scatter.kws.cmap = "autumn"
if "alpha" not in vars(self.scatter.kws).keys(): self.scatter.kws.alpha = 1
if "edgeColor" not in vars(self.scatter.kws).keys(): self.scatter.kws.edgeColor = None
if "zorder" not in vars(self.scatter.kws).keys(): self.scatter.kws.zorder = 2
if not cEnabled: self.scatter.kws.cmap = None
else:
raise Exception ( "The scatter.kws component of the current LineScatterPlot object must" + newline
+ "be an object of class Struct(), essentially a structure with components" + newline
+ "whose names are the input arguments to the scatter() function of the" + newline
+ "matplotlib library." + newline
+ self._getDocString()
)
############################################################################################################################
#### line plot properties
############################################################################################################################
if self._type.isLine:
if isinstance(self.plot.kws,Struct):
if "linewidth" in vars(self.plot.kws).keys():
if self.plot.kws.linewidth==0: self.plot.kws.linewidth = 1
else:
self.plot.kws.linewidth = 1
if "zorder" not in vars(self.plot.kws).keys(): self.plot.kws.zorder = 1
else:
raise Exception ( "The plot.kws component of the current LineScatterPlot object must" + newline
+ "be an object of class Struct(), essentially a structure with components" + newline
+ "whose names are the input arguments to the plot() function of the" + newline
+ "matplotlib library." + newline
+ self._getDocString()
)
if isinstance(self.lineCollection.kws, Struct):
if "cmap" not in vars(self.lineCollection.kws).keys() or self.lineCollection.kws.cmap is None: self.lineCollection.kws.cmap = "autumn"
if "alpha" not in vars(self.lineCollection.kws).keys(): self.lineCollection.kws.alpha = 1
if "linewidth" not in vars(self.lineCollection.kws).keys(): self.lineCollection.kws.linewidth = 1
else:
objectType = "LineCollection"
if self._type.is3d: objectType = "Line3DCollection"
raise Exception ( "The lineCollection.kws component of the current LineScatterPlot object must" + newline
+ "be an object of class Struct(), essentially a structure with components" + newline
+ "whose names are the input arguments to the " + objectType + "() class of the" + newline
+ "matplotlib library." + newline
+ self._getDocString()
)
############################################################################################################################
#### legend properties
############################################################################################################################
if self.legend.enabled:
if not isinstance(self.legend.kws,Struct):
raise Exception ( "The legend.kws component of the current LineScatterPlot object must" + newline
+ "be an object of class Struct(), essentially a structure with components" + newline
+ "whose names are the input arguments to the legend() function of the" + newline
+ "matplotlib library." + newline
+ self._getDocString()
)
############################################################################################################################
#### figure properties
############################################################################################################################
if self.figure.enabled:
if isinstance(self.figure.kws, Struct):
if "dpi" not in vars(self.figure.kws).keys(): self.figure.kws.dpi = 150
if "faceColor" not in vars(self.figure.kws).keys(): self.figure.kws.faceColor = "w"
if "edgeColor" not in vars(self.figure.kws).keys(): self.figure.kws.edgeColor = "w"
else:
raise Exception ( "The figure.kws component of the current LineScatterPlot object must" + newline
+ "be an object of class Struct(), essentially a structure with components" + newline
+ "whose names are the input arguments to the figure() function of the" + newline
+ "matplotlib library." + newline
+ self._getDocString()
)
############################################################################################################################
############################################################################################################################
if self._isdryrun: return
############################################################################################################################
############################################################################################################################
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
if self._type.is3d: from mpl_toolkits.mplot3d.art3d import Line3DCollection
############################################################################################################################
#### generate figure and axes if needed
############################################################################################################################
self._constructBasePlot()
############################################################################################################################
#### check data type
############################################################################################################################
self._checkDataType()
############################################################################################################################
#### check rows presence. This must be checked here, because it depends on the integrity of the in input dataFrame.
############################################################################################################################
if self.rows is None: self.rows = range(len(self._dfref().index))
############################################################################################################################
#### check columns presence. This must be checked here, because it depends on the integrity of the in input dataFrame.
############################################################################################################################
if self.xcolumns is None:
lgxicol = 0
xcolindex = []
xcolnames = ["Count"]
if self._type.isScatter : self.scatter._xvalues = np.array( self._dfref().index[self.rows] + self._indexOffset ).flatten()
if self._type.isLine : self.plot._xvalues = np.array( self._dfref().index[self.rows] + self._indexOffset ).flatten()
else:
xcolnames, xcolindex = pm.dfutils.getColNamesIndex(self._dfref().columns,self.xcolumns)
if self.ycolumns is None:
lgyicol = 0
ycolindex = []
ycolnames = ["Count"]
if self._type.isScatter : self.scatter._yvalues = np.array( self._dfref().index[self.rows] + self._indexOffset ).flatten()
if self._type.isLine : self.plot._yvalues = np.array( self._dfref().index[self.rows] + self._indexOffset ).flatten()
else:
ycolnames, ycolindex = pm.dfutils.getColNamesIndex(self._dfref().columns,self.ycolumns)
if self._type.is3d:
if self.zcolumns is None:
lgzicol = 0
zcolindex = []
zcolnames = ["Count"]
if self._type.isScatter : self.scatter._zvalues = np.array( self._dfref().index[self.rows] + self._indexOffset ).flatten()
if self._type.isLine : self.plot._zvalues = np.array( self._dfref().index[self.rows] + self._indexOffset ).flatten()
else:
zcolnames, zcolindex = pm.dfutils.getColNamesIndex(self._dfref().columns, self.zcolumns)
############################################################################################################################
#### set colormap data
############################################################################################################################
if cEnabled:
if len(self.ccolumns)==0:
ccolindex = []
ccolnames = ["Count"]
if self._type.isScatter : self.scatter.kws.c = np.array( self._dfref().index[self.rows] + self._indexOffset ).flatten()
if self._type.isLine : cdata = np.array( self._dfref().index[self.rows] + self._indexOffset ).flatten()
else:
ccolnames, ccolindex = pm.dfutils.getColNamesIndex(self._dfref().columns,self.ccolumns)
else:
ccolindex = []
ccolnames = []
#self.scatter.kws.c = None
############################################################################################################################
#### check the consistency of the lengths
############################################################################################################################
xcolindexlen = len(xcolindex)
ycolindexlen = len(ycolindex)
ccolindexlen = len(ccolindex)
maxLenColumns = np.max ( [ xcolindexlen
, ycolindexlen
, ccolindexlen
]
)
if xcolindexlen!=maxLenColumns and xcolindexlen>1: raise Exception("length of xcolumns must be either 1 or equal to the lengths of ycolumns or ccolumns.")
if ycolindexlen!=maxLenColumns and ycolindexlen>1: raise Exception("length of ycolumns must be either 1 or equal to the lengths of xcolumns or ccolumns.")
if ccolindexlen!=maxLenColumns and ccolindexlen>1: raise Exception("length of ccolumns must be either 1 or equal to the lengths of xcolumns or ycolumns.")
if self._type.is3d:
zcolindexlen = len(zcolindex)
if zcolindexlen!=maxLenColumns and zcolindexlen>1: raise Exception("length of zcolumns must be either 1 or equal to the lengths of xcolumns or ycolumns.")
############################################################################################################################
#### assign data in case of single column assignments
############################################################################################################################
if xcolindexlen==1:
lgxicol = 0
if self._type.isScatter : self.scatter._xvalues = self._dfref().iloc[self.rows,xcolindex].values.flatten()
if self._type.isLine : self.plot._xvalues = self._dfref().iloc[self.rows,xcolindex].values.flatten()
if ycolindexlen==1:
lgyicol = 0
if self._type.isScatter : self.scatter._yvalues = self._dfref().iloc[self.rows,ycolindex].values.flatten()
if self._type.isLine : self.plot._yvalues = self._dfref().iloc[self.rows,ycolindex].values.flatten()
if self._type.is3d:
if zcolindexlen==1:
lgzicol = 0
if self._type.isScatter : self.scatter._zvalues = self._dfref().iloc[self.rows,zcolindex].values.flatten()
if self._type.isLine : self.plot._zvalues = self._dfref().iloc[self.rows,zcolindex].values.flatten()
if cEnabled:
if ccolindexlen==1:
if self._type.isScatter : self.scatter.kws.c = self._dfref().iloc[self.rows,ccolindex].values.flatten()
if self._type.isLine : cdata = self._dfref().iloc[self.rows,ccolindex].values.flatten()
############################################################################################################################
#### add line/scatter plot
############################################################################################################################
if self.legend.enabled: self.legend._labels = []
for i in range(maxLenColumns):
if xcolindexlen>1:
lgxicol = i
if self._type.isScatter : self.scatter._xvalues = self._dfref().iloc[self.rows,xcolindex[i]].values.flatten()
if self._type.isLine : self.plot._xvalues = self._dfref().iloc[self.rows,xcolindex[i]].values.flatten()
if ycolindexlen>1:
lgyicol = i
if self._type.isScatter : self.scatter._yvalues = self._dfref().iloc[self.rows,ycolindex[i]].values.flatten()
if self._type.isLine : self.plot._yvalues = self._dfref().iloc[self.rows,ycolindex[i]].values.flatten()
if cEnabled:
if ccolindexlen>1:
if self._type.isScatter : self.scatter.kws.c = self._dfref().iloc[self.rows,ccolindex[i]].values.flatten()
if self._type.isLine : cdata = self._dfref().iloc[self.rows,ccolindex[i]].values.flatten()
if self.legend.enabled:
if xcolindexlen<2 and ycolindexlen>1:
self.legend._labels.append(ycolnames[lgyicol])
elif xcolindexlen>1 and ycolindexlen<2:
self.legend._labels.append(xcolnames[lgxicol])
else:
self.legend._labels.append( xcolnames[lgxicol] + "-" + ycolnames[lgyicol] )
if self._type.is3d:
if zcolindexlen>1:
lgzicol = i
if self._type.isScatter : self.scatter._zvalues = self._dfref().iloc[self.rows,zcolindex[i]].values.flatten()
if self._type.isLine : self.plot._zvalues = self._dfref().iloc[self.rows,zcolindex[i]].values.flatten()
if self.legend.enabled:
if zcolindexlen>1: self.legend._labels[-1] += "-" + zcolnames[lgzicol]
########################################################################################################################
#### add scatter plot
########################################################################################################################
if self._type.isScatter and self.scatter.enabled:
if self._type.is3d:
self.currentFig.scatter = self.currentFig.axes.scatter ( self.scatter._xvalues
, self.scatter._yvalues
, self.scatter._zvalues
, **vars(self.scatter.kws)
)
else:
self.currentFig.scatter = self.currentFig.axes.scatter ( self.scatter._xvalues
, self.scatter._yvalues
, **vars(self.scatter.kws)
)
########################################################################################################################
#### add line plot
########################################################################################################################
if self._type.isLine:
if self.plot.enabled:
if self._type.is3d:
self.currentFig.plot = self.currentFig.axes.plot( self.plot._xvalues
, self.plot._yvalues
, self.plot._zvalues
, **vars(self.plot.kws)
)
else:
self.currentFig.plot = self.currentFig.axes.plot( self.plot._xvalues
, self.plot._yvalues
, **vars(self.plot.kws)
)
if cEnabled and self.lineCollection.enabled:
self.lineCollection.kws.norm = norm = plt.Normalize(cdata.min(), cdata.max())
if self._type.is3d:
# properly and automatically set the axes limits via plot()
self.currentFig.plot = self.currentFig.axes.plot( self.plot._xvalues
, self.plot._yvalues
, self.plot._zvalues
, linewidth = 0
)
# now add the lineCollection
points = np.array([self.plot._xvalues, self.plot._yvalues, self.plot._zvalues]).T.reshape(-1, 1, 3)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lineCollection = Line3DCollection( segments, **vars(self.lineCollection.kws) )
else:
# properly and automatically set the axes limits via plot()
self.currentFig.plot = self.currentFig.axes.plot( self.plot._xvalues
, self.plot._yvalues
, linewidth = 0
)
# now add the lineCollection
points = np.array([self.plot._xvalues, self.plot._yvalues]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lineCollection = LineCollection( segments, **vars(self.lineCollection.kws) )
lineCollection.set_array(cdata)
#lineCollection.set_linewidth(0.5)
#lineCollection.set_solid_capstyle("round")
self.currentFig.lineCollection = self.currentFig.axes.add_collection(lineCollection)
############################################################################################################################
#### add colorbar
############################################################################################################################
cbarEnabled = cEnabled and self.colorbar.enabled and (ccolindexlen<2) # and (not hasattr(self.currentFig,"colorbar"))
if cbarEnabled:
self.colorbar.kws.mappable = None
if self._type.isLine and self.lineCollection.enabled:
self.colorbar.kws.mappable = self.currentFig.lineCollection
elif self._type.isScatter and self.scatter.enabled:
self.colorbar.kws.mappable = self.currentFig.scatter
if self.colorbar.kws.mappable is not None:
self.colorbar.kws.ax = self.currentFig.axes
self.currentFig.colorbar = self.currentFig.figure.colorbar( **vars(self.colorbar.kws) )
self.currentFig.colorbar.set_label( label = ", ".join(ccolnames) )
############################################################################################################################
#### set axes scales
############################################################################################################################
if self._xscale is not None: self.currentFig.axes.set_xscale(self._xscale)
if self._yscale is not None: self.currentFig.axes.set_yscale(self._yscale)
if self._zscale is not None and self._type.is3d: self.currentFig.axes.set_zscale(self._zscale)
############################################################################################################################
#### set axes limits
############################################################################################################################
if self._xlimit is not None:
currentLim = list(self.currentFig.axes.get_xlim())
if self._xlimit[0] is not None: currentLim[0] = self._xlimit[0]
if self._xlimit[1] is not None: currentLim[1] = self._xlimit[1]
self.currentFig.axes.set_xlim(currentLim)
if self._ylimit is not None:
currentLim = list(self.currentFig.axes.get_ylim())
if self._ylimit[0] is not None: currentLim[0] = self._ylimit[0]
if self._ylimit[1] is not None: currentLim[1] = self._ylimit[1]
self.currentFig.axes.set_ylim(currentLim)
if self._zlimit is not None and self._type.is3d:
currentLim = list(self.currentFig.axes.get_zlim())
if self._zlimit[0] is not None: currentLim[0] = self._zlimit[0]
if self._zlimit[1] is not None: currentLim[1] = self._zlimit[1]
self.currentFig.axes.set_zlim(currentLim)
############################################################################################################################
#### add axes labels
############################################################################################################################
if self._xlabel is None:
if xcolindexlen>1:
self.currentFig.axes.set_xlabel("Variable Values")
else:
self.currentFig.axes.set_xlabel(xcolnames[0])
else:
self.currentFig.axes.set_xlabel(self._xlabel)
if self._ylabel is None:
if ycolindexlen>1:
self.currentFig.axes.set_ylabel("Variable Values")
else:
self.currentFig.axes.set_ylabel(ycolnames[0])
else:
self.currentFig.axes.set_ylabel(self._ylabel)
if self._type.is3d:
if self._zlabel is None:
if zcolindexlen>1:
self.currentFig.axes.set_zlabel("Variable Values")
else:
self.currentFig.axes.set_zlabel(zcolnames[0])
else:
self.currentFig.axes.set_zlabel(self._zlabel)
############################################################################################################################
#### set legend and other BasePlot properties
############################################################################################################################
self._finalizeBasePlot()
if not self._type.is3d: self.target.currentFig.axes = self.currentFig.axes
############################################################################################################################
if reself: return self
################################################################################################################################
#### _getDocString
################################################################################################################################
################################################################################################################################
#### helpme
################################################################################################################################
def helpme(self, topic=None):
"""
Print the documentation for the input string topic.
If the topic does not exist, the documentation for
the object will be printed.
**Parameters**
topic (optional)
A string containing the name of the object
for which help is needed.
**Returns**
None
**Example**
.. code-block:: python
:linenos:
helpme()
helpme("make")
helpme("helpme")
helpme("getLogLinSpace")
"""
try:
exec("print(self."+topic+".__doc__)")
except:
print(self._getDocString())
return None
################################################################################################################################
| [
29113,
29113,
29113,
29113,
4242,
198,
29113,
29113,
29113,
29113,
4242,
198,
4242,
198,
4242,
220,
220,
17168,
13789,
198,
4242,
198,
4242,
220,
220,
2547,
64,
9069,
660,
25,
8631,
3665,
10730,
22489,
40089,
5888,
13,
198,
4242,
198,
4... | 2.288013 | 20,447 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# author: IBM Corporation
# description: Highly-customizable Ansible module
# for installing and configuring IBM Spectrum Scale (GPFS)
# company: IBM
# license: Apache-2.0
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'IBM',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: ibm_ss_cluster
short_description: IBM Spectrum Scale Cluster Management
version_added: "0.0"
description:
- This module can be used to create or delete an IBM Spectrum Scale
Cluster or retrieve information about the cluster.
options:
op:
description:
- An operation to execute on the IBM Spectrum Scale Cluster.
Mutually exclusive with the state operand.
required: false
state:
description:
- The desired state of the cluster.
required: false
default: "present"
choices: [ "present", "absent" ]
stanza:
description:
- Cluster blueprint that defines membership and node attributes
required: false
name:
description:
- The name of the cluster to be created, deleted or whose
information is to be retrieved
required: false
'''
EXAMPLES = '''
# Retrive information about an existing IBM Spectrum Scale cluster
- name: Retrieve IBM Spectrum Scale Cluster information
ibm_ss_cluster:
op: list
# Create a new IBM Spectrum Scale Cluster
- name: Create an IBM Spectrum Scale Cluster
ibm_ss_cluster:
state: present
stanza: "/tmp/stanza"
name: "host-01"
# Delete an existing IBM Spectrum Scale Cluster
- name: Delete an IBM Spectrum Scale Cluster
ibm_ss_cluster:
state: absent
name: "host-01"
'''
RETURN = '''
changed:
description: A boolean indicating if the module has made changes
type: boolean
returned: always
msg:
description: The output from the cluster create/delete operations
type: str
returned: when supported
rc:
description: The return code from the IBM Spectrum Scale mm command
type: int
returned: always
results:
description: The JSON document containing the cluster information
type: str
returned: when supported
'''
import os
import json
import sys
from ansible.module_utils.basic import AnsibleModule
#TODO: FIX THIS. If the modules and utils are located in a non standard
# path, the PYTHONPATH will need to be exported in the .bashrc
#from ansible.module_utils.ibm_ss_utils import runCmd, parse_aggregate_cmd_output, RC_SUCCESS
from ansible.module_utils.ibm_ss_utils import runCmd, parse_aggregate_cmd_output, RC_SUCCESS
MMLSCLUSTER_SUMMARY_FIELDS=['clusterSummary','cnfsSummary', 'cesSummary']
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
1772,
25,
19764,
10501,
198,
2,
6764,
25,
38254,
12,
23144,
13821,
28038,
856,
8265,
220,
198,
2,
329,
15975,
290,
... | 2.732824 | 1,048 |
# Branch and bound method
import queue
class Node:
'''
upbound: upbound value of node
value: value of current node
weight: weight of current node
level: level number of the current node on the subset tree
'''
upbound = 0
value = 0
weight = 0
level = 0
class Global:
'''
n: number of items
capacity: capacity of bag
value: value of each item
weight: weight of each item
currentW: current weight of bag
currentV: current value of bag
bestP: current global best solution
perV: value of per weight
order: order of each item
select: selcction result
count: number of accessing leaf node
'''
capacity = 0
value = []
weight = []
currentW = 0
currentV = 0
bestP = 0
perV = []
order = []
select = []
Final = []
count = 0
heap = queue.LifoQueue()
if __name__ == '__main__':
BranchBoundRun()
| [
2,
20551,
290,
5421,
2446,
201,
198,
11748,
16834,
201,
198,
201,
198,
4871,
19081,
25,
201,
198,
220,
220,
220,
705,
7061,
201,
198,
220,
220,
220,
510,
7784,
25,
510,
7784,
1988,
286,
10139,
201,
198,
220,
220,
220,
1988,
25,
22... | 2.354312 | 429 |
import numpy as np
import dataguzzler as dg
import dg_metadata as dgm
import dg_file as dgf
fin = "/home/linuxadm/usr_local/src/freecad-git032314/build/data/Mod/Robot/Lib/Kuka/kr125_3.wrl"
fout = "/tmp/robot.dgs"
wfmdict={}
fh=open(fin,"r");
wfmdict["robot"]=dg.wfminfo()
wfmdict["robot"].Name="robot"
wfmdict["robot"].dimlen=np.array((),dtype='i8')
dgm.AddMetaDatumWI(wfmdict["robot"],dgm.MetaDatum("VRML97Geom",fh.read()))
dgf.savesnapshot(fout,wfmdict)
| [
11748,
299,
32152,
355,
45941,
198,
198,
11748,
4818,
363,
4715,
1754,
355,
288,
70,
198,
11748,
288,
70,
62,
38993,
355,
288,
39870,
198,
11748,
288,
70,
62,
7753,
355,
288,
70,
69,
198,
198,
15643,
796,
12813,
11195,
14,
23289,
32... | 2.008658 | 231 |
from setuptools import setup, find_packages
setup(
package_dir={"": "src"},
packages=find_packages("src") + ['omero.plugins'],
use_scm_version={"write_to": "src/napari_omero/_version.py"},
setup_requires=["setuptools_scm"],
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
198,
220,
220,
220,
5301,
62,
15908,
28,
4895,
1298,
366,
10677,
25719,
198,
220,
220,
220,
10392,
28,
19796,
62,
43789,
7203,
10677,
4943,
1343,
37250,
296... | 2.53125 | 96 |
print(part_2())
| [
628,
628,
198,
4798,
7,
3911,
62,
17,
28955,
198
] | 2.1 | 10 |
import requests
import lxml
from bs4 import BeautifulSoup
import discord
import os
token = os.environ.get("S3_KEY")
webpage = "https://www.worldometers.info/coronavirus/"
# ------------- start data gathering ---------------------- #
# ------------- end data gathering ---------------------- #
client = discord.Client()
dataGather = dataGather()
@client.event
client.run(token)
| [
11748,
7007,
198,
11748,
300,
19875,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
36446,
198,
11748,
28686,
198,
198,
30001,
796,
28686,
13,
268,
2268,
13,
1136,
7203,
50,
18,
62,
20373,
4943,
198,
12384,
7700,
796,
366... | 3.391304 | 115 |
# -*- coding: utf-8 -*-
"""Integration/acceptance tests for `ndex2.client` package."""
import os
import re
import sys
import io
import time
import unittest
import json
import uuid
from datetime import datetime
import requests
from requests.exceptions import HTTPError
import ndex2
from ndex2.nice_cx_network import NiceCXNetwork
from ndex2.client import Ndex2
from ndex2.client import DecimalEncoder
from ndex2.exceptions import NDExUnauthorizedError
from ndex2.exceptions import NDExNotFoundError
from ndex2.exceptions import NDExError
SKIP_REASON = 'NDEX2_TEST_SERVER, NDEX2_TEST_USER, NDEX2_TEST_PASS ' \
'environment variables not set, cannot run integration' \
' tests with server'
@unittest.skipUnless(os.getenv('NDEX2_TEST_SERVER') is not None, SKIP_REASON)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
34500,
1358,
14,
13635,
590,
5254,
329,
4600,
358,
1069,
17,
13,
16366,
63,
5301,
526,
15931,
198,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198... | 2.721088 | 294 |