Dataset schema (per-column type and value range; for content and input_ids the range is a length):

    column             type      min    max
    content            string    1      1.04M   (string length, characters)
    input_ids          list      1      774k    (list length, tokens)
    ratio_char_token   float64   0.38   22.9
    token_count        int64     1      774k
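The columns fit together in a simple way: token_count is just len(input_ids), and ratio_char_token is len(content) divided by token_count. The sketch below recomputes both for one sample; it assumes the ids were produced by a GPT-2-style byte-level BPE tokenizer (the visible ids, e.g. 198 where the source has a newline and 11748 where a sample starts with `import`, look like GPT-2 vocabulary, but the exact tokenizer and the use of the `transformers` package here are assumptions, not something stated in this dump).

```python
# Minimal sketch (assumptions: GPT-2-style BPE via the `transformers` package;
# the real tokenizer behind this dump may differ).
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")


def describe(content: str) -> dict:
    """Recompute the derived columns for one `content` string."""
    input_ids = tokenizer(content)["input_ids"]
    token_count = len(input_ids)
    return {
        "input_ids": input_ids,
        "token_count": token_count,
        "ratio_char_token": len(content) / token_count,
    }


row = describe("import pandas as pd\nimport os\n")
print(row["token_count"], round(row["ratio_char_token"], 2))
```

Swapping in the actual tokenizer should reproduce the input_ids and token_count values shown in the rows below.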
# Find the largest and sqrt pair grid size

# Number of cells in an outermost grid.

# Distance to 'access port'.
# Distance will always be the sum of the two sides.
# i.e. From both sides

print(dist(1))
print('')
print(dist(12))
print('')
print(dist(23))
print(dist(1024))
print('')
print(dist(347991))
print('')
[ 2, 9938, 262, 4387, 290, 19862, 17034, 5166, 10706, 2546, 198, 198, 2, 7913, 286, 4778, 287, 281, 12076, 1712, 10706, 13, 198, 198, 2, 34600, 284, 705, 15526, 2493, 4458, 198, 2, 34600, 481, 1464, 307, 262, 2160, 286, 262, 734, 5389...
ratio_char_token: 2.75
token_count: 116
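Reading that first row as a sanity check: 2.75 characters per token times 116 tokens is about 319 characters, which should match len(content) for the snippet above; the same identity, ratio_char_token = len(content) / token_count, holds for every row that follows.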
#!/usr/bin/env python """Utilities working with Flask UIs""" __author__ = 'Michael Meisinger, Stephen Henrie' import traceback import flask from flask import request, jsonify import sys import json import simplejson from pyon.public import BadRequest, OT, get_ion_ts_millis from pyon.util.containers import get_datetime from interface.objects import ActorIdentity, SecurityToken, TokenTypeEnum CONT_TYPE_JSON = "application/json" CONT_TYPE_HTML = "text/html" # ------------------------------------------------------------------------- # Content encoding helpers # Set standard json functions json_dumps = json.dumps json_loads = simplejson.loads # Faster loading than regular json # ------------------------------------------------------------------------- # UI helpers def get_auth(): """ Returns a dict with user session values from server session. """ return dict(user_id=flask.session.get("actor_id", ""), actor_id=flask.session.get("actor_id", ""), username=flask.session.get("username", ""), full_name=flask.session.get("full_name", ""), attributes=flask.session.get("attributes", {}), roles=flask.session.get("roles", {}), is_logged_in=bool(flask.session.get("actor_id", "")), is_registered=bool(flask.session.get("actor_id", "")), valid_until=flask.session.get("valid_until", 0)) def set_auth(actor_id, username, full_name, valid_until, **kwargs): """ Sets server session based on user attributes. """ flask.session["actor_id"] = actor_id or "" flask.session["username"] = username or "" flask.session["full_name"] = full_name or "" flask.session["valid_until"] = valid_until or 0 flask.session["attributes"] = kwargs.copy() flask.session["roles"] = {} flask.session.modified = True def clear_auth(): """ Clears server session and empties user attributes. """ flask.session["actor_id"] = "" flask.session["username"] = "" flask.session["full_name"] = "" flask.session["valid_until"] = 0 flask.session["attributes"] = {} flask.session["roles"] = {} flask.session.modified = True class OAuthClientObj(object): """ Object holding information about an OAuth2 client for flask-oauthlib. """ client_id = None client_secret = "foo" is_confidential = False _redirect_uris = "https://foo" _default_scopes = "scioncc" @classmethod def from_actor_identity(cls, actor_obj): """ Factory method from a suitable ActorIdentity object """ if not actor_obj or not isinstance(actor_obj, ActorIdentity) or not actor_obj.details or \ actor_obj.details.type_ != OT.OAuthClientIdentityDetails: raise BadRequest("Bad actor identity object") oauth_client = OAuthClientObj() oauth_client.actor = actor_obj oauth_client.client_id = actor_obj._id oauth_client.is_confidential = actor_obj.details.is_confidential oauth_client._redirect_uris = actor_obj.details.redirect_uris oauth_client._default_scopes = actor_obj.details.default_scopes return oauth_client @property @property @property @property class OAuthTokenObj(object): """ Object holding information for an OAuth2 token for flask-oauthlib. 
""" access_token = None refresh_token = None token_type = None client_id = None expires = None user = None _scopes = None _token_obj = None @classmethod def from_security_token(cls, token_obj): """ Factory method from a SecurityToken object """ if not token_obj or not isinstance(token_obj, SecurityToken) \ or not token_obj.token_type in (TokenTypeEnum.OAUTH_ACCESS, TokenTypeEnum.OAUTH_REFRESH): raise BadRequest("Bad token object") oauth_token = OAuthTokenObj() oauth_token.access_token = token_obj.attributes.get("access_token", "") oauth_token.refresh_token = token_obj.attributes.get("refresh_token", "") oauth_token.token_type = "Bearer" oauth_token._scopes = token_obj.attributes.get("scopes", "") oauth_token.client_id = token_obj.attributes.get("client_id", "") oauth_token.expires = get_datetime(token_obj.expires, local_time=False) oauth_token.user = {"actor_id": token_obj.actor_id} oauth_token._token_obj = token_obj return oauth_token @property
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 37811, 18274, 2410, 1762, 351, 46947, 471, 3792, 37811, 198, 198, 834, 9800, 834, 796, 705, 13256, 2185, 1710, 263, 11, 7970, 6752, 5034, 6, 198, 198, 11748, 12854, 1891, 198, 117...
ratio_char_token: 2.633508
token_count: 1,719
import pandas as pd
import os
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.preprocessing import OneHotEncoder,LabelEncoder
from scipy import sparse
import featureCombinator
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 28686, 198, 6738, 1341, 35720, 13, 19849, 62, 49283, 1330, 4512, 62, 9288, 62, 35312, 198, 6738, 1341, 35720, 13, 30053, 62, 2302, 7861, 13, 5239, 1330, 2764, 38469, 7509, 11, 309, 69, 312,...
ratio_char_token: 3.69863
token_count: 73
hour = int(input('Hora de inicio:'))
min = int(input('Minuto de inicio:'))
dura = int(input('Duración del evento:'))

hours = (hour + (min + dura) // 60) % 24
minutes = (min + dura) % 60

print(f'{hours}:{minutes}')
[ 9769, 796, 493, 7, 15414, 10786, 39, 5799, 390, 287, 46441, 32105, 4008, 220, 198, 1084, 796, 493, 7, 15414, 10786, 9452, 9390, 390, 287, 46441, 32105, 4008, 198, 67, 5330, 796, 493, 7, 15414, 10786, 36927, 32009, 18840, 1619, 1785, 7...
ratio_char_token: 2.438202
token_count: 89
#
# voice-skill-sdk
#
# (C) 2021, Deutsche Telekom AG
#
# This file is distributed under the terms of the MIT license.
# For details see the file LICENSE in the top directory.
#

import json
import asyncio
import pathlib
import datetime
from datetime import date

from fastapi.testclient import TestClient

from skill_sdk import ui
from skill_sdk.utils import util
from skill_sdk.__version__ import __spi_version__

LOCALHOST = "http://localhost"


def test_if_ui_generated():
    """Tests files existence, not real UI unit test"""
    ui_root = pathlib.Path(ui.__file__).parent
    required_files = [
        (ui_root / "index.html").exists(),
        len(list((ui_root / "css").glob("app.*.css"))) == 1,
        len(list((ui_root / "css").glob("chunk-vendors.*.css"))) == 1,
        len(list((ui_root / "js").glob("app.*.js"))) == 1,
        len(list((ui_root / "js").glob("chunk-vendors.*.js"))) == 1,
    ]
    assert all((_ for _ in required_files))


def test_spi_version():
    """SPI Version is hardcoded into the TestIntent.vue"""
    ui_root = pathlib.Path(ui.__file__).parent
    assert [
        js
        for js in (ui_root / "js").glob("app.*.js")
        if f'spiVersion:"{__spi_version__}"' in js.read_text()
    ] != []
[ 2, 198, 2, 3809, 12, 42401, 12, 21282, 74, 198, 2, 198, 2, 357, 34, 8, 33448, 11, 36763, 14318, 74, 296, 13077, 198, 2, 198, 2, 770, 2393, 318, 9387, 739, 262, 2846, 286, 262, 17168, 5964, 13, 198, 2, 1114, 3307, 766, 262, 239...
ratio_char_token: 2.481113
token_count: 503
from StringIO import StringIO
import nose.tools as NT

from wsgiref.simple_server import demo_app
from paste.fixture import TestApp

import firepython as FPY
import firepython.utils as FU
import firepython._const as FC
import firepython.middleware as FM

try:
    import gprof2dot
except ImportError:
    gprof2dot = None
[ 6738, 10903, 9399, 1330, 10903, 9399, 198, 11748, 9686, 13, 31391, 355, 24563, 198, 198, 6738, 266, 45213, 557, 69, 13, 36439, 62, 15388, 1330, 13605, 62, 1324, 198, 6738, 17008, 13, 69, 9602, 1330, 6208, 4677, 198, 198, 11748, 2046, ...
ratio_char_token: 3.196078
token_count: 102
#file config
file_fits_flux_column = "Flux column"
file_fits_time_column = "Time column"
file_fits_hdulist_column = "Data column for hdulist"
file_ascii_skiprows = "Skipped rows in ascii file"
file_ascii_use_cols = "Used columns in ascii file"

#plot conf
plot_show = "Show plots"
plot_save = "Save plots"

#general conf
general_kic = "KIC ID"
general_binary_path = "Binary path"
general_background_result_path = "Background result path"
general_background_data_path = "Background data path"
general_analysis_result_path = "Path for results"
general_nr_of_cores = "Number of cores used"
general_sequential_run = "Sequential run"
general_run_diamonds = "Run DIAMONDS"
general_check_bayes_run = "Check Bayes factor after run"
general_use_pcb = "Activate PCB"

#analysis conf
analysis_file_path = "Paths to lightcurves"
analysis_folder_prefix = "Prefix of folder"
analysis_noise_values = "Noise values for run"
analysis_target_magnitude = "Target magnitude for run"
analysis_nr_magnitude_points = "Number of magnitude points"
analysis_nr_noise_points = "Number of noise points"
analysis_number_repeats = "Number of repeats"
analysis_obs_time_value = "Target observation time"
analysis_upper_mag_limit = "Upper mag limit"
analysis_nu_max_outer_guess = "Nu max guess"

#categories
cat_general = "General"
cat_files = "Files"
cat_analysis = "Analysis"
cat_plot = "Plot"

#List of ids
analysis_list_of_ids = "List of IDs"

#Internal
internal_literature_value = "Literature value nu max"
internal_delta_nu = "Literature value delta nu"
internal_flag_worked = "Run worked flag"
internal_noise_value = "Noise value"
internal_mag_noise = "Magnitude added noise"
internal_run_number = "Run number"
internal_mag_value = "Magnitude"
internal_teff = "T_eff"
internal_path = "Working Path"
internal_force_run = "Force run"
internal_multiple_mag = "Multiple magnitudes"
internal_id = "Internal id"
internal_use_kp_mag = "Use kepler magnitude method for computing noise"
[ 2, 7753, 4566, 198, 7753, 62, 21013, 62, 69, 22564, 62, 28665, 796, 366, 37, 22564, 5721, 1, 198, 7753, 62, 21013, 62, 2435, 62, 28665, 796, 366, 7575, 5721, 1, 198, 7753, 62, 21013, 62, 31298, 377, 396, 62, 28665, 796, 366, 6601,...
ratio_char_token: 3.058085
token_count: 637
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    str_or_none,
    url_or_none,
)
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 11748, 302, 198, 198, 6738, 764, 11321, 1330, 14151, 11627, 40450, 198, 6738, 11485, 26791, 1330, 357, 198, 220, 220, 220, 493,...
ratio_char_token: 2.542857
token_count: 70
from dataclasses import dataclass
from typing import TypeVar, Union


@dataclass
class Source:
    """
    The base configuration of all value resolutions.

    Inherit from this class to describe source configuration for your own resolvers.
    """


AnySource = Union[str, Source]
TSource = TypeVar("TSource", bound=AnySource)
[ 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, 6738, 19720, 1330, 5994, 19852, 11, 4479, 628, 198, 31, 19608, 330, 31172, 198, 4871, 8090, 25, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 383, 2779, 8398, 286, 477, 1988, 21811...
ratio_char_token: 3.363636
token_count: 99
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------

import unittest

import numpy as np

from systemds.context import SystemDSContext
from systemds.operator.algorithm import pca
from systemds.operator import List
from systemds.script_building.dag import OutputType


if __name__ == "__main__":
    unittest.main(exit=False)
[ 2, 20368, 1783, 32501, 198, 2, 198, 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, ...
ratio_char_token: 4.103448
token_count: 290
# creation of class employee

new_employee1 = Employee("raj", "kumar", 5000)

# the gives memory address location of employee1

print("The newly created employee :", new_employee1)
[ 2, 6282, 286, 1398, 6538, 628, 198, 3605, 62, 7033, 1453, 16, 796, 36824, 7203, 430, 73, 1600, 366, 74, 44844, 1600, 23336, 8, 198, 198, 2, 262, 3607, 4088, 2209, 4067, 286, 6538, 16, 198, 198, 4798, 7203, 464, 8308, 2727, 6538, 1...
ratio_char_token: 3.5
token_count: 52
import ray
import glob
import yaml
import lmdb
import numpy as np
import torch

from torch.utils.data import Dataset
from common.augmenter import augment
from utils import filter_sem

# CHANNELS = [
#     4,  # Pedestrians
#     6,  # Road lines
#     7,  # Road masks
#     8,  # Side walks
#     10, # Vehicles
#     # 12, # Traffic light poles
#     # 18, # Traffic boxes
# ]


@ray.remote
if __name__ == '__main__':
    dataset = MainDataset('/ssd2/dian/challenge_data/main_trajs_nocrash_nonoise', '/home/dianchen/carla_challenge/experiments/config_nocrash.yaml')
    # wide_rgb, wide_sem, narr_rgb, lbls, locs, rots, spds, cmd = dataset[30]
    # print (cmd)
[ 11748, 26842, 198, 11748, 15095, 198, 11748, 331, 43695, 198, 11748, 300, 9132, 65, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 6738, 28034, 13, 26791, 13, 7890, 1330, 16092, 292, 316, 198, 6738, 2219, 13, 559, 5154, 263...
ratio_char_token: 2.341297
token_count: 293
directory = "c:\\files\\other_dir\\" file = open(directory, 'r') # r, w, a => modes file.readlines() # file.read()reads till UOF # file.write() should have w mode # shelve module stores values in binary file # shelve.open() retrieves a dictionary like shelf value
[ 198, 34945, 796, 366, 66, 25, 6852, 16624, 6852, 847, 62, 15908, 6852, 1, 198, 198, 7753, 796, 1280, 7, 34945, 11, 705, 81, 11537, 1303, 374, 11, 266, 11, 257, 5218, 12881, 198, 7753, 13, 961, 6615, 3419, 198, 2, 2393, 13, 961, ...
ratio_char_token: 3.152941
token_count: 85
cid = str(input('Em que ciidade você nasceu? ')).strip()
print(cid[:5].capitalize() == 'Santo')
[ 66, 312, 796, 965, 7, 15414, 10786, 10161, 8358, 269, 72, 312, 671, 12776, 25792, 25221, 344, 84, 30, 705, 29720, 36311, 3419, 198, 4798, 7, 66, 312, 58, 25, 20, 4083, 27544, 1096, 3419, 6624, 705, 50, 14723, 11537, 198 ]
ratio_char_token: 2.341463
token_count: 41
from astropy.stats import LombScargle import numpy as np import matplotlib.pyplot as plt from astropy.table import Table import glob import pandas as pd import scipy.signal as sig import scipy.stats as stats from astropy.convolution import convolve, Box1DKernel, Gaussian1DKernel import time dir = '/Volumes/Zoe Bell Backup/everest/c08/229200000/28610/' file = 'hlsp_everest_k2_llc_229228610-c08_kepler_v2.0_lc.fits' fileLis = glob.glob('/Volumes/Zoe Bell Backup/everest/c08/229200000/*/*.fits') goodFile = '/Volumes/Zoe Bell Backup/everest/c08/229200000/28988/hlsp_everest_k2_llc_229228988-c08_kepler_v2.0_lc.fits' badFile = '/Volumes/Zoe Bell Backup/everest/c08/229200000/28995/hlsp_everest_k2_llc_229228995-c08_kepler_v2.0_lc.fits' #badFile = '/Volumes/Zoe Bell Backup/everest/c08/229200000/28967/hlsp_everest_k2_llc_229228967-c08_kepler_v2.0_lc.fits' okayFile = '/Volumes/Zoe Bell Backup/everest/c08/229200000/28386/hlsp_everest_k2_llc_229228386-c08_kepler_v2.0_lc.fits' testFile = '/Volumes/Zoe Bell Backup/everest/c08/229200000/28407/hlsp_everest_k2_llc_229228407-c08_kepler_v2.0_lc.fits' weirdFile = '/Volumes/Zoe Bell Backup/everest/c08/229200000/28726/hlsp_everest_k2_llc_229228726-c08_kepler_v2.0_lc.fits' #maxes weren't being found by my func (now are) campaign0files = glob.glob('/Volumes/Zoe Bell Backup/everest/c00/*/*/*.fits') campaign5files = glob.glob('/Volumes/Zoe Bell Backup/everest/c05/*/*/*.fits') campaign7files = glob.glob('/Volumes/Zoe Bell Backup/everest/c07/*/*/*.fits') campaign8files = glob.glob('/Volumes/Zoe Bell Backup/everest/c08/*/*/*.fits') campaign13files = glob.glob('/Volumes/Zoe Bell Backup/everest/c13/*/*/*.fits') def mergeFiles(campaign_output_file_name, root_dir = '/Volumes/Zoe Bell Backup/'): # implicit assumption using nearest instance of non-unique row ''' Takes the file name (including extension) of an output of bothFindP and optionally the root directory where this file and the everest files are saved. Merges the GAIA data for K2 with the output of bothFindP, saves this as a .csv file in the bothFindPOutput folder in the same root directory, and returns it as a pandas dataframe. Before doing so, eliminates duplicate rows in the GAIA data, taking the one with the lowest angular distance. Assumes the GAIA data is saved in the root directory as k2_dr2_1arcsec.fits and the bothFindP output is stored in the BothFindPOutput folder in the root directory. ''' k2_dr2 = Table.read(root_dir + 'k2_dr2_1arcsec.fits', format='fits') #k2_dr2 = Table.read(root_dir + 'everest/c08/k2_dr2_1arcsec.fits', format='fits') k2_dr2 = k2_dr2.to_pandas() ss = np.argsort(k2_dr2['k2_gaia_ang_dist']) uu = np.unique(k2_dr2['epic_number'].values[ss], return_index=True)[1] good_k2_dr2 = k2_dr2.iloc[ss[uu],:] campaign_output = pd.read_csv(root_dir + 'BothFindPOutput/' + campaign_output_file_name) name_column = campaign_output['File Name'].str.partition('llc_')[2].str.partition('-c')[0] name_column = pd.to_numeric(name_column) campaign_output = campaign_output.assign(epic_number=name_column) merged_output = campaign_output.merge(good_k2_dr2, left_on='epic_number', right_on='epic_number') merged_output.to_csv(root_dir + 'BothFindPOutput/Merged' + campaign_output_file_name) return merged_output def concatenateFiles(merged_output_lis): ''' Takes a list of outputs from mergeFiles and returns the concatentation of all of these, so the data from each can be analyzed together by makePlots. 
''' concatenated_file = merged_output_lis[0] if len(merged_output_lis)>1: for i in range(1,len(merged_output_lis)): concatenated_file = concatenated_file.append(merged_output_lis[i]) return concatenated_file def makePlots(merged_output, campaign, lower_threshold=0.1, upper_threshold=0.1, linear=False, close_only=False, LS=False, ACF=False, root_dir='/Volumes/Zoe Bell Backup/'): ''' Takes the output of mergeFiles (or concatenateFiles) and a string name for the corresponding campaign(s) and prints and saves the following plots: a comparison of LS Best Period, ACF First Period, and ACF Linear Period, Period v. BP-RP, a histogram of periods for BP-RP>=0.2, and M_G v. BP-RP colored by period. Several quality cuts are made to the data for the latter three plots. Optionally takes the lower and upper thresholds for selecting the data (if both equal 0.1, then selects stars with LS and ACF periods within 10% of each other), whether to use the linear or first ACF period (with boolean linear), whether to only look at stars within 300 parsecs (with boolean close_only), whether to just consider LS periods using the false alarm probability for selecting data (with boolean LS), whether to just consider ACF periods using the ACF ratio for selection data (with boolean ACF), and the root directory in which to save the plots within the folder Plots!. ''' plt.figure(figsize=(10,7)) x = np.arange(0.0, 1.7, 0.1) log_LS_period = np.log10(merged_output['LS Best Period']) log_ACF_first_period = np.log10(merged_output['ACF First Period']) log_ACF_best_period = np.log10(merged_output['ACF Best Period']) low_prob = np.where(merged_output['False Alarm Prob']==0.0) #gets 4000 stars w/ campaign 8 with ==0.0 ## high_ACF = np.where(merged_output['Autocorrelation Ratio']>=0.26) #0.26 gets 4000 stars w/ campaign 8 close = np.where(merged_output['r_est']<=300) if close_only: log_LS_period = log_LS_period.values[close] log_ACF_first_period = log_ACF_first_period.values[close] log_ACF_best_period = log_ACF_best_period.values[close] plt.subplot(221) plt.plot(log_LS_period, log_ACF_first_period, 'bo', alpha=0.1) if LS: plt.plot(log_LS_period.values[low_prob], log_ACF_first_period.values[low_prob], 'ro', alpha=0.1) elif ACF: plt.plot(log_LS_period.values[high_ACF], log_ACF_first_period.values[high_ACF], 'yo', alpha=0.1) plt.plot(x, x,lw=3, color='g') plt.plot(x, x+np.log10(1-lower_threshold), 'g--') plt.plot(x, x+np.log10(1+upper_threshold), 'g--') plt.xlabel('log(LS Best Period)') plt.ylabel('log(ACF First Period)') plt.subplot(223) plt.plot(log_LS_period, log_ACF_best_period, 'bo', alpha=0.1) if LS: plt.plot(log_LS_period.values[low_prob], log_ACF_best_period.values[low_prob], 'ro', alpha=0.1) elif ACF: plt.plot(log_LS_period.values[high_ACF], log_ACF_best_period.values[high_ACF], 'yo', alpha=0.1) plt.plot(x, x,lw=3, color='g') plt.plot(x, x+np.log10(1-lower_threshold), 'g--') plt.plot(x, x+np.log10(1+upper_threshold), 'g--') plt.xlabel('log(LS Best Period)') plt.ylabel('log(ACF Linear Period)') plt.subplot(224) plt.plot(log_ACF_first_period, log_ACF_best_period, 'bo', alpha=0.1) if LS: plt.plot(log_ACF_first_period.values[low_prob], log_ACF_best_period.values[low_prob], 'ro', alpha=0.1) elif ACF: plt.plot(log_ACF_first_period.values[high_ACF], log_ACF_best_period.values[high_ACF], 'yo', alpha=0.1) plt.plot(x, x,lw=3, color='g') plt.plot(x, x+np.log10(1-lower_threshold), 'g--') plt.plot(x, x+np.log10(1+upper_threshold), 'g--') plt.xlabel('log(ACF First Period)') plt.ylabel('log(ACF Linear Period)') plt.suptitle('Period 
Comparison') plt.tight_layout() plt.subplots_adjust(top=0.9) if LS: plt.savefig(root_dir + 'Plots!/' + campaign + ' LS Method Comparison with Lower Threshold = ' + str(lower_threshold) + ' and Upper Threshold = ' + str(upper_threshold) + '.png', dpi=150) elif ACF: plt.savefig(root_dir + 'Plots!/' + campaign + ' ACF Method Comparison with Lower Threshold = ' + str(lower_threshold) + ' and Upper Threshold = ' + str(upper_threshold) + '.png', dpi=150) else: plt.savefig(root_dir + 'Plots!/' + campaign + ' Method Comparison with Lower Threshold = ' + str(lower_threshold) + ' and Upper Threshold = ' + str(upper_threshold) + '.png', dpi=150) plt.show() M_G = merged_output[u'phot_g_mean_mag'].values - 5. * np.log10(merged_output[u'r_est'].values) + 5 if LS: good = np.where((merged_output['False Alarm Prob']==0.0) & ### (np.isfinite(merged_output[u'parallax'])) & (merged_output[u'parallax_error'] < 0.1) & (merged_output[u'r_modality_flag'] == 1) & (merged_output[u'r_result_flag'] == 1) & (np.isfinite(merged_output[u'bp_rp'])) & (merged_output[u'phot_bp_mean_flux_error']/merged_output[u'phot_bp_mean_flux'] < 0.01) & (merged_output[u'phot_rp_mean_flux_error']/merged_output[u'phot_rp_mean_flux'] < 0.01) & (merged_output[u'phot_g_mean_flux_error']/merged_output[u'phot_g_mean_flux'] < 0.01) & (M_G>=4))[0] elif ACF: good = np.where((merged_output['Autocorrelation Ratio']>=0.26) & (np.isfinite(merged_output[u'parallax'])) & (merged_output[u'parallax_error'] < 0.1) & (merged_output[u'r_modality_flag'] == 1) & (merged_output[u'r_result_flag'] == 1) & (np.isfinite(merged_output[u'bp_rp'])) & (merged_output[u'phot_bp_mean_flux_error']/merged_output[u'phot_bp_mean_flux'] < 0.01) & (merged_output[u'phot_rp_mean_flux_error']/merged_output[u'phot_rp_mean_flux'] < 0.01) & (merged_output[u'phot_g_mean_flux_error']/merged_output[u'phot_g_mean_flux'] < 0.01) & (M_G>=4))[0] else: if linear: good = np.where((merged_output['ACF Best Period']/merged_output['LS Best Period']>1-lower_threshold) & (merged_output['ACF Best Period']/merged_output['LS Best Period']<1+upper_threshold) & (np.isfinite(merged_output[u'parallax'])) & (merged_output[u'parallax_error'] < 0.1) & (merged_output[u'r_modality_flag'] == 1) & (merged_output[u'r_result_flag'] == 1) & (np.isfinite(merged_output[u'bp_rp'])) & (merged_output[u'phot_bp_mean_flux_error']/merged_output[u'phot_bp_mean_flux'] < 0.01) & (merged_output[u'phot_rp_mean_flux_error']/merged_output[u'phot_rp_mean_flux'] < 0.01) & (merged_output[u'phot_g_mean_flux_error']/merged_output[u'phot_g_mean_flux'] < 0.01) & (M_G>=4))[0] else: good = np.where((merged_output['ACF First Period']/merged_output['LS Best Period']>1-lower_threshold) & (merged_output['ACF First Period']/merged_output['LS Best Period']<1+upper_threshold) & (np.isfinite(merged_output[u'parallax'])) & (merged_output[u'parallax_error'] < 0.1) & (merged_output[u'r_modality_flag'] == 1) & (merged_output[u'r_result_flag'] == 1) & (np.isfinite(merged_output[u'bp_rp'])) & (merged_output[u'phot_bp_mean_flux_error']/merged_output[u'phot_bp_mean_flux'] < 0.01) & (merged_output[u'phot_rp_mean_flux_error']/merged_output[u'phot_rp_mean_flux'] < 0.01) & (merged_output[u'phot_g_mean_flux_error']/merged_output[u'phot_g_mean_flux'] < 0.01) & (M_G>=4))[0] if close_only: r_est = merged_output[u'r_est'].values[good] good = good[np.where((r_est <= 300.))[0]] selected_LS_periods = merged_output['LS Best Period'].values[good] if linear: selected_ACF_periods = merged_output['ACF Best Period'].values[good] else: selected_ACF_periods = 
merged_output['ACF First Period'].values[good] bp_rp = merged_output['bp_rp'].values[good] #plt.figure(figsize=(7,4)) ### if LS: plt.plot(bp_rp, selected_LS_periods, 'bo', alpha=0.05) # change to alpha=0.05 for combos else: plt.plot(bp_rp, selected_ACF_periods, 'bo', alpha=0.05) # change to alpha=0.05 for combos plt.yscale('log') #plt.xlim(0.5,2.7) ### #plt.ylim(1,100) ### plt.xlabel('bp_rp') if LS: plt.ylabel('LS Best Period') plt.savefig(root_dir + 'Plots!/' + campaign + ' LS Period v. BP-RP with Lower Threshold = ' + str(lower_threshold) + ' and Upper Threshold = ' + str(upper_threshold) + '.png', dpi=150) else: name = root_dir + 'Plots!/' + campaign if linear: plt.ylabel('ACF Linear Period') name = name + ' Linear' else: plt.ylabel('ACF First Period') if ACF: plt.savefig(name + ' ACF Period v. BP-RP with Lower Threshold = ' + str(lower_threshold) + ' and Upper Threshold = ' + str(upper_threshold) + '.png', dpi=150) else: plt.savefig(name + ' Period v. BP-RP with Lower Threshold = ' + str(lower_threshold) + ' and Upper Threshold = ' + str(upper_threshold) + '.png', dpi=150) plt.show() ok = np.where(merged_output['bp_rp'].values[good]>=2) if LS: plt.hist(selected_LS_periods[ok], bins=15) plt.xlabel('LS Best Period (where bp_rp>=2)') plt.savefig(root_dir + 'Plots!/' + campaign + ' LS Period Histogram with Lower Threshold = ' + str(lower_threshold) + ' and Upper Threshold = ' + str(upper_threshold) + '.png', dpi=150) else: name = root_dir + 'Plots!/' + campaign plt.hist(selected_ACF_periods[ok], bins=15) if linear: plt.xlabel('ACF Linear Period (where bp_rp>=2)') name = name + ' Linear' else: plt.xlabel('ACF First Period (where bp_rp>=2)') if ACF: plt.savefig(name + ' ACF Period Histogram with Lower Threshold = ' + str(lower_threshold) + ' and Upper Threshold = ' + str(upper_threshold) + '.png', dpi=150) else: plt.savefig(name + ' Period Histogram with Lower Threshold = ' + str(lower_threshold) + ' and Upper Threshold = ' + str(upper_threshold) + '.png', dpi=150) plt.show() if LS: plt.scatter(bp_rp, M_G[good], c=selected_LS_periods, alpha=0.7, s=2, cmap=plt.cm.get_cmap('Spectral_r')) else: plt.scatter(bp_rp, M_G[good], c=selected_ACF_periods, alpha=0.7, s=2, cmap=plt.cm.get_cmap('Spectral_r')) plt.ylim(12,-2) plt.xlabel('bp_rp') plt.ylabel('M_G') cb = plt.colorbar() if LS: cb.set_label('LS Best Period (days)') plt.savefig(root_dir + 'Plots!/' + campaign + ' LS M_G v. BP-RP with Lower Threshold = ' + str(lower_threshold) + ' and Upper Threshold = ' + str(upper_threshold) + '.png', dpi=150) else: name = root_dir + 'Plots!/' + campaign if linear: cb.set_label('ACF Linear Period (days)') name = name + ' Linear' else: cb.set_label('ACF First Period (days)') if ACF: plt.savefig(name + ' ACF M_G v. BP-RP with Lower Threshold = ' + str(lower_threshold) + ' and Upper Threshold = ' + str(upper_threshold) + '.png', dpi=150) else: plt.savefig(name + ' M_G v. 
BP-RP with Lower Threshold = ' + str(lower_threshold) + ' and Upper Threshold = ' + str(upper_threshold) + '.png', dpi=150) plt.show() def bothFindP(files, output_name, gen_LSplots=False, gen_ACFplots=False, gen_file_type='.png', gen_min_period=0.1, gen_max_period=30, gen_min_max_distance=1, gen_medfilt_kernel_size=11, gen_fcor_box_kernel_size=11, gen_acf_box_kernal_size=100, gen_root_dir = '/Volumes/Zoe Bell Backup/'): ''' Takes a list of file names for .fits files from the Everest K2 data and a name for the output file, and optionally whether you want LS or ACF plots saved, the file type you want it saved as, the minimum and maximum periods you want to look for, the minimum distance between maxes in the ACF you want to consider, several kernal sizes of various filters and smoothing functions, and the root directory in which to save the output .csv file within the folder BothFindPOutput. Returns the outputs of LSfindP and ACFfindP for each file in a list and saves them in a .csv file with the following headers: 'File Name', 'LS Best Period', 'Max Power', 'False Alarm Prob', 'ACF Best Period', 'ACF First Period', 'Autocorrelation Ratio', and 'Peaks List.' ''' start_time = time.clock() lis = [] for file_name in files: start = file_name.rfind('/') + 1 name = file_name[start:-5] data = Table.read(file_name, format='fits') new_row = LSfindP(name, data, plots=gen_LSplots, file_type=gen_file_type, min_period=gen_min_period, max_period=gen_max_period, root_dir=gen_root_dir) + ACFfindP(name, data, plots=gen_ACFplots, file_type=gen_file_type, min_period=gen_min_period, max_period=gen_max_period, min_max_distance=gen_min_max_distance, fcor_box_kernel_size=gen_fcor_box_kernel_size, acf_box_kernal_size=gen_acf_box_kernal_size, root_dir=gen_root_dir)[1:] lis.append(new_row) output = pd.DataFrame(data=lis, columns=['File Name', 'LS Best Period', 'Max Power', 'False Alarm Prob', 'ACF Best Period', 'ACF First Period', 'Autocorrelation Ratio', 'Peaks List']) output.to_csv(gen_root_dir + 'BothFindPOutput/' + output_name + '.csv') end_time = time.clock() print(str(end_time-start_time) + ' seconds') return output def LSfindMultipleP(files, output_name, gen_plots=False, gen_file_type='.png', gen_min_period = 0.1, gen_max_period = 30, gen_root_dir = '/Volumes/Zoe Bell Backup/'): ''' Takes a list of file names for .fits files from the Everest K2 data and a name for the output file, and optionally whether you want plots saved, the file type you want them saved as, the minimum and maximum periods you want to look for, and the root directory in which to save the output .csv within the folder FindPOutput. Returns the output of LSfindP for each file in a list and saves them in a .csv file. 
''' lis = [] for file_name in files: start = file_name.rfind('/') + 1 name = file_name[start:-5] data = Table.read(file_name, format='fits') lis.append(LSfindP(name, data, plots=gen_plots, file_type=gen_file_type, min_period=gen_min_period, max_period=gen_max_period, root_dir=gen_root_dir)) output = pd.DataFrame(data=lis, columns=['File Name', 'Best Period','Max Power','False Alarm Prob']) output.to_csv(gen_root_dir + 'FindPOutput/' + output_name + '.csv') return output def LSfindP(file_name, data, plots=False, file_type='.png', min_period = 0.1, max_period = 30, root_dir = '/Volumes/Zoe Bell Backup/'): ''' Takes the name of a .fits file (without the extension) from the Everest K2 data and the data from that file read into a table, and optionally whether you want a plot saved, the file type you want it saved as, the minimum and maximum periods you want to look for, and the root directory in which to save the plot within the folder LSPlotOutputs. Uses the Lomb-Scargle periodogram method to return the file name, the period that best fits the corrected flux data in that range, the power at that period, and the associated false alarm probability. ''' #start = file_name.rfind('/') + 1 #name = file_name[start:-5] name = file_name #data = Table.read(file_name, format='fits') ok = np.where((data['QUALITY']==0) & (np.isfinite(data['TIME'])) & (np.isfinite(data['FCOR']) & (np.isfinite(data['FRAW_ERR'])))) t = np.array(data['TIME'][ok]) fcor = np.array(data['FCOR'][ok]) frawErr = np.array(data['FRAW_ERR'][ok]) ls = LombScargle(t, fcor, frawErr) freq, power = ls.autopower(minimum_frequency=1/max_period, maximum_frequency=1/min_period) best_freq = freq[np.argmax(power)] max_power = np.max(power) if(plots): plt.figure(figsize=(10,7)) plt.subplot(211) plt.plot(1/freq, power) plt.title('Periodogram') plt.xlabel('Period') plt.ylabel('Power') plt.annotate('best period', xy=(1/best_freq, max_power), xytext=(1/best_freq*0.5, max_power*0.9), arrowprops=dict(facecolor='black', width=1, headwidth=5)) plt.subplot(212) t_fit = np.linspace(np.min(t),np.max(t)) # make just select first and last f_fit = LombScargle(t, fcor, frawErr).model(t_fit, best_freq) plt.plot(t, fcor) plt.plot(t_fit, f_fit) plt.title('Comparison of Data and Model') plt.xlabel('Time') plt.ylabel('Flux') plt.suptitle(name) plt.tight_layout() plt.subplots_adjust(top=0.9) plt.savefig(root_dir + 'LSPlotOutputs/' + name + file_type, dpi=150) plt.close() return [name, 1/best_freq, max_power, ls.false_alarm_probability(max_power)] def ACFfindMultipleP(files, output_name, gen_plots=False, gen_file_type='.png', gen_min_period=0.1, gen_max_period=30, gen_min_max_distance=0.2, gen_medfilt_kernel_size=11, gen_fcor_box_kernel_size=11, gen_acf_box_kernal_size=100, gen_root_dir = '/Volumes/Zoe Bell Backup/'): ''' Takes a list of file names for .fits files from the Everest K2 data and a name for the output file, and optionally whether you want plots saved, the file type you want it saved as, the minimum and maximum periods you want to look for, the minimum distance between maxes in the ACF you want to consider, several kernal sizes of various filters and smoothing functions, and the root directory in which to save the output .csv within the folder ACFfindPOutput. Returns the output of ACFfindP for each file in a list and saves them in a .csv file. 
''' start_time = time.clock() lis = [] for file_name in files: start = file_name.rfind('/') + 1 name = file_name[start:-5] data = Table.read(file_name, format='fits') lis.append(ACFfindP(name, data, plots=gen_plots, file_type=gen_file_type, min_period=gen_min_period, max_period=gen_max_period, min_max_distance=gen_min_max_distance, fcor_box_kernel_size=gen_fcor_box_kernel_size, acf_box_kernal_size=gen_acf_box_kernal_size, root_dir=gen_root_dir)) output = pd.DataFrame(data=lis, columns=['File Name', 'Best Period', 'First Period', 'Autocorrelation Ratio', 'Period List']) output.to_csv(gen_root_dir + 'ACFfindPOutput/' + output_name + '.csv') end_time = time.clock() print(str(end_time-start_time) + ' seconds') return output def ACFfindP(file_name, data, plots=False, file_type='.png', min_period=0.1, max_period=30, min_max_distance=0.22, medfilt_kernel_size=11, fcor_box_kernel_size=11, acf_box_kernal_size=100, root_dir = '/Volumes/Zoe Bell Backup/'): #maybe default should be 40? ''' Takes the name of a .fits file (without the extension) from the Everest K2 data and the data from that file read into a table, and optionally whether you want a plot saved, the file type you want it saved as, the minimum and maximum periods you want to look for, the minimum distance between maxes in the ACF you want to consider, several kernal sizes of various filters and smoothing functions, and the root directory in which to save the plot within the folder ACFPlotOutputs. Uses the Auto-Correlation Function method to return the file name, the period that best fits the corrected flux data in that range (based on all maxes found), the first peak found, the ratio of the autocorrelation magnitude of the highest peak to the magnitude when shifted by zero, and the first 10 peaks found. ''' #start = file_name.rfind('/') + 1 #name = file_name[start:-5] name = file_name #data = Table.read(file_name, format='fits') ok = np.where((data['QUALITY']==0) & (np.isfinite(data['TIME'])) & (np.isfinite(data['FCOR']) & (np.isfinite(data['FRAW_ERR'])))) t = np.array(data['TIME'][ok]) fcor = list(data['FCOR'][ok]) fcor_median = fcor/np.median(fcor)-1 fcor_median = sig.medfilt(fcor_median, kernel_size=medfilt_kernel_size) fcor_median = convolve(fcor_median, Box1DKernel(fcor_box_kernel_size, mode='center')) N = len(fcor) # t_step = np.nanmedian(t[1:]-t[0:-1]) # 29.4 min expected but this ended up chronically giving us too short overall time frames t_step = (np.nanmax(t) - np.nanmin(t)) / np.float(np.size(t)) t_range = np.arange(N)*t_step print(t_step) plt.plot((t-t[0]) - t_range) plt.xlabel('Data Point') plt.ylabel('Actual Time Elapsed - Regularized Time Elapsed (Days)') plt.show() arr = sig.correlate(fcor_median,fcor_median, mode='full') ACF = arr[N-1:] baseline_ACF = ACF[0] okP = np.where(((t_range>min_period) & (t_range<max_period))) t_search = t_range[okP] ACF_search = ACF[okP] ACF_search = convolve(ACF_search, Box1DKernel(acf_box_kernal_size)) # 200 good; automatically does linear_interp, can change mode='center' maxes = findMaxes(t_search, ACF_search, min_max_distance) periods = maxes[0] ACFs = maxes[1] first_peak = -1 if len(periods)>1: first_peak = periods[1] if len(periods)<2: if(plots): plt.figure(figsize=(10,7)) plt.title('Unsmoothed v. 
Smoothed') plt.xlabel('Time Shift (days)') plt.ylabel('Autocorrelation') plt.plot(t_range, ACF) plt.plot(t_search, ACF_search) formated_periods = [] for n in periods[:10]: plt.axvline(n) ##### formated_periods.append(format(n, '.2f')) plt.suptitle(formated_periods) plt.tight_layout() plt.subplots_adjust(top=0.9) plt.savefig(root_dir + 'ACFPlotOutputs/' + name + file_type, dpi=150) plt.close() return [name, -1, -1, -1, periods[:10]] max_ACF = np.max(ACFs[1:]) linPossible = len(periods)>2 if linPossible: linInfo = stats.linregress(range(len(periods)-1), periods[1:]) linSlope = linInfo[0] # /actual best period linIntercept = linInfo[1] else: linSlope = periods[1] if(plots): plt.figure(figsize=(10,7)) plt.subplot(211) plt.title('Unsmoothed v. Smoothed') plt.xlabel('Time Shift (days)') plt.ylabel('Autocorrelation') plt.plot(t_range, ACF) plt.plot(t_search, ACF_search) formated_periods = [] for n in periods[:10]: plt.axvline(n) formated_periods.append(format(n, '.2f')) if linPossible: plt.subplot(212) plt.title('Maxes with the Best Period ' + format(linSlope, '.2f')) plt.xlabel('Index') plt.ylabel('Time Shift (days)') plt.plot(periods[1:], 'bo') x = range(len(periods)-1) y = [] for x_val in x: y.append(linIntercept + linSlope*x_val) plt.plot(x, y) plt.suptitle(formated_periods) plt.tight_layout() plt.subplots_adjust(top=0.9) plt.savefig(root_dir + 'ACFPlotOutputs/' + name + file_type, dpi=150) plt.close() return [name, linSlope, first_peak, max_ACF/baseline_ACF, periods[:10]] def findMaxes(t_search, ACF_search, min_max_distance): ''' Takes a list of time shifts, a list of their associated autocorrelation values, and the minimum distance between maxes in the ACF you want to allow. Returns a list of the list of time shifts and the list of associated autocorrelation values found to be at local maximums of the ACF data. ''' grad = np.gradient(ACF_search) zeros = [] for i in range(5, len(grad)-5): # orginally 1 not 5 if (grad[i-5]>0) & (grad[i+5]<0): if (len(zeros)>0): if (t_search[i]-t_search[zeros[-1]]) >= min_max_distance: #only counting maxes at least one day away from each other (try going down to .2 days) zeros.append(i) else: zeros.append(i) gradSnd = np.gradient(grad) maxes = [] for zero in zeros: if gradSnd[zero]<0: maxes.append(zero) return [t_search[maxes], ACF_search[maxes]] # get things onto GitHub # fine-tune where statement # get things within 10% of ACF/LS median # commenting/README # r_est do within 200-300 parsecs # look at ACF plot, put vertical line down where period is predicted -- nothing looks weird # look at actual v. regularized time elapsed -- by end, 8-10 more days have passed according to actual # put in Guassian for peaks # took 5,290.2 secs (about 1 hour and 30 min) to run bothFindP with all plots on 7,748 files (all of campaign 0) # took 14,624.8 secs (about 4 hours) to run bothFindP with all plots on 21,407 files (all of campaign 13) # grab plus 10 (5) minus 20 (25) # assume Lomb-Scargle is right # POSTER STUFF # save as dpi = 300 for poster printing # funding for poster: 'This work was supported by an NSF Astronomy and Astrophysics Postdoctoral Fellowship under award AST-1501418.' 
# be able to switch to linear ACF period # switchable directories root_dir = '/Volumes/Zoe Bell Backup/' # make concatenate func that concatenates merged files # took 9,241.7 secs (about 2 hours and 35 min) to run bothFindP with all plots on 13,483 files (all of campaign seven) # took 15,598.1 secs (about 4 hours and 20 min) to run bothFindP with all plots on 23,074 files (all of campaign five) # look at LS prob v. amplitude # add campaign to plot file names # write two sentences in README ```` python code ```` (how to run, how to interpret output) # look at differences in 2014 paper # add quality cuts # make histogram of logP for bp_rp>=2 (like in McQ '13) with _ = plt.hist # save period-color, period historgram, and color-magnitude diagrams # also plot Mg (intrinsic brightness) v. BP-RP; color data by Prot (Mg = mg-5*log(1000/parallax)+5) # do both for 10% and 20% # use pd.merge() (or join) (pandas) to match up two files based on star ID # to read in file, once table use myTable.to_pandas() (something like that) to convert to pandas dataframe # compare 3 log periods as scatter plots; make transparent (alpha = 0.3) # trust those within 10% of the 1:1 line # of those plot Prot v. BP-RP with y axis log scale # took 14,496.4 secs (about 4.0 hours) to run bothFindP with all plots on 21,387 files (all of campaign eight) # took 429.1 secs (about 7 minutes) to run bothFindP with all plots on 622 files # add license to GitHub—-license.md (MIT one recommended) # documentation! # took 58.3 secs to run with ACF with plots on 622 files # make new wrapper func # change where figs save # see if can get min max distance down to 0.2 days (make something that can be fiddled with, also do so for smoothing things) # took 52.5 secs to run on 622 files # print out time (import time) # average over spacing of peaks—-graph # check outputs against lomb-scargle # look at 2013 paper for how to use ACF (use same limits) # scipy spline to smooth, pandas rolling/running stats package for media (like boxcar for mean) # check boxcar (why artifact at beginning?) # write func that calcs derivs and finds zeros and neg curvature # boxcar smooth (running average) for gross ones # use full width at half max for box size # save plot files # N = npsize(fcor) # ACF = arr[N:] # np.nanmedian(t[1:]-t[0:-1]) # tan = np.arange(N)*av. time step # put stuff to GitHub for prob problem # autocorrelation func (ACF) in astropy? scipy.correlate2d or something (looking for first local max > 0.2 days), # return period and height of ACF output # pandas to output file (to_csv) # output=pd.DataFrame(lis) # name cols # output.to_csv() # add label to periodogram at peak, name and lable /... .fitz/png, make two-panel figure (subplot) # l = kdjf('/) # name[l[-1]+1:-4]+'.png' (make ftype a variable) # plt.figure(figsize(optional,takes x and y inches)) --stuff-- plt.savefig(name) plt.close() # panda.rolling rolling median; import glob.glob as glob to read in multiple files # create function(file name) return period and uncertainty/power, optional args plots=False, smooth window=10, period range # push to GitHub # plot np.arange(0.001, 0.1, 0.001)
[ 6738, 6468, 28338, 13, 34242, 1330, 28503, 3351, 853, 293, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 6738, 6468, 28338, 13, 11487, 1330, 8655, 198, 11748, 15095, 198, 11748, 1...
ratio_char_token: 2.323031
token_count: 14,042
def first_bad_pair(sequence, k):
    """Return the first index of a pair of elements in sequence[]
    for indices k-1, k+1, k+2, k+3, ... where the earlier element is
    not less than the later element. If no such pair exists, return -1."""
    if 0 < k < len(sequence) - 1:
        if sequence[k-1] >= sequence[k+1]:
            return k-1
    for i in range(k+1, len(sequence)-1):
        if sequence[i] >= sequence[i+1]:
            return i
    return -1


def almostIncreasingSequence(sequence):
    """Return whether it is possible to obtain a strictly increasing
    sequence by removing no more than one element from the array."""
    j = first_bad_pair(sequence, -1)
    if j == -1:
        return True  # List is increasing
    if first_bad_pair(sequence, j) == -1:
        return True  # Deleting earlier element makes increasing
    if first_bad_pair(sequence, j+1) == -1:
        return True  # Deleting later element makes increasing
    return False  # Deleting either does not make increasing


def almostIncreasingSequence(sequence):
    if len(sequence)==2:
        return sequence==sorted(list(sequence))
    else:
        for i in range(0,len(sequence)):
            newsequence=sequence[:i]+sequence[i+1:]
            if (newsequence==sorted(list(newsequence))) and len(newsequence)==len(set(newsequence)):
                return True
                break
        else:
            result=False
        return result


if __name__=="__main__":
    print almostIncreasingSequence([3,5,67,98,3])
[ 4299, 717, 62, 14774, 62, 24874, 7, 43167, 11, 479, 2599, 198, 220, 220, 220, 37227, 13615, 262, 717, 6376, 286, 257, 5166, 286, 4847, 287, 8379, 21737, 198, 220, 220, 220, 329, 36525, 479, 12, 16, 11, 479, 10, 16, 11, 479, 10, ...
ratio_char_token: 2.491803
token_count: 610
from typing import Dict

style: Dict[str, str] = {
    'reset': '\033[0;0;0m',
    'red': '\033[1;31m',
    'liteRed': '\033[1;91m',
    'green': '\033[1;92m',
    'liteGreen': '\033[1;92m',
    'black': '\033[1;30m',
    'yellow': '\033[1;33m',
    'liteYellow': '\033[1;93m',
    'blue': '\033[1;34m',
    'liteBlue': '\033[1;94m',
    'magenta': '\033[1;35m',
    'magentaClaro': '\033[1;95m',
    'cyan': '\033[1;36m',
    'cyanClaro': '\033[1; 96m',
    'liteGray': '\033[1;37m',
    'darkGray': '\033[1;90m',
    'white': '\033[1;97m',
    'bold': '\033[;1m',
    'invert': '\033[;7m',
    'italic': '\033[;3m',
}
[ 6738, 19720, 1330, 360, 713, 198, 198, 7635, 25, 360, 713, 58, 2536, 11, 965, 60, 796, 1391, 198, 220, 220, 220, 705, 42503, 10354, 705, 59, 44427, 58, 15, 26, 15, 26, 15, 76, 3256, 198, 220, 220, 220, 705, 445, 10354, 705, 59, ...
ratio_char_token: 1.741573
token_count: 356
import logging; logger = logging.getLogger("minimalKB."+__name__); DEBUG_LEVEL=logging.DEBUG from minimalkb.exceptions import KbServerError def query(db, vars, patterns, models): """ 'vars' is the list of unbound variables that are expected to be returned. Each of them must start with a '?'. 'patterns' is a list/set of 3-tuples (s,p,o). Each tuple may contain unbound variables, that MUST start with a '?'. """ vars = set(vars) allvars = set() for p in patterns: allvars |= set(get_vars(p)) if not allvars >= vars: logger.warn("Some requested vars are not present in the patterns. Returning []") return [] if len(patterns) == 1: return singlepattern(db, patterns[0], models) independentpatterns = {p for p in patterns if nb_variables(p) == 1} dependentpatterns = set(patterns) - independentpatterns directpatterns = {} candidates = {} for v in allvars: directpatterns[v] = {p for p in patterns if v in p} # first, execute simple queries to determine potential candidates: # resolve patterns that contain *only* the desired output variable for p in (independentpatterns & directpatterns[v]): if v not in candidates: candidates[v] = simplequery(db, p, models) else: # intersection with previous candidates candidates[v] = candidates[v] & simplequery(db, p, models) # if any of the requested var appears in an independant pattern but has no match for # this pattern, return [] for var in allvars: if var in candidates and not candidates[var]: return [] if len(vars) == 1: var = vars.pop() # no dependent pattern? no need to filter! if not dependentpatterns: return list(candidates[var]) candidate = set() for pattern in dependentpatterns: if var not in pattern: raise NotImplementedError("Can not handle pattern %s with requested variable %s." % (pattern, var)) s, p, o = [prepare(tok) for tok in pattern] if not candidate: candidate = selectfromset(db, s, p, o, models) else: candidate &= selectfromset(db, s, p, o, models) return list(candidate) else: if not dependentpatterns: raise NotImplementedError("Multiple variable in independent patterns not yet supported.") raise NotImplementedError("Only a single variable in queries can be currently requested.") ### TODO !!! ### while dependentpatterns: pattern = dependentpatterns.pop() s, p, o = pattern stmts = [(r[1], r[2], r[3]) for r in matchingstmt(db, pattern, models)] if is_variable(s): pass def singlepattern(db, pattern, models): """ Returns the list of statements that match a single pattern (like "* likes ?toto"). If only one unbound variable is present, it returns the list of possible values for this variable. If 2 or 3 tokens are unbound, it returns a list of complete statments (s,p,o). """ if nb_variables(pattern) == 1: return list(simplequery(db, pattern, models)) else: results = matchingstmt(db, pattern, models) return [[res[1], res[2], res[3]] for res in results] def matchingstmt(db, pattern, models = [], assertedonly = False): """Returns the list of statements matching a given pattern. If assertedonly is True, statements infered by reasoning are excluded. 
""" s,p,o = pattern params = {'s':s, 'p':p, 'o':o, } # workaround to feed a variable number of models models = list(models) for i in range(len(models)): params["m%s"%i] = models[i] query = "SELECT * FROM triples " conditions = [] if not is_variable(s): conditions += ["subject=:s"] if not is_variable(p): conditions += ["predicate=:p"] if not is_variable(o): conditions += ["object=:o"] if assertedonly: conditions += ["inferred=0"] if models: conditions += ["model IN (%s)" % (",".join([":m%s" % i for i in range(len(models))]))] if conditions: query += "WHERE (" + " AND ".join(conditions) + ")" return [row for row in db.execute(query, params)] def simplequery(db, pattern, models = [], assertedonly = False): """ A 'simple query' is a query with only *one* unbound variable. Return the list of possible values for this variable """ s,p,o = pattern params = {'s':s, 'p':p, 'o':o, } # workaround to feed a variable number of models models = list(models) for i in range(len(models)): params["m%s"%i] = models[i] query = "SELECT " if is_variable(s): query += "subject FROM triples WHERE (predicate=:p AND object=:o)" elif is_variable(p): query += "predicate FROM triples WHERE (subject=:s AND object=:o)" elif is_variable(o): query += "object FROM triples WHERE (subject=:s AND predicate=:p)" else: query += "hash FROM triples WHERE (subject=:s AND predicate=:p AND object=:o)" if assertedonly: query += " AND inferred=0" if models: query += " AND model IN (%s)" % (",".join([":m%s" % i for i in range(len(models))])) return {row[0] for row in db.execute(query, params)}
[ 11748, 18931, 26, 49706, 796, 18931, 13, 1136, 11187, 1362, 7203, 1084, 4402, 22764, 526, 10, 834, 3672, 834, 1776, 198, 30531, 62, 2538, 18697, 28, 6404, 2667, 13, 30531, 198, 198, 6738, 10356, 971, 65, 13, 1069, 11755, 1330, 509, 65...
ratio_char_token: 2.467021
token_count: 2,259
import datetime


def get_expire_time(minutes: int) -> datetime.datetime:
    """ get expire time after minutes from now

    args
        minutes: int
    return
        expire_time: datetime.datetime
    """
    now = datetime.datetime.now()
    expire_time = now + datetime.timedelta(minutes=minutes)
    return expire_time
[ 11748, 4818, 8079, 201, 198, 201, 198, 201, 198, 4299, 651, 62, 1069, 5111, 62, 2435, 7, 1084, 1769, 25, 493, 8, 4613, 4818, 8079, 13, 19608, 8079, 25, 201, 198, 220, 220, 220, 37227, 651, 24264, 640, 706, 2431, 422, 783, 201, 198...
ratio_char_token: 2.431655
token_count: 139
#!/usr/bin/env python3.7 # Mastering Object-Oriented Python 2e # # Code Examples for Mastering Object-Oriented Python 2nd Edition # # Chapter 20. Example 1. # """ Blackjack Cards and Decks ========================= This module contains a definition of :class:`Card`, :class:`Deck` and :class:`Shoe` suitable for Blackjack. The :class:`Card` class hierarchy --------------------------------- The :class:`Card` class hierarchy includes the following class definitions. :class:`Card` is the superclass as well as being the class for number cards. :class:`FaceCard` defines face cards: J, Q and K. :class:`AceCard` defines the Ace. This is special in Blackjack because it creates a soft total for a hand. We create cards using the :func:`card` factory function to create the proper :class:`Card` subclass instances from a rank and suit. The :class:`Suit` enumeration has all of the Suit instances. :: >>> from ch20_ex1 import cards >>> ace_clubs= cards.card( 1, cards.suits[0] ) >>> ace_clubs 'A♣' >>> ace_diamonds= cards.card( 1, cards.suits[1] ) >>> ace_clubs.rank == ace_diamonds.rank True The :class:`Deck` and :class:`Shoe` class hierarchy --------------------------------------------------- The basic :class:`Deck` creates a single 52-card deck. The :class:`Shoe` subclass creates a given number of decks. A :class:`Deck` can be shuffled before the cards can be extracted with the :meth:`pop` method. A :class:`Shoe` must be shuffled and *burned*. The burn operation sequesters a random number of cards based on a mean and standard deviation. The mean is a number of cards (52 is the default.) The standard deviation for the burn is also given as a number of cards (2 is the default.) """ # Example Sphinx-style Documentation # ------------------------------------- # Imports from enum import Enum from typing import Optional class Suit(str, Enum): """ Enumeration of all possible values for a card's suit. """ Club = "♣" Diamond = "♦" Heart = "♥" Spade = "♠" class Card: """ Definition of a numeric rank playing card. Subclasses will define :py:class:`FaceCard` and :py:class:`AceCard`. :ivar rank: int rank of the card :ivar suit: Suit suit of the card :ivar hard: int Hard point total for a card :ivar soft: int Soft total; same as hard for all cards except Aces. """ def __init__( self, rank: int, suit: Suit, hard: int, soft: Optional[int] = None ) -> None: """Define the values for this card. :param rank: Numeric rank in the range 1-13. :param suit: Suit object (often a character from '♣♡♢♠') :param hard: Hard point total (or 10 for FaceCard or 1 for AceCard) :param soft: The soft total for AceCard, otherwise defaults to hard. """ self.rank = rank self.suit = suit self.hard = hard self.soft = soft if soft is not None else hard class FaceCard(Card): """ Subclass of :py:class:`Card` with Ranks 11-13 represented by J, Q, and K. """ rank_str = {11: "J", 12: "Q", 13: "K"} class AceCard(Card): """ Subclass of :py:class:`Card` with rank of 1 represented by A. """ def card(rank: int, suit: Suit) -> Card: """ Create a :py:class:`Card` instance from rank and suit. Can raise :py:exc:`TypeError` for ranks out of the range 1 to 13, inclusive. 
:param suit: Suit object :param rank: Numeric rank in the range 1-13 :returns: :py:class:`Card` instance :raises TypeError: rank out of range c >>> from Chapter_20.ch20_ex1 import card >>> str(card(3, Suit.Heart)) '3♥' >>> str(card(1, Suit.Heart)) 'A♥' """ if rank == 1: return AceCard(rank, suit, 1, 11) elif 2 <= rank < 11: return Card(rank, suit, rank) elif 11 <= rank < 14: return FaceCard(rank, suit, 10) else: raise TypeError
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 13, 22, 198, 2, 5599, 278, 9515, 12, 46, 380, 4714, 11361, 362, 68, 198, 2, 198, 2, 6127, 21066, 329, 5599, 278, 9515, 12, 46, 380, 4714, 11361, 362, 358, 5061, 198, 2, 198, 2, ...
ratio_char_token: 2.80629
token_count: 1,399
""" A utility file for testing helpers """ import numpy as np import pandas as pd def df_compare(left, right, columns=None, atol=1.e-8, rtol=1.e-5, equal_nan=True): """ Compares two dataframe in an approximate manner. Checks: - Columns - Indices - Float columns (through tolerances) - Integer columns - Other columns """ if columns is not None: if not isinstance(columns, (list, tuple)): columns = [columns] col_set = set(columns) if not (set(left.columns) >= col_set): raise KeyError("Left DataFrame did not contain all tested columns") if not (set(right.columns) >= col_set): raise KeyError( "Right DataFrame did not contain all tested columns") left = left[columns] right = right[columns] else: if set(left.columns) != set(right.columns): raise KeyError( "Right and Left DataFrames do not have the same columns") # Order the dataframe left = left[right.columns] # Check index and sort assert right.index.equals(left.index) left = left.loc[right.index] # Check floats fcols = [ name for name, tp in zip(left.columns, left.dtypes) if tp.kind == "f" ] fclose = np.allclose( left[fcols], right[fcols], atol=atol, rtol=rtol, equal_nan=equal_nan) if not fclose: raise AssertionError("DF_compare: Mismatch in float columns.") # Check ints icols = [ name for name, tp in zip(left.columns, left.dtypes) if tp.kind == "i" ] iclose = np.allclose(left[icols], right[icols]) if not fclose: raise AssertionError("DF_compare: Mismatch in integer columns.") # Check everything else remaining_cols = list(set(left.columns) - set(fcols) - set(icols)) rclose = left[remaining_cols].equals(right[remaining_cols]) if not rclose: raise AssertionError("DF_compare: Mismatch in non-numeric columns.") return True def dict_compare(left, right, atol=1.e-9, rtol=1.e-5): """ A testing function that attempts to compare two different complex dictionaries. This function can currently handle the following data types: - int - str - float - set, list, tuple - np.ndarray - pd.DataFrame """ if set(left) != set(right): raise KeyError("Right and Left dicts do not contain the same keys.") for key in list(left): lv = left[key] rv = right[key] match = True if isinstance(lv, (str, int, np.int32, np.int64)): match = lv == rv elif isinstance(lv, set): match = lv == set(rv) elif isinstance(lv, (float, np.ndarray, list)): match = np.allclose(lv, rv, atol=atol, rtol=rtol) elif isinstance(lv, pd.DataFrame): match = df_compare(lv, rv, atol=atol, rtol=rtol) elif isinstance(lv, dict): match = dict_compare(lv, rv, atol=atol, rtol=rtol) else: raise TypeError("dict_compare: Misunderstood compare type '%s'." % str(type(lv))) if match is False: raise AssertionError( "dict_compare: Mismatch for key %s, comparing %s to %s" % (key, str(lv), str(rv))) return True def dl_compare(left, right, atom_checks=None): """ Attempts to compare two dataframes Checks: - Term and parameter lengths - Similar term and parameters - one/two/three/four body lengths - one/two/three/four body values Currently assumes both DL's can be loaded into memory. """ ### Compare all terms within the DL if atom_checks is None: atom_checks = ["charge", "xyz"] left_uids = left.list_term_uids() right_uids = left.list_term_uids() # First make sure the number of terms is the same for k in list(left_uids): if not set(left_uids[k]) == set(right_uids[k]): raise KeyError( "dl_compare: Mismatch in the number of parameters between left (%d) and right (%d)." % (len(left_uids[k]), len(right_uids[k]))) # Make sure the terms in left matches the terms in right. 
conversion_dict = {} # Loop over orders for k in list(left_uids): # Loop over left uids conversion_dict[k] = {} ruid_tmps = right_uids[k][:] for luid in left_uids[k]: pl = left.get_term_parameter(k, luid) # Loop over right uid's popping ones we used for ruid in ruid_tmps: pr = left.get_term_parameter(k, ruid) # Check match, pop right uid, and break this loop back to luid iterator if (pr[0] == pl[0]) and dict_compare(pr[1], pl[1]): conversion_dict[k][ruid] = luid ruid_tmps.remove(ruid) break # After all luid's have been used, our ruid list should be empty if len(ruid_tmps): raise KeyError( "dl_compare: Did not find a match for all parameter terms") ### Find matching atoms and compare atom properties left_atom_missing = set(atom_checks) - set(left.list_atom_properties()) if len(left_atom_missing): raise KeyError( "dl_compare: left dataframe was missing %s atom properies" % str(left_atom_missing)) right_atom_missing = set(atom_checks) - set(right.list_atom_properties()) if len(right_atom_missing): raise KeyError( "dl_compare: right dataframe was missing %s atom properies" % str(right_atom_missing)) left_atom = left.get_atoms(atom_checks, by_value=True) right_atom = right.get_atoms(atom_checks, by_value=True) # print(left_atom) # print(right_atom) if left_atom.shape != right_atom.shape: raise IndexError( "dl_compare: The number of atoms in the left and right DL's does not match." ) # Assume order is the same for now assert df_compare(left_atom.reset_index(), right_atom.reset_index()) # Reorder based on coordinates # left_coords = left_atom[["X", "Y", "Z"]].values # right_coords = left_atom[["X", "Y", "Z"]].values # tmp_mat = left_coords[:, None, :] - right_coords # distance_matrix = np.sqrt(np.einsum('ijk,ijk->ij', tmp_mat, tmp_mat)) # if np.sum(distance_matrix.min(axis=0) > 1.e-6): # raise IndexError("dl_compare: Not all coordintes match") # reorder = distance_matrix.argmin(axis=0) # if reorder.shape[0] != np.unqiue(reorder).shape[0]: # raise IndexError("dl_compare: ") ### Find matching terms # Build uid to uid dict for order in list(conversion_dict): for k, v in conversion_dict[order].items(): if k != v: raise KeyError( "dl_compare: Does not yet support non-identical parameter key dictionaries" ) for order in list(conversion_dict): if len(conversion_dict[order]) < 1: continue assert df_compare( left.get_terms(order).reset_index(), right.get_terms(order).reset_index()) ### Find matching non-bonds # left_nb_types = set(left.list_stored_nb_types()) # right_nb_types = set(right.list_stored_nb_types()) # if (left_nb_types != right_nb_types): # missing = left_nb_types ^ right_nb_types # raise KeyError("dl_compare: Mismatch in list_nb_parameters. Symmetric difference: %s" % missing) # for nb_form in left_nb_types: # dict_compare(left.list_nb_parameters(nb_form), right.list_nb_parameters(nb_form)) return True
[ 37811, 198, 32, 10361, 2393, 329, 4856, 49385, 198, 37811, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 628, 198, 4299, 47764, 62, 5589, 533, 7, 9464, 11, 198, 220, 220, 220, 220, 220, 220, 220, 220...
2.232685
3,494
#!/usr/bin/env python
#
import os.path

import setuptools

setuptools.setup(
    name='rmq-definitions',
    version='1.0.1',
    description=('Deterministicly sorting and formatting of RabbitMQ '
                 'definition backups'),
    author='Gavin M. Roy',
    author_email='gavinmroy@gmail.com',
    url='https://github.com/gmr/rmq-definitions',
    install_requires=read_requirements('installation.txt'),
    license='BSD',
    py_modules=['rmq_definitions'],
    entry_points={'console_scripts': [
        'rmq-definitions=rmq_definitions:main'
    ]},
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: System :: Archiving :: Backup',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities'],
    test_suite='nose.collector',
    tests_require=read_requirements('testing.txt'),
    zip_safe=True)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 198, 198, 11748, 28686, 13, 6978, 198, 11748, 900, 37623, 10141, 628, 198, 198, 2617, 37623, 10141, 13, 40406, 7, 198, 220, 220, 220, 1438, 11639, 26224, 80, 12, 4299, 50101, 3256, ...
2.641115
574
#!/usr/bin/python3
# My first program on Python!
print("Hello World!")
for cntr in range(0, 11):
    print(cntr)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 2, 2011, 717, 1430, 319, 11361, 0, 198, 4798, 5855, 15496, 2159, 2474, 8, 198, 220, 220, 220, 329, 269, 429, 81, 287, 2837, 7, 15, 11, 1157, 2599, 198, 220, 220, 220, 3601, 357, 66,...
2.408163
49
from logbook import FileHandler
from alephnull.finance.blotter import ORDER_STATUS
[ 6738, 2604, 2070, 1330, 9220, 25060, 198, 6738, 31341, 746, 8423, 13, 69, 14149, 13, 2436, 313, 353, 1330, 38678, 62, 35744, 2937, 628, 628, 628, 198 ]
3.296296
27
import ast
import re

from astnode import *

p_elif = re.compile(r'^elif\s?')
p_else = re.compile(r'^else\s?')
p_try = re.compile(r'^try\s?')
p_except = re.compile(r'^except\s?')
p_finally = re.compile(r'^finally\s?')
p_decorator = re.compile(r'^@.*')

if __name__ == '__main__':
    # node = ast.parse('''
    # # for i in range(1, 100):
    # #     sum = sum + i
    # #
    # # sorted(arr, reverse=True)
    # # sorted(my_dict, key=lambda x: my_dict[x], reverse=True)
    # # m = dict ( zip ( new_keys , keys ) )
    # # for f in sorted ( os . listdir ( self . path ) ) :
    # #     pass
    # for f in sorted ( os . listdir ( self . path ) ) : pass
    # ''')
    # print ast.dump(node, annotate_fields=False)
    # print get_tree_str_repr(node)

    # print parse('for f in sorted ( os . listdir ( self . path ) ) : sum = sum + 1; sum = "(hello there)" ')
    # print parse('global _standard_context_processors')
    parse_django()
[ 11748, 6468, 198, 11748, 302, 198, 198, 6738, 6468, 17440, 1330, 1635, 198, 198, 79, 62, 417, 361, 796, 302, 13, 5589, 576, 7, 81, 6, 61, 417, 361, 59, 82, 8348, 8, 198, 79, 62, 17772, 796, 302, 13, 5589, 576, 7, 81, 6, 61, ...
2.366492
382
"""Contains toplevel, abstract objects mirroring the Sina schema.""" from __future__ import print_function import logging import collections import numbers import copy import six import sina.sjson as json logging.basicConfig() LOGGER = logging.getLogger(__name__) RESERVED_TYPES = ["run"] # Types reserved by Record's children # Disable redefined-builtin, invalid-name due to ubiquitous use of id and type # pylint: disable=invalid-name,redefined-builtin # We don't mind the high instance attribute count since this is essentially # a reflection of our schema. class Record(object): # pylint: disable=too-many-instance-attributes """ A record is any arbitrary object we've chosen to store. A record is guaranteed to have exactly two things: an id and a type. Records may also have data and/or documents associated with them. There are subtypes of Records with additional field support, such as Runs. On ingestion, the "type" field determines whether the object is a Record or a subtype. If using a type reserved for one of Record's children, create an instance of that child. """ # Disable the pylint check if and until the team decides to refactor the code def __init__(self, id, type, data=None, # pylint: disable=too-many-arguments curve_sets=None, library_data=None, files=None, user_defined=None): """ Create Record with its id, type, and optional args. Currently, data and files are expected to be lists of dicts. Lists of strings (ex: ['{"name":"foo"}']) won't be read correctly. See the Sina schema section of the documentation for what data and files should contain. :param id: The id of the record. Should be unique within a dataset :param type: The type of record. Some types are reserved for children, see sina.model.RESERVED_TYPES :param data: A dict of dicts representing the Record's data. :param curve_sets: A dict of dicts representing the Record's curve sets. :param library_data: A dict of dicts representing the Record's library data. :param files: A list of dicts representing the Record's files :param user_defined: A dictionary of additional miscellaneous data to store, such as notes. The backend will not index on this. """ self.raw = {} # Items in this block are going to raw behind the scenes (see __setattr__) self.id = id self.type = type self.data = data if data else {} self.curve_sets = curve_sets if curve_sets else {} self.library_data = library_data if library_data else {} self.files = files if files else {} self.user_defined = user_defined if user_defined else {} @property def id(self): """Get or set the Record's id.""" return self['id'] @id.setter @property def type(self): """Get or set the Record's type.""" return self['type'] @type.setter @property def data(self): """Get or set the Record's data dictionary.""" return self['data'] @data.setter @property def curve_sets(self): """Get or set the Record's curve dictionary.""" return self['curve_sets'] @curve_sets.setter def curve_sets(self, curve_sets): """ Set the Record's curve sets to a new dict of curve sets. This only works for rec.curve_sets = {"curveset_1" ...}. Indexing in won't trigger it. """ self['curve_sets'] = curve_sets def get_curve_set(self, name): """ Return the CurveSet object associated with the provided name. This is used to get a CurveSet object to manipulate instead of the raw, clunky dict form. :param name: The name of the CurveSet to return. :returns: the CurveSet within the record associated with the name. 
:raises AttributeError: if no such CurveSet exists """ try: return CurveSet(name, self.curve_sets[name]) except KeyError: raise AttributeError('Record "{}" has no curve set "{}"'.format(self.id, name)) @property def library_data(self): """Get or set the Record's data dictionary.""" return self['library_data'] @library_data.setter @property def files(self): """Get or set the Record's file list.""" return self['files'] @files.setter @property def user_defined(self): """Get or set the Record's user-defined dictionary.""" return self['user_defined'] @user_defined.setter def __getitem__(self, key): """ Get the entry in this record with the given key. A Record object mimics a dictionary in how it's accessed, with the data it represents available within a dictionary called "raw". Here, we reroute ex: foo = my_rec["data"]["spam"] to go through this raw dictionary. Essentially, it becomes foo = my_rec.raw["data"]["spam"]. """ return self.raw[key] def __setitem__(self, key, value): """ Set the entry in this record with the given key. A Record object mimics a dictionary in how it's accessed, with the data it represents available within a dictionary called "raw". Here, we reroute ex: my_rec["data"]["spam"] = 2 to go through this raw dictionary. Essentially, it becomes my_rec.raw["data"]["spam"] = 2. """ self.raw[key] = value def __delitem__(self, key): """ Delete the entry in this record with the given key. A Record object mimics a dictionary in how it's accessed, with the data it represents available within a dictionary called "raw". Here, we reroute ex: del my_rec["data"]["spam"] to go through this raw dictionary. Essentially, it becomes del my_rec.raw["data"]["spam"] """ del self.raw[key] def __repr__(self): """Return a string representation of a model Record.""" return ('Model Record <id={}, type={}>' .format(self.id, self.type)) def add_data(self, name, value, units=None, tags=None): """ Add a data entry to a Record. Will throw an error if that datum is already part of the Record. :param name: The name describing the data (ex: "direction", "volume", "time") :param value: The data's value (ex: "northwest", 12, [0, 1, 3, 6]) :param units: Units for the value. Optional (ex: "cm^3", "seconds") :param tags: List of tags describing this data. Optional (ex: ["inputs", "opt"]) :raises ValueError: if a datum with that name is already part of the record. """ if name in self.data: raise ValueError('Duplicate datum: "{}" is already an entry in Record "{}".' .format(name, self.id)) self.set_data(name, value, units, tags) def set_data(self, name, value, units=None, tags=None): """ Set a data entry for a Record. If that datum doesn't exist, add it. If it does, update it. :param name: The name describing the data (ex: "direction", "volume", "time") :param value: The data's value (ex: "northwest", 12, [0, 1, 3, 6]) :param units: Units for the value. Optional (ex: "cm^3", "seconds") :param tags: List of tags describing this data. Optional (ex: ["inputs", "opt"]) """ datum = {"value": value} if units is not None: datum["units"] = units if tags is not None: datum["tags"] = tags self.data[name] = datum def remove_file(self, uri): """ Remove file info from a Record. Will throw an error if a file is not in the Record. :param uri: The uri that uniquely describes the file. (ex: "/g/g10/doe/foo.txt") """ if uri in self.files: del self.files[uri] def add_file(self, uri, mimetype=None, tags=None): """ Add file info to a Record. Will throw an error if a file with that uri is already recorded in the Record. 
:param uri: The uri that uniquely describes the file. (ex: "/g/g10/doe/foo.txt") :param mimetype: The mimetype of the file. Optional (ex: "text/html") :param tags: List of tags describing this file. Optional (ex: ["post-processing"]) :raises ValueError: if a file with that uri is already recorded in the Record. """ if uri in self.files: raise ValueError('Duplicate file: "{}" is already a file in Record "{}".' .format(uri, self.id)) else: self.set_file(uri, mimetype, tags) def set_file(self, uri, mimetype=None, tags=None): """ Set a file's info for a Record. If that file doesn't exist, add its info. If it does, update it. :param uri: The uri that uniquely describes the file. (ex: "/g/g10/doe/foo.txt") :param mimetype: The mimetype of the file. Optional (ex: "text/html") :param tags: List of tags describing this file. Optional (ex: ["post-processing"]) """ file_info = {} if mimetype is not None: file_info["mimetype"] = mimetype if tags is not None: file_info["tags"] = tags self.files[uri] = file_info def add_curve_set(self, curve_set): """ Add a curve set to the Record. :param curve_set: Either a name for a new CurveSet or a pre-created CurveSet :returns: the CurveSet object created/provided. Users can add curves to it directly. :raises ValueError: if a CurveSet with that name already exists. :raises ValueError: if neither <curve_set_name> nor <curve_set> are supplied. """ curve_set_name = curve_set.name if isinstance(curve_set, CurveSet) else curve_set if curve_set_name in self.curve_sets: raise ValueError('Duplicate curve set: "{}" is already a curve set in Record "{}".' .format(curve_set_name, self.id)) return self.set_curve_set(curve_set) def set_curve_set(self, curve_set): """ Set a curve set within a Record. :param curve_set: Either a name for a new CurveSet or a pre-created CurveSet :returns: the CurveSet object created/provided. Users can add curves to it directly. :raises ValueError: if neither <curve_set_name> nor <curve_set> are supplied. """ if isinstance(curve_set, CurveSet): curve_set_name = curve_set.name curve_raw = curve_set.raw else: curve_set_name = curve_set curve_raw = {} self.curve_sets[curve_set_name] = curve_raw return CurveSet(curve_set_name, self.curve_sets[curve_set_name]) def to_json(self): """ Create a JSON string from a Record. :returns: A JSON string representing this Record """ return json.dumps(self.raw) def _library_data_is_valid(self, library_data, prefix=""): """ Test whether library data is valid. This is called on a library_data arg rather than self.library_data because library_data can occur at multiple levels (the library data of library data, etc). This method will recursively check the validity of any library data nested within the provided library_data arg. :param library_data: A set of data describing a library attached to a record, potentially including library_data of its own. :param prefix: A prefix describing the location of the library_data within a nested structure. Used in warnings to help users find malformed entries. :returns: a list of any warnings, to be used in is_valid() """ warnings = [] if not isinstance(library_data, dict): (warnings.append("Record {}'s {}library_data field must be a dictionary!" .format(self.id, prefix))) else: for library_name, library in library_data.items(): inner_prefix = prefix + library_name + "/" if "data" in library: warnings += self._data_is_valid(library["data"], inner_prefix) # There's no special validation on curve sets, presumably because we # don't object to name collision between curve sets and data. 
if "library_data" in library: warnings += self._library_data_is_valid(library["library_data"], inner_prefix) return warnings def _data_is_valid(self, data, prefix=""): """ Test whether data is valid. This is called on a data arg rather than self.data because data can occur at multiple levels (a record's data, a record's library's data, etc). :param data: A set of data for a record or library. :param prefix: A prefix describing the location of the data within a nested structure. Used in warnings to help users find malformed entries. :returns: a list of any warnings, to be used in is_valid() """ warnings = [] if not isinstance(data, dict): warnings.append("Record {}'s {}data field must be a dictionary!" .format(self.id, prefix)) else: for entry in data: # Check data entry is a dictionary if not isinstance(data[entry], dict): warnings.append("At least one {}data entry belonging to " "Record {} is not a dictionary. " "Value: {}".format(prefix, self.id, entry)) break if "value" not in data[entry]: warnings.append("At least one {}data entry belonging " "to Record {} is missing a value. " "Value: {}".format(prefix, self.id, entry)) break if isinstance(data[entry]['value'], dict): warnings.append("At least one {}data entry belonging " "to Record {} has a dictionary for a value." "Value: {}".format(prefix, self.id, entry)) if isinstance(data[entry]['value'], list): try: (validated_list, scalar_index, string_index) = _is_valid_list( list_of_data=data[entry]["value"]) except ValueError as context: warnings.append(str(context)) break if not validated_list: (warnings.append( "A {}data entry may not have a list of different types. " "They must all be scalars or all strings. Check " "indicies: {}, {}".format(prefix, scalar_index, string_index))) break if (data[entry].get("tags") and (isinstance(data[entry].get("tags"), six.string_types) or not isinstance(data[entry].get("tags"), collections.Sequence))): (warnings.append("At least one {}data value entry belonging " "to Record {} has a malformed tag " "list. Value: {}".format(prefix, self.id, entry))) return warnings # Disable the pylint check if and until the team decides to refactor the code def is_valid(self, print_warnings=None): # pylint: disable=too-many-branches """Test whether a Record's members are formatted correctly. The ingester expects certain types to be reserved, and for data and files to follow a specific format. This method will describe any issues with a Record. :param print_warnings: if true, will print warnings. Warnings are passed to the logger only by default. :returns: A tuple containing true or false if valid for ingestion and a list of warnings. """ warnings = [] # We should issue a warning if type is reserved and we are not # actually a reserved object. This check is removed for now because it # warrants significant code changes in sql/cass modules. # For files/data, we break immediately on finding any error--in # practice these lists can be thousands of entries long, in which case # the error is probably in an importer script (and so present in all # files/data) and doesn't warrant spamming the logger. for file_info in self.files.values(): if not isinstance(file_info, dict): (warnings.append("At least one file entry belonging to " "Record {} is not a dictionary. 
Value: {}" .format(self.id, file_info))) break # Python2 and 3 compatible way of checking if the tags are # a list, tuple, etc (but not a string) if (file_info.get("tags") and (isinstance(file_info.get("tags"), six.string_types) or not isinstance(file_info.get("tags"), collections.Sequence))): (warnings.append("At least one file entry belonging to " "Record {} has a malformed tag list. File: {}" .format(self.id, file_info))) # Test data warnings += self._data_is_valid(self.data) # Test library_data warnings += self._library_data_is_valid(self.library_data) # Test as JSON try: json.dumps(self.raw) except ValueError: (warnings.append("Record {}'s raw is invalid JSON.'".format(self.id))) if not isinstance(self.user_defined, dict): (warnings.append("Record {}'s user_defined section is not a " "dictionary. User_defined: {}".format(self.id, self.user_defined))) if warnings: warnstring = "\n".join(warnings) if print_warnings: print(warnstring) LOGGER.warning(warnstring) return False, warnings return True, warnings class CurveSet(object): """ A set of dependent and independent curves, representing one entry in Record.curve_sets. Wraps an existing entry in a Record's curve sets, allowing convenience methods for users to add new curves. """ def __init__(self, name, raw=None): """ Create a curve set that wraps a curveset dict from a Record. :param name: The name associated with the curveset. :param raw: The dict to wrap. """ if raw is None: raw = {} self.name = name self.raw = raw if self.raw.get("independent") is None: self.raw["independent"] = {} if self.raw.get("dependent") is None: self.raw["dependent"] = {} # For convenient dot notation access self.independent = self.raw["independent"] self.dependent = self.raw["dependent"] def __getitem__(self, key): """ Get the entry in this curve set with the given key. A CurveSet, like a Record, mimics a dictionary in how it's accessed, with the data it represents available within a dictionary called "raw". """ return self.raw[key] def __setitem__(self, key, value): """Set the entry in this curve set with the given key.""" self.raw[key] = value # The large number of arguments is to allow users to build a whole curve conveniently. # pylint: disable=too-many-arguments def _add_curve(self, curve_name, value=None, units=None, tags=None, curve_obj=None, dependent=True): """ Handle the logic for creating a new curve. <curve> is used before <value>, <units>, and <tags>. That is, if a user supplies both a curve and a set of values, the set of values will be applied to the curve, and then the curve will be set (preserving any units or tags) :param curve_name: The name of the curve :param value: The list of scalars making up the curve :param units: The units, if any, associated with <value> :param tags: The tags, if any, associated with this specific curve :param curve_obj: A Curve object, if any, to apply the above to. :param dependent: Whether this curve is dependent. If false, independent. :returns: the curve object created or updated. :raises ValueError: if a curve with that name is already in the CurveSet """ curve_category = "dependent" if dependent else "independent" if curve_obj is None: preexisting_curve = self.raw[curve_category].get(curve_name) if preexisting_curve is None: curve = {} else: raise ValueError("Duplicate curve: {} is already a[n] {} curve in set {}." .format(curve_name, curve_category, self.name)) else: # Extract only the dict! We'll recreate the wrapper. 
curve = curve_obj.raw for var, name in ((value, "value"), (units, "units"), (tags, "tags")): if var is not None: curve[name] = var self.raw[curve_category][curve_name] = curve return Curve(curve_name, curve) def add_dependent(self, curve_name, value=None, units=None, tags=None, curve_obj=None): """ Add a dependent curve to the curve set. <curve> is used before <value>, <units>, and <tags>. That is, if a user supplies both a curve and a set of values, the set of values will be applied to the curve, and then the curve will be set (preserving any units or tags) :param curve_name: The name of the curve :param value: The list of scalars making up the curve :param units: The units, if any, associated with <value> :param tags: The tags, if any, associated with this specific curve :param curve_obj: A Curve object, if any, to apply the above to. If none is provided, a new curve will be made. :raises ValueError: if a dependent curve by that name already exists. """ return self._add_curve(curve_name, value, units, tags, curve_obj, dependent=True) def add_independent(self, curve_name, value=None, units=None, tags=None, curve_obj=None): """ Add an independent curve to the curve set. Params are identical to add_dependent(). :raises ValueError: if an independent curve by that name already exists. """ return self._add_curve(curve_name, value, units, tags, curve_obj, dependent=False) def get_dependent(self, curve_name): """Return the dependent curve with the given name, raise an error if there's none.""" try: return self.dependent[curve_name] except KeyError: raise AttributeError('CurveSet "{}" has no dependent curve "{}"'.format(self.name, curve_name)) def get_independent(self, curve_name): """Return the independent curve with the given name, raise an error if there's none.""" try: return self.independent[curve_name] except KeyError: raise AttributeError('CurveSet "{}" has no independent curve "{}"'.format(self.name, curve_name)) def get(self, curve_name): """ Return the curve with the given name, raise an error if there's none. Note that a CurveSet shouldn't have dependents and independents with the same name, but this method doesn't perform validation; if there is such an overlap, only the independent will be returned. """ try: return self.get_independent(curve_name) except AttributeError: try: return self.get_dependent(curve_name) except AttributeError: raise AttributeError('CurveSet "{}" has no curve "{}"'.format(self.name, curve_name)) @staticmethod def as_dict(curveset): """ Given a CurveSet or dict, return a dict. For when you're not sure whether you have a dictionary or CurveSet and want a dict. If it's a dict, it's returned. Else, the name is dropped and the CurveSet's raw returned. """ if isinstance(curveset, CurveSet): return curveset.raw return curveset @staticmethod def as_curve_set(curveset): """ Given a CurveSet or dict, return a CurveSet. For when you're not sure whether you have a dictionary or CurveSet and want a CurveSet. If it's a CurveSet, it's returned. Else, the dict is used to make an unnamed CurveSet. """ if isinstance(curveset, CurveSet): return curveset return CurveSet(name="<unnamed CurveSet>", raw=curveset) class Curve(object): """A single curve within a CurveSet.""" def __init__(self, name, raw): """Create a curve that wraps a dict entry from a curveset.""" self.name = name self.raw = raw if raw.get("value") is None: self.raw["value"] = [] def __getitem__(self, key): """ Get the entry in this curve with the given key. 
A Curve, like a Record, mimics a dictionary in how it's accessed, with the data it represents available within a dictionary called "raw". """ return self.raw[key] def __setitem__(self, key, value): """Set the entry in this curve with the given key.""" self.raw[key] = value # Disable pylint check to if and until the team decides to address the issue class Relationship(object): # pylint: disable=too-few-public-methods """ A Relationship is a triple describing the relationship between two objects. Every relationship has exactly three things: the id of its object, the id of its subject, and the predicate describing their relationship. A Relationship translates in English to: <subject> <predicate> <object>, ex: Task142 contains Run6249. """ def __init__(self, object_id, subject_id, predicate): """Create Relationship from triple info.""" self.object_id = object_id self.subject_id = subject_id self.predicate = predicate def __repr__(self): """Return a string representation of a model Relationship.""" return ('Model Relationship <object_id={}, subject_id={}, predicate={}>' .format(self.object_id, self.subject_id, self.predicate)) def to_json_dict(self): """ Create an object ready to dump as JSON. Relationship's internal names don't match the schema (ex: we call object object_id to avoid overwriting Python's "object".) This performs the necessary name-swaps. You probably want to use to_json(). :return: A dictionary representing the relationship, ready to dump. """ return {"subject": self.subject_id, "predicate": self.predicate, "object": self.object_id} def to_json(self): """ Create a JSON string from a Relationship. :returns: A JSON string representing this Relationship """ return json.dumps(self.to_json_dict()) class Run(Record): """ A Run is a Record subtype representing one 'finalized' run of some code. More precisely, a run represents a single set of inputs, their resulting outputs, and some amount of metadata. Outputs include scalars and documents. Metadata includes things like the application that generated the run. Runs have several special types of metadata (which are tracked as instance attributes), and hold additional, miscellaneous data in 'user_defined'. """ def __init__(self, id, application, # pylint: disable=too-many-arguments user=None, version=None, user_defined=None, data=None, curve_sets=None, files=None, library_data=None): """Create Run from Record info plus metadata.""" super(Run, self).__init__(id=id, type="run", user_defined=user_defined, data=data, curve_sets=curve_sets, files=files, library_data=library_data) self.application = application self.user = user self.version = version @property def application(self): """Return the Run's application.""" return self['application'] @application.setter def application(self, application): """Set the Run's application.""" self['application'] = application @property def user(self): """Return the Run's user.""" return self['user'] @user.setter def user(self, user): """Set the Run's user.""" self['user'] = user @property def version(self): """Return the Run's version.""" return self['version'] @version.setter def version(self, version): """Set the Run's version.""" self['version'] = version def __repr__(self): """Return a string representation of a model Run.""" return('Model Run <id={}, application={}, user={}, version={}>' .format(self.id, self.application, self.user, self.version)) def _is_valid_list(list_of_data): """ Check if a list of data is valid. Validity means that all entries in the list are either strings or all are scalars. 
:param list_of_data: The list of data to check is valid. :returns: A Tuple consisting of a Boolean and two Integers. The Boolean is True if the list is valid, False otherwise. If False, the two Integers are two indexes of differing types of values (the first being an index of a scalar and the second being a index of a string). If True they are None. """ LOGGER.debug('Checking if list of length %i is valid.', len(list_of_data)) is_scalar = False is_string = False latest_scalar = None latest_string = None for index, list_entry in enumerate(list_of_data): if isinstance(list_entry, numbers.Real): latest_scalar = index is_scalar = True elif isinstance(list_entry, six.string_types): latest_string = index is_string = True else: raise ValueError("List of data contains entry that isn't a " "string or scalar. value: {}, type: {}, index:" " {}.".format(list_entry, type(list_entry), index)) if is_scalar and is_string: LOGGER.debug('Found invalid list.') return (False, latest_scalar, latest_string) return (True, None, None) class _FlatRecord(Record): """ A faux Record used in the final step of insertion. FlatRecord removes the relationship between the record.raw and other data, allowing "hierarchical" Records (see: library_data) to be flattened for insertion into a non- hierarchical backend. These are not meant to be accessed by the user, and live for only a short time, existing purely to separate the responsibility of flattening data from the backend. """ def __getitem__(self, key): """Override Record behavior to avoid raw access.""" return self.__dict__[key] def __setitem__(self, key, value): """Override Record behavior to avoid raw access.""" self.__dict__[key] = value def __delitem__(self, key): """Override Record behavior to avoid raw access.""" del self.__dict__[key] class _FlatRun(Run): """ A faux Run used in the same way as _FlatRecord. Provided only for safety and compatability. Runs are deprecated. """ def __getitem__(self, key): """Override Run behavior to avoid raw access.""" return self.__dict__[key] def __setitem__(self, key, value): """Override Run behavior to avoid raw access.""" self.__dict__[key] = value def __delitem__(self, key): """Override Run behavior to avoid raw access.""" del self.__dict__[key] def flatten_library_content(record): """ Extract all library data, curve_sets, etc. into the path-like form used by backends. Ex: a record that has "library_data": "my_lib": {"runtime": {"value: 223}} would have that added to its "data" field as "my_lib/runtime": {"value: 223}. :returns: A FlatRecord with library data and curve sets brought to the "top level" using path-like naming. The raw is unaltered, meaning it does not strictly match the data. 
""" if not record.library_data: return record old_raw = copy.deepcopy(record.raw) if isinstance(record, Run): record.raw.pop("type") record = _FlatRun(**record.raw) else: record = _FlatRecord(**record.raw) record.raw = old_raw def extract_to_data(library_data, prefix): """Flatten nested library_data up into the toplevel data.""" lib_prefix = prefix for library_name, library in library_data.items(): lib_prefix += (library_name + "/") if "data" in library: for datum_name, datum in library["data"].items(): record["data"][lib_prefix+datum_name] = datum if "curve_sets" in library: for curve_set_name, curve_set in library["curve_sets"].items(): record["curve_sets"][lib_prefix+curve_set_name] = curve_set for curve_type in ["independent", "dependent"]: updated_curves = {} for name in curve_set[curve_type].keys(): updated_curves[lib_prefix+name] = curve_set[curve_type][name] curve_set[curve_type] = updated_curves curve_order = curve_type+"_order" if curve_set.get(curve_order): curve_set[curve_order] = [lib_prefix+x for x in curve_set[curve_order]] if "library_data" in library: extract_to_data(library["library_data"], lib_prefix) extract_to_data(record["library_data"], "") return record def generate_record_from_json(json_input): """ Generate a Record from the json input. :param json_input: A JSON representation of a Record. :raises: ValueError if given invalid json input. """ LOGGER.debug('Generating record from json input: %s', json_input) # Must create record first try: record = Record(id=json_input['id'], type=json_input['type'], user_defined=json_input.get('user_defined'), data=json_input.get('data'), library_data=json_input.get('library_data'), curve_sets=json_input.get('curve_sets'), files=json_input.get('files')) except KeyError as context: msg = 'Missing required key <{}>.'.format(context) LOGGER.error(msg) raise ValueError(msg) # Then set raw to json_input to grab any additional information. record.raw.update({key: val for key, val in json_input.items() if key not in ['id', 'type', 'user_defined', 'data', 'library_data', 'curve_sets', 'files']}) return record def generate_run_from_json(json_input): """ Generate a Run from the json input. :param json_input: A JSON representation of a Run. :raises: ValueError if given invalid json input. """ LOGGER.debug('Generating run from json input: %s', json_input) # Programatically-created Records try: run = Run(id=json_input['id'], user=json_input.get('user'), user_defined=json_input.get('user_defined'), version=json_input.get('version'), application=json_input['application'], data=json_input.get('data'), curve_sets=json_input.get('curve_sets'), files=json_input.get('files')) except KeyError as context: msg = 'Missing required key <{}>.'.format(context) LOGGER.error(msg) raise ValueError(msg) # Then set raw to json_input to grab any additional information. run.raw.update({key: val for key, val in json_input.items() if key not in ['id', 'user', 'user_defined', 'version', 'type', 'application', 'data', 'curve_sets', 'files']}) return run def convert_record_to_run(record): """ Build a Run using a Record and run metadata found in the Record's raw. Given a Record with all the characteristics of a Run (type is "run", "application" field set, etc.), use the Record's raw data to build a Run object instead. :param record: A Record object to build the Run from. :returns: A Run representing the Record plus metadata. :raises ValueError: if given a Record that can't be converted to a Run. 
""" LOGGER.debug('Converting %s to run.', record) if record.type == 'run': return generate_run_from_json(json_input=record.raw) else: msg = ('Record must be of subtype Run to convert to Run. Given ' '{}.'.format(record.id)) LOGGER.warn(msg) raise ValueError(msg)
[ 37811, 4264, 1299, 284, 1154, 626, 11, 12531, 5563, 10162, 278, 262, 28743, 32815, 526, 15931, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 11748, 18931, 198, 11748, 17268, 198, 11748, 3146, 198, 11748, 4866, 198, 198, 11748, ...
2.382883
16,253
import os

os.environ["PMD_CMD"] = "/opt/pmd-bin/bin/run.sh pmd"
os.environ["APP_SRC_DIR"] = "/usr/local/src"
[ 11748, 28686, 198, 198, 418, 13, 268, 2268, 14692, 5868, 35, 62, 34, 12740, 8973, 796, 12813, 8738, 14, 4426, 67, 12, 8800, 14, 8800, 14, 5143, 13, 1477, 9114, 67, 1, 198, 418, 13, 268, 2268, 14692, 24805, 62, 50, 7397, 62, 34720,...
2.037037
54
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from compiler.phases.code_generator import code_generator
from compiler.phases.tokenizer import tokenizer
from compiler.phases.transformer import transformer, visitor
from compiler.phases.parser import parser
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 17050, 13, 746, 1386, 13, 8189, 62, 8612, 1352, 1330, 2438, 62, 8612, 1352, 198, 6738, 17050, 13, 746, 138...
3.394737
76
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import os

from spack import *


class Scale(MakefilePackage):
    """SCALE (Scalable Computing for Advanced Library and Environment) is a
    basic library for weather and climate model of the earth and planets aimed
    to be widely used in various models. The SCALE library is developed with
    co-design by researchers of computational science and computer science."""

    homepage = "https://scale.riken.jp/"
    url = "https://scale.riken.jp/archives/scale-5.4.4.tar.gz"

    maintainers = ['t-yamaura']

    version('5.4.4', sha256='7d0ec4069c15d8b9ec7166f32c9a2eda772d975a8e08e420e6b16891ceebb316', preferred=True)
    version('5.3.6', sha256='3ab0d42cdb16eee568c65b880899e861e464e92088ceb525066c726f31d04848')
    version('5.2.6', sha256='e63141d05810e3f41fc89c9eb15e2319d753832adabdac8f7c8dd7acc0f5f8ed')

    depends_on('openmpi', type=('build', 'link', 'run'))
    depends_on('netcdf-c')
    depends_on('netcdf-fortran')
    depends_on('parallel-netcdf')

    patch('fj-own_compiler.patch', when='%fj')

    parallel = False
[ 2, 15069, 2211, 12, 1238, 2481, 13914, 45036, 3549, 2351, 4765, 11, 11419, 290, 584, 198, 2, 1338, 441, 4935, 34152, 13, 4091, 262, 1353, 12, 5715, 27975, 38162, 9947, 2393, 329, 3307, 13, 198, 2, 198, 2, 30628, 55, 12, 34156, 12, ...
2.535642
491
from sqlalchemy import Column, Integer, DateTime
from datetime import datetime
from av_dashboard.our_base import Base
[ 6738, 44161, 282, 26599, 1330, 29201, 11, 34142, 11, 7536, 7575, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 1196, 62, 42460, 3526, 13, 454, 62, 8692, 1330, 7308, 198 ]
3.806452
31
#from loganalysis.lte.ltelog import LteLog
#from loganalysis.mesh.meshlog import MeshLog
#__all__ = ['lte']
[ 2, 6738, 2604, 20930, 13, 75, 660, 13, 2528, 417, 519, 1330, 406, 660, 11187, 198, 2, 6738, 2604, 20930, 13, 76, 5069, 13, 76, 5069, 6404, 1330, 47529, 11187, 198, 2, 834, 439, 834, 796, 37250, 75, 660, 20520 ]
2.675
40
# Task 6, Variant 2
# Create a game in which the computer picks the name of one of the twelve
# constellations of the zodiac circle, and the player must guess it.
# Andreev F.I.
# 24.05.2016
import random

sozvezdies = random.randrange(12)
sozvezdie = ("Aries", "Taurus", "Gemini", "Cancer", "Leo", "Virgo",
             "Libra", "Scorpio", "Sagittarius", "Capricorn", "Aquarius", "Pisces")

print('Guess the name of one of the twelve constellations of the zodiac circle')
user_sozvezdie = input('Enter your guess: ')
while user_sozvezdie.lower() != sozvezdie[sozvezdies].lower():
    user_sozvezdie = input('Wrong, try again: ')
print('You guessed it!')
input('Press Enter to exit')
[ 2, 12466, 245, 16142, 43666, 16142, 141, 229, 16142, 718, 11, 12466, 240, 16142, 21169, 18849, 16142, 22177, 20375, 362, 220, 201, 198, 2, 12466, 94, 25443, 115, 43666, 16142, 140, 117, 20375, 16843, 12466, 116, 140, 111, 21169, 35072, ...
1.170854
597
from Model.NQModel import NQModel
from Model.LossFn import LossFn
import torch
import time
import sklearn
import datetime
import Model.datasetutils as datasetutils
import Model.tensorboardutils as boardutils
import torch.utils.tensorboard as tensorboard
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from tqdm import tqdm_notebook as tqdm
import transformers

TensorBoardLocation = 'runs/NQ_TIME:{}'.format(int((time.time() - 1583988084) / 60))
print(" ~~~~~~~~~~ Board Location : " + TensorBoardLocation)

epochs = 1  # no loop
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")

writer = tensorboard.SummaryWriter(TensorBoardLocation)

traingen, validgen = datasetutils.get_dataset(device)
print(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dataset Fetched ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ")

num_steps = len(traingen)
val_steps = len(validgen)

model = NQModel()
print(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Model Fetched ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ")

dist_init = dist.init_process_group(dist.Backend.GLOO, init_method='file:shared_file',
                                    world_size=6, rank=0, timeout=datetime.timedelta(0, 15))
print(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Process Group Initiated ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ")

model_parallel = DistributedDataParallel(model)
print(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Parallel Model Created ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ")

optim = transformers.AdamW(model_parallel.parameters())
scheduler = transformers.get_cosine_schedule_with_warmup(optim, num_warmup_steps=100,
                                                         num_training_steps=800, num_cycles=0.5, last_epoch=-1)

AnswerTypes = ['Wrong Ans', 'Short Ans', 'Yes No']
YesNoLabels = ['No', 'Yes']

train()
[ 6738, 9104, 13, 45, 48, 17633, 1330, 399, 48, 17633, 198, 6738, 9104, 13, 43, 793, 37, 77, 1330, 22014, 37, 77, 198, 11748, 28034, 198, 11748, 640, 198, 11748, 1341, 35720, 198, 11748, 4818, 8079, 198, 11748, 9104, 13, 19608, 292, 3...
3.158088
544
#!/usr/bin/env python

# Copyright 2021 NXP
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import runcore
import rundef

__all__ = ["runcore", "rundef"]
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 2, 15069, 33448, 399, 27481, 198, 2, 1439, 2489, 10395, 13, 198, 2, 220, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 347, 10305, 12, 18, 12, 2601, 682, 198, 198, 11748,...
2.597015
67
from aio_forms.fields.string_field import StringField
[ 6738, 257, 952, 62, 23914, 13, 25747, 13, 8841, 62, 3245, 1330, 10903, 15878, 628 ]
3.666667
15
from .log import get_logger, disable
from .profilling import timeit, memit, available_memory


__all__ = ['timeit', 'memit', 'available_memory', 'get_logger', 'disable']
[ 6738, 764, 6404, 1330, 651, 62, 6404, 1362, 11, 15560, 198, 6738, 764, 5577, 4509, 1330, 640, 270, 11, 1066, 270, 11, 1695, 62, 31673, 628, 198, 834, 439, 834, 796, 37250, 2435, 270, 3256, 705, 11883, 270, 3256, 705, 15182, 62, 3167...
3.148148
54
import matplotlib.pyplot as plt
from random import *

import helper

# Configuration
iterations = 40

# Population
population_size = 10

# Function
function_min = -100
function_max = 100

# Genetic variables
mutation_rate = 0.1
elite_size = 2

best_distances = []
points = random_population()

fig, axs = plt.subplots(1, 2)
axs[0].set_title("Points")

for _ in range(iterations):
    points = next_generation(points)
    best_distances.append(calculate_best_distances(points))
    iter_plot()

axs[1].plot(list(range(iterations)), best_distances)
axs[1].set_title("Best fitness (0 = best)")

plt.show()
[ 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 6738, 4738, 1330, 1635, 198, 11748, 31904, 198, 198, 2, 28373, 198, 2676, 602, 796, 2319, 198, 198, 2, 20133, 198, 39748, 62, 7857, 796, 838, 198, 198, 2, 15553, 198, 881...
2.676724
232
# Copyright ©2020-2021 The American University in Cairo and the Cloud V Project.
#
# This file is part of the DFFRAM Memory Compiler.
# See https://github.com/Cloud-V/DFFRAM for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
    import opendbpy as odb
except ImportError:
    print("""
    You need to install opendb (Ahmed Ghazy's fork):
    https://github.com/ax3ghazy/opendb

    Build normally then go to ./build/src/swig/python and run:
    python3 setup.py install

    (On macOS rename the .dylib to .so first)
    """)
    exit(78)

try:
    import click
except ImportError:
    print("You need to install click: python3 -m pip install click")
    exit(78)

from .util import eprint
from .data import Block, Slice, HigherLevelPlaceable, Placeable
from .row import Row

import os
import re
import sys
import math
import pprint
import argparse
import traceback
from pathlib import Path
from functools import reduce


@click.command()
@click.option('-o', '--output', required=True)
@click.option('-l', '--lef', required=True)
@click.option('-t', '--tech-lef', "tlef", required=True)
@click.option('-s', '--size', required=True, help="RAM Size (ex. 8x32, 16x32…)")
@click.option('-r', '--represent', required=False,
              help="File to print out text representation of hierarchy to. (Pass /dev/stderr or /dev/stdout for stderr or stdout.)")
@click.option('-d', '--write-dimensions', required=False,
              help="File to print final width and height to (in the format {width}x{height}")
@click.option('--unplace-fills/--no-unplace-fills', default=False,
              help="Removes placed fill cells to show fill-free placement. Debug option.")
@click.option('--experimental', is_flag=True, default=False, help="Uses the new regexes for BB.wip.v.")
@click.argument('def_file', required=True, nargs=1)
[ 2, 15069, 10673, 42334, 12, 1238, 2481, 383, 1605, 2059, 287, 23732, 290, 262, 10130, 569, 4935, 13, 198, 2, 198, 2, 770, 2393, 318, 636, 286, 262, 360, 5777, 24115, 14059, 3082, 5329, 13, 198, 2, 4091, 3740, 1378, 12567, 13, 785, ...
3.146375
731
from ghost_jukebox import app

import os
import random
import re
import requests
import string

from werkzeug.utils import secure_filename


# get filename from content-disposition header

# This will download the image into the static folder
[ 6738, 10905, 62, 73, 4649, 3524, 1330, 598, 198, 198, 11748, 28686, 198, 11748, 4738, 198, 11748, 302, 198, 11748, 7007, 198, 11748, 4731, 198, 198, 6738, 266, 9587, 2736, 1018, 13, 26791, 1330, 5713, 62, 34345, 628, 198, 2, 651, 2947...
3.983607
61
import math
import time
import os
import subprocess
from datetime import datetime

from pythonosc import dispatcher
from pythonosc import osc_server
from pythonosc import osc_message_builder
from pythonosc import udp_client

import sys
import glob
import serial

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

bind_host = "192.168.0.104"  # listening ip
bind_port = 5000  # listening port

target_host = "127.0.0.1"
target_port = 4545

last_sec = 0
count = 0

client = udp_client.SimpleUDPClient(target_host, target_port)

"""
def alpha_handler(address, alpha, val):
    client.send_message("/muse/elements/alpha_absolute", val)

def beta_handler(address, beta, val):
    client.send_message("/muse/elements/beta_absolute", val)

def delta_handler(address, delta, val):
    client.send_message("/muse/elements/delta_absolute", val)

def theta_handler(address, theta, val):
    client.send_message("/muse/elements/theta_absolute", val)

def gamma_handler(address, gamma, val):
    client.send_message("/muse/elements/gamma_absolute", val)
"""

if __name__ == "__main__":
    dispatcher = dispatcher.Dispatcher()
    dispatcher.map("/muse/eeg", eeg_handler, "EEG")
    """
    dispatcher.map("/muse/elements/alpha_absolute", alpha_handler, "ALPHA")
    dispatcher.map("/muse/elements/beta_absolute", beta_handler, "BETA")
    dispatcher.map("/muse/elements/delta_absolute", delta_handler, "DELTA")
    dispatcher.map("/muse/elements/theta_absolute", theta_handler, "THETA")
    dispatcher.map("/muse/elements/gamma_absolute", gamma_handler, "GAMMA")
    """

    server = osc_server.ThreadingOSCUDPServer((bind_host, bind_port), dispatcher)
    print("Listening on {}".format(server.server_address))
    server.serve_forever()
[ 11748, 10688, 198, 11748, 640, 198, 11748, 28686, 198, 11748, 640, 198, 11748, 850, 14681, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 21015, 17500, 1330, 49952, 198, 6738, 21015, 17500, 1330, 267, 1416, 62, 15388, 198, 6738, 2101...
2.756757
629
#!/usr/bin/env python3

# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
r"""Utility that replaces http_archive with local_repository in WORKSPACE files.

Usage:
  ./kokoro/testutils/replace_http_archive_with_local_reposotory.py \
    -f <workspace directory> \
    -t <tink local base path>

For examples:
  ./kokoro/testutils/replace_http_archive_with_local_reposotory.py \
    -f "cc/WORKSPACE" \
    -t "../../tink"
"""

import argparse
import textwrap


def _replace_http_archive_with_local_repository(workspace_content: str,
                                                tink_base_path: str) -> str:
  """Replaces http_archive with local_repository in workspace_content.

  Args:
    workspace_content: Content of the WORKSPACE file to modify.
    tink_base_path: Path to the local Tink folder.

  Returns:
    The modified WORKSPACE file content.
  """
  # Remove loading of http_archive.
  http_archive_load = textwrap.dedent("""\
      load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
      """)
  workspace_content = workspace_content.replace(http_archive_load, '')
  # Tink Base.
  tink_base_before = textwrap.dedent("""\
      http_archive(
          name = "tink_base",
          urls = ["https://github.com/google/tink/archive/master.zip"],
          strip_prefix = "tink-master/",
      )""")
  tink_base_after = textwrap.dedent("""\
      local_repository(
          name = "tink_base",
          path = "{}",
      )""".format(tink_base_path))
  workspace_content = workspace_content.replace(tink_base_before,
                                                tink_base_after)
  # Tink C++.
  tink_cc_before = textwrap.dedent("""\
      http_archive(
          name = "tink_cc",
          urls = ["https://github.com/google/tink/archive/master.zip"],
          strip_prefix = "tink-master/cc",
      )""")
  tink_cc_after = textwrap.dedent("""\
      local_repository(
          name = "tink_cc",
          path = "{}/cc",
      )""".format(tink_base_path))
  workspace_content = workspace_content.replace(tink_cc_before, tink_cc_after)
  return workspace_content


if __name__ == '__main__':
  main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 2, 15069, 33160, 3012, 11419, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2...
2.578257
1,067
# encoding: utf-8
"""
tokeniser.py

Created by Thomas Mangin on 2015-06-05.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""

from exabgp.configuration.core.format import tokens
from exabgp.protocol.family import AFI

from collections import deque

from exabgp.vendoring import six
[ 2, 21004, 25, 3384, 69, 12, 23, 198, 37811, 198, 30001, 5847, 13, 9078, 198, 198, 41972, 416, 5658, 27609, 259, 319, 1853, 12, 3312, 12, 2713, 13, 198, 15269, 357, 66, 8, 3717, 12, 5539, 1475, 64, 27862, 13, 1439, 2489, 10395, 13,...
3.118182
110
"""MediaPlayer platform for Music Assistant integration.""" import logging from typing import Optional from homeassistant.components.media_player import MediaPlayerEntity from homeassistant.components.media_player.const import ( ATTR_MEDIA_ENQUEUE, MEDIA_TYPE_PLAYLIST, SUPPORT_BROWSE_MEDIA, SUPPORT_CLEAR_PLAYLIST, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SHUFFLE_SET, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP, ) import voluptuous as vol from homeassistant.components.media_player.errors import BrowseError from homeassistant.helpers import config_validation as cv, entity_platform from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, async_dispatcher_send, ) from homeassistant.util.dt import utcnow from musicassistant_client import MusicAssistant from .const import ( DEFAULT_NAME, DISPATCH_KEY_PLAYER_REMOVED, DISPATCH_KEY_PLAYER_UPDATE, DISPATCH_KEY_PLAYERS, DISPATCH_KEY_QUEUE_TIME_UPDATE, DISPATCH_KEY_QUEUE_UPDATE, DOMAIN, ) from .media_source import ( ITEM_ID_SEPERATOR, MASS_URI_SCHEME, PLAYABLE_MEDIA_TYPES, async_create_item_listing, async_create_server_listing, async_parse_uri, ) SUPPORTED_FEATURES = ( SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_STOP | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SHUFFLE_SET | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_VOLUME_MUTE | SUPPORT_PLAY | SUPPORT_PLAY_MEDIA | SUPPORT_VOLUME_STEP | SUPPORT_CLEAR_PLAYLIST | SUPPORT_BROWSE_MEDIA ) MEDIA_TYPE_RADIO = "radio" SERVICE_PLAY_ALERT = "play_alert" ATTR_URL = "url" ATTR_VOLUME = "volume" ATTR_FORCE = "force" ATTR_ANNOUNCE = "announce" _LOGGER = logging.getLogger(__name__) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up Music Assistant MediaPlayer(s) from Config Entry.""" mass = hass.data[DOMAIN][config_entry.entry_id] media_players = {} async def async_update_media_player(player_data): """Add or update Music Assistant MediaPlayer.""" player_id = player_data["player_id"] if player_id not in media_players: # new player! 
if not player_data["available"]: return # we don't add unavailable players media_player = MassPlayer(mass, player_data) media_players[player_id] = media_player async_add_entities([media_player]) else: # update for existing player async_dispatcher_send( hass, f"{DISPATCH_KEY_PLAYER_UPDATE}_{player_id}", player_data ) async def async_remove_media_player(player_id): """Handle player removal.""" for player in media_players.values(): if player.player_id != player_id: continue await player.async_mark_unavailable() # start listening for players to be added or changed by the server component async_dispatcher_connect(hass, DISPATCH_KEY_PLAYERS, async_update_media_player) async_dispatcher_connect( hass, DISPATCH_KEY_PLAYER_REMOVED, async_remove_media_player ) # add service to play alert platform = entity_platform.async_get_current_platform() platform.async_register_entity_service( SERVICE_PLAY_ALERT, { vol.Required(ATTR_URL): cv.string, vol.Optional(ATTR_VOLUME, default=0): cv.positive_float, vol.Optional(ATTR_ANNOUNCE, default=False): cv.boolean, vol.Optional(ATTR_FORCE, default=True): cv.boolean, }, "async_play_alert", ) class MassPlayer(MediaPlayerEntity): """Representation of Music Assistant player.""" def __init__(self, mass: MusicAssistant, player_data: dict): """Initialize MediaPlayer entity.""" self._mass = mass self._player_data = player_data self._queue_data = {} self._queue_cur_item = {} self._cur_image = None async def async_added_to_hass(self): """Register callbacks.""" self.async_on_remove( async_dispatcher_connect( self.hass, f"{DISPATCH_KEY_PLAYER_UPDATE}_{self.player_id}", self.async_update_callback, ) ) self.async_on_remove( async_dispatcher_connect( self.hass, f"{DISPATCH_KEY_QUEUE_UPDATE}", self.async_update_queue_callback, ) ) self.async_on_remove( async_dispatcher_connect( self.hass, f"{DISPATCH_KEY_QUEUE_TIME_UPDATE}", self.async_update_queue_time_callback, ) ) # fetch queue state once queue_data = await self._mass.get_player_queue(self.player_id) self._queue_data = queue_data if queue_data["cur_item"] is not None: self._queue_cur_item = queue_data["cur_item"] async def async_update_callback(self, player_data): """Handle player updates.""" self._player_data = player_data self.async_write_ha_state() async def async_mark_unavailable(self): """Handle player removal, mark player as unavailable (as it might come back).""" self._player_data["available"] = False self.async_write_ha_state() async def async_update_queue_callback(self, queue_data): """Handle player queue updates.""" if queue_data["queue_id"] == self._player_data["active_queue"]: # received queue update for this player (or it's parent) queue_data["updated_at"] = utcnow() self._queue_data = queue_data if queue_data["cur_item"] is not None: self._queue_cur_item = queue_data["cur_item"] else: self._queue_cur_item = {} self._cur_image = await self._mass.get_media_item_image_url( self._queue_cur_item ) self.async_write_ha_state() async def async_update_queue_time_callback(self, queue_data): """Handle player queue time updates.""" if queue_data["queue_id"] == self._player_data["active_queue"]: # received queue time update for this player (or it's parent) self._queue_data["cur_item_time"] = queue_data["cur_item_time"] self._queue_data["updated_at"] = utcnow() self.async_write_ha_state() @property def device_state_attributes(self) -> dict: """Return device specific state attributes.""" return { "player_id": self.player_id, "active_queue": self._queue_data.get("queue_name"), } @property def available(self): """Return True if 
entity is available.""" return self._player_data.get("available") @property def supported_features(self): """Flag media player features that are supported.""" return SUPPORTED_FEATURES @property def device_info(self): """Return the device info.""" manufacturer = self._player_data.get("device_info", {}).get( "manufacturer", DEFAULT_NAME ) model = self._player_data.get("device_info", {}).get("model", "") return { "identifiers": {(DOMAIN, self.unique_id)}, "name": self.name, "manufacturer": manufacturer, "model": model, "via_hub": (DOMAIN, self._mass.server_id), } @property def media_position_updated_at(self): """When was the position of the current playing media valid.""" return self._queue_data.get("updated_at") @property def player_id(self): """Return the id of this player.""" return self._player_data["player_id"] @property def unique_id(self): """Return a unique id for this media player.""" return f"mass_{self.player_id}" @property def should_poll(self): """Return True if entity has to be polled for state.""" return False @property def name(self): """Return device name.""" return self._player_data["name"] @property def media_content_id(self): """Content ID of current playing media.""" if self._queue_cur_item: return self._queue_cur_item["uri"] return None @property def media_content_type(self): """Content type of current playing media.""" return self._queue_cur_item.get("media_type") @property def media_title(self): """Return title currently playing.""" return self._queue_cur_item.get("name") @property def media_album_name(self): """Album name of current playing media (Music track only).""" if self._queue_cur_item and self._queue_cur_item.get("album"): return self._queue_cur_item["album"]["name"] return None @property def media_artist(self): """Artist of current playing media (Music track only).""" if self._queue_cur_item and self._queue_cur_item.get("artists"): artist_names = (i["name"] for i in self._queue_cur_item["artists"]) return "/".join(artist_names) return None @property def media_album_artist(self): """Album artist of current playing media (Music track only).""" if self._queue_cur_item and self._queue_cur_item.get("album"): if self._queue_cur_item["album"].get("artist"): return self._queue_cur_item["album"]["artist"]["name"] return None @property def media_image_url(self): """Image url of current playing media.""" return self._cur_image @property def media_position(self): """Return position currently playing.""" return self._queue_data.get("cur_item_time") @property def media_duration(self): """Return total runtime length.""" return self._queue_cur_item.get("duration") @property def volume_level(self): """Return current volume level.""" return self._player_data["volume_level"] / 100 @property def is_volume_muted(self): """Return mute state.""" return self._player_data["muted"] @property def state(self): """Return current playstate of the device.""" return self._player_data["state"] @property def shuffle(self): """Boolean if shuffle is enabled.""" return self._queue_data.get("shuffle_enabled") async def async_media_play(self): """Send play command to device.""" await self._mass.player_command(self.player_id, "play") async def async_media_pause(self): """Send pause command to device.""" await self._mass.player_command(self.player_id, "pause") async def async_media_stop(self): """Send stop command to device.""" await self._mass.player_command(self.player_id, "stop") async def async_media_next_track(self): """Send next track command to device.""" await 
self._mass.player_command(self.player_id, "next") async def async_media_previous_track(self): """Send previous track command to device.""" await self._mass.player_command(self.player_id, "previous") async def async_set_volume_level(self, volume): """Send new volume_level to device.""" volume = int(volume * 100) await self._mass.player_command( self.player_id, "volume_set", volume_level= volume ) async def async_mute_volume(self, mute=True): """Send mute/unmute to device.""" await self._mass.player_command( self.player_id, "volume_mute", is_muted= mute ) async def async_volume_up(self): """Send new volume_level to device.""" await self._mass.player_command(self.player_id, "volume_up") async def async_volume_down(self): """Send new volume_level to device.""" await self._mass.player_command(self.player_id, "volume_down") async def async_turn_on(self): """Turn on device.""" await self._mass.player_command(self.player_id, "power_on") async def async_turn_off(self): """Turn off device.""" await self._mass.player_command(self.player_id, "power_off") async def async_set_shuffle(self, shuffle: bool): """Set shuffle state.""" await self._mass.player_queue_set_shuffle(self.player_id, shuffle) async def async_clear_playlist(self): """Clear players playlist.""" await self._mass.player_queue_clear(self.player_id) async def async_play_media(self, media_type, media_id, **kwargs): """Send the play_media command to the media player.""" queue_opt = "add" if kwargs.get(ATTR_MEDIA_ENQUEUE) else "play" if media_id.startswith(MASS_URI_SCHEME): # got uri from source/media browser media = await async_parse_uri(media_id) await self._mass.play_media(self.player_id, dict(media), queue_opt) elif media_type in PLAYABLE_MEDIA_TYPES and ITEM_ID_SEPERATOR in media_id: # direct media item # TODO: Can't we just use the URI for the media browser ?! provider, item_id = media_id.split(ITEM_ID_SEPERATOR) await self._mass.play_media( self.player_id, {"media_type": media_type, "item_id": item_id, "provider": provider}, queue_opt, ) elif "/" not in media_id and media_type == MEDIA_TYPE_PLAYLIST: # library playlist by name for playlist in await self._mass.get_library_playlists(): if playlist["name"] == media_id: await self._mass.play_media(self.player_id, playlist, queue_opt) break elif "/" not in media_id and media_type == MEDIA_TYPE_RADIO: # library radio by name for radio in await self._mass.get_library_radios(): if radio["name"] == media_id: await self._mass.play_media(self.player_id, radio, queue_opt) break elif "tts_proxy" in media_id: # TTS broadcast message await self.async_play_alert(media_id, announce=True) else: # assume supported uri await self._mass.play_uri(self.player_id, media_id, queue_opt) async def async_play_alert( self, url: str, volume: int = 0, force: bool = True, announce: bool = False ): """ Play alert (e.g. tts message) on player. Will pause the current playing queue and resume after the alert is played. :param url: Url to the sound effect/tts message that should be played. :param volume: Volume relative to current player's volume. :param force: Play alert even if player is currently powered off. :param announce: Announce the alert by prepending an alert sound. 
""" await self._mass.play_alert( player_id=self.player_id, url=url, volume=volume, force=force, announce=announce ) async def async_browse_media(self, media_content_type=None, media_content_id=None): """Implement the websocket media browsing helper.""" if media_content_type in [None, "library"] or media_content_id.endswith( "/root" ): # main/library listing requested (for this mass instance) return await async_create_server_listing(self._mass) if media_content_id.startswith(MASS_URI_SCHEME): # sublevel requested media_item = await async_parse_uri(media_content_id) if self._mass.server_id != media_item["mass_id"]: # should not happen, but just in case raise BrowseError("Invalid Music Assistance instance") return await async_create_item_listing(self._mass, media_item)
ratio_char_token: 2.319738
token_count: 7,012
# (C) Copyright 2017 Inova Development Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Click context file. Used to communicate between click commands. This defines the context object that is passed to each click command. """ from __future__ import absolute_import, unicode_literals import click_spinner import click import smipyping def xstr(s): # pylint: disable=invalid-name """returns the input or 'None' to allow printing None""" return 'None' if s is None else s class ClickContext(object): """ Manage the click context object """ @property def config_file(self): """ :term:`string`: Name of the config file used. """ return self._config_file @property def db_type(self): """ :term:`string`: Type of db used. This must be one of the strings defined by :data:`~smipyping.config.DB_POSSIBLE_TYPES` """ return self._db_type @property def db_info(self): """ :term:`dict`: Detailed info on db used. Varies by db type. This defines the configuration parameters for opening the db defined by db_type and the directory containing the directory for the config file. """ # TODO this should have been named db_config return self._db_info @property def verbose(self): """ :class:`py:bool`: verbose display flag """ return self._verbose @property def log_level(self): """ :class:`py:string`: string defining the log level """ return self._log_level @property def log_file(self): """ :class:`py:string`: Nname of file if log to file is specified """ return self._log_file @property def log_components(self): """ :class:`py:bool`: verbose display flag """ return self._log_components @property def output_format(self): """ :term:`string`: Output format defined for displaying data. """ return self._output_format @property def targets_tbl(self): """ :term:`targets_tbl file`: Handle of the Targets table. This is initialized late to allow help commands to execute without any database. """ if self._targets_tbl: return self._targets_tbl try: targets_tbl = smipyping.TargetsTable.factory( self.db_info, self.db_type, self.verbose, output_format=self.output_format) self._targets_tbl = targets_tbl return self._targets_tbl except ValueError as ve: raise click.ClickException("Invalid database. Targets table " "load fails. Exception %s" % ve) @property def spinner(self): """ :class:`~click_spinner.Spinner` object. """ return self._spinner def execute_cmd(self, cmd): """ Call the cmd executor defined by cmd with the spinner """ self.spinner.start() try: cmd() finally: self.spinner.stop()
ratio_char_token: 2.418017
token_count: 1,543
#!/usr/bin/env python

import os, sys

if len(sys.argv) > 1:
    os.environ['DBS_STRATEGY'] = sys.argv[1]

import DQMOffline.EGamma.electronDataDiscovery as dbs

os.environ['TEST_HARVESTED_FILE'] = 'rfio:/castor/cern.ch/cms'+dbs.search()[0]
os.system('root -b -l -q electronWget.C')
ratio_char_token: 2.186047
token_count: 129
__all__ = [ "sqliter", ]
ratio_char_token: 1.8125
token_count: 16
import pandas as pd

array = [1,2,4,5,6,7,8,9,12,14,15,18,19,25,29,35,38,40,45,48,49,50]
array_ds = pd.Series(array)
print("Original series")
print(array_ds)

media_ds = array_ds.mean()
print("Mean value of the series")
print(media_ds)

std_ds = array_ds.std()
print("Standard deviation of the series")
print(std_ds)
ratio_char_token: 2.013245
token_count: 151
""" Copyright 2017-2018 Fizyr (https://fizyr.com) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import division import numpy as np import cv2 import h5py from PIL import Image from .transform import change_transform_origin def read_image_bgr(path): """ Read an image in BGR format. Args path: Path to the image. """ hdf5_file=h5py.File(path, 'r') dataset_hdf5=hdf5_file.get('dataset') image=np.array(dataset_hdf5) # image = np.asarray(Image.open(path).convert('RGB')) ## Change from image input to HDF5 input ## import IPython;IPython.embed() return image.copy() def preprocess_image(x, mode='caffe'): """ Preprocess an image by subtracting the ImageNet mean. Args x: np.array of shape (None, None, 3) or (3, None, None). mode: One of "caffe" or "tf". - caffe: will zero-center each color channel with respect to the ImageNet dataset, without scaling. - tf: will scale pixels between -1 and 1, sample-wise. Returns The input with the ImageNet mean subtracted. """ # mostly identical to "https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py" # except for converting RGB -> BGR since we assume BGR already # covert always to float32 to keep compatibility with opencv x = x.astype(np.float32) if mode == 'tf': x /= 127.5 x -= 1. elif mode == 'caffe': x[..., 0] -= 103.939 x[..., 1] -= 116.779 x[..., 2] -= 123.68 return x def adjust_transform_for_image(transform, image, relative_translation): """ Adjust a transformation for a specific image. The translation of the matrix will be scaled with the size of the image. The linear part of the transformation will adjusted so that the origin of the transformation will be at the center of the image. """ depth, height, width, channels = image.shape # print('debug adjust_transform_for_image --> cek depth, height, width, channels') # import IPython;IPython.embed() result = transform # print('debug adjust_transform_for_image --> cek result sebelum relative_translation') # import IPython;IPython.embed() # Scale the translation with the image size if specified. if relative_translation: # result[0:2, 2] *= [width, height] result[0:2, 2] *= [width, height] # Move the origin of transformation. result = change_transform_origin(transform, (0.5 * width, 0.5 * height)) return result # # print('debug adjust_transform_for_image --> cek result SEBELUM change_transform_origin') # # import IPython;IPython.embed() # result = change_transform_origin(transform, (0.5 * width, 0.5 * height)) # print('debug adjust_transform_for_image --> cek result SETELAH change_transform_origin') # import IPython;IPython.embed() # result_array=np.stack((result, result, result, result, result, result, result, result, # result, result, result, result, result, result, result, result, # result, result, result, result, result, result, result, result, # result, result, result, result, result, result, result, result), axis=0) # return result, result_array class TransformParameters: """ Struct holding parameters determining how to apply a transformation to an image. 
Args fill_mode: One of: 'constant', 'nearest', 'reflect', 'wrap' interpolation: One of: 'nearest', 'linear', 'cubic', 'area', 'lanczos4' cval: Fill value to use with fill_mode='constant' relative_translation: If true (the default), interpret translation as a factor of the image size. If false, interpret it as absolute pixels. """ def apply_transform(matrix, image, params): """ Apply a transformation to an image. The origin of transformation is at the top left corner of the image. The matrix is interpreted such that a point (x, y) on the original image is moved to transform * (x, y) in the generated image. Mathematically speaking, that means that the matrix is a transformation from the transformed image space to the original image space. Args matrix: A homogeneous 3 by 3 matrix holding representing the transformation to apply. image: The image to transform. params: The transform parameters (see TransformParameters) """ # output = cv2.warpAffine( # image, # matrix[:2, :], # dsize = (image.shape[1], image.shape[0]), # flags = params.cvInterpolation(), # borderMode = params.cvBorderMode(), # borderValue = params.cval, # ) # return output ## GANTI OUTPUT OPEN CV2 # import IPython;IPython.embed() i=0 output_list=[] output_array=[] # print('DEBUG: cek image dan matrix') # import IPython;IPython.embed() for i in range(len(image)): image_translation=image[i] output = cv2.warpAffine( image_translation, matrix[:2, :], dsize = (image_translation.shape[1], image_translation.shape[0]), flags = params.cvInterpolation(), borderMode = params.cvBorderMode(), borderValue = params.cval, ) # output = cv2.warpAffine( # image, # matrix[:2, :], # dsize = (image[i].shape[1], image[i].shape[0]), # flags = params.cvInterpolation(), # borderMode = params.cvBorderMode(), # borderValue = params.cval, # ) output_list.append(output) # i=i+1 output_array=np.stack((output_list[0], output_list[1], output_list[2], output_list[3], output_list[4], output_list[5], output_list[6], output_list[7], output_list[8], output_list[9], output_list[10], output_list[11], output_list[12], output_list[13], output_list[14], output_list[15], output_list[16], output_list[17], output_list[18], output_list[19], output_list[20], output_list[21], output_list[22], output_list[23], output_list[24], output_list[25], output_list[26], output_list[27], output_list[28], output_list[29], output_list[30], output_list[31]), axis=0) # import IPython;IPython.embed() return output_array def compute_resize_scale(image_shape, min_side=800, max_side=1333): """ Compute an image scale such that the image size is constrained to min_side and max_side. Args min_side: The image's min side will be equal to min_side after resizing. max_side: If after resizing the image's max side is above max_side, resize until the max side is equal to max_side. Returns A resizing scale. """ (rows, cols, _) = image_shape smallest_side = min(rows, cols) # rescale the image so the smallest side is min_side scale = min_side / smallest_side # check if the largest side is now greater than max_side, which can happen # when images have a large aspect ratio largest_side = max(rows, cols) if largest_side * scale > max_side: scale = max_side / largest_side return scale def resize_image(img, min_side=800, max_side=1333): """ Resize an image such that the size is constrained to min_side and max_side. Args min_side: The image's min side will be equal to min_side after resizing. max_side: If after resizing the image's max side is above max_side, resize until the max side is equal to max_side. 
Returns A resized image. """ # # compute scale to resize the image # scale = compute_resize_scale(img.shape, min_side=min_side, max_side=max_side) # # resize the image with the computed scale # img = cv2.resize(img, None, fx=scale, fy=scale) # return img, scale i=0 img_list=[] img_array=[] for i in range(len(img)): # # resize the image with the computed scale scale = compute_resize_scale(img[i].shape, min_side=min_side, max_side=max_side) img[i] = cv2.resize(img[i], None, fx=scale, fy=scale) img_list.append(img[i]) img_array=np.stack((img_list[0], img_list[1], img_list[2], img_list[3], img_list[4], img_list[5], img_list[6], img_list[7], img_list[8], img_list[9], img_list[10], img_list[11], img_list[12], img_list[13], img_list[14], img_list[15], img_list[16], img_list[17], img_list[18], img_list[19], img_list[20], img_list[21], img_list[22], img_list[23], img_list[24], img_list[25], img_list[26], img_list[27], img_list[28], img_list[29], img_list[30], img_list[31]), axis=0) return img_array, scale
ratio_char_token: 2.585257
token_count: 3,595
w_width = 500
w_height = 500
colors = {'grid': (255,255,255),
          'background': (0,0,0),
          'x': (255,0,0),
          'o': (0,255,0),
          'cross': (0,0,255)}
ratio_char_token: 1.57377
token_count: 122
#! /usr/bin/env python # -*- coding: utf-8 -*- """Test for input data (Librispeech corpus).""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import unittest sys.path.append('../../') from librispeech.path import Path from librispeech.input_data import read_audio from utils.measure_time_func import measure_time path = Path( data_path='/n/sd8/inaguma/corpus/librispeech/data', htk_save_path='/n/sd8/inaguma/corpus/librispeech/htk') htk_paths = { 'train100h': path.htk(data_type='train100h'), 'dev_clean': path.htk(data_type='dev_clean'), 'dev_other': path.htk(data_type='dev_other'), 'test_clean': path.htk(data_type='test_clean'), 'test_other': path.htk(data_type='test_other') } wav_paths = { 'train100h': path.wav(data_type='train100h'), 'dev_clean': path.wav(data_type='dev_clean'), 'dev_other': path.wav(data_type='dev_other'), 'test_clean': path.wav(data_type='test_clean'), 'test_other': path.wav(data_type='test_other') } CONFIG = { 'feature_type': 'logmelfbank', 'channels': 40, 'sampling_rate': 16000, 'window': 0.025, 'slide': 0.01, 'energy': False, 'delta': True, 'deltadelta': True } if __name__ == '__main__': unittest.main()
ratio_char_token: 2.38
token_count: 550
import os import sys import yaml import json import signal import argparse from shutil import which from .identify import ( is_json, opposing_file_extension_for, detect_indentation, has_trailing_whitespace, ) from .hash import md5_hash_for default_editors = [ 'nano', 'pico', 'xo', 'emacs', 'vim', 'vi', ] def main(): """ Generates a JSON copy of your YAML files for convenient editing. Also works in reverse, if you like YAML for some reason. """ args = _parse_args() [native_load, native_dump], [foreign_load, foreign_dump] = get_funcs( args.file_name, ) indent = args.indent if indent is None: indent = detect_indentation(args.file_name) or 2 editor = os.getenv('EDITOR') if editor is None: editor = [ed for ed in default_editors if which(ed) is not None][0] trailing = args.trailing_whitespace if trailing is None: trailing = has_trailing_whitespace(args.file_name) new_file_name = os.path.join( '/tmp', md5_hash_for(args.file_name) + opposing_file_extension_for(args.file_name), ) # Prepare for a SIGINT before any files are modified signal.signal(signal.SIGINT, _sigint_handler) _interchange_contents( args.file_name, new_file_name, foreign_dump, native_load, indent, trailing=trailing, ) os.system(f'{editor} {new_file_name}') _interchange_contents( new_file_name, args.file_name, native_dump, foreign_load, indent, trailing=trailing, ) os.remove(new_file_name)
ratio_char_token: 2.22339
token_count: 761
""" Project Euler Problem 64: https://projecteuler.net/problem=64 All square roots are periodic when written as continued fractions. For example, let us consider sqrt(23). It can be seen that the sequence is repeating. For conciseness, we use the notation sqrt(23)=[4;(1,3,1,8)], to indicate that the block (1,3,1,8) repeats indefinitely. Exactly four continued fractions, for N<=13, have an odd period. How many continued fractions for N<=10000 have an odd period? References: - https://en.wikipedia.org/wiki/Continued_fraction """ from math import floor, sqrt def continuous_fraction_period(n: int) -> int: """ Returns the continued fraction period of a number n. >>> continuous_fraction_period(2) 1 >>> continuous_fraction_period(5) 1 >>> continuous_fraction_period(7) 4 >>> continuous_fraction_period(11) 2 >>> continuous_fraction_period(13) 5 """ numerator = 0.0 denominator = 1.0 ROOT = int(sqrt(n)) integer_part = ROOT period = 0 while integer_part != 2 * ROOT: numerator = denominator * integer_part - numerator denominator = (n - numerator**2) / denominator integer_part = int((ROOT + numerator) / denominator) period += 1 return period def solution(n: int = 10000) -> int: """ Returns the count of numbers <= 10000 with odd periods. This function calls continuous_fraction_period for numbers which are not perfect squares. This is checked in if sr - floor(sr) != 0 statement. If an odd period is returned by continuous_fraction_period, count_odd_periods is increased by 1. >>> solution(2) 1 >>> solution(5) 2 >>> solution(7) 2 >>> solution(11) 3 >>> solution(13) 4 """ count_odd_periods = 0 for i in range(2, n + 1): sr = sqrt(i) if sr - floor(sr) != 0: if continuous_fraction_period(i) % 2 == 1: count_odd_periods += 1 return count_odd_periods if __name__ == "__main__": print(f"{solution(int(input().strip()))}")
ratio_char_token: 2.629583
token_count: 791
import hashlib
import base64
from datetime import datetime
ratio_char_token: 4.142857
token_count: 14
# -*- coding: utf-8 -*-

import re
import email
from email.header import decode_header
from email.utils import parsedate_tz, mktime_tz
import imaplib
import datetime

from logger import logging
ratio_char_token: 3.233333
token_count: 60
import csv import datetime import json import os import re import shlex import subprocess import sys from collections import OrderedDict from pathlib import Path from typing import List from typing import Optional from typing import Tuple import pytest from prettytable import PrettyTable from yattag import Doc from nncf.config import NNCFConfig from tests.common.helpers import PROJECT_ROOT from tests.common.helpers import TEST_ROOT BG_COLOR_GREEN_HEX = 'ccffcc' BG_COLOR_YELLOW_HEX = 'ffffcc' BG_COLOR_RED_HEX = 'ffcccc' DIFF_TARGET_MIN_GLOBAL = -0.1 DIFF_TARGET_MAX_GLOBAL = 0.1 DIFF_FP32_MIN_GLOBAL = -1.0 DIFF_FP32_MAX_GLOBAL = 0.1 OPENVINO_DIR = PROJECT_ROOT.parent / 'intel' / 'openvino' if not os.path.exists(OPENVINO_DIR): OPENVINO_DIR = PROJECT_ROOT.parent / 'intel' / 'openvino_2021' ACC_CHECK_DIR = OPENVINO_DIR / 'deployment_tools' / 'open_model_zoo' / 'tools' / 'accuracy_checker' MO_DIR = OPENVINO_DIR / 'deployment_tools' / 'model_optimizer' Tsc = TestSotaCheckpoints @pytest.fixture(autouse=True, scope="class") @pytest.fixture(autouse=True, scope="class") @pytest.fixture(autouse=True, scope="class")
ratio_char_token: 2.62069
token_count: 435
from .hvs_protocol_translator import HvsProtocolTranslator
ratio_char_token: 3.277778
token_count: 18
# populateMaintFee()
# by Allan Niemerg
# Populates a Mysql database with patent maintenance fee data from files found on
# http://www.google.com/googlebooks/uspto.html

import MySQLdb as mdb
ratio_char_token: 3.216667
token_count: 60
#!/usr/bin/python

import glob
import yaml
import os
from os import path
import sys


if __name__ == "__main__":
    main(sys.argv[1:])
ratio_char_token: 2.555556
token_count: 54
VERSION = '0.17.3'
ratio_char_token: 1.9
token_count: 10
if __name__ == '__main__': main()
ratio_char_token: 2.1
token_count: 20
# -*- coding: utf-8 -*- u""" Make canvasNode into static one. Read input/output port into static attributes. """ import sys import textwrap import maya.api.OpenMaya as api import maya.OpenMaya as oldapi import maya.cmds as cmds import FabricEngine.Core import kraken.plugins.maya_plugin.conversion as conv # ================================================================================== MAYA_API_VERSION = oldapi.MGlobal.apiVersion() __author__ = 'yamahigashi' __version__ = '0.0.1' # _TYPE_IDS = 0x001A0002 maya_useNewAPI = True # ================================================================================== # ----------------------------------------------------------------------------- def getClient(): """Gets the Fabric client from the DCC. This ensures that the same client is used, instead of a new one being created each time one is requiredself. Returns: Fabric Client. """ print("get fabric client") contextID = cmds.fabricSplice('getClientContextID') if not contextID: cmds.fabricSplice('constructClient') contextID = cmds.fabricSplice('getClientContextID') options = { 'contextID': contextID, 'guarded': False } client = FabricEngine.Core.createClient(options) print(client) return client
ratio_char_token: 3.25
token_count: 404
import numpy as np from time import time def random_range_integers(low: int, high: int, size: int, seed=0, unique=True) -> list: """ :param low: :param high: :param size: :param seed: :param unique: :return: specified sized list range with unique values, can be have both positive and negative integers. """ np.random.seed(seed) if unique: return list(set(np.random.randint(low=low, high=high, size=size))) else: return list(np.random.randint(low=low, high=high, size=size))
ratio_char_token: 2.420601
token_count: 233
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time, wiringpi

# Constants
SPK_PIN = 5  # GPIO number of the piezoelectric speaker

if __name__ == '__main__':
    main()
ratio_char_token: 1.746988
token_count: 83
import sublime, sublime_plugin, os, re, sys, tempfile import threading, subprocess, http.server, socketserver directory = os.path.dirname(os.path.realpath(__file__)) libs_path = os.path.join(directory, "itdchelper") sublime_text_file = os.path.dirname(sublime.__file__) + r'\sublime_text.exe' if libs_path not in sys.path: sys.path.append(libs_path) threading.Thread(target=HttpServerThread).start()
ratio_char_token: 2.728477
token_count: 151
from django.conf.urls import url

from tsuru_autoscale.datasource import views

urlpatterns = [
    url(r'^$', views.list, name='datasource-list'),
    url(r'^new/$', views.new, name='datasource-new'),
    url(r'^(?P<name>[\w\s-]+)/remove/$', views.remove, name='datasource-remove'),
    url(r'^(?P<name>[\w\s-]+)/$', views.get, name='datasource-get'),
]
ratio_char_token: 2.275641
token_count: 156
#!/usr/bin/env python import cv2 import datetime import json import os import pandas as pd import pims import random import sys import time import termite as trmt if __name__ == '__main__': tracker = GeneralTracker('settings/tracking.json') tracker.track()
ratio_char_token: 3.033708
token_count: 89
from OLD.storage_module.server_data import DiskServerData, FAQPhraseData from OLD.faq_module.provide_faq import migrate_to_class import OLD.universal_module.utils import OLD.universal_module.text from discord.ext import commands from OLD.faq_module import text import logging import typing import sys logger = logging.getLogger("Main") sys.excepthook = OLD.universal_module.utils.log_exception_handler
ratio_char_token: 3.176923
token_count: 130
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc import job_api_pb2 as job__api__pb2 class JobServiceStub(object): """Missing associated documentation comment in .proto file.""" def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.UpdateModel = channel.unary_unary( '/fedscale.JobService/UpdateModel', request_serializer=job__api__pb2.UpdateModelRequest.SerializeToString, response_deserializer=job__api__pb2.UpdateModelResponse.FromString, ) self.Train = channel.unary_unary( '/fedscale.JobService/Train', request_serializer=job__api__pb2.TrainRequest.SerializeToString, response_deserializer=job__api__pb2.TrainResponse.FromString, ) self.Fetch = channel.unary_unary( '/fedscale.JobService/Fetch', request_serializer=job__api__pb2.FetchRequest.SerializeToString, response_deserializer=job__api__pb2.FetchResponse.FromString, ) self.Stop = channel.unary_unary( '/fedscale.JobService/Stop', request_serializer=job__api__pb2.StopRequest.SerializeToString, response_deserializer=job__api__pb2.StopResponse.FromString, ) self.ReportExecutorInfo = channel.unary_unary( '/fedscale.JobService/ReportExecutorInfo', request_serializer=job__api__pb2.ReportExecutorInfoRequest.SerializeToString, response_deserializer=job__api__pb2.ReportExecutorInfoResponse.FromString, ) self.Test = channel.unary_unary( '/fedscale.JobService/Test', request_serializer=job__api__pb2.TestRequest.SerializeToString, response_deserializer=job__api__pb2.TestResponse.FromString, ) class JobServiceServicer(object): """Missing associated documentation comment in .proto file.""" def UpdateModel(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Train(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Fetch(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Stop(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ReportExecutorInfo(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Test(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') # This class is part of an EXPERIMENTAL API. class JobService(object): """Missing associated documentation comment in .proto file.""" @staticmethod @staticmethod @staticmethod @staticmethod @staticmethod @staticmethod
ratio_char_token: 2.494287
token_count: 1,663
""" Source: https://stackoverflow.com/questions/17275334/what-is-a-correct-way-to-filter-different-loggers-using-python-logging """ import logging LOG_LEVEL_MAP = { "info": logging.INFO, "debug": logging.DEBUG, "error": logging.ERROR, "warning": logging.WARNING, "critical": logging.CRITICAL, "off": logging.NOTSET } DEFAULT_CONFIG = { 'level': logging.INFO, 'format': "%(asctime)s.%(msecs)03d (%(name)s) %(levelname)s - %(message)s", "datefmt": "%Y-%m-%d %H:%M:%S" } if __name__ == "__main__": setup_logger(**DEFAULT_CONFIG) log = get_logger("logger") log.info("Test message.")
ratio_char_token: 2.307971
token_count: 276
from typing import List, Tuple, Union, Dict, Any, Optional
import pathlib
import logging
import pandas as pd
from wandb_utils.misc import all_data_df, write_df
import wandb

logger = logging.getLogger(__name__)
ratio_char_token: 3.117647
token_count: 68
#!/usr/bin/python import math def outlierCleaner(predictions, ages, net_worths): """ Clean away the 10% of points that have the largest residual errors (difference between the prediction and the actual net worth). Return a list of tuples named cleaned_data where each tuple is of the form (age, net_worth, error). """ nb_cleaned = int(math.ceil(len(predictions) * 0.1)) cleaned_data = [] #Calculate all residual errors for prediction, age, net_worth in zip(predictions, ages, net_worths): error = (prediction - net_worth)**2 cleaned_data.append((age, net_worth, error)) #Sort with highest error first cleaned_data.sort(key=lambda x: x[2], reverse=True) #Remove highest errors: cleaned_data = cleaned_data[nb_cleaned:] return cleaned_data
ratio_char_token: 2.621951
token_count: 328
# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. common_security_group_default_rule_info = { 'type': 'object', 'properties': { 'from_port': {'type': 'integer'}, 'id': {'type': 'integer'}, 'ip_protocol': {'type': 'string'}, 'ip_range': { 'type': 'object', 'properties': { 'cidr': {'type': 'string'} }, 'additionalProperties': False, 'required': ['cidr'], }, 'to_port': {'type': 'integer'}, }, 'additionalProperties': False, 'required': ['from_port', 'id', 'ip_protocol', 'ip_range', 'to_port'], } create_get_security_group_default_rule = { 'status_code': [200], 'response_body': { 'type': 'object', 'properties': { 'security_group_default_rule': common_security_group_default_rule_info }, 'additionalProperties': False, 'required': ['security_group_default_rule'] } } delete_security_group_default_rule = { 'status_code': [204] } list_security_group_default_rules = { 'status_code': [200], 'response_body': { 'type': 'object', 'properties': { 'security_group_default_rules': { 'type': 'array', 'items': common_security_group_default_rule_info } }, 'additionalProperties': False, 'required': ['security_group_default_rules'] } }
ratio_char_token: 2.32992
token_count: 879
"""Test-discovery helper. Modifies sys.path according to the OS of the environment in which the tests are being run.""" import os, sys if os.name == "nt": # If windows use backslash for subdirectory in os.scandir(): sys.path.append(os.path.abspath(subdirectory.name + "\\")) else: # in TravisCI default env specifically, os.name evaluates to str "posix" for subdirectory in os.scandir(): sys.path.append(os.path.abspath(subdirectory.name+ "/")) print(f"Test-discovery helper ran")
ratio_char_token: 2.91954
token_count: 174
from enum import Enum
ratio_char_token: 3.285714
token_count: 7
import networkx
#import pymysql

import instagram_scrape
ratio_char_token: 3
token_count: 19
# Open3D: www.open3d.org # The MIT License (MIT) # See license file or visit www.open3d.org for details import copy import numpy as np import open3d as o3d if __name__ == "__main__": print("Testing vector in open3d ...") print("") print("Testing o3d.utility.IntVector ...") vi = o3d.utility.IntVector([1, 2, 3, 4, 5]) # made from python list vi1 = o3d.utility.IntVector(np.asarray([1, 2, 3, 4, 5])) # made from numpy array vi2 = copy.copy(vi) # valid copy vi3 = copy.deepcopy(vi) # valid copy vi4 = vi[:] # valid copy print(vi) print(np.asarray(vi)) vi[0] = 10 np.asarray(vi)[1] = 22 vi1[0] *= 5 vi2[0] += 1 vi3[0:2] = o3d.utility.IntVector([40, 50]) print(vi) print(vi1) print(vi2) print(vi3) print(vi4) print("") print("Testing o3d.utility.DoubleVector ...") vd = o3d.utility.DoubleVector([1, 2, 3]) vd1 = o3d.utility.DoubleVector([1.1, 1.2]) vd2 = o3d.utility.DoubleVector(np.asarray([0.1, 0.2])) print(vd) print(vd1) print(vd2) vd1.append(1.3) vd1.extend(vd2) print(vd1) print("") print("Testing o3d.utility.Vector3dVector ...") vv3d = o3d.utility.Vector3dVector([[1, 2, 3], [0.1, 0.2, 0.3]]) vv3d1 = o3d.utility.Vector3dVector(vv3d) vv3d2 = o3d.utility.Vector3dVector(np.asarray(vv3d)) vv3d3 = copy.deepcopy(vv3d) print(vv3d) print(np.asarray(vv3d)) vv3d[0] = [4, 5, 6] print(np.asarray(vv3d)) # bad practice, the second [] will not support slice vv3d[0][0] = -1 print(np.asarray(vv3d)) # good practice, use [] after converting to numpy.array np.asarray(vv3d)[0][0] = 0 print(np.asarray(vv3d)) np.asarray(vv3d1)[:2, :2] = [[10, 11], [12, 13]] print(np.asarray(vv3d1)) vv3d2.append([30, 31, 32]) print(np.asarray(vv3d2)) vv3d3.extend(vv3d) print(np.asarray(vv3d3)) print("") print("Testing o3d.utility.Vector3iVector ...") vv3i = o3d.utility.Vector3iVector([[1, 2, 3], [4, 5, 6]]) print(vv3i) print(np.asarray(vv3i)) print("")
ratio_char_token: 1.948624
token_count: 1,090
from http import HTTPStatus

from starlette.requests import Request
from starlette.responses import JSONResponse

from src.apps.kms.backend.controllers.KmsController import KmsController
from src.contexts.kms.cryptokeys.application.rotate_one.RotateCryptoKeyCommand import RotateCryptoKeyCommand
from src.contexts.kms.cryptokeys.infrastructure.CryptoKeysHttpResponseErrorHandler import \
    JsonResponseErrorHandler
from src.contexts.shared.domain.CommandBus import CommandBus
from src.contexts.shared.domain.errors.DomainError import DomainError
ratio_char_token: 3.611842
token_count: 152
import os, sys

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from treeds import *

if __name__ == "__main__":
    test_tree_plot()
ratio_char_token: 2.42623
token_count: 61
from flask import Flask from flask_dropzone import Dropzone from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class import os app = Flask(__name__) dropzone = Dropzone(app) app.config['SECRET_KEY'] = 'supersecretkeygoeshere' # Dropzone settings app.config['DROPZONE_UPLOAD_MULTIPLE'] = True app.config['DROPZONE_ALLOWED_FILE_CUSTOM'] = True app.config['DROPZONE_ALLOWED_FILE_TYPE'] = 'image/*' app.config['DROPZONE_REDIRECT_VIEW'] = 'results' # where we are redirected in cas of dropzon # Uploads settings base_dir = os.path.abspath(os.path.dirname(__file__)) app.config['UPLOADED_PHOTOS_DEST'] = os.path.join('app', 'static', 'img', 'uploads')#base_dir + ('\\static\\img\\uploads') app.config['INFERRED_PHOTOS_DEST'] = os.path.join('app', 'static', 'img', 'inferred')#base_dir + ('\\static\\img\\inferred') photos = UploadSet('photos', IMAGES) configure_uploads(app, photos) patch_request_class(app) # set maximum file size, default is 16MB from app import views if __name__ == "__main__": app.run()
ratio_char_token: 2.831978
token_count: 369
from player_chair import *
ratio_char_token: 3.857143
token_count: 7
import Nodes.Statements.Iterative.DoWhile def p_do_while_01(p): "iterative_statement : DO compound_statement WHILE LEFT_PAREN expression RIGHT_PAREN" p[0] = Nodes.Statements.Iterative.DoWhile.Node(p[5], p[2]) def p_do_while_02(p): "iterative_statement : DO compound_statement WHILE LEFT_PAREN expression RIGHT_PAREN SEMICOLON" p[0] = Nodes.Statements.Iterative.DoWhile.Node(p[5], p[2]) def p_do_while_03(p): "iterative_statement : DO compound_statement WHILE expression" p[0] = Nodes.Statements.Iterative.DoWhile.Node(p[4], p[2]) def p_do_while_04(p): "iterative_statement : DO compound_statement WHILE expression SEMICOLON" p[0] = Nodes.Statements.Iterative.DoWhile.Node(p[4], p[2])
ratio_char_token: 2.522648
token_count: 287
import sys
import pandas as pd
import xlrd
from mem_leak_detection import Precog
sys.path.append('../')
from datetime import timedelta
ratio_char_token: 3.162791
token_count: 43
# GENERATED BY KOMAND SDK - DO NOT EDIT from .add_user_to_group.action import AddUserToGroup from .add_user_to_groups_by_id.action import AddUserToGroupsById from .create_user.action import CreateUser from .disable_user_account.action import DisableUserAccount from .enable_user_account.action import EnableUserAccount from .force_user_to_change_password.action import ForceUserToChangePassword from .get_group_by_name.action import GetGroupByName from .get_user_info.action import GetUserInfo from .remove_user_from_group.action import RemoveUserFromGroup from .revoke_sign_in_sessions.action import RevokeSignInSessions from .update_user_info.action import UpdateUserInfo
ratio_char_token: 3.438776
token_count: 196
from rest_framework import serializers

from allies.models import Ally
ratio_char_token: 4.294118
token_count: 17
# Copyright (c) 2013 Bull. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from blazar import exceptions from blazar.i18n import _ # oshost plugin related exceptions # floating ip plugin related exceptions # Network plugin related exceptions # Device plugin related exceptions
ratio_char_token: 3.679825
token_count: 228
textures = itk.coocurrence_texture_features_image_filter(image, mask_image=mask, number_of_bins_per_axis=10, histogram_minimum=0, histogram_maximum=4200, neighborhood_radius=6) texture_array = itk.array_view_from_image(textures) view(texture_array[:,:,:,1].reshape(texture_array.shape[:-1]).copy(), mode='z', cmap='jet')
ratio_char_token: 1.515
token_count: 400
# Source : https://leetcode.com/problems/lru-cache/ # Author : zheyuuu # Date : 2020-07-19 ##################################################################################################### # # Design and implement a data structure for Least Recently Used (LRU) cache. It should support the # following operations: get and put. # # get(key) - Get the value (will always be positive) of the key if the key exists in the cache, # otherwise return -1. # put(key, value) - Set or insert the value if the key is not already present. When the cache reached # its capacity, it should invalidate the least recently used item before inserting a new item. # # The cache is initialized with a positive capacity. # # Follow up: # Could you do both operations in O(1) time complexity? # # Example: # # LRUCache cache = new LRUCache( 2 /* capacity */ ); # # cache.put(1, 1); # cache.put(2, 2); # cache.get(1); // returns 1 # cache.put(3, 3); // evicts key 2 # cache.get(2); // returns -1 (not found) # cache.put(4, 4); // evicts key 1 # cache.get(1); // returns -1 (not found) # cache.get(3); // returns 3 # cache.get(4); // returns 4 # #####################################################################################################
ratio_char_token: 3.240506
token_count: 395
from enum import IntEnum, unique @unique
ratio_char_token: 3.307692
token_count: 13
import pytest from evalml.automl import AutoMLSearch from evalml.pipelines.components.transformers import ReplaceNullableTypes from evalml.problem_types import ProblemTypes, is_time_series @pytest.mark.parametrize("input_type", ["pd", "ww"]) @pytest.mark.parametrize("automl_algorithm", ["iterative", "default"]) @pytest.mark.parametrize("problem_type", ProblemTypes.all_problem_types) @pytest.mark.parametrize( "test_description, column_names", [ ( "all null", ["dates", "all_null"], ), # Should result only in Drop Null Columns Transformer ("only null int", ["int_null"]), ("only null bool", ["bool_null"]), ("only null age", ["age_null"]), ("nullable types", ["numerical", "int_null", "bool_null", "age_null"]), ("just nullable target", ["dates", "numerical"]), ], )
ratio_char_token: 2.50289
token_count: 346
#!/usr/bin/env python3

import sys
import os.path

target_disk_image, bootloader = sys.argv[1:]

ext = os.path.splitext(target_disk_image)[-1].lower()
assert(ext in (".dsk", ".do", ".po", ".2mg"))
if ext == ".2mg":
    offset = 64
else:
    offset = 0

with open(bootloader, 'rb') as f:
    boot = f.read()
assert(len(boot) == 512)

with open(target_disk_image, 'rb') as f:
    data = bytearray(f.read())

data[offset:offset+len(boot)] = boot

with open(target_disk_image, 'wb') as f:
    f.write(data)
ratio_char_token: 2.405797
token_count: 207
import numpy as np
import pandas as pd
from scipy import stats
from statsmodels.tsa.api import VAR

from .util import context

from forecaster import retriever, timeseries

df = retriever.get_data('bitcoin')

start_date = pd.to_datetime('2013-12-01')
end_date = pd.to_datetime('2016-02-01')
mask = (df.index.values > start_date) & (df.index.values <= end_date)
date = df.index.values

# Price
price = df['price']
timeseries.metrics(price)
timeseries.plot(date, price, 'Price')

# # Price zscore
# price_zscore = stats.zscore(price)
# timeseries.metrics(price_zscore)
# timeseries.plot(date, price_zscore, 'Price zscore')

# Standardized price
stand_price = timeseries.standardize_laggedly(price)
df['price'] = stand_price
restricted_df = df.dropna()
timeseries.metrics(restricted_df['price'])
timeseries.plot(restricted_df.index.values, restricted_df['price'], 'Standardized price')

# # # Standardized price zscore
# # stand_price_zscore = timeseries.standardize_laggedly(price_zscore)
# # df['price'] = stand_price_zscore
# # restricted_df = df.dropna()
# # timeseries.metrics(restricted_df['price'])
# # timeseries.plot(restricted_df.index.values, restricted_df['price'], 'Standardized price zscore')

# # Stationary price
# stat_price = timeseries.stationarize(price)
# df['price'] = stat_price
# restricted_df = df.dropna()
# timeseries.metrics(restricted_df['price'])
# timeseries.plot(restricted_df.index.values, restricted_df['price'], 'Stationary price')

# # # Stationary price zscore
# # stat_price_zscore = timeseries.stationarize(price_zscore)
# # df['price'] = stat_price_zscore
# # restricted_df = df.dropna()
# # timeseries.metrics(restricted_df['price'])
# # timeseries.plot(restricted_df.index.values, restricted_df['price'], 'Stationary price zscore')

# # Diff price
# diff_price = np.log(price).diff()
# df['price'] = diff_price
# restricted_df = df.dropna()
# timeseries.metrics(restricted_df['price'])
# timeseries.plot(restricted_df.index.values, restricted_df['price'], 'Diff price')

# # Positive reply
# positive = df['positive_reply']
# timeseries.metrics(positive)
# timeseries.plot(date, positive, 'Positive reply')

# # # Positive zscore
# # positive_zscore = stats.zscore(positive)
# # timeseries.metrics(positive_zscore)
# # timeseries.plot(date, positive_zscore, 'Positive zscore')

# # Standardized positive
# stand_positive = timeseries.standardize_laggedly(positive)
# df['positive_reply'] = stand_positive
# restricted_df = df.dropna()
# timeseries.metrics(restricted_df['positive_reply'])
# timeseries.plot(restricted_df.index.values, restricted_df['positive_reply'], 'Standardized positive')

# # # Standardized positive zscore
# # stand_positive_zscore = timeseries.standardize_laggedly(positive_zscore)
# # df['positive_reply'] = stand_positive_zscore
# # restricted_df = df.dropna()
# # timeseries.metrics(restricted_df['positive_reply'])
# # timeseries.plot(restricted_df.index.values, restricted_df['positive_reply'], 'Standardized positive zscore')

# # Stationary positive
# stat_positive = timeseries.stationarize(positive)
# df['positive_reply'] = stat_positive
# restricted_df = df.dropna()
# timeseries.metrics(restricted_df['positive_reply'])
# timeseries.plot(restricted_df.index.values, restricted_df['positive_reply'], 'Stationary positive')

# # # Stationary positive zscore
# # stat_positive_zscore = timeseries.stationarize(positive_zscore)
# # df['positive_reply'] = stat_positive_zscore
# # restricted_df = df.dropna()
# # timeseries.metrics(restricted_df['positive_reply'])
# # timeseries.plot(restricted_df.index.values, restricted_df['positive_reply'], 'Stationary positive zscore')

# # Diff positive
# diff_positive = np.log(positive).diff()
# df['positive_reply'] = diff_positive
# restricted_df = df.dropna()
# timeseries.metrics(restricted_df['positive_reply'])
# timeseries.plot(restricted_df.index.values, restricted_df['positive_reply'], 'Diff positive')
[ 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 629, 541, 88, 1330, 9756, 198, 6738, 9756, 27530, 13, 912, 64, 13, 15042, 1330, 569, 1503, 198, 198, 6738, 764, 22602, 1330, 4732, 198, 198, 6738, 1674, ...
3.051898
1,291
import socket
import subprocess
import sys
from datetime import datetime

subprocess.call('clear', shell=True)

remoteServer = input("enter remote host to start scan: ")
remoteServerIp = socket.gethostbyname(remoteServer)

print("-" * 60)
print("please wait, scanning remote host", remoteServerIp)
print("-" * 60)

t1 = datetime.now()

try:
    for port in range(1, 1025):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        result = sock.connect_ex((remoteServerIp, port))
        if result == 0:
            print("port {} : Open".format(port))
        sock.close()
except KeyboardInterrupt:
    print("exiting program")
    sys.exit()
except socket.gaierror:
    print("hostname could not be resolved, exiting")
    sys.exit()
except socket.error:
    print("couldn't connect to server ")
    sys.exit()

t2 = datetime.now()
total = t2 - t1
print('Scanning complete:', total)
[ 11748, 17802, 198, 11748, 850, 14681, 198, 11748, 25064, 198, 6738, 4818, 8079, 1330, 4818, 8079, 628, 198, 7266, 14681, 13, 13345, 10786, 20063, 3256, 7582, 28, 17821, 8, 198, 198, 47960, 10697, 796, 5128, 7203, 9255, 6569, 2583, 284, ...
2.737805
328
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings

from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat

from .. import models

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union

    T = TypeVar('T')
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]


class ProfileOperations(object):
    """ProfileOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~footprint_monitoring_management_client.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def list_by_subscription(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.ProfileList"]
        """Retrieves the information about all Footprint profiles under a subscription.

        Get all Footprint profiles under a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProfileList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~footprint_monitoring_management_client.models.ProfileList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ProfileList"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-02-01-preview"

        return ItemPaged(
            get_next, extract_data
        )
    list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.FootprintMonitoring/profiles'}  # type: ignore

    def list_by_resource_group(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.ProfileList"]
        """Retrieves the information about all Footprint profiles under a resource group.

        Get all Footprint profiles under a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProfileList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~footprint_monitoring_management_client.models.ProfileList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ProfileList"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-02-01-preview"

        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.FootprintMonitoring/profiles'}  # type: ignore

    def get(
        self,
        resource_group_name,  # type: str
        profile_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.Profile"
        """Retrieves the information about a single Footprint profile.

        Get a Footprint profile resource.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param profile_name: Name of the Footprint profile resource.
        :type profile_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Profile, or the result of cls(response)
        :rtype: ~footprint_monitoring_management_client.models.Profile
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Profile"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-02-01-preview"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'profileName': self._serialize.url("profile_name", profile_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Profile', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.FootprintMonitoring/profiles/{profileName}'}  # type: ignore

    def create_or_update(
        self,
        resource_group_name,  # type: str
        profile_name,  # type: str
        start_delay_milliseconds,  # type: int
        measurement_count,  # type: int
        tags=None,  # type: Optional[Dict[str, str]]
        location=None,  # type: Optional[str]
        description=None,  # type: Optional[str]
        cold_path_sampling_percentage_rate=None,  # type: Optional[float]
        reporting_endpoints=None,  # type: Optional[List[str]]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.Profile"
        """Creates or updates a Footprint profile with the specified properties.

        Creates or updates a Footprint profile resource.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param profile_name: Name of the Footprint profile resource.
        :type profile_name: str
        :param start_delay_milliseconds: The delay in milliseconds that the clients should wait for until they
         start performing measurements.
        :type start_delay_milliseconds: int
        :param measurement_count: The number of measurements to perform.
        :type measurement_count: int
        :param tags: Tags for the resource.
        :type tags: dict[str, str]
        :param location: Region where the Azure resource is located.
        :type location: str
        :param description: The description of the Footprint profile.
        :type description: str
        :param cold_path_sampling_percentage_rate: The default sampling percentage for cold path measurement
         storage.
        :type cold_path_sampling_percentage_rate: float
        :param reporting_endpoints: The endpoints which to upload measurements to.
        :type reporting_endpoints: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Profile, or the result of cls(response)
        :rtype: ~footprint_monitoring_management_client.models.Profile
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Profile"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        _parameters = models.Profile(tags=tags, location=location, description=description, start_delay_milliseconds=start_delay_milliseconds, measurement_count=measurement_count, cold_path_sampling_percentage_rate=cold_path_sampling_percentage_rate, reporting_endpoints=reporting_endpoints)
        api_version = "2020-02-01-preview"
        content_type = kwargs.pop("content_type", "application/json")

        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'profileName': self._serialize.url("profile_name", profile_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_parameters, 'Profile')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Profile', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('Profile', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.FootprintMonitoring/profiles/{profileName}'}  # type: ignore

    def delete(
        self,
        resource_group_name,  # type: str
        profile_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Deletes an existing Footprint profile.

        Deletes a Footprint profile resource.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param profile_name: Name of the Footprint profile resource.
        :type profile_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-02-01-preview"

        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'profileName': self._serialize.url("profile_name", profile_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.FootprintMonitoring/profiles/{profileName}'}  # type: ignore

    def update(
        self,
        resource_group_name,  # type: str
        profile_name,  # type: str
        tags=None,  # type: Optional[Dict[str, str]]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.Profile"
        """Updates an existing Footprint profile resource.

        Updates a Footprint profile resource.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param profile_name: Name of the Footprint profile resource.
        :type profile_name: str
        :param tags: The tags for this resource.
        :type tags: dict[str, str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Profile, or the result of cls(response)
        :rtype: ~footprint_monitoring_management_client.models.Profile
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Profile"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        _parameters = models.ProfilePatch(tags=tags)
        api_version = "2020-02-01-preview"
        content_type = kwargs.pop("content_type", "application/json")

        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'profileName': self._serialize.url("profile_name", profile_name, 'str', max_length=64, min_length=5, pattern=r'^[a-zA-Z0-9]'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_parameters, 'ProfilePatch')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Profile', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.FootprintMonitoring/profiles/{profileName}'}  # type: ignore
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 16529, 35937, 198, 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 17168, 13789, 13, 4091, 13789, 13, 14116, 287, 262, 1628, 6808, 329, 5964, 1321, 13, 19...
2.645208
6,897
from ..service import Service
from ..exception import AppwriteException
[ 6738, 11485, 15271, 1330, 4809, 198, 6738, 11485, 1069, 4516, 1330, 2034, 13564, 16922, 198 ]
4.8
15
# -*- coding: utf-8 -*-

"""Main module."""

import suitable


def apply(api: suitable.api.Api, config: dict, quiet: bool = False) -> dict:
    """ installs dnsmasq """
    results = dict()
    results['pacman'] = api.pacman(name='dnsmasq', state='present')
    if 'dnsmasq_conf' in config.keys():
        results['dnsmasq_conf'] = api.copy(
            dest='/etc/dnsmasq.conf',
            content=config['dnsmasq_conf']
        )
    if 'resolv_conf' in config.keys():
        results['resolv_conf'] = api.copy(
            dest='/etc/resolv.conf',
            content=config['resolv_conf']
        )
    results['service'] = api.service(
        name='dnsmasq', state='started', enabled=True)
    if not quiet:
        print(results)
    return dict(results)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 13383, 8265, 526, 15931, 198, 198, 11748, 11080, 628, 198, 4299, 4174, 7, 15042, 25, 11080, 13, 15042, 13, 32, 14415, 11, 4566, 25, 8633, 11, 5897, 25, 205...
2.185714
350
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import unittest

from . import hello


if '__main__' == __name__:
    unittest.main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 555, 715, 395, 198, 198, 6738, 764, 1330, 23748, 628, 198, 198, 361, 705, 834, 12417, 834, 6, 6624, ...
2.293103
58
import struct

from spherov2.listeners.async_ import CollisionDetected
from spherov2.listeners.core import PowerStates
[ 11748, 2878, 198, 198, 6738, 599, 11718, 85, 17, 13, 4868, 36014, 13, 292, 13361, 62, 1330, 7778, 1166, 11242, 11197, 198, 6738, 599, 11718, 85, 17, 13, 4868, 36014, 13, 7295, 1330, 4333, 42237, 628 ]
3.333333
36
from amadeus import Client, ResponseError

amadeus = Client()

try:
    '''
    What's the airline name for the IATA code BA?
    '''
    response = amadeus.reference_data.airlines.get(airlineCodes='BA')
    # print(response.data)
except ResponseError as error:
    raise error
[ 6738, 716, 671, 385, 1330, 20985, 11, 18261, 12331, 198, 198, 321, 671, 385, 796, 20985, 3419, 198, 198, 28311, 25, 198, 220, 220, 220, 705, 7061, 198, 220, 220, 220, 1867, 338, 262, 18091, 1438, 329, 262, 314, 13563, 2438, 23715, 3...
2.78
100
from itertools import islice

from django.core import mail
from django.contrib.auth import get_user_model
from django.contrib.gis.geos import Polygon, Point
from djmoney.money import Money
from rest_framework.test import APITestCase

from api.models import Contact, Pricing, Product, PricingGeometry, Order, OrderItem, OrderType

UserModel = get_user_model()


class PricingTests(APITestCase):
    """
    Test Pricings
    """
[ 6738, 340, 861, 10141, 1330, 318, 75, 501, 198, 6738, 42625, 14208, 13, 7295, 1330, 6920, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 1330, 651, 62, 7220, 62, 19849, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 70, 271, 13, 46...
3.203008
133
import argparse
import numpy as np
import os
import random

from lm_eval import tasks
from lm_eval.utils import join_iters

EXAMPLE_DIVIDER = "!!@@##@@!! -- Example {i}\n"


if __name__ == "__main__":
    main()
[ 11748, 1822, 29572, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28686, 198, 11748, 4738, 198, 6738, 300, 76, 62, 18206, 1330, 8861, 198, 6738, 300, 76, 62, 18206, 13, 26791, 1330, 4654, 62, 270, 364, 198, 198, 6369, 2390, 16437, 6...
2.730769
78