column            type             min  max
content           string (length)  1    1.05M
input_ids         list (length)    1    883k
ratio_char_token  float64          1    22.9
token_count       int64            1    883k
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from conans import ConanFile, tools
import os
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 369, 504, 1330, 31634, 8979, 11, 4899, 198, 11748, 28686, 628 ]
2.611111
36
""" Compute Dice between test ground truth and predictions from groupwise registration. """ import os import nibabel as nib import glob import numpy as np from core import utils_2d from core.metrics_2d import OverlapMetrics if __name__ == '__main__': gt_path = '../../../../../../dataset/C0T2LGE/label_center_data/test/*label.nii.gz' pred_path = '../../../../../../results/MSCMR/test_predictions_1.5mm_group3_fusion15/*label.nii.gz' pred_names = utils_2d.strsort(glob.glob(pred_path)) gt_names = utils_2d.strsort([name for name in glob.glob(gt_path) if os.path.basename(name).split('_')[1] == 'DE']) pred_gt_names = dict(zip(pred_names, gt_names)) print(pred_gt_names) average_dice = [] myo_dice = [] LV_dice = [] RV_dice = [] for name in pred_names: pred_label = load_nifty(name) one_hot_pred = one_hot_label(pred_label, (0, 200, 500, 600)) gt_label = load_nifty(pred_gt_names[name]) gt_label = np.concatenate([gt for gt in np.dsplit(gt_label, gt_label.shape[-1]) if np.all([np.sum(gt==i) > 0 for i in [200, 500, 600]])], axis=-1) one_hot_gt = one_hot_label(gt_label, (0, 200, 500, 600)) Dice = OverlapMetrics(n_class=4, mode='np') dice = Dice.averaged_foreground_dice(one_hot_gt, one_hot_pred) m_dice = Dice.class_specific_dice(one_hot_gt, one_hot_pred, i=1) l_dice = Dice.class_specific_dice(one_hot_gt, one_hot_pred, i=2) r_dice = Dice.class_specific_dice(one_hot_gt, one_hot_pred, i=3) average_dice.append(dice) myo_dice.append(m_dice) LV_dice.append(l_dice) RV_dice.append(r_dice) print("Average foreground Dice for %s: %.4f" % (os.path.basename(name), dice)) print("Myocardium Dice for %s: %.4f" % (os.path.basename(name), m_dice)) print("LV Dice for %s: %.4f" % (os.path.basename(name), l_dice)) print("RV Dice for %s: %.4f" % (os.path.basename(name), r_dice)) print("Average prediction Dice: %.4f" % np.mean(average_dice)) print("Average myocardium Dice: %.4f" % np.mean(myo_dice)) print("Average LV Dice: %.4f" % np.mean(LV_dice)) print("Average RV Dice: %.4f" % np.mean(RV_dice))
[ 37811, 201, 198, 7293, 1133, 34381, 1022, 1332, 2323, 3872, 290, 16277, 422, 1448, 3083, 9352, 13, 201, 198, 201, 198, 201, 198, 37811, 201, 198, 11748, 28686, 201, 198, 11748, 33272, 9608, 355, 33272, 201, 198, 11748, 15095, 201, 198, ...
2.044287
1,129
"""Main farm access.""" from __future__ import annotations import os from datetime import datetime from typing import Dict, Iterable, Iterator, List, Type, Union from farmos_ext.area import Area from farmos_ext.asset import Asset, Equipment, Planting from farmos_ext.log import (Activity, Birth, Harvest, Input, Log, Maintenance, Medical, Observation, Purchase, Sale, Seeding, SoilTest, Transplanting) from farmos_ext.others import Content, Quantity from farmos_ext.term import Crop, CropFamily, Season, Term, Unit from farmOS import farmOS # pylint: disable=wrong-import-order from farmOS.client import BaseAPI # pylint: disable=wrong-import-order def farm(): """Access to farm with provided credentials.""" return Farm() # pylint: disable=too-many-public-methods def plantings(self, filters: Dict = None) -> Iterable[Planting]: if not filters: filters = {'type': 'planting'} else: filters.update({'type': 'planting'}) return self.assets(filters, Planting)
[ 37811, 13383, 5318, 1895, 526, 15931, 198, 198, 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 11748, 28686, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 19720, 1330, 360, 713, 11, 40806, 540, 11, 40806, 1352, 11, 7343, 11, 5994...
2.708229
401
'''
Testing the CSV iterators
'''
import responses

from tenable.io.v3.base.iterators.explore_iterator import CSVChunkIterator

USERS_BASE_URL = r'https://cloud.tenable.com/api/v3/assets/search'
CSV_TEXT = (
    'created,display_ipv4_address,first_observed,id,'
    'ipv4_addresses,ipv6_addresses,is_deleted,is_licensed,'
    'is_public,last_observed,name,network.id,network.name,'
    'observation_sources,sources,types,updated\n'
    '2021-11-24T13:43:56.709Z,192.12.13.7,2021-11-24T13:43:56.442Z,'
    '"0142df77-dbc4-4706-8456-b756c06ee8a2",192.12.13.7,,false,'
    'false,true,2021-11-24T13:43:56.442Z,192.12.13.7,'
    '"00000000-0000-0000-0000-000000000000",Default,'
    '"test_v3;2021-11-24T13:43:56.442Z;2021-11-24T13:43:56.442Z",'
    'test_v3,host,2021-11-24T13:43:56.709Z\n'
)
CSV_TEXT_2 = (
    'created,display_ipv4_address,first_observed,id,ipv4_addresses,'
    'ipv6_addresses,is_deleted,is_licensed,is_public,last_observed,'
    'name,network.id,network.name,observation_sources,sources,'
    'types,updated\ncreated,display_ipv4_address,first_observed,id,'
    'ipv4_addresses,ipv6_addresses,is_deleted,is_licensed,'
    'is_public,last_observed,name,network.id,network.name,'
    'observation_sources,sources,types,updated\n'
    '2021-11-24T13:43:56.709Z,192.12.13.7,2021-11-24T13:43:56.442Z,'
    '"0142df77-dbc4-4706-8456-b756c06ee8a2",192.12.13.7,,'
    'false,false,true,2021-11-24T13:43:56.442Z,192.12.13.7,'
    '"00000000-0000-0000-0000-000000000000",Default,'
    '"test_v3;2021-11-24T13:43:56.442Z;2021-11-24T13:43:56.442Z",'
    'test_v3,host,2021-11-24T13:43:56.709Z\n'
)
CSV_HEADERS = {
    'Date': 'Wed, 08 Dec 2021 04:42:28 GMT',
    'Content-Type': 'text/csv;charset=UTF-8',
    'Content-Length': '508',
    'Connection': 'keep-alive',
    'Set-Cookie': 'nginx-cloud-site-id=qa-develop; path=/; '
                  'HttpOnly; SameSite=Strict; Secure',
    'X-Request-Uuid': '4d43db5bac4decd79fc198e06a8113bd',
    'X-Continuation-Token': 'fasd563456fghfgfFGHFGHRT',
    'X-Content-Type-Options': 'nosniff',
    'X-Frame-Options': 'DENY',
    'X-Xss-Protection': '1; mode=block',
    'Cache-Control': 'no-store',
    'Strict-Transport-Security': 'max-age=63072000; includeSubDomains',
    'X-Gateway-Site-ID': 'nginx-router-jm8uw-us-east-1-eng',
    'Pragma': 'no-cache',
    'Expect-CT': 'enforce, max-age=86400',
    'X-Path-Handler': 'tenable-io',
}
[ 7061, 6, 198, 44154, 262, 44189, 11629, 2024, 198, 7061, 6, 198, 198, 11748, 9109, 198, 198, 6738, 3478, 540, 13, 952, 13, 85, 18, 13, 8692, 13, 2676, 2024, 13, 20676, 382, 62, 48727, 1330, 44189, 1925, 2954, 37787, 198, 198, 2937, ...
2.050862
1,160
from telegram.ext import *
from telegram import *
import time
[ 6738, 573, 30536, 13, 2302, 1330, 1635, 201, 198, 6738, 573, 30536, 1330, 1635, 201, 198, 11748, 640, 201 ]
3.368421
19
import os
[ 11748, 28686, 628 ]
3.666667
3
import nbviewerbot

if __name__ == "__main__":
    nbviewerbot.cli()
[ 11748, 299, 65, 1177, 263, 13645, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 299, 65, 1177, 263, 13645, 13, 44506, 3419, 198 ]
2.225806
31
import json
import math
import requests
import pandas as pd


def fetch_data(ids):
    '''
    A function to fetch data from the API.

    Parameters:
        ids (list): A list of ids (integers) to fetch

    Returns:
        text (dict): A dictionary where keys are the ids and values are the text
    '''
    results = {}
    # We'll loop over the ids to fetch the text data.
    # We'll split the ids into chunks of 1000 because of the limit of the API.
    # Future work:
    #   we can handle if the connection timed out or any other problem that would happen
    #   we can add some assertion to make sure that ids are valid
    for i in range(math.ceil(len(ids) / 1000)):
        sub_ids = json.dumps(ids[i * 1000:1000 * (i + 1)])
        while True:
            r = requests.post("https://recruitment.aimtechnologies.co/ai-tasks", sub_ids)
            # print(r.status_code)
            if r.status_code == 200:
                results.update(json.loads(r.text))
                break
    print(f"We managed to fetch {len(results)} samples of text.")
    return results


if __name__ == '__main__':
    # Read the ids' file, then fetch data, and write the file to a csv
    source_data = pd.read_csv("files/dialect_dataset.csv")
    text_dict = fetch_data(list(source_data.loc[:, "id"].astype(str)))
    # We'll make sure that we managed to fetch all the ids
    if len(source_data) == len(text_dict):
        source_data.loc[:, "text"] = text_dict.values()
        source_data.to_csv("data/full_dialect_dataset.csv", encoding='utf-8-sig')
[ 11748, 33918, 198, 11748, 10688, 198, 11748, 7007, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 4299, 21207, 62, 7890, 7, 2340, 2599, 198, 220, 220, 220, 705, 7061, 198, 220, 220, 220, 317, 2163, 284, 21207, 1366, 422, 262, 7824, ...
2.351796
668
# tests/test.py
from rw_data_proc.core import *
import unittest
[ 2, 5254, 14, 9288, 13, 9078, 198, 6738, 374, 86, 62, 7890, 62, 36942, 13, 7295, 1330, 1635, 198, 11748, 555, 715, 395, 628, 198 ]
2.64
25
from booking.models import Schedule, ParkingSpace
from datetime import datetime as dt

from django import forms
[ 6738, 25452, 13, 27530, 1330, 19281, 11, 29259, 14106, 198, 6738, 4818, 8079, 1330, 4818, 8079, 355, 288, 83, 198, 198, 6738, 42625, 14208, 1330, 5107, 628, 628, 628, 198 ]
3.933333
30
# -*- coding: utf-8 -*-

# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import uuid

from importlib import reload
from unittest import mock
from unittest.mock import patch

from google.api_core import operation
from google.cloud import aiplatform
from google.cloud.aiplatform import base
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform.compat.types import (
    matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref,
    index_endpoint as gca_index_endpoint,
    index as gca_index,
)
from google.cloud.aiplatform.compat.services import (
    index_endpoint_service_client,
    index_service_client,
)
from google.protobuf import field_mask_pb2
import pytest

# project
_TEST_PROJECT = "test-project"
_TEST_LOCATION = "us-central1"
_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"

# index
_TEST_INDEX_ID = "index_id"
_TEST_INDEX_NAME = f"{_TEST_PARENT}/indexes/{_TEST_INDEX_ID}"
_TEST_INDEX_DISPLAY_NAME = "index_display_name"

# index_endpoint
_TEST_INDEX_ENDPOINT_ID = "index_endpoint_id"
_TEST_INDEX_ENDPOINT_NAME = f"{_TEST_PARENT}/indexEndpoints/{_TEST_INDEX_ENDPOINT_ID}"
_TEST_INDEX_ENDPOINT_DISPLAY_NAME = "index_endpoint_display_name"
_TEST_INDEX_ENDPOINT_DESCRIPTION = "index_endpoint_description"
_TEST_INDEX_DESCRIPTION = "index_description"
_TEST_INDEX_ENDPOINT_VPC_NETWORK = "projects/{}/global/networks/{}".format(
    "12345", "network"
)

_TEST_LABELS = {"my_key": "my_value"}
_TEST_DISPLAY_NAME_UPDATE = "my new display name"
_TEST_DESCRIPTION_UPDATE = "my description update"
_TEST_LABELS_UPDATE = {"my_key_update": "my_value_update"}

# deployment
_TEST_DEPLOYED_INDEX_ID = "deployed_index_id"
_TEST_DEPLOYED_INDEX_DISPLAY_NAME = "deployed_index_display_name"
_TEST_MIN_REPLICA_COUNT = 2
_TEST_MAX_REPLICA_COUNT = 2
_TEST_ENABLE_ACCESS_LOGGING = False
_TEST_RESERVED_IP_RANGES = ["vertex-ai-ip-range-1", "vertex-ai-ip-range-2"]
_TEST_DEPLOYMENT_GROUP = "prod"
_TEST_AUTH_CONFIG_AUDIENCES = ["a", "b"]
_TEST_AUTH_CONFIG_ALLOWED_ISSUERS = [
    "service-account-name-1@project-id.iam.gserviceaccount.com",
    "service-account-name-2@project-id.iam.gserviceaccount.com",
]

# deployment_updated
_TEST_MIN_REPLICA_COUNT_UPDATED = 4
_TEST_MAX_REPLICA_COUNT_UPDATED = 4

# request_metadata
_TEST_REQUEST_METADATA = ()

# Lists
_TEST_INDEX_ENDPOINT_LIST = [
    gca_index_endpoint.IndexEndpoint(
        name=_TEST_INDEX_ENDPOINT_NAME,
        display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
        description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
    ),
    gca_index_endpoint.IndexEndpoint(
        name=_TEST_INDEX_ENDPOINT_NAME,
        display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
        description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
    ),
    gca_index_endpoint.IndexEndpoint(
        name=_TEST_INDEX_ENDPOINT_NAME,
        display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
        description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
    ),
]

# Match
_TEST_QUERIES = [
    [
        -0.11333, 0.48402, 0.090771, -0.22439, 0.034206, -0.55831, 0.041849,
        -0.53573, 0.18809, -0.58722, 0.015313, -0.014555, 0.80842, -0.038519,
        0.75348, 0.70502, -0.17863, 0.3222, 0.67575, 0.67198, 0.26044,
        0.4187, -0.34122, 0.2286, -0.53529, 1.2582, -0.091543, 0.19716,
        -0.037454, -0.3336, 0.31399, 0.36488, 0.71263, 0.1307, -0.24654,
        -0.52445, -0.036091, 0.55068, 0.10017, 0.48095, 0.71104, -0.053462,
        0.22325, 0.30917, -0.39926, 0.036634, -0.35431, -0.42795, 0.46444,
        0.25586, 0.68257, -0.20821, 0.38433, 0.055773, -0.2539, -0.20804,
        0.52522, -0.11399, -0.3253, -0.44104, 0.17528, 0.62255, 0.50237,
        -0.7607, -0.071786, 0.0080131, -0.13286, 0.50097, 0.18824, -0.54722,
        -0.42664, 0.4292, 0.14877, -0.0072514, -0.16484, -0.059798, 0.9895,
        -0.61738, 0.054169, 0.48424, -0.35084, -0.27053, 0.37829, 0.11503,
        -0.39613, 0.24266, 0.39147, -0.075256, 0.65093, -0.20822, -0.17456,
        0.53571, -0.16537, 0.13582, -0.56016, 0.016964, 0.1277, 0.94071,
        -0.22608, -0.021106,
    ]
]
_TEST_NUM_NEIGHBOURS = 1

# All index mocks

# All index_endpoint mocks
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 15069, 33160, 3012, 11419, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 4...
1.887006
2,832
import glob
import os
import re
import sys
import time
import yaml


if __name__ == "__main__":
    with open("config.yml", "r") as config:
        conf = yaml.load(config, Loader=yaml.SafeLoader)
        print("load config")

    reg = []
    for pattern in conf["reg"]:
        print(" " + pattern)
        reg.append(re.compile(pattern))

    vrcdir = os.environ["USERPROFILE"] + "\\AppData\\LocalLow\\VRChat\\VRChat\\"
    logfile = vrcdir + conf["logfile"]
    if len(sys.argv) > 1:
        logfile = sys.argv[1]
    if logfile == vrcdir:
        logfiles = glob.glob(vrcdir + "output_log_*.txt")
        logfiles.sort(key=os.path.getctime, reverse=True)
        logfile = logfiles[0]

    with open(logfile, "r", encoding="utf-8") as f:
        print("open logfile : ", logfile)
        loglines = tail(f, conf["past"])
        for line in loglines:
            for pattern in reg:
                match = pattern.match(line)
                if not match:
                    continue
                message = ""
                for group in match.groups():
                    message = message + group + " "
                print(message)
[ 11748, 15095, 198, 11748, 28686, 198, 11748, 302, 198, 11748, 25064, 198, 11748, 640, 198, 11748, 331, 43695, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 351, 1280, 7203, 11250, 13, 88, ...
2.091241
548
def _find_and_remove_star_table(columns, join_clause):
    """
    Starting from 3 tables we have to deal with the "star-table" effect - a join with a joining table where we
    only wanna know e.g. the count(*) of the third table. In that case we don't need to join the third table -
    we just do a count over the join with the joining table.

    In general, the additional join is not an issue - but it is seen as incorrect by the spider-evaluation and
    therefore we have to remove it.

    Example:
    SELECT T2.concert_name, T2.theme, count(*) FROM singer_in_concert AS T1 JOIN concert AS T2
        ON T1.concert_id = T2.concert_id GROUP BY T2.concert_id   ---> GOOD
    SELECT T1.concert_Name, T1.Theme, count(*) FROM concert AS T1 JOIN singer_in_concert AS T3
        JOIN singer AS T2 GROUP BY T1.concert_ID   ---> BAD, REMOVE "singer" join.
    """
    # unfortunately auto tuple unpacking doesn't work anymore in python 3, therefore this comment:
    # a "column" contains the 3 elements "aggregator", "column name", "table".
    star_tables = list(map(lambda column: column[2], filter(lambda column: column[1] == '*', columns)))

    # remove duplicates
    star_tables = list(set(star_tables))

    assert len(star_tables) <= 1, "The case of having multiple star-joins is currently not supported (and not part of the spider-dataset)"

    if len(star_tables) == 1:
        star_table = star_tables[0]

        # we need to make sure the table we try to remove is not used at any other place - e.g. in the SELECT
        # or in the WHERE clause. Only then can we safely remove it.
        if len(list(filter(lambda column: column[1] != '*' and column[2] == star_table, columns))) == 0:
            # we only remove star-tables if they are the start or end table in the graph.
            # remember, a join_clause tuple looks like this:
            # (start, start_alias, end, end_alias, entry_column, exit_column)
            start_edge = join_clause[0]
            start_edge_from, _, start_edge_to, _, _, _ = start_edge

            end_edge = join_clause[len(join_clause) - 1]
            end_edge_from, _, end_edge_to, _, _, _ = end_edge

            if start_edge_from == star_table:
                if second_table_in_edge_is_available_elsewhere(start_edge_to, join_clause[1:]):
                    return join_clause[1:]

            if end_edge_to == star_table:
                if second_table_in_edge_is_available_elsewhere(end_edge_from, join_clause[:-1]):
                    return join_clause[:-1]

    return join_clause


def second_table_in_edge_is_available_elsewhere(second_table, remaining_edges):
    """
    By removing an edge, we basically remove two tables. If the schema is a "normal" schema, where the edges
    are "A --> B", "B --> C", this is not an issue. If we though have a non-linear schema, like "A --> B",
    "A --> C", we can't just remove the first edge - we would lose B completely! To avoid this we make sure the
    second table in the edge we plan to remove is available in another edge.

    A schema where we have to deal with this issue is e.g. "flight_2", where two relations go from "flights" to "airports".
    """
    for edge in remaining_edges:
        start, _, end, _, _, _ = edge
        if second_table == start or second_table == end:
            return True
    return False
[ 628, 628, 198, 4299, 4808, 19796, 62, 392, 62, 28956, 62, 7364, 62, 11487, 7, 28665, 82, 11, 4654, 62, 565, 682, 2599, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 17962, 422, 513, 8893, 356, 423, 284, 1730, 351, 262, 366, 7364,...
2.682002
1,239
print('Test script')
[ 4798, 10786, 14402, 4226, 11537, 198 ]
3.5
6
import os

from .exceptions import MarkdownError, MetadataError
from .file import ContentFile


# def __next__(self):
#     import ipdb; ipdb.set_trace()
#     for file in self._files:
#         return file
#     raise StopIteration

# def sort(self, key=None, reverse=False):
#     return ContentIterator(self._files, key, reverse)
#
# class ContentIterator(object):
#     def __init__(self, items, key=None, reverse=False):
#         self.items = sorted(items, key=key, reverse=reverse)
#         self.i = 0
#
#     def __iter__(self):
#         return self
#
#     def next(self):
#         if self.i >= len(self.items):
#             raise StopIteration
#
#         item = self.items[self.i]
#         self.i += 1
#
#         return item
#
#     def sorted(self, sort_key):
#         return ContentIterator(self, self.items, sort_key)
#
#
[ 11748, 28686, 198, 198, 6738, 764, 1069, 11755, 1330, 2940, 2902, 12331, 11, 3395, 14706, 12331, 198, 6738, 764, 7753, 1330, 14041, 8979, 628, 198, 220, 220, 220, 1303, 825, 11593, 19545, 834, 7, 944, 2599, 198, 220, 220, 220, 1303, 2...
2.231552
393
#! /usr/bin/python

'''

    Test for session module

'''

import unittest
import uestc_eams

from .mock_server import LoginMockServer
from .utils import HookedMethod, MakeResponse

mock_login = LoginMockServer()
[ 2, 0, 1220, 14629, 14, 8800, 14, 29412, 198, 198, 7061, 6, 628, 220, 220, 220, 6208, 329, 6246, 8265, 198, 198, 7061, 6, 198, 198, 11748, 555, 715, 395, 198, 11748, 334, 395, 66, 62, 68, 4105, 198, 198, 6738, 764, 76, 735, 62, ...
2.725
80
from App.numeros import numeros

if __name__ == "__main__":
    x = int(input("Ingrese el número que desea evaluar: \n"))
    pi = numeros(x)
    pi.parImpar()
[ 6738, 2034, 13, 77, 6975, 418, 1330, 5470, 418, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 2124, 796, 493, 7, 15414, 7203, 27682, 260, 325, 1288, 299, 647, 78, 8358, 748, 18213, 5418, 84, ...
2.289855
69
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools
import itertools

from absl import app

from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest


def gather(strategy, value):
  """Gathers value from all workers.

  This is intended for tests before we implement an official all-gather API.

  Args:
    strategy: a `tf.distribute.Strategy`.
    value: a nested structure of n-dim `tf.distribute.DistributedValue` of
      `tf.Tensor`, or of a `tf.Tensor` if the strategy only has one replica.
      Cannot contain tf.sparse.SparseTensor.

  Returns:
    a (n+1)-dim `tf.Tensor`.
  """
  return nest.map_structure(functools.partial(_gather, strategy), value)


def _gather(strategy, value):
  """Gathers a single value."""
  # pylint: disable=protected-access
  if not isinstance(value, values.DistributedValues):
    value = values.PerReplica([ops.convert_to_tensor(value)])
  if not isinstance(strategy.extended,
                    collective_all_reduce_strategy.CollectiveAllReduceExtended):
    return array_ops.stack(value._values)
  assert len(strategy.extended.worker_devices) == len(value._values)
  inputs = [array_ops.expand_dims_v2(v, axis=0) for v in value._values]
  return strategy.gather(values.PerReplica(inputs), axis=0)
  # pylint: enable=protected-access


def set_logical_devices_to_at_least(device, num):
  """Create logical devices of at least a given number."""
  if num < 1:
    raise ValueError("`num` must be at least 1 not %r" % (num,))
  physical_devices = config.list_physical_devices(device)
  if not physical_devices:
    raise RuntimeError("No {} found".format(device))
  if len(physical_devices) >= num:
    return
  # By default each physical device corresponds to one logical device. We create
  # multiple logical devices for the last physical device so that we have `num`
  # logical devices.
  num = num - len(physical_devices) + 1
  logical_devices = []
  for _ in range(num):
    if device.upper() == "GPU":
      logical_devices.append(
          context.LogicalDeviceConfiguration(memory_limit=2048))
    else:
      logical_devices.append(context.LogicalDeviceConfiguration())
  # Create logical devices from the last device since sometimes the first GPU
  # is the primary graphic card and may have less memory available.
  config.set_logical_device_configuration(physical_devices[-1], logical_devices)


def main(enable_v2_behavior=True, config_logical_devices=True):
  """All-in-one main function for tf.distribute tests."""
  if config_logical_devices:
    app.call_after_init(_set_logical_devices)
  if enable_v2_behavior:
    v2_compat.enable_v2_behavior()
  else:
    v2_compat.disable_v2_behavior()
  # TODO(b/131360402): configure default logical devices.
  multi_process_runner.test_main()


def _op_dependencies(op):
  """Returns the data and control dependencies of a tf.Operation combined."""
  deps = []
  for node in itertools.chain(op.inputs, op.control_inputs):
    if isinstance(node, ops.Tensor):
      node = node.op
    assert isinstance(node, ops.Operation)
    deps.append(node)
  return deps


def topological_sort_operations(operations):
  """Topologically sorts a list of operations.

  This does a topological sort of the operations in a graph. The edges include
  both data dependencies and control dependencies. Note that the edge goes from
  an operation to its dependencies.

  Args:
    operations: a list of tf.Operation in the same graph.

  Returns:
    A map from a tf.Operation to its topological order.
  """
  in_degrees = {}
  for op in operations:
    if op not in in_degrees:
      in_degrees[op] = 0
    for next_op in _op_dependencies(op):
      in_degrees[next_op] = in_degrees.get(next_op, 0) + 1
  nexts = []
  for op, in_degree in in_degrees.items():
    if in_degree == 0:
      nexts.append(op)
  order = {}
  next_order = 0
  while nexts:
    op, nexts = nexts[0], nexts[1:]
    order[op] = next_order
    next_order += 1
    for next_op in _op_dependencies(op):
      in_degrees[next_op] -= 1
      if in_degrees[next_op] == 0:
        nexts.append(next_op)
  assert len(order) == len(operations)
  return order


def _exists_dependency(start, end):
  """Returns whether there exists a dependency chain from start to end."""
  nexts = [start]
  while nexts:
    op, nexts = nexts[0], nexts[1:]
    for next_op in _op_dependencies(op):
      if next_op == end:
        return True
      nexts.append(next_op)
  return False


def assert_sequential_execution(order, operations):
  """Asserts there's a deterministic execution order between the operations.

  Args:
    order: a map from a tf.Operation to its topological order.
    operations: a list of operations that should be executed sequentially. It
      can be given in any order.
  """
  # Topological ordering guarantees that, if there's a dependency from N_a to
  # N_b, then order[N_a] < order[N_b]. If there does exist a path of
  # dependencies among the operations, it always goes from an operation with a
  # smaller topological order to one with a larger topological order.
  # Therefore, we only need to sort the operations by their topological orders,
  # and verify that there's a path of dependency between adjacent pairs.
  operations = sorted(operations, key=lambda op: order[op])
  for i in range(len(operations) - 1):
    if not _exists_dependency(operations[i], operations[i + 1]):
      print(operations[i].graph.as_graph_def())
      raise AssertionError(
          "No dependency between {} and {}. Graph is dumped to stdout.".format(
              operations[i].name, operations[i + 1].name))
[ 2, 15069, 12131, 383, 309, 22854, 37535, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846,...
3.079909
2,190
import csv
[ 11748, 269, 21370, 628, 628 ]
2.8
5
import sys

import numpy as np
import scipy.integrate
import scipy.special

from ._dblquad import dblquad

HAVE_PYGSL = False
try:
    import pygsl.integrate
    import pygsl.sf
    HAVE_PYGSL = True
except ImportError:
    pass
[ 11748, 25064, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 629, 541, 88, 13, 18908, 4873, 198, 11748, 629, 541, 88, 13, 20887, 198, 198, 6738, 47540, 67, 2436, 47003, 1330, 288, 2436, 47003, 198, 198, 7801, 6089, 62, 47, 56, 38, ...
2.5
92
import numpy as np
from numpy.testing import assert_allclose, run_module_suite
from fast_ndimage import (
    median_filter, sobel, convolve, correlate,
    gaussian_filter, gaussian_filter1d,
    uniform_filter, uniform_filter1d)

# TODO: test threading

if __name__ == "__main__":
    run_module_suite()
[ 11748, 299, 32152, 355, 45941, 198, 6738, 299, 32152, 13, 33407, 1330, 6818, 62, 439, 19836, 11, 1057, 62, 21412, 62, 2385, 578, 198, 6738, 3049, 62, 358, 9060, 1330, 357, 198, 220, 220, 220, 14288, 62, 24455, 11, 523, 6667, 11, 306...
2.81982
111
import sys
from itertools import count, islice


def write_sequence(num):
    """Write Recaman's sequence to a text file"""
    filename = "recaman.txt"
    with open(filename, mode="wt", encoding="utf-8") as f:
        f.writelines(f"{r}\n" for r in islice(sequence(), num))


if __name__ == '__main__':
    write_sequence(num=int(sys.argv[1]))
[ 11748, 25064, 198, 6738, 340, 861, 10141, 1330, 954, 11, 318, 75, 501, 628, 198, 198, 4299, 3551, 62, 43167, 7, 22510, 2599, 198, 220, 220, 220, 37227, 16594, 3311, 10546, 338, 8379, 284, 257, 2420, 2393, 37811, 198, 220, 220, 220, ...
2.540146
137
import unittest
from unittest.mock import mock_open, patch

from aoc.d8.main import metadata_sum, supervalue

DATA = "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2\n"

if __name__ == "__main__":
    unittest.main()
[ 11748, 555, 715, 395, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 15290, 62, 9654, 11, 8529, 198, 198, 6738, 257, 420, 13, 67, 23, 13, 12417, 1330, 20150, 62, 16345, 11, 2208, 8367, 198, 198, 26947, 796, 366, 17, 513, 657, 513, ...
2.493976
83
import stko
import pytest

try:
    from openbabel import openbabel
except ImportError:
    openbabel = None
[ 11748, 336, 7204, 198, 11748, 12972, 9288, 198, 198, 28311, 25, 198, 220, 220, 220, 422, 1280, 65, 9608, 1330, 1280, 65, 9608, 198, 16341, 17267, 12331, 25, 198, 220, 220, 220, 1280, 65, 9608, 796, 6045, 628 ]
2.894737
38
from rest_framework import status
from rest_framework.response import Response
from rest_framework.generics import GenericAPIView
from ..permissions import IsAuthenticated
from django.core.cache import cache
from django.conf import settings
from ..authentication import TokenAuthentication
from ..app_settings import (
    MembershipDeclineSerializer,
)
[ 6738, 1334, 62, 30604, 1330, 3722, 198, 6738, 1334, 62, 30604, 13, 26209, 1330, 18261, 198, 6738, 1334, 62, 30604, 13, 8612, 873, 1330, 42044, 2969, 3824, 769, 198, 6738, 11485, 525, 8481, 1330, 1148, 47649, 3474, 198, 6738, 42625, 1420...
4.103448
87
from allennlp.common import Params
from allennlp.data import Instance, Token, Batch
from allennlp.data.fields import TextField
from allennlp.data.samplers import BucketBatchSampler
from allennlp.data.data_loaders import MultiProcessDataLoader

from .sampler_test import SamplerTest
[ 6738, 477, 1697, 34431, 13, 11321, 1330, 2547, 4105, 198, 6738, 477, 1697, 34431, 13, 7890, 1330, 2262, 590, 11, 29130, 11, 347, 963, 198, 6738, 477, 1697, 34431, 13, 7890, 13, 25747, 1330, 8255, 15878, 198, 6738, 477, 1697, 34431, 13...
3.409639
83
import batoid
import numpy as np
import math
from test_helpers import timer, do_pickle, all_obj_diff


if __name__ == '__main__':
    test_properties()
    test_sag()
    test_intersect()
    test_intersect_vectorized()
    test_ne()
    test_fail()
[ 11748, 7365, 1868, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 10688, 198, 6738, 1332, 62, 16794, 364, 1330, 19781, 11, 466, 62, 27729, 293, 11, 477, 62, 26801, 62, 26069, 628, 628, 628, 628, 198, 361, 11593, 3672, 834, 6624, 705,...
2.534653
101
from django.http.response import HttpResponseForbidden

from .models import Counter, VisitLog
from .utils import get_client_ip
[ 6738, 42625, 14208, 13, 4023, 13, 26209, 1330, 367, 29281, 31077, 1890, 37978, 198, 198, 6738, 764, 27530, 1330, 15034, 11, 16440, 11187, 198, 6738, 764, 26791, 1330, 651, 62, 16366, 62, 541, 628, 198 ]
3.685714
35
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from bdb_tools.readers import build_reader

ws_cols = ["ws_item_sk", "ws_sold_date_sk", "ws_quantity"]
item_cols = ["i_item_sk", "i_current_price"]
imp_cols = [
    "imp_item_sk",
    "imp_competitor_price",
    "imp_start_date",
    "imp_end_date",
    "imp_sk",
]
ss_cols = ["ss_item_sk", "ss_sold_date_sk", "ss_quantity"]
[ 2, 198, 2, 15069, 357, 66, 8, 13130, 12, 1238, 1828, 11, 15127, 23929, 44680, 6234, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, ...
2.914826
317
from .utils import TestCase
from .utils import build_and_test_module
from .utils import transpile_source
[ 6738, 764, 26791, 1330, 6208, 20448, 198, 6738, 764, 26791, 1330, 1382, 62, 392, 62, 9288, 62, 21412, 198, 6738, 764, 26791, 1330, 1007, 79, 576, 62, 10459, 628 ]
3.655172
29
from epidemioptim.environments.cost_functions.costs.death_toll_cost import DeathToll
from epidemioptim.environments.cost_functions.costs.gdp_recess_cost import GdpRecess
[ 6738, 24574, 72, 40085, 13, 268, 12103, 13, 15805, 62, 12543, 2733, 13, 15805, 82, 13, 22595, 62, 83, 692, 62, 15805, 1330, 5830, 51, 692, 198, 6738, 24574, 72, 40085, 13, 268, 12103, 13, 15805, 62, 12543, 2733, 13, 15805, 82, 13, ...
3.035714
56
"""70 Binary Tree Level Order Traversal II""" """ Definition of TreeNode: class TreeNode: def __init__(self, val): self.val = val self.left, self.right = None, None """
[ 37811, 2154, 220, 45755, 12200, 5684, 8284, 4759, 690, 282, 2873, 37811, 198, 37811, 198, 36621, 286, 12200, 19667, 25, 198, 4871, 12200, 19667, 25, 198, 220, 220, 220, 825, 11593, 15003, 834, 7, 944, 11, 1188, 2599, 198, 220, 220, 22...
2.60274
73
import typing

import requests
from urllib.parse import urlencode

from .abstract_client import AbstractClient
from yuque_py.exceptions.request_error import RequestError
[ 11748, 19720, 198, 198, 11748, 7007, 198, 6738, 2956, 297, 571, 13, 29572, 1330, 2956, 11925, 8189, 198, 198, 6738, 764, 397, 8709, 62, 16366, 1330, 27741, 11792, 198, 6738, 331, 84, 4188, 62, 9078, 13, 1069, 11755, 13, 25927, 62, 182...
3.717391
46
from dry_rest_permissions.generics import DRYPermissions
from rest_framework import viewsets

from results.models.sports import Sport
from results.serializers.sports import SportSerializer
[ 6738, 5894, 62, 2118, 62, 525, 8481, 13, 8612, 873, 1330, 10560, 56, 5990, 8481, 198, 6738, 1334, 62, 30604, 1330, 5009, 1039, 198, 198, 6738, 2482, 13, 27530, 13, 32945, 1330, 12771, 198, 6738, 2482, 13, 46911, 11341, 13, 32945, 1330...
4.130435
46
import unittest
import os
from pathlib import Path
import numpy as np

path = Path.cwd().parent.parent
os.chdir(path)
from pvsystemprofiler.utilities.equation_of_time import eot_da_rosa, eot_duffie


if __name__ == '__main__':
    unittest.main()
[ 11748, 555, 715, 395, 198, 11748, 28686, 198, 6738, 3108, 8019, 1330, 10644, 198, 11748, 299, 32152, 355, 45941, 198, 6978, 796, 10644, 13, 66, 16993, 22446, 8000, 13, 8000, 198, 418, 13, 354, 15908, 7, 6978, 8, 198, 6738, 279, 85, ...
2.62766
94
from raiden.constants import EMPTY_MERKLE_ROOT
from raiden.tests.utils.factories import HOP1, HOP2, UNIT_SECRETHASH, make_block_hash
from raiden.transfer.events import ContractSendChannelBatchUnlock
from raiden.transfer.node import is_transaction_effect_satisfied
from raiden.transfer.state_change import ContractReceiveChannelBatchUnlock
[ 6738, 9513, 268, 13, 9979, 1187, 1330, 38144, 9936, 62, 29296, 42, 2538, 62, 13252, 2394, 198, 6738, 9513, 268, 13, 41989, 13, 26791, 13, 22584, 1749, 1330, 367, 3185, 16, 11, 367, 3185, 17, 11, 4725, 2043, 62, 23683, 2200, 4221, 11...
3.269231
104
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.utils.channel."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

# Standard Imports

import tensorflow as tf
from tfx.utils import channel
from tfx.utils import types


if __name__ == '__main__':
  tf.test.main()
[ 2, 15069, 13130, 3012, 11419, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, ...
3.665354
254
import configparser
import requests
from bs4 import BeautifulSoup

token = getAccessToken()

# searchMusicArtist("drake")

# print(getArtistID('drake'))

# print(getTopTenSongs('drake'))

# print(scrapeLyricText('drake'))
[ 11748, 4566, 48610, 198, 11748, 7007, 198, 6738, 275, 82, 19, 1330, 23762, 50, 10486, 198, 198, 30001, 796, 651, 15457, 30642, 3419, 198, 198, 2, 12947, 22648, 43020, 7203, 32491, 365, 4943, 198, 198, 2, 4798, 7, 1136, 43020, 2389, 10...
2.855263
76
def comment_dialog(data=None):
    """
    Function takes in a JSON object, and uses the following format:
    https://api.slack.com/dialogs
    Returns created JSON object, then is sent back to Slack.
    """
    text = ""
    state = ""
    project_holder = None
    item_holder = None
    if data is not None:
        if data["type"] == "message_action":
            text = data["message"]["text"] + "\n"
            # get attachment images from the message
            if "attachments" in data["message"]:
                text += "Attachments:\n"
                for att in data["message"]["attachments"]:
                    text += att["title"] + ":\n"
                    if "image_url" in att:
                        text += att["image_url"] + "\n"
            # get files from the message
            if "files" in data["message"]:
                text += "Attach files:\n"
                for file in data["message"]["files"]:
                    text += file["title"] + ":\n"
                    text += file["url_private"] + "\n"
        if data["type"] == "interactive_message":
            if data["callback_id"] == "bot_project":
                label = data["original_message"]["attachments"][0]["fallback"]
                project_holder = [
                    {
                        "label": label,
                        "value": data["actions"][0]["value"]
                    }
                ]
                state = data["actions"][0]["value"]
            elif data["callback_id"] == "bot_item":
                label = data["original_message"]["attachments"][0]["fallback"]
                item_holder = [
                    {
                        "label": label,
                        "value": data["actions"][0]["value"]
                    }
                ]
    return {
        "title": "JamaConnect - Comment",
        "submit_label": "Submit",
        "callback_id": "comment",
        "elements": [
            {
                "label": "Search Projects:",
                "type": "select",
                "name": "project",
                "optional": "true",
                "data_source": "external",
                "selected_options": project_holder
            },
            {
                "label": "Project ID:",
                "type": "select",
                "name": "project_id",
                "optional": "true",
                "data_source": "external",
                "selected_options": project_holder
            },
            {
                "label": "Item ID or Name:",
                "type": "select",
                "name": "item",
                "data_source": "external",
                "min_query_length": 0,
                "selected_options": item_holder
            },
            {
                "type": "textarea",
                "label": "Comment",
                "name": "comment",
                "value": text
            }
        ],
        "state": state
    }
[ 4299, 2912, 62, 38969, 519, 7, 7890, 28, 14202, 2599, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 15553, 2753, 287, 257, 19449, 2134, 11, 290, 3544, 262, 1708, 5794, 25, 198, 220, 220, 220, 3740, 1378, 15042, 13, 6649, 441, 13, ...
1.77512
1,672
"""Test the HaHomematic config flow.""" from typing import Any from unittest.mock import patch from homeassistant import config_entries from homeassistant.components.hahm.config_flow import ( ATTR_BICDOS_RF_ENABLED, ATTR_BICDOS_RF_PORT, ATTR_HMIP_RF_ENABLED, ATTR_HOST, ATTR_HS485D_ENABLED, ATTR_INSTANCE_NAME, ATTR_PASSWORD, ATTR_PORT, ATTR_TLS, ATTR_USERNAME, ATTR_VIRTUAL_DEVICES_ENABLED, IF_BIDCOS_RF_NAME, IF_HMIP_RF_NAME, IF_HS485D_NAME, IF_VIRTUAL_DEVICES_NAME, CannotConnect, InvalidAuth, ) from homeassistant.components.hahm.const import DOMAIN from homeassistant.core import HomeAssistant from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM TEST_INSTANCE_NAME = "pytest" TEST_HOST = "1.1.1.1" TEST_USERNAME = "test-username" TEST_PASSWORD = "test-password"
[ 37811, 14402, 262, 9398, 28718, 368, 1512, 4566, 5202, 526, 15931, 198, 6738, 19720, 1330, 4377, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 8529, 198, 198, 6738, 1363, 562, 10167, 1330, 4566, 62, 298, 1678, 198, 6738, 1363, 562, 101...
2.283854
384
from functools import partial


def register_as_decorator(func):
    """
    Register extensions, transforms, or addons function as decorator.
    """
    return wrapper
[ 6738, 1257, 310, 10141, 1330, 13027, 628, 198, 198, 4299, 7881, 62, 292, 62, 12501, 273, 1352, 7, 20786, 2599, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 17296, 18366, 11, 31408, 11, 393, 751, 684, 2163, 355, 11705, 1352, 13, 19...
3.166667
54
#|-----------------------------------------------------------------------------
#|  This source code is provided under the Apache 2.0 license              --
#|  and is provided AS IS with no warranty or guarantee of fit for purpose. --
#|  See the project's LICENSE.md for details.                              --
#|  Copyright (C) 2017-2020 Refinitiv. All rights reserved.                 --
#|-----------------------------------------------------------------------------

#!/usr/bin/env python
""" Simple example of outputting Market Price JSON data using Websockets with authentication """

import sys
import time
import getopt
import requests
import socket
import json
import websocket
import threading
from threading import Thread, Event
from requests.packages.urllib3.exceptions import InsecureRequestWarning

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

# Global Default Variables
app_id = '555'
auth_hostname = '127.0.0.1'
auth_port = '8443'
hostname = '127.0.0.1'
password = ''
position = socket.gethostbyname(socket.gethostname())
token = ''
user = ''
port = '15000'

# Global Variables
web_socket_app = None
web_socket_open = False


def process_message(ws, message_json):
    """ Parse at high level and output JSON of message """
    message_type = message_json['Type']

    if message_type == "Refresh":
        if 'Domain' in message_json:
            message_domain = message_json['Domain']
            if message_domain == "Login":
                process_login_response(ws, message_json)
    elif message_type == "Ping":
        pong_json = {'Type': 'Pong'}
        ws.send(json.dumps(pong_json))
        print("SENT:")
        print(json.dumps(pong_json, sort_keys=True, indent=2, separators=(',', ':')))


def process_login_response(ws, message_json):
    """ Send item request """
    send_market_price_request(ws)


def send_market_price_request(ws):
    """ Create and send simple Market Price request """
    mp_req_json = {
        'ID': 2,
        'Key': {
            'Name': 'TRI.N',
        },
    }
    ws.send(json.dumps(mp_req_json))
    print("SENT:")
    print(json.dumps(mp_req_json, sort_keys=True, indent=2, separators=(',', ':')))


def on_message(ws, message):
    """ Called when message received, parse message into JSON for processing """
    print("RECEIVED: ")
    message_json = json.loads(message)
    print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))

    for singleMsg in message_json:
        process_message(ws, singleMsg)


def on_error(ws, error):
    """ Called when websocket error has occurred """
    print(error)


def on_close(ws, close_status_code, close_msg):
    """ Called when websocket is closed """
    global web_socket_open
    web_socket_open = False
    print("WebSocket Closed")


def on_open(ws):
    """ Called when handshake is complete and websocket is open, send login """
    print("WebSocket successfully connected!")
    global web_socket_open
    web_socket_open = True


if __name__ == "__main__":
    # Get command line parameters
    try:
        opts, args = getopt.getopt(sys.argv[1:], "", ["help", "hostname=", "port=", "app_id=", "user=", "password=", "position=", "auth_hostname=", "auth_port="])
    except getopt.GetoptError:
        print('Usage: market_price_authentication.py [--hostname hostname] [--port port] [--app_id app_id] [--user user] [--password password] [--position position] [--auth_hostname auth_hostname] [--auth_port auth_port] [--help]')
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("--help"):
            print('Usage: market_price_authentication.py [--hostname hostname] [--port port] [--app_id app_id] [--user user] [--password password] [--position position] [--auth_hostname auth_hostname] [--auth_port auth_port] [--help]')
            sys.exit(0)
        elif opt in ("--hostname"):
            hostname = arg
        elif opt in ("--port"):
            port = arg
        elif opt in ("--app_id"):
            app_id = arg
        elif opt in ("--user"):
            user = arg
        elif opt in ("--password"):
            password = arg
        elif opt in ("--position"):
            position = arg
        elif opt in ("--auth_hostname"):
            auth_hostname = arg
        elif opt in ("--auth_port"):
            auth_port = arg

    # Send login info for authentication token
    print("Sending authentication request...")
    r = requests.post('https://{}:{}/getToken'.format(auth_hostname, auth_port),
                      data={'username': user, 'password': password}, verify=True)
    auth_json = r.json()
    print("RECEIVED:")
    print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))

    if auth_json['success'] is True:
        token = r.cookies['AuthToken']
        print('Authentication Succeeded. Received AuthToken: {}'.format(token))
        cookie = "AuthToken={};AuthPosition={};applicationId={};".format(token, position, app_id)

        # Start websocket handshake
        ws_address = "ws://{}:{}/WebSocket".format(hostname, port)
        print("Connecting to WebSocket " + ws_address + " ...")
        web_socket_app = websocket.WebSocketApp(ws_address, on_message=on_message,
                                                on_error=on_error,
                                                on_close=on_close,
                                                subprotocols=['tr_json2'],
                                                cookie=cookie)
        web_socket_app.on_open = on_open

        # Event loop
        wst = threading.Thread(target=web_socket_app.run_forever)
        wst.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            web_socket_app.close()
    else:
        print('Authentication failed')
[ 2, 91, 10097, 32501, 198, 2, 91, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 770, 2723, 2438, 318, 2810, 739, 262, 24843, 362, 13, 15, 5964, 220, 220, 220, 220, 220, 1377, 198, 2, 91, 220, 290, 318, 2810, 7054, 3180, ...
2.378892
2,473
#!/usr/bin/python3


if __name__ == '__main__':
    main()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 628, 220, 220, 220, 220, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1388, 3419 ]
2.032258
31
from unittest import TestCase
from eskedit.kmethods import *
[ 6738, 555, 715, 395, 1330, 6208, 20448, 198, 6738, 1658, 9091, 270, 13, 74, 24396, 82, 1330, 1635, 628 ]
3.263158
19
from django.urls import reverse_lazy, reverse
from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect
from requests.auth import HTTPBasicAuth
from .models import User, Node
from .forms import CustomUserCreationForm, UserCreationForm
from django.views.generic import ListView
from django.views.generic.edit import UpdateView
from django.views import View
from django.views import generic
import requests
from users.serializers import *
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.mixins import LoginRequiredMixin
import json
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 9575, 62, 75, 12582, 11, 9575, 198, 6738, 42625, 14208, 13, 19509, 23779, 1330, 651, 62, 15252, 62, 273, 62, 26429, 11, 8543, 11, 367, 29281, 31077, 11, 367, 29281, 31077, 7738, 1060, 198, 6738,...
3.639053
169
#from weakref import WeakValueDictionary
import random, operator, weakref


def format_service_group(group):
    """pretty prints the group"""
    rstr = '%s [%s]'
    if group.cover is not None:  # Spy for IntSec
        return rstr % (group.cover, group.cover.firm)
    elif group.spyon is not None:
        return rstr % (group.spyon, group.spyon.firm)
    else:
        return rstr % (group, group.firm)


def build_skill_table(skill):
    """makes an nx2 table of the skill's specs where n = len(skill.specs)"""
    table = [[spec, skill[spec]] for spec in skill]
    table.sort(key=lambda row: row[0])
    if 'Energy Weapons' not in skill:
        table.append(['________________________', '__'])
        table.append(['________________________', '__'])
        table.append(['________________________', '__'])
        table.append(['________________________', '__'])
    return table
[ 2, 6738, 4939, 5420, 1330, 28788, 11395, 35, 14188, 198, 11748, 4738, 11, 10088, 11, 4939, 5420, 198, 198, 4299, 5794, 62, 15271, 62, 8094, 7, 8094, 2599, 198, 197, 37811, 37784, 20842, 262, 1448, 37811, 198, 197, 81, 2536, 796, 705, ...
2.938406
276
import asyncio
import unittest

from moex.service import Cbr, Moex


if __name__ == '__main__':
    unittest.main()
[ 11748, 30351, 952, 198, 11748, 555, 715, 395, 198, 198, 6738, 6941, 1069, 13, 15271, 1330, 327, 1671, 11, 4270, 1069, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 555, 715, 395, 13, 12...
2.636364
44
import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px

st.title("Relatório de Aula")

df = pd.read_csv('data/emocoes.csv')
agg = pd.read_csv('data/agg.csv')

Engajado = df[df['emocao'] == 'Engajado']
Engajado_agg = Engajado.groupby(['emocao', 'pessoa']).size().reset_index(name='size')
Engajado_agg = Engajado_agg.sort_values(by=['size'], ascending=False)

emotions_count = df.value_counts('emocao').reset_index()

login_blocks = generate_login_block()
password = login(login_blocks)

drive_block = st.empty()
google_drive = drive_block.text_input('Link da aula para processamento', '')
id_block = st.empty()

if google_drive != '':
    drive_block.empty()
    id_block.text("ID da Aula processada: 182916f6-756d-40d6-95fc-3283ba5efdf8")

if is_authenticated(password):
    id_block.empty()
    drive_block.empty()
    clean_blocks(login_blocks)
    st.balloons()
    graph_columns()
elif password:
    st.info("Aula não encontrada. Por favor, insira um ID válido.")
[ 11748, 4269, 18250, 355, 336, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 7110, 306, 13, 42712, 355, 279, 87, 198, 198, 301, 13, 7839, 7203, 6892, 26646, 78, 390, 317, 4712, 4943, 198, 7568, ...
2.531486
397
import argparse
import json
import os
from os import listdir
from os.path import isfile, join


if __name__ == '__main__':
    main()
[ 11748, 1822, 29572, 198, 11748, 33918, 198, 11748, 28686, 198, 6738, 28686, 1330, 1351, 15908, 198, 6738, 28686, 13, 6978, 1330, 318, 7753, 11, 4654, 628, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220...
3.022222
45
col = 0
row = 0
header = ""
headers = []
border_size = -1

while col < 1 or col > 100:
    col = input("How many columns do you want (1 to 100)? ")
    col = int(col)

while row < 1 or row > 100:
    row = input("How many rows do you want (1 to 100)? ")
    row = int(row)

while header != "Y" and header != "N":
    header = input("Do you want headers? Y/N ")

# If headers are wanted, give them names
if header == "Y":
    header = True
    for n in range(col):
        headers.append(input("Header #" + str(n + 1) + ": "))
else:
    header = False

while border_size < 0 or border_size > 5:
    border_size = input("Enter a number for border size 1 to 5 ")
    border_size = int(border_size)

# DEMOOOOOO
table = Table(col, row, headers, border_size)
table.make_table()

table.add_headers(["1", "2", "4"])
print("Here are your current headers: ")
print(table.get_headers())
print("Here is your current border size: ")
print(table.get_border_size())
table.make_table()

table.delete_cols(3, ["1", "2", "4"])
print("Here are your headers now: ")
print(table.get_headers())
print("Let's check your column count: ")
print(table.get_col_count())

# table.delete_cols(4)  # should throw error

table.set_row_count(3)
table.add_rows(5)
print("Row count should be 8 because I just set it to 3 and added 5: ")
print(table.get_row_count())
[ 198, 4033, 220, 220, 220, 220, 220, 796, 657, 198, 808, 220, 220, 220, 220, 220, 796, 657, 198, 25677, 220, 220, 796, 13538, 198, 50145, 220, 796, 17635, 198, 20192, 62, 7857, 796, 532, 16, 198, 198, 4514, 951, 1279, 352, 393, 951...
2.667992
503
from .. import db
from .base import BaseModel
[ 6738, 11485, 1330, 20613, 198, 6738, 764, 8692, 1330, 7308, 17633, 628 ]
3.916667
12
import dataclasses as dc
from typing import Any, Dict, Iterable, List, Optional

from loguru import logger

from mate3.field_values import FieldValue, ModelValues
from mate3.read import AllModelReads
from mate3.sunspec.fields import IntegerField
from mate3.sunspec.model_base import Model
from mate3.sunspec.models import (
    ChargeControllerConfigurationModel,
    ChargeControllerModel,
    FLEXnetDCConfigurationModel,
    FLEXnetDCRealTimeModel,
    FXInverterConfigurationModel,
    FXInverterRealTimeModel,
    OutBackModel,
    OutBackSystemControlModel,
    RadianInverterConfigurationModel,
    SinglePhaseRadianInverterRealTimeModel,
    SplitPhaseRadianInverterRealTimeModel,
)
from mate3.sunspec.values import (
    ChargeControllerConfigurationValues,
    ChargeControllerValues,
    FLEXnetDCConfigurationValues,
    FLEXnetDCRealTimeValues,
    FXInverterConfigurationValues,
    FXInverterRealTimeValues,
    OPTICSPacketStatisticsValues,
    OutBackSystemControlValues,
    OutBackValues,
    RadianInverterConfigurationValues,
    SinglePhaseRadianInverterRealTimeValues,
    SplitPhaseRadianInverterRealTimeValues,
)


class DeviceValues:
    """
    This is basically a way for storing state (i.e. current values) about all devices. It's the main interface for
    users to access values etc.
    """

    def _get_single_device(self, name: str) -> ModelValues:
        """
        Helper function so that e.g. if there's only one charge controller in self.charge_controllers, you can
        call self.charge_controller to get it.
        """
        devices = getattr(self, f"{name}s")
        if len(devices) != 1:
            raise RuntimeError(
                (
                    f"Must be one, and only one, {name} device to be able to use `{name}` attribute - but there are "
                    f"{len(devices)}"
                )
            )
        return list(devices.values())[0]

    def update(self, all_reads: AllModelReads) -> None:
        """
        This is the key method, and is used to update the state of the devices with new values.
        """
        # Update mate:
        self._update_model_and_config(
            all_reads=all_reads,
            model_class=OutBackModel,
            config_class=OutBackSystemControlModel,
            config_values_class=OutBackSystemControlValues,
            device_values=self.mate3s,
            device_class=Mate3DeviceValues,
        )

        # Charge controller
        self._update_model_and_config(
            all_reads=all_reads,
            model_class=ChargeControllerModel,
            config_class=ChargeControllerConfigurationModel,
            config_values_class=ChargeControllerConfigurationValues,
            device_values=self.charge_controllers,
            device_class=ChargeControllerDeviceValues,
        )

        # FNDCs
        self._update_model_and_config(
            all_reads=all_reads,
            model_class=FLEXnetDCRealTimeModel,
            config_class=FLEXnetDCConfigurationModel,
            config_values_class=FLEXnetDCConfigurationValues,
            device_values=self.fndcs,
            device_class=FNDCDeviceValues,
        )

        # FX inverters
        self._update_model_and_config(
            all_reads=all_reads,
            model_class=FXInverterRealTimeModel,
            config_class=FXInverterConfigurationModel,
            config_values_class=FXInverterConfigurationValues,
            device_values=self.fx_inverters,
            device_class=FXInverterDeviceValues,
        )

        # Single phase radian inverters
        self._update_model_and_config(
            all_reads=all_reads,
            model_class=SinglePhaseRadianInverterRealTimeModel,
            config_class=RadianInverterConfigurationModel,
            config_values_class=RadianInverterConfigurationValues,
            device_values=self.single_phase_radian_inverters,
            device_class=SinglePhaseRadianInverterDeviceValues,
        )

        # Split phase radian inverters
        self._update_model_and_config(
            all_reads=all_reads,
            model_class=SplitPhaseRadianInverterRealTimeModel,
            config_class=RadianInverterConfigurationModel,
            config_values_class=RadianInverterConfigurationValues,
            device_values=self.split_phase_radian_inverters,
            device_class=SplitPhaseRadianInverterDeviceValues,
        )

    def _update_model_and_config(
        self,
        all_reads: AllModelReads,
        model_class: Model,
        config_class: Model,
        config_values_class: ModelValues,
        device_values: Dict[int, ModelValues],
        device_class: ModelValues,
    ) -> None:
        model_field_reads_per_port = all_reads.get_reads_per_model_by_port(model_class)
        config_field_reads_per_port = all_reads.get_reads_per_model_by_port(config_class)

        # OK, there's a few options around whether the above variables contain anything.
        #  - Both present, then we're good - continue. All devices should have a configuration class.
        #  - Model isn't present - this means the device itself wasn't detected, so ignore. Note that usually this
        #    would imply the config class is null (since the config shouldn't be there if the device isn't) except
        #    in the case of Radian inverters, as the same config class is shared across both single and split phase
        #    devices (so that if only one type is present, the other will have empty model values and non-empty
        #    config).
        #  - Both are missing - this is covered by the above.
        # So, the short summary is we only care about devices where the model field values are present, and in all
        # other cases there *should* be config field values too.
        if model_field_reads_per_port is None:
            return
        else:
            if config_field_reads_per_port is None:
                logger.warning(
                    (
                        f"Only model ({model_class}) field values and no config ({config_class}) fields were read."
                        f" This is undefined behaviour, so ignoring {model_class}."
                    )
                )
                return

        # Check model and config have the same ports:
        if set(model_field_reads_per_port).symmetric_difference(set(config_field_reads_per_port)):
            raise RuntimeError("Config and models have different ports!")

        # Create/update any devices for the given ports:
        for port in model_field_reads_per_port:
            model_reads_this_port = model_field_reads_per_port[port]
            config_reads_this_port = config_field_reads_per_port[port]
            if port not in device_values:
                # OK, it's new - create it:
                config_values = self._create_new_model_values(
                    model=config_class,
                    values_class=config_values_class,
                    device_address=config_reads_this_port["did"].address,
                )
                device_values[port] = self._create_new_model_values(
                    model=model_class,
                    values_class=device_class,
                    device_address=model_reads_this_port["did"].address,
                    config=config_values,
                )

            # Either way, update the field values:
            for reads, device_val in (
                (model_reads_this_port, device_values[port]),
                (config_reads_this_port, device_values[port].config),
            ):
                for field_name, field_read in reads.items():
                    field_value = getattr(device_val, field_name)
                    field_value._raw_value = field_read.raw_value
                    field_value._implemented = field_read.implemented
                    field_value._last_read = field_read.time

        # If there are any ports that were used for this device, but are no longer, remove them:
        old_device_ports = set(list(device_values.keys())) - set(model_field_reads_per_port.keys())
        for port in old_device_ports:
            logger.warning(
                f"Device(s) of model {model_class} on ports {old_device_ports} have disappeared. These will be ignored."
            )
            del device_values[port]
[ 11748, 4818, 330, 28958, 355, 30736, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 11, 40806, 540, 11, 7343, 11, 32233, 198, 198, 6738, 2604, 14717, 1330, 49706, 198, 198, 6738, 16133, 18, 13, 3245, 62, 27160, 1330, 7663, 11395, 11, 910...
2.320313
3,584
#!/usr/bin/env python import rospy from srv_sub_pub.srv import * NAME = "add_two_ints_server" if __name__ == "__main__": add_two_ints_server()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 628, 198, 11748, 686, 2777, 88, 198, 6738, 19677, 85, 62, 7266, 62, 12984, 13, 27891, 85, 1330, 1635, 198, 198, 20608, 796, 366, 2860, 62, 11545, 62, 29503, 62, 15388, 1, 198, 198, 361, ...
2.323077
65
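The cell above calls add_two_ints_server() without ever defining it, so the file cannot run as stored. A minimal completion is sketched below; it assumes the srv_sub_pub.srv star import provides the classic AddTwoInts service type and its AddTwoIntsResponse (neither is shown in the cell, so treat both names as assumptions):

import rospy
from srv_sub_pub.srv import AddTwoInts, AddTwoIntsResponse  # assumed exports

NAME = "add_two_ints_server"

def handle_add_two_ints(req):
    # Reply with the sum of the two request fields.
    return AddTwoIntsResponse(req.a + req.b)

def add_two_ints_server():
    rospy.init_node(NAME)
    rospy.Service('add_two_ints', AddTwoInts, handle_add_two_ints)
    rospy.spin()  # block until the node is shut down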
#!/usr/bin/env python # -*- coding: utf-8 -*- import tornado.web
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 33718, 13, 12384, 198 ]
2.321429
28
import multiprocessing as mp
[ 11748, 18540, 305, 919, 278, 355, 29034, 198 ]
3.625
8
import os from pyspark import StorageLevel from geospark.core.SpatialRDD import PolygonRDD from geospark.core.enums import IndexType, FileDataSplitter from geospark.core.geom.envelope import Envelope from geospark.core.spatialOperator import RangeQuery from tests.test_base import TestBase from tests.tools import tests_path input_location = os.path.join(tests_path, "resources/primaryroads-polygon.csv") splitter = FileDataSplitter.CSV gridType = "rtree" indexType = "rtree"
[ 11748, 28686, 198, 198, 6738, 279, 893, 20928, 1330, 20514, 4971, 198, 198, 6738, 4903, 2117, 668, 13, 7295, 13, 4561, 34961, 49, 16458, 1330, 12280, 14520, 49, 16458, 198, 6738, 4903, 2117, 668, 13, 7295, 13, 268, 5700, 1330, 12901, ...
3.116883
154
from typing import Dict from river import base __all__ = ["Renamer", "Prefixer", "Suffixer"]
[ 6738, 19720, 1330, 360, 713, 198, 198, 6738, 7850, 1330, 2779, 198, 198, 834, 439, 834, 796, 14631, 26764, 2382, 1600, 366, 36698, 844, 263, 1600, 366, 50, 1648, 844, 263, 8973, 628, 628 ]
2.882353
34
from open_publishing.core import FieldGroup from open_publishing.core import FieldDescriptor from open_publishing.core.enums import CatalogType, VLBCategory, AcademicCategory from open_publishing.core import SimpleField from open_publishing.extendable_enum_field import ExtendableEnumField from open_publishing.genre import GenresList from open_publishing.bisac import BisacList from .thema import ThemaList from .subject import SubjectField from .series import SeriesList from .institution import InstitutionField
[ 6738, 1280, 62, 12984, 20020, 13, 7295, 1330, 7663, 13247, 198, 6738, 1280, 62, 12984, 20020, 13, 7295, 1330, 7663, 24564, 1968, 273, 198, 6738, 1280, 62, 12984, 20020, 13, 7295, 13, 268, 5700, 1330, 44515, 6030, 11, 569, 43, 2749, 11...
3.358025
162
import urllib.request,json from .models import Source,Article # Getting Api Key api_key = None #Getting the base urls source_base_url = None article_base_url = None def get_sources(category): ''' Function that gets the json response to our url request ''' get_sources_url = source_base_url.format(category,api_key) with urllib.request.urlopen(get_sources_url) as url: get_sources_data = url.read() get_sources_response = json.loads(get_sources_data) source_results = None if get_sources_response['sources']: source_results_list = get_sources_response['sources'] source_results = process_results(source_results_list) return source_results def process_results(source_list): ''' Function that processes the source result and transform them to a list of Objects Args: source_list: A list of dictionaries that contain source details Returns : source_results: A list of source objects ''' source_results = [] for source_item in source_list: id = source_item.get('id') name = source_item.get('name') description = source_item.get('description') url = source_item.get('url') category = source_item.get('category') language = source_item.get('language') country = source_item.get('country') if url: source_object = Source(id,name,description,url,category,language,country) source_results.append(source_object) return source_results
[ 11748, 2956, 297, 571, 13, 25927, 11, 17752, 198, 6738, 764, 27530, 1330, 8090, 11, 14906, 198, 198, 2, 18067, 5949, 72, 7383, 198, 15042, 62, 2539, 796, 6045, 198, 2, 20570, 262, 2779, 2956, 7278, 198, 10459, 62, 8692, 62, 6371, 79...
2.617201
593
# normal libraries import math import numpy as np # priv_libraries from corai_util.finance.src.financials import compute_price, compute_integral from corai_util.finance.src.implied_vol import implied_volatility_newton, total_implied_vol_newton phi_heston = lambda xx: (1 - (1 - np.exp(-xx)) / xx) / xx phi_heston_lambda = lambda xx, lamb: phi_heston(xx * lamb) phi_heston_curry = lambda lamb: lambda xx: phi_heston_lambda(xx, lamb) phi_power_law = lambda eta, gamma: lambda theta: eta * theta ** (- gamma) # section ###################################################################### # ############################################################################# # parametrisation # section ###################################################################### # ############################################################################# # SSVI def natural_SVIparam2density(xx_for_density, parameters): # """ takes natural SVI parameters. """ """ Semantics: From Args: xx_for_density: parameters: Returns: """ w = total_implied_vol_ssvi w_dash = total_implied_vol_ssvi_dash w_dash_dash = total_implied_vol_ssvi_dash_dash return total_implied_vol2density_litzenberg(xx_for_density, w, w_dash, w_dash_dash, parameters) def natural_SVIparameters2price(log_asset_for_density, parameters, log_moneyness): """ takes natural SVI parameters.""" values_density_of_SVI = natural_SVIparam2density(log_asset_for_density, parameters) * np.exp(-log_asset_for_density) asset_for_density = np.exp(log_asset_for_density) # density of S_T s0 = compute_integral(asset_for_density, values_density_of_SVI) c_k = compute_price(asset_for_density, np.exp(log_moneyness), values_density_of_SVI) return values_density_of_SVI, c_k, s0 def natural_SVIparameters2TIV(val_density, parameters, log_moneyness): """ takes natural SVI parameters.""" values_density_of_SVI, c_k, s0 = natural_SVIparameters2price(val_density, parameters, log_moneyness) sigma = implied_volatility_newton(True, s0, np.exp(log_moneyness), 1, 0, 0, c_k) total_implied_vol = 1 * sigma * sigma total_implied_vol = total_implied_vol_newton(True, s0, np.exp(log_moneyness), 0, 0, c_k) return values_density_of_SVI, c_k, s0, total_implied_vol
[ 2, 3487, 12782, 198, 11748, 10688, 198, 198, 11748, 299, 32152, 355, 45941, 198, 2, 1953, 62, 75, 11127, 198, 6738, 1162, 1872, 62, 22602, 13, 69, 14149, 13, 10677, 13, 46921, 82, 1330, 24061, 62, 20888, 11, 24061, 62, 18908, 1373, ...
2.825455
825
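For readers decoding the lambdas above: phi_heston implements the Heston-like SSVI curvature function and phi_power_law the power-law one. Written out, with the composition applied by phi_heston_lambda (this is a direct transcription of the code into the usual SSVI notation):

\varphi_{\text{Heston}}(\theta) = \frac{1}{\lambda\theta}\left(1 - \frac{1 - e^{-\lambda\theta}}{\lambda\theta}\right),
\qquad
\varphi_{\text{power}}(\theta) = \eta\,\theta^{-\gamma}.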
import unittest from get_all_permutations_of_string import get_all_permutations_of_string, get_all_permutations_of_string_with_dups
[ 11748, 555, 715, 395, 198, 6738, 651, 62, 439, 62, 16321, 32855, 62, 1659, 62, 8841, 1330, 651, 62, 439, 62, 16321, 32855, 62, 1659, 62, 8841, 11, 651, 62, 439, 62, 16321, 32855, 62, 1659, 62, 8841, 62, 4480, 62, 646, 862 ]
3.046512
43
#! /usr/bin/env python # -*- mode: python; indent-tabs-mode: nil; -*- # vim:expandtab:shiftwidth=2:tabstop=2:smarttab: # # Copyright (C) 2011 Patrick Crews # # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA import os import shutil from lib.util.mysqlBaseTestCase import mysqlBaseTestCase server_requirements = [['--innodb-file-per-table']] servers = [] server_manager = None test_executor = None # we explicitly use the --no-timestamp option # here. We will be using a generic / vanilla backup dir backup_path = None
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 4235, 25, 21015, 26, 33793, 12, 8658, 82, 12, 14171, 25, 18038, 26, 532, 9, 12, 198, 2, 43907, 25, 11201, 392, 8658, 25, 30846, 10394, 28, 17, 25, 8658, 11338, ...
3.438596
342
print filter((lambda x: (x%2) ==0 ), [1,2,3,4,5,6]) print filter((lambda x: (x%2) !=0 ), [1,2,3,4,5,6])
[ 4798, 8106, 19510, 50033, 2124, 25, 357, 87, 4, 17, 8, 6624, 15, 10612, 685, 16, 11, 17, 11, 18, 11, 19, 11, 20, 11, 21, 12962, 201, 198, 4798, 8106, 19510, 50033, 2124, 25, 357, 87, 4, 17, 8, 14512, 15, 10612, 685, 16, 11, ...
1.857143
56
l=[0]*100 l[0]=1 l[1]=2 for x in range (2,100): l[x]=l[x-1]+l[x-2] #print l f=0 for c in l: if c%2==0 and c<4000000: f=f+c print f
[ 75, 41888, 15, 60, 9, 3064, 201, 198, 75, 58, 15, 22241, 16, 201, 198, 75, 58, 16, 22241, 17, 201, 198, 1640, 2124, 287, 2837, 357, 17, 11, 3064, 2599, 201, 198, 220, 220, 220, 300, 58, 87, 22241, 75, 58, 87, 12, 16, 48688, ...
1.5
104
#!/usr/bin/env/python3 """Recipe for training a wav2vec-based ctc ASR system with librispeech. The system employs wav2vec as its encoder. Decoding is performed with ctc greedy decoder. To run this recipe, do the following: > python train_with_wav2vec.py hparams/train_with_wav2vec.yaml The neural network is trained on CTC likelihood target and character units are used as basic recognition tokens. Training is performed on the full LibriSpeech dataset (960 h). Authors * Sung-Lin Yeh 2021 * Titouan Parcollet 2021 * Ju-Chieh Chou 2020 * Mirco Ravanelli 2020 * Abdel Heba 2020 * Peter Plantinga 2020 * Samuele Cornell 2020 """ import os import sys import torch import logging import speechbrain as sb from speechbrain.utils.distributed import run_on_main from hyperpyyaml import load_hyperpyyaml from pathlib import Path logger = logging.getLogger(__name__) # Define training procedure def dataio_prepare(hparams): """This function prepares the datasets to be used in the brain class. It also defines the data processing pipeline through user-defined functions.""" data_folder = hparams["data_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, ) if hparams["sorting"] == "ascending": # we sort training data to speed up training and get better results. train_data = train_data.filtered_sorted(sort_key="duration") # when sorting do not shuffle in dataloader ! otherwise is pointless hparams["train_dataloader_opts"]["shuffle"] = False elif hparams["sorting"] == "descending": train_data = train_data.filtered_sorted( sort_key="duration", reverse=True ) # when sorting do not shuffle in dataloader ! otherwise is pointless hparams["train_dataloader_opts"]["shuffle"] = False elif hparams["sorting"] == "random": pass else: raise NotImplementedError( "sorting must be random, ascending or descending" ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, ) valid_data = valid_data.filtered_sorted(sort_key="duration") # test is separate test_datasets = {} for csv_file in hparams["test_csv"]: name = Path(csv_file).stem test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=csv_file, replacements={"data_root": data_folder} ) test_datasets[name] = test_datasets[name].filtered_sorted( sort_key="duration" ) datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()] # 2. Define audio pipeline: sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline) label_encoder = sb.dataio.encoder.CTCTextEncoder() # 3. Define text pipeline: sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt") special_labels = { "bos_label": hparams["bos_index"], "eos_label": hparams["eos_index"], "blank_label": hparams["blank_index"], } label_encoder.load_or_create( path=lab_enc_file, from_didatasets=[train_data], output_key="char_list", special_labels=special_labels, sequence_input=True, ) # 4. 
Set output: sb.dataio.dataset.set_output_keys( datasets, ["id", "sig", "wrd", "char_list", "tokens_bos", "tokens_eos", "tokens"], ) return train_data, valid_data, test_datasets, label_encoder if __name__ == "__main__": # CLI: hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) # If distributed_launch=True then # create ddp_group with the right communication protocol sb.utils.distributed.ddp_init_group(run_opts) with open(hparams_file) as fin: hparams = load_hyperpyyaml(fin, overrides) # Create experiment directory sb.create_experiment_directory( experiment_directory=hparams["output_folder"], hyperparams_to_save=hparams_file, overrides=overrides, ) # Dataset prep (parsing Librispeech) from librispeech_prepare import prepare_librispeech # noqa # multi-gpu (ddp) save data preparation run_on_main( prepare_librispeech, kwargs={ "data_folder": hparams["data_folder"], "tr_splits": hparams["train_splits"], "dev_splits": hparams["dev_splits"], "te_splits": hparams["test_splits"], "save_folder": hparams["output_folder"], "merge_lst": hparams["train_splits"], "merge_name": "train.csv", "skip_prep": hparams["skip_prep"], }, ) # here we create the datasets objects as well as tokenization and encoding train_data, valid_data, test_datasets, label_encoder = dataio_prepare( hparams ) # Trainer initialization asr_brain = ASR( modules=hparams["modules"], hparams=hparams, run_opts=run_opts, checkpointer=hparams["checkpointer"], ) # We dynamically add the tokenizer to our brain class. # NB: This tokenizer corresponds to the one used for the LM!! asr_brain.tokenizer = label_encoder # Training asr_brain.fit( asr_brain.hparams.epoch_counter, train_data, valid_data, train_loader_kwargs=hparams["train_dataloader_opts"], valid_loader_kwargs=hparams["valid_dataloader_opts"], ) # Testing for k in test_datasets.keys(): # keys are test_clean, test_other etc asr_brain.hparams.wer_file = os.path.join( hparams["output_folder"], "wer_{}.txt".format(k) ) asr_brain.evaluate( test_datasets[k], test_loader_kwargs=hparams["test_dataloader_opts"] )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 14, 29412, 18, 198, 37811, 37523, 329, 3047, 257, 266, 615, 17, 35138, 12, 3106, 269, 23047, 7054, 49, 1080, 351, 9195, 2442, 431, 3055, 13, 198, 464, 1080, 24803, 266, 615, 17, 35138, 355, 663,...
2.414743
2,469
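dataio_prepare above registers audio_pipeline and text_pipeline as dynamic items, but the excerpt omits their definitions. A typical audio pipeline in SpeechBrain's decorator style looks like the sketch below (a plausible reconstruction, not the recipe's exact code; the "wav" CSV column name is an assumption):

import speechbrain as sb

@sb.utils.data_pipeline.takes("wav")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
    # Load the waveform referenced by the CSV row's "wav" column.
    sig = sb.dataio.dataio.read_audio(wav)
    return sig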
from __future__ import annotations from ..utils import normalize_single_outer_inner_index
[ 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 6738, 11485, 26791, 1330, 3487, 1096, 62, 29762, 62, 39605, 62, 5083, 62, 9630, 628 ]
4
23
"""Implements the EClientSocket interface for the Interactive Brokers API.""" import threading import ibapipy.config as config from ibapipy.core.network_handler import NetworkHandler def check(value): """Check to see if the specified value is equal to JAVA_INT_MAX or JAVA_DOUBLE_MAX and return None if such is the case; otherwise return 'value'. Interactive Brokers will set certain integers and floats to be their maximum possible value in Java. This is used as a sentinal value that should be replaced with an EOL when transmitting. Here, we check the value and, if it is a max, return None which the codec will interpret as an EOL. Keyword arguments: value -- integer or floating-point value to check """ if is_java_int_max(value) or is_java_double_max(value): return None else: return value def is_java_double_max(number): """Returns True if the specified number is equal to the maximum value of a Double in Java; False, otherwise. Keyword arguments: number -- number to check """ return type(number) == float and number == config.JAVA_DOUBLE_MAX def is_java_int_max(number): """Returns True if the specified number is equal to the maximum value of an Integer in Java; False, otherwise. Keyword arguments: number -- number to check """ return type(number) == int and number == config.JAVA_INT_MAX def listen(client, in_queue): """Listen to messages in the specified incoming queue and call the appropriate methods in the client. Keyword arguments: client -- client in_queue -- incoming message queue """ # Loop until we receive a stop message in the incoming queue while True: method, parms = in_queue.get() if method == 'stop': return elif method is None: continue elif hasattr(client, method): getattr(client, method)(*parms) else: parms = list(parms) parms.insert(0, method) getattr(client, 'update_unknown')(*parms)
[ 37811, 3546, 1154, 902, 262, 412, 11792, 39105, 7071, 329, 262, 21365, 2806, 15949, 7824, 526, 15931, 198, 11748, 4704, 278, 198, 11748, 24283, 499, 541, 88, 13, 11250, 355, 4566, 198, 6738, 24283, 499, 541, 88, 13, 7295, 13, 27349, 6...
2.865306
735
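A quick exercise of the sentinel handling described in check()'s docstring, using only the functions shown above (assuming the module they live in is importable):

import ibapipy.config as config  # defines JAVA_INT_MAX / JAVA_DOUBLE_MAX

assert check(42) == 42                        # ordinary value passes through
assert check(config.JAVA_INT_MAX) is None     # int sentinel collapses to EOL
assert check(config.JAVA_DOUBLE_MAX) is None  # float sentinel collapses to EOL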
import os from datetime import datetime PROJECT_PATH = os.path.dirname(os.path.realpath(__file__)) IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S') #numerical libs import numpy as np import random import matplotlib matplotlib.use('TkAgg') import cv2 # torch libs import torch from torch.utils.data.sampler import * import torchvision.transforms as transforms from torch.utils.data.dataset import Dataset from torch.utils.data import DataLoader from torch.utils.data.sampler import * from torch.nn import init import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable import torch.optim as optim from torch.nn.parallel.data_parallel import data_parallel # std libs import collections import copy import numbers import math import inspect import shutil from timeit import default_timer as timer import csv import pandas as pd import pickle import glob import sys from distutils.dir_util import copy_tree import time import matplotlib.pyplot as plt import skimage import skimage.color import skimage.morphology from scipy import ndimage print('@%s: ' % os.path.basename(__file__)) if 1: SEED = int(time.time()) random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed_all(SEED) print ('\tset random seed') print ('\t\tSEED=%d'%SEED) if 1: # uses the inbuilt cudnn auto-tuner to find the fastest convolution algorithms torch.backends.cudnn.benchmark = True torch.backends.cudnn.enabled = True print('\tset cuda environment') print('\t\ttorch.__version__ =', torch.__version__) print('\t\ttorch.version.cuda =', torch.version.cuda) print('\t\ttorch.backends.cudnn.version() =', torch.backends.cudnn.version()) try: print('\t\tos[\'CUDA_VISIBLE_DEVICES\'] =',os.environ['CUDA_VISIBLE_DEVICES']) NUM_CUDA_DEVICES = len(os.environ['CUDA_VISIBLE_DEVICES'].split(',')) except Exception: print('\t\tos[\'CUDA_VISIBLE_DEVICES\'] =','None') NUM_CUDA_DEVICES = 1 print('\t\ttorch.cuda.device_count() =', torch.cuda.device_count()) print('\t\ttorch.cuda.current_device() =', torch.cuda.current_device()) print('')
[ 11748, 28686, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 31190, 23680, 62, 34219, 796, 28686, 13, 6978, 13, 15908, 3672, 7, 418, 13, 6978, 13, 5305, 6978, 7, 834, 7753, 834, 4008, 198, 25256, 5064, 38311, 220, 220, 796, 4818, ...
2.527149
884
import numpy as np import cv2 import matplotlib.pyplot as plt import numpy as np from keras.models import model_from_json # Load the saved model json_file = open('models/model.json', 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) # load weights into new model loaded_model.load_weights("models/model.h5") print("Loaded saved model from disk.") # evaluate loaded model on test data
[ 11748, 299, 32152, 355, 45941, 198, 11748, 269, 85, 17, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 41927, 292, 13, 27530, 1330, 2746, 62, 6738, 62, 17752, 198, 198, 2, ...
3.110345
145
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue May 12 18:28:54 2020 @author: Dr J A Christen (CIMAT-CONACYT, Mexico) jac at cimat.mx Instantaneous reproduction numbers calculations. Rts_P, Implementation of Cori et al (2013) Rts_AR, new filtering version using an autoregressive linear model of Capistrán, Capella and Christen (2020): https://arxiv.org/abs/2012.02168, 05DIC2021 01FEB2021: Some bugs were corrected to avoid error when too low counts are used and for prediction when g=1. Go directly to __main__ for examples. """ import os from datetime import date, timedelta from pickle import load, dump from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt from numpy import sum as np_sum from scipy.stats import erlang, gamma, nbinom, uniform, beta from scipy.stats import t as t_student from matplotlib.pyplot import subplots, rcParams, close from matplotlib.dates import drange from pytwalk import pytwalk from plotfrozen import PlotFrozenDist def Rts_P( data, tau=7, n=30, IP_dist=erlang( a=3, scale=8/3),\ Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90]): """Calculate Rt as in: Anne Cori, Neil M. Ferguson, Christophe Fraser, Simon Cauchemez, A New Framework and Software to Estimate Time-Varying Reproduction Numbers During Epidemics, American Journal of Epidemiology, Volume 178, Issue 9, 1 November 2013, Pages 1505-1512, https://doi.org/10.1093/aje/kwt133 data: array with case incidence. tau: Use a window tau (default 7) to calculate R_{t,\tau}'s. n: calculate n R_{t,\tau}'s to the past n days (default 30). IP_dist: 'frozen' infectiousness profile distribution, default erlang( a=3, scale=8/3), chosen for covid19. Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s. Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t. q=[10,25,50,75,90], quantiles to use to calculate in the post. dist. for R_t. If q is a single integer, return a simulation of the Rts of size q, for each Rt Returns: a (len(q), n) array with quantiles of the R_{t,\tau}'s. """ if isinstance( q, list): ## Return a list of quantiles q = array(q)/100 rt = zeros(( len(q), n)) simulate = False else: ## If q is a single integer, return a simulation of the Rts of size q, for each Rt if q == 2: # return a and b of post gamma rt = zeros(( q, n)) else: rt = zeros(( q, n)) simulate = True m = len(data) w = diff(IP_dist.cdf( arange( 0, m+1))) w /= sum(w) w = flip(w) for t in range(max(m-n,0), m): S1 = 0.0 S2 = 0.0 if sum(data[:t]) <= 10:# Only for more than 10 counts continue for k in range(tau): I = data[:(t-k)] ## window of reports S2 += data[(t-k)] S1 += sum(I * w[(m-(t-k)):]) #\Gamma_k #print( (Rt_pr_a+S2) * (1/(S1 + 1/Rt_pr_b)), (Rt_pr_a+S2), 1/(S1 + 1/Rt_pr_b)) if simulate: if q == 2: #Return Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b) rt[:,t-(m-n)] = Rt_pr_a+S2, 1/(S1 + 1/Rt_pr_b) else: rt[:,t-(m-n)] = gamma.rvs( Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b), size=q) else: rt[:,t-(m-n)] = gamma.ppf( q, Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b)) return rt def PlotRts_P( data_fnam, init_date, trim=0,\ tau=7, n=30, IP_dist=erlang( a=3, scale=8/3), Rt_pr_a=5, Rt_pr_b=5/5,\ q=[10,25,50,75,90], csv_fnam=None, color='blue', median_color='red', alpha=0.25, ax=None): """Makes a board with the Rt evolution for the past n days (n=30). All parameters are passed to function Rts_P. csv_fnam is an optional file name to save the Rts info. ax is an Axis handle for the plot, if None, it creates one and returns it. 
""" if type(data_fnam) == str: data = loadtxt(data_fnam) else: data = data_fnam.copy() data_fnam = " " if trim < 0: data = data[:trim,:] rts = Rts_P(data=data[:,1],\ tau=tau, n=n, IP_dist=IP_dist, q=q,\ Rt_pr_a=Rt_pr_a, Rt_pr_b=Rt_pr_b) m = data.shape[0] last_date = init_date + timedelta(m) if ax == None: fig, ax = subplots(figsize=( n/3, 3.5) ) for i in range(n): h = rts[:,i] ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=alpha) ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=alpha) ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color ) ax.set_title(data_fnam + r", $R_t$, dist. posterior.") ax.set_xlabel('') ax.set_xticks(range(n)) ax.set_xticklabels([(last_date-timedelta(n-i)).strftime("%d.%m") for i in range(n)], ha='right') ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30) ax.axhline(y=1, color='green') ax.axhline(y=2, color='red') ax.axhline(y=3, color='darkred') ax.set_ylim((0.5,3.5)) ax.set_yticks(arange( 0.4, 3.4, step=0.2)) ax.tick_params( which='major', axis='y', labelsize=10) ax.grid(color='grey', linestyle='--', linewidth=0.5) #fig.tight_layout() if csv_fnam != None: days = drange( last_date-timedelta(n), last_date, timedelta(days=1)) ### To save all the data for the plot, ### columns: year, month, day, q_05, q_25, q_50, q_75, q_95 ### 0 1 2 3 4 5 6 7 sv = -ones(( len(days), 3+len(q))) for i,day in enumerate(days): d = date.fromordinal(int(day)) sv[ i, 0] = d.year sv[ i, 1] = d.month sv[ i, 2] = d.day sv[ i, 3:] = rts[:,i] q_str = ', '.join(["q_%02d" % (qunt,) for qunt in q]) savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='') return ax """ def loglikelihood_NB( x, mu, psi): mu_psi = mu/psi return -gammaln(x + 1) + gammaln(x + psi) - gammaln(psi)\ -(x + psi)*log(1 + mu_psi) + x*log(mu_psi) """ def Rts_NB( data, n=30, tau=7, psi=10, IP_dist=erlang( a=3, scale=8/3),\ Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90]): """Calculate Rt Using a Negative Binomial instead of Poisson. Here one needs to fix psi = 1/theta (= 10). Extension of (not documented): Anne Cori, Neil M. Ferguson, Christophe Fraser, Simon Cauchemez, A New Framework and Software to Estimate Time-Varying Reproduction Numbers During Epidemics, American Journal of Epidemiology, Volume 178, Issue 9, 1 November 2013, Pages 1505-1512, https://doi.org/10.1093/aje/kwt133 data: array with case incidence. tau: Use a window tau (default 7) to calculate R_{t,\tau}'s. n: calculate n R_{t,\tau}'s to the past n days (default 30). IP_dist: 'frozen' infectiousness profile distribution, default erlang( a=3, scale=8/3), chosen for covid19. Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s. Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t. q=[10,25,50,75,90], quantiles to use to calculate in the post. dist. for R_t. If q is a single integer, return a simulation of the Rts, for each Rt Returns: a (len(q), n) array with quantiles of the R_{t,\tau}'s. 
""" if isinstance( q, list): ## Return a list of quantiles q = array(q)/100 quantiles = zeros(len(q)) rt = zeros(( len(q), n)) simulate = False else: ## If q is a single integer, return a simulation of the Rts of size q, for each Rt rt = zeros(( q, n)) simulate = True m = len(data) w = diff(IP_dist.cdf( arange( 0, m+1))) w /= sum(w) w = flip(w) R = linspace( 0.1, 3.0, num=100) DeltaR = R[1]-R[0] #omega = 1 #theta = THETA_MEAN #0.01 #psi = 1/theta #fig, axs = subplots(nrows=5, ncols=1, figsize=( 5, 5)) for t in range(max(m-n,0), m): #S1 = 0.0 log_likelihood_I = zeros(R.shape) ## Same size of array for values for R if sum(data[:t]) <= 10:# Only for more than 10 counts continue for k in range(tau): I = data[:(t-k)] ## window of reports Gammak = I @ w[(m-(t-k)):] #\Gamma_k #S1 += Gammak I_k = data[(t-k)] log_likelihood_I += loglikelihood_NB( I_k, R*Gammak, psi) log_post = log_likelihood_I + gamma.logpdf( R, Rt_pr_a, scale=1/Rt_pr_b) pdf = exp(log_post) pdf /= sum(pdf)*DeltaR cdf = cumsum(pdf)*DeltaR if simulate: u = uniform.rvs() rt[:,t-(m-n)] = R[where(cdf < u)[0][-1]] else: for i,qua in enumerate(q): quantiles[i] = R[where(cdf < qua)[0][-1]] rt[:,t-(m-n)] = quantiles return rt def PlotRts_NB( data_fnam, init_date, psi, trim=0,\ tau=7, n=30, IP_dist=erlang( a=3, scale=8/3), Rt_pr_a=5, Rt_pr_b=5/5,\ q=[10,25,50,75,90], csv_fnam=None, color='blue', ax=None): """Makes a board with the Rt evolution for the past n days (n=30). All parameters are passed to function Rts_NB. csv_fnam is an optional file name to save the Rts info. ax is an Axis handle for the plot, if None, it creates one and returns it. """ if type(data_fnam) == str: data = loadtxt(data_fnam) else: data = data_fnam.copy() data_fnam = " " if trim < 0: data = data[:trim,:] rts = Rts_NB(data=data[:,1],\ tau=tau, psi=psi, n=n, IP_dist=IP_dist, q=q,\ Rt_pr_a=Rt_pr_a, Rt_pr_b=Rt_pr_b) m = data.shape[0] last_date = init_date + timedelta(m) if ax == None: fig, ax = subplots(figsize=( n/3, 3.5) ) for i in range(n): h = rts[:,i] ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=0.25) ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=0.25) ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color='red' ) ax.set_title(data_fnam + r", $R_t$, dist. 
posterior.") ax.set_xlabel('') ax.set_xticks(range(n)) ax.set_xticklabels([(last_date-timedelta(n-i)).strftime("%d.%m") for i in range(n)], ha='right') ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30) ax.axhline(y=1, color='green') ax.axhline(y=2, color='red') ax.axhline(y=3, color='darkred') ax.set_ylim((0.5,3.5)) ax.set_yticks(arange( 0.4, 3.4, step=0.2)) ax.tick_params( which='major', axis='y', labelsize=10) ax.grid(color='grey', linestyle='--', linewidth=0.5) #fig.tight_layout() if csv_fnam != None: days = drange( last_date-timedelta(n), last_date, timedelta(days=1)) ### To save all the data for the plot, ### columns: year, month, day, q_05, q_25, q_50, q_75, q_95 ### 0 1 2 3 4 5 6 7 sv = -ones(( len(days), 3+len(q))) for i,day in enumerate(days): d = date.fromordinal(int(day)) sv[ i, 0] = d.year sv[ i, 1] = d.month sv[ i, 2] = d.day sv[ i, 3:] = rts[:,i] q_str = ', '.join(["q_%02d" % (qunt,) for qunt in q]) savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='') return ax ##### Dictionary with general information for the metro zone or region to be analyzed: ##### id Name not used Population init date ZMs = { "9-01": ["Mexico city", 2, 21.942666e6, date(2020, 2, 27)],\ "15-02": ["Toluca", 1, 2.377828e6, date(2020, 3, 7)],\ "31-01": ["Mérida", 2, 1.237697e6, date(2020, 3, 7)],\ "17-02": ["Cuernavaca", 1, 1.059521e6, date(2020, 3, 2)],\ "12-01": ["Acapulco", 2, 0.919726e6, date(2020, 3, 11)],\ "25-01": ["Culiacán", 2, 0.962871e6, date(2020, 3, 1)],\ "23-01": ["Cancun", 2, 0.867768e6, date(2020, 3, 1)]} ### The corresponding data files have two columns separated by space, deaths and incidence. ### Each row is one day. ### The file for clave="9-01" (Mexico city) is: ../data/clave.csv etc. if __name__=='__main__': rcParams.update({'font.size': 14}) close('all') #Plot the imputed serial time distribution for covid: erlang( a=3, scale=8/3 ) fig, ax = subplots( num=30, figsize=( 4.5, 3.5)) PlotFrozenDist( erlang( a=3, scale=8/3 ), ax=ax) ### Plot the erlang( a=5, scale=9/5 ) alternative PlotFrozenDist( erlang( a=5, scale=9/5 ), color='grey', ax=ax) ax.set_xlim((0,20)) ax.grid(color='grey', linestyle='--', linewidth=0.5) ax.set_ylabel(r"Density") ax.set_xlabel("days") ax.set_title("") fig.tight_layout() fig.savefig("../figs/Covid19_SerialTimeDist.png") ### Plot the Rt's estimation. Only Mérida, '31-01' and Mexico city, '9-01', are in the paper claves = ['15-02', '17-02', '23-01', '25-01', '12-01', "31-01", '9-01'] n=60 ## Number of days to calculate the Rt's trim=0 ## Number of days to cut data from the end, negative, e.g. -10, cut 10 days x_jump = 7 ## For plotting, put ticks every x_jump days. 
for i,clave in enumerate(claves): print(clave) ### Open an instance of the Rts_AR class: tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=5, n=n) tst.CalculateRts() # Must be called before plotting the Rt's ### Plot the Rts: fig, ax = subplots( num=i+1, figsize=( 8, 3.5)) ### Plot Cori et al (2013) Poisson model version: PlotRts_P( '../data/%s.csv' % (clave,), init_date=ZMs[clave][3]+timedelta(days=4),\ n=tst.n, trim=trim, ax=ax, color='green', alpha=0.5, median_color='black') ### Plot ours: tst.PlotRts( ax=ax, x_jump=x_jump, plot_area=[0.4,2.2], csv_fnam=clave) ax.set_title("") ax.set_ylabel(r"$R_t$") ax.set_xlabel("") ax.set_title(ZMs[clave][0] + ", Mexico") fig.tight_layout() fig.savefig("../figs/%s_Rts_AR.png" % (clave,)) if clave == '9-01': m_max = tst.m ax.set_xlabel("day.month, 2020") fig.tight_layout() fig.savefig("../figs/%s_Rts_AR.png" % (clave,)) ### Figure with Cori et al (2013) posterior distributions of '31-01' and '9-01' fig1, ax1 = subplots( num=20, nrows=1, ncols=2, figsize=( 10, 3.5)) color = [ "red", "black", "darkred"] for i,clave in enumerate([ '31-01', '9-01']): tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n) a, b = Rts_P( tst.data, tau=7, n=30, q=2) ax1[0].plot( arange(m_max-tst.m, m_max, 1), tst.data, '.-', color=color[i], label=ZMs[clave][0]) PlotFrozenDist( gamma( a[-1], scale=b[-1]), ax=ax1[1], color=color[i]) last_date = tst.init_date + timedelta(tst.m) ax1[0].set_xlabel('') ax1[0].set_xticks(range(0,tst.m,x_jump*2)) ax1[0].set_xticklabels([(last_date-timedelta(tst.m-i)).strftime("%d.%m") for i in range(0,tst.m,x_jump*2)], ha='right') ax1[0].tick_params( which='major', axis='x', labelsize=10, labelrotation=30) ax1[0].set_xlabel("day.month, 2020") #ax1[0].set_ylim((0,1.1*max(tst.data[-n:]))) ax1[0].grid(color='grey', linestyle='--', linewidth=0.5) ax1[0].set_ylabel(r"Incidence") ax1[0].legend(loc=0, shadow = False) ### Add '31-01', with incidence multiplied by 10 clave = '31-01' tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n) a, b = Rts_P( tst.data*10, tau=7, n=30, q=2) ax1[0].plot( arange(m_max-tst.m, m_max, 1), tst.data*10, '.-', color=color[2]) PlotFrozenDist( gamma( a[-1], scale=b[-1]), ax=ax1[1], color=color[2]) ax1[1].set_xticks(arange(0.8,1.4,0.2)) ax1[1].set_xlabel(r"$R_t$, " + (last_date-timedelta(1)).strftime("%d.%m.%Y")) ax1[1].grid(color='grey', linestyle='--', linewidth=0.5) fig1.tight_layout() fig1.savefig("../figs/Rts_Compare.png") ### Comparison of results changing the serial time distribution fig, ax = subplots( num=31, figsize=( 4.5, 3.5)) tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n) tst.CalculateRts() tst.PlotPostRt( i=n, ax=ax) #### Here we change the serial time: Any other positive density could be used. 
tst = Rts_AR( clave, IP_dist=erlang( a=5, scale=9/5), init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n) tst.CalculateRts() tst.PlotPostRt( i=n, ax=ax, color='grey') ax.set_xlim((0.5,2.5)) ax.set_xlabel(r"$R_t$, " + (last_date-timedelta(1)).strftime("%d.%m.%Y")) ax.grid(color='grey', linestyle='--', linewidth=0.5) ax.set_title("") fig.tight_layout() fig.savefig("../figs/%s_Rts_Compare.png" % (clave,)) """ ################# Example of use of Rts_NB_psi and Rts_NB (not documented) T=100000 for clave in claves: #Instance of the object and run the MCMC tst = Rts_NB_psi( clave, init_date=ZMs[clave][3], n=n) if T > 0: tst.RunMCMC(T=T) ### Plot the Rts close(1) fig, ax = subplots( num=1, figsize=( 10, 3.5) ) tst.PlotRts( ax=ax) ax.set_title( ZMs[clave][0] + r", $R_t$ NB_psi.") fig.savefig("../figs/%s_Rts_NB_psi.png" % (clave,)) ### Plot the posterior distribution of \psi close(3) fig, ax = subplots( num=3, figsize=( 5,5) ) tst.PlotPostPsi(ax=ax) ax.set_title(ZMs[clave][0]) fig.savefig("../figs/%s_Rts_NB_Post_psi.png" % clave) ### Fix \psi with the posterior expected value and use that for PlotRts_NB close(2) fig, ax = subplots( num=2, figsize=( 10, 3.5) ) psi = mean(tst.psi_samples) #Posterior mean of psi PlotRts_NB( '../data/%s.csv' % (clave,), init_date=ZMs[clave][3],\ n=n, psi=psi, ax=ax) ax.set_title( ZMs[clave][0] + r", $R_t$ NB, fixed $\psi$.") fig.savefig("../figs/%s_Rts.png" % (clave,)) """
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 30030, 1737, 1105, 1248, 25, 2078, 25, 4051, 12131, 198, 198, 31, 9800, 25, 1583, 449, 31...
1.980179
9,384
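Reading the shape and scale straight off the gamma.ppf call in Rts_P, the closed-form posterior being evaluated is the Cori et al (2013) one: with a Gamma(a, b) prior (shape a = Rt_pr_a, scale b = Rt_pr_b),

R_{t,\tau} \mid \text{data} \;\sim\; \mathrm{Gamma}\!\left(a + \sum_{k=0}^{\tau-1} I_{t-k},\; \left(\frac{1}{b} + \sum_{k=0}^{\tau-1} \Lambda_{t-k}\right)^{-1}\right),
\qquad
\Lambda_s = \sum_{u \ge 1} I_{s-u}\, w_u,

where S2 in the code accumulates the incidence sum and S1 the sum of the \Lambda terms.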
def zombieCluster(zombies): cm=clusterManager(clusters={i:cluster(members=[i]) for i in xrange(len(zombies))}) for i,row in enumerate(zombies): for j,column in enumerate(row): if column == '1': cm.merge(i,j) return cm.count()
[ 198, 4299, 220, 15956, 2601, 5819, 7, 89, 12676, 2599, 198, 220, 220, 220, 12067, 28, 565, 5819, 13511, 7, 565, 13654, 34758, 72, 25, 565, 5819, 7, 30814, 41888, 72, 12962, 329, 1312, 287, 2124, 9521, 7, 11925, 7, 89, 12676, 4008, ...
2.123077
130
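zombieCluster relies on cluster and clusterManager helpers that the cell never defines. A minimal sketch consistent with how they are called (a members= keyword, merge(i, j), count()) might be:

class cluster(object):
    def __init__(self, members):
        self.members = list(members)

class clusterManager(object):
    def __init__(self, clusters):
        self.clusters = clusters  # maps each node index to its cluster
    def merge(self, i, j):
        a, b = self.clusters[i], self.clusters[j]
        if a is b:
            return  # already in the same cluster (covers the diagonal '1's)
        a.members.extend(b.members)
        for node in b.members:
            self.clusters[node] = a  # repoint every node of b at a
    def count(self):
        # number of distinct surviving cluster objects
        return len({id(c) for c in self.clusters.values()})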
import FWCore.ParameterSet.Config as cms process = cms.Process("ProcessOne") process.load("CondCore.DBCommon.CondDBCommon_cfi") process.CondDBCommon.DBParameters.authenticationPath = '/nfshome0/popcondev/conddb' # # Choose the output database # process.CondDBCommon.connect = 'oracle://cms_orcon_prod/CMS_COND_42X_ECAL_LASP' #process.CondDBCommon.connect = 'sqlite_file:DB.db' process.MessageLogger = cms.Service("MessageLogger", debugModules = cms.untracked.vstring('*'), destinations = cms.untracked.vstring('cout') ) process.source = cms.Source("EmptyIOVSource", firstValue = cms.uint64(1), lastValue = cms.uint64(1), timetype = cms.string('runnumber'), interval = cms.uint64(1) ) process.PoolDBESSource = cms.ESSource("PoolDBESSource", process.CondDBCommon, timetype = cms.untracked.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('EcalLaserAPDPNRatiosRcd'), tag = cms.string('EcalLaserAPDPNRatios_last') )) ) process.PoolDBOutputService = cms.Service("PoolDBOutputService", process.CondDBCommon, logconnect = cms.untracked.string('sqlite_file:DBLog.db'), timetype = cms.untracked.string('timestamp'), toPut = cms.VPSet(cms.PSet( record = cms.string('EcalLaserAPDPNRatiosRcd'), tag = cms.string('EcalLaserAPDPNRatios_last') )) ) # # Be sure to comment the following line while testing # #process.PoolDBOutputService.logconnect = cms.untracked.string('oracle://cms_orcon_prod/CMS_COND_31X_POPCONLOG') process.Test1 = cms.EDAnalyzer("ExTestEcalLaserAnalyzer", SinceAppendMode = cms.bool(True), record = cms.string('EcalLaserAPDPNRatiosRcd'), loggingOn = cms.untracked.bool(True), Source = cms.PSet( # maxtime is mandatory # it can be expressed either as an absolute time with format YYYY-MM-DD HH24:MI:SS # or as a relative time w.r.t. now, using -N, where N is expressed in units # of hours # maxtime = cms.string("-40"), maxtime = cms.string("2012-12-12 23:59:59"), sequences = cms.string("16"), OnlineDBUser = cms.string('CMS_ECAL_LASER_COND'), # debug must be False for production debug = cms.bool(False), # if fake is True, no insertion in the db is performed fake = cms.bool(True), OnlineDBPassword = cms.string('0r4cms_3c4l_2011'), OnlineDBSID = cms.string('CMS_OMDS_LB') ) ) process.p = cms.Path(process.Test1)
[ 11748, 48849, 14055, 13, 36301, 7248, 13, 16934, 355, 269, 907, 198, 198, 14681, 796, 269, 907, 13, 18709, 7203, 18709, 3198, 4943, 198, 14681, 13, 2220, 7203, 25559, 14055, 13, 35, 2749, 2002, 261, 13, 25559, 35, 2749, 2002, 261, 62,...
1.901399
1,572
from algosdk import logic from algosdk.future.transaction import ApplicationOptInTxn, AssetOptInTxn, ApplicationNoOpTxn, PaymentTxn, AssetTransferTxn from ..contract_strings import algofi_manager_strings as manager_strings from .prepend import get_init_txns from ..utils import TransactionGroup, Transactions, randint, int_to_bytes OPT_IN_MIN_BALANCE=0.65 def prepare_staking_contract_optin_transactions(manager_app_id, market_app_id, sender, storage_address, suggested_params): """Returns a :class:`TransactionGroup` object representing a staking contract opt in group transaction. The sender and storage account opt in to the staking application and the storage account is rekeyed to the manager account address, rendering it unable to be transacted against by the sender and therefore immutable. :param manager_app_id: id of the manager application :type manager_app_id: int :param market_app_id: id of the market application :type market_app_id: int :param sender: account address for the sender :type sender: string :param storage_address: address of the storage account :type storage_address: string :param suggested_params: suggested transaction params :type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object :return: :class:`TransactionGroup` object representing a manager opt in group transaction :rtype: :class:`TransactionGroup` """ txn_payment = PaymentTxn( sender=sender, sp=suggested_params, receiver=storage_address, amt=int(OPT_IN_MIN_BALANCE*1e6) ) txn_market = ApplicationOptInTxn( sender=storage_address, sp=suggested_params, index=market_app_id ) txn_user_opt_in_manager = ApplicationOptInTxn( sender=sender, sp=suggested_params, index=manager_app_id ) app_address = logic.get_application_address(manager_app_id) txn_storage_opt_in_manager = ApplicationOptInTxn( sender=storage_address, sp=suggested_params, index=manager_app_id, rekey_to=app_address ) txn_group = TransactionGroup([txn_payment, txn_market, txn_user_opt_in_manager, txn_storage_opt_in_manager]) return txn_group def prepare_stake_transactions(sender, suggested_params, storage_account, amount, manager_app_id, market_app_id, market_address, oracle_app_id, asset_id=None): """Returns a :class:`TransactionGroup` object representing a stake transaction against the algofi protocol. The sender sends assets to the staking account and is credited with a stake. 
:param sender: account address for the sender :type sender: string :param suggested_params: suggested transaction params :type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object :param storage_account: storage account address for sender :type storage_account: string :param amount: amount of asset to supply for minting collateral :type amount: int :param manager_app_id: id of the manager application :type manager_app_id: int :param market_app_id: id of the asset market application :type market_app_id: int :param market_address: account address for the market application :type market_address: string :param oracle_app_id: id of the oracle application :type oracle_app_id: int :param asset_id: asset id of the asset being supplied, defaults to None (algo) :type asset_id: int, optional :return: :class:`TransactionGroup` object representing a mint to collateral group transaction :rtype: :class:`TransactionGroup` """ supported_oracle_app_ids = [oracle_app_id] supported_market_app_ids = [market_app_id] prefix_transactions = get_init_txns( transaction_type=Transactions.MINT_TO_COLLATERAL, sender=sender, suggested_params=suggested_params, manager_app_id=manager_app_id, supported_market_app_ids=supported_market_app_ids, supported_oracle_app_ids=supported_oracle_app_ids, storage_account=storage_account ) txn0 = ApplicationNoOpTxn( sender=sender, sp=suggested_params, index=manager_app_id, app_args=[manager_strings.mint_to_collateral.encode()], ) txn1 = ApplicationNoOpTxn( sender=sender, sp=suggested_params, index=market_app_id, app_args=[manager_strings.mint_to_collateral.encode()], foreign_apps=[manager_app_id], accounts=[storage_account] ) if asset_id: txn2 = AssetTransferTxn( sender=sender, sp=suggested_params, receiver=market_address, amt=amount, index=asset_id ) else: txn2 = PaymentTxn( sender=sender, sp=suggested_params, receiver=market_address, amt=amount ) txn_group = TransactionGroup(prefix_transactions + [txn0, txn1, txn2]) return txn_group def prepare_unstake_transactions(sender, suggested_params, storage_account, amount, manager_app_id, market_app_id, oracle_app_id, asset_id=None): """Returns a :class:`TransactionGroup` object representing a remove stake group transaction against the algofi protocol. The sender requests to remove stake from a stake account and, if successful, the stake is removed. 
:param sender: account address for the sender :type sender: string :param suggested_params: suggested transaction params :type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object :param storage_account: storage account address for sender :type storage_account: string :param amount: amount of collateral to remove from the market :type amount: int :param asset_id: asset id of the asset underlying the collateral :type asset_id: int :param manager_app_id: id of the manager application :type manager_app_id: int :param market_app_id: id of the market application of the collateral :type market_app_id: int :param oracle_app_id: id of the oracle application of the collateral :type oracle_app_id: int :return: :class:`TransactionGroup` object representing a remove collateral underlying group transaction :rtype: :class:`TransactionGroup` """ supported_market_app_ids = [market_app_id] supported_oracle_app_ids = [oracle_app_id] prefix_transactions = get_init_txns( transaction_type=Transactions.REMOVE_COLLATERAL_UNDERLYING, sender=sender, suggested_params=suggested_params, manager_app_id=manager_app_id, supported_market_app_ids=supported_market_app_ids, supported_oracle_app_ids=supported_oracle_app_ids, storage_account=storage_account ) txn0 = ApplicationNoOpTxn( sender=sender, sp=suggested_params, index=manager_app_id, app_args=[manager_strings.remove_collateral_underlying.encode(), int_to_bytes(amount)] ) if asset_id: txn1 = ApplicationNoOpTxn( sender=sender, sp=suggested_params, index=market_app_id, app_args=[manager_strings.remove_collateral_underlying.encode()], foreign_apps=[manager_app_id], foreign_assets=[asset_id], accounts=[storage_account] ) else: txn1 = ApplicationNoOpTxn( sender=sender, sp=suggested_params, index=market_app_id, app_args=[manager_strings.remove_collateral_underlying.encode()], foreign_apps=[manager_app_id], accounts=[storage_account] ) txn_group = TransactionGroup(prefix_transactions + [txn0, txn1]) return txn_group def prepare_claim_staking_rewards_transactions(sender, suggested_params, storage_account, manager_app_id, market_app_id, oracle_app_id, foreign_assets): """Returns a :class:`TransactionGroup` object representing a claim rewards underlying group transaction against the algofi protocol. The sender requests to claim accrued rewards from the manager account and, if successful, the reward assets are sent to the sender. 
:param sender: account address for the sender :type sender: string :param suggested_params: suggested transaction params :type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object :param storage_account: storage account address for sender :type storage_account: string :param manager_app_id: id of the manager application :type manager_app_id: int :param market_app_id: id of the market application of the collateral :type market_app_id: int :param oracle_app_id: id of the oracle application :type oracle_app_id: int :param foreign_assets: list of rewards assets in the staking contract :type foreign_assets: list :return: :class:`TransactionGroup` object representing a claim rewards transaction :rtype: :class:`TransactionGroup` """ supported_market_app_ids = [market_app_id] supported_oracle_app_ids = [oracle_app_id] prefix_transactions = get_init_txns( transaction_type=Transactions.CLAIM_REWARDS, sender=sender, suggested_params=suggested_params, manager_app_id=manager_app_id, supported_market_app_ids=supported_market_app_ids, supported_oracle_app_ids=supported_oracle_app_ids, storage_account=storage_account ) txn0 = ApplicationNoOpTxn( sender=sender, sp=suggested_params, index=manager_app_id, app_args=[manager_strings.claim_rewards.encode()], accounts=[storage_account], foreign_assets=foreign_assets ) txn_group = TransactionGroup(prefix_transactions + [txn0]) return txn_group
[ 6738, 435, 70, 418, 34388, 1330, 9156, 198, 6738, 435, 70, 418, 34388, 13, 37443, 13, 7645, 2673, 1330, 15678, 27871, 818, 46047, 77, 11, 31433, 27871, 818, 46047, 77, 11, 15678, 2949, 18257, 46047, 77, 11, 28784, 46047, 77, 11, 31433...
2.665321
3,717
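A sketch of driving the opt-in helper above; the algod endpoint, app ids, and addresses are placeholders, and the signing/submission step is left to the algofi SDK's TransactionGroup API:

from algosdk.v2client import algod

client = algod.AlgodClient("hypothetical-api-token", "https://node.example.com")
params = client.suggested_params()

group = prepare_staking_contract_optin_transactions(
    manager_app_id=123456,              # placeholder ids
    market_app_id=654321,
    sender="SENDER_ADDRESS",            # placeholder addresses
    storage_address="STORAGE_ADDRESS",
    suggested_params=params,
)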
import rumps import requests import json API_URL = 'https://koinex.in/api/ticker' UPDATE_INTERVAL = 60 CURRENCIES = { 'Bitcoin': 'BTC', 'Ethereum': 'ETH', 'Ripple': 'XRP', 'Litecoin': 'LTC', 'Bitcoin Cash': 'BCH', } if __name__ == "__main__": KoinexStatusBarApp().run()
[ 11748, 7440, 862, 198, 11748, 7007, 198, 11748, 33918, 198, 198, 17614, 62, 21886, 796, 705, 5450, 1378, 7204, 500, 87, 13, 259, 14, 15042, 14, 83, 15799, 6, 198, 198, 16977, 62, 41358, 23428, 796, 3126, 198, 198, 34, 31302, 24181, ...
2.282443
131
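KoinexStatusBarApp is referenced but never defined in the cell. A minimal rumps-based reconstruction consistent with the constants above could look like this; the JSON shape of the Koinex response is an assumption, so adjust the lookup to the real payload:

import rumps
import requests

class KoinexStatusBarApp(rumps.App):
    def __init__(self):
        super(KoinexStatusBarApp, self).__init__("Koinex")

    @rumps.timer(UPDATE_INTERVAL)
    def refresh(self, _):
        data = requests.get(API_URL).json()
        # Assumed payload shape: {"prices": {"BTC": ..., ...}}
        self.title = 'BTC %s' % data.get('prices', {}).get(CURRENCIES['Bitcoin'], '?')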
from flask import Flask, jsonify from time import strftime from socket import gethostname from socket import gethostbyname app = Flask(__name__) if __name__ == '__main__': app.run(debug=True , port=8888 , host='0.0.0.0')
[ 6738, 42903, 1330, 46947, 11, 33918, 1958, 198, 6738, 640, 1330, 965, 31387, 198, 6738, 17802, 1330, 651, 4774, 3672, 198, 6738, 17802, 1330, 651, 4774, 1525, 3672, 198, 1324, 796, 46947, 7, 834, 3672, 834, 8, 198, 361, 11593, 3672, 8...
2.986667
75
import numpy as np from numpy.core.fromnumeric import mean from numpy.core.numeric import True_ from numpy.testing._private.utils import rand from polynomial_regression import PolynomialRegression from generate_regression_data import generate_regression_data from metrics import mean_squared_error # mse from math import log # use if scale too large to see error from k_nearest_neighbor import KNearestNeighbor try: import matplotlib.pyplot as plt except: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt if __name__ == '__main__': # Number 7, split A degree = 4 N = 100 x, y = generate_regression_data(degree, N, amount_of_noise=0.1) rand_sampl = np.random.choice(N, N, replace=False) # do not reselect numbers x_training, y_training = x[rand_sampl[:10]], y[rand_sampl[:10]] x_test, y_test = x[rand_sampl[10:]], y[rand_sampl[10:]] plots = [] mse_training = [] mse_test = [] # to 9 degrees for i in range(9): poly = PolynomialRegression(i) poly.fit(x_training, y_training) poly.visualize(x_training, y_training, path=f"../plots_N7_splitA/training_plot_degree_{i}", title=f"Training Plot Degree {i}") # test will be red poly.visualize(x_test, y_test, path=f"../plots_N7_splitA/test_plot_degree_{i}", title=f"Test Plot Degree {i}", color='r') y_hat_training = poly.predict(x_training) # predicted value mse_training.append(mean_squared_error(y_training, y_hat_training)) y_hat_test = poly.predict(x_test) mse_test.append(mean_squared_error(y_test, y_hat_test)) plots.append(poly) plt.clf() # clear figure plt.figure() # log was needed to scale plt.plot(range(9), [log(mse_training[i]) for i in range(9)], label="training error") plt.plot(range(9), [log(mse_test[i]) for i in range(9)], label="test error") plt.title("Error as a Function of Degree") plt.xlabel("degree") plt.ylabel("error") plt.legend() plt.grid(True) plt.savefig("../plots_N7_splitA/error_as_a_function_of_degree.png") # get the two lowest errors low_test_err_degree = mse_test.index(min(mse_test)) low_training_err_degree = mse_training.index(min(mse_training)) plt.clf() # clear figure plt.figure() plt.scatter(x_training, y_training) plt.plot(np.sort(plots[low_training_err_degree].X_training), plots[low_training_err_degree].f, label=f"lowest training error curve with degree = {low_training_err_degree}") plt.plot(np.sort(plots[low_test_err_degree].X_training), plots[low_test_err_degree].f, label=f"lowest test error curve with degree = {low_test_err_degree}") plt.title("Lowest Training and Test Errors") plt.xlabel("x") plt.ylabel("y") plt.legend() plt.grid(True) plt.savefig("../plots_N7_splitA/lowest_training_and_test_error.png") # Number 10, split A k = {1, 3, 5, 7, 9} kplots = [] mse_training_k = [] mse_test_k = [] kx_training = np.reshape(x_training, (-1,2)) ky_training = np.reshape(y_training, (-1,2)) kx_test = np.reshape(x_test, (-1, 2)) ky_test = np.reshape(y_test, (-1,2)) #print(kx_training) #print(kx_training.shape) for i in k: knn = KNearestNeighbor(i, distance_measure="euclidean", aggregator="mean") knn.fit(kx_training, ky_training) #print(f"x_training = {x_training.shape}") k_training = knn.predict(kx_training) mse_training_k.append(mean_squared_error(ky_training, k_training)) k_test = knn.predict(kx_test) mse_test_k.append(mean_squared_error(ky_test, k_test)) kplots.append(knn) plt.clf() # clear figure plt.figure() plt.plot(range(5), [(mse_training_k[i]) for i in range(5)], label="training error") plt.plot(range(5), [(mse_test_k[i]) for i in range(5)], label="test error") plt.title("Error as a Function of k") plt.xlabel("k") 
plt.ylabel("error") plt.legend() plt.grid(True) plt.savefig("../plots_N10_splitA/error_as_a_function_of_k.png") low_test_err_k = mse_test_k.index(min(mse_test_k)) plt.clf() # clear figure plt.figure() plt.scatter(x_training, y_training) plt.plot(np.sort(kplots[low_test_err_k]), kplots[low_test_err_k], label=f"lowest test error curve with k = {low_test_err_k}") plt.title("Lowest Test Error") plt.xlabel("x") plt.ylabel("y") plt.legend() plt.grid(True) plt.savefig("../plots_N10_splitA/lowest_test_error.png") # Number 9, split B rand_sampl = np.random.choice(N, N, replace=False) # do not reselect numbers x_training, y_training = x[rand_sampl[:50]], y[rand_sampl[:50]] x_test, y_test = x[rand_sampl[50:]], y[rand_sampl[50:]] plots = [] mse_training = [] mse_test = [] # to 9 degrees for i in range(9): poly = PolynomialRegression(i) poly.fit(x_training, y_training) poly.visualize(x_training, y_training, path=f"../plots_N9_splitB/training_plot_degree_{i}", title=f"Training Plot Degree {i}") # test will be red poly.visualize(x_test, y_test, path=f"../plots_N9_splitB/test_plot_degree_{i}", title=f"Test Plot Degree {i}", color='r') y_hat_training = poly.predict(x_training) # predicted value mse_training.append(mean_squared_error(y_training, y_hat_training)) y_hat_test = poly.predict(x_test) mse_test.append(mean_squared_error(y_test, y_hat_test)) plots.append(poly) plt.clf() # clear figure plt.figure() # log was needed to scale plt.plot(range(9), [log(mse_training[i]) for i in range(9)], label="training error") plt.plot(range(9), [log(mse_test[i]) for i in range(9)], label="test error") plt.title("Error as a Function of Degree") plt.xlabel("degree") plt.ylabel("error") plt.legend() plt.grid(True) plt.savefig("../plots_N9_splitB/error_as_a_function_of_degree.png") # get the two lowest errors low_test_err_degree = mse_test.index(min(mse_test)) low_training_err_degree = mse_training.index(min(mse_training)) plt.clf() # clear figure plt.figure() plt.scatter(x_training, y_training) plt.plot(np.sort(plots[low_training_err_degree].X_training), plots[low_training_err_degree].f, label=f"lowest training error curve with degree = {low_training_err_degree}") plt.plot(np.sort(plots[low_test_err_degree].X_training), plots[low_test_err_degree].f, label=f"lowest test error curve with degree = {low_test_err_degree}") plt.title("Lowest Training and Test Errors") plt.xlabel("x") plt.ylabel("y") plt.legend() plt.grid(True) plt.savefig("../plots_N9_splitB/lowest_training_and_test_error.png") # Number 10, split B k = {1, 3, 5, 7, 9} kplots = [] mse_training_k = [] mse_test_k = [] kx_training = np.reshape(x_training, (-1,2)) ky_training = np.reshape(y_training, (-1,2)) kx_test = np.reshape(x_test, (-1, 2)) ky_test = np.reshape(y_test, (-1,2)) #print(kx_training) #print(kx_training.shape) for i in k: knn = KNearestNeighbor(i, distance_measure="euclidean", aggregator="mean") knn.fit(kx_training, ky_training) #print(f"x_training = {x_training.shape}") k_training = knn.predict(kx_training) mse_training_k.append(mean_squared_error(ky_training, k_training)) k_test = knn.predict(kx_test) mse_test_k.append(mean_squared_error(ky_test, k_test)) kplots.append(poly) plt.clf() # clear figure plt.figure() plt.plot(range(5), [(mse_training_k[i]) for i in range(5)], label="training error") plt.plot(range(5), [(mse_test_k[i]) for i in range(5)], label="test error") plt.title("Error as a Function of k") plt.xlabel("k") plt.ylabel("error") plt.legend() plt.grid(True) plt.savefig("../plots_N10_splitB/error_as_a_function_of_k.png") low_test_err_k 
= mse_test_k.index(min(mse_test_k)) plt.clf() # clear figure plt.figure() plt.scatter(x_training, y_training) plt.plot(np.sort(kplots[low_test_err_k].X_training), kplots[low_test_err_k].f, label=f"lowest test error curve with k = {low_test_err_k}") plt.title("Lowest Test Error") plt.xlabel("x") plt.ylabel("y") plt.legend() plt.grid(True) plt.savefig("../plots_N10_splitB/lowest_test_error.png")
[ 11748, 299, 32152, 355, 45941, 198, 6738, 299, 32152, 13, 7295, 13, 6738, 77, 39223, 1330, 1612, 198, 6738, 299, 32152, 13, 7295, 13, 77, 39223, 1330, 6407, 62, 198, 6738, 299, 32152, 13, 33407, 13557, 19734, 13, 26791, 1330, 43720, 1...
2.161517
3,981
# EG10-20 Twinkle Twinkle classes import time import snaps tune = [Note(note=0, duration=0.4), Note(note=0, duration=0.4), Note(note=7, duration=0.4), Note(note=7, duration=0.4), Note(note=9, duration=0.4), Note(note=9, duration=0.4), Note(note=7, duration=0.8), Note(note=5, duration=0.4), Note(note=5, duration=0.4), Note(note=4, duration=0.4), Note(note=4, duration=0.4), Note(note=2, duration=0.4), Note(note=2, duration=0.4), Note(note=0, duration=0.8)] for note in tune: note.play()
[ 2, 41513, 940, 12, 1238, 1815, 19894, 1815, 19894, 6097, 198, 198, 11748, 640, 198, 11748, 23429, 198, 198, 83, 1726, 796, 685, 6425, 7, 11295, 28, 15, 11, 9478, 28, 15, 13, 19, 828, 5740, 7, 11295, 28, 15, 11, 9478, 28, 15, 13,...
2.133333
255
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import sys
import os
import numpy as np
import mxnet as mx
from mxnet.test_utils import *
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed
from mxnet.gluon import utils

if __name__ == '__main__':
    test_consistency(False)
[ 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, 5115, 6634, 9238, 13, 220, 383, 7054,...
3.441088
331
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
# LavaVu conf based on conf.py from underworld2
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

import os, sys
from os.path import dirname, join, abspath
sys.path.insert(0, abspath(join(dirname(__file__), '..')))
sys.path.insert(0, abspath(join(dirname(__file__), '..', '..')))
import setup as lsetup

# -- Project information -----------------------------------------------------

project = 'LavaVu'
copyright = '2020, Monash University'
author = 'Owen Kaluza, Monash University'

# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = lsetup.version
print('BUILDING LAVAVU DOCS FOR VERSION', release)

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
    'sphinx.ext.mathjax',
    'sphinx.ext.autosummary',
    'sphinx.ext.githubpages',
    'sphinx_markdown_tables',
    'myst_parser',
#    'nbsphinx',
]

napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_use_keyword = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
#source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
#html_theme = 'pyramid'
#import sphinx_rtd_theme
#html_theme = "sphinx_rtd_theme"
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    # Set the width of the content area. Defaults to '900px'
    'sidebar_width': '300px',
    'page_width': '90%',
    #'fixed_sidebar': 'true', #Need to scroll for full table of contents
    'font_family': 'sans',
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
#html_sidebars = {}
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',
        'searchbox.html',
        'donate.html',
    ]
}


# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'LavaVudoc'


# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'LavaVu.tex', 'LavaVu Documentation',
     'Owen Kaluza', 'manual'),
]


# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'lavavu', 'LavaVu Documentation',
     [author], 1)
]


# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'LavaVu', 'LavaVu Documentation',
     author, 'LavaVu', 'One line description of project.',
     'Miscellaneous'),
]


# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']


# -- Extension configuration -------------------------------------------------

# -- Options for todo extension ----------------------------------------------

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# setup mock classes so no building is required
# generate rst files
import os
import sys
# add current directory for `generate_api_documentation`
sys.path.append(os.path.dirname(__name__))
# add top project directory as well
sys.path.insert(0, os.path.join(os.path.dirname(__name__),'../../lavavu'))

try:
    import lavavu
    import convert
    import points
    import tracers
    import control
except (Exception) as e:
    from mock import Mock as MagicMock
    MOCK_MODULES = ['scipy', 'numpy', '_LavaVuPython']
    # Mock was imported under the name MagicMock above, so instantiate that
    # (the original called the undefined name Mock() here)
    sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES)

import generate_api_documentation

import subprocess
subprocess.call("./run-nb-to-rst.sh", shell=True)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 28373, 2393, 329, 262, 45368, 28413, 10314, 27098, 13, 198, 2, 406, 4170, 53, 84, 1013, 1912, 319, 1013, 13, 9078, 422, 42494, 17, 198, 2, 198, 2, 770, ...
3.152057
2,407
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import collections

import mkkm_mr
import networkx as nx
from sklearn.cluster import KMeans, SpectralClustering
from snf_simple import SNF

from pamogk import config
from pamogk import label_mapper
from pamogk.data_processor import rnaseq_processor as rp, synapse_rppa_processor as rpp
from pamogk.gene_mapper import uniprot_mapper
from pamogk.kernels.lmkkmeans_train import lmkkmeans_train
from pamogk.kernels.pamogk import kernel
from pamogk.lib.sutils import *
from pamogk.pathway_reader import cx_pathway_reader as cx_pw
# see https://www.mathworks.com/help/matlab/matlab_external/install-the-matlab-engine-for-python.html
from pamogk.result_processor.label_analysis import LabelAnalysis

# import sys
# sys.path.insert(0, '/Users/fma/dev/bilkent/research/snf')
# sys.path.insert(0, '/Users/fma/dev/bilkent/research/mkkm-mr')

parser = argparse.ArgumentParser(description='Run PAMOGK-mut algorithms on pathways')
parser.add_argument('--run-id', '-rid', metavar='run-id', dest='run_id', type=str, help='Unique Run ID')
parser.add_argument('--rs-patient-data', '-rs', metavar='file-path', dest='rnaseq_patient_data', type=str2path,
                    help='rnaseq pathway ID list',
                    default=config.DATA_DIR / 'kirc_data/unc.edu_KIRC_IlluminaHiSeq_RNASeqV2.geneExp.whitelist_tumor.txt')
parser.add_argument('--rp-patient-data', '-rp', metavar='file-path', dest='rppa_patient_data', type=str2path,
                    help='rppa pathway ID list', default=config.DATA_DIR / 'kirc_data/kirc_rppa_data')
parser.add_argument('--som-patient-data', '-s', metavar='file-path', dest='som_patient_data', type=str2path,
                    help='som mut pathway ID list', default=config.DATA_DIR / 'kirc_data/kirc_somatic_mutation_data.csv')
parser.add_argument('--label', '-m', metavar='label', dest='label', type=str, default='th196',
                    help='Label value that will be smoothed')
# used values: [0, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
parser.add_argument('--smoothing-alpha', '-a', metavar='alpha', dest='smoothing_alpha', type=float, default=0.01,
                    help='Smoothing alpha in range of 0-1')
parser.add_argument('--drop-percent', '-p', metavar='drop-percent', dest='drop_percent', type=int, default=1,
                    help='Drop percentage in range of 0-100')
parser.add_argument('--threshold', '-t', metavar='threshold', dest='threshold', type=float, default=1.96,
                    help='Cut off threshold')
parser.add_argument('--continuous', '-c', metavar='bool', dest='continuous', type=str2bool, default=True,
                    help='Whether to produce continuous values for under/over expressed')
parser.add_argument('--normalize-kernels', '-nk', dest='kernel_normalization', type=str2bool, default=True,
                    help='Kernel Normalization')

args = {}


def label_som_patient_genes(self, all_pw_map, patients):
    """Labels all patients with matching level of expression

    Parameters
    ----------
    all_pw_map: :obj:`list` of :obj:`networkx.classes.graph.Graph`
        a dictionary of all pathways we are using
    patients: :obj:`list`
        list of patients with mutation mappings
    """
    # check if we already stored all over/under expression pathway data if so restore them
    if self.som_pathways_save_valid(all_pw_map):
        return self.restore_som_pathways(all_pw_map)

    num_pat = len(patients)
    # if there are missing ones calculate all of them
    log('Somatic mutation patient pathway labeling')
    for ind, patient in enumerate(patients):
        pid = patient['pat_id']
        genes = patient['mutated_nodes']  # get uniprot gene ids from indices
        genes = np.array([genes])
        logr(f'Checking patient for somatic mutation {ind + 1:4}/{num_pat} pid={pid}')
        label_mapper.mark_label_on_pathways('som', pid, all_pw_map, genes, self.label)
    log()
    self.save_som_pathways(all_pw_map)
    return all_pw_map


def cluster_cont(self, kernels, n_clusters):
    snf_K = 20  # number of neighbors, usually (10~30)
    snf_t = 20  # number of iterations, usually (10~20)

    # SNF
    # W = snf_compute.snf(*kernels, K=snf_K, t=snf_t)
    W = SNF(kernels, K=snf_K, t=snf_t)

    # KMeans
    labels = self.kmeans_cluster(W, n_clusters)
    np_save_npz(self.result_dir / f'pamogk-snf-kmeans-k={n_clusters}', labels=labels)

    # Spectral
    labels = SpectralClustering(n_clusters, affinity='precomputed').fit_predict(W)
    np_save_npz(self.result_dir / f'pamogk-snf-spectral-k={n_clusters}', labels=labels)

    KH = mkkm_mr.lib.kernel_centralize(kernels)
    KH = mkkm_mr.lib.kernel_normalize(KH)

    num_ker = kernels.shape[0]
    gamma0 = np.ones((num_ker, 1)) / num_ker
    avgKer = mkkm_mr.lib.combine_kernels(KH, gamma0)
    H = mkkm_mr.lib.kernel_kmeans_iter(avgKer, n_clusters)
    labels = self.kmeans_cluster(H, n_clusters)
    np_save_npz(self.result_dir / f'pamogk-kmeans-k={n_clusters}.csv', labels=labels)

    # AAAI - 16 - MKKM-MR
    M = mkkm_mr.lib.calM(KH)
    lambdas = np.power(2., self.log2_lambdas)
    for log2_lambda, lambda_ in zip(self.log2_lambdas, lambdas):
        log(f'running for n_clusters={n_clusters} log2_lambda={log2_lambda}')
        [H, weights, obj] = mkkm_mr.mkkm_mr(KH, M, n_clusters, lambda_)
        labels = self.kmeans_cluster(H, n_clusters)
        out_file = self.result_dir / f'pamogk-mkkm-k={n_clusters}-log2_lambda={log2_lambda}'
        np_save_npz(out_file, labels=labels, weights=weights, obj=obj)


if __name__ == '__main__':
    create_experiment().run()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 17268, 198, 198, 11748, 33480, 13276, 62, 43395, 198, 11748, 3127, 87, 355, 299, 87, 198, 6738, 1341, ...
2.23908
2,610
import requests
from lxml import html
from bs4 import BeautifulSoup
import json
import codecs
import re


# In this variable I will store the information as a dictionary with this structure:
# {number : "Name"}
ms_dict = {}
links_dict = {"links": []}

for index in range(1, 27000):
    print(index)
    page = requests.get('http://www.handschriftencensus.de/' + str(index))
    c = page.content
    soup = BeautifulSoup(c, "lxml")
    ms_label = soup.find_all("th", class_="ort")
    if len(ms_label) > 0:
        ms_label = ms_label[0].text.rstrip()
        ms_dict["h" + str(index)] = ms_label
        inhalt = soup.find_all("a", class_="aw")
        for el in inhalt:
            work_id = re.findall('/\d+$', el['href'])[0][1:]
            links_dict['links'].append(
                {
                    "source": "h" + str(index),
                    "target": "w" + work_id
                }
            )
    # In td id="inhalt" get the href, and only the number. Create the links at the same time
    # work = work[0].text
    # work = work.replace("'","")
    # final_dict[index +1] = {"title":work}
    #
    # signaturen = soup.find_all("ol", class_="signaturen")
    # if len(signaturen) > 0:
    #     final_dict[index+1]["manuscripts"] = []
    #     signaturen = signaturen[0]
    #     for elem in signaturen:
    #         if len(elem) > 1:
    #             manuscript = elem.find_all("a")[0]
    #
    #             final_dict[index+1]["manuscripts"].append(manuscript.text)
    index = index + 1

# Save data as json
with codecs.open('manuscripts_ids.json', 'w', 'utf-8') as outfile:
    json.dump(ms_dict, outfile, indent=2)

with codecs.open('links.json', 'w', 'utf-8') as outfile:
    json.dump(links_dict, outfile, indent=2)

# To save the data as a csv
# table = pd.DataFrame.from_dict(final_dict, orient='index')
# table.to_csv("Handschriftencensus_full.csv", encoding="utf-8")
[ 11748, 7007, 198, 6738, 300, 19875, 1330, 27711, 198, 6738, 275, 82, 19, 1330, 23762, 50, 10486, 198, 11748, 33918, 198, 11748, 40481, 82, 198, 11748, 302, 628, 198, 2, 818, 428, 7885, 314, 481, 3650, 262, 1321, 355, 257, 22155, 351, ...
2.432857
700
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0


import numpy as np
import pytest
import GPy
from math import isclose

from emukit.model_wrappers.gpy_quadrature_wrappers import QuadratureRBF, RBFGPy, BaseGaussianProcessGPy
from emukit.quadrature.methods import VanillaBayesianQuadrature
from emukit.quadrature.acquisitions import MutualInformation, IntegralVarianceReduction

REL_TOL = 1e-5
ABS_TOL = 1e-4
[ 2, 15069, 2864, 6186, 13, 785, 11, 3457, 13, 393, 663, 29116, 13, 1439, 6923, 33876, 13, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 24843, 12, 17, 13, 15, 628, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 12972, 9288, 1...
3.012579
159
import unittest
import sys

sys.path.insert(2, "..")
from awsec2instances_includes.ProtocolService import ProtocolService
[ 11748, 555, 715, 395, 198, 11748, 25064, 198, 17597, 13, 6978, 13, 28463, 7, 17, 11, 366, 492, 4943, 198, 6738, 3253, 2363, 17, 8625, 1817, 62, 42813, 13, 19703, 4668, 16177, 1330, 20497, 16177, 628 ]
3.388889
36
from .constants import SPECIAL_TOKENS
try:
    import re2 as re
except ImportError:
    import re


def twitter_sentiment_token_matching(token):
    """Special token matching function for twitter sentiment data."""
    if 'URL_TOKEN' in SPECIAL_TOKENS and re.match(r'https?:\/\/[^\s]+', token):
        return SPECIAL_TOKENS['URL_TOKEN']
    if 'POS_EM_TOKEN' in SPECIAL_TOKENS and re.match(r':-?(\)|D|p)+', token):
        return SPECIAL_TOKENS['POS_EM_TOKEN']
    if 'NEG_EM_TOKEN' in SPECIAL_TOKENS and re.match(r':-?(\(|\\|/)+', token):
        return SPECIAL_TOKENS['NEG_EM_TOKEN']
    if 'USER_TOKEN' in SPECIAL_TOKENS and re.match(
            r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)', token):
        return SPECIAL_TOKENS['USER_TOKEN']
    if 'HEART_TOKEN' in SPECIAL_TOKENS and re.match(r'<3+', token):
        return SPECIAL_TOKENS['HEART_TOKEN']
[ 6738, 764, 9979, 1187, 1330, 38846, 62, 10468, 42, 16938, 198, 28311, 25, 198, 220, 220, 220, 1330, 302, 17, 355, 302, 198, 16341, 17267, 12331, 25, 198, 220, 220, 220, 1330, 302, 628, 198, 4299, 17044, 62, 34086, 3681, 62, 30001, 6...
2.18797
399
"""A setuptools based setup module. See: https://packaging.python.org/guides/distributing-packages-using-setuptools/ """ from setuptools import setup, find_packages from os import path here = path.abspath(path.dirname(__file__)) with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name='envyconfig', version='1.2.1', description='YAML reader with ENV interpolation.', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/geirem/envyconfig', author='https://github.com/geirem', author_email='geiremb@gmail.com', classifiers=[ # https://pypi.org/classifiers/ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Topic :: Software Development', 'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3 :: Only', ], keywords='configtools development', package_dir={ '': 'src', }, packages=find_packages(where='src'), python_requires='>=3.8', extras_require={ 'test': ['pytest'], 'googlesecrets': ["google-cloud-secret-manager"] }, project_urls={ # Optional 'Bug Reports': 'https://github.com/geirem/envyconfig/issues', 'Funding': 'https://donate.pypi.org', 'Source': 'https://github.com/geirem/envyconfig/', }, )
[ 37811, 32, 900, 37623, 10141, 1912, 9058, 8265, 13, 198, 6214, 25, 198, 5450, 1378, 8002, 3039, 13, 29412, 13, 2398, 14, 5162, 1460, 14, 17080, 2455, 278, 12, 43789, 12, 3500, 12, 2617, 37623, 10141, 14, 198, 37811, 198, 198, 6738, ...
2.509182
599
#
# Solver class using Scipy's adaptive time stepper
#
import casadi
import pybamm

import scipy.integrate as it
import numpy as np
[ 2, 198, 2, 4294, 332, 1398, 1262, 1446, 541, 88, 338, 29605, 640, 2876, 2848, 198, 2, 198, 11748, 6124, 9189, 198, 11748, 12972, 65, 6475, 198, 198, 11748, 629, 541, 88, 13, 18908, 4873, 355, 340, 198, 11748, 299, 32152, 355, 45941,...
3.022727
44
import datetime

from dateutil.parser import parse
from mongoengine import DateTimeField, FileField
from mongoengine.connection import DEFAULT_CONNECTION_NAME
#from mongoengine.python_support import str_types
from six import string_types as str_types

import io

from django.conf import settings
if settings.FILE_DB == settings.S3:
    import crits.core.s3_tools as S3

def getFileField(db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs", **kwargs):
    """
    Determine if the admin has configured CRITs to utilize GridFS or S3 for
    binary storage.
    """
    if settings.FILE_DB == settings.GRIDFS:
        return FileField(db_alias, collection_name, **kwargs)
    elif settings.FILE_DB == settings.S3:
        return S3FileField(db_alias, collection_name, **kwargs)
[ 11748, 4818, 8079, 198, 198, 6738, 3128, 22602, 13, 48610, 1330, 21136, 198, 6738, 285, 25162, 18392, 1330, 7536, 7575, 15878, 11, 9220, 15878, 198, 6738, 285, 25162, 18392, 13, 38659, 1330, 5550, 38865, 62, 10943, 45, 24565, 62, 20608, ...
2.984674
261
import sys
from PySide6 import QtGui
[ 11748, 25064, 198, 6738, 9485, 24819, 21, 1330, 33734, 8205, 72, 198 ]
3.083333
12
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import re
import unittest

COLOR = "shiny gold"
FNAME = "input.txt"
N_ITER = 1e7
TEST_FNAME = "test_input.txt"


def main():
    """Main function."""
    data = load_input(FNAME)
    part1(data)
    part2(data)
    print("\nUnittests")
    unittest.main()


def part1(data):
    """Solution to day 7, part 1."""
    for rule in data:
        Bag(rule)
    n_bags = Bag.n_bags_containing_specific_bag(COLOR)
    print(f"{n_bags} bags can contain at least one {COLOR} bag.")
    return n_bags


def part2(data):
    """Solution to day 7, part 2."""
    for rule in data:
        Bag(rule)
    n_bags = Bag.n_bags_inside(COLOR)
    print(f"One {COLOR} bag contains {n_bags} other bags.")
    return n_bags


def load_input(fname):
    """Read in the data, return as a list."""
    with open(fname, "r") as f:
        data = f.readlines()
    data = [x.strip("\n") for x in data]
    return data


if __name__=="__main__":
    main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 302, 198, 11748, 555, 715, 395, 198, 198, 46786, 796, 366, 1477, 3541, 3869, 1, 198, 37, 20608, 796, ...
2.272517
433
#!/usr/bin/env python3
#coding: UTF-8

import os
import sys
import time
import json
import argparse

from os.path import join, exists, dirname

from upgrade import check_upgrade
from utils import call, get_conf, get_script, get_command_output, get_install_dir

installdir = get_install_dir()
topdir = dirname(installdir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Seafile cluster start script')
    parser.add_argument('--mode')

    main(parser.parse_args())
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 66, 7656, 25, 41002, 12, 23, 198, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 640, 198, 11748, 33918, 198, 11748, 1822, 29572, 198, 6738, 28686, 13, 6978, 1330, 4654, 11, ...
2.96988
166
import sys, os
import nltk
import numpy as np


if __name__ == '__main__':
    result_base = '/home/sc2nf/codit-clone'
    option = 'token'  # 'token' or 'tree'
    size = 10
    # if option == 'tree':
    #     file_name = 'codit-all-concrete_' + str(size) + '.2_' + str(2*size) + '_decode_res.txt'
    # else:
    #     file_name = 'codit.all.token.top.' + str(size) + '_' + str(size) + '_decode_res.txt'

    file_name_tree = 'codit-all-concrete_' + str(size) + '.2_' + str(2 * size) + '_decode_res.txt'
    file_path_tree = result_base + '/' + file_name_tree
    patches_tree = read_patch(file_path_tree, size)
    unique_indices = de_duplicate_patches(patches_tree)
    # unique_patches_tree = patches_tree[unique_indices]
    # unique_count = len(unique_patches_tree)

    file_name_token = 'codit.all.token.top.' + str(size) + '_' + str(size) + '_decode_res.txt'
    file_path_token = result_base + '/' + file_name_token
    patches_token = read_patch(file_path_token, size)
    # unique_patches = patches_token[unique_indices]

    unified_patches = []
    for idx, (p_tree, p_token) in enumerate(zip(patches_tree, patches_token)):
        if idx in unique_indices:
            assert isinstance(p_tree, Patch) and isinstance(p_token, Patch)
            p_tree.verdict_token = p_token.verdict
            unified_patches.append(p_tree)

    tree_count = np.sum([1 if p.verdict else 0 for p in unified_patches])
    token_count = np.sum([1 if p.verdict_token else 0 for p in unified_patches])

    tree_indices = set()
    token_indices = set()
    for i, p in enumerate(unified_patches):
        if p.verdict:
            tree_indices.add(i)
        if p.verdict_token:
            token_indices.add(i)

    only_tree = tree_indices.difference(token_indices)
    only_token = token_indices.difference(tree_indices)
    common = tree_indices.intersection(token_indices)
    print(tree_count, token_count, len(only_token), len(only_tree), len(common), len(unified_patches))
    #
    # total_success_tree = np.sum([1 if p.verdict else 0 for p in unique_patches])
    # print(unique_patches, total_success_tree)
    # tree_success_indices_in_unique = set()
    # for idx, p in enumerate(unique_patches):
    #     if p.verdict:
    #         tree_success_indices_in_unique.add(idx)
    #
    #
    #
    # total_success_token = np.sum([1 if p.verdict else 0 for p in unique_patches])
    # print(tree_count, total_success_token)
[ 11748, 25064, 11, 28686, 198, 11748, 299, 2528, 74, 198, 11748, 299, 32152, 355, 45941, 628, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1255, 62, 8692, 796, 31051, 11195, 14, 1416, 17, 77, ...
2.310772
1,049
# Copyright (c) 2016-2018 Koninklijke Philips N.V. All rights reserved. A
# copyright license for redistribution and use in source and binary forms,
# with or without modification, is hereby granted for non-commercial,
# experimental and research purposes, provided that the following conditions
# are met:
# - Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimers.
# - Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimers in the
#   documentation and/or other materials provided with the distribution. If
#   you wish to use this software commercially, kindly contact
#   info.licensing@philips.com to obtain a commercial license.
#
# This license extends only to copyright and does not include or grant any
# patent license or other license whatsoever.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

import os
import subprocess
import sys

import pysnark.options


def run(eksize, pksize, genmk=False):
    """
    Run the qapgen tool

    :param eksize: Desired master evaluation key size
    :param pksize: Desired master public key size
    :param genmk: True if a new master secret key should be generated, False otherwise
    :return: None
    """
    mskfile = pysnark.options.get_mskey_file()
    mkeyfile = pysnark.options.get_mkey_file()
    mpkeyfile = pysnark.options.get_mpkey_file()

    if not genmk and not os.path.isfile(mskfile):
        raise IOError("Could not enlarge master key material: master secret key missing")

    print >> sys.stderr, "*** " + ("Generating" if genmk else "Enlarging") + " master key material"

    if subprocess.call([pysnark.options.get_qaptool_exe("qapgen"),
                        str(max(pksize, eksize, 0)), str(max(pksize, 0)),
                        mskfile, mkeyfile, mpkeyfile]) != 0:
        sys.exit(2)


def get_mekey_size():
    """
    Get the size (maximal exponent) of the current master evaluation key

    :return: Size, or -1 if key does not exist
    """
    try:
        mekf = open(pysnark.options.get_mkey_file())
        curmk = int(mekf.next().strip().split(" ")[2])
        mekf.close()
        return curmk
    except IOError:
        return -1


def get_mpkey_size():
    """
    Get the size (maximal exponent) of the current master public key

    :return: Size, or -1 if key does not exist
    """
    try:
        mpkf = open(pysnark.options.get_mpkey_file())
        curmpk = int(mpkf.next().strip().split(" ")[2])
        mpkf.close()
        return curmpk
    except IOError:
        return -1


def ensure_mkey(eksize, pksize):
    """
    Ensures that there are master evaluation and public keys of the given sizes.
    If master evaluation/public keys exist but are too small, and there is no
    master secret key, this raises an error. If there is no key material at all,
    a fresh master secret key will be generated.

    :param eksize: Minimal evaluation key size (-1 if not needed)
    :param pksize: Minimal public key size (-1 if not needed)
    :return: Actual evaluation key, public key size after key generation
    """
    curek = get_mekey_size()
    curpk = get_mpkey_size()

    havemsk = os.path.isfile(pysnark.options.get_mskey_file())
    havekeys = os.path.isfile(pysnark.options.get_mpkey_file()) or os.path.isfile(pysnark.options.get_mkey_file())

    if curek < eksize or curpk < pksize:
        if havemsk:
            run(max(curek, eksize), max(curpk, pksize), False)
            return (max(curek, eksize), max(curpk, pksize))
        elif havekeys:
            raise IOError("Key material too small (" + str(curek) + "," + str(curpk) +
                          ")<(" + str(eksize) + "," + str(pksize) + ") and missing master secret key")
        else:
            run(eksize, pksize, True)
            return (eksize, pksize)
    else:
        return (curek, curpk)


if __name__ == "__main__":
    if len(sys.argv) < 3:
        print >>sys.stderr, "*** Usage:", sys.argv[0], "<eksize>", "<pksize>"
        sys.exit(2)

    argeksize = int(sys.argv[1])
    argpksize = int(sys.argv[2])

    run(argeksize, argpksize, not os.path.isfile(pysnark.options.get_mskey_file()))
[ 2, 15069, 357, 66, 8, 1584, 12, 7908, 17431, 676, 75, 2926, 365, 46905, 399, 13, 53, 13, 1439, 2489, 10395, 13, 317, 198, 2, 6634, 5964, 329, 41425, 290, 779, 287, 2723, 290, 13934, 5107, 11, 198, 2, 351, 393, 1231, 17613, 11, 3...
2.656974
1,857
"""Helper Functions Some Misc Functions used in this app """ import secrets import string from functools import wraps from urllib.parse import urljoin, urlparse from dateutil import parser from flask import abort, current_app, request from flask_login import current_user from flask_migrate import upgrade def admin_required_decorator(func): """Restrict view function to admin-only Arguments: func {view function} -- The view function to be restricting Returns: view function -- The restricted function """ return decorated_view_function def pushover_required(func): """Restrict view function to users who have configured Pushover account Arguments: func {view function} -- The view function to be restricting Returns: view function -- The restricted function """ return decorated_function def youtube_required(func): """Restrict view function to users who have configured YouTube account Arguments: func {view function} -- The view function to be restricting Returns: view function -- The restricted function """ return decorated_function def is_safe_url(target): """Helper used to check endpoint before redirecting user Arguments: target {url} -- a url with complete scheme and domain to be examine Returns: bool -- target is a safe url or not """ ref_url = urlparse(request.host_url) test_url = urlparse(urljoin(request.host_url, target)) return test_url.scheme in ("http", "https") and ref_url.netloc == test_url.netloc def notify_admin(initiator, service, **kwargs): """Send Notification to all Admin A Temporary function used to notify admin Arguments: initiator {str} -- Action or reason that trigger this notification service {str or notification.Service} -- Service used to send notification **kwargs {dict} -- optional arguments passed to notification Returns: dict -- Response from notification service """ from ..models.user import User admins = User.query.filter_by(admin=True).all() response = {} for admin in admins: response[admin.username] = admin.send_notification(initiator, service, **kwargs) return response
[ 37811, 47429, 40480, 198, 198, 4366, 29882, 40480, 973, 287, 428, 598, 198, 37811, 198, 11748, 13141, 198, 11748, 4731, 198, 6738, 1257, 310, 10141, 1330, 27521, 198, 6738, 2956, 297, 571, 13, 29572, 1330, 19016, 22179, 11, 19016, 29572, ...
3.226761
710
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy.linalg as la

print("loading time series... ")

plt.figure(figsize=(16,9))

timeSeries = np.loadtxt('TheftTS.txt',delimiter=',',dtype=float)  # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'r-', label='Theft')

timeSeries = np.loadtxt('BatteryTS.txt',delimiter=',',dtype=float)  # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'g-', label='Battery')

timeSeries = np.loadtxt('CriminalDamageTS.txt',delimiter=',',dtype=float)  # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'b-', label='Criminal_Damage')

timeSeries = np.loadtxt('TarcoticsTS.txt',delimiter=',',dtype=float)  # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'c-', label='Narcotics')

timeSeries = np.loadtxt('AssaultTS.txt',delimiter=',',dtype=float)  # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'m-', label='Assault')

plt.xticks(np.arange(0,24,step=1))
plt.grid(True)
plt.legend()
plt.xlabel('Hour')
plt.ylabel('Total Crimes')
plt.title('Crime per Hour')
# plt.show()
plt.savefig('CrimePerHour.png',format='png',dpi=600)
[ 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 629, 541, 88, 13, 75, 1292, 70, 355, 8591, 198, 198, 4798, 7203, 25138, 640, 2168, 986, 366, 8, ...
2.503432
437
""" Creating training file from own custom dataset >> python annotation_csv.py \ --path_dataset ~/Data/PeopleDetections \ --path_output ../model_data """ import os import sys import glob import argparse import logging import pandas as pd import tqdm sys.path += [os.path.abspath('.'), os.path.abspath('..')] from keras_yolo3.utils import update_path IMAGE_EXTENSIONS = ('.png', '.jpg') ANNOT_COLUMNS = ('xmin', 'ymin', 'xmax', 'ymax', 'class') if __name__ == '__main__': logging.basicConfig(level=logging.INFO) arg_params = parse_arguments() _main(**arg_params)
[ 37811, 198, 32071, 3047, 2393, 422, 898, 2183, 27039, 198, 198, 4211, 21015, 23025, 62, 40664, 13, 9078, 3467, 198, 220, 220, 220, 1377, 6978, 62, 19608, 292, 316, 47795, 6601, 14, 8061, 47504, 507, 3467, 198, 220, 220, 220, 1377, 697...
2.666667
222
from pegasos import bgd_pegasos

import numpy as np
import pandas as pd
import pickle
import sys


retrain = False
wandbs = None

if retrain:
    x_train, y_train = read_data("mnist/train.csv")
    num_classes = len(set(y_train))
    wandbs = [[() for j in range(num_classes)] for i in range(num_classes)]
    count = 0
    for i in range(num_classes):
        for j in range(num_classes):
            if(i < j):
                count += 1
                print("\nClassifier %d: %d vs %d\n" % (count, i, j))
                xc, yc = [], []
                for x, y in zip(x_train, y_train):
                    if (y == i):
                        xc.append(x)
                        yc.append(1)
                    elif(y == j):
                        xc.append(x)
                        yc.append(-1)
                wandbs[i][j] = bgd_pegasos(xc, yc, 10e-4, c=1.0)
    with open("models/pegasos.model", "wb") as f:
        pickle.dump(wandbs, f)
else:
    print("\nLoading Model")
    with open("models/pegasos.model", "rb") as f:
        wandbs = pickle.load(f)

input_file = sys.argv[1].strip()
output_file = sys.argv[2].strip()

x_set, y_set = read_data(input_file)
print("Predicting")
run2(x_set, y_set, wandbs, output_file)
[ 6738, 613, 22649, 418, 1330, 275, 21287, 62, 22071, 292, 418, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 2298, 293, 198, 11748, 25064, 628, 628, 198, 1186, 3201, 796, 10352, 198, 86, 392...
1.829412
680
import numpy as np

# Inner (or dot) product
a = np.array([1,2])
b = np.array([3,4])
np.inner(a, b)
a.dot(b)

# Outer product
a = np.array([1,2])
b = np.array([3,4])
np.outer(a, b)

# Inverse
m = np.array([[1,2], [3,4]])
np.linalg.inv(m)

# Multiplying a matrix by its inverse gives the identity
m = np.array([[1,2], [3,4]])
minv = np.linalg.inv(m)
m.dot(minv)

# Diagonal
m = np.array([[1,2], [3,4]])
np.diag(m)
m = np.array([1,2])
np.diag(m)

# Determinant
m = np.array([[1,2], [3,4]])
np.linalg.det(m)

# Trace - sum of elements of the diagonal
m = np.array([[1,2], [3,4]])
np.diag(m)
np.diag(m).sum()
np.trace(m)

# Transpose
m = np.array([ [1,2], [3,4] ])
m.T

# Gaussian distribution
m = np.random.randn(2,3)
m

# Covariance
X = np.random.randn(100,3)
np.cov(X.T)

# Eigen vectors and values
# For a symmetric matrix (m == m.T) or hermitian matrix (m == m.H) we use eigh.
m = np.array([
    [ 0.89761228,  0.00538701, -0.03229084],
    [ 0.00538701,  1.04860676, -0.25001666],
    [-0.03229084, -0.25001666,  0.81116126]])
# eigh returns a tuple: the first element contains the three eigenvalues,
# the second contains the eigenvectors stored in columns.
np.linalg.eigh(m)

# Solving linear systems
# The admission fee at a small fair is $1.50 for children and $4.00 for adults.
# On a certain day 2,200 people enter the fair and $5050 is collected.
# How many children and how many adults attended?
#
# Let X1 = number of children
# Let X2 = number of adults
# X1 + X2 = 2200
# 1.5X1 + 4X2 = 5050
a = np.array([ [1,1], [1.5,4] ])
b = np.array( [ 2200, 5050] )
np.linalg.solve(a, b)
[ 11748, 299, 32152, 355, 45941, 198, 198, 2, 24877, 357, 273, 16605, 8, 1720, 198, 64, 796, 45941, 13, 18747, 26933, 16, 11, 17, 12962, 198, 65, 796, 45941, 13, 18747, 26933, 18, 11, 19, 12962, 198, 37659, 13, 5083, 7, 64, 11, 275,...
2.1622
709
import os
import platform
import unittest

# ZODB >= 3.9.  The blob directory can be a private cache.
shared_blob_dir_choices = (False, True)

RUNNING_ON_TRAVIS = os.environ.get('TRAVIS')
RUNNING_ON_APPVEYOR = os.environ.get('APPVEYOR')
RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR

if RUNNING_ON_CI:
    skipOnCI = unittest.skip
else:
    skipOnCI = _do_not_skip

if RUNNING_ON_APPVEYOR:
    skipOnAppveyor = unittest.skip
else:
    skipOnAppveyor = _do_not_skip

CACHE_SERVERS = None
CACHE_MODULE_NAME = None

if RUNNING_ON_TRAVIS:
    # We expect to have access to a local memcache server
    # on travis. Use it if we can import drivers.
    # pylint:disable=unused-import
    try:
        import pylibmc
        CACHE_SERVERS = ["localhost:11211"]
        CACHE_MODULE_NAME = 'relstorage.pylibmc_wrapper'
    except ImportError:
        try:
            import memcache
            CACHE_SERVERS = ["localhost:11211"]
            CACHE_MODULE_NAME = 'memcache'
        except ImportError:
            pass

USE_SMALL_BLOBS = ((RUNNING_ON_CI  # slow here
                    or platform.system() == 'Darwin'  # interactive testing
                    or os.environ.get("RS_SMALL_BLOB"))  # define
                   and not os.environ.get('RS_LARGE_BLOB'))

# mysqlclient (aka MySQLdb) and possibly other things that
# use libmysqlclient.so will try to connect over the
# default Unix socket that was established when that
# library was compiled if no host is given. But that
# server may not be running, or may not be the one we want
# to use for testing, so explicitly ask it to use TCP
# socket by giving an IP address (using 'localhost' will
# still try to use the socket.) (The TCP port can be bound
# by non-root, but the default Unix socket often requires
# root permissions to open.)
STANDARD_DATABASE_SERVER_HOST = '127.0.0.1'
DEFAULT_DATABASE_SERVER_HOST = os.environ.get('RS_DB_HOST',
                                              STANDARD_DATABASE_SERVER_HOST)

TEST_UNAVAILABLE_DRIVERS = not bool(os.environ.get('RS_SKIP_UNAVAILABLE_DRIVERS'))
if RUNNING_ON_CI:
    TEST_UNAVAILABLE_DRIVERS = False
[ 11748, 28686, 198, 11748, 3859, 198, 11748, 555, 715, 395, 198, 198, 2, 1168, 3727, 33, 18189, 513, 13, 24, 13, 220, 383, 44812, 8619, 460, 307, 257, 2839, 12940, 13, 198, 28710, 62, 2436, 672, 62, 15908, 62, 6679, 1063, 796, 357, ...
2.340659
910
# -*- coding: utf-8 -*-
"""Example generation for testing.

Exports dict of examples, useful for testing as fixtures.

example_dict: dict indexed by triple
  1st element = mtype - str
  2nd element = considered as this scitype - str
  3rd element = int - index of example
elements are data objects, considered examples for the mtype
    all examples with same index are considered "same" on scitype content
    if None, indicates that representation is not possible

example_dict_lossy: dict of bool indexed by triple
  1st element = mtype - str
  2nd element = considered as this scitype - str
  3rd element = int - index of example
elements are bool, indicate whether representation has information removed
    all examples with same index are considered "same" on scitype content

overall, conversions from non-lossy representations to any other ones
    should yield the element exactly, identically (given same index)
"""

import pandas as pd
import numpy as np

example_dict = dict()
example_dict_lossy = dict()

###

X = np.array(
    [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 55, 6]], [[1, 2, 3], [42, 5, 6]]],
    dtype=np.int64,
)

example_dict[("numpy3D", "Panel", 0)] = X
example_dict_lossy[("numpy3D", "Panel", 0)] = False

cols = [f"var_{i}" for i in range(2)]
Xlist = [
    pd.DataFrame([[1, 4], [2, 5], [3, 6]], columns=cols),
    pd.DataFrame([[1, 4], [2, 55], [3, 6]], columns=cols),
    pd.DataFrame([[1, 42], [2, 5], [3, 6]], columns=cols),
]

example_dict[("df-list", "Panel", 0)] = Xlist
example_dict_lossy[("df-list", "Panel", 0)] = False

cols = ["instances", "timepoints"] + [f"var_{i}" for i in range(2)]

Xlist = [
    pd.DataFrame([[0, 0, 1, 4], [0, 1, 2, 5], [0, 2, 3, 6]], columns=cols),
    pd.DataFrame([[1, 0, 1, 4], [1, 1, 2, 55], [1, 2, 3, 6]], columns=cols),
    pd.DataFrame([[2, 0, 1, 42], [2, 1, 2, 5], [2, 2, 3, 6]], columns=cols),
]
X = pd.concat(Xlist)
X = X.set_index(["instances", "timepoints"])

example_dict[("pd-multiindex", "Panel", 0)] = X
example_dict_lossy[("pd-multiindex", "Panel", 0)] = False

cols = [f"var_{i}" for i in range(2)]
X = pd.DataFrame(columns=cols, index=[0, 1, 2])
X["var_0"] = pd.Series(
    [pd.Series([1, 2, 3]), pd.Series([1, 2, 3]), pd.Series([1, 2, 3])]
)
X["var_1"] = pd.Series(
    [pd.Series([4, 5, 6]), pd.Series([4, 55, 6]), pd.Series([42, 5, 6])]
)

example_dict[("nested_univ", "Panel", 0)] = X
example_dict_lossy[("nested_univ", "Panel", 0)] = False
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 16281, 5270, 329, 4856, 13, 198, 198, 3109, 3742, 8633, 286, 6096, 11, 4465, 329, 4856, 355, 34609, 13, 198, 198, 20688, 62, 11600, 25, 8633, 41497, 416, 15055, ...
2.502569
973
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul  9 15:34:36 2019

@author: eseliuni
"""

from __future__ import print_function
#from builtins import str
#from builtins import range

import os


def get_coordinate_from_line(coordinate, line):
    """
    Returns a value of a coordinate from a line
    """
    for word in line.split(","):
        if str(coordinate)+"=" in word:
            if coordinate == "phi":
                return float(word[word.index("=")+1:])
            else:
                return float(word[word.index("=")+1:-1])


def get_los(full_path):
    """
    Reads the file *.coordinate from diaggeom with line of sight (LOS) of a
    diagnostic. Returns a dictionary with keys:
        name: short name of the diagnostic
        description: full name of the diagnostic
        signals: contains the name of each channel and its LOS
    """
    # Split the text to the lines
    with open(full_path, "r") as file:
        lines = file.readlines()
        lines = [line.strip() for line in lines]

        los_diag = {"name": lines[0].split()[0],
                    "description": lines[0][lines[0].index("(")+1:lines[0].index(")")],
                    "signals": {}
                    }

        # Combine lines to the blocks, corresponding specific channel
        phrase = "(Line of sight)"  # a phrase, that indicates the beginning of the block
        signals_line_idx = [ii for ii in range(len(lines)) if phrase in lines[ii]]
        signals_line_idx.append(len(lines))
        signal_blocks_idx = [(signals_line_idx[ii], signals_line_idx[ii+1])
                             for ii in range(len(signals_line_idx)-1)[:-1]]
        signal_blocks_idx.append((signals_line_idx[-2], signals_line_idx[-1]))

        # obtain R, z and phi for each block
        for (ii, jj) in signal_blocks_idx:
            los = {}
            phrase = "From"
            block = lines[ii:jj]
            line_idx = [ll for ll in range(len(block)) if phrase in block[ll]]
            for idx in line_idx:
                R = [get_coordinate_from_line("R", block[idx]),
                     get_coordinate_from_line("R", block[idx+1])]
                z = [get_coordinate_from_line("z", block[idx]),
                     get_coordinate_from_line("z", block[idx+1])]
                phi = [get_coordinate_from_line("phi", block[idx]),
                       get_coordinate_from_line("phi", block[idx+1])]
                if block[idx].split()[0] == phrase:
                    los.update({"0": {"R": R, "z": z, "phi": phi}})
                else:
                    los.update({block[idx].split()[0]: {"R": R, "z": z, "phi": phi}})
            los_diag["signals"].update({lines[ii][:lines[ii].index("(")-1]: los})
        file.close()

    return los_diag


if __name__ == "__main__":
    working_dir = os.getcwd()
    examples_dir = "../../files/"
    path = os.path.join(working_dir, examples_dir)
    file_name = 'diaggeom_TS.coords'
    los_diag = get_los(os.path.join(path, file_name))
    print(los_diag)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 17, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 30030, 5979, 220, 860, 1315, 25, 2682, 25, 2623, 13130, 198, 198, 31, 9800, 25, 1658, 437...
2.157164
1,368