content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""analysis_utils.py: Helper functions that are used multiple
places.
"""
import os
import re
import numpy as np
import scipy.stats as sps
from cclib.parser import ccopen
from sgr_analysis.find_CO2_frequencies import find_CO2_mode_indices
def make_file_iterator(filename):
    """Return an iterator over the lines of ``filename``.

    Lines are yielded without their trailing newlines (``str.splitlines``).
    """
    with open(filename) as handle:
        lines = handle.read().splitlines()
    return iter(lines)
def pad_left_zeros(num, maxwidth):
    """Return ``num`` as a string left-padded with zeros to ``maxwidth``.

    If ``str(num)`` is already ``maxwidth`` characters or longer it is
    returned unchanged.  Implemented with ``str.rjust``, which reproduces
    the original manual loop exactly — including the quirky ``"00-5"``
    result for negative numbers (zeros go before the sign).

    :param num: value to pad (anything with a sensible ``str()``)
    :param maxwidth: minimum total width of the result
    :return: the padded string
    """
    return str(num).rjust(maxwidth, "0")
def make_n_mm_dict():
    """Build a dict mapping every possible MM-pair count to a fresh empty list.

    Pre-creating all keys lets later code append without key-existence checks
    on every loop iteration, even if the larger counts never get used.
    """
    mm_counts = list(range(0, 18, 2)) + [32, 64, 128] + [256, 255, 254, 253]
    return {count: [] for count in mm_counts}
def filter_n_mm_into_dict(outputfilenames):
    """Group output filenames by the number of MM IL pairs parsed from the name.

    The count is taken from an ``_<N>mm`` tag in the filename.  A name with
    no such tag is filed under 0 MM pairs (a shell rename once stripped the
    ``_0`` suffix, so this fallback is genuinely useful).
    """
    grouped = make_n_mm_dict()
    for name in outputfilenames:
        match = re.search(r'_(\d+)mm', name)
        n_mm = int(match.group(1)) if match is not None else 0
        grouped[n_mm].append(name)
    return grouped
def get_CO2_frequencies_4fs(outputfilenames):
    """Extract the CO2 mode frequency and IR intensity from each output file.

    This is the variant used for the snapshots separated by 4 fs.

    :param outputfilenames: iterable of quantum-chemistry output file paths;
        each name must contain ``drop_<N>`` giving its snapshot number
    :return: (frequencies, intensities, snapshot numbers) as parallel lists;
        files that fail to parse or carry no vibrational data are skipped
    """
    snapnums = []
    CO2_frequencies = []
    CO2_intensities = []
    for outputfilename in outputfilenames:
        print("Parsing frequencies from {}".format(outputfilename))
        job = ccopen(outputfilename)
        # Skip files cclib cannot parse rather than aborting the whole run.
        # The original bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; catch Exception instead.
        try:
            data = job.parse()
        except Exception:
            continue
        # Outputs without a frequency job lack the vib* attributes; skip them.
        try:
            vibfreqs = data.vibfreqs
            vibdisps = data.vibdisps
            vibirs = data.vibirs
        except AttributeError:
            continue
        # Assumption: the CO2 atoms come first in the geometry (index 0) —
        # the commented-out find_CO2_atom_indices call confirms this intent.
        start_index = 0
        mode_indices = find_CO2_mode_indices(start_index, vibdisps, thresh=0.50)
        # Only keep the last matching mode (the highest-index one found).
        CO2_frequencies.append(vibfreqs[mode_indices[-1]])
        CO2_intensities.append(vibirs[mode_indices[-1]])
        snapnum = int(re.search(r'drop_(\d+)', outputfilename).groups()[0])
        snapnums.append(snapnum)
    return CO2_frequencies, CO2_intensities, snapnums
def get_gradients(outputfilenames):
    """Parse the maximum and RMS gradient components from SCF output files.

    Note: this only works for SCF calculations, not correlated ones.
    Only the first gradient block per file is taken, which avoids parsing
    the repeated gradients printed at every finite-difference step of a
    numerical frequency run.
    """
    gradients_rms = []
    gradients_max = []
    snapnums = []
    for outputfilename in outputfilenames:
        with open(outputfilename) as outputfile:
            print("Parsing gradient from {}".format(outputfilename))
            for line in outputfile:
                if 'Max gradient component' not in line:
                    continue
                biggest = float(line.split()[-1])
                # The RMS gradient is printed on the very next line.
                line = next(outputfile)
                assert 'RMS gradient' in line
                gradients_rms.append(float(line.split()[-1]))
                gradients_max.append(biggest)
                snapnums.append(int(re.search(r'drop_(\d+)', outputfilename).groups()[0]))
                # First match only — see note in the docstring.
                break
    return gradients_rms, gradients_max, snapnums
def mangle_dict_keys(d):
    """Normalize 'maximum MM' keys so 253/254/255 MM pairs all appear as 256.

    Not all 'maximum MM' calculations have exactly 256 pairs: explicitly
    treated ionic liquid pairs take away from the 256 *total* pairs in a
    box.  Remap those near-maximum counts onto the 256 key so they compare
    as equivalent.

    Empty (``[]``) entries are dropped from ``d`` in place (the input dict
    is mutated); the remapped two-level dict is returned.
    """
    # Iterate over a snapshot of the keys: popping while iterating the dict
    # itself raises "dictionary changed size during iteration" in Python 3.
    for k in list(d):
        if d[k] == []:
            d.pop(k)
    nd = dict()
    bad_keys = (253, 254, 255)
    for n_qm in d:
        nd[n_qm] = dict()
        for n_mm in d[n_qm]:
            if n_mm in bad_keys and len(d[n_qm][n_mm]) > 0:
                # NOTE(review): if several of 253/254/255 are non-empty for
                # the same n_qm, later ones overwrite nd[n_qm][256] —
                # preserved from the original; confirm only one can occur.
                nd[n_qm][256] = d[n_qm][n_mm]
            else:
                nd[n_qm][n_mm] = d[n_qm][n_mm]
    return nd
def get_CO2_frequencies_d(filename_dict, do_4fs=False):
    """Parse CO2 frequencies/intensities for every MM-pair count in a dict.

    ``filename_dict`` must map # of MM pairs -> list of output filenames for
    a single # of QM pairs — that is, the result of
    ``filter_n_mm_into_dict()``.

    :param do_4fs: use the 4 fs-snapshot parser instead of the default one
    :return: (frequencies, intensities, snapshot numbers), each a dict keyed
        by # of MM pairs
    """
    frequencies_dict = make_n_mm_dict()
    intensities_dict = make_n_mm_dict()
    snapnums_dict = make_n_mm_dict()
    parse = get_CO2_frequencies_4fs if do_4fs else get_CO2_frequencies
    for n_mm, filenames in filename_dict.items():
        if filenames:
            (frequencies_dict[n_mm],
             intensities_dict[n_mm],
             snapnums_dict[n_mm]) = parse(filenames)
    return frequencies_dict, intensities_dict, snapnums_dict
def get_dipoles_d(filename_dict):
    """Parse dipole moments for every MM-pair count in a dict.

    ``filename_dict`` must map # of MM pairs -> list of output filenames for
    a single # of QM pairs — that is, the result of
    ``filter_n_mm_into_dict()``.

    :return: (dipoles, snapshot numbers), each a dict keyed by # of MM pairs
    """
    dipoles_dict = make_n_mm_dict()
    snapnums_dict = make_n_mm_dict()
    for n_mm, filenames in filename_dict.items():
        if filenames:
            dipoles_dict[n_mm], snapnums_dict[n_mm] = get_dipoles(filenames)
    return dipoles_dict, snapnums_dict
def get_gradients_d(filename_dict):
    """Parse RMS/max gradients for every MM-pair count in a dict.

    ``filename_dict`` must map # of MM pairs -> list of output filenames
    (see ``filter_n_mm_into_dict()``).

    :return: (RMS gradients, max gradients, snapshot numbers), each a dict
        keyed by # of MM pairs
    """
    gradients_rms_dict = make_n_mm_dict()
    gradients_max_dict = make_n_mm_dict()
    snapnums_dict = make_n_mm_dict()
    for n_mm, filenames in filename_dict.items():
        if filenames:
            (gradients_rms_dict[n_mm],
             gradients_max_dict[n_mm],
             snapnums_dict[n_mm]) = get_gradients(filenames)
    return gradients_rms_dict, gradients_max_dict, snapnums_dict
def get_geometries_d(filename_dict):
    """Parse geometries for every MM-pair count in a dict.

    ``filename_dict`` must map # of MM pairs -> list of output filenames
    (see ``filter_n_mm_into_dict()``).

    :return: (geometries, snapshot numbers), each a dict keyed by # of MM pairs
    """
    geometries_dict = make_n_mm_dict()
    snapnums_dict = make_n_mm_dict()
    for n_mm, filenames in filename_dict.items():
        if filenames:
            geometries_dict[n_mm], snapnums_dict[n_mm] = get_geometries(filenames)
    return geometries_dict, snapnums_dict
def get_eda_covp_totals(outputfilepath):
    """Return the per-fragment totals from the COVP analysis of an EDA output.

    Each returned tuple holds (energy contribution, millielectrons
    transferred): the first tuple for fragment 1 -> 2, the second for
    fragment 2 -> 1.

    :param outputfilepath: path to the output file to scan
    :return: ((dE, dQ) for 1->2, (dE, dQ) for 2->1), or None (implicitly)
        if the COVP table header never appears in the file
    """
    searchstr = "# Delta E(Alpha) Delta E(Beta) Delta Q(Alpha) Delta Q(Beta)"

    def remove_paren_stuff(token):
        # Table entries look like ``<value>(<percentage>)`` where the
        # parenthesized tail is exactly 8 characters; strip it and parse.
        return float(token[:-8])

    with open(outputfilepath) as outputfile:
        for line in outputfile:
            if searchstr in line:
                # Advance to the "Tot" row of the fragment 1 -> 2 table.
                while line[:4] != " Tot":
                    line = next(outputfile)
                f_1_to_2 = tuple(map(remove_paren_stuff, line.split()[1::2]))
                line = next(outputfile)
                # Advance to the "Tot" row of the fragment 2 -> 1 table.
                while line[:4] != " Tot":
                    line = next(outputfile)
                f_2_to_1 = tuple(map(remove_paren_stuff, line.split()[1::2]))
                return f_1_to_2, f_2_to_1
def sort(snapnums_dict, results_dict):
    """Sort each results list in place by its snapshot numbers.

    Both dicts must share the same keys.  For every key, both lists are
    reordered (by reassignment) so the snapshot numbers ascend and the
    results follow them.

    Note: the previous implementation applied the *inverse* permutation
    (it sorted ``zip(sorting_indices, values)`` by index), which left
    neither list actually sorted — that is why the sanity assert on
    ``sorted_snapnums`` had to be commented out.  This version orders
    both lists correctly.
    """
    assert snapnums_dict.keys() == results_dict.keys()
    for k in results_dict:
        # Indices that would sort the snapshot numbers ascending.
        order = sorted(range(len(snapnums_dict[k])),
                       key=snapnums_dict[k].__getitem__)
        sorted_snapnums = [snapnums_dict[k][i] for i in order]
        sorted_results = [results_dict[k][i] for i in order]
        snapnums_dict[k] = sorted_snapnums
        results_dict[k] = sorted_results
    return
def get_outputfiles_from_path(path, ext=".out"):
    """Walk the directory tree under ``path`` (following symlinks) and
    return a sorted list of all files whose names end with ``ext``.
    """
    return sorted(
        os.path.join(root, name)
        for root, _dirs, names in os.walk(path, followlinks=True)
        for name in names
        if name.endswith(ext)
    )
def pprint_lengths(d):
    """Pretty-print the lengths of the lists inside a two-level dictionary.

    Prints one line per outer key (keys sorted at both levels):
    ``<outer_key> [len, len, ...]``.
    """
    for outer in sorted(d):
        lengths = [len(d[outer][inner]) for inner in sorted(d[outer])]
        print(outer, lengths)
    return
def pprint_linregress(x, y):
    """Pretty-print a linear regression of ``y`` on ``x``.

    Returns (slope, intercept, r-squared).
    """
    fit = sps.linregress(x, y)
    rsq = fit[2] ** 2
    print(" slope: {:f}".format(fit[0]),
          " intercept: {:f}".format(fit[1]),
          " rval2: {:f}".format(rsq))
    return fit[0], fit[1], rsq
| [
37811,
20930,
62,
26791,
13,
9078,
25,
5053,
525,
5499,
326,
389,
973,
3294,
198,
23625,
13,
198,
37811,
628,
198,
11748,
28686,
198,
11748,
302,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
34242,
355,
599,
... | 2.242418 | 4,583 |
from metakernel.tests.utils import (get_kernel, get_log_text, EvalKernel,
clear_log_text)
| [
6738,
1138,
461,
7948,
13,
41989,
13,
26791,
1330,
357,
1136,
62,
33885,
11,
651,
62,
6404,
62,
5239,
11,
26439,
42,
7948,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 1.897059 | 68 |
# Copyright 2019 Red Hat, Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import mock
from osc_lib import exceptions
from octaviaclient.osc.v2 import constants
from octaviaclient.osc.v2 import pool as pool
from octaviaclient.tests.unit.osc.v2 import constants as attr_consts
from octaviaclient.tests.unit.osc.v2 import fakes
| [
2,
220,
220,
15069,
13130,
2297,
10983,
11,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
407,
779,
428,
2393,
2... | 3.315985 | 269 |
'''
Evaluate and validate the final model.
More details in the "Model Evaluation and Validation" section of the report.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import random
import gym
import math
import matplotlib.pyplot as plt
import pickle
import learning_agent
# Run the model evaluation from learning_agent using a single environment.
learning_agent.NUM_ENVS = 1
rewards = learning_agent.model_evaluation()

# Report mean and standard deviation over all evaluation episodes.
print('Rewards mean: %f\nRewards std-dev: %f' % (np.mean(rewards), np.std(rewards)))

# Average reward over the final 100 episodes (the headline metric).
print('Average reward for final 100 episodes: %f' % np.mean(rewards[-100:]))

# Persist the raw rewards list in case it is needed for later analysis.
print('Saving rewards to eval_rewards.p')
with open('eval_rewards.p', 'wb') as rewards_out:
    pickle.dump(rewards, rewards_out)

# Plot the per-episode reward curve for visualization.
plt.plot(rewards)
plt.title('Reward over Episodes')
plt.ylabel('Reward')
plt.xlabel('Episode')
plt.show()
7061,
6,
198,
36,
2100,
4985,
290,
26571,
262,
2457,
2746,
13,
198,
5167,
3307,
287,
262,
366,
17633,
34959,
290,
3254,
24765,
1,
2665,
286,
262,
989,
13,
198,
7061,
6,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,... | 3.293605 | 344 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.mail.controllers.main import MailController
from odoo import http
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2142,
286,
10529,
2238,
13,
4091,
38559,
24290,
2393,
329,
1336,
6634,
290,
15665,
3307,
13,
198,
198,
6738,
16298,
2238,
13,
39996,
13,
4529,
13,
3642,
36667,
13,
... | 3.345455 | 55 |
# Modules used in CVP-MVSNet
# By: Jiayu
# Date: 2019-08-13
# Note: This file use part of the code from the following projects.
# Thanks for the authors for the great code.
# MVSNet: https://github.com/YoYo000/MVSNet
# MVSNet_pytorch: https://github.com/xy-guo/MVSNet_pytorch
import numpy as np
np.seterr(all='raise')
import torch
import torch.nn as nn
import torch.nn.functional as F
import cv2
# Debug:
# import pdb
# import matplotlib.pyplot as plt
# from verifications import *
# MVSNet modules
| [
2,
3401,
5028,
973,
287,
327,
8859,
12,
44,
20304,
7934,
198,
2,
2750,
25,
29380,
323,
84,
198,
2,
7536,
25,
13130,
12,
2919,
12,
1485,
198,
198,
2,
5740,
25,
770,
2393,
779,
636,
286,
262,
2438,
422,
262,
1708,
4493,
13,
198,
... | 2.616915 | 201 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import json
from transformers import BertModel
from tools.accuracy_tool import multi_label_accuracy, single_label_top1_accuracy
| [
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
198,
11748,
33918,
201,
198,
201,
198,
6738,
6121,
364,
1330,
22108,
17633,
201,
198,
201,
198,
6738,
4899,
13... | 3.089552 | 67 |
#!/usr/bin/env python
"""Parse and print the list of logs, after validating signature."""
import base64
import datetime
import hashlib
import json
import math
import os
import sys
import gflags
import jsonschema
import M2Crypto
FLAGS = gflags.FLAGS
gflags.DEFINE_string("log_list", None, "Logs list file to parse and print.")
gflags.MarkFlagAsRequired("log_list")
gflags.DEFINE_string("signature", None, "Signature file over the list of logs.")
gflags.DEFINE_string("signer_key", None, "Public key of the log list signer.")
gflags.DEFINE_string("log_list_schema",
os.path.join(os.path.dirname(sys.argv[0]),
"data", "log_list_schema.json"),
"JSON schema for the list of logs.")
gflags.DEFINE_string("header_output", None,
"If specifed, generates C++ code for Chromium.")
gflags.DEFINE_boolean("skip_signature_check", False,
"Skip signature check (only validate schema).")
def generate_code_for_chromium(json_log_list, output_file):
    """Generate a header file of known logs to be included by Chromium.

    Args:
        json_log_list: parsed log-list JSON; must have a "logs" array whose
            entries carry a base64-encoded "key" and a "description".
        output_file: path of the C++ header to write; also used to derive
            the include guard.

    NOTE(review): this uses Python 2 APIs (``base64.decodestring`` and
    ``ord()`` over a byte string); ``write_cpp_header``,
    ``write_log_info_struct_definition`` and ``write_cpp_footer`` are not
    defined in this excerpt — confirm against the full module.
    """
    # Derive an include guard like PATH_TO_FILE_H_ from the output path.
    include_guard = (output_file.upper().replace('.', '_').replace('/', '_')
                     + '_')
    f = open(output_file, "w")
    write_cpp_header(f, include_guard)
    logs = json_log_list["logs"]
    list_code = []
    key_length_in_bytes = None
    for log in logs:
        log_key = base64.decodestring(log["key"])
        # Render the raw key bytes as a C string of \xNN escapes.
        hex_key = "".join(["\\x%.2x" % ord(c) for c in log_key])
        # line_width % 4 must be 0 to avoid splitting the hex-encoded key
        # across '\' which will escape the quotation marks.
        line_width = 68
        assert line_width % 4 == 0
        num_splits = int(math.ceil(len(hex_key) / float(line_width)))
        split_hex_key = ['"%s"' % hex_key[i * line_width:(i + 1) * line_width]
                        for i in range(num_splits)]
        s = " {"
        s += "\n ".join(split_hex_key)
        s += ',\n "%s"\n }' % (log["description"])
        list_code.append(s)
        # All log keys must be the same length: remember the first one and
        # assert that every subsequent key matches.
        if not key_length_in_bytes:
            key_length_in_bytes = len(log_key)
        else:
            assert key_length_in_bytes == len(log_key)
    write_log_info_struct_definition(f, key_length_in_bytes + 1)
    f.write("const CTLogInfo kCTLogList[] = {\n")
    f.write(",\n" . join(list_code))
    f.write("\n};\n")
    f.write("\nconst size_t kNumKnownCTLogs = %d;\n" % len(logs))
    write_cpp_footer(f, include_guard)
if __name__ == "__main__":
    # gflags parses and strips recognized flags, returning the remaining argv.
    sys.argv = FLAGS(sys.argv)
    # NOTE(review): run() is not defined in this excerpt — confirm it exists
    # in the full module.
    run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
10044,
325,
290,
3601,
262,
1351,
286,
17259,
11,
706,
4938,
803,
9877,
526,
15931,
628,
198,
11748,
2779,
2414,
198,
11748,
4818,
8079,
198,
11748,
12234,
8019,
198,
11748,
33918... | 2.234579 | 1,151 |
from django.forms import ModelForm
from .models import (User, Project, UserProjectEvent, Account, System, Service,
Transaction, Job, StorageCommitment)
from .shortcuts import add_user_to_project, create_account
| [
6738,
42625,
14208,
13,
23914,
220,
220,
1330,
9104,
8479,
198,
198,
6738,
764,
27530,
220,
220,
220,
1330,
357,
12982,
11,
4935,
11,
11787,
16775,
9237,
11,
10781,
11,
4482,
11,
4809,
11,
220,
198,
220,
220,
220,
220,
220,
220,
220... | 2.786517 | 89 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from . import Activation
from . import AdvancedActivation
from . import BatchNorm
from . import Concate
from . import Conv
from . import Crop
from . import Dense
from . import Dot
from . import Embed
from . import Flatten
from . import GRU
from . import LSTM
from . import Merge
from . import Permute
from . import Pool
from . import RepeatVector
from . import Reshape
from . import SimpleRNN
from . import Upsample
from . import ZeroPad
| [
2,
16529,
45537,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321,
13,
198,
2,
16529,
35937,
198,
1... | 4.518072 | 166 |
# Copyright (c) 2019, CMCC Technologies Co., Ltd.
# Copyright 2019 ZTE Corporation.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import serializers
from lcm.ns.serializers.sol.lccn_filter_data import LifeCycleChangeNotificationsFilter
from lcm.ns.serializers.sol.pub_serializers import LinkSerializer
| [
2,
15069,
357,
66,
8,
13130,
11,
16477,
4093,
21852,
1766,
1539,
12052,
13,
198,
2,
15069,
13130,
1168,
9328,
10501,
13,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
... | 3.796296 | 216 |
from setuptools import setup, find_packages
import re
# Read the package version out of snet_sdk/__init__.py without importing the
# package (avoids pulling in its dependencies at build time).
with open('snet_sdk/__init__.py', 'rt', encoding='utf8') as f:
    version = re.search(r'__version__ = "(.*?)"', f.read()).group(1)

setup(
    name='snet_sdk',
    version=version,
    packages=find_packages(),
    url='https://github.com/singnet/snet-sdk-python',
    license='MIT',
    author='SingularityNET Foundation',
    author_email='info@singularitynet.io',
    description='SingularityNET Python SDK',
    # Pinned runtime dependencies.
    install_requires=[
        'grpcio-tools==1.17.1',
        'ecdsa==0.13',
        'web3==4.2.1',
        'ipfsapi==0.4.2.post1',
        'rfc3986==1.1.0'
    ]
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
11748,
302,
198,
198,
4480,
1280,
10786,
82,
3262,
62,
21282,
74,
14,
834,
15003,
834,
13,
9078,
3256,
705,
17034,
3256,
21004,
11639,
40477,
23,
11537,
355,
277,
25,
198,... | 2.193772 | 289 |
if __name__ == "__main__":
    # Module is import-only for now; nothing to run as a script.
    pass
| [
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1208,
198
] | 2.111111 | 18 |
import luigi
import numpy as np
from src.models.search_grid_base import SearchGridBase
if __name__ == '__main__':
    # Hand control to luigi's command-line task runner.
    luigi.run()
| [
11748,
300,
84,
25754,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
12351,
13,
27530,
13,
12947,
62,
25928,
62,
8692,
1330,
11140,
41339,
14881,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
... | 2.68 | 50 |
# Generated by Django 2.2.13 on 2021-02-06 07:16
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1485,
319,
33448,
12,
2999,
12,
3312,
8753,
25,
1433,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
# Exercício 3.7 - Faça um programa que peça dois números inteiros. Imprima a soma
# desses dois números na tela.
print('\n')
a = int(input('Informe um número inteiro: '))
b = int(input('Informe outro número inteiro: '))
print('\nA soma de %d + %d é igual a %d' % (a, b, a + b))
print('\n') | [
2,
1475,
2798,
8836,
66,
952,
513,
13,
22,
532,
18350,
50041,
23781,
1430,
64,
8358,
613,
50041,
466,
271,
299,
21356,
647,
418,
493,
20295,
4951,
13,
1846,
1050,
8083,
257,
3870,
64,
198,
2,
288,
44667,
466,
271,
299,
21356,
647,
... | 2.123188 | 138 |
# -*- coding: utf-8 -*-
# The main procedure for CHASER.
# Once run, she will read her memory.
# Once stop, she will save her memory.
# She is not event triggered, but
# internally driven.
import numpy as np
# She is born here: start the internally driven main loop.
# NOTE(review): main() is not defined or imported in this excerpt — confirm
# where it comes from in the full module.
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
383,
1388,
8771,
329,
5870,
1921,
1137,
13,
198,
2,
4874,
1057,
11,
673,
481,
1100,
607,
4088,
13,
198,
2,
4874,
2245,
11,
673,
481,
3613,
607,
4088,
13,
1... | 2.848837 | 86 |
#!/usr/bin/python
import sys
import re
################
# Jason Weirather 20140317
# Get chromosome names as the first non-whitespace characters
# Pre: fasta file
# Post: write list of chromosomes to standard output
# Modifies: standard output
################
# NOTE(review): Python 2 code (print statements) — do not run under Python 3.
if(len(sys.argv) < 2):
    print 'split_genome_fasta.py <INPUT FILE>'
    sys.exit()

with open(sys.argv[1]) as fp:
    for line in fp:
        if line.startswith('>'):
            # Capture the first non-whitespace run after '>' as the name.
            p = re.compile('>\s*(\S+)')
            name = p.match(line).group(1)
            print name
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
25064,
198,
11748,
302,
198,
198,
14468,
198,
2,
220,
8982,
46669,
1032,
580,
31552,
1558,
198,
2,
220,
3497,
34348,
3891,
355,
262,
717,
1729,
12,
1929,
2737,
10223,
3435,
198,
2,
2... | 2.611111 | 198 |
import logging
from .functions import to_snake_case
class Field:
    """A property-like descriptor mapping a table field to a snake_cased
    attribute on the model.

    NOTE(review): the class docstring documents ``model`` and ``schema``
    arguments, but no ``__init__`` appears in this excerpt — confirm it
    exists in the full module (methods read ``self._schema``/``self._model``).

    Args:
        model (airstorm.model.Model): The model this field belongs to.
        schema (dict): The schema for this field.

    Returns:
        airstorm.fields.Field: The initialized field object.
    """

    # Field types whose values are derived remotely and cannot be written.
    _read_only_field_types = ("formula", "computation")

    def symmetric_field(self):
        """For foreign-key fields, return the reversed field in the foreign table.

        Returns:
            airstorm.fields.Field: The symmetric field, or None when this
            field is not a foreign key.
        """
        if self._schema["type"] == "foreignKey":
            field_id = self._schema["typeOptions"]["symmetricColumnId"]
            model_id = self._schema["typeOptions"]["foreignTableId"]
            model = self._model._base._model_by_id[model_id]
            return model._field_by_id[field_id]
        return None
class EditableField(Field):
    """Field that can be edited."""

    def __set__(self, instance, value):
        """Set the field's value locally; it stays local until changes are pushed."""
        # NOTE(review): this writes to the descriptor object (``self``), not
        # to ``instance`` — all model instances would share the last written
        # value. Confirm whether ``instance`` was intended.
        self._value = value

    def __delete__(self, instance):
        """Reset the local change for this field."""
        # NOTE(review): inconsistent with __set__ above, which writes to
        # ``self``; confirm which target is intended.
        instance._value = None
| [
11748,
18931,
198,
198,
6738,
764,
12543,
2733,
1330,
284,
62,
16184,
539,
62,
7442,
628,
198,
4871,
7663,
25,
198,
220,
220,
220,
37227,
1212,
3119,
588,
2134,
481,
3975,
1028,
257,
3084,
2214,
7362,
355,
257,
17522,
62,
66,
839,
1... | 2.567619 | 525 |
import iolib, datasets, tools
from tools.tools import add_constant, categorical
import regression
from .regression.linear_model import OLS, GLS, WLS, GLSAR
from .genmod.generalized_linear_model import GLM
from .genmod import families
import robust
from .robust.robust_linear_model import RLM
from .discrete.discrete_model import Poisson, Logit, Probit, MNLogit
from .tsa import api as tsa
import nonparametric
from __init__ import test
import version
from info import __doc__
import os
chmpath = os.path.join(os.path.dirname(__file__),
'docs\\build\\htmlhelp\\statsmodelsdoc.chm')
if os.path.exists(chmpath):
del os
del chmpath
| [
11748,
1312,
349,
571,
11,
40522,
11,
4899,
198,
6738,
4899,
13,
31391,
1330,
751,
62,
9979,
415,
11,
4253,
12409,
198,
11748,
20683,
198,
6738,
764,
2301,
2234,
13,
29127,
62,
19849,
1330,
440,
6561,
11,
402,
6561,
11,
370,
6561,
1... | 2.86087 | 230 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import jax
from jax.nn import softmax
from jax.scipy.special import expit as sigmoid
import jax.numpy as jnp
from jaxopt import loss
from jaxopt import projection
from jaxopt._src import test_util
if __name__ == '__main__':
    # absltest discovers and runs the test cases defined in this module.
    absltest.main()
| [
2,
15069,
33448,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.469636 | 247 |
#!/usr/bin/env python3
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import argparse
import sys
from textwrap import dedent
from typing import Tuple
from .__version__ import __version__
from ._const import MODULE_NAME
from ._logger import LogLevel, initialize_logger
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    # NOTE(review): main() is not defined in this excerpt — confirm against
    # the full module.
    sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
492,
2438,
9800,
3712,
23459,
88,
13704,
367,
2381,
12144,
1279,
912,
4669,
13704,
13,
71,
2381,
12144,
31,
14816,
13,
785,
29,
198,
37811,
198,
198,
11748,
1822,
... | 3.008621 | 116 |
from .bronx import r_bronx as rex_neighborhoods_bronx
from .brooklyn import r_brooklyn as rex_neighborhoods_brooklyn
from .manhattan import r_manhattan as rex_neighborhoods_manhattan
from .queens import r_queens as rex_neighborhoods_queens
from .statenisland import r_statenIsland as rex_neighborhoods_statenIsland
from .throughways import names as throughway_names
| [
6738,
764,
65,
1313,
87,
1330,
374,
62,
65,
1313,
87,
355,
302,
87,
62,
710,
394,
2865,
2894,
82,
62,
65,
1313,
87,
198,
6738,
764,
19094,
6213,
1330,
374,
62,
19094,
6213,
355,
302,
87,
62,
710,
394,
2865,
2894,
82,
62,
19094,
... | 2.793893 | 131 |
# coding: utf-8
"""
蓝鲸用户管理 API
蓝鲸用户管理后台服务 API # noqa: E501
OpenAPI spec version: v2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DepartmentProfileEdgesSLZ(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'department_id': 'int',
'profile_id': 'int'
}
attribute_map = {
'id': 'id',
'department_id': 'department_id',
'profile_id': 'profile_id'
}
def __init__(self, id=None, department_id=None, profile_id=None): # noqa: E501
"""DepartmentProfileEdgesSLZ - a model defined in Swagger""" # noqa: E501
self._id = None
self._department_id = None
self._profile_id = None
self.discriminator = None
if id is not None:
self.id = id
if department_id is not None:
self.department_id = department_id
if profile_id is not None:
self.profile_id = profile_id
@property
def id(self):
"""Gets the id of this DepartmentProfileEdgesSLZ. # noqa: E501
:return: The id of this DepartmentProfileEdgesSLZ. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this DepartmentProfileEdgesSLZ.
:param id: The id of this DepartmentProfileEdgesSLZ. # noqa: E501
:type: int
"""
self._id = id
@property
def department_id(self):
"""Gets the department_id of this DepartmentProfileEdgesSLZ. # noqa: E501
:return: The department_id of this DepartmentProfileEdgesSLZ. # noqa: E501
:rtype: int
"""
return self._department_id
@department_id.setter
def department_id(self, department_id):
"""Sets the department_id of this DepartmentProfileEdgesSLZ.
:param department_id: The department_id of this DepartmentProfileEdgesSLZ. # noqa: E501
:type: int
"""
self._department_id = department_id
@property
def profile_id(self):
"""Gets the profile_id of this DepartmentProfileEdgesSLZ. # noqa: E501
:return: The profile_id of this DepartmentProfileEdgesSLZ. # noqa: E501
:rtype: int
"""
return self._profile_id
@profile_id.setter
def profile_id(self, profile_id):
"""Sets the profile_id of this DepartmentProfileEdgesSLZ.
:param profile_id: The profile_id of this DepartmentProfileEdgesSLZ. # noqa: E501
:type: int
"""
self._profile_id = profile_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DepartmentProfileEdgesSLZ, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DepartmentProfileEdgesSLZ):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
5525,
241,
251,
165,
110,
116,
18796,
101,
22755,
115,
163,
106,
94,
49426,
228,
7824,
628,
220,
220,
220,
5525,
241,
251,
165,
110,
116,
18796,
101,
22755,
115,... | 2.15239 | 2,113 |
import logging
from collections import defaultdict
from datetime import datetime
import queue
from workdocs_dr.aws_clients import AwsClients
from workdocs_dr.directory_minder import DirectoryBackupMinder
from workdocs_dr.listings import WdDirectory, WdItemApexOwner
from workdocs_dr.queue_backup import RunSyncTasks
from workdocs_dr.user import UserHelper, UserKeyHelper
from workdocs_dr.workdocs_bucket_sync import WorkDocs2BucketSync
class ActivityBackupRunner():
"""
Pulls a list of activities, and syncs state of documents and folder that have been touched in
in the time interval of the activities
"""
| [
198,
11748,
18931,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
16834,
198,
198,
6738,
670,
31628,
62,
7109,
13,
8356,
62,
565,
2334,
1330,
5851,
82,
2601,
2334,
198,
6738,
670,
31628,
62,
... | 3.488889 | 180 |
from cloud_scanner.config.process_config import ProcessConfig
from .table_storage import TableStorage
def register_resource_storage(service_name, service_factory):
    """Register resource storage service.

    :param service_name: Name of service
    :param service_factory: Function to instantiate service
    :return: None
    """
    # NOTE(review): ``decorator`` is never defined in this excerpt, so calling
    # this function raises NameError. Presumably an inner decorator that calls
    # ResourceStorageFactory.register_factory(service_name, ...) is missing —
    # confirm against the original source.
    return decorator
class ResourceStorageFactory:
    """Instantiate resource storage services."""

    # Registry mapping service-type name -> zero-argument factory callable.
    _factories = {}

    @classmethod
    def create(cls) -> TableStorage:
        """Create resource storage service.

        The service type is read from ``ProcessConfig().resource_storage_type``.

        :return: Resource storage service object
        :raises KeyError: if the configured service type was never registered
        """
        service_type = ProcessConfig().resource_storage_type
        try:
            return cls._factories[service_type]()
        except KeyError:
            raise KeyError(
                f"Service type {service_type} is not "
                "registered for Resource Storage Service")

    @classmethod
    def register_factory(cls, service_type: str, factory_func):
        """Register factory.

        :param service_type: type of service of factory
        :param factory_func: Function to instantiate service
        :return: None
        """
        cls._factories[service_type] = factory_func
| [
6738,
6279,
62,
35836,
1008,
13,
11250,
13,
14681,
62,
11250,
1330,
10854,
16934,
198,
6738,
764,
11487,
62,
35350,
1330,
8655,
31425,
628,
198,
4299,
7881,
62,
31092,
62,
35350,
7,
15271,
62,
3672,
11,
2139,
62,
69,
9548,
2599,
198,
... | 2.750562 | 445 |
#!/usr/bin/env python
import csv
import json
import logging
import re
import sys
import unicodedata
_SLUG_STRIP = re.compile(r'[^\w\s-]')
_SLUG_HYPHENATE = re.compile(r'[-\s]+')
def main(program, fecha=None, asunto=None, url=None, \
sumario=None, nombre=None, *args):
""" Main program """
projects = {}
representatives = {}
rows = csv.reader(sys.stdin, delimiter='\t')
rows.next() # skip header
for row in rows:
load(row, projects, representatives, fecha, asunto, url, \
sumario, nombre)
logging.debug('Saving index json file...')
save('index.json', projects.keys())
logging.debug('Saving representatives json file...')
save('legisladores.json', representatives)
logging.debug('Saving %d json files...', len(projects))
for k, v in projects.iteritems():
save(k + '.json', v)
logging.debug('Done!')
if __name__ == '__main__':
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
main(*sys.argv)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
269,
21370,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
28000,
9043,
1045,
198,
198,
62,
8634,
7340,
62,
18601,
4061,
796,
302,
13,
... | 2.481481 | 405 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
11748,
4818,
8079,
198,
6738,
5366,
13,
9945,
1330,
20613,
198,
6738,
5366,
13,
85,
17,
1330,
10011,
2611,
44,
4254,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198
] | 3.263158 | 38 |
# -*- coding: utf-8 -*-
# BottlePy web programming micro-framework
import bottle
from bottle import request, route, template, get, post, delete
# import urllib.request
# from urllib.parse import urlencode
import os
import os.path
import traceback
import json
import socket
hostname = socket.gethostname()
IP = socket.gethostbyname(hostname)
# import apps from subfolders
for dir in os.listdir():
appFilename = os.path.join(dir, dir + '.py')
if os.path.isfile(appFilename):
print("Importing " + dir + "...")
try:
__import__(dir + '.' + dir)
except:
print("Failed to import " + dir + ":")
msg = traceback.format_exc()
print(msg)
bottle.route('/' + dir, 'GET',
lambda msg=msg, dir=dir:
reportImportError(dir, msg))
@route('/<filename:path>')
def send_static(filename):
"""Helper handler to serve up static game assets.
(Borrowed from BottlePy documentation examples.)"""
if str(filename).find('.py') == -1:
return bottle.static_file(filename, root='.')
else:
return """You do not have sufficient permissions to access this page."""
@route('/', method='Get')
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from scipy.spatial import distance
import codecs
import re
import numpy as np
#모델 로딩
model = load_model('aurabot.h5')
story_maxlen = 5
query_maxlen = 5
vocab = ['?', 'UNK', '건', '계십니까', '너', '너에', '너희', '넌', '널', '누구냐구', '누구야', '누군지', '니가', '다른', '대해', '만든', '뭐든', '뭐야', '뭘', '반갑다', '반갑워', '봇', '봇소개1', '봇소개그만', '사람', '사람이', '소개', '소개1', '소개해줘', '심심해', '아는게', '아무거나', '아우라', '아우라소개1', '아우라소개그만', '아우라에', '안녕', '알려줘', '없어', '오랜만이야', '이름이', '인사1', '인사그만', '있어', '재밌는거', '주인', '주인이', '지루해', '팀', '팀에', '팀을', '하세요', '하십니까', '하이', '할수', '할줄', '해봐', '해줘', '헬로우']
word_idx = {'?': 1, 'UNK': 2, '건': 3, '계십니까': 4, '너': 5, '너에': 6, '너희': 7, '넌': 8, '널': 9, '누구냐구': 10, '누구야': 11, '누군지': 12, '니가': 13, '다른': 14, '대해': 15, '만든': 16, '뭐든': 17, '뭐야': 18, '뭘': 19, '반갑다': 20, '반갑워': 21, '봇': 22, '봇소개1': 23, '봇소개그만': 24, '사람': 25, '사람이': 26, '소개': 27, '소개1': 28, '소개해줘': 29, '심심해': 30, '아는게': 31, '아무거나': 32, '아우라': 33, '아우라소개1': 34, '아우라소개그만': 35, '아우라에': 36, '안녕': 37, '알려줘': 38, '없어': 39, '오랜만이야': 40, '이름이': 41, '인사1': 42, '인사그만': 43, '있어': 44, '재밌는거': 45, '주인': 46, '주인이': 47, '지루해': 48, '팀': 49, '팀에': 50, '팀을': 51, '하세요': 52, '하십니까': 53, '하이': 54, '할수': 55, '할줄': 56, '해봐': 57, '해줘': 58, '헬로우': 59}
##---
@route('/chat', method=['GET','POST'])
#----- 결과 도출 함수 -----
#답변 불러오기
with codecs.open('./answers.json', 'r', encoding='UTF-8') as json_data:
answers_data = json.load(json_data)['answers']
json_data.close()
botAnswer = InputAura()
# Launch the BottlePy dev server
import wsgiref.simple_server, os
wsgiref.simple_server.WSGIServer.allow_reuse_address = 0
if os.environ.get("PORT"):
hostAddr = "0.0.0.0"
else:
hostAddr = "localhost"
if __name__ == '__main__':
bottle.run(host=hostAddr, port=int(os.environ.get("PORT", 8080)), debug=True)
# bottle.run(host=IP, port=int(os.environ.get("PORT", 8080)), debug=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
33608,
20519,
3992,
8300,
4580,
12,
30604,
198,
11748,
9294,
198,
6738,
9294,
1330,
2581,
11,
6339,
11,
11055,
11,
651,
11,
1281,
11,
12233,
198,
2,
1330,
2956,
297... | 1.498581 | 2,114 |
from allauth.socialaccount.providers.amazon_cognito.provider import (
AmazonCognitoProvider,
)
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
urlpatterns = default_urlpatterns(AmazonCognitoProvider)
| [
6738,
477,
18439,
13,
14557,
23317,
13,
15234,
4157,
13,
33103,
62,
66,
2360,
10094,
13,
15234,
1304,
1330,
357,
198,
220,
220,
220,
6186,
34,
2360,
10094,
29495,
11,
198,
8,
198,
6738,
477,
18439,
13,
14557,
23317,
13,
15234,
4157,
... | 3.162162 | 74 |
#!/home/miranda9/miniconda/envs/automl-meta-learning/bin/python3.7
#PBS -V
#PBS -M brando.science@gmail.com
#PBS -m abe
#PBS -l nodes=1:ppn=4:gpus=1,walltime=96:00:00
import torch
import torchvision.transforms as transforms
from torchmeta.datasets.helpers import miniimagenet
from torchmeta.utils.data import BatchMetaDataLoader
from tqdm import tqdm
from pathlib import Path
meta_split = 'train'
data_path = Path('~/data/').expanduser()
dataset = miniimagenet(data_path, ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
print(f'len normal = {len(dataloader)}')
num_batches = 10
with tqdm(dataloader, total=num_batches) as pbar:
for batch_idx, batch in enumerate(pbar):
train_inputs, train_targets = batch["train"]
print(train_inputs.size())
# print(batch_idx)
if batch_idx >= num_batches:
break
print('success\a') | [
2,
48443,
11195,
14,
10793,
5282,
24,
14,
1084,
291,
13533,
14,
268,
14259,
14,
2306,
296,
75,
12,
28961,
12,
40684,
14,
8800,
14,
29412,
18,
13,
22,
198,
2,
47,
4462,
532,
53,
198,
2,
47,
4462,
532,
44,
4508,
78,
13,
16801,
3... | 2.40399 | 401 |
# Made by Mr. Have fun! Version 0.2
# version 0.3 - updated by Kerberos on 2007.11.10
# Visit http://forum.l2jdp.com for more details
import sys
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "214_TrialOfScholar"
#Quest items (ugly isn't? :P)
MARK_OF_SCHOLAR_ID,MIRIENS_SIGIL1_ID,MIRIENS_SIGIL2_ID,MIRIENS_SIGIL3_ID,MIRIENS_INSTRUCTION_ID, \
MARIAS_LETTER1_ID,MARIAS_LETTER2_ID,LUKAS_LETTER_ID,LUCILLAS_HANDBAG_ID,CRETAS_LETTER1_ID, \
CRETAS_PAINTING1_ID,CRETAS_PAINTING2_ID,CRETAS_PAINTING3_ID,BROWN_SCROLL_SCRAP_ID, \
CRYSTAL_OF_PURITY1_ID,HIGHPRIESTS_SIGIL_ID,GMAGISTERS_SIGIL_ID,CRONOS_SIGIL_ID,SYLVAINS_LETTER_ID, \
SYMBOL_OF_SYLVAIN_ID,JUREKS_LIST_ID,MEYEDESTROYERS_SKIN_ID,SHAMANS_NECKLACE_ID,SHACKLES_SCALP_ID, \
SYMBOL_OF_JUREK_ID,CRONOS_LETTER_ID,DIETERS_KEY_ID,CRETAS_LETTER2_ID,DIETERS_LETTER_ID, \
DIETERS_DIARY_ID,RAUTS_LETTER_ENVELOPE_ID,TRIFFS_RING_ID,SCRIPTURE_CHAPTER_1_ID,SCRIPTURE_CHAPTER_2_ID, \
SCRIPTURE_CHAPTER_3_ID,SCRIPTURE_CHAPTER_4_ID,VALKONS_REQUEST_ID,POITANS_NOTES_ID = range(2674,2712)
STRONG_LIQUOR_ID,CRYSTAL_OF_PURITY2_ID,CASIANS_LIST_ID,GHOULS_SKIN_ID,MEDUSAS_BLOOD_ID, \
FETTEREDSOULS_ICHOR_ID,ENCHT_GARGOYLES_NAIL_ID,SYMBOL_OF_CRONOS_ID = range (2713,2721)
#npcs
NPC = [30461,30070,30071,30103,30111,30115,30230,30316,30458,30608,30609,30610,30611,30612]
#mobs
MOBS = [20158,20201,20235,20269,20552,20554,20567,20580,20068]
QUEST = Quest(214,qn,"Trial Of Scholar")
CREATED = State('Start', QUEST)
STARTING = State('Starting', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(NPC[0])
for npcId in NPC:
for i in range(2674,2721):
QUEST.addTalkId(npcId)
STARTED.addQuestDrop(npcId,i,1)
for mobId in MOBS:
QUEST.addKillId(mobId) | [
2,
14446,
416,
1770,
13,
8192,
1257,
0,
10628,
657,
13,
17,
198,
2,
2196,
657,
13,
18,
532,
6153,
416,
17337,
527,
418,
319,
4343,
13,
1157,
13,
940,
198,
2,
16440,
2638,
1378,
27302,
13,
75,
17,
73,
26059,
13,
785,
329,
517,
... | 2.062967 | 937 |
import os
import pytube
from pytube.cli import on_progress
'''
#se necesita instalar pytube con
pip install pytube
python -m pip install --upgrade pytube
o con
pip install pytube==10.8.2
'''
url = str|(input('ingresar url de la canción: '))
#url= 'https://www.youtube.com/watch?v=qk-GWtcdQek' #Ejemplo solamente
try:
yt = pytube.YouTube(url, on_progress_callback=on_progress)
my_routes = os.path.dirname(os.path.realpath(__file__))
print(f'Se descargará en: {my_routes}')
print(f"Titulo .........: {yt.title}")
print(f"Duracion (seg)..: {yt.length}")
print(f"Descripcion.....: {yt.description}")
t = yt.streams.filter(only_audio=True).all()
t[0].download(my_routes)
print('Finalizado')
except Exception as e:
print(f'Huno un error: {e}')
| [
11748,
28686,
198,
11748,
12972,
29302,
198,
6738,
12972,
29302,
13,
44506,
1330,
319,
62,
33723,
198,
7061,
6,
198,
2,
325,
497,
728,
5350,
916,
282,
283,
12972,
29302,
369,
220,
198,
79,
541,
2721,
12972,
29302,
220,
198,
29412,
532... | 2.335329 | 334 |
import requests
from fractions import *
import re
#modulus
#multiplier
#increment
#s1 = (s0*m + c) % n
cookies = get_cookie()
numbers = []
for i in range(8):
numbers.append(int(guess(i)))
print "Cracking these numbers: "
print numbers
(n,m,c) = crack_unknown_modulus(numbers)
print "N=%d" % n
print "m=%d" % m
print "c=%d" % c
start = int(guess(1,False))
print start
for i in range(20):
sn = (start*m + c) % n
print "Predicting next number is %d" % sn
start = int(guess(sn,True)) | [
198,
11748,
7007,
198,
6738,
49876,
1330,
1635,
198,
11748,
302,
628,
198,
2,
4666,
23515,
198,
2,
47945,
959,
198,
2,
24988,
434,
628,
198,
2,
82,
16,
796,
357,
82,
15,
9,
76,
1343,
269,
8,
4064,
299,
628,
628,
198,
27916,
444,... | 2.4 | 210 |
import streamlit as st
| [
11748,
4269,
18250,
355,
336,
198
] | 3.833333 | 6 |
import tweepy
from datetime import date
from src.keys import *
import requests
url = 'https://health.hawaii.gov/coronavirusdisease2019/'
r = requests.get(url)
my_text = r.text
full_array = bytearray(my_text, 'utf-8')
pre_total_cases = full_array.partition(b'Total cases:</span> <span class="value">')
after_total_cases = pre_total_cases[2].partition(b' (')
after_new_cases = after_total_cases[2].partition(b' newly reported')
before_hawaii = after_new_cases[2].partition(b'Hawai’i County:</span> <span class="value">')
before_honolulu = before_hawaii[2].partition(b'</span></dd>\n<dd><span class="label">Honolulu County:</span> <span class="value">')
before_kauai = before_honolulu[2].partition(b'</span></dd>\n<dd><span class="label">Kaua’i County:</span> <span class="value">')
before_maui = before_kauai[2].partition(b'</span></dd>\n<dd><span class="label">Maui County:</span> <span class="value">')
before_deaths = before_maui[2].partition(b'Hawaii deaths:</span> <span class="value">')
after_deaths = before_deaths[2].partition(b'</span>')
#print(before_maui[2])
total_cases_incorrect = after_total_cases[0]
total_cases_correct = bytearray
for index, value in enumerate(total_cases_incorrect):
total_cases_correct = bytearray.decode(total_cases_incorrect)
if value not in range(48,58):
total_cases_correct = bytearray.decode(total_cases_incorrect[0:index])
break
new_cases = bytearray.decode(after_new_cases[0])
hawaii = bytearray.decode(before_honolulu[0])
honolulu = bytearray.decode(before_kauai[0])
kauai = bytearray.decode(before_maui[0])
maui_incorrect = before_deaths[0]
maui_correct = bytearray
for index, value in enumerate(maui_incorrect):
maui_correct = bytearray.decode(maui_incorrect)
if value not in range(48,58):
maui_correct = bytearray.decode(maui_incorrect[0:index])
break
deaths_incorrect = after_deaths[0]
deaths_correct = bytearray
for index, value in enumerate(deaths_incorrect):
deaths_correct = bytearray.decode(deaths_incorrect)
if value not in range(48,58):
deaths_correct = bytearray.decode(deaths_incorrect[0:index])
break
message = 'HI Coronavirus Stats for ' + str(date.today()) +'\
\nTotal cases: ' + str(total_cases_correct) + '\
\nNew cases: ' + str(new_cases) + '\
\nHawai’i County: ' + str(hawaii) + '\
\nHonolulu County: ' + str(honolulu) + '\
\nKauai County: ' + str(kauai) + '\
\nMaui County: ' + str(maui_correct) + '\
\nTotal Deaths: ' + str(deaths_correct) + '\
\nhttps://health.hawaii.gov/coronavirusdisease2019/'
print(str(total_cases_correct)+'hi')
account = tweepy.OAuthHandler(api_key, api_key_secret)
account.set_access_token(access_token, access_token_secret)
bot = tweepy.API(account)
bot.update_status(message)
| [
11748,
4184,
538,
88,
198,
6738,
4818,
8079,
1330,
3128,
198,
6738,
12351,
13,
13083,
1330,
1635,
198,
11748,
7007,
198,
198,
6371,
796,
705,
5450,
1378,
13948,
13,
26615,
42648,
13,
9567,
14,
10215,
261,
615,
19397,
67,
786,
589,
233... | 2.408319 | 1,178 |
""" Makes a copy of the plot in the overlay and adds it to the canvas.
"""
# Enthought library imports
from traits.api import Bool, Callable, Enum, Float, Instance, Int, Trait, Tuple
from enable.api import Container
# Chaco imports
from chaco.api import AbstractOverlay
from enable.tools.api import DragTool
class PlotCloneTool(AbstractOverlay, DragTool):
""" On a drag operation, draws an overlay of self.component underneath
the cursor. On drag_end, a copy of the plot is dropped onto the
self.dest container.
"""
# The container to add the cloned plot to
dest = Instance(Container)
# A function that gets called on drag_end. It gets passed this tool
# and the position at which to place the new cloned plot.
plot_cloner = Callable
# The amount to fade the plot when we draw as overlay
alpha = Float(0.5)
# The possible event states for this tool.
event_state = Enum("normal", "dragging")
capture_mouse = True
# The (x,y) position of the "last" mouse position we received
_offset = Trait(None, None, Tuple)
# The relative position of the mouse_down_position to the origin
# of the plot's coordinate system
_offset_from_plot = Tuple
# This is set to True before we attempt to move the plot, so that
# we do not get called again, in case we are an overlay on the plot
# we are drawing.
_recursion_check = Bool(False)
def drag_start(self, event):
""" Called when the drag operation starts.
Implements DragTool.
"""
self._offset = (event.x - self.mouse_down_position[0],
event.y - self.mouse_down_position[1])
self._offset_from_plot = (
self.mouse_down_position[0] - self.component.x,
self.mouse_down_position[1] - self.component.y)
self.visible = True
event.handled = True
| [
37811,
27433,
257,
4866,
286,
262,
7110,
287,
262,
33345,
290,
6673,
340,
284,
262,
21978,
13,
198,
37811,
198,
198,
2,
2039,
28895,
5888,
17944,
198,
6738,
12796,
13,
15042,
1330,
347,
970,
11,
4889,
540,
11,
2039,
388,
11,
48436,
... | 2.857791 | 661 |
# coord1 = {'lat':45.766504,'lon':5.791166}
# coord2 = {'lat':45.764019,'lon':5.788881}
# coord3 = {'lat':45.759428,'lon':5.790793}
# coord4 = {'lat':45.762664,'lon':5.792797}
# coord5 = {'lat':45.762664,'lon':5.792797}
# coord6 = {'lat':45.754700,'lon':5.787025}
# coord7 = {'lat':45.753517,'lon':5.789181}
# coord8 = {'lat':45.751544,'lon':5.794889}
# coord9 = {'lat':45.750691,'lon':5.792722}
# coord10 = {'lat':45.751511,'lon':5.796375}
# coord11 = {'lat':45.750111,'lon':5.793934}
import requests
#geocode=[coord1,coord2,coord3,coord4,coord5,coord6,coord7coord8,coord9,coord10,coord11]
#print(geocode[0])
#print(geogode[1])
api='&appid=65cac0f456f55747c7f58e9ba1e824d0'
base_url='http://api.openweathermap.org/data/2.5/weather?'+api+"&units=metric"
# tout ca c'est sympa mais autant le sortir du fichier :
coords=get_location() #"coords" est le resultat du tableau
print(coords[5]);
w=get_weather(coords[6])
print(w)
| [
2,
6349,
16,
796,
1391,
6,
15460,
10354,
2231,
13,
22,
2791,
33580,
4032,
14995,
10354,
20,
13,
3720,
1157,
2791,
92,
198,
2,
6349,
17,
796,
1391,
6,
15460,
10354,
2231,
13,
22,
2414,
30484,
4032,
14995,
10354,
20,
13,
22,
3459,
3... | 2.064159 | 452 |
import sys,os
import torch
import torch.nn as nn
import config
import numpy as np
from .smpl import SMPL
sys.path.append(os.path.abspath(__file__).replace('models/smpl_regressor.py',''))
from config import args | [
11748,
25064,
11,
418,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
4566,
198,
11748,
299,
32152,
355,
45941,
220,
198,
6738,
764,
5796,
489,
1330,
9447,
6489,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,... | 2.971831 | 71 |
#!/usr/bin/env python3
"""Main entry point."""
from zoneh.launcher import ZBotLauncher
from zoneh.log import init_logging
__version__ = '0.2.2'
def main():
"""Main function."""
init_logging()
zoneh = ZBotLauncher()
zoneh.run()
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
13383,
5726,
966,
526,
15931,
198,
198,
6738,
6516,
71,
13,
38722,
2044,
1330,
1168,
20630,
46182,
2044,
198,
6738,
6516,
71,
13,
6404,
1330,
2315,
62,
6404,
2667,
198,
198,... | 2.482759 | 116 |
import sys
string = raw_input()
sys.stdout.write(string)
sys.stdout.flush() | [
11748,
25064,
198,
198,
8841,
796,
8246,
62,
15414,
3419,
198,
17597,
13,
19282,
448,
13,
13564,
7,
8841,
8,
198,
17597,
13,
19282,
448,
13,
25925,
3419
] | 2.714286 | 28 |
from urllib.parse import urlencode
import cherrypy
from cherrypy import _json as json
from ingredients_http.errors.validation import ResponseValidationError
from schematics import Model
from schematics.exceptions import DataError
| [
6738,
2956,
297,
571,
13,
29572,
1330,
2956,
11925,
8189,
198,
198,
11748,
23612,
9078,
198,
6738,
23612,
9078,
1330,
4808,
17752,
355,
33918,
198,
6738,
9391,
62,
4023,
13,
48277,
13,
12102,
341,
1330,
18261,
7762,
24765,
12331,
198,
6... | 4 | 58 |
# coding utf-8
'''
XML value serialization and deserialization for JSON.
'''
import canvas as cv
import canvas.ext as cve
from .exceptions import XMLSyntaxError
from .utils import element_t, serialize, deserialize
from . import plugin_config
@cve.json_serializer(element_t)
| [
2,
197,
66,
7656,
3384,
69,
12,
23,
198,
7061,
6,
198,
55,
5805,
1988,
11389,
1634,
290,
748,
48499,
1634,
329,
19449,
13,
198,
7061,
6,
198,
198,
11748,
21978,
355,
269,
85,
198,
11748,
21978,
13,
2302,
355,
269,
303,
198,
198,
... | 3.043956 | 91 |
#!/usr/bin/env python
import sys
import common
import os
import optparse
import zipfile
# Add the trunk/Python/util directory to the PYTHONPATH
scr = sys.argv[0]
scr = os.path.abspath(scr)
scr_list = scr.split('/')
trunk_pos = scr_list.index('trunk')
util_path = '/'.join(scr_list[:trunk_pos+1] + ['Python','util'])
sys.path.append(util_path)
import cga_util
if __name__=='__main__':
parser = optparse.OptionParser()
parser.add_option( "--module_name", dest="module_name",help="")
#parser.add_option( "--lsid", dest="lsid",help="")
parser.add_option( "--zip_search_path", dest="zip_search_path",
default=common.zip_search_path,action="append",help="")
parser.add_option( "--cache_path", dest="cache_path",default=common.cache_path,
help="")
(options, args) = parser.parse_args()
module_name = options.module_name
#lsid = options.lsid
zip_search_path = options.zip_search_path
cache_path = options.cache_path
register_module(module_name,zip_search_path,cache_path) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
198,
11748,
2219,
198,
11748,
28686,
198,
11748,
2172,
29572,
198,
11748,
19974,
7753,
628,
198,
2,
3060,
262,
21427,
14,
37906,
14,
22602,
8619,
284,
262,
350,
56,
... | 2.418919 | 444 |
# Webhooks for external integrations.
from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from zerver.models import get_client, UserProfile
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success
from zerver.lib.validator import check_dict
from zerver.decorator import REQ, has_request_variables, authenticated_rest_api_view
import base64
from functools import wraps
from zerver.webhooks.github.view import build_message_from_gitlog
from typing import Any, Callable, Dict, TypeVar, Optional, Text
from zerver.lib.str_utils import force_str, force_bytes
ViewFuncT = TypeVar('ViewFuncT', bound=Callable[..., HttpResponse])
# Beanstalk's web hook UI rejects url with a @ in the username section of a url
# So we ask the user to replace them with %40
# We manually fix the username here before passing it along to @authenticated_rest_api_view
@beanstalk_decoder
@authenticated_rest_api_view(is_webhook=True)
@has_request_variables
| [
2,
5313,
25480,
82,
329,
7097,
4132,
9143,
13,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
18453,
11,
367,
29281,
31077,
198,
6738,
1976,
18497,
13,
27530,
1330,
651,
... | 3.355482 | 301 |
from rest_framework import serializers
from account.serializers import UserDetailSerializer
from customer.serializers import LinkCustomerSerializer
from liaison.models import Liaison
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
201,
198,
201,
198,
6738,
1848,
13,
46911,
11341,
1330,
11787,
11242,
603,
32634,
7509,
201,
198,
6738,
6491,
13,
46911,
11341,
1330,
7502,
44939,
32634,
7509,
201,
198,
6738,
43176,
13,
27530,
... | 3.716981 | 53 |
import numpy as np
from astropy import units as u
from .. import Spectrum1D, SpectralRegion
from astropy.nddata.nduncertainty import StdDevUncertainty, VarianceUncertainty, InverseVariance
from .extract_spectral_region import extract_region
__all__ = ['noise_region_uncertainty']
def noise_region_uncertainty(spectrum, spectral_region, noise_func=np.std):
"""
Generates a new spectrum with an uncertainty from the noise in a particular
region of the spectrum.
Parameters
----------
spectrum : `~specutils.Spectrum1D`
The spectrum to which we want to set the uncertainty.
spectral_region : `~specutils.SpectralRegion`
The region to use to calculate the standard deviation.
noise_func : callable
A function which takes the (1D) flux in the ``spectral_region`` and
yields a *single* value for the noise to use in the result spectrum.
Returns
-------
spectrum_uncertainty : `~specutils.Spectrum1D`
The ``spectrum``, but with a constant uncertainty set by the result of
the noise region calculation
"""
# Extract the sub spectrum based on the region
sub_spectra = extract_region(spectrum, spectral_region)
# TODO: make this work right for multi-dimensional spectrum1D's?
if not isinstance(sub_spectra, list):
sub_spectra = [sub_spectra]
sub_flux = u.Quantity(np.concatenate([s.flux.value for s in sub_spectra]),
spectrum.flux.unit)
# Compute the standard deviation of the flux.
noise = noise_func(sub_flux)
# Uncertainty type will be selected based on the unit coming from the
# noise function compared to the original spectral flux units.
if noise.unit == spectrum.flux.unit:
uncertainty = StdDevUncertainty(noise*np.ones(spectrum.flux.shape))
elif noise.unit == spectrum.flux.unit**2:
uncertainty = VarianceUncertainty(noise*np.ones(spectrum.flux.shape))
elif noise.unit == 1/(spectrum.flux.unit**2):
uncertainty = InverseVariance(noise*np.ones(spectrum.flux.shape))
else:
raise ValueError('Can not determine correct NDData Uncertainty based on units {} relative to the flux units {}'.format(noise.unit, spectrum.flux.unit))
# Return new specturm with uncertainty set.
return Spectrum1D(flux=spectrum.flux, spectral_axis=spectrum.spectral_axis,
uncertainty=uncertainty,
wcs=spectrum.wcs,
velocity_convention=spectrum.velocity_convention,
rest_value=spectrum.rest_value)
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
6468,
28338,
1330,
4991,
355,
334,
198,
198,
6738,
11485,
1330,
27217,
16,
35,
11,
13058,
1373,
47371,
198,
6738,
6468,
28338,
13,
358,
7890,
13,
358,
19524,
1425,
774,
1330,
520,
67,
136... | 2.720881 | 953 |
class IdentificationTypeNotFoundException(Exception):
"""
Raise if an identification type is supplied that does not exist in the application's identification type constants.
"""
pass
class RoleNotFoundException(Exception):
"""
Raise if provided role is not in organization's defined roles
"""
pass
| [
4871,
38657,
6030,
3673,
21077,
16922,
7,
16922,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
35123,
611,
281,
11795,
2099,
318,
14275,
326,
857,
407,
2152,
287,
262,
3586,
338,
11795,
2099,
38491,
13,
198,
220,
220,
220,
3722... | 3.795455 | 88 |
from ydb.operation import * # noqa
| [
6738,
331,
9945,
13,
27184,
1330,
1635,
220,
1303,
645,
20402,
198
] | 3 | 12 |
import os
from uuid import uuid4
import re
from app.scrape import scrape
from telegram import InlineQueryResultVoice, Update
from telegram.ext import Updater, InlineQueryHandler, CommandHandler, CallbackContext
def inlinequery(update: Update, context: CallbackContext) -> None:
"""Handle the inline query."""
query = update.inline_query.query
query = query.strip().lower()
results = [
InlineQueryResultVoice(
id=uuid4(),
type='voice',
title=audio['title'],
caption=audio['title'],
voice_url=audio['src'],
)
for audio in scrape(query)
]
update.inline_query.answer(results)
def start(update: Update, context: CallbackContext) -> None:
"""Send a message when the command /start is issued."""
update.message.reply_animation(quote=True, animation='https://i.kym-cdn.com/photos/images/original/001/565/728/103.gif')
| [
11748,
28686,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
198,
11748,
302,
198,
6738,
598,
13,
1416,
13484,
1330,
42778,
198,
198,
6738,
573,
30536,
1330,
554,
1370,
20746,
23004,
35708,
11,
10133,
198,
6738,
573,
30536,
13,
2302,
1330... | 2.665714 | 350 |
import roman
| [
11748,
374,
5185,
628,
628,
628
] | 3 | 6 |
from typing import Dict
import torch
from allennlp.data import Batch, Instance, Token, Vocabulary
from allennlp.data.dataset_readers.dataset_utils.span_utils import enumerate_spans
from allennlp.data.fields import TextField, ListField, SpanField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.modules.span_extractors import EndpointSpanExtractor
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
| [
6738,
19720,
1330,
360,
713,
198,
198,
11748,
28034,
198,
198,
6738,
477,
1697,
34431,
13,
7890,
1330,
347,
963,
11,
2262,
590,
11,
29130,
11,
47208,
22528,
198,
6738,
477,
1697,
34431,
13,
7890,
13,
19608,
292,
316,
62,
961,
364,
1... | 3.28481 | 158 |
from functools import partial
from torchnlp.encoders.text.static_tokenizer_encoder import StaticTokenizerEncoder
class MosesEncoder(StaticTokenizerEncoder):
""" Encodes the text using the Moses tokenizer.
**Tokenizer Reference:**
http://www.nltk.org/_modules/nltk/tokenize/moses.html
Args:
**args: Arguments passed onto ``StaticTokenizerEncoder.__init__``.
**kwargs: Keyword arguments passed onto ``StaticTokenizerEncoder.__init__``.
NOTE: The `doctest` is skipped because running NLTK moses with Python 3.7's pytest halts on
travis.
Example:
>>> encoder = MosesEncoder(["This ain't funny.", "Don't?"]) # doctest: +SKIP
>>> encoder.encode("This ain't funny.") # doctest: +SKIP
tensor([5, 6, 7, 8, 9])
>>> encoder.vocab # doctest: +SKIP
['<pad>', '<unk>', '</s>', '<s>', '<copy>', 'This', 'ain', ''t', 'funny', '.', \
'Don', '?']
>>> encoder.decode(encoder.encode("This ain't funny.")) # doctest: +SKIP
"This ain't funny."
"""
| [
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
6738,
7332,
1349,
34431,
13,
12685,
375,
364,
13,
5239,
13,
12708,
62,
30001,
7509,
62,
12685,
12342,
1330,
36125,
30642,
7509,
27195,
12342,
628,
198,
4871,
19010,
27195,
12342,
7,
45442,
... | 2.468085 | 423 |
import pandas as pd
import requests
import yaml
import os
import json
import argparse
import math
import time
if __name__ == "__main__":
main()
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
7007,
198,
11748,
331,
43695,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
1822,
29572,
198,
11748,
10688,
198,
11748,
640,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
8... | 3.102041 | 49 |
from django.contrib import admin
# Register your models here.
from .models import FamilyTree
###############################################################################
# full views
admin.site.register(FamilyTree, FamilyTreeAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
198,
6738,
764,
27530,
1330,
7884,
27660,
198,
198,
29113,
29113,
7804,
4242,
21017,
198,
2,
1336,
5009,
198,
28482,
13,
15654,
13,
30238,
7,... | 4.76 | 50 |
import sys
import miner_globals
import m.keywords
#####################
# Syntax token definitions
#####################
tokens = [
'INTEGER', 'HEXINTEGER', 'OCTINTEGER', 'BININTEGER',
'FLOAT',
'STRING',
'ID',
'GE', 'LE', 'EQ', 'NEQ', 'POW',
'MATCH_EQ', 'MATCH_NEQ',
'INCR', 'DECR',
'FILENAME', 'STREAMTYPE', 'STREAMVAR',
'ALIAS_ID',
'SHIFT_LEFT', 'SHIFT_RIGHT',
'FLOORDIV', 'BINARY_OR',
'OR_EQUAL',
'RAWSTRING',
'PIPE',
]
if sys.platform != "win32":
tokens.append('CURLY_OPEN') # used by MMAP command
states = (
('files', 'inclusive'),
('raw', 'inclusive'),
)
reserved = {
'not' : 'NOT',
'or' : 'OR',
'and' : 'AND',
'in' : 'IN',
'is' : 'IS',
'True' : 'TRUE',
'False' : 'FALSE',
'None' : 'NONE',
'as' : 'AS',
'for' : 'LC_FOR',
'IN' : 'UC_IN',
'lambda': 'LAMBDA',
'WITH' : 'WITH',
}
literals = [',', '.', '+', '-', '*', '%', '/', '=', '<', '>', '?', '(', ')', '[', ']', ':', '&', '^', '@', ';', '{', '}']
t_raw_RAWSTRING = r'.+'
#t_STRING = r'\"([^\\"]|(\\.))*\"'
t_HEXINTEGER = r'0[xX][0-9a-fA-F]+[lL]?'
t_OCTINTEGER = r'0[oO]?[0-7]+[lL]?'
t_BININTEGER = r'0[bB][0-1]+[lL]?'
t_GE = r'>='
t_LE = r'<='
t_EQ = r'=='
t_NEQ = r'!='
t_MATCH_EQ = r'=~'
t_MATCH_NEQ = r'!~'
t_POW = r'\*\*'
t_INCR = r'\+\+'
t_DECR = r'--'
t_SHIFT_LEFT = r'\<\<'
t_SHIFT_RIGHT = r'\>\>'
t_FLOORDIV = r'//'
t_BINARY_OR = r'\|\|'
t_OR_EQUAL = r'\|='
t_PIPE = r'\|'
t_ignore = ' \t'
t_files_ignore = ' \t'
tpart_exponent = r"[eE][-+]?\d+"
tpart_pointfloat = r"((\d+)?\.\d+)|(\d+\.)"
tpart_int_exponent = r"\d+%s" % tpart_exponent
tpart_float_opt_exponent = r"(%s)(%s)?" % (tpart_pointfloat, tpart_exponent)
####t_FLOAT = "%s|%s" % (tpart_pointfloat, tpart_exponentfloat)
t_FLOAT = "%s|%s" % (tpart_int_exponent, tpart_float_opt_exponent)
t_INTEGER = r'[1-9]\d*[lL]?|0'
_bytesFactor = {"T": 1024 * 1024 * 1024 * 1024,
"G": 1024 * 1024 * 1024,
"M": 1024 * 1024,
"K": 1024}
_numberFactor = {"T": 1000 * 1000 * 1000 * 1000,
"G": 1000 * 1000 * 1000,
"M": 1000 * 1000,
"K": 1000,
"d": 24 * 3600,
"h": 3600,
"m": 60}
def t_NUMBERBYTES(t):
r"\d+[TGMK]B"
number = int(t.value[:-2])
suffix = t.value[-2]
t.value = str(number*_bytesFactor[suffix])
t.type = "INTEGER"
return t
def t_FLOATBYTES(t):
r"\d+\.(\d+)?[TGMK]B"
number = float(t.value[:-2])
suffix = t.value[-2]
t.value = str(int(number*_bytesFactor[suffix]))
t.type = "INTEGER"
return t
def t_NUMBERSUFFIX(t):
r"\d+(T|G|M|K|d|h|m)"
number = int(t.value[:-1])
suffix = t.value[-1]
t.value = str(number*_numberFactor[suffix])
t.type = "INTEGER"
return t
def t_FLOATSUFFIX(t):
r"\d+\.(\d+)?(T|G|M|K|d|h|m)"
number = float(t.value[:-1])
suffix = t.value[-1]
t.value = str(number*_numberFactor[suffix])
t.type = "FLOAT"
return t
def t_TIME(t):
    r"\d\d\d\d(\d\d)?H"
    # Wall-clock literal "HHMM[SS]H", converted to seconds (docstring = regex).
    hours = int(t.value[0:2])
    minutes = int(t.value[2:4])
    # Matched text is 5 chars ("1234H") or 7 ("123456H"); only the long form
    # carries a seconds field.
    if len(t.value)> 5:
        secs = int(t.value[4:6])
    else:
        secs = 0
    t.value = str(hours*3600+minutes*60+secs)
    t.type = "INTEGER"
    return t
def t_DATE(t):
    r"\d{8}D"
    # Date literal "YYYYMMDD" + trailing 'D', converted to the UTC epoch
    # timestamp (seconds) of that day's midnight (docstring = PLY regex).
    from calendar import timegm
    year = int(t.value[0:4])
    month = int(t.value[4:6])
    day = int(t.value[6:8])
    val = timegm( (year, month, day, 0, 0, 0, 0, 0, 0) )
    t.value = str( val )
    t.type = "INTEGER"
    return t
# Single-character literals recognised while in the 'files' lexer state.
files_literals = ['|', '{']
def t_files_STREAMTYPE(t):
    r'\<[-a-zA-Z0-9_]+\>'
    # "<name>" stream-type tag in the files state; strip the angle brackets.
    t.value = t.value[1:-1]
    return t
def t_files_PIPE(t):
    r'\|'
    # A pipe ends the file list: return the lexer to the INITIAL state.
    t.lexer.begin('INITIAL')
    t.type = "PIPE"
    t.value = "|"
    return t
# '{' terminates the file list only on non-win32 platforms -- presumably
# because '{' can appear in Windows paths/patterns; TODO confirm rationale.
if sys.platform != "win32":
    def t_files_CURLY_OPEN(t):
        r'\{'
        t.lexer.begin('INITIAL')
        t.type = "CURLY_OPEN"
        t.value = "{"
        return t
def t_files_STREAMVAR(t):
    r"""[_a-zA-Z]\w*=([^ \t"']+|"([^\\"]|(\\.))*"|'([^\\']|(\\.))*')"""
    # "name=value" pair attached to a stream; value may be bare or quoted.
    # The token value becomes a (name, value) tuple.
    equal = t.value.index('=')
    value = t.value[equal+1:]
    if value[0]=='"' or value[0]=="'":
        # Strip quotes and undo backslash escapes.
        # NOTE(review): str.decode('string_escape') exists only in Python 2 --
        # confirm this module still targets Python 2.
        value = value[1:-1]
        value = value.decode('string_escape')
    t.value = (t.value[:equal], value)
    return t
# This token is used to specify URIs, filenames or filename patterns
# it is active only in the <files> state (after READ and WRITE)
def t_files_FILENAME(t):
    r'([a-z]+:[^ \t\n\"\|\<\>]+)|([^ \t\n\"\|\<\>=}]+)|"[^"]*"'
    # URI ("scheme:rest"), bare filename/pattern, or double-quoted filename.
    if t.value.startswith('"'):
        # Drop the surrounding double quotes.
        t.value = t.value[1:-1]
    return t
def t_longSingleSTRING(t):
    r"'''.*'''"
    # Triple-single-quoted string literal.  NOTE(review): '.' does not match
    # newlines, so only single-line '''...''' matches -- confirm intended.
    t.type = 'STRING'
    return t
def t_longDoubleSTRING(t):
    r'""".*"""'
    # Triple-double-quoted string literal (single-line only, as above).
    t.type = 'STRING'
    return t
def t_rSTRING(t):
    r"""[ur]?\"([^\\"]|(\\.))*\"|[ur]?'([^\\']|(\\.))*'"""
    # Plain, raw (r'...') or unicode (u'...') single-line string literal.
    t.type = 'STRING'
    return t
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # Identifier; promoted to a reserved-word token type when listed in
    # `reserved` (defined above this chunk).
    t.type = reserved.get(t.value, 'ID')
    if m.keywords.shouldSwitchToFileMode(t.type):
        # we need to switch to the special lexing state which tokenizes input as files
        t.lexer.begin('files')
    elif t.type == 'PARAM':
        # PARAM consumes the remainder of the line verbatim ('raw' state).
        t.lexer.begin('raw')
    elif t.value in miner_globals.aliasCommands:
        # NOTE(review): `m` and `miner_globals` are module-level names defined
        # outside this chunk -- not visible here.
        t.type = 'ALIAS_ID'
    return t
| [
11748,
25064,
201,
198,
11748,
16285,
62,
4743,
672,
874,
201,
198,
11748,
285,
13,
2539,
10879,
201,
198,
201,
198,
14468,
4242,
2,
201,
198,
2,
26375,
897,
11241,
17336,
201,
198,
14468,
4242,
2,
201,
198,
201,
198,
83,
482,
641,
... | 1.744142 | 3,201 |
"""Pony based StatementConnector.
"""
import datetime
from pony import orm
# This is important as using orm.desc in a query leads to an Exception
from pony.orm import desc
from papilotte.connectors.abstractconnector import AbstractConnector
from papilotte.exceptions import CreationException, ReferentialIntegrityError
from . import filter_queries
class StatementConnector(AbstractConnector):
    """A StatementConnector using the Pony ORM.
    """
    def get(self, obj_id):
        """Return the statement dict with id statement_id or None if no such statement.
        :param obj_id: the id or uri of the statement object to return
        :type obj_id: string
        :return: The object as defined in the openAPI definition or None
        :rtype: dict
        """
        result = None
        Statement = self.db.entities["Statement"]
        with orm.db_session:
            statement = Statement.get(id=obj_id)
            if statement is None:
                # Lookup by primary id failed: fall back to matching obj_id
                # against any of the statement's URIs.
                query = Statement.select(lambda st: obj_id in st.uris.uri)
                statement = query.first()
            if statement:
                result = statement.to_ipif()
                # Stand-alone statements must not embed their factoid.
                result.pop('Factoid', None)
        return result
    def filter(self, **filters):
        """Return a pony query object with all filters applied.
        # TODO: Discuss if metadata should be searched, too
        """
        Statement = self.db.entities["Statement"]
        # One subquery per related entity; a statement matches only if it and
        # its factoid's person/source all pass their respective filters.
        subquery_person = filter_queries.person_query(self.db, **filters)
        subquery_source = filter_queries.source_query(self.db, **filters)
        subquery_statement = filter_queries.statement_query(self.db, **filters)
        subquery_factoid = filter_queries.factoid_query(self.db, **filters)
        query = orm.select(s for s in Statement
                    if s in subquery_statement
                    if (s.factoid in subquery_factoid
                        and s.factoid.person in subquery_person
                        and s.factoid.source in subquery_source)
                )
        return query
    def search(self, size, page, sort_by="createdWhen", sort_order="ASC", **filters):
        """Find all objects which match the filter conditions set via
        filters.
        The filter conditions are defined in the openapi spec of statement.
        :param size: the number of results per page.
        :type size: int
        :param page: the number of the result page, starting with 1 (first page).
        :type page: int
        :param sort_by: the field the output should be sorted by. Default is 'createdWhen'.
            It is suggested to always use '@id' as a second sort field, to
            keep result order consistent for paging.
        :type sort_by: str
        :return: a list of statement objects (represented as dictionaries)
        :rtype: list
        """
        Statement = self.db.entities["Statement"]
        if sort_by == "@id":
            sort_by = "id"
        with orm.db_session:
            if filters:
                # TODO: replace datetime by anything which can handle dates bc
                if "from_" in filters:
                    filters["from_"] = datetime.date.fromisoformat(filters["from_"])
                if "to" in filters:
                    filters["to"] = datetime.date.fromisoformat(filters["to"])
                query = self.filter(**filters)
            else:
                query = orm.select(s for s in Statement)
            # TODO: specifiy sort order values in spec. Sorting by uris should be excluded in
            # spec (needs discussion)
            # set descending order if necessary and add id as second sort field (if not first)
            if sort_order.lower() == "desc":
                sort_expression = "desc(s.{}), s.id".format(sort_by)
                query = query.sort_by(sort_expression)
            else:
                if sort_by == "id":
                    sort_expression = "s.id"
                else:
                    sort_expression = "s.{}, s.id".format(sort_by)
                query = query.sort_by(sort_expression)
            result = [s.to_ipif() for s in query.page(page, size)]
        return result
    def count(self, **filters):
        """Return the number of statements matching the filters.
        :param **filters: a **kwargs containing any number of filter parameters
        :type **filters: dict
        :return: the number of statements found
        :rtype: int
        """
        Statement = self.db.entities["Statement"]
        with orm.db_session:
            if filters:
                query = self.filter(**filters)
            else:
                query = orm.select(s for s in Statement)
            result = query.count()
        return result
    def create(self, data):
        """Create a new Statement.
        :raises CreationException: if a statement with data['@id'] already exists.
        """
        Statement = self.db.entities["Statement"]
        try:
            with orm.db_session:
                statement = Statement.create_from_ipif(data)
                result = statement.to_ipif()
            return result
        except orm.TransactionIntegrityError:
            # Commit failed on the primary-key constraint -> duplicate id.
            raise CreationException(
                "A statement with id '{}' already exists.".format(data["@id"])
            )
    def update(self, obj_id, data):
        """
        Update or create an object specified by obj_id (upsert semantics).
        """
        Statement = self.db.entities["Statement"]
        with orm.db_session:
            # get_for_update locks the row; fall back to creating a fresh
            # statement when no row with obj_id exists.
            statement = Statement.get_for_update(id=obj_id) or Statement(id=obj_id)
            statement.update_from_ipif(data)
            result = statement.to_ipif()
        return result
    @orm.db_session
    def delete(self, obj_id):
        """
        Delete statement with id `obj_id`.
        :raises ReferentialIntegrityError: if a factoid still uses the statement.
        """
        # NOTE(review): the method is wrapped in @orm.db_session *and* opens
        # nested `with orm.db_session:` blocks; Pony tolerates the nesting,
        # but one of the two is redundant -- confirm intent.
        Statement = self.db.entities["Statement"]
        try:
            with orm.db_session:
                Statement[obj_id].delete()
        except orm.ConstraintError:
            # Deletion is blocked by a referencing factoid; report the first
            # offending factoid id in the error message.
            with orm.db_session:
                source = Statement[obj_id]
                msg = (
                    "Statement '{}' cannot be deleted because it is used by at "
                    "least one factoid ({})."
                ).format(obj_id, [s.id for s in source.factoids][0])
            raise ReferentialIntegrityError(msg)
| [
37811,
47,
1647,
1912,
21983,
34525,
13,
198,
37811,
198,
11748,
4818,
8079,
198,
198,
6738,
26902,
1330,
393,
76,
198,
2,
770,
318,
1593,
355,
1262,
393,
76,
13,
20147,
287,
257,
12405,
5983,
284,
281,
35528,
198,
6738,
26902,
13,
... | 2.226059 | 2,809 |
# Tuple of every importable Mock class, for isinstance() checks elsewhere.
# Both the PyPI backport ("mock") and the stdlib ("unittest.mock") flavour
# are collected; either import may be unavailable.
MOCKTYPES = ()

try:
    from mock import Mock
except ImportError:
    pass
else:
    MOCKTYPES = MOCKTYPES + (Mock,)

try:
    from unittest.mock import Mock
except ImportError:
    pass
else:
    MOCKTYPES = MOCKTYPES + (Mock,)
| [
44,
11290,
9936,
47,
1546,
796,
7499,
198,
28311,
25,
198,
220,
220,
220,
422,
15290,
1330,
44123,
628,
220,
220,
220,
337,
11290,
9936,
47,
1546,
15853,
357,
44,
735,
35751,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
1208,
1... | 2.373494 | 83 |
"""Main module."""
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.utils import ChromeType
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import presence_of_element_located
from selenium.webdriver.chrome.options import Options
import time
from bs4 import BeautifulSoup
import urllib.request
import glob
import os
import csv
# Load the crawl targets: one CSV row per video, each with a "link" column.
targets = []
with open('./helloworld/data.csv', newline='') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        targets.append(row)
# print(targets)
# Indices already downloaded: every existing ./video/<index>.mp4 file.
crawled = [int(os.path.basename(f).split(".")[0]) for f in glob.glob("./video/*.mp4")]
# BUG FIX: max() raises ValueError on an empty sequence -- start from 0 when
# nothing has been downloaded yet.
starting_index = max(crawled) + 1 if crawled else 0
print(f'Start crawling from {starting_index}')
chrome_options = Options()
# chrome_options.add_argument('--headless')
# chrome_options.add_argument('--no-sandbox')
# chrome_options.add_argument('--disable-extensions')
# chrome_options.add_argument('--disable-gpu')
# chrome_options.add_argument('--disable-dev-shm-usage')
# chrome_options.add_experimental_option('w3c', True)
#This example requires Selenium WebDriver 3.13 or newer
with webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=chrome_options) as driver:
    for index, item in enumerate(targets):
        print(item)
        # Skip everything fetched on a previous run.
        if index < starting_index:
            print("done")
            continue
        time.sleep(3)  # crude rate limit between page loads
        driver.get(item["link"])
        # soup = BeautifulSoup(driver.page_source, 'html.parser')
        # print(soup.prettify())
        wait = WebDriverWait(driver, 10)
        # Wait until the <video><source> element is present, then download
        # the media file referenced by its src attribute.
        first_result = wait.until(presence_of_element_located((By.CSS_SELECTOR, "video source")))
        urllib.request.urlretrieve(first_result.get_attribute("src"), f'./video/{index}.mp4')
37811,
13383,
8265,
526,
15931,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
46659,
13,
15271,
1330,
4809,
198,
6738,
3992,
26230,
62,
37153,
13,
46659,
1330,
13282,
32103,
13511,
198,... | 2.772666 | 739 |
#!/usr/bin/env python
"""
PlexConnectDaemon
Creates a proper daemon on mac/linux
"""
import os
import sys
import signal
import argparse
import atexit
from PlexConnect import startup, shutdown, run, cmdShutdown
def daemonize(args):
    """
    do the UNIX double-fork magic, see Stevens' "Advanced
    Programming in the UNIX Environment" for details (ISBN 0201563177)

    NOTE: Python 2 only ("except OSError, e" syntax and the file() builtin).
    :param args: parsed argparse namespace; only args.pidfile is used here.
    """
    # Make a non-session-leader child process
    try:
        pid = os.fork()
        if pid != 0:
            # Parent exits; the child continues as the daemon candidate.
            sys.exit(0)
    except OSError, e:
        raise RuntimeError("1st fork failed: %s [%d]" % (e.strerror, e.errno))
    # decouple from parent environment
    os.setsid()
    # Make sure I can read my own files and shut out others
    # (keep umask 0 if it already was 0, otherwise restrict to 077).
    prev = os.umask(0)
    os.umask(prev and int('077', 8))
    # Make the child a session-leader by detaching from the terminal
    try:
        pid = os.fork()
        if pid != 0:
            sys.exit(0)
    except OSError, e:
        raise RuntimeError("2nd fork failed: %s [%d]" % (e.strerror, e.errno))
    # redirect standard file descriptors to /dev/null
    sys.stdout.flush()
    sys.stderr.flush()
    si = file('/dev/null', 'r')
    so = file('/dev/null', 'a+')
    se = file('/dev/null', 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    if args.pidfile:
        try:
            # NOTE(review): `delpid` (should remove the PID file at exit) is
            # not defined in this chunk -- confirm it exists elsewhere.
            atexit.register(delpid)
            pid = str(os.getpid())
            file(args.pidfile, 'w').write("%s\n" % pid)
        except IOError, e:
            raise SystemExit("Unable to write PID file: %s [%d]" % (e.strerror, e.errno))
if __name__ == '__main__':
    # BUG FIX: `sighandler_shutdown` was registered below but never defined
    # anywhere in this file, which raises NameError at startup.  Define it
    # here, forwarding to the imported (and previously unused) cmdShutdown().
    def sighandler_shutdown(signum, frame):
        """Signal handler: request a clean PlexConnect shutdown."""
        cmdShutdown()

    # Shut down cleanly on Ctrl-C and on `kill`.
    signal.signal(signal.SIGINT, sighandler_shutdown)
    signal.signal(signal.SIGTERM, sighandler_shutdown)

    parser = argparse.ArgumentParser(description='PlexConnect as daemon.')
    parser.add_argument('--pidfile', dest='pidfile')
    args = parser.parse_args()

    # Detach from the terminal, then run the normal PlexConnect lifecycle.
    daemonize(args)
    success = startup()
    if success:
        run()
    shutdown()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
46383,
13313,
26531,
7966,
198,
198,
16719,
274,
257,
1774,
33386,
319,
8352,
14,
23289,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
6737,
198,
11... | 2.324201 | 876 |
"""
绘制期望值
"""
import dash
import dash_html_components as html
import dash_core_components as dcc
from lottery import get_history, expected_value
# Lottery game names, passed verbatim to the data source.
codes = ['双色球', '七乐彩', '3D', '大乐透', '排列三', '排列五', '七星彩']
# Number of most recent draws to analyse.
count = 20
datas = []
for code in codes:
    print('Get %s data...' % code)
    try:
        history = get_history(code, count)
    except AttributeError:
        print('Fail to get history')
        continue
    expectation = expected_value(history)
    # One line trace per game: x = draw identifier, y = expected value.
    trace = {'type': 'line', 'name': code}
    trace['x'] = [draw for (draw, _value) in expectation]
    trace['y'] = [value for (_draw, value) in expectation]
    datas.append(trace)
app = dash.Dash()
app.layout = html.Div(children=[
    html.H1(children='期望值对比'),
    html.Div(children='''
        最近%d期
    ''' % count),
    dcc.Graph(
        id='exp-value-graph',
        figure={
            'data': datas,
            'layout': {
                'title': '期望值'
            }
        }
    )
])
if __name__ == '__main__':
    app.run_server(debug=True)
| [
37811,
198,
163,
119,
246,
26344,
114,
17312,
253,
17312,
249,
161,
222,
120,
198,
37811,
198,
11748,
14470,
198,
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
198... | 1.856299 | 508 |
import argparse
import time
import math
import numpy as np
import torch
from torch._C import device
import torch.nn as nn
import torch.optim.lr_scheduler as lr_scheduler
from functools import partial
import data
from ONLSTM_model import ONLSTMModel
from AWDLSTM_model import RNNModel
from tqdm import tqdm
from utils import batchify, batchify_dep_tokenlist, get_batch, repackage_hidden, collate_func_for_tok
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
import os
import logging
# Configure root logging once at import time: timestamped INFO-level records.
_LOG_FORMAT = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
_LOG_DATEFMT = '%m/%d/%Y %H:%M:%S'
logging.basicConfig(format=_LOG_FORMAT, datefmt=_LOG_DATEFMT, level=logging.INFO)
# Module-level logger for this script.
logger = logging.getLogger(__name__)
# Suppress every warning emitted at runtime.
import warnings
warnings.filterwarnings('ignore')
###############################################################################
# Training code
###############################################################################
if __name__ == '__main__':
    # NOTE(review): `main` is not defined anywhere in this file as shown --
    # confirm it is defined elsewhere, otherwise this raises NameError.
    main()
11748,
1822,
29572,
198,
11748,
640,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
13557,
34,
1330,
3335,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
40085,
13,
14050,
... | 3.014085 | 355 |
# imports
import numpy as np
import pandas as pd
from scipy.interpolate import griddata, Akima1DInterpolator
import random
from utils import io, plotting, fit, modify
import matplotlib.pyplot as plt
# Apply the 'science'/'ieee' matplotlib styles (provided by the SciencePlots
# package -- presumably installed; confirm).
plt.style.use(['science', 'ieee', 'std-colors'])
# inspect calibration in-focus coords
# NOTE: the three triple-quoted blocks below are *disabled* analysis snippets
# kept as bare string literals; they are evaluated and discarded at import
# time and have no runtime effect.
"""
# file path
fp_in_focus = '/Users/mackenzie/Desktop/gdpyt-characterization/publication data/iteration 5/experiment validation/test_coords/calibration/calib_in-focus_coords_z-micrometer-v2.xlsx'
# read excel to disk
df = io.read_excel(path_name=fp_in_focus, filetype='.xlsx')
j = 1
"""
# inspect calibration correction coordinates
"""
# file path
fp_correction = '/Users/mackenzie/Desktop/gdpyt-characterization/publication data/iteration 5/experiment validation/test_coords/test/step/test_id1_coords_45micron_step_towards.xlsx'
# read excel to disk
df = io.read_excel(path_name=fp_correction, filetype='.xlsx')
# plot 3D scatter of all particle coordinates
plt.style.use(['science', 'ieee', 'scatter'])
fig, ax = plotting.plot_scatter_3d(df, fig=None, ax=None, elev=20, azim=-40, color='tab:blue', alpha=0.1)
fig, ax = plotting.plot_scatter_3d(df=[df.x, df.y, df.z_f], fig=fig, ax=ax, elev=20, azim=-40, alpha=1)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.tight_layout()
plt.show()
# make dictionary: dataframe for 5 particles
rand_pids = [pid for pid in random.sample(set(df.id.unique()), 5)]
dfpid = df[df.id.isin(rand_pids)]
dpidicts = modify.split_df_and_merge_dficts(dfpid, keys=rand_pids, column_to_split='id', splits=rand_pids, round_to_decimal=0)
# plot peak intensity profile
fig, ax = plotting.plot_scatter(dpidicts, xparameter='z', yparameter='peak_int', min_cm=None, z0=0, take_abs=False)
ax.set_xlabel(r'$z\: (\mu m)$')
ax.set_ylabel(r'$I_{peak}\: (A.U.)$')
ax.legend(rand_pids, title=r'$p_{ID}$')
plt.tight_layout()
plt.show()
"""
# 3D scatter plot of x, y, and in-focus z
"""
fig, ax = plotting.plot_scatter_3d([df.x, df.y, df.z_f], fig=None, ax=None, elev=5, azim=-40, color=None, alpha=0.75)
plt.show()
# Fit a 2D plane to the in-focus particles
points = np.stack((df.x, df.y, df.z_f)).T
px, py, pz = fit.fit_3d(points, fit_function='plane')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(df.x, df.y, df.z_f, c=df.z_f)
ax.plot_surface(px, py, pz, alpha=0.2, color='tab:red')
ax.set_xlabel('x', fontsize=18)
ax.set_ylabel('y', fontsize=18)
ax.set_zlabel('z', fontsize=18)
ax.view_init(5, -40)
ax.scatter(px[0][0], py[0][0], pz[0][0], color='red')
ax.scatter(px[0][1], py[0][1], pz[0][1], color='blue')
ax.scatter(px[1][0], py[1][0], pz[1][0], color='green')
ax.scatter(px[1][1], py[1][1], pz[1][1], color='purple')
plt.show()
"""
198,
2,
17944,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
629,
541,
88,
13,
3849,
16104,
378,
1330,
1036,
1638,
1045,
11,
9084,
8083,
16,
35,
9492,
16104,
1352,
198,
11748,
4738,
198,
198,
6... | 2.356021 | 1,146 |
import xlsxwriter
import requests
import os,re
from lxml import html
import time
# Crawl every listing page; house_crawl is defined earlier in this file.
hc = house_crawl()
page_num = 1546
for i in range(1, page_num + 1):
    print('正在爬去第{}页'.format(i))
    url = 'http://sh.centanet.com/ershoufang/g{}/?sem=baidu_ty'.format(i)
    try:
        hc(url)
    except Exception:
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit, making the loop impossible to stop
        # with Ctrl-C.  Catch only real errors and close the crawler.
        hc.close()
    time.sleep(0.2)  # throttle between pages
hc.close()
| [
11748,
2124,
7278,
87,
16002,
201,
198,
11748,
7007,
201,
198,
11748,
28686,
11,
260,
201,
198,
6738,
300,
19875,
1330,
27711,
201,
198,
11748,
640,
201,
198,
71,
66,
28,
4803,
62,
66,
13132,
3419,
201,
198,
7700,
62,
22510,
28,
131... | 1.816327 | 196 |
from flask_jwt_extended import jwt_required
from flask_restful import Resource, reqparse
from werkzeug.exceptions import NotFound, BadRequest
from model.site import SiteModel
from utils.utils import non_empty_string
# Shared request-body parser: requires a non-empty `url` field; with
# bundle_errors=True all validation errors are reported in one response.
arguments = reqparse.RequestParser(bundle_errors=True)
arguments.add_argument(
    'url',
    type=non_empty_string,
    required=True,
    nullable=False,
    help='The field URL cannot be left blank',
)
| [
6738,
42903,
62,
73,
46569,
62,
2302,
1631,
1330,
474,
46569,
62,
35827,
198,
6738,
42903,
62,
2118,
913,
1330,
20857,
11,
43089,
29572,
198,
6738,
266,
9587,
2736,
1018,
13,
1069,
11755,
1330,
1892,
21077,
11,
7772,
18453,
198,
198,
... | 3.035714 | 140 |
import numpy as np
from pathlib import Path
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Conv2DTranspose, LeakyReLU, BatchNormalization, Activation, Reshape
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import SGD
# Directory holding the raw images (currently unused below).
# BUG FIX: `Path.cwd` is a classmethod, so `Path.cwd + "/all-images"` raised
# TypeError; call it and join with the `/` operator instead.
dir = Path.cwd() / "all-images"
# NOTE(review): `mnist` is never imported in this file -- presumably the
# third-party `mnist` package (or keras.datasets.mnist) was intended; confirm.
train_images = mnist.train_images()
train_labels = mnist.train_labels()
test_images = mnist.test_images()
test_labels = mnist.test_labels()
# Normalize pixel values from [0, 255] to [-0.5, 0.5].
train_images = (train_images / 255) - 0.5
test_images = (test_images / 255) - 0.5
# Add a trailing channel axis: (N, H, W) -> (N, H, W, 1).
train_images = np.expand_dims(train_images, axis=3)
test_images = np.expand_dims(test_images, axis=3)
model = Sequential([
    # NOTE(review): input_shape=(700, 600, 3) does not match single-channel
    # MNIST data prepared above -- confirm which dataset is intended.
    Conv2D(8, 3, input_shape=(700, 600, 3), use_bias=False),
    MaxPooling2D(pool_size=2),
    Flatten(),
    Dense(10, activation='softmax'),
])
model.compile(SGD(lr=.005), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(
    train_images,
    to_categorical(train_labels),
    batch_size=1,
    epochs=3,
    validation_data=(test_images, to_categorical(test_labels)),
)
11748,
299,
32152,
355,
45941,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
75,
6962,
1330,
34872,
17,
35,
11,
5436,
27... | 2.622276 | 413 |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
# Restaurant-tips sample dataset; the dropdown filters it by day of week.
df = px.data.tips()
days = df.day.unique()
app = dash.Dash(__name__)
app.layout = html.Div([
    dcc.Dropdown(
        id="dropdown",
        options=[{"label": x, "value": x} for x in days],
        value=days[0],
        clearable=False,
    ),
    dcc.Graph(id="bar-chart"),
])
@app.callback(
    Output("bar-chart", "figure"),
    [Input("dropdown", "value")])
def update_bar_chart(day):
    """Redraw the bar chart for the selected day.

    BUG FIX: the original file had the @app.callback decorator with no
    function under it, which is a SyntaxError; restore the callback body
    from the Plotly Dash example this snippet is based on.
    """
    mask = df["day"] == day
    return px.bar(df[mask], x="sex", y="total_bill",
                  color="smoker", barmode="group")
app.run_server(debug=True)
| [
11748,
14470,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
198,
6738,
14470,
13,
45841,
3976,
1330,
23412,
11,
25235,
198,
11748,
7110,
306,
13,
42712,
355,
279,
... | 2.413043 | 230 |
from datetime import datetime
from mpplugs.Namespace import Namespace
from mpplugs.Settings import Settings
from mpplugs.LogColor import LogColor as CLR
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
285,
381,
75,
10339,
13,
36690,
10223,
1330,
28531,
10223,
198,
6738,
285,
381,
75,
10339,
13,
26232,
1330,
16163,
198,
6738,
285,
381,
75,
10339,
13,
11187,
10258,
1330,
5972,
10258,
355,
... | 3.55814 | 43 |
# -*- coding: utf-8 -*-
"""Workchain to generate pinball hyperparameters"""
from aiida.engine import calcfunction
from aiida.engine.processes import workchains
from aiida_quantumespresso.utils.defaults import calculation
from samos.trajectory import Trajectory
from aiida import orm
from aiida.common import AttributeDict, exceptions
from aiida.engine import BaseRestartWorkChain, WorkChain, ToContext, if_, while_, append_
from aiida.plugins import CalculationFactory, WorkflowFactory
import numpy as np
from aiida_quantumespresso.utils.mapping import prepare_process_inputs
from aiida_quantumespresso.workflows.protocols.utils import ProtocolMixin
from aiida_flipper.calculations.functions.functions import get_pinball_factors
from aiida_flipper.utils.utils import get_or_create_input_node
# Hustler-style replay-MD workflow (flipper/pinball flavour), loaded lazily
# from the AiiDA plugin registry by its entry-point name.
ReplayMDHWorkChain = WorkflowFactory('quantumespresso.flipper.replaymdhustler')
class FittingWorkChain(ProtocolMixin, WorkChain):
    """Workchain to run hustler level `pinball` and `DFT` calculations to fit forces and
    generate pinball hyperparameters, using Pinball Quantum ESPRESSO pw.x."""
    _process_class = ReplayMDHWorkChain
    @classmethod
    def define(cls, spec):
        """Define the process specification."""
        super().define(spec)
        # All ReplayMDHWorkChain inputs live under the `md` namespace, except
        # the ones this workchain supplies itself (structure, parent_folder,
        # clean_workdir).
        spec.expose_inputs(ReplayMDHWorkChain, namespace='md',
            exclude=('clean_workdir', 'pw.structure', 'pw.parent_folder'),
            namespace_options={'help': 'Inputs for the `ReplayMDWorkChain` for MD runs are called in the `md` namespace.'})
        spec.input('structure', valid_type=orm.StructureData, help='The input supercell structure.')
        spec.input('parent_folder', valid_type=orm.RemoteData, required=True,
            help='The stashed directory containing charge densities of host lattice.')
        spec.input('fitting_parameters', valid_type=orm.Dict, required=False, help='The dictionary containing the fitting parameters.')
        spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False),
            help='If `True`, work directories of all called calculation will be cleaned at the end of execution.')
        # Linear outline: one pinball run, one DFT run, then fit the forces.
        spec.outline(
            cls.setup,
            cls.run_process_pb,
            cls.run_process_dft,
            cls.inspect_process,
            cls.results,
        )
        spec.exit_code(706, 'ERROR_FITTING_FAILED',
            message='The linear regression to fit pinball and dft forces failed.')
        spec.exit_code(703, 'ERROR_CHARGE_DENSITIES_NOT_FOUND',
            message='Either the stashed charge densities or the flipper compatible supercell structure not found.')
        spec.exit_code(704, 'ERROR_SUB_PROCESS_FAILED_MD',
            message='The ReplayMDHustlerWorkChain sub process failed.')
        spec.exit_code(705, 'ERROR_TRAJECTORY_NOT_FOUND',
            message='The output trajectory of ReplayMDWorkChain not found.')
        # spec.expose_outputs(ReplayMDWorkChain)
        spec.output('coefficients', valid_type=orm.Dict,
            help='The dictionary containing the newly fitted pinball hyperparameters(keyword - `coefs`) along with linear regression values.')
        spec.output('trajectory_pb', valid_type=orm.TrajectoryData,
            help='The output trajectory of pinball Hustler calculation for easy manual fitting/post-processing if needed.')
        spec.output('trajectory_dft', valid_type=orm.TrajectoryData,
            help='The output trajectory of DFT Hustler calculation for easy manual fitting/post-processing if needed.')
    def setup(self):
        """Input validation and context setup."""
        # I store the flipper/pinball compatible structure as current_structure
        self.ctx.current_structure = self.inputs.structure
        # I store all the input dictionaries in context variables
        self.ctx.replay_inputs = AttributeDict(self.exposed_inputs(ReplayMDHWorkChain, namespace='md'))
        self.ctx.replay_inputs.pw.parameters = self.ctx.replay_inputs.pw.parameters.get_dict()
        self.ctx.replay_inputs.pw.settings = self.ctx.replay_inputs.pw.settings.get_dict()
        self.ctx.fitting_parameters_d = self.inputs.fitting_parameters.get_dict()
        # Setting up how many configurations are to be extracted from the input trajectory
        # (each configuration contributes 3 force components per pinball atom).
        forces_to_fit = self.ctx.fitting_parameters_d['forces_to_fit']
        pinballs = [s for s in self.ctx.current_structure.sites if s.kind_name == 'Li']
        self.ctx.hustler_steps = orm.Int(forces_to_fit/(len(pinballs)*3)+1)
        # I store this in context variable to update the wallclock at dft and pinball level
        self.ctx.max_wallclock_seconds = self.ctx.replay_inputs.pw['metadata']['options']['max_wallclock_seconds']
    @classmethod
    def get_protocol_filepath(cls):
        """Return ``pathlib.Path`` to the ``.yaml`` file that defines the protocols."""
        from importlib_resources import files
        from aiida_flipper.workflows import protocols as proto
        return files(proto) / 'fitting.yaml'
    @classmethod
    def get_builder_from_protocol(
        cls, code, structure, parent_folder, protocol=None, overrides=None, **kwargs
    ):
        """Return a builder prepopulated with inputs selected according to the chosen protocol.
        :param code: the ``Code`` instance configured for the ``quantumespresso.pw`` plugin.
        :param structure: the ``StructureData`` instance to use.
        :param parent_folder: the location of charge densities of host lattice
        :param protocol: protocol to use, if not specified, the default will be used.
        :param overrides: optional dictionary of inputs to override the defaults of the protocol, usually takes the pseudo potential family.
        :param kwargs: additional keyword arguments that will be passed to the ``get_builder_from_protocol`` of all the
            sub processes that are called by this workchain.
        :return: a process builder instance with all inputs defined ready for launch.
        """
        from aiida_quantumespresso.common.types import ElectronicType
        inputs = cls.get_protocol_inputs(protocol, overrides)
        # validating whether the charge density is correct, better I validate here before the workchain is submitted
        qb = orm.QueryBuilder()
        # querying the original unitcell
        qb.append(orm.StructureData, filters={'uuid':{'==':structure.extras['original_unitcell']}}, tag='struct')
        qb.append(WorkflowFactory('quantumespresso.flipper.preprocess'), with_incoming='struct', tag='prepro')
        qb.append(orm.RemoteData, with_incoming='prepro', project='id')
        parent_folders = qb.all(flat=True)
        if not parent_folder.pk in parent_folders:
            # Mismatch is only reported, not fatal.
            print(f'the charge densities <{parent_folder.pk}> do not match with structure {structure.pk}')
            print('Proceed at your own risk')
        args = (code, structure, parent_folder, protocol)
        replay = ReplayMDHWorkChain.get_builder_from_protocol(*args, electronic_type=ElectronicType.INSULATOR, overrides=inputs['md'], **kwargs)
        # Strip the inputs this workchain provides itself (see define()).
        replay['pw'].pop('structure', None)
        replay.pop('clean_workdir', None)
        replay['pw'].pop('parent_folder', None)
        # For fireworks scheduler, setting up the required resources options
        if 'fw' in code.get_computer_label():
            replay['pw']['metadata']['options']['resources'].pop('num_machines')
            replay['pw']['metadata']['options']['resources']['tot_num_mpiprocs'] = 32
        # For hyperqueue scheduler, setting up the required resources options
        if 'hq' in code.get_computer_label():
            replay['pw']['metadata']['options']['resources'].pop('num_cores_per_mpiproc')
            replay['pw']['metadata']['options']['resources'].pop('num_mpiprocs_per_machine')
            replay['pw']['metadata']['options']['resources']['num_cores'] = 32
        builder = cls.get_builder()
        builder.md = replay
        builder.structure = structure
        builder.parent_folder = parent_folder
        builder.clean_workdir = orm.Bool(inputs['clean_workdir'])
        builder.fitting_parameters = orm.Dict(dict=inputs['fitting_parameters'])
        return builder
    def run_process_pb(self):
        """Run the `ReplayMDHustlerWorkChain` to launch a `HustlerCalculation`."""
        inputs = self.ctx.replay_inputs
        inputs.nstep = self.ctx.hustler_steps
        inputs.pw['parent_folder'] = self.inputs.parent_folder
        inputs.pw['structure'] = self.ctx.current_structure
        # No need to request nodes for such long time
        inputs.pw['metadata']['options']['max_wallclock_seconds'] = int(self.ctx.max_wallclock_seconds / 10)
        # inputs.pw['parameters']['CONTROL']['lflipper'] = True
        # inputs.pw['parameters']['CONTROL']['ldecompose_forces'] = True
        # inputs.pw['parameters']['CONTROL']['ldecompose_ewald'] = True
        # inputs.pw['parameters']['CONTROL']['flipper_do_nonloc'] = True
        # Set the `CALL` link label
        self.inputs.metadata.call_link_label = 'replayh_pb'
        inputs.metadata.label = 'replayh_pb'
        inputs = prepare_process_inputs(ReplayMDHWorkChain, inputs)
        running = self.submit(ReplayMDHWorkChain, **inputs)
        self.report(f'launching ReplayMDHustlerWorkChain<{running.pk}> at pinball level')
        return ToContext(workchains=append_(running))
    def run_process_dft(self):
        """Run the `ReplayMDHustlerWorkChain` to launch a `HustlerCalculation`."""
        inputs = self.ctx.replay_inputs
        inputs.nstep = self.ctx.hustler_steps
        inputs.pw['parent_folder'] = self.inputs.parent_folder
        inputs.pw['structure'] = self.ctx.current_structure
        # Remove the pinball/flipper flags so this run is plain DFT.
        inputs.pw['parameters']['CONTROL'].pop('lflipper')
        inputs.pw['parameters']['CONTROL'].pop('ldecompose_forces')
        inputs.pw['parameters']['CONTROL'].pop('ldecompose_ewald')
        inputs.pw['parameters']['CONTROL'].pop('flipper_do_nonloc')
        # Setting the wallclock to inputted value
        inputs.pw['metadata']['options']['max_wallclock_seconds'] = int(self.ctx.max_wallclock_seconds)
        # Set the `CALL` link label
        self.inputs.metadata.call_link_label = 'replayh_dft'
        inputs.metadata.label = 'replayh_dft'
        inputs = prepare_process_inputs(ReplayMDHWorkChain, inputs)
        running = self.submit(ReplayMDHWorkChain, **inputs)
        self.report(f'launching ReplayMDHustlerWorkChain<{running.pk}> at DFT level')
        return ToContext(workchains=append_(running))
    def inspect_process(self):
        """Inspect the results of the last `ReplayMDHustlerWorkChain`.
        I compute the MSD from the previous trajectory and check if it converged with respect to the provided threshold, both relative and absolute.
        """
        # workchains[0] is the pinball run, workchains[1] the DFT run
        # (appended in outline order by ToContext).
        workchain_pb = self.ctx.workchains[0]
        workchain_dft = self.ctx.workchains[1]
        for workchain in [workchain_pb, workchain_dft]:
            if workchain.is_excepted or workchain.is_killed:
                self.report(f'called ReplayMDHustlerWorkChain<{workchain.pk}> was excepted or killed')
                return self.exit_codes.ERROR_SUB_PROCESS_FAILED_MD
            if workchain.is_failed: # and workchain.exit_status not in ReplayMDHustlerWorkChain.get_exit_statuses(acceptable_statuses):
                self.report(f'called ReplayMDHustlerWorkChain<{workchain.pk}> failed with exit status {workchain.exit_status}')
                return self.exit_codes.ERROR_SUB_PROCESS_FAILED_MD
            try:
                trajectory = workchain.outputs.total_trajectory
            except (KeyError, exceptions.NotExistent):
                self.report(f'the Md run with ReplayMDHustlerWorkChain<{workchain.pk}> finished successfully but without output trajectory')
                return self.exit_codes.ERROR_TRAJECTORY_NOT_FOUND
        # Start fitting and compute pinball hyperparameters
        trajectory_pb = workchain_pb.outputs.total_trajectory
        trajectory_dft = workchain_dft.outputs.total_trajectory
        nstep = self.ctx.replay_inputs.nstep.value
        # Sanity check: both trajectories must contain exactly nstep frames.
        for traj in (trajectory_pb, trajectory_dft):
            shape = traj.get_positions().shape
            # I should remove the first step before comparing
            if shape[0] != nstep:
                self.report('Wrong shape of array returned by {} ({} vs {})'.format(traj.pk, shape, nstep))
                # self.exit_codes.ERROR_FITTING_FAILED
        self.ctx.coefficients = get_pinball_factors(trajectory_dft, trajectory_pb)['coefficients']
        self.ctx.trajectory_pb = trajectory_pb
        self.ctx.trajectory_dft = trajectory_dft
        return
    def results(self):
        """Output the pinball hyperparameter and results of the fit along with the trajectories."""
        self.out('coefficients', self.ctx.coefficients)
        self.out('trajectory_pb', self.ctx.trajectory_pb)
        self.out('trajectory_dft', self.ctx.trajectory_dft)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
12468,
7983,
284,
7716,
6757,
1894,
8718,
17143,
7307,
37811,
198,
6738,
257,
72,
3755,
13,
18392,
1330,
42302,
8818,
198,
6738,
257,
72,
3755,
13,
18392,
13,
146... | 2.536947 | 5,129 |
import requests
import time
from hashlib import md5
| [
11748,
7007,
198,
11748,
640,
198,
6738,
12234,
8019,
1330,
45243,
20,
628,
198
] | 3.857143 | 14 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Nekozilla is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Nekozilla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nekozilla. If not, see <https://www.gnu.org/licenses/>.
"""
Uses webscraping to search tldrlegal for human-readable information on
software licenses, et cetera.
"""
import asyncio
from dataclasses import dataclass
from typing import List
from typing import Tuple
import aiohttp
import bs4
import neko3.cog
from neko3 import algorithms
from neko3 import embeds
from neko3 import neko_commands
from neko3 import pagination
from neko3 import string
# Base URL of tldrlegal.com; all scraping requests in this cog are built on it.
base_url = "https://tldrlegal.com/"
@dataclass()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
37167,
8590,
5049,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
340,
739,
2... | 3.393939 | 330 |
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from . import packet_base
from . import packet_utils
class gre(packet_base.PacketBase):
"""
"""
_PACK_STR = '!HHI'
_MIN_LEN = struct.calcsize(_PACK_STR)
@classmethod
@classmethod
| [
2,
15069,
357,
34,
8,
1584,
399,
3974,
261,
21821,
290,
44735,
10501,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846... | 3.34 | 250 |
import cv2
from win32api import GetSystemMetrics
import time
import os
import math
from tkinter import filedialog, messagebox
from cvzone.HandTrackingModule import HandDetector
filepath = filedialog.askopenfilename(initialdir=os.getcwd(), title='Browse Image File', filetypes=(('JPEG Image File', '*.jpg'),('PNG Image File','*.png')))
# Accept only .jpg/.jpeg files; an empty path (dialog cancelled) is rejected
# here as well.  The original test `not a or b` had an operator-precedence bug
# that rejected every .jpeg file even though the error message claims .jpeg
# is supported.
# NOTE(review): the dialog also offers '*.png' although PNG files are rejected
# below -- confirm whether PNG support was intended.
if not filepath.endswith(('.jpg', '.jpeg')):
    messagebox.showerror('Error Occured','The image type is not supported.\nWe only support .jpg and .jpeg files.')
    quit()
# --- Load the chosen image and scale its longest side to 400 px, keeping the
# --- original aspect ratio.
imagefile = cv2.imread(filepath)
image_aspect_ratio = (imagefile.shape[1]/imagefile.shape[0])
w = imagefile.shape[1]
h = imagefile.shape[0]
if w > h:
    w = 400
    h = int((1/image_aspect_ratio) * w)
elif w < h:
    h = 400
    w = int(image_aspect_ratio * h)
else:
    w = 400
    h = 400
finalimage = cv2.resize(imagefile, (w,h), cv2.INTER_AREA)
# NOTE(review): trueimage is never read after this assignment -- dead store.
trueimage = finalimage
window_name = '2D Graphics Manipulation Software'
# Request a capture sized to the primary monitor's resolution.
camera_width = GetSystemMetrics(0)
camera_height = GetSystemMetrics(1)
# Exit-gesture state: a fingertip held on the exit button for 10 consecutive
# frames quits the program.
willQuit = False
quitCounter = 10
distance_hands = None
capture = cv2.VideoCapture(0)
capture.set(3, camera_width)
capture.set(4, camera_height)
pTime = 0
# Grab one frame so the overlay can start centred in the camera image.
success, tempcam = capture.read()
image_x = int((tempcam.shape[1]-finalimage.shape[1])/2)
image_y = int((tempcam.shape[0]-finalimage.shape[0])/2)
image_width = finalimage.shape[1]
image_height = finalimage.shape[0]
detector = HandDetector(detectionCon=0.8)
messagebox.showinfo('Before we start!','''Before Proceeding, please read the following instructions:
> To exit the program, make sure your index finger is positioned at the close button on the top right side of the screen. (Make sure only one hand is being detected, and you need not be necessarily pointing your index finger)
> To move a picture around, open only your index finger and/or your middle finger. (Make sure only one hand is being detected)
> To zoom in or zoom out of a picture, use both of your hands' index finger and/or your middle finger. The farther the position of the two index finger tips are, the bigger the image.
That's it, have fun!
''')
while True:
    # Capture webcam and mirror it
    success, src = capture.read()
    img = cv2.flip(src, 1)
    img_shape = img.shape
    # Detect hands
    hands, img = detector.findHands(img)
    if len(hands) == 1:
        distance_hands = None
        # Single handed functions
        for i in range(len(hands)):
            lmList = hands[i]["lmList"]
            #print('Index finger X:' + str(lmList[8][0]) + ' Y: ' + str(lmList[8][1]))
            # Index fingertip (landmark 8) inside the 70x70 px top-right
            # corner counts toward the quit countdown.
            if (lmList[8][0] >= img_shape[1]-70) and (lmList[8][1] <= 70):
                willQuit = True
                quitCounter -= 1
                if quitCounter == 0:
                    cv2.destroyAllWindows()
                    quit()
            else:
                willQuit = False
                quitCounter = 10
        # Drag mode: only index (or index+middle) finger raised.
        if detector.fingersUp(hands[0]) == [0,1,0,0,0] or detector.fingersUp(hands[0]) == [0,1,1,0,0]:
            lmList = hands[0]['lmList']
            x = lmList[8][0]
            y = lmList[8][1]
            # NOTE(review): this or-joined guard is effectively always true;
            # 'and' was probably intended to keep the drag inside the frame.
            if (x >= (image_width/2)) or (x <= img_shape[1]-(image_width/2)) or (y >= (image_height/2)) or (y <= img_shape[0]-(image_height/2)):
                # Centre the image on the fingertip, clamped to the frame.
                image_x = int(x-(image_width/2))
                if image_x < 0:
                    image_x = 0
                elif image_x > img_shape[1]-image_width:
                    image_x = img_shape[1]-image_width
                image_y = int(y-(image_height/2))
                if image_y < 0:
                    image_y = 0
                elif image_y > img_shape[0]-image_height:
                    image_y = img_shape[0]-image_height
        # While the quit gesture is active, re-centre the image.
        if willQuit:
            image_x = int((img_shape[1]-image_width)/2)
            image_y = int((img_shape[0]-image_height)/2)
    elif len(hands) == 2:
        #Double handed functions
        # Zoom mode: both hands showing index (or index+middle) finger.
        if (detector.fingersUp(hands[0]) == [0,1,0,0,0] or detector.fingersUp(hands[0]) == [0,1,1,0,0]) and (detector.fingersUp(hands[1]) == [0,1,0,0,0] or detector.fingersUp(hands[1]) == [0,1,1,0,0]):
            image_x = int((img_shape[1]-image_width)/2)
            image_y = int((img_shape[0]-image_height)/2)
            lmList1 = hands[0]['lmList']
            lmList2 = hands[1]['lmList']
            x1 = lmList1[8][0]
            x2 = lmList2[8][0]
            y1 = lmList1[8][1]
            y2 = lmList2[8][1]
            # First zoom frame: record the fingertip distance as a baseline.
            if distance_hands == None:
                distance_hands = math.sqrt(math.pow(abs(x2-x1),2)+math.pow(abs(y2-y1),2))
            else:
                # Scale the image by the frame-to-frame change in fingertip
                # distance (damped by 2.5), clamped to sane bounds.
                prev_distance_hands = distance_hands
                distance_hands = abs(math.sqrt(math.pow(abs(x2-x1),2)+math.pow(abs(y2-y1),2)))
                diff = int((distance_hands - prev_distance_hands)/2.5)
                image_height += diff
                image_width += image_aspect_ratio * diff
                if image_width > img_shape[1]:
                    image_width = img_shape[1]
                    image_height = int((1/image_aspect_ratio)*image_width)
                elif image_width < 50:
                    image_width = 50
                    image_height = int((1/image_aspect_ratio)*image_width)
                elif image_height > img_shape[0]:
                    image_height = img_shape[0]
                    image_width = int(image_aspect_ratio*image_height)
                elif image_height < 100:
                    image_height = 100
                    image_width = int(image_aspect_ratio*image_height)
                image_x = (img_shape[1]-int(image_width))/2
                image_y = (img_shape[0]-int(image_height))/2
                finalimage = cv2.resize(imagefile, (int(image_width), int(image_height)), interpolation=cv2.INTER_AREA)
    else:
        distance_hands = None
    # Window Properties
    cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(window_name,cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
    # Calculate FPS
    cTime = time.time()
    fps = 1/(cTime-pTime)
    pTime = cTime
    # Show elements on screen
    fpscolor = None
    titlecolor = None
    # NOTE(review): isDark() is not defined anywhere in this file -- these
    # calls raise NameError at runtime unless it is provided elsewhere.
    if isDark(img[0:100,0:100]):
        fpscolor = (255,255,255)
    else:
        fpscolor = (0,0,0)
    if isDark(img[img_shape[0]-50:img_shape[0],0:400]):
        titlecolor = (255,255,255)
    else:
        titlecolor = (0,0,0)
    cv2.putText(img, f'{int(fps)} FPS', (10,40), cv2.FONT_HERSHEY_SIMPLEX, 1, fpscolor, 3)
    cv2.putText(img, '2D Augmented Graphics Manipulation Software', (10,img_shape[0]-20), cv2.FONT_HERSHEY_SIMPLEX, 1, titlecolor, 3)
    #Format image and show it
    # Bare except below silently skips drawing when the overlay slice falls
    # outside the frame or the exit-button image failed to load.
    try:
        exit_button = cv2.imread('images/exit-button-image.png')
        img[int(image_y):int(image_y)+int(image_height),int(image_x):int(image_x)+int(image_width)] = finalimage
        img[10:60, img_shape[1]-60:img_shape[1]-10] = exit_button
    except:
        pass
    cv2.imshow(window_name, img)
    cv2.waitKey(1)
11748,
269,
85,
17,
201,
198,
6738,
1592,
2624,
15042,
1330,
3497,
11964,
9171,
10466,
201,
198,
11748,
640,
201,
198,
11748,
28686,
201,
198,
11748,
10688,
201,
198,
6738,
256,
74,
3849,
1330,
5717,
498,
519,
11,
3275,
3524,
201,
198... | 2.008727 | 3,552 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import argparse
import pandas as pd
from datetime import datetime
import ray
from ray.tune import run, sample_from
from ray.tune.schedulers import PopulationBasedTraining, AsyncHyperBandScheduler
from pb2 import PB2
#from ppbt_noreward import PPBT
# Postprocess the perturbed config to ensure it's still valid
# Entry point: parse CLI flags, build three candidate schedulers (PBT, PB2,
# ASHA), run the chosen RLlib algorithm under ray.tune, and dump per-agent
# reward curves to a CSV.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--max", type=int, default=3000000)
    parser.add_argument("--algo", type=str, default='IMPALA')
    parser.add_argument("--num_workers", type=int, default=1)
    parser.add_argument("--num_samples", type=int, default=4)
    parser.add_argument("--freq", type=int, default=500000)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--horizon", type=int, default=1000)
    parser.add_argument("--perturb", type=float, default=0.25)
    parser.add_argument("--env_name", type=str, default="SpaceInvadersNoFrameskip-v4")
    parser.add_argument("--criteria", type=str, default="timesteps_total") # "training_iteration"
    parser.add_argument("--filename", type=str, default="")
    parser.add_argument("--method", type=str, default="pb2") # ['pbt', 'pb2', 'asha']
    args = parser.parse_args()
    ray.init()
    # Output directory encodes the full experiment configuration.
    args.dir = "{}_{}_{}_Size{}_{}_{}".format(args.algo, args.filename, args.method, str(args.num_samples), args.env_name, args.criteria)
    if not(os.path.exists('data/'+args.dir)):
        os.makedirs('data/'+args.dir)
    # NOTE(review): `explore` (custom_explore_fn below) is not defined in this
    # chunk -- the postprocessing helper described by the comment above the
    # guard appears to have been removed, so building these schedulers raises
    # NameError.
    pbt = PopulationBasedTraining(
        time_attr= args.criteria,
        metric="episode_reward_mean",
        mode="max",
        perturbation_interval=args.freq,
        resample_probability=args.perturb,
        quantile_fraction = args.perturb, # copy bottom % with top %
        # Specifies the mutations of these hyperparams
        hyperparam_mutations={
            "epsilon": lambda: random.uniform(0.01, 0.5), # 0.1
            "entropy_coeff": lambda: random.uniform(0.001, 0.1), # 0.01
            "lr": lambda: random.uniform(1e-5, 1e-2), # 5e-3
        },
        custom_explore_fn=explore)
    # PB2 shares the PBT settings except it never resamples from the prior.
    pb2 = PB2(
        time_attr= args.criteria,
        metric="episode_reward_mean",
        mode="max",
        perturbation_interval=args.freq,
        resample_probability=0,
        quantile_fraction = args.perturb, # copy bottom % with top %
        # Specifies the mutations of these hyperparams
        hyperparam_mutations={
            "epsilon": lambda: random.uniform(0.01, 0.5), # 0.1
            "entropy_coeff": lambda: random.uniform(0.001, 0.1), # 0.01
            "lr": lambda: random.uniform(1e-5, 1e-2), # 5e-3
        },
        custom_explore_fn=explore)
    asha = AsyncHyperBandScheduler(
        time_attr=args.criteria,
        metric="episode_reward_mean",
        mode="max",
        grace_period=args.freq,
        max_t=args.max)
    methods = {'pbt': pbt,
               'pb2': pb2,
               'asha': asha}
    timelog = str(datetime.date(datetime.now())) + '_' + str(datetime.time(datetime.now()))
    # Launch the sweep; initial hyperparameters are sampled per trial, then
    # perturbed by the selected scheduler.
    analysis = run(
        args.algo,
        name="{}_{}_{}_seed{}_{}".format(timelog, args.method, args.env_name, str(args.seed), args.filename),
        scheduler=methods[args.method],
        verbose=1,
        num_samples= args.num_samples,
        stop= {args.criteria: args.max},
        config= {
            "env": args.env_name,
            "log_level": "INFO",
            "seed": args.seed,
            "num_gpus": 0,
            "num_workers": args.num_workers,
            "horizon": args.horizon,
            "rollout_fragment_length": 50,
            "train_batch_size": 500,
            "num_envs_per_worker": 5,
            "epsilon": sample_from(
                lambda spec: random.uniform(0.1, 0.5)),
            "entropy_coeff": sample_from(
                lambda spec: random.uniform(0.001, 0.1)),
            "lr": sample_from(
                lambda spec: random.uniform(1e-5, 1e-2)),
        }
    )
    # Collect each trial's reward curve into one CSV, tagged by agent index.
    all_dfs = analysis.trial_dataframes
    names = list(all_dfs.keys())
    results = pd.DataFrame()
    for i in range(args.num_samples):
        df = all_dfs[names[i]]
        df = df[['timesteps_total', 'episodes_total', 'episode_reward_mean']]
        df['Agent'] = i
        results = pd.concat([results, df]).reset_index(drop=True)
    results.to_csv("data/{}/seed{}.csv".format(args.dir, str(args.seed)))
| [
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
1822,
29572,
198,
11748,
19798,
292,
... | 2.133023 | 2,150 |
# Parallel implementation template from: https://gitlab.com/lucasrthompson/Sonic-Bot-In-OpenAI-and-NEAT
# PuyoPuyo gym environment from: https://github.com/frostburn/gym_puyopuyo
import os
import pickle
import numpy as np
from gym_puyopuyo import register
import gym
import neat
import visualize
# NEAT run configuration.
DRAW_NETS = True
NUM_WORKERS = 4 # number of workers for parallel genome score evaluation
NUM_RUNS = 5 # game runs per genome
NUM_GEN = 5000 # max number of generations
WIDTH = 3 # width for Small env is 3
NUM_COLORS = 3 # 3 colors in the small env mode
# TODO: could probably read color number from observation data
# NOTE(review): the action-count formula below is taken on faith -- confirm
# against gym_puyopuyo's action space for the Small env.
NUM_ACTIONS = 4 * WIDTH - 2 - 1
piece_shape = (3, 2)
fn_results = "recurrent-small"
# Register the puyopuyo environments with gym, then build the Small endless env.
register()
env = gym.make("PuyoPuyoEndlessSmall-v2")
# Converts the 3d array (RGB) supplied by the game
# into a 1d array to be used as network input
# NOTE(review): the conversion helper described above and the run() entry
# point are not defined in this chunk -- executing it as-is raises NameError
# at run().
if __name__ == "__main__":
    run()
| [
2,
42945,
7822,
11055,
422,
25,
3740,
1378,
18300,
23912,
13,
785,
14,
75,
1229,
292,
81,
400,
296,
8430,
14,
50,
9229,
12,
20630,
12,
818,
12,
11505,
20185,
12,
392,
12,
12161,
1404,
198,
2,
350,
4669,
78,
47,
4669,
78,
11550,
... | 2.97351 | 302 |
# Interleave the letter "a" between every pair of adjacent characters.
string = "bbc!"
# str.join iterates the string's characters directly; no list() needed.
palavra = "a".join(string)
print(palavra)
| [
8841,
796,
366,
11848,
66,
2474,
198,
18596,
615,
430,
796,
366,
64,
1911,
22179,
7,
4868,
7,
8841,
4008,
198,
4798,
7,
18596,
615,
430,
8,
198
] | 2.285714 | 28 |
# pylint: disable-msg=W0703
# pylint: disable-msg=E1103
import logging
import sys
from google.appengine.ext import ndb
from google.appengine.api import users
from app_dashboard_helper import AppDashboardHelper
from app_dashboard_helper import AppHelperException
class DashboardDataRoot(ndb.Model):
  """ A Datastore Model that contains information about the AppScale cloud
  itself, and is shown to users regardless of whether or not they are logged in.

  A single entity of this model is kept, keyed by AppDashboardData.ROOT_KEYNAME.

  Fields:
    head_node_ip: A str that corresponds the hostname (IP or FQDN) of the
      machine that runs the nginx service, providing a full proxy to Google App
      Engine apps hosted in this cloud.
    table: A str containing the name of the database that we are using to
      implement support for the Datastore API (e.g., hypertable, cassandra).
    replication: An int that corresponds to the number of replicas present for
      each piece of data in the underlying datastore.
  """
  # Populated lazily by AppDashboardData.update_head_node_ip().
  head_node_ip = ndb.StringProperty()
  # Both filled in by AppDashboardData.update_database_info().
  table = ndb.StringProperty()
  replication = ndb.IntegerProperty()
class ApiStatus(ndb.Model):
  """ A Datastore Model that contains information about the current state of an
  Google App Engine API that AppScale provides support for.

  Fields:
    id: A str that corresponds to the name of the Google App Engine API. This
      field isn't explicitly defined because all ndb.Models have a str id
      that uniquely identifies them in the Datastore.
    status: A str that indicates what the current status of the API is (e.g.,
      running, failed, unknown).
  """
  # Refreshed by AppDashboardData.update_api_status().
  status = ndb.StringProperty()
class ServerStatus(ndb.Model):
  """ A Datastore Model that contains information about a single virtual machine
  running in this AppScale deployment.

  Fields:
    id: The hostname (IP or FQDN) corresponding to this machine. This field
      isn't explicitly defined because all ndb.Models have a str id that
      uniquely identifies them in the Datastore.
    cpu: The percent of CPU currently in use on this machine.
    memory: The percent of RAM currently in use on this machine.
    disk: The percent of hard disk space in use on this machine.
    roles: A list of strs, where each str corresponds to a service that this
      machine runs.
  """
  # All refreshed by AppDashboardData.update_status_info(); the percentages
  # are stored as strings, exactly as reported by the AppController.
  cpu = ndb.StringProperty()
  memory = ndb.StringProperty()
  disk = ndb.StringProperty()
  roles = ndb.StringProperty(repeated=True)
class RequestInfo(ndb.Model):
  """ A Datastore Model that stores a single measurement of the average number
  of requests per second that reach a Google App Engine application.

  Fields:
    app_id: A string, the application identifier.
    timestamp: The date and time when the AppController took the measurement
      of how many requests access haproxy for an App Engine app.
    num_of_requests: The average number of requests per second that reached
      haproxy for a Google App Engine application.
  """
  # The only required field: every measurement must name its application.
  app_id = ndb.StringProperty(required=True)
  timestamp = ndb.DateTimeProperty()
  num_of_requests = ndb.FloatProperty()
class AppStatus(ndb.Model):
  """ A Datastore Model that contains information about where an application
  hosted in AppScale can be located, to display to users.

  Fields:
    name: The application ID associated with this Google App Engine app.
    url: A URL that points to an nginx server, which serves a full proxy to
      this Google App Engine app.
  """
  # Kept in sync with the AppController by update_application_info(); url is
  # None while the app is still loading.
  name = ndb.StringProperty()
  url = ndb.StringProperty()
class UserInfo(ndb.Model):
  """ A Datastore Model that contains information about users who have signed up
  for accounts in this AppScale deployment.

  Fields:
    id: A str that contains the e-mail address the user signed up with. This
      field isn't explicitly defined because all ndb.Models have a str id that
      uniquely identifies them in the Datastore.
    is_user_cloud_admin: A bool that indicates if the user is authorized to
      perform any action on this AppScale cloud (e.g., remove any app, view all
      logs).
    can_upload_apps: A bool that indicates if the user is authorized to upload
      Google App Engine applications to this AppScale cloud via the web
      interface.
    owned_apps: A list of strs, where each str represents an application ID
      that the user has administrative rights on.
  """
  # All three fields mirror the UserAppServer and are refreshed by
  # AppDashboardData.update_users().
  is_user_cloud_admin = ndb.BooleanProperty()
  can_upload_apps = ndb.BooleanProperty()
  owned_apps = ndb.StringProperty(repeated=True)
class InstanceInfo(ndb.Model):
  """ A Datastore Model that contains information about AppServer processes that
  are running Google App Engine applications in this AppScale deployment.

  Fields:
    appid: A str that names that application ID this instance is running an app
      for. We avoid setting the appid as the Model's id here because multiple
      AppServers can run for the same appid.
    host: A str that names the IP address or FQDN of the machine that runs this
      instance.
    port: An int that indicates what port this AppServer process is bound to
      on the given hostname. Note that this port is firewalled off to outside
      traffic, so users cannot access the AppServer by visiting host:port in a
      browser.
    language: A str that indicates if this instance is running a Python, Java,
      Go, or PHP App Engine application.
  """
  appid = ndb.StringProperty()
  host = ndb.StringProperty()
  port = ndb.IntegerProperty()
  language = ndb.StringProperty()
class AppDashboardData():
  """ AppDashboardData leverages ndb (which itself utilizes Memcache and the
  Datastore) to implement a cache in front of SOAP-exposed services provided
  by the AppController. """
  # The name of the key that we store globally accessible Dashboard information
  # in (the single DashboardDataRoot entity).
  ROOT_KEYNAME = 'AppDashboard'
  # The port that the AppMonitoring service runs on, by default.
  MONITOR_PORT = 8050
  # The port that the Celery Flower service runs on, by default.
  FLOWER_PORT = 5555
  # The sentinel app name that indicates that no apps are running on a given
  # machine.
  NO_APPS_RUNNING = "none"
def __init__(self, helper=None):
""" Creates a new AppDashboard, which will cache SOAP-exposed information
provided to us by the AppDashboardHelper.
Args:
helper: An AppDashboardHelper, which will perform SOAP calls to the
AppController whenever the AppDashboardData needs to update its caches.
If None is provided here, then the AppDashboardData will create a new
AppDashboardHelper to talk to the AppController.
"""
self.helper = helper or AppDashboardHelper()
def get_by_id(self, model, key_name):
""" Retrieves an object from the datastore, referenced by its keyname.
ndb does provide a method of the same name that does this, but we ran into
issues mocking out both ModelName() and ModelName.get_by_id() in the same
unit test, so using this level of indirection lets us mock out both without
issues.
Args:
model: The ndb.Model that the requested object belongs to.
key_name: A str that corresponds to the the Model's key name.
Returns:
The object with the given keyname, or None if that object does not exist.
"""
return model.get_by_id(key_name)
def get_all(self, obj, keys_only=False):
""" Retrieves all objects from the datastore for a given model, or all of
the keys for those objects.
Args:
model: The ndb.Model that the requested object belongs to.
keys_only: A bool that indicates that only keys should be returned,
instead of the actual objects.
Returns:
A list of keys (if keys_only is True), or a list of objects in the given
model (if keys_only is False).
"""
return obj.query().fetch(keys_only=keys_only)
def update_all(self):
""" Queries the AppController to learn about the currently running
AppScale deployment.
This method stores all information it learns about this deployment in
the Datastore, to speed up future accesses to this data.
"""
self.update_head_node_ip()
self.update_database_info()
self.update_api_status()
self.update_status_info()
self.update_application_info()
self.update_users()
def get_monitoring_url(self):
""" Retrieves the URL where the AppMonitoring web service can be found in
this AppScale deployment (typically on the login node).
Returns:
A str that contains a URL where low-level monitoring information is
displayed to users.
"""
return "http://{0}:{1}".format(self.get_head_node_ip(), self.MONITOR_PORT)
def get_flower_url(self):
""" Retrieves the URL where the Celery Flower web service can be found in
this AppScale deployment (typically on the login node).
Returns:
A str that contains a URL where low-level monitoring information is
displayed to users.
"""
return "http://{0}:{1}".format(self.get_head_node_ip(), self.FLOWER_PORT)
def get_head_node_ip(self):
""" Retrieves the IP address or FQDN where the machine running the
shadow service can be found, via the Datastore.
Returns:
A str containing the IP address or FQDN of the shadow node.
"""
dashboard_root = self.get_by_id(DashboardDataRoot, self.ROOT_KEYNAME)
if dashboard_root and dashboard_root.head_node_ip is not None:
return dashboard_root.head_node_ip
else:
return self.update_head_node_ip()
  def update_head_node_ip(self):
    """ Updates the Datastore with the IP address or FQDN of the node running
    the shadow service.

    This update is only performed if there is no data in the Datastore about the
    current location of the head node, as this is unlikely to dynamically change
    at this time.

    Returns:
      A str containing the IP address or FQDN of the shadow node, or None if
      there was an error updating the head node's IP address.
    """
    # Fast path: a cached value exists, so skip the AppController call.
    dashboard_root = self.get_by_id(DashboardDataRoot, self.ROOT_KEYNAME)
    if dashboard_root and dashboard_root.head_node_ip is not None:
      return dashboard_root.head_node_ip
    try:
      # Create the singleton root entity on first use.
      if dashboard_root is None:
        dashboard_root = DashboardDataRoot(id = self.ROOT_KEYNAME)
      # Ask the AppController which machine holds the 'shadow' role.
      dashboard_root.head_node_ip = self.helper.get_host_with_role('shadow')
      dashboard_root.put()
      return dashboard_root.head_node_ip
    except Exception as err:
      # Best-effort: log the failure and report no IP rather than crashing
      # the dashboard page.
      logging.exception(err)
      return None
def get_api_status(self):
""" Retrieves the current status of Google App Engine APIs in this AppScale
deployment from the Datastore.
Returns:
A dict, where each key is the name of an API (a str), and each value
indicates if the API is running, has failed, or is in an unknown state
(also a str).
"""
return dict((api.key.id(), api.status) for api in self.get_all(ApiStatus))
  def update_api_status(self):
    """ Updates the Datastore with the newest information about the health of
    the Google App Engine APIs available in this AppScale deployment, by
    contacting the AppController. """
    try:
      acc = self.helper.get_appcontroller_client()
      updated_status = acc.get_api_status()
      updated_datastore_entries = []
      for api_name, api_status in updated_status.iteritems():
        store = self.get_by_id(ApiStatus, api_name)
        # NOTE(review): when an entry exists with an unchanged status, the
        # else branch still builds a fresh entity with the same id and value,
        # causing a redundant Datastore write -- confirm whether intended.
        if store and store.status != api_status:
          store.status = api_status
          updated_datastore_entries.append(store)
        else:
          store = ApiStatus(id = api_name, status = api_status)
          updated_datastore_entries.append(store)
      # Persist all changes in a single batch round-trip.
      ndb.put_multi(updated_datastore_entries)
    except Exception as err:
      # Best-effort cache refresh: failures are logged and swallowed.
      logging.exception(err)
def get_status_info(self):
""" Retrieves the current status of each machine in this AppScale deployment
from the Datastore.
Returns:
A list of dicts, where each dict contains information about one machine
in this AppScale deployment.
"""
servers = self.get_all(ServerStatus)
return [{'ip' : server.key.id(), 'cpu' : server.cpu,
'memory' : server.memory, 'disk' : server.disk, 'roles' : server.roles,
'key' : server.key.id().translate(None, '.') } for server in servers]
  def update_status_info(self):
    """ Queries the AppController to get status information for all servers in
    this deployment, storing it in the Datastore for later viewing.
    """
    try:
      nodes = self.helper.get_appcontroller_client().get_stats()
      updated_statuses = []
      for node in nodes:
        # One ServerStatus entity per machine, keyed by its IP.
        status = self.get_by_id(ServerStatus, node['ip'])
        if status:
          # Make sure that at least one field changed before we decide to
          # update this ServerStatus.
          if status.cpu != str(node['cpu']) or \
            status.memory != str(node['memory']) or \
            status.disk != str(node['disk']) or status.roles != node['roles']:
            status.cpu = str(node['cpu'])
            status.memory = str(node['memory'])
            status.disk = str(node['disk'])
            status.roles = node['roles']
            updated_statuses.append(status)
        else:
          # First sighting of this machine: create its entity.
          status = ServerStatus(id = node['ip'])
          status.cpu = str(node['cpu'])
          status.memory = str(node['memory'])
          status.disk = str(node['disk'])
          status.roles = node['roles']
          updated_statuses.append(status)
      # Persist all changed/new entities in one batch.
      ndb.put_multi(updated_statuses)
    except Exception as err:
      # Best-effort cache refresh: failures are logged and swallowed.
      logging.exception(err)
def get_database_info(self):
""" Retrieves the name of the database used to implement the Datastore API
in this AppScale deployment, as well as the number of replicas stored for
each piece of data.
Returns:
A dict containing the name of the database used (a str), as well as the
number of replicas for each piece of data (an int).
"""
dashboard_root = self.get_by_id(DashboardDataRoot, self.ROOT_KEYNAME)
if dashboard_root and dashboard_root.table is not None and \
dashboard_root.replication is not None:
return {
'table' : dashboard_root.table,
'replication' : dashboard_root.replication
}
else:
return self.update_database_info()
  def update_database_info(self):
    """ Queries the AppController for information about what datastore is used
    to implement support for the Google App Engine Datastore API, placing this
    info in the Datastore for later viewing.

    This update is only performed if there is no data in the Datastore about the
    current location of the head node, as this is unlikely to dynamically change
    at this time.

    Returns:
      A dict containing the name of the database used (a str), as well as the
      number of replicas for each piece of data (an int).
    """
    # Fast path: a complete cached record already exists.
    dashboard_root = self.get_by_id(DashboardDataRoot, self.ROOT_KEYNAME)
    if dashboard_root and dashboard_root.table is not None and \
      dashboard_root.replication is not None:
      return {
        'table' : dashboard_root.table,
        'replication' : dashboard_root.replication
      }
    try:
      acc = self.helper.get_appcontroller_client()
      db_info = acc.get_database_information()
      # Create the singleton root entity on first use.
      if dashboard_root is None:
        dashboard_root = DashboardDataRoot(id = self.ROOT_KEYNAME)
      dashboard_root.table = db_info['table']
      dashboard_root.replication = int(db_info['replication'])
      dashboard_root.put()
      return {
        'table' : dashboard_root.table,
        'replication' : dashboard_root.replication
      }
    except Exception as err:
      # Best-effort: log and fall back to placeholder values so callers
      # always receive a well-formed dict.
      logging.exception(err)
      return {
        'table' : 'unknown',
        'replication' : 0
      }
def get_application_info(self):
""" Retrieves a list of Google App Engine applications running in this
AppScale deployment, along with the URL that users can access them at.
Returns:
A dict, where each key is a str indicating the name of a Google App Engine
application, and each value is either a str, indicating the URL where the
application is running, or None, if the application has been uploaded but
is not yet running (e.g., it is loading).
"""
return dict((app.name, app.url) for app in self.get_all(AppStatus))
  def delete_app_from_datastore(self, app, email=None):
    """ Removes information about the named app from the datastore and, if
    necessary, the list of applications that this user owns.

    Args:
      app: A str that corresponds to the appid of the app to delete.
      email: A str that indicates the e-mail address of the administrator of
        this application, or None if the currently logged-in user is the admin.
    Returns:
      A UserInfo object for the user with the specified e-mail address, or if
      None was provided, the currently logged in user.
    """
    # No e-mail given: fall back to the currently logged-in user, if any.
    if email is None:
      user = users.get_current_user()
      if not user:
        return None
      email = user.email()
    try:
      # Drop the app's cached status entity, if present.
      app_status = self.get_by_id(AppStatus, app)
      if app_status:
        app_status.key.delete()
      # Remove the app from the owner's list of administered apps.
      user_info = self.get_by_id(UserInfo, email)
      if user_info:
        if app in user_info.owned_apps:
          user_info.owned_apps.remove(app)
          user_info.put()
        return user_info
      # NOTE(review): when no UserInfo exists for this email the method falls
      # through and returns None implicitly -- confirm callers expect that.
    except Exception as err:
      # Best-effort: log the failure and report no user.
      logging.exception(err)
      return None
  def update_application_info(self):
    """ Queries the AppController for information about which Google App Engine
    applications are currently running, and if they are done loading, the URL
    that they can be accessed at, storing this info in the Datastore for later
    viewing.

    Returns:
      A dict, where each key is a str indicating the name of a Google App Engine
      application running in this deployment, and each value is either a str
      indicating the URL that the app can be found at, or None, if the
      application is still loading.
    """
    try:
      status_on_all_nodes = self.helper.get_status_info()
      app_names_and_urls = {}
      if not status_on_all_nodes:
        return {}
      # Merge the per-node app lists into one name -> URL mapping.
      for status in status_on_all_nodes:
        for app, done_loading in status['apps'].iteritems():
          # Skip the sentinel entry that means "no apps on this node".
          if app == self.NO_APPS_RUNNING:
            continue
          if done_loading:
            try:
              app_names_and_urls[app] = "http://{0}:{1}".format(
                self.helper.get_login_host(), self.helper.get_app_port(app))
            except AppHelperException:
              # Port lookup failed: treat the app as still loading.
              app_names_and_urls[app] = None
          else:
            app_names_and_urls[app] = None
      # To make sure that we only update apps that have been recently uploaded
      # or removed, we grab a list of all the apps that were running before we
      # asked the AppController and compare it against the list of apps that the
      # AppController reports are now running.
      all_apps = self.get_all(AppStatus)
      all_app_names_were_running = [app.key.id() for app in all_apps]
      all_app_names_are_running = [app for app in app_names_and_urls.keys()]
      # Delete any apps that are no longer running.
      app_names_to_delete = []
      for app_name in all_app_names_were_running:
        if app_name not in all_app_names_are_running:
          app_names_to_delete.append(app_name)
        elif not app_names_and_urls[app_name]:
          # Also drop cached entries for apps that fell back to "loading".
          app_names_to_delete.append(app_name)
      if app_names_to_delete:
        apps_to_delete = []
        for app in all_apps:
          if app.name in app_names_to_delete:
            apps_to_delete.append(app)
        # NOTE(review): ndb.delete_multi normally expects keys, but entities
        # are passed here -- confirm this works against this ndb version.
        ndb.delete_multi(apps_to_delete)
      # Add in new apps that are now running.
      app_names_to_add = []
      for app_name in all_app_names_are_running:
        if app_name not in all_app_names_were_running:
          app_names_to_add.append(app_name)
        elif app_names_and_urls[app_name]:
          app_names_to_add.append(app_name)
      if app_names_to_add:
        apps_to_add = [AppStatus(id=app, name=app, url=app_names_and_urls[app])
          for app in app_names_to_add]
        ndb.put_multi(apps_to_add)
      return app_names_and_urls
    except Exception as err:
      # Best-effort: log the failure and report no apps.
      logging.exception(err)
      return {}
def update_users(self):
""" Queries the UserAppServer for information every user account registered
in this AppScale deployment, storing this info in the Datastore for later
viewing.
Returns:
A list of UserInfo objects, where each UserInfo corresponds to a user
account registered in this AppScale deployment. This list will be empty if
there was a problem accessing user information from the UserAppServer.
"""
user_list = []
try:
all_users_list = self.helper.list_all_users()
users_to_update = []
for email in all_users_list:
user_info = self.get_by_id(UserInfo, email)
if user_info:
# Only update the model in the Datastore if one of the fields has
# changed.
is_user_cloud_admin = self.helper.is_user_cloud_admin(email)
can_upload_apps = self.helper.can_upload_apps(email)
owned_apps = self.helper.get_owned_apps(email)
if user_info.is_user_cloud_admin != is_user_cloud_admin or \
user_info.can_upload_apps != can_upload_apps or \
user_info.owned_apps != owned_apps:
user_info.is_user_cloud_admin = is_user_cloud_admin
user_info.can_upload_apps = can_upload_apps
user_info.owned_apps = owned_apps
users_to_update.append(user_info)
# Either way, add the user's info to the list of all user's info.
user_list.append(user_info)
else:
user_info = UserInfo(id = email)
user_info.is_user_cloud_admin = self.helper.is_user_cloud_admin(email)
user_info.can_upload_apps = self.helper.can_upload_apps(email)
user_info.owned_apps = self.helper.get_owned_apps(email)
users_to_update.append(user_info)
user_list.append(user_info)
ndb.put_multi(users_to_update)
return user_list
except Exception as err:
logging.exception(err)
return []
def get_owned_apps(self):
""" Queries the UserAppServer to see which Google App Engine applications
the currently logged in user has administrative permissions on.
Returns:
A list of strs, where each str corresponds to an appid that this user
can administer. Returns an empty list if this user isn't logged in.
"""
user = users.get_current_user()
if not user:
return []
email = user.email()
try:
user_info = self.get_by_id(UserInfo, email)
if user_info:
return user_info.owned_apps
else:
return []
except Exception as err:
logging.exception(err)
return []
def is_user_cloud_admin(self):
""" Queries the UserAppServer to see if the currently logged in user has the
authority to administer this AppScale deployment.
Returns:
True if the currently logged in user is a cloud administrator, and False
otherwise (or if the user isn't logged in).
"""
user = users.get_current_user()
if not user:
return False
try:
user_info = self.get_by_id(UserInfo, user.email())
if user_info:
return user_info.is_user_cloud_admin
else:
return False
except Exception as err:
logging.exception(err)
return False
def can_upload_apps(self):
""" Queries the UserAppServer to see if the currently logged in user has the
authority to upload Google App Engine applications on this AppScale
deployment.
Returns:
True if the currently logged in user can upload Google App Engine
applications, and False otherwise (or if the user isn't logged in).
"""
user = users.get_current_user()
if not user:
return False
try:
user_info = self.get_by_id(UserInfo, user.email())
if user_info:
return user_info.can_upload_apps
else:
return False
except Exception as err:
logging.exception(err)
return False
| [
2,
279,
2645,
600,
25,
15560,
12,
19662,
28,
54,
15,
36809,
198,
2,
279,
2645,
600,
25,
15560,
12,
19662,
28,
36,
11442,
18,
198,
198,
11748,
18931,
198,
11748,
25064,
198,
6738,
23645,
13,
1324,
18392,
13,
2302,
1330,
299,
9945,
... | 2.810956 | 8,580 |
from .._factorial import factorial
def number_of_subsets(
        set_size: int,
        subset_size: int,
        order_matters: bool = False,
        can_reselect: bool = False
):
    """
    Counts the ways to choose ``subset_size`` items from a set of
    ``set_size`` possibilities.

    :param set_size:
        The number of items to select from.
    :param subset_size:
        The number of items to select.
    :param order_matters:
        Whether selections of the same items but in a different
        selection-order are considered distinct subsets.
    :param can_reselect:
        Whether the same item can appear more than once in a subset.
    :return:
        The number of possible subsets that could be selected.
    :raises ArithmeticError:
        If either size is negative.
    """
    # Sizes can't be negative.
    if set_size < 0:
        raise ArithmeticError(f"Can't have a set of {set_size} items")
    if subset_size < 0:
        raise ArithmeticError(f"Can't have a subset of {subset_size} items")

    # The empty set is the one and only subset of size zero.
    if subset_size == 0:
        return 1

    # With no items to draw from, no non-empty subset is possible.
    if set_size == 0:
        return 0

    if can_reselect:
        if order_matters:
            # Each of the k slots independently takes any of the n values.
            return set_size ** subset_size
        # Multiset coefficient: (n, k) with repetition equals
        # (n + k - 1, k) without it, so adjust n and fall through to the
        # binomial computation below.
        set_size += subset_size - 1
    elif subset_size > set_size:
        # Without repetition we can't select more items than the set holds.
        return 0

    if order_matters:
        # Permutations: n! / (n - k)!  (falling factorial).
        return factorial(set_size, set_size - subset_size)

    # Binomial coefficient n! / (k! * (n - k)!).  Cancel the larger of
    # k and n - k against n! so the fewest multiplications are performed.
    smaller, larger = sorted((subset_size, set_size - subset_size))
    return factorial(set_size, larger) // factorial(smaller)
| [
6738,
11485,
62,
22584,
5132,
1330,
1109,
5132,
628,
198,
4299,
1271,
62,
1659,
62,
7266,
28709,
7,
198,
220,
220,
220,
220,
220,
220,
220,
900,
62,
7857,
25,
493,
11,
198,
220,
220,
220,
220,
220,
220,
220,
24637,
62,
7857,
25,
... | 2.563034 | 936 |
# Analysis of a text: occurrences of links
from re import *

# Main program
# NOTE(review): linkfind() is not defined in this excerpt; it is expected
# to be defined earlier in the file and to return an iterable of the link
# strings found in the named file.
linkliste = linkfind("LICENSE.txt")
print("Links in der Python-LICENSE-Datei: ")
for link in linkliste:
    print(link)

# 1 The sequence "?:" at the start of the expression "(?:html?|/)" is
# 1 necessary so that "findall()" returns the entire matching substring
# 1 (it marks the group as non-capturing).
| [
2,
16213,
325,
304,
1127,
8255,
274,
25,
569,
967,
2002,
268,
18042,
21691,
201,
198,
201,
198,
6738,
302,
1330,
1635,
201,
198,
201,
198,
201,
198,
2,
39,
559,
457,
23065,
76,
201,
198,
8726,
4868,
68,
796,
2792,
19796,
7203,
43,... | 2.240741 | 162 |
import unittest

from app.parOimpar import verificar

# Convention used by verificar(), per the original (Portuguese) comment:
# IMPAR (odd) = False, PAR (even) = True.
# NOTE(review): no TestCase classes are visible in this excerpt, so
# unittest.main() discovers nothing here; the tests exercising verificar()
# are presumably defined elsewhere in the file.
if __name__ == '__main__':
    unittest.main()
| [
11748,
555,
715,
395,
201,
198,
6738,
598,
13,
1845,
46,
320,
1845,
1330,
3326,
811,
283,
201,
198,
201,
198,
201,
198,
220,
220,
220,
1303,
30023,
1503,
796,
26563,
837,
29463,
796,
26751,
201,
198,
201,
198,
201,
198,
361,
11593,
... | 2.265625 | 64 |
# Copyright (C) 2021 Satoshi Konno. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import argparse
from uecho import LocalNode, Property, ObjectRequestHandler
from uecho.std import StandardDevice
import uecho.log as log
if __name__ == '__main__':
    # Build the command-line interface.  (The previous module-level
    # ``args = sys.argv`` assignment was dead code: it was unconditionally
    # overwritten by ``parser.parse_args()`` below and never read, so it
    # has been removed.)
    parser = argparse.ArgumentParser(description='Mono functional lighting device')
    parser.add_argument("-v", "--verbose", help="output all mandatory read properties of found nodes", action="store_true")
    parser.add_argument("-d", "--debug", help="output raw debug messages", action="store_true")

    # Quiet by default; -d enables raw debug output.
    log.setLevel(log.ERROR)

    args = parser.parse_args()
    if args.debug:
        log.setLevel(log.DEBUG)

    # NOTE(review): MonoLightNode is not defined in this excerpt --
    # presumably a node class built on uecho's LocalNode; confirm it is
    # defined or imported earlier in the file.
    node = MonoLightNode()
    node.start()

    # Keep the main thread alive until Ctrl-C, then shut the node down.
    try:
        while True:
            time.sleep(1.0)
    except KeyboardInterrupt:
        node.stop()
| [
2,
15069,
357,
34,
8,
33448,
40824,
17431,
3919,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
118... | 3.197216 | 431 |
import getopt
import logging
import re
import time
from models import Models
from classes.Identifier import Identifier
from services.YahooShopping import YahooShopping
from services.AmazonScraper import AmazonScraper, AccessBlocked
from utils.common import get_db_instance, get_yahoo_appid, \
get_amazon_scraping_proxy, get_logger
# Minimum delay between successive Amazon requests while scraping --
# presumably in seconds; confirm against where this constant is consumed.
INTERVAL_AMAZON_REQUEST = 10
| [
11748,
651,
8738,
198,
11748,
18931,
198,
11748,
302,
198,
11748,
640,
198,
198,
6738,
4981,
1330,
32329,
198,
6738,
6097,
13,
33234,
7483,
1330,
11440,
7483,
198,
6738,
2594,
13,
56,
12992,
2484,
33307,
1330,
16551,
2484,
33307,
198,
6... | 3.47619 | 105 |
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
from .logger import Logger
from .argparser import get_common_parser, parse_with_config
from .metrics import Metrics, accuracy
from .ipu_settings import inference_settings, train_settings
from .distributed import handle_distributed_settings, init_popdist, allreduce_values, synchronize_throughput_values, synchronize_latency_values
from .test_tools import get_train_accuracy, get_test_accuracy, run_script, get_max_thoughput, \
download_images, get_models, get_cifar10_dataset, get_current_interpreter_executable
| [
2,
15069,
357,
66,
8,
12131,
29681,
7295,
12052,
13,
1439,
2489,
10395,
13,
198,
6738,
764,
6404,
1362,
1330,
5972,
1362,
198,
6738,
764,
853,
48610,
1330,
651,
62,
11321,
62,
48610,
11,
21136,
62,
4480,
62,
11250,
198,
6738,
764,
4... | 3.121053 | 190 |
from SLIT import Lens
import pyfits as pf
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
from scipy import signal as scp
from SLIT import wave_transform as mw
import time
from scipy import signal as scp
import SLIT as slit
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import warnings
warnings.simplefilter("ignore")

# Input images: the unlensed source light profile and the lens-galaxy
# light profile.
S = pf.open('../Files/source.fits')[0].data
G = pf.open('../Files/Galaxy.fits')[0].data

## Sizes in image and source planes
nt1 = 100
nt2 = 100
size = 1

# Mass profile of the lens
kappa = pf.open('../Files/kappa.fits')[0].data
Fkappa = Lens.F(kappa, nt1, nt2, size, nt1/2., nt2/2.)
lensed = slit.lens_one(Fkappa, nt1, nt2, size)

# Levels for normalisation of the starlet coefficients.
# NOTE(review): slit.level() is called with (nt1, nt1); nt2 may have been
# intended for the second argument.  Harmless here since nt1 == nt2 == 100.
lev = slit.level(nt1, nt1)

# Starlet transforms of the lens and source in their respective planes
wG = mw.wave_transform(G, lvl=6, newwave=1)/lev
wS = mw.wave_transform(S, lvl=6, newwave=1)/lev

# Lensed source
FS = Lens.source_to_image(S, nt1, nt2, Fkappa)
# Lens light mapped back ("delensed") to the source plane
FG = Lens.image_to_source(G, size, Fkappa, lensed=lensed)
# Starlet transform of the unlensed lens
wFG = mw.wave_transform(FG, 6, newwave=1)/lev
# Starlet transform of the lensed source
wFS = mw.wave_transform(FS, 6, newwave=1)/lev

# NOTE(review): error_rec_from() is not defined in this excerpt -- it is
# expected to return the reconstruction error when keeping only the i-th
# fraction of the largest coefficients (wave=1 -> operate on starlet
# coefficients); confirm against its definition elsewhere in the project.

# Reconstruction errors for each light profile as a function of the
# number of coefficients kept.
error_wS = np.zeros(1000)
error_S = np.zeros(1000)
error_wFS = np.zeros(1000)
error_G = np.zeros(1000)
error_wG = np.zeros(1000)
error_wFG = np.zeros(1000)

# BUG FIX: the original loop iterated over np.linspace(0, 999, 1000), which
# yields *float* values; using them as array indices is an error on modern
# NumPy (deprecated since 1.11, removed in 1.24).  range(1000) visits the
# same values as integers.
for i in range(1000):
    error_wS[i] = error_rec_from(wS, i, wave=1)
    error_S[i] = error_rec_from(S, i)
    error_wFS[i] = error_rec_from(wFS, i, wave=1)
    error_G[i] = error_rec_from(G, i)
    error_wG[i] = error_rec_from(wG, i, wave=1)
    error_wFG[i] = error_rec_from(wFG, i, wave=1)

print('NLA on the source at 10%: ', error_wS[100]/np.max(error_wS))
print('NLA on the lens at 10%: ', error_wG[100]/np.max(error_wG))
print('NLA on the lensed source at 10%: ', error_wFS[100]/np.max(error_wFS))
print('NLA on the delensed lens at 10%: ', error_wFG[100]/np.max(error_wFG))

# Display: normalised non-linear approximation (NLA) error curves, each
# figure with a log-scale inset zoomed on the 20-100% range.
plt.figure(1)
plt.plot(np.linspace(0, 100, 1000), error_wS/np.max(error_wS), 'r', label='Source in starlet space', linewidth=3)
plt.plot(np.linspace(0, 100, 1000), error_wFG/np.max(error_wFG), 'c', label='Lens in source plane in starlet space', linewidth=3)
plt.xlabel('percentage of coefficients used in reconstruction', fontsize=25)
plt.ylabel('Error on reconstruction', fontsize=25)
plt.title('Non-linear approximation error in source plane', fontsize=25)
plt.legend(fontsize=25)
a = plt.axes([0.4, 0.2, 0.45, 0.4])
plt.semilogy(np.linspace(0, 100, 1000), (error_wFG/np.max(error_wFG)), 'c', linewidth=3)
plt.semilogy(np.linspace(0, 100, 1000), error_wS/np.max(error_wS), 'r', linewidth=3)
plt.xlim(20, 100)

plt.figure(2)
plt.plot(np.linspace(0, 100, 1000), error_wG/np.max(error_wG), 'b', label='Galaxy in starlet space', linewidth=3)
plt.plot(np.linspace(0, 100, 1000), error_wFS/np.max(error_wFS), 'm', label='Lensed source in starlet space', linewidth=3)
plt.xlabel('percentage of coefficients used in reconstruction', fontsize=25)
plt.ylabel('Error on reconstruction', fontsize=25)
plt.title('Non-linear approximation error in lens plane', fontsize=25)
plt.legend(fontsize=25)
a = plt.axes([0.4, 0.2, 0.45, 0.4])
plt.semilogy(np.linspace(0, 100, 1000), (error_wFS/np.max(error_wFS)), 'm', linewidth=3)
plt.semilogy(np.linspace(0, 100, 1000), error_wG/np.max(error_wG), 'b', linewidth=3)
plt.xlim(20, 100)

plt.show()
| [
6738,
12419,
2043,
1330,
30092,
198,
11748,
12972,
21013,
355,
279,
69,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
220,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
11215,
355,
12067,
198,
67... | 2.434377 | 1,501 |
######## O.K
# str() converts a number into a string
######## O.K
# E.g.: s = banana returns bana
######## O.K
######## O.K
# NOTE(review): main() is not visible in this excerpt; it must be defined
# earlier in the file for this entry point to work.
if __name__ == '__main__':
    main()
| [
7804,
46,
13,
42,
201,
198,
2,
965,
3419,
6718,
660,
299,
21356,
647,
78,
795,
4731,
220,
201,
198,
220,
220,
220,
220,
201,
198,
7804,
46,
13,
42,
201,
198,
2,
1475,
25,
264,
796,
25996,
1005,
1211,
64,
275,
2271,
201,
198,
2... | 2.011364 | 88 |
#
# Copyright (c) 2021 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
import aiohttp
import sys
from s3replicationcommon.s3_common import S3RequestState
from s3replicationcommon.timer import Timer
from s3replicationcommon.aws_v4_signer import AWSV4Signer
from s3replicationcommon.log import fmt_reqid_log
| [
2,
198,
2,
15069,
357,
66,
8,
33448,
1001,
37861,
8987,
11419,
290,
14,
273,
663,
6708,
2403,
689,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
... | 3.578755 | 273 |
#from pivottablejs import pivot_ui

import dash
import dash_core_components as dcc
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_pivottable
from django_plotly_dash import DjangoDash

#app = dash.Dash(__name__)
app = DjangoDash('efiga_pivot_app')
#app.scripts.config.serve_locally = True
#app.css.config.serve_locally = True

from py2neo import Graph
import pandas as pd

# Pull every EfigaRecord node out of Neo4j into a flat DataFrame.
# NOTE(review): the Neo4j credentials are hard-coded -- consider moving
# them to Django settings or environment variables.
graph = Graph("bolt://localhost:7687", auth=("neo4j", "test"))
cursor = graph.run("MATCH (a:EfigaRecord) RETURN a")
rs = [dict(record.values(0)[0]) for record in cursor]
df = pd.DataFrame(rs)
#pivot_ui(df,outfile_path="pivottablejs.html")

app.title = 'Efiga Data'
app.layout = html.Div([
    dash_pivottable.PivotTable(
        id='table',
        data=df.to_dict('records'),
        cols=[],
        colOrder="key_a_to_z",
        rows=[],
        rowOrder="key_a_to_z",
        rendererName="Table",
        aggregatorName="Count",
        vals=[],
        valueFilter={}
    ),
    # BUG FIX: the layout previously contained two dcc.Markdown components
    # with the same id ('output'); duplicate component ids make Dash raise
    # a duplicate-id error at runtime.  Keep a single output component.
    dcc.Markdown(
        id='output'
    )
])


# BUG FIX: the @app.callback decorator had no function attached, which is
# a SyntaxError (a decorator must be immediately followed by a def).
# Attach a function that echoes the pivot table's current configuration
# into the 'output' Markdown, mirroring the dash-pivottable reference
# example.
@app.callback(Output('output', 'children'),
              [Input('table', 'cols'),
               Input('table', 'rows'),
               Input('table', 'rowOrder'),
               Input('table', 'colOrder'),
               Input('table', 'aggregatorName'),
               Input('table', 'rendererName')])
def display_pivot_state(cols, rows, row_order, col_order, aggregator, renderer):
    """Render the pivot table's current configuration as markdown."""
    return (
        "**Pivot configuration**  \n"
        "cols: {}  \n"
        "rows: {}  \n"
        "row order: {}, col order: {}  \n"
        "aggregator: {}, renderer: {}"
    ).format(cols, rows, row_order, col_order, aggregator, renderer)


if __name__ == '__main__':
    app.run_server(debug=True)
| [
2,
6738,
16767,
1252,
540,
8457,
1330,
30355,
62,
9019,
198,
198,
11748,
14470,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
6738,
14470,
13,
45841,
3976,
1330,
23412,
11,
25235,
198,
11748,
14470,
62,
6494,
62,
55... | 2.206587 | 668 |
import numpy as np
import matplotlib.pyplot as plt
xmin, xmax = -np.pi, np.pi
x = np.arange(xmin, xmax, 0.1)

y_sin = np.sin(x)
y_cos = np.cos(x)

# Draw the two curves as stacked subplots with identical axis styling.
# NOTE(review): plot_function_name() is not defined in this excerpt; it is
# expected to be defined earlier and to label the current axes.
for row, (curve, label) in enumerate(
        [(y_sin, '$\sin(x)$'), (y_cos, '$\cos(x)$')], start=1):
    plt.subplot(2, 1, row)
    plt.plot(x, curve)
    plt.title(label)
    plt.xlim(xmin, xmax)
    plt.ylim(-1.3, 1.3)
    plot_function_name(label)

plt.tight_layout()
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
87,
1084,
11,
2124,
9806,
796,
532,
37659,
13,
14415,
11,
45941,
13,
14415,
198,
87,
796,
45941,
13,
283,
858,
7,
87,
1084,
11,
... | 1.786561 | 253 |
from hexagon.support.storage import store_user_data
from hexagon.support.hooks import HexagonHooks
from hexagon.support.hooks.hook import HookSubscription
# Identifier this plugin registers its hook subscription under --
# presumably matched by the HexagonHooks/HookSubscription machinery;
# confirm against where subscriptions are looked up.
SUBSCRIPTION_NAME = "test-plugin"
| [
6738,
17910,
1840,
13,
11284,
13,
35350,
1330,
3650,
62,
7220,
62,
7890,
198,
6738,
17910,
1840,
13,
11284,
13,
25480,
82,
1330,
22212,
1840,
39,
31085,
198,
6738,
17910,
1840,
13,
11284,
13,
25480,
82,
13,
25480,
1330,
18531,
7004,
3... | 3.410714 | 56 |
import numpy  # for numpy storage
import os  # to find files
import time  # for time to complete
from sklearn import svm
import pickle

# Wall-clock timer for the whole experiment.
start = time.time()

from sklearn.calibration import CalibratedClassifierCV
from statistics import mean

# Training data & labels: each sample is 10 ORB descriptors of 256 values,
# flattened to a single 2560-wide feature vector.
data = numpy.load("Data/1UkiORBTrainingData.npy")
data = data.reshape(1476, (10*256))
isnot = numpy.load("Data/1UkiORBTrainingDataLabels.npy")

# Test data.  wdata0 is the genre the classifier was trained on (scored as
# the positive class below); wdata1..wdata19 are the other genres it
# should reject.
wdata0 = numpy.load("Data/UkiORBTestingData.npy")
wdata0 = wdata0.reshape(174, (10*256))
wdata1 = numpy.load("Data/LReORBTestingData.npy")
wdata1 = wdata1.reshape(172, (10*256))
wdata2 = numpy.load("Data/MinORBTestingData.npy")
wdata2 = wdata2.reshape(90, (10*256))
wdata3 = numpy.load("Data/HReORBTestingData.npy")
wdata3 = wdata3.reshape(187, (10*256))
wdata4 = numpy.load("Data/EreORBTestingData.npy")
wdata4 = wdata4.reshape(197, (10*256))
wdata5 = numpy.load("Data/PopORBTestingData.npy")
wdata5 = wdata5.reshape(208, (10*256))
wdata6 = numpy.load("Data/CFPORBTestingData.npy")
wdata6 = wdata6.reshape(93, (10*256))
wdata7 = numpy.load("Data/RocORBTestingData.npy")
wdata7 = wdata7.reshape(282, (10*256))
wdata8 = numpy.load("Data/CubORBTestingData.npy")
wdata8 = wdata8.reshape(317, (10*256))
wdata9 = numpy.load("Data/NAPORBTestingData.npy")
wdata9 = wdata9.reshape(355, (10*256))
wdata10 = numpy.load("Data/NreORBTestingData.npy")
wdata10 = wdata10.reshape(358, (10*256))
wdata11 = numpy.load("Data/AExORBTestingData.npy")
wdata11 = wdata11.reshape(380, (10*256))
wdata12 = numpy.load("Data/BORBTestingData.npy")
wdata12 = wdata12.reshape(589, (10*256))
wdata13 = numpy.load("Data/ANMORBTestingData.npy")
wdata13 = wdata13.reshape(633, (10*256))
wdata14 = numpy.load("Data/SymORBTestingData.npy")
wdata14 = wdata14.reshape(638, (10*256))
wdata15 = numpy.load("Data/PImORBTestingData.npy")
wdata15 = wdata15.reshape(946, (10*256))
wdata16 = numpy.load("Data/ExpORBTestingData.npy")
wdata16 = wdata16.reshape(981, (10*256))
wdata17 = numpy.load("Data/RomORBTestingData.npy")
wdata17 = wdata17.reshape(964, (10*256))
wdata18 = numpy.load("Data/RelORBTestingData.npy")
wdata18 = wdata18.reshape(1543, (10*256))
wdata19 = numpy.load("Data/ImpORBTestingData.npy")
wdata19 = wdata19.reshape(1913, (10*256))

# Regularisation strengths to sweep: 15 values from 2^-5 to 2^9.
# (The original comment claimed "21 length from 2^-5 to 2^15", which did
# not match this list.)
cval = [0.03125, 0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512]

print("Training Test Data")

results = [0] * 19
checkagainst = [0]
falsepositive = 0
falsenegative = 0
truepositive = 0
truenegative = 0

for cavls in cval:
    # Linear SVM wrapped in a calibrator so predict_proba is available.
    machine = svm.LinearSVC(C=cavls, random_state=2, max_iter=1000000, loss="hinge")
    machine = CalibratedClassifierCV(machine, cv=3)
    machine.fit(data, isnot)

    # Mean positive-class probability on the trained genre's test set.
    checkagainst[0] = mean(machine.predict_proba(wdata0)[:, 1])  # true positive
    # BUG FIX: truepositive was initialised to 0 but never assigned, so the
    # accuracy formula below always used TP = 0 even though FN was derived
    # from the very same measurement.  Record the measured value.
    truepositive = checkagainst[0]
    falsenegative = 1 - checkagainst[0]

    # Mean positive-class probability for each of the 19 wrong genres;
    # these contribute to the false-positive mass, their complements to
    # the true-negative mass.
    results[0] = mean(machine.predict_proba(wdata1)[:, 1])
    results[1] = mean(machine.predict_proba(wdata3)[:, 1])
    results[2] = mean(machine.predict_proba(wdata4)[:, 1])
    results[3] = mean(machine.predict_proba(wdata5)[:, 1])
    results[4] = mean(machine.predict_proba(wdata6)[:, 1])
    results[5] = mean(machine.predict_proba(wdata7)[:, 1])
    results[6] = mean(machine.predict_proba(wdata8)[:, 1])
    results[7] = mean(machine.predict_proba(wdata9)[:, 1])
    results[8] = mean(machine.predict_proba(wdata10)[:, 1])
    results[9] = mean(machine.predict_proba(wdata11)[:, 1])
    results[10] = mean(machine.predict_proba(wdata12)[:, 1])
    results[11] = mean(machine.predict_proba(wdata13)[:, 1])
    results[12] = mean(machine.predict_proba(wdata14)[:, 1])
    results[13] = mean(machine.predict_proba(wdata15)[:, 1])
    results[14] = mean(machine.predict_proba(wdata16)[:, 1])
    results[15] = mean(machine.predict_proba(wdata17)[:, 1])
    results[16] = mean(machine.predict_proba(wdata18)[:, 1])
    results[17] = mean(machine.predict_proba(wdata19)[:, 1])
    results[18] = mean(machine.predict_proba(wdata2)[:, 1])

    for numbers in results:
        falsepositive = falsepositive + numbers
        truenegative = truenegative + (1 - numbers)

    # ACC = (TP+TN)/(TP+TN+FP+FN)
    accuracy = ((truepositive + truenegative) /
                (truepositive + truenegative + falsepositive + falsenegative))
    print(str(accuracy))

    # Reset the accumulators before the next C value.
    checkagainst = [0]
    falsepositive = 0
    falsenegative = 0
    truepositive = 0
    truenegative = 0

end = time.time()
print(str(round((end - start), 2)) + " seconds to complete")
| [
11748,
299,
32152,
1303,
1640,
299,
32152,
6143,
198,
11748,
28686,
1303,
1462,
1064,
3696,
198,
11748,
640,
1303,
1640,
640,
284,
1844,
198,
6738,
1341,
35720,
1330,
264,
14761,
198,
11748,
2298,
293,
198,
9688,
796,
640,
13,
2435,
341... | 2.331085 | 1,927 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import contractor.Building.models
import django.db.models.deletion
import contractor.fields
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
17195,
13,
25954,
13,
27530,
... | 3.262295 | 61 |
import requests
import string
import ephem
from math import atan2, degrees
class MPCORB:
    """A minor planet looked up in the Minor Planet Center's MPCORB.DAT.

    Downloads the matching orbit record, slices out its fixed-column
    fields, assembles an XEphem database line from them, and exposes
    geocentric and topocentric positions via PyEphem.
    """
    def __init__ (self, obj):
        """Fetch and parse the MPCORB record matching ``obj``.

        obj: designation text used to locate the record (matched as a
        substring of each MPCORB.DAT line -- see getMPC()).
        """
        self.obj = obj
        # Raw fixed-width MPCORB.DAT record for this object.
        self.objdata = self.getMPC()
        # Column slices follow the MPC's fixed-column export format:
        # packed designation, absolute magnitude H, slope G, readable
        # designation, flags.
        self.pdes = self.objdata[0:7].strip()
        self.rdes = self.objdata[166:194].strip()
        self.flags = self.objdata[161:165].strip()
        self.H = float(self.objdata[8:13].strip())
        self.G = float(self.objdata[14:19].strip())
        # Orbital elements: epoch (unpacked to M/D/Y), mean anomaly ME,
        # argument of perihelion w, ascending node O, inclination i,
        # eccentricity e, semi-major axis a, mean daily motion n.
        self.orbEl = {"epoch":self.dateUnpack(self.objdata[20:25].strip()),"ME":float(self.objdata[26:35].strip()),"w":float(self.objdata[37:46].strip()),"O":float(self.objdata[48:57].strip()),"i":float(self.objdata[59:68].strip()),"e":float(self.objdata[70:79].strip()),"a":float(self.objdata[92:103].strip()),"n":float(self.objdata[80:91].strip())}
        # XEphem "e"-type (elliptical elements) database line; fed to
        # ephem.readdb() to build the computable body.
        self.xephem = self.rdes + ",e," + str(self.orbEl["i"]) + "," + str(self.orbEl["O"]) + "," + str(self.orbEl["w"]) + "," + str(self.orbEl["a"]) + "," + str(self.orbEl["n"]) + "," + str(self.orbEl["e"]) + "," + str(self.orbEl["ME"]) + "," + self.orbEl["epoch"] + ",2000,H" + str(self.H) + "," + str(self.G)
        self.target = ephem.readdb(self.xephem)
    def getMPC(self):
        """Stream MPCORB.DAT from the MPC and return the first line
        containing self.obj; implicitly returns None if nothing matches.

        NOTE(review): ``asteroid`` is built as "(<obj>)" but never used --
        the membership test uses the bare ``self.obj``, which can match an
        unintended record; presumably ``asteroid in line`` was intended.
        """
        print("----------------------------------------------\nFetching MPCORB.DAT")
        mpcorb = requests.get("http://www.minorplanetcenter.net/iau/MPCORB/MPCORB.DAT", stream=True)
        asteroid = "(%s)" % self.obj
        for line in mpcorb.iter_lines(decode_unicode=True):
            if self.obj in line:
                # Close the streaming response as soon as a match is found.
                mpcorb.close()
                print('SUCCESS')
                return line
    def getObs(self, code):
        """Stream the MPC observatory-code list and return the line for
        ``code``; implicitly returns None if the code is not found.
        """
        print("----------------------------------------------\nFetching Observatory Data")
        obslist = requests.get("http://www.minorplanetcenter.net/iau/lists/ObsCodes.html", stream=True)
        # Trailing spaces keep e.g. code "G1" from matching "G12" etc.
        code = str(code).upper() + "     "
        for line in obslist.iter_lines(decode_unicode=True):
            if code in line:
                obslist.close()
                print('SUCCESS')
                return line
    def dateUnpack(self, packed):
        """Decode an MPC packed 5-character date (e.g. "K197N").

        Char 0 selects the century (I/J/K -> 18/19/20), chars 1-2 are the
        two-digit year, and chars 3 and 4 encode month and day via the
        base-31 digit string below (1-9, then A-V).  Returns "M/D/YYYY".
        """
        yearcode = {"I":"18","J":"19","K":"20"}
        daycode = "123456789ABCDEFGHIJKLMNOPQRSTUV"
        year = yearcode[packed[0]]+packed[1:3]
        month = daycode.index(packed[3])+1
        day = daycode.index(packed[4])+1
        return "%s/%s/%s" % (month, day, year)
    def geocentric(self, obstime):
        """Return the astrometric RA/Dec at ``obstime`` as a display string."""
        self.target.compute(obstime)
        return "RA: %s\nDec: %s" % (self.target.a_ra, self.target.a_dec)
    def topocentric(self, obs, obstime):
        """Return Alt/Az at ``obstime`` as seen from observatory ``obs``.

        The cosl/sinl columns are read from the MPC observatory record --
        presumably the parallax constants rho*cos(phi')/rho*sin(phi'), so
        atan2(sinl, cosl) yields the geocentric latitude; confirm against
        the MPC ObsCodes format.  Elevation is hard-coded to 300 m.
        """
        obsstring = self.getObs(obs)
        cosl = float(obsstring[13:21].strip())
        sinl = float(obsstring[21:29].strip())
        location = ephem.Observer()
        location.date = obstime
        location.lon = obsstring[4:13]
        location.lat = str(degrees(atan2(sinl,cosl)))
        location.elevation = 300
        self.target.compute(location)
        return "Alt: %s\nAz: %s" % (self.target.alt, self.target.az)
if __name__ == "__main__":
    # Interactive entry point: prompt for an asteroid, an MPC observatory
    # code, and a UTC timestamp, then print geocentric and topocentric
    # positions.
    print("QuickEphem v1.0 | Code by Alex Davenport\n----------------------------------------------")
    designation = input("Asteroid Designation: ")
    obs_code = input("Observatory Code: ")
    when = input("UTC (YYYY/MM/DD HH:MM:SS): ")

    # Compute both positions *before* printing the separator so the
    # "Fetching ..." progress messages appear above it.
    body = MPCORB(designation)
    geo_coords = body.geocentric(when)
    topo_coords = body.topocentric(obs_code, when)
    print("----------------------------------------------")
    print(geo_coords)
    print()
    print(topo_coords)
| [
11748,
7007,
198,
11748,
4731,
198,
11748,
2462,
4411,
198,
6738,
10688,
1330,
379,
272,
17,
11,
7370,
198,
198,
4871,
4904,
44879,
33,
25,
198,
220,
220,
220,
37227,
23579,
8841,
481,
467,
994,
986,
628,
220,
220,
220,
2644,
28724,
... | 2.231334 | 1,634 |
import tensorflow as tf
from speechrecproj.data.labels import Label
__author__ = 'Daniel Schlaug'
| [
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
4046,
8344,
1676,
73,
13,
7890,
13,
23912,
1424,
1330,
36052,
198,
198,
834,
9800,
834,
796,
705,
19962,
48467,
7493,
6,
628,
198
] | 3.15625 | 32 |
import asyncio
import json
from mashup.services.music_brainz import MusicBrainzService
from mashup.services.cover_art import CoverArtService
from mashup.services.wikipedia import WikipediaService
| [
198,
11748,
30351,
952,
198,
11748,
33918,
198,
198,
6738,
30407,
929,
13,
30416,
13,
28965,
62,
27825,
89,
1330,
7849,
44687,
89,
16177,
198,
6738,
30407,
929,
13,
30416,
13,
9631,
62,
433,
1330,
17546,
8001,
16177,
198,
6738,
30407,
... | 3.98 | 50 |
#
# PySNMP MIB module CISCO-SVC-INTERFACE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-SVC-INTERFACE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:56:35 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
VsanIndex, FcNameId = mibBuilder.importSymbols("CISCO-ST-TC", "VsanIndex", "FcNameId")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
ModuleIdentity, Counter64, IpAddress, iso, MibIdentifier, Gauge32, Integer32, Counter32, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Bits, NotificationType, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Counter64", "IpAddress", "iso", "MibIdentifier", "Gauge32", "Integer32", "Counter32", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Bits", "NotificationType", "Unsigned32")
DisplayString, RowStatus, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TextualConvention")
# Registration of the MIB module itself and its top-level OID sub-trees
# under ciscoMgmt (1.3.6.1.4.1.9.9.378).
# NOTE: this file is machine-generated by pysmi (see header comment above);
# manual edits will be lost on regeneration.
ciscoSvcInterfaceMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 378))
ciscoSvcInterfaceMIB.setRevisions(('2004-09-21 00:00',))
if mibBuilder.loadTexts: ciscoSvcInterfaceMIB.setLastUpdated('200409210000Z')
if mibBuilder.loadTexts: ciscoSvcInterfaceMIB.setOrganization('Cisco Systems Inc. ')
# Sub-tree 1: configuration/trap objects; sub-tree 2: trap prefix.
ciscoSvcInterfaceMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 378, 1))
ciscoSvcInterfaceMIBTrapPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 378, 2))
cSvcInterfaceConfiguration = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1))
cSvcInterfaceTrapObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 2))
csiNportTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 1), )
if mibBuilder.loadTexts: csiNportTable.setStatus('current')
csiNportEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 1, 1), ).setIndexNames((0, "CISCO-SVC-INTERFACE-MIB", "csiNportIfIndex"), (0, "CISCO-SVC-INTERFACE-MIB", "csiNportType"), (0, "CISCO-SVC-INTERFACE-MIB", "csiNportVsanId"))
if mibBuilder.loadTexts: csiNportEntry.setStatus('current')
csiNportIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: csiNportIfIndex.setStatus('current')
csiNportType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 1, 1, 2), NportType())
if mibBuilder.loadTexts: csiNportType.setStatus('current')
csiNportVsanId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 1, 1, 3), VsanIndex())
if mibBuilder.loadTexts: csiNportVsanId.setStatus('current')
csiNportPwwn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 1, 1, 4), FcNameId()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: csiNportPwwn.setStatus('current')
csiNportFcid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiNportFcid.setStatus('current')
csiNportState = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiNportState.setStatus('current')
csiNportDownReason = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("none", 1), ("adminDown", 2), ("ifSoftwareDown", 3), ("lineCardSwDown", 4), ("vsanDown", 5), ("inRemovalState", 6), ("ifHardwareDown", 7), ("uninitialized", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiNportDownReason.setStatus('current')
csiNportRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 1, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: csiNportRowStatus.setStatus('current')
csiSessionTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 2), )
if mibBuilder.loadTexts: csiSessionTable.setStatus('current')
csiSessionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 2, 1), ).setIndexNames((0, "CISCO-SVC-INTERFACE-MIB", "csiSessionIfIndex"), (0, "CISCO-SVC-INTERFACE-MIB", "csiSessionType"), (0, "CISCO-SVC-INTERFACE-MIB", "csiSessionVsanId"), (0, "CISCO-SVC-INTERFACE-MIB", "csiSessionId"))
if mibBuilder.loadTexts: csiSessionEntry.setStatus('current')
csiSessionIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: csiSessionIfIndex.setStatus('current')
csiSessionType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 2, 1, 2), NportType())
if mibBuilder.loadTexts: csiSessionType.setStatus('current')
csiSessionVsanId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 2, 1, 3), VsanIndex())
if mibBuilder.loadTexts: csiSessionVsanId.setStatus('current')
csiSessionId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: csiSessionId.setStatus('current')
csiSessionNportPwwn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 2, 1, 5), FcNameId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionNportPwwn.setStatus('current')
csiSessionPeerPwwn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 2, 1, 6), FcNameId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionPeerPwwn.setStatus('current')
csiSessionPeerNwwn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 2, 1, 7), FcNameId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionPeerNwwn.setStatus('current')
csiSessionPeerFcid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 2, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionPeerFcid.setStatus('current')
csiInterfaceStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 3), )
if mibBuilder.loadTexts: csiInterfaceStatsTable.setStatus('current')
csiInterfaceStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 3, 1), ).setIndexNames((0, "CISCO-SVC-INTERFACE-MIB", "csiNportIfIndex"))
if mibBuilder.loadTexts: csiInterfaceStatsEntry.setStatus('current')
csiInterfaceInFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 3, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiInterfaceInFrames.setStatus('current')
csiInterfaceInFrameRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 3, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiInterfaceInFrameRate.setStatus('current')
csiInterfaceInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 3, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiInterfaceInBytes.setStatus('current')
csiInterfaceInBytesRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 3, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiInterfaceInBytesRate.setStatus('current')
csiInterfaceOutFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 3, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiInterfaceOutFrames.setStatus('current')
csiInterfaceOutFrameRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 3, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiInterfaceOutFrameRate.setStatus('current')
csiInterfaceOutBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 3, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiInterfaceOutBytes.setStatus('current')
csiInterfaceOutBytesRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 3, 1, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiInterfaceOutBytesRate.setStatus('current')
csiNportStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 4), )
if mibBuilder.loadTexts: csiNportStatsTable.setStatus('current')
csiNportStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 4, 1), ).setIndexNames((0, "CISCO-SVC-INTERFACE-MIB", "csiNportIfIndex"), (0, "CISCO-SVC-INTERFACE-MIB", "csiNportType"), (0, "CISCO-SVC-INTERFACE-MIB", "csiNportVsanId"))
if mibBuilder.loadTexts: csiNportStatsEntry.setStatus('current')
csiNportSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 4, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiNportSessions.setStatus('current')
csiNportInFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 4, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiNportInFrames.setStatus('current')
csiNportInFrameRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 4, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiNportInFrameRate.setStatus('current')
csiNportInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 4, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiNportInBytes.setStatus('current')
csiNportInBytesRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 4, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiNportInBytesRate.setStatus('current')
csiNportOutFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 4, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiNportOutFrames.setStatus('current')
csiNportOutFrameRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 4, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiNportOutFrameRate.setStatus('current')
csiNportOutBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 4, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiNportOutBytes.setStatus('current')
csiNportOutBytesRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 4, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiNportOutBytesRate.setStatus('current')
csiSessionStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5), )
if mibBuilder.loadTexts: csiSessionStatsTable.setStatus('current')
csiSessionStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1), ).setIndexNames((0, "CISCO-SVC-INTERFACE-MIB", "csiSessionIfIndex"), (0, "CISCO-SVC-INTERFACE-MIB", "csiSessionType"), (0, "CISCO-SVC-INTERFACE-MIB", "csiSessionVsanId"), (0, "CISCO-SVC-INTERFACE-MIB", "csiSessionId"))
if mibBuilder.loadTexts: csiSessionStatsEntry.setStatus('current')
csiSessionInELSFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionInELSFrames.setStatus('current')
csiSessionInBLSFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionInBLSFrames.setStatus('current')
csiSessionInFCPCmds = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionInFCPCmds.setStatus('current')
csiSessionInFCPXferRdys = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionInFCPXferRdys.setStatus('current')
csiSessionInFCPDataFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionInFCPDataFrames.setStatus('current')
csiSessionInFCPStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionInFCPStatus.setStatus('current')
csiSessionInFCPDataBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionInFCPDataBytes.setStatus('current')
csiSessionInFCPOverRuns = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionInFCPOverRuns.setStatus('current')
csiSessionInFCPUnderRuns = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionInFCPUnderRuns.setStatus('current')
csiSessionInAborts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionInAborts.setStatus('current')
csiSessionOutELSFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionOutELSFrames.setStatus('current')
csiSessionOutBLSFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionOutBLSFrames.setStatus('current')
csiSessionOutFCPCmds = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionOutFCPCmds.setStatus('current')
csiSessionOutFCPXferRdys = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionOutFCPXferRdys.setStatus('current')
csiSessionOutFCPDataFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionOutFCPDataFrames.setStatus('current')
csiSessionOutFCPStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionOutFCPStatus.setStatus('current')
csiSessionOutFCPDataBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionOutFCPDataBytes.setStatus('current')
csiSessionOutFCPOverRuns = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionOutFCPOverRuns.setStatus('current')
csiSessionOutFCPUnderRuns = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionOutFCPUnderRuns.setStatus('current')
csiSessionOutAborts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionOutAborts.setStatus('current')
csiSessionOpenXchanges = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionOpenXchanges.setStatus('current')
csiSessionInBadFc2Drops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionInBadFc2Drops.setStatus('current')
csiSessionInBadFcPDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionInBadFcPDrops.setStatus('current')
csiSessionInFCPDataExcess = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 5, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiSessionInFCPDataExcess.setStatus('current')
csiInterfaceNwwnTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 6), )
if mibBuilder.loadTexts: csiInterfaceNwwnTable.setStatus('current')
csiInterfaceNwwnEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 6, 1), ).setIndexNames((0, "CISCO-SVC-INTERFACE-MIB", "csiNportIfIndex"))
if mibBuilder.loadTexts: csiInterfaceNwwnEntry.setStatus('current')
csiInterfaceNwwn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 6, 1, 1), FcNameId()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: csiInterfaceNwwn.setStatus('current')
csiInterfaceOperStateCause = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 1, 6, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: csiInterfaceOperStateCause.setStatus('current')
csiErrorId = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: csiErrorId.setStatus('current')
csiErrorSeqNumber = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: csiErrorSeqNumber.setStatus('current')
csiSlotNumber = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: csiSlotNumber.setStatus('current')
csiPortNumber = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: csiPortNumber.setStatus('current')
csiObjName = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 2, 5), SnmpAdminString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: csiObjName.setStatus('current')
csiErrorText = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 2, 6), SnmpAdminString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: csiErrorText.setStatus('current')
csiMachineType = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 2, 7), SnmpAdminString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: csiMachineType.setStatus('current')
csiCardSerialNo = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 2, 8), SnmpAdminString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: csiCardSerialNo.setStatus('current')
csiSwVersion = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 2, 9), SnmpAdminString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: csiSwVersion.setStatus('current')
csiSwitchName = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 2, 10), SnmpAdminString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: csiSwitchName.setStatus('current')
csiClusterName = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 2, 11), SnmpAdminString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: csiClusterName.setStatus('current')
csiNodeName = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 378, 1, 2, 12), SnmpAdminString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: csiNodeName.setStatus('current')
csiMIBTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 378, 2, 0))
csiErrorTrap = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 378, 2, 0, 1)).setObjects(("CISCO-SVC-INTERFACE-MIB", "csiErrorId"), ("CISCO-SVC-INTERFACE-MIB", "csiErrorSeqNumber"), ("CISCO-SVC-INTERFACE-MIB", "csiSlotNumber"), ("CISCO-SVC-INTERFACE-MIB", "csiPortNumber"), ("CISCO-SVC-INTERFACE-MIB", "csiObjName"), ("CISCO-SVC-INTERFACE-MIB", "csiErrorText"), ("CISCO-SVC-INTERFACE-MIB", "csiMachineType"), ("CISCO-SVC-INTERFACE-MIB", "csiCardSerialNo"), ("CISCO-SVC-INTERFACE-MIB", "csiSwVersion"), ("CISCO-SVC-INTERFACE-MIB", "csiSwitchName"), ("CISCO-SVC-INTERFACE-MIB", "csiClusterName"), ("CISCO-SVC-INTERFACE-MIB", "csiNodeName"))
if mibBuilder.loadTexts: csiErrorTrap.setStatus('current')
csiWarningTrap = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 378, 2, 0, 2)).setObjects(("CISCO-SVC-INTERFACE-MIB", "csiErrorId"), ("CISCO-SVC-INTERFACE-MIB", "csiErrorSeqNumber"), ("CISCO-SVC-INTERFACE-MIB", "csiSlotNumber"), ("CISCO-SVC-INTERFACE-MIB", "csiPortNumber"), ("CISCO-SVC-INTERFACE-MIB", "csiObjName"), ("CISCO-SVC-INTERFACE-MIB", "csiErrorText"), ("CISCO-SVC-INTERFACE-MIB", "csiMachineType"), ("CISCO-SVC-INTERFACE-MIB", "csiCardSerialNo"), ("CISCO-SVC-INTERFACE-MIB", "csiSwVersion"), ("CISCO-SVC-INTERFACE-MIB", "csiSwitchName"), ("CISCO-SVC-INTERFACE-MIB", "csiClusterName"), ("CISCO-SVC-INTERFACE-MIB", "csiNodeName"))
if mibBuilder.loadTexts: csiWarningTrap.setStatus('current')
csiInformationTrap = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 378, 2, 0, 3)).setObjects(("CISCO-SVC-INTERFACE-MIB", "csiErrorId"), ("CISCO-SVC-INTERFACE-MIB", "csiErrorSeqNumber"), ("CISCO-SVC-INTERFACE-MIB", "csiSlotNumber"), ("CISCO-SVC-INTERFACE-MIB", "csiPortNumber"), ("CISCO-SVC-INTERFACE-MIB", "csiObjName"), ("CISCO-SVC-INTERFACE-MIB", "csiErrorText"), ("CISCO-SVC-INTERFACE-MIB", "csiMachineType"), ("CISCO-SVC-INTERFACE-MIB", "csiCardSerialNo"), ("CISCO-SVC-INTERFACE-MIB", "csiSwVersion"), ("CISCO-SVC-INTERFACE-MIB", "csiSwitchName"), ("CISCO-SVC-INTERFACE-MIB", "csiClusterName"), ("CISCO-SVC-INTERFACE-MIB", "csiNodeName"))
if mibBuilder.loadTexts: csiInformationTrap.setStatus('current')
ciscoSvcMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 378, 3))
ciscoSvcMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 378, 3, 1))
ciscoSvcMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 378, 3, 2))
ciscoSvcMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 378, 3, 1, 1)).setObjects(("CISCO-SVC-INTERFACE-MIB", "csiNportGroup"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionGroup"), ("CISCO-SVC-INTERFACE-MIB", "csiInterfaceStatsGroup"), ("CISCO-SVC-INTERFACE-MIB", "csiNportStatsGroup"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionStatsGroup"), ("CISCO-SVC-INTERFACE-MIB", "csiInterfaceNwwnGroup"), ("CISCO-SVC-INTERFACE-MIB", "csiNotifObjectsGroup"), ("CISCO-SVC-INTERFACE-MIB", "cefcMgmtNotificationsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSvcMIBCompliance = ciscoSvcMIBCompliance.setStatus('current')
csiNportGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 378, 3, 2, 1)).setObjects(("CISCO-SVC-INTERFACE-MIB", "csiNportPwwn"), ("CISCO-SVC-INTERFACE-MIB", "csiNportFcid"), ("CISCO-SVC-INTERFACE-MIB", "csiNportState"), ("CISCO-SVC-INTERFACE-MIB", "csiNportDownReason"), ("CISCO-SVC-INTERFACE-MIB", "csiNportRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
csiNportGroup = csiNportGroup.setStatus('current')
csiSessionGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 378, 3, 2, 2)).setObjects(("CISCO-SVC-INTERFACE-MIB", "csiSessionNportPwwn"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionPeerPwwn"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionPeerNwwn"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionPeerFcid"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
csiSessionGroup = csiSessionGroup.setStatus('current')
csiInterfaceStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 378, 3, 2, 3)).setObjects(("CISCO-SVC-INTERFACE-MIB", "csiInterfaceInFrames"), ("CISCO-SVC-INTERFACE-MIB", "csiInterfaceInFrameRate"), ("CISCO-SVC-INTERFACE-MIB", "csiInterfaceInBytes"), ("CISCO-SVC-INTERFACE-MIB", "csiInterfaceInBytesRate"), ("CISCO-SVC-INTERFACE-MIB", "csiInterfaceOutFrames"), ("CISCO-SVC-INTERFACE-MIB", "csiInterfaceOutFrameRate"), ("CISCO-SVC-INTERFACE-MIB", "csiInterfaceOutBytes"), ("CISCO-SVC-INTERFACE-MIB", "csiInterfaceOutBytesRate"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
csiInterfaceStatsGroup = csiInterfaceStatsGroup.setStatus('current')
csiNportStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 378, 3, 2, 4)).setObjects(("CISCO-SVC-INTERFACE-MIB", "csiNportSessions"), ("CISCO-SVC-INTERFACE-MIB", "csiNportInFrames"), ("CISCO-SVC-INTERFACE-MIB", "csiNportInFrameRate"), ("CISCO-SVC-INTERFACE-MIB", "csiNportInBytes"), ("CISCO-SVC-INTERFACE-MIB", "csiNportInBytesRate"), ("CISCO-SVC-INTERFACE-MIB", "csiNportOutFrames"), ("CISCO-SVC-INTERFACE-MIB", "csiNportOutFrameRate"), ("CISCO-SVC-INTERFACE-MIB", "csiNportOutBytes"), ("CISCO-SVC-INTERFACE-MIB", "csiNportOutBytesRate"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
csiNportStatsGroup = csiNportStatsGroup.setStatus('current')
csiSessionStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 378, 3, 2, 5)).setObjects(("CISCO-SVC-INTERFACE-MIB", "csiSessionInELSFrames"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionInBLSFrames"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionInFCPCmds"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionInFCPXferRdys"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionInFCPDataFrames"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionInFCPStatus"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionInFCPDataBytes"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionInFCPOverRuns"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionInFCPUnderRuns"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionInAborts"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionOutELSFrames"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionOutBLSFrames"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionOutFCPCmds"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionOutFCPXferRdys"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionOutFCPDataFrames"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionOutFCPStatus"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionOutFCPDataBytes"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionOutFCPOverRuns"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionOutFCPUnderRuns"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionOutAborts"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionOpenXchanges"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionInBadFc2Drops"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionInBadFcPDrops"), ("CISCO-SVC-INTERFACE-MIB", "csiSessionInFCPDataExcess"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
csiSessionStatsGroup = csiSessionStatsGroup.setStatus('current')
csiInterfaceNwwnGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 378, 3, 2, 6)).setObjects(("CISCO-SVC-INTERFACE-MIB", "csiInterfaceNwwn"), ("CISCO-SVC-INTERFACE-MIB", "csiInterfaceOperStateCause"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
csiInterfaceNwwnGroup = csiInterfaceNwwnGroup.setStatus('current')
csiNotifObjectsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 378, 3, 2, 7)).setObjects(("CISCO-SVC-INTERFACE-MIB", "csiErrorId"), ("CISCO-SVC-INTERFACE-MIB", "csiErrorSeqNumber"), ("CISCO-SVC-INTERFACE-MIB", "csiSlotNumber"), ("CISCO-SVC-INTERFACE-MIB", "csiPortNumber"), ("CISCO-SVC-INTERFACE-MIB", "csiObjName"), ("CISCO-SVC-INTERFACE-MIB", "csiErrorText"), ("CISCO-SVC-INTERFACE-MIB", "csiMachineType"), ("CISCO-SVC-INTERFACE-MIB", "csiCardSerialNo"), ("CISCO-SVC-INTERFACE-MIB", "csiSwVersion"), ("CISCO-SVC-INTERFACE-MIB", "csiSwitchName"), ("CISCO-SVC-INTERFACE-MIB", "csiClusterName"), ("CISCO-SVC-INTERFACE-MIB", "csiNodeName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
csiNotifObjectsGroup = csiNotifObjectsGroup.setStatus('current')
cefcMgmtNotificationsGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 378, 3, 2, 8)).setObjects(("CISCO-SVC-INTERFACE-MIB", "csiErrorTrap"), ("CISCO-SVC-INTERFACE-MIB", "csiWarningTrap"), ("CISCO-SVC-INTERFACE-MIB", "csiInformationTrap"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cefcMgmtNotificationsGroup = cefcMgmtNotificationsGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-SVC-INTERFACE-MIB", csiSessionOutFCPDataFrames=csiSessionOutFCPDataFrames, csiSessionInFCPXferRdys=csiSessionInFCPXferRdys, csiNportStatsTable=csiNportStatsTable, csiSessionOutBLSFrames=csiSessionOutBLSFrames, csiNportOutBytes=csiNportOutBytes, csiInterfaceNwwnGroup=csiInterfaceNwwnGroup, csiSessionInFCPDataFrames=csiSessionInFCPDataFrames, csiNportType=csiNportType, csiSwitchName=csiSwitchName, csiNportGroup=csiNportGroup, csiWarningTrap=csiWarningTrap, csiSessionGroup=csiSessionGroup, csiInterfaceStatsTable=csiInterfaceStatsTable, csiInterfaceNwwn=csiInterfaceNwwn, PYSNMP_MODULE_ID=ciscoSvcInterfaceMIB, csiNotifObjectsGroup=csiNotifObjectsGroup, csiSessionInFCPUnderRuns=csiSessionInFCPUnderRuns, csiInterfaceNwwnTable=csiInterfaceNwwnTable, ciscoSvcMIBGroups=ciscoSvcMIBGroups, csiCardSerialNo=csiCardSerialNo, csiInterfaceInBytes=csiInterfaceInBytes, csiInterfaceOutFrameRate=csiInterfaceOutFrameRate, csiSessionPeerPwwn=csiSessionPeerPwwn, csiInterfaceOutBytesRate=csiInterfaceOutBytesRate, csiInterfaceInFrames=csiInterfaceInFrames, csiNportFcid=csiNportFcid, csiNportRowStatus=csiNportRowStatus, csiNportOutBytesRate=csiNportOutBytesRate, csiSessionInBadFcPDrops=csiSessionInBadFcPDrops, csiNportOutFrameRate=csiNportOutFrameRate, NportType=NportType, csiNportOutFrames=csiNportOutFrames, ciscoSvcMIBCompliance=ciscoSvcMIBCompliance, csiNportPwwn=csiNportPwwn, csiSessionOutFCPOverRuns=csiSessionOutFCPOverRuns, cSvcInterfaceConfiguration=cSvcInterfaceConfiguration, csiSessionInFCPDataBytes=csiSessionInFCPDataBytes, csiSessionPeerNwwn=csiSessionPeerNwwn, csiSessionEntry=csiSessionEntry, csiSessionTable=csiSessionTable, csiNportInBytes=csiNportInBytes, csiSessionInELSFrames=csiSessionInELSFrames, csiErrorTrap=csiErrorTrap, csiInterfaceOutFrames=csiInterfaceOutFrames, csiSessionIfIndex=csiSessionIfIndex, csiErrorSeqNumber=csiErrorSeqNumber, csiSessionInBLSFrames=csiSessionInBLSFrames, csiNportState=csiNportState, 
csiInterfaceOperStateCause=csiInterfaceOperStateCause, csiInterfaceStatsGroup=csiInterfaceStatsGroup, csiInterfaceNwwnEntry=csiInterfaceNwwnEntry, csiPortNumber=csiPortNumber, csiClusterName=csiClusterName, csiSessionStatsEntry=csiSessionStatsEntry, csiNportDownReason=csiNportDownReason, csiSessionInBadFc2Drops=csiSessionInBadFc2Drops, csiSessionOutELSFrames=csiSessionOutELSFrames, csiSlotNumber=csiSlotNumber, csiSessionOutAborts=csiSessionOutAborts, ciscoSvcInterfaceMIBTrapPrefix=ciscoSvcInterfaceMIBTrapPrefix, csiNportIfIndex=csiNportIfIndex, csiSessionInFCPCmds=csiSessionInFCPCmds, csiInformationTrap=csiInformationTrap, csiSessionInAborts=csiSessionInAborts, csiNportVsanId=csiNportVsanId, csiInterfaceInFrameRate=csiInterfaceInFrameRate, csiInterfaceOutBytes=csiInterfaceOutBytes, csiSessionOutFCPStatus=csiSessionOutFCPStatus, csiSessionNportPwwn=csiSessionNportPwwn, csiSessionInFCPDataExcess=csiSessionInFCPDataExcess, cefcMgmtNotificationsGroup=cefcMgmtNotificationsGroup, csiSessionVsanId=csiSessionVsanId, csiSwVersion=csiSwVersion, cSvcInterfaceTrapObjects=cSvcInterfaceTrapObjects, csiNportInBytesRate=csiNportInBytesRate, csiErrorText=csiErrorText, csiNportStatsGroup=csiNportStatsGroup, csiNportSessions=csiNportSessions, csiNportTable=csiNportTable, ciscoSvcMIBCompliances=ciscoSvcMIBCompliances, csiSessionOutFCPXferRdys=csiSessionOutFCPXferRdys, csiSessionOutFCPUnderRuns=csiSessionOutFCPUnderRuns, csiNportInFrames=csiNportInFrames, csiSessionStatsTable=csiSessionStatsTable, csiMIBTraps=csiMIBTraps, ciscoSvcInterfaceMIB=ciscoSvcInterfaceMIB, csiSessionId=csiSessionId, ciscoSvcMIBConformance=ciscoSvcMIBConformance, csiInterfaceStatsEntry=csiInterfaceStatsEntry, csiSessionPeerFcid=csiSessionPeerFcid, csiSessionInFCPOverRuns=csiSessionInFCPOverRuns, csiMachineType=csiMachineType, csiInterfaceInBytesRate=csiInterfaceInBytesRate, csiSessionOutFCPDataBytes=csiSessionOutFCPDataBytes, csiSessionType=csiSessionType, csiErrorId=csiErrorId, 
csiSessionOutFCPCmds=csiSessionOutFCPCmds, csiSessionStatsGroup=csiSessionStatsGroup, csiNportInFrameRate=csiNportInFrameRate, csiNportStatsEntry=csiNportStatsEntry, csiNportEntry=csiNportEntry, csiObjName=csiObjName, csiSessionInFCPStatus=csiSessionInFCPStatus, csiNodeName=csiNodeName, ciscoSvcInterfaceMIBObjects=ciscoSvcInterfaceMIBObjects, csiSessionOpenXchanges=csiSessionOpenXchanges)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
36159,
8220,
12,
50,
15922,
12,
41358,
49836,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
14... | 2.463106 | 12,807 |
"""
Copyright 2018-present, Facebook, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
| [
37811,
198,
15269,
2864,
12,
25579,
11,
3203,
11,
3457,
13,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.859375 | 192 |
'''
Describes core model of bacterio
'''
import random
from app.hexafield import HexCoords, get_step_to
from app.creatures import Predator, Bacteria
from app.rand_p import rand_p
class CoreModel(object):
    '''
    Describes core (the simplest one) model of bacterio.

    'field' is a HexafieldBase instance,
    'modelParams' is a ModelParams instance,
    'bacteriaPositions' and 'predatorPositions' are dicts mapping HexCoords
    to lists of Bacteria and Predator instances respectively.
    '''
    __slots__ = ('modelParams', 'field', 'bacteriaPositions', 'predatorPositions')

    def __init__(self, modelParams, state):
        '''
        modelParams is ModelParams,
        state is state.BacretioState that will be parsed as initial state
        '''
        self.modelParams = modelParams
        self.parse_state(state)

    def parse_state(self, state):
        '''
        Loads the field and creature positions from state (state.BacretioState).
        '''
        self.field = state.field
        self.bacteriaPositions = state.bacteriaPositions
        self.predatorPositions = state.predatorPositions

    def step(self):
        '''
        Makes one turn and updates 'bacteriaPositions' and 'predatorPositions'.

        NOTE(review): step_predators/step_bacteria are not defined in this
        class body — presumably supplied by concrete subclasses; confirm.
        '''
        self.step_predators()
        self.step_bacteria()

    def count_bacteria(self):
        '''
        Returns total amount of bacteria on the board.
        '''
        return sum(len(cell) for cell in self.bacteriaPositions.values())

    def count_predators(self):
        '''
        Returns total amount of predators on the board.
        '''
        return sum(len(cell) for cell in self.predatorPositions.values())

    def _count_creatures_within(self, positions, hexCoords, radius):
        # Shared helper: number of creatures from 'positions' located within
        # 'radius' cells of hexCoords (the set returned by field.get_all_within).
        total = 0
        for hc in self.field.get_all_within(hexCoords, radius):
            if hc in positions:
                total += len(positions[hc])
        return total

    def check_bacteria_overcrowd(self, hexCoords):
        '''
        Checks that the number of bacteria within BACT_OVERCROWD_RADIUS of
        hexCoords is less than BACT_OVERCROWD.

        A non-positive BACT_OVERCROWD disables the check (always True).
        '''
        if self.modelParams.BACT_OVERCROWD <= 0:
            return True
        count = self._count_creatures_within(
            self.bacteriaPositions, hexCoords,
            self.modelParams.BACT_OVERCROWD_RADIUS)
        return count < self.modelParams.BACT_OVERCROWD

    def check_predators_overcrowd(self, hexCoords):
        '''
        Checks that the number of predators within PR_OVERCROWD_RADIUS of
        hexCoords is less than PR_OVERCROWD.

        A non-positive PR_OVERCROWD disables the check (always True).
        '''
        if self.modelParams.PR_OVERCROWD <= 0:
            return True
        count = self._count_creatures_within(
            self.predatorPositions, hexCoords,
            self.modelParams.PR_OVERCROWD_RADIUS)
        return count < self.modelParams.PR_OVERCROWD

    def find_closest_bacteria(self, hexCoords):
        '''
        Returns the position of the closest bacteria within PR_SIGHT range,
        or None if there is none. Ties at the same range are broken randomly.
        '''
        if hexCoords in self.bacteriaPositions:
            return hexCoords
        # Expand the search ring one hex at a time so the first hit is closest.
        for r in range(1, self.modelParams.PR_SIGHT + 1):
            candidates = [hc for hc in self.field.get_at_exact_range(hexCoords, r)
                          if hc in self.bacteriaPositions]
            if candidates:
                return random.choice(candidates)
        return None

    def add_bacteria(self, hexCoords):
        '''
        Adds one bacteria to the given cell.
        '''
        if hexCoords not in self.bacteriaPositions:
            self.bacteriaPositions[hexCoords] = []
        self.bacteriaPositions[hexCoords].append(Bacteria())

    def add_predator(self, hexCoords):
        '''
        Adds one predator (with initial energy PR_INIT_ENERGY) to the given cell.
        '''
        if hexCoords not in self.predatorPositions:
            self.predatorPositions[hexCoords] = []
        self.predatorPositions[hexCoords].append(Predator(self.modelParams.PR_INIT_ENERGY))

    def clear_cell(self, hexCoords):
        '''
        Removes all creatures from the given cell.
        '''
        # pop with a default avoids the redundant membership pre-check.
        self.bacteriaPositions.pop(hexCoords, None)
        self.predatorPositions.pop(hexCoords, None)

    def clear_all(self):
        '''
        Removes all creatures from the entire board.
        '''
        self.bacteriaPositions.clear()
        self.predatorPositions.clear()
class RapidBacteriaModel(CoreModel):
    '''
    Describes model with rapid moving bacteria. Each turn they divide or move for two cells
    '''
    # No per-instance attributes beyond CoreModel's slots; keeping __slots__
    # empty preserves the slotted (no __dict__) layout of the base class.
    # NOTE(review): the docstring implies overridden stepping behavior, but no
    # methods are visible here — the class body appears truncated; confirm.
    __slots__ = ()
| [
7061,
6,
198,
24564,
22090,
4755,
2746,
286,
19241,
952,
220,
198,
7061,
6,
198,
198,
11748,
4738,
198,
198,
6738,
598,
13,
33095,
1878,
1164,
1330,
22212,
7222,
3669,
11,
651,
62,
9662,
62,
1462,
198,
6738,
598,
13,
20123,
942,
133... | 2.15406 | 2,155 |
from datadog import initialize, statsd
from typing import Dict, List
import os
import json
# Number of most-recent terms to process; presumably consumed by get_terms(),
# which is not defined in this file — TODO confirm where it comes from.
NUMBER_OF_TERMS = 3
# Datadog agent endpoint: local DogStatsD listener on its default port 8125.
options = {"statsd_host": "127.0.0.1", "statsd_port": 8125}
initialize(**options)
# Gets the newest NUMBER_OF_TERMS terms
# Common tags attached to every gauge. The dict is mutated in place as the
# loops below descend (term -> department -> course -> section), so each
# level's keys persist into deeper iterations.
tags = {}
if os.environ.get("GITHUB_ACTIONS"):
    # Running inside GitHub Actions CI.
    tags["env"] = "github"
else:
    tags["env"] = "dev"
for term in get_terms():
    # Each term has a pre-scraped JSON dump of its course catalog.
    with open(f"data/semester_data/{term}/courses.json") as json_file:
        courses = json.load(json_file)
    tags["term"] = term
    for department in courses:
        tags["department"] = department["code"]
        for course in department["courses"]:
            tags["course"] = course["crse"]
            for section in course["sections"]:
                tags["title"] = section["title"]
                tags["crn"] = section["crn"]
                tags["section_number"] = section["sec"]
                tags["credits_min"] = section["credMin"]
                tags["credits_max"] = section["credMax"]
                print(
                    get_tag_list(tags),
                    "Actual:",
                    section["act"],
                    "Remaining:",
                    section["rem"],
                    "Capacity:",
                    section["cap"],
                )
                # Report this section's enrollment numbers as gauges.
                # get_tag_list is not defined here — presumably flattens the
                # tags dict into Datadog "key:value" strings; verify.
                statsd.gauge(
                    "quacs.section.actual", section["act"], tags=get_tag_list(tags)
                )
                statsd.gauge(
                    "quacs.section.remaining",
                    section["rem"],
                    tags=get_tag_list(tags),
                )
                statsd.gauge(
                    "quacs.section.capacity",
                    section["cap"],
                    tags=get_tag_list(tags),
                )
| [
6738,
4818,
324,
519,
1330,
41216,
11,
9756,
67,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
198,
11748,
28686,
198,
11748,
33918,
198,
198,
41359,
13246,
62,
19238,
62,
5781,
5653,
796,
513,
198,
198,
25811,
796,
19779,
34242,
67,
62... | 1.719234 | 1,097 |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
from spack.compiler import Compiler, _version_cache
from spack.util.executable import Executable
| [
2,
15069,
2211,
12,
23344,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,... | 3.488636 | 88 |
# Created by MechAviv
# ID :: [302010000]
# Ariant : Middle of the Desert
# Update Quest Record EX | Quest ID: [32603] | Data: ep1=302010000 | [
2,
15622,
416,
18202,
7355,
452,
198,
2,
4522,
7904,
685,
1270,
1264,
2388,
60,
198,
2,
6069,
415,
1058,
6046,
286,
262,
18692,
198,
198,
2,
10133,
6785,
13266,
7788,
930,
6785,
4522,
25,
685,
2624,
35642,
60,
930,
6060,
25,
2462,
... | 2.9375 | 48 |