code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""The AFF4 lexicon."""
from __future__ import unicode_literals
# This is the version of the AFF4 specification we support - not the library
# version itself.
from builtins import object
import rdflib
from pyaff4 import rdfvalue
AFF4_VERSION = "0.2"
AFF4_MAX_READ_LEN = 1024*1024*100
AFF4_NAMESPACE = "http://aff4.org/Schema#"
AFF4_LEGACY_NAMESPACE = "http://afflib.org/2009/aff4#"
XSD_NAMESPACE = "http://www.w3.org/2001/XMLSchema#"
RDF_NAMESPACE = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
AFF4_MEMORY_NAMESPACE = "http://aff4.org/Schema#memory/"
AFF4_DISK_NAMESPACE = "http://aff4.org/Schema#disk/"
AFF4_MACOS_NAMESPACE = "http://aff4.org/Schema#macos/"
# Attributes in this namespace will never be written to persistant
# storage. They are simply used as a way for storing metadata about an AFF4
# object internally.
AFF4_VOLATILE_NAMESPACE = "http://aff4.org/VolatileSchema#"
# The configuration space of the library itself. All these should be volatile
# and therefore not persistant or interoperable with other AFF4 implementations.
AFF4_CONFIG_NAMESPACE = AFF4_NAMESPACE + "config"
# Location of the cache (contains AFF4_FILE_NAME)
AFF4_CONFIG_CACHE_DIR = AFF4_CONFIG_NAMESPACE + "/cache"
# Commonly used RDF types.
URNType = "URN"
XSDStringType = (XSD_NAMESPACE + "string")
RDFBytesType = (XSD_NAMESPACE + "hexBinary")
XSDIntegerType = (XSD_NAMESPACE + "integer")
XSDIntegerTypeInt = (XSD_NAMESPACE + "int")
XSDIntegerTypeLong = (XSD_NAMESPACE + "long")
XSDBooleanType = (XSD_NAMESPACE + "boolean")
# Attribute names for different AFF4 objects.
# Base AFF4Object
AFF4_TYPE = (RDF_NAMESPACE + "type")
AFF4_STORED = (AFF4_NAMESPACE + "stored")
AFF4_CONTAINS = (AFF4_NAMESPACE + "contains")
# Each container should have this file which contains the URN of the container.
AFF4_CONTAINER_DESCRIPTION = "container.description"
AFF4_CONTAINER_INFO_TURTLE = "information.turtle"
AFF4_CONTAINER_INFO_YAML = "information.yaml"
# AFF4 ZipFile containers.
AFF4_ZIP_TYPE = (AFF4_NAMESPACE + "zip_volume")
# AFF4Stream
AFF4_STREAM_SIZE = (AFF4_NAMESPACE + "size")
AFF4_LEGACY_STREAM_SIZE = (AFF4_LEGACY_NAMESPACE + "size")
# The original filename the stream had.
AFF4_STREAM_ORIGINAL_FILENAME = (AFF4_NAMESPACE + "original_filename")
# Can be "read", "truncate", "append"
AFF4_STREAM_WRITE_MODE = (AFF4_VOLATILE_NAMESPACE + "writable")
# FileBackedObjects are either marked explicitly or using the file:// scheme.
AFF4_FILE_TYPE = (AFF4_NAMESPACE + "file")
# file:// based URNs do not always have a direct mapping to filesystem
# paths. This volatile attribute is used to control the filename mapping.
AFF4_FILE_NAME = (AFF4_VOLATILE_NAMESPACE + "filename")
# The original filename the stream had.
AFF4_STREAM_ORIGINAL_FILENAME = (AFF4_NAMESPACE + "original_filename")
# ZipFileSegment
AFF4_ZIP_SEGMENT_TYPE = (AFF4_NAMESPACE + "zip_segment")
# ZipStoredLogicalStream
AFF4_ZIP_SEGMENT_IMAGE_TYPE = (AFF4_NAMESPACE + "ZipSegment")
AFF4_FILEIMAGE = (AFF4_NAMESPACE + "FileImage")
# AFF4 Image Stream - stores a stream using Bevies.
AFF4_IMAGE_TYPE = (AFF4_NAMESPACE + "ImageStream")
AFF4_LEGACY_IMAGE_TYPE = (AFF4_LEGACY_NAMESPACE + "stream")
AFF4_SCUDETTE_IMAGE_TYPE = (AFF4_NAMESPACE + "image")
AFF4_IMAGE_CHUNK_SIZE = (AFF4_NAMESPACE + "chunkSize")
AFF4_LEGACY_IMAGE_CHUNK_SIZE = (AFF4_LEGACY_NAMESPACE + "chunkSize")
AFF4_IMAGE_CHUNKS_PER_SEGMENT = (AFF4_NAMESPACE + "chunksInSegment")
AFF4_LEGACY_IMAGE_CHUNKS_PER_SEGMENT = (AFF4_LEGACY_NAMESPACE + "chunksInSegment")
AFF4_IMAGE_COMPRESSION = (AFF4_NAMESPACE + "compressionMethod")
AFF4_LEGACY_IMAGE_COMPRESSION = (AFF4_LEGACY_NAMESPACE + "CompressionMethod")
AFF4_IMAGE_COMPRESSION_ZLIB = "https://www.ietf.org/rfc/rfc1950.txt"
AFF4_IMAGE_COMPRESSION_SNAPPY = "http://code.google.com/p/snappy/"
AFF4_IMAGE_COMPRESSION_SNAPPY_SCUDETTE = "https://github.com/google/snappy"
AFF4_IMAGE_COMPRESSION_STORED = (AFF4_NAMESPACE + "compression/stored")
AFF4_IMAGE_AES_XTS = "https://doi.org/10.1109/IEEESTD.2008.4493450"
# AFF4Map - stores a mapping from one stream to another.
AFF4_MAP_TYPE = (AFF4_NAMESPACE + "Map")
AFF4_LEGACY_MAP_TYPE = (AFF4_LEGACY_NAMESPACE + "map")
AFF4_SCUDETTE_MAP_TYPE = (AFF4_NAMESPACE + "map")
# Encrypted Streams
AFF4_ENCRYPTEDSTREAM_TYPE = (AFF4_NAMESPACE + "EncryptedStream")
AFF4_RANDOMSTREAM_TYPE = (AFF4_NAMESPACE + "RandomAccessImageStream")
AFF4_KEYBAG = (AFF4_NAMESPACE + "keyBag")
AFF4_WRAPPEDKEY = (AFF4_NAMESPACE + "wrappedKey")
AFF4_SALT = (AFF4_NAMESPACE + "salt")
AFF4_ITERATIONS = (AFF4_NAMESPACE + "iterations")
AFF4_KEYSIZEBYTES = (AFF4_NAMESPACE + "keySizeInBytes")
AFF4_CERT_ENCRYPTED_KEYBAG = (AFF4_NAMESPACE + "PublicKeyEncryptedKeyBag")
AFF4_PASSWORD_WRAPPED_KEYBAG = (AFF4_NAMESPACE + "PasswordWrappedKeyBag")
AFF4_SERIALNUMBER = (AFF4_NAMESPACE + "serialNumber")
AFF4_SUBJECTNAME = (AFF4_NAMESPACE + "x509SubjectName")
# Categories describe the general type of an image.
AFF4_CATEGORY = (AFF4_NAMESPACE + "category")
# These represent standard attributes to describe memory forensics images.
AFF4_MEMORY_PHYSICAL = (AFF4_MEMORY_NAMESPACE + "physical")
AFF4_MEMORY_VIRTUAL = (AFF4_MEMORY_NAMESPACE + "virtual")
AFF4_MEMORY_PAGEFILE = (AFF4_MEMORY_NAMESPACE + "pagefile")
AFF4_MEMORY_PAGEFILE_NUM = (AFF4_MEMORY_NAMESPACE + "pagefile_number")
AFF4_DISK_RAW = (AFF4_DISK_NAMESPACE + "raw")
AFF4_DISK_PARTITION = (AFF4_DISK_NAMESPACE + "partition")
AFF4_DIRECTORY_TYPE = (AFF4_NAMESPACE + "directory")
#The constant stream is a psuedo stream which just returns a constant.
AFF4_CONSTANT_TYPE = (AFF4_NAMESPACE + "constant")
# The constant to repeat (default 0).
AFF4_CONSTANT_CHAR = (AFF4_NAMESPACE + "constant_char")
# An AFF4 Directory stores all members as files on the filesystem. Some
# filesystems can not represent the URNs properly, hence we need a mapping
# between the URN and the filename. This attribute stores the _relative_ path
# of the filename for the member URN relative to the container's path.
AFF4_DIRECTORY_CHILD_FILENAME = (AFF4_NAMESPACE + "directory/filename")
HASH_SHA512 = rdflib.URIRef("http://aff4.org/Schema#SHA512")
HASH_SHA256 = rdflib.URIRef("http://aff4.org/Schema#SHA256")
HASH_SHA1 = rdflib.URIRef("http://aff4.org/Schema#SHA1")
HASH_MD5 = rdflib.URIRef("http://aff4.org/Schema#MD5")
HASH_BLAKE2B = rdflib.URIRef("http://aff4.org/Schema#Blake2b")
HASH_BLOCKMAPHASH_SHA512 = rdflib.URIRef("http://aff4.org/Schema#blockMapHashSHA512")
class Lexicon(object):
    """Base class for AFF4 lexicons.

    Subclasses define a ``base`` namespace string; :meth:`of` builds fully
    qualified attribute URNs by appending a suffix to that namespace.
    """
    def __init__(self):
        pass
    def of(self, end):
        """Return the namespaced URN formed by appending *end* to ``base``."""
        return "{}{}".format(self.base, end)
class StdLexicon(Lexicon):
    # Standard AFF4 lexicon: attribute URNs under http://aff4.org/Schema#.
    # NOTE: ``map`` and ``hash`` intentionally shadow builtins inside the
    # class body; the attribute names are part of the public lexicon API.
    base = AFF4_NAMESPACE
    map = base + "Map"
    Image = base + "Image"
    stored = base + "stored"
    target = base + "target"
    contains = base + "contains"
    dataStream = base + "dataStream"
    blockMapHash = base + "blockMapHash"
    dependentStream = base + "dependentStream"
    mapPointHash = base + "mapPointHash"
    mapIdxHash = base + "mapIdxHash"
    mapPathHash = base + "mapPathHash"
    blockHashesHash = base + "blockHashesHash"
    mapHash = base + "mapHash"
    hash = base + "hash"
    chunksPerSegment = base + "chunksInSegment"
    chunkSize = base + "chunkSize"
    streamSize = base + "size"
    compressionMethod = base + "compressionMethod"
    # Memory-forensics specific attributes (kernel layout hints).
    memoryPageTableEntryOffset = base + "memoryPageTableEntryOffset"
    ntKernelBase = base + "NTKernelBase"
    OSXKernelPhysicalOffset = base + "OSXKernelPhysicalOffset"
    OSXKALSRSlide = base + "OSXKALSRSlide"
    OSXDTBPhysicalOffset = base + "OSXDTBPhysicalOffset"
class Std11Lexicon(StdLexicon):
    # AFF4 1.1 lexicon: adds logical imaging (file/folder metadata) and
    # encrypted-stream attributes on top of the standard lexicon.
    base = AFF4_NAMESPACE
    FileImage = base + "FileImage"
    FolderImage = base + "Folder"
    lastWritten = base+ "lastWritten"
    lastAccessed = base + "lastAccessed"
    recordChanged = base + "recordChanged"
    birthTime = base + "birthTime"
    pathName = base + "originalFileName"
    collidingDataStream = base + "collidingDataStream"
    child = base + "child"
    LogicalAcquisitionTask = base + "LogicalAcquisitionTask"
    filesystemRoot = base + "filesystemRoot"
    # Encryption-related attributes reuse the module-level constants so the
    # lexicon stays in sync with code that references the constants directly.
    keyBag = AFF4_KEYBAG
    salt = AFF4_SALT
    iterations = AFF4_ITERATIONS
    keySizeInBytes = AFF4_KEYSIZEBYTES
    wrappedKey = AFF4_WRAPPEDKEY
    EncryptedStream = AFF4_ENCRYPTEDSTREAM_TYPE
    CertEncryptedKeyBag = AFF4_CERT_ENCRYPTED_KEYBAG
    PasswordWrappedKeyBag = AFF4_PASSWORD_WRAPPED_KEYBAG
    serialNumber = AFF4_SERIALNUMBER
    subjectName = AFF4_SUBJECTNAME
class LegacyLexicon(Lexicon):
    # Lexicon for pre-0.2 AFF4 containers (http://afflib.org/2009/aff4#).
    # Note the naming differences from StdLexicon: lowercase "map" and
    # capitalized "CompressionMethod".
    base = AFF4_LEGACY_NAMESPACE
    map = base + "map"
    stored = base + "stored"
    Image = base + "Image"
    blockHashesHash = base + "blockHashesHash"
    mapPointHash = base + "mapPointHash"
    mapIdxHash = base + "mapIdxHash"
    mapPathHash = base + "mapPathHash"
    mapHash = base + "mapHash"
    hash = base + "hash"
    chunksPerSegment = base + "chunksInSegment"
    chunkSize = base + "chunkSize"
    streamSize = base + "size"
    compressionMethod = base + "CompressionMethod"
class ScudetteLexicon(Lexicon):
    # Lexicon for images produced by scudette's implementation (Rekall/pmem
    # lineage): uses snake_case attribute suffixes ("chunks_per_segment",
    # "chunk_size", "compression") in the standard namespace.
    base = AFF4_NAMESPACE
    map = base + "map"
    stored = base + "stored"
    Image = base + "Image"
    blockHashesHash = base + "blockHashesHash"
    mapPointHash = base + "mapPointHash"
    mapIdxHash = base + "mapIdxHash"
    mapPathHash = base + "mapPathHash"
    mapHash = base + "mapHash"
    hash = base + "hash"
    chunksPerSegment = base + "chunks_per_segment"
    chunkSize = base + "chunk_size"
    streamSize = base + "size"
    compressionMethod = base + "compression"
    category = base + "category"
    memoryPhysical = "http://aff4.org/Schema#memory/physical"
# early logical imaging support for pmem
class PmemLogicalPreStd(StdLexicon):
    """Lexicon for pmem's early logical imaging, before the standard
    adopted "originalFileName"; it used "original_filename" instead."""
    pathName = (AFF4_NAMESPACE + "original_filename")
legacy = LegacyLexicon()
standard = StdLexicon()
scudette = ScudetteLexicon()
standard11 = Std11Lexicon()
pmemlogical = PmemLogicalPreStd()
def AutoResolveAttribute(resolver, urn, attribute):
    """Resolve *attribute* on *urn*, trying each known lexicon in turn.

    The standard, scudette and legacy lexicons are consulted in that
    order; the first non-None value wins. Returns None when no lexicon
    yields a value.
    """
    for lex in (standard, scudette, legacy):
        value = resolver.Get(urn, getattr(lex, attribute))
        if value is not None:
            return value
    return None
transient_graph = rdfvalue.URN("http://aff4.org/Schema#transient")
any = rdfvalue.URN("http://aff4.org/Schema#any") | [
"rdflib.URIRef",
"pyaff4.rdfvalue.URN"
] | [((6622, 6668), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#SHA512"""'], {}), "('http://aff4.org/Schema#SHA512')\n", (6635, 6668), False, 'import rdflib\n'), ((6683, 6729), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#SHA256"""'], {}), "('http://aff4.org/Schema#SHA256')\n", (6696, 6729), False, 'import rdflib\n'), ((6742, 6786), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#SHA1"""'], {}), "('http://aff4.org/Schema#SHA1')\n", (6755, 6786), False, 'import rdflib\n'), ((6798, 6841), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#MD5"""'], {}), "('http://aff4.org/Schema#MD5')\n", (6811, 6841), False, 'import rdflib\n'), ((6857, 6904), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#Blake2b"""'], {}), "('http://aff4.org/Schema#Blake2b')\n", (6870, 6904), False, 'import rdflib\n'), ((6933, 6991), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#blockMapHashSHA512"""'], {}), "('http://aff4.org/Schema#blockMapHashSHA512')\n", (6946, 6991), False, 'import rdflib\n'), ((10712, 10760), 'pyaff4.rdfvalue.URN', 'rdfvalue.URN', (['"""http://aff4.org/Schema#transient"""'], {}), "('http://aff4.org/Schema#transient')\n", (10724, 10760), False, 'from pyaff4 import rdfvalue\n'), ((10767, 10809), 'pyaff4.rdfvalue.URN', 'rdfvalue.URN', (['"""http://aff4.org/Schema#any"""'], {}), "('http://aff4.org/Schema#any')\n", (10779, 10809), False, 'from pyaff4 import rdfvalue\n')] |
import queue
import threading
from subprocess import PIPE, Popen
procs = []
stdout = queue.Queue()
def process_stdout(proc):
    """Drain a child process's stdout into the shared queue.

    Blocks until the stream hits EOF (empty read), pushing each stripped
    line onto the module-level ``stdout`` queue, then deregisters the
    process from ``procs``.
    """
    while True:
        raw = proc.stdout.readline()
        if raw == b"":
            break
        stdout.put(raw.decode().strip())
    procs.remove(proc)
def create_proc(uri):
    """Open *uri* via `xdg-open` and start draining its output.

    `unbuffer` forces line-buffered output from the child so the reader
    thread sees lines as they are produced. The process is registered in
    the module-level ``procs`` list and a daemonless thread consumes its
    stdout via process_stdout().
    """
    proc = Popen(["unbuffer", "xdg-open", uri], stdout=PIPE, stderr=PIPE)
    procs.append(proc)
    threading.Thread(target=process_stdout, args=(proc,)).start()
| [
"subprocess.Popen",
"queue.Queue"
] | [((86, 99), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (97, 99), False, 'import queue\n'), ((279, 341), 'subprocess.Popen', 'Popen', (["['unbuffer', 'xdg-open', uri]"], {'stdout': 'PIPE', 'stderr': 'PIPE'}), "(['unbuffer', 'xdg-open', uri], stdout=PIPE, stderr=PIPE)\n", (284, 341), False, 'from subprocess import PIPE, Popen\n')] |
import torch
import numpy as np
import matplotlib.pyplot as plt
from lib.utils import moving_average, check_numpy
@torch.no_grad()
def visualize_pdf(maml):
    """Plot trained vs. untrained Gaussian-init densities for every layer.

    For each entry in ``maml.initializers`` the learned weight
    initializer's Normal(mean, std) PDF is drawn against the untrained
    ("Fixup") one on a 6x4 subplot grid (assumes at most 24 layers).
    NOTE(review): assumes each initializer exposes scalar ``.mean`` /
    ``.std`` tensors -- confirm against the MAML implementation.
    """
    i = 0
    plt.figure(figsize=[22, 34])
    for name, (weight_maml_init, bias_maml_init) in maml.initializers.items():
        weight_base_init, _ = maml.untrained_initializers[name]
        base_mean = weight_base_init.mean.item()
        base_std = weight_base_init.std.item()
        maml_mean = weight_maml_init.mean.item()
        maml_std = weight_maml_init.std.item()
        base_init = torch.distributions.Normal(base_mean, base_std)
        maml_init = torch.distributions.Normal(maml_mean, maml_std)
        i += 1
        plt.subplot(6, 4, i)
        # Grid wide enough to cover +/-3 std of both densities.
        xx = np.linspace(min([base_mean - 3.*base_std, maml_mean - 3.*maml_std]),
                         max([base_mean + 3.*base_std, maml_mean + 3.*maml_std]), 1000)
        if i == 12:
            # Only subplot 12 carries the legend (once per figure).
            yy = base_init.log_prob(torch.tensor(xx)).exp().numpy()
            plt.plot(xx, yy, '--', label='Fixup')
            yy = maml_init.log_prob(torch.tensor(xx)).exp().numpy()
            plt.plot(xx, yy, c='g', label='Fixup + DIMAML')
            leg = plt.legend(loc=4, fontsize=14.5, frameon=False)
            for line in leg.get_lines():
                line.set_linewidth(1.6)
        else:
            yy = base_init.log_prob(torch.tensor(xx)).exp().numpy()
            plt.plot(xx, yy, '--')
            yy = maml_init.log_prob(torch.tensor(xx)).exp().numpy()
            plt.plot(xx, yy, c='g')
        plt.xticks(fontsize=12)
        plt.yticks(fontsize=12)
        plt.title(name + '_weight', fontsize=14)
    plt.show()
@torch.no_grad()
def visualize_quantile_functions(maml):
    """Plot trained vs. untrained weight quantile functions per layer.

    Each subplot shows the untrained ("Fixup") quantile function against
    the trained one, both evaluated on a uniform grid over [0, 1], on a
    6x4 grid (assumes at most 24 layers).
    NOTE(review): requires a CUDA device -- the grid is moved with
    ``.cuda()``.
    """
    plt.figure(figsize=[22, 34])
    i = 0
    for name, (weight_quantile_function, bias_quantile_function) in maml.initializers.items():
        wq_init, bq_init = maml.untrained_initializers[name]
        i += 1
        plt.subplot(6, 4, i)
        xx = torch.linspace(0., 1., 1000).cuda()
        if i == 12:
            # Only subplot 12 carries the legend (once per figure).
            yy = wq_init(xx)
            plt.plot(check_numpy(xx), check_numpy(yy), '--', label='Fixup')
            yy = weight_quantile_function(xx)
            plt.plot(check_numpy(xx), check_numpy(yy), c='g', label='Fixup $\\rightarrow$ DIMAML')
            leg = plt.legend(loc=4, fontsize=14, frameon=False)
            for line in leg.get_lines():
                line.set_linewidth(1.6)
        else:
            yy = wq_init(xx)
            plt.plot(check_numpy(xx), check_numpy(yy), '--')
            yy = weight_quantile_function(xx)
            plt.plot(check_numpy(xx), check_numpy(yy), c='g')
        plt.xlim([0, 1])
        plt.title(name + '_weight')
    plt.show()
def draw_plots(base_train_loss, base_test_loss, base_test_error,
               maml_train_loss, maml_test_loss, maml_test_error):
    """Draw a three-panel baseline-vs-DIMAML comparison figure.

    Panels: smoothed train loss, test loss, and test classification
    error. Train-loss curves are smoothed with a span-10 moving average.
    """
    plt.figure(figsize=(20, 6))
    # (title, baseline curve, DIMAML curve) for each panel, left to right.
    panels = (
        ("Train loss",
         moving_average(base_train_loss, span=10),
         moving_average(maml_train_loss, span=10)),
        ("Test loss", base_test_loss, maml_test_loss),
        ("Test classification error", base_test_error, maml_test_error),
    )
    for position, (title, baseline_curve, dimaml_curve) in enumerate(panels, start=1):
        plt.subplot(1, 3, position)
        plt.plot(baseline_curve, label='Baseline')
        plt.plot(dimaml_curve, c='g', label='DIMAML')
        plt.legend(fontsize=14)
        plt.title(title, fontsize=14)
"matplotlib.pyplot.title",
"torch.distributions.Normal",
"matplotlib.pyplot.xticks",
"lib.utils.check_numpy",
"matplotlib.pyplot.plot",
"lib.utils.moving_average",
"torch.tensor",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"torch.linspace",
"torch.no_grad",
"matplotlib.pyplot.xlim... | [((117, 132), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (130, 132), False, 'import torch\n'), ((1674, 1689), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1687, 1689), False, 'import torch\n'), ((172, 200), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[22, 34]'}), '(figsize=[22, 34])\n', (182, 200), True, 'import matplotlib.pyplot as plt\n'), ((1660, 1670), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1668, 1670), True, 'import matplotlib.pyplot as plt\n'), ((1734, 1762), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[22, 34]'}), '(figsize=[22, 34])\n', (1744, 1762), True, 'import matplotlib.pyplot as plt\n'), ((2723, 2733), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2731, 2733), True, 'import matplotlib.pyplot as plt\n'), ((2871, 2898), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 6)'}), '(figsize=(20, 6))\n', (2881, 2898), True, 'import matplotlib.pyplot as plt\n'), ((2903, 2923), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (2914, 2923), True, 'import matplotlib.pyplot as plt\n'), ((3077, 3100), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (3087, 3100), True, 'import matplotlib.pyplot as plt\n'), ((3105, 3141), 'matplotlib.pyplot.title', 'plt.title', (['"""Train loss"""'], {'fontsize': '(14)'}), "('Train loss', fontsize=14)\n", (3114, 3141), True, 'import matplotlib.pyplot as plt\n'), ((3146, 3166), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (3157, 3166), True, 'import matplotlib.pyplot as plt\n'), ((3169, 3211), 'matplotlib.pyplot.plot', 'plt.plot', (['base_test_loss'], {'label': '"""Baseline"""'}), "(base_test_loss, label='Baseline')\n", (3177, 3211), True, 'import matplotlib.pyplot as plt\n'), ((3216, 3263), 'matplotlib.pyplot.plot', 'plt.plot', (['maml_test_loss'], {'c': '"""g"""', 'label': '"""DIMAML"""'}), 
"(maml_test_loss, c='g', label='DIMAML')\n", (3224, 3263), True, 'import matplotlib.pyplot as plt\n'), ((3268, 3291), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (3278, 3291), True, 'import matplotlib.pyplot as plt\n'), ((3296, 3331), 'matplotlib.pyplot.title', 'plt.title', (['"""Test loss"""'], {'fontsize': '(14)'}), "('Test loss', fontsize=14)\n", (3305, 3331), True, 'import matplotlib.pyplot as plt\n'), ((3336, 3356), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (3347, 3356), True, 'import matplotlib.pyplot as plt\n'), ((3359, 3402), 'matplotlib.pyplot.plot', 'plt.plot', (['base_test_error'], {'label': '"""Baseline"""'}), "(base_test_error, label='Baseline')\n", (3367, 3402), True, 'import matplotlib.pyplot as plt\n'), ((3407, 3455), 'matplotlib.pyplot.plot', 'plt.plot', (['maml_test_error'], {'c': '"""g"""', 'label': '"""DIMAML"""'}), "(maml_test_error, c='g', label='DIMAML')\n", (3415, 3455), True, 'import matplotlib.pyplot as plt\n'), ((3460, 3483), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (3470, 3483), True, 'import matplotlib.pyplot as plt\n'), ((3488, 3539), 'matplotlib.pyplot.title', 'plt.title', (['"""Test classification error"""'], {'fontsize': '(14)'}), "('Test classification error', fontsize=14)\n", (3497, 3539), True, 'import matplotlib.pyplot as plt\n'), ((565, 612), 'torch.distributions.Normal', 'torch.distributions.Normal', (['base_mean', 'base_std'], {}), '(base_mean, base_std)\n', (591, 612), False, 'import torch\n'), ((633, 680), 'torch.distributions.Normal', 'torch.distributions.Normal', (['maml_mean', 'maml_std'], {}), '(maml_mean, maml_std)\n', (659, 680), False, 'import torch\n'), ((704, 724), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(6)', '(4)', 'i'], {}), '(6, 4, i)\n', (715, 724), True, 'import matplotlib.pyplot as plt\n'), ((1551, 1574), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': 
'(12)'}), '(fontsize=12)\n', (1561, 1574), True, 'import matplotlib.pyplot as plt\n'), ((1583, 1606), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (1593, 1606), True, 'import matplotlib.pyplot as plt\n'), ((1615, 1655), 'matplotlib.pyplot.title', 'plt.title', (["(name + '_weight')"], {'fontsize': '(14)'}), "(name + '_weight', fontsize=14)\n", (1624, 1655), True, 'import matplotlib.pyplot as plt\n'), ((1952, 1972), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(6)', '(4)', 'i'], {}), '(6, 4, i)\n', (1963, 1972), True, 'import matplotlib.pyplot as plt\n'), ((2666, 2682), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (2674, 2682), True, 'import matplotlib.pyplot as plt\n'), ((2691, 2718), 'matplotlib.pyplot.title', 'plt.title', (["(name + '_weight')"], {}), "(name + '_weight')\n", (2700, 2718), True, 'import matplotlib.pyplot as plt\n'), ((2935, 2975), 'lib.utils.moving_average', 'moving_average', (['base_train_loss'], {'span': '(10)'}), '(base_train_loss, span=10)\n', (2949, 2975), False, 'from lib.utils import moving_average, check_numpy\n'), ((3008, 3048), 'lib.utils.moving_average', 'moving_average', (['maml_train_loss'], {'span': '(10)'}), '(maml_train_loss, span=10)\n', (3022, 3048), False, 'from lib.utils import moving_average, check_numpy\n'), ((1000, 1037), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'yy', '"""--"""'], {'label': '"""Fixup"""'}), "(xx, yy, '--', label='Fixup')\n", (1008, 1037), True, 'import matplotlib.pyplot as plt\n'), ((1118, 1165), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'yy'], {'c': '"""g"""', 'label': '"""Fixup + DIMAML"""'}), "(xx, yy, c='g', label='Fixup + DIMAML')\n", (1126, 1165), True, 'import matplotlib.pyplot as plt\n'), ((1184, 1231), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)', 'fontsize': '(14.5)', 'frameon': '(False)'}), '(loc=4, fontsize=14.5, frameon=False)\n', (1194, 1231), True, 'import matplotlib.pyplot as plt\n'), ((1407, 1429), 
'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'yy', '"""--"""'], {}), "(xx, yy, '--')\n", (1415, 1429), True, 'import matplotlib.pyplot as plt\n'), ((1510, 1533), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'yy'], {'c': '"""g"""'}), "(xx, yy, c='g')\n", (1518, 1533), True, 'import matplotlib.pyplot as plt\n'), ((2310, 2355), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)', 'fontsize': '(14)', 'frameon': '(False)'}), '(loc=4, fontsize=14, frameon=False)\n', (2320, 2355), True, 'import matplotlib.pyplot as plt\n'), ((1986, 2016), 'torch.linspace', 'torch.linspace', (['(0.0)', '(1.0)', '(1000)'], {}), '(0.0, 1.0, 1000)\n', (2000, 2016), False, 'import torch\n'), ((2092, 2107), 'lib.utils.check_numpy', 'check_numpy', (['xx'], {}), '(xx)\n', (2103, 2107), False, 'from lib.utils import moving_average, check_numpy\n'), ((2109, 2124), 'lib.utils.check_numpy', 'check_numpy', (['yy'], {}), '(yy)\n', (2120, 2124), False, 'from lib.utils import moving_average, check_numpy\n'), ((2214, 2229), 'lib.utils.check_numpy', 'check_numpy', (['xx'], {}), '(xx)\n', (2225, 2229), False, 'from lib.utils import moving_average, check_numpy\n'), ((2231, 2246), 'lib.utils.check_numpy', 'check_numpy', (['yy'], {}), '(yy)\n', (2242, 2246), False, 'from lib.utils import moving_average, check_numpy\n'), ((2501, 2516), 'lib.utils.check_numpy', 'check_numpy', (['xx'], {}), '(xx)\n', (2512, 2516), False, 'from lib.utils import moving_average, check_numpy\n'), ((2518, 2533), 'lib.utils.check_numpy', 'check_numpy', (['yy'], {}), '(yy)\n', (2529, 2533), False, 'from lib.utils import moving_average, check_numpy\n'), ((2608, 2623), 'lib.utils.check_numpy', 'check_numpy', (['xx'], {}), '(xx)\n', (2619, 2623), False, 'from lib.utils import moving_average, check_numpy\n'), ((2625, 2640), 'lib.utils.check_numpy', 'check_numpy', (['yy'], {}), '(yy)\n', (2636, 2640), False, 'from lib.utils import moving_average, check_numpy\n'), ((956, 972), 'torch.tensor', 'torch.tensor', (['xx'], {}), '(xx)\n', 
(968, 972), False, 'import torch\n'), ((1074, 1090), 'torch.tensor', 'torch.tensor', (['xx'], {}), '(xx)\n', (1086, 1090), False, 'import torch\n'), ((1363, 1379), 'torch.tensor', 'torch.tensor', (['xx'], {}), '(xx)\n', (1375, 1379), False, 'import torch\n'), ((1466, 1482), 'torch.tensor', 'torch.tensor', (['xx'], {}), '(xx)\n', (1478, 1482), False, 'import torch\n')] |
import os
import re
import glob
import argparse
import pandas as pd
list_test = ['alexnet',
'inception3',
'inception4',
'resnet152',
'resnet50',
'vgg16']
# Naming convention
# Key: log name
# Value: ([num_gpus], [names])
# num_gpus: Since each log folder has all the record for different numbers of GPUs, it is convenient to specify the benchmarks you want to pull by listing the num_gpus
# names: rename the experiments so they are easier to undertand
list_system = {
"i7-6850K-GeForce_GTX_1080_Ti": ([1], ['GTX 1080Ti']),
"i7-9750H-GeForce_RTX_2070_with_Max-Q_Design_XLA_TF1_15": ([1], ['RTX 2070 MAX-Q']),
"i7-9750H-GeForce_RTX_2080_with_Max-Q_Design_XLA_TF1_15": ([1], ['RTX 2080 MAX-Q']),
"i7-10875H-GeForce_RTX_2080_Super_with_Max-Q_Design_XLA_TF2_2": ([1], ['RTX 2080 SUPER MAX-Q']),
"Gold_6230-GeForce_RTX_2080_Ti_NVLink_XLA_trt_TF1_15": ([2, 4, 8], ['2x RTX 2080Ti NVLink', '4x RTX 2080Ti NVLink', '8x RTX 2080Ti NVLink']),
"Gold_6230-GeForce_RTX_2080_Ti_XLA_trt_TF1_15": ([1, 2, 4, 8], ['RTX 2080Ti', '2x RTX 2080Ti', '4x RTX 2080Ti', '8x RTX 2080Ti']),
"Platinum-Tesla_V100-SXM3-32GB_HP16_TF2_2": ([1, 8], ['V100 32GB', '8x V100 32GB']),
"Gold_6230-Quadro_RTX_8000_XLA_trt_TF1_15": ([1, 2, 4, 8], ['RTX 8000', '2x RTX 8000', '4x RTX 8000', '8x RTX 8000']),
"Gold_6230-Quadro_RTX_8000_NVLink_XLA_trt_TF1_15": ([2, 4, 8], ['2x RTX 8000 NVLink', '4x RTX 8000 NVLink', '8x RTX 8000 NVLink']),
"7502-A100-PCIE-40GB": ([1, 2, 4, 8], ['A100 40GB PCIe', '2x A100 40GB PCIe', '4x A100 40GB PCIe', '8x A100 40GB PCIe']),
"3960X-GeForce_RTX_3080_XLA": ([1, 2], ['RTX 3080', '2x RTX 3080']),
"3970X-GeForce_RTX_3090_XLA": ([1, 2, 3], ['RTX 3090', '2x RTX 3090', '3x RTX 3090']),
"7502-RTX_A6000_XLA_TF1_15": ([1, 2, 4, 8], ['RTX A6000', '2x RTX A6000', '4x RTX A6000', '8x RTX A6000'])
}
def get_result(path_logs, folder, model):
    """Parse one benchmark log folder and return (batch_size, throughput).

    Args:
        path_logs: root directory containing the per-system log folders.
        folder: log folder name, e.g. '<key>.logs/<config>'.
        model: model name prefix; the matching run folder is expected to
            be named '<model>-<batch_size>'.

    Returns:
        Tuple of (batch_size: str, throughput: int). Throughput is 0 when
        the log value cannot be parsed as a number (e.g. a failed run).

    Raises:
        FileNotFoundError: if no matching run folder exists.
    """
    matches = glob.glob(path_logs + '/' + folder + '/' + model + '*')
    if not matches:
        # Previously this crashed with an opaque IndexError.
        raise FileNotFoundError(
            'No log folder matching {}/{}/{}*'.format(path_logs, folder, model))
    folder_path = matches[0]
    # Run folders are named '<model>-<batch_size>'.
    folder_name = folder_path.split('/')[-1]
    batch_size = folder_name.split('-')[-1]
    file_throughput = folder_path + '/throughput/1'
    with open(file_throughput, 'r') as f:
        lines = f.read().splitlines()
    # The second-to-last line holds '... images/sec: <value>'.
    line = lines[-2]
    throughput = line.split(' ')[-1]
    try:
        throughput = int(round(float(throughput)))
    except ValueError:
        # Non-numeric value: treat as a failed run. (Was a bare `except:`
        # which also swallowed KeyboardInterrupt etc.)
        throughput = 0
    return batch_size, throughput
def create_row_throughput(path_logs, mode, data, precision, key, num_gpu, name, df, is_train=True):
    """Fill one row of *df* with throughput numbers for every model.

    Args:
        path_logs: root directory containing the log folders.
        mode: parameter-update mode ('replicated' or 'parameter_server').
        data: 'syn' or 'real'.
        precision: 'fp32' or 'fp16'.
        key: system identifier (log-folder prefix).
        num_gpu: number of GPUs used in the benchmark run.
        name: row label in *df* for this configuration.
        df: DataFrame indexed by configuration name, one column per model.
        is_train: True for training logs, False for inference logs.
    """
    # Folder layout: <key>.logs/<data>-<mode>-<precision>-<N>gpus[-inference].
    # The original duplicated this construction across four branches and
    # raised UnboundLocalError for any unexpected precision string.
    folder = '{}.logs/{}-{}-{}-{}gpus'.format(key, data, mode, precision, num_gpu)
    if not is_train:
        folder += '-inference'
    for model in list_test:
        _, throughput = get_result(path_logs, folder, model)
        df.at[name, model] = throughput
    df.at[name, 'num_gpu'] = num_gpu
def create_row_batch_size(path_logs, mode, data, precision, key, num_gpu, name, df, is_train=True):
    """Fill one row of *df* with total batch sizes for every model.

    The recorded value is the per-GPU batch size (parsed from the run
    folder name) multiplied by the GPU count.

    Args:
        path_logs: root directory containing the log folders.
        mode: parameter-update mode ('replicated' or 'parameter_server').
        data: 'syn' or 'real'.
        precision: 'fp32' or 'fp16'.
        key: system identifier (log-folder prefix).
        num_gpu: number of GPUs used in the benchmark run.
        name: row label in *df* for this configuration.
        df: DataFrame indexed by configuration name, one column per model.
        is_train: True for training logs, False for inference logs.
    """
    # Folder layout: <key>.logs/<data>-<mode>-<precision>-<N>gpus[-inference].
    # Replaces four near-identical branches (which also raised
    # UnboundLocalError for an unexpected precision string).
    folder = '{}.logs/{}-{}-{}-{}gpus'.format(key, data, mode, precision, num_gpu)
    if not is_train:
        folder += '-inference'
    for model in list_test:
        batch_size, _ = get_result(path_logs, folder, model)
        df.at[name, model] = int(batch_size) * num_gpu
    df.at[name, 'num_gpu'] = num_gpu
def main():
    """Collect benchmark logs and write the train-throughput and
    train-batch-size CSV summaries.

    Reads per-system log folders under --path and writes
    'tf-train-throughput-<precision>.csv' and
    'tf-train-bs-<precision>.csv' in the working directory.
    """
    parser = argparse.ArgumentParser(description='Gather benchmark results.')
    parser.add_argument('--path', type=str, default='logs',
                        help='path that has the logs')
    parser.add_argument('--mode', type=str, default='replicated',
                        choices=['replicated', 'parameter_server'],
                        help='Method for parameter update')
    parser.add_argument('--data', type=str, default='syn',
                        choices=['syn', 'real'],
                        help='Choose between synthetic data and real data')
    parser.add_argument('--precision', type=str, default='fp32',
                        choices=['fp32', 'fp16'],
                        help='Choose benchmark precision')  # fixed 'becnhmark' typo
    args = parser.parse_args()
    # One column per model, plus the GPU count.
    columns = ['num_gpu'] + list_test
    # One row per (system, gpu-count) configuration, in sorted system order.
    list_row = [name for _, value in sorted(list_system.items()) for name in value[1]]
    # Train throughput.
    df_throughput = pd.DataFrame(index=list_row, columns=columns)
    for key in list_system:
        for num_gpu, name in zip(list_system[key][0], list_system[key][1]):
            create_row_throughput(args.path, args.mode, args.data,
                                  args.precision, key, num_gpu, name,
                                  df_throughput)
    df_throughput.index.name = 'name_gpu'
    df_throughput.to_csv('tf-train-throughput-' + args.precision + '.csv')
    # Train batch size (per-GPU batch size scaled by GPU count).
    df_bs = pd.DataFrame(index=list_row, columns=columns)
    for key in list_system:
        for num_gpu, name in zip(list_system[key][0], list_system[key][1]):
            create_row_batch_size(args.path, args.mode, args.data,
                                  args.precision, key, num_gpu, name, df_bs)
    df_bs.index.name = 'name_gpu'
    df_bs.to_csv('tf-train-bs-' + args.precision + '.csv')
if __name__ == "__main__":
main()
| [
"pandas.DataFrame",
"glob.glob",
"argparse.ArgumentParser"
] | [((4377, 4441), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Gather benchmark results."""'}), "(description='Gather benchmark results.')\n", (4400, 4441), False, 'import argparse\n'), ((5443, 5488), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'list_row', 'columns': 'columns'}), '(index=list_row, columns=columns)\n', (5455, 5488), True, 'import pandas as pd\n'), ((6332, 6377), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'list_row', 'columns': 'columns'}), '(index=list_row, columns=columns)\n', (6344, 6377), True, 'import pandas as pd\n'), ((1975, 2030), 'glob.glob', 'glob.glob', (["(path_logs + '/' + folder + '/' + model + '*')"], {}), "(path_logs + '/' + folder + '/' + model + '*')\n", (1984, 2030), False, 'import glob\n')] |
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from ..type_checked import type_checked
from .survey_message_command_type import SurveyMessageCommandType
from .topology_response_body import TopologyResponseBody
__all__ = ["SurveyResponseBody"]
@type_checked
class SurveyResponseBody:
    """
    XDR Source Code::
        union SurveyResponseBody switch (SurveyMessageCommandType type)
        {
        case SURVEY_TOPOLOGY:
            TopologyResponseBody topologyResponseBody;
        };
    """
    def __init__(
        self,
        type: SurveyMessageCommandType,
        topology_response_body: TopologyResponseBody = None,
    ) -> None:
        self.type = type
        self.topology_response_body = topology_response_body
    def pack(self, packer: Packer) -> None:
        # XDR union: serialize the discriminant, then the arm it selects.
        self.type.pack(packer)
        if self.type == SurveyMessageCommandType.SURVEY_TOPOLOGY:
            if self.topology_response_body is None:
                raise ValueError("topology_response_body should not be None.")
            self.topology_response_body.pack(packer)
            return
    @classmethod
    def unpack(cls, unpacker: Unpacker) -> "SurveyResponseBody":
        # Read the discriminant first, then decode the selected arm.
        type = SurveyMessageCommandType.unpack(unpacker)
        if type == SurveyMessageCommandType.SURVEY_TOPOLOGY:
            topology_response_body = TopologyResponseBody.unpack(unpacker)
            return cls(type=type, topology_response_body=topology_response_body)
        return cls(type=type)
    def to_xdr_bytes(self) -> bytes:
        # Raw XDR byte serialization.
        packer = Packer()
        self.pack(packer)
        return packer.get_buffer()
    @classmethod
    def from_xdr_bytes(cls, xdr: bytes) -> "SurveyResponseBody":
        unpacker = Unpacker(xdr)
        return cls.unpack(unpacker)
    def to_xdr(self) -> str:
        # Base64-encoded XDR, the usual text/wire representation.
        xdr_bytes = self.to_xdr_bytes()
        return base64.b64encode(xdr_bytes).decode()
    @classmethod
    def from_xdr(cls, xdr: str) -> "SurveyResponseBody":
        xdr_bytes = base64.b64decode(xdr.encode())
        return cls.from_xdr_bytes(xdr_bytes)
    def __eq__(self, other: object):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (
            self.type == other.type
            and self.topology_response_body == other.topology_response_body
        )
    def __str__(self):
        out = []
        out.append(f"type={self.type}")
        out.append(
            f"topology_response_body={self.topology_response_body}"
        ) if self.topology_response_body is not None else None
        return f"<SurveyResponseBody {[', '.join(out)]}>"
| [
"xdrlib.Packer",
"base64.b64encode",
"xdrlib.Unpacker"
] | [((1621, 1629), 'xdrlib.Packer', 'Packer', ([], {}), '()\n', (1627, 1629), False, 'from xdrlib import Packer, Unpacker\n'), ((1793, 1806), 'xdrlib.Unpacker', 'Unpacker', (['xdr'], {}), '(xdr)\n', (1801, 1806), False, 'from xdrlib import Packer, Unpacker\n'), ((1928, 1955), 'base64.b64encode', 'base64.b64encode', (['xdr_bytes'], {}), '(xdr_bytes)\n', (1944, 1955), False, 'import base64\n')] |
# Generated by Django 3.1.5 on 2021-01-14 22:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Topic model and add a many-to-many Question↔Topic link."""

    dependencies = [
        ('poll', '0001_initial'),
    ]

    operations = [
        # New Topic model: auto primary key, short name, longer description.
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('topic_name', models.CharField(max_length=50)),
                # NOTE(review): keeps the original misspelling
                # 'topic_descrption' — renaming requires a follow-up migration.
                ('topic_descrption', models.CharField(max_length=255)),
            ],
        ),
        # M2M from Question to Topic; reverse accessor is 'questions'.
        migrations.AddField(
            model_name='question',
            name='topics',
            field=models.ManyToManyField(related_name='questions', to='poll.Topic'),
        ),
    ]
| [
"django.db.models.AutoField",
"django.db.models.ManyToManyField",
"django.db.models.CharField"
] | [((679, 744), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""questions"""', 'to': '"""poll.Topic"""'}), "(related_name='questions', to='poll.Topic')\n", (701, 744), False, 'from django.db import migrations, models\n'), ((315, 408), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (331, 408), False, 'from django.db import migrations, models\n'), ((438, 469), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (454, 469), False, 'from django.db import migrations, models\n'), ((509, 541), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (525, 541), False, 'from django.db import migrations, models\n')] |
import wikipedia
from topicblob import TopicBlob
# Demo: fetch a few Wikipedia summaries and explore them with TopicBlob.
wiki_pages = ["Facebook","New York City","Barack Obama","Wikipedia","Topic Modeling","Python (programming language)","Snapchat"]
# Use a smaller subset for the demo (overrides the list above).
wiki_pages = ["Facebook","New York City","Barack Obama"]
texts = [wikipedia.summary(page) for page in wiki_pages]

tb = TopicBlob(texts, 20, 50)

# Topic search for "social".
topic_search = tb.search_docs_by_topics("social")
print(topic_search)
print("\n")

# Ranked word search for "president".
search = tb.ranked_search_docs_by_words("president")
print(search)
print("\n")

# Documents similar to the first one.
print("Finding similar document for\n" + tb.blobs[0]["doc"])
print("\n")
sims = tb.get_sim(0)
for sim in sims:
    print(tb.get_doc(sim))
"topicblob.TopicBlob",
"wikipedia.summary"
] | [((393, 417), 'topicblob.TopicBlob', 'TopicBlob', (['texts', '(20)', '(50)'], {}), '(texts, 20, 50)\n', (402, 417), False, 'from topicblob import TopicBlob\n'), ((320, 343), 'wikipedia.summary', 'wikipedia.summary', (['page'], {}), '(page)\n', (337, 343), False, 'import wikipedia\n')] |
from sanic_jwt import exceptions
class User:
    """Minimal in-memory user record for the demo authentication flow."""

    def __init__(self, id, username, password):
        self.user_id = id
        self.username = username
        self.password = password

    def __repr__(self):
        return f"User(id='{self.user_id}')"

    def to_dict(self):
        """Return the JWT payload: user id and username (never the password)."""
        return dict(user_id=self.user_id, username=self.username)
# Demo credential store: a single hard-coded account.
# SECURITY NOTE(review): credentials committed in source — acceptable for a
# demo only, never for production.
users = [User(1, "opi-user", "~Zñujh*B2D`9T!<j")]
# Lookup tables keyed by username and by numeric user id.
username_table = {u.username: u for u in users}
userid_table = {u.user_id: u for u in users}
async def my_authenticate(request, *args, **kwargs):
    """sanic-jwt authenticate handler: validate the JSON-body credentials.

    Raises AuthenticationFailed when a field is missing/empty or the
    username/password pair does not match the stored user.
    """
    payload = request.json
    username = payload.get("username", None)
    password = payload.get("password", None)

    # Both fields must be present and non-empty.
    if not (username and password):
        raise exceptions.AuthenticationFailed("Missing username or password.")

    user = username_table.get(username)
    if user is None or user.password != password:
        raise exceptions.AuthenticationFailed("Incorrect username or password")

    return user
"sanic_jwt.exceptions.AuthenticationFailed"
] | [((708, 772), 'sanic_jwt.exceptions.AuthenticationFailed', 'exceptions.AuthenticationFailed', (['"""Missing username or password."""'], {}), "('Missing username or password.')\n", (739, 772), False, 'from sanic_jwt import exceptions\n'), ((884, 949), 'sanic_jwt.exceptions.AuthenticationFailed', 'exceptions.AuthenticationFailed', (['"""Incorrect username or password"""'], {}), "('Incorrect username or password')\n", (915, 949), False, 'from sanic_jwt import exceptions\n')] |
# coding=utf-8
def pdf2text(pdf_path, encoding="ASCII7"):
    """Convert the first page of *pdf_path* to text via the pdftotext CLI.

    Returns the path of the resulting .txt file (same basename as the PDF).
    """
    import subprocess
    import os.path
    pdf_path = os.path.abspath(pdf_path)
    subprocess.call(["pdftotext", "-l", "1", "-enc", encoding, "-q", pdf_path])
    root = os.path.splitext(pdf_path)[0]
    return root + ".txt"
def pick_out_doi(txt):
    """Return the first DOI found in the text file *txt*.

    Raises Warning when no DOI-shaped token is present (kept as Warning for
    backward compatibility with existing callers).
    """
    import re
    # DOI pattern: '10.NNNN.../suffix' with forbidden punctuation excluded.
    reg = re.compile(r'\b(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?!["&\'<>,])\S)+)\b')
    # Bug fix: close the file deterministically (the original leaked the
    # open handle).
    with open(txt) as body:
        m = reg.search(body.read())
    if m is None:
        raise Warning("DOI is not found.")
    return m.group(0)
def doi2bib(doi):
    """Resolve *doi* to a BibTeX entry via dx.doi.org content negotiation.

    NOTE: Python 2 only — relies on the ``urllib2`` module and the
    ``unicode`` builtin.
    """
    import urllib2
    uri = "http://dx.doi.org/"
    edoi = urllib2.quote(doi)
    url = uri + edoi
    # Ask the DOI resolver for a BibTeX rendering of the record.
    req = urllib2.Request(url, headers = {"Accept":"text/bibliography; style=bibtex"})
    bibstr = urllib2.urlopen(req).read()
    return unicode(bibstr, "utf-8")
| [
"urllib2.urlopen",
"re.compile",
"urllib2.Request",
"urllib2.quote",
"subprocess.call"
] | [((144, 219), 'subprocess.call', 'subprocess.call', (["['pdftotext', '-l', '1', '-enc', encoding, '-q', pdf_path]"], {}), "(['pdftotext', '-l', '1', '-enc', encoding, '-q', pdf_path])\n", (159, 219), False, 'import subprocess\n'), ((349, 423), 're.compile', 're.compile', (['"""\\\\b(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?!["&\\\\\'<>,])\\\\S)+)\\\\b"""'], {}), '(\'\\\\b(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?!["&\\\\\\\'<>,])\\\\S)+)\\\\b\')\n', (359, 423), False, 'import re\n'), ((629, 647), 'urllib2.quote', 'urllib2.quote', (['doi'], {}), '(doi)\n', (642, 647), False, 'import urllib2\n'), ((680, 755), 'urllib2.Request', 'urllib2.Request', (['url'], {'headers': "{'Accept': 'text/bibliography; style=bibtex'}"}), "(url, headers={'Accept': 'text/bibliography; style=bibtex'})\n", (695, 755), False, 'import urllib2\n'), ((770, 790), 'urllib2.urlopen', 'urllib2.urlopen', (['req'], {}), '(req)\n', (785, 790), False, 'import urllib2\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpointing the TextLineDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import combinations
from tensorflow.python.platform import test
class TextLineDatasetCheckpointTest(
    reader_dataset_ops_test_base.TextLineDatasetTestBase,
    checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
  """Checkpointing tests for `tf.data` TextLineDataset."""

  def _build_iterator_graph(self, test_filenames, compression_type=None):
    # Tiny buffer_size (10 bytes) deliberately forces buffered reads to
    # straddle checkpoint boundaries.
    return core_readers.TextLineDataset(
        test_filenames, compression_type=compression_type, buffer_size=10)

  @combinations.generate(test_base.default_test_combinations())
  def testTextLineCore(self):
    """Save/restore round-trips for every supported compression type."""
    compression_types = [None, "GZIP", "ZLIB"]
    num_files = 5
    lines_per_file = 5
    num_outputs = num_files * lines_per_file
    for compression_type in compression_types:
      # _createFiles / run_core_tests are provided by the base classes.
      test_filenames = self._createFiles(
          num_files,
          lines_per_file,
          crlf=True,
          compression_type=compression_type)
      # pylint: disable=cell-var-from-loop
      self.run_core_tests(
          lambda: self._build_iterator_graph(test_filenames, compression_type),
          num_outputs)
      # pylint: enable=cell-var-from-loop
if __name__ == "__main__":
  test.main()  # run via the TensorFlow test runner when executed directly
| [
"tensorflow.python.data.kernel_tests.test_base.default_test_combinations",
"tensorflow.python.data.ops.readers.TextLineDataset",
"tensorflow.python.platform.test.main"
] | [((2301, 2312), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (2310, 2312), False, 'from tensorflow.python.platform import test\n'), ((1520, 1620), 'tensorflow.python.data.ops.readers.TextLineDataset', 'core_readers.TextLineDataset', (['test_filenames'], {'compression_type': 'compression_type', 'buffer_size': '(10)'}), '(test_filenames, compression_type=\n compression_type, buffer_size=10)\n', (1548, 1620), True, 'from tensorflow.python.data.ops import readers as core_readers\n'), ((1651, 1688), 'tensorflow.python.data.kernel_tests.test_base.default_test_combinations', 'test_base.default_test_combinations', ([], {}), '()\n', (1686, 1688), False, 'from tensorflow.python.data.kernel_tests import test_base\n')] |
#Author:<NAME>
from django import forms
from apps.forms import FormMixin
class PublicCommentForm(forms.Form,FormMixin):
    """Form for posting a public comment on a news item."""
    # CharField max_length may be omitted on a form, but must be defined
    # on the model.  (Translated from the original Chinese comment.)
    content=forms.CharField()
    news_id=forms.IntegerField()
| [
"django.forms.IntegerField",
"django.forms.CharField"
] | [((176, 193), 'django.forms.CharField', 'forms.CharField', ([], {}), '()\n', (191, 193), False, 'from django import forms\n'), ((206, 226), 'django.forms.IntegerField', 'forms.IntegerField', ([], {}), '()\n', (224, 226), False, 'from django import forms\n')] |
### Data Preprocessing
## 1. Json to Transcript
## 2. Aligner
## 3. Text Replace
from jamo import h2j
import json
import os, re, tqdm
import unicodedata
from tqdm import tqdm
import hparams as hp
name = hp.dataset
first_dir = os.getcwd()
transcript = name + '_transcript.txt'
dict_name = name + '_korean_dict.txt'
data_dir = 'wavs'
json_label_dir = 'label'
def change_name(base_dir, format):
    """Rename speaker directories and their files to digits-only names.

    Non-digit characters are stripped with re.sub; collisions between
    resulting speaker names are resolved by replacing the leading digit
    with an incrementing counter.  *format* is the file extension
    ('wav'/'json') whose '.<ext>' suffix is re-appended after stripping.

    NOTE(review): relies on os.chdir side effects; the process CWD ends
    one level above *base_dir*'s parent entry point — callers depend on
    this (see the pipeline in __main__).
    """
    print('Change', format, 'name')
    cnt = 0
    speaker_table = os.listdir(base_dir)
    new_speaker_table = []
    for speaker in speaker_table:
        # Enter base_dir once, on the very first iteration.
        if cnt == 0:
            os.chdir(base_dir)
        new_speaker_name = re.sub(r'[^0-9]', '', speaker)
        overlap = 1
        # Collision: replace the first digit with a counter until unique.
        while new_speaker_name in new_speaker_table:
            print(new_speaker_name, 'is dangerous')
            new_speaker_name = str(overlap) + new_speaker_name[1:]
            overlap += 1
        new_speaker_table.append(re.sub(r'[^0-9]', '', new_speaker_name))
        print(new_speaker_name, 'ok')
        temp = 0
        for wav in os.listdir(speaker):
            # Enter the speaker directory once, on its first file.
            if temp == 0:
                os.chdir(speaker)
            new_wav_name = re.sub(r'[^0-9]', '', wav)
            # the new wav_name must be used as-is (translated comment)
            if new_wav_name[:len(new_speaker_name)] != wav:
                if new_wav_name[:len(new_speaker_name)] == new_speaker_name:
                    new_wav_name = new_wav_name + wav[-(len(format)+1):]
                else:
                    new_wav_name = new_speaker_name + new_wav_name + wav[-(len(format)+1):]
                os.rename(wav, new_wav_name)
            temp+=1; cnt +=1
        # Back out of the speaker dir, then rename the dir itself.
        # NOTE(review): if a speaker dir is empty the inner loop never runs
        # and this chdir('../') leaves base_dir prematurely — verify inputs.
        os.chdir('../')
        os.rename(speaker, new_speaker_name)
    print(cnt,'All Done', end='\n\n')
    os.chdir('../')
def json_to_transcripts():
    """Build the transcript file from per-utterance label JSONs.

    Each output line has the form
    ``<spk>/<wav>|<text>|<text>|<NFD text>|<duration>|None`` where <spk>
    is the first six characters of the wav name.
    """
    speakers = os.listdir(json_label_dir)
    speakers.sort()
    print(len(speakers), "speaker's are Sorted.")
    os.chdir(json_label_dir)
    utterance_text = []
    cnt = 1
    for speaker in speakers:
        for file in os.listdir(speaker):
            # Progress marker every 1000 utterances.
            if cnt % 1000 == 0:
                print(cnt, 'Done')
            utterance_set = []
            with open(os.path.join(speaker, file)) as f:
                json_data = json.load(f)
            # Matching wav name: swap the '.json' suffix for 'wav'
            # (assumes a 4-char 'json' extension — TODO confirm).
            utterance_set.append(file[:-4] + 'wav')
            # '발화정보' = utterance info; 'stt' holds the transcript text.
            utterance_set.append(line_replace(json_data['발화정보']['stt']))
            # NFD-normalize so Hangul syllables decompose into jamo.
            sep_text = unicodedata.normalize('NFD',line_replace(json_data['발화정보']['stt']))
            utterance_set.append(sep_text)
            # 'recrdTime' = recording duration, rounded to 0.1 s.
            utterance_set.append(round(float(json_data['발화정보']['recrdTime']),1))
            utterance_text.append(utterance_set)
            cnt+=1
    print(cnt-1, 'All Done')
    os.chdir('../')
    with open(transcript, "w") as file:
        for utt in utterance_text:
            file.write(utt[0][:6] + '/' + utt[0] + '|' + utt[1] + '|' + utt[1] + '|' + utt[2] + '|' + str(utt[3]) + '|' + 'None\n')
def line_replace(line):
    """Strip annotation tags and punctuation from a transcript line.

    Removes speaker/noise/number markup tokens ('(SP:', '(SN:', '(NO:',
    'spn'), assorted punctuation, and collapses one level of double
    spaces.  Order matters: full '(TAG:)' tokens are removed before the
    bare parentheses.
    """
    # Inline annotation tags (closed form first, then the open form).
    line = line.replace('(SP:)', '')
    line = line.replace('(SP:', '')
    line = line.replace('(SN:)', '')
    line = line.replace('(SN:', '')
    line = line.replace('(NO:)', '')
    line = line.replace('(NO:', '')
    line = line.replace('spn', '')
    # NOTE(review): the next four replacements appear to target invisible
    # or control characters that do not render here — verify against the
    # original file's encoding before editing them.
    line = line.replace('', '')
    line = line.replace('', '')
    line = line.replace('', '')
    line = line.replace('', '')
    line = line.replace('毛', '')
    # Punctuation removal.
    line = line.replace(')', '')
    line = line.replace('(', '')
    line = line.replace('"', '')
    line = line.replace('.', '')
    line = line.replace('[', '')
    line = line.replace(',', '')
    line = line.replace('!', '')
    line = line.replace('?', '')
    line = line.replace(']', '')
    # NOTE(review): '.' is removed twice — this second call is a no-op.
    line = line.replace('.', '')
    # Collapse double spaces (single pass only).
    line = line.replace('  ', ' ')
    return line
def aligner():
    """Write per-utterance .lab files and build the jamo dictionary.

    Reads the transcript file, cleans each script, writes it next to its
    wav under ``wavs/<spk>/<id>.lab``, then maps every space-separated
    (jamo-decomposed) word to its space-joined characters in the
    dictionary file used by MFA g2p training.
    """
    filters = '([.,!?])"'
    file_list = []
    with open(transcript, 'r', encoding='utf-8') as f:
        for line in f.readlines():
            temp = line.split('|')
            # Field 0 is '<spk>/<file>.wav'; field 3 is the script text.
            file_dir, script = temp[0], temp[3]
            script = re.sub(re.compile(filters), '', script)
            script = line_replace(script) # !!! key cleanup/deletion happens here (translated)
            #file_dir = file_dir.split('/')  # in case of splitting per folder (translated)
            # '.wav' -> '.lab' by swapping the last three characters.
            fn = file_dir[:-3] + 'lab'
            file_dir = os.path.join(data_dir, fn)
            #print(file_dir)
            with open(file_dir, 'w', encoding='utf-8') as f:
                f.write(script)
            # NOTE(review): os.path.join with one argument is a no-op.
            file_list.append(os.path.join(file_dir))
    jamo_dict = {}
    for file_name in tqdm(file_list):
        sentence = open(file_name, 'r', encoding='utf-8').readline()
        # Decompose Hangul into jamo, then split into words.
        jamo = h2j(sentence).split(' ')
        for i, s in enumerate(jamo):
            if s not in jamo_dict:
                # Map each word to its characters joined by spaces.
                jamo_dict[s] = ' '.join(jamo[i])
    with open(dict_name, 'w', encoding='utf-8') as f:
        for key in jamo_dict.keys():
            content = '{}\t{}\n'.format(key, jamo_dict[key])
            f.write(content)
    print("Aligner Done\n")
def mfa_train():
    """Run the Montreal Forced Aligner pipeline via shell commands.

    Trains a g2p model from the jamo dictionary, generates a lexicon for
    the corpus, trains the acoustic model, then zips the resulting
    TextGrids and moves the archive back to the launch directory.
    NOTE(review): os.system with hard-coded paths — requires the 'mfa'
    and 'zip' CLIs on PATH and a POSIX shell; no error checking.
    """
    print("MFA Training Start.. \n")
    os.system('mfa train_g2p ' + dict_name + ' ' + name + '_korean.zip --clear')
    print("MFA train_g2p Done\n")
    os.system('mfa g2p ' + name + '_korean.zip ' + data_dir + ' ' + name + '_korean.txt')
    print("MFA g2p Done\n")
    os.system('mfa train ' + data_dir + ' ' + name + '_korean.txt ./textgrids --clean')
    os.system('mv ~/Documents/MFA/wavs_train_acoustic_model/sat_2_ali/textgrids ./')
    os.system('zip -r textgrids.zip textgrids')
    os.system('mv textgrids.zip ' + first_dir) # move to the main dir (translated)
    print("MFA Training Done! \n")
def lab_separate():
    """Move every .lab transcript out of wavs/<speaker>/ into lab/<speaker>/.

    Creates the 'lab' tree mirroring the speaker directories and moves all
    files whose name ends in 'lab'.  Raises FileExistsError if 'lab'
    already exists (same as the original os.mkdir behaviour).
    """
    # Bug fix: the original shelled out via os.system('mv ...') with string
    # concatenation, which breaks on paths containing spaces and is not
    # portable; shutil.move is the equivalent, portable operation.
    import shutil
    speaker_list = os.listdir('wavs')
    os.mkdir('lab')
    for speaker in speaker_list:
        os.mkdir(os.path.join('lab', speaker))
        lab_list = os.listdir(os.path.join('wavs', speaker))
        for lab in lab_list:
            if lab.endswith('lab'):  # same check as lab[-3:] == 'lab'
                shutil.move(os.path.join('wavs', speaker, lab),
                            os.path.join('lab', speaker))
if __name__ == '__main__':
    # Preprocessing pipeline: normalize names -> write .lab files and the
    # jamo dictionary -> train MFA -> separate labels from wavs.
    os.chdir('dataset/' + hp.dataset)
    change_name('wavs', 'wav')
    #change_name('label', 'json')
    #json_to_transcripts()
    aligner()
    mfa_train()
    lab_separate()
"jamo.h2j",
"os.listdir",
"re.compile",
"os.rename",
"tqdm.tqdm",
"os.path.join",
"os.getcwd",
"os.chdir",
"json.load",
"os.mkdir",
"re.sub",
"os.system"
] | [((244, 255), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (253, 255), False, 'import os, re, tqdm\n'), ((494, 514), 'os.listdir', 'os.listdir', (['base_dir'], {}), '(base_dir)\n', (504, 514), False, 'import os, re, tqdm\n'), ((1843, 1858), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (1851, 1858), False, 'import os, re, tqdm\n'), ((1907, 1933), 'os.listdir', 'os.listdir', (['json_label_dir'], {}), '(json_label_dir)\n', (1917, 1933), False, 'import os, re, tqdm\n'), ((2011, 2035), 'os.chdir', 'os.chdir', (['json_label_dir'], {}), '(json_label_dir)\n', (2019, 2035), False, 'import os, re, tqdm\n'), ((2881, 2896), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (2889, 2896), False, 'import os, re, tqdm\n'), ((4743, 4758), 'tqdm.tqdm', 'tqdm', (['file_list'], {}), '(file_list)\n', (4747, 4758), False, 'from tqdm import tqdm\n'), ((5287, 5363), 'os.system', 'os.system', (["('mfa train_g2p ' + dict_name + ' ' + name + '_korean.zip --clear')"], {}), "('mfa train_g2p ' + dict_name + ' ' + name + '_korean.zip --clear')\n", (5296, 5363), False, 'import os, re, tqdm\n'), ((5406, 5495), 'os.system', 'os.system', (["('mfa g2p ' + name + '_korean.zip ' + data_dir + ' ' + name + '_korean.txt')"], {}), "('mfa g2p ' + name + '_korean.zip ' + data_dir + ' ' + name +\n '_korean.txt')\n", (5415, 5495), False, 'import os, re, tqdm\n'), ((5531, 5618), 'os.system', 'os.system', (["('mfa train ' + data_dir + ' ' + name + '_korean.txt ./textgrids --clean')"], {}), "('mfa train ' + data_dir + ' ' + name +\n '_korean.txt ./textgrids --clean')\n", (5540, 5618), False, 'import os, re, tqdm\n'), ((5626, 5711), 'os.system', 'os.system', (['"""mv ~/Documents/MFA/wavs_train_acoustic_model/sat_2_ali/textgrids ./"""'], {}), "('mv ~/Documents/MFA/wavs_train_acoustic_model/sat_2_ali/textgrids ./'\n )\n", (5635, 5711), False, 'import os, re, tqdm\n'), ((5712, 5755), 'os.system', 'os.system', (['"""zip -r textgrids.zip textgrids"""'], {}), "('zip -r textgrids.zip 
textgrids')\n", (5721, 5755), False, 'import os, re, tqdm\n'), ((5761, 5803), 'os.system', 'os.system', (["('mv textgrids.zip ' + first_dir)"], {}), "('mv textgrids.zip ' + first_dir)\n", (5770, 5803), False, 'import os, re, tqdm\n'), ((5902, 5920), 'os.listdir', 'os.listdir', (['"""wavs"""'], {}), "('wavs')\n", (5912, 5920), False, 'import os, re, tqdm\n'), ((5926, 5941), 'os.mkdir', 'os.mkdir', (['"""lab"""'], {}), "('lab')\n", (5934, 5941), False, 'import os, re, tqdm\n'), ((6260, 6293), 'os.chdir', 'os.chdir', (["('dataset/' + hp.dataset)"], {}), "('dataset/' + hp.dataset)\n", (6268, 6293), False, 'import os, re, tqdm\n'), ((680, 709), 're.sub', 're.sub', (['"""[^0-9]"""', '""""""', 'speaker'], {}), "('[^0-9]', '', speaker)\n", (686, 709), False, 'import os, re, tqdm\n'), ((1115, 1134), 'os.listdir', 'os.listdir', (['speaker'], {}), '(speaker)\n', (1125, 1134), False, 'import os, re, tqdm\n'), ((1737, 1752), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (1745, 1752), False, 'import os, re, tqdm\n'), ((1762, 1798), 'os.rename', 'os.rename', (['speaker', 'new_speaker_name'], {}), '(speaker, new_speaker_name)\n', (1771, 1798), False, 'import os, re, tqdm\n'), ((2127, 2146), 'os.listdir', 'os.listdir', (['speaker'], {}), '(speaker)\n', (2137, 2146), False, 'import os, re, tqdm\n'), ((5985, 6011), 'os.mkdir', 'os.mkdir', (["('lab/' + speaker)"], {}), "('lab/' + speaker)\n", (5993, 6011), False, 'import os, re, tqdm\n'), ((619, 637), 'os.chdir', 'os.chdir', (['base_dir'], {}), '(base_dir)\n', (627, 637), False, 'import os, re, tqdm\n'), ((987, 1025), 're.sub', 're.sub', (['"""[^0-9]"""', '""""""', 'new_speaker_name'], {}), "('[^0-9]', '', new_speaker_name)\n", (993, 1025), False, 'import os, re, tqdm\n'), ((1226, 1251), 're.sub', 're.sub', (['"""[^0-9]"""', '""""""', 'wav'], {}), "('[^0-9]', '', wav)\n", (1232, 1251), False, 'import os, re, tqdm\n'), ((4491, 4517), 'os.path.join', 'os.path.join', (['data_dir', 'fn'], {}), '(data_dir, fn)\n', (4503, 4517), 
False, 'import os, re, tqdm\n'), ((6043, 6072), 'os.path.join', 'os.path.join', (['"""wavs"""', 'speaker'], {}), "('wavs', speaker)\n", (6055, 6072), False, 'import os, re, tqdm\n'), ((1180, 1197), 'os.chdir', 'os.chdir', (['speaker'], {}), '(speaker)\n', (1188, 1197), False, 'import os, re, tqdm\n'), ((1641, 1669), 'os.rename', 'os.rename', (['wav', 'new_wav_name'], {}), '(wav, new_wav_name)\n', (1650, 1669), False, 'import os, re, tqdm\n'), ((2338, 2350), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2347, 2350), False, 'import json\n'), ((4250, 4269), 're.compile', 're.compile', (['filters'], {}), '(filters)\n', (4260, 4269), False, 'import os, re, tqdm\n'), ((4675, 4697), 'os.path.join', 'os.path.join', (['file_dir'], {}), '(file_dir)\n', (4687, 4697), False, 'import os, re, tqdm\n'), ((4847, 4860), 'jamo.h2j', 'h2j', (['sentence'], {}), '(sentence)\n', (4850, 4860), False, 'from jamo import h2j\n'), ((6156, 6219), 'os.system', 'os.system', (["('mv wavs/' + speaker + '/' + lab + ' lab/' + speaker)"], {}), "('mv wavs/' + speaker + '/' + lab + ' lab/' + speaker)\n", (6165, 6219), False, 'import os, re, tqdm\n'), ((2274, 2301), 'os.path.join', 'os.path.join', (['speaker', 'file'], {}), '(speaker, file)\n', (2286, 2301), False, 'import os, re, tqdm\n')] |
#!/usr/bin/env python3
import requests
import os
import json
# Hostname and API token come from the environment; the script deletes every
# component group (and its components) from the Cachet status page.
CACHET_HOSTNAME = os.environ.get("CACHET_HOSTNAME")
URL = f"https://{CACHET_HOSTNAME}/api/v1/components"
HEADERS = {
    'X-Cachet-Token': os.environ.get("CACHET_TOKEN")
}
# SECURITY NOTE(review): verify=False below disables TLS certificate
# validation on every request — acceptable only for self-signed internal
# instances; remove it otherwise.
with requests.Session() as session:
    session.headers.update(HEADERS)
    response = session.get(URL + "/groups", verify=False)
    groups = response.json()['data']
    print("Number of groups found: " + str(len(groups)))
    for group in groups:
        components = group['enabled_components']
        print(group['name'] + " contains " + str(len(components)) + " components")
        for component in components:
            print("Deleting component: " + component['name'])
            cdr = session.delete(URL + "/" + str(component['id']), verify=False, )
            print (cdr)
        # delete the group
        print("Deleting group " + group['name'])
        gdr = session.delete(URL + "/groups/" + str(group['id']), verify=False, )
        print(gdr)
    # check and delete components not in any groups
    response = session.get(URL, verify=False)
    components = response.json()['data']
    print("Number of components not in any group: " + str(len(components)))
    for component in components:
        print("Deleting component: " + component['name'])
        cdr = session.delete(URL + "/" + str(component['id']), verify=False, )
        print (cdr)
    print("Done!!!")
| [
"os.environ.get",
"requests.Session"
] | [((81, 114), 'os.environ.get', 'os.environ.get', (['"""CACHET_HOSTNAME"""'], {}), "('CACHET_HOSTNAME')\n", (95, 114), False, 'import os\n'), ((203, 233), 'os.environ.get', 'os.environ.get', (['"""CACHET_TOKEN"""'], {}), "('CACHET_TOKEN')\n", (217, 233), False, 'import os\n'), ((242, 260), 'requests.Session', 'requests.Session', ([], {}), '()\n', (258, 260), False, 'import requests\n')] |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright (c) 2016 France-IOI, MIT license
#
# http://opensource.org/licenses/MIT
# This tool launches an isolated execution. It is intended as a wrapper around
# the execution of any command.
import argparse, os, sys
# Default sandbox parameters: 60 s time limit, 128 MiB memory, no cache.
# The -1 truncation values presumably mean "no truncation" — confirm
# against the taskgrader implementation.
DEFAULT_EXECPARAMS = {
    'timeLimitMs': 60000,
    'memoryLimitKb': 128*1024,
    'useCache': False,
    'stdoutTruncateKb': -1,
    'stderrTruncateKb': -1,
    'getFiles': []
    }

# Add taskgrader folder to PATH
SELFDIR = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(SELFDIR, '../'))
from taskgrader import IsolatedExecution
if __name__ == '__main__':
    # Bug fix: the argparse description was copy-pasted from an unrelated
    # JSON-bundling tool; per the file header this script wraps a command
    # in an isolated execution.
    argParser = argparse.ArgumentParser(description="Launches an isolated execution of any command, as a wrapper around the taskgrader sandbox.")

    argParser.add_argument('-i', '--stdin', help='Set file to pass on stdin.')
    argParser.add_argument('-m', '--memory-limit', help='Set memory limit for execution, in kilobytes.', type=int)
    argParser.add_argument('-t', '--time-limit', help='Set time limit for execution, in milliseconds.', type=int)
    argParser.add_argument('-p', '--path', help='Set the working directory for the execution.', default='.')
    argParser.add_argument('args', nargs=argparse.REMAINDER)

    args = argParser.parse_args()

    # Check cmd line
    if not args.args:
        argParser.error("No command specified.")
    # argparse may leave the literal '--' separator in the REMAINDER args.
    if '--' in args.args: args.args.remove('--')

    # Set up execution parameters, letting CLI flags override the defaults.
    execParams = {}
    execParams.update(DEFAULT_EXECPARAMS)
    if args.memory_limit: execParams['memoryLimitKb'] = args.memory_limit
    if args.time_limit: execParams['timeLimitMs'] = args.time_limit

    # Prepare files
    cmdLine = ' '.join(args.args)
    stdoutPath = os.path.join(args.path, 'isolate-run.stdout')

    # Launch the isolated execution
    execution = IsolatedExecution(None, execParams, cmdLine)
    report = execution.execute(args.path, stdinFile=args.stdin, stdoutFile=stdoutPath)

    # Relay the captured stdout/stderr and propagate the child's exit code.
    sys.stdout.write(open(stdoutPath, 'r').read())
    sys.stderr.write(report['stderr']['data'])
    sys.exit(report['exitCode'])
| [
"argparse.ArgumentParser",
"os.path.join",
"sys.stderr.write",
"sys.exit",
"os.path.abspath",
"taskgrader.IsolatedExecution"
] | [((577, 605), 'os.path.join', 'os.path.join', (['SELFDIR', '"""../"""'], {}), "(SELFDIR, '../')\n", (589, 605), False, 'import argparse, os, sys\n'), ((694, 856), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Makes a \'standalone\' JSON file, bundling files referenced by path into the JSON to remove any reference to paths."""'}), '(description=\n "Makes a \'standalone\' JSON file, bundling files referenced by path into the JSON to remove any reference to paths."\n )\n', (717, 856), False, 'import argparse, os, sys\n'), ((1813, 1858), 'os.path.join', 'os.path.join', (['args.path', '"""isolate-run.stdout"""'], {}), "(args.path, 'isolate-run.stdout')\n", (1825, 1858), False, 'import argparse, os, sys\n'), ((1912, 1956), 'taskgrader.IsolatedExecution', 'IsolatedExecution', (['None', 'execParams', 'cmdLine'], {}), '(None, execParams, cmdLine)\n', (1929, 1956), False, 'from taskgrader import IsolatedExecution\n'), ((2100, 2142), 'sys.stderr.write', 'sys.stderr.write', (["report['stderr']['data']"], {}), "(report['stderr']['data'])\n", (2116, 2142), False, 'import argparse, os, sys\n'), ((2147, 2175), 'sys.exit', 'sys.exit', (["report['exitCode']"], {}), "(report['exitCode'])\n", (2155, 2175), False, 'import argparse, os, sys\n'), ((533, 558), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (548, 558), False, 'import argparse, os, sys\n')] |
import sys
# Fast stdin aliases for competitive-programming I/O.
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
from collections import deque
# n squares, die faces 1..m; s is the square string read back-to-front so
# that s[index + j] describes the square j steps ahead of the current one.
n, m = map(int, readline().split())
s = readline().rstrip().decode()[::-1]
index = 0
ans = deque([])
for i in range(n):
    # Greedily try the largest jump first; appendleft rebuilds the move
    # sequence in forward order since we scan the reversed string.
    for j in range(m, 0, -1):
        if index + j >= n:
            # This jump reaches/passes the goal: emit the final move.
            ans.appendleft(n - index)
            print(*ans)
            exit()
        if s[index + j] == '0':
            # '0' marks a safe square; take the jump.
            ans.appendleft(j)
            index += j
            break
    else:
        # No face 1..m lands on a safe square: unsolvable.
        print(-1)
        exit()
| [
"sys.setrecursionlimit",
"collections.deque"
] | [((116, 146), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 7)'], {}), '(10 ** 7)\n', (137, 146), False, 'import sys\n'), ((270, 279), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (275, 279), False, 'from collections import deque\n')] |
"""Unit test to test app."""
import os
from unittest import TestCase
import mock
from src import app
class TestApp(TestCase):
    """Unit tests for helper functions and the run() entry point of src.app."""

    def test_valid_env(self):
        """get_or_raise returns normally when the variable is set."""
        key = 'ENV_1'
        os.environ[key] = 'test'
        app.get_or_raise(key)
        del os.environ[key]

    def test_invalid_env(self):
        """get_or_raise raises RuntimeError for a missing variable."""
        with self.assertRaises(RuntimeError):
            app.get_or_raise('ENV_2')

    def test_valid_bool(self):
        """Truthy spellings parse as True."""
        self.assertEqual(app.str_to_bool('True'), True)
        self.assertEqual(app.str_to_bool('t'), True)
        self.assertEqual(app.str_to_bool('1'), True)
        self.assertEqual(app.str_to_bool('YES'), True)

    def test_invalid_bool(self):
        """Unrecognized strings parse as False."""
        self.assertEqual(app.str_to_bool(''), False)
        self.assertEqual(app.str_to_bool('test'), False)

    def test_invalid_format(self):
        """Non-string input yields None."""
        self.assertEqual(app.str_to_bool(True), None)

    # Bug fix: @mock.patch decorators apply bottom-up, so the mock for the
    # bottom decorator ('subprocess.Popen') is injected FIRST.  The
    # original parameter names were swapped; corrected below.
    @mock.patch('src.app.prepare_avd')
    @mock.patch('subprocess.Popen')
    def test_run_with_appium(self, mocked_popen, mocked_prepare_avd):
        """run() prepares the AVD, spawns the emulator and starts Appium."""
        with mock.patch('src.app.appium_run') as mocked_appium:
            os.environ['APPIUM'] = str(True)
            app.run()
            self.assertTrue(mocked_prepare_avd.called)
            self.assertTrue(mocked_popen.called)
            self.assertTrue(mocked_appium.called)

    @mock.patch('src.app.prepare_avd')
    @mock.patch('subprocess.Popen')
    def test_run_without_appium(self, mocked_popen, mocked_prepare_avd):
        """run() skips Appium when the APPIUM env var is false."""
        # (Also fixes the 'withhout' typo in the original method name.)
        with mock.patch('src.app.appium_run') as mocked_appium:
            os.environ['APPIUM'] = str(False)
            app.run()
            self.assertTrue(mocked_prepare_avd.called)
            self.assertTrue(mocked_popen.called)
            self.assertFalse(mocked_appium.called)
| [
"src.app.run",
"mock.patch",
"src.app.str_to_bool",
"src.app.get_or_raise"
] | [((940, 973), 'mock.patch', 'mock.patch', (['"""src.app.prepare_avd"""'], {}), "('src.app.prepare_avd')\n", (950, 973), False, 'import mock\n'), ((979, 1009), 'mock.patch', 'mock.patch', (['"""subprocess.Popen"""'], {}), "('subprocess.Popen')\n", (989, 1009), False, 'import mock\n'), ((1365, 1398), 'mock.patch', 'mock.patch', (['"""src.app.prepare_avd"""'], {}), "('src.app.prepare_avd')\n", (1375, 1398), False, 'import mock\n'), ((1404, 1434), 'mock.patch', 'mock.patch', (['"""subprocess.Popen"""'], {}), "('subprocess.Popen')\n", (1414, 1434), False, 'import mock\n'), ((284, 305), 'src.app.get_or_raise', 'app.get_or_raise', (['key'], {}), '(key)\n', (300, 305), False, 'from src import app\n'), ((425, 450), 'src.app.get_or_raise', 'app.get_or_raise', (['"""ENV_2"""'], {}), "('ENV_2')\n", (441, 450), False, 'from src import app\n'), ((508, 531), 'src.app.str_to_bool', 'app.str_to_bool', (['"""True"""'], {}), "('True')\n", (523, 531), False, 'from src import app\n'), ((564, 584), 'src.app.str_to_bool', 'app.str_to_bool', (['"""t"""'], {}), "('t')\n", (579, 584), False, 'from src import app\n'), ((617, 637), 'src.app.str_to_bool', 'app.str_to_bool', (['"""1"""'], {}), "('1')\n", (632, 637), False, 'from src import app\n'), ((670, 692), 'src.app.str_to_bool', 'app.str_to_bool', (['"""YES"""'], {}), "('YES')\n", (685, 692), False, 'from src import app\n'), ((759, 778), 'src.app.str_to_bool', 'app.str_to_bool', (['""""""'], {}), "('')\n", (774, 778), False, 'from src import app\n'), ((812, 835), 'src.app.str_to_bool', 'app.str_to_bool', (['"""test"""'], {}), "('test')\n", (827, 835), False, 'from src import app\n'), ((905, 926), 'src.app.str_to_bool', 'app.str_to_bool', (['(True)'], {}), '(True)\n', (920, 926), False, 'from src import app\n'), ((1090, 1122), 'mock.patch', 'mock.patch', (['"""src.app.appium_run"""'], {}), "('src.app.appium_run')\n", (1100, 1122), False, 'import mock\n'), ((1198, 1207), 'src.app.run', 'app.run', ([], {}), '()\n', (1205, 1207), False, 
'from src import app\n'), ((1519, 1551), 'mock.patch', 'mock.patch', (['"""src.app.appium_run"""'], {}), "('src.app.appium_run')\n", (1529, 1551), False, 'import mock\n'), ((1628, 1637), 'src.app.run', 'app.run', ([], {}), '()\n', (1635, 1637), False, 'from src import app\n')] |
#!/usr/bin/python3
import os
import sys
from http.server import HTTPServer, BaseHTTPRequestHandler
class MyHandler(BaseHTTPRequestHandler):
    """Minimal mock of an OpenShift API used by tests.

    GET requests for known deploymentconfig paths return canned JSON
    fixtures from the ``responses/`` directory next to this script;
    PATCH/PUT/DELETE always answer 200; GET /halt stops the server.
    """

    def _send_json_file(self, relpath):
        """Send a 200 JSON response whose body is the fixture *relpath*."""
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        fixture = os.path.join(sys.path[0], relpath)
        # Close the fixture file deterministically (original leaked handles).
        with open(fixture, "r") as fh:
            response = fh.read()
        self.wfile.write(response.encode(encoding='utf_8'))

    def do_GET(self):
        # do not change paths
        if self.path == '/apis/apps.openshift.io/v1/namespaces/testNamespace/deploymentconfigs?labelSelector=services.server.kie.org%2Fkie-server-id%3Drhpam-kieserevr-scale-up':
            self._send_json_file("responses/kieserver-dc.json")
        # do not change paths
        if self.path == '/apis/apps.openshift.io/v1/namespaces/testNamespace/deploymentconfigs?labelSelector=services.server.kie.org%2Fkie-server-id%3Drhpam-kieserevr-scale-down':
            self._send_json_file("responses/kieserver-dc-0-replicas.json")
        if self.path == '/apis/apps.openshift.io/v1/namespaces/testNamespace/deploymentconfigs/rhpam-central-console':
            self._send_json_file("responses/bc-dc.json")
        if self.path == '/halt':
            print("Halting server")
            self.send_response(200)
            self.end_headers()
            # SystemExit propagates out of the handler and stops the
            # single-threaded server process.
            sys.exit()

    # Bug fix for the three methods below: send_response only buffers the
    # status line; without end_headers() it is never flushed to the client.
    def do_PATCH(self):
        """Return 200 for any PATCH."""
        self.send_response(200)
        self.end_headers()

    def do_PUT(self):
        """Return 200 for any PUT."""
        self.send_response(200)
        self.end_headers()

    def do_DELETE(self):
        """Return 200 for any DELETE."""
        self.send_response(200)
        self.end_headers()
# Serve the stub API on localhost:8080 until a GET /halt request stops it.
httpd = HTTPServer(("localhost", 8080), MyHandler)
httpd.serve_forever()
| [
"http.server.HTTPServer",
"os.path.join",
"sys.exit"
] | [((2158, 2200), 'http.server.HTTPServer', 'HTTPServer', (["('localhost', 8080)", 'MyHandler'], {}), "(('localhost', 8080), MyHandler)\n", (2168, 2200), False, 'from http.server import HTTPServer, BaseHTTPRequestHandler\n'), ((525, 581), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""responses/kieserver-dc.json"""'], {}), "(sys.path[0], 'responses/kieserver-dc.json')\n", (537, 581), False, 'import os\n'), ((1054, 1121), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""responses/kieserver-dc-0-replicas.json"""'], {}), "(sys.path[0], 'responses/kieserver-dc-0-replicas.json')\n", (1066, 1121), False, 'import os\n'), ((1503, 1552), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""responses/bc-dc.json"""'], {}), "(sys.path[0], 'responses/bc-dc.json')\n", (1515, 1552), False, 'import os\n'), ((1812, 1822), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1820, 1822), False, 'import sys\n')] |
# coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from octopus_deploy_swagger_client.models.retention_period import RetentionPeriod # noqa: F401,E501
class PhaseResource(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each python attribute name to its declared Swagger type.
    swagger_types = {
        'id': 'str',
        'name': 'str',
        'automatic_deployment_targets': 'list[str]',
        'optional_deployment_targets': 'list[str]',
        'minimum_environments_before_promotion': 'int',
        'is_optional_phase': 'bool',
        'release_retention_policy': 'RetentionPeriod',
        'tentacle_retention_policy': 'RetentionPeriod'
    }
    # Maps each python attribute name to its JSON key in the API payload.
    attribute_map = {
        'id': 'Id',
        'name': 'Name',
        'automatic_deployment_targets': 'AutomaticDeploymentTargets',
        'optional_deployment_targets': 'OptionalDeploymentTargets',
        'minimum_environments_before_promotion': 'MinimumEnvironmentsBeforePromotion',
        'is_optional_phase': 'IsOptionalPhase',
        'release_retention_policy': 'ReleaseRetentionPolicy',
        'tentacle_retention_policy': 'TentacleRetentionPolicy'
    }
    def __init__(self, id=None, name=None, automatic_deployment_targets=None, optional_deployment_targets=None, minimum_environments_before_promotion=None, is_optional_phase=None, release_retention_policy=None, tentacle_retention_policy=None):  # noqa: E501
        """PhaseResource - a model defined in Swagger"""  # noqa: E501
        self._id = None
        self._name = None
        self._automatic_deployment_targets = None
        self._optional_deployment_targets = None
        self._minimum_environments_before_promotion = None
        self._is_optional_phase = None
        self._release_retention_policy = None
        self._tentacle_retention_policy = None
        self.discriminator = None
        # Only assign attributes that were actually supplied; the property
        # setters below perform the assignment to the private fields.
        if id is not None:
            self.id = id
        if name is not None:
            self.name = name
        if automatic_deployment_targets is not None:
            self.automatic_deployment_targets = automatic_deployment_targets
        if optional_deployment_targets is not None:
            self.optional_deployment_targets = optional_deployment_targets
        if minimum_environments_before_promotion is not None:
            self.minimum_environments_before_promotion = minimum_environments_before_promotion
        if is_optional_phase is not None:
            self.is_optional_phase = is_optional_phase
        if release_retention_policy is not None:
            self.release_retention_policy = release_retention_policy
        if tentacle_retention_policy is not None:
            self.tentacle_retention_policy = tentacle_retention_policy
    @property
    def id(self):
        """Gets the id of this PhaseResource.  # noqa: E501
        :return: The id of this PhaseResource.  # noqa: E501
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this PhaseResource.
        :param id: The id of this PhaseResource.  # noqa: E501
        :type: str
        """
        self._id = id
    @property
    def name(self):
        """Gets the name of this PhaseResource.  # noqa: E501
        :return: The name of this PhaseResource.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this PhaseResource.
        :param name: The name of this PhaseResource.  # noqa: E501
        :type: str
        """
        self._name = name
    @property
    def automatic_deployment_targets(self):
        """Gets the automatic_deployment_targets of this PhaseResource.  # noqa: E501
        :return: The automatic_deployment_targets of this PhaseResource.  # noqa: E501
        :rtype: list[str]
        """
        return self._automatic_deployment_targets
    @automatic_deployment_targets.setter
    def automatic_deployment_targets(self, automatic_deployment_targets):
        """Sets the automatic_deployment_targets of this PhaseResource.
        :param automatic_deployment_targets: The automatic_deployment_targets of this PhaseResource.  # noqa: E501
        :type: list[str]
        """
        self._automatic_deployment_targets = automatic_deployment_targets
    @property
    def optional_deployment_targets(self):
        """Gets the optional_deployment_targets of this PhaseResource.  # noqa: E501
        :return: The optional_deployment_targets of this PhaseResource.  # noqa: E501
        :rtype: list[str]
        """
        return self._optional_deployment_targets
    @optional_deployment_targets.setter
    def optional_deployment_targets(self, optional_deployment_targets):
        """Sets the optional_deployment_targets of this PhaseResource.
        :param optional_deployment_targets: The optional_deployment_targets of this PhaseResource.  # noqa: E501
        :type: list[str]
        """
        self._optional_deployment_targets = optional_deployment_targets
    @property
    def minimum_environments_before_promotion(self):
        """Gets the minimum_environments_before_promotion of this PhaseResource.  # noqa: E501
        :return: The minimum_environments_before_promotion of this PhaseResource.  # noqa: E501
        :rtype: int
        """
        return self._minimum_environments_before_promotion
    @minimum_environments_before_promotion.setter
    def minimum_environments_before_promotion(self, minimum_environments_before_promotion):
        """Sets the minimum_environments_before_promotion of this PhaseResource.
        :param minimum_environments_before_promotion: The minimum_environments_before_promotion of this PhaseResource.  # noqa: E501
        :type: int
        """
        self._minimum_environments_before_promotion = minimum_environments_before_promotion
    @property
    def is_optional_phase(self):
        """Gets the is_optional_phase of this PhaseResource.  # noqa: E501
        :return: The is_optional_phase of this PhaseResource.  # noqa: E501
        :rtype: bool
        """
        return self._is_optional_phase
    @is_optional_phase.setter
    def is_optional_phase(self, is_optional_phase):
        """Sets the is_optional_phase of this PhaseResource.
        :param is_optional_phase: The is_optional_phase of this PhaseResource.  # noqa: E501
        :type: bool
        """
        self._is_optional_phase = is_optional_phase
    @property
    def release_retention_policy(self):
        """Gets the release_retention_policy of this PhaseResource.  # noqa: E501
        :return: The release_retention_policy of this PhaseResource.  # noqa: E501
        :rtype: RetentionPeriod
        """
        return self._release_retention_policy
    @release_retention_policy.setter
    def release_retention_policy(self, release_retention_policy):
        """Sets the release_retention_policy of this PhaseResource.
        :param release_retention_policy: The release_retention_policy of this PhaseResource.  # noqa: E501
        :type: RetentionPeriod
        """
        self._release_retention_policy = release_retention_policy
    @property
    def tentacle_retention_policy(self):
        """Gets the tentacle_retention_policy of this PhaseResource.  # noqa: E501
        :return: The tentacle_retention_policy of this PhaseResource.  # noqa: E501
        :rtype: RetentionPeriod
        """
        return self._tentacle_retention_policy
    @tentacle_retention_policy.setter
    def tentacle_retention_policy(self, tentacle_retention_policy):
        """Sets the tentacle_retention_policy of this PhaseResource.
        :param tentacle_retention_policy: The tentacle_retention_policy of this PhaseResource.  # noqa: E501
        :type: RetentionPeriod
        """
        self._tentacle_retention_policy = tentacle_retention_policy
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models / lists / dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(PhaseResource, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PhaseResource):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"six.iteritems"
] | [((8635, 8668), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (8648, 8668), False, 'import six\n')] |
import re
def str2bool(my_str):
    """Coerce a string-ish value to a reasonable boolean.

    Unlike plain ``bool()``, common textual "false" spellings map to False:
    "false", "off", "no", "0", "f" and "n" (case-insensitive, surrounding
    whitespace ignored). ``None`` is False. Anything else falls back to
    ``bool(my_str)``.

    Fix: the previous version only recognised "false" and "off", so e.g.
    str2bool("0") and str2bool("no") incorrectly returned True.
    """
    if my_str is None:
        return False
    if isinstance(my_str, str) and my_str.strip().lower() in ("false", "off", "no", "0", "f", "n"):
        return False
    return bool(my_str)
def camelcase2snake_case(name):
    """Convert CamelCase to snake_case, e.g. DeviceManager -> device_manager."""
    # Insert an underscore before every capital letter that does not start
    # the string, then lower-case the whole result.
    return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()
def snake_case2camelcase(word):
    """Convert snake_case to CamelCase, e.g. device_manager -> DeviceManager."""
    pieces = []
    for segment in word.split("_"):
        # An empty segment (double underscore) is preserved as a literal "_".
        pieces.append(segment.capitalize() or "_")
    return "".join(pieces)
| [
"re.compile"
] | [((470, 499), 're.compile', 're.compile', (['"""(?<!^)(?=[A-Z])"""'], {}), "('(?<!^)(?=[A-Z])')\n", (480, 499), False, 'import re\n')] |
import collections
from typing import Optional
import numpy as np
from astromodels import Parameter, Model
from astromodels.functions.priors import Cosine_Prior, Uniform_prior
from threeML import PluginPrototype
from threeML.io.file_utils import sanitize_filename
from threeML.plugins.DispersionSpectrumLike import DispersionSpectrumLike
from threeML.plugins.SpectrumLike import SpectrumLike
from threeML.io.logging import setup_logger
from pyspi.utils.response.spi_drm import SPIDRM
log = setup_logger(__name__)
class SPILike(DispersionSpectrumLike):
    """
    Plugin for the data of SPI, based on PySPI
    """
    def __init__(
        self,
        name: str,
        observation,
        background,
        bkg_base_array,
        free_position: bool,
        verbose: bool = True,
        **kwargs
    ):
        """
        Init the plugin for a constant source analysis with PySPI
        :param name: Name of plugin
        :param observation: observed spectrum
        :param background: background spectrum
        :param bkg_base_array: Base array for background model
        :param free_position: Free the position in the fit?
        :param verbose: Verbose?
        :returns: Object
        """
        self._free_position: bool = free_position
        # The plugin needs SPIDRM-specific behaviour (set_location below),
        # so reject any other response type early.
        if not isinstance(
            observation.response, SPIDRM
        ):
            log.error("The response associated with the observation"
                      " is not a SPIDRM")
            raise AssertionError()
        super(SPILike, self).__init__(name,
                                      observation,
                                      background,
                                      verbose,
                                      **kwargs)
        self._bkg_base_array = bkg_base_array
        # Per-parameter scale factors applied to the base background array;
        # refreshed from the likelihood model in _update_bkg_array.
        self._bkg_array = np.ones(len(self._bkg_base_array))
    def set_model(self, likelihood_model: Model) -> None:
        """
        Set the model to be used in the joint minimization.
        :param likelihood_model: likelihood model instance
        :returns: None
        """
        super(SPILike, self).set_model(likelihood_model)
        if self._free_position:
            log.info(f"Freeing the position of {self.name} and setting priors")
            for key in self._like_model.point_sources.keys():
                self._like_model.point_sources[key].position.ra.free = True
                self._like_model.point_sources[key].position.dec.free = True
                self._like_model.point_sources[key].position.ra.prior = \
                    Uniform_prior(lower_bound=0.0, upper_bound=360)
                self._like_model.point_sources[key].position.dec.prior = \
                    Cosine_Prior(lower_bound=-90.0, upper_bound=90)
                ra = self._like_model.point_sources[key].position.ra.value
                dec = self._like_model.point_sources[key].position.dec.value
        else:
            for key in self._like_model.point_sources.keys():
                ra = self._like_model.point_sources[key].position.ra.value
                dec = self._like_model.point_sources[key].position.dec.value
        # NOTE(review): the loops above leave ra/dec from the *last* point
        # source only — like get_model, this assumes a single point source.
        self._response.set_location(ra, dec)
    def _evaluate_model(self, precalc_fluxes=None):
        """
        Evaluate the model: source counts plus the scaled background template.
        :param precalc_fluxes: Precaclulated flux of spectrum
        :returns: model counts
        """
        source = super(SPILike, self)._evaluate_model(precalc_fluxes=
                                                     precalc_fluxes)
        self._update_bkg_array()
        # Background = per-parameter scale factors times the base template.
        bkg = self._bkg_array*self._bkg_base_array
        return source+bkg
    def get_model(self, precalc_fluxes: Optional[np.ndarray] = None) -> np.ndarray:
        """
        Get the model
        :param precalc_fluxes: Precaclulated flux of spectrum
        :returns: model counts
        """
        if self._free_position:
            # assumes that the is only one point source which is how
            # it should be!
            ra, dec = self._like_model.get_point_source_position(0)
            self._response.set_location(ra, dec)
        return super(SPILike, self).get_model(precalc_fluxes=precalc_fluxes)
    def _add_bkg_nuisance_parameter(self, bkg_parameters) -> None:
        """
        Add the bkg parameters as nuisance parameters. Scale factors are
        saved as an array parallel to the parameter list.
        :param bkg_parameters: iterable of background Parameter objects
        :returns: None
        """
        self._bkg_parameters = bkg_parameters
        for parameter in bkg_parameters:
            self.nuisance_parameters[parameter.name] = parameter
        self._bkg_array = np.ones(len(bkg_parameters))
    def _update_bkg_array(self) -> None:
        """
        Update the array with the background parameter values from the model.
        :returns: None
        """
        for key in self._like_model.parameters.keys():
            if "bkg" in key:
                # Background parameter names end with "_<index>" giving their
                # slot in self._bkg_array.
                idx = int(key.split("_")[-1])
                self._bkg_array[idx] = self._like_model.parameters[key].value
    def set_free_position(self, flag):
        """
        Set the free position flag
        :param flag: True or False
        :returns: None
        """
        self._free_position = flag
    @classmethod
    def from_spectrumlike(
        cls,
        spectrum_like,
        bkg_base_array,
        free_position=False
    ):
        """
        Generate SPILikeGRB from an existing SpectrumLike child
        :param spectrum_like: SpectrumLike child
        :param bkg_base_array: Base array for background model
        :param free_position: Free the position? boolean
        :returns: Initialized Object
        """
        return cls(
            spectrum_like.name,
            spectrum_like._observed_spectrum,
            spectrum_like._background_spectrum,
            bkg_base_array,
            free_position,
            spectrum_like._verbose,
        )
class SPILikeGRB(DispersionSpectrumLike):
    """
    Plugin for the data of SPI, based on PySPI (GRB analysis).
    """
    def __init__(
        self,
        name,
        observation,
        background=None,
        free_position=False,
        verbose=True,
        **kwargs
    ):
        """
        Init the plugin for a GRB analysis with PySPI
        :param name: Name of plugin
        :param observation: observed spectrum
        :param background: background spectrum
        :param free_position: Free the position in the fit?
        :param verbose: Verbose?
        """
        self._free_position = free_position
        # Consistency fix: use the same log-and-raise pattern as SPILike
        # instead of a bare assert, which is stripped under `python -O`.
        if not isinstance(
            observation.response, SPIDRM
        ):
            log.error("The response associated with the observation"
                      " is not a SPIDRM")
            raise AssertionError()
        super(SPILikeGRB, self).__init__(name,
                                         observation,
                                         background,
                                         verbose,
                                         **kwargs)
    def set_model(self, likelihood_model):
        """
        Set the model to be used in the joint minimization.
        :param likelihood_model: likelihood model instance
        :returns: None
        """
        super(SPILikeGRB, self).set_model(likelihood_model)
        if self._free_position:
            # Consistency fix: log via the module logger as SPILike does,
            # instead of a raw print.
            log.info(f"Freeing the position of {self.name} and setting priors")
            for key in self._like_model.point_sources.keys():
                self._like_model.point_sources[key].position.ra.free = True
                self._like_model.point_sources[key].position.dec.free = True
                self._like_model.point_sources[key].position.ra.prior = \
                    Uniform_prior(lower_bound=0.0, upper_bound=360)
                self._like_model.point_sources[key].position.dec.prior = \
                    Cosine_Prior(lower_bound=-90.0, upper_bound=90)
                ra = self._like_model.point_sources[key].position.ra.value
                dec = self._like_model.point_sources[key].position.dec.value
        else:
            for key in self._like_model.point_sources.keys():
                ra = self._like_model.point_sources[key].position.ra.value
                dec = self._like_model.point_sources[key].position.dec.value
        # NOTE(review): ra/dec come from the last point source in the loop —
        # like get_model, this assumes a single point source in the model.
        self._response.set_location(ra, dec)
    def get_model(self, precalc_fluxes=None):
        """
        Get the model
        :param precalc_fluxes: Precaclulated flux of spectrum
        :returns: model counts
        """
        if self._free_position:
            # assumes that the is only one point source which is how
            # it should be!
            ra, dec = self._like_model.get_point_source_position(0)
            self._response.set_location(ra, dec)
        return super(SPILikeGRB, self).get_model(precalc_fluxes=precalc_fluxes)
    def set_free_position(self, flag):
        """
        Set the free position flag
        :param flag: True or False
        :returns: None
        """
        self._free_position = flag
    @classmethod
    def from_spectrumlike(
        cls, spectrum_like, free_position=False
    ):
        """
        Generate SPILikeGRB from an existing SpectrumLike child
        :param spectrum_like: SpectrumLike child
        :param free_position: Free the position? boolean
        :returns: Initialized Object
        """
        return cls(
            spectrum_like.name,
            spectrum_like._observed_spectrum,
            spectrum_like._background_spectrum,
            free_position,
            spectrum_like._verbose,
        )
| [
"astromodels.functions.priors.Uniform_prior",
"threeML.io.logging.setup_logger",
"astromodels.functions.priors.Cosine_Prior"
] | [((494, 516), 'threeML.io.logging.setup_logger', 'setup_logger', (['__name__'], {}), '(__name__)\n', (506, 516), False, 'from threeML.io.logging import setup_logger\n'), ((2602, 2649), 'astromodels.functions.priors.Uniform_prior', 'Uniform_prior', ([], {'lower_bound': '(0.0)', 'upper_bound': '(360)'}), '(lower_bound=0.0, upper_bound=360)\n', (2615, 2649), False, 'from astromodels.functions.priors import Cosine_Prior, Uniform_prior\n'), ((2745, 2792), 'astromodels.functions.priors.Cosine_Prior', 'Cosine_Prior', ([], {'lower_bound': '(-90.0)', 'upper_bound': '(90)'}), '(lower_bound=-90.0, upper_bound=90)\n', (2757, 2792), False, 'from astromodels.functions.priors import Cosine_Prior, Uniform_prior\n'), ((7582, 7629), 'astromodels.functions.priors.Uniform_prior', 'Uniform_prior', ([], {'lower_bound': '(0.0)', 'upper_bound': '(360)'}), '(lower_bound=0.0, upper_bound=360)\n', (7595, 7629), False, 'from astromodels.functions.priors import Cosine_Prior, Uniform_prior\n'), ((7725, 7772), 'astromodels.functions.priors.Cosine_Prior', 'Cosine_Prior', ([], {'lower_bound': '(-90.0)', 'upper_bound': '(90)'}), '(lower_bound=-90.0, upper_bound=90)\n', (7737, 7772), False, 'from astromodels.functions.priors import Cosine_Prior, Uniform_prior\n')] |
import matplotlib.pyplot as plt
def _determine_vmax(max_data_value):
vmax = 1
if max_data_value > 255:
vmax = None
elif max_data_value > 1:
vmax = 255
return vmax
def plot_image(heatmap, original_data=None, heatmap_cmap=None, data_cmap=None, show_plot=True, output_filename=None):  # pylint: disable=too-many-arguments
    """
    Plots a heatmap image.

    Optionally, the heatmap (typically a saliency map of an explainer) can be
    plotted on top of the original data; in that case both images are drawn
    half-transparent (alpha = 0.5).

    Args:
        heatmap: the saliency map or other heatmap to be plotted.
        original_data: the data to plot underneath the heatmap (optional).
        heatmap_cmap: color map for the heatmap plot (see mpl.Axes.imshow
            documentation for options).
        data_cmap: color map for the optional data image; defaults to 'gray'
            for two-dimensional (grayscale) data.
        show_plot: shows the plot if true (disable for testing or when only
            writing plots to disk).
        output_filename: name of the file to save the plot to (optional).

    Returns:
        None
    """
    _, axes = plt.subplots()
    overlay_alpha = 1
    if original_data is not None:
        # Grayscale data with no explicit cmap falls back to 'gray'.
        cmap = 'gray' if (len(original_data.shape) == 2 and data_cmap is None) else data_cmap
        axes.imshow(original_data, cmap=cmap, vmin=0, vmax=_determine_vmax(original_data.max()))
        overlay_alpha = 0.5
    axes.imshow(heatmap, cmap=heatmap_cmap, alpha=overlay_alpha)
    if show_plot:
        plt.show()
    if output_filename:
        plt.savefig(output_filename)
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((1415, 1429), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1427, 1429), True, 'import matplotlib.pyplot as plt\n'), ((1809, 1819), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1817, 1819), True, 'import matplotlib.pyplot as plt\n'), ((1852, 1880), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_filename'], {}), '(output_filename)\n', (1863, 1880), True, 'import matplotlib.pyplot as plt\n')] |
""" All Available Module on Server Belong to Here """
AVAILABLE_MODULES = (
"api",
)
def init_app(app, **kwargs):
    """Import every available sub-module and let it initialise the app."""
    from importlib import import_module

    for module_name in AVAILABLE_MODULES:
        # Relative import: resolve ".<name>" against this package.
        module = import_module(f".{module_name}", package=__name__)
        module.init_app(app, **kwargs)
"importlib.import_module"
] | [((205, 250), 'importlib.import_module', 'import_module', (['f""".{module}"""'], {'package': '__name__'}), "(f'.{module}', package=__name__)\n", (218, 250), False, 'from importlib import import_module\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 19 23:19:43 2020
@author: elif.ayvali
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
class deep_Q_net(nn.Module):
    """Actor (Policy) Model: a plain feed-forward Q-network."""

    def __init__(self, state_size, action_size, seed):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
        """
        super(deep_Q_net, self).__init__()
        self.seed = torch.manual_seed(seed)
        # Build the layers in a named OrderedDict so state_dict keys
        # (dqn_net.fc1.*, ...) stay stable.
        layers = OrderedDict()
        layers['fc1'] = nn.Linear(state_size, 256)
        layers['relu1'] = nn.ReLU()
        layers['fc2'] = nn.Linear(256, 128)
        layers['relu2'] = nn.ReLU()
        layers['fc3'] = nn.Linear(128, 64)
        layers['relu3'] = nn.ReLU()
        layers['fc4'] = nn.Linear(64, action_size)
        self.dqn_net = nn.Sequential(layers)

    def forward(self, state):
        """Map a state to action values."""
        return self.dqn_net(state)
class dueling_Q_net(nn.Module):
    """Actor (Policy) Model with a dueling head (value + advantage streams)."""

    def __init__(self, state_size, action_size, seed):
        """Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
        """
        super(dueling_Q_net, self).__init__()
        # Fix: the seed argument was previously accepted but silently ignored;
        # seed the RNG as deep_Q_net does so weight init is reproducible.
        self.seed = torch.manual_seed(seed)
        # Shared feature trunk.
        self.feature_modules = nn.Sequential(OrderedDict([
            ('fc1', nn.Linear(state_size, 256)),
            ('relu1', nn.ReLU()),
            ('fc2', nn.Linear(256, 128)),
            ('relu2', nn.ReLU()),
            ('fc3', nn.Linear(128, 64)),
        ]))
        # State-value stream V(s).
        self.value_modules = nn.Sequential(OrderedDict([
            ('fc_v1', nn.Linear(64, 32)),
            # Fix: module name was the typo 'relu)v1'; ReLU has no parameters,
            # so renaming it does not affect saved state_dicts.
            ('relu_v1', nn.ReLU()),
            ('fc_v2', nn.Linear(32, 1)),
        ]))
        # Advantage stream A(s, a).
        self.advantage_modules = nn.Sequential(OrderedDict([
            ('fc_a1', nn.Linear(64, 32)),
            ('relu_a1', nn.ReLU()),
            ('fc_a2', nn.Linear(32, action_size)),
        ]))

    def forward(self, state):
        """Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)."""
        common_layers = self.feature_modules(state)
        advantage = self.advantage_modules(common_layers)  # batch_size x action_size
        value = self.value_modules(common_layers)  # batch_size x 1
        return value + advantage - advantage.mean(dim=1).unsqueeze(1)
| [
"torch.manual_seed",
"torch.nn.ReLU",
"torch.nn.Linear"
] | [((749, 772), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (766, 772), False, 'import torch\n'), ((863, 889), 'torch.nn.Linear', 'nn.Linear', (['state_size', '(256)'], {}), '(state_size, 256)\n', (872, 889), True, 'import torch.nn as nn\n'), ((918, 927), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (925, 927), True, 'import torch.nn as nn\n'), ((954, 973), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (963, 973), True, 'import torch.nn as nn\n'), ((1002, 1011), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1009, 1011), True, 'import torch.nn as nn\n'), ((1038, 1056), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (1047, 1056), True, 'import torch.nn as nn\n'), ((1085, 1094), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1092, 1094), True, 'import torch.nn as nn\n'), ((1121, 1147), 'torch.nn.Linear', 'nn.Linear', (['(64)', 'action_size'], {}), '(64, action_size)\n', (1130, 1147), True, 'import torch.nn as nn\n'), ((1755, 1781), 'torch.nn.Linear', 'nn.Linear', (['state_size', '(256)'], {}), '(state_size, 256)\n', (1764, 1781), True, 'import torch.nn as nn\n'), ((1802, 1811), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1809, 1811), True, 'import torch.nn as nn\n'), ((1830, 1849), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (1839, 1849), True, 'import torch.nn as nn\n'), ((1870, 1879), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1877, 1879), True, 'import torch.nn as nn\n'), ((1898, 1916), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (1907, 1916), True, 'import torch.nn as nn\n'), ((2014, 2031), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(32)'], {}), '(64, 32)\n', (2023, 2031), True, 'import torch.nn as nn\n'), ((2054, 2063), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2061, 2063), True, 'import torch.nn as nn\n'), ((2092, 2108), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(1)'], {}), '(32, 1)\n', (2101, 2108), True, 
'import torch.nn as nn\n'), ((2210, 2227), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(32)'], {}), '(64, 32)\n', (2219, 2227), True, 'import torch.nn as nn\n'), ((2250, 2259), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2257, 2259), True, 'import torch.nn as nn\n'), ((2287, 2313), 'torch.nn.Linear', 'nn.Linear', (['(32)', 'action_size'], {}), '(32, action_size)\n', (2296, 2313), True, 'import torch.nn as nn\n')] |
import numpy as np
import pandas as pd
from typing import Tuple, Dict
from .load_csv import load_raw_classes, load_raw_epigenomic_data, load_raw_nucleotides_sequences
from .store_csv import store_raw_classes, store_raw_epigenomic_data, store_raw_nucleotides_sequences
from auto_tqdm import tqdm
def drop_unknown_datapoints(epigenomic_data:pd.DataFrame, nucleotides_sequences:np.ndarray, nucleotides_sequences_index:np.ndarray, classes:pd.DataFrame)->Tuple[pd.DataFrame, np.ndarray, np.ndarray, pd.DataFrame]:
    """Drop every datapoint whose class is unknown (UK == 1).

    Returns the filtered epigenomic data, nucleotides sequences, sequence
    index and classes (with the UK column removed), all aligned row-wise.
    """
    unknown_mask = classes["UK"] == 1
    known_mask = ~unknown_mask
    filtered_epigenomic = epigenomic_data.drop(index=epigenomic_data.index[unknown_mask])
    filtered_sequences = nucleotides_sequences[known_mask]
    filtered_index = nucleotides_sequences_index[known_mask]
    # Remove the UK rows first, then the now-useless UK column itself.
    filtered_classes = classes.drop(index=classes.index[unknown_mask]).drop(columns=["UK"])
    return filtered_epigenomic, filtered_sequences, filtered_index, filtered_classes
def sanitize(target:str, settings:Dict):
    """Strip unknown-labelled datapoints from every cell line's raw files."""
    for cell_line in tqdm(settings["cell_lines"], desc="Sanitizing data"):
        labels = load_raw_classes(target, cell_line)
        if "UK" not in labels.columns:
            # Already sanitized (or never had unknown labels) — skip.
            continue
        epigenomic = load_raw_epigenomic_data(target, cell_line)
        sequences, sequences_index, sequences_columns = load_raw_nucleotides_sequences(target, cell_line)
        epigenomic, sequences, sequences_index, labels = drop_unknown_datapoints(epigenomic, sequences, sequences_index, labels)
        # Persist the filtered versions back over the raw files.
        store_raw_epigenomic_data(target, cell_line, epigenomic)
        store_raw_nucleotides_sequences(target, cell_line, sequences, sequences_index, sequences_columns)
        store_raw_classes(target, cell_line, labels)
store_raw_classes(target, cell_line, classes) | [
"auto_tqdm.tqdm"
] | [((1060, 1112), 'auto_tqdm.tqdm', 'tqdm', (["settings['cell_lines']"], {'desc': '"""Sanitizing data"""'}), "(settings['cell_lines'], desc='Sanitizing data')\n", (1064, 1112), False, 'from auto_tqdm import tqdm\n')] |
# -*- coding: utf-8 -*-
'''
plexOdus Add-on
'''
import json
from resources.lib.modules import client
from resources.lib.modules import control
# Personal fanart.tv client key from settings; falls back to a shared default.
user = control.setting('fanart.tv.user')
if user == '' or user is None:
    user = 'cf0ebcc2f7b824bd04cf3a318f15c17d'
# Project API key required by every fanart.tv v3 request.
headers = {'api-key': '3eb5ed2c401a206391ea8d1a0312c347'}
if not user == '':
    headers.update({'client-key': user})
# v3 endpoint template: base_url % (media_type, media_id)
base_url = "http://webservice.fanart.tv/v3/%s/%s"
# Preferred artwork language, taken from the Trakt language setting.
lang = control.apiLanguage()['trakt']
def _best_art_url(art, keys):
    """Return the best artwork URL for the first available key in *keys*.

    Candidates in the preferred language (module-level ``lang``) are ranked
    before language-neutral ones; within each group the entry with the most
    likes wins. Returns the UTF-8 encoded URL, or the string '0' when nothing
    usable is found (matching the legacy sentinel).
    """
    try:
        candidates = None
        for key in keys[:-1]:
            if key in art:
                candidates = art[key]
                break
        if candidates is None:
            # May raise KeyError -> handled below, yielding '0'.
            candidates = art[keys[-1]]
        ranked = [(x['url'], x['likes']) for x in candidates if x.get('lang') == lang] \
               + [(x['url'], x['likes']) for x in candidates if x.get('lang') == '']
        ranked = sorted(ranked, key=lambda x: int(x[1]), reverse=True)
        return ranked[0][0].encode('utf-8')
    except Exception:
        return '0'


def get_tvshow_art(tvdb):
    """Fetch extended TV-show artwork from fanart.tv for a TVDb id.

    Returns a dict with poster2/banner2/fanart2/clearlogo/clearart/landscape
    entries ('0' for anything missing), or None when the request fails.
    """
    url = base_url % ('tv', '%s')
    try:
        art = client.request(url % tvdb, headers=headers, timeout='30', error=True)
        art = json.loads(art)
    except Exception:
        return None
    # Each art type picks the first available source key, then the
    # highest-liked, language-preferred entry (see _best_art_url).
    poster2 = _best_art_url(art, ['tvposter'])
    fanart2 = _best_art_url(art, ['showbackground'])
    banner2 = _best_art_url(art, ['tvbanner'])
    clearlogo = _best_art_url(art, ['hdtvlogo', 'clearlogo'])
    clearart = _best_art_url(art, ['hdclearart', 'clearart'])
    landscape = _best_art_url(art, ['tvthumb', 'showbackground'])
    extended_art = {'extended': True, 'poster2': poster2, 'banner2': banner2, 'fanart2': fanart2, 'clearlogo': clearlogo, 'clearart': clearart, 'landscape': landscape}
    return extended_art
def get_movie_art(imdb):
    """Fetch extended artwork for a movie from the fanart.tv API.

    Args:
        imdb: IMDB id of the movie, substituted into the API URL.

    Returns:
        A dict with keys 'extended', 'poster2', 'fanart2', 'banner2',
        'clearlogo', 'clearart', 'discart' and 'landscape'. Each art value
        is the UTF-8 encoded URL of the best-ranked image, or the string
        '0' when that art type is unavailable. Returns None when the API
        request or JSON decode fails entirely.
    """
    url = base_url % ('movies', '%s')
    try:
        art = client.request(url % imdb, headers=headers, timeout='30', error=True)
        art = json.loads(art)
    except Exception:
        # Request/decode failure: no artwork data at all.
        return None

    def _best(*keys):
        """Return the most-liked art URL for the first key present in `art`.

        Entries matching the configured `lang` are preferred; language-neutral
        entries (lang == '') are appended as fallbacks. Candidates are ranked
        by their 'likes' count, descending. Any failure (missing key, empty
        list, malformed entry) yields the sentinel '0', matching the original
        per-art-type fallback behavior.
        """
        try:
            for key in keys:
                if key in art:
                    items = art[key]
                    break
            else:
                return '0'
            ranked = [(x['url'], x['likes']) for x in items if x.get('lang') == lang]
            ranked += [(x['url'], x['likes']) for x in items if x.get('lang') == '']
            ranked = sorted(ranked, key=lambda x: int(x[1]), reverse=True)
            # IndexError on an empty list is caught below and maps to '0'.
            return ranked[0][0].encode('utf-8')
        except Exception:
            return '0'

    # Key order reproduces the original primary/fallback preference for
    # each art type (e.g. HD variants first, backgrounds before thumbs).
    extended_art = {
        'extended': True,
        'poster2': _best('movieposter'),
        'fanart2': _best('moviebackground', 'moviethumb'),
        'banner2': _best('moviebanner'),
        'clearlogo': _best('hdmovielogo', 'movielogo'),
        'clearart': _best('hdmovieclearart', 'movieart'),
        'discart': _best('moviedisc'),
        'landscape': _best('moviethumb', 'moviebackground'),
    }
    return extended_art
"resources.lib.modules.control.setting",
"resources.lib.modules.client.request",
"resources.lib.modules.control.apiLanguage",
"json.loads"
] | [((158, 191), 'resources.lib.modules.control.setting', 'control.setting', (['"""fanart.tv.user"""'], {}), "('fanart.tv.user')\n", (173, 191), False, 'from resources.lib.modules import control\n'), ((446, 467), 'resources.lib.modules.control.apiLanguage', 'control.apiLanguage', ([], {}), '()\n', (465, 467), False, 'from resources.lib.modules import control\n'), ((562, 631), 'resources.lib.modules.client.request', 'client.request', (['(url % tvdb)'], {'headers': 'headers', 'timeout': '"""30"""', 'error': '(True)'}), "(url % tvdb, headers=headers, timeout='30', error=True)\n", (576, 631), False, 'from resources.lib.modules import client\n'), ((646, 661), 'json.loads', 'json.loads', (['art'], {}), '(art)\n', (656, 661), False, 'import json\n'), ((3960, 4029), 'resources.lib.modules.client.request', 'client.request', (['(url % imdb)'], {'headers': 'headers', 'timeout': '"""30"""', 'error': '(True)'}), "(url % imdb, headers=headers, timeout='30', error=True)\n", (3974, 4029), False, 'from resources.lib.modules import client\n'), ((4044, 4059), 'json.loads', 'json.loads', (['art'], {}), '(art)\n', (4054, 4059), False, 'import json\n')] |
"""
nuqql conversation helpers
"""
import datetime
import logging
from typing import TYPE_CHECKING
import nuqql.win
from .conversation import CONVERSATIONS
from .logmessage import LogMessage
if TYPE_CHECKING: # imports for typing
# pylint: disable=cyclic-import
from nuqql.backend import Backend # noqa
logger = logging.getLogger(__name__)
def remove_backend_conversations(backend: "Backend") -> None:
"""
Remove all conversations beloning to the backend
"""
logger.debug("removing all conversations of backend %s", backend.name)
for conv in CONVERSATIONS[:]:
if conv.backend == backend:
conv.wins.list_win.remove(conv)
conv.wins.list_win.redraw()
logger.debug("removed conversation %s of backend %s",
conv.name, backend.name)
def log_main_window(msg: str) -> None:
"""
Log message to main windows
"""
logger.debug("logging message to main window: %s", msg)
now = datetime.datetime.now()
log_msg = LogMessage(now, "nuqql", msg)
nuqql.win.MAIN_WINS["log"].add(log_msg)
def log_nuqql_conv(msg: str) -> None:
"""
Log message to the nuqql conversation
"""
logger.debug("logging message to nuqql conversation: %s", msg)
for conv in CONVERSATIONS:
if conv.name == "nuqql":
conv.log("nuqql", msg)
return
def resize_main_window() -> None:
"""
Resize main window
"""
logger.debug("resizing main window")
# get main win
screen = nuqql.win.MAIN_WINS["screen"]
# get new maxima
max_y, max_x = screen.getmaxyx()
# redraw main windows
screen.clear()
screen.refresh()
# redraw conversation windows
found_active = False
for conv in CONVERSATIONS:
# resize and move conversation windows
if conv.wins.list_win:
size_y, size_x = conv.wins.list_win.config.get_size()
conv.wins.list_win.resize_win(size_y, size_x)
if conv.wins.log_win:
# TODO: move zoom/resizing to win.py?
if conv.wins.log_win.state.zoomed:
size_y, size_x = max_y, max_x
pos_y, pos_x = 0, 0
conv.wins.log_win.state.pad_y = 0 # reset pad position
else:
size_y, size_x = conv.wins.log_win.config.get_size()
pos_y, pos_x = conv.wins.log_win.config.get_pos()
conv.wins.log_win.resize_win(size_y, size_x)
conv.wins.log_win.move_win(pos_y, pos_x)
if conv.wins.input_win:
size_y, size_x = conv.wins.input_win.config.get_size()
conv.wins.input_win.resize_win(size_y, size_x)
pos_y, pos_x = conv.wins.input_win.config.get_pos()
conv.wins.input_win.move_win(pos_y, pos_x)
# redraw active conversation windows
if conv.is_active():
found_active = True
conv.wins.list_win.redraw()
conv.wins.input_win.redraw()
conv.wins.log_win.redraw()
# if there are no active conversations, redraw nuqql main windows
if not found_active:
# list win
list_win = nuqql.win.MAIN_WINS["list"]
size_y, size_x = list_win.config.get_size()
list_win.resize_win(size_y, size_x)
list_win.redraw()
# log main win
log_win = nuqql.win.MAIN_WINS["log"]
size_y, size_x = log_win.config.get_size()
pos_y, pos_x = log_win.config.get_pos()
log_win.resize_win(size_y, size_x)
log_win.move_win(pos_y, pos_x)
log_win.redraw()
| [
"logging.getLogger",
"datetime.datetime.now"
] | [((329, 356), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (346, 356), False, 'import logging\n'), ((996, 1019), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1017, 1019), False, 'import datetime\n')] |
import vaex
import pytest
@pytest.mark.skipif(vaex.utils.devmode, reason='runs too slow when developing')
def test_gcs():
df = vaex.open('gs://vaex-data/testing/xys.hdf5?cache=false&token=anon')
assert df.x.tolist() == [1, 2]
assert df.y.tolist() == [3, 4]
assert df.s.tolist() == ['5', '6']
df = vaex.open('gs://vaex-data/testing/xys.hdf5?cache=true&token=anon')
assert df.x.tolist() == [1, 2]
assert df.y.tolist() == [3, 4]
assert df.s.tolist() == ['5', '6']
@pytest.mark.skipif(vaex.utils.devmode, reason='runs too slow when developing')
def test_gcs_masked():
df = vaex.open('gs://vaex-data/testing/xys-masked.hdf5?cache=false&token=anon')
assert df.x.tolist() == [1, None]
assert df.y.tolist() == [None, 4]
assert df.s.tolist() == ['5', None]
| [
"vaex.open",
"pytest.mark.skipif"
] | [((29, 107), 'pytest.mark.skipif', 'pytest.mark.skipif', (['vaex.utils.devmode'], {'reason': '"""runs too slow when developing"""'}), "(vaex.utils.devmode, reason='runs too slow when developing')\n", (47, 107), False, 'import pytest\n'), ((499, 577), 'pytest.mark.skipif', 'pytest.mark.skipif', (['vaex.utils.devmode'], {'reason': '"""runs too slow when developing"""'}), "(vaex.utils.devmode, reason='runs too slow when developing')\n", (517, 577), False, 'import pytest\n'), ((133, 200), 'vaex.open', 'vaex.open', (['"""gs://vaex-data/testing/xys.hdf5?cache=false&token=anon"""'], {}), "('gs://vaex-data/testing/xys.hdf5?cache=false&token=anon')\n", (142, 200), False, 'import vaex\n'), ((320, 386), 'vaex.open', 'vaex.open', (['"""gs://vaex-data/testing/xys.hdf5?cache=true&token=anon"""'], {}), "('gs://vaex-data/testing/xys.hdf5?cache=true&token=anon')\n", (329, 386), False, 'import vaex\n'), ((610, 684), 'vaex.open', 'vaex.open', (['"""gs://vaex-data/testing/xys-masked.hdf5?cache=false&token=anon"""'], {}), "('gs://vaex-data/testing/xys-masked.hdf5?cache=false&token=anon')\n", (619, 684), False, 'import vaex\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
from datetime import datetime
import os
from os.path import dirname, join
import sys
import time
import unittest
import uuid
import logging
LOGGING_FORMAT = '\n%(levelname)s %(asctime)s %(message)s'
logging.basicConfig(level=logging.INFO, format=LOGGING_FORMAT)
logger = logging.getLogger(__name__)
import six
import django
from requests.exceptions import ConnectionError
from qiniu import BucketManager
from .utils import retry
# Add repo/demo_site to sys.path
DEMO_SITE_DIR = join(dirname(dirname(__file__)), 'demo_site')
sys.path.append(DEMO_SITE_DIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demo_site.settings")
try:
django.setup()
except AttributeError:
# Setup isn't necessary in Django < 1.7
pass
from django.conf import settings
from qiniustorage.backends import QiniuPrivateStorage, QiniuFile, get_qiniu_config
from qiniustorage.utils import QiniuError
USING_TRAVIS = os.environ.get('USING_TRAVIS', None) is None
UNIQUE_PATH = str(uuid.uuid4())
class QiniuStorageTest(unittest.TestCase):
def setUp(self):
self.storage = QiniuPrivateStorage(
bucket_name=get_qiniu_config('QINIU_PRIVATE_BUCKET_NAME'),
bucket_domain=get_qiniu_config('QINIU_PRIVATE_BUCKET_DOMAIN'),
)
def test_read_file(self):
ASSET_FILE_NAMES = [u'Read.txt', u'读.txt']
for assert_file_name in ASSET_FILE_NAMES:
REMOTE_PATH = join(UNIQUE_PATH, assert_file_name)
test_file = six.BytesIO()
test_file.write(u"你好世界 Hello World".encode('utf-8'))
test_file.seek(0)
self.storage.save(REMOTE_PATH, test_file)
fil = self.storage.open(REMOTE_PATH, 'r')
assert fil._is_read == False
content = fil.read()
assert content.startswith(u"你好")
assert fil._is_read == True
# Test open mode
fil = self.storage.open(REMOTE_PATH, 'rb')
bin_content = fil.read()
assert bin_content.startswith(u"你好".encode('utf-8'))
@classmethod
def teardown_class(cls):
"""Delete all files in the test bucket.
"""
storage = QiniuPrivateStorage(
bucket_name=get_qiniu_config('QINIU_PRIVATE_BUCKET_NAME'),
bucket_domain=get_qiniu_config('QINIU_PRIVATE_BUCKET_DOMAIN'),
)
auth = storage.auth
bucket = BucketManager(auth)
while True:
ret, eof, info = bucket.list(storage.bucket_name, limit=100)
if ret is None:
print(info)
break
for item in ret['items']:
name = item['key']
if six.PY2:
name = name.encode('utf-8')
ret, info = bucket.delete(storage.bucket_name, name)
if ret is None:
print(info)
if eof:
break
| [
"logging.basicConfig",
"os.environ.setdefault",
"logging.getLogger",
"django.setup",
"six.BytesIO",
"qiniustorage.backends.get_qiniu_config",
"os.environ.get",
"os.path.join",
"uuid.uuid4",
"os.path.dirname",
"sys.path.append",
"qiniu.BucketManager"
] | [((297, 359), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'LOGGING_FORMAT'}), '(level=logging.INFO, format=LOGGING_FORMAT)\n', (316, 359), False, 'import logging\n'), ((369, 396), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (386, 396), False, 'import logging\n'), ((627, 657), 'sys.path.append', 'sys.path.append', (['DEMO_SITE_DIR'], {}), '(DEMO_SITE_DIR)\n', (642, 657), False, 'import sys\n'), ((658, 727), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""demo_site.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'demo_site.settings')\n", (679, 727), False, 'import os\n'), ((738, 752), 'django.setup', 'django.setup', ([], {}), '()\n', (750, 752), False, 'import django\n'), ((1002, 1038), 'os.environ.get', 'os.environ.get', (['"""USING_TRAVIS"""', 'None'], {}), "('USING_TRAVIS', None)\n", (1016, 1038), False, 'import os\n'), ((1066, 1078), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1076, 1078), False, 'import uuid\n'), ((594, 611), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (601, 611), False, 'from os.path import dirname, join\n'), ((2480, 2499), 'qiniu.BucketManager', 'BucketManager', (['auth'], {}), '(auth)\n', (2493, 2499), False, 'from qiniu import BucketManager\n'), ((1505, 1540), 'os.path.join', 'join', (['UNIQUE_PATH', 'assert_file_name'], {}), '(UNIQUE_PATH, assert_file_name)\n', (1509, 1540), False, 'from os.path import dirname, join\n'), ((1566, 1579), 'six.BytesIO', 'six.BytesIO', ([], {}), '()\n', (1577, 1579), False, 'import six\n'), ((1214, 1259), 'qiniustorage.backends.get_qiniu_config', 'get_qiniu_config', (['"""QINIU_PRIVATE_BUCKET_NAME"""'], {}), "('QINIU_PRIVATE_BUCKET_NAME')\n", (1230, 1259), False, 'from qiniustorage.backends import QiniuPrivateStorage, QiniuFile, get_qiniu_config\n'), ((1287, 1334), 'qiniustorage.backends.get_qiniu_config', 'get_qiniu_config', (['"""QINIU_PRIVATE_BUCKET_DOMAIN"""'], 
{}), "('QINIU_PRIVATE_BUCKET_DOMAIN')\n", (1303, 1334), False, 'from qiniustorage.backends import QiniuPrivateStorage, QiniuFile, get_qiniu_config\n'), ((2303, 2348), 'qiniustorage.backends.get_qiniu_config', 'get_qiniu_config', (['"""QINIU_PRIVATE_BUCKET_NAME"""'], {}), "('QINIU_PRIVATE_BUCKET_NAME')\n", (2319, 2348), False, 'from qiniustorage.backends import QiniuPrivateStorage, QiniuFile, get_qiniu_config\n'), ((2376, 2423), 'qiniustorage.backends.get_qiniu_config', 'get_qiniu_config', (['"""QINIU_PRIVATE_BUCKET_DOMAIN"""'], {}), "('QINIU_PRIVATE_BUCKET_DOMAIN')\n", (2392, 2423), False, 'from qiniustorage.backends import QiniuPrivateStorage, QiniuFile, get_qiniu_config\n')] |
# This file implements functions model_fn, input_fn, predict_fn and output_fn.
# Function model_fn is mandatory. The other functions can be omitted so the standard sagemaker function will be used.
# An alternative to the last 3 functions is to use function transform_fn(model, data, input_content_type, output_content_type)
#
# More info on https://github.com/aws/sagemaker-inference-toolkit/tree/master/src/sagemaker_inference
#
import torch
from mnist_demo.models.model import Net
import os
from torchvision import transforms
from sagemaker_inference import (
content_types,
decoder,
encoder,
errors,
utils,
)
def model_fn(model_dir):
"""
Function used for Sagemaker to load a model. The function must have this signature. Sagemaker will look for this function.
Used only when Elastic Inference is not used.
"""
print('Loading model')
model = Net()
with open(os.path.join(model_dir, 'model.pth'), 'rb') as f: # model_cnn.pth is the name given in the train script
model.load_state_dict(torch.load(f))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device) #let's keep inference in CPU
print('Model loaded')
return model | [
"torch.cuda.is_available",
"torch.load",
"mnist_demo.models.model.Net",
"os.path.join"
] | [((891, 896), 'mnist_demo.models.model.Net', 'Net', ([], {}), '()\n', (894, 896), False, 'from mnist_demo.models.model import Net\n'), ((911, 947), 'os.path.join', 'os.path.join', (['model_dir', '"""model.pth"""'], {}), "(model_dir, 'model.pth')\n", (923, 947), False, 'import os\n'), ((1045, 1058), 'torch.load', 'torch.load', (['f'], {}), '(f)\n', (1055, 1058), False, 'import torch\n'), ((1097, 1122), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1120, 1122), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
import kicad
import model
from stackups import JLCPCB6Layers
#from dram import lp4
# IMX8MM
# Diff pairs should be matched within 1ps
# CK_t/CK_c max 200 ps
# CA[5:0]
# CS[1:0] min: CK_t - 25ps, max: CK_t + 25ps
# CKE[1:0]
# DQS0_t/DQS0_c min: CK_t - 85ps, max CK_t + 85ps
# DQ[7:0] min: DQS0_t - 10ps, max DQS0_t + 10ps
# DM0
# DQS1_t/DQS1_c min: CK_t - 85ps, max CK_t + 85ps
# DQ[15:8] min: DQS1_t - 10ps, max DQS1_t + 10ps
# DM1
if __name__ == "__main__":
pcb = kicad.KicadPCB("../mongoose.kicad_pcb", JLCPCB6Layers())
# DiffPair(pcb, "_n","_p", max_delay_ps=200.0, max_skew_ps=1.0)
for net_index in pcb.get_nets().keys():
net = pcb.get_nets()[net_index]
print(net.get_name() + " dly: %.2f ps"%(net.get_delay_ps()))
| [
"stackups.JLCPCB6Layers"
] | [((562, 577), 'stackups.JLCPCB6Layers', 'JLCPCB6Layers', ([], {}), '()\n', (575, 577), False, 'from stackups import JLCPCB6Layers\n')] |
import os
import urllib.request
from zipfile import ZipFile
HOME_DIRECTORY = os.path.join('datasets','raw')
ROOT_URL = 'https://os.unil.cloud.switch.ch/fma/fma_metadata.zip'
if not os.path.isdir(HOME_DIRECTORY):
os.makedirs(HOME_DIRECTORY)
zip_path = os.path.join(HOME_DIRECTORY, 'data.zip')
urllib.request.urlretrieve(ROOT_URL, zip_path)
with ZipFile(zip_path, 'r') as zip:
zip.extractall(HOME_DIRECTORY)
print("Done!") | [
"os.path.isdir",
"os.path.join",
"zipfile.ZipFile",
"os.makedirs"
] | [((78, 109), 'os.path.join', 'os.path.join', (['"""datasets"""', '"""raw"""'], {}), "('datasets', 'raw')\n", (90, 109), False, 'import os\n'), ((257, 297), 'os.path.join', 'os.path.join', (['HOME_DIRECTORY', '"""data.zip"""'], {}), "(HOME_DIRECTORY, 'data.zip')\n", (269, 297), False, 'import os\n'), ((183, 212), 'os.path.isdir', 'os.path.isdir', (['HOME_DIRECTORY'], {}), '(HOME_DIRECTORY)\n', (196, 212), False, 'import os\n'), ((218, 245), 'os.makedirs', 'os.makedirs', (['HOME_DIRECTORY'], {}), '(HOME_DIRECTORY)\n', (229, 245), False, 'import os\n'), ((351, 373), 'zipfile.ZipFile', 'ZipFile', (['zip_path', '"""r"""'], {}), "(zip_path, 'r')\n", (358, 373), False, 'from zipfile import ZipFile\n')] |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base custom model that is already retained by data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
import tempfile
import tensorflow.compat.v2 as tf
from tensorflow_examples.lite.model_maker.core import compat
DEFAULT_QUANTIZATION_STEPS = 2000
def get_representative_dataset_gen(dataset, num_steps):
def representative_dataset_gen():
"""Generates representative dataset for quantized."""
for image, _ in dataset.take(num_steps):
yield [image]
return representative_dataset_gen
class CustomModel(abc.ABC):
""""The abstract base class that represents a Tensorflow classification model."""
def __init__(self, model_spec, shuffle):
"""Initialize a instance with data, deploy mode and other related parameters.
Args:
model_spec: Specification for the model.
shuffle: Whether the data should be shuffled.
"""
self.model_spec = model_spec
self.shuffle = shuffle
self.model = None
def preprocess(self, sample_data, label):
"""Preprocess the data."""
# TODO(yuqili): remove this method once preprocess for image classifier is
# also moved to DataLoader part.
return sample_data, label
@abc.abstractmethod
def train(self, train_data, validation_data=None, **kwargs):
return
@abc.abstractmethod
def export(self, **kwargs):
return
def summary(self):
self.model.summary()
@abc.abstractmethod
def evaluate(self, data, **kwargs):
return
def _gen_dataset(self,
data,
batch_size=32,
is_training=True,
input_pipeline_context=None):
"""Generates training / validation dataset."""
# The dataset is always sharded by number of hosts.
# num_input_pipelines is the number of hosts rather than number of cores.
ds = data.dataset
if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:
ds = ds.shard(input_pipeline_context.num_input_pipelines,
input_pipeline_context.input_pipeline_id)
ds = ds.map(
self.preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if is_training:
if self.shuffle:
ds = ds.shuffle(buffer_size=min(data.size, 100))
ds = ds.repeat()
ds = ds.batch(batch_size)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
return ds
def _export_saved_model(self,
filepath,
overwrite=True,
include_optimizer=True,
save_format=None,
signatures=None,
options=None):
"""Saves the model to Tensorflow SavedModel or a single HDF5 file.
Args:
filepath: String, path to SavedModel or H5 file to save the model.
overwrite: Whether to silently overwrite any existing file at the target
location, or provide the user with a manual prompt.
include_optimizer: If True, save optimizer's state together.
save_format: Either 'tf' or 'h5', indicating whether to save the model to
Tensorflow SavedModel or HDF5. Defaults to 'tf' in TF 2.X, and 'h5' in
TF 1.X.
signatures: Signatures to save with the SavedModel. Applicable to the 'tf'
format only. Please see the `signatures` argument in
`tf.saved_model.save` for details.
options: Optional `tf.saved_model.SaveOptions` object that specifies
options for saving to SavedModel.
"""
if filepath is None:
raise ValueError(
"SavedModel filepath couldn't be None when exporting to SavedModel.")
self.model.save(filepath, overwrite, include_optimizer, save_format,
signatures, options)
def _export_tflite(self,
tflite_filepath,
quantized=False,
quantization_steps=None,
representative_data=None):
"""Converts the retrained model to tflite format and saves it.
Args:
tflite_filepath: File path to save tflite model.
quantized: boolean, if True, save quantized model.
quantization_steps: Number of post-training quantization calibration steps
to run. Used only if `quantized` is True.
representative_data: Representative data used for post-training
quantization. Used only if `quantized` is True.
"""
if tflite_filepath is None:
raise ValueError(
"TFLite filepath couldn't be None when exporting to tflite.")
tf.compat.v1.logging.info('Exporting to tflite model in %s.',
tflite_filepath)
temp_dir = None
if compat.get_tf_behavior() == 1:
temp_dir = tempfile.TemporaryDirectory()
save_path = os.path.join(temp_dir.name, 'saved_model')
self.model.save(save_path, include_optimizer=False, save_format='tf')
converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model(save_path)
else:
converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
if quantized:
if quantization_steps is None:
quantization_steps = DEFAULT_QUANTIZATION_STEPS
if representative_data is None:
raise ValueError(
'representative_data couldn\'t be None if model is quantized.')
ds = self._gen_dataset(
representative_data, batch_size=1, is_training=False)
converter.representative_dataset = tf.lite.RepresentativeDataset(
get_representative_dataset_gen(ds, quantization_steps))
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8
]
tflite_model = converter.convert()
if temp_dir:
temp_dir.cleanup()
with tf.io.gfile.GFile(tflite_filepath, 'wb') as f:
f.write(tflite_model)
| [
"tempfile.TemporaryDirectory",
"tensorflow.compat.v2.compat.v1.lite.TFLiteConverter.from_saved_model",
"tensorflow.compat.v2.lite.TFLiteConverter.from_keras_model",
"tensorflow.compat.v2.compat.v1.logging.info",
"os.path.join",
"tensorflow.compat.v2.io.gfile.GFile",
"tensorflow_examples.lite.model_maker... | [((5206, 5284), 'tensorflow.compat.v2.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Exporting to tflite model in %s."""', 'tflite_filepath'], {}), "('Exporting to tflite model in %s.', tflite_filepath)\n", (5231, 5284), True, 'import tensorflow.compat.v2 as tf\n'), ((5342, 5366), 'tensorflow_examples.lite.model_maker.core.compat.get_tf_behavior', 'compat.get_tf_behavior', ([], {}), '()\n', (5364, 5366), False, 'from tensorflow_examples.lite.model_maker.core import compat\n'), ((5390, 5419), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (5417, 5419), False, 'import tempfile\n'), ((5438, 5480), 'os.path.join', 'os.path.join', (['temp_dir.name', '"""saved_model"""'], {}), "(temp_dir.name, 'saved_model')\n", (5450, 5480), False, 'import os\n'), ((5575, 5636), 'tensorflow.compat.v2.compat.v1.lite.TFLiteConverter.from_saved_model', 'tf.compat.v1.lite.TFLiteConverter.from_saved_model', (['save_path'], {}), '(save_path)\n', (5625, 5636), True, 'import tensorflow.compat.v2 as tf\n'), ((5665, 5717), 'tensorflow.compat.v2.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['self.model'], {}), '(self.model)\n', (5705, 5717), True, 'import tensorflow.compat.v2 as tf\n'), ((6550, 6590), 'tensorflow.compat.v2.io.gfile.GFile', 'tf.io.gfile.GFile', (['tflite_filepath', '"""wb"""'], {}), "(tflite_filepath, 'wb')\n", (6567, 6590), True, 'import tensorflow.compat.v2 as tf\n')] |
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from src.architectures.readout import READOUTS
from src.architectures.embedding import EMBEDDINGS
from .attention_pooling import POOLING_LAYERS
from ..message_passing import MP_LAYERS
from ..adjacency import construct_adjacency
from src.monitors import BatchMatrixMonitor
from src.monitors import Histogram
class AbstractStackedFixedNMP(nn.Module):
def __init__(
self,
scales=None,
features=None,
hidden=None,
iters=None,
readout=None,
pooling_layer=None,
pool_first=False,
mp_layer=None,
emb_init=None,
**kwargs
):
super().__init__()
emb_kwargs = {x: kwargs[x] for x in ['act', 'wn']}
self.embedding = EMBEDDINGS['n'](dim_in=features, dim_out=hidden, n_layers=int(emb_init), **emb_kwargs)
#self.embedding = EMBEDDINGS['n'](dim_in=features, dim_out=hidden, act=kwargs.get('act', None))
mp_kwargs = {x: kwargs[x] for x in ['act', 'wn', 'update', 'message']}
MPLayer = MP_LAYERS[mp_layer]
self.nmps = nn.ModuleList(
[nn.ModuleList(
[MPLayer(hidden=hidden,**mp_kwargs) for _ in range(iters)
]
)
for _ in scales
]
)
Pool = POOLING_LAYERS[pooling_layer]
self.attn_pools = nn.ModuleList([Pool(scales[i], hidden, **kwargs) for i in range(len(scales))])
Readout = READOUTS[readout]
self.readout = Readout(hidden, hidden)
self.pool_first = pool_first
def forward(self, *args, **kwargs):
raise NotImplementedError
class StackedFixedNMP(AbstractStackedFixedNMP):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.adjs = self.set_adjacency_matrices(**kwargs)
def set_adjacency_matrices(self, scales=None, features=None, hidden=None,matrix=None, **kwargs):
m1 = construct_adjacency(
matrix=matrix,
dim_in=features,
index=str(1),
**kwargs
)
matrices = [construct_adjacency(
matrix=matrix,
dim_in=hidden,
index=str(i+2),
**kwargs
)
for i in range(len(scales) - 1)]
return nn.ModuleList([m1] + matrices)
def forward(self, jets, mask=None, **kwargs):
h = self.embedding(jets)
attns = None
#import ipdb; ipdb.set_trace()
for i, (nmp, pool, adj) in enumerate(zip(self.nmps, self.attn_pools, self.adjs)):
if i > 0:
#mask = None
dij = torch.bmm(attns, dij)
dij = torch.bmm(dij, attns.transpose(1,2))
#dij = adj(h, mask=None, **kwargs)
else:
dij = adj(jets, mask=mask, **kwargs)
if self.pool_first:
h, attns = pool(h, **kwargs)
#dij = adj(h, mask=mask)
for mp in nmp:
h = mp(h=h, mask=mask, dij=dij)
if not self.pool_first:
h, attns = pool(h, **kwargs)
out = self.readout(h)
return out
| [
"torch.nn.ModuleList",
"torch.bmm"
] | [((2614, 2644), 'torch.nn.ModuleList', 'nn.ModuleList', (['([m1] + matrices)'], {}), '([m1] + matrices)\n', (2627, 2644), True, 'import torch.nn as nn\n'), ((2953, 2974), 'torch.bmm', 'torch.bmm', (['attns', 'dij'], {}), '(attns, dij)\n', (2962, 2974), False, 'import torch\n')] |
#!/usr/bin/env python3
import lib
N=1000000
sieve = lib.get_prime_sieve(N)
primes = lib.primes(N, sieve)
primes = primes[4:]
def is_truncatable(n):
num = n
c = 0
while num:
if not sieve[num]:
return False
num = int(num / 10)
c += 1
while c:
num = n % 10**c
if not sieve[num]:
return False
c -= 1
return True
result = []
for i in primes:
if is_truncatable(i):
result.append(i)
if len(result) == 11:
break
print(sum(result)) | [
"lib.get_prime_sieve",
"lib.primes"
] | [((53, 75), 'lib.get_prime_sieve', 'lib.get_prime_sieve', (['N'], {}), '(N)\n', (72, 75), False, 'import lib\n'), ((85, 105), 'lib.primes', 'lib.primes', (['N', 'sieve'], {}), '(N, sieve)\n', (95, 105), False, 'import lib\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for utility classes."""
import datetime
import sys
import unittest
from absl import app
from absl.testing import absltest
from grr_response_core.lib import rdfvalue
from grr.test_lib import test_lib
long_string = (
"迎欢迎\n"
"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi luctus "
"ex sed dictum volutpat. Integer maximus, mauris at tincidunt iaculis, "
"felis magna scelerisque ex, in scelerisque est odio non nunc. "
"Suspendisse et lobortis augue. Donec faucibus tempor massa, sed dapibus"
" erat iaculis ut. Vestibulum eu elementum nulla. Nullam scelerisque "
"hendrerit lorem. Integer vitae semper metus. Suspendisse accumsan "
"dictum felis. Etiam viverra, felis sed ullamcorper vehicula, libero "
"nisl tempus dui, a porta lacus erat et erat. Morbi mattis elementum "
"efficitur. Pellentesque aliquam placerat mauris non accumsan.")
class RDFValueTest(absltest.TestCase):
"""RDFValue tests."""
def testStr(self):
"""Test RDFValue.__str__."""
self.assertEqual(str(rdfvalue.RDFInteger(1)), "1")
self.assertEqual(str(rdfvalue.RDFString(long_string)), long_string)
# TODO(hanuszczak): Current implementation of `repr` for RDF values is broken
# and not in line with Python guidelines. For example, `repr` should be
# unambiguous whereas current implementation will trim long representations
# with `...`. Moreover, the representation for most types is questionable at
# best.
#
# The implementation should be fixed and proper tests should be written.
class RDFBytesTest(absltest.TestCase):
def testFromHumanReadable(self):
string = u"zażółć gęślą jaźń"
result = rdfvalue.RDFBytes.FromHumanReadable(string)
expected = rdfvalue.RDFBytes.FromSerializedBytes(string.encode("utf-8"))
self.assertEqual(result, expected)
class RDFStringTest(absltest.TestCase):
  """Tests for rdfvalue.RDFString construction and comparisons."""

  def testFromHumanReadable(self):
    """A unicode string round-trips through RDFString unchanged."""
    text = u"pchnąć w tę łódź jeża lub ośm skrzyń fig"
    parsed = rdfvalue.RDFString.FromHumanReadable(text)
    self.assertEqual(str(parsed), text)

  def testEqualWithBytes(self):
    # Equality against bytes holds only for the matching UTF-8 encoding.
    self.assertEqual(rdfvalue.RDFString(u"foo"), b"foo")
    self.assertNotEqual(rdfvalue.RDFString(u"foo"), b"\x80\x81\x82")

  def testLessThanWithBytes(self):
    # Ordering against raw bytes is also supported.
    self.assertLess(rdfvalue.RDFString(u"abc"), b"def")
    self.assertGreater(rdfvalue.RDFString(u"xyz"), b"ghi")
    self.assertLess(rdfvalue.RDFString(u"012"), b"\x80\x81\x81")

  # TODO: Python on Windows ships with UCS-2 by default, which does
  # not properly support unicode.
  @unittest.skipIf(
      sys.maxunicode <= 65535,
      "Your Python installation does not properly support Unicode (likely: "
      "Python with no UCS4 support on Windows.")
  def testLenOfEmoji(self):
    # Each emoji must count as a single code point, so the length is 2.
    self.assertLen(rdfvalue.RDFString("🚀🚀"), 2)
class RDFIntegerTest(absltest.TestCase):
  """Tests for parsing rdfvalue.RDFInteger from human-readable strings."""

  def testFromHumanReadable(self):
    self.assertEqual(
        rdfvalue.RDFInteger.FromHumanReadable(u"42"), rdfvalue.RDFInteger(42))

  def testFromHumanReadablePositive(self):
    # An explicit leading "+" sign is accepted.
    self.assertEqual(
        rdfvalue.RDFInteger.FromHumanReadable(u"+108"),
        rdfvalue.RDFInteger(108))

  def testFromHumanReadableNegative(self):
    self.assertEqual(
        rdfvalue.RDFInteger.FromHumanReadable(u"-1337"),
        rdfvalue.RDFInteger(-1337))

  def testFromHumanReadableZero(self):
    self.assertEqual(
        rdfvalue.RDFInteger.FromHumanReadable(u"0"), rdfvalue.RDFInteger(0))

  def testFromHumanReadableRaisesOnNonInteger(self):
    # Fractional input must be rejected, not truncated.
    with self.assertRaises(ValueError):
      rdfvalue.RDFInteger.FromHumanReadable(u"12.3")

  def testFromHumanReadableRaisesOnNonDecimal(self):
    # Only base-10 digits are valid.
    with self.assertRaises(ValueError):
      rdfvalue.RDFInteger.FromHumanReadable(u"12A")
class RDFDateTimeTest(absltest.TestCase):
  """Tests for RDFDatetime linear interpolation (Lerp) and flooring."""

  def testLerpMiddle(self):
    start = rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01")
    end = start + rdfvalue.Duration.From(10, rdfvalue.DAYS)
    halfway = rdfvalue.RDFDatetime.Lerp(0.5, start_time=start, end_time=end)
    self.assertEqual(
        halfway, start + rdfvalue.Duration.From(5, rdfvalue.DAYS))

  def testLerpZero(self):
    start = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
    end = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
    # Progress 0.0 must yield exactly the start time.
    self.assertEqual(
        rdfvalue.RDFDatetime.Lerp(0.0, start_time=start, end_time=end), start)

  def testLerpOne(self):
    start = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
    end = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
    # Progress 1.0 must yield exactly the end time.
    self.assertEqual(
        rdfvalue.RDFDatetime.Lerp(1.0, start_time=start, end_time=end), end)

  def testLerpQuarter(self):
    start = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
    end = start + rdfvalue.Duration.From(4, rdfvalue.DAYS)
    quarter = rdfvalue.RDFDatetime.Lerp(0.25, start_time=start, end_time=end)
    self.assertEqual(
        quarter, start + rdfvalue.Duration.From(1, rdfvalue.DAYS))

  def testLerpRaisesTypeErrorIfTimesAreNotRDFDatetime(self):
    now = rdfvalue.RDFDatetime.Now()
    with self.assertRaisesRegex(TypeError, "non-datetime"):
      rdfvalue.RDFDatetime.Lerp(0.0, start_time=10, end_time=now)
    with self.assertRaisesRegex(TypeError, "non-datetime"):
      rdfvalue.RDFDatetime.Lerp(
          0.0,
          start_time=now,
          end_time=rdfvalue.Duration.From(1, rdfvalue.DAYS))

  def testLerpRaisesValueErrorIfProgressIsNotNormalized(self):
    start = rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01")
    end = rdfvalue.RDFDatetime.FromHumanReadable("2011-01-01")
    # Progress must lie within [0.0, 1.0].
    for progress in [1.5, -0.5]:
      with self.assertRaises(ValueError):
        rdfvalue.RDFDatetime.Lerp(progress, start_time=start, end_time=end)

  def testFloorToMinutes(self):
    timestamp = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34:56")
    floored = timestamp.Floor(rdfvalue.Duration.From(60, rdfvalue.SECONDS))
    self.assertEqual(
        floored, rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34"))

  def testFloorToHours(self):
    timestamp = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
    floored = timestamp.Floor(rdfvalue.Duration.From(1, rdfvalue.HOURS))
    self.assertEqual(
        floored, rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:00"))

  def testFloorToDays(self):
    timestamp = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
    floored = timestamp.Floor(rdfvalue.Duration.From(1, rdfvalue.DAYS))
    self.assertEqual(
        floored, rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11"))

  def testFloorExact(self):
    # Flooring to a granularity the value already satisfies is a no-op.
    timestamp = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34:56")
    self.assertEqual(
        timestamp,
        timestamp.Floor(rdfvalue.Duration.From(1, rdfvalue.SECONDS)))
class RDFDatetimeSecondsTest(absltest.TestCase):
  """Tests for the second-granularity RDFDatetimeSeconds type."""

  def testFromDatetime_withMicroSeconds(self):
    # Sub-second precision is discarded when converting from a datetime.
    with_micros = datetime.datetime(2000, 1, 1, microsecond=5000)
    without_micros = datetime.datetime(2000, 1, 1)
    self.assertEqual(
        rdfvalue.RDFDatetimeSeconds.FromDatetime(with_micros),
        rdfvalue.RDFDatetimeSeconds.FromDatetime(without_micros))

  def testBug122716179(self):
    # Regression test: conversions and arithmetic keep the microsecond scale.
    one_second = rdfvalue.RDFDatetimeSeconds.FromSecondsSinceEpoch(1)
    self.assertEqual(one_second.AsMicrosecondsSinceEpoch(), 1000000)
    difference = rdfvalue.RDFDatetimeSeconds(10) - rdfvalue.Duration("3s")
    self.assertEqual(difference.AsMicrosecondsSinceEpoch(), 7000000)
class DurationSecondsTest(absltest.TestCase):
  """Tests for the second-granularity DurationSeconds type."""

  def testPublicAttributes(self):
    one_hour = rdfvalue.DurationSeconds.FromHumanReadable("1h")
    self.assertEqual(one_hour.ToInt(rdfvalue.SECONDS), 3600)
    self.assertEqual(one_hour.ToInt(rdfvalue.MILLISECONDS), 3600 * 1000)
    self.assertEqual(one_hour.microseconds, 3600 * 1000 * 1000)

  def testFromDays(self):
    for days, text in [(2, "2d"), (31, "31d")]:
      self.assertEqual(
          rdfvalue.DurationSeconds.From(days, rdfvalue.DAYS),
          rdfvalue.DurationSeconds.FromHumanReadable(text))

  def testFromHours(self):
    for hours, text in [(48, "48h"), (24, "24h")]:
      self.assertEqual(
          rdfvalue.DurationSeconds.From(hours, rdfvalue.HOURS),
          rdfvalue.DurationSeconds.FromHumanReadable(text))

  def testFromSeconds(self):
    duration = rdfvalue.DurationSeconds.From(1337, rdfvalue.SECONDS)
    self.assertEqual(duration.ToInt(rdfvalue.SECONDS), 1337)

  def testFromMicroseconds(self):
    duration = rdfvalue.DurationSeconds.From(3000000, rdfvalue.MICROSECONDS)
    self.assertEqual(duration.microseconds, 3000000)
    self.assertEqual(duration.ToInt(rdfvalue.SECONDS), 3)

  def testFloatConstructorRaises(self):
    # Durations are integral; a float argument must be rejected.
    with self.assertRaises(TypeError):
      rdfvalue.DurationSeconds(3.14)

  def testSerializeToBytes(self):
    for amount, unit, expected in [
        (0, rdfvalue.WEEKS, b"0"),
        (1, rdfvalue.SECONDS, b"1"),
        (2, rdfvalue.SECONDS, b"2"),
        (999, rdfvalue.SECONDS, b"999"),
        (1000, rdfvalue.SECONDS, b"1000"),
    ]:
      self.assertEqual(
          expected,
          rdfvalue.DurationSeconds.From(amount, unit).SerializeToBytes())

  def testFromWireFormat(self):
    for seconds in [0, 7, 1337]:
      val = rdfvalue.DurationSeconds.FromWireFormat(seconds)
      self.assertEqual(seconds, val.ToInt(rdfvalue.SECONDS))
      # The wire format must round-trip.
      roundtripped = rdfvalue.DurationSeconds.FromWireFormat(
          val.SerializeToWireFormat())
      self.assertEqual(val, roundtripped)
# Largest value representable in an unsigned 64-bit integer (2**64 - 1);
# used as the boundary case throughout the Duration tests below.
MAX_UINT64 = 18446744073709551615
class DurationTest(absltest.TestCase):
  """Tests for the microsecond-granularity rdfvalue.Duration type."""

  def _AssertInitializationFromUnit(self, unit, multiplier, suffix):
    """Verifies Duration.From() and FromHumanReadable() for one time unit.

    Args:
      unit: An rdfvalue time-unit constant (e.g. rdfvalue.SECONDS).
      multiplier: Number of microseconds in one `unit`.
      suffix: Human-readable suffix for `unit` (e.g. "s").
    """
    # MAX_UINT64 // multiplier is the largest amount of `unit` that still
    # fits into the uint64 microsecond counter.
    for amount in [0, 1, 7, 60, 1337, MAX_UINT64 // multiplier]:
      val = rdfvalue.Duration.From(amount, unit)
      self.assertEqual(amount * multiplier, val.microseconds)
      self.assertEqual(
          val,
          rdfvalue.Duration.FromHumanReadable("{} {}".format(amount, suffix)))

  def testInitializationFromMicroseconds(self):
    self._AssertInitializationFromUnit(rdfvalue.MICROSECONDS, 1, "us")
    # A bare integer constructor argument is interpreted as microseconds.
    for amount in [0, 1, 7, 60, 1337, MAX_UINT64]:
      self.assertEqual(
          rdfvalue.Duration.From(amount, rdfvalue.MICROSECONDS),
          rdfvalue.Duration(amount))

  def testInitializationFromMilliseconds(self):
    self._AssertInitializationFromUnit(rdfvalue.MILLISECONDS, 1000, "ms")

  def testInitializationFromSeconds(self):
    self._AssertInitializationFromUnit(rdfvalue.SECONDS, 1000000, "s")

  def testInitializationFromMinutes(self):
    self._AssertInitializationFromUnit(rdfvalue.MINUTES, 60000000, "m")

  def testInitializationFromHours(self):
    self._AssertInitializationFromUnit(rdfvalue.HOURS, 3600000000, "h")

  def testInitializationFromDays(self):
    self._AssertInitializationFromUnit(rdfvalue.DAYS, 86400000000, "d")

  def testInitializationFromWeeks(self):
    self._AssertInitializationFromUnit(rdfvalue.WEEKS, 604800000000, "w")

  def testConversionToInt(self):
    # ToInt truncates via floor division of the microsecond count.
    for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
      val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
      self.assertEqual(val.ToInt(rdfvalue.MICROSECONDS), i)
      self.assertEqual(val.ToInt(rdfvalue.MILLISECONDS), i // 1000)
      self.assertEqual(val.ToInt(rdfvalue.SECONDS), i // (1000 * 1000))
      self.assertEqual(val.ToInt(rdfvalue.MINUTES), i // (60 * 1000 * 1000))
      self.assertEqual(val.ToInt(rdfvalue.HOURS), i // (60 * 60 * 1000 * 1000))
      self.assertEqual(
          val.ToInt(rdfvalue.DAYS), i // (24 * 60 * 60 * 1000 * 1000))
      self.assertEqual(
          val.ToInt(rdfvalue.WEEKS), i // (7 * 24 * 60 * 60 * 1000 * 1000))

  def testConversionToFractional(self):
    # ToFractional returns a float instead of truncating.
    for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
      val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
      self.assertAlmostEqual(val.ToFractional(rdfvalue.MICROSECONDS), i)
      self.assertAlmostEqual(val.ToFractional(rdfvalue.MILLISECONDS), i / 1000)
      self.assertAlmostEqual(
          val.ToFractional(rdfvalue.SECONDS), i / (1000 * 1000))
      self.assertAlmostEqual(
          val.ToFractional(rdfvalue.MINUTES), i / (60 * 1000 * 1000))
      self.assertAlmostEqual(
          val.ToFractional(rdfvalue.HOURS), i / (60 * 60 * 1000 * 1000))
      self.assertAlmostEqual(
          val.ToFractional(rdfvalue.DAYS), i / (24 * 60 * 60 * 1000 * 1000))
      self.assertAlmostEqual(
          val.ToFractional(rdfvalue.WEEKS),
          i / (7 * 24 * 60 * 60 * 1000 * 1000))

  def testStringDeserialization(self):
    # SerializeToBytes / FromSerializedBytes must round-trip exactly.
    for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
      val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
      self.assertEqual(
          rdfvalue.Duration.FromSerializedBytes(val.SerializeToBytes()), val)

  def testHumanReadableStringSerialization(self):
    # str() renders the duration with the coarsest unit that divides it
    # evenly (e.g. 21 days prints as "3 w").
    self.assertEqual("0 us", str(rdfvalue.Duration.From(0, rdfvalue.WEEKS)))
    self.assertEqual("1 us",
                     str(rdfvalue.Duration.From(1, rdfvalue.MICROSECONDS)))
    self.assertEqual("2 us",
                     str(rdfvalue.Duration.From(2, rdfvalue.MICROSECONDS)))
    self.assertEqual("999 us",
                     str(rdfvalue.Duration.From(999, rdfvalue.MICROSECONDS)))
    self.assertEqual("1 ms",
                     str(rdfvalue.Duration.From(1000, rdfvalue.MICROSECONDS)))
    self.assertEqual("1 ms",
                     str(rdfvalue.Duration.From(1, rdfvalue.MILLISECONDS)))
    self.assertEqual(
        "{} us".format(MAX_UINT64),
        str(rdfvalue.Duration.From(MAX_UINT64, rdfvalue.MICROSECONDS)))
    self.assertEqual("3 s", str(rdfvalue.Duration.From(3, rdfvalue.SECONDS)))
    self.assertEqual("3 m", str(rdfvalue.Duration.From(3, rdfvalue.MINUTES)))
    self.assertEqual("3 h", str(rdfvalue.Duration.From(3, rdfvalue.HOURS)))
    self.assertEqual("3 d", str(rdfvalue.Duration.From(3, rdfvalue.DAYS)))
    self.assertEqual("3 w", str(rdfvalue.Duration.From(21, rdfvalue.DAYS)))

  def testSerializeToBytes(self):
    # The serialized form is the decimal microsecond count as ASCII bytes.
    for amount, unit, expected in [
        (0, rdfvalue.WEEKS, b"0"),
        (1, rdfvalue.MICROSECONDS, b"1"),
        (2, rdfvalue.MICROSECONDS, b"2"),
        (999, rdfvalue.MICROSECONDS, b"999"),
        (1000, rdfvalue.MICROSECONDS, b"1000"),
        (MAX_UINT64, rdfvalue.MICROSECONDS, str(MAX_UINT64).encode("utf-8")),
        (3, rdfvalue.SECONDS, b"3000000"),
    ]:
      self.assertEqual(
          expected, rdfvalue.Duration.From(amount, unit).SerializeToBytes())

  def testAdditionOfDurationsIsEqualToIntegerAddition(self):
    for a in [0, 1, 7, 60, 1337, MAX_UINT64 // 2]:
      for b in [0, 1, 7, 60, 1337, MAX_UINT64 // 2]:
        self.assertEqual(
            rdfvalue.Duration(a) + rdfvalue.Duration(b),
            rdfvalue.Duration(a + b))

  def testSubtractionOfDurationsIsEqualToIntegerSubtraction(self):
    # min(a, b) keeps every subtrahend <= minuend so results stay
    # non-negative.
    for a in [0, 1, 7, 60, 1337, MAX_UINT64]:
      for b in [0, 1, 7, 60, 1337, MAX_UINT64]:
        self.assertEqual(
            rdfvalue.Duration(a) - rdfvalue.Duration(min(a, b)),
            rdfvalue.Duration(a - min(a, b)))

  def testFromWireFormat(self):
    for i in [0, 7, 1337, MAX_UINT64]:
      val = rdfvalue.Duration.FromWireFormat(i)
      self.assertEqual(i, val.microseconds)

  def testSubtractionFromDateTimeIsEqualToIntegerSubtraction(self):
    for a in [0, 1, 7, 60, 1337]:
      for b in [0, 1, 7, 60, 1337]:
        lhs = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(a)
        rhs = rdfvalue.Duration(min(a, b))
        result = lhs - rhs
        self.assertEqual(result.AsMicrosecondsSinceEpoch(), a - min(a, b))

  def testAdditionToDateTimeIsEqualToIntegerAddition(self):
    for a in [0, 1, 7, 60, 1337]:
      for b in [0, 1, 7, 60, 1337]:
        lhs = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(a)
        rhs = rdfvalue.Duration(b)
        result = lhs + rhs
        self.assertEqual(result.AsMicrosecondsSinceEpoch(), a + b)

  def testComparisonIsEqualToIntegerComparison(self):
    # Every comparison operator must mirror the comparison of the
    # underlying microsecond integers.
    for a in [0, 1, 7, 60, 1337, MAX_UINT64 - 1, MAX_UINT64]:
      for b in [0, 1, 7, 60, 1337, MAX_UINT64 - 1, MAX_UINT64]:
        dur_a = rdfvalue.Duration(a)
        dur_b = rdfvalue.Duration(b)
        if a > b:
          self.assertGreater(dur_a, dur_b)
        if a >= b:
          self.assertGreaterEqual(dur_a, dur_b)
        if a == b:
          self.assertEqual(dur_a, dur_b)
        if a <= b:
          self.assertLessEqual(dur_a, dur_b)
        if a < b:
          self.assertLess(dur_a, dur_b)
        if a != b:
          self.assertNotEqual(dur_a, dur_b)
class DocTest(test_lib.DocTest):
  """Runs the doctests embedded in the rdfvalue module."""
  module = rdfvalue
def main(argv):
  """Entry point: delegates test execution to the GRR test runner."""
  test_lib.main(argv)
# Script entry point when executed directly.
if __name__ == "__main__":
  app.run(main)
| [
"grr_response_core.lib.rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch",
"unittest.skipIf",
"grr_response_core.lib.rdfvalue.Duration.From",
"grr_response_core.lib.rdfvalue.RDFDatetimeSeconds.FromDatetime",
"datetime.datetime",
"grr_response_core.lib.rdfvalue.RDFInteger.FromHumanReadable",
"absl.app.run"... | [((2607, 2767), 'unittest.skipIf', 'unittest.skipIf', (['(sys.maxunicode <= 65535)', '"""Your Python installation does not properly support Unicode (likely: Python with no UCS4 support on Windows."""'], {}), "(sys.maxunicode <= 65535,\n 'Your Python installation does not properly support Unicode (likely: Python with no UCS4 support on Windows.'\n )\n", (2622, 2767), False, 'import unittest\n'), ((18543, 18562), 'grr.test_lib.test_lib.main', 'test_lib.main', (['argv'], {}), '(argv)\n', (18556, 18562), False, 'from grr.test_lib import test_lib\n'), ((18594, 18607), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (18601, 18607), False, 'from absl import app\n'), ((1730, 1773), 'grr_response_core.lib.rdfvalue.RDFBytes.FromHumanReadable', 'rdfvalue.RDFBytes.FromHumanReadable', (['string'], {}), '(string)\n', (1765, 1773), False, 'from grr_response_core.lib import rdfvalue\n'), ((2039, 2083), 'grr_response_core.lib.rdfvalue.RDFString.FromHumanReadable', 'rdfvalue.RDFString.FromHumanReadable', (['string'], {}), '(string)\n', (2075, 2083), False, 'from grr_response_core.lib import rdfvalue\n'), ((2949, 2993), 'grr_response_core.lib.rdfvalue.RDFInteger.FromHumanReadable', 'rdfvalue.RDFInteger.FromHumanReadable', (['u"""42"""'], {}), "(u'42')\n", (2986, 2993), False, 'from grr_response_core.lib import rdfvalue\n'), ((3105, 3151), 'grr_response_core.lib.rdfvalue.RDFInteger.FromHumanReadable', 'rdfvalue.RDFInteger.FromHumanReadable', (['u"""+108"""'], {}), "(u'+108')\n", (3142, 3151), False, 'from grr_response_core.lib import rdfvalue\n'), ((3264, 3311), 'grr_response_core.lib.rdfvalue.RDFInteger.FromHumanReadable', 'rdfvalue.RDFInteger.FromHumanReadable', (['u"""-1337"""'], {}), "(u'-1337')\n", (3301, 3311), False, 'from grr_response_core.lib import rdfvalue\n'), ((3422, 3465), 'grr_response_core.lib.rdfvalue.RDFInteger.FromHumanReadable', 'rdfvalue.RDFInteger.FromHumanReadable', (['u"""0"""'], {}), "(u'0')\n", (3459, 3465), False, 'from 
grr_response_core.lib import rdfvalue\n'), ((3902, 3954), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2010-01-01"""'], {}), "('2010-01-01')\n", (3940, 3954), False, 'from grr_response_core.lib import rdfvalue\n'), ((4043, 4115), 'grr_response_core.lib.rdfvalue.RDFDatetime.Lerp', 'rdfvalue.RDFDatetime.Lerp', (['(0.5)'], {'start_time': 'start_time', 'end_time': 'end_time'}), '(0.5, start_time=start_time, end_time=end_time)\n', (4068, 4115), False, 'from grr_response_core.lib import rdfvalue\n'), ((4279, 4331), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2000-01-01"""'], {}), "('2000-01-01')\n", (4317, 4331), False, 'from grr_response_core.lib import rdfvalue\n'), ((4347, 4399), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2020-01-01"""'], {}), "('2020-01-01')\n", (4385, 4399), False, 'from grr_response_core.lib import rdfvalue\n'), ((4418, 4490), 'grr_response_core.lib.rdfvalue.RDFDatetime.Lerp', 'rdfvalue.RDFDatetime.Lerp', (['(0.0)'], {'start_time': 'start_time', 'end_time': 'end_time'}), '(0.0, start_time=start_time, end_time=end_time)\n', (4443, 4490), False, 'from grr_response_core.lib import rdfvalue\n'), ((4589, 4641), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2000-01-01"""'], {}), "('2000-01-01')\n", (4627, 4641), False, 'from grr_response_core.lib import rdfvalue\n'), ((4657, 4709), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2020-01-01"""'], {}), "('2020-01-01')\n", (4695, 4709), False, 'from grr_response_core.lib import rdfvalue\n'), ((4728, 4800), 'grr_response_core.lib.rdfvalue.RDFDatetime.Lerp', 'rdfvalue.RDFDatetime.Lerp', (['(1.0)'], {'start_time': 'start_time', 'end_time': 'end_time'}), '(1.0, start_time=start_time, 
end_time=end_time)\n', (4753, 4800), False, 'from grr_response_core.lib import rdfvalue\n'), ((4901, 4953), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2000-01-01"""'], {}), "('2000-01-01')\n", (4939, 4953), False, 'from grr_response_core.lib import rdfvalue\n'), ((5041, 5114), 'grr_response_core.lib.rdfvalue.RDFDatetime.Lerp', 'rdfvalue.RDFDatetime.Lerp', (['(0.25)'], {'start_time': 'start_time', 'end_time': 'end_time'}), '(0.25, start_time=start_time, end_time=end_time)\n', (5066, 5114), False, 'from grr_response_core.lib import rdfvalue\n'), ((5306, 5332), 'grr_response_core.lib.rdfvalue.RDFDatetime.Now', 'rdfvalue.RDFDatetime.Now', ([], {}), '()\n', (5330, 5332), False, 'from grr_response_core.lib import rdfvalue\n'), ((5737, 5789), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2010-01-01"""'], {}), "('2010-01-01')\n", (5775, 5789), False, 'from grr_response_core.lib import rdfvalue\n'), ((5805, 5857), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2011-01-01"""'], {}), "('2011-01-01')\n", (5843, 5857), False, 'from grr_response_core.lib import rdfvalue\n'), ((6141, 6202), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2011-11-11 12:34:56"""'], {}), "('2011-11-11 12:34:56')\n", (6179, 6202), False, 'from grr_response_core.lib import rdfvalue\n'), ((6218, 6276), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2011-11-11 12:34"""'], {}), "('2011-11-11 12:34')\n", (6256, 6276), False, 'from grr_response_core.lib import rdfvalue\n'), ((6413, 6471), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2011-11-11 12:34"""'], {}), "('2011-11-11 12:34')\n", (6451, 6471), False, 'from 
grr_response_core.lib import rdfvalue\n'), ((6487, 6545), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2011-11-11 12:00"""'], {}), "('2011-11-11 12:00')\n", (6525, 6545), False, 'from grr_response_core.lib import rdfvalue\n'), ((6678, 6736), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2011-11-11 12:34"""'], {}), "('2011-11-11 12:34')\n", (6716, 6736), False, 'from grr_response_core.lib import rdfvalue\n'), ((6752, 6804), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2011-11-11"""'], {}), "('2011-11-11')\n", (6790, 6804), False, 'from grr_response_core.lib import rdfvalue\n'), ((6935, 6996), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromHumanReadable', 'rdfvalue.RDFDatetime.FromHumanReadable', (['"""2011-11-11 12:34:56"""'], {}), "('2011-11-11 12:34:56')\n", (6973, 6996), False, 'from grr_response_core.lib import rdfvalue\n'), ((7197, 7244), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)'], {'microsecond': '(5000)'}), '(2000, 1, 1, microsecond=5000)\n', (7214, 7244), False, 'import datetime\n'), ((7254, 7283), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (7271, 7283), False, 'import datetime\n'), ((7465, 7517), 'grr_response_core.lib.rdfvalue.RDFDatetimeSeconds.FromSecondsSinceEpoch', 'rdfvalue.RDFDatetimeSeconds.FromSecondsSinceEpoch', (['(1)'], {}), '(1)\n', (7514, 7517), False, 'from grr_response_core.lib import rdfvalue\n'), ((7808, 7856), 'grr_response_core.lib.rdfvalue.DurationSeconds.FromHumanReadable', 'rdfvalue.DurationSeconds.FromHumanReadable', (['"""1h"""'], {}), "('1h')\n", (7850, 7856), False, 'from grr_response_core.lib import rdfvalue\n'), ((8907, 8968), 'grr_response_core.lib.rdfvalue.DurationSeconds.From', 'rdfvalue.DurationSeconds.From', (['(3000000)', 'rdfvalue.MICROSECONDS'], {}), 
'(3000000, rdfvalue.MICROSECONDS)\n', (8936, 8968), False, 'from grr_response_core.lib import rdfvalue\n'), ((2180, 2206), 'grr_response_core.lib.rdfvalue.RDFString', 'rdfvalue.RDFString', (['u"""foo"""'], {}), "(u'foo')\n", (2198, 2206), False, 'from grr_response_core.lib import rdfvalue\n'), ((2240, 2266), 'grr_response_core.lib.rdfvalue.RDFString', 'rdfvalue.RDFString', (['u"""foo"""'], {}), "(u'foo')\n", (2258, 2266), False, 'from grr_response_core.lib import rdfvalue\n'), ((2341, 2367), 'grr_response_core.lib.rdfvalue.RDFString', 'rdfvalue.RDFString', (['u"""abc"""'], {}), "(u'abc')\n", (2359, 2367), False, 'from grr_response_core.lib import rdfvalue\n'), ((2400, 2426), 'grr_response_core.lib.rdfvalue.RDFString', 'rdfvalue.RDFString', (['u"""xyz"""'], {}), "(u'xyz')\n", (2418, 2426), False, 'from grr_response_core.lib import rdfvalue\n'), ((2456, 2482), 'grr_response_core.lib.rdfvalue.RDFString', 'rdfvalue.RDFString', (['u"""012"""'], {}), "(u'012')\n", (2474, 2482), False, 'from grr_response_core.lib import rdfvalue\n'), ((2828, 2852), 'grr_response_core.lib.rdfvalue.RDFString', 'rdfvalue.RDFString', (['"""🚀🚀"""'], {}), "('🚀🚀')\n", (2846, 2852), False, 'from grr_response_core.lib import rdfvalue\n'), ((3023, 3046), 'grr_response_core.lib.rdfvalue.RDFInteger', 'rdfvalue.RDFInteger', (['(42)'], {}), '(42)\n', (3042, 3046), False, 'from grr_response_core.lib import rdfvalue\n'), ((3181, 3205), 'grr_response_core.lib.rdfvalue.RDFInteger', 'rdfvalue.RDFInteger', (['(108)'], {}), '(108)\n', (3200, 3205), False, 'from grr_response_core.lib import rdfvalue\n'), ((3341, 3367), 'grr_response_core.lib.rdfvalue.RDFInteger', 'rdfvalue.RDFInteger', (['(-1337)'], {}), '(-1337)\n', (3360, 3367), False, 'from grr_response_core.lib import rdfvalue\n'), ((3495, 3517), 'grr_response_core.lib.rdfvalue.RDFInteger', 'rdfvalue.RDFInteger', (['(0)'], {}), '(0)\n', (3514, 3517), False, 'from grr_response_core.lib import rdfvalue\n'), ((3619, 3665), 
'grr_response_core.lib.rdfvalue.RDFInteger.FromHumanReadable', 'rdfvalue.RDFInteger.FromHumanReadable', (['u"""12.3"""'], {}), "(u'12.3')\n", (3656, 3665), False, 'from grr_response_core.lib import rdfvalue\n'), ((3766, 3811), 'grr_response_core.lib.rdfvalue.RDFInteger.FromHumanReadable', 'rdfvalue.RDFInteger.FromHumanReadable', (['u"""12A"""'], {}), "(u'12A')\n", (3803, 3811), False, 'from grr_response_core.lib import rdfvalue\n'), ((3983, 4024), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(10)', 'rdfvalue.DAYS'], {}), '(10, rdfvalue.DAYS)\n', (4005, 4024), False, 'from grr_response_core.lib import rdfvalue\n'), ((4982, 5022), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(4)', 'rdfvalue.DAYS'], {}), '(4, rdfvalue.DAYS)\n', (5004, 5022), False, 'from grr_response_core.lib import rdfvalue\n'), ((5400, 5459), 'grr_response_core.lib.rdfvalue.RDFDatetime.Lerp', 'rdfvalue.RDFDatetime.Lerp', (['(0.0)'], {'start_time': '(10)', 'end_time': 'now'}), '(0.0, start_time=10, end_time=now)\n', (5425, 5459), False, 'from grr_response_core.lib import rdfvalue\n'), ((5905, 5977), 'grr_response_core.lib.rdfvalue.RDFDatetime.Lerp', 'rdfvalue.RDFDatetime.Lerp', (['(1.5)'], {'start_time': 'start_time', 'end_time': 'end_time'}), '(1.5, start_time=start_time, end_time=end_time)\n', (5930, 5977), False, 'from grr_response_core.lib import rdfvalue\n'), ((6025, 6098), 'grr_response_core.lib.rdfvalue.RDFDatetime.Lerp', 'rdfvalue.RDFDatetime.Lerp', (['(-0.5)'], {'start_time': 'start_time', 'end_time': 'end_time'}), '(-0.5, start_time=start_time, end_time=end_time)\n', (6050, 6098), False, 'from grr_response_core.lib import rdfvalue\n'), ((7314, 7370), 'grr_response_core.lib.rdfvalue.RDFDatetimeSeconds.FromDatetime', 'rdfvalue.RDFDatetimeSeconds.FromDatetime', (['dt_with_micros'], {}), '(dt_with_micros)\n', (7354, 7370), False, 'from grr_response_core.lib import rdfvalue\n'), ((7380, 7424), 
'grr_response_core.lib.rdfvalue.RDFDatetimeSeconds.FromDatetime', 'rdfvalue.RDFDatetimeSeconds.FromDatetime', (['dt'], {}), '(dt)\n', (7420, 7424), False, 'from grr_response_core.lib import rdfvalue\n'), ((7589, 7620), 'grr_response_core.lib.rdfvalue.RDFDatetimeSeconds', 'rdfvalue.RDFDatetimeSeconds', (['(10)'], {}), '(10)\n', (7616, 7620), False, 'from grr_response_core.lib import rdfvalue\n'), ((7623, 7646), 'grr_response_core.lib.rdfvalue.Duration', 'rdfvalue.Duration', (['"""3s"""'], {}), "('3s')\n", (7640, 7646), False, 'from grr_response_core.lib import rdfvalue\n'), ((8112, 8159), 'grr_response_core.lib.rdfvalue.DurationSeconds.From', 'rdfvalue.DurationSeconds.From', (['(2)', 'rdfvalue.DAYS'], {}), '(2, rdfvalue.DAYS)\n', (8141, 8159), False, 'from grr_response_core.lib import rdfvalue\n'), ((8169, 8217), 'grr_response_core.lib.rdfvalue.DurationSeconds.FromHumanReadable', 'rdfvalue.DurationSeconds.FromHumanReadable', (['"""2d"""'], {}), "('2d')\n", (8211, 8217), False, 'from grr_response_core.lib import rdfvalue\n'), ((8249, 8297), 'grr_response_core.lib.rdfvalue.DurationSeconds.From', 'rdfvalue.DurationSeconds.From', (['(31)', 'rdfvalue.DAYS'], {}), '(31, rdfvalue.DAYS)\n', (8278, 8297), False, 'from grr_response_core.lib import rdfvalue\n'), ((8307, 8356), 'grr_response_core.lib.rdfvalue.DurationSeconds.FromHumanReadable', 'rdfvalue.DurationSeconds.FromHumanReadable', (['"""31d"""'], {}), "('31d')\n", (8349, 8356), False, 'from grr_response_core.lib import rdfvalue\n'), ((8416, 8465), 'grr_response_core.lib.rdfvalue.DurationSeconds.From', 'rdfvalue.DurationSeconds.From', (['(48)', 'rdfvalue.HOURS'], {}), '(48, rdfvalue.HOURS)\n', (8445, 8465), False, 'from grr_response_core.lib import rdfvalue\n'), ((8475, 8524), 'grr_response_core.lib.rdfvalue.DurationSeconds.FromHumanReadable', 'rdfvalue.DurationSeconds.FromHumanReadable', (['"""48h"""'], {}), "('48h')\n", (8517, 8524), False, 'from grr_response_core.lib import rdfvalue\n'), ((8556, 8605), 
'grr_response_core.lib.rdfvalue.DurationSeconds.From', 'rdfvalue.DurationSeconds.From', (['(24)', 'rdfvalue.HOURS'], {}), '(24, rdfvalue.HOURS)\n', (8585, 8605), False, 'from grr_response_core.lib import rdfvalue\n'), ((8615, 8664), 'grr_response_core.lib.rdfvalue.DurationSeconds.FromHumanReadable', 'rdfvalue.DurationSeconds.FromHumanReadable', (['"""24h"""'], {}), "('24h')\n", (8657, 8664), False, 'from grr_response_core.lib import rdfvalue\n'), ((9166, 9196), 'grr_response_core.lib.rdfvalue.DurationSeconds', 'rdfvalue.DurationSeconds', (['(3.14)'], {}), '(3.14)\n', (9190, 9196), False, 'from grr_response_core.lib import rdfvalue\n'), ((9925, 9967), 'grr_response_core.lib.rdfvalue.DurationSeconds.FromWireFormat', 'rdfvalue.DurationSeconds.FromWireFormat', (['i'], {}), '(i)\n', (9964, 9967), False, 'from grr_response_core.lib import rdfvalue\n'), ((10335, 10383), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['i', 'rdfvalue.MICROSECONDS'], {}), '(i, rdfvalue.MICROSECONDS)\n', (10357, 10383), False, 'from grr_response_core.lib import rdfvalue\n'), ((10700, 10748), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['i', 'rdfvalue.MILLISECONDS'], {}), '(i, rdfvalue.MILLISECONDS)\n', (10722, 10748), False, 'from grr_response_core.lib import rdfvalue\n'), ((11020, 11063), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['i', 'rdfvalue.SECONDS'], {}), '(i, rdfvalue.SECONDS)\n', (11042, 11063), False, 'from grr_response_core.lib import rdfvalue\n'), ((11338, 11381), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['i', 'rdfvalue.MINUTES'], {}), '(i, rdfvalue.MINUTES)\n', (11360, 11381), False, 'from grr_response_core.lib import rdfvalue\n'), ((11657, 11698), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['i', 'rdfvalue.HOURS'], {}), '(i, rdfvalue.HOURS)\n', (11679, 11698), False, 'from grr_response_core.lib import rdfvalue\n'), ((11976, 
12016), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['i', 'rdfvalue.DAYS'], {}), '(i, rdfvalue.DAYS)\n', (11998, 12016), False, 'from grr_response_core.lib import rdfvalue\n'), ((12297, 12338), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['i', 'rdfvalue.WEEKS'], {}), '(i, rdfvalue.WEEKS)\n', (12319, 12338), False, 'from grr_response_core.lib import rdfvalue\n'), ((12620, 12668), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['i', 'rdfvalue.MICROSECONDS'], {}), '(i, rdfvalue.MICROSECONDS)\n', (12642, 12668), False, 'from grr_response_core.lib import rdfvalue\n'), ((13344, 13392), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['i', 'rdfvalue.MICROSECONDS'], {}), '(i, rdfvalue.MICROSECONDS)\n', (13366, 13392), False, 'from grr_response_core.lib import rdfvalue\n'), ((14195, 14243), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['i', 'rdfvalue.MICROSECONDS'], {}), '(i, rdfvalue.MICROSECONDS)\n', (14217, 14243), False, 'from grr_response_core.lib import rdfvalue\n'), ((17086, 17121), 'grr_response_core.lib.rdfvalue.Duration.FromWireFormat', 'rdfvalue.Duration.FromWireFormat', (['i'], {}), '(i)\n', (17118, 17121), False, 'from grr_response_core.lib import rdfvalue\n'), ((1102, 1124), 'grr_response_core.lib.rdfvalue.RDFInteger', 'rdfvalue.RDFInteger', (['(1)'], {}), '(1)\n', (1121, 1124), False, 'from grr_response_core.lib import rdfvalue\n'), ((1157, 1188), 'grr_response_core.lib.rdfvalue.RDFString', 'rdfvalue.RDFString', (['long_string'], {}), '(long_string)\n', (1175, 1188), False, 'from grr_response_core.lib import rdfvalue\n'), ((4193, 4233), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(5)', 'rdfvalue.DAYS'], {}), '(5, rdfvalue.DAYS)\n', (4215, 4233), False, 'from grr_response_core.lib import rdfvalue\n'), ((5192, 5232), 'grr_response_core.lib.rdfvalue.Duration.From', 
'rdfvalue.Duration.From', (['(1)', 'rdfvalue.DAYS'], {}), '(1, rdfvalue.DAYS)\n', (5214, 5232), False, 'from grr_response_core.lib import rdfvalue\n'), ((6316, 6360), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(60)', 'rdfvalue.SECONDS'], {}), '(60, rdfvalue.SECONDS)\n', (6338, 6360), False, 'from grr_response_core.lib import rdfvalue\n'), ((6585, 6626), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(1)', 'rdfvalue.HOURS'], {}), '(1, rdfvalue.HOURS)\n', (6607, 6626), False, 'from grr_response_core.lib import rdfvalue\n'), ((6844, 6884), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(1)', 'rdfvalue.DAYS'], {}), '(1, rdfvalue.DAYS)\n', (6866, 6884), False, 'from grr_response_core.lib import rdfvalue\n'), ((7027, 7070), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(1)', 'rdfvalue.SECONDS'], {}), '(1, rdfvalue.SECONDS)\n', (7049, 7070), False, 'from grr_response_core.lib import rdfvalue\n'), ((10563, 10583), 'grr_response_core.lib.rdfvalue.Duration', 'rdfvalue.Duration', (['i'], {}), '(i)\n', (10580, 10583), False, 'from grr_response_core.lib import rdfvalue\n'), ((14430, 14471), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(0)', 'rdfvalue.WEEKS'], {}), '(0, rdfvalue.WEEKS)\n', (14452, 14471), False, 'from grr_response_core.lib import rdfvalue\n'), ((14528, 14576), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(1)', 'rdfvalue.MICROSECONDS'], {}), '(1, rdfvalue.MICROSECONDS)\n', (14550, 14576), False, 'from grr_response_core.lib import rdfvalue\n'), ((14633, 14681), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(2)', 'rdfvalue.MICROSECONDS'], {}), '(2, rdfvalue.MICROSECONDS)\n', (14655, 14681), False, 'from grr_response_core.lib import rdfvalue\n'), ((14740, 14790), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(999)', 
'rdfvalue.MICROSECONDS'], {}), '(999, rdfvalue.MICROSECONDS)\n', (14762, 14790), False, 'from grr_response_core.lib import rdfvalue\n'), ((14847, 14898), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(1000)', 'rdfvalue.MICROSECONDS'], {}), '(1000, rdfvalue.MICROSECONDS)\n', (14869, 14898), False, 'from grr_response_core.lib import rdfvalue\n'), ((14955, 15003), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(1)', 'rdfvalue.MILLISECONDS'], {}), '(1, rdfvalue.MILLISECONDS)\n', (14977, 15003), False, 'from grr_response_core.lib import rdfvalue\n'), ((15076, 15133), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['MAX_UINT64', 'rdfvalue.MICROSECONDS'], {}), '(MAX_UINT64, rdfvalue.MICROSECONDS)\n', (15098, 15133), False, 'from grr_response_core.lib import rdfvalue\n'), ((15168, 15211), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(3)', 'rdfvalue.SECONDS'], {}), '(3, rdfvalue.SECONDS)\n', (15190, 15211), False, 'from grr_response_core.lib import rdfvalue\n'), ((15246, 15289), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(3)', 'rdfvalue.MINUTES'], {}), '(3, rdfvalue.MINUTES)\n', (15268, 15289), False, 'from grr_response_core.lib import rdfvalue\n'), ((15324, 15365), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(3)', 'rdfvalue.HOURS'], {}), '(3, rdfvalue.HOURS)\n', (15346, 15365), False, 'from grr_response_core.lib import rdfvalue\n'), ((15400, 15440), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(3)', 'rdfvalue.DAYS'], {}), '(3, rdfvalue.DAYS)\n', (15422, 15440), False, 'from grr_response_core.lib import rdfvalue\n'), ((15475, 15516), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(21)', 'rdfvalue.DAYS'], {}), '(21, rdfvalue.DAYS)\n', (15497, 15516), False, 'from grr_response_core.lib import rdfvalue\n'), ((17319, 17369), 
'grr_response_core.lib.rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch', 'rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch', (['a'], {}), '(a)\n', (17366, 17369), False, 'from grr_response_core.lib import rdfvalue\n'), ((17660, 17710), 'grr_response_core.lib.rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch', 'rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch', (['a'], {}), '(a)\n', (17707, 17710), False, 'from grr_response_core.lib import rdfvalue\n'), ((17725, 17745), 'grr_response_core.lib.rdfvalue.Duration', 'rdfvalue.Duration', (['b'], {}), '(b)\n', (17742, 17745), False, 'from grr_response_core.lib import rdfvalue\n'), ((18037, 18057), 'grr_response_core.lib.rdfvalue.Duration', 'rdfvalue.Duration', (['a'], {}), '(a)\n', (18054, 18057), False, 'from grr_response_core.lib import rdfvalue\n'), ((18074, 18094), 'grr_response_core.lib.rdfvalue.Duration', 'rdfvalue.Duration', (['b'], {}), '(b)\n', (18091, 18094), False, 'from grr_response_core.lib import rdfvalue\n'), ((5614, 5654), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(1)', 'rdfvalue.DAYS'], {}), '(1, rdfvalue.DAYS)\n', (5636, 5654), False, 'from grr_response_core.lib import rdfvalue\n'), ((8726, 8779), 'grr_response_core.lib.rdfvalue.DurationSeconds.From', 'rdfvalue.DurationSeconds.From', (['(1337)', 'rdfvalue.SECONDS'], {}), '(1337, rdfvalue.SECONDS)\n', (8755, 8779), False, 'from grr_response_core.lib import rdfvalue\n'), ((9276, 9324), 'grr_response_core.lib.rdfvalue.DurationSeconds.From', 'rdfvalue.DurationSeconds.From', (['(0)', 'rdfvalue.WEEKS'], {}), '(0, rdfvalue.WEEKS)\n', (9305, 9324), False, 'from grr_response_core.lib import rdfvalue\n'), ((9389, 9439), 'grr_response_core.lib.rdfvalue.DurationSeconds.From', 'rdfvalue.DurationSeconds.From', (['(1)', 'rdfvalue.SECONDS'], {}), '(1, rdfvalue.SECONDS)\n', (9418, 9439), False, 'from grr_response_core.lib import rdfvalue\n'), ((9504, 9554), 'grr_response_core.lib.rdfvalue.DurationSeconds.From', 
'rdfvalue.DurationSeconds.From', (['(2)', 'rdfvalue.SECONDS'], {}), '(2, rdfvalue.SECONDS)\n', (9533, 9554), False, 'from grr_response_core.lib import rdfvalue\n'), ((9621, 9673), 'grr_response_core.lib.rdfvalue.DurationSeconds.From', 'rdfvalue.DurationSeconds.From', (['(999)', 'rdfvalue.SECONDS'], {}), '(999, rdfvalue.SECONDS)\n', (9650, 9673), False, 'from grr_response_core.lib import rdfvalue\n'), ((9741, 9794), 'grr_response_core.lib.rdfvalue.DurationSeconds.From', 'rdfvalue.DurationSeconds.From', (['(1000)', 'rdfvalue.SECONDS'], {}), '(1000, rdfvalue.SECONDS)\n', (9770, 9794), False, 'from grr_response_core.lib import rdfvalue\n'), ((15598, 15639), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(0)', 'rdfvalue.WEEKS'], {}), '(0, rdfvalue.WEEKS)\n', (15620, 15639), False, 'from grr_response_core.lib import rdfvalue\n'), ((15704, 15752), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(1)', 'rdfvalue.MICROSECONDS'], {}), '(1, rdfvalue.MICROSECONDS)\n', (15726, 15752), False, 'from grr_response_core.lib import rdfvalue\n'), ((15817, 15865), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(2)', 'rdfvalue.MICROSECONDS'], {}), '(2, rdfvalue.MICROSECONDS)\n', (15839, 15865), False, 'from grr_response_core.lib import rdfvalue\n'), ((15932, 15982), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(999)', 'rdfvalue.MICROSECONDS'], {}), '(999, rdfvalue.MICROSECONDS)\n', (15954, 15982), False, 'from grr_response_core.lib import rdfvalue\n'), ((16050, 16101), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(1000)', 'rdfvalue.MICROSECONDS'], {}), '(1000, rdfvalue.MICROSECONDS)\n', (16072, 16101), False, 'from grr_response_core.lib import rdfvalue\n'), ((16193, 16250), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['MAX_UINT64', 'rdfvalue.MICROSECONDS'], {}), '(MAX_UINT64, rdfvalue.MICROSECONDS)\n', 
(16215, 16250), False, 'from grr_response_core.lib import rdfvalue\n'), ((16352, 16395), 'grr_response_core.lib.rdfvalue.Duration.From', 'rdfvalue.Duration.From', (['(3)', 'rdfvalue.SECONDS'], {}), '(3, rdfvalue.SECONDS)\n', (16374, 16395), False, 'from grr_response_core.lib import rdfvalue\n'), ((16677, 16701), 'grr_response_core.lib.rdfvalue.Duration', 'rdfvalue.Duration', (['(a + b)'], {}), '(a + b)\n', (16694, 16701), False, 'from grr_response_core.lib import rdfvalue\n'), ((16620, 16640), 'grr_response_core.lib.rdfvalue.Duration', 'rdfvalue.Duration', (['a'], {}), '(a)\n', (16637, 16640), False, 'from grr_response_core.lib import rdfvalue\n'), ((16643, 16663), 'grr_response_core.lib.rdfvalue.Duration', 'rdfvalue.Duration', (['b'], {}), '(b)\n', (16660, 16663), False, 'from grr_response_core.lib import rdfvalue\n'), ((16903, 16923), 'grr_response_core.lib.rdfvalue.Duration', 'rdfvalue.Duration', (['a'], {}), '(a)\n', (16920, 16923), False, 'from grr_response_core.lib import rdfvalue\n')] |
#!/usr/bin/python
# coding=UTF-8
# ex:ts=4:sw=4:et=on
# Copyright (c) 2013, <NAME>
# All rights reserved.
# Complete license can be found in the LICENSE file.
import os
from pyxrd.data import settings
from pyxrd.project.models import Project
from pyxrd.phases.models import Component, Phase
def generate_expandables(
        filename_format, phase_name, maxR,
        phase_kwargs_AD, phase_kwargs_EG, phase_kwargs_350,
        code_AD, code_EG, code_350,
        comp_kwargs_AD, comp_kwargs_EG, comp_kwargs_350):
    """
    Build phase descriptions for R = 0 .. maxR-1, where each entry links
    an AD, an EG and a 350° Ca-saturated phase together (the EG and 350°
    phases are 'based_on' the AD phase of the same R).
    Returns a list of (filename_format, [three (kwargs, code, comp_kwargs)
    tuples]) pairs, one per R value.
    """
    expandables = []
    for R in range(maxR):
        # The EG and 350 phases inherit from the AD phase with the same R.
        ad_name = phase_name + (' R%d Ca-AD' % R)
        eg_name = phase_name + (' R%d Ca-EG' % R)
        ht_name = phase_name + (' R%d Ca-350' % R)
        ad_kwargs = dict(R=R, name=ad_name, **phase_kwargs_AD)
        eg_kwargs = dict(R=R, name=eg_name, based_on=ad_name, **phase_kwargs_EG)
        ht_kwargs = dict(R=R, name=ht_name, based_on=ad_name, **phase_kwargs_350)
        expandables.append((
            '%s' + (filename_format % R), [
                (ad_kwargs, code_AD, comp_kwargs_AD),
                (eg_kwargs, code_EG, comp_kwargs_EG),
                (ht_kwargs, code_350, comp_kwargs_350),
            ]
        ))
    return expandables
def run(args=None, ui_callback=None):
    """
    Generate and save the default phase description files shipped with PyXRD.

    How this script works:
     - 'code_length' is the length of the aliases keys (see below)
     - 'aliases' is a dictionary containing 4-character long keys describing a
       specific layer-type (or with other words: a Component object)
       E.g. dS2w stands for Di-octahedral Smectite with 2 layers of water.
       The values are file path formats, in which a single '%s' string
       placeholder will be filled with the absolute path to the default
       components folder.
     - 'default_phases' is an initially empty list that will be filled with
       two-tuples. The first element in this tuple is the filename of the
       generated phases, the second element is describing what this phase
       contains. This second element is again a tuple, containing three parts:
          - A dictionary of key-word arguments passed on to the Phase
            constructor. If a 'based_on' keyword is defined, an attempt is
            made to translate it to an earlier generated phase. This way, it
            is possible to pass the name of an earlier generated phase, and
            the script will pass in the actual Phase object instead.
          - A component code (string) built by the keys of the 'aliases'
            dictionary. This string's length should be a multiple of
            'code_length'. There is no separator, rather, the 'code_length'
            is used to split the code into its parts.
          - Component keyword arguments dictionaries: this is a dictionary in
            which the keys match with the components code parts. The values
            are property-value dictionaries used to set Component properties
            after importing them. Similarly to the Phases' 'based_on' keyword,
            the value for the 'linked_with' key is translated to the actual
            Component named as such. A key mapping a code onto itself (e.g.
            'dS0w' linked_with 'dS0w') links the component with the
            equally-named component created for the *previous* phase.

    'args' is unused; 'ui_callback', when callable, receives a float in
    [0.0, 1.0] reporting generation progress.

    ### Setup:
    """
    code_length = 4
    aliases = {
        'C   ': '%sChlorite.cmp',
        'K   ': '%sKaolinite.cmp',
        'I   ': '%sIllite.cmp',
        'Se  ': '%sSerpentine.cmp',
        'T   ': '%sTalc.cmp',
        'Ma  ': '%sMargarite.cmp',
        'Pa  ': '%sParagonite.cmp',
        'L   ': '%sLeucophyllite.cmp',
        'dS2w': '%sDi-Smectite/Di-Smectite - Ca 2WAT.cmp',
        'dS1w': '%sDi-Smectite/Di-Smectite - Ca 1WAT.cmp',
        'dS0w': '%sDi-Smectite/Di-Smectite - Ca Dehydr.cmp',
        'dS2g': '%sDi-Smectite/Di-Smectite - Ca 2GLY.cmp',
        'dS1g': '%sDi-Smectite/Di-Smectite - Ca 1GLY.cmp',
        'dSht': '%sDi-Smectite/Di-Smectite - Ca Heated.cmp',
        'tS2w': '%sTri-Smectite/Tri-Smectite - Ca 2WAT.cmp',
        'tS1w': '%sTri-Smectite/Tri-Smectite - Ca 1WAT.cmp',
        'tS0w': '%sTri-Smectite/Tri-Smectite - Ca Dehydr.cmp',
        'tS2g': '%sTri-Smectite/Tri-Smectite - Ca 2GLY.cmp',
        'tS1g': '%sTri-Smectite/Tri-Smectite - Ca 1GLY.cmp',
        'tSht': '%sTri-Smectite/Tri-Smectite - Ca Heated.cmp',
        'dV2w': '%sDi-Vermiculite/Di-Vermiculite - Ca 2WAT.cmp',
        'dV1w': '%sDi-Vermiculite/Di-Vermiculite - Ca 1WAT.cmp',
        'dV0w': '%sDi-Vermiculite/Di-Vermiculite - Ca Dehydr.cmp',
        'dV2g': '%sDi-Vermiculite/Di-Vermiculite - Ca 2GLY.cmp',
        'dV1g': '%sDi-Vermiculite/Di-Vermiculite - Ca 1GLY.cmp',
        'dVht': '%sDi-Vermiculite/Di-Vermiculite - Ca Heated.cmp',
    }
    default_phases = []
    """
    ### Commonly used inherit flag dicts:
    """
    inherit_S = dict(
        inherit_ucp_a=True,
        inherit_ucp_b=True,
        inherit_delta_c=True,
        inherit_layer_atoms=True,
    )

    inherit_all = dict(
        inherit_d001=True,
        inherit_default_c=True,
        inherit_interlayer_atoms=True,
        inherit_atom_relations=True,
        **inherit_S
    )

    inherit_phase = dict(
        inherit_display_color=True,
        inherit_sigma_star=True,
        inherit_CSDS_distribution=True,
        inherit_probabilities=True
    )
    """
    ### Single-layer phases:
    """
    default_phases += [
        ('%sKaolinite.phs', [(dict(R=0, name='Kaolinite'), 'K   ', {}), ]),
        ('%sIllite.phs', [(dict(R=0, name='Illite'), 'I   ', {})]),
        ('%sSerpentine.phs', [(dict(R=0, name='Serpentine'), 'Se  ', {})]),
        ('%sTalc.phs', [(dict(R=0, name='Talc'), 'T   ', {})]),
        ('%sChlorite.phs', [(dict(R=0, name='Chlorite'), 'C   ', {})]),
        ('%sMargarite.phs', [(dict(R=0, name='Margarite'), 'Ma  ', {})]),
        ('%sLeucophyllite.phs', [(dict(R=0, name='Leucophyllite'), 'L   ', {})]),
        ('%sParagonite.phs', [(dict(R=0, name='Paragonite'), 'Pa  ', {})]),
    ]
    """
    ### Dioctahedral smectites:
    """
    S_code_AD = 'dS2w'
    S_code_EG = 'dS2g'
    S_code_350 = 'dSht'
    S_inh_comp_args = {
        'dS2g': dict(linked_with='dS2w', **inherit_S),
        'dSht': dict(linked_with='dS2w', **inherit_S),
    }

    SS_code_AD = S_code_AD + 'dS1w'
    SS_code_EG = S_code_EG + 'dS1g'
    SS_code_350 = S_code_350 + 'dS1g'
    SS_inh_comp_args = dict(S_inh_comp_args)
    SS_inh_comp_args.update({
        'dS1g': dict(linked_with='dS1w', **inherit_S),
    })

    SSS_code_AD = SS_code_AD + 'dS0w'
    SSS_code_EG = SS_code_EG + 'dS0w'
    SSS_code_350 = SS_code_350 + 'dS0w'
    SSS_inh_comp_args = dict(SS_inh_comp_args)
    SSS_inh_comp_args.update({
        # Self-referencing code: links to the 'dS0w' component of the
        # previously generated (AD) phase, see phaseworker below.
        'dS0w': dict(linked_with='dS0w', **inherit_S),
    })

    default_phases += [
        ('%sSmectites/Di-Smectite Ca.phs', [
            (dict(R=0, name='S R0 Ca-AD'), S_code_AD, {}),
            (dict(R=0, name='S R0 Ca-EG', based_on='S R0 Ca-AD', **inherit_phase), S_code_EG, S_inh_comp_args),
            (dict(R=0, name='S R0 Ca-350', based_on='S R0 Ca-AD', **inherit_phase), S_code_350, S_inh_comp_args)
        ]),
    ]

    default_phases += generate_expandables(
        'Smectites/SS/Di-SS R%d Ca.phs', 'SS', 4,
        {}, inherit_phase, inherit_phase,
        SS_code_AD, SS_code_EG, SS_code_350,
        {}, SS_inh_comp_args, SS_inh_comp_args,
    )

    default_phases += generate_expandables(
        'Smectites/SSS/Di-SSS R%d Ca.phs', 'SSS', 3,
        {}, inherit_phase, inherit_phase,
        SSS_code_AD, SSS_code_EG, SSS_code_350,
        {}, SSS_inh_comp_args, SSS_inh_comp_args,
    )
    """
    ### Trioctahedral smectites:
    """
    tS_code_AD = 'tS2w'
    tS_code_EG = 'tS2g'
    tS_code_350 = 'tSht'
    tS_inh_comp_args = {
        'tS2g': dict(linked_with='tS2w', **inherit_S),
        'tSht': dict(linked_with='tS2w', **inherit_S),
    }

    tSS_code_AD = tS_code_AD + 'tS1w'
    tSS_code_EG = tS_code_EG + 'tS1g'
    tSS_code_350 = tS_code_350 + 'tS1g'
    # BUGFIX: was dict(S_inh_comp_args), i.e. seeded with the di-octahedral
    # 'dS2g'/'dSht' keys; those never match the tri-octahedral codes, so the
    # 'tS2g'/'tSht' components were left unlinked and non-inheriting.
    tSS_inh_comp_args = dict(tS_inh_comp_args)
    tSS_inh_comp_args.update({
        'tS1g': dict(linked_with='tS1w', **inherit_S),
    })

    tSSS_code_AD = tSS_code_AD + 'tS0w'
    tSSS_code_EG = tSS_code_EG + 'tS0w'
    tSSS_code_350 = tSS_code_350 + 'tS0w'
    # BUGFIX: was dict(SS_inh_comp_args) (di-octahedral keys) — same
    # copy-paste error as above.
    tSSS_inh_comp_args = dict(tSS_inh_comp_args)
    tSSS_inh_comp_args.update({
        'tS0w': dict(linked_with='tS0w', **inherit_S),
    })

    default_phases += [
        ('%sSmectites/Tri-Smectite Ca.phs', [
            (dict(R=0, name='S R0 Ca-AD'), tS_code_AD, {}),
            (dict(R=0, name='S R0 Ca-EG', based_on='S R0 Ca-AD', **inherit_phase), tS_code_EG, tS_inh_comp_args),
            (dict(R=0, name='S R0 Ca-350', based_on='S R0 Ca-AD', **inherit_phase), tS_code_350, tS_inh_comp_args)
        ]),
    ]

    default_phases += generate_expandables(
        'Smectites/SS/Tri-SS R%d Ca.phs', 'SS', 4,
        {}, inherit_phase, inherit_phase,
        tSS_code_AD, tSS_code_EG, tSS_code_350,
        {}, tSS_inh_comp_args, tSS_inh_comp_args,
    )

    default_phases += generate_expandables(
        'Smectites/SSS/Tri-SSS R%d Ca.phs', 'SSS', 3,
        {}, inherit_phase, inherit_phase,
        tSSS_code_AD, tSSS_code_EG, tSSS_code_350,
        {}, tSSS_inh_comp_args, tSSS_inh_comp_args,
    )
    """
    ### Dioctahedral vermiculites:
    """
    V_code_AD = 'dV2w'
    V_code_EG = 'dV2g'
    V_code_350 = 'dVht'
    V_inh_comp_args = {
        'dV2g': dict(linked_with='dV2w', **inherit_S),
        'dVht': dict(linked_with='dV2w', **inherit_S),
    }

    VV_code_AD = V_code_AD + 'dV1w'
    VV_code_EG = V_code_EG + 'dV1g'
    VV_code_350 = V_code_350 + 'dV1g'
    VV_inh_comp_args = dict(V_inh_comp_args)
    VV_inh_comp_args.update({
        'dV1g': dict(linked_with='dV1w', **inherit_S),
    })

    VVV_code_AD = VV_code_AD + 'dV0w'
    VVV_code_EG = VV_code_EG + 'dV0w'
    VVV_code_350 = VV_code_350 + 'dV0w'
    VVV_inh_comp_args = dict(VV_inh_comp_args)
    VVV_inh_comp_args.update({
        'dV0w': dict(linked_with='dV0w', **inherit_S),
    })

    default_phases += [
        ('%sVermiculites/Di-Vermiculite Ca.phs', [
            (dict(R=0, name='V R0 Ca-AD'), V_code_AD, {}),
            (dict(R=0, name='V R0 Ca-EG', based_on='V R0 Ca-AD', **inherit_phase), V_code_EG, V_inh_comp_args),
            (dict(R=0, name='V R0 Ca-350', based_on='V R0 Ca-AD', **inherit_phase), V_code_350, V_inh_comp_args)
        ]),
    ]

    default_phases += generate_expandables(
        'Vermiculites/VV/Di-VV R%d Ca.phs', 'VV', 4,
        {}, inherit_phase, inherit_phase,
        VV_code_AD, VV_code_EG, VV_code_350,
        {}, VV_inh_comp_args, VV_inh_comp_args,
    )

    default_phases += generate_expandables(
        'Vermiculites/VVV/Di-VVV R%d Ca.phs', 'VVV', 3,
        {}, inherit_phase, inherit_phase,
        VVV_code_AD, VVV_code_EG, VVV_code_350,
        {}, VVV_inh_comp_args, VVV_inh_comp_args,
    )
    """
    ### Kaolinite - Smectites:
    """
    K_code = 'K   '
    K_inh_comp_args = {
        'K   ': dict(linked_with='K   ', **inherit_all),
    }

    KS_code_AD = K_code + S_code_AD
    KS_code_EG = K_code + S_code_EG
    KS_code_350 = K_code + S_code_350
    KS_inh_comp_args = dict(S_inh_comp_args)
    KS_inh_comp_args.update(K_inh_comp_args)

    KSS_code_AD = K_code + SS_code_AD
    KSS_code_EG = K_code + SS_code_EG
    KSS_code_350 = K_code + SS_code_350
    KSS_inh_comp_args = dict(SS_inh_comp_args)
    KSS_inh_comp_args.update(K_inh_comp_args)

    KSSS_code_AD = K_code + SSS_code_AD
    KSSS_code_EG = K_code + SSS_code_EG
    KSSS_code_350 = K_code + SSS_code_350
    KSSS_inh_comp_args = dict(SSS_inh_comp_args)
    KSSS_inh_comp_args.update(K_inh_comp_args)

    default_phases += generate_expandables(
        'Kaolinite-Smectites/KS/KS R%d Ca.phs', 'KS', 4,
        {}, inherit_phase, inherit_phase,
        KS_code_AD, KS_code_EG, KS_code_350,
        {}, KS_inh_comp_args, KS_inh_comp_args,
    )

    default_phases += generate_expandables(
        'Kaolinite-Smectites/KSS/KSS R%d Ca.phs', 'KSS', 3,
        {}, inherit_phase, inherit_phase,
        KSS_code_AD, KSS_code_EG, KSS_code_350,
        {}, KSS_inh_comp_args, KSS_inh_comp_args,
    )

    default_phases += generate_expandables(
        'Kaolinite-Smectites/KSSS/KSSS R%d Ca.phs', 'KSSS', 2,
        {}, inherit_phase, inherit_phase,
        KSSS_code_AD, KSSS_code_EG, KSSS_code_350,
        {}, KSSS_inh_comp_args, KSSS_inh_comp_args,
    )
    """
    ### Illite - Smectites:
    """
    I_code = 'I   '
    I_inh_comp_args = {
        'I   ': dict(linked_with='I   ', **inherit_all),
    }

    IS_code_AD = I_code + S_code_AD
    IS_code_EG = I_code + S_code_EG
    IS_code_350 = I_code + S_code_350
    IS_inh_comp_args = dict(S_inh_comp_args)
    IS_inh_comp_args.update(I_inh_comp_args)

    ISS_code_AD = I_code + SS_code_AD
    ISS_code_EG = I_code + SS_code_EG
    ISS_code_350 = I_code + SS_code_350
    ISS_inh_comp_args = dict(SS_inh_comp_args)
    ISS_inh_comp_args.update(I_inh_comp_args)

    ISSS_code_AD = I_code + SSS_code_AD
    ISSS_code_EG = I_code + SSS_code_EG
    ISSS_code_350 = I_code + SSS_code_350
    ISSS_inh_comp_args = dict(SSS_inh_comp_args)
    ISSS_inh_comp_args.update(I_inh_comp_args)

    default_phases += generate_expandables(
        'Illite-Smectites/IS/IS R%d Ca.phs', 'IS', 4,
        {}, inherit_phase, inherit_phase,
        IS_code_AD, IS_code_EG, IS_code_350,
        {}, IS_inh_comp_args, IS_inh_comp_args,
    )

    default_phases += generate_expandables(
        'Illite-Smectites/ISS/ISS R%d Ca.phs', 'ISS', 3,
        {}, inherit_phase, inherit_phase,
        ISS_code_AD, ISS_code_EG, ISS_code_350,
        {}, ISS_inh_comp_args, ISS_inh_comp_args,
    )

    default_phases += generate_expandables(
        'Illite-Smectites/ISSS/ISSS R%d Ca.phs', 'ISSS', 2,
        {}, inherit_phase, inherit_phase,
        ISSS_code_AD, ISSS_code_EG, ISSS_code_350,
        {}, ISSS_inh_comp_args, ISSS_inh_comp_args,
    )
    """
    ### Chlorite - Smectites:
    """
    C_code = 'C   '
    C_inh_comp_args = {
        'C   ': dict(linked_with='C   ', **inherit_all),
    }

    CS_code_AD = C_code + tS_code_AD
    CS_code_EG = C_code + tS_code_EG
    CS_code_350 = C_code + tS_code_350
    CS_inh_comp_args = dict(tS_inh_comp_args)
    CS_inh_comp_args.update(C_inh_comp_args)

    CSS_code_AD = C_code + tSS_code_AD
    CSS_code_EG = C_code + tSS_code_EG
    CSS_code_350 = C_code + tSS_code_350
    CSS_inh_comp_args = dict(tSS_inh_comp_args)
    CSS_inh_comp_args.update(C_inh_comp_args)

    CSSS_code_AD = C_code + tSSS_code_AD
    CSSS_code_EG = C_code + tSSS_code_EG
    CSSS_code_350 = C_code + tSSS_code_350
    CSSS_inh_comp_args = dict(tSSS_inh_comp_args)
    CSSS_inh_comp_args.update(C_inh_comp_args)

    default_phases += generate_expandables(
        'Chlorite-Smectites/CS/CS R%d Ca.phs', 'CS', 4,
        {}, inherit_phase, inherit_phase,
        CS_code_AD, CS_code_EG, CS_code_350,
        {}, CS_inh_comp_args, CS_inh_comp_args,
    )

    default_phases += generate_expandables(
        'Chlorite-Smectites/CSS/CSS R%d Ca.phs', 'CSS', 3,
        {}, inherit_phase, inherit_phase,
        CSS_code_AD, CSS_code_EG, CSS_code_350,
        {}, CSS_inh_comp_args, CSS_inh_comp_args,
    )

    default_phases += generate_expandables(
        'Chlorite-Smectites/CSSS/CSSS R%d Ca.phs', 'CSSS', 2,
        {}, inherit_phase, inherit_phase,
        CSSS_code_AD, CSSS_code_EG, CSSS_code_350,
        {}, CSSS_inh_comp_args, CSSS_inh_comp_args,
    )
    """
    ### Talc - Smectites:
    """
    T_code = 'T   '
    T_inh_comp_args = {
        'T   ': dict(linked_with='T   ', **inherit_all),
    }

    TS_code_AD = T_code + S_code_AD
    TS_code_EG = T_code + S_code_EG
    TS_code_350 = T_code + S_code_350
    TS_inh_comp_args = dict(S_inh_comp_args)
    TS_inh_comp_args.update(T_inh_comp_args)

    TSS_code_AD = T_code + SS_code_AD
    TSS_code_EG = T_code + SS_code_EG
    TSS_code_350 = T_code + SS_code_350
    TSS_inh_comp_args = dict(SS_inh_comp_args)
    TSS_inh_comp_args.update(T_inh_comp_args)

    TSSS_code_AD = T_code + SSS_code_AD
    TSSS_code_EG = T_code + SSS_code_EG
    TSSS_code_350 = T_code + SSS_code_350
    TSSS_inh_comp_args = dict(SSS_inh_comp_args)
    TSSS_inh_comp_args.update(T_inh_comp_args)

    default_phases += generate_expandables(
        'Talc-Smectites/TS/TS R%d Ca.phs', 'TS', 4,
        {}, inherit_phase, inherit_phase,
        TS_code_AD, TS_code_EG, TS_code_350,
        {}, TS_inh_comp_args, TS_inh_comp_args,
    )

    default_phases += generate_expandables(
        'Talc-Smectites/TSS/TSS R%d Ca.phs', 'TSS', 3,
        {}, inherit_phase, inherit_phase,
        TSS_code_AD, TSS_code_EG, TSS_code_350,
        {}, TSS_inh_comp_args, TSS_inh_comp_args,
    )

    default_phases += generate_expandables(
        'Talc-Smectites/TSSS/TSSS R%d Ca.phs', 'TSSS', 2,
        {}, inherit_phase, inherit_phase,
        TSSS_code_AD, TSSS_code_EG, TSSS_code_350,
        {}, TSSS_inh_comp_args, TSSS_inh_comp_args,
    )
    """
    ### Illite - Chlorite - Smectites:
    """
    IC_code = I_code + C_code
    IC_inh_comp_args = dict(I_inh_comp_args)
    IC_inh_comp_args.update(C_inh_comp_args)

    ICS_code_AD = IC_code + S_code_AD
    ICS_code_EG = IC_code + S_code_EG
    ICS_inh_comp_args = dict(S_inh_comp_args)
    ICS_inh_comp_args.update(IC_inh_comp_args)

    ICSS_code_AD = IC_code + SS_code_AD
    ICSS_code_EG = IC_code + SS_code_EG
    ICSS_inh_comp_args = dict(SS_inh_comp_args)
    ICSS_inh_comp_args.update(IC_inh_comp_args)

    ICSSS_code_AD = IC_code + SSS_code_AD
    ICSSS_code_EG = IC_code + SSS_code_EG
    ICSSS_inh_comp_args = dict(SSS_inh_comp_args)
    ICSSS_inh_comp_args.update(IC_inh_comp_args)

    default_phases += [
        ('%sIllite-Chlorite-Smectites/ICS/ICS R0 Ca.phs', [
            (dict(R=0, name='ICS R0 Ca-AD'), ICS_code_AD, {}),
            (dict(R=0, name='ICS R0 Ca-EG', based_on='ICS R0 Ca-AD', **inherit_phase), ICS_code_EG, ICS_inh_comp_args)
        ]),
        ('%sIllite-Chlorite-Smectites/ICS/ICS R1 Ca.phs', [
            (dict(R=1, name='ICS R1 Ca-AD'), ICS_code_AD, {}),
            (dict(R=1, name='ICS R1 Ca-EG', based_on='ICS R1 Ca-AD', **inherit_phase), ICS_code_EG, ICS_inh_comp_args)
        ]),
        ('%sIllite-Chlorite-Smectites/ICS/ICS R2 Ca.phs', [
            (dict(R=2, name='ICS R2 Ca-AD'), ICS_code_AD, {}),
            (dict(R=2, name='ICS R2 Ca-EG', based_on='ICS R2 Ca-AD', **inherit_phase), ICS_code_EG, ICS_inh_comp_args)
        ]),
        ('%sIllite-Chlorite-Smectites/ICSS/ICSS R0 Ca.phs', [
            (dict(R=0, name='ICSS R0 Ca-AD'), ICSS_code_AD, {}),
            (dict(R=0, name='ICSS R0 Ca-EG', based_on='ICSS R0 Ca-AD', **inherit_phase), ICSS_code_EG, ICSS_inh_comp_args)
        ]),
        ('%sIllite-Chlorite-Smectites/ICSS/ICSS R1 Ca.phs', [
            (dict(R=1, name='ICSS R1 Ca-AD'), ICSS_code_AD, {}),
            (dict(R=1, name='ICSS R1 Ca-EG', based_on='ICSS R1 Ca-AD', **inherit_phase), ICSS_code_EG, ICSS_inh_comp_args)
        ]),
        ('%sIllite-Chlorite-Smectites/ICSSS/ICSSS R0 Ca.phs', [
            (dict(R=0, name='ICSSS R0 Ca-AD'), ICSSS_code_AD, {}),
            (dict(R=0, name='ICSSS R0 Ca-EG', based_on='ICSSS R0 Ca-AD', **inherit_phase), ICSSS_code_EG, ICSSS_inh_comp_args)
        ]),
    ]
    """
    ### Kaolinite - Chlorite - Smectites:
    """
    KC_code = K_code + C_code
    KC_inh_comp_args = dict(K_inh_comp_args)
    KC_inh_comp_args.update(C_inh_comp_args)

    KCS_code_AD = KC_code + S_code_AD
    KCS_code_EG = KC_code + S_code_EG
    KCS_inh_comp_args = dict(S_inh_comp_args)
    KCS_inh_comp_args.update(KC_inh_comp_args)

    KCSS_code_AD = KC_code + SS_code_AD
    KCSS_code_EG = KC_code + SS_code_EG
    KCSS_inh_comp_args = dict(SS_inh_comp_args)
    KCSS_inh_comp_args.update(KC_inh_comp_args)

    KCSSS_code_AD = KC_code + SSS_code_AD
    KCSSS_code_EG = KC_code + SSS_code_EG
    KCSSS_inh_comp_args = dict(SSS_inh_comp_args)
    KCSSS_inh_comp_args.update(KC_inh_comp_args)

    default_phases += [
        ('%sKaolinite-Chlorite-Smectites/KCS/KCS R0 Ca.phs', [
            (dict(R=0, name='KCS R0 Ca-AD'), KCS_code_AD, {}),
            (dict(R=0, name='KCS R0 Ca-EG', based_on='KCS R0 Ca-AD', **inherit_phase), KCS_code_EG, KCS_inh_comp_args)
        ]),
        ('%sKaolinite-Chlorite-Smectites/KCS/KCS R1 Ca.phs', [
            (dict(R=1, name='KCS R1 Ca-AD'), KCS_code_AD, {}),
            (dict(R=1, name='KCS R1 Ca-EG', based_on='KCS R1 Ca-AD', **inherit_phase), KCS_code_EG, KCS_inh_comp_args)
        ]),
        ('%sKaolinite-Chlorite-Smectites/KCS/KCS R2 Ca.phs', [
            (dict(R=2, name='KCS R2 Ca-AD'), KCS_code_AD, {}),
            (dict(R=2, name='KCS R2 Ca-EG', based_on='KCS R2 Ca-AD', **inherit_phase), KCS_code_EG, KCS_inh_comp_args)
        ]),
        ('%sKaolinite-Chlorite-Smectites/KCSS/KCSS R0 Ca.phs', [
            (dict(R=0, name='KCSS R0 Ca-AD'), KCSS_code_AD, {}),
            (dict(R=0, name='KCSS R0 Ca-EG', based_on='KCSS R0 Ca-AD', **inherit_phase), KCSS_code_EG, KCSS_inh_comp_args)
        ]),
        ('%sKaolinite-Chlorite-Smectites/KCSS/KCSS R1 Ca.phs', [
            (dict(R=1, name='KCSS R1 Ca-AD'), KCSS_code_AD, {}),
            (dict(R=1, name='KCSS R1 Ca-EG', based_on='KCSS R1 Ca-AD', **inherit_phase), KCSS_code_EG, KCSS_inh_comp_args)
        ]),
        ('%sKaolinite-Chlorite-Smectites/KCSSS/KCSSS R0 Ca.phs', [
            (dict(R=0, name='KCSSS R0 Ca-AD'), KCSSS_code_AD, {}),
            (dict(R=0, name='KCSSS R0 Ca-EG', based_on='KCSSS R0 Ca-AD', **inherit_phase), KCSSS_code_EG, KCSSS_inh_comp_args)
        ]),
    ]
    """
    ### Actual object generation routine:
    """
    import queue
    import threading

    def ioworker(in_queue, stop):
        """
        Saves Phase objects from the in_queue.
        If the Queue is empty this function will only stop
        if the 'stop' event is set.
        """
        while True:
            try:
                phases_path, phases = in_queue.get(timeout=0.5)
                create_dir_recursive(phases_path)
                Phase.save_phases(phases, phases_path)
                in_queue.task_done()
            except queue.Empty:
                if not stop.is_set():
                    continue
                else:
                    return

    save_queue = queue.Queue()
    io_stop = threading.Event()
    iothread = threading.Thread(target=ioworker, args=(save_queue, io_stop))
    iothread.start()

    def phaseworker(in_queue, save_queue, stop):
        """
        Parses Phase descriptions into actual objects and passes them
        to the save_queue.
        'stop' should be a threading.Event() that should be toggled
        once all elements have been Queued.
        This way, the worker will only stop once the Queue is really empty,
        and not when it's processing faster than the Queue can be filled.
        """
        while True:
            try:
                phases_path, phase_descr = in_queue.get(timeout=0.5)
                project = Project()
                phase_lookup = {}
                component_lookup = {}
                for phase_kwargs, code, comp_props in phase_descr:
                    # create phase:
                    # BUGFIX: use floor division; '/' yields a float in
                    # Python 3 while G must be an integer layer count.
                    G = len(code) // code_length
                    based_on = None
                    if "based_on" in phase_kwargs:
                        based_on = phase_lookup.get(phase_kwargs.pop("based_on"), None)
                    phase = Phase(G=G, parent=project, **phase_kwargs)
                    phase.based_on = based_on
                    phase_lookup[phase.name] = phase

                    # derive upper and lower limits for the codes using code lengths:
                    limits = list(zip(
                        list(range(0, len(code), code_length)),
                        list(range(code_length, len(code) + 1, code_length))
                    ))

                    # create components:
                    phase.components[:] = []
                    for ll, ul in limits:
                        part = code[ll: ul]
                        for component in Component.load_components(aliases[part] % (settings.DATA_REG.get_directory_path("DEFAULT_COMPONENTS") + "/"), parent=phase):
                            component.resolve_json_references()
                            phase.components.append(component)

                            props = comp_props.get(part, {})
                            for prop, value in props.items():
                                if prop == 'linked_with':
                                    # Resolve the code to the component object
                                    # created earlier under the same code (from
                                    # a previous phase when self-referencing).
                                    value = component_lookup[value]
                                setattr(component, prop, value)
                            component_lookup[part] = component

                # put phases on the save queue:
                phases_path = phases_path % (settings.DATA_REG.get_directory_path("DEFAULT_PHASES") + "/")
                save_queue.put((phases_path, list(phase_lookup.values())))
                # Flag this as finished
                in_queue.task_done()
            except queue.Empty:
                if not stop.is_set():
                    continue
                else:
                    return

    phase_queue = queue.Queue()
    phase_stop = threading.Event()
    phasethread = threading.Thread(target=phaseworker, args=(phase_queue, save_queue, phase_stop))
    phasethread.start()

    # Queue phases:
    for phases_path, phase_descr in default_phases:
        phase_queue.put((phases_path, phase_descr))

    # Signal phaseworker it can stop if the phase_queue is emptied:
    phase_stop.set()
    while phasethread.is_alive():
        # Try to join the thread, but don't block, inform the UI
        # of our progress if a callback is provided:
        phasethread.join(timeout=0.1)
        if callable(ui_callback):
            progress = float(len(default_phases) - phase_queue.qsize()) / float(len(default_phases))
            ui_callback(progress)

    if callable(ui_callback):
        ui_callback(1.0)

    # Signal the IO worker the phaseworker has stopped, so it can stop
    # if the save_queue is empty
    io_stop.set()
    while iothread.is_alive():
        # Try to join the thread, but don't block
        iothread.join(timeout=0.1)

    pass # end of run
def create_dir_recursive(path):
    """
    Create all missing *parent* directories of 'path'.

    Note: the last path component (assumed to be a file name by callers,
    which pass in the target file path) is deliberately NOT created.
    """
    to_create = []
    # Walk up until an existing ancestor is found, collecting the missing
    # components from shallowest to deepest.
    while path and not os.path.exists(path):
        to_create.insert(0, path)
        parent = os.path.dirname(path)
        # BUGFIX: guard against dirname() reaching a fixed point (e.g. '' for
        # a bare relative filename, or the filesystem root), which previously
        # caused an infinite loop.
        if parent == path:
            break
        path = parent
    # Skip the last element: it is the file itself, not a directory.
    for directory in to_create[:-1]:
        os.mkdir(directory)
| [
"pyxrd.project.models.Project",
"os.path.exists",
"pyxrd.phases.models.Phase.save_phases",
"pyxrd.phases.models.Phase",
"threading.Event",
"os.path.dirname",
"os.mkdir",
"threading.Thread",
"queue.Queue",
"pyxrd.data.settings.DATA_REG.get_directory_path"
] | [((22450, 22463), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (22461, 22463), False, 'import queue\n'), ((22478, 22495), 'threading.Event', 'threading.Event', ([], {}), '()\n', (22493, 22495), False, 'import threading\n'), ((22511, 22572), 'threading.Thread', 'threading.Thread', ([], {'target': 'ioworker', 'args': '(save_queue, io_stop)'}), '(target=ioworker, args=(save_queue, io_stop))\n', (22527, 22572), False, 'import threading\n'), ((25372, 25385), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (25383, 25385), False, 'import queue\n'), ((25403, 25420), 'threading.Event', 'threading.Event', ([], {}), '()\n', (25418, 25420), False, 'import threading\n'), ((25439, 25524), 'threading.Thread', 'threading.Thread', ([], {'target': 'phaseworker', 'args': '(phase_queue, save_queue, phase_stop)'}), '(target=phaseworker, args=(phase_queue, save_queue, phase_stop)\n )\n', (25455, 25524), False, 'import threading\n'), ((26563, 26583), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (26577, 26583), False, 'import os\n'), ((26634, 26655), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (26649, 26655), False, 'import os\n'), ((26696, 26710), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (26704, 26710), False, 'import os\n'), ((22208, 22246), 'pyxrd.phases.models.Phase.save_phases', 'Phase.save_phases', (['phases', 'phases_path'], {}), '(phases, phases_path)\n', (22225, 22246), False, 'from pyxrd.phases.models import Component, Phase\n'), ((23184, 23193), 'pyxrd.project.models.Project', 'Project', ([], {}), '()\n', (23191, 23193), False, 'from pyxrd.project.models import Project\n'), ((23622, 23664), 'pyxrd.phases.models.Phase', 'Phase', ([], {'G': 'G', 'parent': 'project'}), '(G=G, parent=project, **phase_kwargs)\n', (23627, 23664), False, 'from pyxrd.phases.models import Component, Phase\n'), ((24991, 25045), 'pyxrd.data.settings.DATA_REG.get_directory_path', 'settings.DATA_REG.get_directory_path', (['"""DEFAULT_PHASES"""'], 
{}), "('DEFAULT_PHASES')\n", (25027, 25045), False, 'from pyxrd.data import settings\n'), ((24311, 24369), 'pyxrd.data.settings.DATA_REG.get_directory_path', 'settings.DATA_REG.get_directory_path', (['"""DEFAULT_COMPONENTS"""'], {}), "('DEFAULT_COMPONENTS')\n", (24347, 24369), False, 'from pyxrd.data import settings\n')] |
"""
Some persistent maps (gdbm) require special encoding of keys
and/or values. This is an abstraction for these kinds of quirks.
"""
from itertools import imap
import collections
import gdbm as dbm
import json
from sqlitedict import SqliteDict
import os
class EncodedDict(collections.MutableMapping):
    """
    Mapping wrapper that encodes keys on the way in and decodes them on
    the way out, for backends (gdbm, sqlite) with key restrictions.

    Subclass this and override any of the following (see implementation
    for signatures):
    - db
    - _encode_key
    - _decode_key
    """
    def __init__(self, wrapped=None):
        # Fall back to a plain in-memory dict when no backing store is given.
        self.db = wrapped if wrapped is not None else dict()
    def _encode_key(self, key):
        """
        Override to encode keys coming in.
        """
        return key
    def _decode_key(self, key):
        """
        Override to decode keys going out.
        """
        return key
    def __del__(self):
        # Drop the backend reference; subclasses close their stores first.
        del self.db
    def __setitem__(self, key, val):
        self.db[self._encode_key(key)] = val
    def __getitem__(self, key):
        return self.db[self._encode_key(key)]
    def __contains__(self, key):
        # BUG FIX: compare the *encoded* key against the backend's raw keys.
        # The previous check `self._encode_key(key) in self.keys()` compared
        # an encoded key against self.keys(), which yields *decoded* keys, so
        # membership tests failed whenever the encoding is not the identity.
        return self._encode_key(key) in self.db.keys()
    def __delitem__(self, key):
        del self.db[self._encode_key(key)]
    def __iter__(self):
        return imap(self._decode_key, self.db.keys())
    def __len__(self):
        return len(self.db)
    def keys(self):
        return list(self)
    def values(self):
        return self.db.values()
    def items(self):
        return [(self._decode_key(key), v) for key, v in self.db.iteritems()]
    def to_json(self, filename):
        """Dump the raw (encoded-key) contents to `filename` as JSON pairs."""
        # Context manager closes the handle; the old code leaked it.
        with open(filename, 'w') as fileobj:
            json.dump([(k, v) for k, v in self.db.iteritems()], fileobj)
    def from_json(self, filename):
        """Load pairs written by to_json back into the backing store."""
        with open(filename) as fileobj:
            for k, v in json.load(fileobj):
                self.db[k] = v
class DbmPersistentDict(EncodedDict):
    """
    Persistent dict backed by dbm. Opens `filename` for writing when it
    already exists, otherwise creates a fresh database.
    """
    def __init__(self, filename):
        mode = 'n'
        if os.path.exists(filename):
            mode = 'w'
        super(DbmPersistentDict, self).__init__(dbm.open(filename, mode))
    def _encode_key(self, key):
        # dbm keys must be plain byte strings: escape unicode, stringify the rest.
        if isinstance(key, unicode):
            return key.encode('unicode_escape')
        return str(key)
    def _decode_key(self, key):
        # Inverse of _encode_key.
        return key.decode('unicode_escape')
class SqlitePersistentDict(EncodedDict):
    """
    Persistent dict backed by sqlite via SqliteDict; `filename` gets a
    `.sqlite` suffix when it does not already carry one.
    """
    def __init__(self, filename):
        suffix = '.sqlite'
        if not filename.endswith(suffix):
            filename += suffix
        super(SqlitePersistentDict, self).__init__(SqliteDict(filename))
    def __del__(self):
        # Close the sqlite connection before the base class drops the handle.
        self.db.close()
        super(SqlitePersistentDict, self).__del__()
"""
Some info on performance:
>>> import timeit
>>> sqlkv = SqlitePersistentDict('/tmp/bench1.sqlite')
>>> timeit.timeit(lambda : benchmark_write(sqlkv), number=100)
10.847157955169678
>>> timeit.timeit(lambda : benchmark_read(sqlkv), number=100)
18.88098978996277
>>> dbmkv = DbmPersistentDict('/tmp/bench.dbm')
>>> timeit.timeit(lambda : benchmark_write(dbmkv), number=100)
0.18030309677124023
>>> timeit.timeit(lambda : benchmark_read(dbmkv), number=100)
0.14914202690124512
SqliteDict is a pretty thin wrapper around sqlite, I would probably
not have made it much thinner. Just use Dbm.
Keep this around in case anyone considers changing to sqlite.
XXX: see how gdbm does when data is larger than memory. Also check out
bsddb
"""
# PersistentDict = SqlitePersistentDict
PersistentDict = DbmPersistentDict
def benchmark_write(dic, times=100000):
    """Store `times` entries under keys 'o0'..'oN'; each value is the
    decimal string of the index repeated 1000 times."""
    for counter in xrange(times):
        key = 'o' + str(counter)
        dic[key] = str(counter) * 1000
def benchmark_read(dic, times=100000):
    """Look up the `times` keys written by benchmark_write; results discarded."""
    for counter in xrange(times):
        dic['o' + str(counter)]
| [
"os.path.exists",
"gdbm.open",
"sqlitedict.SqliteDict"
] | [((2463, 2483), 'sqlitedict.SqliteDict', 'SqliteDict', (['filename'], {}), '(filename)\n', (2473, 2483), False, 'from sqlitedict import SqliteDict\n'), ((1926, 1950), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1940, 1950), False, 'import os\n'), ((2009, 2033), 'gdbm.open', 'dbm.open', (['filename', 'flag'], {}), '(filename, flag)\n', (2017, 2033), True, 'import gdbm as dbm\n')] |
from PySide6.QtGui import QColor
from PySide6.QtWidgets import QFrame, QHBoxLayout, QWidget
from PySide6.QtCore import Qt, QSettings, QEvent
from utils import colors
import pyqtgraph as pg
import numpy as np
class ScrollablePlotWidget(pg.PlotWidget):
    """
    Subclass of `pg.PlotWidget` that overrides `wheelEvent` and `mouse(Press/Release)Event`
    so that user scrolls the parent widget when scrolling on the plot.
    Widget performs no action on mouse press/release.
    """
    def __init__(self, parent: QWidget = None) -> None:
        """
        The constructor for scrollable plot widget.
        Parameters:
            parent (QWidget): Parent widget of this widget. Default: None.
        """
        super().__init__()
        # NOTE(review): `parent` is not forwarded to super().__init__(), and
        # storing it as `self.parent` shadows QWidget.parent() on this
        # instance -- confirm before relying on Qt's parent() accessor here.
        self.parent = parent
    def wheelEvent(self, event: QEvent):
        """
        A function that overrides `pg.PlotWidget`'s `wheelEvent` so that parent widget is scrolled.
        Parameters:
            event (QEvent): Scrolling event.
        """
        # Forward the wheel event so scrolling over the plot scrolls the parent.
        self.parent.wheelEvent(event)
    def mousePressEvent(self, QMouseEvent: QEvent):
        """
        A function that overrides `pg.PlotWidget`'s `mousePressEvent` so that it does nothing.
        Parameters:
            event (QEvent): Mouse press event.
        """
        # Deliberately swallow presses: the plot is display-only.
        pass
    def mouseReleaseEvent(self, QMouseEvent: QEvent):
        """
        A function that overrides `pg.PlotWidget`'s `mouseReleaseEvent` so that it does nothing.
        Parameters:
            event (QEvent): Mouse release event.
        """
        pass
class Component(QFrame):
    """
    A widget representing one Raman component. It displays a spectral map and a single spectral plot.
    """
    def __init__(self, x: np.ndarray, y: np.ndarray, map: np.ndarray, parent: QWidget = None) -> None:
        """
        Build the component view: an image of `map` next to a plot of (x, y).

        Parameters:
            x (np.ndarray): x-axis values of the component's spectrum.
            y (np.ndarray): intensities plotted against `x`.
            map (np.ndarray): per-pixel component intensities rendered by
                `pg.ImageView` -- presumably 2-D; confirm with callers.
            parent (QWidget): Parent widget. Default: None.
        """
        super().__init__(parent)
        # App-wide settings; the spectral-map colormap name is read below.
        self.settings = QSettings()
        # limit size of one component
        self.setMinimumHeight(175)
        self.setMaximumHeight(400)
        self.x_data = x
        self.y_data = y
        self.map_data = map
        # NOTE: scrolling over spectral map does nothing at all as wheelEvent works
        # different for `pg.ImageView`
        self.component_map = pg.ImageView(parent)
        # hide control buttons
        self.component_map.ui.histogram.hide()
        self.component_map.ui.roiBtn.hide()
        self.component_map.ui.menuBtn.hide()
        # set colors
        bg_color = (240,240,240)
        color_map = colors.COLORMAPS[str(self.settings.value("spectral_map/cmap"))]
        # Stops are spread evenly over [0, 1] across the colormap entries.
        cmap = pg.ColorMap(pos=np.linspace(0.0, 1.0, len(color_map)), color=color_map)
        # component map properties
        self.component_map.setColorMap(cmap)
        self.component_map.setImage(self.map_data, autoRange=False)
        # Static image: disable panning/zooming and keep pixels square.
        self.component_map.getView().setMouseEnabled(False, False)
        self.component_map.getView().setDefaultPadding(0)
        self.component_map.getView().setAspectLocked(True, ratio=None)
        self.component_map.getView().setBackgroundColor(QColor(240,240,240))
        self.component_map.setMinimumWidth(175)
        self.component_map.setMaximumWidth(250)
        # spectral plot is the scrollable one
        self.component_plot = ScrollablePlotWidget(parent)
        self.component_plot.setBackground(bg_color)
        plot_pen = pg.mkPen(color="#266867", width=1.5)
        self.line = self.component_plot.plot(self.x_data, self.y_data, pen=plot_pen)
        # make final layout
        layout = QHBoxLayout()
        layout.setAlignment(Qt.AlignHCenter)
        layout.addWidget(self.component_map)
        layout.addWidget(self.component_plot)
        self.setLayout(layout)
| [
"PySide6.QtWidgets.QHBoxLayout",
"PySide6.QtCore.QSettings",
"pyqtgraph.mkPen",
"pyqtgraph.ImageView",
"PySide6.QtGui.QColor"
] | [((1875, 1886), 'PySide6.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (1884, 1886), False, 'from PySide6.QtCore import Qt, QSettings, QEvent\n'), ((2240, 2260), 'pyqtgraph.ImageView', 'pg.ImageView', (['parent'], {}), '(parent)\n', (2252, 2260), True, 'import pyqtgraph as pg\n'), ((3351, 3387), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'color': '"""#266867"""', 'width': '(1.5)'}), "(color='#266867', width=1.5)\n", (3359, 3387), True, 'import pyqtgraph as pg\n'), ((3519, 3532), 'PySide6.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (3530, 3532), False, 'from PySide6.QtWidgets import QFrame, QHBoxLayout, QWidget\n'), ((3057, 3078), 'PySide6.QtGui.QColor', 'QColor', (['(240)', '(240)', '(240)'], {}), '(240, 240, 240)\n', (3063, 3078), False, 'from PySide6.QtGui import QColor\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import OrderedDict
from torch import nn
from maskrcnn_benchmark.modeling import registry
from maskrcnn_benchmark.modeling.make_layers import conv_with_kaiming_uniform
from . import fpn as fpn_module
from . import res2net
@registry.BACKBONES.register("R2-50-C4")
@registry.BACKBONES.register("R2-50-C5")
@registry.BACKBONES.register("R2-101-C4")
@registry.BACKBONES.register("R2-101-C5")
def build_res2net_backbone(cfg):
body = res2net.Res2Net(cfg)
model = nn.Sequential(OrderedDict([("body", body)]))
model.out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
return model
@registry.BACKBONES.register("R2-50-FPN")
@registry.BACKBONES.register("R2-101-FPN")
@registry.BACKBONES.register("R2-152-FPN")
def build_res2net_fpn_backbone(cfg):
body = res2net.Res2Net(cfg)
in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
fpn = fpn_module.FPN(
in_channels_list=[
in_channels_stage2,
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
],
out_channels=out_channels,
conv_block=conv_with_kaiming_uniform(
cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU
),
top_blocks=fpn_module.LastLevelMaxPool(),
)
model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)]))
model.out_channels = out_channels
return model
@registry.BACKBONES.register("R2-50-FPN-RETINANET")
@registry.BACKBONES.register("R2-101-FPN-RETINANET")
def build_res2net_fpn_p3p7_backbone(cfg):
body = res2net.Res2Net(cfg)
in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
in_channels_p6p7 = in_channels_stage2 * 8 if cfg.MODEL.RETINANET.USE_C5 \
else out_channels
fpn = fpn_module.FPN(
in_channels_list=[
0,
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
],
out_channels=out_channels,
conv_block=conv_with_kaiming_uniform(
cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU
),
top_blocks=fpn_module.LastLevelP6P7(in_channels_p6p7, out_channels),
)
model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)]))
model.out_channels = out_channels
return model
# def build_backbone(cfg):
# assert cfg.MODEL.BACKBONE.CONV_BODY in registry.BACKBONES, \
# "cfg.MODEL.BACKBONE.CONV_BODY: {} are not registered in registry".format(
# cfg.MODEL.BACKBONE.CONV_BODY
# )
# return registry.BACKBONES[cfg.MODEL.BACKBONE.CONV_BODY](cfg)
| [
"collections.OrderedDict",
"maskrcnn_benchmark.modeling.registry.BACKBONES.register",
"maskrcnn_benchmark.modeling.make_layers.conv_with_kaiming_uniform"
] | [((315, 354), 'maskrcnn_benchmark.modeling.registry.BACKBONES.register', 'registry.BACKBONES.register', (['"""R2-50-C4"""'], {}), "('R2-50-C4')\n", (342, 354), False, 'from maskrcnn_benchmark.modeling import registry\n'), ((356, 395), 'maskrcnn_benchmark.modeling.registry.BACKBONES.register', 'registry.BACKBONES.register', (['"""R2-50-C5"""'], {}), "('R2-50-C5')\n", (383, 395), False, 'from maskrcnn_benchmark.modeling import registry\n'), ((397, 437), 'maskrcnn_benchmark.modeling.registry.BACKBONES.register', 'registry.BACKBONES.register', (['"""R2-101-C4"""'], {}), "('R2-101-C4')\n", (424, 437), False, 'from maskrcnn_benchmark.modeling import registry\n'), ((439, 479), 'maskrcnn_benchmark.modeling.registry.BACKBONES.register', 'registry.BACKBONES.register', (['"""R2-101-C5"""'], {}), "('R2-101-C5')\n", (466, 479), False, 'from maskrcnn_benchmark.modeling import registry\n'), ((687, 727), 'maskrcnn_benchmark.modeling.registry.BACKBONES.register', 'registry.BACKBONES.register', (['"""R2-50-FPN"""'], {}), "('R2-50-FPN')\n", (714, 727), False, 'from maskrcnn_benchmark.modeling import registry\n'), ((729, 770), 'maskrcnn_benchmark.modeling.registry.BACKBONES.register', 'registry.BACKBONES.register', (['"""R2-101-FPN"""'], {}), "('R2-101-FPN')\n", (756, 770), False, 'from maskrcnn_benchmark.modeling import registry\n'), ((772, 813), 'maskrcnn_benchmark.modeling.registry.BACKBONES.register', 'registry.BACKBONES.register', (['"""R2-152-FPN"""'], {}), "('R2-152-FPN')\n", (799, 813), False, 'from maskrcnn_benchmark.modeling import registry\n'), ((1541, 1591), 'maskrcnn_benchmark.modeling.registry.BACKBONES.register', 'registry.BACKBONES.register', (['"""R2-50-FPN-RETINANET"""'], {}), "('R2-50-FPN-RETINANET')\n", (1568, 1591), False, 'from maskrcnn_benchmark.modeling import registry\n'), ((1593, 1644), 'maskrcnn_benchmark.modeling.registry.BACKBONES.register', 'registry.BACKBONES.register', (['"""R2-101-FPN-RETINANET"""'], {}), "('R2-101-FPN-RETINANET')\n", (1620, 1644), 
False, 'from maskrcnn_benchmark.modeling import registry\n'), ((571, 600), 'collections.OrderedDict', 'OrderedDict', (["[('body', body)]"], {}), "([('body', body)])\n", (582, 600), False, 'from collections import OrderedDict\n'), ((1438, 1481), 'collections.OrderedDict', 'OrderedDict', (["[('body', body), ('fpn', fpn)]"], {}), "([('body', body), ('fpn', fpn)])\n", (1449, 1481), False, 'from collections import OrderedDict\n'), ((2388, 2431), 'collections.OrderedDict', 'OrderedDict', (["[('body', body), ('fpn', fpn)]"], {}), "([('body', body), ('fpn', fpn)])\n", (2399, 2431), False, 'from collections import OrderedDict\n'), ((1261, 1332), 'maskrcnn_benchmark.modeling.make_layers.conv_with_kaiming_uniform', 'conv_with_kaiming_uniform', (['cfg.MODEL.FPN.USE_GN', 'cfg.MODEL.FPN.USE_RELU'], {}), '(cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU)\n', (1286, 1332), False, 'from maskrcnn_benchmark.modeling.make_layers import conv_with_kaiming_uniform\n'), ((2184, 2255), 'maskrcnn_benchmark.modeling.make_layers.conv_with_kaiming_uniform', 'conv_with_kaiming_uniform', (['cfg.MODEL.FPN.USE_GN', 'cfg.MODEL.FPN.USE_RELU'], {}), '(cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU)\n', (2209, 2255), False, 'from maskrcnn_benchmark.modeling.make_layers import conv_with_kaiming_uniform\n')] |
"""Memory watchdog: periodically read the memory usage of the main test process
and print it out, until terminated."""
import os
import sys
import time
def _page_size():
    """Best-effort system page size in bytes; 4096 when sysconf is unavailable."""
    for conf_name in ('SC_PAGESIZE', 'SC_PAGE_SIZE'):
        try:
            return os.sysconf(conf_name)
        except (ValueError, AttributeError):
            continue
    return 4096

page_size = _page_size()
while True:
    # stdin is held open on the watched process's memory stats file
    # (presumably /proc/<pid>/statm); rewind and re-read it every tick.
    sys.stdin.seek(0)
    fields = sys.stdin.read().split()
    # Index 5 of statm is the data segment size, counted in pages.
    data = int(fields[5])
    sys.stdout.write(' ... process data size: {data:.1f}G\n'.format(
        data=data * page_size / 1024 ** 3))
    sys.stdout.flush()
    time.sleep(1)
| [
"time.sleep",
"sys.stdin.read",
"os.sysconf",
"sys.stdout.flush",
"sys.stdin.seek"
] | [((173, 198), 'os.sysconf', 'os.sysconf', (['"""SC_PAGESIZE"""'], {}), "('SC_PAGESIZE')\n", (183, 198), False, 'import os\n'), ((374, 391), 'sys.stdin.seek', 'sys.stdin.seek', (['(0)'], {}), '(0)\n', (388, 391), False, 'import sys\n'), ((404, 420), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (418, 420), False, 'import sys\n'), ((571, 589), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (587, 589), False, 'import sys\n'), ((594, 607), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (604, 607), False, 'import time\n'), ((265, 291), 'os.sysconf', 'os.sysconf', (['"""SC_PAGE_SIZE"""'], {}), "('SC_PAGE_SIZE')\n", (275, 291), False, 'import os\n')] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PermissionsArgs', 'Permissions']
@pulumi.input_type
class PermissionsArgs:
    # NOTE: generated by the Pulumi Terraform bridge (tfgen) -- see the file
    # header. Prefer regenerating over hand-editing this boilerplate.
    def __init__(__self__, *,
                 access_controls: pulumi.Input[Sequence[pulumi.Input['PermissionsAccessControlArgs']]],
                 authorization: Optional[pulumi.Input[str]] = None,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 cluster_policy_id: Optional[pulumi.Input[str]] = None,
                 directory_id: Optional[pulumi.Input[str]] = None,
                 directory_path: Optional[pulumi.Input[str]] = None,
                 experiment_id: Optional[pulumi.Input[str]] = None,
                 instance_pool_id: Optional[pulumi.Input[str]] = None,
                 job_id: Optional[pulumi.Input[str]] = None,
                 notebook_id: Optional[pulumi.Input[str]] = None,
                 notebook_path: Optional[pulumi.Input[str]] = None,
                 object_type: Optional[pulumi.Input[str]] = None,
                 registered_model_id: Optional[pulumi.Input[str]] = None,
                 repo_id: Optional[pulumi.Input[str]] = None,
                 repo_path: Optional[pulumi.Input[str]] = None,
                 sql_alert_id: Optional[pulumi.Input[str]] = None,
                 sql_dashboard_id: Optional[pulumi.Input[str]] = None,
                 sql_endpoint_id: Optional[pulumi.Input[str]] = None,
                 sql_query_id: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Permissions resource.
        :param pulumi.Input[str] authorization: either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission).
        :param pulumi.Input[str] cluster_id: cluster id
        :param pulumi.Input[str] cluster_policy_id: cluster policy id
        :param pulumi.Input[str] directory_id: directory id
        :param pulumi.Input[str] directory_path: path of directory
        :param pulumi.Input[str] instance_pool_id: instance pool id
        :param pulumi.Input[str] job_id: job id
        :param pulumi.Input[str] notebook_id: ID of notebook within workspace
        :param pulumi.Input[str] notebook_path: path of notebook
        :param pulumi.Input[str] object_type: type of permissions.
        :param pulumi.Input[str] repo_id: repo id
        :param pulumi.Input[str] repo_path: path of databricks repo directory(`/Repos/<username>/...`)
        """
        pulumi.set(__self__, "access_controls", access_controls)
        # Optional inputs are only recorded when supplied, so unset values
        # stay absent from the resource's input state.
        if authorization is not None:
            pulumi.set(__self__, "authorization", authorization)
        if cluster_id is not None:
            pulumi.set(__self__, "cluster_id", cluster_id)
        if cluster_policy_id is not None:
            pulumi.set(__self__, "cluster_policy_id", cluster_policy_id)
        if directory_id is not None:
            pulumi.set(__self__, "directory_id", directory_id)
        if directory_path is not None:
            pulumi.set(__self__, "directory_path", directory_path)
        if experiment_id is not None:
            pulumi.set(__self__, "experiment_id", experiment_id)
        if instance_pool_id is not None:
            pulumi.set(__self__, "instance_pool_id", instance_pool_id)
        if job_id is not None:
            pulumi.set(__self__, "job_id", job_id)
        if notebook_id is not None:
            pulumi.set(__self__, "notebook_id", notebook_id)
        if notebook_path is not None:
            pulumi.set(__self__, "notebook_path", notebook_path)
        if object_type is not None:
            pulumi.set(__self__, "object_type", object_type)
        if registered_model_id is not None:
            pulumi.set(__self__, "registered_model_id", registered_model_id)
        if repo_id is not None:
            pulumi.set(__self__, "repo_id", repo_id)
        if repo_path is not None:
            pulumi.set(__self__, "repo_path", repo_path)
        if sql_alert_id is not None:
            pulumi.set(__self__, "sql_alert_id", sql_alert_id)
        if sql_dashboard_id is not None:
            pulumi.set(__self__, "sql_dashboard_id", sql_dashboard_id)
        if sql_endpoint_id is not None:
            pulumi.set(__self__, "sql_endpoint_id", sql_endpoint_id)
        if sql_query_id is not None:
            pulumi.set(__self__, "sql_query_id", sql_query_id)
    @property
    @pulumi.getter(name="accessControls")
    def access_controls(self) -> pulumi.Input[Sequence[pulumi.Input['PermissionsAccessControlArgs']]]:
        """
        access controls
        """
        return pulumi.get(self, "access_controls")
    @access_controls.setter
    def access_controls(self, value: pulumi.Input[Sequence[pulumi.Input['PermissionsAccessControlArgs']]]):
        pulumi.set(self, "access_controls", value)
    @property
    @pulumi.getter
    def authorization(self) -> Optional[pulumi.Input[str]]:
        """
        either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission).
        """
        return pulumi.get(self, "authorization")
    @authorization.setter
    def authorization(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "authorization", value)
    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> Optional[pulumi.Input[str]]:
        """
        cluster id
        """
        return pulumi.get(self, "cluster_id")
    @cluster_id.setter
    def cluster_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_id", value)
    @property
    @pulumi.getter(name="clusterPolicyId")
    def cluster_policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        cluster policy id
        """
        return pulumi.get(self, "cluster_policy_id")
    @cluster_policy_id.setter
    def cluster_policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_policy_id", value)
    @property
    @pulumi.getter(name="directoryId")
    def directory_id(self) -> Optional[pulumi.Input[str]]:
        """
        directory id
        """
        return pulumi.get(self, "directory_id")
    @directory_id.setter
    def directory_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "directory_id", value)
    @property
    @pulumi.getter(name="directoryPath")
    def directory_path(self) -> Optional[pulumi.Input[str]]:
        """
        path of directory
        """
        return pulumi.get(self, "directory_path")
    @directory_path.setter
    def directory_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "directory_path", value)
    @property
    @pulumi.getter(name="experimentId")
    def experiment_id(self) -> Optional[pulumi.Input[str]]:
        """
        experiment id
        """
        return pulumi.get(self, "experiment_id")
    @experiment_id.setter
    def experiment_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "experiment_id", value)
    @property
    @pulumi.getter(name="instancePoolId")
    def instance_pool_id(self) -> Optional[pulumi.Input[str]]:
        """
        instance pool id
        """
        return pulumi.get(self, "instance_pool_id")
    @instance_pool_id.setter
    def instance_pool_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_pool_id", value)
    @property
    @pulumi.getter(name="jobId")
    def job_id(self) -> Optional[pulumi.Input[str]]:
        """
        job id
        """
        return pulumi.get(self, "job_id")
    @job_id.setter
    def job_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "job_id", value)
    @property
    @pulumi.getter(name="notebookId")
    def notebook_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of notebook within workspace
        """
        return pulumi.get(self, "notebook_id")
    @notebook_id.setter
    def notebook_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "notebook_id", value)
    @property
    @pulumi.getter(name="notebookPath")
    def notebook_path(self) -> Optional[pulumi.Input[str]]:
        """
        path of notebook
        """
        return pulumi.get(self, "notebook_path")
    @notebook_path.setter
    def notebook_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "notebook_path", value)
    @property
    @pulumi.getter(name="objectType")
    def object_type(self) -> Optional[pulumi.Input[str]]:
        """
        type of permissions.
        """
        return pulumi.get(self, "object_type")
    @object_type.setter
    def object_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "object_type", value)
    @property
    @pulumi.getter(name="registeredModelId")
    def registered_model_id(self) -> Optional[pulumi.Input[str]]:
        """
        registered model id
        """
        return pulumi.get(self, "registered_model_id")
    @registered_model_id.setter
    def registered_model_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "registered_model_id", value)
    @property
    @pulumi.getter(name="repoId")
    def repo_id(self) -> Optional[pulumi.Input[str]]:
        """
        repo id
        """
        return pulumi.get(self, "repo_id")
    @repo_id.setter
    def repo_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "repo_id", value)
    @property
    @pulumi.getter(name="repoPath")
    def repo_path(self) -> Optional[pulumi.Input[str]]:
        """
        path of databricks repo directory(`/Repos/<username>/...`)
        """
        return pulumi.get(self, "repo_path")
    @repo_path.setter
    def repo_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "repo_path", value)
    @property
    @pulumi.getter(name="sqlAlertId")
    def sql_alert_id(self) -> Optional[pulumi.Input[str]]:
        """
        sql alert id
        """
        return pulumi.get(self, "sql_alert_id")
    @sql_alert_id.setter
    def sql_alert_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sql_alert_id", value)
    @property
    @pulumi.getter(name="sqlDashboardId")
    def sql_dashboard_id(self) -> Optional[pulumi.Input[str]]:
        """
        sql dashboard id
        """
        return pulumi.get(self, "sql_dashboard_id")
    @sql_dashboard_id.setter
    def sql_dashboard_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sql_dashboard_id", value)
    @property
    @pulumi.getter(name="sqlEndpointId")
    def sql_endpoint_id(self) -> Optional[pulumi.Input[str]]:
        """
        sql endpoint id
        """
        return pulumi.get(self, "sql_endpoint_id")
    @sql_endpoint_id.setter
    def sql_endpoint_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sql_endpoint_id", value)
    @property
    @pulumi.getter(name="sqlQueryId")
    def sql_query_id(self) -> Optional[pulumi.Input[str]]:
        """
        sql query id
        """
        return pulumi.get(self, "sql_query_id")
    @sql_query_id.setter
    def sql_query_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sql_query_id", value)
@pulumi.input_type
class _PermissionsState:
def __init__(__self__, *,
access_controls: Optional[pulumi.Input[Sequence[pulumi.Input['PermissionsAccessControlArgs']]]] = None,
authorization: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
cluster_policy_id: Optional[pulumi.Input[str]] = None,
directory_id: Optional[pulumi.Input[str]] = None,
directory_path: Optional[pulumi.Input[str]] = None,
experiment_id: Optional[pulumi.Input[str]] = None,
instance_pool_id: Optional[pulumi.Input[str]] = None,
job_id: Optional[pulumi.Input[str]] = None,
notebook_id: Optional[pulumi.Input[str]] = None,
notebook_path: Optional[pulumi.Input[str]] = None,
object_type: Optional[pulumi.Input[str]] = None,
registered_model_id: Optional[pulumi.Input[str]] = None,
repo_id: Optional[pulumi.Input[str]] = None,
repo_path: Optional[pulumi.Input[str]] = None,
sql_alert_id: Optional[pulumi.Input[str]] = None,
sql_dashboard_id: Optional[pulumi.Input[str]] = None,
sql_endpoint_id: Optional[pulumi.Input[str]] = None,
sql_query_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Permissions resources.
:param pulumi.Input[str] authorization: either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission).
:param pulumi.Input[str] cluster_id: cluster id
:param pulumi.Input[str] cluster_policy_id: cluster policy id
:param pulumi.Input[str] directory_id: directory id
:param pulumi.Input[str] directory_path: path of directory
:param pulumi.Input[str] instance_pool_id: instance pool id
:param pulumi.Input[str] job_id: job id
:param pulumi.Input[str] notebook_id: ID of notebook within workspace
:param pulumi.Input[str] notebook_path: path of notebook
:param pulumi.Input[str] object_type: type of permissions.
:param pulumi.Input[str] repo_id: repo id
:param pulumi.Input[str] repo_path: path of databricks repo directory(`/Repos/<username>/...`)
"""
if access_controls is not None:
pulumi.set(__self__, "access_controls", access_controls)
if authorization is not None:
pulumi.set(__self__, "authorization", authorization)
if cluster_id is not None:
pulumi.set(__self__, "cluster_id", cluster_id)
if cluster_policy_id is not None:
pulumi.set(__self__, "cluster_policy_id", cluster_policy_id)
if directory_id is not None:
pulumi.set(__self__, "directory_id", directory_id)
if directory_path is not None:
pulumi.set(__self__, "directory_path", directory_path)
if experiment_id is not None:
pulumi.set(__self__, "experiment_id", experiment_id)
if instance_pool_id is not None:
pulumi.set(__self__, "instance_pool_id", instance_pool_id)
if job_id is not None:
pulumi.set(__self__, "job_id", job_id)
if notebook_id is not None:
pulumi.set(__self__, "notebook_id", notebook_id)
if notebook_path is not None:
pulumi.set(__self__, "notebook_path", notebook_path)
if object_type is not None:
pulumi.set(__self__, "object_type", object_type)
if registered_model_id is not None:
pulumi.set(__self__, "registered_model_id", registered_model_id)
if repo_id is not None:
pulumi.set(__self__, "repo_id", repo_id)
if repo_path is not None:
pulumi.set(__self__, "repo_path", repo_path)
if sql_alert_id is not None:
pulumi.set(__self__, "sql_alert_id", sql_alert_id)
if sql_dashboard_id is not None:
pulumi.set(__self__, "sql_dashboard_id", sql_dashboard_id)
if sql_endpoint_id is not None:
pulumi.set(__self__, "sql_endpoint_id", sql_endpoint_id)
if sql_query_id is not None:
pulumi.set(__self__, "sql_query_id", sql_query_id)
@property
@pulumi.getter(name="accessControls")
def access_controls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PermissionsAccessControlArgs']]]]:
return pulumi.get(self, "access_controls")
@access_controls.setter
def access_controls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PermissionsAccessControlArgs']]]]):
pulumi.set(self, "access_controls", value)
@property
@pulumi.getter
def authorization(self) -> Optional[pulumi.Input[str]]:
"""
either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission).
"""
return pulumi.get(self, "authorization")
@authorization.setter
def authorization(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authorization", value)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[pulumi.Input[str]]:
"""
cluster id
"""
return pulumi.get(self, "cluster_id")
@cluster_id.setter
def cluster_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_id", value)
@property
@pulumi.getter(name="clusterPolicyId")
def cluster_policy_id(self) -> Optional[pulumi.Input[str]]:
"""
cluster policy id
"""
return pulumi.get(self, "cluster_policy_id")
@cluster_policy_id.setter
def cluster_policy_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_policy_id", value)
@property
@pulumi.getter(name="directoryId")
def directory_id(self) -> Optional[pulumi.Input[str]]:
"""
directory id
"""
return pulumi.get(self, "directory_id")
@directory_id.setter
def directory_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "directory_id", value)
@property
@pulumi.getter(name="directoryPath")
def directory_path(self) -> Optional[pulumi.Input[str]]:
"""
path of directory
"""
return pulumi.get(self, "directory_path")
@directory_path.setter
def directory_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "directory_path", value)
@property
@pulumi.getter(name="experimentId")
def experiment_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "experiment_id")
@experiment_id.setter
def experiment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "experiment_id", value)
@property
@pulumi.getter(name="instancePoolId")
def instance_pool_id(self) -> Optional[pulumi.Input[str]]:
"""
instance pool id
"""
return pulumi.get(self, "instance_pool_id")
@instance_pool_id.setter
def instance_pool_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_pool_id", value)
@property
@pulumi.getter(name="jobId")
def job_id(self) -> Optional[pulumi.Input[str]]:
"""
job id
"""
return pulumi.get(self, "job_id")
@job_id.setter
def job_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "job_id", value)
@property
@pulumi.getter(name="notebookId")
def notebook_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of notebook within workspace
"""
return pulumi.get(self, "notebook_id")
@notebook_id.setter
def notebook_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notebook_id", value)
@property
@pulumi.getter(name="notebookPath")
def notebook_path(self) -> Optional[pulumi.Input[str]]:
"""
path of notebook
"""
return pulumi.get(self, "notebook_path")
@notebook_path.setter
def notebook_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notebook_path", value)
@property
@pulumi.getter(name="objectType")
def object_type(self) -> Optional[pulumi.Input[str]]:
"""
type of permissions.
"""
return pulumi.get(self, "object_type")
@object_type.setter
def object_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "object_type", value)
@property
@pulumi.getter(name="registeredModelId")
def registered_model_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "registered_model_id")
@registered_model_id.setter
def registered_model_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "registered_model_id", value)
@property
@pulumi.getter(name="repoId")
def repo_id(self) -> Optional[pulumi.Input[str]]:
"""
repo id
"""
return pulumi.get(self, "repo_id")
@repo_id.setter
    def repo_id(self, value: Optional[pulumi.Input[str]]):
        """Set the repo id."""
        pulumi.set(self, "repo_id", value)
@property
@pulumi.getter(name="repoPath")
def repo_path(self) -> Optional[pulumi.Input[str]]:
"""
path of databricks repo directory(`/Repos/<username>/...`)
"""
return pulumi.get(self, "repo_path")
@repo_path.setter
    def repo_path(self, value: Optional[pulumi.Input[str]]):
        """Set the path of the databricks repo directory."""
        pulumi.set(self, "repo_path", value)
@property
@pulumi.getter(name="sqlAlertId")
def sql_alert_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sql_alert_id")
@sql_alert_id.setter
    def sql_alert_id(self, value: Optional[pulumi.Input[str]]):
        """Set the SQL alert id."""
        pulumi.set(self, "sql_alert_id", value)
@property
@pulumi.getter(name="sqlDashboardId")
def sql_dashboard_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sql_dashboard_id")
@sql_dashboard_id.setter
    def sql_dashboard_id(self, value: Optional[pulumi.Input[str]]):
        """Set the SQL dashboard id."""
        pulumi.set(self, "sql_dashboard_id", value)
@property
@pulumi.getter(name="sqlEndpointId")
def sql_endpoint_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sql_endpoint_id")
@sql_endpoint_id.setter
    def sql_endpoint_id(self, value: Optional[pulumi.Input[str]]):
        """Set the SQL endpoint id."""
        pulumi.set(self, "sql_endpoint_id", value)
@property
@pulumi.getter(name="sqlQueryId")
def sql_query_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "sql_query_id")
@sql_query_id.setter
    def sql_query_id(self, value: Optional[pulumi.Input[str]]):
        """Set the SQL query id."""
        pulumi.set(self, "sql_query_id", value)
class Permissions(pulumi.CustomResource):
    """Manages access-control rules for a single Databricks object.

    The target object is selected through the optional ``*_id`` /
    ``*_path`` / ``authorization`` arguments; ``access_controls`` carries
    the rule set and is required when creating the resource (see
    ``_internal_init``).  Presumably exactly one object selector should be
    supplied per resource — TODO confirm against the provider schema.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 access_controls: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PermissionsAccessControlArgs']]]]] = None,
                 authorization: Optional[pulumi.Input[str]] = None,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 cluster_policy_id: Optional[pulumi.Input[str]] = None,
                 directory_id: Optional[pulumi.Input[str]] = None,
                 directory_path: Optional[pulumi.Input[str]] = None,
                 experiment_id: Optional[pulumi.Input[str]] = None,
                 instance_pool_id: Optional[pulumi.Input[str]] = None,
                 job_id: Optional[pulumi.Input[str]] = None,
                 notebook_id: Optional[pulumi.Input[str]] = None,
                 notebook_path: Optional[pulumi.Input[str]] = None,
                 object_type: Optional[pulumi.Input[str]] = None,
                 registered_model_id: Optional[pulumi.Input[str]] = None,
                 repo_id: Optional[pulumi.Input[str]] = None,
                 repo_path: Optional[pulumi.Input[str]] = None,
                 sql_alert_id: Optional[pulumi.Input[str]] = None,
                 sql_dashboard_id: Optional[pulumi.Input[str]] = None,
                 sql_endpoint_id: Optional[pulumi.Input[str]] = None,
                 sql_query_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        ## Import
        The resource permissions can be imported using the object id bash
        ```sh
        $ pulumi import databricks:index/permissions:Permissions this /<object type>/<object id>
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] authorization: either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission).
        :param pulumi.Input[str] cluster_id: cluster id
        :param pulumi.Input[str] cluster_policy_id: cluster policy id
        :param pulumi.Input[str] directory_id: directory id
        :param pulumi.Input[str] directory_path: path of directory
        :param pulumi.Input[str] instance_pool_id: instance pool id
        :param pulumi.Input[str] job_id: job id
        :param pulumi.Input[str] notebook_id: ID of notebook within workspace
        :param pulumi.Input[str] notebook_path: path of notebook
        :param pulumi.Input[str] object_type: type of permissions.
        :param pulumi.Input[str] repo_id: repo id
        :param pulumi.Input[str] repo_path: path of databricks repo directory(`/Repos/<username>/...`)
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: PermissionsArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## Import
        The resource permissions can be imported using the object id bash
        ```sh
        $ pulumi import databricks:index/permissions:Permissions this /<object type>/<object id>
        ```
        :param str resource_name: The name of the resource.
        :param PermissionsArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single
        # PermissionsArgs bundle or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(PermissionsArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       access_controls: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PermissionsAccessControlArgs']]]]] = None,
                       authorization: Optional[pulumi.Input[str]] = None,
                       cluster_id: Optional[pulumi.Input[str]] = None,
                       cluster_policy_id: Optional[pulumi.Input[str]] = None,
                       directory_id: Optional[pulumi.Input[str]] = None,
                       directory_path: Optional[pulumi.Input[str]] = None,
                       experiment_id: Optional[pulumi.Input[str]] = None,
                       instance_pool_id: Optional[pulumi.Input[str]] = None,
                       job_id: Optional[pulumi.Input[str]] = None,
                       notebook_id: Optional[pulumi.Input[str]] = None,
                       notebook_path: Optional[pulumi.Input[str]] = None,
                       object_type: Optional[pulumi.Input[str]] = None,
                       registered_model_id: Optional[pulumi.Input[str]] = None,
                       repo_id: Optional[pulumi.Input[str]] = None,
                       repo_path: Optional[pulumi.Input[str]] = None,
                       sql_alert_id: Optional[pulumi.Input[str]] = None,
                       sql_dashboard_id: Optional[pulumi.Input[str]] = None,
                       sql_endpoint_id: Optional[pulumi.Input[str]] = None,
                       sql_query_id: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        """Shared constructor body backing both ``__init__`` overloads.

        Validates the resource options, assembles the property bag and
        registers the resource with the Pulumi engine via
        ``pulumi.CustomResource.__init__``.
        """
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the property bag from the
            # individual arguments.  __props__ may only be supplied together
            # with opts.id (i.e. when adopting an existing resource).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = PermissionsArgs.__new__(PermissionsArgs)
            # access_controls is mandatory unless the engine is rehydrating
            # the resource from an existing URN.
            if access_controls is None and not opts.urn:
                raise TypeError("Missing required property 'access_controls'")
            __props__.__dict__["access_controls"] = access_controls
            __props__.__dict__["authorization"] = authorization
            __props__.__dict__["cluster_id"] = cluster_id
            __props__.__dict__["cluster_policy_id"] = cluster_policy_id
            __props__.__dict__["directory_id"] = directory_id
            __props__.__dict__["directory_path"] = directory_path
            __props__.__dict__["experiment_id"] = experiment_id
            __props__.__dict__["instance_pool_id"] = instance_pool_id
            __props__.__dict__["job_id"] = job_id
            __props__.__dict__["notebook_id"] = notebook_id
            __props__.__dict__["notebook_path"] = notebook_path
            __props__.__dict__["object_type"] = object_type
            __props__.__dict__["registered_model_id"] = registered_model_id
            __props__.__dict__["repo_id"] = repo_id
            __props__.__dict__["repo_path"] = repo_path
            __props__.__dict__["sql_alert_id"] = sql_alert_id
            __props__.__dict__["sql_dashboard_id"] = sql_dashboard_id
            __props__.__dict__["sql_endpoint_id"] = sql_endpoint_id
            __props__.__dict__["sql_query_id"] = sql_query_id
        super(Permissions, __self__).__init__(
            'databricks:index/permissions:Permissions',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            access_controls: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PermissionsAccessControlArgs']]]]] = None,
            authorization: Optional[pulumi.Input[str]] = None,
            cluster_id: Optional[pulumi.Input[str]] = None,
            cluster_policy_id: Optional[pulumi.Input[str]] = None,
            directory_id: Optional[pulumi.Input[str]] = None,
            directory_path: Optional[pulumi.Input[str]] = None,
            experiment_id: Optional[pulumi.Input[str]] = None,
            instance_pool_id: Optional[pulumi.Input[str]] = None,
            job_id: Optional[pulumi.Input[str]] = None,
            notebook_id: Optional[pulumi.Input[str]] = None,
            notebook_path: Optional[pulumi.Input[str]] = None,
            object_type: Optional[pulumi.Input[str]] = None,
            registered_model_id: Optional[pulumi.Input[str]] = None,
            repo_id: Optional[pulumi.Input[str]] = None,
            repo_path: Optional[pulumi.Input[str]] = None,
            sql_alert_id: Optional[pulumi.Input[str]] = None,
            sql_dashboard_id: Optional[pulumi.Input[str]] = None,
            sql_endpoint_id: Optional[pulumi.Input[str]] = None,
            sql_query_id: Optional[pulumi.Input[str]] = None) -> 'Permissions':
        """
        Get an existing Permissions resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] authorization: either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission).
        :param pulumi.Input[str] cluster_id: cluster id
        :param pulumi.Input[str] cluster_policy_id: cluster policy id
        :param pulumi.Input[str] directory_id: directory id
        :param pulumi.Input[str] directory_path: path of directory
        :param pulumi.Input[str] instance_pool_id: instance pool id
        :param pulumi.Input[str] job_id: job id
        :param pulumi.Input[str] notebook_id: ID of notebook within workspace
        :param pulumi.Input[str] notebook_path: path of notebook
        :param pulumi.Input[str] object_type: type of permissions.
        :param pulumi.Input[str] repo_id: repo id
        :param pulumi.Input[str] repo_path: path of databricks repo directory(`/Repos/<username>/...`)
        """
        # Force the lookup id into the options, then hydrate a state bag
        # from the supplied (possibly partial) property values.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _PermissionsState.__new__(_PermissionsState)
        __props__.__dict__["access_controls"] = access_controls
        __props__.__dict__["authorization"] = authorization
        __props__.__dict__["cluster_id"] = cluster_id
        __props__.__dict__["cluster_policy_id"] = cluster_policy_id
        __props__.__dict__["directory_id"] = directory_id
        __props__.__dict__["directory_path"] = directory_path
        __props__.__dict__["experiment_id"] = experiment_id
        __props__.__dict__["instance_pool_id"] = instance_pool_id
        __props__.__dict__["job_id"] = job_id
        __props__.__dict__["notebook_id"] = notebook_id
        __props__.__dict__["notebook_path"] = notebook_path
        __props__.__dict__["object_type"] = object_type
        __props__.__dict__["registered_model_id"] = registered_model_id
        __props__.__dict__["repo_id"] = repo_id
        __props__.__dict__["repo_path"] = repo_path
        __props__.__dict__["sql_alert_id"] = sql_alert_id
        __props__.__dict__["sql_dashboard_id"] = sql_dashboard_id
        __props__.__dict__["sql_endpoint_id"] = sql_endpoint_id
        __props__.__dict__["sql_query_id"] = sql_query_id
        return Permissions(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="accessControls")
    def access_controls(self) -> pulumi.Output[Sequence['outputs.PermissionsAccessControl']]:
        """access controls attached to the object"""
        return pulumi.get(self, "access_controls")
    @property
    @pulumi.getter
    def authorization(self) -> pulumi.Output[Optional[str]]:
        """
        either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission).
        """
        return pulumi.get(self, "authorization")
    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> pulumi.Output[Optional[str]]:
        """
        cluster id
        """
        return pulumi.get(self, "cluster_id")
    @property
    @pulumi.getter(name="clusterPolicyId")
    def cluster_policy_id(self) -> pulumi.Output[Optional[str]]:
        """
        cluster policy id
        """
        return pulumi.get(self, "cluster_policy_id")
    @property
    @pulumi.getter(name="directoryId")
    def directory_id(self) -> pulumi.Output[Optional[str]]:
        """
        directory id
        """
        return pulumi.get(self, "directory_id")
    @property
    @pulumi.getter(name="directoryPath")
    def directory_path(self) -> pulumi.Output[Optional[str]]:
        """
        path of directory
        """
        return pulumi.get(self, "directory_path")
    @property
    @pulumi.getter(name="experimentId")
    def experiment_id(self) -> pulumi.Output[Optional[str]]:
        """experiment id"""
        return pulumi.get(self, "experiment_id")
    @property
    @pulumi.getter(name="instancePoolId")
    def instance_pool_id(self) -> pulumi.Output[Optional[str]]:
        """
        instance pool id
        """
        return pulumi.get(self, "instance_pool_id")
    @property
    @pulumi.getter(name="jobId")
    def job_id(self) -> pulumi.Output[Optional[str]]:
        """
        job id
        """
        return pulumi.get(self, "job_id")
    @property
    @pulumi.getter(name="notebookId")
    def notebook_id(self) -> pulumi.Output[Optional[str]]:
        """
        ID of notebook within workspace
        """
        return pulumi.get(self, "notebook_id")
    @property
    @pulumi.getter(name="notebookPath")
    def notebook_path(self) -> pulumi.Output[Optional[str]]:
        """
        path of notebook
        """
        return pulumi.get(self, "notebook_path")
    @property
    @pulumi.getter(name="objectType")
    def object_type(self) -> pulumi.Output[str]:
        """
        type of permissions.
        """
        return pulumi.get(self, "object_type")
    @property
    @pulumi.getter(name="registeredModelId")
    def registered_model_id(self) -> pulumi.Output[Optional[str]]:
        """registered model id"""
        return pulumi.get(self, "registered_model_id")
    @property
    @pulumi.getter(name="repoId")
    def repo_id(self) -> pulumi.Output[Optional[str]]:
        """
        repo id
        """
        return pulumi.get(self, "repo_id")
    @property
    @pulumi.getter(name="repoPath")
    def repo_path(self) -> pulumi.Output[Optional[str]]:
        """
        path of databricks repo directory(`/Repos/<username>/...`)
        """
        return pulumi.get(self, "repo_path")
    @property
    @pulumi.getter(name="sqlAlertId")
    def sql_alert_id(self) -> pulumi.Output[Optional[str]]:
        """SQL alert id"""
        return pulumi.get(self, "sql_alert_id")
    @property
    @pulumi.getter(name="sqlDashboardId")
    def sql_dashboard_id(self) -> pulumi.Output[Optional[str]]:
        """SQL dashboard id"""
        return pulumi.get(self, "sql_dashboard_id")
    @property
    @pulumi.getter(name="sqlEndpointId")
    def sql_endpoint_id(self) -> pulumi.Output[Optional[str]]:
        """SQL endpoint id"""
        return pulumi.get(self, "sql_endpoint_id")
    @property
    @pulumi.getter(name="sqlQueryId")
    def sql_query_id(self) -> pulumi.Output[Optional[str]]:
        """SQL query id"""
        return pulumi.get(self, "sql_query_id")
| [
"pulumi.getter",
"pulumi.set",
"pulumi.ResourceOptions",
"pulumi.get"
] | [((4837, 4873), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""accessControls"""'}), "(name='accessControls')\n", (4850, 4873), False, 'import pulumi\n'), ((5784, 5815), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""clusterId"""'}), "(name='clusterId')\n", (5797, 5815), False, 'import pulumi\n'), ((6114, 6151), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""clusterPolicyId"""'}), "(name='clusterPolicyId')\n", (6127, 6151), False, 'import pulumi\n'), ((6492, 6525), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""directoryId"""'}), "(name='directoryId')\n", (6505, 6525), False, 'import pulumi\n'), ((6836, 6871), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""directoryPath"""'}), "(name='directoryPath')\n", (6849, 6871), False, 'import pulumi\n'), ((7197, 7231), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""experimentId"""'}), "(name='experimentId')\n", (7210, 7231), False, 'import pulumi\n'), ((7502, 7538), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""instancePoolId"""'}), "(name='instancePoolId')\n", (7515, 7538), False, 'import pulumi\n'), ((7873, 7900), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""jobId"""'}), "(name='jobId')\n", (7886, 7900), False, 'import pulumi\n'), ((8175, 8207), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""notebookId"""'}), "(name='notebookId')\n", (8188, 8207), False, 'import pulumi\n'), ((8532, 8566), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""notebookPath"""'}), "(name='notebookPath')\n", (8545, 8566), False, 'import pulumi\n'), ((8886, 8918), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""objectType"""'}), "(name='objectType')\n", (8899, 8918), False, 'import pulumi\n'), ((9232, 9271), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""registeredModelId"""'}), "(name='registeredModelId')\n", (9245, 9271), False, 'import pulumi\n'), ((9572, 9600), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""repoId"""'}), "(name='repoId')\n", (9585, 9600), False, 'import 
pulumi\n'), ((9881, 9911), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""repoPath"""'}), "(name='repoPath')\n", (9894, 9911), False, 'import pulumi\n'), ((10253, 10285), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""sqlAlertId"""'}), "(name='sqlAlertId')\n", (10266, 10285), False, 'import pulumi\n'), ((10551, 10587), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""sqlDashboardId"""'}), "(name='sqlDashboardId')\n", (10564, 10587), False, 'import pulumi\n'), ((10873, 10908), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""sqlEndpointId"""'}), "(name='sqlEndpointId')\n", (10886, 10908), False, 'import pulumi\n'), ((11189, 11221), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""sqlQueryId"""'}), "(name='sqlQueryId')\n", (11202, 11221), False, 'import pulumi\n'), ((15958, 15994), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""accessControls"""'}), "(name='accessControls')\n", (15971, 15994), False, 'import pulumi\n'), ((16925, 16956), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""clusterId"""'}), "(name='clusterId')\n", (16938, 16956), False, 'import pulumi\n'), ((17255, 17292), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""clusterPolicyId"""'}), "(name='clusterPolicyId')\n", (17268, 17292), False, 'import pulumi\n'), ((17633, 17666), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""directoryId"""'}), "(name='directoryId')\n", (17646, 17666), False, 'import pulumi\n'), ((17977, 18012), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""directoryPath"""'}), "(name='directoryPath')\n", (17990, 18012), False, 'import pulumi\n'), ((18338, 18372), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""experimentId"""'}), "(name='experimentId')\n", (18351, 18372), False, 'import pulumi\n'), ((18643, 18679), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""instancePoolId"""'}), "(name='instancePoolId')\n", (18656, 18679), False, 'import pulumi\n'), ((19014, 19041), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""jobId"""'}), 
"(name='jobId')\n", (19027, 19041), False, 'import pulumi\n'), ((19316, 19348), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""notebookId"""'}), "(name='notebookId')\n", (19329, 19348), False, 'import pulumi\n'), ((19673, 19707), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""notebookPath"""'}), "(name='notebookPath')\n", (19686, 19707), False, 'import pulumi\n'), ((20027, 20059), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""objectType"""'}), "(name='objectType')\n", (20040, 20059), False, 'import pulumi\n'), ((20373, 20412), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""registeredModelId"""'}), "(name='registeredModelId')\n", (20386, 20412), False, 'import pulumi\n'), ((20713, 20741), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""repoId"""'}), "(name='repoId')\n", (20726, 20741), False, 'import pulumi\n'), ((21022, 21052), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""repoPath"""'}), "(name='repoPath')\n", (21035, 21052), False, 'import pulumi\n'), ((21394, 21426), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""sqlAlertId"""'}), "(name='sqlAlertId')\n", (21407, 21426), False, 'import pulumi\n'), ((21692, 21728), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""sqlDashboardId"""'}), "(name='sqlDashboardId')\n", (21705, 21728), False, 'import pulumi\n'), ((22014, 22049), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""sqlEndpointId"""'}), "(name='sqlEndpointId')\n", (22027, 22049), False, 'import pulumi\n'), ((22330, 22362), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""sqlQueryId"""'}), "(name='sqlQueryId')\n", (22343, 22362), False, 'import pulumi\n'), ((34447, 34483), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""accessControls"""'}), "(name='accessControls')\n", (34460, 34483), False, 'import pulumi\n'), ((35057, 35088), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""clusterId"""'}), "(name='clusterId')\n", (35070, 35088), False, 'import pulumi\n'), ((35256, 35293), 'pulumi.getter', 'pulumi.getter', 
([], {'name': '"""clusterPolicyId"""'}), "(name='clusterPolicyId')\n", (35269, 35293), False, 'import pulumi\n'), ((35482, 35515), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""directoryId"""'}), "(name='directoryId')\n", (35495, 35515), False, 'import pulumi\n'), ((35689, 35724), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""directoryPath"""'}), "(name='directoryPath')\n", (35702, 35724), False, 'import pulumi\n'), ((35907, 35941), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""experimentId"""'}), "(name='experimentId')\n", (35920, 35941), False, 'import pulumi\n'), ((36072, 36108), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""instancePoolId"""'}), "(name='instancePoolId')\n", (36085, 36108), False, 'import pulumi\n'), ((36294, 36321), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""jobId"""'}), "(name='jobId')\n", (36307, 36321), False, 'import pulumi\n'), ((36477, 36509), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""notebookId"""'}), "(name='notebookId')\n", (36490, 36509), False, 'import pulumi\n'), ((36700, 36734), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""notebookPath"""'}), "(name='notebookPath')\n", (36713, 36734), False, 'import pulumi\n'), ((36914, 36946), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""objectType"""'}), "(name='objectType')\n", (36927, 36946), False, 'import pulumi\n'), ((37116, 37155), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""registeredModelId"""'}), "(name='registeredModelId')\n", (37129, 37155), False, 'import pulumi\n'), ((37298, 37326), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""repoId"""'}), "(name='repoId')\n", (37311, 37326), False, 'import pulumi\n'), ((37485, 37515), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""repoPath"""'}), "(name='repoPath')\n", (37498, 37515), False, 'import pulumi\n'), ((37729, 37761), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""sqlAlertId"""'}), "(name='sqlAlertId')\n", (37742, 37761), False, 'import pulumi\n'), ((37890, 
37926), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""sqlDashboardId"""'}), "(name='sqlDashboardId')\n", (37903, 37926), False, 'import pulumi\n'), ((38063, 38098), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""sqlEndpointId"""'}), "(name='sqlEndpointId')\n", (38076, 38098), False, 'import pulumi\n'), ((38233, 38265), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""sqlQueryId"""'}), "(name='sqlQueryId')\n", (38246, 38265), False, 'import pulumi\n'), ((2930, 2986), 'pulumi.set', 'pulumi.set', (['__self__', '"""access_controls"""', 'access_controls'], {}), "(__self__, 'access_controls', access_controls)\n", (2940, 2986), False, 'import pulumi\n'), ((4992, 5027), 'pulumi.get', 'pulumi.get', (['self', '"""access_controls"""'], {}), "(self, 'access_controls')\n", (5002, 5027), False, 'import pulumi\n'), ((5173, 5215), 'pulumi.set', 'pulumi.set', (['self', '"""access_controls"""', 'value'], {}), "(self, 'access_controls', value)\n", (5183, 5215), False, 'import pulumi\n'), ((5589, 5622), 'pulumi.get', 'pulumi.get', (['self', '"""authorization"""'], {}), "(self, 'authorization')\n", (5599, 5622), False, 'import pulumi\n'), ((5723, 5763), 'pulumi.set', 'pulumi.set', (['self', '"""authorization"""', 'value'], {}), "(self, 'authorization', value)\n", (5733, 5763), False, 'import pulumi\n'), ((5931, 5961), 'pulumi.get', 'pulumi.get', (['self', '"""cluster_id"""'], {}), "(self, 'cluster_id')\n", (5941, 5961), False, 'import pulumi\n'), ((6056, 6093), 'pulumi.set', 'pulumi.set', (['self', '"""cluster_id"""', 'value'], {}), "(self, 'cluster_id', value)\n", (6066, 6093), False, 'import pulumi\n'), ((6281, 6318), 'pulumi.get', 'pulumi.get', (['self', '"""cluster_policy_id"""'], {}), "(self, 'cluster_policy_id')\n", (6291, 6318), False, 'import pulumi\n'), ((6427, 6471), 'pulumi.set', 'pulumi.set', (['self', '"""cluster_policy_id"""', 'value'], {}), "(self, 'cluster_policy_id', value)\n", (6437, 6471), False, 'import pulumi\n'), ((6645, 6677), 'pulumi.get', 
'pulumi.get', (['self', '"""directory_id"""'], {}), "(self, 'directory_id')\n", (6655, 6677), False, 'import pulumi\n'), ((6776, 6815), 'pulumi.set', 'pulumi.set', (['self', '"""directory_id"""', 'value'], {}), "(self, 'directory_id', value)\n", (6786, 6815), False, 'import pulumi\n'), ((6998, 7032), 'pulumi.get', 'pulumi.get', (['self', '"""directory_path"""'], {}), "(self, 'directory_path')\n", (7008, 7032), False, 'import pulumi\n'), ((7135, 7176), 'pulumi.set', 'pulumi.set', (['self', '"""directory_path"""', 'value'], {}), "(self, 'directory_path', value)\n", (7145, 7176), False, 'import pulumi\n'), ((7307, 7340), 'pulumi.get', 'pulumi.get', (['self', '"""experiment_id"""'], {}), "(self, 'experiment_id')\n", (7317, 7340), False, 'import pulumi\n'), ((7441, 7481), 'pulumi.set', 'pulumi.set', (['self', '"""experiment_id"""', 'value'], {}), "(self, 'experiment_id', value)\n", (7451, 7481), False, 'import pulumi\n'), ((7666, 7702), 'pulumi.get', 'pulumi.get', (['self', '"""instance_pool_id"""'], {}), "(self, 'instance_pool_id')\n", (7676, 7702), False, 'import pulumi\n'), ((7809, 7852), 'pulumi.set', 'pulumi.set', (['self', '"""instance_pool_id"""', 'value'], {}), "(self, 'instance_pool_id', value)\n", (7819, 7852), False, 'import pulumi\n'), ((8008, 8034), 'pulumi.get', 'pulumi.get', (['self', '"""job_id"""'], {}), "(self, 'job_id')\n", (8018, 8034), False, 'import pulumi\n'), ((8121, 8154), 'pulumi.set', 'pulumi.set', (['self', '"""job_id"""', 'value'], {}), "(self, 'job_id', value)\n", (8131, 8154), False, 'import pulumi\n'), ((8345, 8376), 'pulumi.get', 'pulumi.get', (['self', '"""notebook_id"""'], {}), "(self, 'notebook_id')\n", (8355, 8376), False, 'import pulumi\n'), ((8473, 8511), 'pulumi.set', 'pulumi.set', (['self', '"""notebook_id"""', 'value'], {}), "(self, 'notebook_id', value)\n", (8483, 8511), False, 'import pulumi\n'), ((8691, 8724), 'pulumi.get', 'pulumi.get', (['self', '"""notebook_path"""'], {}), "(self, 'notebook_path')\n", (8701, 8724), False, 
'import pulumi\n'), ((8825, 8865), 'pulumi.set', 'pulumi.set', (['self', '"""notebook_path"""', 'value'], {}), "(self, 'notebook_path', value)\n", (8835, 8865), False, 'import pulumi\n'), ((9045, 9076), 'pulumi.get', 'pulumi.get', (['self', '"""object_type"""'], {}), "(self, 'object_type')\n", (9055, 9076), False, 'import pulumi\n'), ((9173, 9211), 'pulumi.set', 'pulumi.set', (['self', '"""object_type"""', 'value'], {}), "(self, 'object_type', value)\n", (9183, 9211), False, 'import pulumi\n'), ((9353, 9392), 'pulumi.get', 'pulumi.get', (['self', '"""registered_model_id"""'], {}), "(self, 'registered_model_id')\n", (9363, 9392), False, 'import pulumi\n'), ((9505, 9551), 'pulumi.set', 'pulumi.set', (['self', '"""registered_model_id"""', 'value'], {}), "(self, 'registered_model_id', value)\n", (9515, 9551), False, 'import pulumi\n'), ((9710, 9737), 'pulumi.get', 'pulumi.get', (['self', '"""repo_id"""'], {}), "(self, 'repo_id')\n", (9720, 9737), False, 'import pulumi\n'), ((9826, 9860), 'pulumi.set', 'pulumi.set', (['self', '"""repo_id"""', 'value'], {}), "(self, 'repo_id', value)\n", (9836, 9860), False, 'import pulumi\n'), ((10074, 10103), 'pulumi.get', 'pulumi.get', (['self', '"""repo_path"""'], {}), "(self, 'repo_path')\n", (10084, 10103), False, 'import pulumi\n'), ((10196, 10232), 'pulumi.set', 'pulumi.set', (['self', '"""repo_path"""', 'value'], {}), "(self, 'repo_path', value)\n", (10206, 10232), False, 'import pulumi\n'), ((10360, 10392), 'pulumi.get', 'pulumi.get', (['self', '"""sql_alert_id"""'], {}), "(self, 'sql_alert_id')\n", (10370, 10392), False, 'import pulumi\n'), ((10491, 10530), 'pulumi.set', 'pulumi.set', (['self', '"""sql_alert_id"""', 'value'], {}), "(self, 'sql_alert_id', value)\n", (10501, 10530), False, 'import pulumi\n'), ((10666, 10702), 'pulumi.get', 'pulumi.get', (['self', '"""sql_dashboard_id"""'], {}), "(self, 'sql_dashboard_id')\n", (10676, 10702), False, 'import pulumi\n'), ((10809, 10852), 'pulumi.set', 'pulumi.set', (['self', 
'"""sql_dashboard_id"""', 'value'], {}), "(self, 'sql_dashboard_id', value)\n", (10819, 10852), False, 'import pulumi\n'), ((10986, 11021), 'pulumi.get', 'pulumi.get', (['self', '"""sql_endpoint_id"""'], {}), "(self, 'sql_endpoint_id')\n", (10996, 11021), False, 'import pulumi\n'), ((11126, 11168), 'pulumi.set', 'pulumi.set', (['self', '"""sql_endpoint_id"""', 'value'], {}), "(self, 'sql_endpoint_id', value)\n", (11136, 11168), False, 'import pulumi\n'), ((11296, 11328), 'pulumi.get', 'pulumi.get', (['self', '"""sql_query_id"""'], {}), "(self, 'sql_query_id')\n", (11306, 11328), False, 'import pulumi\n'), ((11427, 11466), 'pulumi.set', 'pulumi.set', (['self', '"""sql_query_id"""', 'value'], {}), "(self, 'sql_query_id', value)\n", (11437, 11466), False, 'import pulumi\n'), ((16123, 16158), 'pulumi.get', 'pulumi.get', (['self', '"""access_controls"""'], {}), "(self, 'access_controls')\n", (16133, 16158), False, 'import pulumi\n'), ((16314, 16356), 'pulumi.set', 'pulumi.set', (['self', '"""access_controls"""', 'value'], {}), "(self, 'access_controls', value)\n", (16324, 16356), False, 'import pulumi\n'), ((16730, 16763), 'pulumi.get', 'pulumi.get', (['self', '"""authorization"""'], {}), "(self, 'authorization')\n", (16740, 16763), False, 'import pulumi\n'), ((16864, 16904), 'pulumi.set', 'pulumi.set', (['self', '"""authorization"""', 'value'], {}), "(self, 'authorization', value)\n", (16874, 16904), False, 'import pulumi\n'), ((17072, 17102), 'pulumi.get', 'pulumi.get', (['self', '"""cluster_id"""'], {}), "(self, 'cluster_id')\n", (17082, 17102), False, 'import pulumi\n'), ((17197, 17234), 'pulumi.set', 'pulumi.set', (['self', '"""cluster_id"""', 'value'], {}), "(self, 'cluster_id', value)\n", (17207, 17234), False, 'import pulumi\n'), ((17422, 17459), 'pulumi.get', 'pulumi.get', (['self', '"""cluster_policy_id"""'], {}), "(self, 'cluster_policy_id')\n", (17432, 17459), False, 'import pulumi\n'), ((17568, 17612), 'pulumi.set', 'pulumi.set', (['self', 
'"""cluster_policy_id"""', 'value'], {}), "(self, 'cluster_policy_id', value)\n", (17578, 17612), False, 'import pulumi\n'), ((17786, 17818), 'pulumi.get', 'pulumi.get', (['self', '"""directory_id"""'], {}), "(self, 'directory_id')\n", (17796, 17818), False, 'import pulumi\n'), ((17917, 17956), 'pulumi.set', 'pulumi.set', (['self', '"""directory_id"""', 'value'], {}), "(self, 'directory_id', value)\n", (17927, 17956), False, 'import pulumi\n'), ((18139, 18173), 'pulumi.get', 'pulumi.get', (['self', '"""directory_path"""'], {}), "(self, 'directory_path')\n", (18149, 18173), False, 'import pulumi\n'), ((18276, 18317), 'pulumi.set', 'pulumi.set', (['self', '"""directory_path"""', 'value'], {}), "(self, 'directory_path', value)\n", (18286, 18317), False, 'import pulumi\n'), ((18448, 18481), 'pulumi.get', 'pulumi.get', (['self', '"""experiment_id"""'], {}), "(self, 'experiment_id')\n", (18458, 18481), False, 'import pulumi\n'), ((18582, 18622), 'pulumi.set', 'pulumi.set', (['self', '"""experiment_id"""', 'value'], {}), "(self, 'experiment_id', value)\n", (18592, 18622), False, 'import pulumi\n'), ((18807, 18843), 'pulumi.get', 'pulumi.get', (['self', '"""instance_pool_id"""'], {}), "(self, 'instance_pool_id')\n", (18817, 18843), False, 'import pulumi\n'), ((18950, 18993), 'pulumi.set', 'pulumi.set', (['self', '"""instance_pool_id"""', 'value'], {}), "(self, 'instance_pool_id', value)\n", (18960, 18993), False, 'import pulumi\n'), ((19149, 19175), 'pulumi.get', 'pulumi.get', (['self', '"""job_id"""'], {}), "(self, 'job_id')\n", (19159, 19175), False, 'import pulumi\n'), ((19262, 19295), 'pulumi.set', 'pulumi.set', (['self', '"""job_id"""', 'value'], {}), "(self, 'job_id', value)\n", (19272, 19295), False, 'import pulumi\n'), ((19486, 19517), 'pulumi.get', 'pulumi.get', (['self', '"""notebook_id"""'], {}), "(self, 'notebook_id')\n", (19496, 19517), False, 'import pulumi\n'), ((19614, 19652), 'pulumi.set', 'pulumi.set', (['self', '"""notebook_id"""', 'value'], {}), "(self, 
'notebook_id', value)\n", (19624, 19652), False, 'import pulumi\n'), ((19832, 19865), 'pulumi.get', 'pulumi.get', (['self', '"""notebook_path"""'], {}), "(self, 'notebook_path')\n", (19842, 19865), False, 'import pulumi\n'), ((19966, 20006), 'pulumi.set', 'pulumi.set', (['self', '"""notebook_path"""', 'value'], {}), "(self, 'notebook_path', value)\n", (19976, 20006), False, 'import pulumi\n'), ((20186, 20217), 'pulumi.get', 'pulumi.get', (['self', '"""object_type"""'], {}), "(self, 'object_type')\n", (20196, 20217), False, 'import pulumi\n'), ((20314, 20352), 'pulumi.set', 'pulumi.set', (['self', '"""object_type"""', 'value'], {}), "(self, 'object_type', value)\n", (20324, 20352), False, 'import pulumi\n'), ((20494, 20533), 'pulumi.get', 'pulumi.get', (['self', '"""registered_model_id"""'], {}), "(self, 'registered_model_id')\n", (20504, 20533), False, 'import pulumi\n'), ((20646, 20692), 'pulumi.set', 'pulumi.set', (['self', '"""registered_model_id"""', 'value'], {}), "(self, 'registered_model_id', value)\n", (20656, 20692), False, 'import pulumi\n'), ((20851, 20878), 'pulumi.get', 'pulumi.get', (['self', '"""repo_id"""'], {}), "(self, 'repo_id')\n", (20861, 20878), False, 'import pulumi\n'), ((20967, 21001), 'pulumi.set', 'pulumi.set', (['self', '"""repo_id"""', 'value'], {}), "(self, 'repo_id', value)\n", (20977, 21001), False, 'import pulumi\n'), ((21215, 21244), 'pulumi.get', 'pulumi.get', (['self', '"""repo_path"""'], {}), "(self, 'repo_path')\n", (21225, 21244), False, 'import pulumi\n'), ((21337, 21373), 'pulumi.set', 'pulumi.set', (['self', '"""repo_path"""', 'value'], {}), "(self, 'repo_path', value)\n", (21347, 21373), False, 'import pulumi\n'), ((21501, 21533), 'pulumi.get', 'pulumi.get', (['self', '"""sql_alert_id"""'], {}), "(self, 'sql_alert_id')\n", (21511, 21533), False, 'import pulumi\n'), ((21632, 21671), 'pulumi.set', 'pulumi.set', (['self', '"""sql_alert_id"""', 'value'], {}), "(self, 'sql_alert_id', value)\n", (21642, 21671), False, 'import 
pulumi\n'), ((21807, 21843), 'pulumi.get', 'pulumi.get', (['self', '"""sql_dashboard_id"""'], {}), "(self, 'sql_dashboard_id')\n", (21817, 21843), False, 'import pulumi\n'), ((21950, 21993), 'pulumi.set', 'pulumi.set', (['self', '"""sql_dashboard_id"""', 'value'], {}), "(self, 'sql_dashboard_id', value)\n", (21960, 21993), False, 'import pulumi\n'), ((22127, 22162), 'pulumi.get', 'pulumi.get', (['self', '"""sql_endpoint_id"""'], {}), "(self, 'sql_endpoint_id')\n", (22137, 22162), False, 'import pulumi\n'), ((22267, 22309), 'pulumi.set', 'pulumi.set', (['self', '"""sql_endpoint_id"""', 'value'], {}), "(self, 'sql_endpoint_id', value)\n", (22277, 22309), False, 'import pulumi\n'), ((22437, 22469), 'pulumi.get', 'pulumi.get', (['self', '"""sql_query_id"""'], {}), "(self, 'sql_query_id')\n", (22447, 22469), False, 'import pulumi\n'), ((22568, 22607), 'pulumi.set', 'pulumi.set', (['self', '"""sql_query_id"""', 'value'], {}), "(self, 'sql_query_id', value)\n", (22578, 22607), False, 'import pulumi\n'), ((34593, 34628), 'pulumi.get', 'pulumi.get', (['self', '"""access_controls"""'], {}), "(self, 'access_controls')\n", (34603, 34628), False, 'import pulumi\n'), ((35003, 35036), 'pulumi.get', 'pulumi.get', (['self', '"""authorization"""'], {}), "(self, 'authorization')\n", (35013, 35036), False, 'import pulumi\n'), ((35205, 35235), 'pulumi.get', 'pulumi.get', (['self', '"""cluster_id"""'], {}), "(self, 'cluster_id')\n", (35215, 35235), False, 'import pulumi\n'), ((35424, 35461), 'pulumi.get', 'pulumi.get', (['self', '"""cluster_policy_id"""'], {}), "(self, 'cluster_policy_id')\n", (35434, 35461), False, 'import pulumi\n'), ((35636, 35668), 'pulumi.get', 'pulumi.get', (['self', '"""directory_id"""'], {}), "(self, 'directory_id')\n", (35646, 35668), False, 'import pulumi\n'), ((35852, 35886), 'pulumi.get', 'pulumi.get', (['self', '"""directory_path"""'], {}), "(self, 'directory_path')\n", (35862, 35886), False, 'import pulumi\n'), ((36018, 36051), 'pulumi.get', 'pulumi.get', 
(['self', '"""experiment_id"""'], {}), "(self, 'experiment_id')\n", (36028, 36051), False, 'import pulumi\n'), ((36237, 36273), 'pulumi.get', 'pulumi.get', (['self', '"""instance_pool_id"""'], {}), "(self, 'instance_pool_id')\n", (36247, 36273), False, 'import pulumi\n'), ((36430, 36456), 'pulumi.get', 'pulumi.get', (['self', '"""job_id"""'], {}), "(self, 'job_id')\n", (36440, 36456), False, 'import pulumi\n'), ((36648, 36679), 'pulumi.get', 'pulumi.get', (['self', '"""notebook_id"""'], {}), "(self, 'notebook_id')\n", (36658, 36679), False, 'import pulumi\n'), ((36860, 36893), 'pulumi.get', 'pulumi.get', (['self', '"""notebook_path"""'], {}), "(self, 'notebook_path')\n", (36870, 36893), False, 'import pulumi\n'), ((37064, 37095), 'pulumi.get', 'pulumi.get', (['self', '"""object_type"""'], {}), "(self, 'object_type')\n", (37074, 37095), False, 'import pulumi\n'), ((37238, 37277), 'pulumi.get', 'pulumi.get', (['self', '"""registered_model_id"""'], {}), "(self, 'registered_model_id')\n", (37248, 37277), False, 'import pulumi\n'), ((37437, 37464), 'pulumi.get', 'pulumi.get', (['self', '"""repo_id"""'], {}), "(self, 'repo_id')\n", (37447, 37464), False, 'import pulumi\n'), ((37679, 37708), 'pulumi.get', 'pulumi.get', (['self', '"""repo_path"""'], {}), "(self, 'repo_path')\n", (37689, 37708), False, 'import pulumi\n'), ((37837, 37869), 'pulumi.get', 'pulumi.get', (['self', '"""sql_alert_id"""'], {}), "(self, 'sql_alert_id')\n", (37847, 37869), False, 'import pulumi\n'), ((38006, 38042), 'pulumi.get', 'pulumi.get', (['self', '"""sql_dashboard_id"""'], {}), "(self, 'sql_dashboard_id')\n", (38016, 38042), False, 'import pulumi\n'), ((38177, 38212), 'pulumi.get', 'pulumi.get', (['self', '"""sql_endpoint_id"""'], {}), "(self, 'sql_endpoint_id')\n", (38187, 38212), False, 'import pulumi\n'), ((38341, 38373), 'pulumi.get', 'pulumi.get', (['self', '"""sql_query_id"""'], {}), "(self, 'sql_query_id')\n", (38351, 38373), False, 'import pulumi\n'), ((3037, 3089), 'pulumi.set', 
'pulumi.set', (['__self__', '"""authorization"""', 'authorization'], {}), "(__self__, 'authorization', authorization)\n", (3047, 3089), False, 'import pulumi\n'), ((3137, 3183), 'pulumi.set', 'pulumi.set', (['__self__', '"""cluster_id"""', 'cluster_id'], {}), "(__self__, 'cluster_id', cluster_id)\n", (3147, 3183), False, 'import pulumi\n'), ((3238, 3298), 'pulumi.set', 'pulumi.set', (['__self__', '"""cluster_policy_id"""', 'cluster_policy_id'], {}), "(__self__, 'cluster_policy_id', cluster_policy_id)\n", (3248, 3298), False, 'import pulumi\n'), ((3348, 3398), 'pulumi.set', 'pulumi.set', (['__self__', '"""directory_id"""', 'directory_id'], {}), "(__self__, 'directory_id', directory_id)\n", (3358, 3398), False, 'import pulumi\n'), ((3450, 3504), 'pulumi.set', 'pulumi.set', (['__self__', '"""directory_path"""', 'directory_path'], {}), "(__self__, 'directory_path', directory_path)\n", (3460, 3504), False, 'import pulumi\n'), ((3555, 3607), 'pulumi.set', 'pulumi.set', (['__self__', '"""experiment_id"""', 'experiment_id'], {}), "(__self__, 'experiment_id', experiment_id)\n", (3565, 3607), False, 'import pulumi\n'), ((3661, 3719), 'pulumi.set', 'pulumi.set', (['__self__', '"""instance_pool_id"""', 'instance_pool_id'], {}), "(__self__, 'instance_pool_id', instance_pool_id)\n", (3671, 3719), False, 'import pulumi\n'), ((3763, 3801), 'pulumi.set', 'pulumi.set', (['__self__', '"""job_id"""', 'job_id'], {}), "(__self__, 'job_id', job_id)\n", (3773, 3801), False, 'import pulumi\n'), ((3850, 3898), 'pulumi.set', 'pulumi.set', (['__self__', '"""notebook_id"""', 'notebook_id'], {}), "(__self__, 'notebook_id', notebook_id)\n", (3860, 3898), False, 'import pulumi\n'), ((3949, 4001), 'pulumi.set', 'pulumi.set', (['__self__', '"""notebook_path"""', 'notebook_path'], {}), "(__self__, 'notebook_path', notebook_path)\n", (3959, 4001), False, 'import pulumi\n'), ((4050, 4098), 'pulumi.set', 'pulumi.set', (['__self__', '"""object_type"""', 'object_type'], {}), "(__self__, 'object_type', 
object_type)\n", (4060, 4098), False, 'import pulumi\n'), ((4155, 4219), 'pulumi.set', 'pulumi.set', (['__self__', '"""registered_model_id"""', 'registered_model_id'], {}), "(__self__, 'registered_model_id', registered_model_id)\n", (4165, 4219), False, 'import pulumi\n'), ((4264, 4304), 'pulumi.set', 'pulumi.set', (['__self__', '"""repo_id"""', 'repo_id'], {}), "(__self__, 'repo_id', repo_id)\n", (4274, 4304), False, 'import pulumi\n'), ((4351, 4395), 'pulumi.set', 'pulumi.set', (['__self__', '"""repo_path"""', 'repo_path'], {}), "(__self__, 'repo_path', repo_path)\n", (4361, 4395), False, 'import pulumi\n'), ((4445, 4495), 'pulumi.set', 'pulumi.set', (['__self__', '"""sql_alert_id"""', 'sql_alert_id'], {}), "(__self__, 'sql_alert_id', sql_alert_id)\n", (4455, 4495), False, 'import pulumi\n'), ((4549, 4607), 'pulumi.set', 'pulumi.set', (['__self__', '"""sql_dashboard_id"""', 'sql_dashboard_id'], {}), "(__self__, 'sql_dashboard_id', sql_dashboard_id)\n", (4559, 4607), False, 'import pulumi\n'), ((4660, 4716), 'pulumi.set', 'pulumi.set', (['__self__', '"""sql_endpoint_id"""', 'sql_endpoint_id'], {}), "(__self__, 'sql_endpoint_id', sql_endpoint_id)\n", (4670, 4716), False, 'import pulumi\n'), ((4766, 4816), 'pulumi.set', 'pulumi.set', (['__self__', '"""sql_query_id"""', 'sql_query_id'], {}), "(__self__, 'sql_query_id', sql_query_id)\n", (4776, 4816), False, 'import pulumi\n'), ((14051, 14107), 'pulumi.set', 'pulumi.set', (['__self__', '"""access_controls"""', 'access_controls'], {}), "(__self__, 'access_controls', access_controls)\n", (14061, 14107), False, 'import pulumi\n'), ((14158, 14210), 'pulumi.set', 'pulumi.set', (['__self__', '"""authorization"""', 'authorization'], {}), "(__self__, 'authorization', authorization)\n", (14168, 14210), False, 'import pulumi\n'), ((14258, 14304), 'pulumi.set', 'pulumi.set', (['__self__', '"""cluster_id"""', 'cluster_id'], {}), "(__self__, 'cluster_id', cluster_id)\n", (14268, 14304), False, 'import pulumi\n'), ((14359, 14419), 
'pulumi.set', 'pulumi.set', (['__self__', '"""cluster_policy_id"""', 'cluster_policy_id'], {}), "(__self__, 'cluster_policy_id', cluster_policy_id)\n", (14369, 14419), False, 'import pulumi\n'), ((14469, 14519), 'pulumi.set', 'pulumi.set', (['__self__', '"""directory_id"""', 'directory_id'], {}), "(__self__, 'directory_id', directory_id)\n", (14479, 14519), False, 'import pulumi\n'), ((14571, 14625), 'pulumi.set', 'pulumi.set', (['__self__', '"""directory_path"""', 'directory_path'], {}), "(__self__, 'directory_path', directory_path)\n", (14581, 14625), False, 'import pulumi\n'), ((14676, 14728), 'pulumi.set', 'pulumi.set', (['__self__', '"""experiment_id"""', 'experiment_id'], {}), "(__self__, 'experiment_id', experiment_id)\n", (14686, 14728), False, 'import pulumi\n'), ((14782, 14840), 'pulumi.set', 'pulumi.set', (['__self__', '"""instance_pool_id"""', 'instance_pool_id'], {}), "(__self__, 'instance_pool_id', instance_pool_id)\n", (14792, 14840), False, 'import pulumi\n'), ((14884, 14922), 'pulumi.set', 'pulumi.set', (['__self__', '"""job_id"""', 'job_id'], {}), "(__self__, 'job_id', job_id)\n", (14894, 14922), False, 'import pulumi\n'), ((14971, 15019), 'pulumi.set', 'pulumi.set', (['__self__', '"""notebook_id"""', 'notebook_id'], {}), "(__self__, 'notebook_id', notebook_id)\n", (14981, 15019), False, 'import pulumi\n'), ((15070, 15122), 'pulumi.set', 'pulumi.set', (['__self__', '"""notebook_path"""', 'notebook_path'], {}), "(__self__, 'notebook_path', notebook_path)\n", (15080, 15122), False, 'import pulumi\n'), ((15171, 15219), 'pulumi.set', 'pulumi.set', (['__self__', '"""object_type"""', 'object_type'], {}), "(__self__, 'object_type', object_type)\n", (15181, 15219), False, 'import pulumi\n'), ((15276, 15340), 'pulumi.set', 'pulumi.set', (['__self__', '"""registered_model_id"""', 'registered_model_id'], {}), "(__self__, 'registered_model_id', registered_model_id)\n", (15286, 15340), False, 'import pulumi\n'), ((15385, 15425), 'pulumi.set', 'pulumi.set', 
(['__self__', '"""repo_id"""', 'repo_id'], {}), "(__self__, 'repo_id', repo_id)\n", (15395, 15425), False, 'import pulumi\n'), ((15472, 15516), 'pulumi.set', 'pulumi.set', (['__self__', '"""repo_path"""', 'repo_path'], {}), "(__self__, 'repo_path', repo_path)\n", (15482, 15516), False, 'import pulumi\n'), ((15566, 15616), 'pulumi.set', 'pulumi.set', (['__self__', '"""sql_alert_id"""', 'sql_alert_id'], {}), "(__self__, 'sql_alert_id', sql_alert_id)\n", (15576, 15616), False, 'import pulumi\n'), ((15670, 15728), 'pulumi.set', 'pulumi.set', (['__self__', '"""sql_dashboard_id"""', 'sql_dashboard_id'], {}), "(__self__, 'sql_dashboard_id', sql_dashboard_id)\n", (15680, 15728), False, 'import pulumi\n'), ((15781, 15837), 'pulumi.set', 'pulumi.set', (['__self__', '"""sql_endpoint_id"""', 'sql_endpoint_id'], {}), "(__self__, 'sql_endpoint_id', sql_endpoint_id)\n", (15791, 15837), False, 'import pulumi\n'), ((15887, 15937), 'pulumi.set', 'pulumi.set', (['__self__', '"""sql_query_id"""', 'sql_query_id'], {}), "(__self__, 'sql_query_id', sql_query_id)\n", (15897, 15937), False, 'import pulumi\n'), ((28204, 28228), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (28226, 28228), False, 'import pulumi\n'), ((33127, 33156), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (33149, 33156), False, 'import pulumi\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Train a parametric GP on the airline-delay dataset and plot ARD weights.

@author: <NAME>
"""
import sys
sys.path.insert(0, '../PGP/')
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from parametric_GP import PGP

if __name__ == "__main__":
    # Load the raw airline data.
    data = pd.read_pickle('airline.pickle')

    # Convert hhmm clock times to minutes since midnight.
    data.ArrTime = 60 * np.floor(data.ArrTime / 100) + np.mod(data.ArrTime, 100)
    data.DepTime = 60 * np.floor(data.DepTime / 100) + np.mod(data.DepTime, 100)

    # Targets (arrival delay) and covariates.
    Y = data['ArrDelay'].values
    names = ['Month', 'DayofMonth', 'DayOfWeek', 'plane_age', 'AirTime',
             'Distance', 'ArrTime', 'DepTime']
    X = data[names].values
    N = len(data)
    np.random.seed(N)

    # Shuffle, then hold out the last third as the test split.
    perm = np.random.permutation(N)
    X = X[perm]
    Y = Y[perm]
    split = int(2 * N / 3)
    XT = X[split:N]
    YT = Y[split:N]
    X = X[:split]
    Y = Y[:split]

    # Standardize the targets using training-set statistics.
    Ymean = Y.mean()
    Ystd = Y.std()
    Y = ((Y - Ymean) / Ystd).reshape(-1, 1)
    YT = ((YT - Ymean) / Ystd).reshape(-1, 1)

    # Rescale the inputs onto the unit hypercube [0, 1]^d.
    Xmin, Xmax = X.min(0), X.max(0)
    X = (X - Xmin) / (Xmax - Xmin)
    XT = (XT - Xmin) / (Xmax - Xmin)

    # Build and train the parametric GP with M = 500 pseudo-inputs.
    M = 500
    pgp = PGP(X, Y, M, max_iter=10000, N_batch=1000,
              monitor_likelihood=10, lrate=1e-3)
    pgp.train()

    # Predict on the held-out split and report mean-squared errors
    # (second line is the trivial predict-the-mean baseline).
    mean_star, var_star = pgp.predict(XT)
    print('MSE: %f' % ((mean_star - YT) ** 2).mean())
    print('MSE_mean: %f' % ((Y.mean() - YT) ** 2).mean())

    # Plot the ARD weights (inverse exponentiated length-scales).
    ARD = 1 / np.sqrt(np.exp(pgp.hyp[1:-1]))
    positions = np.arange(len(ARD))
    fig, ax = plt.subplots(figsize=(10, 5))
    plt.rcParams.update({'font.size': 16})
    ax.barh(positions, ARD)
    ax.set_yticks(positions)
    ax.set_yticklabels(names)
    ax.set_xlabel('ARD weights')
    plt.savefig('../Fig/Flights.eps', format='eps', dpi=1000)

    # Reference results:
    # MSE: 0.832810
# MSE_mean: 0.999799 | [
"pandas.read_pickle",
"sys.path.insert",
"matplotlib.pyplot.savefig",
"parametric_GP.PGP",
"numpy.floor",
"numpy.exp",
"matplotlib.pyplot.rcParams.update",
"numpy.random.seed",
"numpy.mod",
"matplotlib.pyplot.subplots",
"numpy.random.permutation"
] | [((83, 112), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../PGP/"""'], {}), "(0, '../PGP/')\n", (98, 112), False, 'import sys\n'), ((285, 317), 'pandas.read_pickle', 'pd.read_pickle', (['"""airline.pickle"""'], {}), "('airline.pickle')\n", (299, 317), True, 'import pandas as pd\n'), ((749, 766), 'numpy.random.seed', 'np.random.seed', (['N'], {}), '(N)\n', (763, 766), True, 'import numpy as np\n'), ((835, 859), 'numpy.random.permutation', 'np.random.permutation', (['N'], {}), '(N)\n', (856, 859), True, 'import numpy as np\n'), ((1357, 1435), 'parametric_GP.PGP', 'PGP', (['X', 'Y', 'M'], {'max_iter': '(10000)', 'N_batch': '(1000)', 'monitor_likelihood': '(10)', 'lrate': '(0.001)'}), '(X, Y, M, max_iter=10000, N_batch=1000, monitor_likelihood=10, lrate=0.001)\n', (1360, 1435), False, 'from parametric_GP import PGP\n'), ((1789, 1818), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1801, 1818), True, 'import matplotlib.pyplot as plt\n'), ((1822, 1860), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 16}"], {}), "({'font.size': 16})\n", (1841, 1860), True, 'import matplotlib.pyplot as plt\n'), ((1981, 2038), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../Fig/Flights.eps"""'], {'format': '"""eps"""', 'dpi': '(1000)'}), "('../Fig/Flights.eps', format='eps', dpi=1000)\n", (1992, 2038), True, 'import matplotlib.pyplot as plt\n'), ((430, 455), 'numpy.mod', 'np.mod', (['data.ArrTime', '(100)'], {}), '(data.ArrTime, 100)\n', (436, 455), True, 'import numpy as np\n'), ((505, 530), 'numpy.mod', 'np.mod', (['data.DepTime', '(100)'], {}), '(data.DepTime, 100)\n', (511, 530), True, 'import numpy as np\n'), ((403, 431), 'numpy.floor', 'np.floor', (['(data.ArrTime / 100)'], {}), '(data.ArrTime / 100)\n', (411, 431), True, 'import numpy as np\n'), ((478, 506), 'numpy.floor', 'np.floor', (['(data.DepTime / 100)'], {}), '(data.DepTime / 100)\n', (486, 506), True, 'import numpy as np\n'), 
((1719, 1740), 'numpy.exp', 'np.exp', (['pgp.hyp[1:-1]'], {}), '(pgp.hyp[1:-1])\n', (1725, 1740), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright 2018 Brocade Communications Systems LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`seccertmgmt_show` - PyFOS util for displaying certificates in the switch.
***********************************************************************************
The :mod:`seccertmgmt_show` util provides the option to display a certificate.
This module can be used to display a certificate. If the certificate entity \
and type are not provided, information for all certificates is displayed.
* Input:
| Infrastructure Options:
| -i,--ipaddr=IPADDR The IP address of the FOS switch.
| -L,--login=LOGIN The login name.
| -P,--password=PASSWORD The password.
| -s,--secured=MODE The HTTPS mode "self" or "CA" [OPTIONAL].
| -v,--verbose Verbose mode [OPTIONAL].
* Util Script Options:
| --certificate-entity=ENTITY-NAME Sets the certificate entity name.
| --certificate-type=CERT-TYPE Sets the certificate type.
| --is-hexdump-show Displays the raw hex data.
* Output:
* The certificate information.
.. function:: seccertmgmt_show.show_system_security_seccertmgmt(session)
* Displays the certificate and its information in the switch.
Example Usage of the Method:
ret = seccertmgmt_show.show_system_security_seccertmgmt(session, \
cert_entity, cert_type)
print (ret)
Details::
result = seccertmgmt_show.show_system_security_seccertmgmt(
session, \'cert\', \'https\')
* Input:
:param session: The session returned by the login.
:param cert_entity: The associated certificate entity.
:param cert_type: The associated certificate type.
* Output:
:rtype: A dictionary of return status matching the REST response.
*Use Cases*
1. Retrieve the certificate-related information.
"""
import sys
from pyfos import pyfos_auth
from pyfos import pyfos_util
from pyfos.pyfos_brocade_security import security_certificate
from pyfos.utils import brcd_util
def _show_cert(session, restobject):
    # Thin wrapper: perform the REST GET for the given certificate object.
    return restobject.get(session)
def show_security_certificate(session, cert_entity, cert_type):
    """Return certificate information for the given entity and type.

    :param session: The session returned by the login.
    :param cert_entity: The associated certificate entity (e.g. 'cert').
    :param cert_type: The associated certificate type (e.g. 'https').
    :return: A dictionary of return status matching the REST response.
    """
    seccertmgmt_obj = security_certificate()
    seccertmgmt_obj.set_certificate_entity(cert_entity)
    # Bug fix: the type was previously written via set_certificate_entity(),
    # which silently overwrote the entity and never set the type.
    seccertmgmt_obj.set_certificate_type(cert_type)
    result = _show_cert(session, seccertmgmt_obj)
    return result
def main(argv):
    """Parse CLI options, fetch the certificate information, and print it.

    :param argv: Command-line arguments (excluding the program name).
    """
    filter_attributes = ['certificate_entity', 'certificate_type']
    parsed = brcd_util.parse(argv, security_certificate, filter_attributes)
    session = brcd_util.getsession(parsed)
    response = _show_cert(parsed['session'], parsed['utilobject'])
    pyfos_util.response_print(response)
    pyfos_auth.logout(session)
# Script entry point: forward the CLI arguments (without the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
| [
"pyfos.pyfos_brocade_security.security_certificate",
"pyfos.pyfos_auth.logout",
"pyfos.utils.brcd_util.getsession",
"pyfos.utils.brcd_util.parse",
"pyfos.pyfos_util.response_print"
] | [((2800, 2822), 'pyfos.pyfos_brocade_security.security_certificate', 'security_certificate', ([], {}), '()\n', (2820, 2822), False, 'from pyfos.pyfos_brocade_security import security_certificate\n'), ((3140, 3192), 'pyfos.utils.brcd_util.parse', 'brcd_util.parse', (['argv', 'security_certificate', 'filters'], {}), '(argv, security_certificate, filters)\n', (3155, 3192), False, 'from pyfos.utils import brcd_util\n'), ((3208, 3236), 'pyfos.utils.brcd_util.getsession', 'brcd_util.getsession', (['inputs'], {}), '(inputs)\n', (3228, 3236), False, 'from pyfos.utils import brcd_util\n'), ((3308, 3341), 'pyfos.pyfos_util.response_print', 'pyfos_util.response_print', (['result'], {}), '(result)\n', (3333, 3341), False, 'from pyfos import pyfos_util\n'), ((3347, 3373), 'pyfos.pyfos_auth.logout', 'pyfos_auth.logout', (['session'], {}), '(session)\n', (3364, 3373), False, 'from pyfos import pyfos_auth\n')] |
from flask import Flask, render_template, session, redirect, url_for, request, flash, abort, current_app, make_response
from flask_login import login_user, logout_user, login_required, current_user
from . import admin
from .. import db
from ..models import User, Post
from ..form import PostForm
from functools import wraps
from flask import g, request, redirect, url_for
@admin.route('/admin', methods=['GET', 'POST'])
@login_required
def admin():
    """Render the admin page and handle new post submissions.

    GET  -> render the (empty) post form.
    POST -> validate the form; on success persist a new Post and redirect
            to the main page, otherwise flash an error and re-render.

    NOTE(review): defining a view named ``admin`` shadows the module-level
    ``admin`` blueprint after this definition; renaming the view would
    change the Flask endpoint name, so it is flagged rather than changed.
    """
    form = PostForm()
    if request.method == 'POST':
        if form.validate():
            post = Post(body=form.body.data, title=form.title.data)
            db.session.add(post)
            db.session.commit()
            return redirect(url_for('main.home'))
        # Bug fix: the flash used to fire on every GET request as well;
        # it now fires only when a submitted form fails validation.
        # (Debug print() statements were also removed.)
        flash('Invalid username or password.')
    return render_template('admin.html', title='Admin', form=form)
| [
"flask.render_template",
"flask.flash",
"flask.url_for"
] | [((834, 889), 'flask.render_template', 'render_template', (['"""admin.html"""'], {'title': '"""Admin"""', 'form': 'form'}), "('admin.html', title='Admin', form=form)\n", (849, 889), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash, abort, current_app, make_response\n'), ((784, 822), 'flask.flash', 'flash', (['"""Invalid username or password."""'], {}), "('Invalid username or password.')\n", (789, 822), False, 'from flask import Flask, render_template, session, redirect, url_for, request, flash, abort, current_app, make_response\n'), ((754, 774), 'flask.url_for', 'url_for', (['"""main.home"""'], {}), "('main.home')\n", (761, 774), False, 'from flask import g, request, redirect, url_for\n')] |
"""!
@brief Neural Network: Self-Organized Feature Map
@details Implementation based on paper @cite article::nnet::som::1, @cite article::nnet::som::2.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
import math
import random
import matplotlib.pyplot as plt
import pyclustering.core.som_wrapper as wrapper
from pyclustering.core.wrapper import ccore_library
from pyclustering.utils import euclidean_distance_square
from pyclustering.utils.dimension import dimension_info
from enum import IntEnum
class type_conn(IntEnum):
    """!
    @brief Enumeration of grid connection topologies for SOM neurons.

    @see som

    """

    ## Rectangular grid: each oscillator connects to its left, upper, right and lower neighbors (4-neighborhood).
    grid_four = 0

    ## Rectangular grid: each oscillator connects to all eight surrounding neighbors (left, upper-left, upper, upper-right, right, lower-right, lower, lower-left).
    grid_eight = 1

    ## Honeycomb grid: each oscillator connects to six neighbors (left, upper-left, upper-right, right, lower-right, lower-left).
    honeycomb = 2

    ## No static grid: the existence of each connection is decided by the SOM neighborhood rule at every simulation step.
    func_neighbor = 3
class type_init(IntEnum):
    """!
    @brief Enumeration of weight-initialization strategies for SOM.

    @see som

    """

    ## Weights are drawn from a Gaussian distribution with mean 0 and deviation 1.
    random = 0

    ## Weights are drawn from a Gaussian distribution centered at the input-data centroid with deviation 1.
    random_centroid = 1

    ## Weights are drawn from a Gaussian distribution centered at the input-data centroid, spread over the data surface.
    random_surface = 2

    ## Weights are placed on a uniform grid that covers the whole surface of the input data.
    uniform_grid = 3
class som_parameters:
    """!
    @brief Container for the tunable parameters of a self-organized map.

    """

    def __init__(self):
        """!
        @brief Creates SOM parameters with their default values.

        """
        ## Strategy used to initialize the neuron weights (see `type_init`).
        self.init_type = type_init.uniform_grid

        ## Initial neighborhood radius; when left as `None` the SOM derives it from the map size.
        self.init_radius = None

        ## Initial learning rate.
        self.init_learn_rate = 0.1

        ## Stop threshold for the learning process when the autostop mode is enabled.
        self.adaptation_threshold = 0.001

        ## Seed for the random state (`None` means the current system time is used).
        self.random_state = None
class som:
"""!
@brief Represents self-organized feature map (SOM).
@details The self-organizing feature map (SOM) method is a powerful tool for the visualization of
of high-dimensional data. It converts complex, nonlinear statistical relationships between
high-dimensional data into simple geometric relationships on a low-dimensional display.
@details `ccore` option can be specified in order to control using C++ implementation of pyclustering library. By
default C++ implementation is on. C++ implementation improves performance of the self-organized feature
map.
Example:
@code
import random
from pyclustering.utils import read_sample
from pyclustering.nnet.som import som, type_conn, type_init, som_parameters
from pyclustering.samples.definitions import FCPS_SAMPLES
# read sample 'Lsun' from file
sample = read_sample(FCPS_SAMPLES.SAMPLE_LSUN)
# create SOM parameters
parameters = som_parameters()
# create self-organized feature map with size 7x7
rows = 10 # five rows
cols = 10 # five columns
structure = type_conn.grid_four; # each neuron has max. four neighbors.
network = som(rows, cols, structure, parameters)
# train network on 'Lsun' sample during 100 epouchs.
network.train(sample, 100)
# simulate trained network using randomly modified point from input dataset.
index_point = random.randint(0, len(sample) - 1)
point = sample[index_point] # obtain randomly point from data
point[0] += random.random() * 0.2 # change randomly X-coordinate
point[1] += random.random() * 0.2 # change randomly Y-coordinate
index_winner = network.simulate(point)
# check what are objects from input data are much close to randomly modified.
index_similar_objects = network.capture_objects[index_winner]
# neuron contains information of encoded objects
print("Point '%s' is similar to objects with indexes '%s'." % (str(point), str(index_similar_objects)))
print("Coordinates of similar objects:")
for index in index_similar_objects: print("\tPoint:", sample[index])
# result visualization:
# show distance matrix (U-matrix).
network.show_distance_matrix()
# show density matrix (P-matrix).
network.show_density_matrix()
# show winner matrix.
network.show_winner_matrix()
# show self-organized map.
network.show_network()
@endcode
There is a visualization of 'Target' sample that was done by the self-organized feature map:
@image html target_som_processing.png
"""
@property
def size(self):
"""!
@brief Return size of self-organized map that is defined by total number of neurons.
@return (uint) Size of self-organized map (number of neurons).
"""
if self.__ccore_som_pointer is not None:
self._size = wrapper.som_get_size(self.__ccore_som_pointer)
return self._size
@property
def weights(self):
"""!
@brief Return weight of each neuron.
@return (list) Weights of each neuron.
"""
if self.__ccore_som_pointer is not None:
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
return self._weights
@property
def awards(self):
"""!
@brief Return amount of captured objects by each neuron after training.
@return (list) Amount of captured objects by each neuron.
@see train()
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
return self._award
@property
def capture_objects(self):
"""!
@brief Returns indexes of captured objects by each neuron.
@details For example, a network with size 2x2 has been trained on a sample with five objects. Suppose neuron #1
won an object with index `1`, neuron #2 won objects `0`, `3`, `4`, neuron #3 did not won anything and
finally neuron #4 won an object with index `2`. Thus, for this example we will have the following
output `[[1], [0, 3, 4], [], [2]]`.
@return (list) Indexes of captured objects by each neuron.
"""
if self.__ccore_som_pointer is not None:
self._capture_objects = wrapper.som_get_capture_objects(self.__ccore_som_pointer)
return self._capture_objects
def __init__(self, rows, cols, conn_type=type_conn.grid_eight, parameters=None, ccore=True):
"""!
@brief Constructor of self-organized map.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@param[in] conn_type (type_conn): Type of connection between oscillators in the network (grid four, grid eight, honeycomb, function neighbour).
@param[in] parameters (som_parameters): Other specific parameters.
@param[in] ccore (bool): If True simulation is performed by CCORE library (C++ implementation of pyclustering).
"""
# some of these parameters are required despite core implementation, for example, for network visualization.
self._cols = cols
self._rows = rows
self._size = cols * rows
self._conn_type = conn_type
self._data = None
self._neighbors = None
self._local_radius = 0.0
self._learn_rate = 0.0
self.__ccore_som_pointer = None
self._params = parameters or som_parameters()
if self._params.init_radius is None:
self._params.init_radius = self.__initialize_initial_radius(rows, cols)
if (ccore is True) and ccore_library.workable():
self.__ccore_som_pointer = wrapper.som_create(rows, cols, conn_type, self._params)
else:
# location
self._location = self.__initialize_locations(rows, cols)
# default weights
self._weights = [[0.0]] * self._size
# awards
self._award = [0] * self._size
# captured objects
self._capture_objects = [[] for i in range(self._size)]
# distances - calculate and store them only during training
self._sqrt_distances = None
# connections
if conn_type != type_conn.func_neighbor:
self._create_connections(conn_type)
def __del__(self):
"""!
@brief Destructor of the self-organized feature map.
"""
if self.__ccore_som_pointer is not None:
wrapper.som_destroy(self.__ccore_som_pointer)
    def __len__(self):
        """!
        @brief Returns size of the network that defines by amount of neuron in it.

        @return (uint) Size of self-organized map (amount of neurons).

        """
        # Cached size set in the constructor (rows * cols); the `size`
        # property is the one that re-queries the C++ backend.
        return self._size
def __getstate__(self):
"""
@brief Returns state of SOM network that can be used to store network.
"""
if self.__ccore_som_pointer is not None:
self.__download_dump_from_ccore()
return self.__get_dump_from_python(True)
return self.__get_dump_from_python(False)
def __setstate__(self, som_state):
    """
    @brief Restores the network from a state dictionary produced by __getstate__.
    """
    state = som_state['state']
    # restore through CCORE only when the dump came from CCORE and the library is usable here
    if som_state['ccore'] is True and ccore_library.workable():
        self.__upload_dump_to_ccore(state)
    else:
        self.__upload_dump_to_python(state)
def __initialize_initial_radius(self, rows, cols):
    """!
    @brief Picks the initial neighbourhood radius from the map dimensions.
    @param[in] rows (uint): Number of neurons in the column (number of rows).
    @param[in] cols (uint): Number of neurons in the row (number of columns).
    @return (float) Value of initial radius.
    """
    # larger maps start with a wider neighbourhood
    if (cols + rows) / 4.0 > 1.0:
        return 2.0
    if (cols > 1) and (rows > 1):
        return 1.5
    return 1.0
def __initialize_locations(self, rows, cols):
    """!
    @brief Builds grid coordinates of each neuron in the map, in row-major order.
    @param[in] rows (uint): Number of neurons in the column (number of rows).
    @param[in] cols (uint): Number of neurons in the row (number of columns).
    @return (list) List of [row, col] float coordinates of each neuron in map.
    """
    return [[float(row), float(col)] for row in range(rows) for col in range(cols)]
def __initialize_distances(self, size, location):
    """!
    @brief Builds the symmetric matrix of squared Euclidean distances between neuron grid locations.
    @param[in] size (uint): Amount of neurons in the network.
    @param[in] location (list): List of coordinates of each neuron in the network.
    @return (list) Distance matrix between neurons in the network.
    """
    matrix = [[0.0] * size for _ in range(size)]
    # fill the upper triangle (diagonal included) and mirror it
    for row in range(size):
        for col in range(row, size):
            distance = euclidean_distance_square(location[row], location[col])
            matrix[row][col] = distance
            matrix[col][row] = distance
    return matrix
def _create_initial_weights(self, init_type):
    """!
    @brief Creates initial weights for neurons in line with the specified initialization.
    @param[in] init_type (type_init): Type of initialization of initial neuron weights (random,
                random in center of the input data, random distributed in data, distributed
                in line with uniform grid).
    """
    dim_info = dimension_info(self._data)
    # step between adjacent neurons along each axis; only meaningful for the uniform grid
    step_x = dim_info.get_center()[0]
    if self._rows > 1:
        step_x = dim_info.get_width()[0] / (self._rows - 1)
    step_y = 0.0
    if dim_info.get_dimensions() > 1:
        step_y = dim_info.get_center()[1]
        if self._cols > 1:
            step_y = dim_info.get_width()[1] / (self._cols - 1)
    # generate weights (topological coordinates); seeding makes initialization reproducible
    random.seed(self._params.random_state)
    # Uniform grid.
    if init_type == type_init.uniform_grid:
        # Predefined weights spread evenly across the bounding box of the input data.
        self._weights = [[[] for i in range(dim_info.get_dimensions())] for j in range(self._size)]
        for i in range(self._size):
            location = self._location[i]
            for dim in range(dim_info.get_dimensions()):
                if dim == 0:
                    if self._rows > 1:
                        self._weights[i][dim] = dim_info.get_minimum_coordinate()[dim] + step_x * location[dim]
                    else:
                        self._weights[i][dim] = dim_info.get_center()[dim]
                elif dim == 1:
                    if self._cols > 1:
                        self._weights[i][dim] = dim_info.get_minimum_coordinate()[dim] + step_y * location[dim]
                    else:
                        self._weights[i][dim] = dim_info.get_center()[dim]
                else:
                    # data dimensions beyond the 2D grid collapse onto the data center
                    self._weights[i][dim] = dim_info.get_center()[dim]
    elif init_type == type_init.random_surface:
        # Random weights over the full surface (bounding box) of the input data.
        self._weights = [
            [random.uniform(dim_info.get_minimum_coordinate()[i], dim_info.get_maximum_coordinate()[i]) for i in
             range(dim_info.get_dimensions())] for _ in range(self._size)]
    elif init_type == type_init.random_centroid:
        # Random weights near the center of the input data.
        self._weights = [[(random.random() + dim_info.get_center()[i]) for i in range(dim_info.get_dimensions())]
                         for _ in range(self._size)]
    else:
        # Fully random weights in [0, 1) per dimension.
        self._weights = [[random.random() for i in range(dim_info.get_dimensions())] for _ in range(self._size)]
def _create_connections(self, conn_type):
    """!
    @brief Create connections in line with input rule (grid four, grid eight, honeycomb, function neighbour).
    @param[in] conn_type (type_conn): Type of connection between oscillators in the network.
    """
    self._neighbors = [[] for index in range(self._size)]
    for index in range(0, self._size, 1):
        # candidate neighbour indices in the flattened row-major grid
        upper_index = index - self._cols
        upper_left_index = index - self._cols - 1
        upper_right_index = index - self._cols + 1
        lower_index = index + self._cols
        lower_left_index = index + self._cols - 1
        lower_right_index = index + self._cols + 1
        left_index = index - 1
        right_index = index + 1
        node_row_index = math.floor(index / self._cols)
        upper_row_index = node_row_index - 1
        lower_row_index = node_row_index + 1
        # vertical neighbours are shared by the four- and eight-connected grids
        if (conn_type == type_conn.grid_eight) or (conn_type == type_conn.grid_four):
            if upper_index >= 0:
                self._neighbors[index].append(upper_index)
            if lower_index < self._size:
                self._neighbors[index].append(lower_index)
        # horizontal neighbours; the row check rejects wrap-around at the grid edges
        if (conn_type == type_conn.grid_eight) or (conn_type == type_conn.grid_four) or (
                conn_type == type_conn.honeycomb):
            if (left_index >= 0) and (math.floor(left_index / self._cols) == node_row_index):
                self._neighbors[index].append(left_index)
            if (right_index < self._size) and (math.floor(right_index / self._cols) == node_row_index):
                self._neighbors[index].append(right_index)
        # diagonal neighbours only exist in the eight-connected grid
        if conn_type == type_conn.grid_eight:
            if (upper_left_index >= 0) and (math.floor(upper_left_index / self._cols) == upper_row_index):
                self._neighbors[index].append(upper_left_index)
            if (upper_right_index >= 0) and (math.floor(upper_right_index / self._cols) == upper_row_index):
                self._neighbors[index].append(upper_right_index)
            if (lower_left_index < self._size) and (math.floor(lower_left_index / self._cols) == lower_row_index):
                self._neighbors[index].append(lower_left_index)
            if (lower_right_index < self._size) and (math.floor(lower_right_index / self._cols) == lower_row_index):
                self._neighbors[index].append(lower_right_index)
        # honeycomb: the diagonal candidates depend on the parity of the row
        if conn_type == type_conn.honeycomb:
            if (node_row_index % 2) == 0:
                upper_left_index = index - self._cols
                upper_right_index = index - self._cols + 1
                lower_left_index = index + self._cols
                lower_right_index = index + self._cols + 1
            else:
                upper_left_index = index - self._cols - 1
                upper_right_index = index - self._cols
                lower_left_index = index + self._cols - 1
                lower_right_index = index + self._cols
            if (upper_left_index >= 0) and (math.floor(upper_left_index / self._cols) == upper_row_index):
                self._neighbors[index].append(upper_left_index)
            if (upper_right_index >= 0) and (math.floor(upper_right_index / self._cols) == upper_row_index):
                self._neighbors[index].append(upper_right_index)
            if (lower_left_index < self._size) and (math.floor(lower_left_index / self._cols) == lower_row_index):
                self._neighbors[index].append(lower_left_index)
            if (lower_right_index < self._size) and (math.floor(lower_right_index / self._cols) == lower_row_index):
                self._neighbors[index].append(lower_right_index)
def _competition(self, x):
    """!
    @brief Finds the winner neuron - the one whose weights are closest to the input pattern.
    @param[in] x (list): Input pattern from the input data set, for example it can be coordinates of point.
    @return (uint) Returns index of neuron that is winner.
    """
    # min() returns the first index on ties, matching a strict '<' scan over all neurons
    return min(range(self._size),
               key=lambda neuron: euclidean_distance_square(self._weights[neuron], x))
def _adaptation(self, index, x):
    """!
    @brief Change weight of neurons in line with won neuron.
    @param[in] index (uint): Index of neuron-winner.
    @param[in] x (list): Input pattern from the input data set.
    """
    dimension = len(self._weights[0])
    if self._conn_type == type_conn.func_neighbor:
        # function neighbour: every neuron within the local radius is adapted
        for neuron_index in range(self._size):
            distance = self._sqrt_distances[index][neuron_index]
            if distance < self._local_radius:
                # influence decays with grid distance from the winner
                influence = math.exp(-(distance / (2.0 * self._local_radius)))
                for i in range(dimension):
                    self._weights[neuron_index][i] = self._weights[neuron_index][
                        i] + self._learn_rate * influence * (
                        x[i] - self._weights[neuron_index][i])
    else:
        # grid topology: adapt the winner at full learn rate, then its direct neighbours
        for i in range(dimension):
            self._weights[index][i] = self._weights[index][i] + self._learn_rate * (x[i] - self._weights[index][i])
        for neighbor_index in self._neighbors[index]:
            distance = self._sqrt_distances[index][neighbor_index]
            if distance < self._local_radius:
                influence = math.exp(-(distance / (2.0 * self._local_radius)))
                for i in range(dimension):
                    self._weights[neighbor_index][i] = self._weights[neighbor_index][
                        i] + self._learn_rate * influence * (
                        x[i] - self._weights[neighbor_index][i])
def train(self, data, epochs, autostop=False):
    """!
    @brief Trains self-organized feature map (SOM).
    @param[in] data (list): Input data - list of points where each point is represented by list of features, for example coordinates.
    @param[in] epochs (uint): Number of epochs for training.
    @param[in] autostop (bool): Automatic termination of learning process when adaptation is not occurred.
    @return (uint) Number of learning iterations.
    """
    self._data = data
    # delegate the whole training to the C++ backend when it is available
    if self.__ccore_som_pointer is not None:
        return wrapper.som_train(self.__ccore_som_pointer, data, epochs, autostop)
    self._sqrt_distances = self.__initialize_distances(self._size, self._location)
    # reset per-neuron statistics from a possible previous training run
    for i in range(self._size):
        self._award[i] = 0
        self._capture_objects[i].clear()
    # weights
    self._create_initial_weights(self._params.init_type)
    previous_weights = None
    for epoch in range(1, epochs + 1):
        # Depression term of coupling: radius and learn rate both shrink each epoch
        self._local_radius = (self._params.init_radius * math.exp(-(epoch / epochs))) ** 2
        self._learn_rate = self._params.init_learn_rate * math.exp(-(epoch / epochs))
        # Clear statistics so awards reflect only the current epoch (needed for autostop)
        if autostop:
            for i in range(self._size):
                self._award[i] = 0
                self._capture_objects[i].clear()
        for i in range(len(self._data)):
            # Step 1: Competition:
            index = self._competition(self._data[i])
            # Step 2: Adaptation:
            self._adaptation(index, self._data[i])
            # Update statistics (always on the final epoch so results are available)
            if (autostop is True) or (epoch == epochs):
                self._award[index] += 1
                self._capture_objects[index].append(i)
        # Check requirement of stopping: terminate when weights barely moved
        if autostop:
            if previous_weights is not None:
                maximal_adaptation = self._get_maximal_adaptation(previous_weights)
                if maximal_adaptation < self._params.adaptation_threshold:
                    return epoch
            previous_weights = [item[:] for item in self._weights]
    return epochs
def simulate(self, input_pattern):
    """!
    @brief Processes input pattern (no learining) and returns index of neuron-winner.
    Using index of neuron winner catched object can be obtained using property capture_objects.
    @param[in] input_pattern (list): Input pattern.
    @return (uint) Returns index of neuron-winner.
    @see capture_objects
    """
    # without the C++ backend the winner is found by the python-side competition step
    if self.__ccore_som_pointer is None:
        return self._competition(input_pattern)
    return wrapper.som_simulate(self.__ccore_som_pointer, input_pattern)
def _get_maximal_adaptation(self, previous_weights):
    """!
    @brief Calculates maximum changes of weight in line with comparison between previous weights and current weights.
    @param[in] previous_weights (list): Weights from the previous step of learning process.
    @return (double) Value that represents maximum changes of weight after adaptation process.
    """
    # NOTE(review): dimension is taken from the input data, matching the original code;
    # weights are assumed to have the same dimensionality as the data points.
    dimension = len(self._data[0])
    # idiomatic replacement of the manual abs/max tracking loop; default covers
    # the degenerate empty-network case exactly as the original 0.0 initializer did
    return max(
        (abs(previous_weights[neuron_index][dim] - self._weights[neuron_index][dim])
         for neuron_index in range(self._size)
         for dim in range(dimension)),
        default=0.0)
def get_winner_number(self):
    """!
    @brief Calculates number of winner at the last step of learning process.
    @return (uint) Number of winner.
    """
    # refresh awards from the C++ backend when it owns the network state
    if self.__ccore_som_pointer is not None:
        self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
    # a neuron is a winner when it captured at least one object
    return sum(1 for neuron in range(self._size) if self._award[neuron] > 0)
def show_distance_matrix(self):
    """!
    @brief Shows gray visualization of U-matrix (distance matrix).
    @see get_distance_matrix()
    """
    u_matrix = self.get_distance_matrix()
    # render and block until the window is closed
    plt.imshow(u_matrix, cmap=plt.get_cmap('hot'), interpolation='kaiser')
    plt.title("U-Matrix")
    plt.colorbar()
    plt.show()
def get_distance_matrix(self):
    """!
    @brief Calculates distance matrix (U-matrix).
    @details The U-Matrix visualizes based on the distance in input space between a weight vector and its neighbors on map.
    @return (list) Distance matrix (U-matrix).
    @see show_distance_matrix()
    @see get_density_matrix()
    """
    if self.__ccore_som_pointer is not None:
        self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
        if self._conn_type != type_conn.func_neighbor:
            self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer)
    # The function-neighbour topology has no static neighbour list, so grid-eight
    # connections are built for visualization. The original code rebuilt them inside
    # the inner loop for every neuron; the call is loop-invariant and hoisted here.
    if self._conn_type == type_conn.func_neighbor:
        self._create_connections(type_conn.grid_eight)
    distance_matrix = [[0.0] * self._cols for i in range(self._rows)]
    for i in range(self._rows):
        for j in range(self._cols):
            neuron_index = i * self._cols + j
            # average squared distance in input space to all grid neighbours
            for neighbor_index in self._neighbors[neuron_index]:
                distance_matrix[i][j] += euclidean_distance_square(self._weights[neuron_index],
                                                                  self._weights[neighbor_index])
            distance_matrix[i][j] /= len(self._neighbors[neuron_index])
    return distance_matrix
def show_density_matrix(self, surface_divider=20.0):
    """!
    @brief Show density matrix (P-matrix) using kernel density estimation.
    @param[in] surface_divider (double): Divider in each dimension that affect radius for density measurement.
    @see show_distance_matrix()
    """
    p_matrix = self.get_density_matrix(surface_divider)
    # render and block until the window is closed
    plt.imshow(p_matrix, cmap=plt.get_cmap('hot'), interpolation='kaiser')
    plt.title("P-Matrix")
    plt.colorbar()
    plt.show()
def get_density_matrix(self, surface_divider=20.0):
    """!
    @brief Calculates density matrix (P-Matrix).
    @param[in] surface_divider (double): Divider in each dimension that affect radius for density measurement.
    @return (list) Density matrix (P-Matrix).
    @see get_distance_matrix()
    """
    if self.__ccore_som_pointer is not None:
        self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
    density_matrix = [[0] * self._cols for i in range(self._rows)]
    dimension = len(self._weights[0])
    # per-dimension bounding box of all neuron weights
    dim_max = [float('-Inf')] * dimension
    dim_min = [float('Inf')] * dimension
    for weight in self._weights:
        for index_dim in range(dimension):
            if weight[index_dim] > dim_max[index_dim]:
                dim_max[index_dim] = weight[index_dim]
            if weight[index_dim] < dim_min[index_dim]:
                dim_min[index_dim] = weight[index_dim]
    # coverage radius per dimension, derived from the bounding-box extent
    radius = [0.0] * len(self._weights[0])
    for index_dim in range(dimension):
        radius[index_dim] = (dim_max[index_dim] - dim_min[index_dim]) / surface_divider
    ## TODO: do not use data
    for point in self._data:
        for index_neuron in range(len(self)):
            point_covered = True
            for index_dim in range(dimension):
                if abs(point[index_dim] - self._weights[index_neuron][index_dim]) > radius[index_dim]:
                    point_covered = False
                    break
            # NOTE(review): row/col are computed even when the point is not covered;
            # harmless but could live inside the 'if' below.
            row = int(math.floor(index_neuron / self._cols))
            col = index_neuron - row * self._cols
            if point_covered is True:
                density_matrix[row][col] += 1
    return density_matrix
def show_winner_matrix(self):
    """!
    @brief Show a winner matrix where each element corresponds to neuron and value represents
    amount of won objects from input data-space at the last training iteration.
    @see show_distance_matrix()
    """
    if self.__ccore_som_pointer is not None:
        self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
    (fig, ax) = plt.subplots()
    winner_matrix = [[0] * self._cols for _ in range(self._rows)]
    for i in range(self._rows):
        for j in range(self._cols):
            neuron_index = i * self._cols + j
            winner_matrix[i][j] = self._award[neuron_index]
            # imshow draws matrix[i][j] at data coordinates x=j, y=i; the original
            # ax.text(i, j, ...) transposed the labels on non-square maps.
            ax.text(j, i, str(winner_matrix[i][j]), va='center', ha='center')
    ax.imshow(winner_matrix, cmap=plt.get_cmap('cool'), interpolation='none')
    ax.grid(True)
    plt.title("Winner Matrix")
    plt.show()
    plt.close(fig)
def show_network(self, awards=False, belongs=False, coupling=True, dataset=True, marker_type='o'):
    """!
    @brief Shows neurons in the dimension of data.
    @param[in] awards (bool): If True - displays how many objects won each neuron.
    @param[in] belongs (bool): If True - marks each won object by according index of neuron-winner (only when
                dataset is displayed too).
    @param[in] coupling (bool): If True - displays connections between neurons (except case when function neighbor
                is used).
    @param[in] dataset (bool): If True - displays inputs data set.
    @param[in] marker_type (string): Defines marker that is used to denote neurons on the plot.
    @raise NotImplementedError: When the data dimension is not 1D, 2D or 3D.
    """
    # pull the latest state out of the C++ backend before drawing
    if self.__ccore_som_pointer is not None:
        self._size = wrapper.som_get_size(self.__ccore_som_pointer)
        self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
        self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer)
        self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
    dimension = len(self._weights[0])
    fig = plt.figure()
    # Check for dimensions: only 1D/2D/3D data can be rendered
    if (dimension == 1) or (dimension == 2):
        axes = fig.add_subplot(111)
    elif dimension == 3:
        axes = fig.gca(projection='3d')
    else:
        raise NotImplementedError('Impossible to show network in data-space that is differ from 1D, 2D or 3D.')
    # optionally draw the raw input data behind the neurons
    if (self._data is not None) and (dataset is True):
        for x in self._data:
            if dimension == 1:
                axes.plot(x[0], 0.0, 'b|', ms=30)
            elif dimension == 2:
                axes.plot(x[0], x[1], 'b.')
            elif dimension == 3:
                axes.scatter(x[0], x[1], x[2], c='b', marker='.')
    # Show neurons: green for winners, yellow for neurons with no captured objects
    for index in range(self._size):
        color = 'g'
        if self._award[index] == 0:
            color = 'y'
        if dimension == 1:
            axes.plot(self._weights[index][0], 0.0, color + marker_type)
            if awards:
                location = '{0}'.format(self._award[index])
                axes.text(self._weights[index][0], 0.0, location, color='black', fontsize=10)
            if belongs and self._data is not None:
                location = '{0}'.format(index)
                axes.text(self._weights[index][0], 0.0, location, color='black', fontsize=12)
                for k in range(len(self._capture_objects[index])):
                    point = self._data[self._capture_objects[index][k]]
                    axes.text(point[0], 0.0, location, color='blue', fontsize=10)
        if dimension == 2:
            axes.plot(self._weights[index][0], self._weights[index][1], color + marker_type)
            if awards:
                location = '{0}'.format(self._award[index])
                axes.text(self._weights[index][0], self._weights[index][1], location, color='black', fontsize=10)
            if belongs and self._data is not None:
                location = '{0}'.format(index)
                axes.text(self._weights[index][0], self._weights[index][1], location, color='black', fontsize=12)
                for k in range(len(self._capture_objects[index])):
                    point = self._data[self._capture_objects[index][k]]
                    axes.text(point[0], point[1], location, color='blue', fontsize=10)
            # draw each connection once (from the lower-indexed endpoint)
            if (self._conn_type != type_conn.func_neighbor) and (coupling is True):
                for neighbor in self._neighbors[index]:
                    if neighbor > index:
                        axes.plot([self._weights[index][0], self._weights[neighbor][0]],
                                  [self._weights[index][1], self._weights[neighbor][1]],
                                  'g', linewidth=0.5)
        elif dimension == 3:
            axes.scatter(self._weights[index][0], self._weights[index][1], self._weights[index][2], c=color,
                         marker=marker_type)
            if (self._conn_type != type_conn.func_neighbor) and (coupling != False):
                for neighbor in self._neighbors[index]:
                    if neighbor > index:
                        axes.plot([self._weights[index][0], self._weights[neighbor][0]],
                                  [self._weights[index][1], self._weights[neighbor][1]],
                                  [self._weights[index][2], self._weights[neighbor][2]],
                                  'g-', linewidth=0.5)
    plt.title("Network Structure")
    plt.grid()
    plt.show()
    plt.close(fig)
def __get_dump_from_python(self, ccore_usage):
    """!
    @brief Packs the python-side network state into a serializable dictionary.
    @param[in] ccore_usage (bool): Marks whether the state originates from the CCORE backend.
    @return (dict) Dictionary with a 'ccore' flag and the 'state' payload.
    """
    state = {'cols': self._cols,
             'rows': self._rows,
             'size': self._size,
             'conn_type': self._conn_type,
             'neighbors': self._neighbors,
             'local_radius': self._local_radius,
             'learn_rate': self._learn_rate,
             'params': self._params,
             'location': self._location,
             'weights': self._weights,
             'award': self._award,
             'capture_objects': self._capture_objects}
    return {'ccore': ccore_usage, 'state': state}
def __download_dump_from_ccore(self):
    """!
    @brief Copies the network state from the CCORE (C++) instance into the python-side attributes.
    @details Locations are recomputed locally since they depend only on the grid size.
    """
    self._location = self.__initialize_locations(self._rows, self._cols)
    self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
    self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
    self._capture_objects = wrapper.som_get_capture_objects(self.__ccore_som_pointer)
def __upload_common_part(self, state_dump):
    """!
    @brief Restores the attributes shared by the python and CCORE restore paths.
    @param[in] state_dump (dict): State payload produced by __get_dump_from_python().
    """
    self._cols = state_dump['cols']
    self._rows = state_dump['rows']
    self._size = state_dump['size']
    self._conn_type = state_dump['conn_type']
    self._local_radius = state_dump['local_radius']
    self._learn_rate = state_dump['learn_rate']
    self._params = state_dump['params']
    # The original code assigned state_dump['neighbors'] here and immediately
    # overwrote it with None (dead store). Neighbors are rebuilt by
    # _create_connections() on the python path and owned by CCORE otherwise.
    self._neighbors = None
def __upload_dump_to_python(self, state_dump):
    """!
    @brief Restores the network purely on the python side (no CCORE instance).
    @param[in] state_dump (dict): State payload produced by __get_dump_from_python().
    """
    self.__ccore_som_pointer = None
    self.__upload_common_part(state_dump)
    self._weights = state_dump['weights']
    self._award = state_dump['award']
    self._capture_objects = state_dump['capture_objects']
    # Locations are deterministic from the grid size, so they are recomputed;
    # the original also assigned state_dump['location'] first - a dead store
    # that was immediately overwritten and is removed here.
    self._location = self.__initialize_locations(self._rows, self._cols)
    self._create_connections(self._conn_type)
def __upload_dump_to_ccore(self, state_dump):
    """!
    @brief Restores the network into a freshly created CCORE (C++) instance.
    @param[in] state_dump (dict): State payload produced by __get_dump_from_python().
    """
    self.__upload_common_part(state_dump)
    # create a new C++ network and push the saved weights/awards/captures into it
    self.__ccore_som_pointer = wrapper.som_create(self._rows, self._cols, self._conn_type, self._params)
    wrapper.som_load(self.__ccore_som_pointer, state_dump['weights'], state_dump['award'],
                     state_dump['capture_objects'])
| [
"pyclustering.utils.dimension.dimension_info",
"matplotlib.pyplot.grid",
"math.floor",
"pyclustering.core.som_wrapper.som_get_neighbors",
"pyclustering.core.wrapper.ccore_library.workable",
"pyclustering.core.som_wrapper.som_train",
"pyclustering.core.som_wrapper.som_load",
"pyclustering.core.som_wrap... | [((13291, 13317), 'pyclustering.utils.dimension.dimension_info', 'dimension_info', (['self._data'], {}), '(self._data)\n', (13305, 13317), False, 'from pyclustering.utils.dimension import dimension_info\n'), ((13736, 13774), 'random.seed', 'random.seed', (['self._params.random_state'], {}), '(self._params.random_state)\n', (13747, 13774), False, 'import random\n'), ((20016, 20062), 'pyclustering.utils.euclidean_distance_square', 'euclidean_distance_square', (['self._weights[0]', 'x'], {}), '(self._weights[0], x)\n', (20041, 20062), False, 'from pyclustering.utils import euclidean_distance_square\n'), ((26952, 26973), 'matplotlib.pyplot.title', 'plt.title', (['"""U-Matrix"""'], {}), "('U-Matrix')\n", (26961, 26973), True, 'import matplotlib.pyplot as plt\n'), ((26983, 26997), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (26995, 26997), True, 'import matplotlib.pyplot as plt\n'), ((27007, 27017), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27015, 27017), True, 'import matplotlib.pyplot as plt\n'), ((28944, 28965), 'matplotlib.pyplot.title', 'plt.title', (['"""P-Matrix"""'], {}), "('P-Matrix')\n", (28953, 28965), True, 'import matplotlib.pyplot as plt\n'), ((28975, 28989), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (28987, 28989), True, 'import matplotlib.pyplot as plt\n'), ((28999, 29009), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29007, 29009), True, 'import matplotlib.pyplot as plt\n'), ((31367, 31381), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (31379, 31381), True, 'import matplotlib.pyplot as plt\n'), ((31853, 31879), 'matplotlib.pyplot.title', 'plt.title', (['"""Winner Matrix"""'], {}), "('Winner Matrix')\n", (31862, 31879), True, 'import matplotlib.pyplot as plt\n'), ((31889, 31899), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31897, 31899), True, 'import matplotlib.pyplot as plt\n'), ((31909, 31923), 'matplotlib.pyplot.close', 
'plt.close', (['fig'], {}), '(fig)\n', (31918, 31923), True, 'import matplotlib.pyplot as plt\n'), ((33133, 33145), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (33143, 33145), True, 'import matplotlib.pyplot as plt\n'), ((36791, 36821), 'matplotlib.pyplot.title', 'plt.title', (['"""Network Structure"""'], {}), "('Network Structure')\n", (36800, 36821), True, 'import matplotlib.pyplot as plt\n'), ((36831, 36841), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (36839, 36841), True, 'import matplotlib.pyplot as plt\n'), ((36851, 36861), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36859, 36861), True, 'import matplotlib.pyplot as plt\n'), ((36871, 36885), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (36880, 36885), True, 'import matplotlib.pyplot as plt\n'), ((37782, 37831), 'pyclustering.core.som_wrapper.som_get_weights', 'wrapper.som_get_weights', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (37805, 37831), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((37855, 37903), 'pyclustering.core.som_wrapper.som_get_awards', 'wrapper.som_get_awards', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (37877, 37903), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((37937, 37994), 'pyclustering.core.som_wrapper.som_get_capture_objects', 'wrapper.som_get_capture_objects', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (37968, 37994), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((39073, 39146), 'pyclustering.core.som_wrapper.som_create', 'wrapper.som_create', (['self._rows', 'self._cols', 'self._conn_type', 'self._params'], {}), '(self._rows, self._cols, self._conn_type, self._params)\n', (39091, 39146), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((39156, 39277), 'pyclustering.core.som_wrapper.som_load', 'wrapper.som_load', (['self.__ccore_som_pointer', "state_dump['weights']", "state_dump['award']", 
"state_dump['capture_objects']"], {}), "(self.__ccore_som_pointer, state_dump['weights'],\n state_dump['award'], state_dump['capture_objects'])\n", (39172, 39277), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((6030, 6076), 'pyclustering.core.som_wrapper.som_get_size', 'wrapper.som_get_size', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (6050, 6076), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((6361, 6410), 'pyclustering.core.som_wrapper.som_get_weights', 'wrapper.som_get_weights', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (6384, 6410), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((6773, 6821), 'pyclustering.core.som_wrapper.som_get_awards', 'wrapper.som_get_awards', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (6795, 6821), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((7579, 7636), 'pyclustering.core.som_wrapper.som_get_capture_objects', 'wrapper.som_get_capture_objects', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (7610, 7636), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((9034, 9058), 'pyclustering.core.wrapper.ccore_library.workable', 'ccore_library.workable', ([], {}), '()\n', (9056, 9058), False, 'from pyclustering.core.wrapper import ccore_library\n'), ((9100, 9155), 'pyclustering.core.som_wrapper.som_create', 'wrapper.som_create', (['rows', 'cols', 'conn_type', 'self._params'], {}), '(rows, cols, conn_type, self._params)\n', (9118, 9155), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((9963, 10008), 'pyclustering.core.som_wrapper.som_destroy', 'wrapper.som_destroy', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (9982, 10008), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((10790, 10814), 'pyclustering.core.wrapper.ccore_library.workable', 'ccore_library.workable', ([], {}), '()\n', (10812, 10814), False, 'from pyclustering.core.wrapper 
import ccore_library\n'), ((16536, 16566), 'math.floor', 'math.floor', (['(index / self._cols)'], {}), '(index / self._cols)\n', (16546, 16566), False, 'import math\n'), ((20133, 20179), 'pyclustering.utils.euclidean_distance_square', 'euclidean_distance_square', (['self._weights[i]', 'x'], {}), '(self._weights[i], x)\n', (20158, 20179), False, 'from pyclustering.utils import euclidean_distance_square\n'), ((22720, 22787), 'pyclustering.core.som_wrapper.som_train', 'wrapper.som_train', (['self.__ccore_som_pointer', 'data', 'epochs', 'autostop'], {}), '(self.__ccore_som_pointer, data, epochs, autostop)\n', (22737, 22787), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((25005, 25066), 'pyclustering.core.som_wrapper.som_simulate', 'wrapper.som_simulate', (['self.__ccore_som_pointer', 'input_pattern'], {}), '(self.__ccore_som_pointer, input_pattern)\n', (25025, 25066), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((26387, 26435), 'pyclustering.core.som_wrapper.som_get_awards', 'wrapper.som_get_awards', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (26409, 26435), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((27500, 27549), 'pyclustering.core.som_wrapper.som_get_weights', 'wrapper.som_get_weights', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (27523, 27549), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((29474, 29523), 'pyclustering.core.som_wrapper.som_get_weights', 'wrapper.som_get_weights', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (29497, 29523), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((31295, 31343), 'pyclustering.core.som_wrapper.som_get_awards', 'wrapper.som_get_awards', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (31317, 31343), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((32786, 32832), 'pyclustering.core.som_wrapper.som_get_size', 'wrapper.som_get_size', 
(['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (32806, 32832), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((32862, 32911), 'pyclustering.core.som_wrapper.som_get_weights', 'wrapper.som_get_weights', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (32885, 32911), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((32943, 32994), 'pyclustering.core.som_wrapper.som_get_neighbors', 'wrapper.som_get_neighbors', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (32968, 32994), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((33022, 33070), 'pyclustering.core.som_wrapper.som_get_awards', 'wrapper.som_get_awards', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (33044, 33070), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((12695, 12746), 'pyclustering.utils.euclidean_distance_square', 'euclidean_distance_square', (['location[i]', 'location[j]'], {}), '(location[i], location[j])\n', (12720, 12746), False, 'from pyclustering.utils import euclidean_distance_square\n'), ((23361, 23388), 'math.exp', 'math.exp', (['(-(epoch / epochs))'], {}), '(-(epoch / epochs))\n', (23369, 23388), False, 'import math\n'), ((26898, 26917), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""hot"""'], {}), "('hot')\n", (26910, 26917), True, 'import matplotlib.pyplot as plt\n'), ((27647, 27698), 'pyclustering.core.som_wrapper.som_get_neighbors', 'wrapper.som_get_neighbors', (['self.__ccore_som_pointer'], {}), '(self.__ccore_som_pointer)\n', (27672, 27698), True, 'import pyclustering.core.som_wrapper as wrapper\n'), ((28890, 28909), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""hot"""'], {}), "('hot')\n", (28902, 28909), True, 'import matplotlib.pyplot as plt\n'), ((31775, 31795), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""cool"""'], {}), "('cool')\n", (31787, 31795), True, 'import matplotlib.pyplot as plt\n'), ((20896, 20946), 'math.exp', 'math.exp', 
(['(-(distance / (2.0 * self._local_radius)))'], {}), '(-(distance / (2.0 * self._local_radius)))\n', (20904, 20946), False, 'import math\n'), ((21684, 21734), 'math.exp', 'math.exp', (['(-(distance / (2.0 * self._local_radius)))'], {}), '(-(distance / (2.0 * self._local_radius)))\n', (21692, 21734), False, 'import math\n'), ((23264, 23291), 'math.exp', 'math.exp', (['(-(epoch / epochs))'], {}), '(-(epoch / epochs))\n', (23272, 23291), False, 'import math\n'), ((28159, 28249), 'pyclustering.utils.euclidean_distance_square', 'euclidean_distance_square', (['self._weights[neuron_index]', 'self._weights[neighbor_index]'], {}), '(self._weights[neuron_index], self._weights[\n neighbor_index])\n', (28184, 28249), False, 'from pyclustering.utils import euclidean_distance_square\n'), ((30681, 30718), 'math.floor', 'math.floor', (['(index_neuron / self._cols)'], {}), '(index_neuron / self._cols)\n', (30691, 30718), False, 'import math\n'), ((17170, 17205), 'math.floor', 'math.floor', (['(left_index / self._cols)'], {}), '(left_index / self._cols)\n', (17180, 17205), False, 'import math\n'), ((17343, 17379), 'math.floor', 'math.floor', (['(right_index / self._cols)'], {}), '(right_index / self._cols)\n', (17353, 17379), False, 'import math\n'), ((17566, 17607), 'math.floor', 'math.floor', (['(upper_left_index / self._cols)'], {}), '(upper_left_index / self._cols)\n', (17576, 17607), False, 'import math\n'), ((17750, 17792), 'math.floor', 'math.floor', (['(upper_right_index / self._cols)'], {}), '(upper_right_index / self._cols)\n', (17760, 17792), False, 'import math\n'), ((17943, 17984), 'math.floor', 'math.floor', (['(lower_left_index / self._cols)'], {}), '(lower_left_index / self._cols)\n', (17953, 17984), False, 'import math\n'), ((18135, 18177), 'math.floor', 'math.floor', (['(lower_right_index / self._cols)'], {}), '(lower_right_index / self._cols)\n', (18145, 18177), False, 'import math\n'), ((18938, 18979), 'math.floor', 'math.floor', (['(upper_left_index / 
self._cols)'], {}), '(upper_left_index / self._cols)\n', (18948, 18979), False, 'import math\n'), ((19122, 19164), 'math.floor', 'math.floor', (['(upper_right_index / self._cols)'], {}), '(upper_right_index / self._cols)\n', (19132, 19164), False, 'import math\n'), ((19315, 19356), 'math.floor', 'math.floor', (['(lower_left_index / self._cols)'], {}), '(lower_left_index / self._cols)\n', (19325, 19356), False, 'import math\n'), ((19507, 19549), 'math.floor', 'math.floor', (['(lower_right_index / self._cols)'], {}), '(lower_right_index / self._cols)\n', (19517, 19549), False, 'import math\n'), ((15603, 15618), 'random.random', 'random.random', ([], {}), '()\n', (15616, 15618), False, 'import random\n'), ((15365, 15380), 'random.random', 'random.random', ([], {}), '()\n', (15378, 15380), False, 'import random\n')] |
import rospy
from styx_msgs.msg import TrafficLight
import numpy as np
from keras.models import Model
from keras import applications
from keras.models import load_model
from keras.preprocessing import image as img_preprocessing
import cv2
# load the trained model
from keras.utils.generic_utils import CustomObjectScope
# Path to the trained Keras MobileNet checkpoint (relative to the node's working dir).
model_filepath = 'saved_models/model.MobileNet-3-classes.h5'
# Number of traffic-light classes the model was trained to predict.
n_classes = 3
class TLClassifier(object):
    """Traffic-light color classifier backed by a trained Keras MobileNet model."""
    def __init__(self):
        # The model is loaded lazily via load_model(); until then
        # get_classification() refuses to predict and returns UNKNOWN.
        self.model_loaded = False
    def load_model(self):
        """Load the trained MobileNet model from model_filepath and prepare it for inference."""
        rospy.loginfo("TLClassifier: Loading model...")
        # MobileNet uses custom layers (relu6, DepthwiseConv2D) that must be
        # registered with Keras before the saved model can be deserialized.
        with CustomObjectScope({'relu6': applications.mobilenet.relu6,'DepthwiseConv2D': applications.mobilenet.DepthwiseConv2D}):
            self.model = load_model(model_filepath)
        self.model._make_predict_function() # Otherwise there is a "Tensor %s is not an element of this grap..." when predicting
        self.model_loaded = True
        rospy.loginfo("TLClassifier: Model loaded - READY")
    def get_classification(self, image):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        # Refuse to classify before the model has been loaded.
        # NOTE(review): "clssification" typo below is in a runtime log string
        # and is therefore left unchanged here.
        if not self.model_loaded:
            rospy.logwarn("Model not loaded yet, clssification not possible!")
            return TrafficLight.UNKNOWN
        # The model was trained with RGB images.
        # So the image needs to be provided as RGB:
        # self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")
        # Otherwise a conversion would be necessary
        # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # The model expects RGB images in (224, 224) as input
        image = cv2.resize(image,(224,224))
        # Convert to a batch of one tensor and scale pixel values to [0, 1].
        x = img_preprocessing.img_to_array(image)
        x = np.expand_dims(x, axis=0).astype('float32')/255
        # Index of the highest-probability class; ids follow styx_msgs/TrafficLight.
        signal_prediction = np.argmax(self.model.predict(x))
        return signal_prediction | [
"keras.preprocessing.image.img_to_array",
"keras.models.load_model",
"rospy.logwarn",
"keras.utils.generic_utils.CustomObjectScope",
"numpy.expand_dims",
"cv2.resize",
"rospy.loginfo"
] | [((605, 652), 'rospy.loginfo', 'rospy.loginfo', (['"""TLClassifier: Loading model..."""'], {}), "('TLClassifier: Loading model...')\n", (618, 652), False, 'import rospy\n'), ((1010, 1061), 'rospy.loginfo', 'rospy.loginfo', (['"""TLClassifier: Model loaded - READY"""'], {}), "('TLClassifier: Model loaded - READY')\n", (1023, 1061), False, 'import rospy\n'), ((1918, 1947), 'cv2.resize', 'cv2.resize', (['image', '(224, 224)'], {}), '(image, (224, 224))\n', (1928, 1947), False, 'import cv2\n'), ((2005, 2042), 'keras.preprocessing.image.img_to_array', 'img_preprocessing.img_to_array', (['image'], {}), '(image)\n', (2035, 2042), True, 'from keras.preprocessing import image as img_preprocessing\n'), ((666, 787), 'keras.utils.generic_utils.CustomObjectScope', 'CustomObjectScope', (["{'relu6': applications.mobilenet.relu6, 'DepthwiseConv2D': applications.\n mobilenet.DepthwiseConv2D}"], {}), "({'relu6': applications.mobilenet.relu6, 'DepthwiseConv2D':\n applications.mobilenet.DepthwiseConv2D})\n", (683, 787), False, 'from keras.utils.generic_utils import CustomObjectScope\n'), ((809, 835), 'keras.models.load_model', 'load_model', (['model_filepath'], {}), '(model_filepath)\n', (819, 835), False, 'from keras.models import load_model\n'), ((1450, 1516), 'rospy.logwarn', 'rospy.logwarn', (['"""Model not loaded yet, clssification not possible!"""'], {}), "('Model not loaded yet, clssification not possible!')\n", (1463, 1516), False, 'import rospy\n'), ((2055, 2080), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2069, 2080), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Cluster plotting tools"""
__author__ = ["<NAME>", "<NAME>"]
__all__ = ["plot_cluster_algorithm"]
import pandas as pd
from sktime.clustering.base._typing import NumpyOrDF
from sktime.clustering.base.base import BaseClusterer
from sktime.clustering.partitioning._lloyds_partitioning import (
TimeSeriesLloydsPartitioning,
)
from sktime.datatypes._panel._convert import from_nested_to_2d_array
from sktime.utils.validation._dependencies import _check_soft_dependencies
def _plot(cluster_values, center, axes):
for cluster_series in cluster_values:
axes.plot(cluster_series, color="b")
axes.plot(center, color="r")
def plot_cluster_algorithm(model: BaseClusterer, predict_series: NumpyOrDF, k: int):
    """
    Method that is used to plot a clustering algorithms output
    Parameters
    ----------
    model: BaseClusterer
        Clustering model to plot
    predict_series: Numpy or Dataframe
        The series to predict the values for
    k: int
        Number of centers
    """
    # matplotlib is an optional dependency; fail fast with a clear message.
    _check_soft_dependencies("matplotlib")
    import matplotlib.pyplot as plt
    import matplotlib.patches as mpatches
    # Flatten sktime's nested DataFrame representation into a 2D numpy array.
    if isinstance(predict_series, pd.DataFrame):
        predict_series = from_nested_to_2d_array(predict_series, return_numpy=True)
    plt.figure(figsize=(5, 10))
    plt.rcParams["figure.dpi"] = 100
    # Assign every series to a cluster, then fetch the learned centers.
    indexes = model.predict(predict_series)
    centers = model.get_centers()
    series_values = TimeSeriesLloydsPartitioning.get_cluster_values(
        indexes, predict_series, k
    )
    # NOTE(review): with k == 1, plt.subplots returns a single Axes (not an
    # array), so axes[i] below would fail -- presumably k > 1 is assumed.
    fig, axes = plt.subplots(nrows=k, ncols=1)
    # One subplot per cluster: member series in blue, its center in red.
    for i in range(k):
        _plot(series_values[i], centers[i], axes[i])
    blue_patch = mpatches.Patch(color="blue", label="Series that belong to the cluster")
    red_patch = mpatches.Patch(color="red", label="Cluster centers")
    plt.legend(
        handles=[red_patch, blue_patch],
        loc="upper center",
        bbox_to_anchor=(0.5, -0.40),
        fancybox=True,
        shadow=True,
        ncol=5,
    )
    plt.tight_layout()
    plt.show()
| [
"sktime.utils.validation._dependencies._check_soft_dependencies",
"sktime.datatypes._panel._convert.from_nested_to_2d_array",
"matplotlib.pyplot.figure",
"matplotlib.patches.Patch",
"matplotlib.pyplot.tight_layout",
"sktime.clustering.partitioning._lloyds_partitioning.TimeSeriesLloydsPartitioning.get_clus... | [((1045, 1083), 'sktime.utils.validation._dependencies._check_soft_dependencies', '_check_soft_dependencies', (['"""matplotlib"""'], {}), "('matplotlib')\n", (1069, 1083), False, 'from sktime.utils.validation._dependencies import _check_soft_dependencies\n'), ((1300, 1327), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 10)'}), '(figsize=(5, 10))\n', (1310, 1327), True, 'import matplotlib.pyplot as plt\n'), ((1464, 1539), 'sktime.clustering.partitioning._lloyds_partitioning.TimeSeriesLloydsPartitioning.get_cluster_values', 'TimeSeriesLloydsPartitioning.get_cluster_values', (['indexes', 'predict_series', 'k'], {}), '(indexes, predict_series, k)\n', (1511, 1539), False, 'from sktime.clustering.partitioning._lloyds_partitioning import TimeSeriesLloydsPartitioning\n'), ((1570, 1600), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'k', 'ncols': '(1)'}), '(nrows=k, ncols=1)\n', (1582, 1600), True, 'import matplotlib.pyplot as plt\n'), ((1695, 1766), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""blue"""', 'label': '"""Series that belong to the cluster"""'}), "(color='blue', label='Series that belong to the cluster')\n", (1709, 1766), True, 'import matplotlib.patches as mpatches\n'), ((1783, 1835), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""red"""', 'label': '"""Cluster centers"""'}), "(color='red', label='Cluster centers')\n", (1797, 1835), True, 'import matplotlib.patches as mpatches\n'), ((1840, 1971), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[red_patch, blue_patch]', 'loc': '"""upper center"""', 'bbox_to_anchor': '(0.5, -0.4)', 'fancybox': '(True)', 'shadow': '(True)', 'ncol': '(5)'}), "(handles=[red_patch, blue_patch], loc='upper center',\n bbox_to_anchor=(0.5, -0.4), fancybox=True, shadow=True, ncol=5)\n", (1850, 1971), True, 'import matplotlib.pyplot as plt\n'), ((2028, 2046), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2044, 2046), True, 'import matplotlib.pyplot as plt\n'), ((2051, 2061), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2059, 2061), True, 'import matplotlib.pyplot as plt\n'), ((1237, 1295), 'sktime.datatypes._panel._convert.from_nested_to_2d_array', 'from_nested_to_2d_array', (['predict_series'], {'return_numpy': '(True)'}), '(predict_series, return_numpy=True)\n', (1260, 1295), False, 'from sktime.datatypes._panel._convert import from_nested_to_2d_array\n')] |
from django.test import TestCase
from transductor.forms import EnergyForm
from transductor.models import TransductorModel
class EnergyTransductorForm(TestCase):
    """Validation tests for EnergyForm against a persisted sample TransductorModel."""
    def setUp(self):
        # Build and save a transductor model that the form can reference by id.
        t_model = TransductorModel()
        t_model.name = "TR 4020"
        t_model.transport_protocol = "UDP"
        # NOTE(review): "Mosbus RTU" looks like a typo for "Modbus RTU";
        # left unchanged because it is runtime test data.
        t_model.serial_protocol = "Mosbus RTU"
        t_model.measurements_type = "EnergyMeasurements"
        t_model.register_addresses = [[68, 0], [70, 1]]
        t_model.save()
        self.t_model = t_model
    def test_valid_form(self):
        # A complete, well-formed payload should validate.
        data = {
            'serie_number': 1,
            'ip_address': "192.168.127.12",
            'description': "Test",
            'model': self.t_model.id
        }
        form = EnergyForm(data=data)
        self.assertTrue(form.is_valid())
    def test_invalid_form(self):
        # Empty fields and a malformed IP address should be rejected.
        data = {
            'serie_number': u'',
            'ip_address': "1",
            'description': u'',
            'model': u''
        }
        form = EnergyForm(data=data)
        self.assertFalse(form.is_valid())
| [
"transductor.models.TransductorModel",
"transductor.forms.EnergyForm"
] | [((202, 220), 'transductor.models.TransductorModel', 'TransductorModel', ([], {}), '()\n', (218, 220), False, 'from transductor.models import TransductorModel\n'), ((734, 755), 'transductor.forms.EnergyForm', 'EnergyForm', ([], {'data': 'data'}), '(data=data)\n', (744, 755), False, 'from transductor.forms import EnergyForm\n'), ((996, 1017), 'transductor.forms.EnergyForm', 'EnergyForm', ([], {'data': 'data'}), '(data=data)\n', (1006, 1017), False, 'from transductor.forms import EnergyForm\n')] |
from typing import Any, List, Optional
import hydra
import torch
import torchmetrics
from omegaconf import DictConfig
from pytorch_lightning import LightningModule
from ..optimizer.scheduler import create_scheduler
from ..utils import utils
from ..utils.misc import mixup_data
log = utils.get_logger(__name__)
class LitBase(LightningModule):
    """Base LightningModule that builds model and loss from a Hydra config
    and provides optimizer/scheduler wiring for subclasses."""
    def __init__(self, cfg: Optional[DictConfig] = None, **kwargs):
        """Instantiate model and loss from *cfg*; optionally restore weights
        from ``cfg.load_from_checkpoint``."""
        super().__init__()
        self.save_hyperparameters()
        config = cfg
        self.config = config
        # model
        log.info(f"Instantiating module <{config.module._target_}>")
        self.model = hydra.utils.instantiate(
            config.module, num_classes=config.datamodule.num_classes
        )
        # load from checkpoint
        if config.get("load_from_checkpoint"):
            ckpt = torch.load(config.load_from_checkpoint)
            # strict=False: tolerate partial matches and report the differences.
            missing_keys, unexpected_keys = self.load_state_dict(ckpt["state_dict"], strict=False)
            log.info(f"[ckpt] Missing keys: {missing_keys}, Unexpected keys: {unexpected_keys}.")
            log.info(f"[ckpt] Load checkpoint from {config.load_from_checkpoint}.")
        # loss function
        log.info(f"Instantiating module <{config.loss._target_}>")
        self.criterion = hydra.utils.instantiate(config.loss)
    def forward(self, x: torch.Tensor):
        """Delegate the forward pass to the instantiated model."""
        return self.model(x)
    # ------------
    # train
    # ------------
    def training_epoch_end(self, outputs: List[Any]):
        # Hook for subclasses; intentionally a no-op here.
        pass
    # ------------
    # validation
    # ------------
    def validation_epoch_end(self, outputs: List[Any]):
        # Hook for subclasses; intentionally a no-op here.
        pass
    # ------------
    # test
    # ------------
    def test_epoch_end(self, outputs: List[Any]):
        # Hook for subclasses; intentionally a no-op here.
        pass
    # ------------
    # optim
    # ------------
    def configure_scheduler(self, optimizer):
        """Build LR scheduler(s) for *optimizer* from the config.

        ``config.scheduler.policy`` selects whether the schedule is stepped
        per "epoch" or per "iteration"; any warmup steps are subtracted from
        the schedule length before the scheduler is created.
        """
        config = self.config
        # Number of optimizer steps per epoch, rounded to nearest integer.
        num_steps_per_epoch = int(
            self.trainer.datamodule.train_len / config.datamodule.effective_batch_size + 0.5
        )
        max_epoch = config.trainer.max_epochs
        max_iterations = max_epoch * num_steps_per_epoch
        if config.scheduler.policy == "epoch":
            sch_times = max_epoch
        else:
            sch_times = max_iterations
        if config.scheduler.get("warmup"):
            if config.scheduler.policy == "epoch":
                sch_times -= config.scheduler.warmup.times
            elif config.scheduler.policy == "iteration":
                # A float warmup is interpreted as a fraction/multiple of epochs.
                if isinstance(config.scheduler.warmup.times, float):
                    sch_times -= config.scheduler.warmup.times * num_steps_per_epoch
                else:
                    sch_times -= config.scheduler.warmup.times
            else:
                raise ValueError(
                    "scheduler_policy should be epoch or iteration,"
                    f"but '{config.scheduler.policy}' given."
                )
        schedulers = []
        if config.scheduler.get("name"):
            log.info(f"Creating module <{config.scheduler.name}>")
            sch = create_scheduler(optimizer=optimizer, sch_times=sch_times, **config.scheduler)
            schedulers.append(sch)
        return schedulers
    def configure_optimizers(self):
        """Instantiate the optimizer (LARS receives the whole model, others
        receive its parameters) and attach any configured schedulers."""
        config = self.config
        # === Optimizer ===
        log.info(f"Instantiating module <{config.optimizer._target_}>")
        if config.optimizer._target_.split(".")[-1] in ["LARS"]:
            optimizer = hydra.utils.instantiate(config.optimizer, self.model)
        else:
            optimizer = hydra.utils.instantiate(config.optimizer, self.model.parameters())
        # === Scheduler ===
        schedulers = self.configure_scheduler(optimizer)
        return [optimizer], schedulers
| [
"hydra.utils.instantiate",
"torch.load"
] | [((636, 722), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['config.module'], {'num_classes': 'config.datamodule.num_classes'}), '(config.module, num_classes=config.datamodule.\n num_classes)\n', (659, 722), False, 'import hydra\n'), ((1276, 1312), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['config.loss'], {}), '(config.loss)\n', (1299, 1312), False, 'import hydra\n'), ((838, 877), 'torch.load', 'torch.load', (['config.load_from_checkpoint'], {}), '(config.load_from_checkpoint)\n', (848, 877), False, 'import torch\n'), ((3444, 3497), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['config.optimizer', 'self.model'], {}), '(config.optimizer, self.model)\n', (3467, 3497), False, 'import hydra\n')] |
from everett.component import RequiredConfigMixin, ConfigOptions
from everett.manager import ConfigManager, ConfigOSEnv
class BotConfig(RequiredConfigMixin):
    """Everett configuration schema for the bot: Flask auth/logging, GitHub
    repositories and token, and the ZenHub pipeline to react to."""
    required_config = ConfigOptions()
    required_config.add_option('flask_loglevel', parser=str, default='info', doc='Set the log level for Flask.')
    required_config.add_option('flask_password', parser=str, doc='Password for HTTP authentication in Flask.')
    required_config.add_option('flask_username', parser=str, doc='Username for HTTP authentication in Flask.')
    required_config.add_option('github_owner', parser=str, default='harvester', doc='Set the owner of the target GitHub '
                                                                                    'repository.')
    required_config.add_option('github_repository', parser=str, default='harvester', doc='Set the name of the target '
                                                                                         'GitHub repository.')
    required_config.add_option('github_repository_test', parser=str, default='tests', doc='Set the name of the tests '
                                                                                          'GitHub repository.')
    required_config.add_option('github_token', parser=str, doc='Set the token of the GitHub machine user.')
    required_config.add_option('zenhub_pipeline', parser=str, default='Review', doc='Set the target ZenHub pipeline to '
                                                                                    'handle events for.')
def get_config():
    """Build a ConfigManager that reads from OS environment variables,
    bound to the BotConfig option schema."""
    manager = ConfigManager(environments=[ConfigOSEnv()])
    return manager.with_options(BotConfig())
| [
"everett.manager.ConfigOSEnv",
"everett.component.ConfigOptions"
] | [((182, 197), 'everett.component.ConfigOptions', 'ConfigOptions', ([], {}), '()\n', (195, 197), False, 'from everett.component import RequiredConfigMixin, ConfigOptions\n'), ((1616, 1629), 'everett.manager.ConfigOSEnv', 'ConfigOSEnv', ([], {}), '()\n', (1627, 1629), False, 'from everett.manager import ConfigManager, ConfigOSEnv\n')] |
# Debug/experimental script: importing MyTable raises NameError because its
# module body uses constraint names (PK, UK, ...) before they are imported.
# Recover by locating the failing module from the traceback, prepending the
# missing `from Constraints import ...` line to its source and exec()-ing it.
try:
    import MyTable
except NameError as e:
    print(e)
    import importlib
    importlib.import_module('Constraints')
    # Add the Constraints.py module to the modules available in locals().
    # (i.e. give MyTable the effect of `from Constraints import PK,UK,FK,NN,D,C`.)
    # Use the traceback to find the module in which the exception occurred.
    import sys, traceback
    exc_type, exc_value, exc_traceback = sys.exc_info()
    last_tb = None
    for tb in traceback.extract_tb(exc_traceback):
        print(tb)
        last_tb = tb
    #print(last_tb)
    #print(type(last_tb))
    #print(dir(last_tb))
    print(last_tb.filename)
    print(last_tb.line)
    print(last_tb.lineno)
    print(last_tb.name)
    import pathlib
    module_path = pathlib.Path(last_tb.filename)
    # NOTE(review): module_path.stem would be safer than replace() here -- the
    # suffix text could occur elsewhere in the file name.
    module_name = module_path.name.replace(module_path.suffix, '')
    print(module_name)
    # Tried to inject Constraints into the module instance directly, but that
    # fails because PK is undefined at import time.
    # Instead, build the source as a string with
    # `from Constraints import PK,UK,FK,NN,D,C` prepended --
    # then exec(source_code)?
    #import importlib
    #importlib.import_module(module_name)
    print(e)
    #print('undefined', e)
    #print(type(e))
    #print(dir(e))
    #print(e.args)
    #print(type(e.with_traceback()))
    #print(e.with_traceback())
    #print(type(e.with_traceback))
    #print(dir(e.with_traceback))
    # NOTE: leading lines such as a '#!python3' shebang are not handled here!
    source_code = 'from Constraints import PK,UK,FK,NN,D,C' + '\n'
    with pathlib.Path(last_tb.filename).open() as f:
        source_code += f.read()
    exec(source_code)
    assert(module_name in locals())
    cls = locals()[module_name]
    print(dir(cls))
    print(cls.Id)
    # name 'PK' is not defined
    #print(locals())
    #print(locals()['__loader__'])
    #print(dir(locals()['__loader__']))
    #print(locals()['__loader__'].get_filename())
| [
"sys.exc_info",
"importlib.import_module",
"traceback.extract_tb",
"pathlib.Path"
] | [((85, 123), 'importlib.import_module', 'importlib.import_module', (['"""Constraints"""'], {}), "('Constraints')\n", (108, 123), False, 'import importlib\n'), ((352, 366), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (364, 366), False, 'import sys, traceback\n'), ((400, 435), 'traceback.extract_tb', 'traceback.extract_tb', (['exc_traceback'], {}), '(exc_traceback)\n', (420, 435), False, 'import sys, traceback\n'), ((686, 716), 'pathlib.Path', 'pathlib.Path', (['last_tb.filename'], {}), '(last_tb.filename)\n', (698, 716), False, 'import pathlib\n'), ((1376, 1406), 'pathlib.Path', 'pathlib.Path', (['last_tb.filename'], {}), '(last_tb.filename)\n', (1388, 1406), False, 'import pathlib\n')] |
import click, requests, sys
# Default bootstrap node of the toy-chord ring; commands fall back to it
# when --host/--port are not given.
# NOTE(review): the port is kept as a string so it can be concatenated into
# base_url below; click converts it where an int is expected.
bootstrap_ip = '192.168.0.2'
bootstrap_port = '8000'
base_url = 'http://' + bootstrap_ip + ':' + bootstrap_port
@click.group()
def toychord():
    """CLI client for toy-chord."""
@toychord.command()
@click.option('--key', required=True, type=str)
@click.option('--value', required=True, type=str)
@click.option('--host', default=bootstrap_ip, type=str)
@click.option('--port', default=bootstrap_port, type=int)
def insert(key, value, host, port):
    """Make an insert request for a key-value pair, to a specific Node.
    NOTE: The key-value pair may not be inserted to the database
    of the Node that receives the request. It will be inserted in
    the database of the Node that is the owner of the hash ID of
    the key-value pair.
    """
    url = 'http://' + host + ':' + str(port) + '/insert'
    data = {
        'key': key,
        'value': value
    }
    r = requests.post(url, data)
    # Fix: dropped the un-Pythonic parenthesized `if(...)` condition.
    if r.status_code == 200:
        click.echo('The key value pair was successfully inserted!')
    else:
        click.echo('Something went wrong with inserting the key-value pair.')
@toychord.command()
@click.option('--key', required=True, type=str)
@click.option('--host', default=bootstrap_ip, type=str)
@click.option('--port', default=bootstrap_port, type=int)
def delete(key, host, port):
    """Make a delete request for a key-value pair, to a specific Node.
    NOTE: The key-value pair doesn't have to be stored in the database
    of the Node that receives the request.
    """
    endpoint = 'http://{}:{}/delete'.format(host, port)
    response = requests.post(endpoint, {'key': key})
    click.echo(response.text)
@toychord.command()
@click.option('--key', required=True, type=str)
@click.option('--host', default=bootstrap_ip, type=str)
@click.option('--port', default=bootstrap_port, type=int)
def query(key, host, port):
    """Query for a key-value pair."""
    endpoint = 'http://{}:{}/query'.format(host, port)
    response = requests.post(endpoint, {'key': key})
    click.echo(response.text)
@toychord.command()
@click.option('--host', required=True, type=str)
@click.option('--port', required=True, type=int)
def depart(host, port):
    """Send a request to a specific Node to depart from toy-chord."""
    endpoint = 'http://{}:{}/node/depart'.format(host, port)
    response = requests.post(endpoint, {})
    click.echo(response.text)
@toychord.command()
def overlay():
    """Print the placement of the Nodes in toy-chord."""
    response = requests.get(base_url + '/overlay')
    click.echo(response.text)
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    toychord()
| [
"requests.post",
"click.group",
"click.option",
"requests.get",
"click.echo"
] | [((144, 157), 'click.group', 'click.group', ([], {}), '()\n', (155, 157), False, 'import click, requests, sys\n'), ((242, 288), 'click.option', 'click.option', (['"""--key"""'], {'required': '(True)', 'type': 'str'}), "('--key', required=True, type=str)\n", (254, 288), False, 'import click, requests, sys\n'), ((290, 338), 'click.option', 'click.option', (['"""--value"""'], {'required': '(True)', 'type': 'str'}), "('--value', required=True, type=str)\n", (302, 338), False, 'import click, requests, sys\n'), ((340, 394), 'click.option', 'click.option', (['"""--host"""'], {'default': 'bootstrap_ip', 'type': 'str'}), "('--host', default=bootstrap_ip, type=str)\n", (352, 394), False, 'import click, requests, sys\n'), ((396, 452), 'click.option', 'click.option', (['"""--port"""'], {'default': 'bootstrap_port', 'type': 'int'}), "('--port', default=bootstrap_port, type=int)\n", (408, 452), False, 'import click, requests, sys\n'), ((1153, 1199), 'click.option', 'click.option', (['"""--key"""'], {'required': '(True)', 'type': 'str'}), "('--key', required=True, type=str)\n", (1165, 1199), False, 'import click, requests, sys\n'), ((1201, 1255), 'click.option', 'click.option', (['"""--host"""'], {'default': 'bootstrap_ip', 'type': 'str'}), "('--host', default=bootstrap_ip, type=str)\n", (1213, 1255), False, 'import click, requests, sys\n'), ((1257, 1313), 'click.option', 'click.option', (['"""--port"""'], {'default': 'bootstrap_port', 'type': 'int'}), "('--port', default=bootstrap_port, type=int)\n", (1269, 1313), False, 'import click, requests, sys\n'), ((1714, 1760), 'click.option', 'click.option', (['"""--key"""'], {'required': '(True)', 'type': 'str'}), "('--key', required=True, type=str)\n", (1726, 1760), False, 'import click, requests, sys\n'), ((1762, 1816), 'click.option', 'click.option', (['"""--host"""'], {'default': 'bootstrap_ip', 'type': 'str'}), "('--host', default=bootstrap_ip, type=str)\n", (1774, 1816), False, 'import click, requests, sys\n'), ((1818, 1874), 
'click.option', 'click.option', (['"""--port"""'], {'default': 'bootstrap_port', 'type': 'int'}), "('--port', default=bootstrap_port, type=int)\n", (1830, 1874), False, 'import click, requests, sys\n'), ((2116, 2163), 'click.option', 'click.option', (['"""--host"""'], {'required': '(True)', 'type': 'str'}), "('--host', required=True, type=str)\n", (2128, 2163), False, 'import click, requests, sys\n'), ((2165, 2212), 'click.option', 'click.option', (['"""--port"""'], {'required': '(True)', 'type': 'int'}), "('--port', required=True, type=int)\n", (2177, 2212), False, 'import click, requests, sys\n'), ((918, 942), 'requests.post', 'requests.post', (['url', 'data'], {}), '(url, data)\n', (931, 942), False, 'import click, requests, sys\n'), ((1642, 1666), 'requests.post', 'requests.post', (['url', 'data'], {}), '(url, data)\n', (1655, 1666), False, 'import click, requests, sys\n'), ((1672, 1690), 'click.echo', 'click.echo', (['r.text'], {}), '(r.text)\n', (1682, 1690), False, 'import click, requests, sys\n'), ((2044, 2068), 'requests.post', 'requests.post', (['url', 'data'], {}), '(url, data)\n', (2057, 2068), False, 'import click, requests, sys\n'), ((2074, 2092), 'click.echo', 'click.echo', (['r.text'], {}), '(r.text)\n', (2084, 2092), False, 'import click, requests, sys\n'), ((2378, 2400), 'requests.post', 'requests.post', (['url', '{}'], {}), '(url, {})\n', (2391, 2400), False, 'import click, requests, sys\n'), ((2406, 2424), 'click.echo', 'click.echo', (['r.text'], {}), '(r.text)\n', (2416, 2424), False, 'import click, requests, sys\n'), ((2560, 2577), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2572, 2577), False, 'import click, requests, sys\n'), ((2583, 2601), 'click.echo', 'click.echo', (['r.text'], {}), '(r.text)\n', (2593, 2601), False, 'import click, requests, sys\n'), ((982, 1041), 'click.echo', 'click.echo', (['"""The key value pair was successfully inserted!"""'], {}), "('The key value pair was successfully inserted!')\n", (992, 1041), 
False, 'import click, requests, sys\n'), ((1060, 1129), 'click.echo', 'click.echo', (['"""Something went wrong with inserting the key-value pair."""'], {}), "('Something went wrong with inserting the key-value pair.')\n", (1070, 1129), False, 'import click, requests, sys\n')] |
import networkx as nx
class _CentralityMetrics:
    """Compute one centrality metric over a (weighted) networkx graph.

    ``metrics`` selects among 'degree_centrality', 'betweenness_centrality',
    'closeness_centrality' and 'eigenvector_centrality'.
    """
    def __init__(self, G, metrics):
        self.G = G
        self.metrics = metrics

    def _compute_metrics(self):
        """Dispatch to the selected metric.

        Fix: the original left ``c`` unbound (UnboundLocalError) for an
        unknown metric name; now raises an explicit ValueError.
        """
        dispatch = {
            'degree_centrality': self.degree_centrality,
            'betweenness_centrality': self.betweenness_centrality,
            'closeness_centrality': self.closeness_centrality,
            'eigenvector_centrality': self.bonachi_eigenvector_centrality,
        }
        try:
            compute = dispatch[self.metrics]
        except KeyError:
            raise ValueError("unknown centrality metrics: {!r}".format(self.metrics))
        return compute()

    def degree_centrality(self):
        # Fix: nx.degree_centrality takes no 'weight' keyword; passing one
        # raised TypeError.
        return nx.degree_centrality(self.G)

    def betweenness_centrality(self):
        return nx.betweenness_centrality(self.G, weight='weight')

    def closeness_centrality(self):
        # Fix: closeness_centrality names its edge-weight parameter
        # 'distance', not 'weight'.
        return nx.closeness_centrality(self.G, distance='weight')

    def bonachi_eigenvector_centrality(self):
        return nx.eigenvector_centrality(self.G, weight='weight')
| [
"networkx.closeness_centrality",
"networkx.betweenness_centrality",
"networkx.eigenvector_centrality",
"networkx.degree_centrality"
] | [((699, 744), 'networkx.degree_centrality', 'nx.degree_centrality', (['self.G'], {'weight': '"""weight"""'}), "(self.G, weight='weight')\n", (719, 744), True, 'import networkx as nx\n'), ((839, 889), 'networkx.betweenness_centrality', 'nx.betweenness_centrality', (['self.G'], {'weight': '"""weight"""'}), "(self.G, weight='weight')\n", (864, 889), True, 'import networkx as nx\n'), ((982, 1030), 'networkx.closeness_centrality', 'nx.closeness_centrality', (['self.G'], {'weight': '"""weight"""'}), "(self.G, weight='weight')\n", (1005, 1030), True, 'import networkx as nx\n'), ((1133, 1183), 'networkx.eigenvector_centrality', 'nx.eigenvector_centrality', (['self.G'], {'weight': '"""weight"""'}), "(self.G, weight='weight')\n", (1158, 1183), True, 'import networkx as nx\n')] |
###############################################################################
# Copyright (C) 2016 <NAME>
# This is part of <NAME>'s PoDoCo project.
#
# This file is licensed under the MIT License.
###############################################################################
"""
Particle filters for tracking the incoming traffic intensity.
See, the files script_test_poisson_1.py and script_test_negbin.py for
usage.
"""
import numpy as np
import resampling # resampling (c) <NAME> Jr (MIT License)
from scipy.special import gammaln
def pf_init(Nrep, params):
    """Replicate MCMC parameter samples ``Nrep`` times and draw initial states.

    Mutates ``params`` in place (every entry is tiled Nrep times) and returns
    the initial particle states ``x`` drawn from the AR(1) stationary
    distribution, the tiled ``params`` and uniform weights ``W``.
    """
    for key in list(params):
        params[key] = np.tile(params[key], Nrep)
    n_particles = params['A_x'].shape[0]
    W = np.full(n_particles, 1.0 / n_particles)
    # Stationary std of the AR(1) state: sqrtQ / sqrt(1 - A^2).
    stationary_sd = params['sqrtQ_x'] / np.sqrt(1.0 - params['A_x'] ** 2)
    x = np.random.normal(params['base'][0, :], stationary_sd)
    return x, params, W
def pf_update_poisson(y, x, params, W):
    """Reweight particles with the Poisson likelihood of observation ``y``.

    Each particle's log-intensity is ``x`` (Poisson rate exp(x)); weights are
    multiplied by the likelihood and renormalized.
    Fix: the original computed ``y * np.log(np.exp(x))``, which is just
    ``y * x`` but wastes work and overflows for large ``x``.
    """
    logW = np.log(W) + y * x - np.exp(x)
    # Subtract the max before exponentiating for numerical stability.
    W = np.exp(logW - np.max(logW))
    W = W / sum(W)
    return params, W
def pf_step_poisson(y, x, params, W, resample=True):
    """Advance the Poisson particle filter by one measurement.

    Optionally resamples the particles (residual resampling), propagates them
    through the AR(1) prior, drops the consumed baseline row and reweights
    with ``pf_update_poisson``.
    """
    n_particles = W.shape[0]
    if resample:
        keep = resampling.residual_resample(W)
        x = x[keep]
        params['A_x'] = params['A_x'][keep]
        params['sqrtQ_x'] = params['sqrtQ_x'][keep]
        params['base'] = params['base'][:, keep]
        W = np.repeat(1 / n_particles, n_particles)
    # AR(1) propagation around the (time-varying) baseline.
    prior_mean = params['base'][1, :] + params['A_x'] * (x - params['base'][0, :])
    x = np.random.normal(prior_mean, params['sqrtQ_x'])
    params = trim_base(params)
    params, W = pf_update_poisson(y, x, params, W)
    return x, params, W
def predict_mean(x, params, W):
    """Weighted expectation of the next observation (log-normal mean per particle)."""
    log_mean = (params['base'][1, :]
                + params['A_x'] * (x - params['base'][0, :])
                + 0.5 * params['sqrtQ_x'] ** 2)
    return np.sum(W * np.exp(log_mean))
def trim_base(params):
    """Discard the first (already consumed) baseline row in place and return params."""
    params["base"] = params["base"][1:, :]
    return params
def pf_update_negbin(y, x, params, W):
    """Reweight particles with the negative-binomial likelihood of ``y``.

    Parameterization: mean exp(x) and overdispersion omega > 1, so the shape
    parameter is phi = exp(x) / (omega - 1) and the success probability is
    (omega - 1) / omega.  The constant -gammaln(y + 1) term is omitted since
    it cancels in the normalization.
    Fix: the original dropped the log of the incoming weights ``W``, so prior
    weights were ignored -- inconsistent with ``pf_update_poisson``; harmless
    right after resampling (uniform W) but wrong when ``resample=False``.
    """
    phi = np.exp(x) / (params['omega'] - 1)
    logW = (np.log(W) + gammaln(y + phi) - gammaln(phi) +
            y * (np.log(params['omega'] - 1) - np.log(params['omega'])) -
            phi * (np.log(params['omega'])))
    # Subtract the max before exponentiating for numerical stability.
    W = np.exp(logW - np.max(logW))
    W = W / sum(W)
    return params, W
def pf_step_negbin(y, x, params, W, resample=True):
    """Advance the negative-binomial particle filter by one measurement.

    Optionally resamples the particles (residual resampling), propagates them
    through the AR(1) prior, drops the consumed baseline row and reweights
    with ``pf_update_negbin``.
    """
    n_particles = W.shape[0]
    if resample:
        keep = resampling.residual_resample(W)
        x = x[keep]
        params['A_x'] = params['A_x'][keep]
        params['sqrtQ_x'] = params['sqrtQ_x'][keep]
        params['omega'] = params['omega'][keep]
        params['base'] = params['base'][:, keep]
        W = np.repeat(1 / n_particles, n_particles)
    # AR(1) propagation around the (time-varying) baseline.
    prior_mean = params['base'][1, :] + params['A_x'] * (x - params['base'][0, :])
    x = np.random.normal(prior_mean, params['sqrtQ_x'])
    params = trim_base(params)
    params, W = pf_update_negbin(y, x, params, W)
    return x, params, W
| [
"numpy.random.normal",
"numpy.tile",
"numpy.repeat",
"numpy.sqrt",
"numpy.log",
"numpy.max",
"numpy.exp",
"scipy.special.gammaln",
"resampling.residual_resample"
] | [((755, 774), 'numpy.repeat', 'np.repeat', (['(1 / N)', 'N'], {}), '(1 / N, N)\n', (764, 774), True, 'import numpy as np\n'), ((1700, 1807), 'numpy.random.normal', 'np.random.normal', (["(params['base'][1, :] + params['A_x'] * (x - params['base'][0, :]))", "params['sqrtQ_x']"], {}), "(params['base'][1, :] + params['A_x'] * (x - params['base']\n [0, :]), params['sqrtQ_x'])\n", (1716, 1807), True, 'import numpy as np\n'), ((3303, 3410), 'numpy.random.normal', 'np.random.normal', (["(params['base'][1, :] + params['A_x'] * (x - params['base'][0, :]))", "params['sqrtQ_x']"], {}), "(params['base'][1, :] + params['A_x'] * (x - params['base']\n [0, :]), params['sqrtQ_x'])\n", (3319, 3410), True, 'import numpy as np\n'), ((688, 714), 'numpy.tile', 'np.tile', (['params[key]', 'Nrep'], {}), '(params[key], Nrep)\n', (695, 714), True, 'import numpy as np\n'), ((1062, 1071), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1068, 1071), True, 'import numpy as np\n'), ((1468, 1499), 'resampling.residual_resample', 'resampling.residual_resample', (['W'], {}), '(W)\n', (1496, 1499), False, 'import resampling\n'), ((1673, 1692), 'numpy.repeat', 'np.repeat', (['(1 / N)', 'N'], {}), '(1 / N, N)\n', (1682, 1692), True, 'import numpy as np\n'), ((2429, 2438), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2435, 2438), True, 'import numpy as np\n'), ((3024, 3055), 'resampling.residual_resample', 'resampling.residual_resample', (['W'], {}), '(W)\n', (3052, 3055), False, 'import resampling\n'), ((3276, 3295), 'numpy.repeat', 'np.repeat', (['(1 / N)', 'N'], {}), '(1 / N, N)\n', (3285, 3295), True, 'import numpy as np\n'), ((866, 897), 'numpy.sqrt', 'np.sqrt', (["(1 - params['A_x'] ** 2)"], {}), "(1 - params['A_x'] ** 2)\n", (873, 897), True, 'import numpy as np\n'), ((1026, 1035), 'numpy.log', 'np.log', (['W'], {}), '(W)\n', (1032, 1035), True, 'import numpy as np\n'), ((1094, 1106), 'numpy.max', 'np.max', (['logW'], {}), '(logW)\n', (1100, 1106), True, 'import numpy as np\n'), ((2065, 
2174), 'numpy.exp', 'np.exp', (["(params['base'][1, :] + params['A_x'] * (x - params['base'][0, :]) + 0.5 * \n params['sqrtQ_x'] ** 2)"], {}), "(params['base'][1, :] + params['A_x'] * (x - params['base'][0, :]) + \n 0.5 * params['sqrtQ_x'] ** 2)\n", (2071, 2174), True, 'import numpy as np\n'), ((2603, 2626), 'numpy.log', 'np.log', (["params['omega']"], {}), "(params['omega'])\n", (2609, 2626), True, 'import numpy as np\n'), ((2652, 2664), 'numpy.max', 'np.max', (['logW'], {}), '(logW)\n', (2658, 2664), True, 'import numpy as np\n'), ((2476, 2492), 'scipy.special.gammaln', 'gammaln', (['(y + phi)'], {}), '(y + phi)\n', (2483, 2492), False, 'from scipy.special import gammaln\n'), ((2495, 2507), 'scipy.special.gammaln', 'gammaln', (['phi'], {}), '(phi)\n', (2502, 2507), False, 'from scipy.special import gammaln\n'), ((1049, 1058), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1055, 1058), True, 'import numpy as np\n'), ((2527, 2554), 'numpy.log', 'np.log', (["(params['omega'] - 1)"], {}), "(params['omega'] - 1)\n", (2533, 2554), True, 'import numpy as np\n'), ((2557, 2580), 'numpy.log', 'np.log', (["params['omega']"], {}), "(params['omega'])\n", (2563, 2580), True, 'import numpy as np\n')] |
from inspect import cleandoc
from textwrap import indent
from lark import Lark, Transformer
from lark.exceptions import LarkError
from .interval import Interval
from . import errors
def parse(ctx, fname):
    """Parse the script file *fname* and apply each statement to *ctx*.

    The Lark grammar is loaded from the implementation file
    ``script.lark``; any Lark failure is re-raised as an
    ``errors.ParserException`` carrying an indented error report.
    """
    with open(fname, "r") as f:
        text = f.read()
    try:
        # BUG FIX: the grammar file handle was previously opened inline and
        # never closed; use a context manager so it is released promptly.
        with open(ctx.implfile("script.lark")) as g:
            grammar = g.read()
        parser = Lark(grammar, parser='lalr', debug=True)
        tree = parser.parse(text)
        ScriptTransformer(ctx).transform(tree)
    except LarkError as e:
        etext = indent(str(e), "    ")
        raise errors.ParserException("Error parsing '{}:\n{}'".format(fname, etext))
class ScriptTransformer(Transformer):
    """Apply a parsed disassembler script to *ctx*.

    One method per grammar rule: leaf rules return ``(key, value)`` pairs;
    statement rules gather them via ``dict(t)`` and forward them as keyword
    arguments to the relevant ``ctx`` parse hook.
    """
    def __init__(self, ctx):
        self.ctx = ctx
    # -- decoder entries ---------------------------------------------------
    def decent(self, t):
        def handle(decsrc, decdst):
            self.ctx.parse_decoderentry(decsrc, decdst)
        handle(**dict(t))
    def decsrc(self, t):
        return ("decsrc", str(t[0]))
    def decdst(self, t):
        return ("decdst", str(t[0]))
    # -- global options / data source --------------------------------------
    def options(self, t):
        self.ctx.parse_options(t[0][1])
    def datasource(self, t):
        self.ctx.parse_datasource(t[0][1])
    # -- memory map --------------------------------------------------------
    def memmap(self, t):
        # NOTE: the {} default is never mutated here, only passed through.
        def handle(self, range, mmdecoder, properties={}, mmdataaddr=None):
            self.ctx.memtype.parse_add(range, self.ctx.decoder(mmdecoder), properties, mmdataaddr)
        self.ctx.memtype.parse_begin()
        for e in t[0]:
            handle(self, **dict(e))
        self.ctx.memtype.parse_end()
    def mmbody(self, t):
        return t
    def mmentry(self, t):
        return t
    def mmdecoder(self, t):
        return ("mmdecoder", str(t[0]))
    def mmdataaddr(self, t):
        return ("mmdataaddr", t[0])
    def mmfromreset(self, t):
        # BUG FIX: the first parameter was misspelled "selt". Harmless for a
        # positional argument, but corrected for consistency.
        return -1
    # -- labels ------------------------------------------------------------
    def label(self, t):
        def handle(self, range, lname, lflags=""):
            self.ctx.syms.parse_add(self.ctx, range, lname, 'i' in lflags)
        handle(self, **dict(t))
    def lflags(self, t):
        return ("lflags", str(t[0]))
    def lname(self, t):
        return ("lname", str(t[0]))
    # -- directives --------------------------------------------------------
    def directive(self, t):
        def handle(daddress, dcommand, doaddress=None, dosymbol=None):
            self.ctx.directives.parse_add(daddress, dcommand, doaddress, dosymbol)
        handle(**dict(t))
    def daddress(self, t):
        return ("daddress", t[0])
    def dcommand(self, t):
        return ("dcommand", str(t[0]))
    def doaddress(self, t):
        return ("doaddress", t[0])
    def dosymbol(self, t):
        return ('dosymbol', str(t[0]))
    # -- comments ----------------------------------------------------------
    def comment(self, t):
        # cpos selects placement: '^' before, 'v' after, '>' inline.
        def handle(self, caddress, ctext, cpos="^"):
            if not ctext:
                ctext = "\n"
            if cpos=='^':
                self.ctx.cmts.add_before(caddress, ctext)
            elif cpos=='v':
                self.ctx.cmts.add_after(caddress, ctext)
            elif cpos=='>':
                self.ctx.cmts.add_inline(caddress, ctext)
        handle(self, **dict(t))
    def caddress(self, t):
        return ("caddress", t[0])
    def cpos(self, t):
        return ("cpos", str(t[0]))
    def ctext(self, t):
        return ('ctext', str(t[0]))
    # -- generic value rules -----------------------------------------------
    def properties(self, t):
        return ("properties", {str(i[0]) : i[1] for i in t})
    def propentry(self, t):
        return t
    def hexnum(self, t):
        return int(t[0][1:], 16)
    def decimal(self, t):
        return int(t[0])
    def boolean(self, t):
        # NOTE(review): bool() of a non-empty token string is always True, so
        # a literal "false" token would also map to True — verify the grammar
        # never produces one before relying on this.
        return bool(t[0])
    def list(self, t):
        return list(t)
    def name(self, t):
        return str(t[0])
    def quoted(self, t):
        return t[0][1:-1]
    def tquoted(self, t):
        return cleandoc(t[0][3:-3])
    def range(self, t):
        ivl = Interval(int(t[0]), int(t[1])) if len(t)==2 else Interval(int(t[0]))
        return ("range", ivl)
| [
"inspect.cleandoc"
] | [((3644, 3664), 'inspect.cleandoc', 'cleandoc', (['t[0][3:-3]'], {}), '(t[0][3:-3])\n', (3652, 3664), False, 'from inspect import cleandoc\n')] |
from template import Template
from template.test import TestCase, main
class Stringy:
    """Tiny helper object whose ``str()`` yields the text it wraps."""

    def __init__(self, text):
        self.text = text

    def asString(self):
        """Return the wrapped text."""
        return self.text

    # str() delegates to the same accessor.
    __str__ = asString
class TextTest(TestCase):
    """Run the DATA test script against plain and interpolating templates."""

    def testText(self):
        engines = (
            ("basic", Template()),
            ("interp", Template({"INTERPOLATE": 1})),
        )
        params = self._callsign()
        params.update({
            "ref": lambda obj: "%s[%s]" % (obj, obj.__class__.__name__),
            "sfoo": Stringy("foo"),
            "sbar": Stringy("bar"),
        })
        self.Expect(DATA, engines, params)
DATA = r"""
-- test --
This is a text block "hello" 'hello' 1/3 1\4 <html> </html>
$ @ { } @{ } ${ } # ~ ' ! % *foo
$a ${b} $c
-- expect --
This is a text block "hello" 'hello' 1/3 1\4 <html> </html>
$ @ { } @{ } ${ } # ~ ' ! % *foo
$a ${b} $c
-- test --
<table width=50%>©
-- expect --
<table width=50%>©
-- test --
[% foo = 'Hello World' -%]
start
[%
#
# [% foo %]
#
#
-%]
end
-- expect --
start
end
-- test --
pre
[%
# [% PROCESS foo %]
-%]
mid
[% BLOCK foo; "This is foo"; END %]
-- expect --
pre
mid
-- test --
-- use interp --
This is a text block "hello" 'hello' 1/3 1\4 <html> </html>
\$ @ { } @{ } \${ } # ~ ' ! % *foo
$a ${b} $c
-- expect --
This is a text block "hello" 'hello' 1/3 1\4 <html> </html>
$ @ { } @{ } ${ } # ~ ' ! % *foo
alpha bravo charlie
-- test --
<table width=50%>©
-- expect --
<table width=50%>©
-- test --
[% foo = 'Hello World' -%]
start
[%
#
# [% foo %]
#
#
-%]
end
-- expect --
start
end
-- test --
pre
[%
#
# [% PROCESS foo %]
#
-%]
mid
[% BLOCK foo; "This is foo"; END %]
-- expect --
pre
mid
-- test --
[% a = "C'est un test"; a %]
-- expect --
C'est un test
-- test --
[% META title = "C'est un test" -%]
[% component.title -%]
-- expect --
C'est un test
-- test --
[% META title = 'C\'est un autre test' -%]
[% component.title -%]
-- expect --
C'est un autre test
-- test --
[% META title = "C'est un \"test\"" -%]
[% component.title -%]
-- expect --
C'est un "test"
-- test --
[% sfoo %]/[% sbar %]
-- expect --
foo/bar
-- test --
[% s1 = "$sfoo"
s2 = "$sbar ";
s3 = sfoo;
ref(s1);
'/';
ref(s2);
'/';
ref(s3);
-%]
-- expect --
foo[str]/bar [str]/foo[Stringy]
"""
| [
"template.Template"
] | [((273, 283), 'template.Template', 'Template', ([], {}), '()\n', (281, 283), False, 'from template import Template\n'), ((307, 335), 'template.Template', 'Template', (["{'INTERPOLATE': 1}"], {}), "({'INTERPOLATE': 1})\n", (315, 335), False, 'from template import Template\n')] |
# Copyright (c) 2021 <NAME>
import math
from pprint import pprint
import matplotlib.pyplot as plt
from frispy import Disc
from frispy import Discs
# Simulate a 70 mph throw of a "Destroyer" disc model and plot its flight.
model = Discs.destroyer
mph_to_mps = 0.44704  # conversion factor: miles-per-hour -> metres-per-second
v = 70 * mph_to_mps
# Spin rate; negative sign flips spin direction. The 1.2 factor scaling
# v/diameter looks empirical — TODO confirm against frispy conventions.
rot = -v / model.diameter * 1.2
x0 = [6, -3, 25]  # presumably launch angle, nose-up and hyzer in degrees — verify
a, nose_up, hyzer = x0
# Initial conditions: velocity split into horizontal/vertical components
# from the launch angle `a`.
disc = Disc(model, {"vx": math.cos(a * math.pi / 180) * v, "dgamma": rot, "vz": math.sin(a * math.pi / 180) * v,
             "nose_up": nose_up, "hyzer": hyzer, "gamma": -2})
# Integrate the flight for up to 20 s with a 0.2 s max solver step.
result = disc.compute_trajectory(20.0, **{"max_step": .2})
times = result.times
t, x, y, z = result.times, result.x, result.y, result.z
# Plot attitude and the two lateral/vertical profiles against distance x.
plt.plot(x, result.theta)
plt.plot(x, y)
plt.plot(x, z)
#plt.plot(t, x)
#plt.plot(t, y)
#plt.plot(t, z)
pprint(x[-1] * 3.28084) # feet
plt.show()
| [
"matplotlib.pyplot.plot",
"math.cos",
"math.sin",
"pprint.pprint",
"matplotlib.pyplot.show"
] | [((606, 631), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'result.theta'], {}), '(x, result.theta)\n', (614, 631), True, 'import matplotlib.pyplot as plt\n'), ((632, 646), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (640, 646), True, 'import matplotlib.pyplot as plt\n'), ((647, 661), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'z'], {}), '(x, z)\n', (655, 661), True, 'import matplotlib.pyplot as plt\n'), ((712, 735), 'pprint.pprint', 'pprint', (['(x[-1] * 3.28084)'], {}), '(x[-1] * 3.28084)\n', (718, 735), False, 'from pprint import pprint\n'), ((744, 754), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (752, 754), True, 'import matplotlib.pyplot as plt\n'), ((314, 341), 'math.cos', 'math.cos', (['(a * math.pi / 180)'], {}), '(a * math.pi / 180)\n', (322, 341), False, 'import math\n'), ((368, 395), 'math.sin', 'math.sin', (['(a * math.pi / 180)'], {}), '(a * math.pi / 180)\n', (376, 395), False, 'import math\n')] |
#
# This file is part of LiteX.
#
# Copyright (c) 2018-2020 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.build.io import DifferentialInput
from litex.soc.interconnect.csr import *
from litex.soc.cores.clock.common import *
# Xilinx / Generic ---------------------------------------------------------------------------------
class XilinxClocking(Module, AutoCSR):
    """Base class for Xilinx PLL/MMCM wrappers.

    Collects requested output clocks and searches for a divider/multiplier
    configuration whose VCO frequency stays within the part's range while
    every requested output frequency is met within its margin.
    """
    clkfbout_mult_frange = (2, 64+1)
    clkout_divide_range = (1, 128+1)
    def __init__(self, vco_margin=0):
        # vco_margin shrinks the usable VCO range on both ends (fraction).
        self.vco_margin = vco_margin
        self.reset = Signal()
        self.power_down = Signal()
        self.locked = Signal()
        self.clkin_freq = None
        self.vcxo_freq = None
        self.nclkouts = 0
        # n -> (clkout signal, freq, phase, margin), filled by create_clkout().
        self.clkouts = {}
        self.config = {}
        self.params = {}
    def register_clkin(self, clkin, freq):
        """Declare the input clock (plain/Clock Signal or diff-pair Record) and its frequency."""
        self.clkin = Signal()
        if isinstance(clkin, (Signal, ClockSignal)):
            self.comb += self.clkin.eq(clkin)
        elif isinstance(clkin, Record):
            # Differential pair: instantiate an input buffer.
            self.specials += DifferentialInput(clkin.p, clkin.n, self.clkin)
        else:
            raise ValueError
        self.clkin_freq = freq
        register_clkin_log(self.logger, clkin, freq)
    def create_clkout(self, cd, freq, phase=0, buf="bufg", margin=1e-2, with_reset=True, ce=None):
        """Request an output clock driving clock domain *cd* at *freq* Hz.

        *buf* selects the Xilinx clock buffer primitive (or None for no
        buffer); *margin* is the acceptable relative frequency error.
        """
        assert self.nclkouts < self.nclkouts_max
        clkout = Signal()
        self.clkouts[self.nclkouts] = (clkout, freq, phase, margin)
        if with_reset:
            # Hold the domain in reset until the PLL reports lock.
            self.specials += AsyncResetSynchronizer(cd, ~self.locked)
        if buf is None:
            self.comb += cd.clk.eq(clkout)
        else:
            clkout_buf = Signal()
            self.comb += cd.clk.eq(clkout_buf)
            if buf == "bufg":
                self.specials += Instance("BUFG", i_I=clkout, o_O=clkout_buf)
            elif buf == "bufr":
                self.specials += Instance("BUFR", i_I=clkout, o_O=clkout_buf)
            elif buf == "bufgce":
                if ce is None:
                    raise ValueError("BUFGCE requires user to provide a clock enable ce Signal")
                self.specials += Instance("BUFGCE", i_I=clkout, o_O=clkout_buf, i_CE=ce)
            elif buf == "bufio":
                self.specials += Instance("BUFIO", i_I=clkout, o_O=clkout_buf)
            else:
                raise ValueError("Unsupported clock buffer: {}".format(buf))
        create_clkout_log(self.logger, cd.name, freq, margin, self.nclkouts)
        self.nclkouts += 1
    def compute_config(self):
        """Brute-force search for divclk/clkfbout/clkout divider settings.

        Returns the first configuration (dict) satisfying all requested
        outputs; raises ValueError when no combination works.
        """
        config = {}
        for divclk_divide in range(*self.divclk_divide_range):
            config["divclk_divide"] = divclk_divide
            # Prefer high multipliers (iterated in reverse) -> higher VCO.
            for clkfbout_mult in reversed(range(*self.clkfbout_mult_frange)):
                all_valid = True
                vco_freq = self.clkin_freq*clkfbout_mult/divclk_divide
                (vco_freq_min, vco_freq_max) = self.vco_freq_range
                if (vco_freq >= vco_freq_min*(1 + self.vco_margin) and
                    vco_freq <= vco_freq_max*(1 - self.vco_margin)):
                    for n, (clk, f, p, m) in sorted(self.clkouts.items()):
                        valid = False
                        # A subclass may narrow the divide range per output.
                        d_ranges = [self.clkout_divide_range]
                        if getattr(self, "clkout{}_divide_range".format(n), None) is not None:
                            d_ranges += [getattr(self, "clkout{}_divide_range".format(n))]
                        for d_range in d_ranges:
                            for d in clkdiv_range(*d_range):
                                clk_freq = vco_freq/d
                                if abs(clk_freq - f) <= f*m:
                                    config["clkout{}_freq".format(n)] = clk_freq
                                    config["clkout{}_divide".format(n)] = d
                                    config["clkout{}_phase".format(n)] = p
                                    valid = True
                                    break
                            if valid:
                                break
                        if not valid:
                            all_valid = False
                else:
                    all_valid = False
                if all_valid:
                    config["vco"] = vco_freq
                    config["clkfbout_mult"] = clkfbout_mult
                    compute_config_log(self.logger, config)
                    return config
        raise ValueError("No PLL config found")
    def expose_drp(self):
        """Expose the primitive's Dynamic Reconfiguration Port via CSRs."""
        self.drp_reset = CSR()
        self.drp_locked = CSRStatus()
        self.drp_read = CSR()
        self.drp_write = CSR()
        self.drp_drdy = CSRStatus()
        self.drp_adr = CSRStorage(7, reset_less=True)
        self.drp_dat_w = CSRStorage(16, reset_less=True)
        self.drp_dat_r = CSRStatus(16)
        # # #
        den_pipe = Signal()
        dwe_pipe = Signal()
        drp_drdy = Signal()
        self.params.update(
            i_DCLK = ClockSignal(),
            i_DWE = dwe_pipe,
            i_DEN = den_pipe,
            o_DRDY = drp_drdy,
            i_DADDR = self.drp_adr.storage,
            i_DI = self.drp_dat_w.storage,
            o_DO = self.drp_dat_r.status
        )
        self.sync += [
            # Strobe enable/write for one cycle; latch DRDY until the next
            # access clears it.
            den_pipe.eq(self.drp_read.re | self.drp_write.re),
            dwe_pipe.eq(self.drp_write.re),
            If(self.drp_read.re | self.drp_write.re,
                self.drp_drdy.status.eq(0)
            ).Elif(drp_drdy,
                self.drp_drdy.status.eq(1)
            )
        ]
        self.comb += self.drp_locked.status.eq(self.locked)
        self.logger.info("Exposing DRP interface.")
    def add_reset_delay(self, cycles):
        # Chain FDCE registers to delay the reset by *cycles* clkin cycles.
        for i in range(cycles):
            reset = Signal()
            self.specials += Instance("FDCE", i_C=self.clkin, i_CE=1, i_CLR=0, i_D=self.reset, o_Q=reset)
            self.reset = reset
    def do_finalize(self):
        assert hasattr(self, "clkin")
        self.add_reset_delay(cycles=8) # Prevents interlock when reset driven from sys_clk.
| [
"migen.genlib.resetsync.AsyncResetSynchronizer",
"litex.build.io.DifferentialInput"
] | [((1616, 1656), 'migen.genlib.resetsync.AsyncResetSynchronizer', 'AsyncResetSynchronizer', (['cd', '(~self.locked)'], {}), '(cd, ~self.locked)\n', (1638, 1656), False, 'from migen.genlib.resetsync import AsyncResetSynchronizer\n'), ((1146, 1193), 'litex.build.io.DifferentialInput', 'DifferentialInput', (['clkin.p', 'clkin.n', 'self.clkin'], {}), '(clkin.p, clkin.n, self.clkin)\n', (1163, 1193), False, 'from litex.build.io import DifferentialInput\n')] |
__all__ = (
'des_encrypt',
'des_decrypt',
)
from typing import List, Any
from ._tools import *
from ._constant import *
from rich import print
from rich.text import Text
from rich.panel import Panel
def keygen(key: bytes) -> List[List[int]]:
    """Derive the 16 DES round keys from *key*, printing each step.

    The key is converted to a bit list, permuted through PC-1, split in
    half, and each round rotates both halves (per ROTATION) before PC-2
    compresses them into that round's 48-bit key.
    """
    res = []
    key = bytes_to_binlist(key)
    key = permute_with(key, table=PC_1)
    print(f'[red b]Key after PC-1[/]: {binlist_to_str(key, 7)}\n')
    c, d = split_half(key)
    for i in range(16):
        print(f'[white b u]Round {i}[/]')
        # lshift rotates the halves in place by the round-specific amount.
        lshift(c, ROTATION[i])
        lshift(d, ROTATION[i])
        print(f'[red b]Rotated key[/]: {binlist_to_str(c, 7)} | {binlist_to_str(d, 7)}')
        key = permute_with(c + d, table=PC_2)
        print(f'[red b]Key after PC-2[/]: {binlist_to_str(key, 6)}')
        res.append(key)
    print('')
    return res
def f_func(x: List[int], *, key: List[int]) -> List[int]:
    """DES round function: expand, mix with the round key, substitute, permute."""
    expanded = permute_with(x, table=E)
    print(f'[red b]r[/] (Permutated): {binlist_to_str(expanded)}')
    mixed = binlist_xor(expanded, key)
    print(f'[red b]r xor key[/]: {binlist_to_str(mixed)}')
    substituted: List[int] = []
    # Each 6-bit chunk indexes its own S-box, yielding 4 output bits.
    for box, chunk in enumerate(split_every(mixed, 6)):
        substituted.extend(num_to_binlist(S[box][binlist_to_num(chunk)], length=4))
    return permute_with(substituted, table=P)
def des_encrypt_core(x: List[int], keys: List[List[int]]) -> List[int]:
    """Run the DES Feistel network on bit list *x* with the given round keys.

    Performs the initial permutation, 16 Feistel rounds, the final swap and
    the inverse initial permutation (FP), printing every intermediate step.
    Decryption uses the same core with the round keys reversed.
    """
    print('')
    print(Panel(Text('Stage 2. Initial Permutation', 'green bold', justify='center')))
    print(f'Plaintext = {binlist_to_str(x)}')
    x = permute_with(x, table=IP)
    print(Text('↓ After IP: ↓', justify='center'))
    print(f'Plaintext = {binlist_to_str(x)}\n')
    l, r = split_half(x)
    print(Panel(Text('Stage 3. Feistel structure', 'green bold', justify='center')))
    for i in range(16):
        print(f'[white b u]Round {i}[/]')
        print(f'[red b]l[/] = {binlist_to_str(l)}')
        print(f'[red b]r[/] = {binlist_to_str(r)}')
        # Feistel step: new r = l xor f(r, k); new l = old r.
        r_new = binlist_xor(l, f_func(r, key=keys[i]))
        l_new = r
        l, r = l_new, r_new
        print(f'[red b]Encrypted:[/] {binlist_to_str(l)} {binlist_to_str(r)}\n')
    print(Panel(Text('Stage 4. Swap and Reverse IP', 'green bold', justify='center')))
    # Final swap of the halves before the inverse permutation.
    l, r = r, l
    print(f'[red b]Swaped ciphertext[/]: {binlist_to_str(l)} {binlist_to_str(r)}')
    after_fp = permute_with(l + r, table=FP)
    print(f'[red b]After FP[/]: {binlist_to_str(after_fp)}\n')
    return after_fp
def des_encrypt(x: bytes, key: bytes) -> List[int]:
    """Encrypt plaintext bytes *x* under *key*, returning the ciphertext bit list.

    Generates the 16 round keys, then runs the Feistel core; progress is
    printed with rich markup throughout.
    """
    x = bytes_to_binlist(x)
    print(f'[red b]Plaintext:[/] {binlist_to_str(x)}')
    print(f'[red b]Key:[/] {binlist_to_str(bytes_to_binlist(key))}\n')
    print('')
    print(Panel(Text('Stage 1. Generate keys', 'green bold', justify='center')))
    keys = keygen(key)
    ciphertext = des_encrypt_core(x, keys)
    print('[white]Finally we got our ciphertext:[/]')
    print(binlist_to_str(ciphertext))
    return ciphertext
def des_decrypt(x: bytes, key: bytes) -> List[int]:
    """Decrypt by running the Feistel network with the round keys reversed."""
    round_keys = keygen(key)
    round_keys.reverse()
    return des_encrypt_core(bytes_to_binlist(x), round_keys)
| [
"rich.print",
"rich.text.Text"
] | [((1332, 1341), 'rich.print', 'print', (['""""""'], {}), "('')\n", (1337, 1341), False, 'from rich import print\n'), ((2603, 2612), 'rich.print', 'print', (['""""""'], {}), "('')\n", (2608, 2612), False, 'from rich import print\n'), ((2766, 2815), 'rich.print', 'print', (['"""[white]Finally we got our ciphertext:[/]"""'], {}), "('[white]Finally we got our ciphertext:[/]')\n", (2771, 2815), False, 'from rich import print\n'), ((466, 499), 'rich.print', 'print', (['f"""[white b u]Round {i}[/]"""'], {}), "(f'[white b u]Round {i}[/]')\n", (471, 499), False, 'from rich import print\n'), ((798, 807), 'rich.print', 'print', (['""""""'], {}), "('')\n", (803, 807), False, 'from rich import print\n'), ((1519, 1558), 'rich.text.Text', 'Text', (['"""↓ After IP: ↓"""'], {'justify': '"""center"""'}), "('↓ After IP: ↓', justify='center')\n", (1523, 1558), False, 'from rich.text import Text\n'), ((1751, 1784), 'rich.print', 'print', (['f"""[white b u]Round {i}[/]"""'], {}), "(f'[white b u]Round {i}[/]')\n", (1756, 1784), False, 'from rich import print\n'), ((1358, 1426), 'rich.text.Text', 'Text', (['"""Stage 2. Initial Permutation"""', '"""green bold"""'], {'justify': '"""center"""'}), "('Stage 2. Initial Permutation', 'green bold', justify='center')\n", (1362, 1426), False, 'from rich.text import Text\n'), ((1650, 1716), 'rich.text.Text', 'Text', (['"""Stage 3. Feistel structure"""', '"""green bold"""'], {'justify': '"""center"""'}), "('Stage 3. Feistel structure', 'green bold', justify='center')\n", (1654, 1716), False, 'from rich.text import Text\n'), ((2091, 2159), 'rich.text.Text', 'Text', (['"""Stage 4. Swap and Reverse IP"""', '"""green bold"""'], {'justify': '"""center"""'}), "('Stage 4. Swap and Reverse IP', 'green bold', justify='center')\n", (2095, 2159), False, 'from rich.text import Text\n'), ((2629, 2691), 'rich.text.Text', 'Text', (['"""Stage 1. Generate keys"""', '"""green bold"""'], {'justify': '"""center"""'}), "('Stage 1. 
Generate keys', 'green bold', justify='center')\n", (2633, 2691), False, 'from rich.text import Text\n')] |
from time import time
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
from singly_linkedlist.singly_linkedlist import SinglyLinkedList
# Time how long it takes to push 100k nodes onto the list head.
start = time()
linked_list = SinglyLinkedList()
for _ in range(100000):
    linked_list.insert_head(111111111111)
end = time()
# BUG FIX: the original printed start - end, which is always negative.
print("Took {0} seconds".format(end - start))
# linked_list.print_elements()
| [
"singly_linkedlist.singly_linkedlist.SinglyLinkedList",
"os.path.dirname",
"time.time"
] | [((182, 188), 'time.time', 'time', ([], {}), '()\n', (186, 188), False, 'from time import time\n'), ((203, 221), 'singly_linkedlist.singly_linkedlist.SinglyLinkedList', 'SinglyLinkedList', ([], {}), '()\n', (219, 221), False, 'from singly_linkedlist.singly_linkedlist import SinglyLinkedList\n'), ((294, 300), 'time.time', 'time', ([], {}), '()\n', (298, 300), False, 'from time import time\n'), ((72, 97), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (87, 97), False, 'import os\n')] |
import pytest
from ..iocsh import IocshRedirect, IocshSplit, split_words
# Each case pairs an iocsh command line with the expected IocshSplit.
# A bare list is shorthand for argv-only (expanded in the test body).
@pytest.mark.parametrize(
    "line, expected",
    [
        pytest.param(
            """dbLoadRecords(a, "b", "c")""",
            ["dbLoadRecords", "a", "b", "c"],
            id="basic_paren"
        ),
        pytest.param(
            """dbLoadRecords a, "b", "c\"""",
            ["dbLoadRecords", "a", "b", "c"],
            id="basic_no_paren"
        ),
        pytest.param(
            """< input_file""",
            IocshSplit(
                [],
                redirects={
                    0: IocshRedirect(fileno=0, name="input_file", mode="r"),
                },
                error=None,
            ),
            id="basic_input_redirect",
        ),
        pytest.param(
            """> output_file""",
            IocshSplit(
                [],
                redirects={
                    1: IocshRedirect(fileno=1, name="output_file", mode="w"),
                },
                error=None,
            ),
            id="basic_output_redirect",
        ),
        pytest.param(
            """< input_file > output_file""",
            IocshSplit(
                [],
                redirects={
                    0: IocshRedirect(fileno=0, name="input_file", mode="r"),
                    1: IocshRedirect(fileno=1, name="output_file", mode="w"),
                },
                error=None,
            ),
            id="input_output_redirect",
        ),
        pytest.param(
            """2> output_file""",
            IocshSplit(
                [],
                redirects={
                    2: IocshRedirect(fileno=2, name="output_file", mode="w"),
                },
                error=None,
            ),
            id="output_fd_num",
        ),
        pytest.param(
            """test > stdout 2> stderr 3> whoknows""",
            IocshSplit(
                ["test"],
                redirects={
                    1: IocshRedirect(fileno=1, name="stdout", mode="w"),
                    2: IocshRedirect(fileno=2, name="stderr", mode="w"),
                    3: IocshRedirect(fileno=3, name="whoknows", mode="w"),
                },
                error=None,
            ),
            id="output_fd_num_more",
        ),
    ]
)
def test_split_words(line, expected):
    """split_words() tokenizes iocsh lines and extracts fd redirections."""
    # Normalize the list shorthand into a full IocshSplit with no redirects.
    if isinstance(expected, list):
        expected = IocshSplit(
            argv=expected,
            redirects={},
            error=None,
        )
    assert split_words(line) == expected
| [
"pytest.param"
] | [((138, 238), 'pytest.param', 'pytest.param', (['"""dbLoadRecords(a, "b", "c")"""', "['dbLoadRecords', 'a', 'b', 'c']"], {'id': '"""basic_paren"""'}), '(\'dbLoadRecords(a, "b", "c")\', [\'dbLoadRecords\', \'a\', \'b\', \'c\'\n ], id=\'basic_paren\')\n', (150, 238), False, 'import pytest\n'), ((293, 394), 'pytest.param', 'pytest.param', (['"""dbLoadRecords a, "b", "c\\""""', "['dbLoadRecords', 'a', 'b', 'c']"], {'id': '"""basic_no_paren"""'}), '(\'dbLoadRecords a, "b", "c"\', [\'dbLoadRecords\', \'a\', \'b\', \'c\'],\n id=\'basic_no_paren\')\n', (305, 394), False, 'import pytest\n')] |
import os
import numpy as np
import pandas as pd
from Base import Train, Predict
def getTest(boolNormalize, boolDeep, boolBias, strProjectFolder):
if boolNormalize:
if boolDeep:
strOutputPath = "02-Output/" + "Deep" + "Normal"
else:
if boolBias:
strOutputPath = "02-Output/" + "Bias" + "Normal"
else:
strOutputPath = "02-Output/" + "unBias" + "Normal"
else:
if boolDeep:
strOutputPath = "02-Output/" + "Deep"
else:
if boolBias:
strOutputPath = "02-Output/" + "Bias"
else:
strOutputPath = "02-Output/" + "unBias"
strOutputPath = strOutputPath + "Test"
DataTrain = pd.read_csv(os.path.join(strProjectFolder, "01-Data/Train.csv"))
DataTest = pd.read_csv(os.path.join(strProjectFolder, "01-Data/Test.csv"))
submisson = pd.read_csv(os.path.join(strProjectFolder, "01-Data/SampleSubmisson.csv"))
DataTrain = DataTrain.sample(frac=1)
intUserSize = len(DataTrain["UserID"].drop_duplicates())
intMovieSize = len(DataTrain["MovieID"].drop_duplicates())
arrayUsers = DataTrain["UserID"].values
arrayMovies = DataTrain["MovieID"].values
arrayRate = DataTrain["Rating"].values
arrayTestUsers = DataTest["UserID"].values
arrayTestMovies = DataTest["MovieID"].values
intLatentSize = 32
if boolNormalize:
arrayRateAvg = np.mean(arrayRate)
arrayRateStd = np.std(arrayRate)
arrayRate = (arrayRate - arrayRateAvg)/arrayRateStd
Train.getTrain(arrayTrainUser=arrayUsers, arrayTrainMovie=arrayMovies, arrayTrainRate=arrayRate
, arrayValidUser=arrayUsers, arrayValidMovie=arrayMovies, arrayValidRate=arrayRate
, intUserSize=intUserSize
, intMovieSize=intMovieSize
, intLatentSize=intLatentSize
, boolBias=boolBias
, boolDeep=boolDeep
, strProjectFolder=strProjectFolder, strOutputPath=strOutputPath)
arrayPredict = Predict.makePredict(arrayTestUsers, arrayTestMovies, strProjectFolder, strOutputPath)
if boolNormalize:
arrayPredict = (arrayPredict * arrayRateStd) + arrayRateAvg
submisson["Rating"] = pd.DataFrame(arrayPredict)
submisson.to_csv(os.path.join(strProjectFolder, strOutputPath + "submission.csv"), index=False)
# Script entry point: run one normalized, biased-MF test pass relative to
# this file's directory.
if __name__ == "__main__":
    strProjectFolder = os.path.dirname(__file__)
    getTest(boolNormalize=True, boolDeep=False, boolBias=True, strProjectFolder=strProjectFolder)
"numpy.mean",
"os.path.join",
"Base.Predict.makePredict",
"os.path.dirname",
"Base.Train.getTrain",
"numpy.std",
"pandas.DataFrame"
] | [((1585, 1968), 'Base.Train.getTrain', 'Train.getTrain', ([], {'arrayTrainUser': 'arrayUsers', 'arrayTrainMovie': 'arrayMovies', 'arrayTrainRate': 'arrayRate', 'arrayValidUser': 'arrayUsers', 'arrayValidMovie': 'arrayMovies', 'arrayValidRate': 'arrayRate', 'intUserSize': 'intUserSize', 'intMovieSize': 'intMovieSize', 'intLatentSize': 'intLatentSize', 'boolBias': 'boolBias', 'boolDeep': 'boolDeep', 'strProjectFolder': 'strProjectFolder', 'strOutputPath': 'strOutputPath'}), '(arrayTrainUser=arrayUsers, arrayTrainMovie=arrayMovies,\n arrayTrainRate=arrayRate, arrayValidUser=arrayUsers, arrayValidMovie=\n arrayMovies, arrayValidRate=arrayRate, intUserSize=intUserSize,\n intMovieSize=intMovieSize, intLatentSize=intLatentSize, boolBias=\n boolBias, boolDeep=boolDeep, strProjectFolder=strProjectFolder,\n strOutputPath=strOutputPath)\n', (1599, 1968), False, 'from Base import Train, Predict\n'), ((2093, 2182), 'Base.Predict.makePredict', 'Predict.makePredict', (['arrayTestUsers', 'arrayTestMovies', 'strProjectFolder', 'strOutputPath'], {}), '(arrayTestUsers, arrayTestMovies, strProjectFolder,\n strOutputPath)\n', (2112, 2182), False, 'from Base import Train, Predict\n'), ((2298, 2324), 'pandas.DataFrame', 'pd.DataFrame', (['arrayPredict'], {}), '(arrayPredict)\n', (2310, 2324), True, 'import pandas as pd\n'), ((2478, 2503), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2493, 2503), False, 'import os\n'), ((770, 821), 'os.path.join', 'os.path.join', (['strProjectFolder', '"""01-Data/Train.csv"""'], {}), "(strProjectFolder, '01-Data/Train.csv')\n", (782, 821), False, 'import os\n'), ((850, 900), 'os.path.join', 'os.path.join', (['strProjectFolder', '"""01-Data/Test.csv"""'], {}), "(strProjectFolder, '01-Data/Test.csv')\n", (862, 900), False, 'import os\n'), ((930, 991), 'os.path.join', 'os.path.join', (['strProjectFolder', '"""01-Data/SampleSubmisson.csv"""'], {}), "(strProjectFolder, '01-Data/SampleSubmisson.csv')\n", (942, 991), False, 
'import os\n'), ((1460, 1478), 'numpy.mean', 'np.mean', (['arrayRate'], {}), '(arrayRate)\n', (1467, 1478), True, 'import numpy as np\n'), ((1502, 1519), 'numpy.std', 'np.std', (['arrayRate'], {}), '(arrayRate)\n', (1508, 1519), True, 'import numpy as np\n'), ((2346, 2410), 'os.path.join', 'os.path.join', (['strProjectFolder', "(strOutputPath + 'submission.csv')"], {}), "(strProjectFolder, strOutputPath + 'submission.csv')\n", (2358, 2410), False, 'import os\n')] |
import re
import subprocess
import shutil
import sys
import json
import argparse
from typing import List, NamedTuple, Optional, Sequence
PATTERN_IMPORT_TIME = re.compile(r"^import time:\s+(\d+) \|\s+(\d+) \|(\s+.*)")
class InvalidInput(Exception):
    """Raised when the requested module cannot be imported or parsed."""
class Import(dict):
    """A dict/attribute hybrid describing one imported module.

    The instance dict *is* the mapping itself, so keys and attributes are
    aliases: the tree stays JSON-serializable while still allowing
    ``node.nested_imports``-style access.
    """

    def __init__(self, name: str, t_self: int, t_cumu: int, depth: int, childs: List):
        super().__init__()
        # Alias attribute storage onto the mapping itself.
        self.__dict__ = self
        self.update(
            name=name,
            depth=depth,
            t_self_us=t_self,
            t_cumulative_us=t_cumu,
            nested_imports=childs,
        )
def get_import_time(module: str) -> str:
    """
    Spawn a fresh interpreter with ``-X importtime`` importing *module* and
    return the stderr text that carries the timing report.
    """
    cmd = (sys.executable, "-Ximporttime", "-c", f"import {module}")
    try:
        completed = subprocess.run(
            cmd,
            check=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.PIPE,
            encoding="UTF-8",
        )
    except subprocess.CalledProcessError:
        # A failing import in the child is surfaced as our own error type.
        raise InvalidInput(f'Invalid input: Could not import module "{module}"')
    return completed.stderr
def parse_import_time(s: str) -> List[Import]:
    """
    Parse the ``-X importtime`` stderr output into a tree of Import nodes,
    returned as a one-element list holding a synthetic "root" node.
    """
    root = Import("root", 0, 0, 0, [])
    import_stack = [root]
    # The report lists children *before* their parent, so walk it bottom-up.
    for line in reversed(s.splitlines()):
        m = PATTERN_IMPORT_TIME.match(line)
        if m:
            t_self = int(m[1])
            t_cumu = int(m[2])
            name = str(m[3])
            # Nesting depth is encoded as two leading spaces per level.
            depth = int((len(name) - len(name.lstrip()) - 1) / 2) + 1
            new_imp = Import(
                name=name.strip(), t_self=t_self, t_cumu=t_cumu, depth=depth, childs=[]
            )
            # Pop back up to this node's parent, attach, and descend.
            for _ in range(len(import_stack) - depth):
                import_stack.pop()
            # insert(0, ...) restores original report order within a parent.
            import_stack[-1].nested_imports.insert(0, new_imp)
            import_stack.append(new_imp)
    if root.nested_imports == []:
        raise InvalidInput("Invalid input: could not parse any imports")
    return [root]
def prune_import_depth(
    imports: List[Import], depth: Optional[int] = None
) -> List[Import]:
    """Drop all imports nested deeper than *depth*, in place.

    ``depth=None`` leaves the tree untouched; the (possibly mutated) list
    is returned for convenience.
    """
    if depth is None:
        return imports

    def _cut(nodes: List[Import], remaining: int) -> None:
        if not nodes:
            return
        if remaining == 0:
            nodes.clear()
        for node in nodes:
            _cut(node.nested_imports, remaining - 1)

    _cut(imports, depth + 1)
    return imports
def sort_imports(imports: List[Import], sort_by="self") -> List[Import]:
    """Recursively order sibling imports by descending import time, in place.

    *sort_by* selects the key: "self" uses t_self_us, "cumulative" uses
    t_cumulative_us; any other value leaves the order untouched.
    """
    keys = {
        "self": lambda node: node.t_self_us,
        "cumulative": lambda node: node.t_cumulative_us,
    }

    def _walk(siblings: List[Import]) -> None:
        if not siblings:
            return
        key = keys.get(sort_by)
        if key is not None:
            siblings.sort(key=key, reverse=True)
        for node in siblings:
            _walk(node.nested_imports)

    _walk(imports)
    return imports
def import_tree_to_json_str(imports: "List[Import]") -> str:
    """
    Serialize the import tree to a JSON string.

    The synthetic root node is skipped; only its nested imports are dumped,
    indented for readability.

    BUG FIX: the parameter was declared as ``imports=List[Import]``, making
    the typing object the *default value* instead of an annotation; it is
    now a proper (string/forward-ref) annotation with no default.
    """
    exclude_root = imports[0]["nested_imports"]
    return json.dumps(exclude_root, indent=2)
def import_tree_to_waterfall(imports: "List[Import]", time_key: str = "self", width: int = 79) -> str:
    """
    Render the import tree as an ASCII waterfall diagram.

    Each module is printed with its depth as leading dots and a bar scaled
    to its import time ("self" or "cumulative" per *time_key*), truncated
    to *width* columns.

    BUG FIX: the first parameter was declared as ``imports=List[Import]``
    (typing object as default value); it is now a proper annotation.
    """
    output_str = ""
    waterfall_output = []
    max_time = 0
    max_name_len = 0
    # Lightweight record for one rendered row.
    imp = NamedTuple("imp", [("name", str), ("space", int), ("time", int)])

    def create_name_str(childs: List) -> None:
        # Flatten the tree depth-first, tracking maxima for column layout.
        nonlocal max_time
        nonlocal max_name_len
        nonlocal waterfall_output
        nonlocal time_key
        if childs == []:
            return
        else:
            for child in childs:
                time = {"self": child.t_self_us, "cumulative": child.t_cumulative_us}[
                    time_key
                ]
                waterfall_output.append(
                    imp(name=child.name, space=child.depth - 1, time=time)
                )
                if time > max_time:
                    max_time = time
                if (len(child.name) + child.depth) > max_name_len:
                    max_name_len = len(child.name) + child.depth
                create_name_str(child.nested_imports)
        return

    create_name_str(imports[0]["nested_imports"])
    header = "module name" + " " * ((max_name_len + 1) - len("module name")) + " "
    header += " import time (us)" + "\n" + "-" * width + "\n"
    output_str += header
    for node in waterfall_output:
        name = node.space * "." + str(node.name)
        offset = ((max_name_len - len(name)) + 3) * " "
        time_str = str(node.time)
        # Bar length is proportional to this node's share of the max time.
        water = "=" * int(
            (node.time / max_time)
            * (width - len(offset) - len(time_str) - len(name) - 2)
        )
        line_str = f"{name}{offset}{water}({time_str})\n"
        output_str += line_str
        min_width = round(1 / (node.time / max_time) + len(time_str) + len(name) + 2)
        if width < min_width:
            # NOTE: appended once per offending row, as before.
            warning_msg = f"WARNING: The waterfall diagram may not be displayed correctly if the set width is too small!"
            output_str += warning_msg
    return output_str
def main(argv: Optional[Sequence[str]] = None) -> int:
    """Command-line entry point.

    Imports the requested module in a subprocess with ``-X importtime``,
    parses the timing report, and prints it either as JSON or as a
    terminal waterfall diagram. Returns 0 on success.
    """
    parser = argparse.ArgumentParser(
        description="""
    This script calls the python3 -X importtime implementation with a given module
    and parses the stderr output into a json format, which can then be used to
    search or display the given information. It can also display the data as a
    waterfall diagram in the terminal.
    """
    )
    parser.add_argument("module", help="the module to import")
    parser.add_argument(
        "--format",
        nargs="?",
        default="json",
        choices=["json", "waterfall"],
        help="output format",
    )
    parser.add_argument(
        "--sort",
        nargs="?",
        choices=["self", "cumulative"],
        help="sort imported modules by import-time",
    )
    parser.add_argument(
        "--time",
        nargs="?",
        choices=["self", "cumulative"],
        help="time to use in waterfall format (default self)",
    )
    parser.add_argument(
        "--width",
        nargs="?",
        type=int,
        # Typo fix: "environement" -> "environment".
        help="width of entries in waterfall format (default to "
        "environment variable COLUMNS or terminal's width)",
    )
    parser.add_argument(
        "--depth",
        nargs="?",
        type=int,
        help="limit depth of output format (default unlimited)",
    )
    args = parser.parse_args(argv)

    # --time / --width only make sense for the waterfall renderer.
    if args.time and args.format != "waterfall":
        parser.error(
            "--time requires format to be set to waterfall (--format waterfall)"
        )
    if args.width and args.format != "waterfall":
        # BUG FIX: the message previously referenced a non-existent
        # "--length" flag instead of --width.
        parser.error(
            "--width requires format to be set to waterfall (--format waterfall)"
        )

    raw_output = get_import_time(module=str(args.module))
    all_imports = parse_import_time(raw_output)
    pruned_imports = prune_import_depth(all_imports, args.depth)
    if args.sort:
        output_imports = sort_imports(imports=pruned_imports, sort_by=args.sort)
    else:
        output_imports = pruned_imports

    if args.format == "json":
        print(import_tree_to_json_str(output_imports))
    elif args.format == "waterfall":
        # Fall back to the terminal size when no explicit width is given.
        width = args.width or shutil.get_terminal_size().columns
        time = args.time or "self"
        print(import_tree_to_waterfall(output_imports, time_key=time, width=width))
    return 0
# Propagate main()'s return code as the process exit status.
if __name__ == "__main__":
    exit(main())
| [
"argparse.ArgumentParser",
"re.compile",
"subprocess.run",
"json.dumps",
"shutil.get_terminal_size",
"typing.NamedTuple"
] | [((161, 224), 're.compile', 're.compile', (['"""^import time:\\\\s+(\\\\d+) \\\\|\\\\s+(\\\\d+) \\\\|(\\\\s+.*)"""'], {}), "('^import time:\\\\s+(\\\\d+) \\\\|\\\\s+(\\\\d+) \\\\|(\\\\s+.*)')\n", (171, 224), False, 'import re\n'), ((3396, 3430), 'json.dumps', 'json.dumps', (['exclude_root'], {'indent': '(2)'}), '(exclude_root, indent=2)\n', (3406, 3430), False, 'import json\n'), ((3689, 3754), 'typing.NamedTuple', 'NamedTuple', (['"""imp"""', "[('name', str), ('space', int), ('time', int)]"], {}), "('imp', [('name', str), ('space', int), ('time', int)])\n", (3699, 3754), False, 'from typing import List, NamedTuple, Optional, Sequence\n'), ((5538, 5892), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""\n This script calls the python3 -X importtime implementation with a given module\n and parses the stderr output into a json format, which can then be used to\n search or display the given information. It can also display the data as a\n waterfall diagram in the terminal.\n """'}), '(description=\n """\n This script calls the python3 -X importtime implementation with a given module\n and parses the stderr output into a json format, which can then be used to\n search or display the given information. It can also display the data as a\n waterfall diagram in the terminal.\n """\n )\n', (5561, 5892), False, 'import argparse\n'), ((775, 939), 'subprocess.run', 'subprocess.run', (["(sys.executable, '-Ximporttime', '-c', f'import {module}')"], {'check': '(True)', 'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.PIPE', 'encoding': '"""UTF-8"""'}), "((sys.executable, '-Ximporttime', '-c', f'import {module}'),\n check=True, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE, encoding\n ='UTF-8')\n", (789, 939), False, 'import subprocess\n'), ((7660, 7686), 'shutil.get_terminal_size', 'shutil.get_terminal_size', ([], {}), '()\n', (7684, 7686), False, 'import shutil\n')] |
import csv
import collections
import pandas as pd
from random import shuffle
from tqdm import tqdm
def get_all_tokens_conll(conll_file):
"""
Reads a CoNLL-2011 file and returns all tokens with their annotations in a dataframe including the original
sentence identifiers from OntoNotes
"""
all_tokens = list()
most_semroles = 0
with open(conll_file, "r") as infile:
for line in infile:
# Get sentence identifiers: distinguish between sentence count per file and per file part
# (some files are divided into multiple parts numbered as 000, 001, 002, ... etc.)
if line.startswith("#begin document"):
sent_id_part = 0
part_id = line.split("; part ")[1].rstrip("\n")
if part_id == "000":
sent_id_file = 0
else:
sent_id_file += 1
elif line.startswith("#end document"):
sent_id_file -= 1 # prevent counting too much (empty line followed by end document)
elif line == "\n":
sent_id_part += 1
sent_id_file += 1
else:
columns = line.split()
dict_token = {"file_id": columns[0],
"part_id": int(columns[1]),
"sent_id_part": int(sent_id_part),
"sent_id_file": int(sent_id_file),
"token_id": columns[2],
"word_form": columns[3],
"POS": columns[4],
"parse": columns[5],
"pred_lemma": columns[6],
"pred_frameset": columns[7],
"word_sense": columns[8],
"speaker": columns[9],
"NE": columns[10],
"coref": columns[-1].rstrip("\n")
}
semroles = {f"APRED{i}": role for i, role in enumerate(columns[11:-1], 1)}
dict_token.update(semroles)
all_tokens.append(dict_token)
if len(semroles) > most_semroles:
most_semroles = len(semroles)
cols = list(dict_token.keys())
df_tokens = pd.DataFrame(all_tokens, columns=cols)
return df_tokens
def find_original_sent_ids(df_instances, df_conll):
"""
Takes the file_id, part_id and sent_id indicating a specific sentence in the CoNLL-2011 data (where file is split
into smaller parts and sent_id restarts for each part) and finds the corresponding 'original' sentence identifier
"""
print("Finding original sentence identifiers")
for index, row in tqdm(df_instances.iterrows(), total=len(df_instances)):
# For each instance in the set, find the corresponding sent_id_file in the annotations of CoNLL-2011
file_id = row["file_id"]
part_id = row["part_id"]
sent_id_part = row["sent_id_part"]
matching_rows = df_conll.loc[(df_conll["file_id"] == file_id) & (df_conll["part_id"] == part_id) &
(df_conll["sent_id_part"] == sent_id_part)]
sent_id_file = matching_rows.iloc[0]["sent_id_file"]
df_instances.set_value(index, "sent_id_file", sent_id_file)
return df_instances
def get_role_features_from_annotations(role_annotations):
"""Splits the verb and role information (in original annotations file) to separate values"""
head, role = role_annotations.split(")] ")
head_pos, head_wf = head.lstrip("[(").split()
span, tokens = role.split(maxsplit=1)
span, label = span.rstrip(":").split(":")
role_features = (head_wf, head_pos, span, label, tokens)
return role_features
def rewrite_verb_and_role_features(df):
"""Rewrites the verb and role information in the original annotations file to separate columns"""
instances = df.to_dict("records")
for index, inst in enumerate(instances):
# Get verb features
verb = inst["verb"]
verb_features = get_role_features_from_annotations(verb)
verb_wf, verb_pos, verb_span, verb_label, verb_tokens = verb_features
# Get role features
role = inst["role"]
role_features = get_role_features_from_annotations(role)
role_head_wf, role_head_pos, role_span, role_label, role_tokens = role_features
new_dict = {"verb_wf": verb_wf,
"verb_pos": verb_pos,
"verb_span": verb_span,
"verb_label": verb_label,
"verb_tokens": verb_tokens,
"role_head_wf": role_head_wf,
"role_head_pos": role_head_pos,
"role_span": role_span,
"role_label": role_label,
"role_tokens": role_tokens,
"role_tokens": role_tokens}
inst.update(new_dict)
del inst["verb"]
del inst["role"]
instances[index] = inst
columns = list(instances[0].keys())
df = pd.DataFrame(instances, columns=columns)
return df
def transform_labels_three(row):
"""Takes original score (label) and converts to tertiary classes"""
label = int(row['label'])
if label <= 1:
return 0
if 1 < label <= 3:
return 1
if label >= 4:
return 2
def transform_labels_two(row):
"""Takes original score (label) and converts to binary classes"""
label = int(row['label'])
if label <= 2:
return 0
else:
return 1
def categorize_scores(df):
"""Takes original score (label) and converts to tertiary/binary classes"""
df["class_tertiary"] = df.apply(lambda row: transform_labels_three(row),axis=1)
df["class_binary"] = df.apply(lambda row: transform_labels_two(row),axis=1)
return df
def split_train_test(df_instances, test_ratio=0.2, to_shuffle=True):
"""Splits the instances into train and test sets. Each negation is either assigned to the train or test set."""
instances = df_instances.to_dict("records")
neg_ids = list({(inst["file_id"], inst["sent_id_file"], inst["verb_span"]) for inst in instances})
if to_shuffle:
shuffle(neg_ids)
test_size = int(len(neg_ids) * test_ratio)
test_ids = neg_ids[0:test_size]
test_instances = [inst for inst in instances if (inst["file_id"],
inst["sent_id_file"],
inst["verb_span"]) in test_ids]
train_instances = [inst for inst in instances if (inst["file_id"],
inst["sent_id_file"],
inst["verb_span"]) not in test_ids]
columns = list(train_instances[0].keys())
df_train = pd.DataFrame(train_instances, columns=columns)
df_test = pd.DataFrame(test_instances, columns=columns)
return df_train, df_test
def k_fold(df_instances, k=10):
"""Divides all the samples in k groups of samples. Each negation is either assigned to the train or test set."""
instances = df_instances.T.to_dict().values()
neg_ids = list({(inst["file_id"], inst["sent_id_file"], inst["verb_span"]) for inst in instances})
kf = list()
test_size = int(len(neg_ids) / k)
start = 0
for n in range(0, k):
test_ids = neg_ids[start:start+test_size]
test_instances = [inst for inst in instances if (inst["file_id"],
inst["sent_id_file"],
inst["verb_span"]) in test_ids]
train_instances = [inst for inst in instances if (inst["file_id"],
inst["sent_id_file"],
inst["verb_span"]) not in test_ids]
train_test = (pd.DataFrame(train_instances), pd.DataFrame(test_instances))
kf.append(train_test)
start += test_size
return kf
| [
"pandas.DataFrame",
"random.shuffle"
] | [((2379, 2417), 'pandas.DataFrame', 'pd.DataFrame', (['all_tokens'], {'columns': 'cols'}), '(all_tokens, columns=cols)\n', (2391, 2417), True, 'import pandas as pd\n'), ((5165, 5205), 'pandas.DataFrame', 'pd.DataFrame', (['instances'], {'columns': 'columns'}), '(instances, columns=columns)\n', (5177, 5205), True, 'import pandas as pd\n'), ((6953, 6999), 'pandas.DataFrame', 'pd.DataFrame', (['train_instances'], {'columns': 'columns'}), '(train_instances, columns=columns)\n', (6965, 6999), True, 'import pandas as pd\n'), ((7014, 7059), 'pandas.DataFrame', 'pd.DataFrame', (['test_instances'], {'columns': 'columns'}), '(test_instances, columns=columns)\n', (7026, 7059), True, 'import pandas as pd\n'), ((6321, 6337), 'random.shuffle', 'shuffle', (['neg_ids'], {}), '(neg_ids)\n', (6328, 6337), False, 'from random import shuffle\n'), ((8053, 8082), 'pandas.DataFrame', 'pd.DataFrame', (['train_instances'], {}), '(train_instances)\n', (8065, 8082), True, 'import pandas as pd\n'), ((8084, 8112), 'pandas.DataFrame', 'pd.DataFrame', (['test_instances'], {}), '(test_instances)\n', (8096, 8112), True, 'import pandas as pd\n')] |
from setuptools import find_packages, setup
import re
VERSIONFILE = "{{ cookiecutter.project_name }}/__init__.py"
with open(VERSIONFILE, "rt") as versionfle:
verstrline = versionfle.read()
version_re = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(version_re, verstrline, re.M)
if mo:
ver_str = mo.group(1)
else:
raise ValueError("Unable to find version string in %s." % (VERSIONFILE,))
# add prod requires to setup so that pip can install dependencies for you
with open("requirements_prod.txt") as f:
required_pkgs = f.read().splitlines()
setup(
name='{{ cookiecutter.project_name }}',
packages=find_packages(),
version=ver_str,
description='{{ cookiecutter.description }}',
author='{{ cookiecutter.author_name }}',
install_requires=required_pkgs
)
| [
"setuptools.find_packages",
"re.search"
] | [((250, 289), 're.search', 're.search', (['version_re', 'verstrline', 're.M'], {}), '(version_re, verstrline, re.M)\n', (259, 289), False, 'import re\n'), ((630, 645), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (643, 645), False, 'from setuptools import find_packages, setup\n')] |
import os
from setuptools import setup
try:
import concurrent.futures
except ImportError:
CONCURRENT_FUTURES_PRESENT = False
else:
CONCURRENT_FUTURES_PRESENT = True
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="threadedprocess",
version="0.0.5",
author="<NAME>",
author_email="<EMAIL>",
description=(
"A `ThreadedProcessPoolExecutor` is formed by a modified "
"`ProcessPoolExecutor` that generates processes that use a "
"`ThreadPoolExecutor` instance to run the given tasks."),
license="BSD",
keywords="concurrent futures executor process thread",
url="https://github.com/nilp0inter/threadedprocess",
py_modules=['threadedprocess'],
long_description=read('README.rst'),
install_requires=[] if CONCURRENT_FUTURES_PRESENT else ["futures"],
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: BSD License",
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
)
| [
"os.path.dirname"
] | [((226, 251), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (241, 251), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
def get_const_mode(val):
# Heuristics to determine if a val should be file value or immediate
# value.
if isinstance(val, (str, bool, int)):
return "immediate_value"
if isinstance(val, (np.generic, np.ndarray)):
if val.size > 10:
return "file_value"
return "immediate_value"
raise ValueError("val {} not recognized.".format(val))
def const_elimination_block(block):
# shallow copy hides changes on f.operations during the loop
for op in list(block.operations):
if op.op_type == "const":
continue
for b in op.blocks:
const_elimination_block(b)
all_outputs_are_const = True
for i, o in enumerate(op.outputs):
if o.val is not None:
with block:
res = mb.const(
val=o.val,
mode=get_const_mode(o.val),
before_op=op,
# same var name, but different python
# instance does not violate SSA property.
name=o.name,
)
op.enclosing_block.replace_uses_of_var_after_op(
anchor_op=op, old_var=o, new_var=res
)
# rename the const output
o.set_name(o.name+'_ignored')
else:
all_outputs_are_const = False
if all_outputs_are_const:
op.remove_from_block()
@register_pass(namespace="common")
def const_elimination(prog):
"""
prog: Program
# Replace non-const ops that have const Var
# outputs replaced with const op. Example:
#
# Given:
# %2, %3 = non_const_op(...) # %2 is const, %3 isn't const
# %4 = other_op(%2, %3)
#
# Result:
# _, %3 = non_const_op(...) # _ is the ignored output
# %2_const = const(mode=m) # %2_const name is for illustration only
# %4 = other_op(%2_const, %3)
#
# where m is 'file_value' / 'immediate_value' depending on heuristics
# in get_const_mode.
"""
for f_name, f in prog.functions.items():
const_elimination_block(f)
| [
"coremltools.converters.mil.mil.passes.pass_registry.register_pass"
] | [((1907, 1940), 'coremltools.converters.mil.mil.passes.pass_registry.register_pass', 'register_pass', ([], {'namespace': '"""common"""'}), "(namespace='common')\n", (1920, 1940), False, 'from coremltools.converters.mil.mil.passes.pass_registry import register_pass\n')] |
from simcse import SimCSE
from esimcse import ESimCSE
from promptbert import PromptBERT
from sbert import SBERT
from cosent import CoSent
from config import Params
from log import logger
import torch
from transformers import AutoTokenizer
class SimCSERetrieval(object):
def __init__(self, pretrained_model_path, simcse_path, pool_type, dropout):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
model = SimCSE(Params.pretrained_model, pool_type, dropout)
self.checkpoint = torch.load(simcse_path, map_location='cpu')
model.load_state_dict(self.checkpoint['model_state_dict'])
model.eval()
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_embedding(self, sentence):
device = "cpu"
input_encodings = self.tokenizer(sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(input_encodings['input_ids'].to(device),
input_encodings['attention_mask'].to(device),
input_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
sentence1 = sentence1.strip()
sentence2 = sentence2.strip()
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
class ESimCSERetrieval(object):
def __init__(self, pretrained_model_path, esimcse_path, dropout):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
model = ESimCSE(Params.pretrained_model, dropout)
self.checkpoint = torch.load(esimcse_path, map_location='cpu')
model.load_state_dict(self.checkpoint['model_state_dict'])
model.eval()
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_embedding(self, sentence):
device = "cpu"
input_encodings = self.tokenizer(sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(input_encodings['input_ids'].to(device),
input_encodings['attention_mask'].to(device),
input_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
sentence1 = sentence1.strip()
sentence2 = sentence2.strip()
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
class PromptBertRetrieval(object):
def __init__(self, pretrained_model_path, promptbert_path, dropout):
super().__init__()
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
special_token_dict = {'additional_special_tokens': ['[X]']}
self.tokenizer.add_special_tokens(special_token_dict)
mask_id = self.tokenizer.convert_tokens_to_ids(Params.mask_token)
model = PromptBERT(pretrained_model_path, dropout, mask_id)
model.encoder.resize_token_embeddings(len(self.tokenizer))
checkpoint = torch.load(promptbert_path, map_location='cpu')
model.load_state_dict(checkpoint['model_state_dict'])
self.checkpoint = checkpoint
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_mask_embedding(self, sentence):
device = "cpu"
prompt_sentence = Params.prompt_templates[0].replace("[X]", sentence)
prompt_encodings = self.tokenizer(prompt_sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_mask_embedding = self.model.calculate_mask_embedding(prompt_encodings['input_ids'].to(device),
prompt_encodings['attention_mask'].to(device),
prompt_encodings['token_type_ids'].to(device))
return sentence_mask_embedding
def calculate_sentence_embedding(self, sentence):
device = "cpu"
prompt_sentence = Params.prompt_templates[0].replace("[X]", sentence)
sentence_num = len(self.tokenizer.tokenize(sentence))
template_sentence = Params.prompt_templates[0].replace("[X]", "[X]"*sentence_num)
prompt_encodings = self.tokenizer(prompt_sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
template_encodings = self.tokenizer(template_sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(prompt_input_ids=prompt_encodings['input_ids'].to(device),
prompt_attention_mask=prompt_encodings['attention_mask'].to(device),
prompt_token_type_ids=prompt_encodings['token_type_ids'].to(device),
template_input_ids=template_encodings['input_ids'].to(device),
template_attention_mask=template_encodings['attention_mask'].to(device),
template_token_type_ids=template_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
# sentence1_embedding = self.calculate_sentence_mask_embedding(sentence1)
# sentence2_embedding = self.calculate_sentence_mask_embedding(sentence2)
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
class SBERTRetrieval(object):
def __init__(self, pretrained_model_path, sbert_path, pool_type, dropout):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
model = SBERT(Params.pretrained_model, pool_type, dropout)
self.checkpoint = torch.load(sbert_path, map_location='cpu')
model.load_state_dict(self.checkpoint['model_state_dict'])
model.eval()
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['train_loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_embedding(self, sentence):
device = "cpu"
input_encodings = self.tokenizer(sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(input_encodings['input_ids'].to(device),
input_encodings['attention_mask'].to(device),
input_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
sentence1 = sentence1.strip()
sentence2 = sentence2.strip()
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
class CoSentRetrieval(object):
def __init__(self, pretrained_model_path, cosent_path):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
model = CoSent(Params.pretrained_model, Params.cosent_pool_type, Params.cosent_dropout)
self.checkpoint = torch.load(cosent_path, map_location='cpu')
model.load_state_dict(self.checkpoint['model_state_dict'])
model.eval()
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_embedding(self, sentence):
device = "cpu"
input_encodings = self.tokenizer(sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(input_encodings['input_ids'].to(device),
input_encodings['attention_mask'].to(device),
input_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
sentence1 = sentence1.strip()
sentence2 = sentence2.strip()
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
simcse_retrieval = SimCSERetrieval(Params.pretrained_model, Params.simcse_model, Params.pool_type, Params.simcse_dropout)
logger.info("start simcse model succussfully!")
esimcse_repeat_retrieval = ESimCSERetrieval(Params.pretrained_model, Params.esimcse_repeat_model, Params.esimcse_repeat_dropout)
logger.info("start esimcse repeat model succussfully!")
esimcse_same_retrieval = ESimCSERetrieval(Params.pretrained_model, Params.esimcse_same_model, Params.esimcse_same_dropout)
logger.info("start esimcse same model succussfully!")
esimcse_multi_retrieval = ESimCSERetrieval(Params.pretrained_model, Params.esimcse_multi_model, Params.esimcse_multi_dropout)
logger.info("start esimcse multi model succussfully!")
promptbert_retrieval = PromptBertRetrieval(Params.pretrained_model, Params.promptbert_model, Params.promptbert_dropout)
logger.info("start promptbert model succussfully!")
sbert_retrieval = SBERTRetrieval(Params.pretrained_model, Params.sbert_model, Params.sbert_pool_type, Params.sbert_dropout)
logger.info("start sbert model succussfully!")
cosent_retrieval = CoSentRetrieval(Params.pretrained_model, Params.cosent_model)
logger.info("start cosent model succussfully!")
if __name__ == "__main__":
# model_path = "models/esimcse_0.32_0.15_160.pth"
# model_path = "models/esimcse_multi_0.15_64.pth"
# model_path = "models/esimcse_0.15_64.pth"
# simcse_retrieval = SimCSERetrieval(Params.pretrained_model, Params.simcse_model, Params.pool_type, Params.simcse_dropout)
# model_info = simcse_retrieval.print_checkpoint_info()
# print(model_info)
model_info = sbert_retrieval.print_checkpoint_info()
print(model_info)
while True:
print("input your sentence1:")
sentence1 = input()
print("input your sentence2:")
sentence2 = input()
sbert_sentence_similarity = sbert_retrieval.calculate_sentence_similarity(sentence1, sentence2)
# promptbert_sentence_similarity = prom.calculate_sentence_similarity(sentence1, sentence2)
# print("simcse sim: {}, promptbert sim: {}".format(simcse_sentence_similarity, promptbert_sentence_similarity))
print("sbert similarity: {}".format(sbert_sentence_similarity)) | [
"promptbert.PromptBERT",
"torch.load",
"torch.cosine_similarity",
"simcse.SimCSE",
"log.logger.info",
"esimcse.ESimCSE",
"sbert.SBERT",
"cosent.CoSent",
"transformers.AutoTokenizer.from_pretrained"
] | [((11630, 11677), 'log.logger.info', 'logger.info', (['"""start simcse model succussfully!"""'], {}), "('start simcse model succussfully!')\n", (11641, 11677), False, 'from log import logger\n'), ((11807, 11862), 'log.logger.info', 'logger.info', (['"""start esimcse repeat model succussfully!"""'], {}), "('start esimcse repeat model succussfully!')\n", (11818, 11862), False, 'from log import logger\n'), ((11986, 12039), 'log.logger.info', 'logger.info', (['"""start esimcse same model succussfully!"""'], {}), "('start esimcse same model succussfully!')\n", (11997, 12039), False, 'from log import logger\n'), ((12166, 12220), 'log.logger.info', 'logger.info', (['"""start esimcse multi model succussfully!"""'], {}), "('start esimcse multi model succussfully!')\n", (12177, 12220), False, 'from log import logger\n'), ((12341, 12392), 'log.logger.info', 'logger.info', (['"""start promptbert model succussfully!"""'], {}), "('start promptbert model succussfully!')\n", (12352, 12392), False, 'from log import logger\n'), ((12517, 12563), 'log.logger.info', 'logger.info', (['"""start sbert model succussfully!"""'], {}), "('start sbert model succussfully!')\n", (12528, 12563), False, 'from log import logger\n'), ((12645, 12692), 'log.logger.info', 'logger.info', (['"""start cosent model succussfully!"""'], {}), "('start cosent model succussfully!')\n", (12656, 12692), False, 'from log import logger\n'), ((377, 429), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['pretrained_model_path'], {}), '(pretrained_model_path)\n', (406, 429), False, 'from transformers import AutoTokenizer\n'), ((446, 497), 'simcse.SimCSE', 'SimCSE', (['Params.pretrained_model', 'pool_type', 'dropout'], {}), '(Params.pretrained_model, pool_type, dropout)\n', (452, 497), False, 'from simcse import SimCSE\n'), ((524, 567), 'torch.load', 'torch.load', (['simcse_path'], {'map_location': '"""cpu"""'}), "(simcse_path, map_location='cpu')\n", (534, 567), False, 'import 
torch\n'), ((1899, 1972), 'torch.cosine_similarity', 'torch.cosine_similarity', (['sentence1_embedding', 'sentence2_embedding'], {'dim': '(-1)'}), '(sentence1_embedding, sentence2_embedding, dim=-1)\n', (1922, 1972), False, 'import torch\n'), ((2174, 2226), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['pretrained_model_path'], {}), '(pretrained_model_path)\n', (2203, 2226), False, 'from transformers import AutoTokenizer\n'), ((2243, 2284), 'esimcse.ESimCSE', 'ESimCSE', (['Params.pretrained_model', 'dropout'], {}), '(Params.pretrained_model, dropout)\n', (2250, 2284), False, 'from esimcse import ESimCSE\n'), ((2311, 2355), 'torch.load', 'torch.load', (['esimcse_path'], {'map_location': '"""cpu"""'}), "(esimcse_path, map_location='cpu')\n", (2321, 2355), False, 'import torch\n'), ((3687, 3760), 'torch.cosine_similarity', 'torch.cosine_similarity', (['sentence1_embedding', 'sentence2_embedding'], {'dim': '(-1)'}), '(sentence1_embedding, sentence2_embedding, dim=-1)\n', (3710, 3760), False, 'import torch\n'), ((4003, 4055), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['pretrained_model_path'], {}), '(pretrained_model_path)\n', (4032, 4055), False, 'from transformers import AutoTokenizer\n'), ((4276, 4327), 'promptbert.PromptBERT', 'PromptBERT', (['pretrained_model_path', 'dropout', 'mask_id'], {}), '(pretrained_model_path, dropout, mask_id)\n', (4286, 4327), False, 'from promptbert import PromptBERT\n'), ((4416, 4463), 'torch.load', 'torch.load', (['promptbert_path'], {'map_location': '"""cpu"""'}), "(promptbert_path, map_location='cpu')\n", (4426, 4463), False, 'import torch\n'), ((7722, 7795), 'torch.cosine_similarity', 'torch.cosine_similarity', (['sentence1_embedding', 'sentence2_embedding'], {'dim': '(-1)'}), '(sentence1_embedding, sentence2_embedding, dim=-1)\n', (7745, 7795), False, 'import torch\n'), ((8012, 8064), 'transformers.AutoTokenizer.from_pretrained', 
'AutoTokenizer.from_pretrained', (['pretrained_model_path'], {}), '(pretrained_model_path)\n', (8041, 8064), False, 'from transformers import AutoTokenizer\n'), ((8081, 8131), 'sbert.SBERT', 'SBERT', (['Params.pretrained_model', 'pool_type', 'dropout'], {}), '(Params.pretrained_model, pool_type, dropout)\n', (8086, 8131), False, 'from sbert import SBERT\n'), ((8158, 8200), 'torch.load', 'torch.load', (['sbert_path'], {'map_location': '"""cpu"""'}), "(sbert_path, map_location='cpu')\n", (8168, 8200), False, 'import torch\n'), ((9538, 9611), 'torch.cosine_similarity', 'torch.cosine_similarity', (['sentence1_embedding', 'sentence2_embedding'], {'dim': '(-1)'}), '(sentence1_embedding, sentence2_embedding, dim=-1)\n', (9561, 9611), False, 'import torch\n'), ((9806, 9858), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['pretrained_model_path'], {}), '(pretrained_model_path)\n', (9835, 9858), False, 'from transformers import AutoTokenizer\n'), ((9875, 9954), 'cosent.CoSent', 'CoSent', (['Params.pretrained_model', 'Params.cosent_pool_type', 'Params.cosent_dropout'], {}), '(Params.pretrained_model, Params.cosent_pool_type, Params.cosent_dropout)\n', (9881, 9954), False, 'from cosent import CoSent\n'), ((9981, 10024), 'torch.load', 'torch.load', (['cosent_path'], {'map_location': '"""cpu"""'}), "(cosent_path, map_location='cpu')\n", (9991, 10024), False, 'import torch\n'), ((11356, 11429), 'torch.cosine_similarity', 'torch.cosine_similarity', (['sentence1_embedding', 'sentence2_embedding'], {'dim': '(-1)'}), '(sentence1_embedding, sentence2_embedding, dim=-1)\n', (11379, 11429), False, 'import torch\n')] |
from collections import OrderedDict
import pandas as pd
from bokeh.charts import Horizon, output_file, show
# read in some stock data from the Yahoo Finance API
AAPL = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
MSFT = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
IBM = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
xyvalues = OrderedDict(
AAPL=AAPL['Adj Close'],
Date=AAPL['Date'],
MSFT=MSFT['Adj Close'],
IBM=IBM['Adj Close'],
)
output_file("horizon.html")
hp = Horizon(
xyvalues, index='Date',
title="horizon plot using stock inputs",
width=800, height=300
)
show(hp)
| [
"collections.OrderedDict",
"pandas.read_csv",
"bokeh.charts.output_file",
"bokeh.charts.Horizon",
"bokeh.charts.show"
] | [((171, 287), 'pandas.read_csv', 'pd.read_csv', (['"""http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010"""'], {'parse_dates': "['Date']"}), "(\n 'http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010',\n parse_dates=['Date'])\n", (182, 287), True, 'import pandas as pd\n'), ((295, 411), 'pandas.read_csv', 'pd.read_csv', (['"""http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010"""'], {'parse_dates': "['Date']"}), "(\n 'http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010',\n parse_dates=['Date'])\n", (306, 411), True, 'import pandas as pd\n'), ((418, 533), 'pandas.read_csv', 'pd.read_csv', (['"""http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010"""'], {'parse_dates': "['Date']"}), "(\n 'http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010',\n parse_dates=['Date'])\n", (429, 533), True, 'import pandas as pd\n'), ((546, 651), 'collections.OrderedDict', 'OrderedDict', ([], {'AAPL': "AAPL['Adj Close']", 'Date': "AAPL['Date']", 'MSFT': "MSFT['Adj Close']", 'IBM': "IBM['Adj Close']"}), "(AAPL=AAPL['Adj Close'], Date=AAPL['Date'], MSFT=MSFT[\n 'Adj Close'], IBM=IBM['Adj Close'])\n", (557, 651), False, 'from collections import OrderedDict\n'), ((667, 694), 'bokeh.charts.output_file', 'output_file', (['"""horizon.html"""'], {}), "('horizon.html')\n", (678, 694), False, 'from bokeh.charts import Horizon, output_file, show\n'), ((701, 800), 'bokeh.charts.Horizon', 'Horizon', (['xyvalues'], {'index': '"""Date"""', 'title': '"""horizon plot using stock inputs"""', 'width': '(800)', 'height': '(300)'}), "(xyvalues, index='Date', title='horizon plot using stock inputs',\n width=800, height=300)\n", (708, 800), False, 'from bokeh.charts import Horizon, output_file, show\n'), ((812, 820), 'bokeh.charts.show', 'show', (['hp'], {}), '(hp)\n', (816, 820), False, 'from bokeh.charts import Horizon, output_file, show\n')] |
# from __future__ import print_function
import pymzn
import time
from pprint import pprint
from collections import OrderedDict
import openrouteservice
from openrouteservice.geocode import pelias_search
from openrouteservice.distance_matrix import distance_matrix
client = openrouteservice.Client(key='')
# routes = client.directions(coords)
# print(routes)
def geocode(address):
return pelias_search(client, address, size = 1)
def matrix(coordinates):
# query = {'locations': coordinates, 'metrics': ['duration']}
return distance_matrix(client, coordinates)
# TODO add error classes for distance matrix errors etc
def test_matrix():
request = {'locations': [[8.34234,48.23424],[8.34423,48.26424], [8.34523,48.24424], [8.41423,48.21424]],
'profile': 'driving-car',
'metrics': ['duration']}
return distance_matrix(client, [[8.34234,48.23424],[8.34423,48.26424], [8.34523,48.24424], [8.41423,48.21424]])
def compute_results(form_input):
details = OrderedDict() # A fixed order of the entities is needed for distance matrix calls
for item in form_input:
[field_type, entity] = item.split('-')
value = form_input[item]
if entity not in details:
details[entity] = {}
details[entity][field_type] = value
pprint(details)
for entity in details:
# Get missing coordinates and well-formatted address using openrouteservice API
if details[entity]['coords'] == '':
loc = geocode(details[entity]['addr'])
details[entity]['coords'] = loc['features'][0]['geometry']['coordinates']
details[entity]['addr'] = loc_details['features'][0]['properties']['label']
# Otherwise, convert coordinates from string into list of floats and put lng before lat for ORS compatibility
else:
details[entity]['coords'] = (details[entity]['coords'][1:len(details[entity]['coords']) - 1].split(', '))[::-1]
print('FILLED IN MISSING COORDS')
pprint(details)
coordinates_list = []
for entity_value in details.values():
coordinates_list.append(entity_value['coords'])
durations = matrix(coordinates_list)
print('DURATIONS:')
pprint(durations)
for i, entity_value in enumerate(details.values()):
entity_value['matrix-durations'] = durations['durations'][i]
print('Updated details:')
pprint(details)
# MiniZinc test code
try:
solns = pymzn.minizinc('minizinc-test.mzn', 'minizinc-test.dzn', data={'capacity': 20})
pprint(solns)
except:
print('Minizinc didn\'t work lol')
# details.append(solns)
return details
if __name__ == '__main__':
"""
This just contains testing code. Delete before deploying to production environment. Code in this file shoudl only
be accessed through the compute_results function.
"""
loc_details = geocode('5 Bolinda Pl')
print(loc_details['features'][0]['geometry']['coordinates'])
print(loc_details['features'][0]['properties']['label'])
# compute_results(test_input)
# pprint(test_matrix())
# people = []
# # Prompt user to enter all names and addresses
# personId = 1
# name = ""
# while name != "DONE":
# name = input("Enter the name of person " + str(personId) + " or type \"DONE\" when you have entered everyone.")
# if name != "DONE":
# address = input("Enter their address: ")
# loc = geocode(address)
# # pprint(loc)
# people.append({'id': personId, 'address': address, 'coords': loc['features'][0]['geometry']['coordinates']})
# personId += 1
# if people == []:
# print("You haven't entered any addresses.")
# else:
# coordinates = []
# for person in people:
# coordinates.append(person['coords'])
# # print(coordinates)
# distances = matrix(coordinates)
# # distances = testMatrix()
# pprint(distances) | [
"collections.OrderedDict",
"openrouteservice.geocode.pelias_search",
"pymzn.minizinc",
"openrouteservice.Client",
"openrouteservice.distance_matrix.distance_matrix",
"pprint.pprint"
] | [((274, 305), 'openrouteservice.Client', 'openrouteservice.Client', ([], {'key': '""""""'}), "(key='')\n", (297, 305), False, 'import openrouteservice\n'), ((395, 433), 'openrouteservice.geocode.pelias_search', 'pelias_search', (['client', 'address'], {'size': '(1)'}), '(client, address, size=1)\n', (408, 433), False, 'from openrouteservice.geocode import pelias_search\n'), ((540, 576), 'openrouteservice.distance_matrix.distance_matrix', 'distance_matrix', (['client', 'coordinates'], {}), '(client, coordinates)\n', (555, 576), False, 'from openrouteservice.distance_matrix import distance_matrix\n'), ((854, 967), 'openrouteservice.distance_matrix.distance_matrix', 'distance_matrix', (['client', '[[8.34234, 48.23424], [8.34423, 48.26424], [8.34523, 48.24424], [8.41423, \n 48.21424]]'], {}), '(client, [[8.34234, 48.23424], [8.34423, 48.26424], [8.34523,\n 48.24424], [8.41423, 48.21424]])\n', (869, 967), False, 'from openrouteservice.distance_matrix import distance_matrix\n'), ((1007, 1020), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1018, 1020), False, 'from collections import OrderedDict\n'), ((1322, 1337), 'pprint.pprint', 'pprint', (['details'], {}), '(details)\n', (1328, 1337), False, 'from pprint import pprint\n'), ((2022, 2037), 'pprint.pprint', 'pprint', (['details'], {}), '(details)\n', (2028, 2037), False, 'from pprint import pprint\n'), ((2232, 2249), 'pprint.pprint', 'pprint', (['durations'], {}), '(durations)\n', (2238, 2249), False, 'from pprint import pprint\n'), ((2411, 2426), 'pprint.pprint', 'pprint', (['details'], {}), '(details)\n', (2417, 2426), False, 'from pprint import pprint\n'), ((2478, 2557), 'pymzn.minizinc', 'pymzn.minizinc', (['"""minizinc-test.mzn"""', '"""minizinc-test.dzn"""'], {'data': "{'capacity': 20}"}), "('minizinc-test.mzn', 'minizinc-test.dzn', data={'capacity': 20})\n", (2492, 2557), False, 'import pymzn\n'), ((2566, 2579), 'pprint.pprint', 'pprint', (['solns'], {}), '(solns)\n', (2572, 2579), False, 'from 
pprint import pprint\n')] |
import random
import requests
import time
HOSTS = [
'us-east-1',
'us-west-1',
'eu-west-1',
]
VEHICLES = [
'bike',
'scooter',
'car',
]
if __name__ == "__main__":
print(f"starting load generator")
time.sleep(15)
print('done sleeping')
while True:
host = HOSTS[random.randint(0, len(HOSTS) - 1)]
vehicle = VEHICLES[random.randint(0, len(VEHICLES) - 1)]
print(f"requesting {vehicle} from {host}")
resp = requests.get(f'http://web:8000/{vehicle}')
print(f"received {resp}")
time.sleep(random.uniform(0.2, 0.4))
| [
"requests.get",
"random.uniform",
"time.sleep"
] | [((230, 244), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (240, 244), False, 'import time\n'), ((475, 517), 'requests.get', 'requests.get', (['f"""http://web:8000/{vehicle}"""'], {}), "(f'http://web:8000/{vehicle}')\n", (487, 517), False, 'import requests\n'), ((571, 595), 'random.uniform', 'random.uniform', (['(0.2)', '(0.4)'], {}), '(0.2, 0.4)\n', (585, 595), False, 'import random\n')] |
from django.contrib import admin
from . import models
class ReadOnlyAdminMixin():
def get_readonly_fields(self, request, obj=None):
return list(set(
[field.name for field in self.opts.local_fields] +
[field.name for field in self.opts.local_many_to_many]
))
class ReadOnlyAdmin(ReadOnlyAdminMixin, admin.ModelAdmin):
pass
class DerivationCodeAdmin(ReadOnlyAdmin):
list_display = ('code', 'description')
class FoodDescriptionAdmin(ReadOnlyAdmin):
list_display = ('ndb_no', 'food_group', 'short_desc')
class FoodGroupAdmin(ReadOnlyAdmin):
list_display = ('code', 'description')
class FootnoteAdmin(ReadOnlyAdmin):
list_display = ('pk', 'footnote_no', 'food_description', 'footnote_type')
class NutrientDefinitionAdmin(ReadOnlyAdmin):
list_display = ('nutrient_number', 'tagname', 'nutrient_description')
class SourceCodeAdmin(ReadOnlyAdmin):
list_display = ('source_code', 'description')
class WeightAdmin(ReadOnlyAdmin):
list_display = ('food_description', 'amount', 'measure_description')
admin.site.register(models.DerivationCode, DerivationCodeAdmin)
admin.site.register(models.FoodDescription, FoodDescriptionAdmin)
admin.site.register(models.FoodGroup, FoodGroupAdmin)
admin.site.register(models.Footnote, FootnoteAdmin)
admin.site.register(models.NutrientDefinition, NutrientDefinitionAdmin)
admin.site.register(models.SourceCode, SourceCodeAdmin)
admin.site.register(models.Weight, WeightAdmin)
| [
"django.contrib.admin.site.register"
] | [((1086, 1149), 'django.contrib.admin.site.register', 'admin.site.register', (['models.DerivationCode', 'DerivationCodeAdmin'], {}), '(models.DerivationCode, DerivationCodeAdmin)\n', (1105, 1149), False, 'from django.contrib import admin\n'), ((1150, 1215), 'django.contrib.admin.site.register', 'admin.site.register', (['models.FoodDescription', 'FoodDescriptionAdmin'], {}), '(models.FoodDescription, FoodDescriptionAdmin)\n', (1169, 1215), False, 'from django.contrib import admin\n'), ((1216, 1269), 'django.contrib.admin.site.register', 'admin.site.register', (['models.FoodGroup', 'FoodGroupAdmin'], {}), '(models.FoodGroup, FoodGroupAdmin)\n', (1235, 1269), False, 'from django.contrib import admin\n'), ((1270, 1321), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Footnote', 'FootnoteAdmin'], {}), '(models.Footnote, FootnoteAdmin)\n', (1289, 1321), False, 'from django.contrib import admin\n'), ((1322, 1393), 'django.contrib.admin.site.register', 'admin.site.register', (['models.NutrientDefinition', 'NutrientDefinitionAdmin'], {}), '(models.NutrientDefinition, NutrientDefinitionAdmin)\n', (1341, 1393), False, 'from django.contrib import admin\n'), ((1394, 1449), 'django.contrib.admin.site.register', 'admin.site.register', (['models.SourceCode', 'SourceCodeAdmin'], {}), '(models.SourceCode, SourceCodeAdmin)\n', (1413, 1449), False, 'from django.contrib import admin\n'), ((1450, 1497), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Weight', 'WeightAdmin'], {}), '(models.Weight, WeightAdmin)\n', (1469, 1497), False, 'from django.contrib import admin\n')] |
from decimal import Decimal
def Binominal(n: int, k: int) -> int:
if k > n:
return 0
result = 1
if k > n - k:
k = n - k
i = 1
while i <= k:
result *= n
result //= i
n -= 1
i += 1
return result
def pvalue(a: int, b: int,
c: int, d: int) -> Decimal:
return (Decimal(Binominal(a + b, a)
* Binominal(c + d, c))
/ Decimal(Binominal(a + b + c + d, a + c)))
def FisherLeftSide(a: int, b: int,
c: int, d: int,
baseP: Decimal) -> float:
p = 0.0
curP = float(baseP)
while(a > 0 and d > 0):
curP *= a * d
a -= 1
b += 1
c += 1
d -= 1
curP /= b * c
if curP <= baseP:
p += curP
return p
def FisherRightSide(a: int, b: int,
c: int, d: int,
baseP: Decimal) -> float:
p = float(0)
curP = float(baseP)
while(b > 0 and c > 0):
curP *= b * c
a += 1
b -= 1
c -= 1
d += 1
curP /= a * d
if curP <= baseP:
p += curP
return p
def FisherExact(a: int, b: int,
c: int, d: int) -> Decimal:
"""Calculate two-tailed Fisher's exact test for 2x2 continguency table
Args:
a: column 1 row 1
b: column 2 row 1
c: column 1 row 2
c: column 2 row 2
Returns:
Result of two-tailed Fisher's exact test stored in Decimal class
"""
if a == b == c == d:
return Decimal(1)
p = t = pvalue(a, b, c, d)
leftTail = Decimal(FisherLeftSide(a, b, c, d, t))
p += leftTail
rightTail = Decimal(FisherRightSide(a, b, c, d, t))
p += rightTail
return p
| [
"decimal.Decimal"
] | [((1577, 1587), 'decimal.Decimal', 'Decimal', (['(1)'], {}), '(1)\n', (1584, 1587), False, 'from decimal import Decimal\n')] |
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi # CGIモジュールのインポート
import cgitb
import sys
import requests
import json
import subprocess
import traceback
import os
import base64
import io
import logging
from django.shortcuts import render
from django.http import HttpResponse
from django.http.response import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
ita_host = os.environ['EPOCH_ITA_HOST']
ita_port = os.environ['EPOCH_ITA_PORT']
ita_user = os.environ['EPOCH_ITA_USER']
ita_pass = os.environ['EPOCH_ITA_PASSWORD']
# メニューID
ite_menu_operation = '2100000304'
ita_restapi_endpoint='http://' + ita_host + ':' + ita_port + '/default/menu/07_rest_api_ver1.php'
logger = logging.getLogger('apilog')
@require_http_methods(['GET'])
@csrf_exempt
def index(request):
# sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
logger.debug("CALL " + __name__ + ":{}".format(request.method))
if request.method == 'GET':
return get(request)
else:
return ""
@csrf_exempt
def get(request):
# HTTPヘッダの生成
filter_headers = {
'host': ita_host + ':' + ita_port,
'Content-Type': 'application/json',
'Authorization': base64.b64encode((ita_user + ':' + ita_pass).encode()),
'X-Command': 'FILTER',
}
#
# オペレーションの取得
#
opelist_resp = requests.post(ita_restapi_endpoint + '?no=' + ite_menu_operation, headers=filter_headers)
opelist_json = json.loads(opelist_resp.text)
logger.debug('---- Operation ----')
logger.debug(opelist_resp.text)
return JsonResponse(opelist_json, status=200)
| [
"logging.getLogger",
"json.loads",
"requests.post",
"django.views.decorators.http.require_http_methods",
"django.http.response.JsonResponse"
] | [((1318, 1345), 'logging.getLogger', 'logging.getLogger', (['"""apilog"""'], {}), "('apilog')\n", (1335, 1345), False, 'import logging\n'), ((1348, 1377), 'django.views.decorators.http.require_http_methods', 'require_http_methods', (["['GET']"], {}), "(['GET'])\n", (1368, 1377), False, 'from django.views.decorators.http import require_http_methods\n'), ((1966, 2060), 'requests.post', 'requests.post', (["(ita_restapi_endpoint + '?no=' + ite_menu_operation)"], {'headers': 'filter_headers'}), "(ita_restapi_endpoint + '?no=' + ite_menu_operation, headers=\n filter_headers)\n", (1979, 2060), False, 'import requests\n'), ((2075, 2104), 'json.loads', 'json.loads', (['opelist_resp.text'], {}), '(opelist_resp.text)\n', (2085, 2104), False, 'import json\n'), ((2193, 2231), 'django.http.response.JsonResponse', 'JsonResponse', (['opelist_json'], {'status': '(200)'}), '(opelist_json, status=200)\n', (2205, 2231), False, 'from django.http.response import JsonResponse\n')] |
import unittest
import pygame
from chip8_pygame_integration.config import get_config, KeyBind, to_text
DEFAULT = [KeyBind(pygame.K_o, pygame.KMOD_CTRL, 'some_command')]
class ConfigLoadTest(unittest.TestCase):
def setUp(self):
self.default = None
def test_empty_pattern_returns_empty_array(self):
self.assertEqual([], get_config((), []))
def test_single_command_pattern_parses_single_key(self):
self.when_pattern_is((('comm1',),))
self.when_lines_are(['A'])
self.expect_config([KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm1')])
def test_two_command_pattern_parses_2_keys(self):
self.when_pattern_is((('comm1', 'comm2',),))
self.when_lines_are(['A D'])
self.expect_config([
KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm1'),
KeyBind(pygame.K_d, pygame.KMOD_NONE, 'comm2')])
def test_2_lines_pattern_parses_2_lines(self):
self.when_pattern_is((('comm1',), ('comm2',)))
self.when_lines_are(['A', 'D'])
self.expect_config([
KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm1'),
KeyBind(pygame.K_d, pygame.KMOD_NONE, 'comm2')])
def test_too_little_elements_in_line_return_default(self):
self.when_pattern_is((('comm1', 'comm2'),))
self.when_lines_are(['A'])
self.when_default_is(DEFAULT)
self.expect_config(DEFAULT)
def test_ctrl_is_parsed_as_KMOD_CTRL(self):
self.when_pattern_is((('comm1',),))
self.when_lines_are(['ctrl+A'])
self.expect_config([KeyBind(pygame.K_a, pygame.KMOD_CTRL, 'comm1')])
def test_two_modifiers_are_parsed(self):
self.when_pattern_is((('comm1',),))
self.when_lines_are(['ctrl+lshift+A'])
kmods = pygame.KMOD_CTRL | pygame.KMOD_LSHIFT
self.expect_config([KeyBind(pygame.K_a, kmods, 'comm1')])
def test_lowercase_keys_are_parsed(self):
self.when_pattern_is((('comm1',),))
self.when_lines_are(['a'])
self.expect_config([KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm1')])
def test_lowercase_special_keys_are_parsed(self):
self.when_pattern_is((('comm1',),))
self.when_lines_are(['space'])
self.expect_config(
[KeyBind(pygame.K_SPACE, pygame.KMOD_NONE, 'comm1')])
def test_uppercase_modifiers_are_parsed(self):
self.when_pattern_is((('comm1',),))
self.when_lines_are(['LCTRL+A'])
self.expect_config([KeyBind(pygame.K_a, pygame.KMOD_LCTRL, 'comm1')])
def test_invalid_key_results_in_default(self):
self.when_pattern_is((('comm1',),))
self.when_lines_are(['F42'])
self.when_default_is(DEFAULT)
self.expect_config(DEFAULT)
def when_pattern_is(self, pattern):
self.pattern = pattern
def when_lines_are(self, lines):
self.lines = lines
def when_default_is(self, default):
self.default = default
def expect_config(self, config):
result = get_config(self.pattern, self.lines, self.default)
self.assertEqual(config, result)
class ConfigSaveTest(unittest.TestCase):
def test_empty_pattern_generates_empty_file(self):
self.assertEqual([], to_text((), []))
def test_one_command_generates_1_line(self):
self.when_pattern_is((('comm1',),))
self.when_config_is([KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm1')])
self.expect_generated_text(['a'])
def test_two_commands_generate_line_with_2_elements(self):
self.when_pattern_is((('comm1', 'comm2'),))
self.when_config_is([KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm1'),
KeyBind(pygame.K_b, pygame.KMOD_NONE, 'comm2')])
self.expect_generated_text(['a b'])
def test_commands_are_generated_in_order_of_pattern(self):
self.when_pattern_is((('comm1', 'comm2'),))
self.when_config_is([KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm2'),
KeyBind(pygame.K_b, pygame.KMOD_NONE, 'comm1')])
self.expect_generated_text(['b a'])
def test_two_lines_generate_2_lines_(self):
self.when_pattern_is((('comm1',), ('comm2',),))
self.when_config_is([KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm2'),
KeyBind(pygame.K_b, pygame.KMOD_NONE, 'comm1')])
self.expect_generated_text(['b', 'a'])
def test_KMOD_CTRL_generates_output(self):
self.expect_3_mod_versions_handled('ctrl')
def test_KMOD_SHIFT_generates_output(self):
self.expect_3_mod_versions_handled('shift')
def test_KMOD_ALT_generates_output(self):
self.expect_3_mod_versions_handled('alt')
def test_KMOD_META_generates_output(self):
self.expect_3_mod_versions_handled('meta')
def test_KMOD_CAPS_generates_output(self):
self.expect_mod_handled('caps')
def test_KMOD_NUM_generates_output(self):
self.expect_mod_handled('num')
def test_KMOD_MODE_generates_output(self):
self.expect_mod_handled('mode')
def expect_3_mod_versions_handled(self, baseModName):
self.expect_mod_handled(baseModName)
self.expect_mod_handled('l' + baseModName)
self.expect_mod_handled('r' + baseModName)
def expect_mod_handled(self, modName):
self.when_pattern_is((('comm1',),))
fieldName = 'KMOD_' + modName.upper()
mod = getattr(pygame, fieldName)
self.when_config_is([KeyBind(pygame.K_a, mod, 'comm1')])
expected = '{}+a'.format(modName)
self.expect_generated_text([expected])
def when_pattern_is(self, pattern):
self.pattern = pattern
def when_config_is(self, config):
self.config = config
def expect_generated_text(self, text):
text = self.add_newlines(text)
self.assertEqual(text, to_text(self.pattern, self.config))
def add_newlines(self, lines):
return [l + '\n' for l in lines]
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"chip8_pygame_integration.config.KeyBind",
"chip8_pygame_integration.config.get_config",
"chip8_pygame_integration.config.to_text"
] | [((116, 169), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_o', 'pygame.KMOD_CTRL', '"""some_command"""'], {}), "(pygame.K_o, pygame.KMOD_CTRL, 'some_command')\n", (123, 169), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((5970, 5985), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5983, 5985), False, 'import unittest\n'), ((2990, 3040), 'chip8_pygame_integration.config.get_config', 'get_config', (['self.pattern', 'self.lines', 'self.default'], {}), '(self.pattern, self.lines, self.default)\n', (3000, 3040), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((347, 365), 'chip8_pygame_integration.config.get_config', 'get_config', (['()', '[]'], {}), '((), [])\n', (357, 365), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((3209, 3224), 'chip8_pygame_integration.config.to_text', 'to_text', (['()', '[]'], {}), '((), [])\n', (3216, 3224), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((5824, 5858), 'chip8_pygame_integration.config.to_text', 'to_text', (['self.pattern', 'self.config'], {}), '(self.pattern, self.config)\n', (5831, 5858), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((536, 582), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_a', 'pygame.KMOD_NONE', '"""comm1"""'], {}), "(pygame.K_a, pygame.KMOD_NONE, 'comm1')\n", (543, 582), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((771, 817), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_a', 'pygame.KMOD_NONE', '"""comm1"""'], {}), "(pygame.K_a, pygame.KMOD_NONE, 'comm1')\n", (778, 817), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((831, 877), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_d', 'pygame.KMOD_NONE', '"""comm2"""'], {}), "(pygame.K_d, 
pygame.KMOD_NONE, 'comm2')\n", (838, 877), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((1068, 1114), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_a', 'pygame.KMOD_NONE', '"""comm1"""'], {}), "(pygame.K_a, pygame.KMOD_NONE, 'comm1')\n", (1075, 1114), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((1128, 1174), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_d', 'pygame.KMOD_NONE', '"""comm2"""'], {}), "(pygame.K_d, pygame.KMOD_NONE, 'comm2')\n", (1135, 1174), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((1563, 1609), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_a', 'pygame.KMOD_CTRL', '"""comm1"""'], {}), "(pygame.K_a, pygame.KMOD_CTRL, 'comm1')\n", (1570, 1609), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((1831, 1866), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_a', 'kmods', '"""comm1"""'], {}), "(pygame.K_a, kmods, 'comm1')\n", (1838, 1866), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((2023, 2069), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_a', 'pygame.KMOD_NONE', '"""comm1"""'], {}), "(pygame.K_a, pygame.KMOD_NONE, 'comm1')\n", (2030, 2069), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((2251, 2301), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_SPACE', 'pygame.KMOD_NONE', '"""comm1"""'], {}), "(pygame.K_SPACE, pygame.KMOD_NONE, 'comm1')\n", (2258, 2301), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((2469, 2516), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_a', 'pygame.KMOD_LCTRL', '"""comm1"""'], {}), "(pygame.K_a, pygame.KMOD_LCTRL, 'comm1')\n", (2476, 2516), False, 'from chip8_pygame_integration.config import get_config, 
KeyBind, to_text\n'), ((3349, 3395), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_a', 'pygame.KMOD_NONE', '"""comm1"""'], {}), "(pygame.K_a, pygame.KMOD_NONE, 'comm1')\n", (3356, 3395), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((3585, 3631), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_a', 'pygame.KMOD_NONE', '"""comm1"""'], {}), "(pygame.K_a, pygame.KMOD_NONE, 'comm1')\n", (3592, 3631), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((3662, 3708), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_b', 'pygame.KMOD_NONE', '"""comm2"""'], {}), "(pygame.K_b, pygame.KMOD_NONE, 'comm2')\n", (3669, 3708), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((3900, 3946), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_a', 'pygame.KMOD_NONE', '"""comm2"""'], {}), "(pygame.K_a, pygame.KMOD_NONE, 'comm2')\n", (3907, 3946), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((3977, 4023), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_b', 'pygame.KMOD_NONE', '"""comm1"""'], {}), "(pygame.K_b, pygame.KMOD_NONE, 'comm1')\n", (3984, 4023), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((4204, 4250), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_a', 'pygame.KMOD_NONE', '"""comm2"""'], {}), "(pygame.K_a, pygame.KMOD_NONE, 'comm2')\n", (4211, 4250), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((4281, 4327), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', (['pygame.K_b', 'pygame.KMOD_NONE', '"""comm1"""'], {}), "(pygame.K_b, pygame.KMOD_NONE, 'comm1')\n", (4288, 4327), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n'), ((5445, 5478), 'chip8_pygame_integration.config.KeyBind', 'KeyBind', 
(['pygame.K_a', 'mod', '"""comm1"""'], {}), "(pygame.K_a, mod, 'comm1')\n", (5452, 5478), False, 'from chip8_pygame_integration.config import get_config, KeyBind, to_text\n')] |
'''
Preorder Binary Tree
For a given Binary Tree of integers, print the pre-order traversal.
Input Format:
The first and the only line of input will contain the nodes data, all separated by a single space. Since -1 is used as an indication whether the left or right node data exist for root, it will not be a part of the node data.
Output Format:
The only line of output prints the pre-order traversal of the given binary tree.
Constraints:
1 <= N <= 10^6
Where N is the total number of nodes in the binary tree.
Time Limit: 1 sec
Sample Input 1:
5 6 10 2 3 -1 -1 -1 -1 -1 9 -1 -1
Sample Ouptut 1:
5 6 2 3 9 10
Sample Input 2:
1 2 3 4 5 6 7 -1 -1 -1 -1 -1 -1 -1 -1
Sample Ouptut 2:
1 2 4 5 3 6 7
'''
from sys import stdin, setrecursionlimit
import queue
setrecursionlimit(10 ** 6)
#Following the structure used for Binary Tree
class BinaryTreeNode:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def preOrder(root):
#Your code goes here
if root is None:
return
print(root.data, end=' ')
preOrder(root.left)
preOrder(root.right)
#Taking level-order input using fast I/O method
def takeInput():
levelOrder = list(map(int, stdin.readline().strip().split(" ")))
start = 0
length = len(levelOrder)
root = BinaryTreeNode(levelOrder[start])
start += 1
q = queue.Queue()
q.put(root)
while not q.empty():
currentNode = q.get()
leftChild = levelOrder[start]
start += 1
if leftChild != -1:
leftNode = BinaryTreeNode(leftChild)
currentNode.left =leftNode
q.put(leftNode)
rightChild = levelOrder[start]
start += 1
if rightChild != -1:
rightNode = BinaryTreeNode(rightChild)
currentNode.right =rightNode
q.put(rightNode)
return root
# Main
root = takeInput()
preOrder(root) | [
"sys.stdin.readline",
"sys.setrecursionlimit",
"queue.Queue"
] | [((766, 792), 'sys.setrecursionlimit', 'setrecursionlimit', (['(10 ** 6)'], {}), '(10 ** 6)\n', (783, 792), False, 'from sys import stdin, setrecursionlimit\n'), ((1377, 1390), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (1388, 1390), False, 'import queue\n'), ((1225, 1241), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (1239, 1241), False, 'from sys import stdin, setrecursionlimit\n')] |
#!/usr/bin/env python3
"""
This script has been tested on various custom google forms and other various forms with
few alteratios ..
Google forms which does include the input type "token" attribute are found
to be safer than those who don't.
Any form contains various fields.
1. input text fields
2. radio
3. checkboxes
4. textareas
5. Uploads --- important . still working.
"""
import re
import requests
from urllib.request import urlopen
from bs4 import BeautifulSoup
params = {}
url = input("Enter the website url")
page = urlopen(url)
bs_obj = BeautifulSoup(page, 'html.parser')
# bs_obj.prettify() --> it's effects on the tags buried deep in the divs
requests.session()
input_tags = bs_obj.find_all('input')
# print(input_tags)
form_action = bs_obj.find('form') # some pages have multiple form tags ...
text_tags = bs_obj.find_all('textarea')
for text in text_tags:
try:
print(text['name'])
text['name'] = "Running around and fill this form"
except:
print('Key Error')
# if form_action.attrs['action'] == "" or None:
# print("Form action not specifies")
# else:
# print(form_action)
url = form_action.attrs['action']
print(f"Post request is send in here: {url}")
# there might be some custom fields which are to be looked and inspected manually as they skip the scrapper
# like params['entry.377191685'] = 'Faculty'
# params['tos'] = 'true'
# vary accordingly as at least an attck is just not that easy. ;-)
for tag in input_tags:
try:
print(tag.attrs['aria-label'])
except:
pass
try:
if tag.attrs['value'] == "" or None:
tag.attrs['value'] = input(f"Enter the value of {tag.attrs['name']}")
params[tag.attrs['name']] = tag.attrs['value']
# except:
# value= input(f"Enter the value of {tag.attrs['name']}")
# params[tag.attrs['name']] = value
else:
params[tag.attrs['name']] = tag.attrs['value'].strip('\n')
except:
pass
print(params)
# getting the dicts as printed here... which is to be submitted
while True:
requests.session()
r = requests.post(url, data=params)
print(r.status_code)
# 200 OK ---> submitted
# 400 BAD REQUEST ERROR --> input data corrupt or server incompatible
# 401 UNAOUTHORIZED ACCESS --> validation failed (need to deal with tokens and the cookies)
| [
"bs4.BeautifulSoup",
"requests.session",
"requests.post",
"urllib.request.urlopen"
] | [((534, 546), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (541, 546), False, 'from urllib.request import urlopen\n'), ((557, 591), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page', '"""html.parser"""'], {}), "(page, 'html.parser')\n", (570, 591), False, 'from bs4 import BeautifulSoup\n'), ((665, 683), 'requests.session', 'requests.session', ([], {}), '()\n', (681, 683), False, 'import requests\n'), ((2124, 2142), 'requests.session', 'requests.session', ([], {}), '()\n', (2140, 2142), False, 'import requests\n'), ((2152, 2183), 'requests.post', 'requests.post', (['url'], {'data': 'params'}), '(url, data=params)\n', (2165, 2183), False, 'import requests\n')] |
import dataset
import logging
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from .config import TOKEN, DB_URI
from .commands import HANDLERS
logging.basicConfig()
def save_message(message, db):
replied = None
if message.reply_to_message is not None:
replied = message.reply_to_message.message_id
length = None
if message.text is not None:
length = len(message.text)
elif message.caption is not None:
length = len(message.caption)
vote = None
if message.text == '+':
vote = '+'
elif message.text == '-':
vote = '-'
new_row = {
'timestamp': message.date,
'message_id': message.message_id,
'chat_id': message.chat_id,
'user_id': message.from_user.id,
'replied': replied,
'length': length,
'vote': vote,
}
db['messages'].upsert(new_row, keys=['message_id', 'chat_id'])
def save_user(user, db):
table = db['users']
new_row = {
'user_id': user.id,
'first_name': user.first_name,
'last_name': user.last_name,
'username': user.username,
}
if table.find_one(user_id=user.id) is None:
new_row['tracked'] = True
table.insert(new_row)
else:
table.update(new_row, keys=['user_id'])
def save(bot, update):
db = dataset.connect(DB_URI)
save_message(update.message, db)
save_user(update.message.from_user, db)
def track(user_id, value):
db = dataset.connect(DB_URI)
table = db['users']
new_row = {
'user_id': user_id,
'tracked': value,
}
table.upsert(new_row, keys=['user_id'])
def opt_in(bot, update):
track(update.message.from_user.id, True)
def opt_out(bot, update):
track(update.message.from_user.id, False)
def run():
    """Build the bot, register every handler and poll until interrupted."""
    updater = Updater(TOKEN)
    extra_handlers = [
        CommandHandler('opt_in', opt_in),
        CommandHandler('opt_out', opt_out),
        MessageHandler(Filters.all, save),  # must be last
    ]
    for handler in HANDLERS + extra_handlers:
        updater.dispatcher.add_handler(handler)
    updater.start_polling()
    updater.idle()
| [
"logging.basicConfig",
"telegram.ext.MessageHandler",
"telegram.ext.CommandHandler",
"telegram.ext.Updater",
"dataset.connect"
] | [((172, 193), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (191, 193), False, 'import logging\n'), ((1360, 1383), 'dataset.connect', 'dataset.connect', (['DB_URI'], {}), '(DB_URI)\n', (1375, 1383), False, 'import dataset\n'), ((1503, 1526), 'dataset.connect', 'dataset.connect', (['DB_URI'], {}), '(DB_URI)\n', (1518, 1526), False, 'import dataset\n'), ((1846, 1860), 'telegram.ext.Updater', 'Updater', (['TOKEN'], {}), '(TOKEN)\n', (1853, 1860), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n'), ((1898, 1930), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""opt_in"""', 'opt_in'], {}), "('opt_in', opt_in)\n", (1912, 1930), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n'), ((1940, 1974), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""opt_out"""', 'opt_out'], {}), "('opt_out', opt_out)\n", (1954, 1974), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n'), ((1984, 2017), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.all', 'save'], {}), '(Filters.all, save)\n', (1998, 2017), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n')] |
from pymp3decoder import Decoder
import contextlib
import os
import math
import pyaudio
CHUNK_SIZE = 4096
def take_chunk(content):
    """ Yield successive CHUNK_SIZE-sized slices of a buffer """
    for offset in range(0, len(content), CHUNK_SIZE):
        yield content[offset:offset + CHUNK_SIZE]
class TestPlayer:
    """Decode an MP3 file chunk-by-chunk and play it through PyAudio."""
    @contextlib.contextmanager
    def start(self):
        """Open a PyAudio output stream; always release audio resources.

        Fix: previously the ``finally`` clause referenced ``self.stream`` and
        ``p`` unconditionally, so a failure in ``pyaudio.PyAudio()`` or
        ``p.open`` raised a secondary NameError/AttributeError that masked
        the real error. Cleanup is now nested so each resource is only
        released after it was successfully acquired.
        """
        p = pyaudio.PyAudio()
        try:
            self.decoder = Decoder(CHUNK_SIZE*20)
            self.stream = p.open(format=p.get_format_from_width(2),
                                 channels=2,
                                 rate=44100,
                                 output=True)
            try:
                yield self.stream
            finally:
                self.stream.stop_stream()
                self.stream.close()
        finally:
            p.terminate()

    def test_file(self):
        """ Open a file and decode it """
        abs_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test.mp3")
        with open(abs_location, "rb") as in_file, self.start():
            content = in_file.read()
            for chunk in self.decoder.decode_iter(take_chunk(content)):
                self.stream.write(chunk)
| [
"os.path.abspath",
"pymp3decoder.Decoder",
"pyaudio.PyAudio"
] | [((443, 460), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (458, 460), False, 'import pyaudio\n'), ((489, 513), 'pymp3decoder.Decoder', 'Decoder', (['(CHUNK_SIZE * 20)'], {}), '(CHUNK_SIZE * 20)\n', (496, 513), False, 'from pymp3decoder import Decoder\n'), ((984, 1009), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (999, 1009), False, 'import os\n')] |
import telegram.ext
import messsages as msg
import functions as f
import matplotlib.pyplot as plt
import traceback
import os
import os.path
from os import path
def start(update, context):
    """Greet the user by first name and point them at /Help."""
    first_name = update.message.chat.first_name
    greeting = "Bienvenido {}, para conocer lo que puedo hacer utiliza el comando /Help.".format(first_name)
    update.message.reply_text(greeting)
def help(update, context):
    # Reply with the HTML-formatted help text. NOTE(review): the name
    # shadows the builtin `help`; kept because the handler registration
    # elsewhere refers to it.
    update.message.reply_text(msg.helpMessage, parse_mode=telegram.ParseMode.HTML)
# Send a chart containing every star.
def allStars(update, context):
    """Render all stars to a temporary PNG, send it to the chat, then delete it.

    Fix: the image file is now closed via a context manager before removal;
    previously the handle opened with ``open(..., 'rb')`` was never closed,
    leaking a descriptor (and ``os.remove`` fails on Windows while the file
    is still open).
    """
    chat_id = update.message.chat.id
    figure = f.stars()
    figure.draw()
    figure.savefig("./files/stars.png")
    with open("./files/stars.png", 'rb') as image:
        context.bot.send_photo(chat_id, image)
    os.remove("./files/stars.png")
# Send a chart of all stars with one highlighted constellation.
def allStars1Constellation(update, context):
    """Highlight the constellation named in the command, send it, clean up.

    Fixes: the image file is now closed via a context manager (the handle was
    leaked before), and the bare ``except`` is narrowed to ``Exception`` so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    chat_id = update.message.chat.id
    # Command format: "/<command> <constellation-name>".
    constellation = update.message.text.split(" ")
    try:
        f.searchFile("./files/constellations/", constellation[1])
        figure = f.allStars1Constellation(constellation[1], f.stars(), "#fdff6e")
        figure.savefig("./files/1Constellation.png")
        with open("./files/1Constellation.png", 'rb') as image:
            context.bot.send_photo(chat_id, image)
        os.remove("./files/1Constellation.png")
    except Exception:
        update.message.reply_text(msg.errorMessage, parse_mode=telegram.ParseMode.HTML)
# Send a chart of all stars together with every constellation.
def allStarsAllConstellations(update, context):
    """Render all stars plus all constellations, send the PNG, then delete it.

    Fix: the image file is now closed via a context manager before removal
    (the handle was previously leaked, and ``os.remove`` fails on Windows
    while the file is still open).
    """
    chat_id = update.message.chat.id
    figure = f.starsAndContellations()
    figure.draw()
    figure.savefig("./files/StarsAndConstellations.png")
    with open("./files/StarsAndConstellations.png", 'rb') as image:
        context.bot.send_photo(chat_id, image)
    os.remove("./files/StarsAndConstellations.png")
# Send the list of available constellations.
def constellations(update, context):
    update.message.reply_text(msg.constellationsMessage)
# Send information about this bot (the original comment was a copy-paste
# of the constellations one).
def about(update, context):
    update.message.reply_text(msg.infoMessage, parse_mode=telegram.ParseMode.HTML)
| [
"os.remove",
"functions.starsAndContellations",
"functions.searchFile",
"functions.stars"
] | [((625, 634), 'functions.stars', 'f.stars', ([], {}), '()\n', (632, 634), True, 'import functions as f\n'), ((765, 795), 'os.remove', 'os.remove', (['"""./files/stars.png"""'], {}), "('./files/stars.png')\n", (774, 795), False, 'import os\n'), ((1655, 1680), 'functions.starsAndContellations', 'f.starsAndContellations', ([], {}), '()\n', (1678, 1680), True, 'import functions as f\n'), ((1845, 1892), 'os.remove', 'os.remove', (['"""./files/StarsAndConstellations.png"""'], {}), "('./files/StarsAndConstellations.png')\n", (1854, 1892), False, 'import os\n'), ((1052, 1109), 'functions.searchFile', 'f.searchFile', (['"""./files/constellations/"""', 'constellation[1]'], {}), "('./files/constellations/', constellation[1])\n", (1064, 1109), True, 'import functions as f\n'), ((1334, 1373), 'os.remove', 'os.remove', (['"""./files/1Constellation.png"""'], {}), "('./files/1Constellation.png')\n", (1343, 1373), False, 'import os\n'), ((1170, 1179), 'functions.stars', 'f.stars', ([], {}), '()\n', (1177, 1179), True, 'import functions as f\n')] |
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
def countnum():
    """Per-group fraction of 'col1' values strictly greater than 2.

    Builds a small month-end indexed frame, stacks it under a single
    'date' key and computes, per group, the share of 'col1' entries > 2.

    Returns
    -------
    pandas.Series
        The per-group fraction, indexed by group key. (Previously the
        value was only printed; it is now also returned so callers can
        use it programmatically — backward compatible.)
    """
    dates = pd.date_range(start="2019-01-01", end="2019-05-31", freq='M')
    col1 = list(range(1, len(dates) + 1))
    col2 = [i + 1 for i in col1]
    df = pd.DataFrame({'col1': col1, 'col2': col2}, index=dates)
    # Concatenate under an outer index level so groupby(level=0) has a key.
    df_ic = pd.concat({'date': df})
    mean = df_ic.groupby(level=0).apply(lambda frame: len(
        [i for i in frame['col1'].values if i > 2]) / len(frame['col1']))
    print(mean)
    return mean
def statfunc():
    """Locate the rows with the lowest and highest close price.

    Returns
    -------
    tuple of pandas.Series
        ``(min_close_row, max_close_row)``. (Previously nothing was
        returned; the diagnostic prints are preserved — backward
        compatible.)
    """
    perf_dict = {"code": ['000001', '000002', '000003'],
                 "close": [100, 91.1, 5.4],
                 "vol": [1000, 200, 3000]}
    df = pd.DataFrame(perf_dict)
    # Positional index of the minimum/maximum close.
    print(df['close'].idxmin())
    min_close = df.iloc[df['close'].idxmin(), :]
    print(min_close)
    max_close = df.iloc[df['close'].idxmax(), :]
    print(max_close)
    result = df.any(axis=0)
    print(result)
    return min_close, max_close
# Script entry point: run the statistics demo (the groupby demo is kept
# commented out for reference).
if __name__ == '__main__':
    # countnum()
    statfunc()
| [
"pandas.DataFrame",
"pandas.date_range"
] | [((93, 154), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2019-01-01"""', 'end': '"""2019-05-31"""', 'freq': '"""M"""'}), "(start='2019-01-01', end='2019-05-31', freq='M')\n", (106, 154), True, 'import pandas as pd\n'), ((354, 409), 'pandas.DataFrame', 'pd.DataFrame', (["{'col1': col1, 'col2': col2}"], {'index': 'dates'}), "({'col1': col1, 'col2': col2}, index=dates)\n", (366, 409), True, 'import pandas as pd\n'), ((886, 909), 'pandas.DataFrame', 'pd.DataFrame', (['perf_dict'], {}), '(perf_dict)\n', (898, 909), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import base64
import os
import zlib
from .environment import get_environment
from . import util
def iter_results_paths(results):
    """
    Yield (dirpath, filename) pairs for every result file under *results*.

    Only ``*.json`` files count, excluding the machine/benchmark metadata
    files.
    """
    excluded = {'machine.json', 'benchmarks.json'}
    for dirpath, _dirnames, filenames in os.walk(results):
        for name in filenames:
            if name.endswith('.json') and name not in excluded:
                yield (dirpath, name)
def iter_results(results):
    """
    Yield a loaded Results object for every result file under *results*.
    """
    for (dirpath, name) in iter_results_paths(results):
        yield Results.load(os.path.join(dirpath, name))
def iter_results_for_machine(results, machine_name):
    """
    Iterate over all of the result files for a particular machine.
    """
    machine_dir = os.path.join(results, machine_name)
    return iter_results(machine_dir)
def iter_results_for_machine_and_hash(results, machine_name, commit):
    """
    Iterate over all of the result files with a given hash for a
    particular machine. Hashes match on their common prefix so a
    shortened commit hash still finds its results.
    """
    machine_dir = os.path.join(results, machine_name)
    for (dirpath, name) in iter_results_paths(machine_dir):
        file_commit = name.split('-')[0]
        prefix_len = max(len(commit), len(file_commit))
        if file_commit[:prefix_len] == commit[:prefix_len]:
            yield Results.load(os.path.join(dirpath, name))
def iter_existing_hashes(results):
    """
    Yield (commit_hash, date) for every result file under *results*.

    May yield duplicates. Use `get_existing_hashes` if that matters.
    """
    for result in iter_results(results):
        yield (result.commit_hash, result.date)
def get_existing_hashes(results):
    """
    Get all of the commit hashes that have already been tested,
    de-duplicated, as a list of (hash, date) pairs.
    """
    return list(set(iter_existing_hashes(results)))
def find_latest_result_hash(machine, root):
    """
    Find the commit hash of the most recent result for *machine*,
    or '' if there are no results yet.
    """
    machine_root = os.path.join(root, machine)
    latest = (0, '')  # (date, commit_hash)
    for commit_hash, date in iter_existing_hashes(machine_root):
        if date > latest[0]:
            latest = (date, commit_hash)
    return latest[1]
def get_filename(machine, commit_hash, env):
    """
    Get the result filename for a given machine, commit_hash and
    environment: ``<machine>/<hash8>-<env name>.json``.
    """
    basename = "{0}-{1}.json".format(commit_hash[:8], env.name)
    return os.path.join(machine, basename)
class Results(object):
    """
    Manage a set of benchmark results for a single machine and commit
    hash.
    """
    # Version of the on-disk JSON schema; passed through to
    # util.write_json/load_json for compatibility checking.
    api_version = 1
    def __init__(self, params, env, commit_hash, date):
        """
        Parameters
        ----------
        params : dict
            Parameters describing the environment in which the
            benchmarks were run.
        env : Environment object
            Environment in which the benchmarks were run.
        commit_hash : str
            The commit hash for the benchmark run.
        date : int
            Javascript timestamp for when the commit was merged into
            the repository.
        """
        self._params = params
        self._env = env
        self._commit_hash = commit_hash
        self._date = date
        self._results = {}   # benchmark name -> numeric timing result
        self._profiles = {}  # benchmark name -> base64(zlib(cProfile bytes))
        self._python = env.python
        # Relative path (under the results root) where this result lives.
        self._filename = get_filename(
            params['machine'], self._commit_hash, env)
    @property
    def commit_hash(self):
        return self._commit_hash
    @property
    def date(self):
        return self._date
    @property
    def params(self):
        return self._params
    @property
    def results(self):
        return self._results
    @property
    def env(self):
        return self._env
    def add_time(self, benchmark_name, time):
        """
        Add benchmark times.
        Parameters
        ----------
        benchmark_name : str
            Name of benchmark
        time : number
            Numeric result
        """
        self._results[benchmark_name] = time
    def add_profile(self, benchmark_name, profile):
        """
        Add benchmark profile data.
        Parameters
        ----------
        benchmark_name : str
            Name of benchmark
        profile : bytes
            `cProfile` data
        """
        # Compressed + base64-encoded so the profile survives the JSON
        # round-trip performed by save()/load().
        self._profiles[benchmark_name] = base64.b64encode(
            zlib.compress(profile))
    def get_profile(self, benchmark_name):
        """
        Get the profile data for the given benchmark name.
        """
        # Inverse of add_profile: base64-decode then decompress.
        return zlib.decompress(
            base64.b64decode(self._profiles[benchmark_name]))
    def has_profile(self, benchmark_name):
        """
        Does the given benchmark data have profiling information?
        """
        return benchmark_name in self._profiles
    def save(self, result_dir):
        """
        Save the results to disk.
        Parameters
        ----------
        result_dir : str
            Path to root of results tree.
        """
        path = os.path.join(result_dir, self._filename)
        util.write_json(path, {
            'results': self._results,
            'params': self._params,
            'requirements': self._env.requirements,
            'commit_hash': self._commit_hash,
            'date': self._date,
            'python': self._python,
            'profiles': self._profiles
        }, self.api_version)
    @classmethod
    def load(cls, path):
        """
        Load results from disk.
        Parameters
        ----------
        path : str
            Path to results file.
        """
        d = util.load_json(path, cls.api_version)
        obj = cls(
            d['params'],
            get_environment('', d['python'], d['requirements']),
            d['commit_hash'],
            d['date'])
        obj._results = d['results']
        if 'profiles' in d:
            obj._profiles = d['profiles']
        # Rebuild the relative filename from the last two path components
        # (machine directory + file name) rather than trusting params.
        obj._filename = os.path.join(*path.split(os.path.sep)[-2:])
        return obj
    def rm(self, result_dir):
        # Delete this result's file from the given results tree.
        path = os.path.join(result_dir, self._filename)
        os.remove(path)
    @classmethod
    def update(cls, path):
        # Migrate an on-disk result file to the current api_version.
        util.update_json(cls, path, cls.api_version)
| [
"os.path.join",
"base64.b64decode",
"zlib.compress",
"os.walk",
"os.remove"
] | [((495, 511), 'os.walk', 'os.walk', (['results'], {}), '(results)\n', (502, 511), False, 'import os\n'), ((2241, 2268), 'os.path.join', 'os.path.join', (['root', 'machine'], {}), '(root, machine)\n', (2253, 2268), False, 'import os\n'), ((1020, 1055), 'os.path.join', 'os.path.join', (['results', 'machine_name'], {}), '(results, machine_name)\n', (1032, 1055), False, 'import os\n'), ((1294, 1329), 'os.path.join', 'os.path.join', (['results', 'machine_name'], {}), '(results, machine_name)\n', (1306, 1329), False, 'import os\n'), ((5325, 5365), 'os.path.join', 'os.path.join', (['result_dir', 'self._filename'], {}), '(result_dir, self._filename)\n', (5337, 5365), False, 'import os\n'), ((6350, 6390), 'os.path.join', 'os.path.join', (['result_dir', 'self._filename'], {}), '(result_dir, self._filename)\n', (6362, 6390), False, 'import os\n'), ((6399, 6414), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (6408, 6414), False, 'import os\n'), ((4686, 4708), 'zlib.compress', 'zlib.compress', (['profile'], {}), '(profile)\n', (4699, 4708), False, 'import zlib\n'), ((4881, 4929), 'base64.b64decode', 'base64.b64decode', (['self._profiles[benchmark_name]'], {}), '(self._profiles[benchmark_name])\n', (4897, 4929), False, 'import base64\n'), ((828, 856), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (840, 856), False, 'import os\n'), ((1524, 1552), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (1536, 1552), False, 'import os\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from typing import Tuple
def clean_df_headers(df: pd.DataFrame) -> pd.DataFrame:
    """Strip leading/trailing whitespace from every column name of *df*.

    The frame is modified in place and also returned for chaining.
    """
    stripped = [name.strip() for name in pd.Series(df.columns)]
    df.columns = pd.Series(stripped)
    return df
def configure_ax(ax: plt.axes,
                 df: pd.DataFrame = None,
                 xlabel: str = None,
                 ylabel: str = None,
                 ylim: Tuple[int, int] = None,
                 title: str = None,
                 legend: bool = False
                 ) -> plt.axes:
    """Configure a Matplotlib axes: plot each DataFrame column, apply labels.

    Fixes:
    - ``legend`` was tested with ``is not None``; since its default is
      ``False`` (never ``None``) the legend was drawn unconditionally.
      It is now only drawn when ``legend`` is truthy.
    - The ``ylabel``/``ylim`` type annotations were swapped in the
      original signature.
    """
    if df is not None:
        x = df.index
        for column in df.columns:
            ax.plot(x, df[column], label=column)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if ylim is not None:
        ax.set_ylim(ylim)
    if title is not None:
        ax.set_title(title)
    if legend:
        ax.legend()
    return ax
# Script entry point: load logged quad states and plot true vs. estimated
# positions/velocities per axis.
if __name__ == "__main__":
    # Load sensor data
    df_data = pd.read_csv("step_03_-_scenario_08_-_after_tuning.txt")
    # Remove leading and trailing spaces in df headers
    df_data = clean_df_headers(df_data)
    # Set "time" column as DataFrame index
    df_data = df_data.set_index("time")
    # Plot results
    fig = plt.figure()
    fig.suptitle("True & Predicted States \n (Global Frame)")
    # X-Position and X-Speed
    ax = plt.subplot(3,1,1)
    df = df_data[["quad.pos.x", "quad.est.x", "quad.vel.x", "quad.est.vx"]]
    ax = configure_ax(ax, df = df, ylabel = "X-Positions [m] \n X-Velocities [m/s]", title = "After Tuning", legend = True)
    # Y-Position and Y-Speed
    ax = plt.subplot(3,1,2)
    df = df_data[["quad.pos.y", "quad.est.y", "quad.vel.y", "quad.est.vy"]]
    ax = configure_ax(ax, df = df, ylabel = "Y-Positions [m] \n Y-Velocities [m/s]", legend = True)
    # Z-Position and Z-Speed
    ax = plt.subplot(3,1,3)
    df = df_data[["quad.pos.z", "quad.est.z", "quad.vel.z", "quad.est.vz"]]
    ax = configure_ax(ax, df = df, xlabel = "Time [s]", ylabel = "Z-Positions [m] \n Z-Velocities [m/s]", legend = True)
    plt.show()
| [
"pandas.Series",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((236, 257), 'pandas.Series', 'pd.Series', (['df.columns'], {}), '(df.columns)\n', (245, 257), True, 'import pandas as pd\n'), ((334, 356), 'pandas.Series', 'pd.Series', (['new_headers'], {}), '(new_headers)\n', (343, 356), True, 'import pandas as pd\n'), ((1233, 1288), 'pandas.read_csv', 'pd.read_csv', (['"""step_03_-_scenario_08_-_after_tuning.txt"""'], {}), "('step_03_-_scenario_08_-_after_tuning.txt')\n", (1244, 1288), True, 'import pandas as pd\n'), ((1499, 1511), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1509, 1511), True, 'import matplotlib.pyplot as plt\n'), ((1613, 1633), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (1624, 1633), True, 'import matplotlib.pyplot as plt\n'), ((1871, 1891), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (1882, 1891), True, 'import matplotlib.pyplot as plt\n'), ((2105, 2125), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (2116, 2125), True, 'import matplotlib.pyplot as plt\n'), ((2330, 2340), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2338, 2340), True, 'import matplotlib.pyplot as plt\n')] |
import os
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from common.tflogs2pandas import tflog2pandas, many_logs2pandas
from common.gym_interface import template
# Experiment parameters: body id(s), random seeds and frame-stacking options.
bodies = [300]
all_seeds = list(range(20))
all_stackframe = [0,4]
cache_filename = "output_data/tmp/plot0"
# Load the cached dataframe if present; otherwise rebuild it from the
# tensorboard logs and cache the result.
# NOTE(review): the bare `except:` also hides unrelated failures
# (e.g. a corrupt pickle) — consider narrowing to `except Exception:`.
try:
    df = pd.read_pickle(cache_filename)
except:
# if True:
    dfs = []
    for body in bodies:
        for seed in all_seeds:
            for stackframe in all_stackframe:
                path = f"output_data/tensorboard/model-{body}"
                if stackframe>0:
                    path += f"-stack{stackframe}"
                path += f"-sd{seed}/SAC_1"
                print(f"Loading {path}")
                if not os.path.exists(path):
                    continue
                df = tflog2pandas(path)
                df["body"] = body
                df["seed"] = seed
                df["stackframe"] = stackframe
                # Keep only the evaluation mean-reward metric rows.
                df = df[df["metric"] == f"eval/{body}_mean_reward"]
                print(df.shape)
                print(df.head())
                dfs.append(df)
    df = pd.concat(dfs)
    df.to_pickle(cache_filename)
print(df.shape)
# df = df[::100]
print(df[df["seed"]==0].head())
print(df[df["seed"]==1].head())
print(df[df["seed"]==2].head())
print(df[df["seed"]==3].head())
# For each (body, seed, stackframe) combination keep the row with the
# highest recorded value (best evaluation reward).
# NOTE(review): DataFrame.append was removed in pandas 2.0; this script
# requires pandas < 2 (or a port to pd.concat).
df1 = pd.DataFrame(columns=df.columns)
print(df1)
for body in bodies:
    for seed in all_seeds:
        for stackframe in all_stackframe:
            df2 = df[(df["body"]==body) & (df["seed"]==seed) & (df["stackframe"]==stackframe)]
            print(df2.shape)
            x = df2.iloc[df2["value"].argsort().iloc[-1]]
            df1 = df1.append(x)
            # for i in range(30):
            if False:
                step_number = 60000
                x = df2.iloc[(df2["step"] - step_number).abs().argsort()[0]]
                if abs(x["step"]-step_number)>1500:
                    print("no")
                else:
                    # print(x)
                    x = x.copy()
                    # x["step"] = step_number
                    df1 = df1.append(x)
# Only keep runs whose best value was reached late in training.
df1 = df1[df1["step"]>550000]
print(df1)
# Report mean +- 2*std for the control (no stacking) and treatment groups.
print("control")
df2 = df1[df1["stackframe"]==0]
print(f"{df2['value'].mean():.03f} +- {2*df2['value'].std():.03f}")
print("treatment: stackframe")
df2 = df1[df1["stackframe"]==4]
print(f"{df2['value'].mean():.03f} +- {2*df2['value'].std():.03f}")
print(df1.shape, df.shape)
df = df1
fig, axes = plt.subplots(nrows=1, ncols=1, sharey=True, figsize=[10,10])
sns.barplot(ax=axes, data=df1, x="stackframe", y="value")
# axes = [axes]
# axes = axes.flatten()
# for idx, body in enumerate(bodies):
#     sns.lineplot(
#         ax=axes[idx],
#         data=df[df["body"]==body],
#         x="step", y="value", hue="stackframe",
#         markers=True, dashes=False
#     ).set_title(template(body))
plt.legend()
plt.tight_layout()
plt.savefig("output_data/plots/0.png")
# plt.show() | [
"pandas.read_pickle",
"os.path.exists",
"matplotlib.pyplot.savefig",
"common.tflogs2pandas.tflog2pandas",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"seaborn.barplot",
"pandas.concat",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend"
] | [((1337, 1369), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df.columns'}), '(columns=df.columns)\n', (1349, 1369), True, 'import pandas as pd\n'), ((2448, 2509), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'sharey': '(True)', 'figsize': '[10, 10]'}), '(nrows=1, ncols=1, sharey=True, figsize=[10, 10])\n', (2460, 2509), True, 'import matplotlib.pyplot as plt\n'), ((2509, 2566), 'seaborn.barplot', 'sns.barplot', ([], {'ax': 'axes', 'data': 'df1', 'x': '"""stackframe"""', 'y': '"""value"""'}), "(ax=axes, data=df1, x='stackframe', y='value')\n", (2520, 2566), True, 'import seaborn as sns\n'), ((2847, 2859), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2857, 2859), True, 'import matplotlib.pyplot as plt\n'), ((2860, 2878), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2876, 2878), True, 'import matplotlib.pyplot as plt\n'), ((2879, 2917), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_data/plots/0.png"""'], {}), "('output_data/plots/0.png')\n", (2890, 2917), True, 'import matplotlib.pyplot as plt\n'), ((314, 344), 'pandas.read_pickle', 'pd.read_pickle', (['cache_filename'], {}), '(cache_filename)\n', (328, 344), True, 'import pandas as pd\n'), ((1118, 1132), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (1127, 1132), True, 'import pandas as pd\n'), ((803, 821), 'common.tflogs2pandas.tflog2pandas', 'tflog2pandas', (['path'], {}), '(path)\n', (815, 821), False, 'from common.tflogs2pandas import tflog2pandas, many_logs2pandas\n'), ((731, 751), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (745, 751), False, 'import os\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from archai.nas.model import Model
from archai.nas.macro_builder import MacroBuilder
from archai.common.common import common_init
def test_darts_zero_model():
    """Smoke test: a DARTS macro model built from config yields (1, 10) logits."""
    conf = common_init(config_filepath='confs/algos/darts.yaml')
    search_conf = conf['nas']['search']
    built_desc = MacroBuilder(search_conf['model_desc']).build()
    model = Model(built_desc, False, True)
    logits, aux = model(torch.rand((1, 3, 32, 32)))
    assert isinstance(logits, torch.Tensor) and logits.shape == (1, 10) and aux is None
def test_petridish_zero_model():
    """Smoke test: a Petridish macro model built from config yields (1, 10) logits."""
    conf = common_init(config_filepath='confs/petridish_cifar.yaml')
    search_conf = conf['nas']['search']
    built_desc = MacroBuilder(search_conf['model_desc']).build()
    model = Model(built_desc, False, True)
    logits, aux = model(torch.rand((1, 3, 32, 32)))
    assert isinstance(logits, torch.Tensor) and logits.shape == (1, 10) and aux is None
| [
"archai.common.common.common_init",
"archai.nas.model.Model",
"torch.rand",
"archai.nas.macro_builder.MacroBuilder"
] | [((267, 320), 'archai.common.common.common_init', 'common_init', ([], {'config_filepath': '"""confs/algos/darts.yaml"""'}), "(config_filepath='confs/algos/darts.yaml')\n", (278, 320), False, 'from archai.common.common import common_init\n'), ((429, 453), 'archai.nas.macro_builder.MacroBuilder', 'MacroBuilder', (['model_desc'], {}), '(model_desc)\n', (441, 453), False, 'from archai.nas.macro_builder import MacroBuilder\n'), ((503, 533), 'archai.nas.model.Model', 'Model', (['model_desc', '(False)', '(True)'], {}), '(model_desc, False, True)\n', (508, 533), False, 'from archai.nas.model import Model\n'), ((702, 759), 'archai.common.common.common_init', 'common_init', ([], {'config_filepath': '"""confs/petridish_cifar.yaml"""'}), "(config_filepath='confs/petridish_cifar.yaml')\n", (713, 759), False, 'from archai.common.common import common_init\n'), ((868, 892), 'archai.nas.macro_builder.MacroBuilder', 'MacroBuilder', (['model_desc'], {}), '(model_desc)\n', (880, 892), False, 'from archai.nas.macro_builder import MacroBuilder\n'), ((942, 972), 'archai.nas.model.Model', 'Model', (['model_desc', '(False)', '(True)'], {}), '(model_desc, False, True)\n', (947, 972), False, 'from archai.nas.model import Model\n'), ((550, 576), 'torch.rand', 'torch.rand', (['(1, 3, 32, 32)'], {}), '((1, 3, 32, 32))\n', (560, 576), False, 'import torch\n'), ((989, 1015), 'torch.rand', 'torch.rand', (['(1, 3, 32, 32)'], {}), '((1, 3, 32, 32))\n', (999, 1015), False, 'import torch\n')] |
import logging
import datetime
class DatasetReports(object):
def __init__(self, dataverse_api=None, dataverse_database=None, config=None):
if dataverse_api is None:
print('Dataverse API required to create dataset reports.')
return
if dataverse_database is None:
print('Dataverse database required to create dataset reports.')
return
if config is None:
print('Dataverse configuration required to create dataset reports.')
return
self.dataverse_api = dataverse_api
self.dataverse_database = dataverse_database
# Ensure trailing slash on work_dir
if config['work_dir'][len(config['work_dir'])-1] != '/':
config['work_dir'] = config['work_dir'] + '/'
self.config = config
self.logger = logging.getLogger('dataverse-reports')
def report_datasets_recursive(self, dataverse_identifier):
# List of datasets
datasets = []
self.logger.info("Begin loading datasets for %s.", dataverse_identifier)
self.load_datasets_recursive(datasets, dataverse_identifier)
self.logger.info("Finished loading %s datasets for %s", str(len(datasets)), dataverse_identifier)
return datasets
def load_datasets_recursive(self, datasets={}, dataverse_identifier=None):
if dataverse_identifier is None:
self.logger.error("Dataverse identifier is required.")
return
self.logger.info("Loading dataverse: %s.", dataverse_identifier)
# Load dataverse
dataverse_response = self.dataverse_api.get_dataverse(identifier=dataverse_identifier)
response_json = dataverse_response.json()
if 'data' in response_json:
dataverse = response_json['data']
self.logger.info("Dataverse name: %s", dataverse['name'])
# Retrieve dvObjects for this dataverse
dataverse_contents = self.dataverse_api.get_dataverse_contents(identifier=dataverse_identifier)
self.logger.info('Total dvObjects in this dataverse: ' + str(len(dataverse_contents)))
for dvObject in dataverse_contents:
if dvObject['type'] == 'dataset':
# Add dataset to this dataverse
self.logger.info("Adding dataset %s to dataverse %s.", str(dvObject['id']), str(dataverse_identifier))
self.add_dataset(datasets, dataverse_identifier, dvObject['id'], dvObject['identifier'])
if dvObject['type'] == 'dataverse':
self.logger.info("Found new dataverse %s.", str(dvObject['id']))
self.load_datasets_recursive(datasets, dvObject['id'])
else:
self.logger.warn("Dataverse was empty.")
    def add_dataset(self, datasets, dataverse_identifier, dataset_id, dataset_identifier):
        """Fetch one dataset, flatten its metadata and append it to *datasets*.

        The latest-version fields and nested citation metadata are flattened
        onto the top-level dict; optionally, Make Data Count metrics, the
        cumulative download count and file-size totals are added.
        """
        # Load dataset
        self.logger.info("Dataset id: %s", dataset_id)
        self.logger.info("Dataset identifier: %s", dataset_identifier)
        dataset_response = self.dataverse_api.get_dataset(identifier=dataset_id)
        response_json = dataset_response.json()
        if 'data' in response_json:
            dataset = response_json['data']
            if 'latestVersion' in dataset:
                latest_version = dataset['latestVersion']
                metadata_blocks = latest_version['metadataBlocks']
                # Flatten the latest_version information
                for key, value in latest_version.items():
                    if key != 'metadataBlocks':
                        dataset[key] = value
                # Flatten the nested citation fields information
                citation = metadata_blocks['citation']
                fields = citation['fields']
                for item in fields:
                    self.logger.debug("Looking at field: %s.", item['typeName'])
                    valuesString = self.get_value_recursive('', item)
                    # Drop the trailing ' ; ' separator left by the recursion.
                    if valuesString.endswith(' ; '):
                        valuesString = valuesString[:-len(' ; ')]
                    typeName = item['typeName']
                    dataset[typeName] = valuesString
                # Remove nested information
                dataset.pop('latestVersion')
                if (self.config['include_dataset_metrics']):
                    # Calculate previous month
                    last_month = self.get_last_month()
                    # Use Make Data Count endpoints to gather views and downloads statistics
                    dataset_metrics_options = ['viewsUnique', 'viewsMonth', 'viewsTotal', 'downloadsUnique', 'downloadsMonth', 'downloadsTotal']
                    for dataset_metrics_option in dataset_metrics_options:
                        self.logger.debug("Calling endpoint for dataset metric: " + dataset_metrics_option)
                        # The '*Month' options reuse the corresponding '*Total'
                        # endpoint restricted to the previous month.
                        if dataset_metrics_option == 'viewsMonth':
                            dataset_metrics_response = self.dataverse_api.get_dataset_metric(identifier=dataset_id,option='viewsTotal',doi=dataset_identifier,date=last_month)
                        elif dataset_metrics_option == 'downloadsMonth':
                            dataset_metrics_response = self.dataverse_api.get_dataset_metric(identifier=dataset_id,option='downloadsTotal',doi=dataset_identifier,date=last_month)
                        else:
                            dataset_metrics_response = self.dataverse_api.get_dataset_metric(identifier=dataset_id,option=dataset_metrics_option,doi=dataset_identifier)
                        dataset_metrics_json = dataset_metrics_response.json()
                        if dataset_metrics_json['status'] == 'OK':
                            if dataset_metrics_option == 'viewsMonth':
                                if 'viewsTotal' in dataset_metrics_json['data']:
                                    self.logger.info("MDC metric (" + dataset_metrics_option + "): " + str(dataset_metrics_json['data']['viewsTotal']))
                                    dataset[dataset_metrics_option] = dataset_metrics_json['data']['viewsTotal']
                                else:
                                    self.logger.debug("Unable to find viewsTotal in response.")
                            elif dataset_metrics_option == 'downloadsMonth':
                                if 'downloadsTotal' in dataset_metrics_json['data']:
                                    self.logger.info("MDC metric (" + dataset_metrics_option + "): " + str(dataset_metrics_json['data']['downloadsTotal']))
                                    dataset[dataset_metrics_option] = dataset_metrics_json['data']['downloadsTotal']
                                else:
                                    self.logger.debug("Unable to find downloadsTotal in response.")
                            elif dataset_metrics_option in dataset_metrics_json['data']:
                                self.logger.info("MDC metric (" + dataset_metrics_option + "): " + str(dataset_metrics_json['data'][dataset_metrics_option]))
                                dataset[dataset_metrics_option] = dataset_metrics_json['data'][dataset_metrics_option]
                            else:
                                self.logger.error("Unable to find dataset metric in response.")
                        else:
                            self.logger.error("API call was unsuccessful.")
                            self.logger.error(dataset_metrics_json)
                            dataset[dataset_metrics_option] = 0
                # Use dataverse_database to retrieve cumulative download count of file in this dataset
                download_count = self.dataverse_database.get_download_count(dataset_id=dataset_id)
                self.logger.info("Download count for dataset: %s", str(download_count))
                dataset['downloadCount'] = download_count
                if 'files' in dataset:
                    contentSize = 0
                    count_restricted = 0
                    files = dataset['files']
                    for file in files:
                        if 'dataFile' in file:
                            if file['restricted']:
                                count_restricted += 1
                            dataFile = file['dataFile']
                            filesize = int(dataFile['filesize'])
                            contentSize += filesize
                    self.logger.info('Totel size (bytes) of all files in this dataset: %s', str(contentSize))
                    # Convert to megabytes for reports
                    dataset['contentSize (MB)'] = (contentSize/1048576)
                    dataset['totalFiles'] = len(files)
                    dataset['totalRestrictedFiles'] = count_restricted
                # Retrieve dataverse to get alias
                dataverse_response = self.dataverse_api.get_dataverse(identifier=dataverse_identifier)
                response_json = dataverse_response.json()
                dataverse = response_json['data']
                self.logger.info("Adding dataset to dataverse with alias: %s", str(dataverse['alias']))
                dataset['dataverse'] = dataverse['alias']
                datasets.append(dataset)
        else:
            self.logger.warn("Dataset was empty.")
    def get_value_recursive(self, valuesString, field):
        """Flatten a Dataverse metadata ``field`` into a display string.

        Appends the field's value(s) to *valuesString* and returns the
        result. Dispatches on the Dataverse ``typeClass`` ('primitive',
        'controlledVocabulary', 'compound') twice: once for single-valued
        fields and once for 'multiple' fields.

        NOTE(review): the recursive calls below pass three arguments
        ``(valuesString, subValue, elements['value'])`` to this
        two-argument method -- they would raise TypeError if that branch
        is ever reached. Presumably they should be
        ``self.get_value_recursive(subValue, elements)``; confirm against
        real compound metadata before changing.

        NOTE(review): the 'Unrecognized typeClass' branches fall through
        and implicitly return None, which would break callers that do
        ``+=`` on the result.
        """
        if not field['multiple']:
            if field['typeClass'] == 'primitive':
                # Single scalar value: append as-is.
                valuesString += field['value']
                self.logger.debug("New value of valuesString: %s", str(valuesString))
                return valuesString
            elif field['typeClass'] == 'controlledVocabulary':
                # Vocabulary values arrive as a list; join with ', '.
                subValue = ''
                for value in field['value']:
                    subValue += value + ', '
                subValue = subValue[:-2]
                valuesString += subValue
                self.logger.debug("New value of valuesString: %s", str(valuesString))
                return valuesString
            elif field['typeClass'] == 'compound':
                # Compound fields nest sub-fields; elements are joined with
                # ' - ' and compound entries are separated with ' ; '.
                subValue = ''
                if isinstance(field['value'], list):
                    for value in field['value']:
                        if isinstance(value, str):
                            self.logger.debug("Value: %s", value)
                        for key, elements in value.items():
                            if not elements['multiple']:
                                subValue += elements['value']
                            else:
                                subValue += self.get_value_recursive(valuesString, subValue, elements['value'])
                                self.logger.debug("New subValue: %s", subValue)
                            subValue += " - "
                        valuesString += subValue + " ; "
                else:
                    value = field['value']
                    for key, elements in value.items():
                        if not elements['multiple']:
                            subValue += elements['value']
                        else:
                            subValue += self.get_value_recursive(valuesString, subValue, elements['value'])
                            self.logger.debug("New subValue: %s", subValue)
                        subValue += " - "
                    valuesString += subValue + " ; "
                # Strip the trailing ' ; ' separator, if any.
                if valuesString.endswith(' ; '):
                    valuesString = valuesString[:-len(' ; ')]
                self.logger.debug("New value of valuesString: %s", str(valuesString))
                return valuesString
            else:
                self.logger.debug("Unrecognized typeClass: %s", field['typeClass'])
        else:
            if field['typeClass'] == 'primitive':
                # Multiple primitives: join with ', '.
                subValue = ''
                for value in field['value']:
                    subValue += value + ', '
                subValue = subValue[:-2]
                valuesString += subValue
                self.logger.debug("New value of valuesString: %s", str(valuesString))
                return valuesString
            elif field['typeClass'] == 'controlledVocabulary':
                subValue = ''
                for value in field['value']:
                    subValue += value + ', '
                subValue = subValue[:-2]
                valuesString += subValue
                self.logger.debug("New value of valuesString: %s", str(valuesString))
                return valuesString
            elif field['typeClass'] == 'compound':
                subValue = ''
                for value in field['value']:
                    for key, elements in value.items():
                        self.logger.debug("Key: %s", key)
                        if not elements['multiple']:
                            subValue += elements['value']
                        else:
                            subValue += self.get_value_recursive(valuesString, subValue, elements['value'])
                            self.logger.debug("New subValue: %s", subValue)
                        subValue += " - "
                    # Drop the trailing ' - ' before appending the separator.
                    subValue = subValue[:-3]
                    valuesString += subValue + " ; "
                self.logger.debug("New value of valuesString: %s", str(valuesString))
                return valuesString
            else:
                self.logger.debug("Unrecognized typeClass: %s", field['typeClass'])
def get_last_month(self):
now = datetime.datetime.now()
previous = now.date().replace(day=1) - datetime.timedelta(days=1)
last_month = previous.strftime("%Y-%m")
return last_month | [
"logging.getLogger",
"datetime.datetime.now",
"datetime.timedelta"
] | [((847, 885), 'logging.getLogger', 'logging.getLogger', (['"""dataverse-reports"""'], {}), "('dataverse-reports')\n", (864, 885), False, 'import logging\n'), ((13290, 13313), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13311, 13313), False, 'import datetime\n'), ((13361, 13387), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (13379, 13387), False, 'import datetime\n')] |
import json
import aiohttp
from copy import deepcopy
class Resource(object):
    """
    A base class for API resources.

    Provides thin async wrappers around :mod:`aiohttp` for issuing JSON
    requests (GET/PUT/POST/DELETE) against a base URL with shared
    authentication and default headers.
    """

    def __init__(self, url, auth):
        """
        :param url: The RabbitMQ API url to connect to. This should include the
            protocol and port number.
        :type url: str
        :param auth: The authentication to pass to the request. See
            `aiohttp' authentication`_ documentation. For the simplest case of
            a username and password, simply pass in a tuple of
            ``('username', 'password')``
        :type auth: Requests auth
        .. _Requests' authentication: https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.BasicAuth
        """  # noqa
        self.url = url.rstrip('/')
        # Promote a ('user', 'password') tuple to aiohttp's BasicAuth.
        if isinstance(auth, tuple):
            auth = aiohttp.BasicAuth(*auth)
        self.auth = auth
        self.headers = {
            'Content-type': 'application/json',
        }
        self.session_args = {
            'raise_for_status': True
        }

    def _prepare_kwargs(self, url, kwargs):
        """
        Shared plumbing for the ``_api_*`` wrappers: resolves *url* against
        the base url, injects auth, and merges the default headers with any
        caller-supplied ones (caller headers win on conflict).

        Mutates and returns *kwargs*.
        """
        kwargs['url'] = self.url + url
        kwargs['auth'] = self.auth
        headers = deepcopy(self.headers)
        headers.update(kwargs.get('headers', {}))
        kwargs['headers'] = headers
        return kwargs

    async def _api_get(self, url, **kwargs):
        """
        A convenience wrapper for _get. Adds headers, auth and base url by
        default.
        """
        return await self._get(**self._prepare_kwargs(url, kwargs))

    async def _get(self, *args, **kwargs):
        """
        A wrapper for getting things.

        :returns: The response of your get
        :rtype: dict
        """
        async with aiohttp.ClientSession(**self.session_args) as session:
            async with session.get(*args, **kwargs) as resp:
                return await resp.json()

    async def _api_put(self, url, **kwargs):
        """
        A convenience wrapper for _put. Adds headers, auth and base url by
        default.
        """
        await self._put(**self._prepare_kwargs(url, kwargs))

    async def _put(self, *args, **kwargs):
        """
        A wrapper for putting things. It will also json encode your 'data'
        parameter.
        """
        if 'data' in kwargs:
            kwargs['data'] = json.dumps(kwargs['data']).encode()
        async with aiohttp.ClientSession(**self.session_args) as session:
            await session.put(*args, **kwargs)

    async def _api_post(self, url, **kwargs):
        """
        A convenience wrapper for _post. Adds headers, auth and base url by
        default.
        """
        await self._post(**self._prepare_kwargs(url, kwargs))

    async def _post(self, *args, **kwargs):
        """
        A wrapper for posting things. It will also json encode your 'data'
        parameter.
        """
        if 'data' in kwargs:
            kwargs['data'] = json.dumps(kwargs['data']).encode()
        async with aiohttp.ClientSession(**self.session_args) as session:
            await session.post(*args, **kwargs)

    async def _api_delete(self, url, **kwargs):
        """
        A convenience wrapper for _delete. Adds headers, auth and base url by
        default.
        """
        await self._delete(**self._prepare_kwargs(url, kwargs))

    async def _delete(self, *args, **kwargs):
        """
        A wrapper for deleting things.
        """
        async with aiohttp.ClientSession(**self.session_args) as session:
            await session.delete(*args, **kwargs)
| [
"aiohttp.ClientSession",
"aiohttp.BasicAuth",
"json.dumps",
"copy.deepcopy"
] | [((1423, 1445), 'copy.deepcopy', 'deepcopy', (['self.headers'], {}), '(self.headers)\n', (1431, 1445), False, 'from copy import deepcopy\n'), ((2174, 2196), 'copy.deepcopy', 'deepcopy', (['self.headers'], {}), '(self.headers)\n', (2182, 2196), False, 'from copy import deepcopy\n'), ((3007, 3029), 'copy.deepcopy', 'deepcopy', (['self.headers'], {}), '(self.headers)\n', (3015, 3029), False, 'from copy import deepcopy\n'), ((3848, 3870), 'copy.deepcopy', 'deepcopy', (['self.headers'], {}), '(self.headers)\n', (3856, 3870), False, 'from copy import deepcopy\n'), ((957, 981), 'aiohttp.BasicAuth', 'aiohttp.BasicAuth', (['*auth'], {}), '(*auth)\n', (974, 981), False, 'import aiohttp\n'), ((1763, 1805), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '(**self.session_args)\n', (1784, 1805), False, 'import aiohttp\n'), ((2649, 2691), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '(**self.session_args)\n', (2670, 2691), False, 'import aiohttp\n'), ((3485, 3527), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '(**self.session_args)\n', (3506, 3527), False, 'import aiohttp\n'), ((4191, 4233), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '(**self.session_args)\n', (4212, 4233), False, 'import aiohttp\n'), ((2593, 2619), 'json.dumps', 'json.dumps', (["kwargs['data']"], {}), "(kwargs['data'])\n", (2603, 2619), False, 'import json\n'), ((3429, 3455), 'json.dumps', 'json.dumps', (["kwargs['data']"], {}), "(kwargs['data'])\n", (3439, 3455), False, 'import json\n')] |
from link_extractor import run_enumeration
from colorama import Fore
from utils.headers import HEADERS
from time import sleep
import requests
import database
import re
import json
from bs4 import BeautifulSoup
import colorama
# Banner and interactive prompt for the link extractor.
print(Fore.GREEN + '-----------------------------------' + Fore.RESET, Fore.RED)
print('尸闩㇄尸㠪龱 - Website Link Extractor')
print(' by @RealDebian | V0.02')
print(Fore.GREEN + '-----------------------------------' + Fore.RESET)
print()
sleep(1)
print('Example:')
print()
target_host = str(input('Target Site: '))
print('Select the Protocol (http|https)')
sleep(.5)
protocol = str(input('http=0 | https=1: '))
while True:
    if protocol == '0':
        run_enumeration('http://' + target_host)
        break
    elif protocol == '1':
        run_enumeration('https://' + target_host)
        break
    else:
        print('Wrong option!')
        # Bug fix: the original never re-read `protocol`, so an invalid
        # answer spun forever printing 'Wrong option!'. Re-prompt instead.
        protocol = str(input('http=0 | https=1: '))
| [
"link_extractor.run_enumeration",
"time.sleep"
] | [((465, 473), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (470, 473), False, 'from time import sleep\n'), ((585, 595), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (590, 595), False, 'from time import sleep\n'), ((684, 724), 'link_extractor.run_enumeration', 'run_enumeration', (["('http://' + target_host)"], {}), "('http://' + target_host)\n", (699, 724), False, 'from link_extractor import run_enumeration\n'), ((773, 814), 'link_extractor.run_enumeration', 'run_enumeration', (["('https://' + target_host)"], {}), "('https://' + target_host)\n", (788, 814), False, 'from link_extractor import run_enumeration\n')] |
from setuptools import setup
from pathlib import Path
from lightkube.models import __version__
# Package metadata for the lightkube-models distribution.
setup(
    name='lightkube-models',
    version=__version__,
    description='Models and Resources for lightkube module',
    long_description=Path("README.md").read_text(),
    long_description_content_type="text/markdown",
    author='<NAME>',
    author_email='<EMAIL>',
    # NOTE(review): `license` says Apache but the classifier below declares
    # MIT -- one of the two is wrong; confirm the intended license.
    license='Apache Software License',
    url='https://github.com/gtsystem/lightkube-models',
    packages=['lightkube.models', 'lightkube.resources'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ]
)
| [
"pathlib.Path"
] | [((241, 258), 'pathlib.Path', 'Path', (['"""README.md"""'], {}), "('README.md')\n", (245, 258), False, 'from pathlib import Path\n')] |
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB), and the INTEL Visual Computing Lab.
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import datetime
import sys
from contextlib import contextmanager
@contextmanager
def make_connection(client_type, *args, **kwargs):
    """Context manager yielding a connected networking client.

    The client is constructed from *client_type* with the remaining
    arguments, connected before the body runs, and always disconnected on
    exit (but only if construction succeeded).
    """
    instance = None
    try:
        instance = client_type(*args, **kwargs)
        instance.connect()
        yield instance
    finally:
        if instance is not None:
            instance.disconnect()
class StopWatch(object):
    """Measures wall-clock time between construction and :meth:`stop`."""

    def __init__(self):
        self.start = datetime.datetime.now()
        self.end = None

    def stop(self):
        """Record the end timestamp."""
        self.end = datetime.datetime.now()

    def milliseconds(self):
        """Elapsed time between start and stop, in milliseconds."""
        elapsed = self.end - self.start
        return elapsed.total_seconds() * 1000.0
def to_hex_str(header):
    """Render each character of *header* as two lowercase hex digits,
    separated by colons (e.g. 'AB' -> '41:42')."""
    hex_chunks = ['{:02x}'.format(ord(ch)) for ch in header]
    return ':'.join(hex_chunks)
# Select an implementation of print_over_same_line depending on the Python
# version: shutil.get_terminal_size only exists from Python 3.3 on.
if sys.version_info >= (3, 3):
    import shutil
    def print_over_same_line(text):
        """Print *text* over the current terminal line, padding with spaces
        up to the terminal width so leftovers from longer lines are erased."""
        terminal_width = shutil.get_terminal_size((80, 20)).columns
        empty_space = max(0, terminal_width - len(text))
        sys.stdout.write('\r' + text + empty_space * ' ')
        sys.stdout.flush()
else:
    # Workaround for older Python versions.
    def print_over_same_line(text):
        """Same as above, but tracks the longest line printed so far (on the
        function attribute `_last_line_length`) instead of querying the
        terminal width."""
        line_length = max(print_over_same_line._last_line_length, len(text))
        empty_space = max(0, line_length - len(text))
        sys.stdout.write('\r' + text + empty_space * ' ')
        sys.stdout.flush()
        print_over_same_line._last_line_length = line_length
    # Initialize the per-function state used above.
    print_over_same_line._last_line_length = 0
| [
"shutil.get_terminal_size",
"datetime.datetime.now",
"sys.stdout.flush",
"sys.stdout.write"
] | [((734, 757), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (755, 757), False, 'import datetime\n'), ((822, 845), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (843, 845), False, 'import datetime\n'), ((1248, 1297), 'sys.stdout.write', 'sys.stdout.write', (["('\\r' + text + empty_space * ' ')"], {}), "('\\r' + text + empty_space * ' ')\n", (1264, 1297), False, 'import sys\n'), ((1306, 1324), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1322, 1324), False, 'import sys\n'), ((1552, 1601), 'sys.stdout.write', 'sys.stdout.write', (["('\\r' + text + empty_space * ' ')"], {}), "('\\r' + text + empty_space * ' ')\n", (1568, 1601), False, 'import sys\n'), ((1610, 1628), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1626, 1628), False, 'import sys\n'), ((1140, 1174), 'shutil.get_terminal_size', 'shutil.get_terminal_size', (['(80, 20)'], {}), '((80, 20))\n', (1164, 1174), False, 'import shutil\n')] |
from cms.extensions import PageExtension
from cms.extensions.extension_pool import extension_pool
from django.utils.translation import ugettext as _
from filer.fields.image import FilerImageField
class SimplePageExtension(PageExtension):
    """
    A generic website page.

    django CMS page extension that attaches a single Filer image to a page.
    """
    # Image attached to the page; neither blank nor null is set, so it is
    # required at the form level -- TODO confirm that is intended.
    image = FilerImageField(verbose_name=_("image"))
# Make the extension available to the CMS toolbar/admin.
extension_pool.register(SimplePageExtension)
| [
"django.utils.translation.ugettext",
"cms.extensions.extension_pool.extension_pool.register"
] | [((339, 383), 'cms.extensions.extension_pool.extension_pool.register', 'extension_pool.register', (['SimplePageExtension'], {}), '(SimplePageExtension)\n', (362, 383), False, 'from cms.extensions.extension_pool import extension_pool\n'), ((325, 335), 'django.utils.translation.ugettext', '_', (['"""image"""'], {}), "('image')\n", (326, 335), True, 'from django.utils.translation import ugettext as _\n')] |
import re
from . import tables
from .instr import Instruction
from .instr.nop import *
from .instr.alu import *
from .instr.bcd import *
from .instr.bit import *
from .instr.flag import *
from .instr.mov import *
from .instr.smov import *
from .instr.ld_st import *
from .instr.stack import *
from .instr.jmp import *
from .instr.call import *
from .instr.ctx import *
from .instr.trap import *
# Maps the single-letter enumeration code (the letter after ';' in an
# encoding key below) to the table of legal 4-bit operand values for that
# placeholder field.
enumerations = {
    'R': tables.rx_ax,
    'I': tables.dsp8_dsp16_abs16,
    '6': tables.dsp8_abs16,
    '7': tables.r0x_r0y_dsp8_abs16,
    '8': tables.r0x_dsp8_abs16,
    'A': tables.reg16_dsp8_dsp16_dsp20_abs16,
    'E': tables.reg8l_dsp8_dsp16_abs16,
    'N': tables.reg8_dsp8_dsp16_abs16,
    'C': tables.creg,
    'J': tables.cnd_j3,
    'K': tables.cnd_j4,
    'M': tables.cnd_bm4,
}
# Declarative opcode map: each key is a bit pattern in 4-bit groups joined
# by '_', and each value is the Instruction subclass it decodes to.
# In a group: '0'/'1' are fixed opcode bits; lowercase letters are free
# operand bits (expanded to every combination); UPPERCASE letters mark an
# operand field whose legal values come from the enumeration named after
# ';' (see `enumerations` above). Trailing groups that are entirely
# lowercase are operand bytes outside the opcode and are stripped before
# table generation (see generate_tables).
encodings = {
    '0111_011z_1111_dddd': AbsReg,
    '0111_011z_0110_dddd': AdcImm,
    '1011_000z_ssss_dddd': AdcReg,
    '0111_011z_1110_dddd': Adcf,
    '0111_011z_0100_dddd': AddImm,
    '1100_100z_iiii_dddd': AddImm4,
    '1000_0DDD;8': AddImm8,
    '1010_000z_ssss_dddd': AddReg,
    '0010_0DSS;7': AddReg8,
    '0111_110z_1110_1011': AddImmSP,
    '0111_1101_1011_iiii': AddImm4SP,
    '1111_100z_iiii_dddd': Adjnz,
    '0111_011z_0010_dddd': AndImm,
    '1001_0DDD;8': AndImm8,
    '1001_000z_ssss_dddd': AndReg,
    '0001_0DSS;7': AndReg8,
    '0111_1110_0100_ssss': Band,
    '0111_1110_1000_dddd': Bclr,
    '0100_0bbb': BclrSB,
    '0111_1110_0010_dddd': Bmcnd,
    '0111_1101_1101_CCCC;M': BmcndC,
    '0111_1110_0101_ssss': Bnand,
    '0111_1110_0111_ssss': Bnor,
    '0111_1110_1010_dddd': Bnot,
    '0101_0bbb': BnotSB,
    '0111_1110_0011_ssss': Bntst,
    '0111_1110_1101_ssss': Bnxor,
    '0111_1110_0110_ssss': Bor,
    '0111_1110_1001_dddd': Bset,
    '0100_1bbb': BsetSB,
    '0111_1110_1011_ssss': Btst,
    '0101_1bbb': BtstSB,
    '0111_1110_0000_dddd': Btstc,
    '0111_1110_0001_dddd': Btsts,
    '0111_1110_1100_ssss': Bxor,
    '0000_0000': Brk,
    '0111_011z_1000_dddd': CmpImm,
    '1101_000z_iiii_dddd': CmpImm4,
    '1110_0DDD;8': CmpImm8,
    '1100_000z_ssss_dddd': CmpReg,
    '0011_1DSS;7': CmpReg8,
    '0111_1100_1110_1110': DadcImm8,
    '0111_1101_1110_1110': DadcImm16,
    '0111_1100_1110_0110': DadcReg8,
    '0111_1101_1110_0110': DadcReg16,
    '0111_1100_1110_1100': DaddImm8,
    '0111_1101_1110_1100': DaddImm16,
    '0111_1100_1110_0100': DaddReg8,
    '0111_1101_1110_0100': DaddReg16,
    '1010_1DDD;8': Dec,
    '1111_d010': DecAdr,
    '0111_110z_1110_0001': DivImm,
    '0111_011z_1101_ssss': DivReg,
    '0111_110z_1110_0000': DivuImm,
    '0111_011z_1100_ssss': DivuReg,
    '0111_110z_1110_0011': DivxImm,
    '0111_011z_1001_ssss': DivxReg,
    '0111_1100_1110_1111': DsbbImm8,
    '0111_1101_1110_1111': DsbbImm16,
    '0111_1100_1110_0111': DsbbReg8,
    '0111_1101_1110_0111': DsbbReg16,
    '0111_1100_1110_1101': DsubImm8,
    '0111_1101_1110_1101': DsubImm16,
    '0111_1100_1110_0101': DsubReg8,
    '0111_1101_1110_0101': DsubReg16,
    '0111_1100_1111_0010': Enter,
    '0111_1101_1111_0010': Exitd,
    '0111_1100_0110_DDDD;E': Exts,
    '0111_1100_1111_0011': ExtsR0,
    '1110_1011_0fff_0101': Fclr,
    '1110_1011_0fff_0100': Fset,
    '1010_0DDD;8': Inc,
    '1011_d010': IncAdr,
    '1110_1011_11ii_iiii': Int,
    '1111_0110': Into,
    '0110_1CCC;J': Jcnd1,
    '0111_1101_1100_CCCC;K': Jcnd2,
    '0110_0iii': Jmp3,
    '1111_1110': Jmp8,
    '1111_0100': Jmp16,
    '1111_1100': JmpAbs,
    '0111_1101_0010_ssss': Jmpi,
    '0111_1101_0000_SSSS;A': JmpiAbs,
    '1110_1110': Jmps,
    '1111_0101': Jsr16,
    '1111_1101': JsrAbs,
    '0111_1101_0011_ssss': Jsri,
    '0111_1101_0001_SSSS;A': JsriAbs,
    '1110_1111': Jsrs,
    '1110_1011_0DDD;C_0000': LdcImm,
    '0111_1010_1DDD;C_ssss': LdcReg,
    '0111_1100_1111_0000': Ldctx,
    '0111_010z_1000_dddd': Lde,
    '0111_010z_1001_dddd': LdeA0,
    '0111_010z_1010_dddd': LdeA1A0,
    '0111_1101_1010_0iii': Ldipl,
    '0111_010z_1100_dddd': MovImmReg,
    '1101_100z_iiii_dddd': MovImm4Reg,
    '1100_0DDD;8': MovImm8Reg,
    '1110_d010': MovImm8Adr,
    '1010_d010': MovImm16Adr,
    '1011_0DDD;8': MovZero8Reg,
    '0111_001z_ssss_dddd': MovRegReg,
    '0011_0dss': MovRegAdr,
    '0000_0sDD;6': MovReg8Reg,
    '0000_1DSS;7': MovRegReg8,
    '0111_010z_1011_dddd': MovIndSPReg,
    '0111_010z_0011_ssss': MovRegIndSP,
    '1110_1011_0DDD;R_SSSS;I': Mova,
    '0111_1100_10rr_DDDD;N': MovdirR0LReg,
    '0111_1100_00rr_SSSS;N': MovdirRegR0L,
    '0111_110z_0101_dddd': MulImm,
    '0111_100z_ssss_dddd': MulReg,
    '0111_110z_0100_dddd': MuluImm,
    '0111_000z_ssss_dddd': MuluReg,
    '0111_010z_0101_dddd': NegReg,
    '0000_0100': Nop,
    '0111_010z_0111_dddd': NotReg,
    '1011_1DDD;8': NotReg8,
    '0111_011z_0011_dddd': OrImm,
    '1001_1DDD;8': OrImm8,
    '1001_100z_ssss_dddd': OrReg,
    '0001_1DSS;7': OrReg8,
    '0111_010z_1101_dddd': Pop,
    '1001_d010': PopReg8,
    '1101_d010': PopAdr,
    '1110_1011_0DDD;C_0011': Popc,
    '1110_1101': Popm,
    '0111_110z_1110_0010': PushImm,
    '0111_010z_0100_ssss': Push,
    '1000_s010': PushReg8,
    '1100_s010': PushAdr,
    '0111_1101_1001_SSSS;I': Pusha,
    '1110_1011_0SSS;C_0010': Pushc,
    '1110_1100': Pushm,
    '1111_1011': Reit,
    '0111_110z_1111_0001': Rmpa,
    '1110_000z_iiii_dddd': RotImm4,
    '0111_010z_0110_dddd': RotR1H,
    '0111_011z_1010_dddd': Rolc,
    '0111_011z_1011_dddd': Rorc,
    '1111_0011': Rts,
    '0111_011z_0111_dddd': SbbImm,
    '1011_100z_ssss_dddd': SbbReg,
    '1111_000z_iiii_dddd': ShaImm4,
    '0111_010z_1111_dddd': ShaR1H,
    '1110_1011_101d_iiii': Sha32Imm4,
    '1110_1011_001d_0001': Sha32R1H,
    '1110_100z_iiii_dddd': ShlImm4,
    '0111_010z_1110_dddd': ShlR1H,
    '1110_1011_100d_iiii': Shl32Imm4,
    '1110_1011_000d_0001': Shl32R1H,
    '0111_110z_1110_1001': Smovb,
    '0111_110z_1110_1000': Smovf,
    '0111_110z_1110_1010': Sstr,
    '0111_1011_1SSS;C_dddd': StcReg,
    '0111_1100_1100_DDDD;A': StcPc,
    '0111_1101_1111_0000': Stctx,
    '0111_010z_0000_ssss': Ste,
    '0111_010z_0001_ssss': SteA0,
    '0111_010z_0010_ssss': SteA1A0,
    '1101_0DDD;8': Stnz,
    '1100_1DDD;8': Stz,
    '1101_1DDD;8': Stzx,
    '0111_011z_0101_dddd': SubImm,
    '1000_1DDD;8': SubImm8,
    '1010_100z_ssss_dddd': SubReg,
    '0010_1DSS;7': SubReg8,
    '0111_011z_0000_dddd': TstImm,
    '1000_000z_ssss_dddd': TstReg,
    '1111_1111': Und,
    '0111_1101_1111_0011': Wait,
    '0111_101z_00ss_dddd': Xchg,
    '0111_011z_0001_dddd': XorImm,
    '1000_100z_ssss_dddd': XorReg,
}
def generate_tables():
    """Expand the declarative `encodings` map into the nested nibble-indexed
    lookup tables on Instruction.opcodes.

    Each level of the resulting table is a dict keyed by one 4-bit chunk of
    the opcode; leaves are Instruction subclasses. Wildcard operand bits
    (letters in the encoding string) are expanded to every legal value, so
    a single declarative entry can populate many table slots.
    """
    for encoding, instr in encodings.items():
        def expand_encoding(table, parts):
            # Consume one 4-bit group, possibly suffixed with ';X' naming
            # an operand enumeration.
            part, *parts = parts
            if ';' in part:
                part, enum = part.split(';', 2)
            else:
                enum = ''
            assert len(part) == 4 and len(enum) <= 1
            chunks = []
            try:
                # Fully literal group: a single chunk value.
                chunks.append(int(part, 2))
            except ValueError:
                # Group contains wildcard letters: enumerate every value of
                # the wildcard bits while keeping the fixed bits constant.
                wildcard_part = re.sub(r'[A-Z]', '0', part)
                instr_code = int(re.sub(r'[^01]', '0', wildcard_part), 2)
                instr_mask = int(re.sub(r'[^01]', '0', wildcard_part.replace('0', '1')), 2)
                operand_mask = int(re.sub(r'[^01]', '1', wildcard_part.replace('1', '0')), 2)
                operand_code = 0
                while True:
                    chunks.append(instr_code | operand_code)
                    if operand_code == operand_mask:
                        break
                    # The following line cleverly uses carries to make a counter only from the bits
                    # that are set in `operand_mask`. To understand it, consider that `instr_mask`
                    # is the inverse of `operand_mask`, and adding 1 to a 011...1 chunk changes it
                    # into a 100...0 chunk.
                    operand_code = ((operand_code | instr_mask) + 1) & operand_mask
            if enum:
                # Restrict the uppercase wildcard field to the legal values
                # listed in the named enumeration.
                shift = 4 - re.search(r'[A-Z]+', part).end()
                chunks, chunk_templates = [], chunks
                for template in chunk_templates:
                    for legal_bits in enumerations[enum]:
                        chunks.append(template | (legal_bits << shift))
            for chunk in chunks:
                if parts:
                    # More groups to come: descend into (or create) a subtable.
                    try:
                        subtable = table[chunk]
                    except KeyError:
                        subtable = table[chunk] = dict()
                    assert isinstance(subtable, dict)
                    expand_encoding(subtable, parts)
                else:
                    # Leaf: record the instruction, refusing double assignment.
                    assert chunk not in table, "{} conflicts with {}".format(instr, table[chunk])
                    table[chunk] = instr
        parts = encoding.split('_')
        # Trailing all-lowercase groups are operand bytes, not opcode bits.
        while re.match(r"^[a-z]+$", parts[-1]):
            parts.pop()
        expand_encoding(Instruction.opcodes, parts)
def print_assigned():
    """Dump every assigned opcode encoding together with its mnemonic."""
    def walk(table, prefix):
        for nibble, entry in table.items():
            if isinstance(entry, dict):
                walk(entry, (*prefix, nibble))
            else:
                bits = '_'.join('{:04b}'.format(p) for p in (*prefix, nibble))
                mnemonic = entry().name()
                print('{:20s} {}'.format(bits, mnemonic))
    walk(Instruction.opcodes, ())
def print_unassigned():
    """Dump every opcode encoding that no instruction claims."""
    def walk(table, prefix):
        free = set(range(16))
        for nibble, entry in table.items():
            free.discard(nibble)
            if isinstance(entry, dict):
                walk(entry, (*prefix, nibble))
        for nibble in free:
            print('_'.join('{:04b}'.format(p) for p in (*prefix, nibble)))
    walk(Instruction.opcodes, ())
# Build Instruction.opcodes from the declarative `encodings` map at import
# time.
generate_tables()
# Debugging helpers -- uncomment to inspect the generated opcode map:
# print_assigned()
# print_unassigned()
| [
"re.sub",
"re.match",
"re.search"
] | [((8856, 8887), 're.match', 're.match', (['"""^[a-z]+$"""', 'parts[-1]'], {}), "('^[a-z]+$', parts[-1])\n", (8864, 8887), False, 'import re\n'), ((7048, 7074), 're.sub', 're.sub', (['"""[A-Z]"""', '"""0"""', 'part'], {}), "('[A-Z]', '0', part)\n", (7054, 7074), False, 'import re\n'), ((7111, 7146), 're.sub', 're.sub', (['"""[^01]"""', '"""0"""', 'wildcard_part'], {}), "('[^01]', '0', wildcard_part)\n", (7117, 7146), False, 'import re\n'), ((8029, 8054), 're.search', 're.search', (['"""[A-Z]+"""', 'part'], {}), "('[A-Z]+', part)\n", (8038, 8054), False, 'import re\n')] |
from abc import ABCMeta, abstractmethod
import numpy as np
__all__ = ['Law', 'Bin', 'Poi', 'Gau']
class Law(metaclass=ABCMeta):
    """Abstract observation law: sampling plus (log-)likelihood evaluation.

    Subclasses implement :meth:`sample` and :meth:`loglikely`;
    :meth:`likelihood` is derived from the subclass log-likelihood.
    """

    @staticmethod
    @abstractmethod
    def sample(n, d):
        """Draw one observation given size parameter *n* and rate/probability *d*."""
        pass

    @staticmethod
    @abstractmethod
    def loglikely(n, d, k):
        """Log-likelihood of observing *k* under parameters (*n*, *d*)."""
        pass

    @classmethod
    def likelihood(cls, n, d, k):
        """Likelihood of observing *k* -- exp of the subclass log-likelihood.

        Bug fix: this was a ``@staticmethod`` calling the bare name
        ``loglikely``, which is undefined at module scope and raised
        NameError on every call. Dispatching through the class fixes it
        while keeping ``SomeLaw.likelihood(n, d, k)`` calls unchanged.
        """
        return np.exp(cls.loglikely(n, d, k))
class Bin(Law):
    """Binomial law: k successes out of n trials with probability d."""

    def sample(n, d):
        return np.random.binomial(n, d)

    def loglikely(n, d, k):
        # Log-likelihood up to the (constant) binomial coefficient.
        hit_term = k * np.log(d)
        miss_term = (n - k) * np.log(1 - d)
        return hit_term + miss_term
class Poi(Law):
    """Poisson law with mean n*d."""

    def sample(n, d):
        return np.random.poisson(n * d)

    def loglikely(n, d, k):
        mean = n * d
        # The exact log(k!) term (np.sum(np.log(np.arange(k)+1))) is omitted,
        # as in the original.
        return k * np.log(mean) - mean + k - k * np.log(1 + k)
class Gau(Law):
    """Gaussian-style law: multiplicative 10% noise around n."""

    def sample(n, d=1):
        noise = 1 + 0.1 * np.random.randn()
        return n * noise

    def loglikely(n, d, k):
        log_ratio = np.log(k / n)
        return -50 * log_ratio ** 2
| [
"numpy.random.poisson",
"numpy.log",
"numpy.random.randn",
"numpy.random.binomial"
] | [((432, 456), 'numpy.random.binomial', 'np.random.binomial', (['n', 'd'], {}), '(n, d)\n', (450, 456), True, 'import numpy as np\n'), ((592, 616), 'numpy.random.poisson', 'np.random.poisson', (['(n * d)'], {}), '(n * d)\n', (609, 616), True, 'import numpy as np\n'), ((507, 516), 'numpy.log', 'np.log', (['d'], {}), '(d)\n', (513, 516), True, 'import numpy as np\n'), ((525, 538), 'numpy.log', 'np.log', (['(1 - d)'], {}), '(1 - d)\n', (531, 538), True, 'import numpy as np\n'), ((691, 704), 'numpy.log', 'np.log', (['(1 + k)'], {}), '(1 + k)\n', (697, 704), True, 'import numpy as np\n'), ((885, 898), 'numpy.log', 'np.log', (['(k / n)'], {}), '(k / n)\n', (891, 898), True, 'import numpy as np\n'), ((812, 829), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (827, 829), True, 'import numpy as np\n'), ((665, 678), 'numpy.log', 'np.log', (['(n * d)'], {}), '(n * d)\n', (671, 678), True, 'import numpy as np\n')] |
from unittest import TestCase
from schemer import Schema, Array, ValidationException
from dusty.schemas.base_schema_class import DustySchema, DustySpecs
from ...testcases import DustyTestCase
class TestDustySchemaClass(TestCase):
    """Exercises DustySchema: validation failures, default filling and the
    dict-like accessors."""

    def setUp(self):
        self.base_schema = Schema({'street': {'type': basestring},
                                   'house_number': {'type': int, 'default': 1}})
        self.bigger_schema = Schema({'address': {'type': self.base_schema, 'default': {}},
                                     'first_name': {'type': basestring, 'required': True},
                                     'last_name': {'type': basestring, 'default': 'johnson'}})

    def test_init_invalid_doc(self):
        # house_number must be an int, not a string.
        bad_doc = {'street': 'dogstoon',
                   'house_number': '1'}
        with self.assertRaises(ValidationException):
            DustySchema(self.base_schema, bad_doc)

    def test_valid_doc(self):
        wrapped = DustySchema(self.base_schema,
                                {'street': 'dogstoon', 'house_number': 1})
        self.assertEquals(wrapped['street'], 'dogstoon')
        self.assertEquals(wrapped['house_number'], 1)

    def test_setting_defaults(self):
        wrapped = DustySchema(self.base_schema, {'street': 'dogstoon'})
        self.assertEquals(wrapped['street'], 'dogstoon')
        self.assertEquals(wrapped['house_number'], 1)

    def test_setting_defaults_more_complicated_1(self):
        wrapped = DustySchema(self.bigger_schema, {'first_name': 'dusty'})
        self.assertEquals(wrapped['first_name'], 'dusty')
        self.assertEquals(wrapped['last_name'], 'johnson')
        # Nested schema defaults are applied inside the default address.
        self.assertEquals(wrapped['address'], {'house_number': 1})

    def test_setting_defaults_more_complicated_2(self):
        wrapped = DustySchema(self.bigger_schema,
                                {'first_name': 'dusty',
                                 'address': {'street': 'dogstoon'}})
        self.assertEquals(wrapped['address']['street'], 'dogstoon')
        self.assertEquals(wrapped['address']['house_number'], 1)

    def test_in_1(self):
        wrapped = DustySchema(self.bigger_schema,
                                {'first_name': 'dusty',
                                 'address': {'street': 'dogstoon'}})
        self.assertTrue('first_name' in wrapped)

    def test_in_2(self):
        wrapped = DustySchema(self.bigger_schema,
                                {'first_name': 'dusty',
                                 'address': {'street': 'dogstoon'}})
        self.assertFalse('first_names' in wrapped)

    def test_keys(self):
        wrapped = DustySchema(self.base_schema,
                                {'street': 'dogstoon', 'house_number': 1})
        self.assertEquals({'street', 'house_number'}, set(wrapped.keys()))

    def test_values(self):
        wrapped = DustySchema(self.base_schema,
                                {'street': 'dogstoon', 'house_number': 1})
        self.assertEquals({'dogstoon', 1}, set(wrapped.values()))
class TestDustySpecsClass(DustyTestCase):
    """Checks the lookup helpers on DustySpecs."""

    def test_finds_app_or_lib(self):
        loaded = DustySpecs(self.temp_specs_path)
        self.assertEquals(loaded.get_app_or_lib('app-a'), loaded['apps']['app-a'])
        self.assertEquals(loaded.get_app_or_lib('lib-a'), loaded['libs']['lib-a'])

    def test_raises_without_app_or_lib(self):
        loaded = DustySpecs(self.temp_specs_path)
        with self.assertRaises(KeyError):
            loaded.get_app_or_lib('non-existant-thingy')

    def test_get_app_or_service(self):
        loaded = DustySpecs(self.temp_specs_path)
        self.assertEquals(loaded.get_app_or_service('app-a'), loaded['apps']['app-a'])
        self.assertEquals(loaded.get_app_or_service('service-a'), loaded['services']['service-a'])
| [
"schemer.Schema",
"dusty.schemas.base_schema_class.DustySchema",
"dusty.schemas.base_schema_class.DustySpecs"
] | [((281, 370), 'schemer.Schema', 'Schema', (["{'street': {'type': basestring}, 'house_number': {'type': int, 'default': 1}}"], {}), "({'street': {'type': basestring}, 'house_number': {'type': int,\n 'default': 1}})\n", (287, 370), False, 'from schemer import Schema, Array, ValidationException\n'), ((431, 612), 'schemer.Schema', 'Schema', (["{'address': {'type': self.base_schema, 'default': {}}, 'first_name': {\n 'type': basestring, 'required': True}, 'last_name': {'type': basestring,\n 'default': 'johnson'}}"], {}), "({'address': {'type': self.base_schema, 'default': {}}, 'first_name':\n {'type': basestring, 'required': True}, 'last_name': {'type':\n basestring, 'default': 'johnson'}})\n", (437, 612), False, 'from schemer import Schema, Array, ValidationException\n'), ((1015, 1049), 'dusty.schemas.base_schema_class.DustySchema', 'DustySchema', (['self.base_schema', 'doc'], {}), '(self.base_schema, doc)\n', (1026, 1049), False, 'from dusty.schemas.base_schema_class import DustySchema, DustySpecs\n'), ((1269, 1303), 'dusty.schemas.base_schema_class.DustySchema', 'DustySchema', (['self.base_schema', 'doc'], {}), '(self.base_schema, doc)\n', (1280, 1303), False, 'from dusty.schemas.base_schema_class import DustySchema, DustySpecs\n'), ((1543, 1579), 'dusty.schemas.base_schema_class.DustySchema', 'DustySchema', (['self.bigger_schema', 'doc'], {}), '(self.bigger_schema, doc)\n', (1554, 1579), False, 'from dusty.schemas.base_schema_class import DustySchema, DustySpecs\n'), ((1947, 1983), 'dusty.schemas.base_schema_class.DustySchema', 'DustySchema', (['self.bigger_schema', 'doc'], {}), '(self.bigger_schema, doc)\n', (1958, 1983), False, 'from dusty.schemas.base_schema_class import DustySchema, DustySpecs\n'), ((2264, 2300), 'dusty.schemas.base_schema_class.DustySchema', 'DustySchema', (['self.bigger_schema', 'doc'], {}), '(self.bigger_schema, doc)\n', (2275, 2300), False, 'from dusty.schemas.base_schema_class import DustySchema, DustySpecs\n'), ((2492, 2528), 
'dusty.schemas.base_schema_class.DustySchema', 'DustySchema', (['self.bigger_schema', 'doc'], {}), '(self.bigger_schema, doc)\n', (2503, 2528), False, 'from dusty.schemas.base_schema_class import DustySchema, DustySpecs\n'), ((2705, 2739), 'dusty.schemas.base_schema_class.DustySchema', 'DustySchema', (['self.base_schema', 'doc'], {}), '(self.base_schema, doc)\n', (2716, 2739), False, 'from dusty.schemas.base_schema_class import DustySchema, DustySpecs\n'), ((2947, 2981), 'dusty.schemas.base_schema_class.DustySchema', 'DustySchema', (['self.base_schema', 'doc'], {}), '(self.base_schema, doc)\n', (2958, 2981), False, 'from dusty.schemas.base_schema_class import DustySchema, DustySpecs\n'), ((3154, 3186), 'dusty.schemas.base_schema_class.DustySpecs', 'DustySpecs', (['self.temp_specs_path'], {}), '(self.temp_specs_path)\n', (3164, 3186), False, 'from dusty.schemas.base_schema_class import DustySchema, DustySpecs\n'), ((3412, 3444), 'dusty.schemas.base_schema_class.DustySpecs', 'DustySpecs', (['self.temp_specs_path'], {}), '(self.temp_specs_path)\n', (3422, 3444), False, 'from dusty.schemas.base_schema_class import DustySchema, DustySpecs\n'), ((3599, 3631), 'dusty.schemas.base_schema_class.DustySpecs', 'DustySpecs', (['self.temp_specs_path'], {}), '(self.temp_specs_path)\n', (3609, 3631), False, 'from dusty.schemas.base_schema_class import DustySchema, DustySpecs\n'), ((855, 889), 'dusty.schemas.base_schema_class.DustySchema', 'DustySchema', (['self.base_schema', 'doc'], {}), '(self.base_schema, doc)\n', (866, 889), False, 'from dusty.schemas.base_schema_class import DustySchema, DustySpecs\n')] |
from abc import ABCMeta, abstractmethod, abstractproperty
from datetime import datetime, date
class Item(metaclass=ABCMeta):
    """Abstract stock item with code, name, quantity, cost price and offer
    flag. Subclasses define the pricing policy (selling/offer price,
    margin, discount)."""

    def __init__(self, code, name, quantity, cost, offer):
        self.item_code = code
        self.item_name = name
        self.quantity_on_hand = quantity
        self.cost_price = cost
        # "Yes"/"No" string flag controlling whether the offer price applies.
        self.on_offer = offer

    @property
    def quantity_on_hand(self):
        """Units currently in stock."""
        return self._quantity_on_hand

    @quantity_on_hand.setter
    def quantity_on_hand(self, value):
        self._quantity_on_hand = value

    @property
    def cost_price(self):
        """Purchase cost of a single unit."""
        return self._cost_price

    @cost_price.setter
    def cost_price(self, value):
        self._cost_price = value

    def changeOffer(self):
        """Toggle the on-offer flag between "Yes" and "No".

        Bug fixes versus the original: the method was missing ``self``
        (TypeError on any instance call), and the "No" branch used ``==``
        instead of ``=``, so the flag never switched back to "Yes".
        """
        if self.on_offer == "Yes":
            self.on_offer = "No"
        elif self.on_offer == "No":
            self.on_offer = "Yes"

    @abstractmethod
    def selling_price(self):
        """Regular selling price of one unit."""
        pass

    @abstractmethod
    def offer_price(self):
        """Price after any applicable discount."""
        pass

    @abstractmethod
    def profit_margin(self):
        """Absolute margin added on top of the cost price."""
        pass

    @abstractmethod
    def discount_rate(self):
        """Absolute discount subtracted from the selling price."""
        pass

    def to_string(self):
        """One-line summary: code, name, availability and offer status.

        The original's "Availalbe" typo is corrected to "Available",
        matching the Perishable subclass's format.
        """
        if self.on_offer == "Yes":
            offer = "**Offer"
        else:
            offer = "(No Offer)"
        return self.item_code + " " + self.item_name + " Available= " + str(self.quantity_on_hand) + " " + offer
class Perishable(Item):
def __init__(self, code, name, quantity, cost, offer, expiry):
Item.__init__(self, code, name, quantity, cost, offer)
self.expiry_date = expiry
def profit_margin(self):
return self.cost_price * 0.25
def selling_price(self):
return self.cost_price + self.profit_margin()
def days_before_expiry(self):
now = datetime.now().date()
days = self.expiry_date- now
return days.days
def discount_rate(self):
days = self.days_before_expiry()
price = self.selling_price()
if(days < 15):
return price * 0.3
elif(days < 30):
return price * 0.2
elif (days > 29):
return price * 0.1
def offer_price(self):
if(self.on_offer == "No"):
return selling_price()
return self.selling_price() - self.discount_rate()
def to_string(self):
if(self.on_offer == "Yes"):
offer = "**Offer**"
else:
offer = "(No Offer)"
string = self.item_code + " " + self.item_name + " Available= " + str(self.quantity_on_hand) + " Price: $" + str(self.offer_price()) +" " + offer + " Expiry Date: " + self.expiry_date.strftime('%d %b %Y') + " Perishable Item"
return string
class NonPerishable(Item):
def __init__(self, code, name, quantity, cost, offer):
Item.__init__(self, code, name, quantity, cost, offer)
def profit_margin(self):
return self.cost_price * 0.3
def selling_price(self):
return self.cost_price + self.profit_margin()
def discount_rate(self):
return self.selling_price() * 0.1
def offer_price(self):
if(self.on_offer == "No"):
return self.selling_price()
return self.selling_price() - self.discount_rate()
def to_string(self):
if(self.on_offer == "Yes"):
offer = "**Offer**"
else:
offer = "(No Offer)"
string = self.item_code + " " + self.item_name + " Available= " + str(self.quantity_on_hand) + " Price: $" + str(self.offer_price()) +" " + offer + " Non Perishable Item"
return string
class Grocer:
def __init__(self):
self.items_list = []
def print_items(self):
for item in self.items_list:
print(item.to_string())
def add_to_list(self, item_to_be_added):
self.items_list.append(item_to_be_added)
return
def update_quantity_on_hand(self, item_code, new_quantity):
if(new_quantity < 0):
print("Quantity cannot be zero. Failed to update.")
return False
for item in self.items_list:
if(item.item_code == item_code):
item.quantity_on_hand = new_quantity
return True
perishable = Perishable("P101", "Real Raisins", 10, 2, "Yes", date(2018,12, 10))
non_perishable = NonPerishable("NP210", "Tan Baking Paper", 25, 2, "No")
perishable2 = Perishable("P105", "Eggy Soup Tofu", 14, 1.85, "Yes", date(2018,11, 26))
grocer = Grocer()
grocer.add_to_list(perishable)
grocer.add_to_list(non_perishable)
grocer.add_to_list(perishable2)
grocer.print_items()
grocer.update_quantity_on_hand("P105", 10)
print()
grocer.print_items()
####################################################################
#DISCUSSION
"""
Single Responsibility Principle:
1) IN Perishable clas.
2) In NonPersishable class.
Open Closed Principle
1) Abstract class Item is open to be extended
2) Abstract class Item is closed for modification
Interface Segregation Principle
1) For using Perishable items, user don't have to know anything about Non-perishable items.
2) For using Non-perishable items, users don't have to know tha details of Perishable items.
Hence users are not forced to use methods they don't require.
"""
#################################################################### | [
"datetime.datetime.now",
"datetime.date"
] | [((4440, 4458), 'datetime.date', 'date', (['(2018)', '(12)', '(10)'], {}), '(2018, 12, 10)\n', (4444, 4458), False, 'from datetime import datetime, date\n'), ((4600, 4618), 'datetime.date', 'date', (['(2018)', '(11)', '(26)'], {}), '(2018, 11, 26)\n', (4604, 4618), False, 'from datetime import datetime, date\n'), ((1940, 1954), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1952, 1954), False, 'from datetime import datetime, date\n')] |
#!/usr/bin/env python3
host = 'mongodb'
port = 27017
ssl_ca_cert='/run/secrets/rootCA.pem'
ssl_certfile='/run/secrets/tls_cert.pem'
ssl_keyfile='/run/secrets/tls_key.pem'
# don't turn these signal into exceptions, just die.
# necessary for integrating into bash script pipelines seamlessly.
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# get administrator credentials
with open('/run/secrets/username','r') as f:
username = f.read()
with open('/run/secrets/password','r') as f:
password = f.read()
from pymongo import MongoClient
client = MongoClient(host, port,
ssl=True,
username=username,
password=password,
authSource=username, # assume admin database and admin user share name
ssl_ca_certs=ssl_ca_cert,
ssl_certfile=ssl_certfile,
ssl_keyfile=ssl_keyfile,
tlsAllowInvalidHostnames=True)
# Within the container environment, mongod runs on host 'mongodb'.
# That hostname, however, is not mentioned within the host certificate.
dbs = client.list_database_names()
for db in dbs:
print(db)
client.close()
| [
"pymongo.MongoClient",
"signal.signal"
] | [((306, 350), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal.SIG_DFL'], {}), '(signal.SIGINT, signal.SIG_DFL)\n', (319, 350), False, 'import signal\n'), ((351, 396), 'signal.signal', 'signal.signal', (['signal.SIGPIPE', 'signal.SIG_DFL'], {}), '(signal.SIGPIPE, signal.SIG_DFL)\n', (364, 396), False, 'import signal\n'), ((612, 822), 'pymongo.MongoClient', 'MongoClient', (['host', 'port'], {'ssl': '(True)', 'username': 'username', 'password': 'password', 'authSource': 'username', 'ssl_ca_certs': 'ssl_ca_cert', 'ssl_certfile': 'ssl_certfile', 'ssl_keyfile': 'ssl_keyfile', 'tlsAllowInvalidHostnames': '(True)'}), '(host, port, ssl=True, username=username, password=password,\n authSource=username, ssl_ca_certs=ssl_ca_cert, ssl_certfile=\n ssl_certfile, ssl_keyfile=ssl_keyfile, tlsAllowInvalidHostnames=True)\n', (623, 822), False, 'from pymongo import MongoClient\n')] |
"""
The weak_script annotation needs to be here instead of inside torch/jit/ so it
can be used in other places in torch/ (namely torch.nn) without running into
circular dependency problems
"""
import weakref
import inspect
try:
import builtins # PY3
except Exception:
import __builtin__ as builtins # PY2
# Tracks standalone weak script functions
_compiled_weak_fns = weakref.WeakKeyDictionary()
# Tracks which methods should be converted to strong methods
_weak_script_methods = weakref.WeakKeyDictionary()
# Converted modules and their corresponding WeakScriptModuleProxy objects
_weak_modules = weakref.WeakKeyDictionary()
# Types that have been declared as weak modules
_weak_types = weakref.WeakKeyDictionary()
# Wrapper functions that can call either of 2 functions depending on a boolean
# argument
_boolean_dispatched = weakref.WeakKeyDictionary()
COMPILATION_PENDING = object()
COMPILED = object()
def createResolutionCallback(frames_up=0):
"""
Creates a function which, given a string variable name,
returns the value of the variable in the scope of the caller of
the function which called createResolutionCallback (by default).
This is used to enable access in-scope Python variables inside
TorchScript fragments.
frames_up is number of additional frames to go up on the stack.
The default value is 0, which correspond to the frame of the caller
of createResolutionCallback. Also for example, if frames_up is set
to 1, then the frame of the caller's caller of createResolutionCallback
will be taken.
For example, the following program prints 2::
def bar():
cb = createResolutionCallback(1)
print(cb("foo"))
def baz():
foo = 2
bar()
baz()
"""
frame = inspect.currentframe()
i = 0
while i < frames_up + 1:
frame = frame.f_back
i += 1
f_locals = frame.f_locals
f_globals = frame.f_globals
def env(key):
if key in f_locals:
return f_locals[key]
elif key in f_globals:
return f_globals[key]
elif hasattr(builtins, key):
return getattr(builtins, key)
else:
return None
return env
def weak_script(fn, _frames_up=0):
"""
Marks a function as a weak script function. When used in a script function
or ScriptModule, the weak script function will be lazily compiled and
inlined in the graph. When not used in a script function, the weak script
annotation has no effect.
"""
_compiled_weak_fns[fn] = {
"status": COMPILATION_PENDING,
"compiled_fn": None,
"rcb": createResolutionCallback(_frames_up + 1)
}
return fn
def weak_module(cls):
_weak_types[cls] = {
"method_stubs": None
}
return cls
def weak_script_method(fn):
_weak_script_methods[fn] = {
"rcb": createResolutionCallback(frames_up=2),
"original_method": fn
}
return fn
def boolean_dispatch(arg_name, arg_index, default, if_true, if_false):
"""
Dispatches to either of 2 weak script functions based on a boolean argument.
In Torch Script, the boolean argument must be constant so that the correct
function to use can be determined at compile time.
"""
if _compiled_weak_fns.get(if_true) is None or _compiled_weak_fns.get(if_false) is None:
raise RuntimeError("both functions must be weak script")
def fn(*args, **kwargs):
dispatch_flag = False
if arg_name in kwargs:
dispatch_flag = kwargs[arg_name]
elif arg_index < len(args):
dispatch_flag = args[arg_index]
if dispatch_flag:
return if_true(*args, **kwargs)
else:
return if_false(*args, **kwargs)
if if_true.__doc__ is None and if_false.__doc__ is not None:
doc = if_false.__doc__
if_true.__doc__ = doc
elif if_false.__doc__ is None and if_true.__doc__ is not None:
doc = if_true.__doc__
if_false.__doc__ = doc
else:
raise RuntimeError("only one function can have a docstring")
fn.__doc__ = doc
_boolean_dispatched[fn] = {
"if_true": if_true,
"if_false": if_false,
"index": arg_index,
"default": default,
"arg_name": arg_name
}
return fn
try:
import typing
from typing import Tuple, List
def is_tuple(ann):
# For some reason Python 3.7 violates the Type[A, B].__origin__ == Type rule
return ann.__module__ == 'typing' and \
(getattr(ann, '__origin__', None) is typing.Tuple or
getattr(ann, '__origin__', None) is tuple)
except ImportError:
# A minimal polyfill for versions of Python that don't have typing.
# Note that this means that they also don't support the fancy annotation syntax, so
# those instances will only be used in our tiny `type: ` comment interpreter.
# The __getitem__ in typing is implemented using metaclasses, but I'm too lazy for that.
class TupleCls(object):
def __getitem__(self, types):
return TupleInstance(types)
class TupleInstance(object):
def __init__(self, types):
setattr(self, '__args__', types)
class ListInstance(object):
def __init__(self, types):
setattr(self, '__args__', types)
class ListCls(object):
def __getitem__(self, types):
return TupleInstance(types)
Tuple = TupleCls()
List = ListCls()
def is_tuple(ann):
return isinstance(ann, TupleInstance)
# allows BroadcastingList instance to be subscriptable
class BroadcastingListCls(object):
def __getitem__(self, types):
return
# mypy doesn't support parameters on types, so we have to explicitly type each
# list size
BroadcastingList1 = BroadcastingListCls()
for i in range(2, 7):
globals()["BroadcastingList{}".format(i)] = BroadcastingList1
| [
"weakref.WeakKeyDictionary",
"inspect.currentframe"
] | [((380, 407), 'weakref.WeakKeyDictionary', 'weakref.WeakKeyDictionary', ([], {}), '()\n', (405, 407), False, 'import weakref\n'), ((493, 520), 'weakref.WeakKeyDictionary', 'weakref.WeakKeyDictionary', ([], {}), '()\n', (518, 520), False, 'import weakref\n'), ((612, 639), 'weakref.WeakKeyDictionary', 'weakref.WeakKeyDictionary', ([], {}), '()\n', (637, 639), False, 'import weakref\n'), ((703, 730), 'weakref.WeakKeyDictionary', 'weakref.WeakKeyDictionary', ([], {}), '()\n', (728, 730), False, 'import weakref\n'), ((844, 871), 'weakref.WeakKeyDictionary', 'weakref.WeakKeyDictionary', ([], {}), '()\n', (869, 871), False, 'import weakref\n'), ((1814, 1836), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (1834, 1836), False, 'import inspect\n')] |
import re, requests
def parse_page(url):
headers = {
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36"
}
response = requests.get(url, headers=headers)
text = response.content.decode("utf-8")
contents = re.findall(r'<div class="content">.*?<span>(.*?)</span>', text, re.DOTALL)
for content in contents:
x = re.sub(r'<.*?>|\n', '', content)
print(x.strip())
print("===="*20)
def main():
base_url = "https://www.qiushibaike.com/text/page/{}/"
for x in range(1, 10):
url = base_url.format(x)
parse_page(url)
if __name__ == '__main__':
main()
| [
"re.sub",
"re.findall",
"requests.get"
] | [((218, 252), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (230, 252), False, 'import re, requests\n'), ((312, 385), 're.findall', 're.findall', (['"""<div class="content">.*?<span>(.*?)</span>"""', 'text', 're.DOTALL'], {}), '(\'<div class="content">.*?<span>(.*?)</span>\', text, re.DOTALL)\n', (322, 385), False, 'import re, requests\n'), ((428, 460), 're.sub', 're.sub', (['"""<.*?>|\\\\n"""', '""""""', 'content'], {}), "('<.*?>|\\\\n', '', content)\n", (434, 460), False, 'import re, requests\n')] |
__all__ = ['plot_cum_error_dist']
import numpy as np
# import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from mpl_toolkits.axes_grid.inset_locator import inset_axes
import itertools
from . import palettes
# colors = itertools.cycle(npl.palettes.color_palette(palette="sweet", n_colors=15))
# from ..core import *
# from ..auxiliary import *
from .. import decoding
# from . import utils # import plotting/utils
def plot_cum_error_dist(*, cumhist=None, bincenters=None,
bst=None, extern=None, decodefunc=None,
k=None, transfunc=None, n_extern=None,
n_bins = None, extmin=None, extmax=None,
sigma=None, lw=None, ax=None, inset=True,
inset_ax=None, color=None, **kwargs):
"""Plot (and optionally compute) the cumulative distribution of
decoding errors, evaluated using a cross-validation procedure.
See Fig 3.(b) of "Analysis of Hippocampal Memory Replay Using Neural
Population Decoding", <NAME>, 2012.
Parameters
----------
Returns
-------
"""
if ax is None:
ax = plt.gca()
if lw is None:
lw=1.5
if decodefunc is None:
decodefunc = decoding.decode1D
if k is None:
k=5
if n_extern is None:
n_extern=100
if n_bins is None:
n_bins = 200
if extmin is None:
extmin=0
if extmax is None:
extmax=100
if sigma is None:
sigma = 3
# Get the color from the current color cycle
if color is None:
line, = ax.plot(0, 0.5)
color = line.get_color()
line.remove()
# if cumhist or bincenters are NOT provided, then compute them
if cumhist is None or bincenters is None:
assert bst is not None, "if cumhist and bincenters are not given, then bst must be provided to recompute them!"
assert extern is not None, "if cumhist and bincenters are not given, then extern must be provided to recompute them!"
cumhist, bincenters = \
decoding.cumulative_dist_decoding_error_using_xval(
bst=bst,
extern=extern,
decodefunc=decoding.decode1D,
k=k,
transfunc=transfunc,
n_extern=n_extern,
extmin=extmin,
extmax=extmax,
sigma=sigma,
n_bins=n_bins)
# now plot results
ax.plot(bincenters, cumhist, lw=lw, color=color, **kwargs)
ax.set_xlim(bincenters[0], bincenters[-1])
ax.set_xlabel('error [cm]')
ax.set_ylabel('cumulative probability')
ax.set_ylim(0)
if inset:
if inset_ax is None:
inset_ax = inset_axes(parent_axes=ax,
width="60%",
height="50%",
loc=4,
borderpad=2)
inset_ax.plot(bincenters, cumhist, lw=lw, color=color, **kwargs)
# annotate inset
thresh1 = 0.7
bcidx = np.asscalar(np.argwhere(cumhist>thresh1)[0]-1)
inset_ax.hlines(thresh1, 0, bincenters[bcidx], color=color, alpha=0.9, linestyle='--')
inset_ax.vlines(bincenters[bcidx], 0, thresh1, color=color, alpha=0.9, linestyle='--')
inset_ax.set_xlim(0,12*np.ceil(bincenters[bcidx]/10))
thresh2 = 0.5
bcidx = np.asscalar(np.argwhere(cumhist>thresh2)[0]-1)
inset_ax.hlines(thresh2, 0, bincenters[bcidx], color=color, alpha=0.6, linestyle='--')
inset_ax.vlines(bincenters[bcidx], 0, thresh2, color=color, alpha=0.6, linestyle='--')
inset_ax.set_yticks((0,thresh1, thresh2, 1))
inset_ax.set_ylim(0)
return ax, inset_ax
return ax | [
"matplotlib.pyplot.gca",
"numpy.ceil",
"mpl_toolkits.axes_grid.inset_locator.inset_axes",
"numpy.argwhere"
] | [((1188, 1197), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1195, 1197), True, 'import matplotlib.pyplot as plt\n'), ((2724, 2797), 'mpl_toolkits.axes_grid.inset_locator.inset_axes', 'inset_axes', ([], {'parent_axes': 'ax', 'width': '"""60%"""', 'height': '"""50%"""', 'loc': '(4)', 'borderpad': '(2)'}), "(parent_axes=ax, width='60%', height='50%', loc=4, borderpad=2)\n", (2734, 2797), False, 'from mpl_toolkits.axes_grid.inset_locator import inset_axes\n'), ((3341, 3372), 'numpy.ceil', 'np.ceil', (['(bincenters[bcidx] / 10)'], {}), '(bincenters[bcidx] / 10)\n', (3348, 3372), True, 'import numpy as np\n'), ((3084, 3114), 'numpy.argwhere', 'np.argwhere', (['(cumhist > thresh1)'], {}), '(cumhist > thresh1)\n', (3095, 3114), True, 'import numpy as np\n'), ((3423, 3453), 'numpy.argwhere', 'np.argwhere', (['(cumhist > thresh2)'], {}), '(cumhist > thresh2)\n', (3434, 3453), True, 'import numpy as np\n')] |
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://localhost:8000')
try:
assert 'Django' in browser.title
finally:
browser.close()
| [
"selenium.webdriver.Firefox"
] | [((42, 61), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (59, 61), False, 'from selenium import webdriver\n')] |