content stringlengths 7 1.05M | fixed_cases stringlengths 1 1.28M |
|---|---|
n = int(input("Enter the number to be rotated:"))
a = int(input("Enter the bits to be rotated:"))
print("1.Left Rotation 2.Right Rotation")
b = int(input("Enter your choice:"))
if b == 1:
print("The number",n,"after left rotation by",a,"bits is", n << a)
elif b == 2:
print("The number",n,"after right rotation by",a,"bits is", n >> a)
else:
print("Invalid choice")
| n = int(input('Enter the number to be rotated:'))
a = int(input('Enter the bits to be rotated:'))
print('1.Left Rotation 2.Right Rotation')
b = int(input('Enter your choice:'))
if b == 1:
print('The number', n, 'after left rotation by', a, 'bits is', n << a)
elif b == 2:
print('The number', n, 'after right rotation by', a, 'bits is', n >> a)
else:
print('Invalid choice') |
"""
Binary to Decimal and Back Converter - Develop a converter to convert a decimal number to binary or a binary number to its decimal equivalent.
"""
print("Welcome to Binary to Decimal & Back converter!")
while True:
action = input("Choose Your action:\n[1] - Convert binary to decimal\n[2] - Convert decimal to binary\n[3] - End program\n")
if action == "1":
number = input("Input binary number to convert:")
print("Binary: {}, decimal: {}".format(number, int(number, 2)))
elif action == "2":
number = int(input("Input decimal number to convert:"))
print("Decimal: {}, binary: {}".format(number, bin(number).replace("0b", "")))
elif action == "3":
print("You have chosen to end.")
break
else:
print("Please type in correct action.")
| """
Binary to Decimal and Back Converter - Develop a converter to convert a decimal number to binary or a binary number to its decimal equivalent.
"""
print('Welcome to Binary to Decimal & Back converter!')
while True:
action = input('Choose Your action:\n[1] - Convert binary to decimal\n[2] - Convert decimal to binary\n[3] - End program\n')
if action == '1':
number = input('Input binary number to convert:')
print('Binary: {}, decimal: {}'.format(number, int(number, 2)))
elif action == '2':
number = int(input('Input decimal number to convert:'))
print('Decimal: {}, binary: {}'.format(number, bin(number).replace('0b', '')))
elif action == '3':
print('You have chosen to end.')
break
else:
print('Please type in correct action.') |
def ws_message(message):
# ASGI WebSocket packet-received and send-packet message types
# both have a "text" key for their textual data.
message.reply_channel.send({
"text": message.content['text'],
})
| def ws_message(message):
message.reply_channel.send({'text': message.content['text']}) |
class Solution:
def closeStrings(self, word1: str, word2: str) -> bool:
if len(word1) != len(word2):
return False
count1 = Counter(word1)
count2 = Counter(word2)
if count1.keys() != count2.keys():
return False
return sorted(count1.values()) == sorted(count2.values())
| class Solution:
def close_strings(self, word1: str, word2: str) -> bool:
if len(word1) != len(word2):
return False
count1 = counter(word1)
count2 = counter(word2)
if count1.keys() != count2.keys():
return False
return sorted(count1.values()) == sorted(count2.values()) |
# The Western Suburbs Croquet Club has two categories of membership, Senior and Open. They would like your help with an application form that will tell prospective members which category they will be placed.
#
# To be a senior, a member must be at least 55 years old and have a handicap greater than 7. In this croquet club, handicaps range from -2 to +26; the better the player the lower the handicap.
#
# Input
# Input will consist of a list of pairs. Each pair contains information for a single potential member. Information consists of an integer for the person's age and an integer for the person's handicap.
#
# Output
# Output will consist of a list of string values (in Haskell: Open or Senior) stating whether the respective member is to be placed in the senior or open category.
#
# Example
# input = [(18, 20), (45, 2), (61, 12), (37, 6), (21, 21), (78, 9)]
# output = ["Open", "Open", "Senior", "Open", "Open", "Senior"]
output = [(16, 23),(73,1),(56, 20),(1, -1)]
new = []
def open_or_senior(data):
for list_1 in data:
# print(type(list_1))
if list_1[0] >= 55 and list_1[1] >= 7:
new.append("Senior")
else:
new.append("Open")
return new
print(open_or_senior(output))
# For Pros >>
def openOrSenior(data):
return ["Senior" if age >= 55 and handicap >= 7 else "Open" for (age, handicap) in data]
| output = [(16, 23), (73, 1), (56, 20), (1, -1)]
new = []
def open_or_senior(data):
for list_1 in data:
if list_1[0] >= 55 and list_1[1] >= 7:
new.append('Senior')
else:
new.append('Open')
return new
print(open_or_senior(output))
def open_or_senior(data):
return ['Senior' if age >= 55 and handicap >= 7 else 'Open' for (age, handicap) in data] |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'woff2_dec',
'type': 'static_library',
'include_dirs': [
'src',
'<(DEPTH)/third_party/brotli/dec',
],
'dependencies': [
'<(DEPTH)/third_party/brotli/brotli.gyp:brotli',
],
'sources': [
'src/buffer.h',
'src/round.h',
'src/store_bytes.h',
'src/table_tags.cc',
'src/table_tags.h',
'src/variable_length.cc',
'src/variable_length.h',
'src/woff2_common.cc',
'src/woff2_common.h',
'src/woff2_dec.cc',
'src/woff2_dec.h',
'src/woff2_out.cc',
'src/woff2_out.h',
],
# TODO(ksakamoto): http://crbug.com/167187
'msvs_disabled_warnings': [
4267,
],
},
],
}
| {'targets': [{'target_name': 'woff2_dec', 'type': 'static_library', 'include_dirs': ['src', '<(DEPTH)/third_party/brotli/dec'], 'dependencies': ['<(DEPTH)/third_party/brotli/brotli.gyp:brotli'], 'sources': ['src/buffer.h', 'src/round.h', 'src/store_bytes.h', 'src/table_tags.cc', 'src/table_tags.h', 'src/variable_length.cc', 'src/variable_length.h', 'src/woff2_common.cc', 'src/woff2_common.h', 'src/woff2_dec.cc', 'src/woff2_dec.h', 'src/woff2_out.cc', 'src/woff2_out.h'], 'msvs_disabled_warnings': [4267]}]} |
"""
Given a non-negative integer represented as non-empty a singly linked list of digits, plus one to the integer.
You may assume the integer do not contain any leading zero, except the number 0 itself.
The digits are stored such that the most significant digit is at the head of the list.
Example :
Input: [1,2,3]
Output: [1,2,4]
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def plusOne(self, head: ListNode) -> ListNode:
dummy = ListNode(0)
dummy.next = head
slow = fast = dummy
while fast:
if fast.val != 9:
slow = fast
fast = fast.next
slow.val += 1
slow = slow.next
while slow:
slow.val = 0
slow = slow.next
return dummy.next if dummy.val == 0 else dummy
| """
Given a non-negative integer represented as non-empty a singly linked list of digits, plus one to the integer.
You may assume the integer do not contain any leading zero, except the number 0 itself.
The digits are stored such that the most significant digit is at the head of the list.
Example :
Input: [1,2,3]
Output: [1,2,4]
"""
class Solution:
def plus_one(self, head: ListNode) -> ListNode:
dummy = list_node(0)
dummy.next = head
slow = fast = dummy
while fast:
if fast.val != 9:
slow = fast
fast = fast.next
slow.val += 1
slow = slow.next
while slow:
slow.val = 0
slow = slow.next
return dummy.next if dummy.val == 0 else dummy |
# encoding: utf-8
# convert dictionary to object
class dict2obj(object):
def __init__(self, dictionary):
self.__dict__ = dictionary | class Dict2Obj(object):
def __init__(self, dictionary):
self.__dict__ = dictionary |
a = "test"
b = 0
def fun1(val):
global b
b = b + 1
#a = "hello"
def fun2():
global b
nonlocal val
b += 5
val += 1000
print(val)
fun2()
print(a)
fun1(100)
print(b) | a = 'test'
b = 0
def fun1(val):
global b
b = b + 1
def fun2():
global b
nonlocal val
b += 5
val += 1000
print(val)
fun2()
print(a)
fun1(100)
print(b) |
class Solution:
def isSubsequence(self, s: str, t: str) -> bool:
prev = 0
for letter in s:
found = False
for i in range(prev, len(t)):
if letter == t[i]:
prev = i+1
found = True
break
if not found:
return False
return True | class Solution:
def is_subsequence(self, s: str, t: str) -> bool:
prev = 0
for letter in s:
found = False
for i in range(prev, len(t)):
if letter == t[i]:
prev = i + 1
found = True
break
if not found:
return False
return True |
__author__ = 'Aaron Yang'
__email__ = 'byang971@usc.edu'
__date__ = '8/24/2020 11:04 PM'
class Solution:
def findSubsequences(self, nums):
self.res = []
def backtrace(nums, temp):
if len(temp) >= 2 and temp not in self.res:
self.res.append(temp)
if not nums:
return
for i in range(len(nums)):
if not temp or nums[i] >= temp[-1]:
backtrace(nums[i + 1:], temp + [nums[i]])
backtrace(nums, [])
return self.res
if __name__ == '__main__':
arr = [4, 6, 7, 7]
result = Solution().findSubsequences(arr)
print(result)
| __author__ = 'Aaron Yang'
__email__ = 'byang971@usc.edu'
__date__ = '8/24/2020 11:04 PM'
class Solution:
def find_subsequences(self, nums):
self.res = []
def backtrace(nums, temp):
if len(temp) >= 2 and temp not in self.res:
self.res.append(temp)
if not nums:
return
for i in range(len(nums)):
if not temp or nums[i] >= temp[-1]:
backtrace(nums[i + 1:], temp + [nums[i]])
backtrace(nums, [])
return self.res
if __name__ == '__main__':
arr = [4, 6, 7, 7]
result = solution().findSubsequences(arr)
print(result) |
'''
Clase que maneja datos de historial (Resultados de solicitud pasados)
'''
class History:
def __init__(self,message,arrReqRes):
self.__message=message # requests
self.__arrReqRes=arrReqRes
def getHistory(self):
data=[]
for reqRes in self.__arrReqRes:
data.append(reqRes.get_RequestResult())
data={"requests":self.__message, "arrRequest":data}
return data
def getHistoryNull(self):
data={"requests":self.__message}
return data
def getMessage(self):
return self.__message
def setMessage(self,message):
self.__message=message
def getArrReqRes(self):
return self.__arrReqRes
def setArrReqRes(self,arrReqRes):
self.__arrReqRes=arrReqRes | """
Clase que maneja datos de historial (Resultados de solicitud pasados)
"""
class History:
def __init__(self, message, arrReqRes):
self.__message = message
self.__arrReqRes = arrReqRes
def get_history(self):
data = []
for req_res in self.__arrReqRes:
data.append(reqRes.get_RequestResult())
data = {'requests': self.__message, 'arrRequest': data}
return data
def get_history_null(self):
data = {'requests': self.__message}
return data
def get_message(self):
return self.__message
def set_message(self, message):
self.__message = message
def get_arr_req_res(self):
return self.__arrReqRes
def set_arr_req_res(self, arrReqRes):
self.__arrReqRes = arrReqRes |
class Loss(object):
def __init__(self, dout):
self.dout = dout
def eval(self, _ytrue, _ypred):
"""
Subclass should implement log p(Y | F)
:param output: (batch_size x Dout) matrix containing true outputs
:param latent_val: (MC x batch_size x Q) matrix
of latent function values, usually Q=F
:return:
"""
raise NotImplementedError("Subclass should implement this.")
def get_name(self):
raise NotImplementedError("Subclass should implement this.")
| class Loss(object):
def __init__(self, dout):
self.dout = dout
def eval(self, _ytrue, _ypred):
"""
Subclass should implement log p(Y | F)
:param output: (batch_size x Dout) matrix containing true outputs
:param latent_val: (MC x batch_size x Q) matrix
of latent function values, usually Q=F
:return:
"""
raise not_implemented_error('Subclass should implement this.')
def get_name(self):
raise not_implemented_error('Subclass should implement this.') |
__all__ = [
"WriteError",
"InvalidQueryError",
"DoesNotExistError",
"MultipleObjectsReturnedError",
"FieldDoesNotExistError",
]
class WriteError(Exception):
pass
class InvalidQueryError(Exception):
pass
class DoesNotExistError(Exception):
pass
class MultipleObjectsReturnedError(Exception):
pass
class FieldDoesNotExistError(Exception):
pass
class IndexCreationError(Exception):
pass
class DuplicateKeyError(Exception):
pass
| __all__ = ['WriteError', 'InvalidQueryError', 'DoesNotExistError', 'MultipleObjectsReturnedError', 'FieldDoesNotExistError']
class Writeerror(Exception):
pass
class Invalidqueryerror(Exception):
pass
class Doesnotexisterror(Exception):
pass
class Multipleobjectsreturnederror(Exception):
pass
class Fielddoesnotexisterror(Exception):
pass
class Indexcreationerror(Exception):
pass
class Duplicatekeyerror(Exception):
pass |
'''
Install mysql connector for python
Use the command in CMD: "pip install mysql-connector-python"
Also, install "MySQL Workbench"
Create a Database in workbench within 'root' user, here "myDB"
create a table in the database, here 'emp'
Now, go to any IDE or text editor to connect python with Database
''' | """
Install mysql connector for python
Use the command in CMD: "pip install mysql-connector-python"
Also, install "MySQL Workbench"
Create a Database in workbench within 'root' user, here "myDB"
create a table in the database, here 'emp'
Now, go to any IDE or text editor to connect python with Database
""" |
def decode_orientation(net, n_classes, loss_fn,
train_data, train_labels, test_data, test_labels,
n_iter=1000, L2_penalty=0, L1_penalty=0):
""" Initialize, train, and test deep network to decode binned orientation from neural responses
Args:
net (nn.Module): deep network to run
n_classes (scalar): number of classes in which to bin orientation
loss_fn (function): loss function to run
train_data (torch.Tensor): n_train x n_neurons tensor with neural
responses to train on
train_labels (torch.Tensor): n_train x 1 tensor with orientations of the
stimuli corresponding to each row of train_data, in radians
test_data (torch.Tensor): n_test x n_neurons tensor with neural
responses to train on
test_labels (torch.Tensor): n_test x 1 tensor with orientations of the
stimuli corresponding to each row of train_data, in radians
n_iter (int, optional): number of iterations to run optimization
L2_penalty (float, optional): l2 penalty regularizer coefficient
L1_penalty (float, optional): l1 penalty regularizer coefficient
Returns:
(list, torch.Tensor): training loss over iterations, n_test x 1 tensor with predicted orientations of the
stimuli from decoding neural network
"""
# Bin stimulus orientations in training set
train_binned_labels = stimulus_class(train_labels, n_classes)
test_binned_labels = stimulus_class(test_labels, n_classes)
# Run GD on training set data, using learning rate of 0.1
# (add optional arguments test_data and test_binned_labels!)
train_loss, test_loss = train(net, loss_fn, train_data, train_binned_labels,
learning_rate=0.1, test_data=test_data,
test_labels=test_binned_labels, n_iter=n_iter,
L2_penalty=L2_penalty, L1_penalty=L1_penalty)
# Decode neural responses in testing set data
out = net(test_data)
out_labels = np.argmax(out.detach(), axis=1) # predicted classes
frac_correct = (out_labels==test_binned_labels).sum() / len(test_binned_labels)
print(f'>>> fraction correct = {frac_correct:.3f}')
return train_loss, test_loss, out_labels
# Set random seeds for reproducibility
np.random.seed(1)
torch.manual_seed(1)
n_classes = 20
# Initialize network
net = DeepNetSoftmax(n_neurons, 20, n_classes) # use M=20 hidden units
# Initialize built-in PyTorch negative log likelihood loss function
loss_fn = nn.NLLLoss()
# Uncomment below to train network and run it on test images
# this function uses the train function you wrote before
train_loss, test_loss, predicted_test_labels = decode_orientation(net, n_classes, loss_fn,
resp_train, stimuli_train, resp_test, stimuli_test)
# Plot results
with plt.xkcd():
plot_decoded_results(train_loss, test_loss, stimuli_test, predicted_test_labels) | def decode_orientation(net, n_classes, loss_fn, train_data, train_labels, test_data, test_labels, n_iter=1000, L2_penalty=0, L1_penalty=0):
""" Initialize, train, and test deep network to decode binned orientation from neural responses
Args:
net (nn.Module): deep network to run
n_classes (scalar): number of classes in which to bin orientation
loss_fn (function): loss function to run
train_data (torch.Tensor): n_train x n_neurons tensor with neural
responses to train on
train_labels (torch.Tensor): n_train x 1 tensor with orientations of the
stimuli corresponding to each row of train_data, in radians
test_data (torch.Tensor): n_test x n_neurons tensor with neural
responses to train on
test_labels (torch.Tensor): n_test x 1 tensor with orientations of the
stimuli corresponding to each row of train_data, in radians
n_iter (int, optional): number of iterations to run optimization
L2_penalty (float, optional): l2 penalty regularizer coefficient
L1_penalty (float, optional): l1 penalty regularizer coefficient
Returns:
(list, torch.Tensor): training loss over iterations, n_test x 1 tensor with predicted orientations of the
stimuli from decoding neural network
"""
train_binned_labels = stimulus_class(train_labels, n_classes)
test_binned_labels = stimulus_class(test_labels, n_classes)
(train_loss, test_loss) = train(net, loss_fn, train_data, train_binned_labels, learning_rate=0.1, test_data=test_data, test_labels=test_binned_labels, n_iter=n_iter, L2_penalty=L2_penalty, L1_penalty=L1_penalty)
out = net(test_data)
out_labels = np.argmax(out.detach(), axis=1)
frac_correct = (out_labels == test_binned_labels).sum() / len(test_binned_labels)
print(f'>>> fraction correct = {frac_correct:.3f}')
return (train_loss, test_loss, out_labels)
np.random.seed(1)
torch.manual_seed(1)
n_classes = 20
net = deep_net_softmax(n_neurons, 20, n_classes)
loss_fn = nn.NLLLoss()
(train_loss, test_loss, predicted_test_labels) = decode_orientation(net, n_classes, loss_fn, resp_train, stimuli_train, resp_test, stimuli_test)
with plt.xkcd():
plot_decoded_results(train_loss, test_loss, stimuli_test, predicted_test_labels) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created: April 2019
Author: A. P. Naik
Description: Physical constants and unit conversions, for general use. All SI
units.
"""
# constants
pi = 3.141592653589793
G = 6.67408e-11 # m^3 kg s^-2
# units
pc = 3.0857e+16 # metres
kpc = 3.0857e+19 # metres
Mpc = 3.0857e+22 # metres
M_sun = 1.9885e+30 # kg
year = 31536000.0 # seconds
| """
Created: April 2019
Author: A. P. Naik
Description: Physical constants and unit conversions, for general use. All SI
units.
"""
pi = 3.141592653589793
g = 6.67408e-11
pc = 3.0857e+16
kpc = 3.0857e+19
mpc = 3.0857e+22
m_sun = 1.9885e+30
year = 31536000.0 |
"""
Connect and manage field sensor in the area or centralized end points.
Also collects local temperature, humidity, air pressure, movement (gyroscope)
Comm method: wifi and mesh. Sat Comm, LoRa and GSM are still in development
"""
| """
Connect and manage field sensor in the area or centralized end points.
Also collects local temperature, humidity, air pressure, movement (gyroscope)
Comm method: wifi and mesh. Sat Comm, LoRa and GSM are still in development
""" |
"""Npm integration testing
"""
load("@build_bazel_rules_nodejs//:index.bzl", "nodejs_test")
# Returns the manifest path of a file: `workspace/path/to/file`
def _to_manifest_path(ctx, file):
if file.short_path.startswith("../"):
# Strip the ../ from short_path to external repository
return file.short_path[3:]
else:
# Add the repository name for short_path to local repository
return ctx.workspace_name + "/" + file.short_path
def _npm_integration_test_config_impl(ctx):
if len(ctx.files.test_files) == 0:
fail("No files were found to run under integration testing.")
if ctx.attr.debug:
for f in ctx.files.test_files:
if f.is_directory:
fail("In debug mode, directory test_files labels not supported.")
commands = []
for c in ctx.attr.commands:
commands.append(ctx.expand_location(c, targets = ctx.attr.data))
# pass --define vars to test; these are added to the environment using process.env().
env_vars = {}
for k in ctx.attr.configuration_env_vars:
if k in ctx.var.keys():
env_vars[k] = ctx.var[k]
# Serialize configuration file for test runner
ctx.actions.write(
output = ctx.outputs.config,
content = """// npm_integration_test runner config generated by npm_integration_test rule
module.exports = {{
testFiles: [ {TMPL_test_files} ],
commands: [ {TMPL_commands} ],
npmPackages: {{ {TMPL_npm_packages} }},
checkNpmPackages: [ {TMPL_check_npm_packages} ],
envVars: {{ {TMPL_env_vars} }},
debug: {TMPL_debug},
}};
""".format(
TMPL_test_files = ", ".join(["'%s'" % f.short_path for f in ctx.files.test_files]),
TMPL_commands = ", ".join(["'%s'" % s for s in commands]),
TMPL_npm_packages = ", ".join(["'%s': '%s'" % (ctx.attr.npm_packages[n], n.files.to_list()[0].short_path) for n in ctx.attr.npm_packages]),
TMPL_check_npm_packages = ", ".join(["'%s'" % s for s in ctx.attr.check_npm_packages]),
TMPL_env_vars = ", ".join(["'%s': '%s'" % (k, env_vars[k]) for k in env_vars]),
TMPL_debug = "true" if ctx.attr.debug else "false",
),
)
runfiles = [ctx.outputs.config] + ctx.files.test_files + ctx.files.npm_packages
return [DefaultInfo(runfiles = ctx.runfiles(files = runfiles))]
_NPM_INTEGRATION_TEST_CONFIG_ATTRS = {
"commands": attr.string_list(
default = [],
mandatory = True,
doc = """The list of test commands to run. Defaults to `[]`.""",
),
"configuration_env_vars": attr.string_list(
doc = """Pass these configuration environment variables to the resulting test.
Chooses a subset of the configuration environment variables (taken from `ctx.var`), which also
includes anything specified via the --define flag.
Note, this can lead to different results for the test.""",
default = [],
),
"check_npm_packages": attr.string_list(
doc = """A list of npm packages that should be replaced in this test.
This attribute checks that none of the npm packages lists is found in the workspace-under-test's
package.json file unlinked to a generated npm package.
This can be used to verify that all npm package artifacts that need to be tested against are indeed
replaced in all integration tests. For example,
```
check_npm_packages = [
"@angular/common",
"@angular/compiler",
"@angular/compiler-cli",
"@angular/core",
],
```
If an `npm_packages` replacement on any package listed is missed then the test will fail. Since listing all
npm packages in `npm_packages` is expensive as any change will result in all integration tests re-running,
this attribute allows a fine grained `npm_packages` per integration test with the added safety that none
are missed for any one test.
""",
),
"data": attr.label_list(
doc = """Data dependencies for test.""",
allow_files = True,
),
"debug": attr.bool(
doc = """Setup the test for debugging.
If set to true then the package.json replacement are done in-place instead of a tmp folder
and the test is not run. This is used to configure the test folder for local testing and debugging.
""",
default = False,
),
"npm_packages": attr.label_keyed_string_dict(
doc = """A label keyed string dictionary of npm package replacements to make in the workspace-under-test's
package.json with npm package targets. The targets should be pkg_tar tar.gz archives.
For example,
```
npm_packages = {
"//packages/common:npm_package_archive": "@angular/common",
"//packages/compiler:npm_package_archive": "@angular/compiler",
"//packages/compiler-cli:npm_package_archive": "@angular/compiler-cli",
"//packages/core:npm_package_archive": "@angular/core",
}
```""",
allow_files = True,
),
"test_files": attr.label(
doc = """A filegroup of all files necessary to run the test.""",
allow_files = True,
),
}
_npm_integration_test_config = rule(
implementation = _npm_integration_test_config_impl,
doc = """Generates an npm_integration_test config.""",
attrs = _NPM_INTEGRATION_TEST_CONFIG_ATTRS,
outputs = {
"config": "%{name}.js",
},
)
def npm_integration_test(name, **kwargs):
"""Runs an npm integration test.
See _NPM_INTEGRATION_TEST_CONFIG_ATTRS above for configuration arguments.
"""
commands = kwargs.pop("commands", [])
configuration_env_vars = kwargs.pop("configuration_env_vars", [])
check_npm_packages = kwargs.pop("check_npm_packages", [])
npm_packages = kwargs.pop("npm_packages", {})
test_files = kwargs.pop("test_files", [])
data = kwargs.pop("data", [])
_npm_integration_test_config(
name = name + ".config",
commands = commands,
configuration_env_vars = configuration_env_vars,
check_npm_packages = check_npm_packages,
data = data,
npm_packages = npm_packages,
test_files = test_files,
visibility = ["//visibility:private"],
tags = ["manual"],
testonly = True,
)
# Config for debug target below
_npm_integration_test_config(
name = name + ".debug.config",
commands = commands,
configuration_env_vars = configuration_env_vars,
check_npm_packages = check_npm_packages,
data = data,
npm_packages = npm_packages,
test_files = test_files,
debug = True,
visibility = ["//visibility:private"],
tags = ["manual"],
testonly = True,
)
tags = kwargs.pop("tags", [])
npm_deps = ["@npm//tmp"]
nodejs_test(
name = name,
data = data + npm_deps + [":%s.config" % name, ":%s.config.js" % name],
tags = tags,
templated_args = ["$(location :%s.config.js)" % name],
entry_point = "//tools/npm_integration_test:test_runner.js",
**kwargs
)
# Setup a .debug target that sets the debug attribute to True.
# This target must be run with `bazel run` so it is tagged manual.
nodejs_test(
name = name + ".debug",
data = data + npm_deps + [":%s.debug.config" % name, ":%s.debug.config.js" % name],
tags = tags + ["manual", "local"],
templated_args = ["$(location :%s.debug.config.js)" % name],
entry_point = "//tools/npm_integration_test:test_runner.js",
**kwargs
)
| """Npm integration testing
"""
load('@build_bazel_rules_nodejs//:index.bzl', 'nodejs_test')
def _to_manifest_path(ctx, file):
if file.short_path.startswith('../'):
return file.short_path[3:]
else:
return ctx.workspace_name + '/' + file.short_path
def _npm_integration_test_config_impl(ctx):
if len(ctx.files.test_files) == 0:
fail('No files were found to run under integration testing.')
if ctx.attr.debug:
for f in ctx.files.test_files:
if f.is_directory:
fail('In debug mode, directory test_files labels not supported.')
commands = []
for c in ctx.attr.commands:
commands.append(ctx.expand_location(c, targets=ctx.attr.data))
env_vars = {}
for k in ctx.attr.configuration_env_vars:
if k in ctx.var.keys():
env_vars[k] = ctx.var[k]
ctx.actions.write(output=ctx.outputs.config, content='// npm_integration_test runner config generated by npm_integration_test rule\nmodule.exports = {{\n testFiles: [ {TMPL_test_files} ],\n commands: [ {TMPL_commands} ],\n npmPackages: {{ {TMPL_npm_packages} }},\n checkNpmPackages: [ {TMPL_check_npm_packages} ],\n envVars: {{ {TMPL_env_vars} }},\n debug: {TMPL_debug},\n}};\n'.format(TMPL_test_files=', '.join(["'%s'" % f.short_path for f in ctx.files.test_files]), TMPL_commands=', '.join(["'%s'" % s for s in commands]), TMPL_npm_packages=', '.join(["'%s': '%s'" % (ctx.attr.npm_packages[n], n.files.to_list()[0].short_path) for n in ctx.attr.npm_packages]), TMPL_check_npm_packages=', '.join(["'%s'" % s for s in ctx.attr.check_npm_packages]), TMPL_env_vars=', '.join(["'%s': '%s'" % (k, env_vars[k]) for k in env_vars]), TMPL_debug='true' if ctx.attr.debug else 'false'))
runfiles = [ctx.outputs.config] + ctx.files.test_files + ctx.files.npm_packages
return [default_info(runfiles=ctx.runfiles(files=runfiles))]
_npm_integration_test_config_attrs = {'commands': attr.string_list(default=[], mandatory=True, doc='The list of test commands to run. Defaults to `[]`.'), 'configuration_env_vars': attr.string_list(doc='Pass these configuration environment variables to the resulting test.\n Chooses a subset of the configuration environment variables (taken from `ctx.var`), which also\n includes anything specified via the --define flag.\n Note, this can lead to different results for the test.', default=[]), 'check_npm_packages': attr.string_list(doc='A list of npm packages that should be replaced in this test.\n\nThis attribute checks that none of the npm packages lists is found in the workspace-under-test\'s\npackage.json file unlinked to a generated npm package.\n\nThis can be used to verify that all npm package artifacts that need to be tested against are indeed\nreplaced in all integration tests. For example,\n```\ncheck_npm_packages = [\n "@angular/common",\n "@angular/compiler",\n "@angular/compiler-cli",\n "@angular/core",\n],\n```\nIf an `npm_packages` replacement on any package listed is missed then the test will fail. Since listing all\nnpm packages in `npm_packages` is expensive as any change will result in all integration tests re-running,\nthis attribute allows a fine grained `npm_packages` per integration test with the added safety that none\nare missed for any one test.\n'), 'data': attr.label_list(doc='Data dependencies for test.', allow_files=True), 'debug': attr.bool(doc='Setup the test for debugging.\n\nIf set to true then the package.json replacement are done in-place instead of a tmp folder\nand the test is not run. This is used to configure the test folder for local testing and debugging.\n', default=False), 'npm_packages': attr.label_keyed_string_dict(doc='A label keyed string dictionary of npm package replacements to make in the workspace-under-test\'s\npackage.json with npm package targets. 
The targets should be pkg_tar tar.gz archives.\n\nFor example,\n```\nnpm_packages = {\n "//packages/common:npm_package_archive": "@angular/common",\n "//packages/compiler:npm_package_archive": "@angular/compiler",\n "//packages/compiler-cli:npm_package_archive": "@angular/compiler-cli",\n "//packages/core:npm_package_archive": "@angular/core",\n}\n```', allow_files=True), 'test_files': attr.label(doc='A filegroup of all files necessary to run the test.', allow_files=True)}
_npm_integration_test_config = rule(implementation=_npm_integration_test_config_impl, doc='Generates an npm_integration_test config.', attrs=_NPM_INTEGRATION_TEST_CONFIG_ATTRS, outputs={'config': '%{name}.js'})
def npm_integration_test(name, **kwargs):
"""Runs an npm integration test.
See _NPM_INTEGRATION_TEST_CONFIG_ATTRS above for configuration arguments.
"""
commands = kwargs.pop('commands', [])
configuration_env_vars = kwargs.pop('configuration_env_vars', [])
check_npm_packages = kwargs.pop('check_npm_packages', [])
npm_packages = kwargs.pop('npm_packages', {})
test_files = kwargs.pop('test_files', [])
data = kwargs.pop('data', [])
_npm_integration_test_config(name=name + '.config', commands=commands, configuration_env_vars=configuration_env_vars, check_npm_packages=check_npm_packages, data=data, npm_packages=npm_packages, test_files=test_files, visibility=['//visibility:private'], tags=['manual'], testonly=True)
_npm_integration_test_config(name=name + '.debug.config', commands=commands, configuration_env_vars=configuration_env_vars, check_npm_packages=check_npm_packages, data=data, npm_packages=npm_packages, test_files=test_files, debug=True, visibility=['//visibility:private'], tags=['manual'], testonly=True)
tags = kwargs.pop('tags', [])
npm_deps = ['@npm//tmp']
nodejs_test(name=name, data=data + npm_deps + [':%s.config' % name, ':%s.config.js' % name], tags=tags, templated_args=['$(location :%s.config.js)' % name], entry_point='//tools/npm_integration_test:test_runner.js', **kwargs)
nodejs_test(name=name + '.debug', data=data + npm_deps + [':%s.debug.config' % name, ':%s.debug.config.js' % name], tags=tags + ['manual', 'local'], templated_args=['$(location :%s.debug.config.js)' % name], entry_point='//tools/npm_integration_test:test_runner.js', **kwargs) |
## Multiples of 3 or 5
## 6 kyu
## https://www.codewars.com/kata/514b92a657cdc65150000006
def solution(n):
mult_sum = 0
for i in range (3, n):
if i % 3 == 0 or i % 5 ==0:
mult_sum += i
return mult_sum | def solution(n):
mult_sum = 0
for i in range(3, n):
if i % 3 == 0 or i % 5 == 0:
mult_sum += i
return mult_sum |
#******************************************************************************
# (C) 2018, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# Link Simulation *
#******************************************************************************
__all__ = ["IF", "TMTC"]
| __all__ = ['IF', 'TMTC'] |
# Plain data-holder classes for the sections of a security finding. Field
# names match the AWS Security Finding Format — confirm against the ASFF
# schema version actually in use. Every constructor argument is stored
# verbatim as a same-named attribute.
class Compliance:
    """Holder for the finding's compliance status."""
    def __init__(self, Status=None):
        self.Status = Status
class Malware:
    """Holder for one observed malware artefact."""
    def __init__(self, Name=None, Path=None, State=None, Type=None):
        self.Name = Name
        self.Path = Path
        self.State = State
        self.Type = Type
class Network:
    """Holder for network-activity details (endpoints, direction, protocol)."""
    def __init__(self, DestinationDomain=None, DestinationIpV4=None, DestinationIpV6=None, DestinationPort=None,
                 Direction=None, Protocol=None, SourceDomain=None, SourceIpV4=None, SourceIpV6=None, SourceMac=None,
                 SourcePort=None):
        self.DestinationDomain = DestinationDomain
        self.DestinationIpV4 = DestinationIpV4
        self.DestinationIpV6 = DestinationIpV6
        self.DestinationPort = DestinationPort
        self.Direction = Direction
        self.Protocol = Protocol
        self.SourceDomain = SourceDomain
        self.SourceIpV4 = SourceIpV4
        self.SourceIpV6 = SourceIpV6
        self.SourceMac = SourceMac
        self.SourcePort = SourcePort
class Note:
    """Holder for an analyst note attached to the finding."""
    def __init__(self, Text=None, UpdatedAt=None, UpdatedBy=None):
        self.Text = Text
        self.UpdatedAt = UpdatedAt
        self.UpdatedBy = UpdatedBy
class Process:
    """Holder for details of a process related to the finding."""
    def __init__(self, LaunchedAt=None, Name=None, ParentPid=None, Path=None, Pid=None, TerminatedAt=None):
        self.LaunchedAt = LaunchedAt
        self.Name = Name
        self.ParentPid = ParentPid
        self.Path = Path
        self.Pid = Pid
        self.TerminatedAt = TerminatedAt
class ProductFields:
    """Holder for a free-form product-specific string payload."""
    def __init__(self, string=None):
        self.string = string
class RelatedFinding:
    """Reference to another finding by Id and ProductArn."""
    def __init__(self, Id=None, ProductArn=None):
        self.Id = Id
        self.ProductArn = ProductArn
class Recommendation:
    """Remediation recommendation: free text plus an optional URL."""
    def __init__(self, Text=None, Url=None):
        self.Text = Text
        self.Url = Url
class Remediation:
    """Wrapper holding a single recommendation."""
    def __init__(self, Recommendation=None):
        # Parameter shadows the Recommendation class above; presumably callers
        # pass an instance of it — confirm at the call site.
        self.Recommendation = Recommendation
# Resource-detail holders: each mirrors one resource type a finding can
# reference. All arguments are stored verbatim as same-named attributes.
class AwsEc2Instance:
    """Holder for EC2 instance details referenced by a finding."""
    def __init__(self, IamInstanceProfileArn=None, ImageId=None, IpV4Addresses=None, IpV6Addresses=None, KeyName=None,
                 LaunchedAt=None, SubnetId=None, Type=None, VpcId=None):
        self.IamInstanceProfileArn = IamInstanceProfileArn
        self.ImageId = ImageId
        self.IpV4Addresses = IpV4Addresses
        self.IpV6Addresses = IpV6Addresses
        self.KeyName = KeyName
        self.LaunchedAt = LaunchedAt
        self.SubnetId = SubnetId
        self.Type = Type
        self.VpcId = VpcId
class AwsIamAccessKey:
    """Holder for IAM access-key details."""
    def __init__(self, CreatedAt=None, Status=None, UserName=None):
        self.CreatedAt = CreatedAt
        self.Status = Status
        self.UserName = UserName
class AwsS3Bucket:
    """Holder for S3 bucket ownership details."""
    def __init__(self, OwnerId=None, OwnerName=None):
        self.OwnerId = OwnerId
        self.OwnerName = OwnerName
class Container:
    """Holder for container details."""
    def __init__(self, ImageId=None, ImageName=None, LaunchedAt=None, Name=None):
        self.ImageId = ImageId
        self.ImageName = ImageName
        self.LaunchedAt = LaunchedAt
        self.Name = Name
class Details:
    """Union-style holder: one attribute per supported resource type."""
    def __init__(self, AwsEc2Instance=None, AwsIamAccessKey=None, AwsS3Bucket=None, Container=None, Other=None):
        # Parameters shadow the detail classes above; presumably instances of
        # those classes are passed — confirm with callers.
        self.AwsEc2Instance = AwsEc2Instance
        self.AwsIamAccessKey = AwsIamAccessKey
        self.AwsS3Bucket = AwsS3Bucket
        self.Container = Container
        self.Other = Other
class Resource:
    """Holder for one resource entry (identity plus optional Details)."""
    def __init__(self, Details=None, Id=None, Partition=None, Region=None, Tags=None, Type=None):
        self.Details = Details
        self.Id = Id
        self.Partition = Partition
        self.Region = Region
        self.Tags = Tags
        self.Type = Type
class Severity:
    """Holder for the finding's severity values."""
    def __init__(self, Normalized=None, Product=None, Label=None):
        self.Normalized = Normalized
        self.Product = Product
        self.Label = Label
class ThreatIntelIndicator:
    """Holder for one threat-intelligence indicator."""
    def __init__(self, Category=None, LastObservedAt=None, Source=None, SourceUrl=None, Type=None, Value=None):
        self.Category = Category
        self.LastObservedAt = LastObservedAt
        self.Source = Source
        self.SourceUrl = SourceUrl
        self.Type = Type
        self.Value = Value
class UserDefinedFields:
    """Holder for custom source-* fields added by the producing integration."""
    def __init__(self, SourceRuleName=None, SourceEmail=None, SourceUsername=None, SourceFullName=None,
                 SourceLoginName=None, SourceExtraData=None,
                 SourceHostname=None, SourceDestinations=None):
        self.SourceRuleName = SourceRuleName
        self.SourceEmail = SourceEmail
        self.SourceUsername = SourceUsername
        self.SourceFullName = SourceFullName
        self.SourceLoginName = SourceLoginName
        self.SourceExtraData = SourceExtraData
        self.SourceHostname = SourceHostname
        self.SourceDestinations = SourceDestinations
class Finding:
    """Top-level finding record aggregating all the section holders above.

    Several parameter names (Compliance, Malware, Network, Note, Process,
    Severity, Remediation, ...) deliberately shadow the section classes;
    presumably callers pass instances of those classes — confirm at the
    call sites. All arguments are stored verbatim as same-named attributes.
    """
    def __init__(self, AwsAccountId=None, Compliance=None, Confidence=None, CreatedAt=None, Criticality=None,
                 Description=None, FirstObservedAt=None, GeneratorId=None, Id=None, LastObservedAt=None, Malware=None,
                 Network=None, Note=None, Process=None, ProductArn=None, ProductFields=None, RecordState=None,
                 RelatedFindings=None, Remediation=None, Resources=None, SchemaVersion=None, Severity=None,
                 SourceUrl=None, ThreatIntelIndicators=None, Title=None, Types=None, UpdatedAt=None,
                 UserDefinedFields=None, VerificationState=None, WorkflowState=None):
        self.AwsAccountId = AwsAccountId
        self.Compliance = Compliance
        self.Confidence = Confidence
        self.CreatedAt = CreatedAt
        self.Criticality = Criticality
        self.Description = Description
        self.FirstObservedAt = FirstObservedAt
        self.GeneratorId = GeneratorId
        self.Id = Id
        self.LastObservedAt = LastObservedAt
        self.Malware = Malware
        self.Network = Network
        self.Note = Note
        self.Process = Process
        self.ProductArn = ProductArn
        self.ProductFields = ProductFields
        self.RecordState = RecordState
        self.RelatedFindings = RelatedFindings
        self.Remediation = Remediation
        self.Resources = Resources
        self.SchemaVersion = SchemaVersion
        self.Severity = Severity
        self.SourceUrl = SourceUrl
        self.ThreatIntelIndicators = ThreatIntelIndicators
        self.Title = Title
        self.Types = Types
        self.UpdatedAt = UpdatedAt
        self.UserDefinedFields = UserDefinedFields
        self.VerificationState = VerificationState
        self.WorkflowState = WorkflowState
class JsonFormatClass:
    """Serialisation root: wraps one Finding for JSON output."""
    def __init__(self, Finding=None):
        self.Finding = Finding
| class Compliance:
def __init__(self, Status=None):
self.Status = Status
class Malware:
def __init__(self, Name=None, Path=None, State=None, Type=None):
self.Name = Name
self.Path = Path
self.State = State
self.Type = Type
class Network:
def __init__(self, DestinationDomain=None, DestinationIpV4=None, DestinationIpV6=None, DestinationPort=None, Direction=None, Protocol=None, SourceDomain=None, SourceIpV4=None, SourceIpV6=None, SourceMac=None, SourcePort=None):
self.DestinationDomain = DestinationDomain
self.DestinationIpV4 = DestinationIpV4
self.DestinationIpV6 = DestinationIpV6
self.DestinationPort = DestinationPort
self.Direction = Direction
self.Protocol = Protocol
self.SourceDomain = SourceDomain
self.SourceIpV4 = SourceIpV4
self.SourceIpV6 = SourceIpV6
self.SourceMac = SourceMac
self.SourcePort = SourcePort
class Note:
def __init__(self, Text=None, UpdatedAt=None, UpdatedBy=None):
self.Text = Text
self.UpdatedAt = UpdatedAt
self.UpdatedBy = UpdatedBy
class Process:
def __init__(self, LaunchedAt=None, Name=None, ParentPid=None, Path=None, Pid=None, TerminatedAt=None):
self.LaunchedAt = LaunchedAt
self.Name = Name
self.ParentPid = ParentPid
self.Path = Path
self.Pid = Pid
self.TerminatedAt = TerminatedAt
class Productfields:
def __init__(self, string=None):
self.string = string
class Relatedfinding:
def __init__(self, Id=None, ProductArn=None):
self.Id = Id
self.ProductArn = ProductArn
class Recommendation:
def __init__(self, Text=None, Url=None):
self.Text = Text
self.Url = Url
class Remediation:
def __init__(self, Recommendation=None):
self.Recommendation = Recommendation
class Awsec2Instance:
def __init__(self, IamInstanceProfileArn=None, ImageId=None, IpV4Addresses=None, IpV6Addresses=None, KeyName=None, LaunchedAt=None, SubnetId=None, Type=None, VpcId=None):
self.IamInstanceProfileArn = IamInstanceProfileArn
self.ImageId = ImageId
self.IpV4Addresses = IpV4Addresses
self.IpV6Addresses = IpV6Addresses
self.KeyName = KeyName
self.LaunchedAt = LaunchedAt
self.SubnetId = SubnetId
self.Type = Type
self.VpcId = VpcId
class Awsiamaccesskey:
def __init__(self, CreatedAt=None, Status=None, UserName=None):
self.CreatedAt = CreatedAt
self.Status = Status
self.UserName = UserName
class Awss3Bucket:
def __init__(self, OwnerId=None, OwnerName=None):
self.OwnerId = OwnerId
self.OwnerName = OwnerName
class Container:
def __init__(self, ImageId=None, ImageName=None, LaunchedAt=None, Name=None):
self.ImageId = ImageId
self.ImageName = ImageName
self.LaunchedAt = LaunchedAt
self.Name = Name
class Details:
def __init__(self, AwsEc2Instance=None, AwsIamAccessKey=None, AwsS3Bucket=None, Container=None, Other=None):
self.AwsEc2Instance = AwsEc2Instance
self.AwsIamAccessKey = AwsIamAccessKey
self.AwsS3Bucket = AwsS3Bucket
self.Container = Container
self.Other = Other
class Resource:
def __init__(self, Details=None, Id=None, Partition=None, Region=None, Tags=None, Type=None):
self.Details = Details
self.Id = Id
self.Partition = Partition
self.Region = Region
self.Tags = Tags
self.Type = Type
class Severity:
def __init__(self, Normalized=None, Product=None, Label=None):
self.Normalized = Normalized
self.Product = Product
self.Label = Label
class Threatintelindicator:
def __init__(self, Category=None, LastObservedAt=None, Source=None, SourceUrl=None, Type=None, Value=None):
self.Category = Category
self.LastObservedAt = LastObservedAt
self.Source = Source
self.SourceUrl = SourceUrl
self.Type = Type
self.Value = Value
class Userdefinedfields:
def __init__(self, SourceRuleName=None, SourceEmail=None, SourceUsername=None, SourceFullName=None, SourceLoginName=None, SourceExtraData=None, SourceHostname=None, SourceDestinations=None):
self.SourceRuleName = SourceRuleName
self.SourceEmail = SourceEmail
self.SourceUsername = SourceUsername
self.SourceFullName = SourceFullName
self.SourceLoginName = SourceLoginName
self.SourceExtraData = SourceExtraData
self.SourceHostname = SourceHostname
self.SourceDestinations = SourceDestinations
class Finding:
def __init__(self, AwsAccountId=None, Compliance=None, Confidence=None, CreatedAt=None, Criticality=None, Description=None, FirstObservedAt=None, GeneratorId=None, Id=None, LastObservedAt=None, Malware=None, Network=None, Note=None, Process=None, ProductArn=None, ProductFields=None, RecordState=None, RelatedFindings=None, Remediation=None, Resources=None, SchemaVersion=None, Severity=None, SourceUrl=None, ThreatIntelIndicators=None, Title=None, Types=None, UpdatedAt=None, UserDefinedFields=None, VerificationState=None, WorkflowState=None):
self.AwsAccountId = AwsAccountId
self.Compliance = Compliance
self.Confidence = Confidence
self.CreatedAt = CreatedAt
self.Criticality = Criticality
self.Description = Description
self.FirstObservedAt = FirstObservedAt
self.GeneratorId = GeneratorId
self.Id = Id
self.LastObservedAt = LastObservedAt
self.Malware = Malware
self.Network = Network
self.Note = Note
self.Process = Process
self.ProductArn = ProductArn
self.ProductFields = ProductFields
self.RecordState = RecordState
self.RelatedFindings = RelatedFindings
self.Remediation = Remediation
self.Resources = Resources
self.SchemaVersion = SchemaVersion
self.Severity = Severity
self.SourceUrl = SourceUrl
self.ThreatIntelIndicators = ThreatIntelIndicators
self.Title = Title
self.Types = Types
self.UpdatedAt = UpdatedAt
self.UserDefinedFields = UserDefinedFields
self.VerificationState = VerificationState
self.WorkflowState = WorkflowState
class Jsonformatclass:
def __init__(self, Finding=None):
self.Finding = Finding |
"""
Maintain version for CropMl.
"""
MAJOR = 0
"""(int) Version major component."""
MINOR = 0
"""(int) Version minor component."""
POST = 2
"""(int) Version post or bugfix component."""
__version__ = ".".join([str(s) for s in (MAJOR, MINOR, POST)])
| """
Maintain version for CropMl.
"""
major = 0
'(int) Version major component.'
minor = 0
'(int) Version minor component.'
post = 2
'(int) Version post or bugfix component.'
__version__ = '.'.join([str(s) for s in (MAJOR, MINOR, POST)]) |
# Training / inference configuration for an AdvancedEAST text detector.
# The task id encodes the run: its last three characters are the input
# image size (see int(train_task_id[-3:]) below).
train_task_id = '2T736'
initial_epoch = 0
epoch_num = 24
lr = 1e-3
decay = 5e-4
# clipvalue = 0.5 # default 0.5, 0 means no clip
patience = 2
load_weights = True
# Loss-term weights for the three EAST heads.
lambda_inside_score_loss = 4.0
lambda_side_vertex_code_loss = 1.0
lambda_side_vertex_coord_loss = 1.0
total_img = 222199
validation_split_ratio = 0.1
max_train_img_size = int(train_task_id[-3:])
max_predict_img_size = int(train_task_id[-3:])  # 2400
assert max_train_img_size in [256, 384, 512, 640, 736], \
    'max_train_img_size must in [256, 384, 512, 640, 736]'
# Larger inputs need smaller batches to fit in GPU memory.
if max_train_img_size == 256:
    batch_size = 8
elif max_train_img_size == 384:
    batch_size = 4
elif max_train_img_size == 512:
    batch_size = 2
else:
    batch_size = 1
steps_per_epoch = total_img * (1 - validation_split_ratio) // batch_size
validation_steps = total_img * validation_split_ratio // batch_size
# data_dir = '/media/haoxin/A1/data/AdvancedEAST'
data_dir = '/data/kuaidi01/dataset_detect/AdvancedEast_data'
origin_image_dir_name = 'image_all/'
origin_txt_dir_name = 'txt_all/'
# Per-task derived directory / file names.
train_image_dir_name = 'images_%s/' % train_task_id
train_label_dir_name = 'labels_%s/' % train_task_id
show_gt_image_dir_name = 'show_gt_images_%s/' % train_task_id
show_act_image_dir_name = 'show_act_images_%s/' % train_task_id
gen_origin_img = True
draw_gt_quad = True
draw_act_quad = True
val_fname = 'val_%s.txt' % train_task_id
train_fname = 'train_%s.txt' % train_task_id
# in paper it's 0.3, maybe to large to this problem
shrink_ratio = 0.2
# NOTE(review): upstream comment said side pixels lie between 0.1 and 0.3,
# but the value here is 0.6 — confirm the intended band.
shrink_side_ratio = 0.6
epsilon = 1e-4
num_channels = 3
feature_layers_range = range(5, 1, -1)
# feature_layers_range = range(3, 0, -1)
feature_layers_num = len(feature_layers_range)
# pixel_size = 4
pixel_size = 2 ** feature_layers_range[-1]
locked_layers = False
model_weights_path = 'model/weights_%s.{epoch:03d}-{val_loss:.3f}.h5' \
    % train_task_id
saved_model_file_path = 'saved_model/east_model_%s.h5' % train_task_id
saved_model_weights_file_path = 'saved_model/east_model_weights_%s.h5' \
    % train_task_id
# Post-processing thresholds for prediction.
pixel_threshold = 0.9
side_vertex_pixel_threshold = 0.9
trunc_threshold = 0.1
predict_cut_text_line = False
predict_write2txt = True
| train_task_id = '2T736'
initial_epoch = 0
epoch_num = 24
lr = 0.001
decay = 0.0005
patience = 2
load_weights = True
lambda_inside_score_loss = 4.0
lambda_side_vertex_code_loss = 1.0
lambda_side_vertex_coord_loss = 1.0
total_img = 222199
validation_split_ratio = 0.1
max_train_img_size = int(train_task_id[-3:])
max_predict_img_size = int(train_task_id[-3:])
assert max_train_img_size in [256, 384, 512, 640, 736], 'max_train_img_size must in [256, 384, 512, 640, 736]'
if max_train_img_size == 256:
batch_size = 8
elif max_train_img_size == 384:
batch_size = 4
elif max_train_img_size == 512:
batch_size = 2
else:
batch_size = 1
steps_per_epoch = total_img * (1 - validation_split_ratio) // batch_size
validation_steps = total_img * validation_split_ratio // batch_size
data_dir = '/data/kuaidi01/dataset_detect/AdvancedEast_data'
origin_image_dir_name = 'image_all/'
origin_txt_dir_name = 'txt_all/'
train_image_dir_name = 'images_%s/' % train_task_id
train_label_dir_name = 'labels_%s/' % train_task_id
show_gt_image_dir_name = 'show_gt_images_%s/' % train_task_id
show_act_image_dir_name = 'show_act_images_%s/' % train_task_id
gen_origin_img = True
draw_gt_quad = True
draw_act_quad = True
val_fname = 'val_%s.txt' % train_task_id
train_fname = 'train_%s.txt' % train_task_id
shrink_ratio = 0.2
shrink_side_ratio = 0.6
epsilon = 0.0001
num_channels = 3
feature_layers_range = range(5, 1, -1)
feature_layers_num = len(feature_layers_range)
pixel_size = 2 ** feature_layers_range[-1]
locked_layers = False
model_weights_path = 'model/weights_%s.{epoch:03d}-{val_loss:.3f}.h5' % train_task_id
saved_model_file_path = 'saved_model/east_model_%s.h5' % train_task_id
saved_model_weights_file_path = 'saved_model/east_model_weights_%s.h5' % train_task_id
pixel_threshold = 0.9
side_vertex_pixel_threshold = 0.9
trunc_threshold = 0.1
predict_cut_text_line = False
predict_write2txt = True |
#encoding:utf-8
# reddit2telegram channel binding: mirrors posts from r/tifu.
subreddit = 'tifu'
t_channel = '@r_channels_tifu'


def send_post(submission, r2t):
    # Forward the submission unchanged; r2t is the reddit2telegram sender.
    return r2t.send_simple(submission)
| subreddit = 'tifu'
t_channel = '@r_channels_tifu'
def send_post(submission, r2t):
return r2t.send_simple(submission) |
#
# @lc app=leetcode id=217 lang=python3
#
# [217] Contains Duplicate
#
class Solution:
    def containsDuplicate(self, nums: list[int]) -> bool:
        """Return True if any value appears at least twice in *nums*.

        A set drops duplicates, so the lengths differ exactly when nums
        contains a repeated value. O(n) time, O(n) extra space.
        """
        # `list[int]` (PEP 585 builtin generic) replaces the original
        # `List[int]`, which relied on an absent `from typing import List`
        # and would raise NameError when the class body is executed.
        return len(nums) != len(set(nums))
| class Solution:
def contains_duplicate(self, nums: List[int]) -> bool:
return len(nums) != len(set(nums)) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class LookupCache:
    """
    Prime a cache of all valid knowledge base phrases to prevent costly DB lookups for phrases that do not exist.
    """

    def __init__(self,
                 resource_path='../kb-classifier/data-wiki/resources.txt',
                 redirect_path='../kb-classifier/data-wiki/redirect_resources.txt',
                 anchor_path='../kb-classifier/data-wiki/anchors.txt',
                 use_anchors_only=False):
        """
        :param resource_path: the path to the file containing valid resources in DBpedia.
        :param redirect_path: the path to the file containing redirects in DBpedia.
        :param anchor_path: the path to the file containing valid anchor text in DBpedia.
        :param use_anchors_only: set to True if only anchors should be considered and not direct resource or redirect
                                 matches.
        """
        # Resource/redirect caches are only needed for direct matching; the
        # anchor cache is consulted unconditionally by contains_anchor().
        # NOTE(review): indentation was lost in this dump — reconstructed so
        # only the two direct-match caches sit inside the guard; confirm upstream.
        if not use_anchors_only:
            self.resource_cache = self.load_phrase_cache(resource_path)
            self.redirect_cache = self.load_phrase_cache(redirect_path)
        self.anchor_cache = self.load_phrase_cache(anchor_path)
        self.use_anchors_only = use_anchors_only
        print('Cache Initialised')

    @staticmethod
    def _for_resource_search(phrase):
        """Capitalise the first letter, mirroring DBpedia resource naming.

        Single-character phrases are returned unchanged, matching the
        original behaviour of only capitalising when len(phrase) > 1.
        """
        if len(phrase) > 1:
            return phrase[0].upper() + phrase[1:]
        return phrase

    def load_phrase_cache(self, phrase_path):
        """
        Given a file of phrases, returns a set of those phrases so it can be used as a cache.
        :param phrase_path: the path to the file containing phrases.
        :returns: a set of phrases contained in the file.
        """
        with open(phrase_path, 'r') as phrases:
            # Blank lines are skipped; surrounding whitespace is stripped.
            return {phrase.strip() for phrase in phrases if phrase.strip() != ''}

    def contains_exact(self, phrase):
        """
        Does an exact resource match for the phrase exist.
        :param phrase: the phrase to match.
        :returns: True if a resource exists for the phrase. Will always return False if use_anchors_only=True.
        """
        # Short-circuit keeps this safe when use_anchors_only=True and
        # resource_cache was never built.
        return not self.use_anchors_only and self._for_resource_search(phrase) in self.resource_cache

    def contains_redirect(self, phrase):
        """
        Does a redirect match for the phrase exist.
        :param phrase: the phrase to match.
        :returns: True if a redirect exists for the phrase. Will always return False if use_anchors_only=True.
        """
        return not self.use_anchors_only and self._for_resource_search(phrase) in self.redirect_cache

    def contains_anchor(self, phrase):
        """
        Does an anchor match for the phrase exist.
        :param phrase: the phrase to match.
        :returns: True if an anchor exists for the phrase.
        """
        return phrase in self.anchor_cache

    def translate(self, phrase):
        """
        No translation necessary for DBpedia.
        """
        return phrase
| class Lookupcache:
"""
Prime a cache of all valid knowledge base phrases to prevent costly DB lookups for phrases that do not exist.
"""
def __init__(self, resource_path='../kb-classifier/data-wiki/resources.txt', redirect_path='../kb-classifier/data-wiki/redirect_resources.txt', anchor_path='../kb-classifier/data-wiki/anchors.txt', use_anchors_only=False):
"""
:param resource_path: the path to the file containing valid resources in DBpedia.
:param redirect_path: the path to the file containing redirects in DBpedia.
:param anchor_path: the path to the file containing valid anchor text in DBpedia.
:param use_anchors_only: set to True if only anchors should be considered and not direct resource or redirect
matches.
"""
if not use_anchors_only:
self.resource_cache = self.load_phrase_cache(resource_path)
self.redirect_cache = self.load_phrase_cache(redirect_path)
self.anchor_cache = self.load_phrase_cache(anchor_path)
self.use_anchors_only = use_anchors_only
print('Cache Initialised')
def load_phrase_cache(self, phrase_path):
"""
Given a file of phrases, returns a set of those phrases so it can be used as a cache.
:param phrase_path: the path to the file containing phrases.
:returns: a set of phrases contained in the file.
"""
valid_phrases = set()
with open(phrase_path, 'r') as phrases:
for phrase in phrases:
if phrase.strip() != '':
valid_phrases.add(phrase.strip())
return valid_phrases
def contains_exact(self, phrase):
"""
Does an exact resource match for the phrase exist.
:param phrase: the phrase to match.
:returns: True if a resource exists for the phrase. Will always return False if use_anchors_only=True.
"""
for_resource_search = phrase
if len(for_resource_search) > 1:
for_resource_search = for_resource_search[0].upper() + for_resource_search[1:]
return not self.use_anchors_only and for_resource_search in self.resource_cache
def contains_redirect(self, phrase):
"""
Does a redirect match for the phrase exist.
:param phrase: the phrase to match.
:returns: True if a redirect exists for the phrase. Will always return False if use_anchors_only=True.
"""
for_resource_search = phrase
if len(for_resource_search) > 1:
for_resource_search = for_resource_search[0].upper() + for_resource_search[1:]
return not self.use_anchors_only and for_resource_search in self.redirect_cache
def contains_anchor(self, phrase):
"""
Does an anchor match for the phrase exist.
:param phrase: the phrase to match.
:returns: True if an anchor exists for the phrase.
"""
return phrase in self.anchor_cache
def translate(self, phrase):
"""
No translation necessary for DBpedia.
"""
return phrase |
def dictionary_masher(dict_a, dict_b):
    """Merge *dict_b* into *dict_a* in place, keeping dict_a's value on clashes.

    Only keys absent from dict_a are copied over. Returns dict_a (the same
    object, mutated) for chaining convenience.
    """
    for key, value in dict_b.items():
        # setdefault assigns only when the key is missing — same semantics as
        # the original `if key not in dict_a` guard, in one lookup.
        dict_a.setdefault(key, value)
    return dict_a
class Accessor(property):
    """Accessor is a property factory for structure fields
    The Accessor is used by the Meta metaclass to generate accessors for the
    structure fields inside a new structure class.
    """
    def __init__(self, item, key):
        # Build a property whose getter/setter close over the field codec
        # (item) and its slot (key) in the owning structure's raw data.
        super().__init__(
            # Getter: decode the stored raw value for slot `key`.
            lambda s: item.getter(s.__instance__.data[key]),
            # Setter: receives the *current* stored value and the new value —
            # presumably item.setter mutates the stored object in place;
            # confirm against the Meta metaclass and the item contract.
            lambda s, v: item.setter(s.__instance__.data[key], v),
        )
| class Accessor(property):
"""Accessor is a property factory for structure fields
The Accessor is used by the Meta metaclass to generate accessors for the
structure fields inside a new strcuture class.
"""
def __init__(self, item, key):
super().__init__(lambda s: item.getter(s.__instance__.data[key]), lambda s, v: item.setter(s.__instance__.data[key], v)) |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.append(os.path.join(os.path.dirname(__name__), '..'))
# sys.path.insert(0, os.path.abspath('../src/dvg_devices'))
# -- Project information -----------------------------------------------------
project = "DvG_Devices"
copyright = "2021, Dennis van Gils"
author = "Dennis van Gils"
# The full version, including alpha/beta/rc tags
release = "1.0.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx.ext.intersphinx",
"sphinx_qt_documentation",
]
intersphinx_mapping = {
"PyQt5": ("https://www.riverbankcomputing.com/static/Docs/PyQt5/", None),
"NumPy": ("https://numpy.org/doc/stable/", None),
"python": ("https://docs.python.org/3", None),
"serial": ("https://pyserial.readthedocs.io/en/latest/", None),
"dvg_qdeviceio": (
"https://python-dvg-qdeviceio.readthedocs.io/en/latest/",
None,
),
}
qt_documentation = "Qt5"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
master_doc = "index"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# 'bizstyle', 'classic', 'sphinx_rtd_theme'
html_theme = "sphinx_rtd_theme"
html_theme_path = [
"_themes",
]
# html_theme_options = {
# 'canonical_url': '',
# 'analytics_id': 'UA-XXXXXXX-1', # Provided by Google in your dashboard
# 'logo_only': False,
# 'display_version': True,
# 'prev_next_buttons_location': 'bottom',
# 'style_external_links': False,
# 'style_nav_header_background': '#2980B9',
# Toc options
# 'collapse_navigation': True,
# 'sticky_navigation': True,
# 'navigation_depth': 4,
# 'includehidden': True,
# 'titles_only': False
# }
html_last_updated_fmt = "%d-%m-%Y"
html4_writer = True
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False # True to create block. Downside is that we lose hyperlinks to class variables
napoleon_use_param = False # False
napoleon_use_rtype = True
| project = 'DvG_Devices'
copyright = '2021, Dennis van Gils'
author = 'Dennis van Gils'
release = '1.0.0'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', 'sphinx.ext.intersphinx', 'sphinx_qt_documentation']
intersphinx_mapping = {'PyQt5': ('https://www.riverbankcomputing.com/static/Docs/PyQt5/', None), 'NumPy': ('https://numpy.org/doc/stable/', None), 'python': ('https://docs.python.org/3', None), 'serial': ('https://pyserial.readthedocs.io/en/latest/', None), 'dvg_qdeviceio': ('https://python-dvg-qdeviceio.readthedocs.io/en/latest/', None)}
qt_documentation = 'Qt5'
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
master_doc = 'index'
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['_themes']
html_last_updated_fmt = '%d-%m-%Y'
html4_writer = True
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = False
napoleon_use_rtype = True |
class Solution:
    def thirdMax(self, nums: list[int]) -> int:
        """Return the third-largest *distinct* value in nums, or the maximum
        when fewer than three distinct values exist.

        Fixes two defects of the original: it destructively removed elements
        from the caller's list, and its `List[int]` annotation referenced an
        unimported name. Input is never mutated here.
        """
        distinct = set(nums)
        if len(distinct) < 3:
            return max(distinct)
        # Third distinct maximum == third-from-last of the sorted distinct values.
        return sorted(distinct)[-3]
| class Solution:
def third_max(self, nums: List[int]) -> int:
if len(set(nums)) < 3:
return max(nums)
for i in range(2):
for i in range(nums.count(max(nums))):
nums.remove(max(nums))
return max(nums) |
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Rule to run the terser binary under bazel"
load("@build_bazel_rules_nodejs//:providers.bzl", "run_node")
_DOC = """Run the terser minifier.
Typical example:
```python
load("//packages/terser:index.bzl", "terser_minified")
terser_minified(
name = "out.min",
src = "input.js",
config_file = "terser_config.json",
)
```
Note that the `name` attribute determines what the resulting files will be called.
So the example above will output `out.min.js` and `out.min.js.map` (since `sourcemap` defaults to `true`).
If the input is a directory, then the output will also be a directory, named after the `name` attribute.
"""
_TERSER_ATTRS = {
"args": attr.string_list(
doc = """Additional command line arguments to pass to terser.
Terser only parses minify() args from the config file so additional arguments such as `--comments` may
be passed to the rule using this attribute. See https://github.com/terser/terser#command-line-usage for the
full list of terser CLI options.""",
),
"config_file": attr.label(
doc = """A JSON file containing Terser minify() options.
This is the file you would pass to the --config-file argument in terser's CLI.
https://github.com/terser-js/terser#minify-options documents the content of the file.
Bazel will make a copy of your config file, treating it as a template.
> Run bazel with `--subcommands` to see the path to the copied file.
If you use the magic strings `"bazel_debug"` or `"bazel_no_debug"`, these will be
replaced with `true` and `false` respecting the value of the `debug` attribute
or the `--compilation_mode=dbg` bazel flag.
For example,
```
{
"compress": {
"arrows": "bazel_no_debug"
}
}
```
Will disable the `arrows` compression setting when debugging.
If `config_file` isn't supplied, Bazel will use a default config file.
""",
allow_single_file = True,
# These defaults match how terser was run in the legacy built-in rollup_bundle rule.
# We keep them the same so it's easier for users to migrate.
default = Label("//packages/terser:terser_config.default.json"),
),
"debug": attr.bool(
doc = """Configure terser to produce more readable output.
Instead of setting this attribute, consider using debugging compilation mode instead
bazel build --compilation_mode=dbg //my/terser:target
so that it only affects the current build.
""",
),
"sourcemap": attr.bool(
doc = "Whether to produce a .js.map output",
default = True,
),
"src": attr.label(
doc = """File(s) to minify.
Can be a .js file, a rule producing .js files as its default output, or a rule producing a directory of .js files.
Note that you can pass multiple files to terser, which it will bundle together.
If you want to do this, you can pass a filegroup here.""",
allow_files = [".js", ".map", ".mjs"],
mandatory = True,
),
"terser_bin": attr.label(
doc = "An executable target that runs Terser",
default = Label("//packages/terser/bin:terser"),
executable = True,
cfg = "host",
),
}
def _filter_js(files):
    """Keep only the JavaScript inputs: directories, .js and .mjs files."""
    result = []
    for f in files:
        if f.is_directory or f.extension == "js" or f.extension == "mjs":
            result.append(f)
    return result
def _terser(ctx):
"Generate actions to create terser config run terser"
# CLI arguments; see https://www.npmjs.com/package/terser#command-line-usage
args = ctx.actions.args()
inputs = ctx.files.src[:]
outputs = []
sources = _filter_js(inputs)
sourcemaps = [f for f in inputs if f.extension == "map"]
directory_srcs = [s for s in sources if s.is_directory]
if len(directory_srcs) > 0:
if len(sources) > 1:
fail("When directories are passed to terser_minified, there should be only one input")
outputs.append(ctx.actions.declare_directory(ctx.label.name))
else:
outputs.append(ctx.actions.declare_file("%s.js" % ctx.label.name))
if ctx.attr.sourcemap:
outputs.append(ctx.actions.declare_file("%s.js.map" % ctx.label.name))
args.add_all([s.path for s in sources])
args.add_all(["--output", outputs[0].path])
debug = ctx.attr.debug or ctx.var["COMPILATION_MODE"] == "dbg"
if debug:
args.add("--debug")
args.add("--beautify")
if ctx.attr.sourcemap:
# Source mapping options are comma-packed into one argv
# see https://github.com/terser-js/terser#command-line-usage
source_map_opts = ["includeSources", "base=" + ctx.bin_dir.path]
if len(sourcemaps) == 0:
source_map_opts.append("content=inline")
elif len(sourcemaps) == 1:
source_map_opts.append("content='%s'" % sourcemaps[0].path)
else:
fail("When sourcemap is True, there should only be one or none input sourcemaps")
# Add a comment at the end of the js output so DevTools knows where to find the sourcemap
source_map_opts.append("url='%s.js.map'" % ctx.label.name)
# This option doesn't work in the config file, only on the CLI
args.add_all(["--source-map", ",".join(source_map_opts)])
opts = ctx.actions.declare_file("_%s.minify_options.json" % ctx.label.name)
inputs.append(opts)
ctx.actions.expand_template(
template = ctx.file.config_file,
output = opts,
substitutions = {
"\"bazel_debug\"": str(debug).lower(),
"\"bazel_no_debug\"": str(not debug).lower(),
},
)
args.add_all(["--config-file", opts.path])
args.add_all(ctx.attr.args)
run_node(
ctx,
inputs = inputs,
outputs = outputs,
executable = "terser_bin",
arguments = [args],
env = {"COMPILATION_MODE": ctx.var["COMPILATION_MODE"]},
progress_message = "Minifying JavaScript %s [terser]" % (outputs[0].short_path),
)
return [
DefaultInfo(files = depset(outputs)),
]
terser_minified = rule(
doc = _DOC,
implementation = _terser,
attrs = _TERSER_ATTRS,
)
| """Rule to run the terser binary under bazel"""
load('@build_bazel_rules_nodejs//:providers.bzl', 'run_node')
_doc = 'Run the terser minifier.\n\nTypical example:\n```python\nload("//packages/terser:index.bzl", "terser_minified")\n\nterser_minified(\n name = "out.min",\n src = "input.js",\n config_file = "terser_config.json",\n)\n```\n\nNote that the `name` attribute determines what the resulting files will be called.\nSo the example above will output `out.min.js` and `out.min.js.map` (since `sourcemap` defaults to `true`).\nIf the input is a directory, then the output will also be a directory, named after the `name` attribute.\n'
_terser_attrs = {'args': attr.string_list(doc='Additional command line arguments to pass to terser.\n\nTerser only parses minify() args from the config file so additional arguments such as `--comments` may\nbe passed to the rule using this attribute. See https://github.com/terser/terser#command-line-usage for the\nfull list of terser CLI options.'), 'config_file': attr.label(doc='A JSON file containing Terser minify() options.\n\nThis is the file you would pass to the --config-file argument in terser\'s CLI.\nhttps://github.com/terser-js/terser#minify-options documents the content of the file.\n\nBazel will make a copy of your config file, treating it as a template.\n\n> Run bazel with `--subcommands` to see the path to the copied file.\n\nIf you use the magic strings `"bazel_debug"` or `"bazel_no_debug"`, these will be\nreplaced with `true` and `false` respecting the value of the `debug` attribute\nor the `--compilation_mode=dbg` bazel flag.\n\nFor example,\n\n```\n{\n "compress": {\n "arrows": "bazel_no_debug"\n }\n}\n```\nWill disable the `arrows` compression setting when debugging.\n\nIf `config_file` isn\'t supplied, Bazel will use a default config file.\n', allow_single_file=True, default=label('//packages/terser:terser_config.default.json')), 'debug': attr.bool(doc='Configure terser to produce more readable output.\n\nInstead of setting this attribute, consider using debugging compilation mode instead\nbazel build --compilation_mode=dbg //my/terser:target\nso that it only affects the current build.\n'), 'sourcemap': attr.bool(doc='Whether to produce a .js.map output', default=True), 'src': attr.label(doc='File(s) to minify.\n\nCan be a .js file, a rule producing .js files as its default output, or a rule producing a directory of .js files.\n\nNote that you can pass multiple files to terser, which it will bundle together.\nIf you want to do this, you can pass a filegroup here.', allow_files=['.js', '.map', '.mjs'], mandatory=True), 'terser_bin': 
attr.label(doc='An executable target that runs Terser', default=label('//packages/terser/bin:terser'), executable=True, cfg='host')}
def _filter_js(files):
return [f for f in files if f.is_directory or f.extension == 'js' or f.extension == 'mjs']
def _terser(ctx):
"""Generate actions to create terser config run terser"""
args = ctx.actions.args()
inputs = ctx.files.src[:]
outputs = []
sources = _filter_js(inputs)
sourcemaps = [f for f in inputs if f.extension == 'map']
directory_srcs = [s for s in sources if s.is_directory]
if len(directory_srcs) > 0:
if len(sources) > 1:
fail('When directories are passed to terser_minified, there should be only one input')
outputs.append(ctx.actions.declare_directory(ctx.label.name))
else:
outputs.append(ctx.actions.declare_file('%s.js' % ctx.label.name))
if ctx.attr.sourcemap:
outputs.append(ctx.actions.declare_file('%s.js.map' % ctx.label.name))
args.add_all([s.path for s in sources])
args.add_all(['--output', outputs[0].path])
debug = ctx.attr.debug or ctx.var['COMPILATION_MODE'] == 'dbg'
if debug:
args.add('--debug')
args.add('--beautify')
if ctx.attr.sourcemap:
source_map_opts = ['includeSources', 'base=' + ctx.bin_dir.path]
if len(sourcemaps) == 0:
source_map_opts.append('content=inline')
elif len(sourcemaps) == 1:
source_map_opts.append("content='%s'" % sourcemaps[0].path)
else:
fail('When sourcemap is True, there should only be one or none input sourcemaps')
source_map_opts.append("url='%s.js.map'" % ctx.label.name)
args.add_all(['--source-map', ','.join(source_map_opts)])
opts = ctx.actions.declare_file('_%s.minify_options.json' % ctx.label.name)
inputs.append(opts)
ctx.actions.expand_template(template=ctx.file.config_file, output=opts, substitutions={'"bazel_debug"': str(debug).lower(), '"bazel_no_debug"': str(not debug).lower()})
args.add_all(['--config-file', opts.path])
args.add_all(ctx.attr.args)
run_node(ctx, inputs=inputs, outputs=outputs, executable='terser_bin', arguments=[args], env={'COMPILATION_MODE': ctx.var['COMPILATION_MODE']}, progress_message='Minifying JavaScript %s [terser]' % outputs[0].short_path)
return [default_info(files=depset(outputs))]
terser_minified = rule(doc=_DOC, implementation=_terser, attrs=_TERSER_ATTRS) |
#
# PySNMP MIB module GSM7312-QOS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/GSM7312-QOS-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:20:03 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
gsm7312, = mibBuilder.importSymbols("GSM7312-REF-MIB", "gsm7312")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
TimeTicks, Unsigned32, Counter32, ModuleIdentity, Gauge32, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, NotificationType, IpAddress, MibIdentifier, Integer32, iso, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Unsigned32", "Counter32", "ModuleIdentity", "Gauge32", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "NotificationType", "IpAddress", "MibIdentifier", "Integer32", "iso", "Counter64")
DisplayString, TextualConvention, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "RowStatus")
gsm7312QOS = ModuleIdentity((1, 3, 6, 1, 4, 1, 4526, 1, 6, 3))
gsm7312QOS.setRevisions(('2003-05-06 12:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: gsm7312QOS.setRevisionsDescriptions(('Initial revision.',))
if mibBuilder.loadTexts: gsm7312QOS.setLastUpdated('200305061200Z')
if mibBuilder.loadTexts: gsm7312QOS.setOrganization('Netgear')
if mibBuilder.loadTexts: gsm7312QOS.setContactInfo('')
if mibBuilder.loadTexts: gsm7312QOS.setDescription('')
mibBuilder.exportSymbols("GSM7312-QOS-MIB", PYSNMP_MODULE_ID=gsm7312QOS, gsm7312QOS=gsm7312QOS)
| (integer, octet_string, object_identifier) = mibBuilder.importSymbols('ASN1', 'Integer', 'OctetString', 'ObjectIdentifier')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(value_range_constraint, value_size_constraint, constraints_union, constraints_intersection, single_value_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ValueRangeConstraint', 'ValueSizeConstraint', 'ConstraintsUnion', 'ConstraintsIntersection', 'SingleValueConstraint')
(gsm7312,) = mibBuilder.importSymbols('GSM7312-REF-MIB', 'gsm7312')
(module_compliance, notification_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'NotificationGroup')
(time_ticks, unsigned32, counter32, module_identity, gauge32, bits, mib_scalar, mib_table, mib_table_row, mib_table_column, object_identity, notification_type, ip_address, mib_identifier, integer32, iso, counter64) = mibBuilder.importSymbols('SNMPv2-SMI', 'TimeTicks', 'Unsigned32', 'Counter32', 'ModuleIdentity', 'Gauge32', 'Bits', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'ObjectIdentity', 'NotificationType', 'IpAddress', 'MibIdentifier', 'Integer32', 'iso', 'Counter64')
(display_string, textual_convention, row_status) = mibBuilder.importSymbols('SNMPv2-TC', 'DisplayString', 'TextualConvention', 'RowStatus')
gsm7312_qos = module_identity((1, 3, 6, 1, 4, 1, 4526, 1, 6, 3))
gsm7312QOS.setRevisions(('2003-05-06 12:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts:
gsm7312QOS.setRevisionsDescriptions(('Initial revision.',))
if mibBuilder.loadTexts:
gsm7312QOS.setLastUpdated('200305061200Z')
if mibBuilder.loadTexts:
gsm7312QOS.setOrganization('Netgear')
if mibBuilder.loadTexts:
gsm7312QOS.setContactInfo('')
if mibBuilder.loadTexts:
gsm7312QOS.setDescription('')
mibBuilder.exportSymbols('GSM7312-QOS-MIB', PYSNMP_MODULE_ID=gsm7312QOS, gsm7312QOS=gsm7312QOS) |
'''
You are given an array points where points[i] = [xi, yi] is the coordinates of the ith point on a 2D plane. Multiple points can have the same coordinates.
You are also given an array queries where queries[j] = [xj, yj, rj] describes a circle centered at (xj, yj) with a radius of rj.
For each query queries[j], compute the number of points inside the jth circle. Points on the border of the circle are considered inside.
Return an array answer, where answer[j] is the answer to the jth query.
'''
class Solution:
def countPoints(self, points: List[List[int]], queries: List[List[int]]) -> List[int]:
def calc_distance(xi, yi, xj, yj):
return ((xj - xi) ** 2 + (yj - yi) ** 2) ** 0.5
answer = []
for i in queries:
c = 0
for j in points:
if calc_distance(i[0], i[1], j[0], j[1]) <= i[2]: c += 1
answer.append(c)
return answer
| """
You are given an array points where points[i] = [xi, yi] is the coordinates of the ith point on a 2D plane. Multiple points can have the same coordinates.
You are also given an array queries where queries[j] = [xj, yj, rj] describes a circle centered at (xj, yj) with a radius of rj.
For each query queries[j], compute the number of points inside the jth circle. Points on the border of the circle are considered inside.
Return an array answer, where answer[j] is the answer to the jth query.
"""
class Solution:
def count_points(self, points: List[List[int]], queries: List[List[int]]) -> List[int]:
def calc_distance(xi, yi, xj, yj):
return ((xj - xi) ** 2 + (yj - yi) ** 2) ** 0.5
answer = []
for i in queries:
c = 0
for j in points:
if calc_distance(i[0], i[1], j[0], j[1]) <= i[2]:
c += 1
answer.append(c)
return answer |
class Solution(object):
def tribonacci(self, n):
"""
:type n: int
:rtype: int
"""
if n <= 1:
return n
dp = [0] * (n + 1)
dp[0] = 0
dp[1] = 1
dp[2] = 1
for i in range(3, n + 1):
dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]
return dp[-1] | class Solution(object):
def tribonacci(self, n):
"""
:type n: int
:rtype: int
"""
if n <= 1:
return n
dp = [0] * (n + 1)
dp[0] = 0
dp[1] = 1
dp[2] = 1
for i in range(3, n + 1):
dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]
return dp[-1] |
def somaAll(x,y):
s = c = 0
l = [x]
while x < y:
s = x + 1
l.append(s)
x+=1
s=0
while c <len(l):
s += l[c]
c+=1
return s
def somaintervalo(ini,fim):
c=ini
s=0
while(c<=fim):
s+=c
c+=1
return s
print(somaAll(1,3))
| def soma_all(x, y):
s = c = 0
l = [x]
while x < y:
s = x + 1
l.append(s)
x += 1
s = 0
while c < len(l):
s += l[c]
c += 1
return s
def somaintervalo(ini, fim):
c = ini
s = 0
while c <= fim:
s += c
c += 1
return s
print(soma_all(1, 3)) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Fibonacci:
def __init__(self, max_n):
self.MaxN = max_n
self.N = 0
self.A = 0
self.B = 0
def __iter__(self):
self.N = 0
self.A = 0
self.B = 1
return self
def __next__(self):
if self.N < self.MaxN:
self.N += 1
self.A, self.B = self.B, self.A + self.B
return self.A
else:
raise StopIteration
for f in Fibonacci(14):
print(f, end=" ")
print()
print(list(Fibonacci(16)))
| class Fibonacci:
def __init__(self, max_n):
self.MaxN = max_n
self.N = 0
self.A = 0
self.B = 0
def __iter__(self):
self.N = 0
self.A = 0
self.B = 1
return self
def __next__(self):
if self.N < self.MaxN:
self.N += 1
(self.A, self.B) = (self.B, self.A + self.B)
return self.A
else:
raise StopIteration
for f in fibonacci(14):
print(f, end=' ')
print()
print(list(fibonacci(16))) |
def addr(regs, a, b, c):
result = regs[:]
result[c] = regs[a] + regs[b]
return result
def addi(regs, a, b, c):
result = regs[:]
result[c] = regs[a] + b
return result
def mulr(regs, a, b, c):
result = regs[:]
result[c] = regs[a] * regs[b]
return result
def muli(regs, a, b, c):
result = regs[:]
result[c] = regs[a] * b
return result
def banr(regs, a, b, c):
result = regs[:]
result[c] = regs[a] & regs[b]
return result
def bani(regs, a, b, c):
result = regs[:]
result[c] = regs[a] & b
return result
def borr(regs, a, b, c):
result = regs[:]
result[c] = regs[a] | regs[b]
return result
def bori(regs, a, b, c):
result = regs[:]
result[c] = regs[a] | b
return result
def setr(regs, a, b, c):
result = regs[:]
result[c] = regs[a]
return result
def seti(regs, a, b, c):
result = regs[:]
result[c] = a
return result
def gtir(regs, a, b, c):
result = regs[:]
if a > regs[b]:
result[c] = 1
else:
result[c] = 0
return result
def gtri(regs, a, b, c):
result = regs[:]
if regs[a] > b:
result[c] = 1
else:
result[c] = 0
return result
def gtrr(regs, a, b, c):
result = regs[:]
if regs[a] > regs[b]:
result[c] = 1
else:
result[c] = 0
return result
def eqir(regs, a, b, c):
result = regs[:]
if a == regs[b]:
result[c] = 1
else:
result[c] = 0
return result
def eqri(regs, a, b, c):
result = regs[:]
if regs[a] == b:
result[c] = 1
else:
result[c] = 0
return result
def eqrr(regs, a, b, c):
result = regs[:]
if regs[a] == regs[b]:
result[c] = 1
else:
result[c] = 0
return result
ops = {
"addr": addr,
"addi": addi,
"mulr": mulr,
"muli": muli,
"banr": banr,
"bani": bani,
"borr": borr,
"bori": bori,
"setr": setr,
"seti": seti,
"gtir": gtir,
"gtri": gtri,
"gtrr": gtrr,
"eqir": eqir,
"eqri": eqri,
"eqrr": eqrr,
}
def solve(input):
registers = [0, 0, 0, 0, 0, 0]
parts = input[0].split()
ip_reg = int(parts[1])
ip = 0
instructions = []
for line in input[1:]:
instruction = line.split()
instructions.append((instruction[0], int(instruction[1]), int(instruction[2]), int(instruction[3])))
iterations = 0
while True:
instruction = instructions[ip]
op = ops[instruction[0]]
registers[ip_reg] = ip
after = op(registers, *instruction[1:])
# print(iterations, ip, registers, instruction, after)
registers = after
ip = registers[ip_reg] + 1
if ip < 0 or ip >= len(instructions):
break
# if iterations > 500:
# break
iterations += 1
print(registers[0])
# with open('test.txt', 'r') as f:
# input = f.read().splitlines()
# solve(input)
with open('input.txt', 'r') as f:
input = f.read().splitlines()
solve(input)
| def addr(regs, a, b, c):
result = regs[:]
result[c] = regs[a] + regs[b]
return result
def addi(regs, a, b, c):
result = regs[:]
result[c] = regs[a] + b
return result
def mulr(regs, a, b, c):
result = regs[:]
result[c] = regs[a] * regs[b]
return result
def muli(regs, a, b, c):
result = regs[:]
result[c] = regs[a] * b
return result
def banr(regs, a, b, c):
result = regs[:]
result[c] = regs[a] & regs[b]
return result
def bani(regs, a, b, c):
result = regs[:]
result[c] = regs[a] & b
return result
def borr(regs, a, b, c):
result = regs[:]
result[c] = regs[a] | regs[b]
return result
def bori(regs, a, b, c):
result = regs[:]
result[c] = regs[a] | b
return result
def setr(regs, a, b, c):
result = regs[:]
result[c] = regs[a]
return result
def seti(regs, a, b, c):
result = regs[:]
result[c] = a
return result
def gtir(regs, a, b, c):
result = regs[:]
if a > regs[b]:
result[c] = 1
else:
result[c] = 0
return result
def gtri(regs, a, b, c):
result = regs[:]
if regs[a] > b:
result[c] = 1
else:
result[c] = 0
return result
def gtrr(regs, a, b, c):
result = regs[:]
if regs[a] > regs[b]:
result[c] = 1
else:
result[c] = 0
return result
def eqir(regs, a, b, c):
result = regs[:]
if a == regs[b]:
result[c] = 1
else:
result[c] = 0
return result
def eqri(regs, a, b, c):
result = regs[:]
if regs[a] == b:
result[c] = 1
else:
result[c] = 0
return result
def eqrr(regs, a, b, c):
result = regs[:]
if regs[a] == regs[b]:
result[c] = 1
else:
result[c] = 0
return result
ops = {'addr': addr, 'addi': addi, 'mulr': mulr, 'muli': muli, 'banr': banr, 'bani': bani, 'borr': borr, 'bori': bori, 'setr': setr, 'seti': seti, 'gtir': gtir, 'gtri': gtri, 'gtrr': gtrr, 'eqir': eqir, 'eqri': eqri, 'eqrr': eqrr}
def solve(input):
registers = [0, 0, 0, 0, 0, 0]
parts = input[0].split()
ip_reg = int(parts[1])
ip = 0
instructions = []
for line in input[1:]:
instruction = line.split()
instructions.append((instruction[0], int(instruction[1]), int(instruction[2]), int(instruction[3])))
iterations = 0
while True:
instruction = instructions[ip]
op = ops[instruction[0]]
registers[ip_reg] = ip
after = op(registers, *instruction[1:])
registers = after
ip = registers[ip_reg] + 1
if ip < 0 or ip >= len(instructions):
break
iterations += 1
print(registers[0])
with open('input.txt', 'r') as f:
input = f.read().splitlines()
solve(input) |
n = int(input())
m = int(input())
dif = n - m
print(dif)
| n = int(input())
m = int(input())
dif = n - m
print(dif) |
def alphabetically_first(s1, s2):
sorted_list = sorted(list([s1, s2]))
return sorted_list[0]
| def alphabetically_first(s1, s2):
sorted_list = sorted(list([s1, s2]))
return sorted_list[0] |
class Solution:
def slowestKey(self, releaseTimes: List[int], keysPressed: str) -> str:
pressed=[0]*26
lastRelease=0
for i in range(len(releaseTimes)):
index=ord(keysPressed[i])-ord('a')
pressed[index]=max(pressed[index],releaseTimes[i]-lastRelease)
lastRelease=releaseTimes[i]
maxPressed=max(pressed)
for i in range(25,-1,-1):
if pressed[i]==maxPressed:
return chr(i+ord('a')) | class Solution:
def slowest_key(self, releaseTimes: List[int], keysPressed: str) -> str:
pressed = [0] * 26
last_release = 0
for i in range(len(releaseTimes)):
index = ord(keysPressed[i]) - ord('a')
pressed[index] = max(pressed[index], releaseTimes[i] - lastRelease)
last_release = releaseTimes[i]
max_pressed = max(pressed)
for i in range(25, -1, -1):
if pressed[i] == maxPressed:
return chr(i + ord('a')) |
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
class DigitsError(Exception):
"""
DIGITS custom exception
"""
pass
class DeleteError(DigitsError):
"""
Errors that occur when deleting a job
"""
pass
class LoadImageError(DigitsError):
"""
Errors that occur while loading an image
"""
pass
| class Digitserror(Exception):
"""
DIGITS custom exception
"""
pass
class Deleteerror(DigitsError):
"""
Errors that occur when deleting a job
"""
pass
class Loadimageerror(DigitsError):
"""
Errors that occur while loading an image
"""
pass |
class TestApi:
def test_api_with_valid_url_bitly(self, post):
# when we ask to shorten a valid url
response = post(
'/shortlinks',
data={'url': 'https://www.withnottplum.com', 'provider': 'bitly'}
)
# then
assert response.status_code == 200
assert response.get_json()['data']['url'] == 'https://www.withnottplum.com'
assert response.get_json()['data']['short_link'].startswith('https://bit.ly')
def test_api_with_valid_url_tinyurl(self, post):
# when we ask to shorten a valid url
response = post(
'/shortlinks',
data={'url': 'https://www.pekosestate.com', 'provider': 'tinyurl'}
)
# then
assert response.status_code == 200
assert response.get_json()['data']['url'] == 'https://www.pekosestate.com'
assert response.get_json()['data']['short_link'].startswith('https://tinyurl.com')
def test_api_with_invalid_url_bitly(self, post):
# when we ask to shorten a valid url
response = post(
'/shortlinks',
data={'url': 'https://www.pekos estate'}
)
assert response.status_code == 422
assert response.get_json()['error']['message'] == 'Malformed URL'
def test_api_default_provider(self, post):
# when we ask to shorten a valid url
response = post(
'/shortlinks',
data={'url': 'https://www.withplum.com'}
)
assert response.status_code == 200
assert response.get_json()['data']['short_link'].startswith('https://bit.ly')
| class Testapi:
def test_api_with_valid_url_bitly(self, post):
response = post('/shortlinks', data={'url': 'https://www.withnottplum.com', 'provider': 'bitly'})
assert response.status_code == 200
assert response.get_json()['data']['url'] == 'https://www.withnottplum.com'
assert response.get_json()['data']['short_link'].startswith('https://bit.ly')
def test_api_with_valid_url_tinyurl(self, post):
response = post('/shortlinks', data={'url': 'https://www.pekosestate.com', 'provider': 'tinyurl'})
assert response.status_code == 200
assert response.get_json()['data']['url'] == 'https://www.pekosestate.com'
assert response.get_json()['data']['short_link'].startswith('https://tinyurl.com')
def test_api_with_invalid_url_bitly(self, post):
response = post('/shortlinks', data={'url': 'https://www.pekos estate'})
assert response.status_code == 422
assert response.get_json()['error']['message'] == 'Malformed URL'
def test_api_default_provider(self, post):
response = post('/shortlinks', data={'url': 'https://www.withplum.com'})
assert response.status_code == 200
assert response.get_json()['data']['short_link'].startswith('https://bit.ly') |
def classify_numbers(numbers):
pos = [n for n in numbers if n >= 0]
neg = [n for n in numbers if n < 0]
even = [n for n in numbers if n % 2 == 0]
odd = [n for n in numbers if n % 2 != 0]
return pos, neg, even, odd
pos, neg, even, odd = classify_numbers([int(x) for x in input().split(', ')])
print(f'Positive: {", ".join([str(x) for x in pos])}')
print(f'Negative: {", ".join([str(x) for x in neg])}')
print(f'Even: {", ".join([str(x) for x in even])}')
print(f'Odd: {", ".join([str(x) for x in odd])}')
| def classify_numbers(numbers):
pos = [n for n in numbers if n >= 0]
neg = [n for n in numbers if n < 0]
even = [n for n in numbers if n % 2 == 0]
odd = [n for n in numbers if n % 2 != 0]
return (pos, neg, even, odd)
(pos, neg, even, odd) = classify_numbers([int(x) for x in input().split(', ')])
print(f"Positive: {', '.join([str(x) for x in pos])}")
print(f"Negative: {', '.join([str(x) for x in neg])}")
print(f"Even: {', '.join([str(x) for x in even])}")
print(f"Odd: {', '.join([str(x) for x in odd])}") |
#!/usr/bin/python3
k, t = list(map(int, input().split()))
a = t // k
result = 0
if (a % 2 == 0):
result = t % k
else:
# print("k - t%k + 1", k, t)
result = k - t % k
print(result) | (k, t) = list(map(int, input().split()))
a = t // k
result = 0
if a % 2 == 0:
result = t % k
else:
result = k - t % k
print(result) |
class Solution:
def sortArray(self, nums: List[int]) -> List[int]:
def quicksort(left,right):
if left>=right:return
pivot = left
i=j=pivot+1
while j<=right:
if nums[j]<nums[pivot]:
nums[i],nums[j] = nums[j],nums[i]
i+=1
j+=1
nums[pivot],nums[i-1] = nums[i-1],nums[pivot]
quicksort(left,i-1)
quicksort(i,right)
quicksort(0,len(nums)-1)
return nums
| class Solution:
def sort_array(self, nums: List[int]) -> List[int]:
def quicksort(left, right):
if left >= right:
return
pivot = left
i = j = pivot + 1
while j <= right:
if nums[j] < nums[pivot]:
(nums[i], nums[j]) = (nums[j], nums[i])
i += 1
j += 1
(nums[pivot], nums[i - 1]) = (nums[i - 1], nums[pivot])
quicksort(left, i - 1)
quicksort(i, right)
quicksort(0, len(nums) - 1)
return nums |
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 09.04.2018 14:18
:Licence MIT
Part of grammpy
"""
| """
:Author Patrik Valkovic
:Created 09.04.2018 14:18
:Licence MIT
Part of grammpy
""" |
""" Encoding Aliases Support
This module is used by the encodings package search function to
map encodings names to module names.
Note that the search function normalizes the encoding names before
doing the lookup, so the mapping will have to map normalized
encoding names to module names.
Contents:
The following aliases dictionary contains mappings of all IANA
character set names for which the Python core library provides
codecs. In addition to these, a few Python specific codec
aliases have also been added.
"""
aliases = {
# Please keep this list sorted alphabetically by value !
# ascii codec
'646' : 'ascii',
'ansi_x3.4_1968' : 'ascii',
'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name
'ansi_x3.4_1986' : 'ascii',
'cp367' : 'ascii',
'csascii' : 'ascii',
'ibm367' : 'ascii',
'iso646_us' : 'ascii',
'iso_646.irv_1991' : 'ascii',
'iso_ir_6' : 'ascii',
'us' : 'ascii',
'us_ascii' : 'ascii',
# base64_codec codec
'base64' : 'base64_codec',
'base_64' : 'base64_codec',
# big5 codec
'big5_tw' : 'big5',
'csbig5' : 'big5',
# big5hkscs codec
'big5_hkscs' : 'big5hkscs',
'hkscs' : 'big5hkscs',
# bz2_codec codec
'bz2' : 'bz2_codec',
# cp037 codec
'037' : 'cp037',
'csibm037' : 'cp037',
'ebcdic_cp_ca' : 'cp037',
'ebcdic_cp_nl' : 'cp037',
'ebcdic_cp_us' : 'cp037',
'ebcdic_cp_wt' : 'cp037',
'ibm037' : 'cp037',
'ibm039' : 'cp037',
# cp1026 codec
'1026' : 'cp1026',
'csibm1026' : 'cp1026',
'ibm1026' : 'cp1026',
# cp1125 codec
'1125' : 'cp1125',
'ibm1125' : 'cp1125',
'cp866u' : 'cp1125',
'ruscii' : 'cp1125',
# cp1140 codec
'1140' : 'cp1140',
'ibm1140' : 'cp1140',
# cp1250 codec
'1250' : 'cp1250',
'windows_1250' : 'cp1250',
# cp1251 codec
'1251' : 'cp1251',
'windows_1251' : 'cp1251',
# cp1252 codec
'1252' : 'cp1252',
'windows_1252' : 'cp1252',
# cp1253 codec
'1253' : 'cp1253',
'windows_1253' : 'cp1253',
# cp1254 codec
'1254' : 'cp1254',
'windows_1254' : 'cp1254',
# cp1255 codec
'1255' : 'cp1255',
'windows_1255' : 'cp1255',
# cp1256 codec
'1256' : 'cp1256',
'windows_1256' : 'cp1256',
# cp1257 codec
'1257' : 'cp1257',
'windows_1257' : 'cp1257',
# cp1258 codec
'1258' : 'cp1258',
'windows_1258' : 'cp1258',
# cp273 codec
'273' : 'cp273',
'ibm273' : 'cp273',
'csibm273' : 'cp273',
# cp424 codec
'424' : 'cp424',
'csibm424' : 'cp424',
'ebcdic_cp_he' : 'cp424',
'ibm424' : 'cp424',
# cp437 codec
'437' : 'cp437',
'cspc8codepage437' : 'cp437',
'ibm437' : 'cp437',
# cp500 codec
'500' : 'cp500',
'csibm500' : 'cp500',
'ebcdic_cp_be' : 'cp500',
'ebcdic_cp_ch' : 'cp500',
'ibm500' : 'cp500',
# cp775 codec
'775' : 'cp775',
'cspc775baltic' : 'cp775',
'ibm775' : 'cp775',
# cp850 codec
'850' : 'cp850',
'cspc850multilingual' : 'cp850',
'ibm850' : 'cp850',
# cp852 codec
'852' : 'cp852',
'cspcp852' : 'cp852',
'ibm852' : 'cp852',
# cp855 codec
'855' : 'cp855',
'csibm855' : 'cp855',
'ibm855' : 'cp855',
# cp857 codec
'857' : 'cp857',
'csibm857' : 'cp857',
'ibm857' : 'cp857',
# cp858 codec
'858' : 'cp858',
'csibm858' : 'cp858',
'ibm858' : 'cp858',
# cp860 codec
'860' : 'cp860',
'csibm860' : 'cp860',
'ibm860' : 'cp860',
# cp861 codec
'861' : 'cp861',
'cp_is' : 'cp861',
'csibm861' : 'cp861',
'ibm861' : 'cp861',
# cp862 codec
'862' : 'cp862',
'cspc862latinhebrew' : 'cp862',
'ibm862' : 'cp862',
# cp863 codec
'863' : 'cp863',
'csibm863' : 'cp863',
'ibm863' : 'cp863',
# cp864 codec
'864' : 'cp864',
'csibm864' : 'cp864',
'ibm864' : 'cp864',
# cp865 codec
'865' : 'cp865',
'csibm865' : 'cp865',
'ibm865' : 'cp865',
# cp866 codec
'866' : 'cp866',
'csibm866' : 'cp866',
'ibm866' : 'cp866',
# cp869 codec
'869' : 'cp869',
'cp_gr' : 'cp869',
'csibm869' : 'cp869',
'ibm869' : 'cp869',
# cp932 codec
'932' : 'cp932',
'ms932' : 'cp932',
'mskanji' : 'cp932',
'ms_kanji' : 'cp932',
# cp949 codec
'949' : 'cp949',
'ms949' : 'cp949',
'uhc' : 'cp949',
# cp950 codec
'950' : 'cp950',
'ms950' : 'cp950',
# euc_jis_2004 codec
'jisx0213' : 'euc_jis_2004',
'eucjis2004' : 'euc_jis_2004',
'euc_jis2004' : 'euc_jis_2004',
# euc_jisx0213 codec
'eucjisx0213' : 'euc_jisx0213',
# euc_jp codec
'eucjp' : 'euc_jp',
'ujis' : 'euc_jp',
'u_jis' : 'euc_jp',
# euc_kr codec
'euckr' : 'euc_kr',
'korean' : 'euc_kr',
'ksc5601' : 'euc_kr',
'ks_c_5601' : 'euc_kr',
'ks_c_5601_1987' : 'euc_kr',
'ksx1001' : 'euc_kr',
'ks_x_1001' : 'euc_kr',
# gb18030 codec
'gb18030_2000' : 'gb18030',
# gb2312 codec
'chinese' : 'gb2312',
'csiso58gb231280' : 'gb2312',
'euc_cn' : 'gb2312',
'euccn' : 'gb2312',
'eucgb2312_cn' : 'gb2312',
'gb2312_1980' : 'gb2312',
'gb2312_80' : 'gb2312',
'iso_ir_58' : 'gb2312',
# gbk codec
'936' : 'gbk',
'cp936' : 'gbk',
'ms936' : 'gbk',
# hex_codec codec
'hex' : 'hex_codec',
# hp_roman8 codec
'roman8' : 'hp_roman8',
'r8' : 'hp_roman8',
'csHPRoman8' : 'hp_roman8',
# hz codec
'hzgb' : 'hz',
'hz_gb' : 'hz',
'hz_gb_2312' : 'hz',
# iso2022_jp codec
'csiso2022jp' : 'iso2022_jp',
'iso2022jp' : 'iso2022_jp',
'iso_2022_jp' : 'iso2022_jp',
# iso2022_jp_1 codec
'iso2022jp_1' : 'iso2022_jp_1',
'iso_2022_jp_1' : 'iso2022_jp_1',
# iso2022_jp_2 codec
'iso2022jp_2' : 'iso2022_jp_2',
'iso_2022_jp_2' : 'iso2022_jp_2',
# iso2022_jp_2004 codec
'iso_2022_jp_2004' : 'iso2022_jp_2004',
'iso2022jp_2004' : 'iso2022_jp_2004',
# iso2022_jp_3 codec
'iso2022jp_3' : 'iso2022_jp_3',
'iso_2022_jp_3' : 'iso2022_jp_3',
# iso2022_jp_ext codec
'iso2022jp_ext' : 'iso2022_jp_ext',
'iso_2022_jp_ext' : 'iso2022_jp_ext',
# iso2022_kr codec
'csiso2022kr' : 'iso2022_kr',
'iso2022kr' : 'iso2022_kr',
'iso_2022_kr' : 'iso2022_kr',
# iso8859_10 codec
'csisolatin6' : 'iso8859_10',
'iso_8859_10' : 'iso8859_10',
'iso_8859_10_1992' : 'iso8859_10',
'iso_ir_157' : 'iso8859_10',
'l6' : 'iso8859_10',
'latin6' : 'iso8859_10',
# iso8859_11 codec
'thai' : 'iso8859_11',
'iso_8859_11' : 'iso8859_11',
'iso_8859_11_2001' : 'iso8859_11',
# iso8859_13 codec
'iso_8859_13' : 'iso8859_13',
'l7' : 'iso8859_13',
'latin7' : 'iso8859_13',
# iso8859_14 codec
'iso_8859_14' : 'iso8859_14',
'iso_8859_14_1998' : 'iso8859_14',
'iso_celtic' : 'iso8859_14',
'iso_ir_199' : 'iso8859_14',
'l8' : 'iso8859_14',
'latin8' : 'iso8859_14',
# iso8859_15 codec
'iso_8859_15' : 'iso8859_15',
'l9' : 'iso8859_15',
'latin9' : 'iso8859_15',
# iso8859_16 codec
'iso_8859_16' : 'iso8859_16',
'iso_8859_16_2001' : 'iso8859_16',
'iso_ir_226' : 'iso8859_16',
'l10' : 'iso8859_16',
'latin10' : 'iso8859_16',
# iso8859_2 codec
'csisolatin2' : 'iso8859_2',
'iso_8859_2' : 'iso8859_2',
'iso_8859_2_1987' : 'iso8859_2',
'iso_ir_101' : 'iso8859_2',
'l2' : 'iso8859_2',
'latin2' : 'iso8859_2',
# iso8859_3 codec
'csisolatin3' : 'iso8859_3',
'iso_8859_3' : 'iso8859_3',
'iso_8859_3_1988' : 'iso8859_3',
'iso_ir_109' : 'iso8859_3',
'l3' : 'iso8859_3',
'latin3' : 'iso8859_3',
# iso8859_4 codec
'csisolatin4' : 'iso8859_4',
'iso_8859_4' : 'iso8859_4',
'iso_8859_4_1988' : 'iso8859_4',
'iso_ir_110' : 'iso8859_4',
'l4' : 'iso8859_4',
'latin4' : 'iso8859_4',
# iso8859_5 codec
'csisolatincyrillic' : 'iso8859_5',
'cyrillic' : 'iso8859_5',
'iso_8859_5' : 'iso8859_5',
'iso_8859_5_1988' : 'iso8859_5',
'iso_ir_144' : 'iso8859_5',
# iso8859_6 codec
'arabic' : 'iso8859_6',
'asmo_708' : 'iso8859_6',
'csisolatinarabic' : 'iso8859_6',
'ecma_114' : 'iso8859_6',
'iso_8859_6' : 'iso8859_6',
'iso_8859_6_1987' : 'iso8859_6',
'iso_ir_127' : 'iso8859_6',
# iso8859_7 codec
'csisolatingreek' : 'iso8859_7',
'ecma_118' : 'iso8859_7',
'elot_928' : 'iso8859_7',
'greek' : 'iso8859_7',
'greek8' : 'iso8859_7',
'iso_8859_7' : 'iso8859_7',
'iso_8859_7_1987' : 'iso8859_7',
'iso_ir_126' : 'iso8859_7',
# iso8859_8 codec
'csisolatinhebrew' : 'iso8859_8',
'hebrew' : 'iso8859_8',
'iso_8859_8' : 'iso8859_8',
'iso_8859_8_1988' : 'iso8859_8',
'iso_ir_138' : 'iso8859_8',
# iso8859_9 codec
'csisolatin5' : 'iso8859_9',
'iso_8859_9' : 'iso8859_9',
'iso_8859_9_1989' : 'iso8859_9',
'iso_ir_148' : 'iso8859_9',
'l5' : 'iso8859_9',
'latin5' : 'iso8859_9',
# johab codec
'cp1361' : 'johab',
'ms1361' : 'johab',
# koi8_r codec
'cskoi8r' : 'koi8_r',
# kz1048 codec
'kz_1048' : 'kz1048',
'rk1048' : 'kz1048',
'strk1048_2002' : 'kz1048',
# latin_1 codec
#
# Note that the latin_1 codec is implemented internally in C and a
# lot faster than the charmap codec iso8859_1 which uses the same
# encoding. This is why we discourage the use of the iso8859_1
# codec and alias it to latin_1 instead.
#
'8859' : 'latin_1',
'cp819' : 'latin_1',
'csisolatin1' : 'latin_1',
'ibm819' : 'latin_1',
'iso8859' : 'latin_1',
'iso8859_1' : 'latin_1',
'iso_8859_1' : 'latin_1',
'iso_8859_1_1987' : 'latin_1',
'iso_ir_100' : 'latin_1',
'l1' : 'latin_1',
'latin' : 'latin_1',
'latin1' : 'latin_1',
# mac_cyrillic codec
'maccyrillic' : 'mac_cyrillic',
# mac_greek codec
'macgreek' : 'mac_greek',
# mac_iceland codec
'maciceland' : 'mac_iceland',
# mac_latin2 codec
'maccentraleurope' : 'mac_latin2',
'maclatin2' : 'mac_latin2',
# mac_roman codec
'macintosh' : 'mac_roman',
'macroman' : 'mac_roman',
# mac_turkish codec
'macturkish' : 'mac_turkish',
# mbcs codec
'dbcs' : 'mbcs',
# ptcp154 codec
'csptcp154' : 'ptcp154',
'pt154' : 'ptcp154',
'cp154' : 'ptcp154',
'cyrillic_asian' : 'ptcp154',
# quopri_codec codec
'quopri' : 'quopri_codec',
'quoted_printable' : 'quopri_codec',
'quotedprintable' : 'quopri_codec',
# rot_13 codec
'rot13' : 'rot_13',
# shift_jis codec
'csshiftjis' : 'shift_jis',
'shiftjis' : 'shift_jis',
'sjis' : 'shift_jis',
's_jis' : 'shift_jis',
# shift_jis_2004 codec
'shiftjis2004' : 'shift_jis_2004',
'sjis_2004' : 'shift_jis_2004',
's_jis_2004' : 'shift_jis_2004',
# shift_jisx0213 codec
'shiftjisx0213' : 'shift_jisx0213',
'sjisx0213' : 'shift_jisx0213',
's_jisx0213' : 'shift_jisx0213',
# tactis codec
'tis260' : 'tactis',
# tis_620 codec
'tis620' : 'tis_620',
'tis_620_0' : 'tis_620',
'tis_620_2529_0' : 'tis_620',
'tis_620_2529_1' : 'tis_620',
'iso_ir_166' : 'tis_620',
# utf_16 codec
'u16' : 'utf_16',
'utf16' : 'utf_16',
# utf_16_be codec
'unicodebigunmarked' : 'utf_16_be',
'utf_16be' : 'utf_16_be',
# utf_16_le codec
'unicodelittleunmarked' : 'utf_16_le',
'utf_16le' : 'utf_16_le',
# utf_32 codec
'u32' : 'utf_32',
'utf32' : 'utf_32',
# utf_32_be codec
'utf_32be' : 'utf_32_be',
# utf_32_le codec
'utf_32le' : 'utf_32_le',
# utf_7 codec
'u7' : 'utf_7',
'utf7' : 'utf_7',
'unicode_1_1_utf_7' : 'utf_7',
# utf_8 codec
'u8' : 'utf_8',
'utf' : 'utf_8',
'utf8' : 'utf_8',
'utf8_ucs2' : 'utf_8',
'utf8_ucs4' : 'utf_8',
# uu_codec codec
'uu' : 'uu_codec',
# zlib_codec codec
'zip' : 'zlib_codec',
'zlib' : 'zlib_codec',
# temporary mac CJK aliases, will be replaced by proper codecs in 3.1
'x_mac_japanese' : 'shift_jis',
'x_mac_korean' : 'euc_kr',
'x_mac_simp_chinese' : 'gb2312',
'x_mac_trad_chinese' : 'big5',
}
| """ Encoding Aliases Support
This module is used by the encodings package search function to
map encodings names to module names.
Note that the search function normalizes the encoding names before
doing the lookup, so the mapping will have to map normalized
encoding names to module names.
Contents:
The following aliases dictionary contains mappings of all IANA
character set names for which the Python core library provides
codecs. In addition to these, a few Python specific codec
aliases have also been added.
"""
aliases = {'646': 'ascii', 'ansi_x3.4_1968': 'ascii', 'ansi_x3_4_1968': 'ascii', 'ansi_x3.4_1986': 'ascii', 'cp367': 'ascii', 'csascii': 'ascii', 'ibm367': 'ascii', 'iso646_us': 'ascii', 'iso_646.irv_1991': 'ascii', 'iso_ir_6': 'ascii', 'us': 'ascii', 'us_ascii': 'ascii', 'base64': 'base64_codec', 'base_64': 'base64_codec', 'big5_tw': 'big5', 'csbig5': 'big5', 'big5_hkscs': 'big5hkscs', 'hkscs': 'big5hkscs', 'bz2': 'bz2_codec', '037': 'cp037', 'csibm037': 'cp037', 'ebcdic_cp_ca': 'cp037', 'ebcdic_cp_nl': 'cp037', 'ebcdic_cp_us': 'cp037', 'ebcdic_cp_wt': 'cp037', 'ibm037': 'cp037', 'ibm039': 'cp037', '1026': 'cp1026', 'csibm1026': 'cp1026', 'ibm1026': 'cp1026', '1125': 'cp1125', 'ibm1125': 'cp1125', 'cp866u': 'cp1125', 'ruscii': 'cp1125', '1140': 'cp1140', 'ibm1140': 'cp1140', '1250': 'cp1250', 'windows_1250': 'cp1250', '1251': 'cp1251', 'windows_1251': 'cp1251', '1252': 'cp1252', 'windows_1252': 'cp1252', '1253': 'cp1253', 'windows_1253': 'cp1253', '1254': 'cp1254', 'windows_1254': 'cp1254', '1255': 'cp1255', 'windows_1255': 'cp1255', '1256': 'cp1256', 'windows_1256': 'cp1256', '1257': 'cp1257', 'windows_1257': 'cp1257', '1258': 'cp1258', 'windows_1258': 'cp1258', '273': 'cp273', 'ibm273': 'cp273', 'csibm273': 'cp273', '424': 'cp424', 'csibm424': 'cp424', 'ebcdic_cp_he': 'cp424', 'ibm424': 'cp424', '437': 'cp437', 'cspc8codepage437': 'cp437', 'ibm437': 'cp437', '500': 'cp500', 'csibm500': 'cp500', 'ebcdic_cp_be': 'cp500', 'ebcdic_cp_ch': 'cp500', 'ibm500': 'cp500', '775': 'cp775', 'cspc775baltic': 'cp775', 'ibm775': 'cp775', '850': 'cp850', 'cspc850multilingual': 'cp850', 'ibm850': 'cp850', '852': 'cp852', 'cspcp852': 'cp852', 'ibm852': 'cp852', '855': 'cp855', 'csibm855': 'cp855', 'ibm855': 'cp855', '857': 'cp857', 'csibm857': 'cp857', 'ibm857': 'cp857', '858': 'cp858', 'csibm858': 'cp858', 'ibm858': 'cp858', '860': 'cp860', 'csibm860': 'cp860', 'ibm860': 'cp860', '861': 'cp861', 'cp_is': 'cp861', 'csibm861': 'cp861', 'ibm861': 'cp861', '862': 'cp862', 
'cspc862latinhebrew': 'cp862', 'ibm862': 'cp862', '863': 'cp863', 'csibm863': 'cp863', 'ibm863': 'cp863', '864': 'cp864', 'csibm864': 'cp864', 'ibm864': 'cp864', '865': 'cp865', 'csibm865': 'cp865', 'ibm865': 'cp865', '866': 'cp866', 'csibm866': 'cp866', 'ibm866': 'cp866', '869': 'cp869', 'cp_gr': 'cp869', 'csibm869': 'cp869', 'ibm869': 'cp869', '932': 'cp932', 'ms932': 'cp932', 'mskanji': 'cp932', 'ms_kanji': 'cp932', '949': 'cp949', 'ms949': 'cp949', 'uhc': 'cp949', '950': 'cp950', 'ms950': 'cp950', 'jisx0213': 'euc_jis_2004', 'eucjis2004': 'euc_jis_2004', 'euc_jis2004': 'euc_jis_2004', 'eucjisx0213': 'euc_jisx0213', 'eucjp': 'euc_jp', 'ujis': 'euc_jp', 'u_jis': 'euc_jp', 'euckr': 'euc_kr', 'korean': 'euc_kr', 'ksc5601': 'euc_kr', 'ks_c_5601': 'euc_kr', 'ks_c_5601_1987': 'euc_kr', 'ksx1001': 'euc_kr', 'ks_x_1001': 'euc_kr', 'gb18030_2000': 'gb18030', 'chinese': 'gb2312', 'csiso58gb231280': 'gb2312', 'euc_cn': 'gb2312', 'euccn': 'gb2312', 'eucgb2312_cn': 'gb2312', 'gb2312_1980': 'gb2312', 'gb2312_80': 'gb2312', 'iso_ir_58': 'gb2312', '936': 'gbk', 'cp936': 'gbk', 'ms936': 'gbk', 'hex': 'hex_codec', 'roman8': 'hp_roman8', 'r8': 'hp_roman8', 'csHPRoman8': 'hp_roman8', 'hzgb': 'hz', 'hz_gb': 'hz', 'hz_gb_2312': 'hz', 'csiso2022jp': 'iso2022_jp', 'iso2022jp': 'iso2022_jp', 'iso_2022_jp': 'iso2022_jp', 'iso2022jp_1': 'iso2022_jp_1', 'iso_2022_jp_1': 'iso2022_jp_1', 'iso2022jp_2': 'iso2022_jp_2', 'iso_2022_jp_2': 'iso2022_jp_2', 'iso_2022_jp_2004': 'iso2022_jp_2004', 'iso2022jp_2004': 'iso2022_jp_2004', 'iso2022jp_3': 'iso2022_jp_3', 'iso_2022_jp_3': 'iso2022_jp_3', 'iso2022jp_ext': 'iso2022_jp_ext', 'iso_2022_jp_ext': 'iso2022_jp_ext', 'csiso2022kr': 'iso2022_kr', 'iso2022kr': 'iso2022_kr', 'iso_2022_kr': 'iso2022_kr', 'csisolatin6': 'iso8859_10', 'iso_8859_10': 'iso8859_10', 'iso_8859_10_1992': 'iso8859_10', 'iso_ir_157': 'iso8859_10', 'l6': 'iso8859_10', 'latin6': 'iso8859_10', 'thai': 'iso8859_11', 'iso_8859_11': 'iso8859_11', 'iso_8859_11_2001': 'iso8859_11', 
'iso_8859_13': 'iso8859_13', 'l7': 'iso8859_13', 'latin7': 'iso8859_13', 'iso_8859_14': 'iso8859_14', 'iso_8859_14_1998': 'iso8859_14', 'iso_celtic': 'iso8859_14', 'iso_ir_199': 'iso8859_14', 'l8': 'iso8859_14', 'latin8': 'iso8859_14', 'iso_8859_15': 'iso8859_15', 'l9': 'iso8859_15', 'latin9': 'iso8859_15', 'iso_8859_16': 'iso8859_16', 'iso_8859_16_2001': 'iso8859_16', 'iso_ir_226': 'iso8859_16', 'l10': 'iso8859_16', 'latin10': 'iso8859_16', 'csisolatin2': 'iso8859_2', 'iso_8859_2': 'iso8859_2', 'iso_8859_2_1987': 'iso8859_2', 'iso_ir_101': 'iso8859_2', 'l2': 'iso8859_2', 'latin2': 'iso8859_2', 'csisolatin3': 'iso8859_3', 'iso_8859_3': 'iso8859_3', 'iso_8859_3_1988': 'iso8859_3', 'iso_ir_109': 'iso8859_3', 'l3': 'iso8859_3', 'latin3': 'iso8859_3', 'csisolatin4': 'iso8859_4', 'iso_8859_4': 'iso8859_4', 'iso_8859_4_1988': 'iso8859_4', 'iso_ir_110': 'iso8859_4', 'l4': 'iso8859_4', 'latin4': 'iso8859_4', 'csisolatincyrillic': 'iso8859_5', 'cyrillic': 'iso8859_5', 'iso_8859_5': 'iso8859_5', 'iso_8859_5_1988': 'iso8859_5', 'iso_ir_144': 'iso8859_5', 'arabic': 'iso8859_6', 'asmo_708': 'iso8859_6', 'csisolatinarabic': 'iso8859_6', 'ecma_114': 'iso8859_6', 'iso_8859_6': 'iso8859_6', 'iso_8859_6_1987': 'iso8859_6', 'iso_ir_127': 'iso8859_6', 'csisolatingreek': 'iso8859_7', 'ecma_118': 'iso8859_7', 'elot_928': 'iso8859_7', 'greek': 'iso8859_7', 'greek8': 'iso8859_7', 'iso_8859_7': 'iso8859_7', 'iso_8859_7_1987': 'iso8859_7', 'iso_ir_126': 'iso8859_7', 'csisolatinhebrew': 'iso8859_8', 'hebrew': 'iso8859_8', 'iso_8859_8': 'iso8859_8', 'iso_8859_8_1988': 'iso8859_8', 'iso_ir_138': 'iso8859_8', 'csisolatin5': 'iso8859_9', 'iso_8859_9': 'iso8859_9', 'iso_8859_9_1989': 'iso8859_9', 'iso_ir_148': 'iso8859_9', 'l5': 'iso8859_9', 'latin5': 'iso8859_9', 'cp1361': 'johab', 'ms1361': 'johab', 'cskoi8r': 'koi8_r', 'kz_1048': 'kz1048', 'rk1048': 'kz1048', 'strk1048_2002': 'kz1048', '8859': 'latin_1', 'cp819': 'latin_1', 'csisolatin1': 'latin_1', 'ibm819': 'latin_1', 'iso8859': 'latin_1', 
'iso8859_1': 'latin_1', 'iso_8859_1': 'latin_1', 'iso_8859_1_1987': 'latin_1', 'iso_ir_100': 'latin_1', 'l1': 'latin_1', 'latin': 'latin_1', 'latin1': 'latin_1', 'maccyrillic': 'mac_cyrillic', 'macgreek': 'mac_greek', 'maciceland': 'mac_iceland', 'maccentraleurope': 'mac_latin2', 'maclatin2': 'mac_latin2', 'macintosh': 'mac_roman', 'macroman': 'mac_roman', 'macturkish': 'mac_turkish', 'dbcs': 'mbcs', 'csptcp154': 'ptcp154', 'pt154': 'ptcp154', 'cp154': 'ptcp154', 'cyrillic_asian': 'ptcp154', 'quopri': 'quopri_codec', 'quoted_printable': 'quopri_codec', 'quotedprintable': 'quopri_codec', 'rot13': 'rot_13', 'csshiftjis': 'shift_jis', 'shiftjis': 'shift_jis', 'sjis': 'shift_jis', 's_jis': 'shift_jis', 'shiftjis2004': 'shift_jis_2004', 'sjis_2004': 'shift_jis_2004', 's_jis_2004': 'shift_jis_2004', 'shiftjisx0213': 'shift_jisx0213', 'sjisx0213': 'shift_jisx0213', 's_jisx0213': 'shift_jisx0213', 'tis260': 'tactis', 'tis620': 'tis_620', 'tis_620_0': 'tis_620', 'tis_620_2529_0': 'tis_620', 'tis_620_2529_1': 'tis_620', 'iso_ir_166': 'tis_620', 'u16': 'utf_16', 'utf16': 'utf_16', 'unicodebigunmarked': 'utf_16_be', 'utf_16be': 'utf_16_be', 'unicodelittleunmarked': 'utf_16_le', 'utf_16le': 'utf_16_le', 'u32': 'utf_32', 'utf32': 'utf_32', 'utf_32be': 'utf_32_be', 'utf_32le': 'utf_32_le', 'u7': 'utf_7', 'utf7': 'utf_7', 'unicode_1_1_utf_7': 'utf_7', 'u8': 'utf_8', 'utf': 'utf_8', 'utf8': 'utf_8', 'utf8_ucs2': 'utf_8', 'utf8_ucs4': 'utf_8', 'uu': 'uu_codec', 'zip': 'zlib_codec', 'zlib': 'zlib_codec', 'x_mac_japanese': 'shift_jis', 'x_mac_korean': 'euc_kr', 'x_mac_simp_chinese': 'gb2312', 'x_mac_trad_chinese': 'big5'} |
class GridResizeDirection(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies whether a System.Windows.Controls.GridSplitter control redistributes space between rows or between columns.
enum GridResizeDirection,values: Auto (0),Columns (1),Rows (2)
"""
def __eq__(self, *args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
Auto = None
Columns = None
Rows = None
value__ = None
| class Gridresizedirection(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies whether a System.Windows.Controls.GridSplitter control redistributes space between rows or between columns.
enum GridResizeDirection,values: Auto (0),Columns (1),Rows (2)
"""
def __eq__(self, *args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
auto = None
columns = None
rows = None
value__ = None |
#! /usr/bin/env python3
def ins_sort_indices(A):
F = list(range(len(A)))
for j in range(len(A)):
key = A[F[j]]
i = j-1
while i >= 0 and A[F[i]] > key:
F[i+1] = F[i]
i = i-1
F[i+1] = j
return F
def T(F):
T = list(range(len(F)))
for (i, f) in enumerate(F):
T[f] = i
return T
if __name__ == '__main__':
As = [
[1, 2, 3],
[3, 5, 4],
[2, 3, 1],
]
for a in As:
print('A=', a)
indices = ins_sort_indices(a)
t = T(indices)
print('sorted(A)=', end='')
for i in indices:
print(a[i], end='')
print('')
print('F=', indices)
print('T=', t)
print('')
| def ins_sort_indices(A):
f = list(range(len(A)))
for j in range(len(A)):
key = A[F[j]]
i = j - 1
while i >= 0 and A[F[i]] > key:
F[i + 1] = F[i]
i = i - 1
F[i + 1] = j
return F
def t(F):
t = list(range(len(F)))
for (i, f) in enumerate(F):
T[f] = i
return T
if __name__ == '__main__':
as = [[1, 2, 3], [3, 5, 4], [2, 3, 1]]
for a in As:
print('A=', a)
indices = ins_sort_indices(a)
t = t(indices)
print('sorted(A)=', end='')
for i in indices:
print(a[i], end='')
print('')
print('F=', indices)
print('T=', t)
print('') |
"""
Django accounts management made easy.
"""
VERSION = (1, 0, 2)
__version__ = '.'.join((str(each) for each in VERSION[:4]))
def get_version():
"""
Returns string with digit parts only as version.
"""
return '.'.join((str(each) for each in VERSION[:3]))
| """
Django accounts management made easy.
"""
version = (1, 0, 2)
__version__ = '.'.join((str(each) for each in VERSION[:4]))
def get_version():
"""
Returns string with digit parts only as version.
"""
return '.'.join((str(each) for each in VERSION[:3])) |
#addListElements.py
list1 = [5, 4, 9, 10, 3, 5]
list2 = [6, 3, 2, 1, 5, 3]
print("list1 elements:", list1[0], list1[1], list1[2], list1[3], list1[4])
print("list2 elements:", list2[0], list2[1], list2[2], list2[3], list2[4])
list3 = []
j = len(list1)
for i in range(j):
list3.insert(0,list1[i] + list2[i])
print("list3:", list3) | list1 = [5, 4, 9, 10, 3, 5]
list2 = [6, 3, 2, 1, 5, 3]
print('list1 elements:', list1[0], list1[1], list1[2], list1[3], list1[4])
print('list2 elements:', list2[0], list2[1], list2[2], list2[3], list2[4])
list3 = []
j = len(list1)
for i in range(j):
list3.insert(0, list1[i] + list2[i])
print('list3:', list3) |
"""TEST that will not run, because the filename does not start with 'test_'"""
def test_what_will_not_run():
assert True is False
| """TEST that will not run, because the filename does not start with 'test_'"""
def test_what_will_not_run():
assert True is False |
class Account:
def __init__(self, name, balance, min_balance):
self.name = name
self.balance = balance
self.min_balance = min_balance
def deposit(self, amount):
self.balance += amount
def withdraw(self, amount):
if self.balance - amount >= self.min_balance:
self.balance -= amount
else:
print("Sorry, not enough funds!")
def statements(self):
print('Account Balance:Rs{}'.format(self.balance))
class Current(Account):
def __init__(self, name, balance):
super().__init__(name, balance, min_balance =-1000)
def __str__(self):
return "{}'s Current Account : Balance is Rs{}".format(self.name, self.balance)
class Savings(Account):
def __init__(self, name, balance):
super().__init__(name, balance, min_balance =0)
def __str__(self):
return "{}'s Savings Account : Balance is Rs{}".format(self.name, self.balance)
| class Account:
def __init__(self, name, balance, min_balance):
self.name = name
self.balance = balance
self.min_balance = min_balance
def deposit(self, amount):
self.balance += amount
def withdraw(self, amount):
if self.balance - amount >= self.min_balance:
self.balance -= amount
else:
print('Sorry, not enough funds!')
def statements(self):
print('Account Balance:Rs{}'.format(self.balance))
class Current(Account):
def __init__(self, name, balance):
super().__init__(name, balance, min_balance=-1000)
def __str__(self):
return "{}'s Current Account : Balance is Rs{}".format(self.name, self.balance)
class Savings(Account):
def __init__(self, name, balance):
super().__init__(name, balance, min_balance=0)
def __str__(self):
return "{}'s Savings Account : Balance is Rs{}".format(self.name, self.balance) |
#
# PySNMP MIB module JUNIPER-LSYSSP-SCHEDULER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/JUNIPER-LSYSSP-SCHEDULER-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:00:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
jnxLsysSpScheduler, = mibBuilder.importSymbols("JUNIPER-LSYS-SECURITYPROFILE-MIB", "jnxLsysSpScheduler")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Gauge32, TimeTicks, ModuleIdentity, Unsigned32, Integer32, ObjectIdentity, NotificationType, iso, Counter32, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, MibIdentifier, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "TimeTicks", "ModuleIdentity", "Unsigned32", "Integer32", "ObjectIdentity", "NotificationType", "iso", "Counter32", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "MibIdentifier", "Counter64")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
jnxLsysSpSchedulerMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1))
if mibBuilder.loadTexts: jnxLsysSpSchedulerMIB.setLastUpdated('201005191644Z')
if mibBuilder.loadTexts: jnxLsysSpSchedulerMIB.setOrganization('Juniper Networks, Inc.')
if mibBuilder.loadTexts: jnxLsysSpSchedulerMIB.setContactInfo('Juniper Technical Assistance Center Juniper Networks, Inc. 1194 N. Mathilda Avenue Sunnyvale, CA 94089 E-mail: support@juniper.net HTTP://www.juniper.net')
if mibBuilder.loadTexts: jnxLsysSpSchedulerMIB.setDescription('This module defines the scheduler-specific MIB for Juniper Enterprise Logical-System (LSYS) security profiles. Juniper documentation is recommended as the reference. The LSYS security profile provides various static and dynamic resource management by observing resource quota limits. Security scheduler resource is the focus in this MIB. ')
jnxLsysSpSchedulerObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1))
jnxLsysSpSchedulerSummary = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2))
jnxLsysSpSchedulerTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1), )
if mibBuilder.loadTexts: jnxLsysSpSchedulerTable.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerTable.setDescription('LSYSPROFILE scheduler objects for scheduler resource consumption per LSYS.')
jnxLsysSpSchedulerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1), ).setIndexNames((1, "JUNIPER-LSYSSP-SCHEDULER-MIB", "jnxLsysSpSchedulerLsysName"))
if mibBuilder.loadTexts: jnxLsysSpSchedulerEntry.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerEntry.setDescription('An entry in scheduler resource table.')
jnxLsysSpSchedulerLsysName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64)))
if mibBuilder.loadTexts: jnxLsysSpSchedulerLsysName.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerLsysName.setDescription('The name of the logical system for which scheduler resource information is retrieved. ')
jnxLsysSpSchedulerProfileName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerProfileName.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerProfileName.setDescription('The security profile name string for the LSYS.')
jnxLsysSpSchedulerUsage = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerUsage.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerUsage.setDescription('The current resource usage count for the LSYS.')
jnxLsysSpSchedulerReserved = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerReserved.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerReserved.setDescription('The reserved resource count for the LSYS.')
jnxLsysSpSchedulerMaximum = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerMaximum.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerMaximum.setDescription('The maximum allowed resource usage count for the LSYS.')
jnxLsysSpSchedulerUsedAmount = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerUsedAmount.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerUsedAmount.setDescription('The scheduler resource consumption over all LSYS.')
jnxLsysSpSchedulerMaxQuota = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerMaxQuota.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerMaxQuota.setDescription('The scheduler resource maximum quota for the whole device for all LSYS.')
jnxLsysSpSchedulerAvailableAmount = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerAvailableAmount.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerAvailableAmount.setDescription('The scheduler resource available in the whole device.')
jnxLsysSpSchedulerHeaviestUsage = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerHeaviestUsage.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerHeaviestUsage.setDescription('The most amount of scheduler resource consumed of a LSYS.')
jnxLsysSpSchedulerHeaviestUser = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerHeaviestUser.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerHeaviestUser.setDescription('The LSYS name that consume the most scheduler resource.')
jnxLsysSpSchedulerLightestUsage = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerLightestUsage.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerLightestUsage.setDescription('The least amount of scheduler resource consumed of a LSYS.')
jnxLsysSpSchedulerLightestUser = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerLightestUser.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerLightestUser.setDescription('The LSYS name that consume the least scheduler resource.')
mibBuilder.exportSymbols("JUNIPER-LSYSSP-SCHEDULER-MIB", jnxLsysSpSchedulerTable=jnxLsysSpSchedulerTable, jnxLsysSpSchedulerUsedAmount=jnxLsysSpSchedulerUsedAmount, jnxLsysSpSchedulerMIB=jnxLsysSpSchedulerMIB, jnxLsysSpSchedulerProfileName=jnxLsysSpSchedulerProfileName, jnxLsysSpSchedulerMaximum=jnxLsysSpSchedulerMaximum, jnxLsysSpSchedulerLsysName=jnxLsysSpSchedulerLsysName, jnxLsysSpSchedulerReserved=jnxLsysSpSchedulerReserved, PYSNMP_MODULE_ID=jnxLsysSpSchedulerMIB, jnxLsysSpSchedulerLightestUser=jnxLsysSpSchedulerLightestUser, jnxLsysSpSchedulerHeaviestUser=jnxLsysSpSchedulerHeaviestUser, jnxLsysSpSchedulerObjects=jnxLsysSpSchedulerObjects, jnxLsysSpSchedulerEntry=jnxLsysSpSchedulerEntry, jnxLsysSpSchedulerUsage=jnxLsysSpSchedulerUsage, jnxLsysSpSchedulerHeaviestUsage=jnxLsysSpSchedulerHeaviestUsage, jnxLsysSpSchedulerAvailableAmount=jnxLsysSpSchedulerAvailableAmount, jnxLsysSpSchedulerLightestUsage=jnxLsysSpSchedulerLightestUsage, jnxLsysSpSchedulerSummary=jnxLsysSpSchedulerSummary, jnxLsysSpSchedulerMaxQuota=jnxLsysSpSchedulerMaxQuota)
| (object_identifier, integer, octet_string) = mibBuilder.importSymbols('ASN1', 'ObjectIdentifier', 'Integer', 'OctetString')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(value_range_constraint, constraints_union, single_value_constraint, value_size_constraint, constraints_intersection) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ValueRangeConstraint', 'ConstraintsUnion', 'SingleValueConstraint', 'ValueSizeConstraint', 'ConstraintsIntersection')
(jnx_lsys_sp_scheduler,) = mibBuilder.importSymbols('JUNIPER-LSYS-SECURITYPROFILE-MIB', 'jnxLsysSpScheduler')
(notification_group, module_compliance) = mibBuilder.importSymbols('SNMPv2-CONF', 'NotificationGroup', 'ModuleCompliance')
(gauge32, time_ticks, module_identity, unsigned32, integer32, object_identity, notification_type, iso, counter32, ip_address, mib_scalar, mib_table, mib_table_row, mib_table_column, bits, mib_identifier, counter64) = mibBuilder.importSymbols('SNMPv2-SMI', 'Gauge32', 'TimeTicks', 'ModuleIdentity', 'Unsigned32', 'Integer32', 'ObjectIdentity', 'NotificationType', 'iso', 'Counter32', 'IpAddress', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'Bits', 'MibIdentifier', 'Counter64')
(textual_convention, display_string) = mibBuilder.importSymbols('SNMPv2-TC', 'TextualConvention', 'DisplayString')
jnx_lsys_sp_scheduler_mib = module_identity((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1))
if mibBuilder.loadTexts:
jnxLsysSpSchedulerMIB.setLastUpdated('201005191644Z')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerMIB.setOrganization('Juniper Networks, Inc.')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerMIB.setContactInfo('Juniper Technical Assistance Center Juniper Networks, Inc. 1194 N. Mathilda Avenue Sunnyvale, CA 94089 E-mail: support@juniper.net HTTP://www.juniper.net')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerMIB.setDescription('This module defines the scheduler-specific MIB for Juniper Enterprise Logical-System (LSYS) security profiles. Juniper documentation is recommended as the reference. The LSYS security profile provides various static and dynamic resource management by observing resource quota limits. Security scheduler resource is the focus in this MIB. ')
jnx_lsys_sp_scheduler_objects = mib_identifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1))
jnx_lsys_sp_scheduler_summary = mib_identifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2))
jnx_lsys_sp_scheduler_table = mib_table((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1))
if mibBuilder.loadTexts:
jnxLsysSpSchedulerTable.setStatus('current')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerTable.setDescription('LSYSPROFILE scheduler objects for scheduler resource consumption per LSYS.')
jnx_lsys_sp_scheduler_entry = mib_table_row((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1)).setIndexNames((1, 'JUNIPER-LSYSSP-SCHEDULER-MIB', 'jnxLsysSpSchedulerLsysName'))
if mibBuilder.loadTexts:
jnxLsysSpSchedulerEntry.setStatus('current')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerEntry.setDescription('An entry in scheduler resource table.')
jnx_lsys_sp_scheduler_lsys_name = mib_table_column((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 1), display_string().subtype(subtypeSpec=value_size_constraint(1, 64)))
if mibBuilder.loadTexts:
jnxLsysSpSchedulerLsysName.setStatus('current')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerLsysName.setDescription('The name of the logical system for which scheduler resource information is retrieved. ')
jnx_lsys_sp_scheduler_profile_name = mib_table_column((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 2), display_string().subtype(subtypeSpec=value_size_constraint(1, 32))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerProfileName.setStatus('current')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerProfileName.setDescription('The security profile name string for the LSYS.')
jnx_lsys_sp_scheduler_usage = mib_table_column((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 3), unsigned32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerUsage.setStatus('current')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerUsage.setDescription('The current resource usage count for the LSYS.')
jnx_lsys_sp_scheduler_reserved = mib_table_column((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 4), unsigned32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerReserved.setStatus('current')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerReserved.setDescription('The reserved resource count for the LSYS.')
jnx_lsys_sp_scheduler_maximum = mib_table_column((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 5), unsigned32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerMaximum.setStatus('current')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerMaximum.setDescription('The maximum allowed resource usage count for the LSYS.')
jnx_lsys_sp_scheduler_used_amount = mib_scalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 1), unsigned32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerUsedAmount.setStatus('current')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerUsedAmount.setDescription('The scheduler resource consumption over all LSYS.')
jnx_lsys_sp_scheduler_max_quota = mib_scalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 2), unsigned32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerMaxQuota.setStatus('current')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerMaxQuota.setDescription('The scheduler resource maximum quota for the whole device for all LSYS.')
jnx_lsys_sp_scheduler_available_amount = mib_scalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 3), unsigned32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerAvailableAmount.setStatus('current')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerAvailableAmount.setDescription('The scheduler resource available in the whole device.')
jnx_lsys_sp_scheduler_heaviest_usage = mib_scalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 4), unsigned32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerHeaviestUsage.setStatus('current')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerHeaviestUsage.setDescription('The most amount of scheduler resource consumed of a LSYS.')
jnx_lsys_sp_scheduler_heaviest_user = mib_scalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 5), display_string().subtype(subtypeSpec=value_size_constraint(1, 64))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerHeaviestUser.setStatus('current')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerHeaviestUser.setDescription('The LSYS name that consume the most scheduler resource.')
jnx_lsys_sp_scheduler_lightest_usage = mib_scalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 6), unsigned32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerLightestUsage.setStatus('current')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerLightestUsage.setDescription('The least amount of scheduler resource consumed of a LSYS.')
jnx_lsys_sp_scheduler_lightest_user = mib_scalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 7), display_string().subtype(subtypeSpec=value_size_constraint(1, 64))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerLightestUser.setStatus('current')
if mibBuilder.loadTexts:
jnxLsysSpSchedulerLightestUser.setDescription('The LSYS name that consume the least scheduler resource.')
mibBuilder.exportSymbols('JUNIPER-LSYSSP-SCHEDULER-MIB', jnxLsysSpSchedulerTable=jnxLsysSpSchedulerTable, jnxLsysSpSchedulerUsedAmount=jnxLsysSpSchedulerUsedAmount, jnxLsysSpSchedulerMIB=jnxLsysSpSchedulerMIB, jnxLsysSpSchedulerProfileName=jnxLsysSpSchedulerProfileName, jnxLsysSpSchedulerMaximum=jnxLsysSpSchedulerMaximum, jnxLsysSpSchedulerLsysName=jnxLsysSpSchedulerLsysName, jnxLsysSpSchedulerReserved=jnxLsysSpSchedulerReserved, PYSNMP_MODULE_ID=jnxLsysSpSchedulerMIB, jnxLsysSpSchedulerLightestUser=jnxLsysSpSchedulerLightestUser, jnxLsysSpSchedulerHeaviestUser=jnxLsysSpSchedulerHeaviestUser, jnxLsysSpSchedulerObjects=jnxLsysSpSchedulerObjects, jnxLsysSpSchedulerEntry=jnxLsysSpSchedulerEntry, jnxLsysSpSchedulerUsage=jnxLsysSpSchedulerUsage, jnxLsysSpSchedulerHeaviestUsage=jnxLsysSpSchedulerHeaviestUsage, jnxLsysSpSchedulerAvailableAmount=jnxLsysSpSchedulerAvailableAmount, jnxLsysSpSchedulerLightestUsage=jnxLsysSpSchedulerLightestUsage, jnxLsysSpSchedulerSummary=jnxLsysSpSchedulerSummary, jnxLsysSpSchedulerMaxQuota=jnxLsysSpSchedulerMaxQuota) |
class solution:
    """Container for the results and configuration of one optimizer run.

    Attributes hold the best fitness found, the individual that achieved
    it, per-iteration history, and metadata describing the search setup.
    (Name kept lowercase for backward compatibility with existing callers.)
    """

    def __init__(self):
        # Best fitness value and the individual that achieved it.
        self.best = 0
        self.bestIndividual = []
        # Per-iteration history of candidate solutions and fitness values.
        self.solutions = []
        self.population_fitness = []
        # Run metadata: optimizer name and objective-function name.
        self.optimizer = ""
        self.objfname = ""
        # Search-space configuration: bounds, dimensionality, population
        # size, iteration budget and objective handle.
        self.lb = 0
        self.ub = 0
        self.dim = 0
        self.popnum = 0
        self.maxiers = 0
        self.objf = 0
# NOTE(review): the leading '|' is a dataset table-cell delimiter fused
# onto this row (machine-renamed copy of `solution` above), not Python.
| class Solution:
    # Result/configuration container for one optimizer run.
    def __init__(self):
        self.best = 0
        self.bestIndividual = []
        self.solutions = []
        self.population_fitness = []
        self.optimizer = ''
        self.objfname = ''
        self.lb = 0
        self.ub = 0
        self.dim = 0
        self.popnum = 0
        self.maxiers = 0
        self.objf = 0
name = "Waldo"
text = "Can you find where Wally is?"

def contains_name(haystack, needle):
    """Return True if *needle* occurs anywhere in *haystack*."""
    return needle in haystack

# BUG FIX: the original tested `if text.find(name):`. str.find() returns
# -1 when the substring is absent (truthy!) and 0 for a match at index 0
# (falsy!), so both of those cases were inverted. A membership test is
# the correct, idiomatic check.
if contains_name(text, name):
    print("Found Waldo")
else:
    print("Cannot find Waldo")
# NOTE(review): leading '|' is a dataset table-cell delimiter, not Python.
| name = 'Waldo'
text = 'Can you find where Wally is?'
# NOTE(review): str.find() returns -1 (truthy) when absent and 0 (falsy)
# for a match at index 0, so this condition is wrong in both cases; the
# auto-rewrite did not fix it. `if name in text:` would be correct.
if text.find(name):
    print('Found Waldo')
else:
    print('Cannot find Waldo')
## Detect a directed cycle with DFS: `visited` marks vertices entered by
## any DFS so far, `dfsvisited` marks vertices on the current recursion
## stack (a back edge into that stack is a cycle).
def checkcycle(node, adj: list, visited: list, dfsvisited: list):
    """DFS from *node*; return True iff a directed cycle is reachable.

    visited    -- 0/1 flags, shared across calls from iscyclic.
    dfsvisited -- 0/1 flags for the current recursion stack only.
    """
    visited[node] = 1
    dfsvisited[node] = 1
    for neighbour in adj[node]:
        if not visited[neighbour]:
            if checkcycle(neighbour, adj, visited, dfsvisited):
                return True
        elif dfsvisited[neighbour]:
            # Back edge to a vertex still on the stack -> cycle found.
            return True
    # Done with this vertex: remove it from the recursion stack.
    dfsvisited[node] = 0
    return False

def iscyclic(adj: list, N):
    """Return True iff the N-vertex directed graph *adj* (adjacency
    lists) contains a cycle.

    Starts a fresh DFS from every still-unvisited vertex, so
    disconnected graphs are handled.
    """
    visited = [0] * N
    dfsvisited = [0] * N
    # Idiom cleanup over the original: no `== True` / `== 0` comparisons.
    return any(not visited[v] and checkcycle(v, adj, visited, dfsvisited)
               for v in range(N))
## Driver code..!!!
if __name__ == "__main__":
    # Number of vertices; V more input lines are expected to follow.
    V = int(input())
    adj = []
    for i in range(V):
        # NOTE(review): each input line is stored directly as adj[i], so
        # the input must be adjacency lists (neighbours of vertex i),
        # not "u v" edge pairs as the sample block below suggests --
        # confirm the expected input format against callers/tests.
        u = list(map(int,input().split()))
        adj.append(u)
    ## function call...!!!
    if(iscyclic(adj, V)):
        print("YES, cycle is present in graph")
    else:
        print("No cycle detect")
"""
'''
sample input...
7
0 1
1 2
2 3
4 3
4 5
4 6
1 6
'''
""" | def checkcycle(node, adj: list, visited: list, dfsvisited: list):
visited[node] = 1
dfsvisited[node] = 1
for i in adj[node]:
if visited[i] == 0:
if checkcycle(i, adj, visited, dfsvisited) == True:
return True
elif dfsvisited[i] == 1:
return True
dfsvisited[node] = 0
return False
def iscyclic(adj: list, N):
visited = [0] * N
dfsvisited = [0] * N
for i in range(N):
if visited[i] == 0:
if checkcycle(i, adj, visited, dfsvisited) == True:
return True
return False
if __name__ == '__main__':
v = int(input())
adj = []
for i in range(V):
u = list(map(int, input().split()))
adj.append(u)
if iscyclic(adj, V):
print('YES, cycle is present in graph')
else:
print('No cycle detect')
"\n'''\nsample input...\n7\n0 1\n1 2\n2 3\n4 3\n4 5\n4 6\n1 6\n'''\n" |
# -*- coding: utf-8 -*-
"""
This file is part of the open source project py-dynasynthetic
(see https://github.com/micha-k/py-dynasynthetic).
Author: Michael Kessel
Contact: I have an email account 'dev' on a host called 'michaelkessel' listed
in the toplevel domain 'de'.
"""
# Package version string (used by packaging / runtime introspection).
__version__ = '0.5'
| """
This file is part of the open source project py-dynasynthetic
(see https://github.com/micha-k/py-dynasynthetic).
Author: Michael Kessel
Contact: I have an email account 'dev' on a host called 'michaelkessel' listed
in the toplevel domain 'de'.
"""
__version__ = '0.5' |
def insertion_sort(l):
    """Sort list *l* in place.

    For each position, the new element is swapped leftwards one slot at
    a time until it is no smaller than its left neighbour (stable,
    adjacent-swap insertion sort).
    """
    for end in range(len(l)):
        i = end
        while i > 0 and l[i] < l[i - 1]:
            l[i], l[i - 1] = l[i - 1], l[i]
            i -= 1


list1 = [9, 2, 6, 5, 1, 7]
insertion_sort(list1)
print(list1)
# NOTE(review): leading '|' is a dataset table-cell delimiter, not Python.
| def insertion_sort(l):
    # In-place adjacent-swap insertion sort (machine-reformatted copy of
    # the function above).
    for x in range(len(l)):
        pos = x
        while pos > 0 and l[pos - 1] > l[pos]:
            (l[pos - 1], l[pos]) = (l[pos], l[pos - 1])
            pos -= 1
list1 = [9, 2, 6, 5, 1, 7]
insertion_sort(list1)
print(list1)
# encoding: utf-8
# module GH_IO.Types calls itself Types
# from GH_IO,Version=1.0.0.0,Culture=neutral,PublicKeyToken=6a29997d2e6b4f97
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# no functions
# classes
class GH_BoundingBox(object):
    """
    Represents a 3D bounding box,denoted by two points.
    GH_BoundingBox(nMin: GH_Point3D,nMax: GH_Point3D)
    GH_BoundingBox(Minx: float,Miny: float,Minz: float,Maxx: float,Maxy: float,Maxz: float)
    """
    # Auto-generated IronPython stub; the real implementation lives in
    # the CLR assembly GH_IO.dll -- method bodies here are placeholders.
    def ToString(self):
        """
        ToString(self: GH_BoundingBox) -> str
        Converts this structure to a human-readable string.
        Returns: A string representation of the box structure.
        """
        pass
    @staticmethod
    def __new__(self,*__args):
        """
        __new__[GH_BoundingBox]() -> GH_BoundingBox
        __new__(cls: type,nMin: GH_Point3D,nMax: GH_Point3D)
        __new__(cls: type,Minx: float,Miny: float,Minz: float,Maxx: float,Maxy: float,Maxz: float)
        """
        pass
    # Corner-point attributes; populated by the CLR type at runtime.
    Max=None
    Min=None
class GH_Interval1D(object):
    """
    Represents two double precision floating point values.
    GH_Interval1D(na: float,nb: float)
    """
    # Auto-generated IronPython stub for a CLR type in GH_IO.dll.
    def ToString(self):
        """
        ToString(self: GH_Interval1D) -> str
        Converts this structure to a human-readable string.
        Returns: A string representation of the Interval structure.
        """
        pass
    @staticmethod
    def __new__(self,na,nb):
        """
        __new__[GH_Interval1D]() -> GH_Interval1D
        __new__(cls: type,na: float,nb: float)
        """
        pass
    # Interval endpoints; populated by the CLR type at runtime.
    a=None
    b=None
class GH_Interval2D(object):
    """
    Represents two double precision domains.
    GH_Interval2D(nu: GH_Interval1D,nv: GH_Interval1D)
    GH_Interval2D(nu0: float,nu1: float,nv0: float,nv1: float)
    """
    # Auto-generated IronPython stub for a CLR type in GH_IO.dll.
    def ToString(self):
        """
        ToString(self: GH_Interval2D) -> str
        Converts this structure to a human-readable string.
        Returns: A string representation of the two-dimensional Interval structure.
        """
        pass
    @staticmethod
    def __new__(self,*__args):
        """
        __new__[GH_Interval2D]() -> GH_Interval2D
        __new__(cls: type,nu: GH_Interval1D,nv: GH_Interval1D)
        __new__(cls: type,nu0: float,nu1: float,nv0: float,nv1: float)
        """
        pass
    # Component intervals; populated by the CLR type at runtime.
    u=None
    v=None
class GH_Item(object,GH_IBinarySupport,GH_IXmlSupport):
"""
Represents a single data item in a chunk.
GH_Item(item_name: str,item_data: bool)
GH_Item(item_name: str,item_index: int,item_data: bool)
GH_Item(item_name: str,item_data: Byte)
GH_Item(item_name: str,item_index: int,item_data: Byte)
GH_Item(item_name: str,item_data: int)
GH_Item(item_name: str,item_index: int,item_data: int)
GH_Item(item_name: str,item_data: Int64)
GH_Item(item_name: str,item_index: int,item_data: Int64)
GH_Item(item_name: str,item_data: Single)
GH_Item(item_name: str,item_index: int,item_data: Single)
GH_Item(item_name: str,item_data: float)
GH_Item(item_name: str,item_index: int,item_data: float)
GH_Item(item_name: str,item_data: Decimal)
GH_Item(item_name: str,item_index: int,item_data: Decimal)
GH_Item(item_name: str,item_data: DateTime)
GH_Item(item_name: str,item_index: int,item_data: DateTime)
GH_Item(item_name: str,item_data: Guid)
GH_Item(item_name: str,item_index: int,item_data: Guid)
GH_Item(item_name: str,item_data: str)
GH_Item(item_name: str,item_index: int,item_data: str)
GH_Item(item_name: str,item_data: Array[Byte])
GH_Item(item_name: str,item_index: int,item_data: Array[Byte])
GH_Item(item_name: str,item_data: Array[float])
GH_Item(item_name: str,item_index: int,item_data: Array[float])
GH_Item(item_name: str,item_data: Point)
GH_Item(item_name: str,item_index: int,item_data: Point)
GH_Item(item_name: str,item_data: PointF)
GH_Item(item_name: str,item_index: int,item_data: PointF)
GH_Item(item_name: str,item_data: Size)
GH_Item(item_name: str,item_index: int,item_data: Size)
GH_Item(item_name: str,item_data: SizeF)
GH_Item(item_name: str,item_index: int,item_data: SizeF)
GH_Item(item_name: str,item_data: Rectangle)
GH_Item(item_name: str,item_index: int,item_data: Rectangle)
GH_Item(item_name: str,item_data: RectangleF)
GH_Item(item_name: str,item_index: int,item_data: RectangleF)
GH_Item(item_name: str,item_data: Color)
GH_Item(item_name: str,item_index: int,item_data: Color)
GH_Item(item_name: str,item_data: Bitmap)
GH_Item(item_name: str,item_index: int,item_data: Bitmap)
GH_Item(item_name: str,item_data: GH_Point2D)
GH_Item(item_name: str,item_index: int,item_data: GH_Point2D)
GH_Item(item_name: str,item_data: GH_Point3D)
GH_Item(item_name: str,item_index: int,item_data: GH_Point3D)
GH_Item(item_name: str,item_data: GH_Point4D)
GH_Item(item_name: str,item_index: int,item_data: GH_Point4D)
GH_Item(item_name: str,item_data: GH_Interval1D)
GH_Item(item_name: str,item_index: int,item_data: GH_Interval1D)
GH_Item(item_name: str,item_data: GH_Interval2D)
GH_Item(item_name: str,item_index: int,item_data: GH_Interval2D)
GH_Item(item_name: str,item_data: GH_Line)
GH_Item(item_name: str,item_index: int,item_data: GH_Line)
GH_Item(item_name: str,item_data: GH_BoundingBox)
GH_Item(item_name: str,item_index: int,item_data: GH_BoundingBox)
GH_Item(item_name: str,item_data: GH_Plane)
GH_Item(item_name: str,item_index: int,item_data: GH_Plane)
GH_Item(item_name: str,item_data: GH_Version)
GH_Item(item_name: str,item_index: int,item_data: GH_Version)
"""
@staticmethod
def CreateFrom(*__args):
"""
CreateFrom(node: XmlNode) -> GH_Item
Creates a new instance of GH_Item and sets the fields from an Xml node object.
node: Xml node object that defines the field data.
Returns: The constructed and read item.
CreateFrom(reader: BinaryReader) -> GH_Item
Creates a new instance of GH_Item and sets the fields from a reader object.
reader: Reader object that defines the field data.
Returns: The constructed and read item.
"""
pass
def Read(self,*__args):
"""
Read(self: GH_Item,node: XmlNode)
Deserialize this item from an Xml node.
node: Xml node to serialize from.
Read(self: GH_Item,reader: BinaryReader)
Deserialize this item from a binary stream.
reader: Reader to deserialize with.
"""
pass
def ToString(self):
"""
ToString(self: GH_Item) -> str
Converts the struct into a human readable format.
"""
pass
def Write(self,writer):
"""
Write(self: GH_Item,writer: XmlWriter)
Serialize this item into an Xml stream.
writer: Writer to serialize with.
Write(self: GH_Item,writer: BinaryWriter)
Serialize this item into a binary stream.
writer: Writer to serialize with.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,item_name,*__args):
"""
__new__(cls: type,item_name: str,item_data: bool)
__new__(cls: type,item_name: str,item_index: int,item_data: bool)
__new__(cls: type,item_name: str,item_data: Byte)
__new__(cls: type,item_name: str,item_index: int,item_data: Byte)
__new__(cls: type,item_name: str,item_data: int)
__new__(cls: type,item_name: str,item_index: int,item_data: int)
__new__(cls: type,item_name: str,item_data: Int64)
__new__(cls: type,item_name: str,item_index: int,item_data: Int64)
__new__(cls: type,item_name: str,item_data: Single)
__new__(cls: type,item_name: str,item_index: int,item_data: Single)
__new__(cls: type,item_name: str,item_data: float)
__new__(cls: type,item_name: str,item_index: int,item_data: float)
__new__(cls: type,item_name: str,item_data: Decimal)
__new__(cls: type,item_name: str,item_index: int,item_data: Decimal)
__new__(cls: type,item_name: str,item_data: DateTime)
__new__(cls: type,item_name: str,item_index: int,item_data: DateTime)
__new__(cls: type,item_name: str,item_data: Guid)
__new__(cls: type,item_name: str,item_index: int,item_data: Guid)
__new__(cls: type,item_name: str,item_data: str)
__new__(cls: type,item_name: str,item_index: int,item_data: str)
__new__(cls: type,item_name: str,item_data: Array[Byte])
__new__(cls: type,item_name: str,item_index: int,item_data: Array[Byte])
__new__(cls: type,item_name: str,item_data: Array[float])
__new__(cls: type,item_name: str,item_index: int,item_data: Array[float])
__new__(cls: type,item_name: str,item_data: Point)
__new__(cls: type,item_name: str,item_index: int,item_data: Point)
__new__(cls: type,item_name: str,item_data: PointF)
__new__(cls: type,item_name: str,item_index: int,item_data: PointF)
__new__(cls: type,item_name: str,item_data: Size)
__new__(cls: type,item_name: str,item_index: int,item_data: Size)
__new__(cls: type,item_name: str,item_data: SizeF)
__new__(cls: type,item_name: str,item_index: int,item_data: SizeF)
__new__(cls: type,item_name: str,item_data: Rectangle)
__new__(cls: type,item_name: str,item_index: int,item_data: Rectangle)
__new__(cls: type,item_name: str,item_data: RectangleF)
__new__(cls: type,item_name: str,item_index: int,item_data: RectangleF)
__new__(cls: type,item_name: str,item_data: Color)
__new__(cls: type,item_name: str,item_index: int,item_data: Color)
__new__(cls: type,item_name: str,item_data: Bitmap)
__new__(cls: type,item_name: str,item_index: int,item_data: Bitmap)
__new__(cls: type,item_name: str,item_data: GH_Point2D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Point2D)
__new__(cls: type,item_name: str,item_data: GH_Point3D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Point3D)
__new__(cls: type,item_name: str,item_data: GH_Point4D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Point4D)
__new__(cls: type,item_name: str,item_data: GH_Interval1D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Interval1D)
__new__(cls: type,item_name: str,item_data: GH_Interval2D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Interval2D)
__new__(cls: type,item_name: str,item_data: GH_Line)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Line)
__new__(cls: type,item_name: str,item_data: GH_BoundingBox)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_BoundingBox)
__new__(cls: type,item_name: str,item_data: GH_Plane)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Plane)
__new__(cls: type,item_name: str,item_data: GH_Version)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Version)
"""
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
def __str__(self,*args):
pass
DebuggerDisplay=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Readonly property used during Debugging.
Get: DebuggerDisplay(self: GH_Item) -> str
"""
HasIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the index existence implication. The item is considered to have an index qualifier
if the index value is larger than or equal to zero.
Get: HasIndex(self: GH_Item) -> bool
"""
HasName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name validity of this item.
The item is considered to have an invalid name if string.IsNullOrEmpty(name)
Get: HasName(self: GH_Item) -> bool
"""
HasType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the type set validity of this item.
The item is considered to have a type if type != GH_Types.unset
Get: HasType(self: GH_Item) -> bool
"""
Index=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the index of an item.
Typically,indices are set at construction and do not change.
If you change indices after construction,you could corrupt an archive.
Get: Index(self: GH_Item) -> int
Set: Index(self: GH_Item)=value
"""
InternalData=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Retrieves the internal data of this item.
No type casting is performed.
Get: InternalData(self: GH_Item) -> object
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the name of this item.
Typically,names are set at construction and do not change.
If you change names after construction,you could corrupt an archive.
Get: Name(self: GH_Item) -> str
Set: Name(self: GH_Item)=value
"""
Type=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the type of this item.
Type flags are set during construction and cannot be altered.
Get: Type(self: GH_Item) -> GH_Types
"""
_bool=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Boolean.
If the data is not stored as a Boolean,a conversion exception might be thrown.
Get: _bool(self: GH_Item) -> bool
"""
_boundingbox=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a BoundingBox.
If the data is not stored as a BoundingBox,a conversion exception might be thrown.
Get: _boundingbox(self: GH_Item) -> GH_BoundingBox
"""
_byte=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Byte.
If the data is not stored as a Byte,a conversion exception might be thrown.
Get: _byte(self: GH_Item) -> Byte
"""
_bytearray=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Byte array.
If the data is not stored as a Byte array,a conversion exception might be thrown.
Get: _bytearray(self: GH_Item) -> Array[Byte]
"""
_date=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a DateTime.
If the data is not stored as a DateTime,a conversion exception might be thrown.
Get: _date(self: GH_Item) -> DateTime
"""
_decimal=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Decimal.
If the data is not stored as a Decimal,a conversion exception might be thrown.
Get: _decimal(self: GH_Item) -> Decimal
"""
_double=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Double.
If the data is not stored as a Double,a conversion exception might be thrown.
Get: _double(self: GH_Item) -> float
"""
_doublearray=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Byte array.
If the data is not stored as a Byte array,a conversion exception might be thrown.
Get: _doublearray(self: GH_Item) -> Array[float]
"""
_drawing_bitmap=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Bitmap.
If the data is not stored as a Bitmap,a conversion exception might be thrown.
Get: _drawing_bitmap(self: GH_Item) -> Bitmap
"""
_drawing_color=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Color.
If the data is not stored as a Color,a conversion exception might be thrown.
Get: _drawing_color(self: GH_Item) -> Color
"""
_drawing_point=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Point.
If the data is not stored as a Point,a conversion exception might be thrown.
Get: _drawing_point(self: GH_Item) -> Point
"""
_drawing_pointf=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a PointF.
If the data is not stored as a PointF,a conversion exception might be thrown.
Get: _drawing_pointf(self: GH_Item) -> PointF
"""
_drawing_rectangle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Rectangle.
If the data is not stored as a Rectangle,a conversion exception might be thrown.
Get: _drawing_rectangle(self: GH_Item) -> Rectangle
"""
_drawing_rectanglef=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a RectangleF.
If the data is not stored as a RectangleF,a conversion exception might be thrown.
Get: _drawing_rectanglef(self: GH_Item) -> RectangleF
"""
_drawing_size=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Size.
If the data is not stored as a Size,a conversion exception might be thrown.
Get: _drawing_size(self: GH_Item) -> Size
"""
_drawing_sizef=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a SizeF.
If the data is not stored as a SizeF,a conversion exception might be thrown.
Get: _drawing_sizef(self: GH_Item) -> SizeF
"""
_guid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Guid.
If the data is not stored as a Guid,a conversion exception might be thrown.
Get: _guid(self: GH_Item) -> Guid
"""
_int32=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to an Int32.
If the data is not stored as an Int32,a conversion exception might be thrown.
Get: _int32(self: GH_Item) -> int
"""
_int64=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to an Int64.
If the data is not stored as an Int64,a conversion exception might be thrown.
Get: _int64(self: GH_Item) -> Int64
"""
_interval1d=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to an Interval1D.
If the data is not stored as an Interval1D,a conversion exception might be thrown.
Get: _interval1d(self: GH_Item) -> GH_Interval1D
"""
_interval2d=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to an Interval2D.
If the data is not stored as an Interval2D,a conversion exception might be thrown.
Get: _interval2d(self: GH_Item) -> GH_Interval2D
"""
_line=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Line.
If the data is not stored as a Line,a conversion exception might be thrown.
Get: _line(self: GH_Item) -> GH_Line
"""
_plane=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Plane.
If the data is not stored as a Plane,a conversion exception might be thrown.
Get: _plane(self: GH_Item) -> GH_Plane
"""
_point2d=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Point2D.
If the data is not stored as a Point2D,a conversion exception might be thrown.
Get: _point2d(self: GH_Item) -> GH_Point2D
"""
_point3d=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Point3D.
If the data is not stored as a Point3D,a conversion exception might be thrown.
Get: _point3d(self: GH_Item) -> GH_Point3D
"""
_point4d=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Point4D.
If the data is not stored as a Point4D,a conversion exception might be thrown.
Get: _point4d(self: GH_Item) -> GH_Point4D
"""
_single=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Single.
If the data is not stored as a Single,a conversion exception might be thrown.
Get: _single(self: GH_Item) -> Single
"""
_string=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a String.
If the data is not stored as a String,a conversion exception might be thrown.
Get: _string(self: GH_Item) -> str
"""
_version=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Version.
If the data is not stored as a Version,a conversion exception might be thrown.
Get: _version(self: GH_Item) -> GH_Version
"""
class GH_Line(object):
    """
    Represents a 3D line segment,denoted by start and endpoints.
    GH_Line(nA: GH_Point3D,nB: GH_Point3D)
    GH_Line(Ax: float,Ay: float,Az: float,Bx: float,By: float,Bz: float)
    """
    # Auto-generated IronPython stub for a CLR type in GH_IO.dll.
    def ToString(self):
        """
        ToString(self: GH_Line) -> str
        Converts this structure to a human-readable string.
        Returns: A string representation of the line structure.
        """
        pass
    @staticmethod
    def __new__(self,*__args):
        """
        __new__[GH_Line]() -> GH_Line
        __new__(cls: type,nA: GH_Point3D,nB: GH_Point3D)
        __new__(cls: type,Ax: float,Ay: float,Az: float,Bx: float,By: float,Bz: float)
        """
        pass
    # Endpoint attributes; populated by the CLR type at runtime.
    A=None
    B=None
class GH_Plane(object):
    """
    Represents a 3D plane system,defined by origin point and {X,Y} axis directions.
    GH_Plane(nOrigin: GH_Point3D,nXAxis: GH_Point3D,nYAxis: GH_Point3D)
    GH_Plane(Ox: float,Oy: float,Oz: float,Xx: float,Xy: float,Xz: float,Yx: float,Yy: float,Yz: float)
    """
    # Auto-generated IronPython stub for a CLR type in GH_IO.dll.
    def ToString(self):
        """
        ToString(self: GH_Plane) -> str
        Converts this structure to a human-readable string.
        Returns: A string representation of the plane structure.
        """
        pass
    @staticmethod
    def __new__(self,*__args):
        """
        __new__[GH_Plane]() -> GH_Plane
        __new__(cls: type,nOrigin: GH_Point3D,nXAxis: GH_Point3D,nYAxis: GH_Point3D)
        __new__(cls: type,Ox: float,Oy: float,Oz: float,Xx: float,Xy: float,Xz: float,Yx: float,Yy: float,Yz: float)
        """
        pass
    # Frame attributes; populated by the CLR type at runtime.
    Origin=None
    XAxis=None
    YAxis=None
class GH_Point2D(object):
    """
    Represents a 2D point coordinate with double precision floating point components.
    GH_Point2D(nx: float,ny: float)
    """
    # Auto-generated IronPython stub for a CLR type in GH_IO.dll.
    def ToString(self):
        """
        ToString(self: GH_Point2D) -> str
        Converts this structure to a human-readable string.
        Returns: A string representation of the two-dimenionsional point structure.
        """
        pass
    @staticmethod
    def __new__(self,nx,ny):
        """
        __new__[GH_Point2D]() -> GH_Point2D
        __new__(cls: type,nx: float,ny: float)
        """
        pass
    # Coordinate attributes; populated by the CLR type at runtime.
    x=None
    y=None
class GH_Point3D(object):
    """
    Represents a 3D point coordinate with double precision floating point components.
    GH_Point3D(nx: float,ny: float,nz: float)
    """
    # Auto-generated IronPython stub for a CLR type in GH_IO.dll.
    def ToString(self):
        """
        ToString(self: GH_Point3D) -> str
        Converts this structure to a human-readable string.
        Returns: A string representation of the three-dimenionsional point structure.
        """
        pass
    @staticmethod
    def __new__(self,nx,ny,nz):
        """
        __new__[GH_Point3D]() -> GH_Point3D
        __new__(cls: type,nx: float,ny: float,nz: float)
        """
        pass
    # Coordinate attributes; populated by the CLR type at runtime.
    x=None
    y=None
    z=None
class GH_Point4D(object):
    """
    Represents a 4D point coordinate with double precision floating point components.
    GH_Point4D(nx: float,ny: float,nz: float,nw: float)
    """
    # Auto-generated IronPython stub for a CLR type in GH_IO.dll.
    def ToString(self):
        """
        ToString(self: GH_Point4D) -> str
        Converts this structure to a human-readable string.
        Returns: A string representation of the four-dimenionsional point structure.
        """
        pass
    @staticmethod
    def __new__(self,nx,ny,nz,nw):
        """
        __new__[GH_Point4D]() -> GH_Point4D
        __new__(cls: type,nx: float,ny: float,nz: float,nw: float)
        """
        pass
    # Coordinate attributes; populated by the CLR type at runtime.
    w=None
    x=None
    y=None
    z=None
class GH_Types(Enum,IComparable,IFormattable,IConvertible):
    """
    Contains flags for all data types currently supported by GH_IO.dll
    enum GH_Types,values: gh_bool (1),gh_boundingbox (71),gh_byte (2),gh_bytearray (20),gh_date (8),gh_decimal (7),gh_double (6),gh_doublearray (21),gh_drawing_bitmap (37),gh_drawing_color (36),gh_drawing_point (30),gh_drawing_pointf (31),gh_drawing_rectangle (34),gh_drawing_rectanglef (35),gh_drawing_size (32),gh_drawing_sizef (33),gh_guid (9),gh_int32 (3),gh_int64 (4),gh_interval1d (60),gh_interval2d (61),gh_line (70),gh_plane (72),gh_point2d (50),gh_point3d (51),gh_point4d (52),gh_single (5),gh_string (10),gh_version (80),unset (0)
    """
    # Auto-generated IronPython stub of a CLR enum; the dunder methods
    # are placeholders and the members below are filled in at runtime.
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
        pass
    def __format__(self,*args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self,*args):
        pass
    def __gt__(self,*args):
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self,*args):
        pass
    def __lt__(self,*args):
        pass
    def __ne__(self,*args):
        pass
    def __reduce_ex__(self,*args):
        pass
    def __str__(self,*args):
        pass
    # Enum members (numeric values listed in the class docstring);
    # populated by the CLR at runtime.
    gh_bool=None
    gh_boundingbox=None
    gh_byte=None
    gh_bytearray=None
    gh_date=None
    gh_decimal=None
    gh_double=None
    gh_doublearray=None
    gh_drawing_bitmap=None
    gh_drawing_color=None
    gh_drawing_point=None
    gh_drawing_pointf=None
    gh_drawing_rectangle=None
    gh_drawing_rectanglef=None
    gh_drawing_size=None
    gh_drawing_sizef=None
    gh_guid=None
    gh_int32=None
    gh_int64=None
    gh_interval1d=None
    gh_interval2d=None
    gh_line=None
    gh_plane=None
    gh_point2d=None
    gh_point3d=None
    gh_point4d=None
    gh_single=None
    gh_string=None
    gh_version=None
    unset=None
    value__=None
class GH_Version(object):
    """
    Basic version type. Contains Major,Minor and Revision fields.
    GH_Version(v_major: int,v_minor: int,v_revision: int)
    GH_Version(other: GH_Version)
    """
    # Auto-generated IronPython stub for a CLR type in GH_IO.dll.
    def Equals(self,obj):
        """
        Equals(self: GH_Version,obj: object) -> bool
        Performs value equality comparison.
        obj: Object to compare with.
        If obj is a null reference or not a GH_Version instance,
        false is returned.
        Returns: True if obj is a GH_Version instance which is equal to this one.
        """
        pass
    def GetHashCode(self):
        """
        GetHashCode(self: GH_Version) -> int
        Returns the hash code for this instance.
        Returns: A hash code for the current version object.
        """
        pass
    def ToString(self):
        """
        ToString(self: GH_Version) -> str
        Default formatter for Version data: M.m.RRRR
        Revision section is padded with
        zeroes until it is at least 4 digits long.
        Returns: A string represtation of the Version structure.
        """
        pass
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==y """
        pass
    def __ge__(self,*args):
        pass
    def __gt__(self,*args):
        pass
    def __le__(self,*args):
        pass
    def __lt__(self,*args):
        pass
    @staticmethod
    def __new__(self,*__args):
        """
        __new__[GH_Version]() -> GH_Version
        __new__(cls: type,v_major: int,v_minor: int,v_revision: int)
        __new__(cls: type,other: GH_Version)
        """
        pass
    def __ne__(self,*args):
        pass
    # Version components; populated by the CLR type at runtime.
    major=None
    minor=None
    revision=None
| """ NamespaceTracker represent a CLS namespace. """
class Gh_Boundingbox(object):
"""
Represents a 3D bounding box,denoted by two points.
GH_BoundingBox(nMin: GH_Point3D,nMax: GH_Point3D)
GH_BoundingBox(Minx: float,Miny: float,Minz: float,Maxx: float,Maxy: float,Maxz: float)
"""
def to_string(self):
"""
ToString(self: GH_BoundingBox) -> str
Converts this structure to a human-readable string.
Returns: A string representation of the box structure.
"""
pass
@staticmethod
def __new__(self, *__args):
"""
__new__[GH_BoundingBox]() -> GH_BoundingBox
__new__(cls: type,nMin: GH_Point3D,nMax: GH_Point3D)
__new__(cls: type,Minx: float,Miny: float,Minz: float,Maxx: float,Maxy: float,Maxz: float)
"""
pass
max = None
min = None
class Gh_Interval1D(object):
"""
Represents two double precision floating point values.
GH_Interval1D(na: float,nb: float)
"""
def to_string(self):
"""
ToString(self: GH_Interval1D) -> str
Converts this structure to a human-readable string.
Returns: A string representation of the Interval structure.
"""
pass
@staticmethod
def __new__(self, na, nb):
"""
__new__[GH_Interval1D]() -> GH_Interval1D
__new__(cls: type,na: float,nb: float)
"""
pass
a = None
b = None
class Gh_Interval2D(object):
"""
Represents two double precision domains.
GH_Interval2D(nu: GH_Interval1D,nv: GH_Interval1D)
GH_Interval2D(nu0: float,nu1: float,nv0: float,nv1: float)
"""
def to_string(self):
"""
ToString(self: GH_Interval2D) -> str
Converts this structure to a human-readable string.
Returns: A string representation of the two-dimensional Interval structure.
"""
pass
@staticmethod
def __new__(self, *__args):
"""
__new__[GH_Interval2D]() -> GH_Interval2D
__new__(cls: type,nu: GH_Interval1D,nv: GH_Interval1D)
__new__(cls: type,nu0: float,nu1: float,nv0: float,nv1: float)
"""
pass
u = None
v = None
class Gh_Item(object, GH_IBinarySupport, GH_IXmlSupport):
"""
Represents a single data item in a chunk.
GH_Item(item_name: str,item_data: bool)
GH_Item(item_name: str,item_index: int,item_data: bool)
GH_Item(item_name: str,item_data: Byte)
GH_Item(item_name: str,item_index: int,item_data: Byte)
GH_Item(item_name: str,item_data: int)
GH_Item(item_name: str,item_index: int,item_data: int)
GH_Item(item_name: str,item_data: Int64)
GH_Item(item_name: str,item_index: int,item_data: Int64)
GH_Item(item_name: str,item_data: Single)
GH_Item(item_name: str,item_index: int,item_data: Single)
GH_Item(item_name: str,item_data: float)
GH_Item(item_name: str,item_index: int,item_data: float)
GH_Item(item_name: str,item_data: Decimal)
GH_Item(item_name: str,item_index: int,item_data: Decimal)
GH_Item(item_name: str,item_data: DateTime)
GH_Item(item_name: str,item_index: int,item_data: DateTime)
GH_Item(item_name: str,item_data: Guid)
GH_Item(item_name: str,item_index: int,item_data: Guid)
GH_Item(item_name: str,item_data: str)
GH_Item(item_name: str,item_index: int,item_data: str)
GH_Item(item_name: str,item_data: Array[Byte])
GH_Item(item_name: str,item_index: int,item_data: Array[Byte])
GH_Item(item_name: str,item_data: Array[float])
GH_Item(item_name: str,item_index: int,item_data: Array[float])
GH_Item(item_name: str,item_data: Point)
GH_Item(item_name: str,item_index: int,item_data: Point)
GH_Item(item_name: str,item_data: PointF)
GH_Item(item_name: str,item_index: int,item_data: PointF)
GH_Item(item_name: str,item_data: Size)
GH_Item(item_name: str,item_index: int,item_data: Size)
GH_Item(item_name: str,item_data: SizeF)
GH_Item(item_name: str,item_index: int,item_data: SizeF)
GH_Item(item_name: str,item_data: Rectangle)
GH_Item(item_name: str,item_index: int,item_data: Rectangle)
GH_Item(item_name: str,item_data: RectangleF)
GH_Item(item_name: str,item_index: int,item_data: RectangleF)
GH_Item(item_name: str,item_data: Color)
GH_Item(item_name: str,item_index: int,item_data: Color)
GH_Item(item_name: str,item_data: Bitmap)
GH_Item(item_name: str,item_index: int,item_data: Bitmap)
GH_Item(item_name: str,item_data: GH_Point2D)
GH_Item(item_name: str,item_index: int,item_data: GH_Point2D)
GH_Item(item_name: str,item_data: GH_Point3D)
GH_Item(item_name: str,item_index: int,item_data: GH_Point3D)
GH_Item(item_name: str,item_data: GH_Point4D)
GH_Item(item_name: str,item_index: int,item_data: GH_Point4D)
GH_Item(item_name: str,item_data: GH_Interval1D)
GH_Item(item_name: str,item_index: int,item_data: GH_Interval1D)
GH_Item(item_name: str,item_data: GH_Interval2D)
GH_Item(item_name: str,item_index: int,item_data: GH_Interval2D)
GH_Item(item_name: str,item_data: GH_Line)
GH_Item(item_name: str,item_index: int,item_data: GH_Line)
GH_Item(item_name: str,item_data: GH_BoundingBox)
GH_Item(item_name: str,item_index: int,item_data: GH_BoundingBox)
GH_Item(item_name: str,item_data: GH_Plane)
GH_Item(item_name: str,item_index: int,item_data: GH_Plane)
GH_Item(item_name: str,item_data: GH_Version)
GH_Item(item_name: str,item_index: int,item_data: GH_Version)
"""
@staticmethod
def create_from(*__args):
"""
CreateFrom(node: XmlNode) -> GH_Item
Creates a new instance of GH_Item and sets the fields from an Xml node object.
node: Xml node object that defines the field data.
Returns: The constructed and read item.
CreateFrom(reader: BinaryReader) -> GH_Item
Creates a new instance of GH_Item and sets the fields from a reader object.
reader: Reader object that defines the field data.
Returns: The constructed and read item.
"""
pass
def read(self, *__args):
"""
Read(self: GH_Item,node: XmlNode)
Deserialize this item from an Xml node.
node: Xml node to serialize from.
Read(self: GH_Item,reader: BinaryReader)
Deserialize this item from a binary stream.
reader: Reader to deserialize with.
"""
pass
def to_string(self):
"""
ToString(self: GH_Item) -> str
Converts the struct into a human readable format.
"""
pass
def write(self, writer):
"""
Write(self: GH_Item,writer: XmlWriter)
Serialize this item into an Xml stream.
writer: Writer to serialize with.
Write(self: GH_Item,writer: BinaryWriter)
Serialize this item into a binary stream.
writer: Writer to serialize with.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, item_name, *__args):
"""
__new__(cls: type,item_name: str,item_data: bool)
__new__(cls: type,item_name: str,item_index: int,item_data: bool)
__new__(cls: type,item_name: str,item_data: Byte)
__new__(cls: type,item_name: str,item_index: int,item_data: Byte)
__new__(cls: type,item_name: str,item_data: int)
__new__(cls: type,item_name: str,item_index: int,item_data: int)
__new__(cls: type,item_name: str,item_data: Int64)
__new__(cls: type,item_name: str,item_index: int,item_data: Int64)
__new__(cls: type,item_name: str,item_data: Single)
__new__(cls: type,item_name: str,item_index: int,item_data: Single)
__new__(cls: type,item_name: str,item_data: float)
__new__(cls: type,item_name: str,item_index: int,item_data: float)
__new__(cls: type,item_name: str,item_data: Decimal)
__new__(cls: type,item_name: str,item_index: int,item_data: Decimal)
__new__(cls: type,item_name: str,item_data: DateTime)
__new__(cls: type,item_name: str,item_index: int,item_data: DateTime)
__new__(cls: type,item_name: str,item_data: Guid)
__new__(cls: type,item_name: str,item_index: int,item_data: Guid)
__new__(cls: type,item_name: str,item_data: str)
__new__(cls: type,item_name: str,item_index: int,item_data: str)
__new__(cls: type,item_name: str,item_data: Array[Byte])
__new__(cls: type,item_name: str,item_index: int,item_data: Array[Byte])
__new__(cls: type,item_name: str,item_data: Array[float])
__new__(cls: type,item_name: str,item_index: int,item_data: Array[float])
__new__(cls: type,item_name: str,item_data: Point)
__new__(cls: type,item_name: str,item_index: int,item_data: Point)
__new__(cls: type,item_name: str,item_data: PointF)
__new__(cls: type,item_name: str,item_index: int,item_data: PointF)
__new__(cls: type,item_name: str,item_data: Size)
__new__(cls: type,item_name: str,item_index: int,item_data: Size)
__new__(cls: type,item_name: str,item_data: SizeF)
__new__(cls: type,item_name: str,item_index: int,item_data: SizeF)
__new__(cls: type,item_name: str,item_data: Rectangle)
__new__(cls: type,item_name: str,item_index: int,item_data: Rectangle)
__new__(cls: type,item_name: str,item_data: RectangleF)
__new__(cls: type,item_name: str,item_index: int,item_data: RectangleF)
__new__(cls: type,item_name: str,item_data: Color)
__new__(cls: type,item_name: str,item_index: int,item_data: Color)
__new__(cls: type,item_name: str,item_data: Bitmap)
__new__(cls: type,item_name: str,item_index: int,item_data: Bitmap)
__new__(cls: type,item_name: str,item_data: GH_Point2D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Point2D)
__new__(cls: type,item_name: str,item_data: GH_Point3D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Point3D)
__new__(cls: type,item_name: str,item_data: GH_Point4D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Point4D)
__new__(cls: type,item_name: str,item_data: GH_Interval1D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Interval1D)
__new__(cls: type,item_name: str,item_data: GH_Interval2D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Interval2D)
__new__(cls: type,item_name: str,item_data: GH_Line)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Line)
__new__(cls: type,item_name: str,item_data: GH_BoundingBox)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_BoundingBox)
__new__(cls: type,item_name: str,item_data: GH_Plane)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Plane)
__new__(cls: type,item_name: str,item_data: GH_Version)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Version)
"""
pass
def __repr__(self, *args):
""" __repr__(self: object) -> str """
pass
def __str__(self, *args):
pass
debugger_display = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Readonly property used during Debugging.\n\n\n\nGet: DebuggerDisplay(self: GH_Item) -> str\n\n\n\n'
has_index = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Gets the index existence implication. The item is considered to have an index qualifier \n\n if the index value is larger than or equal to zero.\n\n\n\nGet: HasIndex(self: GH_Item) -> bool\n\n\n\n'
has_name = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Gets the name validity of this item. \n\n The item is considered to have an invalid name if string.IsNullOrEmpty(name)\n\n\n\nGet: HasName(self: GH_Item) -> bool\n\n\n\n'
has_type = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Gets the type set validity of this item. \n\n The item is considered to have a type if type != GH_Types.unset\n\n\n\nGet: HasType(self: GH_Item) -> bool\n\n\n\n'
index = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Gets or sets the index of an item. \n\n Typically,indices are set at construction and do not change. \n\n If you change indices after construction,you could corrupt an archive.\n\n\n\nGet: Index(self: GH_Item) -> int\n\n\n\nSet: Index(self: GH_Item)=value\n\n'
internal_data = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Retrieves the internal data of this item. \n\n No type casting is performed.\n\n\n\nGet: InternalData(self: GH_Item) -> object\n\n\n\n'
name = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Gets or sets the name of this item. \n\n Typically,names are set at construction and do not change. \n\n If you change names after construction,you could corrupt an archive.\n\n\n\nGet: Name(self: GH_Item) -> str\n\n\n\nSet: Name(self: GH_Item)=value\n\n'
type = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Gets the type of this item. \n\n Type flags are set during construction and cannot be altered.\n\n\n\nGet: Type(self: GH_Item) -> GH_Types\n\n\n\n'
_bool = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Boolean.\n\n If the data is not stored as a Boolean,a conversion exception might be thrown.\n\n\n\nGet: _bool(self: GH_Item) -> bool\n\n\n\n'
_boundingbox = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a BoundingBox.\n\n If the data is not stored as a BoundingBox,a conversion exception might be thrown.\n\n\n\nGet: _boundingbox(self: GH_Item) -> GH_BoundingBox\n\n\n\n'
_byte = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Byte.\n\n If the data is not stored as a Byte,a conversion exception might be thrown.\n\n\n\nGet: _byte(self: GH_Item) -> Byte\n\n\n\n'
_bytearray = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Byte array.\n\n If the data is not stored as a Byte array,a conversion exception might be thrown.\n\n\n\nGet: _bytearray(self: GH_Item) -> Array[Byte]\n\n\n\n'
_date = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a DateTime.\n\n If the data is not stored as a DateTime,a conversion exception might be thrown.\n\n\n\nGet: _date(self: GH_Item) -> DateTime\n\n\n\n'
_decimal = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Decimal.\n\n If the data is not stored as a Decimal,a conversion exception might be thrown.\n\n\n\nGet: _decimal(self: GH_Item) -> Decimal\n\n\n\n'
_double = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Double.\n\n If the data is not stored as a Double,a conversion exception might be thrown.\n\n\n\nGet: _double(self: GH_Item) -> float\n\n\n\n'
_doublearray = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Byte array.\n\n If the data is not stored as a Byte array,a conversion exception might be thrown.\n\n\n\nGet: _doublearray(self: GH_Item) -> Array[float]\n\n\n\n'
_drawing_bitmap = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Bitmap.\n\n If the data is not stored as a Bitmap,a conversion exception might be thrown.\n\n\n\nGet: _drawing_bitmap(self: GH_Item) -> Bitmap\n\n\n\n'
_drawing_color = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Color.\n\n If the data is not stored as a Color,a conversion exception might be thrown.\n\n\n\nGet: _drawing_color(self: GH_Item) -> Color\n\n\n\n'
_drawing_point = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Point.\n\n If the data is not stored as a Point,a conversion exception might be thrown.\n\n\n\nGet: _drawing_point(self: GH_Item) -> Point\n\n\n\n'
_drawing_pointf = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a PointF.\n\n If the data is not stored as a PointF,a conversion exception might be thrown.\n\n\n\nGet: _drawing_pointf(self: GH_Item) -> PointF\n\n\n\n'
_drawing_rectangle = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Rectangle.\n\n If the data is not stored as a Rectangle,a conversion exception might be thrown.\n\n\n\nGet: _drawing_rectangle(self: GH_Item) -> Rectangle\n\n\n\n'
_drawing_rectanglef = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a RectangleF.\n\n If the data is not stored as a RectangleF,a conversion exception might be thrown.\n\n\n\nGet: _drawing_rectanglef(self: GH_Item) -> RectangleF\n\n\n\n'
_drawing_size = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Size.\n\n If the data is not stored as a Size,a conversion exception might be thrown.\n\n\n\nGet: _drawing_size(self: GH_Item) -> Size\n\n\n\n'
_drawing_sizef = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a SizeF.\n\n If the data is not stored as a SizeF,a conversion exception might be thrown.\n\n\n\nGet: _drawing_sizef(self: GH_Item) -> SizeF\n\n\n\n'
_guid = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Guid.\n\n If the data is not stored as a Guid,a conversion exception might be thrown.\n\n\n\nGet: _guid(self: GH_Item) -> Guid\n\n\n\n'
_int32 = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to an Int32.\n\n If the data is not stored as an Int32,a conversion exception might be thrown.\n\n\n\nGet: _int32(self: GH_Item) -> int\n\n\n\n'
_int64 = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to an Int64.\n\n If the data is not stored as an Int64,a conversion exception might be thrown.\n\n\n\nGet: _int64(self: GH_Item) -> Int64\n\n\n\n'
_interval1d = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to an Interval1D.\n\n If the data is not stored as an Interval1D,a conversion exception might be thrown.\n\n\n\nGet: _interval1d(self: GH_Item) -> GH_Interval1D\n\n\n\n'
_interval2d = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to an Interval2D.\n\n If the data is not stored as an Interval2D,a conversion exception might be thrown.\n\n\n\nGet: _interval2d(self: GH_Item) -> GH_Interval2D\n\n\n\n'
_line = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Line.\n\n If the data is not stored as a Line,a conversion exception might be thrown.\n\n\n\nGet: _line(self: GH_Item) -> GH_Line\n\n\n\n'
_plane = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Plane.\n\n If the data is not stored as a Plane,a conversion exception might be thrown.\n\n\n\nGet: _plane(self: GH_Item) -> GH_Plane\n\n\n\n'
_point2d = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Point2D.\n\n If the data is not stored as a Point2D,a conversion exception might be thrown.\n\n\n\nGet: _point2d(self: GH_Item) -> GH_Point2D\n\n\n\n'
_point3d = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Point3D.\n\n If the data is not stored as a Point3D,a conversion exception might be thrown.\n\n\n\nGet: _point3d(self: GH_Item) -> GH_Point3D\n\n\n\n'
_point4d = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Point4D.\n\n If the data is not stored as a Point4D,a conversion exception might be thrown.\n\n\n\nGet: _point4d(self: GH_Item) -> GH_Point4D\n\n\n\n'
_single = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Single.\n\n If the data is not stored as a Single,a conversion exception might be thrown.\n\n\n\nGet: _single(self: GH_Item) -> Single\n\n\n\n'
_string = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a String.\n\n If the data is not stored as a String,a conversion exception might be thrown.\n\n\n\nGet: _string(self: GH_Item) -> str\n\n\n\n'
_version = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Returns the internal data of this item cast to a Version.\n\n If the data is not stored as a Version,a conversion exception might be thrown.\n\n\n\nGet: _version(self: GH_Item) -> GH_Version\n\n\n\n'
class Gh_Line(object):
    """
    Represents a 3D line segment,denoted by start and endpoints.
    GH_Line(nA: GH_Point3D,nB: GH_Point3D)
    GH_Line(Ax: float,Ay: float,Az: float,Bx: float,By: float,Bz: float)
    """
    # NOTE(review): auto-generated stub; bodies are placeholders.
    def to_string(self):
        """
        ToString(self: GH_Line) -> str
        Converts this structure to a human-readable string.
        Returns: A string representation of the line structure.
        """
        pass
    @staticmethod
    def __new__(self, *__args):
        """
        __new__[GH_Line]() -> GH_Line
        __new__(cls: type,nA: GH_Point3D,nB: GH_Point3D)
        __new__(cls: type,Ax: float,Ay: float,Az: float,Bx: float,By: float,Bz: float)
        """
        pass
    # Start/end point placeholders.
    a = None
    b = None
class Gh_Plane(object):
    """
    Represents a 3D plane system,defined by origin point and {X,Y} axis directions.
    GH_Plane(nOrigin: GH_Point3D,nXAxis: GH_Point3D,nYAxis: GH_Point3D)
    GH_Plane(Ox: float,Oy: float,Oz: float,Xx: float,Xy: float,Xz: float,Yx: float,Yy: float,Yz: float)
    """
    # NOTE(review): auto-generated stub; bodies are placeholders.
    def to_string(self):
        """
        ToString(self: GH_Plane) -> str
        Converts this structure to a human-readable string.
        Returns: A string representation of the plane structure.
        """
        pass
    @staticmethod
    def __new__(self, *__args):
        """
        __new__[GH_Plane]() -> GH_Plane
        __new__(cls: type,nOrigin: GH_Point3D,nXAxis: GH_Point3D,nYAxis: GH_Point3D)
        __new__(cls: type,Ox: float,Oy: float,Oz: float,Xx: float,Xy: float,Xz: float,Yx: float,Yy: float,Yz: float)
        """
        pass
    # Plane frame placeholders: origin plus the two axis directions.
    origin = None
    x_axis = None
    y_axis = None
class Gh_Point2D(object):
    """
    Represents a 2D point coordinate with double precision floating point components.
    GH_Point2D(nx: float,ny: float)
    """
    # NOTE(review): auto-generated stub; bodies are placeholders.
    def to_string(self):
        """
        ToString(self: GH_Point2D) -> str
        Converts this structure to a human-readable string.
        Returns: A string representation of the two-dimenionsional point structure.
        """
        pass
    @staticmethod
    def __new__(self, nx, ny):
        """
        __new__[GH_Point2D]() -> GH_Point2D
        __new__(cls: type,nx: float,ny: float)
        """
        pass
    # Coordinate placeholders.
    x = None
    y = None
class Gh_Point3D(object):
    """
    Represents a 3D point coordinate with double precision floating point components.
    GH_Point3D(nx: float,ny: float,nz: float)
    """
    # NOTE(review): auto-generated stub; bodies are placeholders.
    def to_string(self):
        """
        ToString(self: GH_Point3D) -> str
        Converts this structure to a human-readable string.
        Returns: A string representation of the three-dimenionsional point structure.
        """
        pass
    @staticmethod
    def __new__(self, nx, ny, nz):
        """
        __new__[GH_Point3D]() -> GH_Point3D
        __new__(cls: type,nx: float,ny: float,nz: float)
        """
        pass
    # Coordinate placeholders.
    x = None
    y = None
    z = None
class Gh_Point4D(object):
    """
    Represents a 4D point coordinate with double precision floating point components.
    GH_Point4D(nx: float,ny: float,nz: float,nw: float)
    """
    # NOTE(review): auto-generated stub; bodies are placeholders.
    def to_string(self):
        """
        ToString(self: GH_Point4D) -> str
        Converts this structure to a human-readable string.
        Returns: A string representation of the four-dimenionsional point structure.
        """
        pass
    @staticmethod
    def __new__(self, nx, ny, nz, nw):
        """
        __new__[GH_Point4D]() -> GH_Point4D
        __new__(cls: type,nx: float,ny: float,nz: float,nw: float)
        """
        pass
    # Coordinate placeholders (w is the fourth/homogeneous component per the ctor).
    w = None
    x = None
    y = None
    z = None
class Gh_Types(Enum, IComparable, IFormattable, IConvertible):
"""
Contains flags for all data types currently supported by GH_IO.dll
enum GH_Types,values: gh_bool (1),gh_boundingbox (71),gh_byte (2),gh_bytearray (20),gh_date (8),gh_decimal (7),gh_double (6),gh_doublearray (21),gh_drawing_bitmap (37),gh_drawing_color (36),gh_drawing_point (30),gh_drawing_pointf (31),gh_drawing_rectangle (34),gh_drawing_rectanglef (35),gh_drawing_size (32),gh_drawing_sizef (33),gh_guid (9),gh_int32 (3),gh_int64 (4),gh_interval1d (60),gh_interval2d (61),gh_line (70),gh_plane (72),gh_point2d (50),gh_point3d (51),gh_point4d (52),gh_single (5),gh_string (10),gh_version (80),unset (0)
"""
def __eq__(self, *args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
gh_bool = None
gh_boundingbox = None
gh_byte = None
gh_bytearray = None
gh_date = None
gh_decimal = None
gh_double = None
gh_doublearray = None
gh_drawing_bitmap = None
gh_drawing_color = None
gh_drawing_point = None
gh_drawing_pointf = None
gh_drawing_rectangle = None
gh_drawing_rectanglef = None
gh_drawing_size = None
gh_drawing_sizef = None
gh_guid = None
gh_int32 = None
gh_int64 = None
gh_interval1d = None
gh_interval2d = None
gh_line = None
gh_plane = None
gh_point2d = None
gh_point3d = None
gh_point4d = None
gh_single = None
gh_string = None
gh_version = None
unset = None
value__ = None
class Gh_Version(object):
    """
    Basic version type. Contains Major,Minor and Revision fields.
    GH_Version(v_major: int,v_minor: int,v_revision: int)
    GH_Version(other: GH_Version)
    """
    # NOTE(review): auto-generated stub mirroring a CLR version type; all method
    # bodies are placeholders.
    def equals(self, obj):
        """
        Equals(self: GH_Version,obj: object) -> bool
        Performs value equality comparison.
        obj: Object to compare with.
        If obj is a null reference or not a GH_Version instance,
        false is returned.
        Returns: True if obj is a GH_Version instance which is equal to this one.
        """
        pass
    def get_hash_code(self):
        """
        GetHashCode(self: GH_Version) -> int
        Returns the hash code for this instance.
        Returns: A hash code for the current version object.
        """
        pass
    def to_string(self):
        """
        ToString(self: GH_Version) -> str
        Default formatter for Version data: M.m.RRRR
        Revision section is padded with
        zeroes until it is at least 4 digits long.
        Returns: A string represtation of the Version structure.
        """
        pass
    # Rich-comparison stubs below mirror the CLR operators; no Python logic here.
    def __eq__(self, *args):
        """ x.__eq__(y) <==> x==y """
        pass
    def __ge__(self, *args):
        pass
    def __gt__(self, *args):
        pass
    def __le__(self, *args):
        pass
    def __lt__(self, *args):
        pass
    @staticmethod
    def __new__(self, *__args):
        """
        __new__[GH_Version]() -> GH_Version
        __new__(cls: type,v_major: int,v_minor: int,v_revision: int)
        __new__(cls: type,other: GH_Version)
        """
        pass
    def __ne__(self, *args):
        pass
    # Version-component placeholders (see class docstring).
    major = None
    minor = None
    revision = None
def _parse_categories(lines):
"""
Read (category_id, category_name) pairs from the categories section.
Each line consists of an integer followed a tab and then the category name.
This section is separated from the lexicon by a line consisting of a single "%".
"""
for line in lines:
line = line.strip()
if line == "%":
return
# ignore non-matching groups of categories
if "\t" in line:
category_id, category_name = line.split("\t", 1)
yield category_id, category_name
def _parse_lexicon(lines, category_mapping):
"""
Read (match_expression, category_names) pairs from the lexicon section.
Each line consists of a match expression followed by a tab and then one or more
tab-separated integers, which are mapped to category names using `category_mapping`.
"""
for line in lines:
line = line.strip()
parts = line.split("\t")
yield parts[0], [category_mapping[category_id] for category_id in parts[1:]]
def read_dic(filepath):
    """
    Read a LIWC lexicon stored in the .dic format.

    Returns a (lexicon, category_names) tuple where:
    * `lexicon` maps string patterns to lists of category names
    * `category_names` is a list of category names (as strings)
    """
    with open(filepath) as lines:
        # Skip the preamble up to the first "%" (normally the very first line).
        for line in lines:
            if line.strip() == "%":
                break
        # Integer-string id -> category name, from the categories section.
        category_mapping = dict(_parse_categories(lines))
        # Match pattern -> list of category names, from the lexicon section.
        lexicon = dict(_parse_lexicon(lines, category_mapping))
    return lexicon, list(category_mapping.values())
| def _parse_categories(lines):
"""
Read (category_id, category_name) pairs from the categories section.
Each line consists of an integer followed a tab and then the category name.
This section is separated from the lexicon by a line consisting of a single "%".
"""
for line in lines:
line = line.strip()
if line == '%':
return
if '\t' in line:
(category_id, category_name) = line.split('\t', 1)
yield (category_id, category_name)
def _parse_lexicon(lines, category_mapping):
    """
    Read (match_expression, category_names) pairs from the lexicon section.
    Each line consists of a match expression followed by a tab and then one or more
    tab-separated integers, which are mapped to category names using `category_mapping`.
    """
    for line in lines:
        line = line.strip()
        # First field is the match expression; remaining fields are category ids.
        parts = line.split('\t')
        # Translate each id to its human-readable category name.
        yield (parts[0], [category_mapping[category_id] for category_id in parts[1:]])
def read_dic(filepath):
    """
    Reads a LIWC lexicon from a file in the .dic format, returning a tuple of
    (lexicon, category_names), where:
    * `lexicon` is a dict mapping string patterns to lists of category names
    * `category_names` is a list of category names (as strings)
    """
    with open(filepath) as lines:
        # Skip the preamble up to the first "%" (normally the very first line).
        for line in lines:
            if line.strip() == '%':
                break
        # Integer-string id -> category name, from the categories section.
        category_mapping = dict(_parse_categories(lines))
        # Match pattern -> list of category names, from the lexicon section.
        lexicon = dict(_parse_lexicon(lines, category_mapping))
        return (lexicon, list(category_mapping.values()))
def test_example() -> None:
    """Sanity checks for basic arithmetic and string concatenation."""
    assert True, "not True"
    # String concatenation.
    assert "ab" + "bc" == "abbc"
    # Arithmetic identities.
    assert 2 * 2 == 4
    assert 4 / 2 == 2
    assert 1 + 1 == 2
| def test_example() -> None:
assert True, 'not True'
assert 1 + 1 == 2
assert 4 / 2 == 2
assert 2 * 2 == 4
assert 'ab' + 'bc' == 'abbc' |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/config/hp_defaults.ipynb (unless otherwise specified).
__all__ = ['allow_base_class', 'name_epoch', 'name_last_epoch', 'name_model_history', 'model_file_name',
'path_experiments', 'defaults', 'root', 'metric', 'op', 'result_file', 'min_iterations', 'use_previous_best',
'name_logger', 'verbose']
# Cell
# Default settings for the hyper-parameter search machinery (values unchanged).
allow_base_class = False

# Bookkeeping names for epochs and saved model artifacts.
name_epoch = 'epochs'
name_last_epoch = 'last_epoch'
name_model_history = 'model_history.pk'
model_file_name = 'model.h5'

# Experiment storage layout.
path_experiments = 'results/hpsearch'
defaults = {}
root = ''

# Metric configuration: maximize accuracy by default.
metric = 'accuracy'
op = 'max'
result_file = 'dict_results.pk'
min_iterations = 50
use_previous_best = True
name_logger = 'experiment_manager'
verbose=0 | __all__ = ['allow_base_class', 'name_epoch', 'name_last_epoch', 'name_model_history', 'model_file_name', 'path_experiments', 'defaults', 'root', 'metric', 'op', 'result_file', 'min_iterations', 'use_previous_best', 'name_logger', 'verbose']
allow_base_class = False
name_epoch = 'epochs'
name_last_epoch = 'last_epoch'
name_model_history = 'model_history.pk'
model_file_name = 'model.h5'
path_experiments = 'results/hpsearch'
defaults = {}
root = ''
metric = 'accuracy'
op = 'max'
result_file = 'dict_results.pk'
min_iterations = 50
use_previous_best = True
name_logger = 'experiment_manager'
verbose = 0 |
# Given the head of a singly linked list, group all thenodes
# # with odd indices together followed by the nodes with even indices, and return the reordered list.
# #
# # The first node is considered odd, and the secondnode is even, and so on.
# #
# # Note that the relative order inside both the even and odd groups should remain as it
# # was in the input.
# #
# # You must solve the problem in O(1) extra space complexity and O(n)
# # time complexity.
# #
# # Example 1:
# #
# # Input: head = [1, 2, 3, 4, 5]
# # Output: [1, 3, 5, 2, 4]
# #
# # Example 2:
# #
# # Input: head = [2, 1, 3, 5, 6, 4, 7]
# # Output: [2, 3, 6, 7, 1, 5, 4]
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def oddEvenList(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """
        Regroup a singly linked list so that all odd-indexed nodes (1-based)
        come first, followed by all even-indexed nodes, preserving relative
        order within each group.  Runs in O(n) time with O(1) extra space.
        """
        # Lists with fewer than three nodes are already in the required order.
        if head is None or head.next is None or head.next.next is None:
            return head
        odd = head
        even = head.next
        even_head = head.next  # first even node; reattached after the odd chain
        while even and even.next:
            # Link the next odd node onto the odd chain ...
            odd.next = even.next
            odd = odd.next
            # ... and the node after it onto the even chain.
            even.next = odd.next
            even = even.next
        # Stitch the even chain onto the tail of the odd chain.
        odd.next = even_head
        return head
| class Solution:
def odd_even_list(self, head: Optional[ListNode]) -> Optional[ListNode]:
    """
    Regroup a singly linked list so that all odd-indexed nodes (1-based)
    come first, followed by all even-indexed nodes, preserving relative
    order within each group.  Runs in O(n) time with O(1) extra space.
    """
    # Lists with fewer than three nodes are already in the required order.
    if head is None or head.next is None or head.next.next is None:
        return head
    odd = head
    even = head.next
    even_head = head.next  # first even node; reattached after the odd chain
    while even and even.next:
        # Link the next odd node onto the odd chain, then the following
        # node onto the even chain.
        odd.next = even.next
        odd = odd.next
        even.next = odd.next
        even = even.next
    # Stitch the even chain onto the tail of the odd chain.
    odd.next = even_head
    return head
class Atom:
    """Record for a single atom's identity, coordinates and force-field data.

    All fields live in __slots__; any field not supplied at construction
    defaults to None, and missing fields can later be filled in from
    another Atom via populate().
    """

    __slots__ = ["name", "type", "resname", "resid", "x", "y", "z",
                 "diameter", "rotmass", "charge", "mass",
                 "sigma", "epsilon", "dipole"]

    def __init__(self, **kwargs):
        # Every slot is initialised; absent keywords default to None.
        for attr in self.__slots__:
            setattr(self, attr, kwargs.get(attr))

    def __repr__(self):
        return "<Atom name={0}, resname={1}, resnum={2}, type={3}>".format(
            self.name, self.resname, self.resid, self.type)

    @staticmethod
    def compare(val1, val2):
        """
        Compare two values.
        Return the second value if both values are the same or the first value is None.
        Return the first value if the second value is None.
        Raise exception if values are different and neither is None.
        Args:
            val1: First value
            val2: Second value
        Returns: One of the values
        """
        # Equal values and a missing first value both resolve to val2.
        if val1 == val2 or val1 is None:
            return val2
        if val2 is None:
            return val1
        raise ValueError("Values for comparison are different and not None.")

    def populate(self, other):
        """
        Populate this Atom using values from Atom other, where this Atom is missing data.
        Args:
            other: Another Atom instance
        Returns: Nothing
        """
        # Field-by-field merge; conflicting non-None values raise ValueError.
        for attr in self.__slots__:
            setattr(self, attr, Atom.compare(getattr(self, attr), getattr(other, attr)))
| class Atom:
__slots__ = ['name', 'type', 'resname', 'resid', 'x', 'y', 'z', 'diameter', 'rotmass', 'charge', 'mass', 'sigma', 'epsilon', 'dipole']
def __init__(self, **kwargs):
for key in self.__slots__:
try:
setattr(self, key, kwargs[key])
except KeyError:
setattr(self, key, None)
def __repr__(self):
line = '<Atom name={0}, resname={1}, resnum={2}, type={3}>'
return line.format(self.name, self.resname, self.resid, self.type)
@staticmethod
def compare(val1, val2):
"""
Compare two values.
Return the second value if both values are the same or the first value is None.
Return the first value if the second value is None.
Raise exception if values are different and neither is None.
Args:
val1: First value
val2: Second value
Returns: One of the values
"""
if val1 == val2:
return val2
elif val1 is None:
return val2
elif val2 is None:
return val1
else:
raise value_error('Values for comparison are different and not None.')
def populate(self, other):
"""
Populate this Atom using values from Atom other, where this Atom is missing data.
Args:
other: Another Atom instance
Returns: Nothing
"""
for key in self.__slots__:
val = Atom.compare(getattr(self, key), getattr(other, key))
setattr(self, key, val) |
class Question:
    """A quiz prompt paired with its expected answer."""

    def __init__(self, text, answer):
        """Store the question *text* and its correct *answer* verbatim."""
        self.text, self.answer = text, answer
| class Question:
def __init__(self, text, answer):
self.text = text
self.answer = answer |
# Reversing via reversed(): builds a new list (extra O(n) memory) and
# leaves the original untouched -- equivalent to the lis[::-1] slice idiom.
lis = [1, 2, 3]
reversed_list = list(reversed(lis))
print(reversed_list)
| lis = [1, 2, 3]
reversed_list = lis[::-1]
print(reversed_list) |
# Non-preemptive scheduling demo for 5 hard-coded processes.
# Identities used (AT=arrival, BT=burst, ST=start, CT=completion,
# TAT=turnaround, WT=waiting):
#TAT = CT - AT => CT = AT + TAT
#WT = TAT - BT => TAT = BT + WT
#TAT = BT + WT => BT = TAT - WT
#CT = ST + BT => CT = ST + TAT - WT
#for creating a 2D Array
# process[i] holds [arrivalTime, burstTime, priority, processId].
n = 5
process = []
for i in range(5):
    l = []
    for j in range(4):
        l.append(0)
    process.append(l)
def findWaitingTime( waitingTime):
    """Fill waitingTime[i] per process, clamping negative waits to 0.

    Relies on the module-level `process` table being populated and
    already sorted into execution order.
    """
    cumulativeBurstTime = [0] * 5
    cumulativeBurstTime[0] = 0
    waitingTime[0] = 0
    for i in range(1, n):
        # Work completed before process i starts, minus its arrival time.
        cumulativeBurstTime[i] = process[i - 1][1] + cumulativeBurstTime[i - 1]
        waitingTime[i] = cumulativeBurstTime[i] - process[i][0]
        if(waitingTime[i] < 0) :
            waitingTime[i] = 0
def findTurnAroundTime(turnAroundTime, waitingTime):
    """Turnaround time per process: TAT = BT + WT."""
    for i in range(n):
        turnAroundTime[i] = process[i][1] + waitingTime[i]
def findGanttChart():
    """Compute per-process times and print the schedule table.

    NOTE(review): the Priority column prints `priority[i]` in the
    original input order while `process` has been re-sorted, so printed
    priorities may not match their rows -- confirm intent.
    """
    waitingTime = [0] * 5
    turnAroundTime = [0] * 5
    avgWaitingTime = 0
    avgTurnAroundTime = 0
    findWaitingTime(waitingTime)
    findTurnAroundTime(turnAroundTime, waitingTime)
    startTime = [0] * 5
    completionTime = [0] * 5
    startTime[0] = 1
    completionTime[0] = startTime[0] + turnAroundTime[0]
    for i in range(1, n):
        startTime[i] = completionTime[i - 1]
        completionTime[i] = startTime[i] + turnAroundTime[i] - waitingTime[i]
    print("Process ID\tStart Time\tCompletetion Time",
          "\tTurn Around Time\tWaiting Time\t Priority")
    for i in range(n):
        avgWaitingTime += waitingTime[i]
        avgTurnAroundTime += turnAroundTime[i]
        print(process[i][3], "\t\t", startTime[i],"\t\t", end = " ")
        print(completionTime[i], "\t\t\t", turnAroundTime[i], "\t\t\t", waitingTime[i], "\t\t\t", priority[i])
    print("Average waiting time is : ", end = " ")
    print(avgWaitingTime / n)
    print("Average turnaround time : " , end = " ")
    print(avgTurnAroundTime / n)
if __name__ =="__main__":
    arrivalTime = [1, 2, 3, 4, 5]
    burstTime = [3, 5, 1, 7, 4]
    priority = [3, 4, 1, 7, 8]
    for i in range(n):
        process[i][0] = arrivalTime[i]
        process[i][1] = burstTime[i]
        process[i][2] = priority[i]
        process[i][3] = i + 1
    #for sorting in priority order
    process = sorted (process, key = lambda x:x[2])
    # NOTE(review): this second sort orders by arrival time (element 0)
    # and discards the priority ordering just applied -- confirm whether
    # both sorts are intended.
    process = sorted (process)
    findGanttChart()
# End of the program
| n = 5
process = []
for i in range(5):
l = []
for j in range(4):
l.append(0)
process.append(l)
def find_waiting_time(waitingTime):
cumulative_burst_time = [0] * 5
cumulativeBurstTime[0] = 0
waitingTime[0] = 0
for i in range(1, n):
cumulativeBurstTime[i] = process[i - 1][1] + cumulativeBurstTime[i - 1]
waitingTime[i] = cumulativeBurstTime[i] - process[i][0]
if waitingTime[i] < 0:
waitingTime[i] = 0
def find_turn_around_time(turnAroundTime, waitingTime):
for i in range(n):
turnAroundTime[i] = process[i][1] + waitingTime[i]
def find_gantt_chart():
waiting_time = [0] * 5
turn_around_time = [0] * 5
avg_waiting_time = 0
avg_turn_around_time = 0
find_waiting_time(waitingTime)
find_turn_around_time(turnAroundTime, waitingTime)
start_time = [0] * 5
completion_time = [0] * 5
startTime[0] = 1
completionTime[0] = startTime[0] + turnAroundTime[0]
for i in range(1, n):
startTime[i] = completionTime[i - 1]
completionTime[i] = startTime[i] + turnAroundTime[i] - waitingTime[i]
print('Process ID\tStart Time\tCompletetion Time', '\tTurn Around Time\tWaiting Time\t Priority')
for i in range(n):
avg_waiting_time += waitingTime[i]
avg_turn_around_time += turnAroundTime[i]
print(process[i][3], '\t\t', startTime[i], '\t\t', end=' ')
print(completionTime[i], '\t\t\t', turnAroundTime[i], '\t\t\t', waitingTime[i], '\t\t\t', priority[i])
print('Average waiting time is : ', end=' ')
print(avgWaitingTime / n)
print('Average turnaround time : ', end=' ')
print(avgTurnAroundTime / n)
if __name__ == '__main__':
arrival_time = [1, 2, 3, 4, 5]
burst_time = [3, 5, 1, 7, 4]
priority = [3, 4, 1, 7, 8]
for i in range(n):
process[i][0] = arrivalTime[i]
process[i][1] = burstTime[i]
process[i][2] = priority[i]
process[i][3] = i + 1
process = sorted(process, key=lambda x: x[2])
process = sorted(process)
find_gantt_chart() |
expected = 'Jana III Sobieskiego'
# Eleven differently-mangled spellings of the same street name.
a = ' Jana III Sobieskiego '
b = 'ul Jana III SobIESkiego'
c = '\tul. Jana trzeciego Sobieskiego'
d = 'ulicaJana III Sobieskiego'
e = 'UL. JA\tNA 3 SOBIES\tKIEGO'
f = 'UL. jana III SOBiesKIEGO'
g = 'ULICA JANA III SOBIESKIEGO '
h = 'ULICA. JANA III SOBIeskieGO'
i = ' Jana 3 Sobieskiego '
j = 'Jana III\tSobieskiego '
k = 'ul.Jana III Sob\n\nieskiego\n'
def clean(text):
    """Normalise one mangled street-name variant to the expected form.

    Replacement order matters: 'ULICA' must be stripped before 'UL',
    and the title-case pass lowercases 'III', so it is restored last.
    """
    text = text.upper()
    for junk in ('\n', '\t', 'ULICA', '.', 'UL'):
        text = text.replace(junk, '')
    for old, new in (('TRZECIEGO', 'III'), ('3', 'III')):
        text = text.replace(old, new)
    text = text.strip()
    return text.title().replace('Iii', 'III')
a = clean(a)
b = clean(b)
c = clean(c)
d = clean(d)
e = clean(e)
f = clean(f)
g = clean(g)
h = clean(h)
i = clean(i)
# j's tab is deleted, gluing 'III' onto the surname; re-split it here.
j = clean(j).replace('IIIs', 'III S')
k = clean(k)
print(f'{a == expected}\t a: "{a}"')
print(f'{b == expected}\t b: "{b}"')
print(f'{c == expected}\t c: "{c}"')
print(f'{d == expected}\t d: "{d}"')
print(f'{e == expected}\t e: "{e}"')
print(f'{f == expected}\t f: "{f}"')
print(f'{g == expected}\t g: "{g}"')
print(f'{h == expected}\t h: "{h}"')
print(f'{i == expected}\t i: "{i}"')
print(f'{j == expected}\t j: "{j}"')
print(f'{k == expected}\t k: "{k}"')
| expected = 'Jana III Sobieskiego'
a = ' Jana III Sobieskiego '
b = 'ul Jana III SobIESkiego'
c = '\tul. Jana trzeciego Sobieskiego'
d = 'ulicaJana III Sobieskiego'
e = 'UL. JA\tNA 3 SOBIES\tKIEGO'
f = 'UL. jana III SOBiesKIEGO'
g = 'ULICA JANA III SOBIESKIEGO '
h = 'ULICA. JANA III SOBIeskieGO'
i = ' Jana 3 Sobieskiego '
j = 'Jana III\tSobieskiego '
k = 'ul.Jana III Sob\n\nieskiego\n'
def clean(text):
text = text.upper()
text = text.replace('\n', '')
text = text.replace('\t', '')
text = text.replace('ULICA', '')
text = text.replace('.', '')
text = text.replace('UL', '')
text = text.replace('TRZECIEGO', 'III')
text = text.replace('3', 'III')
text = text.strip()
text = text.title().replace('Iii', 'III')
return text
a = clean(a)
b = clean(b)
c = clean(c)
d = clean(d)
e = clean(e)
f = clean(f)
g = clean(g)
h = clean(h)
i = clean(i)
j = clean(j).replace('IIIs', 'III S')
k = clean(k)
print(f'{a == expected}\t a: "{a}"')
print(f'{b == expected}\t b: "{b}"')
print(f'{c == expected}\t c: "{c}"')
print(f'{d == expected}\t d: "{d}"')
print(f'{e == expected}\t e: "{e}"')
print(f'{f == expected}\t f: "{f}"')
print(f'{g == expected}\t g: "{g}"')
print(f'{h == expected}\t h: "{h}"')
print(f'{i == expected}\t i: "{i}"')
print(f'{j == expected}\t j: "{j}"')
print(f'{k == expected}\t k: "{k}"') |
#Linear Threshold global to test: 0.261583162085
# NOTE(review): the key spelling 'threshhold' (sic) is preserved because
# downstream code may look entries up by this exact key.
svm_linear = {
    "global_threshhold": 0.17234866241479857,
    "model_path": "models/signetf_lambda0.95.pkl",
    "c-minus": 1,
    "gamma": 'auto',
}
svm_rbf = {
    "global_threshhold": 0.26488806589512859,
    "model_path": "models/signetf_lambda0.999.pkl",
    "c-minus": 1,
    "gamma": 2**(-11),
}
signature_numbers_by_user = {
    "genuine": 24,
    "skilled": 30,
}

def _make_config(dataset_path, dataset_for_random_path, c_plus):
    """Build the config dict shared by every GPDS dataset variant.

    Only the dataset paths and the positive-class SVM weight ("c-plus")
    differ between gpds50/gpds160/gpds300; everything else is common.
    A fresh dict (with fresh nested train/test dicts) is returned on
    every call, matching the original per-call construction.
    """
    return {
        "dataset_path": dataset_path,
        "dataset_for_random_path": dataset_for_random_path,
        "train_config": {
            "genuine": 14,
            "skilled": 0,
            "random": 14
        },
        "test_config": {
            "genuine": 10,
            "skilled": 10,
            "random": 10
        },
        "c-plus": c_plus,
        "signature_numbers_by_user": signature_numbers_by_user,
        "number_of_tests_by_user": 100,
        "max_image_size": (819, 1137),
        "canvas": (952, 1360),
        "svm_linear": svm_linear,
        "svm_rbf": svm_rbf
    }

def gpds160_config():
    """Configuration for the GPDS-160 dataset."""
    return _make_config("datasets/gpds160/", "datasets/gpds160-RANDOM/", 721)

def gpds300_config():
    """Configuration for the GPDS-300 dataset."""
    return _make_config("datasets/gpds300/", "datasets/gpds300-RANDOM/", 581)

def gpds50_config():
    """Configuration for the GPDS-50 dataset.

    Note: random forgeries are drawn from the plain gpds300 folder,
    exactly as in the original configuration.
    """
    return _make_config("datasets/gpds50/", "datasets/gpds300/", 300)
svm_rbf = {'global_threshhold': 0.2648880658951286, 'model_path': 'models/signetf_lambda0.999.pkl', 'c-minus': 1, 'gamma': 2 ** (-11)}
signature_numbers_by_user = {'genuine': 24, 'skilled': 30}
def gpds160_config():
config = {'dataset_path': 'datasets/gpds160/', 'dataset_for_random_path': 'datasets/gpds160-RANDOM/', 'train_config': {'genuine': 14, 'skilled': 0, 'random': 14}, 'test_config': {'genuine': 10, 'skilled': 10, 'random': 10}, 'c-plus': 721, 'signature_numbers_by_user': signature_numbers_by_user, 'number_of_tests_by_user': 100, 'max_image_size': (819, 1137), 'canvas': (952, 1360), 'svm_linear': svm_linear, 'svm_rbf': svm_rbf}
return config
def gpds300_config():
config = {'dataset_path': 'datasets/gpds300/', 'dataset_for_random_path': 'datasets/gpds300-RANDOM/', 'train_config': {'genuine': 14, 'skilled': 0, 'random': 14}, 'test_config': {'genuine': 10, 'skilled': 10, 'random': 10}, 'c-plus': 581, 'signature_numbers_by_user': signature_numbers_by_user, 'number_of_tests_by_user': 100, 'max_image_size': (819, 1137), 'canvas': (952, 1360), 'svm_linear': svm_linear, 'svm_rbf': svm_rbf}
return config
def gpds50_config():
config = {'dataset_path': 'datasets/gpds50/', 'dataset_for_random_path': 'datasets/gpds300/', 'train_config': {'genuine': 14, 'skilled': 0, 'random': 14}, 'test_config': {'genuine': 10, 'skilled': 10, 'random': 10}, 'c-plus': 300, 'signature_numbers_by_user': signature_numbers_by_user, 'number_of_tests_by_user': 100, 'max_image_size': (819, 1137), 'canvas': (952, 1360), 'svm_linear': svm_linear, 'svm_rbf': svm_rbf}
return config |
# Definition for a binary tree node.
class TreeNode(object):
    """Minimal binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution(object):
    """Most Frequent Subtree Sum (LeetCode 508)."""

    def findFrequentTreeSum(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]
        """
        counts = {}

        def subtree_sum(node):
            # Post-order: a subtree's sum is its value plus both child sums.
            if not node:
                return 0
            total = node.val + subtree_sum(node.left) + subtree_sum(node.right)
            counts[total] = counts.get(total, 0) + 1
            return total

        subtree_sum(root)
        best = 0
        frequent = []
        for total, times in counts.items():
            if times > best:
                best = times
                frequent = [total]
            elif times == best:
                frequent.append(total)
        return frequent
# Quick manual check: build the example tree [5, 2, -3] from the problem
# statement and instantiate the solver (the call's result is discarded).
root = TreeNode(5)
root.left = TreeNode(2)
root.right = TreeNode(-3)
s = Solution()
s.findFrequentTreeSum(root) | class Treenode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def find_frequent_tree_sum(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
res = []
map = {}
max_times = 0
def find_tree_sum(root):
if not root:
return 0
sum = root.val
sum += find_tree_sum(root.left)
sum += find_tree_sum(root.right)
if sum in map:
map[sum] += 1
else:
map[sum] = 1
return sum
find_tree_sum(root)
for (key, value) in map.items():
if value > maxTimes:
res = [key]
max_times = value
elif value == maxTimes:
res.append(key)
return res
root = tree_node(5)
root.left = tree_node(2)
root.right = tree_node(-3)
s = solution()
s.findFrequentTreeSum(root) |
def T():
    """Print a 6x5 ASCII-art capital 'T' built from asterisks.

    The top row and centre column are drawn as '* ' (two characters);
    every other cell is a single space, matching the original widths.
    """
    for row in range(6):
        cells = []
        for col in range(5):
            cells.append("* " if row == 0 or col == 2 else " ")
        print("".join(cells))
| def t():
for row in range(6):
for col in range(5):
if row == 0 or col == 2:
print('*', end=' ')
else:
print(end=' ')
print() |
class state(object):
    """Mutable build-state container backed by one dict.

    Starts "clean"; any update() marks it dirty. Attribute reads that
    miss normal lookup fall through to the backing dict.
    """

    def __init__(self):
        self._dict = {'dependencies': {}, 'build': {}}
        self._clean = True

    def clean(self):
        """Return True while no update() has been applied."""
        return self._clean

    def __getattr__(self, key):
        # Only invoked when ordinary attribute lookup fails: expose
        # backing-dict entries as attributes.
        try:
            return self._dict[key]
        except KeyError:
            raise AttributeError(key)

    def update(self, **kwargs):
        """Recursively merge *kwargs* into the state and mark it dirty."""
        def _merge(dest, src):
            for key, value in src.items():
                if isinstance(value, dict) and key in dest:
                    _merge(dest[key], value)
                else:
                    dest[key] = value
        _merge(self._dict, kwargs)
        self._clean = False

    def serialize(self):
        """Return the raw backing dict (not a copy)."""
        return self._dict

    @classmethod
    def from_yaml(cls, defs, data):
        """Build a state from parsed YAML *data*; *defs* is unused here."""
        instance = cls()
        instance.update(**data)
        return instance
| class State(object):
def __init__(self):
self._dict = {}
self._dict['dependencies'] = {}
self._dict['build'] = {}
self._clean = True
def clean(self):
return self._clean
def __getattr__(self, key):
try:
return self._dict[key]
except KeyError:
raise attribute_error(key)
def update(self, **kwargs):
def merge(dest, src):
for (k, v) in src.items():
if isinstance(v, dict) and k in dest:
merge(dest[k], v)
else:
dest[k] = v
merge(self._dict, kwargs)
self._clean = False
def serialize(self):
return self._dict
@classmethod
def from_yaml(cls, defs, data):
instance = cls()
instance.update(**data)
return instance |
# Dataset name -> query parameters (product type, instrument, host).
# NOTE(review): 'hirise' maps to host 'mex' here, which looks unusual
# (HiRISE flies on MRO) -- value preserved; confirm upstream.
DATASETS = {
    'ctx': {'ptype': 'edr', 'instr': 'ctx', 'host': 'mro'},
    'hirise': {'ptype': 'rdrv11', 'instr': 'hirise', 'host': 'mex'},
}
| datasets = {'ctx': {'ptype': 'edr', 'instr': 'ctx', 'host': 'mro'}, 'hirise': {'ptype': 'rdrv11', 'instr': 'hirise', 'host': 'mex'}} |
# Demo Python Scope - Global Keyword
'''
Global Keyword
If you need to create a global variable, but are stuck in the local scope, you can use the global keyword.
The global keyword makes the variable global.
Also, use the global keyword if you want to make a change to a global variable inside a function.
'''
# To change the value of a global variable inside a function, refer to the variable by using the global keyword:
x = 300
def myfunc():
    # 'global x' makes the assignment below rebind the module-level x
    # instead of creating a new function-local variable.
    global x
    x = 200
myfunc()
print(x)  # prints 200: myfunc() rebound the global
| """
Global Keyword
If you need to create a global variable, but are stuck in the local scope, you can use the global keyword.
The global keyword makes the variable global.
Also, use the global keyword if you want to make a change to a global variable inside a function.
"""
x = 300
def myfunc():
global x
x = 200
myfunc()
print(x) |
VERSION = (0, 0, 2)
def get_version(version = None):
version = version or VERSION
return ".".join(map(str, list(version)))
__version__ = get_version(VERSION)
| version = (0, 0, 2)
def get_version(version=None):
version = version or VERSION
return '.'.join(map(str, list(version)))
__version__ = get_version(VERSION) |
"""import os
import string
import random
from datetime import datetime, timedelta
from elasticsearch_dsl.connections import connections
from search.doc_app import Article
connections.create_connection(hosts=[os.getenv('ELASTIC7', 'localhost:9200')])
Article.init()
idindex = 0
for i in range(0, 20):
for j in range(0, 500):
title = ''.join(
random.choice(string.ascii_uppercase) for x in range(8))
body = ''.join(
random.choice(string.ascii_uppercase) for x in range(999))
article = Article(
meta={'id': idindex},
title=f'{title}-{i}-{j}',
body=body,
published_from=datetime.now() + timedelta(days=int(f'{i}{j}')),
tags=[
random.choice(['g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'g7', 'g8'])
])
idindex = idindex + 1
article.save()
print('-id-43-article-')
article = Article.get(id=43)
print(article.is_published())
print(article.toJson())
"""
| """import os
import string
import random
from datetime import datetime, timedelta
from elasticsearch_dsl.connections import connections
from search.doc_app import Article
connections.create_connection(hosts=[os.getenv('ELASTIC7', 'localhost:9200')])
Article.init()
idindex = 0
for i in range(0, 20):
for j in range(0, 500):
title = ''.join(
random.choice(string.ascii_uppercase) for x in range(8))
body = ''.join(
random.choice(string.ascii_uppercase) for x in range(999))
article = Article(
meta={'id': idindex},
title=f'{title}-{i}-{j}',
body=body,
published_from=datetime.now() + timedelta(days=int(f'{i}{j}')),
tags=[
random.choice(['g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'g7', 'g8'])
])
idindex = idindex + 1
article.save()
print('-id-43-article-')
article = Article.get(id=43)
print(article.is_published())
print(article.toJson())
""" |
"""
pass
"""
def sayhi():
print('hello world!')
| """
pass
"""
def sayhi():
print('hello world!') |
# Hofstadter Figure-Figure sequences: ffr is R, ffs is S, where
#   R(1) = 1, S(1) = 2, R(n) = R(n-1) + S(n-1),
# and S lists, in order, the positive integers never appearing in R,
# so R and S partition 1, 2, 3, ...
# Both functions memoise into lists hung off the function objects
# (ffr.r, ffs.s); index 0 is a None placeholder so the lists are
# 1-indexed like the sequences.
def ffr(n):
    """Return R(n), 1-indexed; raises ValueError for non-int or n < 1."""
    if n < 1 or type(n) != int: raise ValueError("n must be an int >= 1")
    try:
        return ffr.r[n]
    except IndexError:
        r, s = ffr.r, ffs.s
        # The recursive call fills r up to index n-1 before we extend it.
        ffr_n_1 = ffr(n-1)
        lastr = r[-1]
        # extend s up to, and one past, last r
        s += list(range(s[-1] + 1, lastr))
        if s[-1] < lastr: s += [lastr + 1]
        # access s[n-1] temporarily extending s if necessary
        len_s = len(s)
        ffs_n_1 = s[n-1] if len_s > n else (n - len_s) + s[-1]
        ans = ffr_n_1 + ffs_n_1
        r.append(ans)
        return ans
# Memo table for R; ffr.r[n] == R(n) once computed.
ffr.r = [None, 1]
def ffs(n):
    """Return S(n), 1-indexed; raises ValueError for non-int or n < 1."""
    if n < 1 or type(n) != int: raise ValueError("n must be an int >= 1")
    try:
        return ffs.s[n]
    except IndexError:
        r, s = ffr.r, ffs.s
        # Growing R far enough forces ffr() to extend S past index n.
        for i in range(len(r), n+2):
            ffr(i)
        if len(s) > n:
            return s[n]
        raise Exception("Whoops!")
# Memo table for S; ffs.s[n] == S(n) once computed.
ffs.s = [None, 2]
if __name__ == '__main__':
    first10 = [ffr(i) for i in range(1,11)]
    assert first10 == [1, 3, 7, 12, 18, 26, 35, 45, 56, 69], "ffr() value error(s)"
    print(("ffr(n) for n = [1..10] is", first10))
    #
    # Check R(1..40) and S(1..960) together cover small integers once each.
    # NOTE(review): the slice bin[1:1000] stops at index 999, so 1000
    # itself is never checked -- possibly an off-by-one in the test.
    bin = [None] + [0]*1000
    for i in range(40, 0, -1):
        bin[ffr(i)] += 1
    for i in range(960, 0, -1):
        bin[ffs(i)] += 1
    if all(b == 1 for b in bin[1:1000]):
        print("All Integers 1..1000 found OK")
    else:
        print("All Integers 1..1000 NOT found only once: ERROR")
| def ffr(n):
if n < 1 or type(n) != int:
raise value_error('n must be an int >= 1')
try:
return ffr.r[n]
except IndexError:
(r, s) = (ffr.r, ffs.s)
ffr_n_1 = ffr(n - 1)
lastr = r[-1]
s += list(range(s[-1] + 1, lastr))
if s[-1] < lastr:
s += [lastr + 1]
len_s = len(s)
ffs_n_1 = s[n - 1] if len_s > n else n - len_s + s[-1]
ans = ffr_n_1 + ffs_n_1
r.append(ans)
return ans
ffr.r = [None, 1]
def ffs(n):
if n < 1 or type(n) != int:
raise value_error('n must be an int >= 1')
try:
return ffs.s[n]
except IndexError:
(r, s) = (ffr.r, ffs.s)
for i in range(len(r), n + 2):
ffr(i)
if len(s) > n:
return s[n]
raise exception('Whoops!')
ffs.s = [None, 2]
if __name__ == '__main__':
first10 = [ffr(i) for i in range(1, 11)]
assert first10 == [1, 3, 7, 12, 18, 26, 35, 45, 56, 69], 'ffr() value error(s)'
print(('ffr(n) for n = [1..10] is', first10))
bin = [None] + [0] * 1000
for i in range(40, 0, -1):
bin[ffr(i)] += 1
for i in range(960, 0, -1):
bin[ffs(i)] += 1
if all((b == 1 for b in bin[1:1000])):
print('All Integers 1..1000 found OK')
else:
print('All Integers 1..1000 NOT found only once: ERROR') |
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Iterative implementation: the original naive double recursion is
    exponential and takes minutes for the n == 40 used in main; this
    runs in O(n) time and O(1) space with identical results.
    For n <= 1 (including negative n) the argument is returned
    unchanged, exactly matching the recursive base case.
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr

if __name__ == "__main__":
    n = 40
    print("{answer}\n".format(answer=fib(n)))
| def fib(n):
if n <= 1:
return n
return fib(n - 1) + fib(n - 2)
if __name__ == '__main__':
n = 40
print('{answer}\n'.format(answer=fib(n))) |
# Example 1: adding an int and a float promotes the result to float.
num1 = 13
num2 = 32.34
num1 += num2
print ("num1 =",num1)
# Example 2: round() turns the float sum back into the nearest int.
num1 = 141.42
num2 = 412
num1 += num2
print ("num1 =",round(num1))
# Example 3: same idea, keeping the sum in a separate variable.
num1 = 42
num2 = 4.23
result = num1 + num2
print("El resultado de la operacion es =", round(result))
# int() truncates; str()/float() round-trip a numeric literal.
print(int(6.324))
rl1 = str(64.523)
print(type(rl1))
print(float(rl1)) | num1 = 13
num2 = 32.34
num1 = num1 + num2
print('num1 =', num1)
num1 = 141.42
num2 = 412
num1 = num1 + num2
print('num1 =', round(num1))
num1 = 42
num2 = 4.23
result = num1 + num2
print('El resultado de la operacion es =', round(result))
print(int(6.324))
rl1 = str(64.523)
print(type(rl1))
print(float(rl1)) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class PodiumAccount(object):
    """
    Value object describing a Podium account and its related URIs.
    **Attributes:**
        **account_id** (str): Unique id for the account
        **username** (string): The User's username.
        **email** (str): Account email address
        **user_uri** (str): URI to user associated with account
        **events_uri** (str): URI to account's events
        **devices_uri** (str): URI to account's devices
        **streams_uri** (str): URI to account's current live streams
        **exports_uri** (str): URI to account's telemetry exports
    """

    def __init__(self, account_id, username, email, devices_uri, exports_uri,
                 streams_uri, user_uri, events_uri):
        self.account_id = account_id
        self.username = username
        self.email = email
        self.devices_uri = devices_uri
        self.exports_uri = exports_uri
        self.streams_uri = streams_uri
        self.user_uri = user_uri
        self.events_uri = events_uri


def get_account_from_json(json):
    """
    Returns a PodiumAccount object from the json dict received from podium api.
    Args:
        json (dict): Dict of data from REST api
    Return:
        PodiumUser: The PodiumAccount object for the data.
    """
    return PodiumAccount(
        json['id'], json['username'], json['email'], json['devices_uri'],
        json['exports_uri'], json['streams_uri'], json['user_uri'],
        json['events_uri'])
| class Podiumaccount(object):
"""
Object that represents a particular User.
**Attributes:**
**account_id** (str): Unique id for account
**username** (string): The User's username.
**email** (str): Account email address
**user_uri** (str): URI to user associated with account
**events_uri** (str): URI to account's events
**devices_uri** (str): URI to account's devices
**events_uri** (str): URI to account's events
**streams_uri** (str): URI to account's current live streams
**exports_uri** (str): URI to account's telemetry exports
"""
def __init__(self, account_id, username, email, devices_uri, exports_uri, streams_uri, user_uri, events_uri):
self.account_id = account_id
self.username = username
self.email = email
self.devices_uri = devices_uri
self.exports_uri = exports_uri
self.streams_uri = streams_uri
self.user_uri = user_uri
self.events_uri = events_uri
def get_account_from_json(json):
"""
Returns a PodiumAccount object from the json dict received from podium api.
Args:
json (dict): Dict of data from REST api
Return:
PodiumUser: The PodiumAccount object for the data.
"""
return podium_account(json['id'], json['username'], json['email'], json['devices_uri'], json['exports_uri'], json['streams_uri'], json['user_uri'], json['events_uri']) |
class AbstractContext(object):
    """Interface for contexts that resolve variables to values and types.

    The base implementations are no-ops returning None; concrete
    subclasses presumably override them -- confirm against callers.
    """
    def valueForVar(self, name):
        # Look up the value bound to *name*; base returns None.
        return
    def setValueForVar(self, name, ex):
        # Bind *name* to *ex* in this context; base is a no-op.
        return
    # Look up the declared type for *name*; base returns None.
    def typeForVar(self, name):
return | class Abstractcontext(object):
def value_for_var(self, name):
return
def set_value_for_var(self, name, ex):
return
def type_for_var(self, name):
return |
"""
.. module:: test_viewer
:synopsis: Unit tests for viewer module
"""
| """
.. module:: test_viewer
:synopsis: Unit tests for viewer module
""" |
# Attribute-lookup teaching demo: instance -> class -> superclass.
class foo:
    # Class attribute; shadowed per-instance by __init__.
    x = 5
    def __init__(self,x):
        self.x = x
    def bar(self):
        return self.x + 1
class foo2(foo):
    x = 7
    # no init so jump/inherit from foo
    def bar(self):
        return self.x + 2
    def bark():
        # No 'self' parameter: callable on the class, but calling it
        # through an instance passes the instance as an unexpected arg.
        print('woof')
f = foo2(6) #calls foo init
# NOTE(review): the first 'Error' line raises, so the lines after it
# only run if the failing calls are removed.
f.bark() #Error (TypeError: bark() takes no arguments but gets the instance)
foo2.bark() #woof
foo.bark() #Error (AttributeError: class foo has no 'bark')
f.bar() # 8 (foo2.bar with instance x == 6)
foo.bar(f) #7 (foo.bar with instance x == 6)
#foo.bar refers to function bar in class foo, f
#lookup from instance -> class -> superclass -> error | class Foo:
x = 5
def __init__(self, x):
self.x = x
def bar(self):
return self.x + 1
class Foo2(foo):
x = 7
def bar(self):
return self.x + 2
def bark():
print('woof')
f = foo2(6)
f.bark()
foo2.bark()
foo.bark()
f.bar()
foo.bar(f) |
# Interactive helper: prompt for a file name, then echo its first N lines.

# Keep asking until a file can actually be opened.
while True:
    try:
        file_name = input("Please enter the name of the file you would like to read")
        fh = open(file_name)
        break
    except OSError:
        # Missing file, bad path, permissions, ... (was a bare except,
        # which also swallowed Ctrl-C / EOF and could loop forever).
        print("Couldn't open file. Please try again")

# Keep asking until the user supplies a sane positive line count.
while True:
    try:
        line_cnt = int(input("How many lines would you like to read?"))
        if(line_cnt <= 0):
            raise ValueError
        if(line_cnt >1000):
            raise OverflowError
        break
    except ValueError:
        print("Must enter an integer greater than 0")
    except OverflowError:
        print("Nobody's got time for that.")
    except Exception:
        print("Must enter a legit int")

try:
    # BUG FIX: the original iterated over 'line_count', an undefined
    # name (the variable is 'line_cnt'), so this loop always crashed.
    for line in range(1, line_cnt+1):
        # NOTE(review): readline() returns '' at EOF rather than raising
        # EOFError, so short files just print blank lines here.
        print(fh.readline())
except EOFError:
    print("Ya hit the end of the file at line:",line)
except Exception:
    print("Something went terribly wrong at line:",line)
finally:
    fh.close()
| while True:
try:
file_name = input('Please enter the name of the file you would like to read')
fh = open(file_name)
break
except:
print("Couldn't open file. Please try again")
while True:
try:
line_cnt = int(input('How many lines would you like to read?'))
if line_cnt <= 0:
raise ValueError
if line_cnt > 1000:
raise OverflowError
break
except ValueError:
print('Must enter an integer greater than 0')
except OverflowError:
print("Nobody's got time for that.")
except:
print('Strange input. Muste enter a legit int')
try:
for line in range(1, line_count + 1):
print(fh.readline())
except EOFError:
print('Ya hit the end of the file at line:', line)
except:
print('Something went terribly wrong at line:', line)
finally:
fh.close() |
class Solution(object):
    """Non-decreasing Array (LeetCode 665): can the array become
    non-decreasing by modifying at most one element?"""

    def checkPossibility(self, nums):
        """
        :type nums: List[int]
        :rtype: bool
        """
        def non_decreasing(seq):
            # True when every adjacent pair is already in order.
            return all(x <= y for x, y in zip(seq, seq[1:]))

        # At the first inversion, try both single-element repairs:
        # lower the left value, or raise the right value.
        lowered, raised = list(nums), list(nums)
        for i in range(len(nums) - 1):
            if nums[i] > nums[i + 1]:
                lowered[i] = lowered[i + 1]
                raised[i + 1] = raised[i]
                break
        return non_decreasing(lowered) or non_decreasing(raised)
| class Solution(object):
def check_possibility(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
is_increase = lambda nums: all((nums[i] <= nums[i + 1] for i in range(len(nums) - 1)))
(one, two) = (nums[:], nums[:])
for i in range(0, len(nums) - 1):
if nums[i + 1] < nums[i]:
one[i] = one[i + 1]
two[i + 1] = two[i]
break
return is_increase(one) or is_increase(two) |
"""
Procuret Python
Error Module
author: hugh@blinkybeach.com
"""
class ProcuretError(Exception):
pass
| """
Procuret Python
Error Module
author: hugh@blinkybeach.com
"""
class Procureterror(Exception):
pass |
# Known EC2 instance type identifiers (snapshot of AWS offerings at the
# time this list was written; newer families will be absent).
EC2_INSTANCE_TYPES = [
    't1.micro',
    't2.nano',
    't2.micro',
    't2.small',
    't2.medium',
    't2.large',
    't2.xlarge',
    't2.2xlarge',
    'm1.small',
    'm1.medium',
    'm1.large',
    'm1.xlarge',
    'm3.medium',
    'm3.large',
    'm3.xlarge',
    'm3.2xlarge',
    'm4.large',
    'm4.xlarge',
    'm4.2xlarge',
    'm4.4xlarge',
    'm4.10xlarge',
    'm4.16xlarge',
    'm2.xlarge',
    'm2.2xlarge',
    'm2.4xlarge',
    'cr1.8xlarge',
    'r3.large',
    'r3.xlarge',
    'r3.2xlarge',
    'r3.4xlarge',
    'r3.8xlarge',
    'r4.large',
    'r4.xlarge',
    'r4.2xlarge',
    'r4.4xlarge',
    'r4.8xlarge',
    'r4.16xlarge',
    'x1.16xlarge',
    'x1.32xlarge',
    'i2.xlarge',
    'i2.2xlarge',
    'i2.4xlarge',
    'i2.8xlarge',
    'i3.large',
    'i3.xlarge',
    'i3.2xlarge',
    'i3.4xlarge',
    'i3.8xlarge',
    'i3.16xlarge',
    'hi1.4xlarge',
    'hs1.8xlarge',
    'c1.medium',
    'c1.xlarge',
    'c3.large',
    'c3.xlarge',
    'c3.2xlarge',
    'c3.4xlarge',
    'c3.8xlarge',
    'c4.large',
    'c4.xlarge',
    'c4.2xlarge',
    'c4.4xlarge',
    'c4.8xlarge',
    'cc1.4xlarge',
    'cc2.8xlarge',
    'g2.2xlarge',
    'g2.8xlarge',
    'g3.4xlarge',
    'g3.8xlarge',
    'g3.16xlarge',
    'cg1.4xlarge',
    'p2.xlarge',
    'p2.8xlarge',
    'p2.16xlarge',
    'd2.xlarge',
    'd2.2xlarge',
    'd2.4xlarge',
    'd2.8xlarge',
    'f1.2xlarge',
    'f1.16xlarge',
]
| ec2_instance_types = ['t1.micro', 't2.nano', 't2.micro', 't2.small', 't2.medium', 't2.large', 't2.xlarge', 't2.2xlarge', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'm4.16xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'cr1.8xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'r4.large', 'r4.xlarge', 'r4.2xlarge', 'r4.4xlarge', 'r4.8xlarge', 'r4.16xlarge', 'x1.16xlarge', 'x1.32xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'i3.large', 'i3.xlarge', 'i3.2xlarge', 'i3.4xlarge', 'i3.8xlarge', 'i3.16xlarge', 'hi1.4xlarge', 'hs1.8xlarge', 'c1.medium', 'c1.xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'cc1.4xlarge', 'cc2.8xlarge', 'g2.2xlarge', 'g2.8xlarge', 'g3.4xlarge', 'g3.8xlarge', 'g3.16xlarge', 'cg1.4xlarge', 'p2.xlarge', 'p2.8xlarge', 'p2.16xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'f1.2xlarge', 'f1.16xlarge'] |
class Config:
    """Static Telegram-bot configuration, read as class attributes."""
    # SECURITY(review): the bot token and API hash below look like real
    # credentials committed to source control -- they should be revoked
    # and loaded from the environment instead. Flagged only; values
    # deliberately left unchanged.
    BOT_USE = False # True is private use
    BOT_TOKEN = '5298092373:AAF7zZJm02Gkv4DE8jVrbJSPcUoWMvXGX6k' # from @botfather
    APP_ID = 16655133 # from https://my.telegram.org/apps
    API_HASH = '6e6a925776a4525c7f0234119f629c99' # from https://my.telegram.org/apps
    AUTH_USERS = [5034111461] # Private users id
| class Config:
bot_use = False
bot_token = '5298092373:AAF7zZJm02Gkv4DE8jVrbJSPcUoWMvXGX6k'
app_id = 16655133
api_hash = '6e6a925776a4525c7f0234119f629c99'
auth_users = [5034111461] |
# Compare two haplotype strings and count switch / flip phasing errors.

# Read in the two haplotypes to compare ('with' added so the file
# handles are closed; the originals were never closed).
with open("h1.txt", "r") as infile:
    real = infile.readlines()[0]
with open("h2.txt", "r") as infile:
    found = infile.readlines()[0]

# Counters for the two kinds of errors.
switch = 0
flip = 0

# Compare only the overlapping prefix if the strings differ in length.
size = min(len(real), len(found))

# xorHap[i] is '1' exactly where the two haplotypes disagree.
# BUG FIX: the original used Python-2-only xrange(), which is a
# NameError on Python 3; range() is the direct replacement.
xorHap = ""
for i in range(size):
    xorHap += str(int(real[i]) ^ int(found[i]))

# A run of two or more consecutive mismatches counts as one switch
# error; an isolated mismatch counts as one flip error.
i = 0
while i < size:
    if (int(xorHap[i]) == 1):
        flag = False
        while i < size - 1 and xorHap[i] == xorHap[i + 1]:
            i += 1
            flag = True
        if flag:
            switch += 1
        else:
            flip += 1
    i += 1

# Display results
print("Switch: %d" % switch)
print("Flip: %d" % flip)
print("Flip: %d" % flip) | with open('h1.txt', 'r') as infile:
real = infile.readlines()[0]
with open('h2.txt', 'r') as infile:
found = infile.readlines()[0]
switch = 0
flip = 0
size = min(len(real), len(found))
xor_hap = ''
for i in xrange(size):
xor_hap += str(int(real[i]) ^ int(found[i]))
i = 0
while i < size:
if int(xorHap[i]) == 1:
flag = False
while i < size - 1 and xorHap[i] == xorHap[i + 1]:
i += 1
flag = True
if flag:
switch += 1
else:
flip += 1
i += 1
print('Switch: %d' % switch)
print('Flip: %d' % flip) |
def remove_duplicates(t):
    """Return a new list with duplicates removed, preserving first-seen order.

    Membership is checked with `in` against the result list (O(n^2)), so
    elements only need to support `==`; they do not have to be hashable.
    """
    t_unique = []
    for element in t:
        if element not in t_unique:  # PEP 8 idiom: `not in`, not `not x in`
            t_unique.append(element)
    return t_unique


print(remove_duplicates(['a', 'b', 1, 2, 'a', '2', 3, 1]))
def remove_duplicates(t):
    """Return a new list with duplicates dropped, keeping first-seen order.

    Restored from a line corrupted by a stray table delimiter.  Uses list
    membership (O(n^2)) so items only need `==`, not hashability.
    """
    t_unique = []
    for element in t:
        if element not in t_unique:  # idiomatic `not in`
            t_unique.append(element)
    return t_unique


print(remove_duplicates(['a', 'b', 1, 2, 'a', '2', 3, 1]))
## Autogenerated code from generate.py
## Note1: Please dont update this file manually
## Note2: For any new keys, add at the end of file smac_keys.list
## To update this file, update the keys in 'smac_keys.list' and rerun 'generate.py'

# Each SMAC protocol key name has a short wire code; smac_keys maps both
# directions (name -> code and code -> name).  Both directions are built
# from a single pair list so they can never drift apart -- the previous
# hand-written literal mapped 'a8' back to 'CONTEXT' instead of 'TOPIC'.
_SMAC_KEY_PAIRS = (
    ('CMD_SET_PROPERTY', '0'), ('CMD_SET_PROPERTY_ACK', '1'), ('CMD_STATUS', '2'),
    ('CMD_REQ_SEND_INFO', '3'), ('CMD_SEND_INFO', '4'), ('FROM', '5'),
    ('TO', '6'), ('COMMAND', '7'), ('ACK', '8'),
    ('ID_MESSAGE', '9'), ('MESSAGE', 'A'), ('SSID_WIFI', 'B'),
    ('NAME_WIFI', 'C'), ('NAME_TOPIC', 'D'), ('ID_TOPIC', 'E'),
    ('ID_DEVICE', 'F'), ('NAME_DEVICE', 'G'), ('TYPE_DEVICE', 'H'),
    ('LOCATION_WIFI', 'I'), ('ID_PROPERTY', 'K'), ('TYPE_PROPERTY', 'L'),
    ('NAME_PROPERTY', 'M'), ('VALUE', 'N'), ('VALUE_MIN', 'O'),
    ('VALUE_MAX', 'P'), ('VALUE_STEP', 'Q'), ('VALUE_UNIT', 'R'),
    ('CMD_INIT_SEND_INFO', 'S'), ('CMD_END_SEND_INFO', 'T'), ('CMD_STATUS_SET_PROPERTY', 'U'),
    ('CMD_STATUS_ADD_TOPIC', 'V'), ('CMD_STATUS_REMOVE_TOPIC', 'W'), ('CMD_ADD_TOPIC', 'X'),
    ('CMD_REMOVE_TOPIC', 'Y'), ('PASSKEY', 'Z'), ('CMD_DEVICE_BUSY', 'a'),
    ('CMD_INVALID_PIN', 'b'), ('NAME_HOME', 'c'), ('CMD_UPDATE_WIFI_CONFIG', 'd'),
    ('SSID', 'e'), ('PASSWORD', 'f'), ('CMD_ONLINE', 'g'),
    ('CMD_UPDATE_SOFTWARE', 'h'), ('CMD_UPDATE_INTERVAL_ONLINE', 'i'), ('INTERVAL', 'j'),
    ('CMD_STATUS_UPDATE_WIFI_CONFIG', 'k'), ('CMD_STATUS_UPDATE_INTERVAL_ONLINE', 'l'), ('CMD_UPDATE_NAME_DEVICE', 'm'),
    ('CMD_STATUS_UPDATE_NAME_DEVICE', 'n'), ('CMD_UPDATE_NAME_PROPERTY', 'o'), ('CMD_STATUS_UPDATE_NAME_PROPERTY', 'p'),
    ('CMD_TOPIC_LIMIT_EXCEEDED', 'q'), ('CMD_STATUS_UPDATE_SOFTWARE', 'r'), ('CMD_ADD_ACTION', 's'),
    ('CMD_REMOVE_ACTION', 't'), ('CMD_ADD_TRIGGER', 'u'), ('CMD_REMOVE_TRIGGER', 'v'),
    ('CMD_STATUS_ADD_ACTION', 'w'), ('CMD_STATUS_REMOVE_ACTION', 'x'), ('CMD_STATUS_ADD_TRIGGER', 'y'),
    ('CMD_STATUS_REMOVE_TRIGGER', 'z'), ('CMD_ACTION_LIMIT_EXCEEDED', 'a1'), ('CMD_TRIGGER_LIMIT_EXCEEDED', 'a2'),
    ('CMD_STATUS_ADD_CONTEXT', 'a3'), ('CMD_STATUS_REMOVE_CONTEXT', 'a4'), ('CONTEXT_ACTION', 'a5'),
    ('CONTEXT_TRIGGER', 'a6'), ('CONTEXT', 'a7'), ('TOPIC', 'a8'),
    ('PROPERTY', 'a9'), ('ID_CONTEXT', 'a10'), ('NAME_CONTEXT', 'a11'),
    ('TYPE_TRIGGER_PROP', 'a12'), ('TYPE_TRIGGER_TIME', 'a13'), ('TYPE_TRIGGER_LOCATION', 'a14'),
    ('TYPE_TRIGGER', 'a15'), ('CMD_TRIGGER_CONTEXT', 'a16'),
)

smac_keys = {}
for _name, _code in _SMAC_KEY_PAIRS:
    smac_keys[_name] = _code
    smac_keys[_code] = _name
# Bidirectional SMAC protocol-key map rebuilt from a single name/code spec
# so the two directions cannot disagree (this literal mapped 'a8' back to
# 'CONTEXT' instead of 'TOPIC'); also restores a line corrupted by a stray
# table delimiter.
_SMAC_SPEC = (
    'CMD_SET_PROPERTY 0 CMD_SET_PROPERTY_ACK 1 CMD_STATUS 2 CMD_REQ_SEND_INFO 3 '
    'CMD_SEND_INFO 4 FROM 5 TO 6 COMMAND 7 ACK 8 ID_MESSAGE 9 MESSAGE A '
    'SSID_WIFI B NAME_WIFI C NAME_TOPIC D ID_TOPIC E ID_DEVICE F NAME_DEVICE G '
    'TYPE_DEVICE H LOCATION_WIFI I ID_PROPERTY K TYPE_PROPERTY L NAME_PROPERTY M '
    'VALUE N VALUE_MIN O VALUE_MAX P VALUE_STEP Q VALUE_UNIT R CMD_INIT_SEND_INFO S '
    'CMD_END_SEND_INFO T CMD_STATUS_SET_PROPERTY U CMD_STATUS_ADD_TOPIC V '
    'CMD_STATUS_REMOVE_TOPIC W CMD_ADD_TOPIC X CMD_REMOVE_TOPIC Y PASSKEY Z '
    'CMD_DEVICE_BUSY a CMD_INVALID_PIN b NAME_HOME c CMD_UPDATE_WIFI_CONFIG d '
    'SSID e PASSWORD f CMD_ONLINE g CMD_UPDATE_SOFTWARE h '
    'CMD_UPDATE_INTERVAL_ONLINE i INTERVAL j CMD_STATUS_UPDATE_WIFI_CONFIG k '
    'CMD_STATUS_UPDATE_INTERVAL_ONLINE l CMD_UPDATE_NAME_DEVICE m '
    'CMD_STATUS_UPDATE_NAME_DEVICE n CMD_UPDATE_NAME_PROPERTY o '
    'CMD_STATUS_UPDATE_NAME_PROPERTY p CMD_TOPIC_LIMIT_EXCEEDED q '
    'CMD_STATUS_UPDATE_SOFTWARE r CMD_ADD_ACTION s CMD_REMOVE_ACTION t '
    'CMD_ADD_TRIGGER u CMD_REMOVE_TRIGGER v CMD_STATUS_ADD_ACTION w '
    'CMD_STATUS_REMOVE_ACTION x CMD_STATUS_ADD_TRIGGER y CMD_STATUS_REMOVE_TRIGGER z '
    'CMD_ACTION_LIMIT_EXCEEDED a1 CMD_TRIGGER_LIMIT_EXCEEDED a2 '
    'CMD_STATUS_ADD_CONTEXT a3 CMD_STATUS_REMOVE_CONTEXT a4 CONTEXT_ACTION a5 '
    'CONTEXT_TRIGGER a6 CONTEXT a7 TOPIC a8 PROPERTY a9 ID_CONTEXT a10 '
    'NAME_CONTEXT a11 TYPE_TRIGGER_PROP a12 TYPE_TRIGGER_TIME a13 '
    'TYPE_TRIGGER_LOCATION a14 TYPE_TRIGGER a15 CMD_TRIGGER_CONTEXT a16'
).split()

smac_keys = {}
for _name, _code in zip(_SMAC_SPEC[::2], _SMAC_SPEC[1::2]):
    smac_keys[_name] = _code
    smac_keys[_code] = _name
class Solution:
    """Classic iterative binary search over a sorted list."""

    def search(self, nums: list, target: int) -> int:
        """Return the index of target in nums (sorted ascending), or -1."""
        low = 0
        hi = len(nums) - 1
        while low <= hi:
            # Integer midpoint; avoids the float round-trip of int((low+hi)/2).
            mid = (low + hi) // 2
            if nums[mid] == target:
                return mid
            elif nums[mid] > target:
                hi = mid - 1
            else:
                low = mid + 1
        return -1
# Demo inputs: a hit (9 is at index 4) and a miss (2 is absent).
nums = [-1,0,3,5,9,12]; target = 9
# NOTE(review): this second assignment overwrites the first, so only the
# miss case (target 2, expected -1) is actually exercised below.
nums = [-1,0,3,5,9,12]; target = 2
s = Solution()
print(s.search(nums, target))
class Solution:
    """Iterative binary search over a sorted list."""

    def search(self, nums: list, target: int) -> int:
        """Return the index of target in nums (sorted ascending), else -1."""
        low = 0
        hi = len(nums) - 1
        while low <= hi:
            mid = (low + hi) // 2
            if nums[mid] == target:
                return mid
            elif nums[mid] > target:
                hi = mid - 1
            else:
                low = mid + 1
        return -1


# Demo: the second assignment overwrites the first, so only the miss case runs.
nums = [-1, 0, 3, 5, 9, 12]
target = 9
nums = [-1, 0, 3, 5, 9, 12]
target = 2
# Fix: the class is named Solution -- `solution()` raised NameError.
s = Solution()
print(s.search(nums, target))
"""
PROBLEM 43: Sub-string divisibility
The number, 1406357289, is a 0 to 9 pandigital number because it is made up of each of the digits
0 to 9 in some order, but it also has a rather interesting sub-string divisibility property.
Let d_1 be the 1st digit, d_2 be the 2nd digit, and so on. In this way, we note the following:
d_2d_3d_4=406 is divisible by 2
d_3d_4d_5=063 is divisible by 3
d_4d_5d_6=635 is divisible by 5
d_5d_6d_7=357 is divisible by 7
d_6d_7d_8=572 is divisible by 11
d_7d_8d_9=728 is divisible by 13
d_8d_9d_10=289 is divisible by 17
Find the sum of all 0 to 9 pandigital numbers with this property.
"""
# I'm going to assume that by "this property" they mean "divisible by these specific numbers", not
# THE MANY OTHER SEQUENCES THAT COULD ALSO FIT THAT RULE, LIKE, IDK, DIVISIBLE BY ASCENDING PRIMES?
# I'm going to do this backwards, then reverse. Because that should slow down our explosion of
# places to search
str_numbers = [str(i) for i in range(10)]
initial_list = []
for num in range(1000):
if num % 17 == 0:
num_str = str(num)
while len(num_str) < 3:
num_str = '0' + num_str
for test_number in str_numbers:
if num_str.count(test_number) > 1:
break
else:
initial_list.append(num_str)
kept_list = []
for factor_num, factor in enumerate([13, 11, 7, 5, 3, 2]):
for number in initial_list:
for added_number in str_numbers:
if added_number in number:
continue
new_str_num = added_number + number
# my gosh it took me forever to get these indexes right
test_num = int(new_str_num[-4 - factor_num:-1 - factor_num])
if test_num % factor == 0:
kept_list.append(new_str_num)
initial_list = kept_list
kept_list = []
final_list = []
for number in initial_list:
for added_number in str_numbers:
if added_number not in number:
final_list.append(int(added_number + number))
print(sum(final_list))
| """
PROBLEM 43: Sub-string divisibility
The number, 1406357289, is a 0 to 9 pandigital number because it is made up of each of the digits
0 to 9 in some order, but it also has a rather interesting sub-string divisibility property.
Let d_1 be the 1st digit, d_2 be the 2nd digit, and so on. In this way, we note the following:
d_2d_3d_4=406 is divisible by 2
d_3d_4d_5=063 is divisible by 3
d_4d_5d_6=635 is divisible by 5
d_5d_6d_7=357 is divisible by 7
d_6d_7d_8=572 is divisible by 11
d_7d_8d_9=728 is divisible by 13
d_8d_9d_10=289 is divisible by 17
Find the sum of all 0 to 9 pandigital numbers with this property.
"""
str_numbers = [str(i) for i in range(10)]
initial_list = []
for num in range(1000):
if num % 17 == 0:
num_str = str(num)
while len(num_str) < 3:
num_str = '0' + num_str
for test_number in str_numbers:
if num_str.count(test_number) > 1:
break
else:
initial_list.append(num_str)
kept_list = []
for (factor_num, factor) in enumerate([13, 11, 7, 5, 3, 2]):
for number in initial_list:
for added_number in str_numbers:
if added_number in number:
continue
new_str_num = added_number + number
test_num = int(new_str_num[-4 - factor_num:-1 - factor_num])
if test_num % factor == 0:
kept_list.append(new_str_num)
initial_list = kept_list
kept_list = []
final_list = []
for number in initial_list:
for added_number in str_numbers:
if added_number not in number:
final_list.append(int(added_number + number))
print(sum(final_list)) |
'''
Given a set of intervals, find out if any two intervals overlap.
Example:
Intervals: [[1,4], [2,5], [7,9]]
Output: true
Explanation: Intervals [1,4] and [2,5] overlap
'''


class Interval:
    """Closed interval [start, end]."""

    def __init__(self, start, end):
        self.start = start
        self.end = end


def doesOverlap(intervals):
    """Return True if any two intervals overlap (touching endpoints count).

    Sorts by start (in place), then compares each interval's start with the
    end of its predecessor; O(n log n).  Assumes a non-empty list.
    """
    intervals.sort(key=lambda x: x.start)
    end = intervals[0].end
    for interval in intervals[1:]:
        if interval.start <= end:
            return True
        # No overlap yet: advance the window.  (The unused `start` tracking
        # variable from the original has been removed as dead code.)
        end = interval.end
    return False


if __name__ == "__main__":
    print(doesOverlap([Interval(1, 4), Interval(2, 5), Interval(7, 9)]))
    print(doesOverlap([Interval(1, 4), Interval(6, 7), Interval(8, 9)]))
| """
Given a set of intervals, find out if any two intervals overlap.
Example:
Intervals: [[1,4], [2,5], [7,9]]
Output: true
Explanation: Intervals [1,4] and [2,5] overlap
"""
class Interval:
def __init__(self, start, end):
self.start = start
self.end = end
def does_overlap(intervals):
intervals.sort(key=lambda x: x.start)
start = intervals[0].start
end = intervals[0].end
for i in range(1, len(intervals)):
interval = intervals[i]
if interval.start <= end:
return True
else:
start = interval.start
end = interval.end
return False
if __name__ == '__main__':
print(does_overlap([interval(1, 4), interval(2, 5), interval(7, 9)]))
print(does_overlap([interval(1, 4), interval(6, 7), interval(8, 9)])) |
# Time: O(n)
# Space: O(1)
#
# The API: int read4(char *buf) reads 4 characters at a time from a file.
#
# The return value is the actual number of characters read. For example, it returns 3 if there is only 3 characters left in the file.
#
# By using the read4 API, implement the function int read(char *buf, int n) that reads n characters from the file.
#
# Note:
# The read function will only be called once for each test case.
#
# The read4 API is already defined for you.
# @param buf, a list of characters
# @return an integer
# V0
# V1
# http://www.voidcn.com/article/p-pfdpmnvw-qp.html
# https://www.cnblogs.com/yrbbest/p/4489710.html
# The read4 API is already defined for you.
# @param buf, a list of characters
# @return an integer
# def read4(buf):
class Solution(object):
    def read(self, buf, n):
        """
        :type buf: Destination buffer (List[str])
        :type n: Maximum number of characters to read (int)
        :rtype: The number of characters read (int)
        """
        index = 0
        while True:
            buf4 = [""]*4
            # Fix: cap by the characters still wanted (n - index), not by
            # `index` -- min(read4(buf4), index) is 0 on the first pass, so
            # the function always returned 0 without copying anything.
            current = min(read4(buf4), n - index)
            for i in range(current):
                buf[index] = buf4[i]  # copy into caller-provided buffer
                index += 1
            # Fewer than 4 chars consumed means EOF or the n limit was hit.
            if current != 4:
                return index
# V1'
# https://www.jiuzhang.com/solution/read-n-characters-given-read4-ii-call-multiple-times/#tag-highlight-lang-python
class Solution:
    """Read N characters given read4, callable multiple times: leftover
    characters from the previous read4 call are buffered on the instance.

    NOTE(review): relies on an external `Reader.read4` API that is not
    defined in this file -- verify against the judge environment.
    """

    def __init__(self):
        # (leftover buffer, cursor into it, count of valid chars in it)
        self.buf4, self.i4, self.n4 = [None] * 4, 0, 0

    # @param {char[]} buf destination buffer
    # @param {int} n maximum number of characters to read
    # @return {int} the number of characters read
    def read(self, buf, n):
        # Serve from the leftover buffer first; refill via read4 when empty.
        i = 0
        while i < n:
            if self.i4 == self.n4:
                self.i4, self.n4 = 0, Reader.read4(self.buf4)
                if not self.n4:  # read4 returned 0: end of file
                    break
            buf[i], i, self.i4 = self.buf4[self.i4], i + 1, self.i4 + 1
        return i
# V2
# Time: O(n)
# Space: O(1)
def read4(buf):
    """Test-harness stand-in for the read4 API: copies up to 4 characters
    from the module-level string `file_content` into buf and consumes them
    from it.  Returns the number of characters copied.

    NOTE(review): `file_content` must be set by the caller before use -- it
    is not initialised anywhere in this file.
    """
    global file_content
    i = 0
    while i < len(file_content) and i < 4:
        buf[i] = file_content[i]
        i += 1
    # Consume what was just read so the next call returns the next chunk.
    if len(file_content) > 4:
        file_content = file_content[4:]
    else:
        file_content = ""
    return i
class Solution(object):
    def read(self, buf, n):
        """
        :type buf: Destination buffer (List[str])
        :type n: Maximum number of characters to read (int)
        :rtype: The number of characters read (int)
        """
        read_bytes = 0
        buffer = [''] * 4
        # Fix: use floor division -- on Python 3 `n / 4` is a float and
        # range() raises TypeError.
        for i in range(n // 4 + 1):
            size = read4(buffer)
            if size:
                size = min(size, n - read_bytes)  # never copy past the n limit
                buf[read_bytes:read_bytes + size] = buffer[:size]
                read_bytes += size
            else:
                break  # read4 returned 0: end of file
        return read_bytes
class Solution(object):
    def read(self, buf, n):
        """
        :type buf: Destination buffer (List[str])
        :type n: Maximum number of characters to read (int)
        :rtype: The number of characters read (int)
        """
        index = 0
        while True:
            buf4 = [''] * 4
            # Fix: min(read4(buf4), index) is always 0 on the first pass, so
            # the method returned 0 immediately; cap by the remaining request
            # (n - index) instead.
            current = min(read4(buf4), n - index)
            for i in range(current):
                buf[index] = buf4[i]
                index += 1
            # Fewer than 4 chars consumed means EOF or the n limit was hit.
            if current != 4:
                return index
class Solution:
    """Read N characters given read4, callable multiple times -- leftover
    characters from the previous read4 call persist on the instance and are
    served first on the next call.

    NOTE(review): relies on an external `Reader.read4` API that is not
    defined in this file -- verify against the judge environment.
    """

    def __init__(self):
        # (leftover buffer, cursor into it, count of valid chars in it)
        (self.buf4, self.i4, self.n4) = ([None] * 4, 0, 0)

    def read(self, buf, n):
        # Copy up to n characters into buf; return how many were copied.
        i = 0
        while i < n:
            if self.i4 == self.n4:
                # Internal buffer exhausted -- refill it via read4.
                (self.i4, self.n4) = (0, Reader.read4(self.buf4))
                if not self.n4:  # read4 returned 0: end of file
                    break
            (buf[i], i, self.i4) = (self.buf4[self.i4], i + 1, self.i4 + 1)
        return i
def read4(buf):
    """Simulated read4 API: copy up to 4 characters from the module-level
    string `file_content` into buf, consuming them from it; returns the
    number of characters copied.

    NOTE(review): `file_content` must be initialised elsewhere before
    calling -- it is not defined in this file.
    """
    global file_content
    i = 0
    while i < len(file_content) and i < 4:
        buf[i] = file_content[i]
        i += 1
    # Consume what was just read so the next call returns the next chunk.
    if len(file_content) > 4:
        file_content = file_content[4:]
    else:
        file_content = ''
    return i
class Solution(object):

    def read(self, buf, n):
        """
        :type buf: Destination buffer (List[str])
        :type n: Maximum number of characters to read (int)
        :rtype: The number of characters read (int)
        """
        read_bytes = 0
        buffer = [''] * 4
        # Fix: `n / 4` is a float on Python 3 and range() raises TypeError;
        # use floor division.
        for i in range(n // 4 + 1):
            size = read4(buffer)
            if size:
                size = min(size, n - read_bytes)  # never copy past the n limit
                buf[read_bytes:read_bytes + size] = buffer[:size]
                read_bytes += size
            else:
                break  # read4 returned 0: end of file
        return read_bytes
def check_line_containment(line_1, line_2):
    """Return True when the 1-D segment line_1 = (lo, hi) fully contains line_2."""
    starts_no_later = line_1[0] <= line_2[0]
    ends_no_earlier = line_1[1] >= line_2[1]
    return starts_no_later and ends_no_earlier


def containment(rec_1, rec_2):
    """Return True when either rectangle fully contains the other.

    Rectangles are dicts mapping 'x' and 'y' to (lo, hi) pairs.
    """
    def _covers(outer, inner):
        # outer contains inner iff it contains it on both axes
        return (check_line_containment(outer.get('x'), inner.get('x'))
                and check_line_containment(outer.get('y'), inner.get('y')))

    return _covers(rec_1, rec_2) or _covers(rec_2, rec_1)
def check_line_containment(line_1, line_2):
    """Return True when segment line_1 = (lo, hi) fully contains line_2.

    Restored from a line corrupted by a stray table delimiter.
    """
    return line_1[0] <= line_2[0] and line_1[1] >= line_2[1]


def containment(rec_1, rec_2):
    """Return True when either axis-aligned rectangle contains the other.

    Rectangles are dicts mapping 'x' and 'y' to (lo, hi) pairs.
    """
    is__rec_1__in__rec_2 = check_line_containment(rec_1.get('x'), rec_2.get('x')) and check_line_containment(rec_1.get('y'), rec_2.get('y'))
    is__rec_2__in__rec_1 = check_line_containment(rec_2.get('x'), rec_1.get('x')) and check_line_containment(rec_2.get('y'), rec_1.get('y'))
    return is__rec_1__in__rec_2 or is__rec_2__in__rec_1
# -*- coding: utf-8 -*-
'''
mc_proxy / Dummy state generation
==================================
'''
def hook(name, changes=None, **kw):
    """Dummy state that always succeeds; useful as an orchestration anchor.

    name
        name of the dummy state
    changes
        optional changes dict to report (defaults to empty)
    """
    return {
        'name': name,
        'result': True,
        'comment': 'Dummy state for {0}'.format(name),
        # Any falsy `changes` value is normalised to an empty dict.
        'changes': changes or {},
    }
def mod_watch(name, **kwargs):
    """Watcher entry point: report a single dummy change for *name*."""
    watcher_changes = {1: 1}
    return hook(name, changes=watcher_changes)
| """
mc_proxy / Dummy state generation
==================================
"""
def hook(name, changes=None, **kw):
"""
State that will always return ret, use that for orchestration purpose
name
name of dummy state
"""
if not changes:
changes = {}
ret = {'name': name, 'result': True, 'comment': 'Dummy state for {0}'.format(name), 'changes': changes}
return ret
def mod_watch(name, **kwargs):
    """
    Execute a dummy state in case of watcher changes
    """
    dummy_changes = {1: 1}
    return hook(name, changes=dummy_changes)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.