hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a09c8dabf09279418f0c028ae9f2bbd624af93e
| 241
|
py
|
Python
|
app/config/storages.py
|
contestcrew/2019SeoulContest-Backend
|
2e99cc6ec6a712911da3b79412ae84a9d35453e1
|
[
"MIT"
] | null | null | null |
app/config/storages.py
|
contestcrew/2019SeoulContest-Backend
|
2e99cc6ec6a712911da3b79412ae84a9d35453e1
|
[
"MIT"
] | 32
|
2019-08-30T13:09:28.000Z
|
2021-06-10T19:07:56.000Z
|
app/config/storages.py
|
contestcrew/2019SeoulContest-Backend
|
2e99cc6ec6a712911da3b79412ae84a9d35453e1
|
[
"MIT"
] | 3
|
2019-09-19T10:12:50.000Z
|
2019-09-30T15:59:13.000Z
|
from storages.backends.s3boto3 import S3Boto3Storage
class MediaStorage(S3Boto3Storage):
    """S3 storage backend for user-uploaded media files (publicly readable)."""
    location = 'media'
    default_acl = 'public-read'
class StaticStorage(S3Boto3Storage):
    """S3 storage backend for collected static assets (publicly readable)."""
    location = 'static'
    default_acl = 'public-read'
| 20.083333
| 52
| 0.742739
|
4a09c931c2c21b7523b6a259337add1b01012f8e
| 2,399
|
py
|
Python
|
SLpackage/private/pacbio/pythonpkgs/kineticstools/lib/python2.7/site-packages/kineticsTools/sharedArray.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 5
|
2022-02-20T07:10:02.000Z
|
2022-03-18T17:47:53.000Z
|
SLpackage/private/pacbio/pythonpkgs/kineticstools/lib/python2.7/site-packages/kineticsTools/sharedArray.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | null | null | null |
SLpackage/private/pacbio/pythonpkgs/kineticstools/lib/python2.7/site-packages/kineticsTools/sharedArray.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | null | null | null |
#################################################################################
# Copyright (c) 2011-2013, Pacific Biosciences of California, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Pacific Biosciences nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY PACIFIC BIOSCIENCES AND ITS
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PACIFIC BIOSCIENCES OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#################################################################################
from multiprocessing.sharedctypes import RawArray
import warnings
import numpy as np
class SharedArray:
    """Minimal wrapper around a chunk of shared memory accessible across processes."""

    def __init__(self, dtype, shape):
        # RawArray allocates unsynchronized shared memory (no lock wrapper).
        self._rawArray = RawArray(dtype, shape)

    def getNumpyWrapper(self):
        """Return a numpy array that views (does not copy) the shared buffer."""
        with warnings.catch_warnings():
            # as_array can emit a harmless buffer-interface warning; suppress it.
            warnings.simplefilter("ignore")
            return np.ctypeslib.as_array(self._rawArray)
| 46.134615
| 90
| 0.706128
|
4a09c97b26376722aecf17c4e2d544fa91209dd7
| 2,866
|
py
|
Python
|
nisc_impute.py
|
xiangz-108/NISC-Imputation
|
7e67b7766346883da55ff8bc92f364ac239ac33c
|
[
"MIT"
] | null | null | null |
nisc_impute.py
|
xiangz-108/NISC-Imputation
|
7e67b7766346883da55ff8bc92f364ac239ac33c
|
[
"MIT"
] | null | null | null |
nisc_impute.py
|
xiangz-108/NISC-Imputation
|
7e67b7766346883da55ff8bc92f364ac239ac33c
|
[
"MIT"
] | null | null | null |
#####################################
#Author: Xiang Zhang
#Email: xiangzhang@email.arizona.edu
#####################################
#Purpose: The goal of this code is to impute data using NISC method.
#It requires input data stored as 2D numpy array, with name "data.npy" -
#whose rows indicating the genes, cols indicating the cells.
#It will store the imputed data with name "data_nisc.npy"
import os
import random
import numpy as np
import tensorflow as tf
import time
def run_tf(gene_num, data_orig, input_tf, output_tf, current_loss, data_count_maximum):
    """Build and train the NISC autoencoder, then save the imputed matrix.

    Args:
        gene_num: number of genes (network input/output dimensionality).
        data_orig: original unnormalized counts (cells x genes), used only to
            weight the loss so higher-count entries count more.
        input_tf: normalized data (cells x genes) fed as the network input.
        output_tf: normalized data (cells x genes) used as the reconstruction target.
        current_loss: not used inside this function; kept for the caller's loop API.
        data_count_maximum: normalization constant, used to undo the scaling
            before saving the imputed matrix.

    Returns:
        The loss value from the final training iteration.

    Side effects: writes the imputed matrix to 'data_nisc.npy' and prints progress.
    """
    graph = tf.Graph()
    with graph.as_default():
        images_ph = tf.placeholder(tf.float32, [None, gene_num])
        labels_ph = tf.placeholder(tf.float32, [None, gene_num])
        weight_ph = tf.placeholder(tf.float32, [None, gene_num])
        # Autoencoder: expand -> bottleneck (gene_num/2) -> expand -> reconstruct.
        hl_1 = tf.contrib.layers.fully_connected(images_ph, gene_num*2, tf.nn.relu)
        # FIX: np.int was removed in NumPy 1.24; the builtin int is equivalent here.
        hl_2 = tf.contrib.layers.fully_connected(hl_1, int(np.floor(gene_num/2)), tf.nn.leaky_relu)
        hl_3 = tf.contrib.layers.fully_connected(hl_2, gene_num*2, tf.nn.relu)
        predicted_labels = tf.contrib.layers.fully_connected(hl_3, gene_num, tf.nn.relu)
        # Count-weighted RMSE in log space; weight w/(49+w) de-emphasizes low counts.
        loss = tf.div(tf.sqrt(tf.reduce_sum(tf.multiply(tf.div(weight_ph,49+weight_ph),tf.pow(tf.log(predicted_labels+1)-tf.log(labels_ph+1), 2)))),gene_num)
        l2_loss = tf.losses.get_regularization_loss()
        loss = loss + l2_loss*0.1
        train = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
        init = tf.global_variables_initializer()
    # Train Model
    session = tf.Session(graph=graph)
    _ = session.run([init])
    # Pass training data and return loss
    for i in range(gene_num*5):
        _, loss_value = session.run([train, loss], feed_dict={weight_ph: data_orig[:,:], images_ph: input_tf[:,:], labels_ph: output_tf[:,:]})
        # Monitor loss for every 10 iterations
        if i%10 == 0:
            print("Loss at iteration" + str(i) + "is" + str(loss_value)+ "\n")
    pred = session.run([predicted_labels], feed_dict={weight_ph: data_orig[:,:], images_ph: input_tf[:,:]})
    # FIX: the session was never closed, leaking graph resources across calls.
    session.close()
    nn_output = np.asarray(pred)[0,:,:]
    nn_output = nn_output.transpose()
    # Undo the normalization: back to log space, rescale, then exponentiate.
    nn_output = np.log(nn_output+1)
    nn_output = nn_output*data_count_maximum
    nn_output = np.exp(nn_output)-1
    np.save("data_nisc", nn_output)
    print('nn output generated')
    return loss_value
# Load the raw count matrix (rows = genes, cols = cells), log-transform it,
# scale so the maximum log value is 1, then map back to count-like space.
data_count = np.load('data.npy')
data_count = np.log(data_count + 1)
data_count_maximum = np.amax(data_count)
data_count = data_count / data_count_maximum
data_count = np.exp(data_count) - 1
# Keep an untouched copy of the raw counts; run_tf uses it to weight the loss.
data_count_orig = np.load('data.npy')
data = data_count
gene_num = data.shape[0]
# The network treats cells as rows, so transpose everything (genes x cells -> cells x genes).
data_orig = data_count_orig.transpose()
input_tf = data.transpose()
output_tf = data.transpose()
current_loss = 1000
for i in range(1):
    current_loss = run_tf(gene_num, data_orig, input_tf, output_tf, current_loss, data_count_maximum)
| 34.95122
| 157
| 0.693301
|
4a09ca167771b7fc5ca6795e4ebcdc7a6175eeb7
| 3,082
|
py
|
Python
|
tests/sentry/api/endpoints/test_project_tagkey_details.py
|
Ali-Tahir/sentry
|
aa7b306c5ea671ac002a3524982563679557cb31
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/api/endpoints/test_project_tagkey_details.py
|
Ali-Tahir/sentry
|
aa7b306c5ea671ac002a3524982563679557cb31
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/api/endpoints/test_project_tagkey_details.py
|
Ali-Tahir/sentry
|
aa7b306c5ea671ac002a3524982563679557cb31
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import mock
from django.core.urlresolvers import reverse
from sentry import tagstore
from sentry.tagstore import TagKeyStatus
from sentry.testutils import APITestCase, SnubaTestCase
from sentry.testutils.helpers.datetime import iso_format, before_now
class ProjectTagKeyDetailsTest(APITestCase, SnubaTestCase):
    def test_simple(self):
        """Sixteen events with distinct 'foo' values report uniqueValues == 16."""
        project = self.create_project()
        # Store 16 events, each carrying a distinct foo=val<i> tag value.
        for i in range(0, 16):
            self.store_event(
                data={
                    "tags": {"foo": "val{}".format(i)},
                    "timestamp": iso_format(before_now(seconds=1)),
                },
                project_id=project.id,
            )
        self.login_as(user=self.user)
        url = reverse(
            "sentry-api-0-project-tagkey-details",
            kwargs={
                "organization_slug": project.organization.slug,
                "project_slug": project.slug,
                "key": "foo",
            },
        )
        response = self.client.get(url)
        assert response.status_code == 200
        assert response.data["uniqueValues"] == 16
class ProjectTagKeyDeleteTest(APITestCase):
    @mock.patch("sentry.eventstream")
    def test_simple(self, mock_eventstream):
        """Deleting an ordinary tag key returns 204 and drives the eventstream."""
        key = "foo"
        val = "bar"
        project = self.create_project()
        self.store_event(
            data={"tags": {key: val}, "timestamp": iso_format(before_now(seconds=1))},
            project_id=project.id,
        )
        self.login_as(user=self.user)
        eventstream_state = object()
        mock_eventstream.start_delete_tag = mock.Mock(return_value=eventstream_state)
        url = reverse(
            "sentry-api-0-project-tagkey-details",
            kwargs={
                "organization_slug": project.organization.slug,
                "project_slug": project.slug,
                "key": key,
            },
        )
        response = self.client.delete(url)
        assert response.status_code == 204
        # The delete must have been bracketed by start/end eventstream calls.
        mock_eventstream.start_delete_tag.assert_called_once_with(project.id, "foo")
        mock_eventstream.end_delete_tag.assert_called_once_with(eventstream_state)

    def test_protected(self):
        """'environment' is a protected key: delete is refused (403) and it stays visible."""
        project = self.create_project()
        self.store_event(
            data={"environment": "prod", "timestamp": iso_format(before_now(seconds=1))},
            project_id=project.id,
        )
        self.login_as(user=self.user)
        url = reverse(
            "sentry-api-0-project-tagkey-details",
            kwargs={
                "organization_slug": project.organization.slug,
                "project_slug": project.slug,
                "key": "environment",
            },
        )
        response = self.client.delete(url)
        assert response.status_code == 403
        assert (
            tagstore.get_tag_key(
                project.id, None, "environment", status=TagKeyStatus.VISIBLE  # environment_id
            ).status
            == TagKeyStatus.VISIBLE
        )
| 28.803738
| 94
| 0.583387
|
4a09ca77c279805483d4429d87ce70e11e9f34cd
| 2,634
|
py
|
Python
|
packages/structural_dhcp_rst2pdf-aquavitae/structural_dhcp_rst2pdf/pygments2style.py
|
amakropoulos/structural-pipeline-measures
|
70e22f9ad94cc57e72e510576cfc3129da83f7fc
|
[
"Apache-2.0"
] | 2
|
2017-09-11T15:25:14.000Z
|
2019-09-27T17:08:31.000Z
|
packages/structural_dhcp_rst2pdf-aquavitae/structural_dhcp_rst2pdf/pygments2style.py
|
amakropoulos/structural-pipeline-measures
|
70e22f9ad94cc57e72e510576cfc3129da83f7fc
|
[
"Apache-2.0"
] | 6
|
2019-08-22T06:29:45.000Z
|
2021-09-19T18:59:46.000Z
|
packages/structural_dhcp_rst2pdf-aquavitae/structural_dhcp_rst2pdf/pygments2style.py
|
amakropoulos/structural-pipeline-measures
|
70e22f9ad94cc57e72e510576cfc3129da83f7fc
|
[
"Apache-2.0"
] | 1
|
2018-02-12T14:38:33.000Z
|
2018-02-12T14:38:33.000Z
|
# -*- coding: utf-8 -*-
# See LICENSE.txt for licensing terms
'''
Creates a structural_dhcp_rst2pdf stylesheet for each pygments style.
'''
import os
from . import dumpstyle
from pygments.token import STANDARD_TYPES
from pygments import styles as pstyles
# Collect every CSS class name pygmentize can emit across all known pygments
# styles, so the generated stylesheets can cover all of them.
classnames = set()
for name in list(pstyles.get_all_styles()):
    css = os.popen('pygmentize -S %s -f html' % name, 'r').read()
    for rule in css.splitlines():
        rule = rule.strip()
        # A rule looks like '.xy { ... }'; '.xy' becomes 'pygments-xy'.
        classnames.add("pygments-" + rule.split(' ')[0][1:])
def css2rl(css):
    """Convert pygmentize HTML CSS output into an rst2pdf stylesheet string.

    Each CSS rule '.x { prop: val; ... }' becomes a ['pygments-x', {...}]
    style entry; any class in the module-level `classnames` set that this CSS
    never mentions receives a black-text default inheriting from 'code'.
    """
    # Seed a default entry for every standard pygments token class.
    dstyles = {}
    for token in STANDARD_TYPES:
        dstyles["pygments-" + STANDARD_TYPES[token]] = {'parent': 'code'}

    styles = []
    seenclassnames = set()
    for rule in css.splitlines():
        rule = rule.strip()
        sname = "pygments-" + rule.split(' ')[0][1:]
        seenclassnames.add(sname)
        style = dstyles.get(sname, {'parent': 'code'})
        for option in rule.split('{')[1].split('}')[0].split(';'):
            prop, value = option.strip().split(':')
            prop = prop.strip()
            value = value.strip()
            if prop == 'color':
                style['textColor'] = value.strip()
            if prop == 'background-color':
                style['backColor'] = value.strip()
            # Bold and italic can appear in either order; combine when both seen.
            if prop == 'font-weight' and value == 'bold':
                if 'fontName' in style and \
                        style['fontName'] == 'stdMonoItalic':
                    style['fontName'] = 'stdMonoBoldItalic'
                else:
                    style['fontName'] = 'stdMonoBold'
            if prop == 'font-style' and value == 'italic':
                if 'fontName' in style and style['fontName'] == 'stdBold':
                    style['fontName'] = 'stdMonoBoldItalic'
                else:
                    style['fontName'] = 'stdMonoItalic'
        if style.get('textColor', None) is None:
            style['textColor'] = 'black'
        styles.append([sname, style])
    # Now add default styles for all unseen class names.
    for sname in classnames - seenclassnames:
        style = dstyles.get(sname, {'parent': 'code'})
        style['textColor'] = 'black'
        styles.append([sname, style])
    return dumpstyle.dumps({'styles': styles})
# Generate one '<style-name>.style' file per pygments style.
for name in list(pstyles.get_all_styles()):
    css = os.popen('pygmentize -S %s -f html' % name, 'r').read()
    # FIX: the original leaked the file handle via open(...).write(...);
    # a context manager guarantees the file is flushed and closed.
    with open(name + '.style', 'w') as style_file:
        style_file.write(css2rl(css))
| 36.583333
| 74
| 0.566439
|
4a09caf9fbbb60b67ed45087e8aef17c3c568097
| 2,335
|
py
|
Python
|
mev/api/converters/mappers.py
|
hsph-qbrc/mev-backend
|
c381800aa7d53d7256e89a4db5a0f9444264e9a6
|
[
"MIT"
] | null | null | null |
mev/api/converters/mappers.py
|
hsph-qbrc/mev-backend
|
c381800aa7d53d7256e89a4db5a0f9444264e9a6
|
[
"MIT"
] | null | null | null |
mev/api/converters/mappers.py
|
hsph-qbrc/mev-backend
|
c381800aa7d53d7256e89a4db5a0f9444264e9a6
|
[
"MIT"
] | null | null | null |
import os
import json
from api.exceptions import InputMappingException
class MapConverter(object):
    '''
    Base class for inputs that act as a proxy for the true inputs.

    Example: a tool needs multiple genome-specific files (e.g. for a BWA
    alignment) but the user only selects which genome to use. A mapper
    implementation takes that selection (e.g. a string from an OptionString
    input type) and returns the relevant info (paths to index files).

    Importantly, the returned values populate the 'final' inputs used to
    execute the analysis, so for a Cromwell-based job those files must
    already live in a cloud-based bucket.
    '''
    pass
class SimpleFileBasedMapConverter(MapConverter):
    '''
    A simple implementation where a single key (such as a genome identifier)
    is used as a lookup into a JSON mapping file; the object mapped to that
    key is returned as the resolved inputs.
    '''
    # The name of the file which provides the inputs we need. For instance, if we
    # are using this converter to get genome-specific files, then the keys would be
    # the genome identifiers (e.g. Grch38) and the values would be objects themselves.
    # It is expected that this file exists in the repo; a missing file raises
    # InputMappingException at runtime.
    MAPPING_FILE = 'input_mapping.json'

    def convert(self, input_key, user_input, op_dir, staging_dir):
        """Resolve `user_input` via the mapping file found in `op_dir`.

        Raises InputMappingException if the file is missing, is not valid
        JSON, or has no entry for `user_input`.
        """
        map_file = os.path.join(op_dir, self.MAPPING_FILE)
        if not os.path.exists(map_file):
            raise InputMappingException('Could not locate the input mapping'
                ' file at {p}'.format(p=map_file))
        try:
            # FIX: use a context manager instead of json.load(open(...)),
            # which leaked the file handle.
            with open(map_file) as fin:
                mapping_data = json.load(fin)
        except json.decoder.JSONDecodeError as ex:
            # Chain the original parse error for easier debugging.
            raise InputMappingException('Could not use the JSON parser to load'
                ' the input mapping file at {p}. Exception was {ex}'.format(
                    p=map_file,
                    ex = ex
                )) from ex
        try:
            return mapping_data[user_input]
        except KeyError as ex:
            raise InputMappingException('No mapping found for key: "{k}". Check'
                ' the mapping file at {p}'.format(
                    p=map_file,
                    k = user_input
                )) from ex
| 40.964912
| 90
| 0.653105
|
4a09cb779465a105e87acf5d0b08ebd71cd66c99
| 22,465
|
py
|
Python
|
stretch_funmap/src/stretch_funmap/merge_maps.py
|
ScazLab/stretch_ros
|
7b7c9e6bea15ab507e87a15d294a1068b9dc4a85
|
[
"BSD-3-Clause-Clear",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
stretch_funmap/src/stretch_funmap/merge_maps.py
|
ScazLab/stretch_ros
|
7b7c9e6bea15ab507e87a15d294a1068b9dc4a85
|
[
"BSD-3-Clause-Clear",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
stretch_funmap/src/stretch_funmap/merge_maps.py
|
ScazLab/stretch_ros
|
7b7c9e6bea15ab507e87a15d294a1068b9dc4a85
|
[
"BSD-3-Clause-Clear",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import max_height_image as mh
import numpy as np
import scipy.ndimage as nd
import scipy.signal as si
import cv2
import skimage as sk
import math
import hello_helpers.hello_misc as hm
import navigation_planning as na
import copy
import time
from scipy.optimize import minimize, minimize_scalar
from numba_compare_images import numba_compare_images_2
import mapping as ma
import tf_conversions
import cma
def affine_transform_2d_point(affine_matrix, point):
    """Apply a 2x3 affine matrix to a 2D point and return the mapped 2D point."""
    # Lift the point to homogeneous coordinates [x, y, 1] as a column vector.
    homogeneous = np.ones(3)
    homogeneous[:2] = point
    mapped = np.matmul(affine_matrix, homogeneous.reshape(3, 1))
    return mapped.reshape(2)
def register_images(max_height_image_to_warp, max_height_image_target, image_to_warp_center,
                    init_target_x, init_target_y, init_angle_deg,
                    verbose=False,
                    target_x_constraints=None,
                    target_y_constraints=None,
                    angle_deg_constraints=None,
                    grid_search=False):
    """Estimate the 2D rigid transform (x, y, angle) that best aligns one
    max-height image onto another, using CMA-ES over a match-score cost.

    Returns a tuple ((target_x, target_y, angle_deg), best_error) where the
    pose places `image_to_warp_center` at (target_x, target_y) in the target
    image, rotated by angle_deg. Constraints are expressed as offsets around
    the initial pose. When grid_search is True, CMA-ES is restarted from a
    grid of poses across the whole target map; otherwise a single run starts
    from the provided initial pose.
    NOTE(review): `verbose` is accepted but never read in this body.
    """
    if target_x_constraints is None:
        target_x_constraints=[-1000, 1000]
    if target_y_constraints is None:
        target_y_constraints=[-1000, 1000]
    if angle_deg_constraints is None:
        angle_deg_constraints=[-360.0, 360.0]
    m_per_pix = max_height_image_target.m_per_pix
    m_per_height_unit = max_height_image_target.m_per_height_unit
    image_to_warp = max_height_image_to_warp.image
    weight_by_height = True
    target_image = max_height_image_target.image
    # Lightly blur both images; zero (unobserved) target pixels are restored
    # to zero afterward so blur does not invent observations.
    blur_size = (3,3)
    image_to_warp = cv2.GaussianBlur(np.float64(image_to_warp), blur_size, 0)
    target_image_not_smoothed = target_image
    target_image = cv2.GaussianBlur(np.float64(target_image), blur_size, 0)
    target_image[target_image_not_smoothed == 0] = 0
    # As the target image becomes larger than the image being warped
    # this upper bound becomes less meaningful, since it becomes
    # unachievable and feasible values become much smaller.
    # match_score_upper_bound = np.sum(target_image)
    # Estimate the upper bound with the image to warp, since it will usually be a smaller local scan.
    match_score_upper_bound = np.sum(image_to_warp)
    image_to_warp_center = np.array(image_to_warp_center)
    # Turn the relative constraint offsets into absolute bounds around the
    # initial pose.
    target_x_constraints = [init_target_x + c for c in target_x_constraints]
    target_y_constraints = [init_target_y + c for c in target_y_constraints]
    angle_deg_constraints = [init_angle_deg + c for c in angle_deg_constraints]
    print('target_x_constraint =', target_x_constraints)
    print('target_y_constraint =', target_y_constraints)
    print('angle_deg_constraint =', angle_deg_constraints)
    # Map parameters between their physical range and [0, 1] for CMA-ES.
    def scale_out(v, min_val, max_val):
        return (v - min_val) / (max_val - min_val)
    def scale_in(v, min_val, max_val):
        return (v * (max_val - min_val)) + min_val
    def scale_parameters_in(s):
        # Transform the optimized parameters back to their original
        # forms.
        x = scale_in(s[0], target_x_constraints[0], target_x_constraints[1])
        y = scale_in(s[1], target_y_constraints[0], target_y_constraints[1])
        a = scale_in(s[2], angle_deg_constraints[0], angle_deg_constraints[1])
        return [x,y,a]
    def scale_parameters_out(s):
        # Scale the parameters so that they are between 0 and 1 during
        # the optimization when the constraints are met.
        x = scale_out(s[0], target_x_constraints[0], target_x_constraints[1])
        y = scale_out(s[1], target_y_constraints[0], target_y_constraints[1])
        a = scale_out(s[2], angle_deg_constraints[0], angle_deg_constraints[1])
        return [x,y,a]
    # NOTE(review): constraints_satisfied is defined but never called below.
    def constraints_satisfied(target_x, target_y, angle_deg):
        # Return true if all the constraints are satisfied. Otherwise, return false.
        # When satisfying the constraints, the parameters will be between 0 and 1.
        target_x_in_range = (0.0 <= target_x) and (target_x <= 1.0)
        target_y_in_range = (0.0 <= target_y) and (target_y <= 1.0)
        angle_deg_in_range = (0.0 <= angle_deg) and (angle_deg <= 1.0)
        return (target_x_in_range and target_y_in_range and angle_deg_in_range)
    # NOTE(review): this looks buggy — `penalty` starts at 0.0, so both
    # products are always 0; `parameter` was presumably intended. It is dead
    # code in practice: its only caller is commented out in fast_cost_func.
    def compute_penalty_normalized_input(parameter, scale):
        penalty = 0.0
        if parameter < 0.0:
            penalty += scale * (-1.0 * penalty)
        if parameter > 1.0:
            penalty += scale * (penalty - 1.0)
        return penalty
    def constraints_penalty_normalized_input(target_x, target_y, angle_deg):
        # Penalize violation of the constraints. When satisfying the
        # constraints, the parameters will be between 0 and 1.
        scale = 10000.0
        penalty = 0.0
        penalty += compute_penalty_normalized_input(target_x, scale)
        penalty += compute_penalty_normalized_input(target_y, scale)
        penalty += compute_penalty_normalized_input(angle_deg, scale)
        return penalty
    # Linear penalty for leaving the [min, max] constraint interval.
    def compute_position_penalty(parameter, constraints, scale):
        penalty = 0.0
        if parameter < constraints[0]:
            penalty += scale * abs(parameter - constraints[0])
        if parameter > constraints[1]:
            penalty += scale * abs(parameter - constraints[1])
        return penalty
    # NOTE(review): identical to compute_position_penalty; kept separate
    # in the original, presumably for future angle wrapping.
    def compute_angle_penalty(parameter, constraints, scale):
        penalty = 0.0
        if parameter < constraints[0]:
            penalty += scale * abs(parameter - constraints[0])
        if parameter > constraints[1]:
            penalty += scale * abs(parameter - constraints[1])
        return penalty
    def constraints_penalty(target_x, target_y, angle_deg):
        # Penalize violation of the constraints. When satisfying the
        # constraints, the parameters will be between 0 and 1.
        scale = 10000.0
        penalty = 0.0
        penalty += compute_position_penalty(target_x, target_x_constraints, scale)
        penalty += compute_position_penalty(target_y, target_y_constraints, scale)
        penalty += compute_angle_penalty(angle_deg, angle_deg_constraints, scale)
        return penalty
    # Cost = constraint penalty minus normalized image match score; CMA-ES
    # minimizes this over normalized [0, 1] parameters.
    def fast_cost_func(s):
        #penalty = constraints_penalty_normalized_input(*s)
        s = scale_parameters_in(s)
        penalty = constraints_penalty(*s)
        target_image_location = s[:2]
        target_x = target_image_location[0]
        target_y = target_image_location[1]
        angle_deg = s[2]
        # Rotate about the warp image's center, then translate that center
        # to the candidate target location.
        affine_matrix = cv2.getRotationMatrix2D((image_to_warp_center[0], image_to_warp_center[1]), angle_deg, 1.0)
        affine_matrix[:, 2] += target_image_location - image_to_warp_center
        match_score = numba_compare_images_2(image_to_warp, target_image, target_image_not_smoothed,
                                             affine_matrix, m_per_height_unit, match_threshold_m=0.1,
                                             weight_by_height=True)
        # normalize match_score to be between 0.0 and 1.0
        match_score = match_score/match_score_upper_bound
        cost = penalty - match_score
        return cost
    height, width = target_image.shape
    if init_target_x is None:
        init_target_x = 0.0
    if init_target_y is None:
        init_target_y = 0.0
    if init_angle_deg is None:
        init_angle_deg = 0.0
    if grid_search:
        options = {'tolfun': 0.01}
        initial_standard_deviation = 0.02
        initial_solution = []
        w,h = max_height_image_target.image.shape
        # grid of starting positions across the map
        n = 4
        border = w/(2.0*n)
        x_values = np.linspace(border, w - border, n)
        y_values = np.linspace(border, h - border, n)
        a_values = [0.0, 120.0, 240.0]
        initial_solution_list = []
        for x in x_values:
            for y in y_values:
                for a in a_values:
                    initial_solution_list.append([x, y, a])
        print('len(initial_solution_list) =', len(initial_solution_list))
        print('initial_solution_list =', initial_solution_list)
        # Run CMA-ES from every grid pose and keep the lowest-cost result.
        best_result = None
        for initial_solution in initial_solution_list:
            initial_solution = scale_parameters_out(initial_solution)
            result = cma.fmin(fast_cost_func, initial_solution, initial_standard_deviation, options)
            if best_result is None:
                best_parameters = scale_parameters_in(result[0])
                best_error = result[1]
                best_result = result
            else:
                new_error = result[1]
                if new_error < best_error:
                    best_parameters = scale_parameters_in(result[0])
                    best_error = result[1]
                    best_result = result
        result = best_result
    else:
        # Single CMA-ES run from the provided initial pose, tighter tolerance.
        options = {'tolfun': 0.001}
        initial_standard_deviation = 0.02
        initial_solution = [init_target_x, init_target_y, init_angle_deg]
        initial_solution = scale_parameters_out(initial_solution)
        result = cma.fmin(fast_cost_func, initial_solution, initial_standard_deviation, options)
        best_parameters = scale_parameters_in(result[0])
        best_error = result[1]
        best_result = result
    # NOTE(review): bare `print` under `from __future__ import print_function`
    # is a no-op expression, not a blank line; likely intended as print('').
    print
    print('Optimization complete.')
    print
    # Convert numpy entries of the CMA-ES result to plain Python for printing.
    no_numpy_cma_result = []
    for entry in result:
        if "tolist" in dir(entry):
            entry = entry.tolist()
        no_numpy_cma_result.append(entry)
    cma_result = {'initial_solution': initial_solution,
                  'initial_standard_deviation': initial_standard_deviation,
                  'options': options,
                  'best_parameters': no_numpy_cma_result[0],
                  'best_parameters_error': no_numpy_cma_result[1],
                  'num_evals_to_find_best': no_numpy_cma_result[2],
                  'num_evals_total': no_numpy_cma_result[3],
                  'cma_iterations': no_numpy_cma_result[4],
                  'cma_parameter_means': no_numpy_cma_result[5],
                  'cma_parameter_stddevs': no_numpy_cma_result[6]}
    print('cma_result =')
    print(cma_result)
    # Map the best normalized parameters back to pixels/degrees.
    s_min = scale_parameters_in(cma_result['best_parameters'])
    print('best_parameters =', s_min)
    print('')
    return (s_min[0], s_min[1], s_min[2]), cma_result['best_parameters_error']
def transform_xya_to_xya_3d(transform_mat, x, y, ang_rad):
    """Map an (x, y, angle) planar pose through a 3D affine matrix.

    Only for 3D affine matrices (3x4 or 4x4). The angle is carried through by
    transforming a unit heading vector anchored at the pose; note the y-axis
    flip (image-style coordinates) when building the heading endpoint.
    Returns (mapped_x, mapped_y, mapped_angle_rad).
    """
    rows, cols = transform_mat.shape
    assert rows in (3, 4)
    assert cols == 4
    # Transform the pose position (z = 0 plane).
    start = np.matmul(transform_mat, [x, y, 0.0, 1.0])[:2]
    # Transform a point one unit ahead along the heading, then recover the
    # mapped heading from the difference vector.
    heading_len = 1.0
    tip_x = x + heading_len * np.cos(ang_rad)
    tip_y = y - heading_len * np.sin(ang_rad)
    tip = np.matmul(transform_mat, [tip_x, tip_y, 0.0, 1.0])[:2]
    delta = tip - start
    return start[0], start[1], np.arctan2(delta[1], delta[0])
def transform_xya_to_xya_2d(transform_mat, x, y, ang_rad):
    """Map an (x, y, angle) planar pose through a 2D affine matrix.

    Only for 2D affine matrices (2x3 or 3x3). The angle is carried through by
    transforming a unit heading vector anchored at the pose; note the y-axis
    flip (image-style coordinates) when building the heading endpoint.
    Returns (mapped_x, mapped_y, mapped_angle_rad).
    """
    rows, cols = transform_mat.shape
    assert rows in (2, 3)
    assert cols == 3
    # Transform the pose position.
    start = np.matmul(transform_mat, [x, y, 1.0])[:2]
    # Transform a point one unit ahead along the heading, then recover the
    # mapped heading from the difference vector.
    heading_len = 1.0
    tip_x = x + heading_len * np.cos(ang_rad)
    tip_y = y - heading_len * np.sin(ang_rad)
    tip = np.matmul(transform_mat, [tip_x, tip_y, 1.0])[:2]
    delta = tip - start
    return start[0], start[1], np.arctan2(delta[1], delta[0])
def unaligned_merge_scan_1_into_scan_2(scan_1, scan_2, display_on=False, show_unaligned=False):
    """Naively merge scan_1's max-height image into scan_2's, in place.

    scan_2 pixels with no observation (value 0) are filled from scan_1; no
    alignment and no camera-depth reasoning is performed. scan_2 is mutated.
    NOTE(review): `show_unaligned` is accepted but never read in this body.
    """
    mhi_1 = scan_1.max_height_im
    mhi_2 = scan_2.max_height_im
    h1, w1 = mhi_1.image.shape
    h2, w2 = mhi_2.image.shape
    # First, try naively merging without alignment or using camera depth
    if (h1 == h2) and (w1 == w2):
        merged_sub_image = mhi_2.image
    else:
        # This assumes that scan 1 is smaller than scan 2 and is
        # really just a quick hack for testing, since this operation
        # doesn't make much sense if the images have different sizes
        # and aren't aligned.
        merged_sub_image = mhi_2.image[:h1, :w1]
    merged_sub_image[merged_sub_image == 0] = mhi_1.image[merged_sub_image == 0]
    if display_on:
        # FIX: the original referenced an undefined name `merged`, raising a
        # NameError whenever display_on was True; show the merged map instead.
        cv2.imshow('Naive merge', mhi_2.image)
def unaligned_blended_scan_1_into_scan_2(scan_1, scan_2, display_on=False, show_unaligned=False):
    """Merge scan_1 into scan_2 in place, using camera depth but no alignment.

    A scan_2 pixel is overwritten by scan_1 when scan_2 has no observation
    there, or when scan_1's camera was strictly closer (a camera depth of 0
    means unobserved). Only the height image of scan_2 is updated.
    NOTE(review): `show_unaligned` is accepted but never read in this body.
    """
    mhi_1 = scan_1.max_height_im
    mhi_2 = scan_2.max_height_im
    h1, w1 = mhi_1.image.shape
    h2, w2 = mhi_2.image.shape
    # Second, try merging without alignment, but using camera depth
    if (h1 == h2) and (w1 == w2):
        merged_sub_image = mhi_2.image
        merged_sub_depth_image = mhi_2.camera_depth_image
    else:
        # This assumes that scan 1 is smaller than scan 2 and is
        # really just a quick hack for testing, since this operation
        # doesn't make much sense if the images have different sizes
        # and aren't aligned.
        merged_sub_image = mhi_2.image[:h1, :w1]
        merged_sub_depth_image = mhi_2.camera_depth_image[:h1, :w1]
    # 1 has an observation and 0 does not have an observation.
    unobserved_selector = (merged_sub_image == 0) & (mhi_1.image != 0)
    # 1 has an observation and the camera was closer than in 0. No
    # observation in 0 would result in a camera depth of 0, so unobserved_selector is important.
    nearer_selector = (mhi_1.camera_depth_image < merged_sub_depth_image) & (mhi_1.camera_depth_image != 0)
    selector = unobserved_selector | nearer_selector
    merged_sub_image[selector] = mhi_1.image[selector]
    if display_on:
        # FIX: the original referenced an undefined name `merged`, raising a
        # NameError whenever display_on was True; show the merged map instead.
        cv2.imshow('Unaligned camera depth merge', mhi_2.image)
def estimate_scan_1_to_scan_2_transform(scan_1, scan_2, display_on=False, show_unaligned=False,
                                        full_localization=False, init_target=None,
                                        grid_search=False, small_search=False):
    """Estimate the affine transform aligning scan_1's image onto scan_2's.

    Chooses search bounds (global grid search vs. a local window around the
    robot's pose in scan_1), runs register_images, and converts the result
    into (1) a 2x3 affine matrix from scan_1 image coordinates to scan_2
    image coordinates, (2) the robot's original map-frame pose, and (3) the
    corrected map-frame pose implied by the registration.
    NOTE(review): `display_on` and `show_unaligned` are accepted but never
    read in this body.
    """
    mhi_2 = scan_2.max_height_im
    mhi_1 = scan_1.max_height_im
    h, w = mhi_1.image.shape
    if full_localization:
        # Assume no pertinent pose information has been provided, so
        # attempt to search over all feasible poses.
        mhi_to_warp_center = [w/2, h/2]
        min_dim = min(w,h)
        if init_target is None:
            init_target = [w/2, h/2, 0.0]
        # Redundant, but allows some rollover for continuity. Might be a worthwhile tradeoff.
        angle_constraint_deg = 200.0
        position_constraint_pix = min_dim/2
    else:
        # Search only a window around the robot's current pose in scan_1.
        print('scan_1.robot_xy_pix = {0}'.format(scan_1.robot_xy_pix))
        print('init_target = {0}'.format(init_target))
        mhi_to_warp_center = [scan_1.robot_xy_pix[0], scan_1.robot_xy_pix[1]]
        if init_target is None:
            init_target = [scan_1.robot_xy_pix[0], scan_1.robot_xy_pix[1], 0.0]
        if not small_search:
            position_constraint_m = 1.2
            angle_constraint_deg = 45.0
        else:
            position_constraint_m = 0.6
            angle_constraint_deg = 30.0
        position_constraint_pix = position_constraint_m / mhi_2.m_per_pix
    mhi_to_warp = mhi_1
    mhi_target = mhi_2
    print('init_target =', init_target)
    registration, cost = register_images(mhi_to_warp, mhi_target, mhi_to_warp_center,
                                         verbose = True,
                                         target_x_constraints=[-position_constraint_pix, position_constraint_pix],
                                         target_y_constraints=[-position_constraint_pix, position_constraint_pix],
                                         angle_deg_constraints=[-angle_constraint_deg, angle_constraint_deg],
                                         init_target_x=init_target[0], init_target_y=init_target[1], init_angle_deg=init_target[2],
                                         grid_search=grid_search)
    target_x, target_y, angle_deg = registration
    print('target_x =', target_x, ', target_y =', target_y, ', angle_deg =', angle_deg)
    # Build the scan_1 -> scan_2 affine: rotate about the warp center, then
    # translate that center to the registered target location.
    affine_matrix = cv2.getRotationMatrix2D((mhi_to_warp_center[0], mhi_to_warp_center[1]), angle_deg, 1.0)
    affine_matrix[:, 2] += np.array((target_x, target_y)) - mhi_to_warp_center
    # calculate pose of the robot in the new combined map (this
    # assumes that the pose in the map_to_warp is the current pose of
    # the robot)
    map_to_warp_robot_x_pix = scan_1.robot_xy_pix[0]
    map_to_warp_robot_y_pix = scan_1.robot_xy_pix[1]
    map_to_warp_robot_theta_rad = scan_1.robot_ang_rad
    combined_robot_x_pix, combined_robot_y_pix = affine_transform_2d_point(affine_matrix, (map_to_warp_robot_x_pix, map_to_warp_robot_y_pix))
    combined_robot_theta_rad = map_to_warp_robot_theta_rad + ((np.pi/180.0) * angle_deg)
    combined_robot_pose = {'x_pix':combined_robot_x_pix, 'y_pix':combined_robot_y_pix, 'theta_rad':combined_robot_theta_rad}
    print('combined_robot_pose =', combined_robot_pose)
    # Convert to the map frame of reference. This should allow
    # the robot to update its estimate of its pose in the map frame.
    p = combined_robot_pose
    x1 = p['x_pix']
    y1 = p['y_pix']
    a = p['theta_rad']
    map_x, map_y, map_ang_rad = transform_xya_to_xya_3d(scan_2.image_to_map_mat, x1, y1, a)
    map_xy_1 = np.array([map_x, map_y])
    map_quat = tf_conversions.transformations.quaternion_from_euler(0, 0, map_ang_rad)
    print('map_xy_1 =', map_xy_1)
    print('map_ang_rad =', map_ang_rad)
    print('map_quat =', map_quat)
    # Original pose: scan_1's robot pose mapped straight into scan_2's map
    # frame without the registration correction, for comparison.
    x, y, a = transform_xya_to_xya_3d(scan_2.image_to_map_mat, scan_1.robot_xy_pix[0], scan_1.robot_xy_pix[1], scan_1.robot_ang_rad)
    original_robot_map_pose = [x, y, a]
    corrected_robot_map_pose = [map_x, map_y, map_ang_rad]
    print('original_robot_map_pose =', original_robot_map_pose)
    print('corrected_robot_map_pose =', corrected_robot_map_pose)
    return affine_matrix, original_robot_map_pose, corrected_robot_map_pose
def blend_max_height_image_1_into_max_height_image_2(mhi_1, mhi_2):
    """Merge observations from mhi_1 into mhi_2 in place.

    A pixel of mhi_2 is overwritten by mhi_1 when mhi_1 has an observation
    there and either mhi_2 has none (value 0) or mhi_1's camera was
    strictly closer. Both images must carry a camera_depth_image;
    rgb_image is blended only when both images have one.
    """
    assert mhi_1.camera_depth_image is not None
    assert mhi_2.camera_depth_image is not None
    depth_1 = mhi_1.camera_depth_image
    depth_2 = mhi_2.camera_depth_image
    # Pixels observed in mhi_1 but not in mhi_2 (0 encodes "no observation").
    fill_in = (mhi_2.image == 0) & (mhi_1.image != 0)
    # Pixels where mhi_1's camera was strictly closer; a depth of 0 means
    # unobserved and must be excluded explicitly.
    closer = (depth_1 < depth_2) & (depth_1 != 0)
    take_from_1 = fill_in | closer
    mhi_2.image[take_from_1] = mhi_1.image[take_from_1]
    depth_2[take_from_1] = depth_1[take_from_1]
    if mhi_1.rgb_image is not None and mhi_2.rgb_image is not None:
        mhi_2.rgb_image[take_from_1] = mhi_1.rgb_image[take_from_1]
def merge_scan_1_into_scan_2(scan_1, scan_2, display_on=False, show_unaligned=False,
                             full_localization=False, init_target=None,
                             grid_search=False, output_affine=False, small_search=False):
    """Register scan_1 against scan_2 and blend its max height image into scan_2's.

    scan_2.max_height_im is modified in place. Returns
    (original_robot_map_pose, corrected_robot_map_pose), and additionally the
    2x3 affine matrix when output_affine is True.
    """
    # Estimate the rigid 2D transform (rotation + translation) that maps
    # scan_1's image into scan_2's image frame.
    affine_matrix, original_robot_map_pose, corrected_robot_map_pose = estimate_scan_1_to_scan_2_transform(scan_1, scan_2,
                                                                                                           display_on=display_on,
                                                                                                           show_unaligned=show_unaligned,
                                                                                                           full_localization=full_localization,
                                                                                                           init_target=init_target,
                                                                                                           grid_search=grid_search,
                                                                                                           small_search=small_search)
    mhi_2 = scan_2.max_height_im
    mhi_1 = scan_1.max_height_im
    mhi_to_warp = mhi_1
    mhi_target = mhi_2
    # NOTE(review): cv2.warpAffine takes dsize as (width, height) while
    # numpy's image.shape is (rows, cols); these only line up for square
    # images -- confirm the maps are always square.
    warped_image_1 = cv2.warpAffine(mhi_to_warp.image, affine_matrix, mhi_to_warp.image.shape, flags=cv2.INTER_NEAREST)
    warped_camera_depth_image_1 = cv2.warpAffine(mhi_to_warp.camera_depth_image, affine_matrix,
                                                 mhi_to_warp.camera_depth_image.shape, flags=cv2.INTER_NEAREST)
    if (mhi_1.rgb_image is not None) and (mhi_2.rgb_image is not None):
        warped_rgb_image_1 = cv2.warpAffine(mhi_to_warp.rgb_image, affine_matrix,
                                            mhi_to_warp.rgb_image.shape[:2], flags=cv2.INTER_NEAREST)
    else:
        warped_rgb_image_1 = None
    if display_on:
        # Red/green overlay: target in the red channel, warped (or raw)
        # scan_1 in green, so misalignment shows up as color fringing.
        h,w = mhi_target.image.shape
        color_im = np.zeros((h, w, 3), np.uint8)
        color_im[:,:,0] = mhi_target.image
        color_im[:,:,1] = warped_image_1
        cv2.imshow('Aligned color comparison', color_im)
        color_im[:,:,1] = mhi_to_warp.image
        cv2.imshow('Unaligned color comparison', color_im)
    # Ad-hoc container exposing the attribute interface expected by
    # blend_max_height_image_1_into_max_height_image_2; the class attributes
    # capture the warped images from the enclosing scope.
    class TempMaxHeightImage:
        image = warped_image_1
        camera_depth_image = warped_camera_depth_image_1
        rgb_image = warped_rgb_image_1
    warped_mhi = TempMaxHeightImage()
    blend_max_height_image_1_into_max_height_image_2(warped_mhi, mhi_2)
    if display_on:
        # Draw the corrected robot pose (circle + heading line) on top of
        # a grayscale view of the merged map for visual inspection.
        h,w = mhi_2.image.shape
        color_im = np.zeros((h, w, 3), np.uint8)
        color_im[:,:,0] = mhi_2.image
        color_im[:,:,1] = mhi_2.image
        color_im[:,:,2] = mhi_2.image
        x, y, a = corrected_robot_map_pose
        x = int(round(x))
        y = int(round(y))
        radius = 10
        cv2.circle(color_im, (x,y), radius, [0,0,255], 1)
        color_im[y,x] = [0,255,0]
        f_len = 15.0
        f_x = int(round(f_len * np.cos(a)))
        f_y = int(round(f_len * np.sin(a)))
        x2 = x + f_x
        # Image y grows downward, so the heading line subtracts f_y.
        y2 = y - f_y
        cv2.line(color_im, (x, y), (x2, y2), [0,255,255], 1)
        cv2.imshow('Mhi_2 max height image', color_im)
    if not output_affine:
        return original_robot_map_pose, corrected_robot_map_pose
    else:
        return original_robot_map_pose, corrected_robot_map_pose, affine_matrix
| 43.119002
| 143
| 0.645092
|
4a09cc4d7338f09b266db268e1f8a56a88699a84
| 405
|
py
|
Python
|
Make_It_A_Keyboard/strings/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | null | null | null |
Make_It_A_Keyboard/strings/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | null | null | null |
Make_It_A_Keyboard/strings/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | null | null | null |
import usb_hid
from adafruit_circuitplayground.express import cpx
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
# USB HID keyboard plus a US-layout helper so whole strings can be typed.
kbd = Keyboard(usb_hid.devices)
layout = KeyboardLayoutUS(kbd)
while True:
    if cpx.button_a:
        # Type 'Jane Doe' followed by Enter (a newline).
        layout.write('Jane Doe\n')
        # Busy-wait until button A is released so one press types only once.
        while cpx.button_a:
            pass
| 27
| 60
| 0.735802
|
4a09cc6d240e35ac29937581d8c6d918d8aebfb0
| 610
|
py
|
Python
|
config.py
|
claudianjeri/The-News-Highlights
|
7e89fa2eb4ccd8901d9d666a8782d2c54d89ef4c
|
[
"MIT"
] | null | null | null |
config.py
|
claudianjeri/The-News-Highlights
|
7e89fa2eb4ccd8901d9d666a8782d2c54d89ef4c
|
[
"MIT"
] | 1
|
2018-05-29T19:30:04.000Z
|
2018-05-29T19:30:04.000Z
|
config.py
|
claudianjeri/The-News-Highlights
|
7e89fa2eb4ccd8901d9d666a8782d2c54d89ef4c
|
[
"MIT"
] | null | null | null |
import os
class Config:
    """Base configuration shared by all application environments."""
    #my api key.
    # NOTE(review): the API key and secret are committed in plain text here;
    # they should be read from environment variables (os.environ) instead.
    NEWS_API_KEY = '8a48624dfbfc41d8a7009f960cccd473'
    SECRET_KEY = 'nsdbbdkjsbnjrbwucsjanvrbvuhcurwhcnerhicewi'
    #my source base url  ('{}' is filled with the news category)
    SOURCE_API_BASE_URL = 'https://newsapi.org/v2/sources?&category={}&language=en&apiKey=8a48624dfbfc41d8a7009f960cccd473'
    #my articles base url  ('{}' is filled with the source id)
    ARTICLES_API_BASE_URL = 'https://newsapi.org/v2/top-headlines?sources={}&apiKey=8a48624dfbfc41d8a7009f960cccd473'
class ProdConfig(Config):
    """Production configuration (no overrides beyond the base Config)."""
    pass
class DevConfig(Config):
    """Development configuration: enables debug mode."""
    DEBUG = True
# Maps an environment name to its configuration class (consumed by the
# application factory when creating the app).
config_options = {
    'development':DevConfig,
    'production':ProdConfig
}
| 25.416667
| 123
| 0.74918
|
4a09cc7dfc2ab625a3c860c43e0a0cee44752aca
| 5,269
|
py
|
Python
|
BeautyForMe/myvenv/Lib/site-packages/disqus/api.py
|
YooInKeun/CAU_CSE_Capstone_3
|
51405c4bed2b55661aa0708c8acea17fe72aa701
|
[
"MIT"
] | 6
|
2019-12-07T07:30:34.000Z
|
2022-01-20T14:26:44.000Z
|
BeautyForMe/myvenv/Lib/site-packages/disqus/api.py
|
YooInKeun/CAU_CSE_Capstone_3
|
51405c4bed2b55661aa0708c8acea17fe72aa701
|
[
"MIT"
] | 9
|
2019-12-28T06:18:53.000Z
|
2022-01-13T01:54:21.000Z
|
BeautyForMe/myvenv/Lib/site-packages/disqus/api.py
|
YooInKeun/CAU_CSE_Capstone_3
|
51405c4bed2b55661aa0708c8acea17fe72aa701
|
[
"MIT"
] | 1
|
2020-05-21T15:55:45.000Z
|
2020-05-21T15:55:45.000Z
|
import urllib2
import urllib
def _usingPy26():
import sys
return sys.hexversion > 0x20600f0
# Use the stdlib json module on Python >= 2.6; older interpreters fall back
# to the external simplejson package, which exposes the same API.
if _usingPy26():
    import json
else:
    import simplejson as json
from exceptions import Exception
from disqus.disqus_globals import POST_ACTIONS
class DisqusError(Exception):
    """Base class for every error raised while interacting with the Disqus API."""
class DisqusHTTPError(DisqusError):
    """Raised when disqus.com answers an API request with an HTTP error."""
    def __init__(self, e, uri, arg_string):
        # Keep the original HTTPError and the request details for __str__.
        self.e = e
        self.uri = uri
        self.arg_string = arg_string
    def __str__(self):
        details = self.e.fp.read()
        return (
            "Disqus sent status %i for URL: %s using parameters: "
            "(%s)\ndetails: %s" % (
                self.e.code, self.uri, self.arg_string, details))
class DisqusCall(object):
    """Proxy that turns attribute access into Disqus REST API calls.

    Accessing ``call.some_method`` returns a new DisqusCall bound to that
    method name; calling the result performs the HTTP request and returns
    the decoded JSON ``message`` payload.
    NOTE(review): this is Python 2 code (urllib/urllib2 and the
    ``except X, e`` syntax below); it will not run on Python 3.
    """
    def __init__(self, api_key=None, partner_key=None,
                 domain='disqus.com', api_version='1.1',
                 method=''):
        self.api_key = api_key
        self.partner_key = partner_key
        self.domain = domain
        self.method = method
        self.api_version = api_version
    def __getattr__(self, k, **kwargs):
        # NOTE(review): ``object`` has no ``__getattr__`` attribute, so this
        # lookup always raises AttributeError and every unknown attribute
        # becomes a new bound call; ``object.__getattribute__`` was probably
        # intended, though the net effect here is the same.
        try:
            return object.__getattr__(self, k)
        except AttributeError:
            return DisqusCall(api_key=self.api_key,
                              partner_key=self.partner_key,
                              domain=self.domain, api_version=self.api_version,
                              method=k)
    def __call__(self, **kwargs):
        # format the arguments
        kwargs['api_version'] = self.api_version
        if self.api_key:
            kwargs['user_api_key'] = self.api_key
        if self.partner_key:
            kwargs['partner_api_key'] = self.partner_key
        arg_string = urllib.urlencode(kwargs)
        # Get request type
        # POST_ACTIONS lists the methods that mutate state; all other
        # methods are fetched via GET from the ``get_``-prefixed endpoint.
        # NOTE(review): request_type is assigned but never used.
        if self.method in POST_ACTIONS:
            request_type = "POST"
            body = arg_string
            uri = "http://%s/api/%s/" % (self.domain, self.method)
        else:
            request_type = "GET"
            body = None
            uri = "http://%s/api/%s/?%s" % (self.domain, 'get_' + \
                                            self.method, \
                                            arg_string)
        req = urllib2.Request(uri, body, {})
        try:
            handle = urllib2.urlopen(req)
            res = json.loads(handle.read())
            return res['message']
        except urllib2.HTTPError, e:
            # 304 Not Modified is treated as an empty result, not an error.
            if (e.code == 304):
                return []
            else:
                raise DisqusHTTPError(e, uri, arg_string)
class Disqus(DisqusCall):
    """
    The Disqus API class.
    Accessing members of this class returns RESTful data from the Disqus API.
    The data is then converted to python objects (lists and dicts).
    You can find more about the Disqus API here:
    http://docs.disqus.com/developers
    Examples:
    ---------
    # Init with your API key. To find out your key visit
    # http://disqus.com/api/get_my_key while logged in to disqus.com.
    disqus = Disqus(secret_key)
    # Get a list of forums that user owns
    disqus.forum_list()
    # Get a list of posts on a forum
    disqus.forum_posts(forum_id=1)
    # Get a list of categories on a forum
    disqus.categories_list(forum_id=2)
    # Get a list of threads on a forum
    disqus.thread_list(forum_id=2)
    # Get a list of updated threads on a forum
    disqus.updated_threads(forum_id=1)
    # Get a list of posts on a thread
    disqus.thread_posts(thread_id=1)
    # Get a particular thread or create it if it doesn't exist
    disqus.thread_by_identifier(forum_id=1, identifier='my_thread',
                                title='My Killer Thread')
    # or
    disqus.thread_by_url(url='http://my.awesome/thread')
    # Update a thread
    disqus.update_thread(forum_id=1, thread_id=4)
    # Create a new post
    disqus.create_post(site_id=1, thread_id=4,
                       message='Dope API, yo!')
    # Moderate a post
    disqus.moderate_post(post_id=234, action='spam')
    disqus.moderate_post(post_id=123, action='approve')
    disqus.moderate_post(post_id=324, action='kill')
    Using the data returned:
    ------------------------
    All API calls are returned in decoded JSON. This is converted into python
    objects.
    x = disqus.forum_list()
    # The first forum
    x[0]
    # The description of the first forum
    x[0]['description']
    # The shortname of the first forum
    x[0]['shortname']
    """
    def __init__(self, api_key=None, partner_key=None, domain="disqus.com",
                 api_version='1.1'):
        """
        Creates a new Disqus API connector.
        Pass an Auth object initialized with your Disqus API key. To get your
        Disqus API key visit http://disqus.com/api/get_my_key while logged in.
        """
        # Delegate to DisqusCall with no bound method name; individual REST
        # methods are reached via attribute access (DisqusCall.__getattr__).
        DisqusCall.__init__(self, api_key=api_key, partner_key=partner_key,
                            domain=domain, api_version=api_version)
| 30.281609
| 79
| 0.581515
|
4a09cf7d33932fae13d77ec29b631a376c1119dd
| 21,912
|
py
|
Python
|
homeassistant/components/plex/server.py
|
rotdrop/home-assistant-core
|
f7c4900d5c22da8aba35258a66a89e4e68cc7c60
|
[
"Apache-2.0"
] | 2
|
2021-07-26T02:35:56.000Z
|
2021-07-26T02:35:57.000Z
|
homeassistant/components/plex/server.py
|
rotdrop/home-assistant-core
|
f7c4900d5c22da8aba35258a66a89e4e68cc7c60
|
[
"Apache-2.0"
] | 6
|
2020-11-08T19:40:10.000Z
|
2022-03-01T11:11:07.000Z
|
homeassistant/components/plex/server.py
|
rotdrop/home-assistant-core
|
f7c4900d5c22da8aba35258a66a89e4e68cc7c60
|
[
"Apache-2.0"
] | 3
|
2016-10-03T20:14:06.000Z
|
2019-04-19T15:56:56.000Z
|
"""Shared class to maintain Plex server instances."""
import logging
import ssl
import time
from urllib.parse import urlparse
from plexapi.exceptions import BadRequest, NotFound, Unauthorized
import plexapi.myplex
import plexapi.playqueue
import plexapi.server
from requests import Session
import requests.exceptions
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.components.media_player.const import (
MEDIA_TYPE_EPISODE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_VIDEO,
)
from homeassistant.const import CONF_CLIENT_ID, CONF_TOKEN, CONF_URL, CONF_VERIFY_SSL
from homeassistant.core import callback
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_USE_EPISODE_ART,
DEBOUNCE_TIMEOUT,
DEFAULT_VERIFY_SSL,
DOMAIN,
PLAYER_SOURCE,
PLEX_NEW_MP_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL,
PLEX_UPDATE_SENSOR_SIGNAL,
PLEXTV_THROTTLE,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import NoServersFound, ServerNotSpecified, ShouldUpdateConfigEntry
_LOGGER = logging.getLogger(__name__)
# Set default headers sent by plexapi
plexapi.X_PLEX_DEVICE_NAME = X_PLEX_DEVICE_NAME
plexapi.X_PLEX_PLATFORM = X_PLEX_PLATFORM
plexapi.X_PLEX_PRODUCT = X_PLEX_PRODUCT
plexapi.X_PLEX_VERSION = X_PLEX_VERSION
class PlexServer:
    """Manages a single Plex server connection."""
    def __init__(self, hass, server_config, known_server_id=None, options=None):
        """Initialize a Plex server instance."""
        self.hass = hass
        self._plex_account = None
        self._plex_server = None
        self._created_clients = set()
        self._known_clients = set()
        self._known_idle = set()
        self._url = server_config.get(CONF_URL)
        self._token = server_config.get(CONF_TOKEN)
        self._server_name = server_config.get(CONF_SERVER)
        self._verify_ssl = server_config.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL)
        self._server_id = known_server_id
        self.options = options
        self.server_choice = None
        self._accounts = []
        self._owner_username = None
        self._plextv_clients = None
        self._plextv_client_timestamp = 0
        self._plextv_device_cache = {}
        # plex.tv features are only usable when a token was supplied.
        self._use_plex_tv = self._token is not None
        self._version = None
        # Collapse bursts of update requests into one platform refresh.
        self.async_update_platforms = Debouncer(
            hass,
            _LOGGER,
            cooldown=DEBOUNCE_TIMEOUT,
            immediate=True,
            function=self._async_update_platforms,
        ).async_call
        # Header conditionally added as it is not available in config entry v1
        if CONF_CLIENT_ID in server_config:
            plexapi.X_PLEX_IDENTIFIER = server_config[CONF_CLIENT_ID]
        plexapi.myplex.BASE_HEADERS = plexapi.reset_base_headers()
        plexapi.server.BASE_HEADERS = plexapi.reset_base_headers()
    @property
    def account(self):
        """Return a MyPlexAccount instance."""
        # Lazily created; on auth failure, plex.tv use is disabled so the
        # error is not retried on every access.
        if not self._plex_account and self._use_plex_tv:
            try:
                self._plex_account = plexapi.myplex.MyPlexAccount(token=self._token)
            except (BadRequest, Unauthorized):
                self._use_plex_tv = False
                _LOGGER.error("Not authorized to access plex.tv with provided token")
                raise
        return self._plex_account
    def plextv_clients(self):
        """Return available clients linked to Plex account."""
        if self.account is None:
            return []
        # Refresh the cached list at most once every PLEXTV_THROTTLE seconds.
        now = time.time()
        if now - self._plextv_client_timestamp > PLEXTV_THROTTLE:
            self._plextv_client_timestamp = now
            self._plextv_clients = [
                x
                for x in self.account.resources()
                if "player" in x.provides and x.presence
            ]
            _LOGGER.debug(
                "Current available clients from plex.tv: %s", self._plextv_clients
            )
        return self._plextv_clients
    def connect(self):
        """Connect to a Plex server directly, obtaining direct URL if necessary."""
        config_entry_update_needed = False
        def _connect_with_token():
            # Discover servers owned by the account and connect to the
            # configured one (or the only one available).
            available_servers = [
                (x.name, x.clientIdentifier)
                for x in self.account.resources()
                if "server" in x.provides
            ]
            if not available_servers:
                raise NoServersFound
            if not self._server_name and len(available_servers) > 1:
                raise ServerNotSpecified(available_servers)
            self.server_choice = (
                self._server_name if self._server_name else available_servers[0][0]
            )
            self._plex_server = self.account.resource(self.server_choice).connect(
                timeout=10
            )
        def _connect_with_url():
            session = None
            if self._url.startswith("https") and not self._verify_ssl:
                session = Session()
                session.verify = False
            self._plex_server = plexapi.server.PlexServer(
                self._url, self._token, session
            )
        def _update_plexdirect_hostname():
            # Re-resolve the server by its stored id when the plex.direct
            # certificate hostname has rotated.
            matching_servers = [
                x.name
                for x in self.account.resources()
                if x.clientIdentifier == self._server_id
            ]
            if matching_servers:
                self._plex_server = self.account.resource(matching_servers[0]).connect(
                    timeout=10
                )
                return True
            _LOGGER.error("Attempt to update plex.direct hostname failed")
            return False
        if self._url:
            try:
                _connect_with_url()
            except requests.exceptions.SSLError as error:
                # Walk the exception chain to find the certificate error.
                while error and not isinstance(error, ssl.SSLCertVerificationError):
                    error = error.__context__  # pylint: disable=no-member
                if isinstance(error, ssl.SSLCertVerificationError):
                    domain = urlparse(self._url).netloc.split(":")[0]
                    if domain.endswith("plex.direct") and error.args[0].startswith(
                        f"hostname '{domain}' doesn't match"
                    ):
                        _LOGGER.warning(
                            "Plex SSL certificate's hostname changed, updating."
                        )
                        if _update_plexdirect_hostname():
                            config_entry_update_needed = True
                        else:
                            raise Unauthorized(
                                "New certificate cannot be validated with provided token"
                            )
                    else:
                        raise
                else:
                    raise
        else:
            _connect_with_token()
        try:
            system_accounts = self._plex_server.systemAccounts()
        except Unauthorized:
            _LOGGER.warning(
                "Plex account has limited permissions, shared account filtering will not be available."
            )
        else:
            self._accounts = [
                account.name for account in system_accounts if account.name
            ]
            _LOGGER.debug("Linked accounts: %s", self.accounts)
            # accountID 1 identifies the server owner.
            owner_account = [
                account.name for account in system_accounts if account.accountID == 1
            ]
            if owner_account:
                self._owner_username = owner_account[0]
                _LOGGER.debug("Server owner found: '%s'", self._owner_username)
        self._version = self._plex_server.version
        if config_entry_update_needed:
            raise ShouldUpdateConfigEntry
    @callback
    def async_refresh_entity(self, machine_identifier, device, session):
        """Forward refresh dispatch to media_player."""
        unique_id = f"{self.machine_identifier}:{machine_identifier}"
        _LOGGER.debug("Refreshing %s", unique_id)
        async_dispatcher_send(
            self.hass,
            PLEX_UPDATE_MEDIA_PLAYER_SIGNAL.format(unique_id),
            device,
            session,
        )
    def _fetch_platform_data(self):
        """Fetch all data from the Plex server in a single method."""
        return (
            self._plex_server.clients(),
            self._plex_server.sessions(),
            self.plextv_clients(),
        )
    async def _async_update_platforms(self):
        """Update the platform entities."""
        _LOGGER.debug("Updating devices")
        available_clients = {}
        ignored_clients = set()
        new_clients = set()
        # Determine which user accounts should be monitored.
        monitored_users = self.accounts
        known_accounts = set(self.option_monitored_users)
        if known_accounts:
            monitored_users = {
                user
                for user in self.option_monitored_users
                if self.option_monitored_users[user]["enabled"]
            }
        if not self.option_ignore_new_shared_users:
            for new_user in self.accounts - known_accounts:
                monitored_users.add(new_user)
        try:
            devices, sessions, plextv_clients = await self.hass.async_add_executor_job(
                self._fetch_platform_data
            )
        except (
            plexapi.exceptions.BadRequest,
            requests.exceptions.RequestException,
        ) as ex:
            _LOGGER.error(
                "Could not connect to Plex server: %s (%s)", self.friendly_name, ex
            )
            return
        def process_device(source, device):
            # Record the device as available; classify it as ignored or new.
            self._known_idle.discard(device.machineIdentifier)
            available_clients.setdefault(device.machineIdentifier, {"device": device})
            available_clients[device.machineIdentifier].setdefault(
                PLAYER_SOURCE, source
            )
            if device.machineIdentifier not in ignored_clients:
                if self.option_ignore_plexweb_clients and device.product == "Plex Web":
                    ignored_clients.add(device.machineIdentifier)
                    if device.machineIdentifier not in self._known_clients:
                        _LOGGER.debug(
                            "Ignoring %s %s: %s",
                            "Plex Web",
                            source,
                            device.machineIdentifier,
                        )
                    return
                if device.machineIdentifier not in (
                    self._created_clients | ignored_clients | new_clients
                ):
                    new_clients.add(device.machineIdentifier)
                    _LOGGER.debug(
                        "New %s from %s: %s",
                        device.product,
                        source,
                        device.machineIdentifier,
                    )
        for device in devices:
            process_device("PMS", device)
        def connect_to_resource(resource):
            """Connect to a plex.tv resource and return a Plex client."""
            client_id = resource.clientIdentifier
            if client_id in self._plextv_device_cache:
                return self._plextv_device_cache[client_id]
            client = None
            try:
                client = resource.connect(timeout=3)
                _LOGGER.debug("plex.tv resource connection successful: %s", client)
            except NotFound:
                _LOGGER.error("plex.tv resource connection failed: %s", resource.name)
            self._plextv_device_cache[client_id] = client
            return client
        for plextv_client in plextv_clients:
            if plextv_client.clientIdentifier not in available_clients:
                device = await self.hass.async_add_executor_job(
                    connect_to_resource, plextv_client
                )
                if device:
                    process_device("plex.tv", device)
        for session in sessions:
            if session.TYPE == "photo":
                _LOGGER.debug("Photo session detected, skipping: %s", session)
                continue
            session_username = session.usernames[0]
            for player in session.players:
                if session_username and session_username not in monitored_users:
                    ignored_clients.add(player.machineIdentifier)
                    _LOGGER.debug(
                        "Ignoring %s client owned by '%s'",
                        player.product,
                        session_username,
                    )
                    continue
                process_device("session", player)
                available_clients[player.machineIdentifier]["session"] = session
        # Dispatch: create entities for new clients, refresh existing ones.
        new_entity_configs = []
        for client_id, client_data in available_clients.items():
            if client_id in ignored_clients:
                continue
            if client_id in new_clients:
                new_entity_configs.append(client_data)
                self._created_clients.add(client_id)
            else:
                self.async_refresh_entity(
                    client_id, client_data["device"], client_data.get("session")
                )
        self._known_clients.update(new_clients | ignored_clients)
        # Clients seen before but absent now are marked idle.
        idle_clients = (
            self._known_clients - self._known_idle - ignored_clients
        ).difference(available_clients)
        for client_id in idle_clients:
            self.async_refresh_entity(client_id, None, None)
            self._known_idle.add(client_id)
            self._plextv_device_cache.pop(client_id, None)
        if new_entity_configs:
            async_dispatcher_send(
                self.hass,
                PLEX_NEW_MP_SIGNAL.format(self.machine_identifier),
                new_entity_configs,
            )
        async_dispatcher_send(
            self.hass,
            PLEX_UPDATE_SENSOR_SIGNAL.format(self.machine_identifier),
            sessions,
        )
    @property
    def plex_server(self):
        """Return the plexapi PlexServer instance."""
        return self._plex_server
    @property
    def accounts(self):
        """Return accounts associated with the Plex server."""
        return set(self._accounts)
    @property
    def owner(self):
        """Return the Plex server owner username."""
        return self._owner_username
    @property
    def version(self):
        """Return the version of the Plex server."""
        return self._version
    @property
    def friendly_name(self):
        """Return name of connected Plex server."""
        return self._plex_server.friendlyName
    @property
    def machine_identifier(self):
        """Return unique identifier of connected Plex server."""
        return self._plex_server.machineIdentifier
    @property
    def url_in_use(self):
        """Return URL used for connected Plex server."""
        return self._plex_server._baseurl  # pylint: disable=protected-access
    @property
    def option_ignore_new_shared_users(self):
        """Return ignore_new_shared_users option."""
        return self.options[MP_DOMAIN].get(CONF_IGNORE_NEW_SHARED_USERS, False)
    @property
    def option_use_episode_art(self):
        """Return use_episode_art option."""
        return self.options[MP_DOMAIN].get(CONF_USE_EPISODE_ART, False)
    @property
    def option_monitored_users(self):
        """Return dict of monitored users option."""
        return self.options[MP_DOMAIN].get(CONF_MONITORED_USERS, {})
    @property
    def option_ignore_plexweb_clients(self):
        """Return ignore_plex_web_clients option."""
        return self.options[MP_DOMAIN].get(CONF_IGNORE_PLEX_WEB_CLIENTS, False)
    @property
    def library(self):
        """Return library attribute from server object."""
        return self._plex_server.library
    def playlist(self, title):
        """Return playlist from server object."""
        return self._plex_server.playlist(title)
    def create_playqueue(self, media, **kwargs):
        """Create playqueue on Plex server."""
        return plexapi.playqueue.PlayQueue.create(self._plex_server, media, **kwargs)
    def fetch_item(self, item):
        """Fetch item from Plex server."""
        return self._plex_server.fetchItem(item)
    def lookup_media(self, media_type, **kwargs):
        """Lookup a piece of media."""
        media_type = media_type.lower()
        if media_type == DOMAIN:
            key = kwargs["plex_key"]
            try:
                return self.fetch_item(key)
            except NotFound:
                _LOGGER.error("Media for key %s not found", key)
                return None
        if media_type == MEDIA_TYPE_PLAYLIST:
            try:
                playlist_name = kwargs["playlist_name"]
                return self.playlist(playlist_name)
            except KeyError:
                _LOGGER.error("Must specify 'playlist_name' for this search")
                return None
            except NotFound:
                _LOGGER.error(
                    "Playlist '%s' not found", playlist_name,
                )
                return None
        # All remaining media types require a library section.
        try:
            library_name = kwargs["library_name"]
            library_section = self.library.section(library_name)
        except KeyError:
            _LOGGER.error("Must specify 'library_name' for this search")
            return None
        except NotFound:
            _LOGGER.error("Library '%s' not found", library_name)
            return None
        def lookup_music():
            """Search for music and return a Plex media object."""
            album_name = kwargs.get("album_name")
            track_name = kwargs.get("track_name")
            track_number = kwargs.get("track_number")
            try:
                artist_name = kwargs["artist_name"]
                artist = library_section.get(artist_name)
            except KeyError:
                _LOGGER.error("Must specify 'artist_name' for this search")
                return None
            except NotFound:
                _LOGGER.error(
                    "Artist '%s' not found in '%s'", artist_name, library_name
                )
                return None
            if album_name:
                try:
                    album = artist.album(album_name)
                except NotFound:
                    _LOGGER.error(
                        "Album '%s' by '%s' not found", album_name, artist_name
                    )
                    return None
                if track_name:
                    try:
                        return album.track(track_name)
                    except NotFound:
                        _LOGGER.error(
                            "Track '%s' on '%s' by '%s' not found",
                            track_name,
                            album_name,
                            artist_name,
                        )
                        return None
                if track_number:
                    for track in album.tracks():
                        if int(track.index) == int(track_number):
                            return track
                    _LOGGER.error(
                        "Track %d on '%s' by '%s' not found",
                        track_number,
                        album_name,
                        artist_name,
                    )
                    return None
                return album
            if track_name:
                try:
                    return artist.get(track_name)
                except NotFound:
                    _LOGGER.error(
                        "Track '%s' by '%s' not found", track_name, artist_name
                    )
                    return None
            return artist
        def lookup_tv():
            """Find TV media and return a Plex media object."""
            season_number = kwargs.get("season_number")
            episode_number = kwargs.get("episode_number")
            try:
                show_name = kwargs["show_name"]
                show = library_section.get(show_name)
            except KeyError:
                _LOGGER.error("Must specify 'show_name' for this search")
                return None
            except NotFound:
                _LOGGER.error("Show '%s' not found in '%s'", show_name, library_name)
                return None
            if not season_number:
                return show
            try:
                season = show.season(int(season_number))
            except NotFound:
                _LOGGER.error(
                    "Season %d of '%s' not found", season_number, show_name,
                )
                return None
            if not episode_number:
                return season
            try:
                return season.episode(episode=int(episode_number))
            except NotFound:
                _LOGGER.error(
                    "Episode not found: %s - S%sE%s",
                    show_name,
                    str(season_number).zfill(2),
                    str(episode_number).zfill(2),
                )
                return None
        if media_type == MEDIA_TYPE_MUSIC:
            return lookup_music()
        if media_type == MEDIA_TYPE_EPISODE:
            return lookup_tv()
        if media_type == MEDIA_TYPE_VIDEO:
            # Falls through to an implicit None on error or unknown type.
            try:
                video_name = kwargs["video_name"]
                return library_section.get(video_name)
            except KeyError:
                _LOGGER.error("Must specify 'video_name' for this search")
            except NotFound:
                _LOGGER.error(
                    "Movie '%s' not found in '%s'", video_name, library_name,
                )
| 35.86252
| 103
| 0.56663
|
4a09cfb7e9441566539a11de908440142ce44c1f
| 2,028
|
py
|
Python
|
esses/esses.py
|
mewbak/generative-art
|
cd8c61d469b781b50328e2c7c29fad27bfb17d73
|
[
"MIT"
] | null | null | null |
esses/esses.py
|
mewbak/generative-art
|
cd8c61d469b781b50328e2c7c29fad27bfb17d73
|
[
"MIT"
] | null | null | null |
esses/esses.py
|
mewbak/generative-art
|
cd8c61d469b781b50328e2c7c29fad27bfb17d73
|
[
"MIT"
] | null | null | null |
import cairo
import math
import random
import sys
import os
sys.path.append(os.path.abspath('..'))
from lib import palettes
from lib import colors
def ess(ctx, x, y, width, height, color, line_width):
    """Draw one S-shaped stroke inside the cell anchored at (x, y).

    The four cell corners are shuffled and used as the control points of a
    cubic Bezier, which is then overdrawn twice with lighter, thinner
    strokes to give the curve a bright core.
    """
    corners = [
        (x, y),                    # top left
        (x, y + height),           # bottom left
        (x + width, y),            # top right
        (x + width, y + height),   # bottom right
    ]
    random.shuffle(corners)
    make_curve(ctx, corners, color, line_width)
    # Lighter core: two tints of the same color at reduced widths.
    lighter = colors.tints(color, 3)
    make_curve(ctx, corners, lighter[1], max(line_width // 5, 2))
    make_curve(ctx, corners, lighter[2], max(line_width // 10, 1))
def make_curve(ctx, points, color, line_width):
    """Stroke a cubic Bezier defined by the four given control points."""
    start, ctrl_a, ctrl_b, end = points
    ctx.move_to(*start)
    ctx.curve_to(*ctrl_a, *ctrl_b, *end)
    ctx.set_line_width(line_width)
    ctx.set_line_cap(cairo.LINE_CAP_ROUND)
    ctx.set_source_rgb(*color)
    ctx.stroke()
def main(filename="output.png", img_width=2000, img_height=2000, palette=None, rows=20, columns=20, line_width=20):
    """Render a rows x columns grid of S-curves and write it to filename.

    Args:
        filename: output PNG path.
        img_width, img_height: canvas size in pixels.
        palette: an entry of palettes.PALETTES; when None a palette is
            chosen at random per call. (The old default was evaluated once
            at import time, so every call silently shared one palette.)
        rows, columns: grid dimensions.
        line_width: stroke width of the main curve.
    """
    if palette is None:
        palette = random.choice(palettes.PALETTES)
    ims = cairo.ImageSurface(cairo.FORMAT_ARGB32, img_width, img_height)
    ims.set_fallback_resolution(300.0, 300.0)
    ctx = cairo.Context(ims)
    # Background
    ctx.rectangle(0, 0, img_width, img_height)
    ctx.set_source_rgb(*palettes.hex_to_tuple(palette['background']))
    ctx.fill()
    cell_width = img_width // columns
    # Bug fix: cell height must derive from the image *height*; the old
    # code used img_width, which broke non-square canvases.
    cell_height = img_height // rows
    for x in range(0, img_width, cell_width):
        for y in range(0, img_height, cell_height):
            color = palettes.hex_to_tuple(random.choice(palette['colors']))
            ess(ctx, x, y, cell_width, cell_height, color, line_width)
    ims.write_to_png(filename)
def make_random(filename="output.png"):
    """Render one image with a random palette, grid size and stroke width."""
    chosen_palette = random.choice(palettes.PALETTES)
    n_rows = random.randint(5, 80)
    # Half the time reuse the row count so the grid is square.
    n_cols = random.randint(5, 80) if random.random() < 0.5 else n_rows
    stroke = random.randint(5, 25)
    main(filename=filename, palette=chosen_palette, rows=n_rows,
         columns=n_cols, line_width=stroke)
if __name__ == "__main__":
    # Render five random variations: output-0.png .. output-4.png.
    for idx in range(5):
        make_random(filename="output-{}.png".format(idx))
| 32.190476
| 143
| 0.671598
|
4a09d063b71de90a0e995cf9d165d1b86d92cfde
| 11,651
|
py
|
Python
|
src/poetry/puzzle/solver.py
|
nazarepiedady/poetry
|
8b640886ee39aee4a6c3208f39febf368dc32953
|
[
"MIT"
] | null | null | null |
src/poetry/puzzle/solver.py
|
nazarepiedady/poetry
|
8b640886ee39aee4a6c3208f39febf368dc32953
|
[
"MIT"
] | null | null | null |
src/poetry/puzzle/solver.py
|
nazarepiedady/poetry
|
8b640886ee39aee4a6c3208f39febf368dc32953
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import time
from collections import defaultdict
from contextlib import contextmanager
from typing import TYPE_CHECKING
from typing import FrozenSet
from typing import Tuple
from typing import TypeVar
from poetry.core.packages.dependency_group import MAIN_GROUP
from poetry.mixology import resolve_version
from poetry.mixology.failure import SolveFailure
from poetry.packages import DependencyPackage
from poetry.puzzle.exceptions import OverrideNeeded
from poetry.puzzle.exceptions import SolverProblemError
from poetry.puzzle.provider import Provider
if TYPE_CHECKING:
from collections.abc import Iterator
from cleo.io.io import IO
from poetry.core.packages.dependency import Dependency
from poetry.core.packages.package import Package
from poetry.core.packages.project_package import ProjectPackage
from poetry.puzzle.transaction import Transaction
from poetry.repositories import Pool
from poetry.repositories import Repository
from poetry.utils.env import Env
class Solver:
def __init__(
self,
package: ProjectPackage,
pool: Pool,
installed: Repository,
locked: Repository,
io: IO,
provider: Provider | None = None,
) -> None:
self._package = package
self._pool = pool
self._installed = installed
self._locked = locked
self._io = io
if provider is None:
provider = Provider(
self._package, self._pool, self._io, installed=installed
)
self._provider = provider
self._overrides: list[dict[DependencyPackage, dict[str, Dependency]]] = []
@property
def provider(self) -> Provider:
return self._provider
@contextmanager
def use_environment(self, env: Env) -> Iterator[None]:
with self.provider.use_environment(env):
yield
def solve(self, use_latest: list[str] | None = None) -> Transaction:
from poetry.puzzle.transaction import Transaction
with self._provider.progress():
start = time.time()
packages, depths = self._solve(use_latest=use_latest)
end = time.time()
if len(self._overrides) > 1:
self._provider.debug(
f"Complete version solving took {end - start:.3f} seconds with"
f" {len(self._overrides)} overrides"
)
self._provider.debug(
"Resolved with overrides:"
f" {', '.join(f'({b})' for b in self._overrides)}"
)
return Transaction(
self._locked.packages,
list(zip(packages, depths)),
installed_packages=self._installed.packages,
root_package=self._package,
)
def solve_in_compatibility_mode(
self,
overrides: tuple[dict[DependencyPackage, dict[str, Dependency]], ...],
use_latest: list[str] | None = None,
) -> tuple[list[Package], list[int]]:
packages = []
depths = []
for override in overrides:
self._provider.debug(
"<comment>Retrying dependency resolution "
f"with the following overrides ({override}).</comment>"
)
self._provider.set_overrides(override)
_packages, _depths = self._solve(use_latest=use_latest)
for index, package in enumerate(_packages):
if package not in packages:
packages.append(package)
depths.append(_depths[index])
continue
else:
idx = packages.index(package)
pkg = packages[idx]
depths[idx] = max(depths[idx], _depths[index])
for dep in package.requires:
if dep not in pkg.requires:
pkg.add_dependency(dep)
return packages, depths
    def _solve(
        self, use_latest: list[str] | None = None
    ) -> tuple[list[Package], list[int]]:
        """Run one resolution pass and post-process the resulting graph.

        Returns the resolved packages (feature packages merged into their
        base packages) together with each package's depth in the
        dependency graph, i.e. its distance from the root package.
        """
        if self._provider._overrides:
            self._overrides.append(self._provider._overrides)

        # Index locked packages by name, newest version first, so the
        # resolver can prefer already-locked versions.
        locked: dict[str, list[DependencyPackage]] = defaultdict(list)
        for package in self._locked.packages:
            locked[package.name].append(
                DependencyPackage(package.to_dependency(), package)
            )
        for dependency_packages in locked.values():
            dependency_packages.sort(
                key=lambda p: p.package.version,
                reverse=True,
            )

        try:
            result = resolve_version(
                self._package, self._provider, locked=locked, use_latest=use_latest
            )

            packages = result.packages
        except OverrideNeeded as e:
            # Marker conflicts require retrying once per override set.
            return self.solve_in_compatibility_mode(e.overrides, use_latest=use_latest)
        except SolveFailure as e:
            raise SolverProblemError(e)

        # Depth-first search computes each package's depth in the graph.
        combined_nodes = depth_first_search(PackageNode(self._package, packages))
        results = dict(aggregate_package_nodes(nodes) for nodes in combined_nodes)

        # Merging feature packages with base packages
        final_packages = []
        depths = []
        for package in packages:
            if package.features:
                for _package in packages:
                    if (
                        not _package.features
                        and _package.name == package.name
                        and _package.version == package.version
                    ):
                        for dep in package.requires:
                            # Prevent adding base package as a dependency to itself
                            if _package.name == dep.name:
                                continue

                            if dep not in _package.requires:
                                _package.add_dependency(dep)
            else:
                final_packages.append(package)
                depths.append(results[package])

        # Return the packages in their original order with associated depths
        return final_packages, depths
# Identity of a node in the dependency DFS:
# (complete package name, dependency groups, optional flag).
DFSNodeID = Tuple[str, FrozenSet[str], bool]

T = TypeVar("T", bound="DFSNode")
class DFSNode:
    """Base vertex type for the dependency-graph depth-first search.

    Subclasses override :meth:`reachable` and :meth:`visit`; the base
    implementation describes an isolated node with no neighbours.
    """

    def __init__(self, id: DFSNodeID, name: str, base_name: str) -> None:
        # Identity used for visited-set bookkeeping during the DFS.
        self.id = id
        # Complete package name (may include extras/features).
        self.name = name
        # Bare package name without extras.
        self.base_name = base_name

    def reachable(self: T) -> list[T]:
        """Return the neighbouring nodes; an isolated node has none."""
        return list()

    def visit(self, parents: list[PackageNode]) -> None:
        """Hook invoked once per node after the topological sort."""
        return None

    def __str__(self) -> str:
        return f"{self.id}"
def depth_first_search(source: PackageNode) -> list[list[PackageNode]]:
    """Topologically sort the graph rooted at *source* and group by name.

    Returns one inner list per distinct complete package name, in the
    order the name first appears in the topological ordering. Each node's
    ``visit`` hook is called with its DFS parents before grouping.
    """
    back_edges: dict[DFSNodeID, list[PackageNode]] = defaultdict(list)
    visited: set[DFSNodeID] = set()
    topo_sorted_nodes: list[PackageNode] = []

    dfs_visit(source, back_edges, visited, topo_sorted_nodes)

    # Let every node see its DFS parents, then bucket nodes by name.
    by_name: dict[str, list[PackageNode]] = defaultdict(list)
    for current in topo_sorted_nodes:
        current.visit(back_edges[current.id])
        by_name[current.name].append(current)

    # Emit each bucket exactly once, in first-seen topological order.
    grouped: list[list[PackageNode]] = []
    emitted: set[str] = set()
    for current in topo_sorted_nodes:
        if current.name not in emitted:
            emitted.add(current.name)
            grouped.append(by_name[current.name])
    return grouped
def dfs_visit(
    node: PackageNode,
    back_edges: dict[DFSNodeID, list[PackageNode]],
    visited: set[DFSNodeID],
    sorted_nodes: list[PackageNode],
) -> None:
    """Recursively visit *node*, recording back edges and a topo order.

    ``back_edges`` maps every node id to the list of its DFS parents;
    ``sorted_nodes`` receives the nodes in reverse post-order, which is a
    topological ordering of the traversed subgraph.
    """
    if node.id in visited:
        return
    visited.add(node.id)

    children = node.reachable()
    for child in children:
        # Remember that *node* is a DFS parent of *child*.
        back_edges[child.id].append(node)
        dfs_visit(child, back_edges, visited, sorted_nodes)

    # Prepend after all descendants: reverse post-order.
    sorted_nodes.insert(0, node)
class PackageNode(DFSNode):
    """DFS node wrapping a resolved :class:`Package` within the graph."""

    def __init__(
        self,
        package: Package,
        packages: list[Package],
        previous: PackageNode | None = None,
        previous_dep: Dependency | None = None,
        dep: Dependency | None = None,
    ) -> None:
        # package: the package this node represents.
        self.package = package
        # packages: the full resolved package set used to find children.
        self.packages = packages
        # previous: the parent node, None only for the root package.
        self.previous = previous
        # previous_dep: the dependency through which the parent was reached.
        self.previous_dep = previous_dep
        # dep: the top-level dependency this subtree hangs off.
        self.dep = dep
        # Depth is filled in later by visit(); -1 marks "not yet computed".
        self.depth = -1

        if not previous:
            # Root package: no incoming dependency edge.
            self.category = "dev"
            self.groups: frozenset[str] = frozenset()
            self.optional = True
        elif dep:
            self.category = "main" if MAIN_GROUP in dep.groups else "dev"
            self.groups = dep.groups
            self.optional = dep.is_optional()
        else:
            raise ValueError("Both previous and dep must be passed")

        super().__init__(
            (package.complete_name, self.groups, self.optional),
            package.complete_name,
            package.name,
        )

    def reachable(self) -> list[PackageNode]:
        """Return child nodes for every satisfiable dependency of this package."""
        children: list[PackageNode] = []

        # Stop descending when we re-enter the same top-level dependency
        # through a different Dependency object of the same name.
        if (
            self.dep
            and self.previous_dep
            and self.previous_dep is not self.dep
            and self.previous_dep.name == self.dep.name
        ):
            return []

        for dependency in self.package.all_requires:
            if self.previous and self.previous.name == dependency.name:
                # We have a circular dependency.
                # Since the dependencies are resolved we can
                # simply skip it because we already have it
                # N.B. this only catches cycles of length 2;
                # dependency cycles in general are handled by the DFS traversal
                continue

            for pkg in self.packages:
                # Match on complete name and a constraint that allows the
                # version (including the prerelease special case), and
                # skip duplicates already collected for the same groups.
                if (
                    pkg.complete_name == dependency.complete_name
                    and (
                        dependency.constraint.allows(pkg.version)
                        or dependency.allows_prereleases()
                        and pkg.version.is_unstable()
                        and dependency.constraint.allows(pkg.version.stable)
                    )
                    and not any(
                        child.package.complete_name == pkg.complete_name
                        and child.groups == dependency.groups
                        for child in children
                    )
                ):
                    children.append(
                        PackageNode(
                            pkg,
                            self.packages,
                            self,
                            dependency,
                            self.dep or dependency,
                        )
                    )

        return children

    def visit(self, parents: list[PackageNode]) -> None:
        """Compute this node's depth from the depths of its DFS parents."""
        # The root package, which has no parents, is defined as having depth -1
        # So that the root package's top-level dependencies have depth 0.
        self.depth = 1 + max(
            [
                parent.depth if parent.base_name != self.base_name else parent.depth - 1
                for parent in parents
            ]
            + [-2]
        )
def aggregate_package_nodes(nodes: list[PackageNode]) -> tuple[Package, int]:
    """Collapse the DFS nodes of one package into a (package, depth) pair.

    All ``nodes`` refer to the same package, possibly reached through
    different dependency paths. The aggregated depth is the maximum of
    the individual depths; the category is "main" if any path went
    through the main group; the package is optional only if every path
    was optional. The aggregate values are written back onto each node
    and onto the package itself.

    Note: the original implementation also accumulated a ``groups`` list
    that was never read; that dead code has been removed.
    """
    package = nodes[0].package
    depth = max(node.depth for node in nodes)
    category = (
        "main" if any(MAIN_GROUP in node.groups for node in nodes) else "dev"
    )
    optional = all(node.optional for node in nodes)
    for node in nodes:
        node.depth = depth
        node.category = category
        node.optional = optional
    package.category = category
    package.optional = optional
    return package, depth
| 33.099432
| 88
| 0.57892
|
4a09d1eb38f24cffc0ac01dc4007294f3fb0f7ad
| 3,598
|
py
|
Python
|
qiskit_experiments/curve_analysis/standard_analysis/decay.py
|
jakelishman/qiskit-experiments
|
f4d23506ac5ea4af22721496d8d5c9bcb4562916
|
[
"Apache-2.0"
] | 72
|
2021-02-24T19:28:51.000Z
|
2022-03-27T02:56:59.000Z
|
qiskit_experiments/curve_analysis/standard_analysis/decay.py
|
jakelishman/qiskit-experiments
|
f4d23506ac5ea4af22721496d8d5c9bcb4562916
|
[
"Apache-2.0"
] | 509
|
2021-03-04T13:46:00.000Z
|
2022-03-31T18:09:16.000Z
|
qiskit_experiments/curve_analysis/standard_analysis/decay.py
|
jakelishman/qiskit-experiments
|
f4d23506ac5ea4af22721496d8d5c9bcb4562916
|
[
"Apache-2.0"
] | 70
|
2021-02-24T19:21:39.000Z
|
2022-03-05T04:00:12.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Decay analysis class."""
from typing import List, Union
import numpy as np
import qiskit_experiments.curve_analysis as curve
class DecayAnalysis(curve.CurveAnalysis):
    r"""A class to analyze general exponential decay curve.

    # section: fit_model
        The fit is based on the following decay function.

        .. math::

            F(x) = {\rm amp} \cdot e^{-x/\tau} + {\rm base}

    # section: fit_parameters
        defpar \rm amp:
            desc: Height of the decay curve.
            init_guess: Determined by :py:func:`~qiskit_experiments.curve_analysis.guess.min_height`.
            bounds: None

        defpar \rm base:
            desc: Base line of the decay curve.
            init_guess: Determined by the difference of minimum and maximum points.
            bounds: None

        defpar \tau:
            desc: This is the fit parameter of main interest.
            init_guess: Determined by :py:func:`~qiskit_experiments.curve_analysis.guess.exp_decay`.
            bounds: None
    """

    # Single decay series; tau enters the model through the rate lamb = 1/tau.
    __series__ = [
        curve.SeriesDef(
            fit_func=lambda x, amp, base, tau: curve.fit_function.exponential_decay(
                x,
                amp=amp,
                lamb=1 / tau,
                baseline=base,
            ),
            plot_color="blue",
            model_description=r"amp \exp(-x/tau) + base",
            plot_fit_uncertainty=True,
        )
    ]

    def _generate_fit_guesses(
        self, user_opt: curve.FitOptions
    ) -> Union[curve.FitOptions, List[curve.FitOptions]]:
        """Compute the initial guesses.

        Args:
            user_opt: Fit options filled with user provided guess and bounds.

        Returns:
            List of fit options that are passed to the fitter function.

        Raises:
            AnalysisError: When the y data is likely constant.
        """
        curve_data = self._data()

        # Baseline guess: the lowest observed point.
        user_opt.p0.set_if_empty(base=curve.guess.min_height(curve_data.y)[0])

        # alpha is the estimated decay rate; zero means no visible slope.
        alpha = curve.guess.exp_decay(curve_data.x, curve_data.y)

        if alpha != 0.0:
            user_opt.p0.set_if_empty(
                tau=-1 / alpha,
                amp=curve.guess.max_height(curve_data.y)[0] - user_opt.p0["base"],
            )
        else:
            # Likely there is no slope. Cannot fit constant line with this model.
            # Set some large enough number against to the scan range.
            user_opt.p0.set_if_empty(
                tau=100 * np.max(curve_data.x),
                amp=curve.guess.max_height(curve_data.y)[0] - user_opt.p0["base"],
            )

        return user_opt

    def _evaluate_quality(self, fit_data: curve.FitData) -> Union[str, None]:
        """Algorithmic criteria for whether the fit is good or bad.

        A good fit has:
            - a reduced chi-squared lower than three
            - tau error is less than its value
        """
        tau = fit_data.fitval("tau")

        criteria = [
            fit_data.reduced_chisq < 3,
            tau.stderr is None or tau.stderr < tau.value,
        ]

        if all(criteria):
            return "good"

        return "bad"
| 31.017241
| 100
| 0.603947
|
4a09d26db09f78eddfa51735dd4eb56f555711d7
| 3,934
|
py
|
Python
|
vyper/parser/context.py
|
PavelCore/vyper
|
4da7adf69e880902a5d702754a39d2100cb9bde5
|
[
"MIT"
] | null | null | null |
vyper/parser/context.py
|
PavelCore/vyper
|
4da7adf69e880902a5d702754a39d2100cb9bde5
|
[
"MIT"
] | null | null | null |
vyper/parser/context.py
|
PavelCore/vyper
|
4da7adf69e880902a5d702754a39d2100cb9bde5
|
[
"MIT"
] | null | null | null |
from vyper.utils import (
MemoryPositions,
is_varname_valid,
)
from vyper.types import (
get_size_of_type
)
from vyper.exceptions import (
VariableDeclarationException,
)
from vyper.signatures.function_signature import (
VariableRecord,
)
# Contains arguments, variables, etc
class Context():
    """Per-function compilation context: in-memory variables, globals,
    signatures and bookkeeping flags used while parsing a function body."""

    def __init__(self, vars, global_ctx, sigs=None, forvars=None, return_type=None,
                 is_constant=False, is_private=False, is_payable=False, origcode='', method_id=''):
        # In-memory variables, in the form (name, memory location, type)
        self.vars = vars or {}
        # Next free memory slot, starting past the reserved region.
        self.next_mem = MemoryPositions.RESERVED_MEMORY
        # Global variables, in the form (name, storage location, type)
        self.globals = global_ctx._globals
        # ABI objects, in the form {classname: ABI JSON}
        self.sigs = sigs or {}
        # Variables defined in for loops, e.g. for i in range(6): ...
        self.forvars = forvars or {}
        # Return type of the function
        self.return_type = return_type
        # Is the function constant?
        self.is_constant = is_constant
        # Is the function payable?
        self.is_payable = is_payable
        # Number of placeholders generated (used to generate random names)
        self.placeholder_count = 1
        # Original code (for error pretty-printing purposes)
        self.origcode = origcode
        # In Loop status. Whether body is currently evaluating within a for-loop or not.
        self.in_for_loop = set()
        # Count returns in function
        self.function_return_count = 0
        # Current block scope
        self.blockscopes = set()
        # In assignment. Whether expressiong is currently evaluating an assignment expression.
        self.in_assignment = False
        # List of custom units that have been defined.
        self.custom_units = global_ctx._custom_units
        # defined constants
        self.constants = global_ctx._constants
        # Callback pointer to jump back to, used in private functions.
        self.callback_ptr = None
        self.is_private = is_private
        # method_id of current function
        self.method_id = method_id

    def set_in_assignment(self, state: bool):
        """Toggle the in-assignment flag."""
        self.in_assignment = state

    def set_in_for_loop(self, name_of_list):
        """Record that the named list is being iterated by a for-loop."""
        self.in_for_loop.add(name_of_list)

    def remove_in_for_loop(self, name_of_list):
        """Clear the for-loop marker for the named list."""
        self.in_for_loop.remove(name_of_list)

    def start_blockscope(self, blockscope_id):
        """Open a new block scope identified by ``blockscope_id``."""
        self.blockscopes.add(blockscope_id)

    def end_blockscope(self, blockscope_id):
        """Close a block scope, dropping all variables declared inside it."""
        # Remove all variables that have specific blockscope_id attached.
        self.vars = {
            name: var_record for name, var_record in self.vars.items()
            if blockscope_id not in var_record.blockscopes
        }
        # Remove block scopes
        self.blockscopes.remove(blockscope_id)

    def increment_return_counter(self):
        """Count a return statement encountered in the function body."""
        self.function_return_count += 1

    # Add a new variable
    def new_variable(self, name, typ):
        """Allocate memory for a new variable and return its position.

        Raises VariableDeclarationException for invalid or duplicate names.
        """
        if not is_varname_valid(name, custom_units=self.custom_units):
            raise VariableDeclarationException("Variable name invalid or reserved: " + name)
        if any((name in self.vars, name in self.globals, name in self.constants)):
            raise VariableDeclarationException("Duplicate variable name: %s" % name)
        self.vars[name] = VariableRecord(name, self.next_mem, typ, True, self.blockscopes.copy())
        pos = self.next_mem
        # Each slot is 32 bytes wide (EVM word size).
        self.next_mem += 32 * get_size_of_type(typ)
        return pos

    # Add an anonymous variable (used in some complex function definitions)
    def new_placeholder(self, typ):
        """Allocate an anonymous variable with a generated unique name."""
        name = '_placeholder_' + str(self.placeholder_count)
        self.placeholder_count += 1
        return self.new_variable(name, typ)

    # Get the next unused memory location
    def get_next_mem(self):
        """Return the next free memory offset without allocating it."""
        return self.next_mem
| 37.826923
| 99
| 0.672089
|
4a09d28cdce9a8197051cb6f5f0b9e2a40b2153a
| 3,905
|
py
|
Python
|
examples/linear_model/plot_lasso_lars_ic.py
|
huzq/scikit-learn
|
f862129f36786acbae3d9f2d161bbb72d77b87ec
|
[
"BSD-3-Clause"
] | 1
|
2020-12-03T16:35:55.000Z
|
2020-12-03T16:35:55.000Z
|
examples/linear_model/plot_lasso_lars_ic.py
|
huzq/scikit-learn
|
f862129f36786acbae3d9f2d161bbb72d77b87ec
|
[
"BSD-3-Clause"
] | 2
|
2022-03-04T21:50:26.000Z
|
2022-03-05T02:52:19.000Z
|
examples/linear_model/plot_lasso_lars_ic.py
|
huzq/scikit-learn
|
f862129f36786acbae3d9f2d161bbb72d77b87ec
|
[
"BSD-3-Clause"
] | 1
|
2020-02-16T05:40:12.000Z
|
2020-02-16T05:40:12.000Z
|
"""
==============================================
Lasso model selection via information criteria
==============================================
This example reproduces the example of Fig. 2 of [ZHT2007]_. A
:class:`~sklearn.linear_model.LassoLarsIC` estimator is fit on a
diabetes dataset and the AIC and the BIC criteria are used to select
the best model.
.. note::
It is important to note that the optimization to find `alpha` with
:class:`~sklearn.linear_model.LassoLarsIC` relies on the AIC or BIC
criteria that are computed in-sample, thus on the training set directly.
This approach differs from the cross-validation procedure. For a comparison
of the two approaches, you can refer to the following example:
:ref:`sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py`.
.. topic:: References
.. [ZHT2007] :arxiv:`Zou, Hui, Trevor Hastie, and Robert Tibshirani.
"On the degrees of freedom of the lasso."
The Annals of Statistics 35.5 (2007): 2173-2192.
<0712.0881>`
"""
# Author: Alexandre Gramfort
# Guillaume Lemaitre
# License: BSD 3 clause
# %%
# We will use the diabetes dataset.
from sklearn.datasets import load_diabetes

# Load the diabetes regression dataset as pandas objects.
X, y = load_diabetes(return_X_y=True, as_frame=True)
n_samples = X.shape[0]
X.head()
# %%
# Scikit-learn provides an estimator called
# :class:`~sklearn.linear_model.LinearLarsIC` that uses either Akaike's
# information criterion (AIC) or the Bayesian information criterion (BIC) to
# select the best model. Before fitting
# this model, we will scale the dataset.
#
# In the following, we are going to fit two models to compare the values
# reported by AIC and BIC.
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LassoLarsIC
from sklearn.pipeline import make_pipeline
# Fit a scaler + LassoLarsIC pipeline, starting with the AIC criterion.
lasso_lars_ic = make_pipeline(
    StandardScaler(), LassoLarsIC(criterion="aic", normalize=False)
).fit(X, y)
# %%
# To be in line with the definition in [ZHT2007]_, we need to rescale the
# AIC and the BIC. Indeed, Zou et al. are ignoring some constant terms
# compared to the original definition of AIC derived from the maximum
# log-likelihood of a linear model. You can refer to
# :ref:`mathematical detail section for the User Guide <lasso_lars_ic>`.
def zou_et_al_criterion_rescaling(criterion, n_samples, noise_variance):
    """Rescale the information criterion to follow the definition of Zou et al.

    Subtracts the constant terms ``n_samples * log(2 * pi * noise_variance)``
    and ``n_samples`` that scikit-learn's criterion includes but the
    paper's definition omits.
    """
    log_likelihood_constant = n_samples * np.log(2 * np.pi * noise_variance)
    return criterion - log_likelihood_constant - n_samples
# %%
import numpy as np

# Rescale the AIC values and locate the alpha selected by the criterion.
aic_criterion = zou_et_al_criterion_rescaling(
    lasso_lars_ic[-1].criterion_,
    n_samples,
    lasso_lars_ic[-1].noise_variance_,
)

index_alpha_path_aic = np.flatnonzero(
    lasso_lars_ic[-1].alphas_ == lasso_lars_ic[-1].alpha_
)[0]

# %%
# Refit the same pipeline with the BIC criterion and repeat the rescaling.
lasso_lars_ic.set_params(lassolarsic__criterion="bic").fit(X, y)

bic_criterion = zou_et_al_criterion_rescaling(
    lasso_lars_ic[-1].criterion_,
    n_samples,
    lasso_lars_ic[-1].noise_variance_,
)

index_alpha_path_bic = np.flatnonzero(
    lasso_lars_ic[-1].alphas_ == lasso_lars_ic[-1].alpha_
)[0]

# %%
# Now that we collected the AIC and BIC, we can as well check that the minima
# of both criteria happen at the same alpha. Then, we can simplify the
# following plot.
index_alpha_path_aic == index_alpha_path_bic

# %%
# Finally, we can plot the AIC and BIC criterion and the subsequent selected
# regularization parameter.
import matplotlib.pyplot as plt

plt.plot(aic_criterion, color="tab:blue", marker="o", label="AIC criterion")
plt.plot(bic_criterion, color="tab:orange", marker="o", label="BIC criterion")
plt.vlines(
    index_alpha_path_bic,
    aic_criterion.min(),
    aic_criterion.max(),
    color="black",
    linestyle="--",
    label="Selected alpha",
)
plt.legend()
plt.ylabel("Information criterion")
plt.xlabel("Lasso model sequence")
_ = plt.title("Lasso model selection via AIC and BIC")
| 32.815126
| 82
| 0.727017
|
4a09d2a19b11c88654049fdfc3606dfbbcb97a30
| 22,385
|
py
|
Python
|
dash/testing/browser.py
|
flovouin/dash
|
8edc3399bd7ded47f6546008f0e0696a734d98d3
|
[
"MIT"
] | 1
|
2021-04-04T09:14:03.000Z
|
2021-04-04T09:14:03.000Z
|
dash/testing/browser.py
|
flovouin/dash
|
8edc3399bd7ded47f6546008f0e0696a734d98d3
|
[
"MIT"
] | null | null | null |
dash/testing/browser.py
|
flovouin/dash
|
8edc3399bd7ded47f6546008f0e0696a734d98d3
|
[
"MIT"
] | 1
|
2022-02-25T23:50:39.000Z
|
2022-02-25T23:50:39.000Z
|
# pylint: disable=missing-docstring
import os
import sys
import time
import logging
import warnings
import percy
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import (
WebDriverException,
TimeoutException,
MoveTargetOutOfBoundsException,
)
from dash.testing.wait import text_to_equal, style_to_equal, contains_text, until
from dash.testing.dash_page import DashPageMixin
from dash.testing.errors import DashAppLoadingError, BrowserError, TestingTimeoutError
from dash.testing.consts import SELENIUM_GRID_DEFAULT
logger = logging.getLogger(__name__)
class Browser(DashPageMixin):
# pylint: disable=too-many-arguments
    def __init__(
        self,
        browser,
        remote=False,
        remote_url=None,
        headless=False,
        options=None,
        download_path="",
        percy_run=True,
        percy_finalize=True,
        percy_assets_root="",
        wait_timeout=10,
        pause=False,
    ):
        """Initialize the webdriver wrapper used by dash testing.

        Args:
            browser: Browser name, e.g. ``"chrome"`` or ``"firefox"``.
            remote: Use a remote (Selenium Grid) webdriver.
            remote_url: Grid hub URL; a non-default value implies remote.
            headless: Run the browser headless.
            options: Optional list of webdriver Options; first item wins.
            download_path: Directory for browser downloads.
            percy_run: Initialize a percy build for visual testing.
            percy_finalize: Finalize the percy build on exit.
            percy_assets_root: Root directory of percy assets.
            wait_timeout: Default timeout (seconds) for explicit waits.
            pause: Drop into a debugger after the app page loads.
        """
        self._browser = browser.lower()
        self._remote_url = remote_url
        # A custom grid URL forces remote mode regardless of `remote`.
        self._remote = (
            True if remote_url and remote_url != SELENIUM_GRID_DEFAULT else remote
        )
        self._headless = headless
        self._options = options
        self._download_path = download_path
        self._wait_timeout = wait_timeout
        self._percy_finalize = percy_finalize
        self._percy_run = percy_run
        self._pause = pause

        # Retry webdriver creation briefly; get_webdriver returns None on
        # WebDriverException until it succeeds.
        self._driver = until(self.get_webdriver, timeout=1)
        self._driver.implicitly_wait(2)
        self._wd_wait = WebDriverWait(self.driver, wait_timeout)
        self._last_ts = 0
        self._url = None

        self._window_idx = 0  # switch browser tabs

        if self._percy_run:
            self.percy_runner = percy.Runner(
                loader=percy.ResourceLoader(
                    webdriver=self.driver,
                    base_url="/assets",
                    root_dir=percy_assets_root,
                )
            )
            self.percy_runner.initialize_build()

        logger.info("initialize browser with arguments")
        logger.info(" headless => %s", self._headless)
        logger.info(" download_path => %s", self._download_path)
        logger.info(" percy asset root => %s", os.path.abspath(percy_assets_root))
    def __enter__(self):
        """Enter the context manager, returning the browser itself."""
        return self
    def __exit__(self, exc_type, exc_val, traceback):
        """Quit the webdriver and finalize the percy build if configured."""
        try:
            self.driver.quit()
            if self._percy_run and self._percy_finalize:
                logger.info("percy runner finalize build now")
                self.percy_runner.finalize_build()
            else:
                logger.info("percy finalize relies on CI job")
        except WebDriverException:
            logger.exception("webdriver quit was not successful")
        except percy.errors.Error:
            logger.exception("percy runner failed to finalize properly")
    def visit_and_snapshot(
        self,
        resource_path,
        hook_id,
        wait_for_callbacks=True,
        convert_canvases=False,
        assert_check=True,
        stay_on_page=False,
    ):
        """Navigate to ``resource_path``, wait for ``hook_id``, then snapshot.

        Optionally asserts that no devtools error alert is shown and
        navigates back afterwards (unless ``stay_on_page`` is set).
        """
        try:
            path = resource_path.lstrip("/")
            if path != resource_path:
                logger.warning("we stripped the left '/' in resource_path")
            self.driver.get("{}/{}".format(self.server_url.rstrip("/"), path))

            # wait for the hook_id to present and all callbacks get fired
            self.wait_for_element_by_id(hook_id)
            self.percy_snapshot(
                path,
                wait_for_callbacks=wait_for_callbacks,
                convert_canvases=convert_canvases,
            )
            if assert_check:
                assert not self.driver.find_elements_by_css_selector(
                    "div.dash-debug-alert"
                ), "devtools should not raise an error alert"
            if not stay_on_page:
                self.driver.back()
        except WebDriverException as e:
            logger.exception("snapshot at resource %s error", path)
            raise e
    def percy_snapshot(self, name="", wait_for_callbacks=False, convert_canvases=False):
        """percy_snapshot - visual test api shortcut to `percy_runner.snapshot`.

        It also combines the snapshot `name` with the Python version.
        With ``wait_for_callbacks`` it blocks until outstanding dash
        callbacks settle; with ``convert_canvases`` it temporarily swaps
        every ``<canvas>`` for an ``<img>`` so percy can capture it.
        """
        snapshot_name = "{} - py{}.{}".format(
            name, sys.version_info.major, sys.version_info.minor
        )
        logger.info("taking snapshot name => %s", snapshot_name)
        try:
            if wait_for_callbacks:
                # the extra one second sleep adds safe margin in the context
                # of wait_for_callbacks
                time.sleep(1)
                until(self._wait_for_callbacks, timeout=40, poll=0.3)

        except TestingTimeoutError:
            # API will log the error but this TimeoutError should not block
            # the test execution to continue and it will still do a snapshot
            # as diff reference for the build run.
            logger.error(
                "wait_for_callbacks failed => status of invalid rqs %s",
                self.redux_state_rqs,
            )

        if convert_canvases:
            # Replace each canvas with an equivalent <img>, stashing the
            # original nodes so they can be restored after the snapshot.
            self.driver.execute_script(
                """
                const stash = window._canvasStash = [];
                Array.from(document.querySelectorAll('canvas')).forEach(c => {
                    const i = document.createElement('img');
                    i.src = c.toDataURL();
                    i.width = c.width;
                    i.height = c.height;
                    i.setAttribute('style', c.getAttribute('style'));
                    i.className = c.className;
                    i.setAttribute('data-canvasnum', stash.length);
                    stash.push(c);
                    c.parentElement.insertBefore(i, c);
                    c.parentElement.removeChild(c);
                });
                """
            )

            self.percy_runner.snapshot(name=snapshot_name)

            # Restore the stashed canvases in place of the temporary images.
            self.driver.execute_script(
                """
                const stash = window._canvasStash;
                Array.from(
                    document.querySelectorAll('img[data-canvasnum]')
                ).forEach(i => {
                    const c = stash[+i.getAttribute('data-canvasnum')];
                    i.parentElement.insertBefore(c, i);
                    i.parentElement.removeChild(i);
                });
                delete window._canvasStash;
                """
            )
        else:
            self.percy_runner.snapshot(name=snapshot_name)
def take_snapshot(self, name):
"""Hook method to take snapshot when a selenium test fails. The
snapshot is placed under.
- `/tmp/dash_artifacts` in linux
- `%TEMP` in windows
with a filename combining test case name and the
running selenium session id
"""
target = "/tmp/dash_artifacts" if not self._is_windows() else os.getenv("TEMP")
if not os.path.exists(target):
try:
os.mkdir(target)
except OSError:
logger.exception("cannot make artifacts")
self.driver.save_screenshot(
"{}/{}_{}.png".format(target, name, self.session_id)
)
    def find_element(self, selector):
        """find_element returns the first found element by the css `selector`
        shortcut to `driver.find_element_by_css_selector`."""
        # Raises selenium NoSuchElementException if nothing matches.
        return self.driver.find_element_by_css_selector(selector)
    def find_elements(self, selector):
        """find_elements returns a list of all elements matching the css
        `selector`.

        shortcut to `driver.find_elements_by_css_selector`.
        """
        # Returns an empty list (no exception) when nothing matches.
        return self.driver.find_elements_by_css_selector(selector)
def _get_element(self, elem_or_selector):
if isinstance(elem_or_selector, str):
return self.find_element(elem_or_selector)
return elem_or_selector
    def _wait_for(self, method, args, timeout, msg):
        """Abstract generic pattern for explicit WebDriverWait.

        Uses the shared fixture wait when ``timeout`` is None, otherwise
        builds a dedicated WebDriverWait with the given timeout.
        """
        _wait = (
            self._wd_wait if timeout is None else WebDriverWait(self.driver, timeout)
        )
        logger.debug(
            "method, timeout, poll => %s %s %s",
            method,
            _wait._timeout,  # pylint: disable=protected-access
            _wait._poll,  # pylint: disable=protected-access
        )
        return _wait.until(method(*args), msg)
    def wait_for_element(self, selector, timeout=None):
        """wait_for_element is shortcut to `wait_for_element_by_css_selector`
        timeout if not set, equals to the fixture's `wait_timeout`."""
        return self.wait_for_element_by_css_selector(selector, timeout)
def wait_for_element_by_css_selector(self, selector, timeout=None):
"""Explicit wait until the element is present, timeout if not set,
equals to the fixture's `wait_timeout` shortcut to `WebDriverWait` with
`EC.presence_of_element_located`."""
return self._wait_for(
EC.presence_of_element_located,
((By.CSS_SELECTOR, selector),),
timeout,
"timeout {}s => waiting for selector {}".format(
timeout if timeout else self._wait_timeout, selector
),
)
def wait_for_no_elements(self, selector, timeout=None):
"""Explicit wait until an element is NOT found. timeout defaults to
the fixture's `wait_timeout`."""
until(
# if we use get_elements it waits a long time to see if they appear
# so this one calls out directly to execute_script
lambda: self.driver.execute_script(
"return document.querySelectorAll('{}').length".format(selector)
)
== 0,
timeout if timeout else self._wait_timeout,
)
def wait_for_element_by_id(self, element_id, timeout=None):
"""Explicit wait until the element is present, timeout if not set,
equals to the fixture's `wait_timeout` shortcut to `WebDriverWait` with
`EC.presence_of_element_located`."""
return self._wait_for(
EC.presence_of_element_located,
((By.ID, element_id),),
timeout,
"timeout {}s => waiting for element id {}".format(
timeout if timeout else self._wait_timeout, element_id
),
)
def wait_for_style_to_equal(self, selector, style, val, timeout=None):
"""Explicit wait until the element's style has expected `value` timeout
if not set, equals to the fixture's `wait_timeout` shortcut to
`WebDriverWait` with customized `style_to_equal` condition."""
return self._wait_for(
method=style_to_equal,
args=(selector, style, val),
timeout=timeout,
msg="style val => {} {} not found within {}s".format(
style, val, timeout if timeout else self._wait_timeout
),
)
def wait_for_text_to_equal(self, selector, text, timeout=None):
"""Explicit wait until the element's text equals the expected `text`.
timeout if not set, equals to the fixture's `wait_timeout`
shortcut to `WebDriverWait` with customized `text_to_equal`
condition.
"""
return self._wait_for(
method=text_to_equal,
args=(selector, text),
timeout=timeout,
msg="text -> {} not found within {}s".format(
text, timeout if timeout else self._wait_timeout
),
)
def wait_for_contains_text(self, selector, text, timeout=None):
"""Explicit wait until the element's text contains the expected `text`.
timeout if not set, equals to the fixture's `wait_timeout`
shortcut to `WebDriverWait` with customized `contains_text`
condition.
"""
return self._wait_for(
method=contains_text,
args=(selector, text),
timeout=timeout,
msg="text -> {} not found inside element within {}s".format(
text, timeout if timeout else self._wait_timeout
),
)
    def wait_for_page(self, url=None, timeout=10):
        """wait_for_page navigates to the url in webdriver wait until the
        renderer is loaded in browser.

        use the `server_url` if url is not provided.

        Raises:
            DashAppLoadingError: if the dash react entry point does not
                appear within ``timeout`` seconds.
        """
        self.driver.get(self.server_url if url is None else url)
        try:
            self.wait_for_element_by_css_selector(
                self.dash_entry_locator, timeout=timeout
            )
        except TimeoutException:
            logger.exception("dash server is not loaded within %s seconds", timeout)
            logger.debug(self.get_logs())
            raise DashAppLoadingError(
                "the expected Dash react entry point cannot be loaded"
                " in browser\n HTML => {}\n Console Logs => {}\n".format(
                    self.driver.find_element_by_tag_name("body").get_property(
                        "innerHTML"
                    ),
                    "\n".join((str(log) for log in self.get_logs())),
                )
            )

        if self._pause:
            # Prefer stdlib pdb; fall back to ipdb if pdb is unavailable.
            try:
                import pdb as pdb_  # pylint: disable=import-outside-toplevel
            except ImportError:
                import ipdb as pdb_  # pylint: disable=import-outside-toplevel

            pdb_.set_trace()
    def select_dcc_dropdown(self, elem_or_selector, value=None, index=None):
        """Select an option in a dcc.Dropdown by option ``value`` or ``index``.

        ``index`` takes precedence when it is an int; logs an error when
        no option matches.
        """
        dropdown = self._get_element(elem_or_selector)
        dropdown.click()

        menu = dropdown.find_element_by_css_selector("div.Select-menu-outer")
        logger.debug("the available options are %s", "|".join(menu.text.split("\n")))

        options = menu.find_elements_by_css_selector("div.VirtualizedSelectOption")
        if options:
            if isinstance(index, int):
                options[index].click()
                return

            for option in options:
                if option.text == value:
                    option.click()
                    return

        logger.error(
            "cannot find matching option using value=%s or index=%s", value, index
        )
def toggle_window(self):
"""Switch between the current working window and the new opened one."""
idx = (self._window_idx + 1) % 2
self.switch_window(idx=idx)
self._window_idx += 1
def switch_window(self, idx=0):
"""Switch to window by window index shortcut to
`driver.switch_to.window`."""
if len(self.driver.window_handles) <= idx:
raise BrowserError("there is no second window in Browser")
self.driver.switch_to.window(self.driver.window_handles[idx])
def open_new_tab(self, url=None):
"""Open a new tab in browser url is not set, equals to `server_url`."""
self.driver.execute_script(
'window.open("{}", "new window")'.format(
self.server_url if url is None else url
)
)
def get_webdriver(self):
try:
return getattr(self, "_get_{}".format(self._browser))()
except WebDriverException:
logger.exception("<<<Webdriver not initialized correctly>>>")
def _get_wd_options(self):
options = (
self._options[0]
if self._options and isinstance(self._options, list)
else getattr(webdriver, self._browser).options.Options()
)
if self._headless:
options.headless = True
return options
    def _get_chrome(self):
        """Build a Chrome webdriver (local or remote) with download support."""
        options = self._get_wd_options()

        capabilities = DesiredCapabilities.CHROME
        capabilities["loggingPrefs"] = {"browser": "SEVERE"}
        capabilities["goog:loggingPrefs"] = {"browser": "SEVERE"}

        if "DASH_TEST_CHROMEPATH" in os.environ:
            options.binary_location = os.environ["DASH_TEST_CHROMEPATH"]

        options.add_experimental_option(
            "prefs",
            {
                "download.default_directory": self.download_path,
                "download.prompt_for_download": False,
                "download.directory_upgrade": True,
                "safebrowsing.enabled": False,
                "safebrowsing.disable_download_protection": True,
            },
        )

        chrome = (
            webdriver.Remote(
                command_executor=self._remote_url,
                options=options,
                desired_capabilities=capabilities,
            )
            if self._remote
            else webdriver.Chrome(options=options, desired_capabilities=capabilities)
        )

        # https://bugs.chromium.org/p/chromium/issues/detail?id=696481
        if self._headless:
            # Headless chrome disables downloads by default; re-enable them
            # through the DevTools Page.setDownloadBehavior command.
            # pylint: disable=protected-access
            chrome.command_executor._commands["send_command"] = (
                "POST",
                "/session/$sessionId/chromium/send_command",
            )
            params = {
                "cmd": "Page.setDownloadBehavior",
                "params": {"behavior": "allow", "downloadPath": self.download_path},
            }
            res = chrome.execute("send_command", params)
            logger.debug("enabled headless download returns %s", res)

        chrome.set_window_position(0, 0)
        return chrome
    def _get_firefox(self):
        """Build a Firefox webdriver (local or remote) with download support."""
        options = self._get_wd_options()

        capabilities = DesiredCapabilities.FIREFOX
        capabilities["loggingPrefs"] = {"browser": "SEVERE"}
        capabilities["marionette"] = True

        # https://developer.mozilla.org/en-US/docs/Download_Manager_preferences
        fp = webdriver.FirefoxProfile()
        fp.set_preference("browser.download.dir", self.download_path)
        # folderList=2 means "use the custom download directory".
        fp.set_preference("browser.download.folderList", 2)
        fp.set_preference(
            "browser.helperApps.neverAsk.saveToDisk",
            "application/octet-stream",  # this MIME is generic for binary
        )
        return (
            webdriver.Remote(
                command_executor=self._remote_url,
                options=options,
                desired_capabilities=capabilities,
            )
            if self._remote
            else webdriver.Firefox(
                firefox_profile=fp, options=options, capabilities=capabilities
            )
        )
@staticmethod
def _is_windows():
return sys.platform == "win32"
def multiple_click(self, elem_or_selector, clicks):
"""multiple_click click the element with number of `clicks`."""
for _ in range(clicks):
self._get_element(elem_or_selector).click()
    def clear_input(self, elem_or_selector):
        """Simulate key press to clear the input.

        Sends END, SHIFT+HOME (select all), DELETE so the page receives real
        keyboard events rather than a programmatic clear.
        """
        elem = self._get_element(elem_or_selector)
        logger.debug("clear input with %s => %s", elem_or_selector, elem)
        (
            ActionChains(self.driver)
            .move_to_element(elem)
            .pause(0.2)
            .click(elem)
            .send_keys(Keys.END)
            .key_down(Keys.SHIFT)
            .send_keys(Keys.HOME)
            .key_up(Keys.SHIFT)
            .send_keys(Keys.DELETE)
        ).perform()
    def zoom_in_graph_by_ratio(
        self, elem_or_selector, start_fraction=0.5, zoom_box_fraction=0.2, compare=True
    ):
        """Zoom out a graph with a zoom box fraction of component dimension
        default start at middle with a rectangle of 1/5 of the dimension use
        `compare` to control if we check the svg get changed."""
        elem = self._get_element(elem_or_selector)
        # Snapshot the markup so we can assert the zoom actually changed it.
        prev = elem.get_attribute("innerHTML")
        w, h = elem.size["width"], elem.size["height"]
        try:
            # Drag a zoom box starting at the given fraction of the element.
            ActionChains(self.driver).move_to_element_with_offset(
                elem, w * start_fraction, h * start_fraction
            ).drag_and_drop_by_offset(
                elem, w * zoom_box_fraction, h * zoom_box_fraction
            ).perform()
        except MoveTargetOutOfBoundsException:
            # Best-effort: an out-of-bounds offset aborts the drag but the
            # failure is only logged, not raised.
            logger.exception("graph offset outside of the boundary")
        if compare:
            assert prev != elem.get_attribute(
                "innerHTML"
            ), "SVG content should be different after zoom"
    def click_at_coord_fractions(self, elem_or_selector, fx, fy):
        """Click inside the element at fractional offsets (fx, fy) of its size."""
        elem = self._get_element(elem_or_selector)
        ActionChains(self.driver).move_to_element_with_offset(
            elem, elem.size["width"] * fx, elem.size["height"] * fy
        ).click().perform()
def get_logs(self):
"""Return a list of `SEVERE` level logs after last reset time stamps
(default to 0, resettable by `reset_log_timestamp`.
Chrome only
"""
if self.driver.name.lower() == "chrome":
return [
entry
for entry in self.driver.get_log("browser")
if entry["timestamp"] > self._last_ts
]
warnings.warn("get_logs always return None with webdrivers other than Chrome")
return None
def reset_log_timestamp(self):
"""reset_log_timestamp only work with chrome webdriver."""
if self.driver.name.lower() == "chrome":
entries = self.driver.get_log("browser")
if entries:
self._last_ts = entries[-1]["timestamp"]
    @property
    def driver(self):
        """Expose the selenium webdriver as fixture property."""
        return self._driver
    @property
    def session_id(self):
        """Return the id of the underlying webdriver session."""
        return self.driver.session_id
    @property
    def server_url(self):
        """Return the URL the webdriver is (or will be) pointed at."""
        return self._url
    @server_url.setter
    def server_url(self, value):
        """Set the server url so the selenium is aware of the local server
        port.
        It also implicitly calls `wait_for_page`.
        """
        self._url = value
        self.wait_for_page()
    @property
    def download_path(self):
        """Return the directory browser downloads are saved into."""
        return self._download_path
| 36.878089
| 88
| 0.596694
|
4a09d3af2e4b3b06866e84d41f42ec77c7ceb1d1
| 8,429
|
py
|
Python
|
utils/critics.py
|
leehe228/MAAC
|
c3256b1bad556eb4981858ac30bb2fc3483b3e0e
|
[
"MIT"
] | null | null | null |
utils/critics.py
|
leehe228/MAAC
|
c3256b1bad556eb4981858ac30bb2fc3483b3e0e
|
[
"MIT"
] | null | null | null |
utils/critics.py
|
leehe228/MAAC
|
c3256b1bad556eb4981858ac30bb2fc3483b3e0e
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from itertools import chain
class AttentionCritic(nn.Module):
    """
    Attention network, used as critic for all agents. Each agent gets its own
    observation and action, and can also attend over the other agents' encoded
    observations and actions.
    """
    def __init__(self, sa_sizes, hidden_dim=32, norm_in=True, attend_heads=1):
        """
        Inputs:
            sa_sizes (list of (int, int)): Size of state and action spaces per
                                           agent
            hidden_dim (int): Number of hidden dimensions
            norm_in (bool): Whether to apply BatchNorm to input
            attend_heads (int): Number of attention heads to use (use a number
                                that hidden_dim is divisible by)
        """
        super(AttentionCritic, self).__init__()
        # Each head works on an equal slice of the hidden dimension.
        assert (hidden_dim % attend_heads) == 0
        self.sa_sizes = sa_sizes
        self.nagents = len(sa_sizes)
        self.attend_heads = attend_heads
        self.critic_encoders = nn.ModuleList()
        self.critics = nn.ModuleList()
        self.state_encoders = nn.ModuleList()
        # iterate over agents: one (state+action) encoder, one state-only
        # encoder and one Q head per agent
        for sdim, adim in sa_sizes:
            idim = sdim + adim
            odim = adim
            encoder = nn.Sequential()
            if norm_in:
                encoder.add_module('enc_bn', nn.BatchNorm1d(idim,
                                                            affine=False))
            encoder.add_module('enc_fc1', nn.Linear(idim, hidden_dim))
            encoder.add_module('enc_nl', nn.LeakyReLU())
            self.critic_encoders.append(encoder)
            # Q head consumes [own state encoding | attended other-agent
            # values], hence the 2 * hidden_dim input width.
            critic = nn.Sequential()
            critic.add_module('critic_fc1', nn.Linear(2 * hidden_dim,
                                                      hidden_dim))
            critic.add_module('critic_nl', nn.LeakyReLU())
            critic.add_module('critic_fc2', nn.Linear(hidden_dim, odim))
            self.critics.append(critic)
            state_encoder = nn.Sequential()
            if norm_in:
                state_encoder.add_module('s_enc_bn', nn.BatchNorm1d(
                    sdim, affine=False))
            state_encoder.add_module('s_enc_fc1', nn.Linear(sdim,
                                                            hidden_dim))
            state_encoder.add_module('s_enc_nl', nn.LeakyReLU())
            self.state_encoders.append(state_encoder)
        attend_dim = hidden_dim // attend_heads
        # Per-head key/selector(query)/value projections, shared by all agents.
        self.key_extractors = nn.ModuleList()
        self.selector_extractors = nn.ModuleList()
        self.value_extractors = nn.ModuleList()
        for i in range(attend_heads):
            self.key_extractors.append(nn.Linear(hidden_dim, attend_dim, bias=False))
            self.selector_extractors.append(nn.Linear(hidden_dim, attend_dim, bias=False))
            self.value_extractors.append(nn.Sequential(nn.Linear(hidden_dim,
                                                                 attend_dim),
                                                       nn.LeakyReLU()))
        self.shared_modules = [self.key_extractors, self.selector_extractors,
                               self.value_extractors, self.critic_encoders]

    def shared_parameters(self):
        """
        Parameters shared across agents and reward heads
        """
        return chain(*[m.parameters() for m in self.shared_modules])

    def scale_shared_grads(self):
        """
        Scale gradients for parameters that are shared since they accumulate
        gradients from the critic loss function multiple times
        """
        for p in self.shared_parameters():
            p.grad.data.mul_(1. / self.nagents)

    def forward(self, inps, agents=None, return_q=True, return_all_q=False,
                regularize=False, return_attend=False, logger=None, niter=0):
        """
        Inputs:
            inps (list of PyTorch Matrices): Inputs to each agents' encoder
                                             (batch of obs + ac)
            agents (list of int): indices of agents to return Q for
                                  (default: all agents)
            return_q (bool): return Q-value
            return_all_q (bool): return Q-value for all actions
            regularize (bool): returns values to add to loss function for
                               regularization
            return_attend (bool): return attention weights per agent
            logger (TensorboardX SummaryWriter): If passed in, important values
                                                 are logged
        """
        if agents is None:
            agents = range(len(self.critic_encoders))
        states = [s for s, a in inps]
        actions = [a for s, a in inps]
        inps = [torch.cat((s, a), dim=1) for s, a in inps]  # concat state + action
        # extract state-action encoding for each agent
        sa_encodings = [encoder(inp) for encoder, inp
                        in zip(self.critic_encoders, inps)]
        # extract state encoding for each agent that we're returning Q for
        s_encodings = [self.state_encoders[a_i](states[a_i]) for a_i in agents]
        # extract keys for each head for each agent
        all_head_keys = [[k_ext(enc) for enc in sa_encodings]
                         for k_ext in self.key_extractors]
        # extract sa values for each head for each agent
        all_head_values = [[v_ext(enc) for enc in sa_encodings]
                           for v_ext in self.value_extractors]
        # extract selectors for each head for each agent that we're returning
        # Q for.  NOTE: s_encodings is already restricted to `agents`, so every
        # entry gets a selector.  (The previous `enumerate(s_encodings) if i in
        # agents` filter compared *positions* in s_encodings against agent
        # *ids* and silently dropped selectors whenever `agents` was not
        # range(nagents), crashing the critic for agent subsets.)
        all_head_selectors = [[sel_ext(enc) for enc in s_encodings]
                              for sel_ext in self.selector_extractors]
        other_all_values = [[] for _ in range(len(agents))]
        all_attend_logits = [[] for _ in range(len(agents))]
        all_attend_probs = [[] for _ in range(len(agents))]
        # calculate attention per head
        for curr_head_keys, curr_head_values, curr_head_selectors in zip(
                all_head_keys, all_head_values, all_head_selectors):
            # iterate over agents: each attends over all *other* agents
            for i, a_i, selector in zip(range(len(agents)), agents,
                                        curr_head_selectors):
                keys = [k for j, k in enumerate(curr_head_keys) if j != a_i]
                values = [v for j, v in enumerate(curr_head_values) if j != a_i]
                # calculate attention across agents
                attend_logits = torch.matmul(
                    selector.view(selector.shape[0], 1, -1),
                    torch.stack(keys).permute(1, 2, 0))
                # scale dot-products by size of key (from Attention is All You Need)
                scaled_attend_logits = attend_logits / np.sqrt(keys[0].shape[1])
                attend_weights = F.softmax(scaled_attend_logits, dim=2)
                other_values = (torch.stack(values).permute(1, 2, 0) *
                                attend_weights).sum(dim=2)
                other_all_values[i].append(other_values)
                all_attend_logits[i].append(attend_logits)
                all_attend_probs[i].append(attend_weights)
        # calculate Q per agent
        all_rets = []
        for i, a_i in enumerate(agents):
            # per-head attention entropies (for logging only)
            head_entropies = [(-((probs + 1e-8).log() * probs).squeeze()
                               .sum(1).mean())
                              for probs in all_attend_probs[i]]
            agent_rets = []
            critic_in = torch.cat((s_encodings[i], *other_all_values[i]), dim=1)
            all_q = self.critics[a_i](critic_in)
            # index of the (one-hot) action actually taken
            int_acs = actions[a_i].max(dim=1, keepdim=True)[1]
            q = all_q.gather(1, int_acs)
            if return_q:
                agent_rets.append(q)
            if return_all_q:
                agent_rets.append(all_q)
            if regularize:
                # regularize magnitude of attention logits
                attend_mag_reg = 1e-3 * sum((logit**2).mean()
                                            for logit in all_attend_logits[i])
                regs = (attend_mag_reg,)
                agent_rets.append(regs)
            if return_attend:
                agent_rets.append(np.array(all_attend_probs[i]))
            if logger is not None:
                logger.add_scalars('agent%i/attention' % a_i,
                                   dict(('head%i_entropy' % h_i, ent)
                                        for h_i, ent
                                        in enumerate(head_entropies)),
                                   niter)
            # unwrap single-item returns for convenience
            if len(agent_rets) == 1:
                all_rets.append(agent_rets[0])
            else:
                all_rets.append(agent_rets)
        if len(all_rets) == 1:
            return all_rets[0]
        else:
            return all_rets
| 45.562162
| 147
| 0.581564
|
4a09d5408544b849a967218e0f617c3af0fbb105
| 2,771
|
py
|
Python
|
setup.py
|
JoranAngevaare/straxen
|
0b7cbfdceb4f5ef6f9eb0abea862118070331219
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
JoranAngevaare/straxen
|
0b7cbfdceb4f5ef6f9eb0abea862118070331219
|
[
"BSD-3-Clause"
] | 45
|
2021-09-16T13:54:04.000Z
|
2022-03-02T10:34:15.000Z
|
setup.py
|
JoranAngevaare/straxen
|
0b7cbfdceb4f5ef6f9eb0abea862118070331219
|
[
"BSD-3-Clause"
] | 3
|
2022-01-13T16:06:25.000Z
|
2022-02-09T03:31:51.000Z
|
import setuptools
def open_requirements(path):
    """Read a requirements file and return its entries as a list of strings.

    Lines installed straight from git (``git+...`` URLs) are reduced to their
    final path component so only the bare project name is kept; all other
    lines (including version pins) pass through unchanged.
    """
    with open(path) as handle:
        raw_lines = handle.read().splitlines()
    requirements = []
    for line in raw_lines:
        if line.startswith('git+'):
            requirements.append(line.rsplit('/', 1)[-1])
        else:
            requirements.append(line)
    return requirements
# Read the requirement lists; ``git+`` URLs are reduced to the bare
# repository name by open_requirements (version pins on plain lines kept).
requires = open_requirements('requirements.txt')
tests_requires = open_requirements('extra_requirements/requirements-tests.txt')
doc_requirements = open_requirements('extra_requirements/requirements-docs.txt')
# Long description shown on PyPI: README followed by the changelog.
with open('README.md') as file:
    readme = file.read()
with open('HISTORY.md') as file:
    history = file.read()
setuptools.setup(name='straxen',
                 version='1.2.6',
                 description='Streaming analysis for XENON',
                 author='Straxen contributors, the XENON collaboration',
                 url='https://github.com/XENONnT/straxen',
                 long_description=readme + '\n\n' + history,
                 long_description_content_type="text/markdown",
                 setup_requires=['pytest-runner'],
                 install_requires=requires,
                 tests_require=requires + tests_requires,
                 # Not testing py3.6 #616
                 python_requires=">=3.6",
                 extras_require={
                     'docs': doc_requirements,
                     'microstrax': ['hug'],
                 },
                 scripts=[
                     'bin/bootstrax',
                     'bin/straxer',
                     'bin/fake_daq',
                     'bin/microstrax',
                     'bin/ajax',
                     'bin/refresh_raw_records',
                 ],
                 # Ship the extra_requirements txt files inside the package so
                 # the pinned lists are available at runtime/install time.
                 packages=setuptools.find_packages() + ['extra_requirements'],
                 package_dir={'extra_requirements': 'extra_requirements'},
                 package_data={'extra_requirements': ['requirements-docs.txt',
                                                      'requirements-tests.txt']},
                 classifiers=[
                     'Development Status :: 5 - Production/Stable',
                     'License :: OSI Approved :: BSD License',
                     'Natural Language :: English',
                     'Programming Language :: Python :: 3.6',
                     'Programming Language :: Python :: 3.7',
                     'Programming Language :: Python :: 3.8',
                     'Programming Language :: Python :: 3.9',
                     'Programming Language :: Python :: 3.10',
                     'Intended Audience :: Science/Research',
                     'Programming Language :: Python :: Implementation :: CPython',
                     'Topic :: Scientific/Engineering :: Physics',
                 ],
                 zip_safe=False)
| 42.630769
| 83
| 0.513894
|
4a09d636ee320da43498e5ffb9846a9e444a958d
| 169,908
|
py
|
Python
|
pandas/core/groupby.py
|
kantologist/pandas
|
607910b2a26ab7d7e94dd8ce02198631c8649513
|
[
"BSD-3-Clause"
] | 1
|
2018-03-06T03:22:55.000Z
|
2018-03-06T03:22:55.000Z
|
pandas/core/groupby.py
|
wswsr/pandas
|
aedbd948938f7e9230a321eb49f6c789867ab2b6
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/core/groupby.py
|
wswsr/pandas
|
aedbd948938f7e9230a321eb49f6c789867ab2b6
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2018-08-05T22:51:23.000Z
|
2018-08-05T22:51:23.000Z
|
import types
from functools import wraps, partial
import numpy as np
import datetime
import collections
import warnings
import copy
from textwrap import dedent
from pandas.compat import (
zip, range, lzip,
callable, map
)
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.compat import set_function_name
from pandas.core.dtypes.common import (
is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype,
is_interval_dtype,
is_datetimelike,
is_datetime64_any_dtype,
is_bool, is_integer_dtype,
is_complex_dtype,
is_bool_dtype,
is_scalar,
is_list_like,
is_hashable,
needs_i8_conversion,
_ensure_float64,
_ensure_platform_int,
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_float)
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import isna, isnull, notna, _maybe_fill
from pandas.core.base import (PandasObject, SelectionMixin, GroupByError,
DataError, SpecificationError)
from pandas.core.index import (Index, MultiIndex,
CategoricalIndex, _ensure_index)
from pandas.core.arrays import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.core.sorting import (get_group_index_sorter, get_group_index,
compress_group_index, get_flattened_iterator,
decons_obs_group_ids, get_indexer_dict)
from pandas.util._decorators import (cache_readonly, Substitution,
Appender, make_signature)
from pandas.io.formats.printing import pprint_thing
from pandas.util._validators import validate_kwargs
import pandas.core.common as com
import pandas.core.algorithms as algorithms
from pandas.core.config import option_context
from pandas.plotting._core import boxplot_frame_groupby
from pandas._libs import (lib, reduction,
groupby as libgroupby,
Timestamp, NaT, iNaT)
from pandas._libs.lib import count_level_2d
_doc_template = """
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
pandas.Panel.%(name)s
"""
_apply_docs = dict(
template="""
Apply function ``func`` group-wise and combine the results together.
The function passed to ``apply`` must take a {input} as its first
argument and return a dataframe, a series or a scalar. ``apply`` will
then take care of combining the results back together into a single
dataframe or series. ``apply`` is therefore a highly flexible
grouping method.
While ``apply`` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods.
Pandas offers a wide range of method that will be much faster
than using ``apply`` for their specific purposes, so try to use them
before reaching for ``apply``.
Parameters
----------
func : function
A callable that takes a {input} as its first argument, and
returns a dataframe, a series or a scalar. In addition the
callable may take positional and keyword arguments
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to ``func``
Returns
-------
applied : Series or DataFrame
Notes
-----
In the current implementation ``apply`` calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
See also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate, transform
""",
dataframe_examples="""
>>> df = pd.DataFrame({'A': 'a a b'.split(), 'B': [1,2,3], 'C': [4,6, 5]})
>>> g = df.groupby('A')
From ``df`` above we can see that ``g`` has two groups, ``a``, ``b``.
Calling ``apply`` in various ways, we can get different grouping results:
Example 1: below the function passed to ``apply`` takes a dataframe as
its argument and returns a dataframe. ``apply`` combines the result for
each group together into a new dataframe:
>>> g.apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to ``apply`` takes a dataframe as
its argument and returns a series. ``apply`` combines the result for
each group together into a new dataframe:
>>> g.apply(lambda x: x.max() - x.min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to ``apply`` takes a dataframe as
its argument and returns a scalar. ``apply`` combines the result for
each group together into a series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.C.max() - x.B.min())
A
a 5
b 2
dtype: int64
""",
series_examples="""
>>> ser = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = ser.groupby(ser.index)
From ``ser`` above we can see that ``g`` has two groups, ``a``, ``b``.
Calling ``apply`` in various ways, we can get different grouping results:
Example 1: The function passed to ``apply`` takes a series as
its argument and returns a series. ``apply`` combines the result for
each group together into a new series:
>>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to ``apply`` takes a series as
its argument and returns a scalar. ``apply`` combines the result for
each group together into a series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.max() - x.min())
a 1
b 0
dtype: int64
""")
_pipe_template = """\
Apply a function ``func`` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use ``.pipe`` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c))
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, string)
Function to apply to this %(klass)s object or, alternatively,
a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a
string indicating the keyword of ``callable`` that expects the
%(klass)s object.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : dict, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
See more `here
<http://pandas.pydata.org/pandas-docs/stable/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
See Also
--------
pandas.Series.pipe : Apply a function with arguments to a series
pandas.DataFrame.pipe: Apply a function with arguments to a dataframe
apply : Apply function to each group instead of to the
full %(klass)s object.
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each group
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, f returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
Returns
-------
%(klass)s
See also
--------
aggregate, transform
Examples
--------
# Same shape
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
# Broadcastable
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
# NDFrame methods/attributes that may be forwarded through a GroupBy
# object via attribute access (checked in _make_wrapper).
_common_apply_whitelist = frozenset([
    'last', 'first',
    'head', 'tail', 'median',
    'mean', 'sum', 'min', 'max',
    'cumcount', 'ngroup',
    'resample',
    'rank', 'quantile',
    'fillna',
    'mad',
    'any', 'all',
    'take',
    'idxmax', 'idxmin',
    'shift', 'tshift',
    'ffill', 'bfill',
    'pct_change', 'skew',
    'corr', 'cov', 'diff',
]) | _plotting_methods
# Series groupbys: add Series-only names, drop 'boxplot' (DataFrame-only).
_series_apply_whitelist = ((_common_apply_whitelist |
                            {'nlargest', 'nsmallest',
                             'is_monotonic_increasing',
                             'is_monotonic_decreasing'}) -
                           {'boxplot'}) | frozenset(['dtype', 'unique'])
# DataFrame groupbys: add DataFrame-only names.
_dataframe_apply_whitelist = ((_common_apply_whitelist |
                               frozenset(['dtypes', 'corrwith'])) -
                              {'boxplot'})
# Cython-implemented transformations (output indexed like the input).
_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift',
                                'cummin', 'cummax'])
# Kernels excluded from the post-aggregation dtype re-cast — presumably
# because their outputs (ranks/counts/sizes) carry their own dtype.
_cython_cast_blacklist = frozenset(['rank', 'count', 'size'])
class Grouper(object):
    """
    A Grouper allows the user to specify a groupby instruction for a target
    object
    This specification will select a column via the key parameter, or if the
    level and/or axis parameters are given, a level of the index of the target
    object.
    These are local specifications and will override 'global' settings,
    that is the parameters axis and level which are passed to the groupby
    itself.
    Parameters
    ----------
    key : string, defaults to None
        groupby key, which selects the grouping column of the target
    level : name/number, defaults to None
        the level for the target index
    freq : string / frequency object, defaults to None
        This will groupby the specified frequency if the target selection
        (via key or level) is a datetime-like object. For full specification
        of available frequencies, please see `here
        <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`_.
    axis : number/name of the axis, defaults to 0
    sort : boolean, default to False
        whether to sort the resulting labels
    additional kwargs to control time-like groupers (when ``freq`` is passed)
    closed : closed end of interval; 'left' or 'right'
    label : interval boundary to use for labeling; 'left' or 'right'
    convention : {'start', 'end', 'e', 's'}
        If grouper is PeriodIndex
    base, loffset
    Returns
    -------
    A specification for a groupby instruction
    Examples
    --------
    Syntactic sugar for ``df.groupby('A')``
    >>> df.groupby(Grouper(key='A'))
    Specify a resample operation on the column 'date'
    >>> df.groupby(Grouper(key='date', freq='60s'))
    Specify a resample operation on the level 'date' on the columns axis
    with a frequency of 60s
    >>> df.groupby(Grouper(level='date', freq='60s', axis=1))
    """
    # Attributes rendered by __repr__ when set (non-None).
    _attributes = ('key', 'level', 'freq', 'axis', 'sort')
    def __new__(cls, *args, **kwargs):
        # With a freq, transparently construct the resample-aware
        # TimeGrouper subclass instead of a plain Grouper.
        if kwargs.get('freq') is not None:
            from pandas.core.resample import TimeGrouper
            cls = TimeGrouper
        return super(Grouper, cls).__new__(cls)
    def __init__(self, key=None, level=None, freq=None, axis=0, sort=False):
        self.key = key
        self.level = level
        self.freq = freq
        self.axis = axis
        self.sort = sort
        # Populated lazily by _set_grouper / _get_grouper.
        self.grouper = None
        self.obj = None
        self.indexer = None
        self.binner = None
        self._grouper = None
    @property
    def ax(self):
        # The Index currently being grouped on.
        return self.grouper
    def _get_grouper(self, obj, validate=True):
        """
        Parameters
        ----------
        obj : the subject object
        validate : boolean, default True
            if True, validate the grouper
        Returns
        -------
        a tuple of binner, grouper, obj (possibly sorted)
        """
        self._set_grouper(obj)
        # Delegate to the module-level _get_grouper to resolve the actual
        # grouping machinery for this object/key combination.
        self.grouper, exclusions, self.obj = _get_grouper(self.obj, [self.key],
                                                          axis=self.axis,
                                                          level=self.level,
                                                          sort=self.sort,
                                                          validate=validate)
        return self.binner, self.grouper, self.obj
    def _set_grouper(self, obj, sort=False):
        """
        given an object and the specifications, setup the internal grouper
        for this particular specification
        Parameters
        ----------
        obj : the subject object
        sort : bool, default False
            whether the resulting grouper should be sorted
        """
        if self.key is not None and self.level is not None:
            raise ValueError(
                "The Grouper cannot specify both a key and a level!")
        # Keep self.grouper value before overriding
        if self._grouper is None:
            self._grouper = self.grouper
        # the key must be a valid info item
        if self.key is not None:
            key = self.key
            # The 'on' is already defined
            if getattr(self.grouper, 'name', None) == key and \
                    isinstance(obj, ABCSeries):
                # Reuse the previously computed grouper, aligned to the
                # Series' index.
                ax = self._grouper.take(obj.index)
            else:
                if key not in obj._info_axis:
                    raise KeyError(
                        "The grouper name {0} is not found".format(key))
                ax = Index(obj[key], name=key)
        else:
            ax = obj._get_axis(self.axis)
            if self.level is not None:
                level = self.level
                # if a level is given it must be a mi level or
                # equivalent to the axis name
                if isinstance(ax, MultiIndex):
                    level = ax._get_level_number(level)
                    ax = Index(ax._get_level_values(level),
                               name=ax.names[level])
                else:
                    if level not in (0, ax.name):
                        raise ValueError(
                            "The level {0} is not valid".format(level))
        # possibly sort
        if (self.sort or sort) and not ax.is_monotonic:
            # use stable sort to support first, last, nth
            indexer = self.indexer = ax.argsort(kind='mergesort')
            ax = ax.take(indexer)
            obj = obj._take(indexer, axis=self.axis,
                            convert=False, is_copy=False)
        self.obj = obj
        self.grouper = ax
        return self.grouper
    @property
    def groups(self):
        # dict of {group name -> group labels}, from the resolved grouper.
        return self.grouper.groups
    def __repr__(self):
        # Show only explicitly-set (non-None) attributes.
        attrs_list = ["{}={!r}".format(attr_name, getattr(self, attr_name))
                      for attr_name in self._attributes
                      if getattr(self, attr_name) is not None]
        attrs = ", ".join(attrs_list)
        cls_name = self.__class__.__name__
        return "{}({})".format(cls_name, attrs)
class GroupByPlot(PandasObject):
    """
    Class implementing the .plot attribute for groupby objects
    """
    def __init__(self, groupby):
        self._groupby = groupby
    def __call__(self, *args, **kwargs):
        # Delegate to each group's ``.plot`` via apply; the wrapper keeps
        # the name 'plot' so plotting calls are recognized downstream.
        def plot_caller(grp):
            return grp.plot(*args, **kwargs)
        plot_caller.__name__ = 'plot'
        return self._groupby.apply(plot_caller)
    def __getattr__(self, name):
        # Any other attribute (e.g. ``.hist``) becomes a callable that is
        # likewise applied group-wise through the groupby object.
        def attr(*args, **kwargs):
            def method_caller(grp):
                return getattr(grp.plot, name)(*args, **kwargs)
            return self._groupby.apply(method_caller)
        return attr
class _GroupBy(PandasObject, SelectionMixin):
_group_selection = None
_apply_whitelist = frozenset([])
    def __init__(self, obj, keys=None, axis=0, level=None,
                 grouper=None, exclusions=None, selection=None, as_index=True,
                 sort=True, group_keys=True, squeeze=False, **kwargs):
        # selection: explicit column subset (set via __getitem__ elsewhere)
        self._selection = selection
        if isinstance(obj, NDFrame):
            obj._consolidate_inplace()
        self.level = level
        # as_index=False (SQL-style grouped output) only makes sense for
        # DataFrame groupbys along axis 0.
        if not as_index:
            if not isinstance(obj, DataFrame):
                raise TypeError('as_index=False only valid with DataFrame')
            if axis != 0:
                raise ValueError('as_index=False only valid for axis=0')
        self.as_index = as_index
        self.keys = keys
        self.sort = sort
        self.group_keys = group_keys
        self.squeeze = squeeze
        self.mutated = kwargs.pop('mutated', False)
        # Resolve the grouping machinery unless a pre-built grouper was given.
        if grouper is None:
            grouper, exclusions, obj = _get_grouper(obj, keys,
                                                    axis=axis,
                                                    level=level,
                                                    sort=sort,
                                                    mutated=self.mutated)
        self.obj = obj
        self.axis = obj._get_axis_number(axis)
        self.grouper = grouper
        # exclusions: column names consumed by the grouping itself.
        self.exclusions = set(exclusions) if exclusions else set()
        # we accept no other args
        validate_kwargs('group', kwargs, {})
    def __len__(self):
        # Number of groups.
        return len(self.groups)
    def __unicode__(self):
        # TODO: Better unicode/repr for GroupBy object
        return object.__repr__(self)
    def _assure_grouper(self):
        """
        we create the grouper on instantiation
        sub-classes may have a different policy
        """
        pass
    @property
    def groups(self):
        """ dict {group name -> group labels} """
        self._assure_grouper()
        return self.grouper.groups
    @property
    def ngroups(self):
        # Number of distinct groups, delegated to the grouper.
        self._assure_grouper()
        return self.grouper.ngroups
    @property
    def indices(self):
        """ dict {group name -> group indices} """
        self._assure_grouper()
        return self.grouper.indices
def _get_indices(self, names):
"""
safe get multiple indices, translate keys for
datelike to underlying repr
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp, datetime.datetime)):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = ("must supply a tuple to get_group with multiple"
" grouping keys")
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError:
# turns out it wasn't a tuple
msg = ("must supply a a same-length tuple to get_group"
" with multiple grouping keys")
raise ValueError(msg)
converters = [get_converter(s) for s in index_sample]
names = [tuple(f(n) for f, n in zip(converters, name))
for name in names]
else:
converter = get_converter(index_sample)
names = [converter(name) for name in names]
return [self.indices.get(name, []) for name in names]
    def _get_index(self, name):
        """ safe get index, translate keys for datelike to underlying repr """
        return self._get_indices([name])[0]
    @cache_readonly
    def _selected_obj(self):
        # The object this GroupBy operates on, honoring either an explicit
        # column selection (_selection) or a grouper-driven group selection.
        if self._selection is None or isinstance(self.obj, Series):
            if self._group_selection is not None:
                return self.obj[self._group_selection]
            return self.obj
        else:
            return self.obj[self._selection]
    def _reset_group_selection(self):
        """
        Clear group based selection. Used for methods needing to return info on
        each group regardless of whether a group selection was previously set.
        """
        if self._group_selection is not None:
            self._group_selection = None
            # GH12839 clear cached selection too when changing group selection
            self._reset_cache('_selected_obj')
    def _set_group_selection(self):
        """
        Create group based selection. Used when selection is not passed
        directly but instead via a grouper.
        """
        grp = self.grouper
        # Only applies to as_index frames grouped by in-axis, non-level keys.
        if self.as_index and getattr(grp, 'groupings', None) is not None and \
                self.obj.ndim > 1:
            ax = self.obj._info_axis
            groupers = [g.name for g in grp.groupings
                        if g.level is None and g.in_axis]
            if len(groupers):
                # Select every column except the grouping columns.
                self._group_selection = ax.difference(Index(groupers)).tolist()
        # GH12839 clear selected obj cache when group selection changes
        self._reset_cache('_selected_obj')
    def _set_result_index_ordered(self, result):
        # set the result index on the passed values object and
        # return the new object, xref 8046
        # the values/counts are repeated according to the group index
        # shortcut if we have an already ordered grouper
        if not self.grouper.is_monotonic:
            # Map rows back to original positions: concatenate the group
            # indices in result-index order, then sort back into place.
            index = Index(np.concatenate(
                self._get_indices(self.grouper.result_index)))
            result.set_axis(index, axis=self.axis, inplace=True)
            result = result.sort_index(axis=self.axis)
        result.set_axis(self.obj._get_axis(self.axis), axis=self.axis,
                        inplace=True)
        return result
    def _dir_additions(self):
        # Extra names for dir(): object's own additions plus the whitelist
        # of forwardable NDFrame methods.
        return self.obj._dir_additions() | self._apply_whitelist
    def __getattr__(self, attr):
        # Resolution order: internal names, then column selection, then
        # whitelisted forwarding to the underlying object.
        if attr in self._internal_names_set:
            return object.__getattribute__(self, attr)
        if attr in self.obj:
            return self[attr]
        if hasattr(self.obj, attr):
            return self._make_wrapper(attr)
        raise AttributeError("%r object has no attribute %r" %
                             (type(self).__name__, attr))
@Substitution(klass='GroupBy',
versionadded='.. versionadded:: 0.21.0',
examples="""\
>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> df
A B
0 a 1
1 b 2
2 a 3
3 b 4
To get the difference between each groups maximum and minimum value in one
pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
B
A
a 2
b 2""")
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return com._pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
    def _make_wrapper(self, name):
        """
        Build a group-wise callable that dispatches attribute ``name`` of the
        selected object to each group, with several fallbacks (with-axis
        call, plain call, then item-by-item aggregation).

        Raises AttributeError if ``name`` is not in the apply whitelist.
        """
        if name not in self._apply_whitelist:
            is_callable = callable(getattr(self._selected_obj, name, None))
            kind = ' callable ' if is_callable else ' '
            msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
                   "using the 'apply' method".format(kind, name,
                                                     type(self).__name__))
            raise AttributeError(msg)

        # need to setup the selection
        # as are not passed directly but in the grouper
        self._set_group_selection()

        f = getattr(self._selected_obj, name)
        if not isinstance(f, types.MethodType):
            # a plain (data) attribute, not a bound method: just fetch it
            # per group
            return self.apply(lambda self: getattr(self, name))

        # fetch the unbound function so it can be applied to each group
        f = getattr(type(self._selected_obj), name)

        def wrapper(*args, **kwargs):
            # a little trickery for aggregation functions that need an axis
            # argument
            kwargs_with_axis = kwargs.copy()
            if 'axis' not in kwargs_with_axis or \
               kwargs_with_axis['axis'] is None:
                kwargs_with_axis['axis'] = self.axis

            def curried_with_axis(x):
                return f(x, *args, **kwargs_with_axis)

            def curried(x):
                return f(x, *args, **kwargs)

            # preserve the name so we can detect it when calling plot methods,
            # to avoid duplicates
            curried.__name__ = curried_with_axis.__name__ = name

            # special case otherwise extra plots are created when catching the
            # exception below
            if name in _plotting_methods:
                return self.apply(curried)

            try:
                return self.apply(curried_with_axis)
            except Exception:
                try:
                    return self.apply(curried)
                except Exception:

                    # related to : GH3688
                    # try item-by-item
                    # this can be called recursively, so need to raise
                    # ValueError
                    # if we don't have this method to indicated to aggregate to
                    # mark this column as an error
                    try:
                        return self._aggregate_item_by_item(name,
                                                            *args, **kwargs)
                    except (AttributeError):
                        raise ValueError

        return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj._take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
@Appender(_apply_docs['template']
.format(input="dataframe",
examples=_apply_docs['dataframe_examples']))
def apply(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their callable functions prior, this
# wouldn't be needed
if args or kwargs:
if callable(func):
@wraps(func)
def f(g):
with np.errstate(all='ignore'):
return func(g, *args, **kwargs)
else:
raise ValueError('func must be a callable if args or '
'kwargs are supplied')
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment', None):
return self._python_apply_general(f)
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(
keys,
values,
not_indexed_same=mutated or self.mutated)
def _iterate_slices(self):
yield self._selection_name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise com.AbstractMethodError(self)
    def _cumcount_array(self, ascending=True):
        """
        Number each item within its group, in original row order.

        Parameters
        ----------
        ascending : bool, default True
            If False, number in reverse, from length of group - 1 to 0.

        Notes
        -----
        this is currently implementing sort=False
        (though the default is sort=True) for groupby in general
        """
        ids, _, ngroups = self.grouper.group_info
        sorter = get_group_index_sorter(ids, ngroups)
        # work on the ids sorted so each group is a contiguous run
        ids, count = ids[sorter], len(ids)

        if count == 0:
            return np.empty(0, dtype=np.int64)

        # run: True at the first element of each group's run
        run = np.r_[True, ids[:-1] != ids[1:]]
        # rep: length of each run (distance between consecutive run starts)
        rep = np.diff(np.r_[np.nonzero(run)[0], count])
        # out: global count of non-start positions; normalized per group below
        out = (~run).cumsum()

        if ascending:
            # subtract each group's starting offset -> 0..len-1 within group
            out -= np.repeat(out[run], rep)
        else:
            # subtract from each group's last offset -> len-1..0 within group
            out = np.repeat(out[np.r_[run[1:], True]], rep) - out

        # invert the sort to return counts in the original row order
        rev = np.empty(count, dtype=np.intp)
        rev[sorter] = np.arange(count, dtype=np.intp)
        return out[rev].astype(np.int64, copy=False)
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original._get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj, numeric_only=False):
"""
try to cast the result to our obj original type,
we may have roundtripped thru object in the mean-time
if numeric_only is True, then only try to cast numerics
and not datetimelikes
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not is_scalar(result):
if numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def _transform_should_cast(self, func_nm):
"""
Parameters:
-----------
func_nm: str
The name of the aggregation function being performed
Returns:
--------
bool
Whether transform should attempt to cast the result of aggregation
"""
return (self.size().fillna(0) > 0).any() and (func_nm not in
_cython_cast_blacklist)
def _cython_transform(self, how, numeric_only=True, **kwargs):
output = collections.OrderedDict()
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.transform(obj.values, how,
**kwargs)
except NotImplementedError:
continue
except AssertionError as e:
raise GroupByError(str(e))
if self._transform_should_cast(how):
output[name] = self._try_cast(result, obj)
else:
output[name] = result
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_transformed_output(output, names)
def _cython_agg_general(self, how, alt=None, numeric_only=True,
min_count=-1):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how,
min_count=min_count)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj, numeric_only=True)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = _ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise com.AbstractMethodError(self)
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.core.reshape.concat import concat
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in com._not_none(*values):
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
# this is a very unfortunate situation
# we have a multi-index that is NOT lexsorted
# and we have a result which is duplicated
# we can't reindex, so we resort to this
# GH 14776
if isinstance(ax, MultiIndex) and not ax.is_unique:
indexer = algorithms.unique1d(
result.index.get_indexer_for(ax.values))
result = result.take(indexer, axis=self.axis)
else:
result = result.reindex(ax, axis=self.axis)
elif self.group_keys:
values = reset_identity(values)
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
values = reset_identity(values)
result = concat(values, axis=self.axis)
if (isinstance(result, Series) and
getattr(self, '_selection_name', None) is not None):
result.name = self._selection_name
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype='int64')
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
class GroupBy(_GroupBy):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
    def _bool_agg(self, val_test, skipna):
        """Shared func to call any / all Cython GroupBy implementations"""

        def objs_to_bool(vals):
            # coerce values to 0/1 uint8 for the Cython kernel; object
            # dtype cannot astype(bool) directly, so fall back to a
            # python-level bool() of each element
            try:
                vals = vals.astype(np.bool)
            except ValueError:  # for objects
                vals = np.array([bool(x) for x in vals])

            return vals.view(np.uint8)

        def result_to_bool(result):
            # the kernel writes uint8; convert back to a boolean result
            return result.astype(np.bool, copy=False)

        return self._get_cythonized_result('group_any_all', self.grouper,
                                           aggregate=True,
                                           cython_dtype=np.uint8,
                                           needs_values=True,
                                           needs_mask=True,
                                           pre_processing=objs_to_bool,
                                           post_processing=result_to_bool,
                                           val_test=val_test, skipna=skipna)
@Substitution(name='groupby')
@Appender(_doc_template)
def any(self, skipna=True):
"""Returns True if any value in the group is truthful, else False
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing
"""
return self._bool_agg('any', skipna)
@Substitution(name='groupby')
@Appender(_doc_template)
def all(self, skipna=True):
"""Returns True if all values in the group are truthful, else False
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing
"""
return self._bool_agg('all', skipna)
@Substitution(name='groupby')
@Appender(_doc_template)
def count(self):
"""Compute count of group, excluding missing values"""
# defined here for API doc
raise NotImplementedError
@Substitution(name='groupby')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
nv.validate_groupby_func('mean', args, kwargs, ['numeric_only'])
try:
return self._cython_agg_general('mean', **kwargs)
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_group_selection()
f = lambda x: x.mean(axis=self.axis, **kwargs)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def median(self, **kwargs):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median', **kwargs)
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_group_selection()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis, **kwargs)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def std(self, ddof=1, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
# TODO: implement at Cython level?
nv.validate_groupby_func('std', args, kwargs)
return np.sqrt(self.var(ddof=ddof, **kwargs))
@Substitution(name='groupby')
@Appender(_doc_template)
def var(self, ddof=1, *args, **kwargs):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_groupby_func('var', args, kwargs)
if ddof == 1:
return self._cython_agg_general('var', **kwargs)
else:
self._set_group_selection()
f = lambda x: x.var(ddof=ddof, **kwargs)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
return self.std(ddof=ddof) / np.sqrt(self.count())
@Substitution(name='groupby')
@Appender(_doc_template)
def size(self):
"""Compute group sizes"""
result = self.grouper.size()
if isinstance(self.obj, Series):
result.name = getattr(self.obj, 'name', None)
return result
@classmethod
def _add_numeric_operations(cls):
""" add numeric operations to the GroupBy generically """
def groupby_function(name, alias, npfunc,
numeric_only=True, _convert=False,
min_count=-1):
_local_template = "Compute %(f)s of group values"
@Substitution(name='groupby', f=name)
@Appender(_doc_template)
@Appender(_local_template)
def f(self, **kwargs):
if 'numeric_only' not in kwargs:
kwargs['numeric_only'] = numeric_only
if 'min_count' not in kwargs:
kwargs['min_count'] = min_count
self._set_group_selection()
try:
return self._cython_agg_general(
alias, alt=npfunc, **kwargs)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(
lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result._convert(datetime=True)
return result
set_function_name(f, name, cls)
return f
def first_compat(x, axis=0):
def first(x):
x = np.asarray(x)
x = x[notna(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(first, axis=axis)
else:
return first(x)
def last_compat(x, axis=0):
def last(x):
x = np.asarray(x)
x = x[notna(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(last, axis=axis)
else:
return last(x)
cls.sum = groupby_function('sum', 'add', np.sum, min_count=0)
cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0)
cls.min = groupby_function('min', 'min', np.min, numeric_only=False)
cls.max = groupby_function('max', 'max', np.max, numeric_only=False)
cls.first = groupby_function('first', 'first', first_compat,
numeric_only=False, _convert=True)
cls.last = groupby_function('last', 'last', last_compat,
numeric_only=False, _convert=True)
@Substitution(name='groupby')
@Appender(_doc_template)
def ohlc(self):
"""
Compute sum of values, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
@Appender(DataFrame.describe.__doc__)
def describe(self, **kwargs):
self._set_group_selection()
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
@Substitution(name='groupby')
@Appender(_doc_template)
def resample(self, rule, *args, **kwargs):
"""
Provide resampling when using a TimeGrouper
Return a new grouper with our resampler appended
"""
from pandas.core.resample import get_resampler_for_grouping
return get_resampler_for_grouping(self, rule, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling
functionality per group
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def expanding(self, *args, **kwargs):
"""
Return an expanding grouper, providing expanding
functionality per group
"""
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(self, *args, **kwargs)
def _fill(self, direction, limit=None):
"""Shared function for `pad` and `backfill` to call Cython method
Parameters
----------
direction : {'ffill', 'bfill'}
Direction passed to underlying Cython function. `bfill` will cause
values to be filled backwards. `ffill` and any other values will
default to a forward fill
limit : int, default None
Maximum number of consecutive values to fill. If `None`, this
method will convert to -1 prior to passing to Cython
Returns
-------
`Series` or `DataFrame` with filled values
See Also
--------
pad
backfill
"""
# Need int value for Cython
if limit is None:
limit = -1
return self._get_cythonized_result('group_fillna_indexer',
self.grouper, needs_mask=True,
cython_dtype=np.int64,
result_is_index=True,
direction=direction, limit=limit)
@Substitution(name='groupby')
def pad(self, limit=None):
"""
Forward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.pad
DataFrame.pad
Series.fillna
DataFrame.fillna
"""
return self._fill('ffill', limit=limit)
ffill = pad
@Substitution(name='groupby')
def backfill(self, limit=None):
"""
Backward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.backfill
DataFrame.backfill
Series.fillna
DataFrame.fillna
"""
return self._fill('bfill', limit=limit)
bfill = backfill
@Substitution(name='groupby')
@Appender(_doc_template)
def nth(self, n, dropna=None):
"""
Take the nth row from each group if n is an int, or a subset of rows
if n is a list of ints.
If dropna, will take the nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame);
this is equivalent to calling dropna(how=dropna) before the
groupby.
Parameters
----------
n : int or list of ints
a single nth value for the row or a list of nth values
dropna : None or str, optional
apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
B
A
1 NaN
2 3.0
>>> g.nth(1)
B
A
1 2.0
2 5.0
>>> g.nth(-1)
B
A
1 4.0
2 5.0
>>> g.nth([0, 1])
B
A
1 NaN
1 2.0
2 3.0
2 5.0
Specifying ``dropna`` allows count ignoring NaN
>>> g.nth(0, dropna='any')
B
A
1 2.0
2 3.0
NaNs denote group exhausted when using dropna
>>> g.nth(3, dropna='any')
B
A
1 NaN
2 NaN
Specifying ``as_index=False`` in ``groupby`` keeps the original index.
>>> df.groupby('A', as_index=False).nth(1)
A B
1 1 2.0
4 2 5.0
"""
if isinstance(n, int):
nth_values = [n]
elif isinstance(n, (set, list, tuple)):
nth_values = list(set(n))
if dropna is not None:
raise ValueError(
"dropna option with a list of nth values is not supported")
else:
raise TypeError("n needs to be an int or a list/set/tuple of ints")
nth_values = np.array(nth_values, dtype=np.intp)
self._set_group_selection()
if not dropna:
mask = np.in1d(self._cumcount_array(), nth_values) | \
np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)
out = self._selected_obj[mask]
if not self.as_index:
return out
ids, _, _ = self.grouper.group_info
out.index = self.grouper.result_index[ids[mask]]
return out.sort_index() if self.sort else out
if dropna not in ['any', 'all']:
if isinstance(self._selected_obj, Series) and dropna is True:
warnings.warn("the dropna='%s' keyword is deprecated,"
"use dropna='all' instead. "
"For a Series groupby, dropna must be "
"either None, 'any' or 'all'." % (dropna),
FutureWarning,
stacklevel=2)
dropna = 'all'
else:
# Note: when agg-ing picker doesn't raise this,
# just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available
# (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
else:
# create a grouper with the original parameters, but on the dropped
# object
grouper, _, _ = _get_grouper(dropped, key=self.keys,
axis=self.axis, level=self.level,
sort=self.sort,
mutated=self.mutated)
grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
sizes, result = grb.size(), grb.nth(n)
mask = (sizes < max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or \
len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
@Substitution(name='groupby')
def ngroup(self, ascending=True):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
.. versionadded:: 0.20.2
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame({"A": list("aaabba")})
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').ngroup()
0 0
1 0
2 0
3 1
4 1
5 0
dtype: int64
>>> df.groupby('A').ngroup(ascending=False)
0 1
1 1
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
0 0
1 0
2 1
3 3
4 2
5 0
dtype: int64
See also
--------
.cumcount : Number the rows in each group.
"""
self._set_group_selection()
index = self._selected_obj.index
result = Series(self.grouper.group_info[0], index)
if not ascending:
result = self.ngroups - 1 - result
return result
@Substitution(name='groupby')
def cumcount(self, ascending=True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
See also
--------
.ngroup : Number the groups themselves.
"""
self._set_group_selection()
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def rank(self, method='average', ascending=True, na_option='keep',
             pct=False, axis=0):
        """Provides the rank of values within each group

        Parameters
        ----------
        method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
            * average: average rank of group
            * min: lowest rank in group
            * max: highest rank in group
            * first: ranks assigned in order they appear in the array
            * dense: like 'min', but rank always increases by 1 between groups
        na_option : {'keep', 'top', 'bottom'}, default 'keep'
            * keep: leave NA values where they are
            * top: smallest rank if ascending
            * bottom: smallest rank if descending
        ascending : boolean, default True
            False for ranks by high (1) to low (N)
        pct : boolean, default False
            Compute percentage rank of data within each group
        axis : int, default 0
            The axis of the object over which to compute the rank

        Returns
        -------
        DataFrame with ranking of values within each group
        """
        return self._cython_transform('rank', numeric_only=False,
                                      ties_method=method, ascending=ascending,
                                      na_option=na_option, pct=pct, axis=axis)
@Substitution(name='groupby')
@Appender(_doc_template)
def cumprod(self, axis=0, *args, **kwargs):
"""Cumulative product for each group"""
nv.validate_groupby_func('cumprod', args, kwargs,
['numeric_only', 'skipna'])
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
return self._cython_transform('cumprod', **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def cumsum(self, axis=0, *args, **kwargs):
"""Cumulative sum for each group"""
nv.validate_groupby_func('cumsum', args, kwargs,
['numeric_only', 'skipna'])
if axis != 0:
return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))
return self._cython_transform('cumsum', **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def cummin(self, axis=0, **kwargs):
"""Cumulative min for each group"""
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
return self._cython_transform('cummin', numeric_only=False)
@Substitution(name='groupby')
@Appender(_doc_template)
def cummax(self, axis=0, **kwargs):
"""Cumulative max for each group"""
if axis != 0:
return self.apply(lambda x: np.maximum.accumulate(x, axis))
return self._cython_transform('cummax', numeric_only=False)
def _get_cythonized_result(self, how, grouper, aggregate=False,
cython_dtype=None, needs_values=False,
needs_mask=False, needs_ngroups=False,
result_is_index=False,
pre_processing=None, post_processing=None,
**kwargs):
"""Get result for Cythonized functions
Parameters
----------
how : str, Cythonized function name to be called
grouper : Grouper object containing pertinent group info
aggregate : bool, default False
Whether the result should be aggregated to match the number of
groups
cython_dtype : default None
Type of the array that will be modified by the Cython call. If
`None`, the type will be inferred from the values of each slice
needs_values : bool, default False
Whether the values should be a part of the Cython call
signature
needs_mask : bool, default False
Whether boolean mask needs to be part of the Cython call
signature
needs_ngroups : bool, default False
Whether number of groups is part of the Cython call signature
result_is_index : bool, default False
Whether the result of the Cython operation is an index of
values to be retrieved, instead of the actual values themselves
pre_processing : function, default None
Function to be applied to `values` prior to passing to Cython
Raises if `needs_values` is False
post_processing : function, default None
Function to be applied to result of Cython function
**kwargs : dict
Extra arguments to be passed back to Cython funcs
Returns
-------
`Series` or `DataFrame` with filled values
"""
if result_is_index and aggregate:
raise ValueError("'result_is_index' and 'aggregate' cannot both "
"be True!")
if post_processing:
if not callable(pre_processing):
raise ValueError("'post_processing' must be a callable!")
if pre_processing:
if not callable(pre_processing):
raise ValueError("'pre_processing' must be a callable!")
if not needs_values:
raise ValueError("Cannot use 'pre_processing' without "
"specifying 'needs_values'!")
labels, _, ngroups = grouper.group_info
output = collections.OrderedDict()
base_func = getattr(libgroupby, how)
for name, obj in self._iterate_slices():
if aggregate:
result_sz = ngroups
else:
result_sz = len(obj.values)
if not cython_dtype:
cython_dtype = obj.values.dtype
result = np.zeros(result_sz, dtype=cython_dtype)
func = partial(base_func, result, labels)
if needs_values:
vals = obj.values
if pre_processing:
vals = pre_processing(vals)
func = partial(func, vals)
if needs_mask:
mask = isnull(obj.values).view(np.uint8)
func = partial(func, mask)
if needs_ngroups:
func = partial(func, ngroups)
func(**kwargs) # Call func to modify indexer values in place
if result_is_index:
result = algorithms.take_nd(obj.values, result)
if post_processing:
result = post_processing(result)
output[name] = result
if aggregate:
return self._wrap_aggregated_output(output)
else:
return self._wrap_transformed_output(output)
@Substitution(name='groupby')
@Appender(_doc_template)
def shift(self, periods=1, freq=None, axis=0):
"""
Shift each group by periods observations
Parameters
----------
periods : integer, default 1
number of periods to shift
freq : frequency string
axis : axis to shift, default 0
"""
if freq is not None or axis != 0:
return self.apply(lambda x: x.shift(periods, freq, axis))
return self._get_cythonized_result('group_shift_indexer',
self.grouper, cython_dtype=np.int64,
needs_ngroups=True,
result_is_index=True,
periods=periods)
@Substitution(name='groupby')
@Appender(_doc_template)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
self._reset_group_selection()
mask = self._cumcount_array() < n
return self._selected_obj[mask]
@Substitution(name='groupby')
@Appender(_doc_template)
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').head(1)
A B
0 a 1
2 b 1
"""
self._reset_group_selection()
mask = self._cumcount_array(ascending=False) < n
return self._selected_obj[mask]
GroupBy._add_numeric_operations()
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
    # dispatch on the concrete type of ``obj``; only Series and DataFrame
    # are groupable
    if isinstance(obj, Series):
        return SeriesGroupBy(obj, by, **kwds)
    if isinstance(obj, DataFrame):
        return DataFrameGroupBy(obj, by, **kwds)
    raise TypeError('invalid type: %s' % type(obj))  # pragma: no cover
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
    """
    This is an internal Grouper class, which actually holds
    the generated groups

    Parameters
    ----------
    axis : int
        the axis to group
    groupings : array of grouping
        all the grouping instances to handle in this grouper
        for example for grouper list to groupby, need to pass the list
    sort : boolean, default True
        whether this grouper will give sorted result or not
    group_keys : boolean, default True
    mutated : boolean, default False
    indexer : intp array, optional
        the indexer created by Grouper
        some groupers (TimeGrouper) will sort its axis and its
        group_info is also sorted, so need the indexer to reorder
    """

    def __init__(self, axis, groupings, sort=True, group_keys=True,
                 mutated=False, indexer=None):
        # with a single grouping there is nothing to compress, and empty
        # groups are kept (no filtering of all-empty result rows)
        self._filter_empty_groups = self.compressed = len(groupings) != 1
        self.axis = axis
        self.groupings = groupings
        self.sort = sort
        self.group_keys = group_keys
        self.mutated = mutated
        self.indexer = indexer

    @property
    def shape(self):
        # number of distinct groups along each grouping
        return tuple(ping.ngroups for ping in self.groupings)

    def __iter__(self):
        return iter(self.indices)

    @property
    def nkeys(self):
        return len(self.groupings)

    def get_iterator(self, data, axis=0):
        """
        Groupby iterator

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        splitter = self._get_splitter(data, axis=axis)
        keys = self._get_group_keys()
        for key, (i, group) in zip(keys, splitter):
            yield key, group

    def _get_splitter(self, data, axis=0):
        # a DataSplitter yielding (position, subsetted object) per group
        comp_ids, _, ngroups = self.group_info
        return get_splitter(data, comp_ids, ngroups, axis=axis)

    def _get_group_keys(self):
        if len(self.groupings) == 1:
            return self.levels[0]
        else:
            comp_ids, _, ngroups = self.group_info

            # provide "flattened" iterator for multi-group setting
            return get_flattened_iterator(comp_ids,
                                          ngroups,
                                          self.levels,
                                          self.labels)

    def apply(self, f, data, axis=0):
        """
        Apply ``f`` group-wise, returning (keys, values, mutated) where
        ``mutated`` records whether ``f`` changed a group's shape/index.
        """
        mutated = self.mutated
        splitter = self._get_splitter(data, axis=axis)
        group_keys = self._get_group_keys()

        # oh boy
        f_name = com._get_callable_name(f)
        if (f_name not in _plotting_methods and
                hasattr(splitter, 'fast_apply') and axis == 0):
            try:
                values, mutated = splitter.fast_apply(f, group_keys)
                return group_keys, values, mutated
            except reduction.InvalidApply:
                # we detect a mutation of some kind
                # so take slow path
                pass
            except Exception:
                # NOTE(review): any other error also falls through to the
                # slow path below, where calling f(group) will raise it
                # again in a context the caller can see
                pass

        result_values = []
        for key, (i, group) in zip(group_keys, splitter):
            # attach the group key as the subset's name without triggering
            # pandas' __setattr__ machinery
            object.__setattr__(group, 'name', key)

            # group might be modified
            group_axes = _get_axes(group)
            res = f(group)
            if not _is_indexed_like(res, group_axes):
                mutated = True
            result_values.append(res)

        return group_keys, result_values, mutated

    @cache_readonly
    def indices(self):
        """ dict {group name -> group indices} """
        if len(self.groupings) == 1:
            return self.groupings[0].indices
        else:
            label_list = [ping.labels for ping in self.groupings]
            keys = [com._values_from_object(ping.group_index)
                    for ping in self.groupings]
            return get_indexer_dict(label_list, keys)

    @property
    def labels(self):
        # integer codes per grouping
        return [ping.labels for ping in self.groupings]

    @property
    def levels(self):
        # unique group values per grouping
        return [ping.group_index for ping in self.groupings]

    @property
    def names(self):
        return [ping.name for ping in self.groupings]

    def size(self):
        """
        Compute group sizes
        """
        ids, _, ngroup = self.group_info
        ids = _ensure_platform_int(ids)
        if ngroup:
            # -1 ids mark rows not belonging to any group; exclude them
            out = np.bincount(ids[ids != -1], minlength=ngroup)
        else:
            out = ids
        return Series(out,
                      index=self.result_index,
                      dtype='int64')

    @cache_readonly
    def _max_groupsize(self):
        """
        Compute size of largest group
        """
        # For many items in each group this is much faster than
        # self.size().max(), in worst case marginally slower
        if self.indices:
            return max(len(v) for v in self.indices.values())
        else:
            return 0

    @cache_readonly
    def groups(self):
        """ dict {group name -> group labels} """
        if len(self.groupings) == 1:
            return self.groupings[0].groups
        else:
            # combine multiple groupings into tuples of group values
            to_groupby = lzip(*(ping.grouper for ping in self.groupings))
            to_groupby = Index(to_groupby)
            return self.axis.groupby(to_groupby)

    @cache_readonly
    def is_monotonic(self):
        # return if my group orderings are monotonic
        return Index(self.group_info[0]).is_monotonic

    @cache_readonly
    def group_info(self):
        """Return (comp_ids, obs_group_ids, ngroups) for this grouper."""
        comp_ids, obs_group_ids = self._get_compressed_labels()

        ngroups = len(obs_group_ids)
        comp_ids = _ensure_int64(comp_ids)
        return comp_ids, obs_group_ids, ngroups

    @cache_readonly
    def label_info(self):
        # return the labels of items in original grouped axis
        labels, _, _ = self.group_info
        if self.indexer is not None:
            # undo the sort that the grouper (e.g. TimeGrouper) applied
            sorter = np.lexsort((labels, self.indexer))
            labels = labels[sorter]
        return labels

    def _get_compressed_labels(self):
        all_labels = [ping.labels for ping in self.groupings]
        if len(all_labels) > 1:
            # compress the cross-product of multiple groupings down to
            # the combinations actually observed
            group_index = get_group_index(all_labels, self.shape,
                                          sort=True, xnull=True)
            return compress_group_index(group_index, sort=self.sort)

        ping = self.groupings[0]
        return ping.labels, np.arange(len(ping.group_index))

    @cache_readonly
    def ngroups(self):
        return len(self.result_index)

    @property
    def recons_labels(self):
        # reconstruct per-grouping label arrays from the compressed ids
        comp_ids, obs_ids, _ = self.group_info
        labels = (ping.labels for ping in self.groupings)
        return decons_obs_group_ids(comp_ids,
                                    obs_ids, self.shape, labels, xnull=True)

    @cache_readonly
    def result_index(self):
        if not self.compressed and len(self.groupings) == 1:
            return self.groupings[0].group_index.rename(self.names[0])

        return MultiIndex(levels=[ping.group_index for ping in self.groupings],
                          labels=self.recons_labels,
                          verify_integrity=False,
                          names=self.names)

    def get_group_levels(self):
        if not self.compressed and len(self.groupings) == 1:
            return [self.groupings[0].group_index]

        name_list = []
        for ping, labels in zip(self.groupings, self.recons_labels):
            labels = _ensure_platform_int(labels)
            levels = ping.group_index.take(labels)

            name_list.append(levels)

        return name_list

    # ------------------------------------------------------------
    # Aggregation functions

    # mapping of user-level op name -> cython function name (or dict with
    # 'name' plus an 'f' wrapper that curries extra positional args)
    _cython_functions = {
        'aggregate': {
            'add': 'group_add',
            'prod': 'group_prod',
            'min': 'group_min',
            'max': 'group_max',
            'mean': 'group_mean',
            'median': {
                'name': 'group_median'
            },
            'var': 'group_var',
            'first': {
                'name': 'group_nth',
                'f': lambda func, a, b, c, d, e: func(a, b, c, d, 1, -1)
            },
            'last': 'group_last',
            'ohlc': 'group_ohlc',
        },

        'transform': {
            'cumprod': 'group_cumprod',
            'cumsum': 'group_cumsum',
            'cummin': 'group_cummin',
            'cummax': 'group_cummax',
            'rank': {
                'name': 'group_rank',
                'f': lambda func, a, b, c, d, **kwargs: func(
                    a, b, c, d,
                    kwargs.get('ties_method', 'average'),
                    kwargs.get('ascending', True),
                    kwargs.get('pct', False),
                    kwargs.get('na_option', 'keep')
                )
            }
        }
    }

    # number of output columns produced per input column, when not 1
    _cython_arity = {
        'ohlc': 4,  # OHLC
    }

    # ops whose outputs carry their own column names
    _name_functions = {
        'ohlc': lambda *args: ['open', 'high', 'low', 'close']
    }

    def _is_builtin_func(self, arg):
        """
        if we define an builtin function for this argument, return it,
        otherwise return the arg
        """
        return SelectionMixin._builtin_table.get(arg, arg)

    def _get_cython_function(self, kind, how, values, is_numeric):
        """
        Resolve the cython implementation for (kind, how) given the values'
        dtype, currying any extra arguments declared in _cython_functions.

        Raises
        ------
        NotImplementedError
            if no implementation exists for this dtype.
        """
        dtype_str = values.dtype.name

        def get_func(fname):
            # see if there is a fused-type version of function
            # only valid for numeric
            f = getattr(libgroupby, fname, None)
            if f is not None and is_numeric:
                return f

            # otherwise find dtype-specific version, falling back to object
            # (fixed: this previously interpolated ``dtype_str`` on every
            # iteration, so the 'object' fallback was never tried)
            for dt in [dtype_str, 'object']:
                f = getattr(libgroupby, "%s_%s" % (fname, dt), None)
                if f is not None:
                    return f

        ftype = self._cython_functions[kind][how]

        if isinstance(ftype, dict):
            func = afunc = get_func(ftype['name'])

            # a sub-function
            f = ftype.get('f')
            if f is not None:

                def wrapper(*args, **kwargs):
                    return f(afunc, *args, **kwargs)

                # need to curry our sub-function
                func = wrapper

        else:
            func = get_func(ftype)

        if func is None:
            # fixed: the two message fragments previously concatenated to
            # "for thisdtype" (missing space)
            raise NotImplementedError("function is not implemented for this "
                                      "dtype: [how->%s,dtype->%s]" %
                                      (how, dtype_str))

        return func

    def _cython_operation(self, kind, values, how, axis, min_count=-1,
                          **kwargs):
        assert kind in ['transform', 'aggregate']

        # can we do this operation with our cython functions
        # if not raise NotImplementedError

        # we raise NotImplemented if this is an invalid operation
        # entirely, e.g. adding datetimes

        # categoricals are only 1d, so we
        # are not setup for dim transforming
        if is_categorical_dtype(values):
            raise NotImplementedError(
                "categoricals are not support in cython ops ATM")
        elif is_datetime64_any_dtype(values):
            if how in ['add', 'prod', 'cumsum', 'cumprod']:
                raise NotImplementedError(
                    "datetime64 type does not support {} "
                    "operations".format(how))
        elif is_timedelta64_dtype(values):
            if how in ['prod', 'cumprod']:
                raise NotImplementedError(
                    "timedelta64 type does not support {} "
                    "operations".format(how))

        arity = self._cython_arity.get(how, 1)

        vdim = values.ndim
        swapped = False
        if vdim == 1:
            # promote 1d values to a column vector for the 2d cython kernels
            values = values[:, None]
            out_shape = (self.ngroups, arity)
        else:
            if axis > 0:
                swapped = True
                values = values.swapaxes(0, axis)
            if arity > 1:
                raise NotImplementedError("arity of more than 1 is not "
                                          "supported for the 'how' argument")
            out_shape = (self.ngroups,) + values.shape[1:]

        is_datetimelike = needs_i8_conversion(values.dtype)
        is_numeric = is_numeric_dtype(values.dtype)

        if is_datetimelike:
            # operate on the underlying int64 representation
            values = values.view('int64')
            is_numeric = True
        elif is_bool_dtype(values.dtype):
            values = _ensure_float64(values)
        elif is_integer_dtype(values):
            # we use iNaT for the missing value on ints
            # so pre-convert to guard this condition
            if (values == iNaT).any():
                values = _ensure_float64(values)
            else:
                values = values.astype('int64', copy=False)
        elif is_numeric and not is_complex_dtype(values):
            values = _ensure_float64(values)
        else:
            values = values.astype(object)

        try:
            func = self._get_cython_function(
                kind, how, values, is_numeric)
        except NotImplementedError:
            if is_numeric:
                # retry after upcasting to float64, for which kernels
                # always exist
                values = _ensure_float64(values)
                func = self._get_cython_function(
                    kind, how, values, is_numeric)
            else:
                raise

        if how == 'rank':
            out_dtype = 'float'
        else:
            if is_numeric:
                out_dtype = '%s%d' % (values.dtype.kind, values.dtype.itemsize)
            else:
                out_dtype = 'object'

        labels, _, _ = self.group_info

        if kind == 'aggregate':
            result = _maybe_fill(np.empty(out_shape, dtype=out_dtype),
                                 fill_value=np.nan)
            counts = np.zeros(self.ngroups, dtype=np.int64)
            result = self._aggregate(
                result, counts, values, labels, func, is_numeric,
                is_datetimelike, min_count)
        elif kind == 'transform':
            result = _maybe_fill(np.empty_like(values, dtype=out_dtype),
                                 fill_value=np.nan)

            # TODO: min_count
            result = self._transform(
                result, values, labels, func, is_numeric, is_datetimelike,
                **kwargs)

        if is_integer_dtype(result) and not is_datetimelike:
            # iNaT sentinels in an integer result mean missing values;
            # surface them as NaN in a float result
            mask = result == iNaT
            if mask.any():
                result = result.astype('float64')
                result[mask] = np.nan

        if kind == 'aggregate' and \
                self._filter_empty_groups and not counts.all():
            if result.ndim == 2:
                try:
                    result = lib.row_bool_subset(
                        result, (counts > 0).view(np.uint8))
                except ValueError:
                    result = lib.row_bool_subset_object(
                        _ensure_object(result),
                        (counts > 0).view(np.uint8))
            else:
                result = result[counts > 0]

        if vdim == 1 and arity == 1:
            result = result[:, 0]

        if how in self._name_functions:
            # TODO
            names = self._name_functions[how]()
        else:
            names = None

        if swapped:
            result = result.swapaxes(0, axis)

        return result, names

    def aggregate(self, values, how, axis=0, min_count=-1):
        return self._cython_operation('aggregate', values, how, axis,
                                      min_count=min_count)

    def transform(self, values, how, axis=0, **kwargs):
        return self._cython_operation('transform', values, how, axis, **kwargs)

    def _aggregate(self, result, counts, values, comp_ids, agg_func,
                   is_numeric, is_datetimelike, min_count=-1):
        if values.ndim > 3:
            # punting for now
            raise NotImplementedError("number of dimensions is currently "
                                      "limited to 3")
        elif values.ndim > 2:
            # aggregate each 2d slice independently
            for i, chunk in enumerate(values.transpose(2, 0, 1)):

                chunk = chunk.squeeze()
                agg_func(result[:, :, i], counts, chunk, comp_ids,
                         min_count)
        else:
            agg_func(result, counts, values, comp_ids, min_count)

        return result

    def _transform(self, result, values, comp_ids, transform_func,
                   is_numeric, is_datetimelike, **kwargs):

        comp_ids, _, ngroups = self.group_info
        if values.ndim > 3:
            # punting for now
            raise NotImplementedError("number of dimensions is currently "
                                      "limited to 3")
        elif values.ndim > 2:
            for i, chunk in enumerate(values.transpose(2, 0, 1)):

                chunk = chunk.squeeze()
                transform_func(result[:, :, i], values,
                               comp_ids, is_datetimelike, **kwargs)
        else:
            transform_func(result, values, comp_ids, is_datetimelike, **kwargs)

        return result

    def agg_series(self, obj, func):
        # try the cython SeriesGrouper; on any failure, fall back to the
        # pure-python path
        try:
            return self._aggregate_series_fast(obj, func)
        except Exception:
            return self._aggregate_series_pure_python(obj, func)

    def _aggregate_series_fast(self, obj, func):
        func = self._is_builtin_func(func)

        if obj.index._has_complex_internals:
            raise TypeError('Incompatible index for Cython grouper')

        group_index, _, ngroups = self.group_info

        # avoids object / Series creation overhead
        dummy = obj._get_values(slice(None, 0)).to_dense()
        indexer = get_group_index_sorter(group_index, ngroups)
        obj = obj._take(indexer, convert=False).to_dense()
        group_index = algorithms.take_nd(
            group_index, indexer, allow_fill=False)
        grouper = reduction.SeriesGrouper(obj, func, group_index, ngroups,
                                          dummy)
        result, counts = grouper.get_result()
        return result, counts

    def _aggregate_series_pure_python(self, obj, func):

        group_index, _, ngroups = self.group_info

        counts = np.zeros(ngroups, dtype=int)
        result = None

        splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)

        for label, group in splitter:
            res = func(group)
            if result is None:
                if (isinstance(res, (Series, Index, np.ndarray))):
                    # non-scalar results mean func is not an aggregation
                    raise ValueError('Function does not reduce')
                result = np.empty(ngroups, dtype='O')

            counts[label] = group.shape[0]
            result[label] = res

        result = lib.maybe_convert_objects(result, try_float=0)
        return result, counts
def generate_bins_generic(values, binner, closed):
    """
    Generate bin edge offsets and bin labels for one array using another array
    which has bin edge values. Both arrays must be sorted.

    Parameters
    ----------
    values : array of values
    binner : a comparable array of values representing bins into which to bin
        the first array. Note, 'values' end-points must fall within 'binner'
        end-points.
    closed : which end of bin is closed; left (default), right

    Returns
    -------
    bins : array of offsets (into 'values' argument) of bins.
        Zero and last edge are excluded in result, so for instance the first
        bin is values[0:bin[0]] and the last is values[bin[-1]:]
    """
    lenidx = len(values)
    lenbin = len(binner)

    if lenidx <= 0 or lenbin <= 0:
        raise ValueError("Invalid length for values or for binner")

    # check binner fits data
    if values[0] < binner[0]:
        raise ValueError("Values falls before first bin")
    if values[lenidx - 1] > binner[lenbin - 1]:
        raise ValueError("Values falls after last bin")

    bins = np.empty(lenbin - 1, dtype=np.int64)

    # linear scan, presume nothing about values/binner except that it fits ok
    j = 0  # index into values
    for bc, right_edge in enumerate(binner[1:]):
        # advance past every value belonging to the current bin; a value
        # equal to the edge belongs here only when bins are right-closed
        while j < lenidx:
            val = values[j]
            if val < right_edge or (closed == 'right' and val == right_edge):
                j += 1
            else:
                break
        bins[bc] = j

    return bins
class BinGrouper(BaseGrouper):
    """
    This is an internal Grouper class

    Parameters
    ----------
    bins : the split index of binlabels to group the item of axis
    binlabels : the label list
    filter_empty : boolean, default False
    mutated : boolean, default False
    indexer : a intp array

    Examples
    --------
    bins: [2, 4, 6, 8, 10]
    binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',
        '2005-01-05', '2005-01-07', '2005-01-09'],
        dtype='datetime64[ns]', freq='2D')

    the group_info, which contains the label of each item in grouped
    axis, the index of label in label list, group number, is

    (array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)

    means that, the grouped axis has 10 items, can be grouped into 5
    labels, the first and second items belong to the first label, the
    third and forth items belong to the second label, and so on

    """

    def __init__(self, bins, binlabels, filter_empty=False, mutated=False,
                 indexer=None):
        # bins are cumulative end-offsets into the (sorted) grouped axis;
        # binlabels[i] labels the slice ending at bins[i]
        self.bins = _ensure_int64(bins)
        self.binlabels = _ensure_index(binlabels)
        self._filter_empty_groups = filter_empty
        self.mutated = mutated
        self.indexer = indexer

    @cache_readonly
    def groups(self):
        """ dict {group name -> group labels} """

        # this is mainly for compat
        # GH 3881
        result = {}
        for key, value in zip(self.binlabels, self.bins):
            if key is not NaT:
                result[key] = value
        return result

    @property
    def nkeys(self):
        # binning always groups by a single key
        return 1

    def get_iterator(self, data, axis=0):
        """
        Groupby iterator

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        if isinstance(data, NDFrame):
            slicer = lambda start, edge: data._slice(
                slice(start, edge), axis=axis)
            length = len(data.axes[axis])
        else:
            slicer = lambda start, edge: data[slice(start, edge)]
            length = len(data)

        start = 0
        for edge, label in zip(self.bins, self.binlabels):
            if label is not NaT:
                yield label, slicer(start, edge)
            start = edge

        # any trailing items beyond the last recorded edge form a final group
        if start < length:
            yield self.binlabels[-1], slicer(start, None)

    @cache_readonly
    def indices(self):
        indices = collections.defaultdict(list)

        # NOTE: `bin` here shadows the builtin; each bin covers positions
        # [i, bin) of the grouped axis
        i = 0
        for label, bin in zip(self.binlabels, self.bins):
            if i < bin:
                if label is not NaT:
                    indices[label] = list(range(i, bin))
            i = bin
        return indices

    @cache_readonly
    def group_info(self):
        ngroups = self.ngroups
        obs_group_ids = np.arange(ngroups)
        # per-bin repeat counts derived from consecutive edge differences
        rep = np.diff(np.r_[0, self.bins])

        rep = _ensure_platform_int(rep)
        if ngroups == len(self.bins):
            comp_ids = np.repeat(np.arange(ngroups), rep)
        else:
            # first label was dropped (NaT); its rows get comp_id -1
            comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)

        return comp_ids.astype('int64', copy=False), \
            obs_group_ids.astype('int64', copy=False), ngroups

    @cache_readonly
    def ngroups(self):
        return len(self.result_index)

    @cache_readonly
    def result_index(self):
        # a leading NaT label is excluded from the result
        if len(self.binlabels) != 0 and isna(self.binlabels[0]):
            return self.binlabels[1:]

        return self.binlabels

    @property
    def levels(self):
        return [self.binlabels]

    @property
    def names(self):
        return [self.binlabels.name]

    @property
    def groupings(self):
        # expose the bins as a single synthetic Grouping for API compat
        return [Grouping(lvl, lvl, in_axis=False, level=None, name=name)
                for lvl, name in zip(self.levels, self.names)]

    def agg_series(self, obj, func):
        dummy = obj[:0]
        grouper = reduction.SeriesBinGrouper(obj, func, self.bins, dummy)
        return grouper.get_result()

    # ----------------------------------------------------------------------
    # cython aggregation

    # same cython kernels as BaseGrouper (deep-copied so per-class
    # modification is safe)
    _cython_functions = copy.deepcopy(BaseGrouper._cython_functions)
class Grouping(object):

    """
    Holds the grouping information for a single key

    Parameters
    ----------
    index : Index
    grouper :
    obj :
    name :
    level :
    in_axis : if the Grouping is a column in self.obj and hence among
        Groupby.exclusions list

    Returns
    -------
    **Attributes**:
      * indices : dict of {group -> index_list}
      * labels : ndarray, group labels
      * ids : mapping of label -> group
      * counts : array of group counts
      * group_index : unique groups
      * groups : dict of {group -> label_list}
    """

    def __init__(self, index, grouper=None, obj=None, name=None, level=None,
                 sort=True, in_axis=False):

        self.name = name
        self.level = level
        self.grouper = _convert_grouper(index, grouper)
        self.index = index
        self.sort = sort
        self.obj = obj
        self.in_axis = in_axis

        # right place for this?
        if isinstance(grouper, (Series, Index)) and name is None:
            self.name = grouper.name

        if isinstance(grouper, MultiIndex):
            self.grouper = grouper.values

        # pre-computed
        self._should_compress = True

        # we have a single grouper which may be a myriad of things,
        # some of which are dependent on the passing in level

        if level is not None:
            if not isinstance(level, int):
                if level not in index.names:
                    raise AssertionError('Level %s not in index' % str(level))
                # resolve a level name to its positional index
                level = index.names.index(level)

            if self.name is None:
                self.name = index.names[level]

            # labels and uniques for this level come straight from the index
            self.grouper, self._labels, self._group_index = \
                index._get_grouper_for_level(self.grouper, level)

        # a passed Grouper like, directly get the grouper in the same way
        # as single grouper groupby, use the group_info to get labels
        elif isinstance(self.grouper, Grouper):
            # get the new grouper; we already have disambiguated
            # what key/level refer to exactly, don't need to
            # check again as we have by this point converted these
            # to an actual value (rather than a pd.Grouper)
            _, grouper, _ = self.grouper._get_grouper(self.obj, validate=False)
            if self.name is None:
                self.name = grouper.result_index.name
            self.obj = self.grouper.obj
            self.grouper = grouper

        else:
            if self.grouper is None and self.name is not None:
                # grouping by a column name of obj
                self.grouper = self.obj[self.name]

            elif isinstance(self.grouper, (list, tuple)):
                self.grouper = com._asarray_tuplesafe(self.grouper)

            # a passed Categorical
            elif is_categorical_dtype(self.grouper):

                self.grouper = self.grouper._codes_for_groupby(self.sort)

                # we make a CategoricalIndex out of the cat grouper
                # preserving the categories / ordered attributes
                self._labels = self.grouper.codes

                c = self.grouper.categories
                self._group_index = CategoricalIndex(
                    Categorical.from_codes(np.arange(len(c)),
                                           categories=c,
                                           ordered=self.grouper.ordered))

            # we are done
            if isinstance(self.grouper, Grouping):
                self.grouper = self.grouper.grouper

            # no level passed
            elif not isinstance(self.grouper,
                                (Series, Index, Categorical, np.ndarray)):
                if getattr(self.grouper, 'ndim', 1) != 1:
                    t = self.name or str(type(self.grouper))
                    raise ValueError("Grouper for '%s' not 1-dimensional" % t)
                # anything else (e.g. a function) is mapped over the index
                self.grouper = self.index.map(self.grouper)
                if not (hasattr(self.grouper, "__len__") and
                        len(self.grouper) == len(self.index)):
                    errmsg = ('Grouper result violates len(labels) == '
                              'len(data)\nresult: %s' %
                              pprint_thing(self.grouper))
                    self.grouper = None  # Try for sanity
                    raise AssertionError(errmsg)

        # if we have a date/time-like grouper, make sure that we have
        # Timestamps like
        if getattr(self.grouper, 'dtype', None) is not None:
            if is_datetime64_dtype(self.grouper):
                from pandas import to_datetime
                self.grouper = to_datetime(self.grouper)
            elif is_timedelta64_dtype(self.grouper):
                from pandas import to_timedelta
                self.grouper = to_timedelta(self.grouper)

    def __repr__(self):
        return 'Grouping({0})'.format(self.name)

    def __iter__(self):
        return iter(self.indices)

    # lazily populated by _make_labels()
    _labels = None
    _group_index = None

    @property
    def ngroups(self):
        return len(self.group_index)

    @cache_readonly
    def indices(self):
        # we have a list of groupers
        if isinstance(self.grouper, BaseGrouper):
            return self.grouper.indices

        values = _ensure_categorical(self.grouper)
        return values._reverse_indexer()

    @property
    def labels(self):
        if self._labels is None:
            self._make_labels()
        return self._labels

    @property
    def group_index(self):
        if self._group_index is None:
            self._make_labels()
        return self._group_index

    def _make_labels(self):
        """Populate _labels and _group_index by factorizing the grouper."""
        if self._labels is None or self._group_index is None:
            # we have a list of groupers
            if isinstance(self.grouper, BaseGrouper):
                labels = self.grouper.label_info
                uniques = self.grouper.result_index
            else:
                labels, uniques = algorithms.factorize(
                    self.grouper, sort=self.sort)
                uniques = Index(uniques, name=self.name)
            self._labels = labels
            self._group_index = uniques

    @cache_readonly
    def groups(self):
        return self.index.groupby(Categorical.from_codes(self.labels,
                                                         self.group_index))
def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
                 mutated=False, validate=True):
    """
    create and return a BaseGrouper, which is an internal
    mapping of how to create the grouper indexers.
    This may be composed of multiple Grouping objects, indicating
    multiple groupers

    Groupers are ultimately index mappings. They can originate as:
    index mappings, keys to columns, functions, or Groupers

    Groupers enable local references to axis,level,sort, while
    the passed in axis, level, and sort are 'global'.

    This routine tries to figure out what the passing in references
    are and then creates a Grouping for each one, combined into
    a BaseGrouper.

    If validate, then check for key/level overlaps

    """
    group_axis = obj._get_axis(axis)

    # validate that the passed single level is compatible with the passed
    # axis of the object
    if level is not None:
        # TODO: These if-block and else-block are almost same.
        # MultiIndex instance check is removable, but it seems that there are
        # some processes only for non-MultiIndex in else-block,
        # eg. `obj.index.name != level`. We have to consider carefully whether
        # these are applicable for MultiIndex. Even if these are applicable,
        # we need to check if it makes no side effect to subsequent processes
        # on the outside of this condition.
        # (GH 17621)
        if isinstance(group_axis, MultiIndex):
            if is_list_like(level) and len(level) == 1:
                level = level[0]

            if key is None and is_scalar(level):
                # Get the level values from group_axis
                key = group_axis.get_level_values(level)
                level = None

        else:
            # allow level to be a length-one list-like object
            # (e.g., level=[0])
            # GH 13901
            if is_list_like(level):
                nlevels = len(level)
                if nlevels == 1:
                    level = level[0]
                elif nlevels == 0:
                    raise ValueError('No group keys passed!')
                else:
                    raise ValueError('multiple levels only valid with '
                                     'MultiIndex')

            if isinstance(level, compat.string_types):
                if obj.index.name != level:
                    raise ValueError('level name %s is not the name of the '
                                     'index' % level)
            elif level > 0 or level < -1:
                # a flat index only supports level 0 (or -1, the same level)
                raise ValueError('level > 0 or level < -1 only valid with '
                                 ' MultiIndex')

            # NOTE: `group_axis` and `group_axis.get_level_values(level)`
            # are same in this section.
            level = None
            key = group_axis

    # a passed-in Grouper, directly convert
    if isinstance(key, Grouper):
        binner, grouper, obj = key._get_grouper(obj, validate=False)
        if key.key is None:
            return grouper, [], obj
        else:
            return grouper, set([key.key]), obj

    # already have a BaseGrouper, just return it
    elif isinstance(key, BaseGrouper):
        return key, [], obj

    # In the future, a tuple key will always mean an actual key,
    # not an iterable of keys. In the meantime, we attempt to provide
    # a warning. We can assume that the user wanted a list of keys when
    # the key is not in the index. We just have to be careful with
    # unhashble elements of `key`. Any unhashable elements implies that
    # they wanted a list of keys.
    # https://github.com/pandas-dev/pandas/issues/18314
    is_tuple = isinstance(key, tuple)
    all_hashable = is_tuple and is_hashable(key)

    if is_tuple:
        if ((all_hashable and key not in obj and set(key).issubset(obj))
                or not all_hashable):
            # column names ('a', 'b') -> ['a', 'b']
            # arrays like (a, b) -> [a, b]
            msg = ("Interpreting tuple 'by' as a list of keys, rather than "
                   "a single key. Use 'by=[...]' instead of 'by=(...)'. In "
                   "the future, a tuple will always mean a single key.")
            warnings.warn(msg, FutureWarning, stacklevel=5)
            key = list(key)

    if not isinstance(key, list):
        keys = [key]
        match_axis_length = False
    else:
        keys = key
        match_axis_length = len(keys) == len(group_axis)

    # what are we after, exactly?
    any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
    any_groupers = any(isinstance(g, Grouper) for g in keys)
    any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
                        for g in keys)

    try:
        if isinstance(obj, DataFrame):
            all_in_columns_index = all(g in obj.columns or g in obj.index.names
                                       for g in keys)
        else:
            all_in_columns_index = False
    except Exception:
        all_in_columns_index = False

    # a list of scalars matching the axis length and naming nothing in the
    # object is treated as a single array-like key
    if not any_callable and not all_in_columns_index and \
       not any_arraylike and not any_groupers and \
       match_axis_length and level is None:
        keys = [com._asarray_tuplesafe(keys)]

    if isinstance(level, (tuple, list)):
        if key is None:
            keys = [None] * len(level)
        levels = level
    else:
        levels = [level] * len(keys)

    groupings = []
    exclusions = []

    # if the actual grouper should be obj[key]
    def is_in_axis(key):
        if not _is_label_like(key):
            try:
                obj._data.items.get_loc(key)
            except Exception:
                return False

        return True

    # if the grouper is obj[name]
    def is_in_obj(gpr):
        try:
            return id(gpr) == id(obj[gpr.name])
        except Exception:
            return False

    # build one Grouping per (key, level) pair
    for i, (gpr, level) in enumerate(zip(keys, levels)):

        if is_in_obj(gpr):  # df.groupby(df['name'])
            in_axis, name = True, gpr.name
            exclusions.append(name)

        elif is_in_axis(gpr):  # df.groupby('name')
            if gpr in obj:
                if validate:
                    stacklevel = 5  # Number of stack levels from df.groupby
                    obj._check_label_or_level_ambiguity(
                        gpr, stacklevel=stacklevel)
                in_axis, name, gpr = True, gpr, obj[gpr]
                exclusions.append(name)
            elif obj._is_level_reference(gpr):
                in_axis, name, level, gpr = False, None, gpr, None
            else:
                raise KeyError(gpr)

        elif isinstance(gpr, Grouper) and gpr.key is not None:
            # Add key to exclusions
            exclusions.append(gpr.key)
            in_axis, name = False, None

        else:
            in_axis, name = False, None

        if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:
            raise ValueError(
                ("Length of grouper ({len_gpr}) and axis ({len_axis})"
                 " must be same length"
                 .format(len_gpr=len(gpr), len_axis=obj.shape[axis])))

        # create the Grouping
        # allow us to passing the actual Grouping as the gpr
        ping = Grouping(group_axis,
                        gpr,
                        obj=obj,
                        name=name,
                        level=level,
                        sort=sort,
                        in_axis=in_axis) \
            if not isinstance(gpr, Grouping) else gpr

        groupings.append(ping)

    if len(groupings) == 0:
        raise ValueError('No group keys passed!')

    # create the internals grouper
    grouper = BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)
    return grouper, exclusions, obj
def _is_label_like(val):
    """True if *val* could name a column/level: a string, tuple or scalar."""
    if isinstance(val, (compat.string_types, tuple)):
        return True
    return val is not None and is_scalar(val)
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper._values
else:
return grouper.reindex(axis)._values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise ValueError('Grouper and axis must be same length')
return grouper
else:
return grouper
def _whitelist_method_generator(klass, whitelist):
    """
    Yields all GroupBy member defs for DataFrame/Series names in _whitelist.

    Parameters
    ----------
    klass - class where members are defined. Should be Series or DataFrame

    whitelist - list of names of klass methods to be constructed

    Returns
    -------
    The generator yields a sequence of strings, each suitable for exec'ing,
    that define implementations of the named methods for DataFrameGroupBy
    or SeriesGroupBy.

    Since we don't want to override methods explicitly defined in the
    base class, any such name is skipped.
    """
    # templates are exec'd inside the class body of SeriesGroupBy /
    # DataFrameGroupBy; they simply forward to GroupBy.__getattr__ so that
    # _make_wrapper can do the actual dispatch
    method_wrapper_template = \
        """def %(name)s(%(sig)s) :
    \"""
    %(doc)s
    \"""
    f = %(self)s.__getattr__('%(name)s')
    return f(%(args)s)"""
    property_wrapper_template = \
        """@property
def %(name)s(self) :
    \"""
    %(doc)s
    \"""
    return self.__getattr__('%(name)s')"""
    for name in whitelist:
        # don't override anything that was explicitly defined
        # in the base class
        if hasattr(GroupBy, name):
            continue
        # ugly, but we need the name string itself in the method.
        f = getattr(klass, name)
        doc = f.__doc__
        doc = doc if type(doc) == str else ''
        if isinstance(f, types.MethodType):
            wrapper_template = method_wrapper_template
            decl, args = make_signature(f)
            # pass args by name to f because otherwise
            # GroupBy._make_wrapper won't know whether
            # we passed in an axis parameter.
            args_by_name = ['{0}={0}'.format(arg) for arg in args[1:]]
            params = {'name': name,
                      'doc': doc,
                      'sig': ','.join(decl),
                      'self': args[0],
                      'args': ','.join(args_by_name)}
        else:
            wrapper_template = property_wrapper_template
            params = {'name': name, 'doc': doc}
        yield wrapper_template % params
class SeriesGroupBy(GroupBy):
#
# Make class defs of attributes on SeriesGroupBy whitelist
_apply_whitelist = _series_apply_whitelist
for _def_str in _whitelist_method_generator(Series,
_series_apply_whitelist):
exec(_def_str)
@property
def _selection_name(self):
"""
since we are a series, we by definition only have
a single name, but may be the result of a selection or
the name of our object
"""
if self._selection is None:
return self.obj.name
else:
return self._selection
_agg_doc = dedent("""
Examples
--------
>>> s = Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
See also
--------
pandas.Series.groupby.apply
pandas.Series.groupby.transform
pandas.Series.aggregate
""")
    @Appender(_apply_docs['template']
              .format(input='series',
                      examples=_apply_docs['series_examples']))
    def apply(self, func, *args, **kwargs):
        # thin override: docstring is attached via the Appender decorator;
        # the actual apply machinery lives on the GroupBy base class
        return super(SeriesGroupBy, self).apply(func, *args, **kwargs)
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='Series',
versionadded=''))
def aggregate(self, func_or_funcs, *args, **kwargs):
_level = kwargs.pop('_level', None)
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if isinstance(func_or_funcs, collections.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
ret = self._aggregate_multiple_funcs(func_or_funcs,
(_level or 0) + 1)
else:
cyfunc = self._is_cython_func(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
# _level handled at higher
if not _level and isinstance(ret, dict):
from pandas import concat
ret = concat(ret, axis=1)
return ret
agg = aggregate
    def _aggregate_multiple_funcs(self, arg, _level):
        # Aggregate with several functions at once; ``arg`` may be a dict
        # (deprecated for Series), a list of (name, func) pairs, or a plain
        # list of functions / function names.
        if isinstance(arg, dict):

            # show the deprecation, but only if we
            # have not shown a higher level one
            # GH 15931
            if isinstance(self._selected_obj, Series) and _level <= 1:
                warnings.warn(
                    ("using a dict on a Series for aggregation\n"
                     "is deprecated and will be removed in a future "
                     "version"),
                    FutureWarning, stacklevel=3)

            columns = list(arg.keys())
            arg = list(arg.items())
        elif any(isinstance(x, (tuple, list)) for x in arg):
            arg = [(x, x) if not isinstance(x, (tuple, list)) else x
                   for x in arg]

            # indicated column order
            columns = lzip(*arg)[0]
        else:
            # list of functions / function names
            columns = []
            for f in arg:
                if isinstance(f, compat.string_types):
                    columns.append(f)
                else:
                    # protect against callables without names
                    columns.append(com._get_callable_name(f))
            arg = lzip(columns, arg)

        results = {}
        for name, func in arg:
            obj = self
            if name in results:
                raise SpecificationError('Function names must be unique, '
                                         'found multiple named %s' % name)

            # reset the cache so that we
            # only include the named selection
            if name in self._selected_obj:
                # work on a shallow copy so the selection change does not
                # leak back into ``self``
                obj = copy.copy(obj)
                obj._reset_cache()
                obj._selection = name
            results[name] = obj.aggregate(func)

        if isinstance(list(compat.itervalues(results))[0],
                      DataFrame):

            # let higher level handle
            if _level:
                return results
            return list(compat.itervalues(results))[0]
        return DataFrame(results, columns=columns)
def _wrap_output(self, output, index, names=None):
""" common agg/transform wrapping logic """
output = output[self._selection_name]
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self._selection_name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_aggregated_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.obj.index,
names=names)
    def _wrap_applied_output(self, keys, values, not_indexed_same=False):
        # Wrap the per-group results of ``apply`` into a Series/DataFrame,
        # depending on what the applied function returned per group.
        if len(keys) == 0:
            # GH #6265
            return Series([], name=self._selection_name, index=keys)

        def _get_index():
            # group keys become the result index (MultiIndex for >1 key)
            if self.grouper.nkeys > 1:
                index = MultiIndex.from_tuples(keys, names=self.grouper.names)
            else:
                index = Index(keys, name=self.grouper.names[0])
            return index

        if isinstance(values[0], dict):
            # GH #823
            index = _get_index()
            result = DataFrame(values, index=index).stack()
            result.name = self._selection_name
            return result

        if isinstance(values[0], (Series, dict)):
            return self._concat_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
        elif isinstance(values[0], DataFrame):
            # possible that Series -> DataFrame by applied function
            return self._concat_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
        else:
            # GH #6265
            return Series(values, index=_get_index(),
                          name=self._selection_name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
    @Substitution(klass='Series', selected='A.')
    @Appender(_transform_template)
    def transform(self, func, *args, **kwargs):
        # resolve builtins (np.sum etc.) to their cython op names
        func = self._is_cython_func(func) or func

        # if string function
        if isinstance(func, compat.string_types):
            if func in _cython_transforms:
                # cythonized transform
                return getattr(self, func)(*args, **kwargs)
            else:
                # cythonized aggregation and merge
                return self._transform_fast(
                    lambda: getattr(self, func)(*args, **kwargs), func)

        # reg transform
        klass = self._selected_obj.__class__
        results = []
        wrapper = lambda x: func(x, *args, **kwargs)
        for name, group in self:
            # expose the group key to the UDF as ``.name`` without
            # triggering Series name-setting machinery
            object.__setattr__(group, 'name', name)
            res = wrapper(group)

            if hasattr(res, 'values'):
                res = res.values

            indexer = self._get_index(name)
            s = klass(res, indexer)
            results.append(s)

        from pandas.core.reshape.concat import concat
        result = concat(results).sort_index()

        # we will only try to coerce the result type if
        # we have a numeric dtype, as these are *always* udfs
        # the cython take a different path (and casting)
        dtype = self._selected_obj.dtype
        if is_numeric_dtype(dtype):
            result = maybe_downcast_to_dtype(result, dtype)

        result.name = self._selected_obj.name
        result.index = self._selected_obj.index
        return result
def _transform_fast(self, func, func_nm):
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self, func)
ids, _, ngroup = self.grouper.group_info
cast = self._transform_should_cast(func_nm)
out = algorithms.take_1d(func().values, ids)
if cast:
out = self._try_cast(out, self.obj)
return Series(out, index=self.obj.index, name=self.obj.name)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notna(b)
try:
indices = [self._get_index(name) for name, group in self
if true_and_notna(group)]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
    def nunique(self, dropna=True):
        """ Returns number of unique elements in the group """
        ids, _, _ = self.grouper.group_info
        val = self.obj.get_values()

        try:
            # sort values within each group so unique runs are adjacent
            sorter = np.lexsort((val, ids))
        except TypeError:  # catches object dtypes
            assert val.dtype == object, \
                'val.dtype must be object, got %s' % val.dtype
            # factorize to ints so lexsort works; -1 marks missing
            val, _ = algorithms.factorize(val, sort=False)
            sorter = np.lexsort((val, ids))
            _isna = lambda a: a == -1
        else:
            _isna = isna

        ids, val = ids[sorter], val[sorter]

        # group boundaries are where group ids change
        # unique observations are where sorted values change
        idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
        inc = np.r_[1, val[1:] != val[:-1]]

        # 1st item of each group is a new unique observation
        mask = _isna(val)
        if dropna:
            inc[idx] = 1
            inc[mask] = 0
        else:
            # count NaN once per group: only suppress consecutive NaNs
            inc[mask & np.r_[False, mask[:-1]]] = 0
            inc[idx] = 1

        out = np.add.reduceat(inc, idx).astype('int64', copy=False)
        if len(ids):
            # NaN/NaT group exists if the head of ids is -1,
            # so remove it from res and exclude its index from idx
            if ids[0] == -1:
                res = out[1:]
                idx = idx[np.flatnonzero(idx)]
            else:
                res = out
        else:
            res = out[1:]
        ri = self.grouper.result_index

        # we might have duplications among the bins
        if len(res) != len(ri):
            res, out = np.zeros(len(ri), dtype=out.dtype), res
            res[ids[idx]] = out

        return Series(res,
                      index=ri,
                      name=self._selection_name)
@Appender(Series.describe.__doc__)
def describe(self, **kwargs):
self._set_group_selection()
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
    def value_counts(self, normalize=False, sort=True, ascending=False,
                     bins=None, dropna=True):
        # Per-group value counts, vectorized over all groups at once by
        # lexsorting (value, group-id) and counting runs.

        from pandas.core.reshape.tile import cut
        from pandas.core.reshape.merge import _get_join_indexers

        if bins is not None and not np.iterable(bins):
            # scalar bins cannot be done at top level
            # in a backward compatible way
            return self.apply(Series.value_counts,
                              normalize=normalize,
                              sort=sort,
                              ascending=ascending,
                              bins=bins)

        ids, _, _ = self.grouper.group_info
        val = self.obj.get_values()

        # groupby removes null keys from groupings
        mask = ids != -1
        ids, val = ids[mask], val[mask]

        if bins is None:
            lab, lev = algorithms.factorize(val, sort=True)
            llab = lambda lab, inc: lab[inc]
        else:
            # lab is a Categorical with categories an IntervalIndex
            lab = cut(Series(val), bins, include_lowest=True)
            lev = lab.cat.categories
            lab = lev.take(lab.cat.codes)
            llab = lambda lab, inc: lab[inc]._multiindex.labels[-1]

        if is_interval_dtype(lab):
            # TODO: should we do this inside II?
            sorter = np.lexsort((lab.left, lab.right, ids))
        else:
            sorter = np.lexsort((lab, ids))

        ids, lab = ids[sorter], lab[sorter]

        # group boundaries are where group ids change
        idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]

        # new values are where sorted labels change
        lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
        inc = np.r_[True, lchanges]
        inc[idx] = True  # group boundaries are also new values
        out = np.diff(np.nonzero(np.r_[inc, True])[0])  # value counts

        # num. of times each group should be repeated
        rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))

        # multi-index components
        labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)]
        levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
        names = self.grouper.names + [self._selection_name]

        if dropna:
            mask = labels[-1] != -1
            if mask.all():
                dropna = False
            else:
                out, labels = out[mask], [label[mask] for label in labels]

        if normalize:
            out = out.astype('float')
            d = np.diff(np.r_[idx, len(ids)])
            if dropna:
                m = ids[lab == -1]
                np.add.at(d, m, -1)
                acc = rep(d)[mask]
            else:
                acc = rep(d)
            out /= acc

        if sort and bins is None:
            cat = ids[inc][mask] if dropna else ids[inc]
            sorter = np.lexsort((out if ascending else -out, cat))
            out, labels[-1] = out[sorter], labels[-1][sorter]

        if bins is None:
            mi = MultiIndex(levels=levels, labels=labels, names=names,
                            verify_integrity=False)

            if is_integer_dtype(out):
                out = _ensure_int64(out)
            return Series(out, index=mi, name=self._selection_name)

        # for compat. with libgroupby.value_counts need to ensure every
        # bin is present at every index level, null filled with zeros
        diff = np.zeros(len(out), dtype='bool')
        for lab in labels[:-1]:
            diff |= np.r_[True, lab[1:] != lab[:-1]]

        ncat, nbin = diff.sum(), len(levels[-1])

        left = [np.repeat(np.arange(ncat), nbin),
                np.tile(np.arange(nbin), ncat)]

        right = [diff.cumsum() - 1, labels[-1]]

        # left join to expand to the full (group, bin) cross product
        _, idx = _get_join_indexers(left, right, sort=False, how='left')
        out = np.where(idx != -1, out[idx], 0)

        if sort:
            sorter = np.lexsort((out if ascending else -out, left[0]))
            out, left[-1] = out[sorter], left[-1][sorter]

        # build the multi-index w/ full levels
        labels = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
        labels.append(left[-1])

        mi = MultiIndex(levels=levels, labels=labels, names=names,
                        verify_integrity=False)

        if is_integer_dtype(out):
            out = _ensure_int64(out)
        return Series(out, index=mi, name=self._selection_name)
def count(self):
""" Compute count of group, excluding missing values """
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()
mask = (ids != -1) & ~isna(val)
ids = _ensure_platform_int(ids)
out = np.bincount(ids[mask], minlength=ngroups or None)
return Series(out,
index=self.grouper.result_index,
name=self._selection_name,
dtype='int64')
    def _apply_to_column_groupbys(self, func):
        """ return a pass thru """
        # a Series has only one "column", so just apply func to ourselves
        return func(self)
class NDFrameGroupBy(GroupBy):

    def _iterate_slices(self):
        # Yield (name, 1-d slice) pairs of the grouped object, skipping
        # any excluded (grouper) columns.
        if self.axis == 0:
            # kludge
            if self._selection is None:
                slice_axis = self.obj.columns
            else:
                slice_axis = self._selection_list
            slicer = lambda x: self.obj[x]
        else:
            slice_axis = self.obj.index
            slicer = self.obj.xs

        for val in slice_axis:
            if val in self.exclusions:
                continue
            yield val, slicer(val)
def _cython_agg_general(self, how, alt=None, numeric_only=True,
min_count=-1):
new_items, new_blocks = self._cython_agg_blocks(
how, alt=alt, numeric_only=numeric_only, min_count=min_count)
return self._wrap_agged_blocks(new_items, new_blocks)
    def _wrap_agged_blocks(self, items, blocks):
        # Rebuild an NDFrame of the original type from aggregated blocks.
        obj = self._obj_with_exclusions

        new_axes = list(obj._data.axes)

        # more kludge
        if self.axis == 0:
            new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
        else:
            new_axes[self.axis] = self.grouper.result_index

        # Make sure block manager integrity check passes.
        assert new_axes[0].equals(items)
        new_axes[0] = items

        mgr = BlockManager(blocks, new_axes)

        new_obj = type(obj)(mgr)

        return self._post_process_cython_aggregate(new_obj)

    # blocks are aggregated along axis 0 here (undone in post-processing)
    _block_agg_axis = 0
    def _cython_agg_blocks(self, how, alt=None, numeric_only=True,
                           min_count=-1):
        # Aggregate each block with the cython op ``how``; blocks the op
        # cannot handle fall back to ``alt`` (python agg) or, lacking a
        # fallback, are dropped from the result.
        # TODO: the actual managing of mgr_locs is a PITA
        # here, it should happen via BlockManager.combine
        data, agg_axis = self._get_data_to_aggregate()

        if numeric_only:
            data = data.get_numeric_data(copy=False)

        new_blocks = []
        new_items = []
        deleted_items = []
        for block in data.blocks:

            locs = block.mgr_locs.as_array
            try:
                result, _ = self.grouper.aggregate(
                    block.values, how, axis=agg_axis, min_count=min_count)
            except NotImplementedError:
                # generally if we have numeric_only=False
                # and non-applicable functions
                # try to python agg

                if alt is None:
                    # we cannot perform the operation
                    # in an alternate way, exclude the block
                    deleted_items.append(locs)
                    continue

                # call our grouper again with only this block
                obj = self.obj[data.items[locs]]
                s = groupby(obj, self.grouper)
                result = s.aggregate(lambda x: alt(x, axis=self.axis))
                newb = result._data.blocks[0]

            finally:
                # NOTE(review): this ``finally`` also runs before the
                # ``continue`` above (where ``result`` may be unbound or
                # stale from a prior iteration) and it overwrites the
                # ``newb`` built in the fallback branch — confirm this is
                # intended before relying on the alt path.
                # see if we can cast the block back to the original dtype
                result = block._try_coerce_and_cast_result(result)
                newb = block.make_block(result)

            new_items.append(locs)
            new_blocks.append(newb)

        if len(new_blocks) == 0:
            raise DataError('No numeric types to aggregate')

        # reset the locs in the blocks to correspond to our
        # current ordering
        indexer = np.concatenate(new_items)
        new_items = data.items.take(np.sort(indexer))

        if len(deleted_items):

            # we need to adjust the indexer to account for the
            # items we have removed
            # really should be done in internals :<

            deleted = np.concatenate(deleted_items)
            ai = np.arange(len(data))
            mask = np.zeros(len(data))
            mask[deleted] = 1
            indexer = (ai - mask.cumsum())[indexer]

        offset = 0
        for b in new_blocks:
            loc = len(b.mgr_locs)
            b.mgr_locs = indexer[offset:(offset + loc)]
            offset += loc

        return new_items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
    def aggregate(self, arg, *args, **kwargs):
        # Dispatch to the shared _aggregate machinery; fall back to
        # list-style or generic aggregation when it declines to handle arg.
        _level = kwargs.pop('_level', None)
        result, how = self._aggregate(arg, _level=_level, *args, **kwargs)
        if how is None:
            return result

        if result is None:

            # grouper specific aggregations
            if self.grouper.nkeys > 1:
                return self._python_agg_general(arg, *args, **kwargs)
            else:

                # try to treat as if we are passing a list
                try:
                    assert not args and not kwargs
                    result = self._aggregate_multiple_funcs(
                        [arg], _level=_level, _axis=self.axis)
                    result.columns = Index(
                        result.columns.levels[0],
                        name=self._selected_obj.columns.name)
                except Exception:
                    result = self._aggregate_generic(arg, *args, **kwargs)

        if not self.as_index:
            # expose the group keys as regular columns
            self._insert_inaxis_grouper_inplace(result)
            result.index = np.arange(len(result))

        return result._convert(datetime=True)
    agg = aggregate
    def _aggregate_generic(self, func, *args, **kwargs):
        # Apply ``func`` whole-group-at-a-time; fall back to item-by-item
        # (or per-group apply) when that fails.
        if self.grouper.nkeys != 1:
            raise AssertionError('Number of keys must be 1')

        axis = self.axis
        obj = self._obj_with_exclusions

        result = {}
        if axis != obj._info_axis_number:
            try:
                for name, data in self:
                    result[name] = self._try_cast(func(data, *args, **kwargs),
                                                  data)
            except Exception:
                return self._aggregate_item_by_item(func, *args, **kwargs)
        else:
            for name in self.indices:
                try:
                    data = self.get_group(name, obj=obj)
                    result[name] = self._try_cast(func(data, *args, **kwargs),
                                                  data)
                except Exception:
                    # last resort: elementwise apply within the group
                    wrapper = lambda x: func(x, *args, **kwargs)
                    result[name] = data.apply(wrapper, axis=axis)

        return self._wrap_generic_output(result, obj)
    def _wrap_aggregated_output(self, output, names=None):
        # concrete subclasses must implement this
        raise com.AbstractMethodError(self)
    def _aggregate_item_by_item(self, func, *args, **kwargs):
        # only for axis==0
        # Aggregate column-by-column via SeriesGroupBy, dropping columns
        # the function cannot handle; re-raise only if nothing succeeded.
        obj = self._obj_with_exclusions
        result = {}
        cannot_agg = []
        errors = None
        for item in obj:
            try:
                data = obj[item]
                colg = SeriesGroupBy(data, selection=item,
                                     grouper=self.grouper)
                result[item] = self._try_cast(
                    colg.aggregate(func, *args, **kwargs), data)
            except ValueError:
                cannot_agg.append(item)
                continue
            except TypeError as e:
                cannot_agg.append(item)
                # remember the error in case *every* column fails
                errors = e
                continue

        result_columns = obj.columns
        if cannot_agg:
            result_columns = result_columns.drop(cannot_agg)

            # GH6337
            if not len(result_columns) and errors is not None:
                raise errors

        return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
    def _wrap_applied_output(self, keys, values, not_indexed_same=False):
        # Wrap per-group ``apply`` results into a DataFrame/Series whose
        # shape depends on what the applied function returned per group.
        from pandas.core.index import _all_indexes_same
        from pandas.core.tools.numeric import to_numeric

        if len(keys) == 0:
            return DataFrame(index=keys)

        key_names = self.grouper.names

        # GH12824.
        def first_not_none(values):
            try:
                return next(com._not_none(*values))
            except StopIteration:
                return None

        v = first_not_none(values)

        if v is None:
            # GH9684. If all values are None, then this will throw an error.
            # We'd prefer it return an empty dataframe.
            return DataFrame()
        elif isinstance(v, DataFrame):
            return self._concat_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
        elif self.grouper.groupings is not None:
            if len(self.grouper.groupings) > 1:
                key_index = MultiIndex.from_tuples(keys, names=key_names)

            else:
                ping = self.grouper.groupings[0]
                if len(keys) == ping.ngroups:
                    key_index = ping.group_index
                    key_index.name = key_names[0]

                    key_lookup = Index(keys)
                    indexer = key_lookup.get_indexer(key_index)

                    # reorder the values
                    values = [values[i] for i in indexer]
                else:

                    key_index = Index(keys, name=key_names[0])

                # don't use the key indexer
                if not self.as_index:
                    key_index = None

            # make Nones an empty object
            v = first_not_none(values)
            if v is None:
                return DataFrame()
            elif isinstance(v, NDFrame):
                # replace None results with empty frames of matching shape
                values = [
                    x if x is not None else
                    v._constructor(**v._construct_axes_dict())
                    for x in values
                ]

            v = values[0]

            if isinstance(v, (np.ndarray, Index, Series)):
                if isinstance(v, Series):
                    applied_index = self._selected_obj._get_axis(self.axis)
                    all_indexed_same = _all_indexes_same([
                        x.index for x in values
                    ])
                    singular_series = (len(values) == 1 and
                                       applied_index.nlevels == 1)

                    # GH3596
                    # provide a reduction (Frame -> Series) if groups are
                    # unique
                    if self.squeeze:
                        # assign the name to this series
                        if singular_series:
                            values[0].name = keys[0]

                            # GH2893
                            # we have series in the values array, we want to
                            # produce a series:
                            # if any of the sub-series are not indexed the same
                            # OR we don't have a multi-index and we have only a
                            # single values
                            return self._concat_objects(
                                keys, values, not_indexed_same=not_indexed_same
                            )

                        # still a series
                        # path added as of GH 5545
                        elif all_indexed_same:
                            from pandas.core.reshape.concat import concat
                            return concat(values)

                    if not all_indexed_same:
                        # GH 8467
                        return self._concat_objects(
                            keys, values, not_indexed_same=True,
                        )

                try:
                    if self.axis == 0:
                        # GH6124 if the list of Series have a consistent name,
                        # then propagate that name to the result.
                        index = v.index.copy()
                        if index.name is None:
                            # Only propagate the series name to the result
                            # if all series have a consistent name.  If the
                            # series do not have a consistent name, do
                            # nothing.
                            names = {v.name for v in values}
                            if len(names) == 1:
                                index.name = list(names)[0]

                        # normally use vstack as its faster than concat
                        # and if we have mi-columns
                        if isinstance(v.index,
                                      MultiIndex) or key_index is None:
                            stacked_values = np.vstack(map(np.asarray, values))
                            result = DataFrame(stacked_values, index=key_index,
                                               columns=index)
                        else:
                            # GH5788 instead of stacking; concat gets the
                            # dtypes correct
                            from pandas.core.reshape.concat import concat
                            result = concat(values, keys=key_index,
                                            names=key_index.names,
                                            axis=self.axis).unstack()
                            result.columns = index
                    else:
                        stacked_values = np.vstack(map(np.asarray, values))
                        result = DataFrame(stacked_values.T, index=v.index,
                                           columns=key_index)
                except (ValueError, AttributeError):
                    # GH1738: values is list of arrays of unequal lengths fall
                    # through to the outer else caluse
                    return Series(values, index=key_index,
                                  name=self._selection_name)

                # if we have date/time like in the original, then coerce dates
                # as we are stacking can easily have object dtypes here
                so = self._selected_obj
                if (so.ndim == 2 and so.dtypes.apply(is_datetimelike).any()):
                    result = result.apply(
                        lambda x: to_numeric(x, errors='ignore'))
                    date_cols = self._selected_obj.select_dtypes(
                        include=['datetime', 'timedelta']).columns
                    date_cols = date_cols.intersection(result.columns)
                    result[date_cols] = (result[date_cols]
                                         ._convert(datetime=True,
                                                   coerce=True))
                else:
                    result = result._convert(datetime=True)

                return self._reindex_output(result)

            # values are not series or array-like but scalars
            else:
                # only coerce dates if we find at least 1 datetime
                coerce = any(isinstance(x, Timestamp) for x in values)
                # self._selection_name not passed through to Series as the
                # result should not take the name of original selection
                # of columns
                return (Series(values, index=key_index)
                        ._convert(datetime=True,
                                  coerce=coerce))

        else:
            # Handle cases like BinGrouper
            return self._concat_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
    def _transform_general(self, func, *args, **kwargs):
        # Generic (slow) transform: apply per group via fast/slow paths,
        # broadcast Series results across the frame, then concat back.
        from pandas.core.reshape.concat import concat

        applied = []
        obj = self._obj_with_exclusions
        gen = self.grouper.get_iterator(obj, axis=self.axis)
        fast_path, slow_path = self._define_paths(func, *args, **kwargs)

        path = None
        for name, group in gen:
            object.__setattr__(group, 'name', name)

            if path is None:
                # Try slow path and fast path.
                try:
                    path, res = self._choose_path(fast_path, slow_path, group)
                except TypeError:
                    return self._transform_item_by_item(obj, fast_path)
                except ValueError:
                    msg = 'transform must return a scalar value for each group'
                    raise ValueError(msg)
            else:
                res = path(group)

            if isinstance(res, Series):

                # we need to broadcast across the
                # other dimension; this will preserve dtypes
                # GH14457
                if not np.prod(group.shape):
                    # empty group: nothing to broadcast
                    continue
                elif res.index.is_(obj.index):
                    r = concat([res] * len(group.columns), axis=1)
                    r.columns = group.columns
                    r.index = group.index
                else:
                    r = DataFrame(
                        np.concatenate([res.values] * len(group.index)
                                       ).reshape(group.shape),
                        columns=group.columns, index=group.index)

                applied.append(r)
            else:
                applied.append(res)

        concat_index = obj.columns if self.axis == 0 else obj.index
        concatenated = concat(applied, join_axes=[concat_index],
                              axis=self.axis, verify_integrity=False)
        return self._set_result_index_ordered(concatenated)
    @Substitution(klass='DataFrame', selected='')
    @Appender(_transform_template)
    def transform(self, func, *args, **kwargs):

        # optimized transforms
        func = self._is_cython_func(func) or func
        if isinstance(func, compat.string_types):
            if func in _cython_transforms:
                # cythonized transform
                return getattr(self, func)(*args, **kwargs)
            else:
                # cythonized aggregation and merge
                result = getattr(self, func)(*args, **kwargs)
        else:
            return self._transform_general(func, *args, **kwargs)

        # a reduction transform
        if not isinstance(result, DataFrame):
            return self._transform_general(func, *args, **kwargs)

        obj = self._obj_with_exclusions
        # nuiscance columns
        if not result.columns.equals(obj.columns):
            # columns were dropped by the aggregation; cannot broadcast
            return self._transform_general(func, *args, **kwargs)

        return self._transform_fast(result, obj, func)
def _transform_fast(self, result, obj, func_nm):
"""
Fast transform path for aggregations
"""
# if there were groups with no observations (Categorical only?)
# try casting data to original dtype
cast = self._transform_should_cast(func_nm)
# for each col, reshape to to size of original frame
# by take operation
ids, _, ngroup = self.grouper.group_info
output = []
for i, _ in enumerate(result.columns):
res = algorithms.take_1d(result.iloc[:, i].values, ids)
if cast:
res = self._try_cast(res, obj.iloc[:, i])
output.append(res)
return DataFrame._from_arrays(output, columns=result.columns,
index=obj.index)
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, compat.string_types):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we get the same results
if res.shape == res_fast.shape:
res_r = res.values.ravel()
res_fast_r = res_fast.values.ravel()
mask = notna(res_r)
if (res_r[mask] == res_fast_r[mask]).all():
path = fast_path
except Exception:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.append(i)
except Exception:
pass
if len(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return DataFrame(output, index=obj.index, columns=columns)
    def filter(self, func, dropna=True, *args, **kwargs):  # noqa
        """
        Return a copy of a DataFrame excluding elements from groups that
        do not satisfy the boolean criterion specified by func.

        Parameters
        ----------
        f : function
            Function to apply to each subframe. Should return True or False.
        dropna : Drop groups that do not pass the filter. True by default;
            if False, groups that evaluate False are filled with NaNs.

        Notes
        -----
        Each subframe is endowed the attribute 'name' in case you need to know
        which group you are working on.

        Examples
        --------
        >>> import pandas as pd
        >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
        ...                           'foo', 'bar'],
        ...                    'B' : [1, 2, 3, 4, 5, 6],
        ...                    'C' : [2.0, 5., 8., 1., 2., 9.]})
        >>> grouped = df.groupby('A')
        >>> grouped.filter(lambda x: x['B'].mean() > 3.)
             A  B    C
        1  bar  2  5.0
        3  bar  4  1.0
        5  bar  6  9.0

        Returns
        -------
        filtered : DataFrame
        """

        indices = []

        obj = self._selected_obj
        gen = self.grouper.get_iterator(obj, axis=self.axis)

        for name, group in gen:
            # expose the group key to the predicate as ``.name``
            object.__setattr__(group, 'name', name)

            res = func(group, *args, **kwargs)

            try:
                res = res.squeeze()
            except AttributeError:  # allow e.g., scalars and frames to pass
                pass

            # interpret the result of the filter
            if is_bool(res) or (is_scalar(res) and isna(res)):
                # NA results are treated as False (group excluded)
                if res and notna(res):
                    indices.append(self._get_index(name))
            else:
                # non scalars aren't allowed
                raise TypeError("filter function returned a %s, "
                                "but expected a scalar bool" %
                                type(res).__name__)

        return self._apply_filter(indices, dropna)
class DataFrameGroupBy(NDFrameGroupBy):
    _apply_whitelist = _dataframe_apply_whitelist
    #
    # Make class defs of attributes on DataFrameGroupBy whitelist.
    # (source text is generated and exec'd so the wrappers are real
    # class attributes delegating through __getattr__)
    for _def_str in _whitelist_method_generator(DataFrame, _apply_whitelist):
        exec(_def_str)

    # DataFrame blocks aggregate along axis 1 (items on axis 0)
    _block_agg_axis = 1
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 2],
... 'B': [1, 2, 3, 4],
... 'C': np.random.randn(4)})
>>> df
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> df.groupby('A').agg('min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> df.groupby('A').agg(['min', 'max'])
B C
min max min max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> df.groupby('A').B.agg(['min', 'max'])
min max
A
1 1 2
2 3 4
Different aggregations per column
>>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
B C
min max sum
A
1 1 2 0.590716
2 3 4 0.704907
See also
--------
pandas.DataFrame.groupby.apply
pandas.DataFrame.groupby.transform
pandas.DataFrame.aggregate
""")
    @Appender(_agg_doc)
    @Appender(_shared_docs['aggregate'] % dict(
        klass='DataFrame',
        versionadded=''))
    def aggregate(self, arg, *args, **kwargs):
        # thin override: docstring is attached via the Appenders; the
        # aggregation machinery lives on NDFrameGroupBy
        return super(DataFrameGroupBy, self).aggregate(arg, *args, **kwargs)

    agg = aggregate
    def _gotitem(self, key, ndim, subset=None):
        """
        sub-classes to define
        return a sliced object

        Parameters
        ----------
        key : string / list of selections
        ndim : 1,2
            requested ndim of result
        subset : object, default None
            subset to act on
        """

        if ndim == 2:
            # 2-d selection: new DataFrameGroupBy sharing our grouper
            if subset is None:
                subset = self.obj
            return DataFrameGroupBy(subset, self.grouper, selection=key,
                                    grouper=self.grouper,
                                    exclusions=self.exclusions,
                                    as_index=self.as_index)
        elif ndim == 1:
            # 1-d selection: drop down to a SeriesGroupBy on the column
            if subset is None:
                subset = self.obj[key]
            return SeriesGroupBy(subset, selection=key,
                                 grouper=self.grouper)

        raise AssertionError("invalid ndim for _gotitem")
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if self.axis == 0:
return DataFrame(result, index=obj.columns,
columns=result_index).T
else:
return DataFrame(result, index=obj.index,
columns=result_index)
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
def _insert_inaxis_grouper_inplace(self, result):
# zip in reverse so we can always insert at loc 0
izip = zip(* map(reversed, (
self.grouper.names,
self.grouper.get_group_levels(),
[grp.in_axis for grp in self.grouper.groupings])))
for name, lev, in_axis in izip:
if in_axis:
result.insert(0, name, lev)
    def _wrap_aggregated_output(self, output, names=None):
        # Assemble aggregated ``output`` into a DataFrame, honoring
        # as_index (re-inserting in-axis groupers when as_index=False).
        agg_axis = 0 if self.axis == 1 else 1
        agg_labels = self._obj_with_exclusions._get_axis(agg_axis)

        output_keys = self._decide_output_index(output, agg_labels)

        if not self.as_index:
            result = DataFrame(output, columns=output_keys)
            self._insert_inaxis_grouper_inplace(result)
            result = result._consolidate()
        else:
            index = self.grouper.result_index
            result = DataFrame(output, index=index, columns=output_keys)

        if self.axis == 1:
            result = result.T

        return self._reindex_output(result)._convert(datetime=True)
    def _wrap_transformed_output(self, output, names=None):
        # transforms retain the original row index; ``names`` unused here
        return DataFrame(output, index=self.obj.index)
    def _wrap_agged_blocks(self, items, blocks):
        """Assemble aggregated Blocks into a DataFrame result."""
        if not self.as_index:
            # Dummy positional index; the real group keys get re-inserted
            # as in-axis columns below.
            index = np.arange(blocks[0].values.shape[1])
            mgr = BlockManager(blocks, [items, index])
            result = DataFrame(mgr)
            self._insert_inaxis_grouper_inplace(result)
            result = result._consolidate()
        else:
            index = self.grouper.result_index
            mgr = BlockManager(blocks, [items, index])
            result = DataFrame(mgr)
        if self.axis == 1:
            result = result.T
        # expand categorical group levels, then soft-convert datetimes
        return self._reindex_output(result)._convert(datetime=True)
    def _reindex_output(self, result):
        """
        If we have categorical groupers, then we want to make sure that
        we have a fully reindexed output to the levels. These may have not
        participated in the groupings (e.g. may have all been
        nan groups).

        This can re-expand the output space.
        """
        groupings = self.grouper.groupings
        # Fast paths: nothing to expand for missing, single, or
        # non-categorical groupings.
        if groupings is None:
            return result
        elif len(groupings) == 1:
            return result
        elif not any(isinstance(ping.grouper, (Categorical, CategoricalIndex))
                     for ping in groupings):
            return result
        # Cartesian product of all group levels == fully expanded index.
        levels_list = [ping.group_index for ping in groupings]
        index, _ = MultiIndex.from_product(
            levels_list, names=self.grouper.names).sortlevel()
        if self.as_index:
            d = {self.obj._get_axis_name(self.axis): index, 'copy': False}
            return result.reindex(**d)
        # GH 13204
        # Here, the categorical in-axis groupers, which need to be fully
        # expanded, are columns in `result`. An idea is to do:
        # result = result.set_index(self.grouper.names)
        #                .reindex(index).reset_index()
        # but special care has to be taken because of possible not-in-axis
        # groupers.
        # So, we manually select and drop the in-axis grouper columns,
        # reindex `result`, and then reset the in-axis grouper columns.
        # Select in-axis groupers
        in_axis_grps = [(i, ping.name) for (i, ping)
                        in enumerate(groupings) if ping.in_axis]
        g_nums, g_names = zip(*in_axis_grps)
        result = result.drop(labels=list(g_names), axis=1)
        # Set a temp index and reindex (possibly expanding)
        result = result.set_index(self.grouper.result_index
                                  ).reindex(index, copy=False)
        # Reset in-axis grouper columns
        # (using level numbers `g_nums` because level names may not be unique)
        result = result.reset_index(level=g_nums)
        return result.reset_index(drop=True)
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions)
def _apply_to_column_groupbys(self, func):
from pandas.core.reshape.concat import concat
return concat(
(func(col_groupby) for _, col_groupby
in self._iterate_column_groupbys()),
keys=self._selected_obj.columns, axis=1)
    def _fill(self, direction, limit=None):
        """Overridden method to join grouped columns in output.

        Performs the group-wise fill via the base class, then prepends the
        grouper columns so the output keeps the grouping context.
        """
        res = super(DataFrameGroupBy, self)._fill(direction, limit=limit)
        # Map each grouping's name to its raw grouper values so they can be
        # re-attached alongside the filled columns.
        output = collections.OrderedDict(
            (grp.name, grp.grouper) for grp in self.grouper.groupings)
        from pandas import concat
        return concat((self._wrap_transformed_output(output), res), axis=1)
    def count(self):
        """ Compute count of group, excluding missing values """
        from pandas.core.dtypes.missing import _isna_ndarraylike as isna
        data, _ = self._get_data_to_aggregate()
        ids, _, ngroups = self.grouper.group_info
        # ids of -1 mark rows that belong to no group (e.g. NaN group keys).
        mask = ids != -1
        # Per block: boolean array of "in a group AND not missing".
        val = ((mask & ~isna(np.atleast_2d(blk.get_values())))
               for blk in data.blocks)
        loc = (blk.mgr_locs for blk in data.blocks)
        # Sum the booleans per group for each block, then rebuild Blocks
        # at their original manager locations.
        counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1)
        blk = map(make_block, map(counter, val), loc)
        return self._wrap_agged_blocks(data.items, list(blk))
    def nunique(self, dropna=True):
        """
        Return DataFrame with number of distinct observations per group for
        each column.

        .. versionadded:: 0.20.0

        Parameters
        ----------
        dropna : boolean, default True
            Don't include NaN in the counts.

        Returns
        -------
        nunique: DataFrame

        Examples
        --------
        >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
        ...                           'ham', 'ham'],
        ...                    'value1': [1, 5, 5, 2, 5, 5],
        ...                    'value2': list('abbaxy')})
        >>> df
             id  value1 value2
        0  spam       1      a
        1   egg       5      b
        2   egg       5      b
        3  spam       2      a
        4   ham       5      x
        5   ham       5      y
        >>> df.groupby('id').nunique()
              id  value1  value2
        id
        egg    1       1       1
        ham    1       1       2
        spam   1       2       1

        # check for rows with the same id but conflicting values
        >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
             id  value1 value2
        0  spam       1      a
        3  spam       2      a
        4   ham       5      x
        5   ham       5      y
        """
        obj = self._selected_obj
        def groupby_series(obj, col=None):
            # nunique per group for a single column (or the Series itself).
            return SeriesGroupBy(obj,
                                 selection=col,
                                 grouper=self.grouper).nunique(dropna=dropna)
        if isinstance(obj, Series):
            results = groupby_series(obj)
        else:
            from pandas.core.reshape.concat import concat
            results = [groupby_series(obj[col], col) for col in obj.columns]
            results = concat(results, axis=1)
        if not self.as_index:
            results.index = com._default_index(len(results))
        return results
    # Alias so df.groupby(...).boxplot(...) works.
    boxplot = boxplot_frame_groupby
class PanelGroupBy(NDFrameGroupBy):
    """GroupBy implementation for Panel objects (3D data)."""
    # NOTE(review): ``aggregate`` is defined twice in this class. This first
    # definition is shadowed by the second one below, but ``agg`` still binds
    # to this (super-delegating) version — confirm that is intentional.
    def aggregate(self, arg, *args, **kwargs):
        return super(PanelGroupBy, self).aggregate(arg, *args, **kwargs)
    agg = aggregate
    def _iterate_slices(self):
        # Yield (item_name, item) pairs over the Panel's items, skipping
        # excluded ones. Only grouping along axis 0 is supported.
        if self.axis == 0:
            # kludge
            if self._selection is None:
                slice_axis = self._selected_obj.items
            else:
                slice_axis = self._selection_list
            slicer = lambda x: self._selected_obj[x]
        else:
            raise NotImplementedError("axis other than 0 is not supported")
        for val in slice_axis:
            if val in self.exclusions:
                continue
            yield val, slicer(val)
    def aggregate(self, arg, *args, **kwargs):
        """
        Aggregate using input function or dict of {column -> function}

        Parameters
        ----------
        arg : function or dict
            Function to use for aggregating groups. If a function, must either
            work when passed a Panel or when passed to Panel.apply. If
            pass a dict, the keys must be DataFrame column names

        Returns
        -------
        aggregated : Panel
        """
        if isinstance(arg, compat.string_types):
            # String names dispatch to the named method on the groupby.
            return getattr(self, arg)(*args, **kwargs)
        return self._aggregate_generic(arg, *args, **kwargs)
    def _wrap_generic_output(self, result, obj):
        # Rebuild a Panel with the group labels on the grouped axis, then
        # swap axes back so the output matches the input orientation.
        if self.axis == 0:
            new_axes = list(obj.axes)
            new_axes[0] = self.grouper.result_index
        elif self.axis == 1:
            x, y, z = obj.axes
            new_axes = [self.grouper.result_index, z, x]
        else:
            x, y, z = obj.axes
            new_axes = [self.grouper.result_index, y, x]
        result = Panel._from_axes(result, new_axes)
        if self.axis == 1:
            result = result.swapaxes(0, 1).swapaxes(0, 2)
        elif self.axis == 2:
            result = result.swapaxes(0, 2)
        return result
    def _aggregate_item_by_item(self, func, *args, **kwargs):
        # Aggregate each item's DataFrame independently; only valid when
        # grouping along a non-item axis (axis > 0).
        obj = self._obj_with_exclusions
        result = {}
        if self.axis > 0:
            for item in obj:
                try:
                    itemg = DataFrameGroupBy(obj[item],
                                             axis=self.axis - 1,
                                             grouper=self.grouper)
                    result[item] = itemg.aggregate(func, *args, **kwargs)
                except (ValueError, TypeError):
                    raise
            new_axes = list(obj.axes)
            new_axes[self.axis] = self.grouper.result_index
            return Panel._from_axes(result, new_axes)
        else:
            raise ValueError("axis value must be greater than 0")
    def _wrap_aggregated_output(self, output, names=None):
        # Not implemented for Panel groupbys.
        raise com.AbstractMethodError(self)
class NDArrayGroupBy(GroupBy):
    # Placeholder; no ndarray-specific groupby behavior is implemented.
    pass
# ----------------------------------------------------------------------
# Splitting / application
class DataSplitter(object):
    """Iterate ``(group_number, chunk)`` pairs over data sorted by group."""
    def __init__(self, data, labels, ngroups, axis=0):
        self.data = data
        self.labels = _ensure_int64(labels)  # group label per position
        self.ngroups = ngroups
        self.axis = axis
    @cache_readonly
    def slabels(self):
        # Sorted labels
        return algorithms.take_nd(self.labels, self.sort_idx, allow_fill=False)
    @cache_readonly
    def sort_idx(self):
        # Counting sort indexer
        return get_group_index_sorter(self.labels, self.ngroups)
    def __iter__(self):
        sdata = self._get_sorted_data()
        if self.ngroups == 0:
            # we are inside a generator, rather than raise StopIteration
            # we merely return signal the end
            return
        starts, ends = lib.generate_slices(self.slabels, self.ngroups)
        for i, (start, end) in enumerate(zip(starts, ends)):
            # Since I'm now compressing the group ids, it's now not "possible"
            # to produce empty slices because such groups would not be observed
            # in the data
            # if start >= end:
            #     raise AssertionError('Start %s must be less than end %s'
            #                          % (str(start), str(end)))
            yield i, self._chop(sdata, slice(start, end))
    def _get_sorted_data(self):
        # Positionally reorder the data so each group is contiguous.
        return self.data._take(self.sort_idx, axis=self.axis, convert=False)
    def _chop(self, sdata, slice_obj):
        # Default chop: positional slice; subclasses override as needed.
        return sdata.iloc[slice_obj]
    def apply(self, f):
        raise com.AbstractMethodError(self)
class ArraySplitter(DataSplitter):
    # NOTE(review): no array-specific overrides — presumably relies entirely
    # on the base class; confirm whether this is ever instantiated directly.
    pass
class SeriesSplitter(DataSplitter):
    def _chop(self, sdata, slice_obj):
        # _get_values slices positionally; to_dense() densifies sparse
        # values (a no-op for regular Series).
        return sdata._get_values(slice_obj).to_dense()
class FrameSplitter(DataSplitter):
    """DataSplitter specialization for DataFrame inputs."""
    # NOTE: the redundant pass-through __init__ was removed; the base class
    # __init__ has the identical signature and behavior.
    def fast_apply(self, f, names):
        """Apply ``f`` group-wise via the cython fast path.

        Returns a 2-tuple ``(results, mutated)``. When the group slices
        cannot be built (e.g. all labels are -1) an empty result list and
        ``mutated=True`` are returned so the caller falls back to the
        slow path.
        """
        try:
            starts, ends = lib.generate_slices(self.slabels, self.ngroups)
        except Exception:
            # fails when all -1
            return [], True
        sdata = self._get_sorted_data()
        results, mutated = reduction.apply_frame_axis0(sdata, f, names,
                                                       starts, ends)
        return results, mutated
    def _chop(self, sdata, slice_obj):
        # Row slice when grouping along the index; column slice otherwise.
        if self.axis == 0:
            return sdata.iloc[slice_obj]
        else:
            return sdata._slice(slice_obj, axis=1)  # .loc[:, slice_obj]
class NDFrameSplitter(DataSplitter):
    def __init__(self, data, labels, ngroups, axis=0):
        super(NDFrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
        # Constructor used in _chop to rebuild the original NDFrame type
        # from a sliced BlockManager.
        self.factory = data._constructor
    def _get_sorted_data(self):
        # this is the BlockManager
        data = self.data._data
        # this is sort of wasteful but...
        sorted_axis = data.axes[self.axis].take(self.sort_idx)
        sorted_data = data.reindex_axis(sorted_axis, axis=self.axis)
        return sorted_data
    def _chop(self, sdata, slice_obj):
        # Slice the manager, then wrap back into the original NDFrame type.
        return self.factory(sdata.get_slice(slice_obj, axis=self.axis))
def get_splitter(data, *args, **kwargs):
    """Return the DataSplitter subclass instance appropriate for ``data``."""
    if isinstance(data, Series):
        splitter_cls = SeriesSplitter
    elif isinstance(data, DataFrame):
        splitter_cls = FrameSplitter
    else:
        splitter_cls = NDFrameSplitter
    return splitter_cls(data, *args, **kwargs)
| 33.565389
| 87
| 0.549821
|
4a09d6a205f75e174e585df4fb46c7dd76d1a6d5
| 5,062
|
py
|
Python
|
utils/transformations.py
|
Smilels/handtracking
|
edbabcbfdd2a624861410dcdf534c271a4bf9e3c
|
[
"Apache-2.0"
] | 34
|
2020-07-10T02:37:10.000Z
|
2022-02-25T12:53:54.000Z
|
utils/transformations.py
|
Smilels/handtracking
|
edbabcbfdd2a624861410dcdf534c271a4bf9e3c
|
[
"Apache-2.0"
] | 5
|
2020-07-22T11:26:28.000Z
|
2021-12-14T08:21:55.000Z
|
util/transformations.py
|
dejianwei/HigherA2J
|
655d993d4b835ec58396887a85b68ef506b5df9e
|
[
"MIT"
] | 9
|
2020-07-10T18:02:10.000Z
|
2022-01-12T04:12:43.000Z
|
"""Provides different transformation methods on images.
Copyright 2015 Markus Oberweger, ICG,
Graz University of Technology <oberweger@icg.tugraz.at>
This file is part of DeepPrior.
DeepPrior is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepPrior is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DeepPrior. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy
__author__ = "Paul Wohlhart <wohlhart@icg.tugraz.at>, Markus Oberweger <oberweger@icg.tugraz.at>"
__copyright__ = "Copyright 2015, ICG, Graz University of Technology, Austria"
__credits__ = ["Paul Wohlhart", "Markus Oberweger"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Markus Oberweger"
__email__ = "oberweger@icg.tugraz.at"
__status__ = "Development"
def getTransformationMatrix(center, rot, trans, scale):
    """Build a flattened 2x3 (row-major) 2D similarity transform.

    :param center: 2D center of the rotation/scaling (cx, cy)
    :param rot: rotation angle in radians
    :param trans: 2D translation (tx, ty)
    :param scale: uniform scale factor
    :return: numpy array of 6 coefficients [a, b, c, d, e, f] for the
             affine map (x, y) -> (a*x + b*y + c, d*x + e*y + f)
    """
    ca = numpy.cos(rot)
    sa = numpy.sin(rot)
    cx, cy = center[0], center[1]
    tx, ty = trans[0], trans[1]
    s = scale
    return numpy.array([
        ca * s, -sa * s, s * (ca * (-tx - cx) + sa * (cy + ty)) + cx,
        sa * s, ca * s, s * (ca * (-ty - cy) + sa * (-tx - cx)) + cy])
def transformPoint2D(pt, M):
    """
    Transform point in 2D coordinates
    :param pt: point coordinates
    :param M: 3x3 homogeneous transformation matrix (any reshapeable form)
    :return: transformed point as numpy array [x, y]
    """
    hom = numpy.asarray([pt[0], pt[1], 1])
    res = numpy.asarray(M).reshape((3, 3)).dot(hom)
    # Dehomogenize by the third coordinate.
    return numpy.asarray([res[0] / res[2], res[1] / res[2]])
def transformPoints2D(pts, M):
    """
    Transform points in 2D coordinates
    :param pts: (N, >=2) array of point coordinates; only columns 0:2 change
    :param M: 3x3 homogeneous transformation matrix
    :return: transformed points (copy; input is untouched)
    """
    out = pts.copy()
    for row in range(pts.shape[0]):
        out[row, 0:2] = transformPoint2D(pts[row, 0:2], M)
    return out
def rotatePoint2D(p1, center, angle):
    """
    Rotate a point in 2D around center
    :param p1: point in 2D (u,v,d); the depth (3rd) component is untouched
    :param center: 2D center of rotation
    :param angle: angle in deg
    :return: rotated point (copy; input is untouched)
    """
    alpha = angle * numpy.pi / 180.
    ca, sa = numpy.cos(alpha), numpy.sin(alpha)
    shifted = p1.copy()
    shifted[0:2] -= center[0:2]
    rotated = numpy.zeros_like(shifted)
    rotated[0] = ca * shifted[0] - sa * shifted[1]
    rotated[1] = sa * shifted[0] + ca * shifted[1]
    rotated[2] = shifted[2]
    rotated[0:2] += center[0:2]
    return rotated
def rotatePoints2D(pts, center, angle):
    """
    Rotate an array of 2D points (u,v,d) around center
    :param pts: (N, 3) array of point coordinates
    :param center: 2D center of rotation
    :param angle: angle in deg
    :return: rotated points (copy; input is untouched)
    """
    ret = pts.copy()
    # BUGFIX: `xrange` raises NameError on Python 3; `range` is equivalent.
    for i in range(pts.shape[0]):
        ret[i] = rotatePoint2D(pts[i], center, angle)
    return ret
def getRotationMatrix(angle_x, angle_y, angle_z):
    """
    Get 4x4 homogeneous rotation matrix from per-axis Euler angles.
    :param angle_x: angle around x-axis in deg
    :param angle_y: angle around y-axis in deg
    :param angle_z: angle around z-axis in deg
    :return: 4x4 rotation matrix
    """
    alpha_x = angle_x * numpy.pi / 180.
    alpha_y = angle_y * numpy.pi / 180.
    alpha_z = angle_z * numpy.pi / 180.
    R = numpy.eye(4)
    # Third-party dependency imported lazily so the rest of the module is
    # usable without transforms3d installed.
    from transforms3d.euler import euler2mat
    # 'rxyz': per transforms3d convention, rotations applied in x, y, z order.
    R[:3, :3] = euler2mat(alpha_x, alpha_y, alpha_z, 'rxyz')
    return R
def rotatePoint3D(p1, center, angle_x, angle_y, angle_z):
    """
    Rotate a point in 3D around center
    :param p1: point in 3D (x,y,z)
    :param center: 3D center of rotation
    :param angle_x: angle around x-axis in deg
    :param angle_y: angle around y-axis in deg
    :param angle_z: angle around z-axis in deg
    :return: rotated point
    """
    pp = p1.copy()  # work on a copy so the caller's point is not mutated
    pp -= center
    R = getRotationMatrix(angle_x, angle_y, angle_z)
    # Homogeneous coordinates: append w=1, rotate, then dehomogenize.
    pr = numpy.array([pp[0], pp[1], pp[2], 1])
    ps = numpy.dot(R, pr)
    ps = ps[0:3] / ps[3]
    ps += center
    return ps
def rotatePoints3D(pts, center, angle_x, angle_y, angle_z):
    """
    Rotate an array of 3D points around center
    :param pts: (N, 3) array of point coordinates
    :param center: 3D center of rotation
    :param angle_x: angle around x-axis in deg
    :param angle_y: angle around y-axis in deg
    :param angle_z: angle around z-axis in deg
    :return: rotated points (copy; input is untouched)
    """
    ret = pts.copy()
    # BUGFIX: `xrange` raises NameError on Python 3; `range` is equivalent.
    for i in range(pts.shape[0]):
        ret[i] = rotatePoint3D(pts[i], center, angle_x, angle_y, angle_z)
    return ret
def transformPoint3D(pt, M):
    """
    Transform point in 3D coordinates
    :param pt: point coordinates (x, y, z)
    :param M: 4x4 homogeneous transformation matrix (any reshapeable form)
    :return: transformed point as numpy array [x, y, z]
    """
    hom = numpy.asarray([pt[0], pt[1], pt[2], 1])
    res = numpy.dot(numpy.asarray(M).reshape((4, 4)), hom)
    # Dehomogenize by the fourth coordinate.
    return numpy.asarray([res[0] / res[3], res[1] / res[3], res[2] / res[3]])
| 30.311377
| 97
| 0.647768
|
4a09d6b4766a9cdd738fc619b99883822cdecc44
| 1,339
|
py
|
Python
|
demo/demo_cell.py
|
anjuchamantha/cellyzer---CDR-data-analyzer
|
185f2a14e5901d839d027aeea5860efe6c24a68f
|
[
"MIT"
] | 11
|
2020-04-03T10:53:59.000Z
|
2022-01-02T18:31:32.000Z
|
demo/demo_cell.py
|
anjuchamantha/cellyzer---CDR-data-analyzer
|
185f2a14e5901d839d027aeea5860efe6c24a68f
|
[
"MIT"
] | null | null | null |
demo/demo_cell.py
|
anjuchamantha/cellyzer---CDR-data-analyzer
|
185f2a14e5901d839d027aeea5860efe6c24a68f
|
[
"MIT"
] | 2
|
2020-07-01T06:21:48.000Z
|
2020-10-23T17:34:40.000Z
|
"""
This is for manual testing the library
"""
import cellyzer as cz
# Paths to the sample CDR/antenna datasets shipped with the demos.
call_file_path = "demo_datasets/long_data/calls_.csv"
antenna_file_path = "demo_datasets/test_data/antennas.csv"
# Load the call records first; the antenna (cell) dataset links back to them.
callDataSet = cz.read_call(call_file_path)
# NOTE(review): return value discarded — presumably only demonstrates the
# call_csv_path=None variant; confirm no required side effects.
cz.read_cell(antenna_file_path, call_csv_path=None)
antennaDataSet = cz.read_cell(antenna_file_path, call_dataset_obj=callDataSet, file_type='csv')
# print antenna data set
# cz.utils.print_dataset(antennaDataSet, name="Antenna Data set")
# get a record of a given cell id
# record = antennaDataSet.get_cell_records(cell_id=1)
# print("Record of cell %s : %s \n" % (1, record.__dict__))
#
# print(">> population around cell")
# population = antennaDataSet.get_population()
# print(population)
# cz.utils.tabulate_list_of_dictionaries(population)
# cz.visualization.cell_population_visualization(population)
#
# print(">> Trip details of user : %s" % "8d27cf2694")
# call_made_locations = antennaDataSet.get_trip_details("8d27cf2694", console_print=True, tabulate=True)
# cz.visualization.trip_visualization(call_made_locations, notebook=False)
#
# test = callDataSet.get_records()
# print(">> Unique users")
# print(antennaDataSet.get_unique_users_around_cell(test))
#
# print(">> Is %s recorded in cell %s" % ("123", 10))
# print(antennaDataSet.check_user_location_matches_cell(contact_no='123', cell_id=10))
| 37.194444
| 104
| 0.775952
|
4a09d6ba80817ee25bfb998609df0ae282064b1f
| 714
|
py
|
Python
|
inbenta_api_signature/url.py
|
inbenta-products/api-signature-client-python
|
d27e15b55746cbb1732387e81fc6b64ebef26d6d
|
[
"MIT"
] | null | null | null |
inbenta_api_signature/url.py
|
inbenta-products/api-signature-client-python
|
d27e15b55746cbb1732387e81fc6b64ebef26d6d
|
[
"MIT"
] | null | null | null |
inbenta_api_signature/url.py
|
inbenta-products/api-signature-client-python
|
d27e15b55746cbb1732387e81fc6b64ebef26d6d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""URL Parse functions."""
try:
# Python 3
from urllib.parse import urlparse, unquote_plus, quote, parse_qs, quote_plus
except ImportError:
# Python 2
from urllib import quote, unquote_plus, quote_plus
from urlparse import urlparse, parse_qs
__all__ = ['urlparse', 'parse_queryparams', 'quote', 'unquote_plus', 'quote_plus']
def parse_queryparams(qs, **kwargs):
    """Parse a query string the way PHP's parse_str() does.

    Keys ending in ``[]`` keep their full value list (suffix stripped);
    every other key collapses to its last occurrence.
    """
    parsed = {}
    for name, values in parse_qs(qs, **kwargs).items():
        if name.endswith('[]'):
            parsed[name[:-2]] = values
        elif isinstance(values, list):
            parsed[name] = values[-1]
        else:
            parsed[name] = values
    return parsed
| 27.461538
| 82
| 0.627451
|
4a09d6fd43f384e8abd4b427478f7841347b781f
| 8,395
|
py
|
Python
|
glance/common/scripts/utils.py
|
bwLehrpool/glance
|
d4119be0543bdaefe78fc11e16c3a01b55aa9e3a
|
[
"Apache-2.0"
] | null | null | null |
glance/common/scripts/utils.py
|
bwLehrpool/glance
|
d4119be0543bdaefe78fc11e16c3a01b55aa9e3a
|
[
"Apache-2.0"
] | null | null | null |
glance/common/scripts/utils.py
|
bwLehrpool/glance
|
d4119be0543bdaefe78fc11e16c3a01b55aa9e3a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__all__ = [
'get_task',
'unpack_task_input',
'set_base_image_properties',
'validate_location_uri',
'get_image_data_iter',
]
import urllib
from oslo_log import log as logging
from oslo_utils import timeutils
from glance.common import exception
from glance.i18n import _, _LE
LOG = logging.getLogger(__name__)
def get_task(task_repo, task_id):
    """Fetch a TaskProxy object, or None when it does not exist.

    :param task_repo: TaskRepo object used to perform DB operations
    :param task_id: ID of the Task
    """
    try:
        return task_repo.get(task_id)
    except exception.NotFound:
        # Log with traceback but swallow: callers handle a None task.
        msg = _LE('Task not found for task_id %s') % task_id
        LOG.exception(msg)
        return None
def unpack_task_input(task):
    """Verify and return the task's input dictionary.

    :param task: Task domain object
    :raises exception.Invalid: when a required field is missing
    """
    task_input = task.task_input
    if task.type == 'api_image_import':
        if not task_input:
            raise exception.Invalid(
                _("Input to api_image_import task is empty."))
        if 'image_id' not in task_input:
            raise exception.Invalid(_("Missing required 'image_id' field"))
        return task_input
    for field in ("import_from", "import_from_format", "image_properties"):
        if field not in task_input:
            raise exception.Invalid(
                _("Input does not contain '%(key)s' field") % {"key": field})
    return task_input
def set_base_image_properties(properties=None):
    """Populate default disk/container formats on an empty properties dict.

    Mutates ``properties`` in place only when it is an empty dict; anything
    else (non-empty dict, None, other types) is left untouched.

    :param properties: Input dict to set some base properties
    """
    # TODO(nikhil): We can make these properties configurable while
    # implementing the pipeline logic for the scripts. The below shown
    # are placeholders to show that the scripts work on 'devstack'
    # environment.
    if isinstance(properties, dict) and not properties:
        properties['disk_format'] = 'qcow2'
        properties['container_format'] = 'bare'
def validate_location_uri(location):
    """Validate location uri into acceptable format.

    :param location: Location uri to be validated
    :returns: the location, when it is an http(s) uri
    :raises exception.BadStoreUri: empty location or file-based uri
    :raises urllib.error.URLError: any other unsupported scheme
    """
    if not location:
        raise exception.BadStoreUri(_('Invalid location: %s') % location)
    if location.startswith(('http://', 'https://')):
        return location
    # NOTE: file type uri is being avoided for security reasons,
    # see LP bug #942118 #1400966.
    if location.startswith(("file:///", "filesystem:///")):
        msg = _("File based imports are not allowed. Please use a non-local "
                "source of image data.")
        # NOTE: raise BadStoreUri and let the encompassing block save the error
        # msg in the task.message.
        raise exception.BadStoreUri(msg)
    # TODO(nikhil): add other supported uris
    supported = ['http', ]
    msg = _("The given uri is not valid. Please specify a "
            "valid uri from the following list of supported uri "
            "%(supported)s") % {'supported': supported}
    raise urllib.error.URLError(msg)
def get_image_data_iter(uri):
    """Return an iterable over the data at ``uri`` (local file or remote).

    Validation/sanitization of the uri is expected to happen before we get
    here.

    :param uri: uri (remote or local) to the datasource we want to iterate
    """
    # NOTE(flaper87): This is safe because the input uri is already
    # verified before the task is created.
    if not uri.startswith("file://"):
        return urllib.request.urlopen(uri)
    local_path = uri.split("file://")[-1]
    # NOTE(flaper87): The caller of this function expects to have
    # an iterable object. FileObjects in python are iterable, therefore
    # we are returning it as is.
    # The file descriptor will be eventually cleaned up by the garbage
    # collector once its ref-count is dropped to 0. That is, when there
    # won't be any references pointing to this file.
    #
    # We're not using StringIO or other tools to avoid reading everything
    # into memory. Some images may be quite heavy.
    return open(local_path, "rb")
class CallbackIterator(object):
    """A proxy iterator that calls a callback function periodically

    This is used to wrap a reading file object and proxy its chunks
    through to another caller. Periodically, the callback function
    will be called with information about the data processed so far,
    allowing for status updating or cancel flag checking. The function
    can be called every time we process a chunk, or only after we have
    processed a certain amount of data since the last call.

    :param source: A source iterator whose content will be proxied
                   through this object.
    :param callback: A function to be called periodically while iterating.
                     The signature should be fn(chunk_bytes, total_bytes),
                     where chunk is the number of bytes since the last
                     call of the callback, and total_bytes is the total amount
                     copied thus far.
    :param min_interval: Limit the calls to callback to only when this many
                         seconds have elapsed since the last callback (a
                         close() or final iteration may fire the callback in
                         less time to ensure completion).
    """
    def __init__(self, source, callback, min_interval=None):
        self._source = source
        self._callback = callback
        self._min_interval = min_interval
        self._chunk_bytes = 0   # bytes seen since the last callback fired
        self._total_bytes = 0   # bytes seen overall
        self._timer = None      # lazily-created StopWatch (see callback_due)
    @property
    def callback_due(self):
        """Indicates if a callback should be made.

        If no time-based limit is set, this will always be True.
        If a limit is set, then this returns True exactly once,
        resetting the timer when it does.
        """
        if not self._min_interval:
            return True
        if not self._timer:
            # Created on first use so no clock runs before iteration starts.
            self._timer = timeutils.StopWatch(self._min_interval)
            self._timer.start()
        if self._timer.expired():
            self._timer.restart()
            return True
        else:
            return False
    def __iter__(self):
        return self
    def __next__(self):
        try:
            chunk = next(self._source)
        except StopIteration:
            # NOTE(danms): Make sure we call the callback the last
            # time if we have processed data since the last one.
            self._call_callback(b'', is_last=True)
            raise
        self._call_callback(chunk)
        return chunk
    def close(self):
        # Flush any pending callback, then close the source if it supports it.
        self._call_callback(b'', is_last=True)
        if hasattr(self._source, 'close'):
            return self._source.close()
    def _call_callback(self, chunk, is_last=False):
        """Account for ``chunk`` and fire the callback when due."""
        self._total_bytes += len(chunk)
        self._chunk_bytes += len(chunk)
        if not self._chunk_bytes:
            # NOTE(danms): Never call the callback if we haven't processed
            # any data since the last time
            return
        if is_last or self.callback_due:
            # FIXME(danms): Perhaps we should only abort the read if
            # the callback raises a known abort exception, otherwise
            # log and swallow. Need to figure out what exception
            # read() callers would be expecting that we could raise
            # from here.
            self._callback(self._chunk_bytes, self._total_bytes)
            self._chunk_bytes = 0
    def read(self, size=None):
        # File-like access path: proxy the read and account for it the same
        # way as iteration.
        chunk = self._source.read(size)
        self._call_callback(chunk)
        return chunk
| 35.125523
| 79
| 0.640262
|
4a09d75351a1c74c248bb87889056c90e80e56c0
| 18,527
|
py
|
Python
|
tests/test_layout_objects.py
|
bcvandendool/crispy-bootstrap5
|
7f14ac490b6f0c28549d801a284b64f686271338
|
[
"MIT"
] | 170
|
2020-11-27T12:40:14.000Z
|
2022-03-29T03:59:59.000Z
|
tests/test_layout_objects.py
|
bcvandendool/crispy-bootstrap5
|
7f14ac490b6f0c28549d801a284b64f686271338
|
[
"MIT"
] | 68
|
2020-11-08T20:58:08.000Z
|
2022-03-17T20:24:08.000Z
|
tests/test_layout_objects.py
|
bcvandendool/crispy-bootstrap5
|
7f14ac490b6f0c28549d801a284b64f686271338
|
[
"MIT"
] | 40
|
2021-02-08T04:58:15.000Z
|
2022-03-18T20:34:21.000Z
|
import random
import pytest
from crispy_forms.bootstrap import (
Accordion,
AccordionGroup,
Alert,
AppendedText,
FieldWithButtons,
InlineCheckboxes,
InlineField,
InlineRadios,
PrependedAppendedText,
PrependedText,
StrictButton,
Tab,
TabHolder,
)
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, Field, Layout, MultiWidgetField
from crispy_forms.utils import render_crispy_form
from django import forms
from django.template import Context, Template
from django.utils.translation import activate, deactivate
from django.utils.translation import gettext as _
from crispy_bootstrap5.bootstrap5 import BS5Accordion, FloatingField
from .forms import (
CheckboxesSampleForm,
CustomCheckboxSelectMultiple,
CustomRadioSelect,
GroupedChoiceForm,
InputsForm,
SampleForm,
SampleFormCustomWidgets,
)
from .utils import parse_expected, parse_form
def test_field_with_custom_template():
    # A Field given template= must render with that template instead of the
    # pack's default field template.
    test_form = SampleForm()
    test_form.helper = FormHelper()
    test_form.helper.layout = Layout(
        Field("email", template="custom_field_template.html")
    )
    html = render_crispy_form(test_form)
    assert "<h1>Special custom field</h1>" in html
def test_multiwidget_field():
    # MultiWidgetField: the attrs tuple supplies one attribute dict per
    # sub-widget of the SplitDateTime widget.
    template = Template(
        """
        {% load crispy_forms_tags %}
        {% crispy form %}
    """
    )
    test_form = SampleForm()
    test_form.helper = FormHelper()
    test_form.helper.layout = Layout(
        MultiWidgetField(
            "datetime_field",
            attrs=(
                {"rel": "test_dateinput"},
                {"rel": "test_timeinput", "style": "width: 30px;", "type": "hidden"},
            ),
        )
    )
    c = Context({"form": test_form})
    html = template.render(c)
    # NOTE(review): style/type/test_timeinput counted twice while
    # test_dateinput appears once — presumably how attrs distribute over the
    # rendered sub-widgets; confirm against crispy-forms MultiWidgetField docs.
    assert html.count('class="dateinput') == 1
    assert html.count('rel="test_dateinput"') == 1
    assert html.count('rel="test_timeinput"') == 2
    assert html.count('style="width: 30px;"') == 2
    assert html.count('type="hidden"') == 2
def test_field_type_hidden():
    # Field(type="hidden", **extra) should emit a hidden input carrying the
    # extra data-* attributes, without disturbing the sibling field.
    template = Template(
        """
        {% load crispy_forms_tags %}
        {% crispy test_form %}
    """
    )
    test_form = SampleForm()
    test_form.helper = FormHelper()
    test_form.helper.layout = Layout(
        Field("email", type="hidden", data_test=12),
        Field("datetime_field"),
    )
    c = Context({"test_form": test_form})
    html = template.render(c)
    # Check form parameters
    assert html.count('data-test="12"') == 1
    assert html.count('name="email"') == 1
    assert html.count('class="dateinput') == 1
    assert html.count('class="timeinput') == 1
def test_field_wrapper_class(settings):
    # wrapper_class should be appended to the pack's field-wrapper div class.
    form = SampleForm()
    form.helper = FormHelper()
    form.helper.layout = Layout(Field("email", wrapper_class="testing"))
    html = render_crispy_form(form)
    # NOTE(review): no assertion runs when CRISPY_TEMPLATE_PACK is
    # "bootstrap5" — confirm whether a bootstrap5 branch is missing here.
    if settings.CRISPY_TEMPLATE_PACK == "bootstrap":
        assert html.count('class="control-group testing"') == 1
    elif settings.CRISPY_TEMPLATE_PACK in ("bootstrap3", "bootstrap4"):
        assert html.count('class="form-group testing"') == 1
def test_html_with_carriage_returns(settings):
    # HTML() content must be emitted verbatim; newline counts differ per
    # template pack because each pack's form template adds its own lines.
    test_form = SampleForm()
    test_form.helper = FormHelper()
    test_form.helper.layout = Layout(
        HTML(
            """
            if (a==b){
                // some comment
                a+1;
                foo();
            }
        """
        )
    )
    html = render_crispy_form(test_form)
    if settings.CRISPY_TEMPLATE_PACK == "uni_form":
        assert html.count("\n") == 23
    elif settings.CRISPY_TEMPLATE_PACK == "bootstrap":
        assert html.count("\n") == 25
    else:
        assert html.count("\n") == 27
def test_i18n():
    # HTML(_(...)) should render through Django's translation machinery:
    # activating 'es' must yield the Spanish catalog string.
    activate("es")
    form = SampleForm()
    form.helper = FormHelper()
    form.helper.layout = Layout(HTML(_("Enter a valid value.")))
    html = render_crispy_form(form)
    assert "Introduzca un valor válido" in html
    # Restore the default language so later tests are unaffected.
    deactivate()
def test_remove_labels():
    # Setting field.label = False must suppress every <label> tag.
    form = SampleForm()
    # remove boolean field as label is still printed in boostrap
    del form.fields["is_company"]
    for fields in form:
        fields.label = False
    html = render_crispy_form(form)
    assert "<label" not in html
@pytest.mark.parametrize(
    "input,expected",
    [
        ("text_input", "text_input.html"),
        ("text_area", "text_area.html"),
        ("checkboxes", "checkboxes.html"),
        ("radio", "radio.html"),
        ("single_checkbox", "single_checkbox.html"),
    ],
)
def test_inputs(input, expected):
    # Golden-file check: each widget's rendered form must match the parsed
    # expected-HTML fixture for that widget.
    form = InputsForm()
    form.helper = FormHelper()
    form.helper.layout = Layout(input)
    assert parse_form(form) == parse_expected(expected)
class TestBootstrapLayoutObjects:
    """Integration tests for Bootstrap-specific crispy-forms layout objects.

    Each test builds a form with a ``FormHelper`` layout and verifies the
    rendered HTML, either against a stored fixture (``parse_expected``) or by
    counting expected markup fragments in the output of
    ``render_crispy_form``.
    """

    def test_custom_django_widget(self):
        """Widgets inheriting from Django's built-ins render like the base widget."""
        # Make sure an inherited RadioSelect gets rendered as it
        form = SampleFormCustomWidgets()
        assert isinstance(form.fields["inline_radios"].widget, CustomRadioSelect)
        form.helper = FormHelper()
        form.helper.layout = Layout("inline_radios")
        html = render_crispy_form(form)
        print(html)
        assert 'class="form-check-input"' in html
        # Make sure an inherited CheckboxSelectMultiple gets rendered as it
        assert isinstance(
            form.fields["checkboxes"].widget, CustomCheckboxSelectMultiple
        )
        form.helper.layout = Layout("checkboxes")
        html = render_crispy_form(form)
        assert 'class="form-check-input"' in html

    def test_prepended_appended_text(self):
        """Prepended/appended text variants render to the stored fixture."""
        test_form = SampleForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            PrependedAppendedText(
                # "@<>&" also exercises HTML-escaping of the prepended text
                "email", "@<>&", "gmail.com", css_class="form-control-lg"
            ),
            AppendedText("password1", "#"),
            PrependedText("password2", "$"),
        )
        assert parse_form(test_form) == parse_expected(
            "test_prepended_appended_text.html"
        )

    def test_inline_radios(self):
        """InlineRadios renders one inline form-check per choice."""
        test_form = CheckboxesSampleForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(InlineRadios("inline_radios"))
        html = render_crispy_form(test_form)
        assert html.count('form-check-inline"') == 2

    def test_accordion_and_accordiongroup(self):
        """Accordion with two groups renders to the stored fixture."""
        # fixed seed: layout objects use random ids that must match the fixture
        random.seed(0)
        form = SampleForm()
        form.helper = FormHelper()
        form.helper.form_tag = False
        form.helper.layout = Layout(
            Accordion(
                AccordionGroup("one", "first_name"),
                AccordionGroup("two", "password1", "password2"),
            )
        )
        assert parse_form(form) == parse_expected("accordion.html")

    def test_accordion_active_false_not_rendered(self):
        """``active=False`` suppresses the "collapse show" state of a group."""
        test_form = SampleForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            Accordion(
                AccordionGroup("one", "first_name"),
                # there is no ``active`` kwarg here.
            )
        )
        # The first time, there should be one of them there.
        html = render_crispy_form(test_form)
        accordion_class = "collapse show"
        assert (
            html.count('<div id="one" class="accordion-collapse %s"' % accordion_class)
            == 1
        )
        test_form.helper.layout = Layout(
            Accordion(
                AccordionGroup("one", "first_name", active=False),
            )  # now ``active`` manually set as False
        )
        # This time, it shouldn't be there at all.
        html = render_crispy_form(test_form)
        assert (
            html.count('<div id="one" class="accordion-collapse %s"' % accordion_class)
            == 0
        )

    def test_bs5accordion(self):
        """BS5Accordion default rendering matches the plain Accordion fixture."""
        random.seed(0)
        form = SampleForm()
        form.helper = FormHelper()
        form.helper.form_tag = False
        form.helper.layout = Layout(
            BS5Accordion(
                AccordionGroup("one", "first_name"),
                AccordionGroup("two", "password1", "password2"),
            )
        )
        assert parse_form(form) == parse_expected("accordion.html")

    def test_bs5accordion_active_false_not_rendered(self):
        """``active=False`` suppresses "collapse show" in a BS5Accordion too."""
        test_form = SampleForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            BS5Accordion(
                AccordionGroup("one", "first_name"),
                # there is no ``active`` kwarg here.
            )
        )
        # The first time, there should be one of them there.
        html = render_crispy_form(test_form)
        accordion_class = "collapse show"
        assert (
            html.count('<div id="one" class="accordion-collapse %s"' % accordion_class)
            == 1
        )
        test_form.helper.layout = Layout(
            BS5Accordion(
                AccordionGroup("one", "first_name", active=False),
            )  # now ``active`` manually set as False
        )
        # This time, it shouldn't be there at all.
        html = render_crispy_form(test_form)
        assert (
            html.count('<div id="one" class="accordion-collapse %s"' % accordion_class)
            == 0
        )

    def test_bs5accordion_flush(self):
        """``flush=True`` renders the accordion-flush variant fixture."""
        random.seed(0)
        test_form = SampleForm()
        test_form.helper = FormHelper()
        test_form.helper.form_tag = False
        test_form.helper.layout = Layout(
            BS5Accordion(
                AccordionGroup("one", "first_name"),
                AccordionGroup("two", "password1", "password2"),
                flush=True,
            )
        )
        assert parse_form(test_form) == parse_expected("accordion_flush.html")

    def test_bs5accordion_always_open(self):
        """``always_open=True`` renders the always-open variant fixture."""
        random.seed(0)
        test_form = SampleForm()
        test_form.helper = FormHelper()
        test_form.helper.form_tag = False
        test_form.helper.layout = Layout(
            BS5Accordion(
                AccordionGroup("one", "first_name"),
                AccordionGroup("two", "password1", "password2"),
                always_open=True,
            )
        )
        assert parse_form(test_form) == parse_expected("accordion_always_open.html")

    def test_alert(self):
        """Alert renders a dismissible alert div with its content."""
        test_form = SampleForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(Alert(content="Testing..."))
        html = render_crispy_form(test_form)
        assert html.count('<div class="alert"') == 1
        assert html.count('<button type="button" class="close"') == 1
        assert html.count("Testing...") == 1

    def test_alert_block(self):
        """``block=True`` adds the alert-block css class."""
        test_form = SampleForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(Alert(content="Testing...", block=True))
        html = render_crispy_form(test_form)
        assert html.count('<div class="alert alert-block"') == 1
        assert html.count("Testing...") == 1

    def test_tab_and_tab_holder(self):
        """TabHolder/Tab render nav tabs plus one tab-pane per Tab."""
        test_form = SampleForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            TabHolder(
                Tab(
                    "one",
                    "first_name",
                    css_id="custom-name",
                    css_class="first-tab-class active",
                ),
                Tab("two", "password1", "password2"),
            )
        )
        html = render_crispy_form(test_form)
        assert (
            html.count(
                '<ul class="nav nav-tabs"> <li class="nav-item">'
                '<a class="nav-link active" href="#custom-name" data-bs-toggle="tab">'
                "One</a></li>"
            )
            == 1
        )
        assert html.count("tab-pane") == 2
        assert html.count('class="tab-pane first-tab-class active"') == 1
        assert html.count('<div id="custom-name"') == 1
        assert html.count('<div id="two"') == 1
        assert html.count('name="first_name"') == 1
        assert html.count('name="password1"') == 1
        assert html.count('name="password2"') == 1

    def test_tab_helper_reuse(self):
        """A class-level (shared) helper must stay correct across renders."""
        # this is a proper form, according to the docs.
        # note that the helper is a class property here,
        # shared between all instances
        class SampleForm(forms.Form):
            val1 = forms.CharField(required=False)
            val2 = forms.CharField(required=True)
            helper = FormHelper()
            helper.layout = Layout(
                TabHolder(
                    Tab("one", "val1"),
                    Tab("two", "val2"),
                )
            )

        # first render of form => everything is fine
        test_form = SampleForm()
        html = render_crispy_form(test_form)
        # second render of form => first tab should be active,
        # but not duplicate class
        test_form = SampleForm()
        html = render_crispy_form(test_form)
        assert html.count('class="nav-item active active"') == 0
        # render a new form, now with errors
        test_form = SampleForm(data={"val1": "foo"})
        html = render_crispy_form(test_form)
        tab_class = "tab-pane"
        # if settings.CRISPY_TEMPLATE_PACK == 'bootstrap4':
        #     tab_class = 'nav-link'
        # else:
        #     tab_class = 'tab-pane'
        # tab 1 should not be active
        assert html.count('<div id="one" \n class="{} active'.format(tab_class)) == 0
        # tab 2 should be active
        assert html.count('<div id="two" \n class="{} active'.format(tab_class)) == 1

    def test_radio_attrs(self):
        """Widget attrs set on radio/checkbox widgets survive rendering."""
        form = CheckboxesSampleForm()
        form.fields["inline_radios"].widget.attrs = {"class": "first"}
        form.fields["checkboxes"].widget.attrs = {"class": "second"}
        html = render_crispy_form(form)
        assert 'class="first"' in html
        assert 'class="second"' in html

    def test_field_with_buttons(self):
        """FieldWithButtons renders the field plus all attached buttons."""
        form = SampleForm()
        form.helper = FormHelper()
        form.helper.layout = Layout(
            FieldWithButtons(
                Field("password1", css_class="span4"),
                StrictButton("Go!", css_id="go-button"),
                StrictButton("No!", css_class="extra"),
                StrictButton("Test", type="submit", name="whatever", value="something"),
                css_class="extra",
                autocomplete="off",
            )
        )
        html = render_crispy_form(form)
        form_group_class = "mb-3"
        assert html.count('class="%s extra"' % form_group_class) == 1
        assert html.count('autocomplete="off"') == 1
        assert html.count('class="span4') == 1
        assert html.count('id="go-button"') == 1
        assert html.count("Go!") == 1
        assert html.count("No!") == 1
        assert html.count('class="btn"') == 2
        assert html.count('class="btn extra"') == 1
        assert html.count('type="submit"') == 1
        assert html.count('name="whatever"') == 1
        assert html.count('value="something"') == 1

    def test_hidden_fields(self):
        """Hidden fields render as bare hidden inputs with no labels."""
        form = SampleForm()
        # All fields hidden
        for field in form.fields:
            form.fields[field].widget = forms.HiddenInput()
        form.helper = FormHelper()
        form.helper.layout = Layout(
            AppendedText("password1", "foo"),
            PrependedText("password2", "bar"),
            PrependedAppendedText("email", "bar"),
            InlineCheckboxes("first_name"),
            InlineRadios("last_name"),
        )
        html = render_crispy_form(form)
        assert html.count("<input") == 5
        assert html.count('type="hidden"') == 5
        assert html.count("<label") == 0

    def test_multiplecheckboxes(self):
        """Pre-selected checkbox choices render with `checked`."""
        test_form = CheckboxesSampleForm()
        html = render_crispy_form(test_form)
        assert html.count("checked") == 6
        test_form.helper = FormHelper(test_form)
        test_form.helper[1].wrap(InlineCheckboxes, inline=True)
        html = render_crispy_form(test_form)
        # TODO Fix this test
        # assert html.count('form-check-input"') == 3

    def test_multiple_checkboxes_unique_ids(self):
        """Every rendered checkbox option gets a unique id attribute."""
        test_form = CheckboxesSampleForm()
        html = render_crispy_form(test_form)
        expected_ids = [
            "checkboxes_0",
            "checkboxes_1",
            "checkboxes_2",
            "alphacheckboxes_0",
            "alphacheckboxes_1",
            "alphacheckboxes_2",
            "numeric_multiple_checkboxes_0",
            "numeric_multiple_checkboxes_1",
            "numeric_multiple_checkboxes_2",
        ]
        for id_suffix in expected_ids:
            expected_str = f'id="id_{id_suffix}"'
            assert html.count(expected_str) == 1

    def test_inline_field(self):
        """InlineField with wrapper_class renders to the stored fixture."""
        form = SampleForm()
        form.helper = FormHelper()
        form.helper.layout = Layout(
            InlineField("first_name", wrapper_class="col-4"),
            InlineField("is_company", wrapper_class="col-4"),
        )
        form.helper.form_class = "row row-cols-lg-auto align-items-center"
        assert parse_form(form) == parse_expected("test_inline_field.html")

    def test_float_field(self):
        """FloatingField renders to fixtures for bound and unbound forms."""
        form = SampleForm()
        form.helper = FormHelper()
        form.helper.layout = Layout(
            FloatingField("first_name"),
        )
        assert parse_form(form) == parse_expected("test_floating_field.html")
        # bound form with validation errors ("failing" fixture)
        form = InputsForm({})
        form.helper = FormHelper()
        form.helper.layout = Layout(
            FloatingField("text_area"),
            FloatingField("select_input"),
        )
        assert parse_form(form) == parse_expected("test_floating_field_failing.html")

    def test_grouped_checkboxes_radios(self):
        """Grouped choice widgets render to fixtures, bound and unbound."""
        form = GroupedChoiceForm()
        form.helper = FormHelper()
        form.helper.layout = Layout("checkbox_select_multiple")
        assert parse_form(form) == parse_expected("test_grouped_checkboxes.html")
        form.helper.layout = Layout("radio")
        assert parse_form(form) == parse_expected("test_grouped_radios.html")
        # bound form with validation errors ("failing" fixtures)
        form = GroupedChoiceForm({})
        form.helper = FormHelper()
        form.helper.layout = Layout("checkbox_select_multiple")
        assert parse_form(form) == parse_expected(
            "test_grouped_checkboxes_failing.html"
        )
        form.helper.layout = Layout("radio")
        assert parse_form(form) == parse_expected("test_grouped_radios_failing.html")
| 32.79115
| 88
| 0.591461
|
4a09d86e425cf690b981dc0f42bf5747f93f25f4
| 4,831
|
py
|
Python
|
tests/test_analysis/test_outliers/test_hist_outliers.py
|
Pacman1984/etna
|
9b3ccb980e576d56858f14aca2e06ce2957b0fa9
|
[
"Apache-2.0"
] | 326
|
2021-11-18T15:30:50.000Z
|
2022-03-31T09:44:15.000Z
|
tests/test_analysis/test_outliers/test_hist_outliers.py
|
Pacman1984/etna
|
9b3ccb980e576d56858f14aca2e06ce2957b0fa9
|
[
"Apache-2.0"
] | 305
|
2021-11-17T10:28:31.000Z
|
2022-03-31T18:05:03.000Z
|
tests/test_analysis/test_outliers/test_hist_outliers.py
|
Pacman1984/etna
|
9b3ccb980e576d56858f14aca2e06ce2957b0fa9
|
[
"Apache-2.0"
] | 29
|
2021-11-21T12:10:48.000Z
|
2022-03-31T22:55:06.000Z
|
import numpy as np
import pytest
from etna.analysis.outliers import get_anomalies_hist
from etna.analysis.outliers.hist_outliers import compute_f
from etna.analysis.outliers.hist_outliers import hist
from etna.analysis.outliers.hist_outliers import v_optimal_hist
@pytest.mark.parametrize(
    "series,bins_number,expected",
    (
        (np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]), 1, 60),
        (np.array([1, 2, 3, 4, -1, 0, -2, -2, -1]), 2, 7.8),
        (np.array([1, 2, 3, 100, 36, 64, -1, 0, -2, -2, -1]), 4, 396.8),
        (np.array([1, 2, 3, 4, 5, 6, 6, 7]), 7, 0),
    ),
)
def test_v_optimal_hist_one_value(series: np.array, bins_number: int, expected: float):
    """Check that v_optimal_hist works correctly."""
    # Prefix sums of the values and of their squares, required by v_optimal_hist.
    prefix_sum = np.cumsum(series)
    prefix_sq_sum = np.cumsum(series ** 2)
    errors = v_optimal_hist(series, bins_number, prefix_sum, prefix_sq_sum)
    assert errors[len(series) - 1][bins_number - 1] == expected
@pytest.mark.parametrize(
    "series,bins_number,expected",
    (
        (np.array([-1, 0, 4, 3, 8]), 2, np.array([[0, 0], [0.5, 0], [14, 0.5], [17, 1], [50.8, 14.5]])),
        (
            np.array([4, 2, 3, 5, 3, 1]),
            3,
            np.array([[0, 0, 0], [2, 0, 0], [2, 0.5, 0], [5, 2, 0.5], [5.2, 4, 2], [10, 5.2, 4]]),
        ),
    ),
)
def test_v_optimal_hist(series: np.array, bins_number: int, expected: np.array):
    """Check that v_optimal_hist works correctly."""
    # Prefix sums of the values and of their squares, required by v_optimal_hist.
    prefix_sum = np.cumsum(series)
    prefix_sq_sum = np.cumsum(series ** 2)
    errors = v_optimal_hist(series, bins_number, prefix_sum, prefix_sq_sum)
    np.testing.assert_almost_equal(errors, expected)
@pytest.mark.parametrize("series_len,k", ((100, 10), (100, 20), (10, 4)))
def test_compute_f_format(random_seed, series_len: int, k: int):
    """Check that computeF produce the correct size output."""
    series = np.random.random(size=series_len)
    # Prefix sums of the values and of their squares, required by compute_f.
    prefix_sum = np.cumsum(series)
    prefix_sq_sum = np.cumsum(series ** 2)
    _, idx = compute_f(series, k, prefix_sum, prefix_sq_sum)
    # Every stored index tuple idx[a][b][c][i] must hold exactly c elements.
    n = len(series)
    for a in range(n):
        for b in range(a + 1, n):
            for c in range(1, min(b - a + 1, k + 1)):
                for entry in idx[a][b][c]:
                    assert len(entry) == c
@pytest.mark.parametrize(
    "series,k,dim,expected",
    (
        (
            np.array([1, 0, 2, 3, 5]),
            3,
            0,
            np.array([[0, 0, 0, 0], [0.5, 0, 0, 0], [2, 0.5, 0, 0], [5, 2, 0.5, 0], [14.8, 5, 2, 0.5]]),
        ),
        (
            np.array([-6, -3, 0, -6, -1]),
            3,
            0,
            np.array([[0, 0, 0, 0], [4.5, 0, 0, 0], [18, 4.5, 0, 0], [24.75, 6, 0, 0], [30.8, 18, 6, 0]]),
        ),
        (
            np.array([1, 2, 3, 1, 5, 2]),
            3,
            2,
            np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [2, 0, 0, 0], [8, 2, 0, 0], [8.75, 2, 0.5, 0]]),
        ),
        (
            np.array([1, 2, 3, 1, 5, 6]),
            3,
            2,
            np.array(
                [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [2, 0, 0, 0], [8, 2, 0, 0], [14.75, 4.66666667, 0.5, 0]]
            ),
        ),
    ),
)
def test_computef(series: np.array, k: int, dim: int, expected: np.array):
    """Check that computeF works correctly."""
    # Prefix sums of the values and of their squares, required by compute_f.
    prefix_sum = np.cumsum(series)
    prefix_sq_sum = np.cumsum(series ** 2)
    computed, _ = compute_f(series, k, prefix_sum, prefix_sq_sum)
    np.testing.assert_almost_equal(computed[dim], expected)
@pytest.mark.parametrize(
    "series,bins_number,expected",
    (
        (np.array([1, 0, 1, -1, 0, 4, 1, 0, 1, 0, 1, 1, 0, 0, -1, 0, 0]), 5, np.array([3, 5, 14])),
        (np.array([4, 5, 4, 3, 9, 10, 8, 2, 1, 0, 1, 1, 5, 1, 2]), 4, np.array([12])),
    ),
)
def test_hist(series: np.array, bins_number: int, expected: np.array):
    """Check that hist works correctly."""
    detected_anomalies = hist(series, bins_number)
    np.testing.assert_array_equal(detected_anomalies, expected)
def test_in_column(outliers_df_with_two_columns):
    """get_anomalies_hist detects per-segment outliers in the given column."""
    expected = {"1": [np.datetime64("2021-01-08")], "2": [np.datetime64("2021-01-26")]}
    outliers = get_anomalies_hist(ts=outliers_df_with_two_columns, in_column="feature")
    for segment, timestamps in expected.items():
        assert segment in outliers
        np.testing.assert_array_equal(outliers[segment], timestamps)
| 36.052239
| 115
| 0.519354
|
4a09d9c9918fd6a3815b091286b7310ca2e6f5f2
| 744
|
py
|
Python
|
2015/MAC0327/Desafios 1/aula5/p20.py
|
andredalton/bcc
|
188190e436615e2344d87b722856fa02e6eec9cc
|
[
"Apache-2.0"
] | 1
|
2018-08-02T14:09:26.000Z
|
2018-08-02T14:09:26.000Z
|
2015/MAC0327/Desafios 1/aula5/p20.py
|
andredalton/bcc
|
188190e436615e2344d87b722856fa02e6eec9cc
|
[
"Apache-2.0"
] | null | null | null |
2015/MAC0327/Desafios 1/aula5/p20.py
|
andredalton/bcc
|
188190e436615e2344d87b722856fa02e6eec9cc
|
[
"Apache-2.0"
] | 1
|
2020-07-13T04:27:02.000Z
|
2020-07-13T04:27:02.000Z
|
__author__ = 'andre'
# NOTE: Python 2 source -- relies on raw_input(), long() and print statements.
def main():
    """Read bill records and travel events from stdin (Python 2).

    First reads ``n`` lines of "<bill> <location> <value>", indexing bills by
    id and accumulating the total value per location.  Then reads ``m`` and
    ``k`` and processes ``k`` "<day> <bill> <location>" events, printing a
    message (in Portuguese) when the first trip / a new travel day starts.
    """
    n = int(raw_input())
    bills = {}  # bill id -> {'location': ..., 'money': ...}
    locations = {}  # location -> accumulated value of its bills
    for i in range(n):
        (bil, location, value) = raw_input().split()
        value = long(value)  # Python 2 arbitrary-precision integer
        bills[bil] = {'location': location, 'money': value}
        try:
            # EAFP: add to the running total if the location is already known
            locations[location] += value
        except KeyError:
            locations[location] = value
    (m, k) = map(int, raw_input().split())
    current_day = 0  # 0 means "no trip seen yet"
    for i in range(k):
        (day, bil, location) = raw_input().split()
        day = int(day)
        if current_day == 0:
            # "Primeira viagem." = "First trip."
            print "Primeira viagem."
            current_day = day
        elif current_day != day:
            # "Novo dia de viagem" = "New travel day"
            # NOTE(review): current_day is not updated here -- presumably a
            # bug (every later day reprints the message); confirm intent.
            print "Novo dia de viagem"
if __name__ == "__main__":
    main()
| 25.655172
| 59
| 0.524194
|
4a09da20612bb903460dee2297e024ec117c2019
| 52,908
|
py
|
Python
|
instapy/unfollow_util.py
|
fierysolid/InstaPy
|
a944520b107e6501e1fc41255049ff330f7a1a42
|
[
"MIT"
] | null | null | null |
instapy/unfollow_util.py
|
fierysolid/InstaPy
|
a944520b107e6501e1fc41255049ff330f7a1a42
|
[
"MIT"
] | null | null | null |
instapy/unfollow_util.py
|
fierysolid/InstaPy
|
a944520b107e6501e1fc41255049ff330f7a1a42
|
[
"MIT"
] | null | null | null |
""" Module which handles the follow features like unfollowing and following """
from datetime import datetime
import os
import random
import json
import csv
import sqlite3
from math import ceil
from .time_util import sleep
from .util import delete_line_from_file
from .util import scroll_bottom
from .util import format_number
from .util import update_activity
from .util import add_user_to_blacklist
from .util import click_element
from .util import web_address_navigator
from .util import get_relationship_counts
from .util import emergency_exit
from .util import load_user_id
from .util import get_username
from .util import find_user_id
from .util import explicit_wait
from .util import get_username_from_id
from .util import is_page_available
from .util import reload_webpage
from .util import click_visibly
from .util import get_action_delay
from .print_log_writer import log_followed_pool
from .print_log_writer import log_uncertain_unfollowed_pool
from .print_log_writer import log_record_all_unfollowed
from .relationship_tools import get_followers
from .relationship_tools import get_nonfollowers
from .database_engine import get_database
from .quota_supervisor import quota_supervisor
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import ElementNotVisibleException
def set_automated_followed_pool(username, unfollow_after, logger, logfolder):
    """Generate a user list based on the InstaPy followed usernames.

    Reads ``<logfolder><username>_followedPool.csv`` and returns a dict with
    two keys:
      * ``"all"``      - every followed user: {user: {"id": user_id}}
      * ``"eligible"`` - the subset old enough to unfollow, i.e. rows whose
        timestamp is more than ``unfollow_after`` seconds in the past (rows
        without a timestamp, and all rows when ``unfollow_after`` is None,
        are always eligible)

    On any read/parse failure an error is logged and the (possibly partial)
    pool collected so far is returned.
    """
    pool_name = "{0}{1}_followedPool.csv".format(logfolder, username)
    automatedFollowedPool = {"all": {}, "eligible": {}}

    try:
        with open(pool_name, 'r+') as followedPoolFile:
            reader = csv.reader(followedPoolFile)

            for row in reader:
                entries = row[0].split(' ~ ')
                sz = len(entries)
                # Reset per-row state: previously these leaked from the prior
                # row, so an old-style (username-only) entry inherited the
                # previous row's timestamp and could be filtered incorrectly.
                time_stamp = None
                user_id = "undefined"   # 'undefined' rather than None is *intentional

                """
                Data entry styles [historically]:
                    user,                       # oldest
                    datetime ~ user,            # after `unfollow_after` was introduced
                    datetime ~ user ~ user_id,  # after `user_id` was added
                """
                if sz == 1:
                    user = entries[0]

                elif sz == 2:
                    time_stamp = entries[0]
                    user = entries[1]

                elif sz == 3:
                    time_stamp = entries[0]
                    user = entries[1]
                    user_id = entries[2]

                automatedFollowedPool["all"].update({user: {"id": user_id}})

                # get eligible list
                if unfollow_after is not None and time_stamp:
                    try:
                        log_time = datetime.strptime(time_stamp, '%Y-%m-%d %H:%M')
                    except ValueError:
                        # malformed timestamp: cannot decide eligibility, skip row
                        continue

                    former_epoch = (log_time - datetime(1970, 1, 1)).total_seconds()
                    cur_epoch = (datetime.now() - datetime(1970, 1, 1)).total_seconds()

                    if cur_epoch - former_epoch > unfollow_after:
                        automatedFollowedPool["eligible"].update({user: {"id": user_id}})

                else:
                    # no time filter requested, or the row has no timestamp
                    automatedFollowedPool["eligible"].update({user: {"id": user_id}})

            # NOTE: no explicit close() needed -- the `with` block handles it.

    except BaseException as exc:
        logger.error("Error occurred while generating a user list from the followed pool!\n\t{}"
                     .format(str(exc).encode("utf-8")))

    return automatedFollowedPool
def get_following_status(browser, track, username, person, person_id, logger, logfolder):
    """ Verify if you are following the user in the loaded page

    Returns a ``(following_status, follow_button)`` tuple where
    ``following_status`` is the follow button's text (e.g. "Follow",
    "Following", "Requested", "Unblock"), the sentinel "UNAVAILABLE" when the
    profile page cannot be reached, or None when no button can be located;
    ``follow_button`` is the matching WebElement or None.
    """
    if track == "profile":
        # navigate to the profile page first (no-op if already there)
        ig_homepage = "https://www.instagram.com/"
        web_address_navigator(browser, ig_homepage+person)

    # any of these labels identifies the follow/unfollow button
    follow_button_XP = ("//button[text()='Following' or \
                                  text()='Requested' or \
                                  text()='Follow' or \
                                  text()='Follow Back' or \
                                  text()='Unblock']"
                        )
    failure_msg = "--> Unable to detect the following status of '{}'!"
    user_inaccessible_msg = ("Couldn't access the profile page of '{}'!"
                             "\t~might have changed the username".format(username))

    # check if the page is available
    valid_page = is_page_available(browser, logger)
    if not valid_page:
        logger.warning(user_inaccessible_msg)
        # the username may have changed: try to resolve the new one by user id
        person_new = verify_username_by_id(browser,
                                           username,
                                           person,
                                           None,
                                           logger,
                                           logfolder)
        if person_new:
            web_address_navigator(browser, ig_homepage+person_new)
            valid_page = is_page_available(browser, logger)
            if not valid_page:
                logger.error(failure_msg.format(person_new.encode("utf-8")))
                return "UNAVAILABLE", None

        else:
            logger.error(failure_msg.format(person.encode("utf-8")))
            return "UNAVAILABLE", None

    # wait until the follow button is located and visible, then get it
    follow_button = explicit_wait(browser, "VOEL", [follow_button_XP, "XPath"], logger, 7, False)

    if not follow_button:
        # retry once after a page reload with a longer timeout
        browser.execute_script("location.reload()")
        update_activity()

        follow_button = explicit_wait(browser, "VOEL", [follow_button_XP, "XPath"], logger, 14, False)

        if not follow_button:
            # cannot find the any of the expected buttons
            logger.error(failure_msg.format(person.encode("utf-8")))
            return None, None

    # get follow status
    following_status = follow_button.text

    return following_status, follow_button
def unfollow(browser,
             username,
             amount,
             customList,
             InstapyFollowed,
             nonFollowers,
             allFollowing,
             style,
             automatedFollowedPool,
             relationship_data,
             dont_include,
             white_list,
             sleep_delay,
             jumps,
             logger,
             logfolder):
    """ Unfollows the given amount of users

    One unfollow source is expected per call:
      * ``customList``: (True, <usernames>, "all"|"nonfollowers")
      * ``InstapyFollowed``: (True, "all"|"nonfollowers") - users followed by InstaPy
      * ``nonFollowers``: True - users who do not follow back
      * ``allFollowing``: True - unfollow from the profile's "Following" dialog
    ``style`` orders the candidates ("FIFO" default, "LIFO", "RANDOM").
    Returns the number of users successfully unfollowed.
    """
    # validate & unpack the `customList` parameter; after this block
    # `customList` itself is a plain boolean flag
    if (customList is not None and
            type(customList) in [tuple, list] and
            len(customList) == 3 and
            customList[0] == True and
            type(customList[1]) in [list, tuple, set] and
            len(customList[1]) > 0 and
            customList[2] in ["all", "nonfollowers"]):
        customList_data = customList[1]
        if type(customList_data) != list:
            customList_data = list(customList_data)
        unfollow_track = customList[2]
        customList = True
    else:
        customList = False

    # validate & unpack the `InstapyFollowed` parameter (also becomes a flag)
    if (InstapyFollowed is not None and
            type(InstapyFollowed) in [tuple, list] and
            len(InstapyFollowed) == 2 and
            InstapyFollowed[0] == True and
            InstapyFollowed[1] in ["all", "nonfollowers"]):
        unfollow_track = InstapyFollowed[1]
        InstapyFollowed = True
    else:
        InstapyFollowed = False

    unfollowNum = 0

    user_link = "https://www.instagram.com/{}/".format(username)

    # check URL of the webpage, if it already is the one to be navigated then do not navigate to it again
    web_address_navigator(browser, user_link)

    # check how many poeple we are following
    allfollowers, allfollowing = get_relationship_counts(browser, username, logger)

    if allfollowing is None:
        logger.warning("Unable to find the count of users followed  ~leaving unfollow feature")
        return 0
    elif allfollowing == 0:
        logger.warning("There are 0 people to unfollow  ~leaving unfollow feature")
        return 0

    if amount > allfollowing:
        logger.info("There are less users to unfollow than you have requested: "
                    "{}/{}  ~using available amount\n".format(allfollowing, amount))
        amount = allfollowing

    if (customList == True or
            InstapyFollowed == True or
            nonFollowers == True):
        # build the unfollow candidate list from the selected source
        if customList == True:
            logger.info("Unfollowing from the list of pre-defined usernames\n")
            unfollow_list = customList_data

        elif InstapyFollowed == True:
            logger.info("Unfollowing the users followed by InstaPy\n")
            unfollow_list = list(automatedFollowedPool["eligible"].keys())

        elif nonFollowers == True:
            logger.info("Unfollowing the users who do not follow back\n")
            """ Unfollow only the users who do not follow you back """
            unfollow_list = get_nonfollowers(browser,
                                             username,
                                             relationship_data,
                                             False,
                                             True,
                                             logger,
                                             logfolder)

        # pick only the users in the right track- ["all" or "nonfollowers"] for `customList` and
        # `InstapyFollowed` unfollow methods
        if customList == True or InstapyFollowed == True:
            if unfollow_track == "nonfollowers":
                all_followers = get_followers(browser,
                                              username,
                                              "full",
                                              relationship_data,
                                              False,
                                              True,
                                              logger,
                                              logfolder)
                loyal_users = [user for user in unfollow_list if user in all_followers]
                logger.info("Found {} loyal followers!  ~will not unfollow them".format(len(loyal_users)))
                unfollow_list = [user for user in unfollow_list if user not in loyal_users]

            elif unfollow_track != "all":
                logger.info("Unfollow track is not specified! ~choose \"all\" or \"nonfollowers\"")
                return 0

        # re-generate unfollow list according to the `unfollow_after` parameter for `customList` and
        # `nonFollowers` unfollow methods
        if customList == True or nonFollowers == True:
            not_found = []
            non_eligible = []

            for person in unfollow_list:
                if person not in automatedFollowedPool["all"].keys():
                    not_found.append(person)

                elif (person in automatedFollowedPool["all"].keys() and
                        person not in automatedFollowedPool["eligible"].keys()):
                    non_eligible.append(person)

            unfollow_list = [user for user in unfollow_list if user not in non_eligible]
            logger.info("Total {} users available to unfollow"
                        "  ~not found in 'followedPool.csv': {}  |  didn't pass `unfollow_after`: {}\n".format(
                            len(unfollow_list), len(not_found), len(non_eligible)))

        elif InstapyFollowed == True:
            # `unfollow_list` already comes from the "eligible" pool here, so
            # only the count of non-eligible users is reported
            non_eligible = [user for user in automatedFollowedPool["all"].keys() if
                            user not in automatedFollowedPool["eligible"].keys()]
            logger.info("Total {} users available to unfollow  ~didn't pass `unfollow_after`: {}\n"
                        .format(len(unfollow_list), len(non_eligible)))

        if len(unfollow_list) < 1:
            logger.info("There are no any users available to unfollow")
            return 0

        # choose the desired order of the elements
        if style == "LIFO":
            unfollow_list = list(reversed(unfollow_list))
        elif style == "RANDOM":
            random.shuffle(unfollow_list)

        if amount > len(unfollow_list):
            logger.info("You have requested more amount: {} than {} of users available to unfollow"
                        "~using available amount\n".format(amount, len(unfollow_list)))
            amount = len(unfollow_list)

        # unfollow loop
        try:
            sleep_counter = 0
            sleep_after = random.randint(8, 12)
            index = 0

            for person in unfollow_list:
                if unfollowNum >= amount:
                    logger.warning(
                        "--> Total unfollows reached it's amount given {}\n"
                        .format(unfollowNum))
                    break

                if jumps["consequent"]["unfollows"] >= jumps["limit"]["unfollows"]:
                    logger.warning("--> Unfollow quotient reached its peak!\t~leaving Unfollow-Users activity\n")
                    break

                # periodic long sleep to mimic human pacing
                if sleep_counter >= sleep_after and sleep_delay not in [0, None]:
                    delay_random = random.randint(ceil(sleep_delay*0.85), ceil(sleep_delay*1.14))
                    logger.info("Unfollowed {} new users  ~sleeping about {}\n".format(sleep_counter,
                                '{} seconds'.format(delay_random) if delay_random < 60 else
                                '{} minutes'.format(float("{0:.2f}".format(delay_random/60)))))
                    sleep(delay_random)
                    sleep_counter = 0
                    sleep_after = random.randint(8, 12)
                    pass

                if person not in dont_include:
                    logger.info(
                        "Ongoing Unfollow [{}/{}]: now unfollowing '{}'..."
                        .format(unfollowNum+1, amount, person.encode('utf-8')))

                    person_id = (automatedFollowedPool["all"][person]["id"] if
                                 person in automatedFollowedPool["all"].keys() else False)

                    unfollow_state, msg = unfollow_user(browser,
                                                        "profile",
                                                        username,
                                                        person,
                                                        person_id,
                                                        None,
                                                        relationship_data,
                                                        logger,
                                                        logfolder)
                    if unfollow_state == True:
                        unfollowNum += 1
                        sleep_counter += 1
                        # reset jump counter after a successful unfollow
                        jumps["consequent"]["unfollows"] = 0

                    elif msg == "jumped":
                        # will break the loop after certain consecutive jumps
                        jumps["consequent"]["unfollows"] += 1

                    elif msg in ["temporary block", "not connected", "not logged in"]:
                        # break the loop in extreme conditions to prevent misbehaviours
                        logger.warning("There is a serious issue: '{}'!\t~leaving Unfollow-Users activity".format(msg))
                        break

                else:
                    # if the user in dont include (should not be) we shall remove him from the follow list
                    # if he is a white list user (set at init and not during run time)
                    if person in white_list:
                        delete_line_from_file('{0}{1}_followedPool.csv'.format(logfolder, username),
                                              person, logger)
                        list_type = 'whitelist'
                    else:
                        list_type = 'dont_include'
                    logger.info("Not unfollowed '{}'!\t~user is in the list {}\n".format(person, list_type))

                index += 1
                continue

        except BaseException as e:
            logger.error("Unfollow loop error:  {}\n".format(str(e)))

    elif allFollowing == True:
        # dialog-based unfollow: open the "Following" dialog on own profile
        logger.info("Unfollowing the users you are following")
        # unfollow from profile
        try:
            following_link = browser.find_elements_by_xpath(
                '//section//ul//li[3]')
            click_element(browser, following_link[0])
            # update server calls
            update_activity()
        except BaseException as e:
            logger.error("following_link error {}".format(str(e)))
            return 0

        # scroll down the page to get sufficient amount of usernames
        get_users_through_dialog(browser, None, username, amount,
                                 allfollowing, False, None, None,
                                 None, {"enabled": False, "percentage": 0},
                                 "Unfollow", jumps, logger, logfolder)

        # find dialog box
        dialog = browser.find_element_by_xpath(
            "//div[text()='Following']/../../following-sibling::div")

        sleep(3)

        # get persons, unfollow buttons, and length of followed pool
        person_list_a = dialog.find_elements_by_tag_name("a")
        person_list = []

        for person in person_list_a:
            if person and hasattr(person, 'text') and person.text:
                person_list.append(person.text)

        follow_buttons = dialog.find_elements_by_tag_name('button')

        # re-generate person list to unfollow according to the `unfollow_after` parameter
        user_info = list(zip(follow_buttons, person_list))
        non_eligible = []
        not_found = []

        for button, person in user_info:
            if person not in automatedFollowedPool["all"].keys():
                not_found.append(person)

            elif (person in automatedFollowedPool["all"].keys() and
                    person not in automatedFollowedPool["eligible"].keys()):
                non_eligible.append(person)

        user_info = [pair for pair in user_info if pair[1] not in non_eligible]

        logger.info("Total {} users available to unfollow"
                    "  ~not found in 'followedPool.csv': {}  |  didn't pass `unfollow_after`: {}".format(
                        len(user_info), len(not_found), len(non_eligible)))

        if len(user_info) < 1:
            logger.info("There are no any users to unfollow")
            return 0

        elif len(user_info) < amount:
            logger.info("Could not grab requested amount of usernames to unfollow: "
                        "{}/{}  ~using available amount".format(len(user_info), amount))
            amount = len(user_info)

        if style == "LIFO":
            user_info = list(reversed(user_info))
        elif style == "RANDOM":
            random.shuffle(user_info)

        # unfollow loop
        try:
            hasSlept = False

            for button, person in user_info:
                if unfollowNum >= amount:
                    logger.info(
                        "--> Total unfollowNum reached it's amount given: {}"
                        .format(unfollowNum))
                    break

                if jumps["consequent"]["unfollows"] >= jumps["limit"]["unfollows"]:
                    logger.warning("--> Unfollow quotient reached its peak!\t~leaving Unfollow-Users activity\n")
                    break

                # sleep once per every 10 unfollows
                if (unfollowNum != 0 and
                        hasSlept is False and
                        unfollowNum % 10 == 0 and
                        sleep_delay not in [0, None]):
                    logger.info("sleeping for about {} min\n"
                                .format(int(sleep_delay/60)))
                    sleep(sleep_delay)
                    hasSlept = True
                    pass

                if person not in dont_include:
                    logger.info(
                        "Ongoing Unfollow [{}/{}]: now unfollowing '{}'..."
                        .format(unfollowNum+1, amount, person.encode('utf-8')))

                    person_id = (automatedFollowedPool["all"][person]["id"] if
                                 person in automatedFollowedPool["all"].keys() else False)

                    unfollow_state, msg = unfollow_user(browser,
                                                        "dialog",
                                                        username,
                                                        person,
                                                        person_id,
                                                        button,
                                                        relationship_data,
                                                        logger,
                                                        logfolder)
                    if unfollow_state == True:
                        unfollowNum += 1
                        # reset jump counter after a successful unfollow
                        jumps["consequent"]["unfollows"] = 0

                    elif msg == "jumped":
                        # will break the loop after certain consecutive jumps
                        jumps["consequent"]["unfollows"] += 1

                    elif msg in ["temporary block", "not connected", "not logged in"]:
                        # break the loop in extreme conditions to prevent misbehaviours
                        logger.warning("There is a serious issue: '{}'!\t~leaving Unfollow-Users activity".format(msg))
                        break

                    # To only sleep once until there is the next unfollow
                    if hasSlept:
                        hasSlept = False

                else:
                    logger.info("Not unfollowing '{}'!  ~user is in the whitelist\n".format(person))

        except Exception as exc:
            logger.error("Unfollow loop error:\n\n{}\n\n".format(str(exc).encode('utf-8')))

    else:
        logger.info("Please select a proper unfollow method!  ~leaving unfollow activity\n")

    return unfollowNum
def follow_user(browser, track, login, user_name, button, blacklist, logger, logfolder):
    """ Follow a user either from the profile page or post page or dialog box

    :param browser: selenium webdriver instance
    :param track: origin of the action, one of ["profile", "post", "dialog"]
    :param login: username of the active (our own) account
    :param user_name: username of the account to be followed
    :param button: follow button web element (only used on the "dialog" track)
    :param blacklist: dict with 'enabled' and 'campaign' keys controlling
        registration of followed users into a blacklist campaign
    :param logger: the logger instance
    :param logfolder: the logger folder
    :returns: tuple of (follow_state, message) — (True, "success") on a
        verified follow, otherwise (False, <failure reason>)
    """
    # check action availability against the session's follow quota
    if quota_supervisor("follows") == "jump":
        return False, "jumped"
    if track in ["profile", "post"]:
        if track == "profile":
            # check URL of the webpage, if it already is user's profile page, then do not navigate to it again
            user_link = "https://www.instagram.com/{}/".format(user_name)
            web_address_navigator(browser, user_link)
        # find out CURRENT following status
        following_status, follow_button = get_following_status(browser,
                                                               track,
                                                               login,
                                                               user_name,
                                                               None,
                                                               logger,
                                                               logfolder)
        if following_status in ["Follow", "Follow Back"]:
            click_visibly(browser, follow_button)  # click to follow
            follow_state, msg = verify_action(browser, "follow", track, login,
                                              user_name, None, logger, logfolder)
            if follow_state != True:
                return False, msg
        elif following_status in ["Following", "Requested"]:
            # nothing to do: we already follow (or requested to follow) this user
            if following_status == "Following":
                logger.info("--> Already following '{}'!\n".format(user_name))
            elif following_status == "Requested":
                logger.info("--> Already requested '{}' to follow!\n".format(user_name))
            sleep(1)
            return False, "already followed"
        elif following_status in ["Unblock", "UNAVAILABLE"]:
            # the user is blocked by us or the profile cannot be reached
            if following_status == "Unblock":
                failure_msg = "user is in block"
            elif following_status == "UNAVAILABLE":
                failure_msg = "user is inaccessible"
            logger.warning("--> Couldn't follow '{}'!\t~{}".format(user_name, failure_msg))
            return False, following_status
        elif following_status is None:
            # unexpected page state: decide between an emergency exit and a plain failure
            sirens_wailing, emergency_state = emergency_exit(browser, login, logger)
            if sirens_wailing == True:
                return False, emergency_state
            else:
                logger.warning("--> Couldn't unfollow '{}'!\t~unexpected failure".format(user_name))
                return False, "unexpected failure"
    elif track == "dialog":
        # in a dialog box the follow button element is supplied by the caller
        click_element(browser, button)
        sleep(3)
    ## general tasks after a successful follow
    logger.info("--> Followed '{}'!".format(user_name.encode("utf-8")))
    update_activity('follows')
    # get user ID to record alongside username
    user_id = get_user_id(browser, track, user_name, logger)
    logtime = datetime.now().strftime('%Y-%m-%d %H:%M')
    log_followed_pool(login, user_name, logger, logfolder, logtime, user_id)
    # bump this user's follow counter in the restriction database
    follow_restriction("write", user_name, None, logger)
    if blacklist['enabled'] == True:
        action = 'followed'
        add_user_to_blacklist(user_name,
                              blacklist['campaign'],
                              action,
                              logger,
                              logfolder)
    # get the post-follow delay time to sleep
    naply = get_action_delay("follow")
    sleep(naply)
    return True, "success"
def scroll_to_bottom_of_followers_list(browser, element):
    """ Scroll the followers dialog to its end to trigger the first lazy load """
    scroll_script = "arguments[0].children[1].scrollIntoView()"
    browser.execute_script(scroll_script, element)
    sleep(1)
    return
def get_users_through_dialog(browser,
                             login,
                             user_name,
                             amount,
                             users_count,
                             randomize,
                             dont_include,
                             blacklist,
                             follow_times,
                             simulation,
                             channel,
                             jumps,
                             logger,
                             logfolder):
    """ Gather usernames out of the `Followers`/`Following` dialog box

    Scrolls the dialog until enough follow buttons are loaded, optionally
    performing a few "simulated" follows along the way to mimic real
    interaction, then extracts and returns the usernames to process.

    :param users_count: total follower/following count shown on the profile
    :param channel: "Follow" or "Unfollow"; selects which buttons to collect
    :returns: tuple of (person_list, simulated_list)
    """
    # NOTE(fix): the original initialized a `person_followed = []` local that
    # was never read or returned; the dead variable has been removed.
    sleep(2)
    real_amount = amount
    if randomize and amount >= 3:
        # expanding the population for better sampling distribution
        amount = amount * 3
    if amount > int(users_count*0.85):  # taking 85 percent of possible amounts is a safe study
        amount = int(users_count*0.85)
    try_again = 0
    sc_rolled = 0
    # find dialog box
    dialog_address = "//div[text()='Followers' or text()='Following']/../../following-sibling::div"
    dialog = browser.find_element_by_xpath(dialog_address)
    # scroll to end of follower list to initiate first load which hides the suggestions
    scroll_to_bottom_of_followers_list(browser, dialog)
    buttons = get_buttons_from_dialog(dialog, channel)
    abort = False
    person_list = []
    total_list = len(buttons)
    simulated_list = []
    simulator_counter = 0
    # scroll down if the generated list of user to follow is not enough to
    # follow amount set
    while (total_list < amount) and not abort:
        before_scroll = total_list
        for i in range(4):
            scroll_bottom(browser, dialog, 2)
            sc_rolled += 1
            simulator_counter += 1
        buttons = get_buttons_from_dialog(dialog, channel)
        total_list = len(buttons)
        # no new buttons appeared after scrolling: the list is exhausted
        abort = (before_scroll == total_list)
        if abort:
            if total_list < real_amount:
                logger.info("Failed to load desired amount of users!\n")
        if sc_rolled > 85:  # you may want to use up to 100
            if total_list < amount:
                # back off for ~10 minutes to avoid a rate-limit block
                logger.info("Too many requests sent! attempt: {} | gathered links: {}"
                            "\t~sleeping a bit".format(try_again+1, total_list))
                sleep(random.randint(600, 655))
                try_again += 1
                sc_rolled = 0
        # Will follow a little bit of users in order to simulate real interaction
        if (simulation["enabled"] == True and
            simulation["percentage"] >= random.randint(1, 100) and
                (simulator_counter > random.randint(5, 17) or
                 abort == True or
                 total_list >= amount or
                 sc_rolled == random.randint(3, 5)) and
                len(buttons) > 0):
            quick_amount = 1 if not total_list >= amount else random.randint(1, 4)
            for i in range(0, quick_amount):
                quick_index = random.randint(0, len(buttons)-1)
                quick_button = buttons[quick_index]
                quick_username = dialog_username_extractor(quick_button)
                if quick_username and quick_username[0] not in simulated_list:
                    logger.info("Simulated follow : {}".format(len(simulated_list)+1))
                    quick_follow = follow_through_dialog(browser,
                                                         login,
                                                         quick_username,
                                                         quick_button,
                                                         quick_amount,
                                                         dont_include,
                                                         blacklist,
                                                         follow_times,
                                                         jumps,
                                                         logger,
                                                         logfolder)
                    print('')
                    simulated_list.extend(quick_follow)
            simulator_counter = 0
    person_list = dialog_username_extractor(buttons)
    if randomize:
        random.shuffle(person_list)
    # cap to the originally requested amount, minus what simulation already followed
    person_list = person_list[:(real_amount-len(simulated_list))]
    for user in simulated_list:  # add simulated users to the `person_list` in random index
        if user not in person_list:
            person_list.insert(random.randint(0, abs(len(person_list)-1)), user)
    return person_list, simulated_list
def dialog_username_extractor(buttons):
    """ Extract the username attached to each follow button in a dialog box """
    # accept a single button element as well as a list of them
    elements = buttons if isinstance(buttons, list) else [buttons]
    usernames = []
    for element in elements:
        # skip stale or empty elements that carry no visible text
        if not (element and hasattr(element, 'text') and element.text):
            continue
        try:
            container = element.find_element_by_xpath("../../../*")
            anchors = container.find_elements_by_tag_name("a")
            usernames.append(anchors[1].text)
        except IndexError:
            # fewer than two anchor tags around this button; nothing to grab
            pass
    return usernames
def follow_through_dialog(browser,
                          login,
                          person_list,
                          buttons,
                          amount,
                          dont_include,
                          blacklist,
                          follow_times,
                          jumps,
                          logger,
                          logfolder):
    """ Will follow username directly inside a dialog box

    :param browser: selenium webdriver instance
    :param login: username of the active (our own) account
    :param person_list: username(s) to follow — a single name or a list
    :param buttons: follow button element(s) matching `person_list`
    :param amount: maximum number of follows to perform
    :param dont_include: usernames to skip
    :param follow_times: max times the same user may be followed
    :param jumps: consecutive-jump bookkeeping used to abort on quota jumps
    :returns: list of usernames successfully followed this call
    """
    # accept singletons as well as lists
    if not isinstance(person_list, list):
        person_list = [person_list]
    if not isinstance(buttons, list):
        buttons = [buttons]
    person_followed = []
    followNum = 0
    try:
        for person, button in zip(person_list, buttons):
            if followNum >= amount:
                logger.info("--> Total follow number reached: {}"
                            .format(followNum))
                break
            elif jumps["consequent"]["follows"] >= jumps["limit"]["follows"]:
                logger.warning("--> Follow quotient reached its peak!\t~leaving Follow-Through-Dialog activity\n")
                break
            if (person not in dont_include and
                    not follow_restriction("read", person, follow_times, logger)):
                follow_state, msg = follow_user(browser,
                                                "dialog",
                                                login,
                                                person,
                                                button,
                                                blacklist,
                                                logger,
                                                logfolder)
                if follow_state == True:
                    # register this session's followed user for further interaction
                    person_followed.append(person)
                    followNum += 1
                    # reset jump counter after a successful follow
                    jumps["consequent"]["follows"] = 0
                elif msg == "jumped":
                    # will break the loop after certain consecutive jumps
                    jumps["consequent"]["follows"] += 1
            else:
                logger.info("Not followed '{}' ~inappropriate user".format(person))
    except Exception as e:
        # was `except BaseException`, which also swallowed KeyboardInterrupt
        # and SystemExit; catching Exception is broad enough here
        logger.error("Error occurred while following through dialog box:\n{}".format(str(e)))
    return person_followed
def get_given_user_followers(browser,
                             login,
                             user_name,
                             amount,
                             dont_include,
                             randomize,
                             blacklist,
                             follow_times,
                             simulation,
                             jumps,
                             logger,
                             logfolder):
    """
    For the given username, follow their followers.

    :param browser: webdriver instance
    :param login: username of the active (our own) account
    :param user_name: given username of account to follow
    :param amount: the number of followers to follow
    :param dont_include: ignore these usernames
    :param randomize: randomly select from users' followers
    :param blacklist: dict controlling blacklist campaign registration
    :param follow_times: max times the same user may be followed
    :param simulation: dict controlling simulated interactions
    :param jumps: consecutive-jump bookkeeping used to abort on quota jumps
    :param logger: the logger instance
    :param logfolder: the logger folder
    :return: list of user's followers also followed
    """
    user_name = user_name.strip()
    user_link = "https://www.instagram.com/{}/".format(user_name)
    web_address_navigator(browser, user_link)
    # check how many people are following this user.
    allfollowers, allfollowing = get_relationship_counts(browser, user_name, logger)
    # skip early for no followers
    if not allfollowers:
        logger.info("'{}' has no followers".format(user_name))
        return [], []
    elif allfollowers < amount:
        logger.warning("'{}' has less followers- {}, than the given amount of {}".format(
            user_name, allfollowers, amount))
    # locate element to user's followers
    try:
        # NOTE(fix): `find_elements_by_xpath` returns a (possibly empty) list
        # and never raises NoSuchElementException itself; an empty result used
        # to surface as an IndexError caught by the broad handler below with a
        # misleading log message — handle it explicitly here instead.
        followers_link = browser.find_elements_by_xpath(
            '//a[@href="/{}/followers/"]'.format(user_name))
        click_element(browser, followers_link[0])
        # update server calls
        update_activity()
    except (NoSuchElementException, IndexError):
        logger.error('Could not find followers\' link for {}'.format(user_name))
        return [], []
    except Exception as e:
        # was `except BaseException`, which also swallowed KeyboardInterrupt
        logger.error("`followers_link` error {}".format(str(e)))
        return [], []
    channel = "Follow"
    person_list, simulated_list = get_users_through_dialog(browser, login, user_name, amount,
                                                           allfollowers, randomize, dont_include,
                                                           blacklist, follow_times, simulation,
                                                           channel, jumps, logger, logfolder)
    return person_list, simulated_list
def get_given_user_following(browser,
                             login,
                             user_name,
                             amount,
                             dont_include,
                             randomize,
                             blacklist,
                             follow_times,
                             simulation,
                             jumps,
                             logger,
                             logfolder):
    """
    For the given username, follow the users they themselves are following.

    :param browser: webdriver instance
    :param login: username of the active (our own) account
    :param user_name: given username whose followees will be followed
    :param amount: the number of users to follow
    :param dont_include: ignore these usernames
    :param randomize: randomly select from users' following
    :param blacklist: dict controlling blacklist campaign registration
    :param follow_times: max times the same user may be followed
    :param simulation: dict controlling simulated interactions
    :param jumps: consecutive-jump bookkeeping used to abort on quota jumps
    :param logger: the logger instance
    :param logfolder: the logger folder
    :return: tuple of (usernames gathered, usernames followed by simulation)
    """
    user_name = user_name.strip()
    user_link = "https://www.instagram.com/{}/".format(user_name)
    web_address_navigator(browser, user_link)
    # check how many people this user is following, trying several page
    # layouts in turn (counter element, shared JSON data, reload, top counters)
    try:
        # primary source: the `following` counter element on the profile page
        allfollowing = format_number(browser.find_element_by_xpath("//a[contains"
                                                                   "(@href,'following')]/span").text)
    except NoSuchElementException:
        try:
            # fallback 1: read the count out of the page's shared JSON data
            allfollowing = browser.execute_script(
                "return window._sharedData.entry_data."
                "ProfilePage[0].graphql.user.edge_follow.count")
        except WebDriverException:
            try:
                # fallback 2: reload the page and retry the shared JSON data
                browser.execute_script("location.reload()")
                update_activity()
                allfollowing = browser.execute_script(
                    "return window._sharedData.entry_data."
                    "ProfilePage[0].graphql.user.edge_follow.count")
            except WebDriverException:
                try:
                    # fallback 3: scrape the third top counter element
                    # (posts, followers, following — index 2 is `following`)
                    topCount_elements = browser.find_elements_by_xpath(
                        "//span[contains(@class,'g47SY')]")
                    if topCount_elements:
                        allfollowing = format_number(topCount_elements[2].text)
                    else:
                        logger.info("Failed to get following count of '{}' ~empty list".format(user_name))
                        allfollowing = None
                except NoSuchElementException:
                    logger.error("\nError occurred during getting the following count of '{}'\n".format(user_name))
                    return [], []
    # skip early for no followers
    if not allfollowing:
        logger.info("'{}' has no any following".format(user_name))
        return [], []
    elif allfollowing < amount:
        logger.warning("'{}' has less following- {} than the desired amount of {}".format(
            user_name, allfollowing, amount))
    # locate and open the user's `following` dialog
    try:
        following_link = browser.find_elements_by_xpath(
            '//a[@href="/{}/following/"]'.format(user_name))
        click_element(browser, following_link[0])
        # update server calls
        update_activity()
    except NoSuchElementException:
        logger.error('Could not find following\'s link for {}'.format(user_name))
        return [], []
    except BaseException as e:
        logger.error("`following_link` error {}".format(str(e)))
        return [], []
    channel = "Follow"
    person_list, simulated_list = get_users_through_dialog(browser, login, user_name, amount,
                                                           allfollowing, randomize, dont_include,
                                                           blacklist, follow_times, simulation,
                                                           channel, jumps, logger, logfolder)
    return person_list, simulated_list
def dump_follow_restriction(profile_name, logger, logfolder):
    """ Dump follow restriction data to a local human-readable JSON

    :param profile_name: profile whose restriction records are dumped
    :param logger: the logger instance
    :param logfolder: folder where `followRestriction.json` is kept
    """
    # NOTE(fix): `conn` is defined upfront so the `finally` clause cannot hit
    # a NameError when `get_database()` fails before a connection is opened.
    conn = None
    try:
        # get a DB and start a connection
        db, profile_id = get_database()
        conn = sqlite3.connect(db)
        with conn:
            conn.row_factory = sqlite3.Row
            cur = conn.cursor()
            cur.execute("SELECT * FROM followRestriction WHERE profile_id=:var", {"var": profile_id})
            data = cur.fetchall()
        if data:
            # get the existing data
            filename = "{}followRestriction.json".format(logfolder)
            if os.path.isfile(filename):
                with open(filename) as followResFile:
                    current_data = json.load(followResFile)
            else:
                current_data = {}
            # pack the new data: row columns are (profile_id, username, times)
            follow_data = {user_data[1]: user_data[2] for user_data in data or []}
            current_data[profile_name] = follow_data
            # dump the fresh follow data to a local human readable JSON
            with open(filename, 'w') as followResFile:
                json.dump(current_data, followResFile)
    except Exception as exc:
        logger.error("Pow! Error occurred while dumping follow restriction data to a local JSON:\n\t{}".format(str(exc).encode("utf-8")))
    finally:
        if conn:
            # close the open connection
            conn.close()
def follow_restriction(operation, username, limit, logger):
    """ Keep track of the followed users and help avoid excessive follow of the same user

    :param operation: "write" to record one more follow of `username`,
        "read" to test whether the follow limit was already reached
    :param username: user the restriction applies to
    :param limit: maximum allowed follow count (used only for "read")
    :param logger: the logger instance
    :returns: for "read", True when `username` was already followed `limit`
        (or more) times, else False; None for "write" or on error
    """
    # NOTE(fix): `conn` is defined upfront so the `finally` clause cannot hit
    # a NameError when `get_database()` fails before a connection is opened.
    conn = None
    try:
        # get a DB and start a connection
        db, profile_id = get_database()
        conn = sqlite3.connect(db)
        with conn:
            conn.row_factory = sqlite3.Row
            cur = conn.cursor()
            cur.execute("SELECT * FROM followRestriction WHERE profile_id=:id_var AND username=:name_var",
                        {"id_var": profile_id, "name_var": username})
            data = cur.fetchone()
            follow_data = dict(data) if data else None
            if operation == "write":
                if follow_data is None:
                    # write a new record
                    cur.execute("INSERT INTO followRestriction (profile_id, username, times) VALUES (?, ?, ?)",
                                (profile_id, username, 1))
                else:
                    # update the existing record
                    follow_data["times"] += 1
                    sql = "UPDATE followRestriction set times = ? WHERE profile_id=? AND username = ?"
                    cur.execute(sql, (follow_data["times"], profile_id, username))
                # commit the latest changes
                conn.commit()
            elif operation == "read":
                if follow_data is None:
                    return False
                elif follow_data["times"] < limit:
                    return False
                else:
                    exceed_msg = "" if follow_data["times"] == limit else "more than "
                    logger.info("---> {} has already been followed {}{} times"
                                .format(username, exceed_msg, str(limit)))
                    return True
    except Exception as exc:
        logger.error("Dap! Error occurred with follow Restriction:\n\t{}".format(str(exc).encode("utf-8")))
    finally:
        if conn:
            # close the open connection
            conn.close()
def unfollow_user(browser, track, username, person, person_id, button, relationship_data, logger, logfolder):
    """ Unfollow a user either from the profile or post page or dialog box

    :param browser: selenium webdriver instance
    :param track: origin of the action, one of ["profile", "post", "dialog"]
    :param username: username of the active (our own) account
    :param person: username of the account to be unfollowed
    :param person_id: numeric ID of `person` (may be False/None if unknown)
    :param button: unfollow button element (only used on the "dialog" track)
    :param relationship_data: local cache of follow relationships to clean up
    :param logger: the logger instance
    :param logfolder: the logger folder
    :returns: tuple of (unfollow_state, message) — (True, "success") on a
        verified unfollow, otherwise (False, <failure reason>)
    """
    # check action availability against the session's unfollow quota
    if quota_supervisor("unfollows") == "jump":
        return False, "jumped"
    if track in ["profile", "post"]:
        """ Method of unfollowing from a user's profile page or post page """
        if track == "profile":
            user_link = "https://www.instagram.com/{}/".format(person)
            web_address_navigator(browser, user_link)
        # find out CURRENT follow status
        following_status, follow_button = get_following_status(browser,
                                                               track,
                                                               username,
                                                               person,
                                                               person_id,
                                                               logger,
                                                               logfolder)
        if following_status in ["Following", "Requested"]:
            click_element(browser, follow_button)  # click to unfollow
            sleep(4)  # TODO: use explicit wait here
            confirm_unfollow(browser)
            unfollow_state, msg = verify_action(browser, "unfollow", track, username,
                                                person, person_id, logger, logfolder)
            if unfollow_state != True:
                return False, msg
        elif following_status in ["Follow", "Follow Back"]:
            # already not following: clean local records and bail out
            logger.info("--> Already unfollowed '{}'!".format(person))
            post_unfollow_cleanup(["successful", "uncertain"], username, person, relationship_data, logger, logfolder)
            return False, "already unfollowed"
        elif following_status in ["Unblock", "UNAVAILABLE"]:
            # the user is blocked by us or the profile cannot be reached
            if following_status == "Unblock":
                failure_msg = "user is in block"
            elif following_status == "UNAVAILABLE":
                failure_msg = "user is inaccessible"
            logger.warning("--> Couldn't unfollow '{}'!\t~{}".format(person, failure_msg))
            post_unfollow_cleanup("uncertain", username, person, relationship_data, logger, logfolder)
            return False, following_status
        elif following_status is None:
            # unexpected page state: decide between an emergency exit and a plain failure
            sirens_wailing, emergency_state = emergency_exit(browser, username, logger)
            if sirens_wailing == True:
                return False, emergency_state
            else:
                logger.warning("--> Couldn't unfollow '{}'!\t~unexpected failure".format(person))
                return False, "unexpected failure"
    elif track == "dialog":
        """ Method of unfollowing from a dialog box """
        click_element(browser, button)
        sleep(4)  # TODO: use explicit wait here
        confirm_unfollow(browser)
    # general tasks after a successful unfollow
    logger.info("--> Unfollowed '{}'!".format(person))
    update_activity('unfollows')
    post_unfollow_cleanup("successful", username, person, relationship_data, logger, logfolder)
    # get the post-unfollow delay time to sleep
    naply = get_action_delay("unfollow")
    sleep(naply)
    return True, "success"
def confirm_unfollow(browser):
    """ Deal with the confirmation dialog boxes during an unfollow

    Tries up to three times to locate and click the 'Unfollow' confirmation
    button; gives up silently when the dialog never shows up.
    """
    attempt = 0
    while attempt<3:
        try:
            attempt += 1
            button_xp = "//button[text()='Unfollow']"  # "//button[contains(text(), 'Unfollow')]"
            unfollow_button = browser.find_element_by_xpath(button_xp)
            if unfollow_button.is_displayed():
                click_element(browser, unfollow_button)
                sleep(2)
                break
        except (ElementNotVisibleException, NoSuchElementException) as exc:
            # prob confirm dialog didn't pop up
            if isinstance(exc, ElementNotVisibleException):
                # the button exists but is hidden: no confirmation needed
                break
            elif isinstance(exc, NoSuchElementException):
                # the button is not rendered yet: wait a moment and retry
                sleep(1)
                pass
def post_unfollow_cleanup(state, username, person, relationship_data, logger, logfolder):
    """ Casual local data cleaning after an unfollow """
    # normalize to a list so the membership checks below work for one state too
    states = [state] if not isinstance(state, list) else state
    # the person is no longer followed: drop them from the followed-pool file
    delete_line_from_file("{0}{1}_followedPool.csv"
                          .format(logfolder, username), person, logger)
    if "successful" in states:
        all_following = relationship_data[username]["all_following"]
        if person in all_following:
            all_following.remove(person)
    if "uncertain" in states:
        # this user was found in our unfollow list but currently is not being followed
        log_uncertain_unfollowed_pool(username, person, logger, logfolder)
    # save any unfollowed person
    log_record_all_unfollowed(username, person, logger, logfolder)
    sleep(3)
    print('')
def get_buttons_from_dialog(dialog, channel):
    """ Gets buttons from the `Followers` or `Following` dialog boxes

    :param dialog: the dialog box web element to search inside
    :param channel: "Follow" to collect follow buttons, "Unfollow" to collect
        'Following' (i.e. unfollow) buttons
    :returns: list of matching button elements; empty for an unknown channel
    """
    if channel == "Follow":
        # get follow buttons. This approach will find the follow buttons and
        # ignore the Unfollow/Requested buttons.
        buttons = dialog.find_elements_by_xpath(
            "//button[text()='Follow']")
    elif channel == "Unfollow":
        buttons = dialog.find_elements_by_xpath(
            "//button[text() = 'Following']")
    else:
        # NOTE(fix): an unknown channel used to leave `buttons` unbound and
        # raise NameError at the return below; return an empty list instead
        buttons = []
    return buttons
def get_user_id(browser, track, username, logger):
    """ Get user's ID either from a profile page or post page """
    # follows coming from a "dialog" box currently skip the ID lookup entirely
    if track == "dialog":
        return "unknown"
    return find_user_id(browser, track, username, logger)
def verify_username_by_id(browser, username, person, person_id, logger, logfolder):
    """ Check if the given user has changed username after the time of followed """
    if person_id is None:
        # fall back to the locally stored ID for this person
        person_id = load_user_id(username, person, logger, logfolder)
    if not person_id or person_id in [None, "unknown", "undefined"]:
        logger.info("The user ID of '{}' doesn't exist in local records".format(person))
        return None
    # resolve the (possibly new) username from the stored user ID
    person_new = get_username_from_id(browser, person_id, logger)
    if not person_new:
        logger.info("The user with the ID of '{}' is unreachable".format(person))
        return None
    if person_new != person:
        logger.info("User '{}' has changed username and now is called '{}' :S"
                    .format(person, person_new))
    return person_new
def verify_action(browser, action, track, username, person, person_id, logger, logfolder):
    """ Verify if the action has succeeded

    :param browser: selenium webdriver instance
    :param action: the action to verify, "follow" or "unfollow"
    :param track: origin of the action, one of ["profile", "post", "dialog"]
    :param username: username of the active (our own) account
    :param person: username of the target account
    :param person_id: numeric ID of `person` (may be None if unknown)
    :param logger: the logger instance
    :param logfolder: the logger folder
    :returns: tuple of (action_verified, message)
    """
    # currently supported actions are follow & unfollow
    if action in ["follow", "unfollow"]:
        # the button text expected to appear AFTER a successful action
        if action == "follow":
            post_action_text = "//button[text()='Following' or text()='Requested']"
        elif action == "unfollow":
            post_action_text = "//button[text()='Follow' or text()='Follow Back']"
        button_change = explicit_wait(browser, "VOEL", [post_action_text, "XPath"], logger, 7, False)
        if not button_change:
            # the button did not change in time: reload and re-read the status
            reload_webpage(browser)
            following_status, follow_button = get_following_status(browser,
                                                                   track,
                                                                   username,
                                                                   person,
                                                                   person_id,
                                                                   logger,
                                                                   logfolder)
            # map the observed button label to whether our action took effect
            if following_status in ["Following", "Requested"]:
                action_state = False if action == "unfollow" else True
            elif following_status in ["Follow", "Follow Back"]:
                action_state = True if action == "unfollow" else False
            else:
                action_state = None
            # handle it!
            if action_state == True:
                logger.info("Last {} is verified after reloading the page!".format(action))
            elif action_state == False:
                # try to do the action one more time!
                click_visibly(browser, follow_button)
                if action == "unfollow":
                    sleep(4)  # TODO: use explicit wait here
                    confirm_unfollow(browser)
                button_change = explicit_wait(browser, "VOEL", [post_action_text, "XPath"], logger, 9, False)
                if not button_change:
                    # still no change: most likely a temporary action block
                    logger.warning("Phew! Last {0} is not verified."
                                   "\t~'{1}' might be temporarily blocked from {0}ing\n"
                                   .format(action, username))
                    sleep(210)
                    return False, "temporary block"
            elif action_state == None:
                logger.error("Hey! Last {} is not verified out of an unexpected failure!".format(action))
                return False, "unexpected"
    return True, "success"
| 40.295506
| 137
| 0.529977
|
4a09db321b692f935d854b8efe24680758a23081
| 3,671
|
py
|
Python
|
cliva_fl/utils/time_tracker.py
|
DataManagementLab/thesis-fl_client-side_validation
|
0f6a35d08966133e6a8c13a110b9307d91f2d9cb
|
[
"MIT"
] | null | null | null |
cliva_fl/utils/time_tracker.py
|
DataManagementLab/thesis-fl_client-side_validation
|
0f6a35d08966133e6a8c13a110b9307d91f2d9cb
|
[
"MIT"
] | null | null | null |
cliva_fl/utils/time_tracker.py
|
DataManagementLab/thesis-fl_client-side_validation
|
0f6a35d08966133e6a8c13a110b9307d91f2d9cb
|
[
"MIT"
] | null | null | null |
from enum import unique
import time
from collections import defaultdict
from typing import Dict, Optional
class TimeTracker:
    """Track cumulative named timers and labeled start/stop timeframes."""

    def __init__(self):
        self.clear()
        self.init_timeframes()

    def start_timeframe(self, key):
        """Record the starting instant of the timeframe named `key`."""
        frame = self.timeframes[key]
        assert not frame.get('time_from'), 'Timeframe can not be started twice.'
        frame['time_from'] = time.time()

    def stop_timeframe(self, key):
        """Record the ending instant of the timeframe named `key`."""
        frame = self.timeframes[key]
        assert frame.get('time_from'), 'Timeframe can only be stopped after being started.'
        assert not frame.get('time_to'), 'Timeframe can not be stopped twice.'
        frame['time_to'] = time.time()

    def get_timeframe(self, key, format=None) -> Dict:  # "%Y-%m-%d %H:%M:%S"
        """Return the timeframe dict, optionally rendered with a strftime format."""
        frame = self.timeframes.get(key)
        if format:
            frame = {name: time.strftime(format, time.gmtime(stamp))
                     for name, stamp in frame.items()}
        return frame

    def start(self, id):
        """Begin a running timer for `id`."""
        assert id not in self.start_times, f"Timer of id '{id}' is already running."
        self.start_times[id] = time.time()

    def stop(self, id):
        """Stop the running timer for `id`, accumulating its elapsed time."""
        assert id in self.start_times, f"Timer of id '{id}' is not running and can not be stopped."
        elapsed = time.time() - self.start_times.pop(id)
        self.total_times[id] += elapsed
        self.total_times_history[id].append(elapsed)

    def reset(self, id):
        """Forget all accumulated time for `id` (must not be running)."""
        assert id not in self.start_times, f"Timer of id '{id}' can not be reset while it is running."
        if id in self.total_times:
            del self.total_times[id]
            del self.total_times_history[id]

    def clear(self):
        """Drop every timer, running or not."""
        self.total_times = defaultdict(float)
        self.total_times_history = defaultdict(list)
        self.start_times = dict()

    def init_timeframes(self):
        """Drop every recorded timeframe."""
        self.timeframes = defaultdict(dict)

    def last(self, id, n=1, get_range=False):
        """Return the n-th most recent measurement (or the last n of them)."""
        history = self.total_times_history[id]
        assert n <= len(history), f"There are less than {n} elements in history for id {id}."
        return history[-n:] if get_range else history[-n]

    def get(self, id, default=0):
        """Return the total for `id`, or `default` when it is unknown."""
        return self[id] if self.has(id) else default

    def has(self, id):
        """True when `id` has accumulated time or is currently running."""
        return id in self.total_times or id in self.start_times

    @property
    def ids(self):
        """All known timer ids, finished and running alike."""
        return set(self.total_times) | set(self.start_times)

    def __getitem__(self, id):
        """Total accumulated seconds for `id`, including any running span."""
        assert self.has(id), f"Timer of id '{id}' is unknown."
        total = self.total_times.get(id, 0.0)
        if id in self.start_times:
            total += time.time() - self.start_times[id]
        return total

    def __str__(self):
        lines = []
        for id in sorted(self.ids):
            marker = "(Running)" if id in self.start_times else ""
            lines.append("{}\t{}\t{}".format(id, self[id], marker))
        return "\n".join(lines)

    @classmethod
    def from_dict(cls, time_dict):
        """Rebuild a tracker from the dict produced by `get_dict`."""
        required = ('total_times', 'total_times_history', 'start_times')
        assert all(key in time_dict for key in required), "Invalid time dictionary"
        tracker = cls()
        tracker.total_times = defaultdict(float, time_dict['total_times'])
        tracker.total_times_history = defaultdict(list, time_dict['total_times_history'])
        tracker.start_times = time_dict['start_times']
        return tracker

    def get_dict(self):
        """Export internal state as a plain dict (inverse of `from_dict`)."""
        return dict(
            total_times=self.total_times,
            total_times_history=self.total_times_history,
            start_times=self.start_times)
| 35.640777
| 138
| 0.61836
|
4a09db72cee4142de6183b2fcf508e0a14c2b7e5
| 1,105
|
py
|
Python
|
Module_01_Computer_Vision_Basics/1.4_Basic_Image_Processing/1.4.2_Rotation/rotation.py
|
CactusJackFX/PyImageSearch_Guru
|
01f5bce644b58848db029f72656002e21545bb10
|
[
"Apache-2.0"
] | 2
|
2020-02-12T12:17:01.000Z
|
2021-01-07T02:31:18.000Z
|
Module_01_Computer_Vision_Basics/1.4_Basic_Image_Processing/1.4.2_Rotation/rotation.py
|
CactusJackFX/PyImageSearch_Guru
|
01f5bce644b58848db029f72656002e21545bb10
|
[
"Apache-2.0"
] | 1
|
2020-03-22T06:33:10.000Z
|
2020-03-22T06:33:10.000Z
|
Module_01_Computer_Vision_Basics/1.4_Basic_Image_Processing/1.4.2_Rotation/rotation.py
|
CactusJackFX/PyImageSearch_Guru
|
01f5bce644b58848db029f72656002e21545bb10
|
[
"Apache-2.0"
] | 3
|
2020-02-18T05:24:13.000Z
|
2020-09-21T06:58:58.000Z
|
# import the necessary packages
import numpy as np
import argparse
import imutils
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())

# load the image from disk and display it unmodified
image = cv2.imread(args["image"])
cv2.imshow("Original", image)
cv2.waitKey(0)

# compute the center point of the image from its dimensions
(h, w) = image.shape[:2]
center = (w / 2, h / 2)

# rotate the image by 45 degrees about its center
rotation_matrix = cv2.getRotationMatrix2D(center, 45, 1.0)
rotated = cv2.warpAffine(image, rotation_matrix, (w, h))
cv2.imshow("Rotated by 45 Degrees", rotated)
cv2.waitKey(0)

# rotate the image by -90 degrees (a clockwise quarter turn)
rotation_matrix = cv2.getRotationMatrix2D(center, -90, 1.0)
rotated = cv2.warpAffine(image, rotation_matrix, (w, h))
cv2.imshow("Rotated by -90 Degrees", rotated)
cv2.waitKey(0)

# finally, use the imutils helper to rotate the image by 180 degrees
# (flipping it upside down)
rotated = imutils.rotate(image, 180)
cv2.imshow("Rotated by 180 Degrees", rotated)
cv2.waitKey(0)
| 29.864865
| 74
| 0.717647
|
4a09dbc05386be205ec762b8ed6d3bbee6a83129
| 1,927
|
py
|
Python
|
python/GafferAppleseedUITest/__init__.py
|
ddesmond/gaffer
|
4f25df88103b7893df75865ea919fb035f92bac0
|
[
"BSD-3-Clause"
] | 561
|
2016-10-18T04:30:48.000Z
|
2022-03-30T06:52:04.000Z
|
python/GafferAppleseedUITest/__init__.py
|
ddesmond/gaffer
|
4f25df88103b7893df75865ea919fb035f92bac0
|
[
"BSD-3-Clause"
] | 1,828
|
2016-10-14T19:01:46.000Z
|
2022-03-30T16:07:19.000Z
|
python/GafferAppleseedUITest/__init__.py
|
ddesmond/gaffer
|
4f25df88103b7893df75865ea919fb035f92bac0
|
[
"BSD-3-Clause"
] | 120
|
2016-10-18T15:19:13.000Z
|
2021-12-20T16:28:23.000Z
|
##########################################################################
#
# Copyright (c) 2015, Esteban Tovagliari. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from .DocumentationTest import DocumentationTest
from .NodeUITest import NodeUITest
if __name__ == "__main__":
    # NOTE(fix): `unittest` was referenced without ever being imported in
    # this module, so running it directly raised NameError; import it here.
    import unittest

    unittest.main()
| 45.880952
| 77
| 0.689673
|
4a09dc5094ef3c9586b964f43ce1c7754715a8a2
| 682
|
py
|
Python
|
ex076.py
|
jefernathan/Python
|
2f840a625e8d46d41ab36df07ef50ae15a03c5ab
|
[
"MIT"
] | null | null | null |
ex076.py
|
jefernathan/Python
|
2f840a625e8d46d41ab36df07ef50ae15a03c5ab
|
[
"MIT"
] | null | null | null |
ex076.py
|
jefernathan/Python
|
2f840a625e8d46d41ab36df07ef50ae15a03c5ab
|
[
"MIT"
] | null | null | null |
# Build a single tuple holding product names and their respective prices in
# sequence, then print a tabular price listing of all the items.
pc_do_cev = ('Processador', 329,
             'Memória ', 304,
             'Placa mãe', 278,
             'HD', 173,
             'SSD', 224.5,
             'Fonte', 169,
             'Gabinete', 133,
             'Monitor', 280,
             'Kit teclado e mouse', 130,
             'Windows 10 pro', 120,
             'Antivírus', 40)
print('-='*15)
print(f'{"Pc do Curso em Vídeo":^30}')
print('-='*15)
# names live at the even indexes, prices at the odd index right after each
for item, preco in zip(pc_do_cev[0::2], pc_do_cev[1::2]):
    print(f'{item:.<21}', end='')
    print(f'R${preco:>9.2f}')
print('-='*15)
| 27.28
| 187
| 0.585044
|
4a09dd23289de16b18eebd4f67739846b48111c8
| 3,608
|
py
|
Python
|
tests/core/test_history.py
|
grdorin/mopidy
|
76db44088c102d7ad92a3fc6a15a938e66b99b0d
|
[
"Apache-2.0"
] | 6,700
|
2015-01-01T03:57:59.000Z
|
2022-03-30T09:31:31.000Z
|
tests/core/test_history.py
|
grdorin/mopidy
|
76db44088c102d7ad92a3fc6a15a938e66b99b0d
|
[
"Apache-2.0"
] | 1,141
|
2015-01-02T09:48:59.000Z
|
2022-03-28T22:25:30.000Z
|
tests/core/test_history.py
|
grdorin/mopidy
|
76db44088c102d7ad92a3fc6a15a938e66b99b0d
|
[
"Apache-2.0"
] | 735
|
2015-01-01T21:15:50.000Z
|
2022-03-20T16:13:44.000Z
|
import unittest
from mopidy.core import HistoryController
from mopidy.internal.models import HistoryState, HistoryTrack
from mopidy.models import Artist, Ref, Track
class PlaybackHistoryTest(unittest.TestCase):
    """Exercise HistoryController's playback-history bookkeeping."""

    def setUp(self):  # noqa: N802
        self.tracks = [
            Track(
                uri="dummy1:a",
                name="foo",
                artists=[Artist(name="foober"), Artist(name="barber")],
            ),
            Track(uri="dummy2:a", name="foo"),
            Track(uri="dummy3:a", name="bar"),
            Track(uri="dummy4:a", name="foo", artists=[Artist(name=None)]),
        ]
        self.history = HistoryController()

    def test_add_track(self):
        # every added track must bump the history length by exactly one
        for expected_length, track in enumerate(self.tracks[:3], start=1):
            self.history._add_track(track)
            assert self.history.get_length() == expected_length

    def test_non_tracks_are_rejected(self):
        # arbitrary objects are refused and leave the history untouched
        with self.assertRaises(TypeError):
            self.history._add_track(object())

        assert self.history.get_length() == 0

    def test_history_entry_contents(self):
        track = self.tracks[0]
        self.history._add_track(track)

        timestamp, ref = self.history.get_history()[0]

        # each entry pairs an int timestamp with a Ref naming track + artists
        assert isinstance(timestamp, int)
        assert ref.uri == track.uri
        assert track.name in ref.name
        assert all(artist.name in ref.name for artist in track.artists)

    def test_track_artist_no_name(self):
        # an artist whose name is None must not break adding the track
        self.history._add_track(self.tracks[3])
        assert self.history.get_length() == 1
class CoreHistorySaveLoadStateTest(unittest.TestCase):
    """Tests for HistoryController state export (_save_state) and import (_load_state)."""
    def setUp(self): # noqa: N802
        self.tracks = [
            Track(uri="dummy1:a", name="foober"),
            Track(uri="dummy2:a", name="foo"),
            Track(uri="dummy3:a", name="bar"),
        ]
        # Ref counterparts of the fixture tracks, as they appear in HistoryState.
        self.refs = []
        for t in self.tracks:
            self.refs.append(Ref.track(uri=t.uri, name=t.name))
        self.history = HistoryController()
    def test_save(self):
        # Saved state lists entries newest-first.
        self.history._add_track(self.tracks[2])
        self.history._add_track(self.tracks[1])
        value = self.history._save_state()
        assert len(value.history) == 2
        # last in, first out
        assert value.history[0].track == self.refs[1]
        assert value.history[1].track == self.refs[2]
    def test_load(self):
        # Restoring a HistoryState must reproduce the exact (timestamp, ref)
        # pairs in order.
        state = HistoryState(
            history=[
                HistoryTrack(timestamp=34, track=self.refs[0]),
                HistoryTrack(timestamp=45, track=self.refs[2]),
                HistoryTrack(timestamp=56, track=self.refs[1]),
            ]
        )
        coverage = ["history"]
        self.history._load_state(state, coverage)
        hist = self.history.get_history()
        assert len(hist) == 3
        assert hist[0] == (34, self.refs[0])
        assert hist[1] == (45, self.refs[2])
        assert hist[2] == (56, self.refs[1])
        # after import, adding more tracks must be possible
        self.history._add_track(self.tracks[1])
        hist = self.history.get_history()
        assert len(hist) == 4
        assert hist[0][1] == self.refs[1]
        assert hist[1] == (34, self.refs[0])
        assert hist[2] == (45, self.refs[2])
        assert hist[3] == (56, self.refs[1])
    def test_load_invalid_type(self):
        # Loading anything that is not a HistoryState raises TypeError.
        with self.assertRaises(TypeError):
            self.history._load_state(11, None)
    def test_load_none(self):
        # Loading None is a documented no-op rather than an error.
        self.history._load_state(None, None)
| 31.929204
| 75
| 0.591186
|
4a09dda5258a969ae8834b9b982cfccc35dc8753
| 1,758
|
py
|
Python
|
aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/SaveBatchTaskForDomainNameProxyServiceRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/SaveBatchTaskForDomainNameProxyServiceRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | 1
|
2020-05-31T14:51:47.000Z
|
2020-05-31T14:51:47.000Z
|
aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/SaveBatchTaskForDomainNameProxyServiceRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SaveBatchTaskForDomainNameProxyServiceRequest(RpcRequest):
    """Request object for the Domain API action
    SaveBatchTaskForDomainNameProxyService (API version 2018-01-29).

    Exposes getters/setters for the query parameters UserClientIp,
    DomainNames (a repeated parameter), Lang and Status.
    """
    def __init__(self):
        RpcRequest.__init__(self, 'Domain', '2018-01-29', 'SaveBatchTaskForDomainNameProxyService')
    def get_UserClientIp(self):
        return self.get_query_params().get('UserClientIp')
    def set_UserClientIp(self, UserClientIp):
        self.add_query_param('UserClientIp', UserClientIp)
    def get_DomainNames(self):
        return self.get_query_params().get('DomainNames')
    def set_DomainNames(self, DomainNames):
        # The API expects repeated values flattened as DomainName.1,
        # DomainName.2, ...; None entries are skipped but keep their index.
        for position, domain_name in enumerate(DomainNames, start=1):
            if domain_name is not None:
                self.add_query_param('DomainName.' + str(position), domain_name)
    def get_Lang(self):
        return self.get_query_params().get('Lang')
    def set_Lang(self, Lang):
        self.add_query_param('Lang', Lang)
    def get_Status(self):
        return self.get_query_params().get('Status')
    def set_Status(self, Status):
        self.add_query_param('Status', Status)
| 33.807692
| 94
| 0.754835
|
4a09dda58fc3f7e717b29f30374e3d9f0d4254d7
| 394
|
py
|
Python
|
pybitid/pysix.py
|
baby636/pybitid
|
e8be5cd387785576f1aa10863fc1356b0f38dc63
|
[
"MIT"
] | 1
|
2021-07-18T07:48:16.000Z
|
2021-07-18T07:48:16.000Z
|
pybitid/pysix.py
|
baby636/pybitid
|
e8be5cd387785576f1aa10863fc1356b0f38dc63
|
[
"MIT"
] | null | null | null |
pybitid/pysix.py
|
baby636/pybitid
|
e8be5cd387785576f1aa10863fc1356b0f38dc63
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
'''
Version: 0.0.4
Utils functions used for compatibility with Python 2.7 / 3.3
'''
def to_bytes(x):
    """Encode a unicode string as UTF-8 bytes.

    On Python 2 (where ``bytes is str``) the value is returned unchanged;
    on Python 3 it is encoded with the default UTF-8 codec.
    """
    if bytes == str:
        return x
    return x.encode()
def i2b(x):
    """Convert an integer (0-255) to a single-byte bytes object."""
    return chr(x) if bytes == str else bytes([x])
def b2i(x):
    """Convert a single byte to its integer value (identity on Python 3)."""
    return ord(x) if bytes == str else x
| 18.761905
| 60
| 0.662437
|
4a09de16f2f27450109daf5085b4f1ee881c2bc8
| 915
|
py
|
Python
|
blog_project/urls.py
|
bharathkumar-12/Blog-App
|
ffa280906ba3b0b40bdc3045ccd3f55bf8e7026f
|
[
"MIT"
] | 1
|
2020-12-05T04:41:46.000Z
|
2020-12-05T04:41:46.000Z
|
blog_project/urls.py
|
bharathkumar-12/Blog-App
|
ffa280906ba3b0b40bdc3045ccd3f55bf8e7026f
|
[
"MIT"
] | null | null | null |
blog_project/urls.py
|
bharathkumar-12/Blog-App
|
ffa280906ba3b0b40bdc3045ccd3f55bf8e7026f
|
[
"MIT"
] | null | null | null |
"""blog_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# URL routing table; patterns are tried in order.
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # Built-in auth views (login, logout, password management).
    path('accounts/',include('django.contrib.auth.urls')),
    # Project-specific account views; shares the 'accounts/' prefix with the
    # built-in auth URLs listed above.
    path('accounts/', include('accounts.urls'), name=''),
    # Blog app serves the site root.
    path('',include('blog.urls')),
]
| 36.6
| 77
| 0.696175
|
4a09de8a1977d55b5b8e6e31e6de6d51e4e3798e
| 1,610
|
py
|
Python
|
tests/test_cumsum.py
|
SamDM/Paddle2ONNX
|
5ae527e966c4ea62b1f35fd326efbc45385c5580
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cumsum.py
|
SamDM/Paddle2ONNX
|
5ae527e966c4ea62b1f35fd326efbc45385c5580
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cumsum.py
|
SamDM/Paddle2ONNX
|
5ae527e966c4ea62b1f35fd326efbc45385c5580
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from onnxbase import APIOnnx
from onnxbase import randtool
class Net(paddle.nn.Layer):
    """
    Minimal layer wrapping paddle.cumsum for ONNX export testing.
    """
    def __init__(self):
        super(Net, self).__init__()
    def forward(self, inputs):
        """
        Return the cumulative sum of `inputs` along axis 0.
        """
        x = paddle.cumsum(inputs, axis=0)
        return x
def test_cumsum_11():
    """
    api: paddle.cumsum
    op version: 11
    """
    net = Net()
    net.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    checker = APIOnnx(net, 'cumsum', [11])
    sample = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(sample))
    checker.run()
def test_cumsum_12():
    """
    api: paddle.cumsum
    op version: 12
    """
    net = Net()
    net.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    checker = APIOnnx(net, 'cumsum', [12])
    sample = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(sample))
    checker.run()
| 25.15625
| 78
| 0.632298
|
4a09df831e64341b71255227c058d148cc118c84
| 6,059
|
py
|
Python
|
PyAlgorithm/Graph_Algorithm/depth_first_search.py
|
allenliuzihao/PyAlgorithm
|
0468f1fc90795d7524e8674aecbfbd2214b256a7
|
[
"MIT"
] | null | null | null |
PyAlgorithm/Graph_Algorithm/depth_first_search.py
|
allenliuzihao/PyAlgorithm
|
0468f1fc90795d7524e8674aecbfbd2214b256a7
|
[
"MIT"
] | null | null | null |
PyAlgorithm/Graph_Algorithm/depth_first_search.py
|
allenliuzihao/PyAlgorithm
|
0468f1fc90795d7524e8674aecbfbd2214b256a7
|
[
"MIT"
] | null | null | null |
'''
Implementation and applications for depth first search
'''
from graph import Graph
# for finding the strongest connected components in a graph
finishing_time = 0 # for recording the current finishing time
leader = None # for recording the current leader of the SCC
'''
Given a node in the graph, find its depth first search searching path
'''
def depth_first_search(value, graph):
    """Iterative DFS starting at the node holding `value`.

    Returns the list of nodes in the order they were visited. Nodes are
    marked visited when pushed, so each node appears at most once.
    """
    start = Graph.findNodeWithValue(graph.getNodes(), value)
    start.setVisited(True)
    pending = [start]
    visit_order = []
    while pending:
        current = pending.pop()
        visit_order.append(current)
        for adjacent in current.getNeighbors():
            if not adjacent.getVisited():
                adjacent.setVisited(True)
                pending.append(adjacent)
    return visit_order
'''
Small modification of depth first search to label the topological order of each node
'''
def dfs_topological_sort(node, graph, label):
    """Recursive DFS that hands out topological labels in decreasing order.

    `label` is the next label to assign; after labelling this node's whole
    unvisited subtree the function returns the next free (smaller) label.
    """
    node.setVisited(True)
    remaining = label
    for successor in node.getNeighbors():
        if not successor.getVisited():
            remaining = dfs_topological_sort(successor, graph, remaining)
    # Post-order: the node gets its label only after all descendants.
    node.setLabel(remaining)
    return remaining - 1
'''
Topological sort using DFS
'''
def topological_sort(graph):
    """Topologically sort a DAG using DFS labelling.

    Each node receives a unique label in 1..n via dfs_topological_sort
    (smaller label = earlier in the order); the result is the node list
    sorted by that label.
    """
    nodes = graph.getNodes()
    label = len(nodes)
    for node in nodes:
        if not node.getVisited():
            label = dfs_topological_sort(node, graph, label)
    # Sort by label instead of the original O(n^2) scan that re-walked the
    # node list once per label value; this also drops the Python-2-only
    # xrange builtin.
    return sorted(nodes, key=lambda n: n.getLabel())
'''
Helper for DFS on the reverse graph for the SCC problem
'''
def depth_first_search_SCC_reverse(graph, node, order_of_visit):
    """First pass of the SCC computation, run on the reversed graph.

    Recursively explores from `node`, stamping each node with its finishing
    time (module-level `finishing_time` counter) and prepending it to
    `order_of_visit`, which therefore ends up in decreasing finish time.
    """
    global finishing_time
    node.setVisited(True)
    neighbors = node.getNeighbors()
    for neighbor in neighbors:
        if not neighbor.getVisited():
            depth_first_search_SCC_reverse(graph, neighbor, order_of_visit)
    # Post-order position: every reachable descendant finished before this node.
    finishing_time += 1
    node.setLabel(finishing_time)
    # NOTE(review): insert(0, ...) is O(n) per call; append + reverse at the
    # caller would be linear overall.
    order_of_visit.insert(0, node)
'''
Helper for finding the leaders in the graph using DFS
'''
def depth_first_search_SCC_leader(graph, node, SCC):
    """Second SCC pass: collect one component into `SCC`.

    Marks `node` visited and recursively gathers every not-yet-visited node
    reachable from it, tagging each neighbor with the module-level `leader`.
    NOTE(review): the starting node itself never goes through setLeader here;
    confirm the caller (find_SCC) accounts for that.
    """
    global leader
    node.setVisited(True)
    neighbors = node.getNeighbors()
    for neighbor in neighbors:
        # Leader is (re)assigned even for already-visited neighbors.
        neighbor.setLeader(leader)
        if not neighbor.getVisited():
            SCC.append(neighbor)
            depth_first_search_SCC_leader(graph, neighbor, SCC)
'''
Find the strongest connected components in DAG (every node in SCC can be reached from other node)
Idea:
1. Run the DFS on the reversed graph. Keep track of the finish time (from the order of visit) of each node
2. Run the DFS on the original graph in the order of decreasing finish time of each node.
Analysis:
Key property in DAG: acyclic. No cycle in the graph
Assume the SCC: C1, C2, C3
We know that the C1, C2 and C3 doesn't have cycle between them. Because for example if C1 has edge to C2, C2 has
edge to C1, then C1 C2 should have form a bigger SCC, which contracdict our previous assumption that C1 and C2 are
SCC
Also, reversing the edges in DAG doesn't change the SCC.
Then we need to prove that if C1 has edge to C2 in original graph. Then the max finishing time of C1 and C2 is in C2
This is true because in the reversed graph, C2 has an edge that points to C1. If we compute the finishing time in C1 first
, then by the point DFS finish in C1, C2 must have higher finishing time since it comes after C1. If we compute the finishing
time in C2 first, based on the property of DFS, the max finishing time must be in C2. So we have the lemma that if in original
graph C1 point to C2, then the max finish time must be in C2.
Then, given the example below, C0 -> C1 -> C2, in the second run of DFS, we will first explore C2 since it has the max finishing
time, then C1, then C0. This order of exploration will make sure that each run of DFS will find the SCC
Since we just run 2 times DFS, the overall run time is still O(m + n)
'''
def find_SCC(graph):
    """Return the strongly connected components of `graph` as a list of node lists.

    Two-pass (Kosaraju-style) algorithm: pass 1 runs DFS on the reversed
    graph to order nodes by decreasing finishing time; pass 2 runs DFS on
    the original graph in that order, each outer call yielding one SCC.
    """
    global leader
    graph.reverseEdges()
    SCCs = [] # store the final result in list
    order_of_visit = []
    for node in graph.getNodes():
        if not node.getVisited():
            depth_first_search_SCC_reverse(graph, node, order_of_visit)
    # Reset visitation bookkeeping and restore the original edge orientation
    # before the second pass.
    graph.clearVisited()
    graph.clearReversed()
    graph.reverseEdges()
    for node in order_of_visit:
        leader = node
        if not node.getVisited():
            SCC = [leader]
            depth_first_search_SCC_leader(graph, node, SCC)
            SCCs.append(SCC)
    return SCCs
if __name__ == '__main__':
    # Demo: build an 11-node directed graph and print its strongly connected
    # components. Uses print() calls and range() so the script runs unchanged
    # on both Python 2 and Python 3 (the original used the Python-2-only
    # `print` statement and `xrange`).
    graph = Graph()
    for i in range(1, 12):
        graph.addNode(i)
    #graph.addDirectedEdge(1, 7)
    #graph.addDirectedEdge(4, 1)
    #graph.addDirectedEdge(7, 4)
    #graph.addDirectedEdge(7, 9)
    #graph.addDirectedEdge(3, 9)
    #graph.addDirectedEdge(6, 3)
    #graph.addDirectedEdge(9, 6)
    #graph.addDirectedEdge(6, 8)
    #graph.addDirectedEdge(8, 2)
    #graph.addDirectedEdge(2, 5)
    #graph.addDirectedEdge(5, 8)
    graph.addDirectedEdge(1, 2)
    graph.addDirectedEdge(2, 3)
    graph.addDirectedEdge(3, 1)
    graph.addDirectedEdge(2, 4)
    graph.addDirectedEdge(4, 5)
    graph.addDirectedEdge(4, 6)
    graph.addDirectedEdge(6, 5)
    graph.addDirectedEdge(5, 7)
    graph.addDirectedEdge(7, 6)
    graph.addDirectedEdge(3, 9)
    graph.addDirectedEdge(3, 11)
    graph.addDirectedEdge(11, 9)
    graph.addDirectedEdge(10, 11)
    graph.addDirectedEdge(9, 10)
    graph.addDirectedEdge(9, 6)
    graph.addDirectedEdge(9, 8)
    graph.addDirectedEdge(8, 10)
    graph.addDirectedEdge(8, 7)
    for SCC in find_SCC(graph):
        print("******************************* component ********************************")
        for node in SCC:
            print(node)
        print("******************************* end component *****************************\n")
| 32.575269
| 128
| 0.674039
|
4a09e181cf221af2a8c9a479a514063386caa1d8
| 393
|
py
|
Python
|
akatsuki/akatsuki/wsgi.py
|
raun/vigilant-system
|
f535e16d63c7d48e550b52e4d1e833569cb43669
|
[
"MIT"
] | null | null | null |
akatsuki/akatsuki/wsgi.py
|
raun/vigilant-system
|
f535e16d63c7d48e550b52e4d1e833569cb43669
|
[
"MIT"
] | 8
|
2021-09-08T14:13:53.000Z
|
2021-09-09T02:12:44.000Z
|
akatsuki/akatsuki/wsgi.py
|
raun/vigilant-system
|
f535e16d63c7d48e550b52e4d1e833569cb43669
|
[
"MIT"
] | 1
|
2021-08-29T05:17:43.000Z
|
2021-08-29T05:17:43.000Z
|
"""
WSGI config for akatsuki project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already does.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'akatsuki.settings')
# Module-level WSGI callable used by application servers.
application = get_wsgi_application()
| 23.117647
| 78
| 0.78626
|
4a09e1c9a65ea7ac37b958d9ce8aab1a70b95d0a
| 15,920
|
py
|
Python
|
abc_smc.py
|
2piruben/BFM_multistate
|
daa098fa56fb4bc3be04f40fd96a1bc939b72687
|
[
"MIT"
] | null | null | null |
abc_smc.py
|
2piruben/BFM_multistate
|
daa098fa56fb4bc3be04f40fd96a1bc939b72687
|
[
"MIT"
] | null | null | null |
abc_smc.py
|
2piruben/BFM_multistate
|
daa098fa56fb4bc3be04f40fd96a1bc939b72687
|
[
"MIT"
] | null | null | null |
import data_analysis as ed
import numpy as np
from scipy.stats import norm, uniform, multivariate_normal
from scipy.optimize import minimize
import sys,ast
from random import choices,seed,random
from tqdm import tqdm
from p_tqdm import p_umap
from functools import partial
import models as md
gamma = 300 # generic label
## Aproximate Bayesian Computation Sequential Monte Carlo. It parallelizes the search using p_umap
def GeneratePar_Multimodel(processcall = 0, models = [], modelweights = None,
        previouspars = None, previousweights = None,
        eps_dist = 10000, kernels = None, includecondition = 'all'):
    """Draw one accepted ABC-SMC particle across a set of candidate models.

    Keeps proposing (model, parameter) pairs until one achieves a summed
    distance <= eps_dist over the three gamma conditions, then returns
    (proposed_pars, distance, importance_weight, evaluated_distances,
    proposed_model).
    """
    # Generates a parameter point from the the list of models "models" each one with a weight "modelweights"
    # "previouspars" is used to generate the sampling kernel from the previous iteration if available
    # "eps_dist" is the target distance of the SMC step
    # processcall is a dummy variable that can be useful when tracking the function performance
    # it also allows the use of p_tqdm mapping that forces the use of an iterator
    seed() # setting random seeds for each thread/process to avoid having the same random sequence in each thread
    np.random.seed()
    evaluated_distances = []
    distance = eps_dist + 1 # original distance is beyond eps_dist
    while distance>eps_dist: # until a suitable parameter is found
        proposed_model = choices(models, weights = modelweights)[0]
        integrator_name = md.model_properties[proposed_model]['integrator_name']
        # Model-specific condition overrides the function argument if present.
        if 'includecondition' in md.model_properties[proposed_model]:
            includecondition = md.model_properties[proposed_model]['includecondition']
        if previouspars is None: # if is the first iteration
            proposed_pars = md.sampleprior(proposed_model)
            # print('proposed_pars_firsttime', proposed_model, proposed_pars)
        else:
            # Resample a previous particle and perturb it with the model kernel.
            selected_pars = choices(previouspars[proposed_model], weights = previousweights[proposed_model])[0]
            proposed_pars = np.array(selected_pars) + kernels[proposed_model].rvs()
        if md.model_properties[proposed_model]['sto'] == 'ME': # full stochastic integration
            sto_trajs = True
        elif md.model_properties[proposed_model]['sto'] == 'MF':
            sto_trajs = False
        resurrectionzeros = md.model_properties[proposed_model]['resurrectionzeros']
        # print('proposed_pars', proposed_pars)
        # Only evaluate the (expensive) likelihood distance inside the prior support.
        if (md.evaluateprior(proposed_pars,proposed_model) > 0):
            distance = 0
            gammas = [300,500,1300]
            for gamma in gammas:
                integration_pars = md.prior_to_integrator(proposed_pars,proposed_model,gamma)
                distance += ed.DistanceLikelihood(gamma, integrator_name, integration_pars,
                    return_trajs = False, sto_trajs = sto_trajs,
                    resurrectionzeros = resurrectionzeros, includecondition = includecondition)
            # print('with distance: {} and eps_dist" {}\n'.format(distance,eps_dist))
            evaluated_distances.append(distance)
        else:
            # Outside the prior: force another proposal without evaluating.
            distance = 2*eps_dist
    # Calculate weight
    # Standard SMC importance weight: prior density over the kernel-smoothed
    # density of the previous population.
    if previouspars is None:
        weight = 1
    else:
        sum_denom = 0
        for ipars,pars in enumerate(previouspars[proposed_model]):
            kernel_evaluation = kernels[proposed_model].pdf(proposed_pars-pars)
            sum_denom += kernel_evaluation*previousweights[proposed_model][ipars]
        weight = md.evaluateprior(proposed_pars, model = proposed_model)/sum_denom
    return proposed_pars, distance, weight, evaluated_distances, proposed_model
####
def GeneratePars_Multimodel(models = [], modelweights = [], previouspars = None,
        previousweights = None, eps_dist = 10000, Npars = 1000,
        previouskerneldict = None, kernelfactor = 1.0):
    """Generate Npars accepted particles in parallel (one SMC population step).

    Builds per-model perturbation kernels from the previous population,
    fans out GeneratePar_Multimodel via p_umap, and returns
    (newpars, newweights, accepted_distances, acceptance_rate, kerneldict).
    NOTE(review): `models`/`modelweights` use mutable default arguments, and
    `modelweights` is mutated in place below — the caller's list is modified.
    """
    # Calls GeneratePar in parallel using p_umap to generate Npars parameter valid points
    previouscovardict = {}
    kerneldict = {}
    if previouspars is not None: # Loop to remove model from sampling if does not have points left
        for imodel,model in enumerate(models):
            if not previouspars[model]: # if there are not pars left for this model
                modelweights[imodel] = 0 # do not attempt to sample it
    if previouspars is not None:
        for imodel,model in enumerate(models):
            if len(previouspars[model])>3: # if
                # Kernel covariance: twice the (scaled) empirical covariance
                # of the previous population for this model.
                previouscovardict[model] = 2.0*kernelfactor*np.cov(np.array(previouspars[model]).T)
                # print('covariance matrix previous parset:',previouscovar)
                kerneldict[model] = multivariate_normal(cov = previouscovardict[model], allow_singular = True)
            else: # if there are very few points left, use the last kernel
                kerneldict[model] = previouskerneldict[model]
                # print('Sample from kernel for model ', model, kerneldict[model].rvs())
    else:
        kerneldict = None # first evaluation, when there is no parameters (or need) to estimate kernel
    trials = 0
    # for N in tqdm(range(Npars)):
    # GenerateParstePar(0,model = 'Berg',gamma = gamma, previouspars = previouspars,
    # previousweights = previousweights, eps_dist = eps_dist, kernel = kernel)
    results = p_umap(
        partial(GeneratePar_Multimodel, models = models, modelweights = modelweights,
            previouspars = previouspars, previousweights = previousweights,
            eps_dist = eps_dist, kernels = kerneldict), range(Npars))
    newpars = {}
    newweights = {}
    accepted_distances = []
    evaluated_distances = []
    for model in models:
        newpars[model] = []
        newweights[model] = []
    # Unpack (pars, distance, weight, evaluated_distances, model) tuples,
    # bucketing particles by the model that produced them.
    for result in results:
        accepted_distances.append(result[1])
        evaluated_distances.extend(result[3]) # flatten list
        newpars[result[4]].append(result[0])
        newweights[result[4]].append(result[2])
    # Normalize weights within each model.
    for model in models:
        newweights[model] /= np.sum(newweights[model])
    print("acceptance rate:", Npars/len(evaluated_distances))
    print("min accepted distance: ",np.min(accepted_distances))
    print("median accepted distance: ",np.median(accepted_distances))
    print("median evaluated distance: ",np.median(evaluated_distances))
    return(newpars,newweights,accepted_distances, Npars/len(evaluated_distances), kerneldict)
def Sequential_ABC_Multimodel(models = ['BergU','WeakStrongU'],initial_dist = 30000,
        final_dist =500, Npars = 1000, adaptative_kernel = False):
    """Run the full ABC-SMC loop, shrinking the distance threshold each round.

    Each round's particles/weights/distances are written to
    smc_multimode/<models>/..., with the last round labelled 'final'.
    """
    # Main function call for the SMC.
    # - models - is a list of models from models.py
    # initial_dist, final_dist - initial and final distance for the SMC
    # Npars - number of parameters to sample per iteration
    # prior_label - if numeric, it can be used to restart the SMC at an iteration of a previous run
    # NOTE(review): `prior_label` is described above but is not a parameter of
    # this function — the restart feature appears unimplemented here.
    # adaptative_kernel if True allows to change the bandwith of the kernel if the number of points accepted is too large/small
    pars = None # dictionary containing the accepted parameters for each model
    weights = None
    distance = initial_dist
    idistance = 0
    not_converged = True
    last_round = False
    kernelfactor = 1.0
    kerneldict = None
    # Start from a uniform prior over the candidate models.
    modelweights = np.ones(len(models))/len(models)
    while not_converged:
        idistance += 1
        print("SMC step with target distance: {}".format(distance))
        pars,weights,accepted_distances,acceptance,kerneldict = GeneratePars_Multimodel(models = models,
            modelweights = modelweights,
            previouspars = pars, previousweights = weights,
            eps_dist = distance, Npars = Npars,
            previouskerneldict = kerneldict, kernelfactor = kernelfactor)
        # Next threshold: the median of this round's accepted distances.
        proposed_dist = np.median(accepted_distances)
        if last_round is True:
            not_converged = False
            label = 'final'
        else:
            label = idistance
        if proposed_dist<final_dist:
            distance = final_dist
            last_round = True
        else:
            distance = proposed_dist
        foldername = ''
        for model in models:
            foldername += model
            foldername += '_'
        # Persist this round's state per model.
        for model in models:
            np.savetxt('smc_multimode/{}/pars_{}_{}.out'.format(foldername,model,label), pars[model])
            np.savetxt('smc_multimode/{}/weights_{}_{}.out'.format(foldername,model,label), weights[model])
            np.savetxt('smc_multimode/{}/distances_{}_{}.out'.format(foldername,model,label), accepted_distances)
        # Optional kernel-width adaptation keyed on the acceptance rate.
        if acceptance < 0.1 and kernelfactor>0.1 and adaptative_kernel:
            kernelfactor = kernelfactor * 0.7
            print('Reducing kernel width to : ',kernelfactor)
        elif acceptance > 0.5 and kernelfactor<1 and adaptative_kernel:
            kernelfactor = kernelfactor / 0.7
            print('Increasing kernel width to : ',kernelfactor)
def DeterministicMinimizations(method = 'Nelder-Mead',
        models = ['WeakStrong_weakcatch','WeakStrong_fullcatchbond','WeakStrongU','BergO'],
        repeats = 15):
    """Repeated local optimisation of the likelihood distance for each model.

    For every model, runs `repeats` scipy.optimize.minimize calls (method
    `method`) from prior-sampled starting points and prints each result.
    """
    # Search for the maximum of the Likelihood function using a valid method from scipy.optimize.
    for model in models:
        print('Evaluating Model {}'.format(model))
        for repeat in range(repeats):
            sto = md.model_properties[model]['sto']
            if sto == 'ME': # full stochastic integration
                sto_trajs = True
            elif sto == 'MF':
                sto_trajs = False
            if 'likelihoodcondition' in md.model_properties[model]:
                likelihoodconditions = md.model_properties[model]['likelihoodcondition']
            else:
                likelihoodconditions = 'all'
            # One partial distance function per gamma condition.
            distancefunction300 = partial(ed.DistanceLikelihood, gamma = 300, model = md.model_properties[model]['integrator_name'],
                return_trajs = False, sto_trajs = sto_trajs, resurrectionzeros = md.model_properties[model]['resurrectionzeros'],
                includecondition = likelihoodconditions)
            distancefunction500 = partial(ed.DistanceLikelihood, gamma = 500, model = md.model_properties[model]['integrator_name'],
                return_trajs = False, sto_trajs = sto_trajs, resurrectionzeros = md.model_properties[model]['resurrectionzeros'],
                includecondition = likelihoodconditions)
            distancefunction1300 = partial(ed.DistanceLikelihood, gamma = 1300, model = md.model_properties[model]['integrator_name'],
                return_trajs = False, sto_trajs = sto_trajs, resurrectionzeros = md.model_properties[model]['resurrectionzeros'],
                includecondition = likelihoodconditions)
            # Objective: sum of the three per-gamma distances.
            distancefull = lambda x : (distancefunction300(params = md.prior_to_integrator(x,model,300)) +
                distancefunction500(params = md.prior_to_integrator(x,model,500)) +
                distancefunction1300(params = md.prior_to_integrator(x,model,1300)))
            # Starting point: one accepted prior sample for this model.
            x0 = GeneratePar_Multimodel(processcall = 0, models = [model], modelweights = None,
                previouspars = None, previousweights = None, eps_dist = 10000, kernels = None)[0]
            # x0 can be replaced with the MAP of the ABC-SMC
            print('Optimization {}: x0 = {}'.format(repeat,x0))
            res = minimize(distancefull, x0 = x0, method = method)
            print('Result: (success = {}) {}'.format(res.success, res.x,))
            print('With distance: {}'.format(distancefull(res.x)))
            print('\n')
        print('\n','-'*10,'\n')
def BayesFactors(models = ['WeakStrong_weakcatch','BergO','WeakStrongU','WeakStrong_fullcatchbond'], threshold = 1000):
    """Approximate pairwise Bayes factors between models.

    Approximates each model's evidence by the sum of ABC importance weights
    of kernel-perturbed particles whose distance stays under `threshold`,
    then prints the ratio for every model pair (Didelot et al. approach,
    per the original comment).
    """
    # Using Didelot method to calculate BF from summary statistic by approximating
    # the evidence for the model as the sum of weights of ABC
    sumweights_list = []
    # for model in models:
    for model in models:
        for bandwidth in [1.0]:
            sto = md.model_properties[model]['sto']
            if sto == 'ME': # full stochastic integration
                sto_trajs = True
            elif sto == 'MF':
                sto_trajs = False
            distancefunction300 = partial(ed.DistanceLikelihood, gamma = 300, model = md.model_properties[model]['integrator_name'],
                return_trajs = False, sto_trajs = sto_trajs, resurrectionzeros = md.model_properties[model]['resurrectionzeros'])
            distancefunction500 = partial(ed.DistanceLikelihood, gamma = 500, model = md.model_properties[model]['integrator_name'],
                return_trajs = False, sto_trajs = sto_trajs, resurrectionzeros = md.model_properties[model]['resurrectionzeros'])
            distancefunction1300 = partial(ed.DistanceLikelihood, gamma = 1300, model = md.model_properties[model]['integrator_name'],
                return_trajs = False, sto_trajs = sto_trajs, resurrectionzeros = md.model_properties[model]['resurrectionzeros'])
            distancefull = lambda x : (distancefunction300(params = md.prior_to_integrator(x,model,300)) +
                distancefunction500(params = md.prior_to_integrator(x,model,500)) +
                distancefunction1300(params = md.prior_to_integrator(x,model,1300)))
            ##### Looking for best file to compare
            # Scan smc output files for the first SMC round whose max accepted
            # distance is already below `threshold`.
            nn = 10
            low = True
            dis = np.loadtxt('smc/distances_{}_MF_300_{}.out'.format(model,nn))
            print('Testing file {} with max dist: {}'.format(nn,max(dis)))
            while (low is True):
                try:
                    dis = np.loadtxt('smc/distances_{}_MF_300_{}.out'.format(model,nn))
                    if max(dis)>threshold:
                        print('Testing file {} with max dist: {}'.format(nn,max(dis)))
                        nn += 1
                    else:
                        print('Accepted file {} with max dist: {}'.format(nn,max(dis)))
                        nn += 1
                        low = False
                except: # if the file is not found
                    # NOTE(review): bare except also swallows KeyboardInterrupt
                    # and genuine bugs; `except OSError` would match the
                    # "file not found" intent stated above.
                    print('File note found for model {} with threshold {}'.format(model,threshold))
                    dis = np.loadtxt('smc/distances_{}_MF_300_final.out'.format(model))
                    print('Using final file with max dist {}'.format(max(dis)))
                    nn = 'final'
                    low = False
            data = np.loadtxt('smc/pars_{}_MF_300_{}.out'.format(model,nn))
            # Perturb the accepted particles with a Gaussian kernel scaled by
            # the population covariance.
            covar = np.cov(data.T)
            kernel = multivariate_normal(cov = bandwidth*covar)
            perturbations = kernel.rvs(size = len(data))
            newdata = data + perturbations
            print('covar dim for model {} is {}'.format(model,covar.shape))
            sumweights = 0
            for point in tqdm(newdata):
                priorevaluation = md.evaluateprior(point,model)
                #if (priorevaluation > 0):
                if True:
                    distance = distancefull(point)
                    if distance < threshold:
                        # Importance weight: prior density over the
                        # kernel-smoothed density of the accepted population.
                        sum_denom = 0
                        for ipars,pars in enumerate(data):
                            kernel_evaluation = kernel.pdf(point-pars)
                            sum_denom += kernel_evaluation
                        sumweights += priorevaluation/sum_denom
            print('Sum of weights for model {} with bd {} is :{}'.format(model, bandwidth, sumweights))
            sumweights_list.append(sumweights)
    for i in range(len(models)):
        for j in range (len(models)):
            if i<j:
                print("Approxiamte Bayes factor of {} over {} is {}".format(
                    models[i],models[j], sumweights_list[i]/sumweights_list[j]))
| 46.011561
| 144
| 0.629334
|
4a09e35bb7d0f0c0a070f51c2ed85db4b0429012
| 4,551
|
py
|
Python
|
pims/application.py
|
Cytomine-ULiege/pims
|
3c13f054be3ce9b6755428ccd9c5e0c1a8fb02d4
|
[
"Apache-2.0"
] | 2
|
2022-01-19T08:58:12.000Z
|
2022-01-28T14:40:41.000Z
|
pims/application.py
|
Cytomine-ULiege/pims
|
3c13f054be3ce9b6755428ccd9c5e0c1a8fb02d4
|
[
"Apache-2.0"
] | 18
|
2021-09-20T08:47:11.000Z
|
2022-03-14T15:51:37.000Z
|
pims/application.py
|
Cytomine-ULiege/pims
|
3c13f054be3ce9b6755428ccd9c5e0c1a8fb02d4
|
[
"Apache-2.0"
] | null | null | null |
# * Copyright (c) 2020-2021. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import logging
logger = logging.getLogger("pims.app")
logger.info("[green bold]PIMS initialization...")
from pims.fastapi_tweaks import apply_fastapi_tweaks
apply_fastapi_tweaks()
import time
from fastapi import FastAPI, Request
from pydantic import ValidationError
from pims.cache import _startup_cache
from pims.config import get_settings
from pims.docs import get_redoc_html
from pims.api.exceptions import add_problem_exception_handler
from pims.api import (
server, housekeeping, formats, metadata, thumb, window, resized, annotation, tile,
operations,
histograms, filters, colormaps
)
from . import __api_version__, __version__
# Single ASGI application instance. Built-in Swagger/ReDoc routes are
# disabled (docs_url/redoc_url=None); a custom /docs route is defined below.
app = FastAPI(
    title="Cytomine Python Image Management Server PIMS",
    description="Cytomine Python Image Management Server (PIMS) HTTP API. "
                "While this API is intended to be internal, a lot of the "
                "following specification can be ported to the "
                "external (public) Cytomine API.",
    version=__api_version__,
    docs_url=None,
    redoc_url=None,
)
@app.on_event("startup")
async def startup():
    """Validate settings, warn about missing native speedups, and init the cache.

    Exits the process (exit(-1)) when the PIMS configuration cannot be
    read or parsed; cache connection failures only disable caching.
    """
    # Check PIMS configuration
    try:
        settings = get_settings()
        logger.info("[green bold]PIMS is starting with config:[/]")
        for k, v in settings.dict().items():
            logger.info(f"[green]* {k}:[/] [blue]{v}[/]", extra={"highlight": False})
    except ValidationError as e:
        logger.error("Impossible to read or parse some PIMS settings:")
        logger.error(e)
        exit(-1)
    # Check optimisation are enabled for external libs
    from pydantic import compiled as pydantic_compiled
    if not pydantic_compiled:
        logger.warning(f"[red]Pydantic is running in non compiled mode.")
    from pyvips import API_mode as pyvips_binary
    if not pyvips_binary:
        logger.warning("[red]Pyvips is running in non binary mode.")
    from shapely.speedups import enabled as shapely_speedups
    if not shapely_speedups:
        logger.warning("[red]Shapely is running without speedups.")
    # Caching
    if not get_settings().cache_enabled:
        logger.warning(f"[orange3]Cache is disabled by configuration.")
    else:
        try:
            await _startup_cache(__version__)
            logger.info(f"[green]Cache is ready!")
        except ConnectionError:
            # Best-effort: PIMS keeps running without the cache.
            logger.error(
                f"[red]Impossible to connect to cache database. "
                f"Disabling cache!"
            )
@app.middleware("http")
async def log_requests(request: Request, call_next):
    """Log method, path, status code, duration and query params of each request."""
    started = time.time()
    response = await call_next(request)
    elapsed_ms = (time.time() - started) * 1000
    query_args = dict(request.query_params)
    fields = [
        ('method', request.method, 'magenta'),
        ('path', request.url.path, 'blue'),
        ('status', response.status_code, 'yellow'),
        ('duration', f"{elapsed_ms:.2f}ms", 'green'),
        ('params', query_args, 'blue'),
    ]
    # Render each field as a rich-style colored segment.
    line = " ".join(f"[{color}]{value}[/]" for _name, value, color in fields)
    logger.info(line, extra={"highlight": False})
    return response
@app.get("/docs", include_in_schema=False)
def docs(req: Request):
    """Serve the custom ReDoc documentation page, honoring any root path prefix."""
    prefix = req.scope.get("root_path", "").rstrip("/")
    return get_redoc_html(openapi_url=prefix + app.openapi_url, title=app.title)
# Register all API routers on the application.
app.include_router(metadata.router)
app.include_router(tile.router)
app.include_router(thumb.router)
app.include_router(resized.router)
app.include_router(window.router)
app.include_router(annotation.router)
app.include_router(histograms.router)
app.include_router(formats.router)
app.include_router(filters.router)
app.include_router(colormaps.router)
app.include_router(operations.router)
app.include_router(housekeeping.router)
app.include_router(server.router)
# Install the application-wide "problem" exception handler last.
add_problem_exception_handler(app)
| 32.507143
| 86
| 0.692595
|
4a09e3b33b3f6a1b7f06c9d91f83c91374f58d63
| 2,014
|
py
|
Python
|
lyrebird/utils.py
|
huihuilong/lyrebird
|
732ad57850c64a1a1800d82027ebca1c9e18f5bf
|
[
"MIT"
] | null | null | null |
lyrebird/utils.py
|
huihuilong/lyrebird
|
732ad57850c64a1a1800d82027ebca1c9e18f5bf
|
[
"MIT"
] | null | null | null |
lyrebird/utils.py
|
huihuilong/lyrebird
|
732ad57850c64a1a1800d82027ebca1c9e18f5bf
|
[
"MIT"
] | null | null | null |
import re
import math
import time
import socket
from contextlib import closing
def convert_size(size_bytes):
    """Render a byte count as a human-readable string, e.g. 1536 -> '1.5 KB'."""
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return "%s %s" % (scaled, units[exponent])
def convert_time(duration):
    """Render a duration in seconds: whole milliseconds below 1s, else seconds."""
    if duration >= 1:
        return str(round(duration, 2)) + 's'
    return str(round(duration * 1000)) + 'ms'
def timeit(method):
    """Decorator that prints the wall-clock execution time of *method* (in ms).

    Returns the wrapped callable's result unchanged.
    """
    from functools import wraps

    # wraps preserves __name__/__doc__ of the decorated callable; without it
    # every decorated function reports as 'timed', breaking introspection.
    @wraps(method)
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        print(f'{method.__name__} execution time {(te-ts)*1000}')
        return result
    return timed
def is_port_in_use(port, host='127.0.0.1'):
    """Return True if a TCP connection to (host, port) succeeds within 1 second.

    A refused/timed-out connection yields False. Uses contextlib.closing
    (already imported at module level) instead of a manual try/finally close.
    """
    try:
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            sock.settimeout(1)
            sock.connect((host, int(port)))
            return True
    except socket.error:
        return False
def is_target_match_patterns(pattern_list, target):
    """Return True when *target* matches at least one regex in *pattern_list*.

    An empty/None pattern list or an empty/None target yields False.
    """
    if not pattern_list or not target:
        return False
    return any(re.search(pattern, target) for pattern in pattern_list)
def find_free_port():
    """Bind to an OS-assigned ephemeral port and return its number.

    Note: the port may be reused by another process after this returns;
    callers should bind it promptly.
    """
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        # SO_REUSEADDR must be set BEFORE bind() to affect the bind;
        # the original set it afterwards, where it had no effect.
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(('', 0))
        return s.getsockname()[1]
class CaseInsensitiveDict(dict):
    """dict subclass that stores and looks up string keys lower-cased.

    Only __setitem__/__getitem__/get are overridden, so other dict APIs
    (`in`, update, setdefault, pop, ...) remain case-sensitive on the
    lower-cased stored keys.
    """

    def __init__(self, source):
        # Parameter renamed from `dict`, which shadowed the builtin.
        for k, v in source.items():
            self.__setitem__(k, v)

    def __setitem__(self, key, value):
        super(CaseInsensitiveDict, self).__setitem__(key.lower(), value)

    def __getitem__(self, key):
        return super(CaseInsensitiveDict, self).__getitem__(key.lower())

    def get(self, key, default=None):
        return super(CaseInsensitiveDict, self).get(key.lower(), default)
| 25.493671
| 73
| 0.612214
|
4a09e405288756e66437914a5c1318b28a623453
| 11,487
|
py
|
Python
|
monasca_tempest_tests/tests/api/test_dimensions.py
|
guilhermesteinmuller/monasca-tempest-plugin
|
e6eb044ba96164f8f089036291d5331382e33ccd
|
[
"Apache-2.0"
] | null | null | null |
monasca_tempest_tests/tests/api/test_dimensions.py
|
guilhermesteinmuller/monasca-tempest-plugin
|
e6eb044ba96164f8f089036291d5331382e33ccd
|
[
"Apache-2.0"
] | null | null | null |
monasca_tempest_tests/tests/api/test_dimensions.py
|
guilhermesteinmuller/monasca-tempest-plugin
|
e6eb044ba96164f8f089036291d5331382e33ccd
|
[
"Apache-2.0"
] | null | null | null |
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP
# (C) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from urllib import urlencode
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from monasca_tempest_tests.tests.api import base
from monasca_tempest_tests.tests.api import constants
from monasca_tempest_tests.tests.api import helpers
class TestDimensions(base.BaseMonascaTest):
    """Tempest tests for the Monasca dimension-name/value listing endpoints.

    resource_setup publishes three metrics with known dimensions and blocks
    until the API reflects them; the tests then check list_dimension_names /
    list_dimension_values including limit/offset paging.

    NOTE(review): the module imports `from urllib import urlencode`, which is
    Python 2 only; on Python 3 this must be `from urllib.parse import urlencode`.
    """
    @classmethod
    def resource_setup(cls):
        """Create test metrics and wait until the API returns all of them."""
        super(TestDimensions, cls).resource_setup()
        metric_name1 = data_utils.rand_name()
        name1 = "name_1"
        name2 = "name_2"
        value1 = "value_1"
        value2 = "value_2"
        # Millisecond epoch timestamp used as the list_metrics start_time.
        timestamp = int(round(time.time() * 1000))
        time_iso = helpers.timestamp_to_iso(timestamp)
        metric1 = helpers.create_metric(name=metric_name1,
                                        dimensions={name1: value1,
                                                    name2: value2
                                                    })
        cls.monasca_client.create_metrics(metric1)
        # Same metric name, different dimension value -> two series for metric1.
        metric1 = helpers.create_metric(name=metric_name1,
                                        dimensions={name1: value2})
        cls.monasca_client.create_metrics(metric1)
        metric_name2 = data_utils.rand_name()
        name3 = "name_3"
        value3 = "value_3"
        metric2 = helpers.create_metric(name=metric_name2,
                                        dimensions={name3: value3})
        cls.monasca_client.create_metrics(metric2)
        metric_name3 = data_utils.rand_name()
        metric3 = helpers.create_metric(name=metric_name3,
                                        dimensions={name1: value3})
        cls.monasca_client.create_metrics(metric3)
        # Expected fixtures the tests below assert against.
        cls._test_metric1 = metric1
        cls._test_metric2 = metric2
        cls._test_metric_names = {metric_name1, metric_name2, metric_name3}
        cls._dim_names_metric1 = [name1, name2]
        cls._dim_names_metric2 = [name3]
        cls._dim_names = cls._dim_names_metric1 + cls._dim_names_metric2
        cls._dim_values_for_metric1 = [value1, value2]
        cls._dim_values = [value1, value2, value3]
        param = '?start_time=' + time_iso
        returned_name_set = set()
        # Poll until every created metric name is visible via the API.
        for i in range(constants.MAX_RETRIES):
            resp, response_body = cls.monasca_client.list_metrics(
                param)
            elements = response_body['elements']
            metric_name1_count = 0
            for element in elements:
                returned_name_set.add(str(element['name']))
                if (str(element['name']) == metric_name1):
                    metric_name1_count += 1
            # Java version of influxdb never returns both metric1 in the list but Python does.
            if cls._test_metric_names.issubset(returned_name_set) \
                    and (metric_name1_count == 2 or i == constants.MAX_RETRIES - 1):
                return
            time.sleep(constants.RETRY_WAIT_SECS)
        assert False, 'Unable to initialize metrics'
    @classmethod
    def resource_cleanup(cls):
        super(TestDimensions, cls).resource_cleanup()
    @decorators.attr(type='gate')
    def test_list_dimension_values_without_metric_name(self):
        """All values of a dimension name across every metric are returned."""
        param = '?dimension_name=' + self._dim_names[0]
        resp, response_body = self.monasca_client.list_dimension_values(param)
        self.assertEqual(200, resp.status)
        self.assertTrue({'links', 'elements'} == set(response_body))
        response_values_length = len(response_body['elements'])
        values = [str(response_body['elements'][i]['dimension_value'])
                  for i in range(response_values_length)]
        self.assertEqual(values, self._dim_values)
    @decorators.attr(type='gate')
    def test_list_dimension_values_with_metric_name(self):
        """Values are filtered to those used by the given metric name."""
        parms = '?metric_name=' + self._test_metric1['name']
        parms += '&dimension_name=' + self._dim_names[0]
        resp, response_body = self.monasca_client.list_dimension_values(parms)
        self.assertEqual(200, resp.status)
        self.assertTrue({'links', 'elements'} == set(response_body))
        response_values_length = len(response_body['elements'])
        values = [str(response_body['elements'][i]['dimension_value'])
                  for i in range(response_values_length)]
        self.assertEqual(values, self._dim_values_for_metric1)
    @decorators.attr(type='gate')
    def test_list_dimension_values_limit_and_offset(self):
        """Paging through dimension values matches the unpaged result set."""
        param = '?dimension_name=' + self._dim_names[0]
        resp, response_body = self.monasca_client.list_dimension_values(param)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        num_dim_values = len(elements)
        # Try every page size smaller than the full result set.
        for limit in range(1, num_dim_values):
            start_index = 0
            params = [('limit', limit)]
            offset = None
            while True:
                num_expected_elements = limit
                if (num_expected_elements + start_index) > num_dim_values:
                    num_expected_elements = num_dim_values - start_index
                these_params = list(params)
                # If not the first call, use the offset returned by the last
                # call
                if offset:
                    these_params.extend([('offset', str(offset))])
                query_parms = '?dimension_name=' + self._dim_names[0] + '&' + \
                              urlencode(these_params)
                resp, response_body = \
                    self.monasca_client.list_dimension_values(query_parms)
                self.assertEqual(200, resp.status)
                if not response_body['elements']:
                    self.fail("No metrics returned")
                response_values_length = len(response_body['elements'])
                if response_values_length == 0:
                    self.fail("No dimension names returned")
                new_elements = [str(response_body['elements'][i]
                                    ['dimension_value']) for i in
                                range(response_values_length)]
                self.assertEqual(num_expected_elements, len(new_elements))
                expected_elements = elements[start_index:start_index + limit]
                expected_dimension_values = \
                    [expected_elements[i]['dimension_value'] for i in range(
                        len(expected_elements))]
                self.assertEqual(expected_dimension_values, new_elements)
                start_index += num_expected_elements
                if start_index >= num_dim_values:
                    break
                # Get the next set
                offset = self._get_offset(response_body)
    @decorators.attr(type='gate')
    @decorators.attr(type=['negative'])
    def test_list_dimension_values_no_dimension_name(self):
        """Omitting dimension_name is rejected with 422 UnprocessableEntity."""
        self.assertRaises(exceptions.UnprocessableEntity,
                          self.monasca_client.list_dimension_values)
    @decorators.attr(type='gate')
    def test_list_dimension_names(self):
        """All dimension names across every metric are returned."""
        resp, response_body = self.monasca_client.list_dimension_names()
        self.assertEqual(200, resp.status)
        self.assertTrue({'links', 'elements'} == set(response_body))
        response_names_length = len(response_body['elements'])
        names = [str(response_body['elements'][i]['dimension_name']) for i
                 in range(response_names_length)]
        self.assertEqual(names, self._dim_names)
    @decorators.attr(type='gate')
    def test_list_dimension_names_with_metric_name(self):
        """Dimension names are filtered per metric."""
        self._test_list_dimension_names_with_metric_name(
            self._test_metric1['name'], self._dim_names_metric1)
        self._test_list_dimension_names_with_metric_name(
            self._test_metric2['name'], self._dim_names_metric2)
    @decorators.attr(type='gate')
    def test_list_dimension_names_limit_and_offset(self):
        """Paging through dimension names matches the unpaged result set."""
        resp, response_body = self.monasca_client.list_dimension_names()
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        num_dim_names = len(elements)
        # Try every page size smaller than the full result set.
        for limit in range(1, num_dim_names):
            start_index = 0
            params = [('limit', limit)]
            offset = None
            while True:
                num_expected_elements = limit
                if (num_expected_elements + start_index) > num_dim_names:
                    num_expected_elements = num_dim_names - start_index
                these_params = list(params)
                # If not the first call, use the offset returned by the last
                # call
                if offset:
                    these_params.extend([('offset', str(offset))])
                query_parms = '?' + urlencode(these_params)
                resp, response_body = self.monasca_client.list_dimension_names(
                    query_parms)
                self.assertEqual(200, resp.status)
                if not response_body['elements']:
                    self.fail("No metrics returned")
                response_names_length = len(response_body['elements'])
                if response_names_length == 0:
                    self.fail("No dimension names returned")
                new_elements = [str(response_body['elements'][i]
                                    ['dimension_name']) for i in
                                range(response_names_length)]
                self.assertEqual(num_expected_elements, len(new_elements))
                expected_elements = elements[start_index:start_index + limit]
                expected_dimension_names = \
                    [expected_elements[i]['dimension_name'] for i in range(
                        len(expected_elements))]
                self.assertEqual(expected_dimension_names, new_elements)
                start_index += num_expected_elements
                if start_index >= num_dim_names:
                    break
                # Get the next set
                offset = self._get_offset(response_body)
    @decorators.attr(type='gate')
    @decorators.attr(type=['negative'])
    def test_list_dimension_names_with_wrong_metric_name(self):
        """An unknown metric name yields an empty dimension-name list."""
        self._test_list_dimension_names_with_metric_name(
            'wrong_metric_name', [])
    def _test_list_dimension_names_with_metric_name(self, metric_name,
                                                    dimension_names):
        """Assert list_dimension_names(metric_name) returns *dimension_names*."""
        param = '?metric_name=' + metric_name
        resp, response_body = self.monasca_client.list_dimension_names(param)
        self.assertEqual(200, resp.status)
        self.assertTrue(set(['links', 'elements']) == set(response_body))
        response_names_length = len(response_body['elements'])
        names = [str(response_body['elements'][i]['dimension_name']) for i
                 in range(response_names_length)]
        self.assertEqual(names, dimension_names)
| 45.948
| 94
| 0.617916
|
4a09e4c1ccf4f501d39fa741af1916b7c1770e7b
| 758
|
py
|
Python
|
test/test_boxplot.py
|
G-linch/pyecharts
|
8cee0f9fd9800ea388b1199ec6ac4746dd8e5455
|
[
"MIT"
] | 1
|
2019-07-02T18:45:29.000Z
|
2019-07-02T18:45:29.000Z
|
test/test_boxplot.py
|
G-linch/pyecharts
|
8cee0f9fd9800ea388b1199ec6ac4746dd8e5455
|
[
"MIT"
] | 1
|
2019-08-16T06:20:23.000Z
|
2019-08-16T06:20:23.000Z
|
test/test_boxplot.py
|
G-linch/pyecharts
|
8cee0f9fd9800ea388b1199ec6ac4746dd8e5455
|
[
"MIT"
] | 1
|
2021-02-02T02:57:59.000Z
|
2021-02-02T02:57:59.000Z
|
from unittest.mock import patch
from nose.tools import eq_
from pyecharts.charts import Boxplot
@patch("pyecharts.render.engine.write_utf8_html_file")
def test_boxpolt_base(fake_writer):
    """Render a two-series boxplot and check the default theme/renderer."""
    series_a = [
        [850, 740, 900, 1070, 930, 850, 950, 980, 980, 880, 1000, 980],
        [960, 940, 960, 940, 880, 800, 850, 880, 900, 840, 830, 790],
    ]
    series_b = [
        [890, 810, 810, 820, 800, 770, 760, 740, 750, 760, 910, 920],
        [890, 840, 780, 810, 760, 810, 790, 810, 820, 850, 870, 870],
    ]
    chart = Boxplot()
    chart.add_xaxis(["expr1", "expr2"])
    chart.add_yaxis("A", chart.prepare_data(series_a))
    chart.add_yaxis("B", chart.prepare_data(series_b))
    # render() goes through the patched writer instead of touching disk.
    chart.render()
    _, content = fake_writer.call_args[0]
    eq_(chart.theme, "white")
    eq_(chart.renderer, "canvas")
| 29.153846
| 81
| 0.598945
|
4a09e4cc004a2bcca1be4208c7bc1bbe6a3d7756
| 3,603
|
py
|
Python
|
src/utils/load_values.py
|
jcraig0/AmeriFacts
|
1ebeb422ca2fce2663ed4fcc2731cbb4a84bf1d4
|
[
"MIT"
] | null | null | null |
src/utils/load_values.py
|
jcraig0/AmeriFacts
|
1ebeb422ca2fce2663ed4fcc2731cbb4a84bf1d4
|
[
"MIT"
] | null | null | null |
src/utils/load_values.py
|
jcraig0/AmeriFacts
|
1ebeb422ca2fce2663ed4fcc2731cbb4a84bf1d4
|
[
"MIT"
] | null | null | null |
import zipfile
import json
import pandas
import boto3
# ETL script: parse ACS survey CSVs out of a zip archive, build DynamoDB
# PutRequest items per geography resolution, rank each attribute, and batch
# write the items to DynamoDB.
file = zipfile.ZipFile('2018/data/1_year_entire_sf/All_Geographies.zip')
# Geography summary-level prefix -> DynamoDB table name.
resolutions = {
    '040': 'State',
    '050': 'County',
    '500': 'Congressional_District'
}
# NOTE(review): file handle from open() is never closed — consider `with`.
columns = json.loads(open('columns.json').read())
items_dict = {resolution: [] for resolution in resolutions.keys()}
print('Reading survey data...')
for file_name in file.namelist():
    # Geography files: 'g...' CSVs, skipping the national ('us') file.
    if file_name[0] == 'g' and file_name[6:8] != 'us' \
            and file_name[-4:] == '.csv':
        geo_file = pandas.read_csv(file.open(file_name), encoding='latin-1',
                                   header=None)
        # idx -> (logical record id col 48, display name col 49), keeping only
        # wanted summary levels with component '00'.
        rows = {idx: (row.iloc[48], row.iloc[49])
                for idx, row in geo_file.iterrows()
                if row.iloc[48][:3] in resolutions.keys()
                and row.iloc[48][3:5] == '00'}
        for seq_num, seq_columns in columns.items():
            # Estimate ('e') and margin-of-error ('m') files share a suffix.
            file_name_end = '20181' + file_name[6:8] + seq_num + '000.txt'
            est_file = pandas.read_csv(file.open('e' + file_name_end),
                                       header=None)
            moe_file = pandas.read_csv(file.open('m' + file_name_end),
                                       header=None)
            for idx, row in rows.items():
                res_num = row[0][:3]
                # Find (or lazily create) the item for this geography ID.
                try:
                    curr_item = next(item for item in items_dict[res_num]
                                     if item['PutRequest']['Item']['ID']['S']
                                     == row[0])
                except StopIteration:
                    name = row[1]
                    if res_num == '500':
                        name = name.replace(' (116th Congress)', '')
                    curr_item = {'PutRequest': {'Item': {
                        'ID': {'S': row[0]}, 'Name': {'S': name}
                    }}}
                    items_dict[res_num].append(curr_item)
                req_columns = {}
                for i, column in enumerate(seq_columns):
                    if not pandas.isnull(est_file.iloc[idx, column[1]]):
                        req_columns[column[0]] = \
                            {'N': str(est_file.iloc[idx, column[1]])}
                        # Attach the MOE only for leaf columns (those with ':'
                        # or not followed by a ':'-containing sub-column).
                        if ':' in column[0] or i+1 == len(seq_columns) \
                                or ':' not in seq_columns[i+1][0]:
                            req_columns[column[0] + ' MOE'] = \
                                {'N': str(moe_file.iloc[idx, column[1]])}
                curr_item['PutRequest']['Item'].update(req_columns)
client = boto3.client('dynamodb', region_name='us-east-2')
all_attributes = list(
    items_dict[list(items_dict.keys())[0]][0]['PutRequest']['Item'].keys())
# NOTE(review): `all_attributes[i+1]` will raise IndexError if the last
# attribute lacks ':' — presumably the data guarantees it doesn't; confirm.
attributes = [attr for i, attr in enumerate(all_attributes)
              if attr not in ('ID', 'Name') and 'MOE' not in attr
              and (':' in attr or ':' not in all_attributes[i+1])]
for res_num, items in items_dict.items():
    # Rank every geography per attribute (descending) as '<attr> Ord'.
    for attribute in attributes:
        items = sorted(items, key=lambda item:
                       float(item['PutRequest']['Item'][attribute]['N'])
                       if attribute in item['PutRequest']['Item']
                       else float('-inf'), reverse=True)
        for i, item in enumerate(items):
            item['PutRequest']['Item'][attribute + ' Ord'] = {'N': str(i + 1)}
    print("Loading data into table '{}'...".format(resolutions[res_num]))
    # DynamoDB batch_write_item accepts at most 25 requests per call.
    for i in range(0, len(items), 25):
        print('  Items {} to {}...'.format(i+1, min(i+25, len(items))))
        client.batch_write_item(
            RequestItems={resolutions[res_num]: items[i:i+25]})
| 44.481481
| 78
| 0.501249
|
4a09e4ef25c8e5be94efef7f83a99788036004ba
| 11,514
|
py
|
Python
|
tests/python/relay/test_pass_lazy_gradient_init.py
|
jacobpostman/incubator-tvm
|
fdef79d317d455eb5c9e9e86feb97416eb594690
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
tests/python/relay/test_pass_lazy_gradient_init.py
|
jacobpostman/incubator-tvm
|
fdef79d317d455eb5c9e9e86feb97416eb594690
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 2
|
2020-09-14T09:18:25.000Z
|
2020-09-24T03:28:18.000Z
|
tests/python/relay/test_pass_lazy_gradient_init.py
|
jacobpostman/incubator-tvm
|
fdef79d317d455eb5c9e9e86feb97416eb594690
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import relay
from tvm.relay import create_executor, transform
from tvm.relay.testing import rand, run_infer_type
import tvm.testing
from tvm.testing import assert_allclose
import pytest
def test_tc():
    """Simple testcase, check that transformation typechecks."""
    mod = tvm.IRModule()
    shape = (20, 20)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x1 = relay.var("x1", t)
    x2 = relay.var("x2", t)
    # f(x1,x2) = (x1-x2)*x2
    y = relay.Function([x1, x2], (x1 - x2) * x2)
    mod["main"] = y
    mod = transform.LazyGradientInit()(mod)
    # function input/output types should remain the same
    # (type-check only: the function is never executed in this test)
    assert mod["main"].checked_type == relay.FuncType([t, t], t)
def test_add():
    """Simple add testcase. Check types and semantic equivalence."""
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    # f(x) = x+x
    y = relay.Function([x], x+x)
    mod["main"] = y
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], t)
    # Execute the transformed function and compare against the numpy result.
    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    y = ex.evaluate(y)(x)
    assert_allclose(y.asnumpy(), x.asnumpy() + x.asnumpy())
def test_add_tuple():
    """Add elements of tuple. Check types and semantic equivalence."""
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = 'float32'
    tensor_type = relay.TensorType(shape, dtype)
    t = relay.TupleType([tensor_type, tensor_type])
    x = relay.var("x", t)
    # f((x1,x2)) = x1 + x2
    y = relay.Function([x], relay.TupleGetItem(x, 0) + relay.TupleGetItem(x, 1))
    mod["main"] = y
    mod = transform.LazyGradientInit()(mod)
    # NOTE(review): PrintIR looks like leftover debugging output — consider
    # removing; it only dumps the module and returns it unchanged.
    mod = tvm.transform.PrintIR(show_meta_data=True)(mod)
    y = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], tensor_type)
    ex = create_executor(mod=mod)
    x = (rand(dtype, *shape), rand(dtype, *shape))
    y = ex.evaluate(y)(x)
    assert_allclose(y.asnumpy(), x[0].asnumpy() + x[1].asnumpy())
def test_mult():
    """Simple multiplication testcase. Check types and semantic equivalence."""
    mod = tvm.IRModule()
    shape = (15, 15)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    # f(x) = x*x  (elementwise)
    y = relay.Function([x], x * x)
    mod["main"] = y
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], t)
    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    y = ex.evaluate(y)(x)
    assert_allclose(y.asnumpy(), x.asnumpy() * x.asnumpy())
def test_ret_tuple():
    """Test tuple return type. Check types and semantic equivalence."""
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    # f(x) = (x, 2*x)
    func = relay.Function([x], relay.Tuple([x,x * relay.const(2.0)]))
    func = run_infer_type(func)
    mod["main"] = func
    mod = transform.LazyGradientInit()(mod)
    func = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], relay.TupleType([t, t]))
    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    y = ex.evaluate(func)(x)
    assert_allclose(y[0].asnumpy(), x.asnumpy())
    assert_allclose(y[1].asnumpy(), x.asnumpy() * 2.0)
def test_add_broadcast():
    """Test adding matrices of different size. Check types and semantic equivalence."""
    mod = tvm.IRModule()
    # Shapes chosen so that x1 + x2 broadcasts to (3, 4, 5).
    shape1 = (3, 4, 1)
    shape2 = (1, 5)
    dtype = 'float32'
    t1 = relay.TensorType(shape1, dtype)
    t2 = relay.TensorType(shape2, dtype)
    x1 = relay.var("x1", t1)
    x2 = relay.var("x2", t2)
    func = relay.Function([x1,x2], x1 + x2)
    func = run_infer_type(func)
    mod["main"] = func
    mod = transform.LazyGradientInit()(mod)
    func = mod["main"]
    x1_np = rand(dtype, *shape1).asnumpy()
    x2_np = rand(dtype, *shape2).asnumpy()
    # numpy broadcasting defines the expected result shape and values.
    expected_forward = x1_np + x2_np
    expected_forward_type = relay.TensorType(expected_forward.shape, dtype)
    assert mod["main"].checked_type == relay.FuncType([t1, t2], expected_forward_type)
    ex = create_executor(mod=mod)
    forward = ex.evaluate(func)(x1_np, x2_np)
    assert_allclose(forward.asnumpy(), expected_forward)
def test_reverse_ad_identity():
    """Simple test with reverse mode ad."""
    # of f(x) = x; gradient of identity is all ones.
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    func = relay.Function([x], x)
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)
    mod["main"] = back_func
    mod = transform.LazyGradientInit()(mod)
    back_func = mod["main"]
    # gradient() returns (forward_value, (grad_per_input,)).
    assert mod["main"].checked_type == relay.FuncType([t],
                                                      relay.TupleType([t, relay.TupleType([t])]))
    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    (forward), (grad,) = ex.evaluate(back_func)(x)
    assert_allclose(forward.asnumpy(), x.asnumpy())
    assert_allclose(grad.asnumpy(), np.ones_like(x.asnumpy()))
def test_multivar_reverse_ad():
    """Simple test with multivariate reverse mode ad."""
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = relay.var("y", t)
    # f(x, y) = x*y elementwise (the ones constant is a no-op multiplier).
    func = relay.Function([x, y], (x * y) * relay.const(np.ones(shape, dtype)))
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)
    mod["main"] = back_func
    mod = transform.LazyGradientInit()(mod)
    back_func = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t, t],
                                                      relay.TupleType([t, relay.TupleType([t, t])]))
    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    y = rand(dtype, *shape)
    # d(x*y)/dx = y and d(x*y)/dy = x.
    (forward), (grad_x, grad_y, ) = ex.evaluate(back_func)(x, y)
    assert_allclose(forward.asnumpy(), x.asnumpy() * y.asnumpy())
    assert_allclose(grad_x.asnumpy(), y.asnumpy())
    assert_allclose(grad_y.asnumpy(), x.asnumpy())
def test_partial_eval():
    """Test transformation following reverse mode ad and PartialEval"""
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = 'float32'
    # NOTE(review): `t` and `back_func` are unused below.
    t = relay.TensorType(shape, dtype)
    func = relay.Function([], relay.const(np.ones(shape, dtype)))
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)
    mod["main"] = back_func
    back_func = mod["main"]
    # NOTE(review): the transformed module is discarded, so this only checks
    # that PartialEvaluate runs without raising — confirm that is the intent.
    transform.PartialEvaluate()(mod)
def test_after_partial_eval():
    """Test transformation following reverse mode ad and PartialEval"""
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = relay.var("y", t)
    func = relay.Function([x, y], (x * y) * relay.const(np.ones(shape, dtype)))
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)
    mod["main"] = back_func
    # NOTE(review): back_func is captured BEFORE the Sequential pass runs and
    # is the function that gets evaluated below — confirm this is intended.
    back_func = mod["main"]
    # LazyGradientInit applied after PartialEvaluate.
    seq = tvm.transform.Sequential([
        transform.PartialEvaluate(),
        transform.LazyGradientInit(),
        transform.DeadCodeElimination()
    ])
    mod = seq(mod)
    assert mod["main"].checked_type == relay.FuncType([t, t],
                                                      relay.TupleType([t, relay.TupleType([t, t])]))
    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    y = rand(dtype, *shape)
    (forward), (grad_x, grad_y,) = ex.evaluate(back_func)(x, y)
    assert_allclose(forward.asnumpy(), x.asnumpy() * y.asnumpy())
    assert_allclose(grad_x.asnumpy(), y.asnumpy())
    assert_allclose(grad_y.asnumpy(), x.asnumpy())
def test_before_partial_eval():
    """Test transformation before PartialEval"""
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = relay.var("y", t)
    func = relay.Function([x, y], x * y)
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)
    mod["main"] = back_func
    # LazyGradientInit applied before PartialEvaluate (reverse of the
    # ordering exercised by test_after_partial_eval).
    seq = tvm.transform.Sequential([
        transform.LazyGradientInit(),
        transform.PartialEvaluate(),
        transform.DeadCodeElimination()
    ])
    mod = seq(mod)
    back_func = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t, t],
                                                      relay.TupleType([t, relay.TupleType([t, t])]))
    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    y = rand(dtype, *shape)
    (forward), (grad_x, grad_y,) = ex.evaluate(back_func)(x, y)
    assert_allclose(forward.asnumpy(), x.asnumpy() * y.asnumpy())
    assert_allclose(grad_x.asnumpy(), y.asnumpy())
    assert_allclose(grad_y.asnumpy(), x.asnumpy())
def test_zeros():
    """Simple test using "zeros" op"""
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    # x + 0 should come back unchanged.
    y = relay.Function([x], x + relay.zeros(shape, dtype))
    mod["main"] = y
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], t)
    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    y = ex.evaluate(y)(x)
    assert_allclose(y.asnumpy(), x.asnumpy())
def test_ones():
    """Simple test using "ones" op"""
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    # x + 1 elementwise.
    y = relay.Function([x], x + relay.ones(shape, dtype))
    mod["main"] = y
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], t)
    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    y = ex.evaluate(y)(x)
    assert_allclose(y.asnumpy(), x.asnumpy() + np.ones_like(x.asnumpy()))
def test_zeros_like():
    """Simple test using "zeros_like" op"""
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    # x + zeros_like(x) should come back unchanged.
    y = relay.Function([x], x + relay.zeros_like(x))
    mod["main"] = y
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], t)
    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    y = ex.evaluate(y)(x)
    assert_allclose(y.asnumpy(), x.asnumpy())
def test_ones_like():
    """Simple test using "ones_like" op"""
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    # x + ones_like(x) == x + 1 elementwise.
    y = relay.Function([x], x + relay.ones_like(x))
    mod["main"] = y
    mod = transform.LazyGradientInit()(mod)
    y = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], t)
    ex = create_executor(mod=mod)
    x = rand(dtype, *shape)
    y = ex.evaluate(y)(x)
    assert_allclose(y.asnumpy(), x.asnumpy() + np.ones_like(x.asnumpy()))
# Allow running this test file directly; delegate collection to pytest.
if __name__ == "__main__":
    pytest.main([__file__])
| 27.744578
| 98
| 0.651381
|
4a09e5e8779ff1f34ca782443a5e8df7bf3176b0
| 22
|
py
|
Python
|
alertaclient/version.py
|
linuxtechie/python-alerta-client
|
cb6d344218d5b74b86f9e3f28f858853e05996f6
|
[
"MIT"
] | null | null | null |
alertaclient/version.py
|
linuxtechie/python-alerta-client
|
cb6d344218d5b74b86f9e3f28f858853e05996f6
|
[
"MIT"
] | null | null | null |
alertaclient/version.py
|
linuxtechie/python-alerta-client
|
cb6d344218d5b74b86f9e3f28f858853e05996f6
|
[
"MIT"
] | null | null | null |
__version__ = '6.5.0'
| 11
| 21
| 0.636364
|
4a09e6da546c02d9b223037ec0c6498508d5658c
| 1,612
|
py
|
Python
|
pysimplegui/DemoPrograms/Demo_Matplotlib_PyLab.py
|
konsan1101/py-etc
|
bcca13119b0d2453866988404fd1c4976f55d4d5
|
[
"MIT"
] | null | null | null |
pysimplegui/DemoPrograms/Demo_Matplotlib_PyLab.py
|
konsan1101/py-etc
|
bcca13119b0d2453866988404fd1c4976f55d4d5
|
[
"MIT"
] | 2
|
2020-06-06T00:30:56.000Z
|
2021-06-10T22:30:37.000Z
|
pysimplegui/DemoPrograms/Demo_Matplotlib_PyLab.py
|
konsan1101/py-etc
|
bcca13119b0d2453866988404fd1c4976f55d4d5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import PySimpleGUI as sg
import matplotlib
import pylab
matplotlib.use('TkAgg')
"""
Demonstrates one way of embedding PyLab figures into a PySimpleGUI window.
"""
# ------------------------------- PASTE YOUR PYLAB CODE HERE -------------------------------
from numpy import sin
from numpy import cos
x = pylab.linspace(-3, 3, 30)
y = x**2
pylab.plot(x, sin(x))
pylab.plot(x, cos(x), 'r-')
pylab.plot(x, -sin(x), 'g--')
fig = pylab.gcf()
figure_x, figure_y, figure_w, figure_h = fig.bbox.bounds
# ------------------------------- END OF YOUR MATPLOTLIB CODE -------------------------------
# ------------------------------- Beginning of Matplotlib helper code -----------------------
def draw_figure(canvas, figure, loc=(0, 0)):
figure_canvas_agg = FigureCanvasTkAgg(figure, canvas)
figure_canvas_agg.draw()
figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1)
return figure_canvas_agg
# ------------------------------- Beginning of GUI CODE -------------------------------
# define the window layout
layout = [[sg.Text('Plot test', font='Any 18')],
[sg.Canvas(size=(figure_w, figure_h), key='canvas')],
[sg.OK(pad=((figure_w / 2, 0), 3), size=(4, 2))]]
# create the form and show it without the plot
window = sg.Window('Demo Application - Embedding Matplotlib In PySimpleGUI',
layout, finalize=True)
# add the plot to the window
fig_canvas_agg = draw_figure(window['canvas'].TKCanvas, fig)
event, values = window.read()
window.close()
| 29.309091
| 93
| 0.593672
|
4a09e77a9784ff33f83d1b80e41305168a157cf4
| 634
|
py
|
Python
|
json.py
|
blockchainhelppro/CelvinRost
|
aa2661747d06e4610928466521e4da1db77aeadc
|
[
"MIT"
] | 2
|
2018-08-15T21:27:59.000Z
|
2018-08-21T17:56:12.000Z
|
json.py
|
blockchainhelppro/CelvinRost
|
aa2661747d06e4610928466521e4da1db77aeadc
|
[
"MIT"
] | null | null | null |
json.py
|
blockchainhelppro/CelvinRost
|
aa2661747d06e4610928466521e4da1db77aeadc
|
[
"MIT"
] | 1
|
2021-12-06T04:03:32.000Z
|
2021-12-06T04:03:32.000Z
|
import collections
from .types import (
is_primitive_type,
)
def normalize_object_for_json(obj):
    """Recursively convert *obj* into JSON-serializable builtins.

    Primitives (as judged by ``is_primitive_type``) pass through unchanged;
    sequences and sets become lists; mappings become dicts with both keys
    and values normalized.

    Raises:
        TypeError: if *obj* is none of the supported kinds.
    """
    # The bare ``collections.Sequence``/``Set``/``Mapping`` aliases used by
    # the original code were deprecated in Python 3.3 and removed in 3.10;
    # the ABCs live in collections.abc.  Local import keeps this fix
    # self-contained.
    import collections.abc as _abc

    if is_primitive_type(obj):
        return obj
    # NOTE(review): str/bytes are Sequences too -- this relies on
    # is_primitive_type() catching them first; confirm against .types.
    if isinstance(obj, (_abc.Sequence, _abc.Set)):
        return [normalize_object_for_json(item) for item in obj]
    if isinstance(obj, _abc.Mapping):
        return {
            normalize_object_for_json(key): normalize_object_for_json(value)
            for key, value in obj.items()
        }
    raise TypeError("Unable to normalize object of type: {0}".format(type(obj)))
| 25.36
| 84
| 0.616719
|
4a09e797b18e3112dcb311450d1cc0ad262dd643
| 1,004
|
py
|
Python
|
learnpy_ecourse/class6/ex2_list_to_dict.py
|
fallenfuzz/pynet
|
9624d83cca160fd325a34e838e4474c9b80fe2ab
|
[
"Apache-2.0"
] | 528
|
2015-01-07T15:28:51.000Z
|
2022-03-27T09:45:37.000Z
|
learnpy_ecourse/class6/ex2_list_to_dict.py
|
fallenfuzz/pynet
|
9624d83cca160fd325a34e838e4474c9b80fe2ab
|
[
"Apache-2.0"
] | 19
|
2015-07-01T23:52:27.000Z
|
2021-09-22T04:30:34.000Z
|
learnpy_ecourse/class6/ex2_list_to_dict.py
|
fallenfuzz/pynet
|
9624d83cca160fd325a34e838e4474c9b80fe2ab
|
[
"Apache-2.0"
] | 555
|
2015-01-18T07:21:43.000Z
|
2022-03-20T21:25:22.000Z
|
#!/usr/bin/env python
'''
Disclaimer - This is a solution to the below problem given the content we have
discussed in class. It is not necessarily the best solution to the problem.
In other words, I generally only use things we have covered up to this point
in the class (with some exceptions which I will usually note).
Python for Network Engineers
https://pynet.twb-tech.com
Learning Python
2. Write a function that converts a list to a dictionary where the
index of the list is used as the key to the new dictionary (the
function should return the new dictionary).
'''
def list_to_dict(a_list):
    '''
    Convert a list to a dictionary keyed by each element's index.
    '''
    # enumerate() yields (index, value) pairs; dict() consumes them directly.
    return dict(enumerate(a_list))
# Create a simple test list.  range() is a lazy object in Python 3, so it
# must be materialized with list() before append() can be called on it
# (the original `range(100, 110).append(...)` only worked on Python 2).
test_list = list(range(100, 110))
test_list.append('whatever')
# Call the function
test_dict = list_to_dict(test_list)
# Display the results.  The original used Python-2 print statements, which
# are a SyntaxError on Python 3; print() calls behave identically here.
print()
print("List: %s" % str(test_list))
print("Dict: %s" % str(test_dict))
print()
| 78
| 0.723108
|
4a09ea472d3066f9aaf557513cbd9bd4551533da
| 129,161
|
py
|
Python
|
pysnmp/INFORMANT-WMI-EXCHANGE.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/INFORMANT-WMI-EXCHANGE.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/INFORMANT-WMI-EXCHANGE.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module INFORMANT-WMI-EXCHANGE (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/INFORMANT-WMI-EXCHANGE
# Produced by pysmi-0.3.4 at Mon Apr 29 19:42:25 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Gauge32, NotificationType, Counter32, iso, Bits, ObjectIdentity, MibIdentifier, TimeTicks, IpAddress, Counter64, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "NotificationType", "Counter32", "iso", "Bits", "ObjectIdentity", "MibIdentifier", "TimeTicks", "IpAddress", "Counter64", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "ModuleIdentity")
TextualConvention, DateAndTime, DisplayString, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DateAndTime", "DisplayString", "TruthValue")
WtcsDisplayString, informant = mibBuilder.importSymbols("WTCS", "WtcsDisplayString", "informant")
wmiExchange = ModuleIdentity((1, 3, 6, 1, 4, 1, 9600, 1, 23))
wmiExchange.setRevisions(('2008-04-14 17:17', '2005-04-11 04:09',))
if mibBuilder.loadTexts: wmiExchange.setLastUpdated('200804141717Z')
if mibBuilder.loadTexts: wmiExchange.setOrganization('Informant Systems, Inc.')
exchangeClusterResourceTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 1), )
if mibBuilder.loadTexts: exchangeClusterResourceTable.setStatus('current')
exchangeClusterResourceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 1, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "excrIndex"))
if mibBuilder.loadTexts: exchangeClusterResourceEntry.setStatus('current')
excrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: excrIndex.setStatus('current')
excrName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 1, 1, 2), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: excrName.setStatus('current')
excrOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 1, 1, 3), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: excrOwner.setStatus('current')
excrState = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: excrState.setStatus('current')
excrType = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 1, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: excrType.setStatus('current')
excrVirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 1, 1, 6), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: excrVirtualMachine.setStatus('current')
exchangeConnectorStateTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 2), )
if mibBuilder.loadTexts: exchangeConnectorStateTable.setStatus('current')
exchangeConnectorStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 2, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "excsIndex"))
if mibBuilder.loadTexts: exchangeConnectorStateEntry.setStatus('current')
excsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: excsIndex.setStatus('current')
excsDN = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 2, 1, 2), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: excsDN.setStatus('current')
excsGroupDN = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 2, 1, 3), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: excsGroupDN.setStatus('current')
excsGroupGUID = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 2, 1, 4), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: excsGroupGUID.setStatus('current')
excsGUID = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 2, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: excsGUID.setStatus('current')
excsIsUp = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 2, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: excsIsUp.setStatus('current')
excsName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 2, 1, 7), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: excsName.setStatus('current')
exchangeLinkTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3), )
if mibBuilder.loadTexts: exchangeLinkTable.setStatus('current')
exchangeLinkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exlIndex"))
if mibBuilder.loadTexts: exchangeLinkEntry.setStatus('current')
exlIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlIndex.setStatus('current')
exlActionFreeze = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlActionFreeze.setStatus('current')
exlActionKick = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlActionKick.setStatus('current')
exlActionThaw = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlActionThaw.setStatus('current')
exlExtendedStateInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlExtendedStateInfo.setStatus('current')
exlGlobalStop = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlGlobalStop.setStatus('current')
exlIncreasingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlIncreasingTime.setStatus('current')
exlLinkDN = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 8), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlLinkDN.setStatus('current')
exlLinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlLinkName.setStatus('current')
exlNextScheduledConnection = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 10), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlNextScheduledConnection.setStatus('current')
exlNumberOfMessages = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlNumberOfMessages.setStatus('current')
exlOldestMessage = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 12), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlOldestMessage.setStatus('current')
exlProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 13), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlProtocolName.setStatus('current')
exlSizeOfQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 14), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlSizeOfQueue.setStatus('current')
exlStateActive = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 15), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlStateActive.setStatus('current')
exlStateFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 16), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlStateFlags.setStatus('current')
exlStateFrozen = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 17), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlStateFrozen.setStatus('current')
exlStateReady = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 18), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlStateReady.setStatus('current')
exlStateRemote = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 19), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlStateRemote.setStatus('current')
exlStateRetry = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 20), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlStateRetry.setStatus('current')
exlStateScheduled = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 21), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlStateScheduled.setStatus('current')
exlSupportedLinkActions = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 22), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlSupportedLinkActions.setStatus('current')
exlTypeCurrentlyUnreachable = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 23), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlTypeCurrentlyUnreachable.setStatus('current')
exlTypeDeferredDelivery = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 24), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlTypeDeferredDelivery.setStatus('current')
exlTypeInternal = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 25), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlTypeInternal.setStatus('current')
exlTypeLocalDelivery = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 26), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlTypeLocalDelivery.setStatus('current')
exlTypePendingCategorization = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 27), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlTypePendingCategorization.setStatus('current')
exlTypePendingRouting = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 28), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlTypePendingRouting.setStatus('current')
exlTypePendingSubmission = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 29), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlTypePendingSubmission.setStatus('current')
exlTypeRemoteDelivery = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 30), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlTypeRemoteDelivery.setStatus('current')
exlVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 31), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlVersion.setStatus('current')
exlVirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 32), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlVirtualMachine.setStatus('current')
exlVirtualServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 3, 1, 33), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exlVirtualServerName.setStatus('current')
exchangeQueueTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4), )
if mibBuilder.loadTexts: exchangeQueueTable.setStatus('current')
exchangeQueueEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exqIndex"))
if mibBuilder.loadTexts: exchangeQueueEntry.setStatus('current')
exqIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqIndex.setStatus('current')
exqCanEnumAll = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqCanEnumAll.setStatus('current')
exqCanEnumFailed = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqCanEnumFailed.setStatus('current')
exqCanEnumFirstNMessages = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqCanEnumFirstNMessages.setStatus('current')
exqCanEnumFrozen = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqCanEnumFrozen.setStatus('current')
exqCanEnumInvertSense = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqCanEnumInvertSense.setStatus('current')
exqCanEnumLargerThan = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqCanEnumLargerThan.setStatus('current')
exqCanEnumNLargestMessages = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 8), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqCanEnumNLargestMessages.setStatus('current')
exqCanEnumNOldestMessages = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 9), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqCanEnumNOldestMessages.setStatus('current')
exqCanEnumOlderThan = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 10), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqCanEnumOlderThan.setStatus('current')
exqCanEnumRecipient = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 11), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqCanEnumRecipient.setStatus('current')
exqCanEnumSender = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 12), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqCanEnumSender.setStatus('current')
exqGlobalStop = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 13), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqGlobalStop.setStatus('current')
exqIncreasingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 14), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqIncreasingTime.setStatus('current')
exqLinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 15), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqLinkName.setStatus('current')
exqMsgEnumFlagsSupported = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 16), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqMsgEnumFlagsSupported.setStatus('current')
exqNumberOfMessages = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 17), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqNumberOfMessages.setStatus('current')
exqProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 18), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqProtocolName.setStatus('current')
exqQueueName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 19), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqQueueName.setStatus('current')
exqSizeOfQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 20), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqSizeOfQueue.setStatus('current')
exqVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 21), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqVersion.setStatus('current')
exqVirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 22), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqVirtualMachine.setStatus('current')
exqVirtualServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 4, 1, 23), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqVirtualServerName.setStatus('current')
exchangeServerStateTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5), )
if mibBuilder.loadTexts: exchangeServerStateTable.setStatus('current')
exchangeServerStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exssIndex"))
if mibBuilder.loadTexts: exchangeServerStateEntry.setStatus('current')
exssIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssIndex.setStatus('current')
exssClusterState = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("ok", 1), ("warning", 2), ("error", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssClusterState.setStatus('current')
exssClusterStateString = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 3), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssClusterStateString.setStatus('current')
exssCPUState = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("ok", 1), ("warning", 2), ("error", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssCPUState.setStatus('current')
exssCPUStateString = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssCPUStateString.setStatus('current')
exssDisksState = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("ok", 1), ("warning", 2), ("error", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssDisksState.setStatus('current')
exssDisksStateString = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 7), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssDisksStateString.setStatus('current')
exssDN = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 8), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssDN.setStatus('current')
exssGroupDN = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssGroupDN.setStatus('current')
exssGroupGUID = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 10), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssGroupGUID.setStatus('current')
exssGUID = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 11), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssGUID.setStatus('current')
exssMemoryState = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("ok", 1), ("warning", 2), ("error", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssMemoryState.setStatus('current')
exssMemoryStateString = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 13), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssMemoryStateString.setStatus('current')
exssName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 14), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssName.setStatus('current')
exssQueuesState = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("ok", 1), ("warning", 2), ("error", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssQueuesState.setStatus('current')
exssQueuesStateString = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 16), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssQueuesStateString.setStatus('current')
exssServerMaintenance = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 17), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssServerMaintenance.setStatus('current')
exssServerState = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("ok", 1), ("warning", 2), ("error", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssServerState.setStatus('current')
exssServerStateString = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 19), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssServerStateString.setStatus('current')
exssServicesState = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("ok", 1), ("warning", 2), ("error", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssServicesState.setStatus('current')
exssServicesStateString = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 21), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssServicesStateString.setStatus('current')
exssUnreachable = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 22), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssUnreachable.setStatus('current')
exssVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 5, 1, 23), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exssVersion.setStatus('current')
exchangeDSAccessDCTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 6), )
if mibBuilder.loadTexts: exchangeDSAccessDCTable.setStatus('current')
exchangeDSAccessDCEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 6, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exdsIndex"))
if mibBuilder.loadTexts: exchangeDSAccessDCEntry.setStatus('current')
exdsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exdsIndex.setStatus('current')
exdsConfigurationType = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("manual", 0), ("automatic", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exdsConfigurationType.setStatus('current')
exdsIsFast = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 6, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exdsIsFast.setStatus('current')
exdsIsInSync = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 6, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exdsIsInSync.setStatus('current')
exdsIsUp = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 6, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exdsIsUp.setStatus('current')
exdsLDAPPort = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 6, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exdsLDAPPort.setStatus('current')
exdsName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 6, 1, 7), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exdsName.setStatus('current')
exdsType = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 6, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("configurationDomainController", 0), ("localDomainController", 1), ("globalCatalog", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exdsType.setStatus('current')
exchangeFolderTreeTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 7), )
if mibBuilder.loadTexts: exchangeFolderTreeTable.setStatus('current')
exchangeFolderTreeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 7, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exftIndex"))
if mibBuilder.loadTexts: exchangeFolderTreeEntry.setStatus('current')
exftIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exftIndex.setStatus('current')
exftAdministrativeGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 7, 1, 2), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exftAdministrativeGroup.setStatus('current')
exftAdministrativeNote = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 7, 1, 3), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exftAdministrativeNote.setStatus('current')
exftAssociatedPublicStores = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 7, 1, 4), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exftAssociatedPublicStores.setStatus('current')
exftCreationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 7, 1, 5), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exftCreationTime.setStatus('current')
exftGUID = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 7, 1, 6), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exftGUID.setStatus('current')
exftHasLocalPublicStore = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 7, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exftHasLocalPublicStore.setStatus('current')
exftLastModificationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 7, 1, 8), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exftLastModificationTime.setStatus('current')
exftMapiFolderTree = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 7, 1, 9), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exftMapiFolderTree.setStatus('current')
exftName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 7, 1, 10), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exftName.setStatus('current')
exftRootFolderURL = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 7, 1, 11), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exftRootFolderURL.setStatus('current')
exchangeLinkV2Table = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8), )
if mibBuilder.loadTexts: exchangeLinkV2Table.setStatus('current')
exchangeLinkV2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exl2Index"))
if mibBuilder.loadTexts: exchangeLinkV2Entry.setStatus('current')
exl2Index = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2Index.setStatus('current')
exl2ActionFreeze = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2ActionFreeze.setStatus('current')
exl2ActionKick = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2ActionKick.setStatus('current')
exl2ActionThaw = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2ActionThaw.setStatus('current')
# --- Exchange Queue Link V2 table: remaining columns (.1.3.6.1.4.1.9600.1.23.8.1.5-33) ---
# Auto-generated by pysmi from the INFORMANT-WMI-EXCHANGE MIB; do not hand-edit logic.
# The enclosing MibTable/MibTableRow for these columns is defined earlier in the file.
# Every column is read-only; the `if mibBuilder.loadTexts:` guard attaches textual
# metadata only when the MIB compiler was asked to load texts.
exl2ExtendedStateInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2ExtendedStateInfo.setStatus('current')
exl2GlobalStop = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2GlobalStop.setStatus('current')
exl2LinkDN = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 7), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2LinkDN.setStatus('current')
exl2LinkId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 8), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2LinkId.setStatus('current')
exl2LinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2LinkName.setStatus('current')
exl2MessageCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2MessageCount.setStatus('current')
exl2NextScheduledConnection = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 11), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2NextScheduledConnection.setStatus('current')
exl2OldestMessage = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 12), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2OldestMessage.setStatus('current')
exl2ProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 13), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2ProtocolName.setStatus('current')
exl2Size = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 14), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2Size.setStatus('current')
# Link-state flags exposed as individual TruthValue columns (15-21).
exl2StateActive = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 15), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2StateActive.setStatus('current')
exl2StateFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 16), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2StateFlags.setStatus('current')
exl2StateFrozen = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 17), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2StateFrozen.setStatus('current')
exl2StateReady = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 18), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2StateReady.setStatus('current')
exl2StateRemote = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 19), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2StateRemote.setStatus('current')
exl2StateRetry = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 20), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2StateRetry.setStatus('current')
exl2StateScheduled = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 21), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2StateScheduled.setStatus('current')
exl2SupportedLinkActions = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 22), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2SupportedLinkActions.setStatus('current')
# Link-type flags exposed as individual TruthValue columns (23-30).
exl2TypeCurrentlyUnreachable = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 23), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2TypeCurrentlyUnreachable.setStatus('current')
exl2TypeDeferredDelivery = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 24), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2TypeDeferredDelivery.setStatus('current')
exl2TypeInternal = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 25), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2TypeInternal.setStatus('current')
exl2TypeLocalDelivery = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 26), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2TypeLocalDelivery.setStatus('current')
exl2TypePendingCategorization = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 27), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2TypePendingCategorization.setStatus('current')
exl2TypePendingRouting = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 28), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2TypePendingRouting.setStatus('current')
exl2TypePendingSubmission = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 29), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2TypePendingSubmission.setStatus('current')
exl2TypeRemoteDelivery = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 30), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2TypeRemoteDelivery.setStatus('current')
exl2Version = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 31), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2Version.setStatus('current')
exl2VirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 32), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2VirtualMachine.setStatus('current')
exl2VirtualServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 8, 1, 33), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exl2VirtualServerName.setStatus('current')
# --- Exchange Logon table (.1.3.6.1.4.1.9600.1.23.9) ---
# One row per active client logon to the Exchange store; rows are indexed by
# exloIndex (an arbitrary 1-based enumeration index). All columns read-only.
exchangeLogonTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9), )
if mibBuilder.loadTexts: exchangeLogonTable.setStatus('current')
exchangeLogonEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exloIndex"))
if mibBuilder.loadTexts: exchangeLogonEntry.setStatus('current')
exloIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloIndex.setStatus('current')
exloAdapterSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 2), Gauge32()).setUnits('Kbits/s').setMaxAccess("readonly")
if mibBuilder.loadTexts: exloAdapterSpeed.setStatus('current')
exloClientIP = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 3), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloClientIP.setStatus('current')
# Enumerated column: classicOnline(1) / cached(2).
exloClientMode = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("classicOnline", 1), ("cached", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloClientMode.setStatus('current')
exloClientName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloClientName.setStatus('current')
exloClientVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 6), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloClientVersion.setStatus('current')
exloCodePageID = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloCodePageID.setStatus('current')
exloFolderOperationRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 8), Gauge32()).setUnits('per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: exloFolderOperationRate.setStatus('current')
exloHostAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloHostAddress.setStatus('current')
exloLastOperationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 10), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloLastOperationTime.setStatus('current')
exloLatency = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 11), Gauge32()).setUnits('msec').setMaxAccess("readonly")
if mibBuilder.loadTexts: exloLatency.setStatus('current')
exloLocaleID = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloLocaleID.setStatus('current')
exloLoggedOnUserAccount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 13), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloLoggedOnUserAccount.setStatus('current')
exloLoggedOnUsersMailboxLegacyDN = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 14), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloLoggedOnUsersMailboxLegacyDN.setStatus('current')
exloLogonTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 15), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloLogonTime.setStatus('current')
exloMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 16), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloMacAddress.setStatus('current')
exloMailboxDisplayName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 17), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloMailboxDisplayName.setStatus('current')
exloMailboxLegacyDN = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 18), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloMailboxLegacyDN.setStatus('current')
exloMessagingOperationRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 19), Gauge32()).setUnits('per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: exloMessagingOperationRate.setStatus('current')
exloOpenAttachmentCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 20), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloOpenAttachmentCount.setStatus('current')
exloOpenFolderCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 21), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloOpenFolderCount.setStatus('current')
exloOpenMessageCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 22), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloOpenMessageCount.setStatus('current')
exloOtherOperationRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 23), Gauge32()).setUnits('per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: exloOtherOperationRate.setStatus('current')
exloProgressOperationRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 24), Gauge32()).setUnits('per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: exloProgressOperationRate.setStatus('current')
exloRowID = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 25), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloRowID.setStatus('current')
# NOTE(review): plain Integer32 rather than TruthValue, unlike other boolean-looking
# columns in this file — presumably mirrors the underlying WMI property type; confirm
# against the source MIB before changing.
exloRPCSucceeded = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 26), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloRPCSucceeded.setStatus('current')
exloServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 27), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloServerName.setStatus('current')
exloStorageGroupName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 28), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloStorageGroupName.setStatus('current')
exloStoreName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 29), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloStoreName.setStatus('current')
# Enumerated column: mailboxStore(1) / publicStore(2).
exloStoreType = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("mailboxStore", 1), ("publicStore", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exloStoreType.setStatus('current')
exloStreamOperationRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 31), Gauge32()).setUnits('per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: exloStreamOperationRate.setStatus('current')
exloTableOperationRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 32), Gauge32()).setUnits('per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: exloTableOperationRate.setStatus('current')
exloTotalOperationRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 33), Gauge32()).setUnits('per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: exloTotalOperationRate.setStatus('current')
exloTransferOperationRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 9, 1, 34), Gauge32()).setUnits('per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: exloTransferOperationRate.setStatus('current')
# --- Exchange Mailbox table (.1.3.6.1.4.1.9600.1.23.10) ---
# One row per mailbox in the store, indexed by exmIndex. All columns read-only.
exchangeMailboxTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10), )
if mibBuilder.loadTexts: exchangeMailboxTable.setStatus('current')
exchangeMailboxEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exmIndex"))
if mibBuilder.loadTexts: exchangeMailboxEntry.setStatus('current')
exmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmIndex.setStatus('current')
exmAssocContentCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmAssocContentCount.setStatus('current')
exmDateDiscoveredAbsentInDS = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 3), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmDateDiscoveredAbsentInDS.setStatus('current')
# Byte counts exposed as display strings (columns 4 and 12) — presumably because the
# underlying 64-bit values can exceed Gauge32; confirm against the source MIB.
exmDeletedMessageSizeExtended = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 4), WtcsDisplayString()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: exmDeletedMessageSizeExtended.setStatus('current')
exmLastLoggedOnUserAccount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmLastLoggedOnUserAccount.setStatus('current')
exmLastLogoffTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 6), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmLastLogoffTime.setStatus('current')
exmLastLogonTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 7), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmLastLogonTime.setStatus('current')
exmLegacyDN = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 8), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmLegacyDN.setStatus('current')
exmMailboxDisplayName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmMailboxDisplayName.setStatus('current')
exmMailboxGUID = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 10), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmMailboxGUID.setStatus('current')
exmServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 11), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmServerName.setStatus('current')
exmSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 12), WtcsDisplayString()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: exmSize.setStatus('current')
exmStorageGroupName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 13), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmStorageGroupName.setStatus('current')
# Enumerated column; values are bit-flag-like (1,2,4,8,16) but constrained to single
# values: belowLimit(1)/issueWarning(2)/prohibitSend(4)/noChecking(8)/mailboxDisabled(16).
exmStorageLimitInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 4, 8, 16))).clone(namedValues=NamedValues(("belowLimit", 1), ("issueWarning", 2), ("prohibitSend", 4), ("noChecking", 8), ("mailboxDisabled", 16)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmStorageLimitInfo.setStatus('current')
exmStoreName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 15), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmStoreName.setStatus('current')
exmTotalItems = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 10, 1, 16), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmTotalItems.setStatus('current')
# --- Exchange Message Tracking table (.1.3.6.1.4.1.9600.1.23.11) ---
# One row per message-tracking log entry, indexed by exmtIndex. All columns read-only.
exchangeMessageTrackingTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11), )
if mibBuilder.loadTexts: exchangeMessageTrackingTable.setStatus('current')
exchangeMessageTrackingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exmtIndex"))
if mibBuilder.loadTexts: exchangeMessageTrackingEntry.setStatus('current')
exmtIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtIndex.setStatus('current')
exmtAttemptedPartnerServer = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 2), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtAttemptedPartnerServer.setStatus('current')
exmtClientIP = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 3), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtClientIP.setStatus('current')
exmtClientName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 4), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtClientName.setStatus('current')
exmtCost = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtCost.setStatus('current')
exmtDeliveryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 6), Gauge32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtDeliveryTime.setStatus('current')
exmtEncrypted = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtEncrypted.setStatus('current')
exmtEntryType = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtEntryType.setStatus('current')
exmtExpansionDL = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtExpansionDL.setStatus('current')
exmtKeyID = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 10), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtKeyID.setStatus('current')
exmtLinkedMessageID = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 11), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtLinkedMessageID.setStatus('current')
exmtMessageID = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 12), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtMessageID.setStatus('current')
exmtOriginationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 13), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtOriginationTime.setStatus('current')
exmtPartnerServer = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 14), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtPartnerServer.setStatus('current')
exmtPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 15), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtPriority.setStatus('current')
exmtRecipientAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 16), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtRecipientAddress.setStatus('current')
exmtRecipientCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 17), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtRecipientCount.setStatus('current')
exmtRecipientStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 18), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtRecipientStatus.setStatus('current')
exmtSenderAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 19), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtSenderAddress.setStatus('current')
exmtServerIP = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 20), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtServerIP.setStatus('current')
exmtServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 21), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtServerName.setStatus('current')
exmtSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 22), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtSize.setStatus('current')
exmtSubject = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 23), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtSubject.setStatus('current')
exmtSubjectID = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 24), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtSubjectID.setStatus('current')
exmtTimeLogged = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 25), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtTimeLogged.setStatus('current')
exmtVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 11, 1, 26), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exmtVersion.setStatus('current')
# --- Exchange Public Folder table (.1.3.6.1.4.1.9600.1.23.12) ---
# One row per public folder, indexed by expfIndex. All columns read-only.
exchangePublicFolderTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12), )
if mibBuilder.loadTexts: exchangePublicFolderTable.setStatus('current')
exchangePublicFolderEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "expfIndex"))
if mibBuilder.loadTexts: exchangePublicFolderEntry.setStatus('current')
expfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfIndex.setStatus('current')
expfAddressBookName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 2), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfAddressBookName.setStatus('current')
expfAdministrativeNote = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 3), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfAdministrativeNote.setStatus('current')
expfAdminSecurityDescriptor = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 4), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfAdminSecurityDescriptor.setStatus('current')
expfADProxyPath = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfADProxyPath.setStatus('current')
expfAssociatedMessageCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfAssociatedMessageCount.setStatus('current')
expfAttachmentCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfAttachmentCount.setStatus('current')
expfCategorizationCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfCategorizationCount.setStatus('current')
expfComment = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfComment.setStatus('current')
expfContactCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfContactCount.setStatus('current')
expfContainsRules = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 11), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfContainsRules.setStatus('current')
expfCreationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 12), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfCreationTime.setStatus('current')
expfDeletedItemLifetime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 13), Gauge32()).setUnits('days').setMaxAccess("readonly")
if mibBuilder.loadTexts: expfDeletedItemLifetime.setStatus('current')
expfFolderTree = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 14), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfFolderTree.setStatus('current')
expfFriendlyUrl = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 15), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfFriendlyUrl.setStatus('current')
# Boolean folder attributes (columns 16-22).
expfHasChildren = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 16), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfHasChildren.setStatus('current')
expfHasLocalReplica = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 17), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfHasLocalReplica.setStatus('current')
expfIsMailEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 18), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfIsMailEnabled.setStatus('current')
expfIsNormalFolder = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 19), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfIsNormalFolder.setStatus('current')
expfIsPerUserReadDisabled = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 20), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfIsPerUserReadDisabled.setStatus('current')
expfIsSearchFolder = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 21), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfIsSearchFolder.setStatus('current')
expfIsSecureInSite = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 22), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfIsSecureInSite.setStatus('current')
expfLastAccessTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 23), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfLastAccessTime.setStatus('current')
expfLastModificationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 24), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfLastModificationTime.setStatus('current')
expfMaximumItemSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 25), Gauge32()).setUnits('KB').setMaxAccess("readonly")
if mibBuilder.loadTexts: expfMaximumItemSize.setStatus('current')
expfMessageCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 26), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfMessageCount.setStatus('current')
expfMessageWithAttachmentsCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 27), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfMessageWithAttachmentsCount.setStatus('current')
expfName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 28), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfName.setStatus('current')
expfNormalMessageSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 29), Gauge32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: expfNormalMessageSize.setStatus('current')
expfOwnerCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 30), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfOwnerCount.setStatus('current')
expfParentFriendlyUrl = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 31), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfParentFriendlyUrl.setStatus('current')
expfPath = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 32), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfPath.setStatus('current')
expfProhibitPostLimit = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 33), Gauge32()).setUnits('KB').setMaxAccess("readonly")
if mibBuilder.loadTexts: expfProhibitPostLimit.setStatus('current')
expfPublishInAddressBook = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 34), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfPublishInAddressBook.setStatus('current')
expfRecipientCountOnAssociateMsg = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 35), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfRecipientCountOnAssociateMsg.setStatus('current')
expfRecipientCountOnNormalMsg = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 36), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfRecipientCountOnNormalMsg.setStatus('current')
expfReplicaAgeLimit = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 37), Gauge32()).setUnits('days').setMaxAccess("readonly")
if mibBuilder.loadTexts: expfReplicaAgeLimit.setStatus('current')
expfReplicaList = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 38), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfReplicaList.setStatus('current')
# Enumerated column: notUrgent(0) / normal(1) / urgent(2).
expfReplicationMessagePriority = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 39), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("notUrgent", 0), ("normal", 1), ("urgent", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfReplicationMessagePriority.setStatus('current')
expfReplicationSchedule = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 40), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfReplicationSchedule.setStatus('current')
# Enumerated column: custom(0) .. usePublicStoreSchedule(6).
expfReplicationStyle = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 41), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("custom", 0), ("never", 1), ("always", 2), ("everyHour", 3), ("everyTwoHours", 4), ("everyFourHours", 5), ("usePublicStoreSchedule", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfReplicationStyle.setStatus('current')
expfRestrictionCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 42), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfRestrictionCount.setStatus('current')
expfSecurityDescriptor = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 43), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfSecurityDescriptor.setStatus('current')
# Enumerated column: usePublicStoreStyle(0) / useSpecifiedQuota(1) / noQuota(2).
expfStorageLimitStyle = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 44), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("usePublicStoreStyle", 0), ("useSpecifiedQuota", 1), ("noQuota", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfStorageLimitStyle.setStatus('current')
expfTargetAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 45), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfTargetAddress.setStatus('current')
expfTotalMessageSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 46), Gauge32()).setUnits('bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: expfTotalMessageSize.setStatus('current')
expfUrl = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 47), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfUrl.setStatus('current')
expfUsePublicStoreAgeLimits = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 48), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfUsePublicStoreAgeLimits.setStatus('current')
expfUsePublicStoreDelItemLifetm = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 49), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expfUsePublicStoreDelItemLifetm.setStatus('current')
expfWarningLimit = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 12, 1, 50), Gauge32()).setUnits('KB').setMaxAccess("readonly")
if mibBuilder.loadTexts: expfWarningLimit.setStatus('current')
# --- Exchange Queue V2 table (.1.3.6.1.4.1.9600.1.23.13) ---
# One row per transport queue, indexed by exq2Index. All columns read-only.
exchangeQueueV2Table = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13), )
if mibBuilder.loadTexts: exchangeQueueV2Table.setStatus('current')
exchangeQueueV2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exq2Index"))
if mibBuilder.loadTexts: exchangeQueueV2Entry.setStatus('current')
exq2Index = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exq2Index.setStatus('current')
exq2CanEnumAll = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exq2CanEnumAll.setStatus('current')
exq2GlobalStop = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exq2GlobalStop.setStatus('current')
exq2LinkId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1, 4), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exq2LinkId.setStatus('current')
exq2LinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exq2LinkName.setStatus('current')
exq2MessageCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exq2MessageCount.setStatus('current')
exq2MsgEnumFlagsSupported = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exq2MsgEnumFlagsSupported.setStatus('current')
exq2ProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1, 8), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exq2ProtocolName.setStatus('current')
exq2QueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exq2QueueId.setStatus('current')
exq2QueueName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1, 10), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exq2QueueName.setStatus('current')
exq2Size = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1, 11), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exq2Size.setStatus('current')
exq2Version = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exq2Version.setStatus('current')
exq2VirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1, 13), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exq2VirtualMachine.setStatus('current')
exq2VirtualServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 13, 1, 14), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exq2VirtualServerName.setStatus('current')
# exchangeQueueCacheReloadEvtTable (1.3.6.1.4.1.9600.1.23.14):
# two read-only columns indexed by exqcreIndex — a row index and the
# DateAndTime of a queue-cache reload event.
exchangeQueueCacheReloadEvtTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 14), )
if mibBuilder.loadTexts: exchangeQueueCacheReloadEvtTable.setStatus('current')
exchangeQueueCacheReloadEvtEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 14, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exqcreIndex"))
if mibBuilder.loadTexts: exchangeQueueCacheReloadEvtEntry.setStatus('current')
exqcreIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 14, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqcreIndex.setStatus('current')
exqcreReloadTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 14, 1, 2), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqcreReloadTime.setStatus('current')
# exchangeQueuedMessageTable (1.3.6.1.4.1.9600.1.23.18):
# 28 read-only columns per queued message, indexed by exqmIndex.
# Column layout (action flags, priority flags, ids/names, timestamps,
# size/state/version) is repeated verbatim by the SMTP- and X400-specific
# message tables later in this module (exqsm*, exqxm*).
exchangeQueuedMessageTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15), )
if mibBuilder.loadTexts: exchangeQueuedMessageTable.setStatus('current')
exchangeQueuedMessageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exqmIndex"))
if mibBuilder.loadTexts: exchangeQueuedMessageEntry.setStatus('current')
exqmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmIndex.setStatus('current')
exqmActionDeleteNDR = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmActionDeleteNDR.setStatus('current')
exqmActionDeleteNoNDR = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmActionDeleteNoNDR.setStatus('current')
exqmActionFreeze = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmActionFreeze.setStatus('current')
exqmActionThaw = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmActionThaw.setStatus('current')
exqmExpiry = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 6), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmExpiry.setStatus('current')
exqmHighPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmHighPriority.setStatus('current')
exqmLinkId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 8), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmLinkId.setStatus('current')
exqmLinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmLinkName.setStatus('current')
exqmLowPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 10), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmLowPriority.setStatus('current')
exqmMessageId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 11), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmMessageId.setStatus('current')
exqmNormalPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 12), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmNormalPriority.setStatus('current')
exqmProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 13), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmProtocolName.setStatus('current')
exqmQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 14), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmQueueId.setStatus('current')
exqmQueueName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 15), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmQueueName.setStatus('current')
exqmReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 16), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmReceived.setStatus('current')
exqmRecipientCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 17), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmRecipientCount.setStatus('current')
exqmRecipients = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 18), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmRecipients.setStatus('current')
exqmSender = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 19), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmSender.setStatus('current')
# Message size is reported in KB (setUnits below).
exqmSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 20), Gauge32()).setUnits('KB').setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmSize.setStatus('current')
exqmStateFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 21), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmStateFlags.setStatus('current')
exqmStateFrozen = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 22), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmStateFrozen.setStatus('current')
exqmStateRetry = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 23), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmStateRetry.setStatus('current')
exqmSubject = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 24), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmSubject.setStatus('current')
exqmSubmission = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 25), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmSubmission.setStatus('current')
exqmVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 26), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmVersion.setStatus('current')
exqmVirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 27), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmVirtualMachine.setStatus('current')
exqmVirtualServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 15, 1, 28), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqmVirtualServerName.setStatus('current')
# exchangeQueueVirtualServerTable (1.3.6.1.4.1.9600.1.23.16):
# per-virtual-server queue info, indexed by exvsIndex. The same six-column
# layout recurs in the SMTP/X400 virtual-server tables (exqsvs*, exqxvs*).
exchangeQueueVirtualServerTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 16), )
if mibBuilder.loadTexts: exchangeQueueVirtualServerTable.setStatus('current')
exchangeQueueVirtualServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 16, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exvsIndex"))
if mibBuilder.loadTexts: exchangeQueueVirtualServerEntry.setStatus('current')
exvsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 16, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exvsIndex.setStatus('current')
exvsGlobalActionsSupported = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 16, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exvsGlobalActionsSupported.setStatus('current')
exvsGlobalStop = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 16, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exvsGlobalStop.setStatus('current')
exvsProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 16, 1, 4), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exvsProtocolName.setStatus('current')
exvsVirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 16, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exvsVirtualMachine.setStatus('current')
exvsVirtualServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 16, 1, 6), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exvsVirtualServerName.setStatus('current')
# exchangeServerTable (1.3.6.1.4.1.9600.1.23.17):
# per-server attributes (group membership, paths, tracking/monitoring flags,
# timestamps), indexed by exsIndex. All columns read-only.
exchangeServerTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17), )
if mibBuilder.loadTexts: exchangeServerTable.setStatus('current')
exchangeServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exsIndex"))
if mibBuilder.loadTexts: exchangeServerEntry.setStatus('current')
exsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsIndex.setStatus('current')
exsAdministrativeGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 2), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsAdministrativeGroup.setStatus('current')
exsAdministrativeNote = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 3), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsAdministrativeNote.setStatus('current')
exsCreationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 4), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsCreationTime.setStatus('current')
exsDN = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsDN.setStatus('current')
exsExchangeVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 6), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsExchangeVersion.setStatus('current')
exsFQDN = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 7), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsFQDN.setStatus('current')
exsGUID = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 8), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsGUID.setStatus('current')
exsIsFrontEndServer = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 9), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsIsFrontEndServer.setStatus('current')
exsLastModificationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 10), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsLastModificationTime.setStatus('current')
exsMessageTrackingEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 11), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsMessageTrackingEnabled.setStatus('current')
# Log-file lifetime is expressed in days (setUnits below).
exsMessageTrackingLogFileLifetm = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 12), Gauge32()).setUnits('days').setMaxAccess("readonly")
if mibBuilder.loadTexts: exsMessageTrackingLogFileLifetm.setStatus('current')
exsMessageTrackingLogFilePath = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 13), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsMessageTrackingLogFilePath.setStatus('current')
exsMonitoringEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 14), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsMonitoringEnabled.setStatus('current')
exsMTADataPath = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 15), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsMTADataPath.setStatus('current')
exsName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 16), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsName.setStatus('current')
exsRoutingGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 17), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsRoutingGroup.setStatus('current')
exsSubjectLoggingEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 18), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsSubjectLoggingEnabled.setStatus('current')
# Enumerated server type: standard(0), enterprise(1), conferencing(2).
exsType = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 17, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("standard", 0), ("enterprise", 1), ("conferencing", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsType.setStatus('current')
# exchangeQueuedSMTPMessageTable (1.3.6.1.4.1.9600.1.23.18):
# SMTP-specific queued-message table, indexed by exqsmIndex. The 28-column
# layout mirrors exchangeQueuedMessageTable (exqm*) above, column for column.
exchangeQueuedSMTPMessageTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18), )
if mibBuilder.loadTexts: exchangeQueuedSMTPMessageTable.setStatus('current')
exchangeQueuedSMTPMessageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exqsmIndex"))
if mibBuilder.loadTexts: exchangeQueuedSMTPMessageEntry.setStatus('current')
exqsmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmIndex.setStatus('current')
exqsmActionDeleteNDR = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmActionDeleteNDR.setStatus('current')
exqsmActionDeleteNoNDR = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmActionDeleteNoNDR.setStatus('current')
exqsmActionFreeze = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmActionFreeze.setStatus('current')
exqsmActionThaw = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmActionThaw.setStatus('current')
exqsmExpiry = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 6), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmExpiry.setStatus('current')
exqsmHighPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmHighPriority.setStatus('current')
exqsmLinkId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 8), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmLinkId.setStatus('current')
exqsmLinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmLinkName.setStatus('current')
exqsmLowPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 10), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmLowPriority.setStatus('current')
exqsmMessageId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 11), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmMessageId.setStatus('current')
exqsmNormalPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 12), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmNormalPriority.setStatus('current')
exqsmProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 13), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmProtocolName.setStatus('current')
exqsmQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 14), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmQueueId.setStatus('current')
exqsmQueueName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 15), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmQueueName.setStatus('current')
exqsmReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 16), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmReceived.setStatus('current')
exqsmRecipientCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 17), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmRecipientCount.setStatus('current')
exqsmRecipients = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 18), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmRecipients.setStatus('current')
exqsmSender = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 19), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmSender.setStatus('current')
# Message size is reported in KB (setUnits below).
exqsmSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 20), Gauge32()).setUnits('KB').setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmSize.setStatus('current')
exqsmStateFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 21), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmStateFlags.setStatus('current')
exqsmStateFrozen = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 22), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmStateFrozen.setStatus('current')
exqsmStateRetry = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 23), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmStateRetry.setStatus('current')
exqsmSubject = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 24), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmSubject.setStatus('current')
exqsmSubmission = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 25), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmSubmission.setStatus('current')
exqsmVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 26), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmVersion.setStatus('current')
exqsmVirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 27), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmVirtualMachine.setStatus('current')
exqsmVirtualServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 18, 1, 28), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsmVirtualServerName.setStatus('current')
# exchangeQueuedX400MessageTable (1.3.6.1.4.1.9600.1.23.19):
# X.400-specific queued-message table, indexed by exqxmIndex. The 28-column
# layout mirrors exchangeQueuedMessageTable (exqm*) above, column for column.
exchangeQueuedX400MessageTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19), )
if mibBuilder.loadTexts: exchangeQueuedX400MessageTable.setStatus('current')
exchangeQueuedX400MessageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exqxmIndex"))
if mibBuilder.loadTexts: exchangeQueuedX400MessageEntry.setStatus('current')
exqxmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmIndex.setStatus('current')
exqxmActionDeleteNDR = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmActionDeleteNDR.setStatus('current')
exqxmActionDeleteNoNDR = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmActionDeleteNoNDR.setStatus('current')
exqxmActionFreeze = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmActionFreeze.setStatus('current')
exqxmActionThaw = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmActionThaw.setStatus('current')
exqxmExpiry = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 6), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmExpiry.setStatus('current')
exqxmHighPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmHighPriority.setStatus('current')
exqxmLinkId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 8), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmLinkId.setStatus('current')
exqxmLinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmLinkName.setStatus('current')
exqxmLowPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 10), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmLowPriority.setStatus('current')
exqxmMessageId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 11), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmMessageId.setStatus('current')
exqxmNormalPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 12), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmNormalPriority.setStatus('current')
exqxmProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 13), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmProtocolName.setStatus('current')
exqxmQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 14), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmQueueId.setStatus('current')
exqxmQueueName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 15), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmQueueName.setStatus('current')
exqxmReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 16), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmReceived.setStatus('current')
exqxmRecipientCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 17), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmRecipientCount.setStatus('current')
exqxmRecipients = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 18), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmRecipients.setStatus('current')
exqxmSender = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 19), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmSender.setStatus('current')
# Message size is reported in KB (setUnits below).
exqxmSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 20), Gauge32()).setUnits('KB').setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmSize.setStatus('current')
exqxmStateFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 21), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmStateFlags.setStatus('current')
exqxmStateFrozen = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 22), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmStateFrozen.setStatus('current')
exqxmStateRetry = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 23), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmStateRetry.setStatus('current')
exqxmSubject = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 24), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmSubject.setStatus('current')
exqxmSubmission = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 25), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmSubmission.setStatus('current')
exqxmVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 26), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmVersion.setStatus('current')
exqxmVirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 27), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmVirtualMachine.setStatus('current')
exqxmVirtualServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 19, 1, 28), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxmVirtualServerName.setStatus('current')
# exchangeQueueSMTPVirtualSrvTable (1.3.6.1.4.1.9600.1.23.20):
# SMTP virtual-server queue info, indexed by exqsvsIndex. Same six-column
# layout as exchangeQueueVirtualServerTable (exvs*) above.
exchangeQueueSMTPVirtualSrvTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 20), )
if mibBuilder.loadTexts: exchangeQueueSMTPVirtualSrvTable.setStatus('current')
exchangeQueueSMTPVirtualSrvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 20, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exqsvsIndex"))
if mibBuilder.loadTexts: exchangeQueueSMTPVirtualSrvEntry.setStatus('current')
exqsvsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 20, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsvsIndex.setStatus('current')
exqsvsGlobalActionsSupported = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 20, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsvsGlobalActionsSupported.setStatus('current')
exqsvsGlobalStop = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 20, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsvsGlobalStop.setStatus('current')
exqsvsProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 20, 1, 4), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsvsProtocolName.setStatus('current')
exqsvsVirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 20, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsvsVirtualMachine.setStatus('current')
exqsvsVirtualServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 20, 1, 6), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqsvsVirtualServerName.setStatus('current')
# exchangeQueueX400VirtualSrvTable (1.3.6.1.4.1.9600.1.23.21):
# X.400 virtual-server queue info, indexed by exqxvsIndex. Same six-column
# layout as exchangeQueueVirtualServerTable (exvs*) above.
exchangeQueueX400VirtualSrvTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 21), )
if mibBuilder.loadTexts: exchangeQueueX400VirtualSrvTable.setStatus('current')
exchangeQueueX400VirtualSrvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 21, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exqxvsIndex"))
if mibBuilder.loadTexts: exchangeQueueX400VirtualSrvEntry.setStatus('current')
exqxvsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 21, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxvsIndex.setStatus('current')
exqxvsGlobalActionsSupported = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 21, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxvsGlobalActionsSupported.setStatus('current')
exqxvsGlobalStop = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 21, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxvsGlobalStop.setStatus('current')
exqxvsProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 21, 1, 4), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxvsProtocolName.setStatus('current')
exqxvsVirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 21, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxvsVirtualMachine.setStatus('current')
exqxvsVirtualServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 21, 1, 6), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exqxvsVirtualServerName.setStatus('current')
# exchangeScheduleIntervalTable (1.3.6.1.4.1.9600.1.23.22):
# schedule intervals as start/stop DateAndTime pairs, indexed by exsiIndex.
exchangeScheduleIntervalTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 22), )
if mibBuilder.loadTexts: exchangeScheduleIntervalTable.setStatus('current')
exchangeScheduleIntervalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 22, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exsiIndex"))
if mibBuilder.loadTexts: exchangeScheduleIntervalEntry.setStatus('current')
exsiIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 22, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsiIndex.setStatus('current')
exsiStartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 22, 1, 2), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsiStartTime.setStatus('current')
exsiStopTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 22, 1, 3), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsiStopTime.setStatus('current')
# exchangeSMTPLinkTable (1.3.6.1.4.1.9600.1.23.23):
# 34 read-only columns per SMTP link, indexed by exslIndex — action flags,
# identity strings, counters, state booleans, and link-type booleans.
exchangeSMTPLinkTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23), )
if mibBuilder.loadTexts: exchangeSMTPLinkTable.setStatus('current')
exchangeSMTPLinkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exslIndex"))
if mibBuilder.loadTexts: exchangeSMTPLinkEntry.setStatus('current')
exslIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslIndex.setStatus('current')
exslActionFreeze = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslActionFreeze.setStatus('current')
exslActionKick = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslActionKick.setStatus('current')
exslActionThaw = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslActionThaw.setStatus('current')
exslExtendedStateInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslExtendedStateInfo.setStatus('current')
exslGlobalStop = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslGlobalStop.setStatus('current')
exslLinkDN = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 7), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslLinkDN.setStatus('current')
exslLinkId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 8), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslLinkId.setStatus('current')
exslLinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslLinkName.setStatus('current')
exslMessageCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslMessageCount.setStatus('current')
exslNextScheduledConnection = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 11), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslNextScheduledConnection.setStatus('current')
exslOldestMessage = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 12), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslOldestMessage.setStatus('current')
exslProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 13), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslProtocolName.setStatus('current')
# Link size exposed twice at different units: KB (exslKSize) and MB (exslMSize).
exslKSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 14), Gauge32()).setUnits('KB').setMaxAccess("readonly")
if mibBuilder.loadTexts: exslKSize.setStatus('current')
exslMSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 15), Gauge32()).setUnits('MB').setMaxAccess("readonly")
if mibBuilder.loadTexts: exslMSize.setStatus('current')
exslStateActive = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 16), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslStateActive.setStatus('current')
exslStateFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 17), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslStateFlags.setStatus('current')
exslStateFrozen = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 18), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslStateFrozen.setStatus('current')
exslStateReady = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 19), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslStateReady.setStatus('current')
exslStateRemote = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 20), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslStateRemote.setStatus('current')
exslStateRetry = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 21), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslStateRetry.setStatus('current')
exslStateScheduled = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 22), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslStateScheduled.setStatus('current')
exslSupportedLinkActions = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 23), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslSupportedLinkActions.setStatus('current')
exslTypeCurrentlyUnreachable = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 24), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslTypeCurrentlyUnreachable.setStatus('current')
exslTypeDeferredDelivery = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 25), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslTypeDeferredDelivery.setStatus('current')
exslTypeInternal = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 26), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslTypeInternal.setStatus('current')
exslTypeLocalDelivery = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 27), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslTypeLocalDelivery.setStatus('current')
exslTypePendingCategorization = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 28), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslTypePendingCategorization.setStatus('current')
exslTypePendingRouting = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 29), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslTypePendingRouting.setStatus('current')
exslTypePendingSubmission = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 30), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslTypePendingSubmission.setStatus('current')
exslTypeRemoteDelivery = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 31), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslTypeRemoteDelivery.setStatus('current')
exslVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 32), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslVersion.setStatus('current')
exslVirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 33), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslVirtualMachine.setStatus('current')
exslVirtualServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 23, 1, 34), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exslVirtualServerName.setStatus('current')
# --- exchangeSMTPQueueTable (OID ...9600.1.23.24): one exsq* row per SMTP
# --- queue, indexed by exsqIndex. All columns read-only; the K/M size gauges
# --- carry explicit units via setUnits('KB') / setUnits('MB').
# NOTE(review): machine-generated (pysnmp MIB compiler output) — regenerate
# from the INFORMANT-WMI-EXCHANGE MIB rather than hand-editing.
exchangeSMTPQueueTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24), )
if mibBuilder.loadTexts: exchangeSMTPQueueTable.setStatus('current')
exchangeSMTPQueueEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exsqIndex"))
if mibBuilder.loadTexts: exchangeSMTPQueueEntry.setStatus('current')
exsqIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqIndex.setStatus('current')
exsqCanEnumAll = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqCanEnumAll.setStatus('current')
exsqGlobalStop = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqGlobalStop.setStatus('current')
exsqLinkId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 4), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqLinkId.setStatus('current')
exsqLinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqLinkName.setStatus('current')
exsqMessageCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqMessageCount.setStatus('current')
exsqMsgEnumFlagsSupported = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqMsgEnumFlagsSupported.setStatus('current')
exsqProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 8), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqProtocolName.setStatus('current')
exsqQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqQueueId.setStatus('current')
exsqQueueName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 10), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqQueueName.setStatus('current')
exsqKSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 11), Gauge32()).setUnits('KB').setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqKSize.setStatus('current')
exsqMSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 12), Gauge32()).setUnits('MB').setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqMSize.setStatus('current')
exsqVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 13), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqVersion.setStatus('current')
exsqVirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 14), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqVirtualMachine.setStatus('current')
exsqVirtualServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 24, 1, 15), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exsqVirtualServerName.setStatus('current')
# --- exchangeX400LinkTable (OID ...9600.1.23.25): one exxl* row per X.400
# --- link, indexed by exxlIndex. Column layout mirrors the SMTP link table
# --- (exsl*) earlier in this module: action flags, identification strings,
# --- counters/timestamps, state flags, type flags. All read-only.
# NOTE(review): machine-generated (pysnmp MIB compiler output) — regenerate
# from the INFORMANT-WMI-EXCHANGE MIB rather than hand-editing.
exchangeX400LinkTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25), )
if mibBuilder.loadTexts: exchangeX400LinkTable.setStatus('current')
exchangeX400LinkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exxlIndex"))
if mibBuilder.loadTexts: exchangeX400LinkEntry.setStatus('current')
exxlIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlIndex.setStatus('current')
# Boolean (TruthValue) action-capability flags, columns 2-4.
exxlActionFreeze = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlActionFreeze.setStatus('current')
exxlActionKick = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlActionKick.setStatus('current')
exxlActionThaw = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlActionThaw.setStatus('current')
exxlExtendedStateInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlExtendedStateInfo.setStatus('current')
exxlGlobalStop = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlGlobalStop.setStatus('current')
exxlLinkDN = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 7), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlLinkDN.setStatus('current')
exxlLinkId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 8), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlLinkId.setStatus('current')
exxlLinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlLinkName.setStatus('current')
exxlMessageCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlMessageCount.setStatus('current')
exxlNextScheduledConnection = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 11), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlNextScheduledConnection.setStatus('current')
exxlOldestMessage = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 12), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlOldestMessage.setStatus('current')
exxlProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 13), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlProtocolName.setStatus('current')
exxlKSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 14), Gauge32()).setUnits('KB').setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlKSize.setStatus('current')
exxlMSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 15), Gauge32()).setUnits('MB').setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlMSize.setStatus('current')
# Boolean (TruthValue) state flags for the link, columns 16-22.
exxlStateActive = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 16), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlStateActive.setStatus('current')
exxlStateFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 17), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlStateFlags.setStatus('current')
exxlStateFrozen = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 18), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlStateFrozen.setStatus('current')
exxlStateReady = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 19), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlStateReady.setStatus('current')
exxlStateRemote = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 20), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlStateRemote.setStatus('current')
exxlStateRetry = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 21), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlStateRetry.setStatus('current')
exxlStateScheduled = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 22), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlStateScheduled.setStatus('current')
exxlSupportedLinkActions = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 23), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlSupportedLinkActions.setStatus('current')
# Boolean (TruthValue) link-type flags, columns 24-31.
exxlTypeCurrentlyUnreachable = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 24), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlTypeCurrentlyUnreachable.setStatus('current')
exxlTypeDeferredDelivery = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 25), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlTypeDeferredDelivery.setStatus('current')
exxlTypeInternal = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 26), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlTypeInternal.setStatus('current')
exxlTypeLocalDelivery = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 27), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlTypeLocalDelivery.setStatus('current')
exxlTypePendingCategorization = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 28), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlTypePendingCategorization.setStatus('current')
exxlTypePendingRouting = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 29), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlTypePendingRouting.setStatus('current')
exxlTypePendingSubmission = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 30), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlTypePendingSubmission.setStatus('current')
exxlTypeRemoteDelivery = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 31), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlTypeRemoteDelivery.setStatus('current')
exxlVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 32), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlVersion.setStatus('current')
exxlVirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 33), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlVirtualMachine.setStatus('current')
exxlVirtualServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 25, 1, 34), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxlVirtualServerName.setStatus('current')
# --- exchangeX400QueueTable (OID ...9600.1.23.26): one exxq* row per X.400
# --- queue, indexed by exxqIndex. Column layout mirrors the SMTP queue table
# --- (exsq*, OID ...9600.1.23.24). All columns read-only; the K/M size gauges
# --- carry explicit units via setUnits('KB') / setUnits('MB').
# NOTE(review): machine-generated (pysnmp MIB compiler output) — regenerate
# from the INFORMANT-WMI-EXCHANGE MIB rather than hand-editing.
exchangeX400QueueTable = MibTable((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26), )
if mibBuilder.loadTexts: exchangeX400QueueTable.setStatus('current')
exchangeX400QueueEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1), ).setIndexNames((0, "INFORMANT-WMI-EXCHANGE", "exxqIndex"))
if mibBuilder.loadTexts: exchangeX400QueueEntry.setStatus('current')
exxqIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqIndex.setStatus('current')
exxqCanEnumAll = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqCanEnumAll.setStatus('current')
exxqGlobalStop = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqGlobalStop.setStatus('current')
exxqLinkId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 4), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqLinkId.setStatus('current')
exxqLinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 5), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqLinkName.setStatus('current')
exxqMessageCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqMessageCount.setStatus('current')
exxqMsgEnumFlagsSupported = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqMsgEnumFlagsSupported.setStatus('current')
exxqProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 8), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqProtocolName.setStatus('current')
exxqQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 9), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqQueueId.setStatus('current')
exxqQueueName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 10), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqQueueName.setStatus('current')
exxqKSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 11), Gauge32()).setUnits('KB').setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqKSize.setStatus('current')
exxqMSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 12), Gauge32()).setUnits('MB').setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqMSize.setStatus('current')
exxqVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 13), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqVersion.setStatus('current')
exxqVirtualMachine = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 14), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqVirtualMachine.setStatus('current')
exxqVirtualServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9600, 1, 23, 26, 1, 15), WtcsDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: exxqVirtualServerName.setStatus('current')
# Register every symbol defined by this generated module with the MIB builder
# under the module name "INFORMANT-WMI-EXCHANGE", so other compiled MIB modules
# can resolve them via importSymbols. The registration is split across several
# exportSymbols() calls — presumably the generator caps keyword-argument count
# per call (TODO confirm against the pysnmp code generator). Keyword name and
# value are intentionally identical (symbol-name -> object). Do not hand-edit:
# a missing or mistyped entry silently breaks cross-module symbol resolution.
mibBuilder.exportSymbols("INFORMANT-WMI-EXCHANGE", exxlNextScheduledConnection=exxlNextScheduledConnection, exl2StateRetry=exl2StateRetry, exqmSender=exqmSender, exqmSize=exqmSize, exlNumberOfMessages=exlNumberOfMessages, exloRowID=exloRowID, exslStateReady=exslStateReady, exxlTypeRemoteDelivery=exxlTypeRemoteDelivery, exloClientMode=exloClientMode, exqmStateFlags=exqmStateFlags, exmtRecipientStatus=exmtRecipientStatus, exlStateActive=exlStateActive, exl2ActionFreeze=exl2ActionFreeze, exmtIndex=exmtIndex, exqxmVersion=exqxmVersion, exqsvsProtocolName=exqsvsProtocolName, exloProgressOperationRate=exloProgressOperationRate, exssServerState=exssServerState, exqxvsProtocolName=exqxvsProtocolName, exqsmStateRetry=exqsmStateRetry, exmtExpansionDL=exmtExpansionDL, exslTypePendingCategorization=exslTypePendingCategorization, exxqGlobalStop=exxqGlobalStop, exl2LinkId=exl2LinkId, exqmMessageId=exqmMessageId, exmLastLogonTime=exmLastLogonTime, exmtSize=exmtSize, exqxmStateRetry=exqxmStateRetry, exqCanEnumFailed=exqCanEnumFailed, exchangeLogonTable=exchangeLogonTable, exqmQueueName=exqmQueueName, exqsmActionDeleteNDR=exqsmActionDeleteNDR, exsqMsgEnumFlagsSupported=exsqMsgEnumFlagsSupported, exqsvsGlobalStop=exqsvsGlobalStop, exxlVersion=exxlVersion, exchangeMessageTrackingEntry=exchangeMessageTrackingEntry, exqxmExpiry=exqxmExpiry, exxlStateRetry=exxlStateRetry, exchangeDSAccessDCEntry=exchangeDSAccessDCEntry, exsqVirtualServerName=exsqVirtualServerName, exqVersion=exqVersion, exqsmStateFlags=exqsmStateFlags, exqxmHighPriority=exqxmHighPriority, exdsIsInSync=exdsIsInSync, exsCreationTime=exsCreationTime, exqmExpiry=exqmExpiry, exxlStateFrozen=exxlStateFrozen, exssClusterStateString=exssClusterStateString, exchangeQueueSMTPVirtualSrvTable=exchangeQueueSMTPVirtualSrvTable, exmDeletedMessageSizeExtended=exmDeletedMessageSizeExtended, exchangeLinkEntry=exchangeLinkEntry, excrOwner=excrOwner, exmtClientName=exmtClientName, exqsmNormalPriority=exqsmNormalPriority, 
exlTypeCurrentlyUnreachable=exlTypeCurrentlyUnreachable, expfPublishInAddressBook=expfPublishInAddressBook, exchangeMessageTrackingTable=exchangeMessageTrackingTable, exslMessageCount=exslMessageCount, exloLogonTime=exloLogonTime, expfReplicationMessagePriority=expfReplicationMessagePriority, exxqVirtualMachine=exxqVirtualMachine, exl2Version=exl2Version, exloOpenMessageCount=exloOpenMessageCount, excsGUID=excsGUID, exslVirtualServerName=exslVirtualServerName, exl2LinkDN=exl2LinkDN, expfComment=expfComment, exqxmActionDeleteNoNDR=exqxmActionDeleteNoNDR, exmtEncrypted=exmtEncrypted, expfAddressBookName=expfAddressBookName, exchangeClusterResourceEntry=exchangeClusterResourceEntry, exchangeConnectorStateEntry=exchangeConnectorStateEntry, exqmQueueId=exqmQueueId, exxlExtendedStateInfo=exxlExtendedStateInfo, exlTypePendingSubmission=exlTypePendingSubmission, exftAssociatedPublicStores=exftAssociatedPublicStores, exxlStateRemote=exxlStateRemote, exssCPUStateString=exssCPUStateString, exsMessageTrackingLogFilePath=exsMessageTrackingLogFilePath, exqsvsVirtualServerName=exqsvsVirtualServerName, exloStoreType=exloStoreType, exloMailboxDisplayName=exloMailboxDisplayName, exxqCanEnumAll=exxqCanEnumAll, exqmNormalPriority=exqmNormalPriority, exq2LinkName=exq2LinkName, exchangeQueuedX400MessageEntry=exchangeQueuedX400MessageEntry, exl2MessageCount=exl2MessageCount, expfWarningLimit=expfWarningLimit, exchangeQueuedMessageEntry=exchangeQueuedMessageEntry, exqsmVersion=exqsmVersion, exslStateFrozen=exslStateFrozen, expfProhibitPostLimit=expfProhibitPostLimit, exl2Index=exl2Index, exssIndex=exssIndex, exftCreationTime=exftCreationTime, exloLocaleID=exloLocaleID, exqsmReceived=exqsmReceived, exvsVirtualMachine=exvsVirtualMachine, exqxmMessageId=exqxmMessageId, exq2ProtocolName=exq2ProtocolName, exqCanEnumAll=exqCanEnumAll, exxqQueueName=exxqQueueName, exl2LinkName=exl2LinkName, exl2StateReady=exl2StateReady, exlOldestMessage=exlOldestMessage, exqsmExpiry=exqsmExpiry, 
exchangePublicFolderEntry=exchangePublicFolderEntry, exlProtocolName=exlProtocolName, exchangeConnectorStateTable=exchangeConnectorStateTable, exloHostAddress=exloHostAddress, exchangeSMTPLinkEntry=exchangeSMTPLinkEntry, exmIndex=exmIndex, exqsmHighPriority=exqsmHighPriority, expfIndex=expfIndex, exvsVirtualServerName=exvsVirtualServerName, exslActionFreeze=exslActionFreeze, exslProtocolName=exslProtocolName, exlActionThaw=exlActionThaw, exsFQDN=exsFQDN, expfRecipientCountOnNormalMsg=expfRecipientCountOnNormalMsg, excsName=excsName, exsqLinkName=exsqLinkName, exqmActionThaw=exqmActionThaw, exsqLinkId=exsqLinkId, exssGroupDN=exssGroupDN, exl2Size=exl2Size, exdsIsUp=exdsIsUp, expfADProxyPath=expfADProxyPath, exslOldestMessage=exslOldestMessage, exl2VirtualMachine=exl2VirtualMachine, exxlLinkId=exxlLinkId, exxlTypePendingCategorization=exxlTypePendingCategorization, expfIsPerUserReadDisabled=expfIsPerUserReadDisabled, exlIndex=exlIndex, exmtLinkedMessageID=exmtLinkedMessageID, expfSecurityDescriptor=expfSecurityDescriptor, exssGUID=exssGUID, exchangeSMTPQueueTable=exchangeSMTPQueueTable, exq2MsgEnumFlagsSupported=exq2MsgEnumFlagsSupported, exqcreIndex=exqcreIndex, exxqVersion=exxqVersion, exdsName=exdsName, exdsType=exdsType, exdsLDAPPort=exdsLDAPPort, exxlSupportedLinkActions=exxlSupportedLinkActions, exxqVirtualServerName=exxqVirtualServerName, exxlStateFlags=exxlStateFlags, exqxvsVirtualServerName=exqxvsVirtualServerName, expfName=expfName, exslTypePendingSubmission=exslTypePendingSubmission, exmtEntryType=exmtEntryType, exqxmActionDeleteNDR=exqxmActionDeleteNDR, exqmActionFreeze=exqmActionFreeze, exsiIndex=exsiIndex, exlStateReady=exlStateReady, exsqVersion=exsqVersion, exqxmStateFrozen=exqxmStateFrozen, exqNumberOfMessages=exqNumberOfMessages, exxlStateScheduled=exxlStateScheduled, exqmStateFrozen=exqmStateFrozen, exslActionThaw=exslActionThaw, exqmVirtualMachine=exqmVirtualMachine, exxlActionKick=exxlActionKick, exftRootFolderURL=exftRootFolderURL, 
exsqKSize=exsqKSize, exq2QueueId=exq2QueueId, exsMessageTrackingLogFileLifetm=exsMessageTrackingLogFileLifetm, exmMailboxGUID=exmMailboxGUID, exq2VirtualServerName=exq2VirtualServerName, exssUnreachable=exssUnreachable, PYSNMP_MODULE_ID=wmiExchange, exxlLinkDN=exxlLinkDN, exqsmVirtualMachine=exqsmVirtualMachine, exloMacAddress=exloMacAddress, exqProtocolName=exqProtocolName, expfUsePublicStoreAgeLimits=expfUsePublicStoreAgeLimits, excsGroupGUID=excsGroupGUID, expfAssociatedMessageCount=expfAssociatedMessageCount, exmtPartnerServer=exmtPartnerServer, exqsvsVirtualMachine=exqsvsVirtualMachine, exmtVersion=exmtVersion, exftHasLocalPublicStore=exftHasLocalPublicStore, exqmStateRetry=exqmStateRetry, wmiExchange=wmiExchange, exqsmQueueId=exqsmQueueId, exqxmRecipients=exqxmRecipients, exslLinkDN=exslLinkDN, exchangeQueuedX400MessageTable=exchangeQueuedX400MessageTable, exxlGlobalStop=exxlGlobalStop, exchangeDSAccessDCTable=exchangeDSAccessDCTable, exxlMessageCount=exxlMessageCount, exl2ActionKick=exl2ActionKick, exmtCost=exmtCost, exmStoreName=exmStoreName, exslStateScheduled=exslStateScheduled, exssDisksStateString=exssDisksStateString, exqmSubmission=exqmSubmission, expfReplicationSchedule=expfReplicationSchedule, exq2VirtualMachine=exq2VirtualMachine, exqVirtualServerName=exqVirtualServerName, exchangeScheduleIntervalTable=exchangeScheduleIntervalTable, exqxmActionFreeze=exqxmActionFreeze, exqxmActionThaw=exqxmActionThaw, exqmIndex=exqmIndex, exloLoggedOnUserAccount=exloLoggedOnUserAccount, exmStorageGroupName=exmStorageGroupName, exvsGlobalActionsSupported=exvsGlobalActionsSupported, exssVersion=exssVersion, exl2TypeCurrentlyUnreachable=exl2TypeCurrentlyUnreachable, exloFolderOperationRate=exloFolderOperationRate, excrIndex=excrIndex, expfFolderTree=expfFolderTree, exslTypeDeferredDelivery=exslTypeDeferredDelivery, exloStorageGroupName=exloStorageGroupName, exchangeFolderTreeEntry=exchangeFolderTreeEntry, exxlTypePendingRouting=exxlTypePendingRouting, 
exchangeMailboxTable=exchangeMailboxTable, expfParentFriendlyUrl=expfParentFriendlyUrl, exq2Size=exq2Size, exftAdministrativeNote=exftAdministrativeNote, exchangeX400QueueEntry=exchangeX400QueueEntry, exftLastModificationTime=exftLastModificationTime, exloOtherOperationRate=exloOtherOperationRate, exmAssocContentCount=exmAssocContentCount, exxqKSize=exxqKSize, exssDN=exssDN, exchangeServerEntry=exchangeServerEntry, exqsmVirtualServerName=exqsmVirtualServerName, expfDeletedItemLifetime=expfDeletedItemLifetime, exqxvsVirtualMachine=exqxvsVirtualMachine, exsType=exsType, exslLinkId=exslLinkId, exloMailboxLegacyDN=exloMailboxLegacyDN, exl2SupportedLinkActions=exl2SupportedLinkActions, exl2NextScheduledConnection=exl2NextScheduledConnection, exssGroupGUID=exssGroupGUID, exqsmRecipients=exqsmRecipients, exloOpenFolderCount=exloOpenFolderCount, expfAttachmentCount=expfAttachmentCount, exqmActionDeleteNDR=exqmActionDeleteNDR, exxlIndex=exxlIndex, exsiStartTime=exsiStartTime, exlStateScheduled=exlStateScheduled, excrVirtualMachine=excrVirtualMachine, expfMessageWithAttachmentsCount=expfMessageWithAttachmentsCount, exloOpenAttachmentCount=exloOpenAttachmentCount, expfOwnerCount=expfOwnerCount, exsMonitoringEnabled=exsMonitoringEnabled, exloClientName=exloClientName, excsIsUp=excsIsUp, exssQueuesStateString=exssQueuesStateString, exlVersion=exlVersion, exmtDeliveryTime=exmtDeliveryTime, exl2StateActive=exl2StateActive)
mibBuilder.exportSymbols("INFORMANT-WMI-EXCHANGE", excsIndex=excsIndex, exqxmIndex=exqxmIndex, exl2VirtualServerName=exl2VirtualServerName, exchangeX400QueueTable=exchangeX400QueueTable, exloTotalOperationRate=exloTotalOperationRate, exchangePublicFolderTable=exchangePublicFolderTable, exl2ActionThaw=exl2ActionThaw, exq2LinkId=exq2LinkId, exxqMessageCount=exxqMessageCount, exmLegacyDN=exmLegacyDN, exl2TypeRemoteDelivery=exl2TypeRemoteDelivery, exchangeLinkV2Table=exchangeLinkV2Table, exchangeLinkV2Entry=exchangeLinkV2Entry, excrState=excrState, exsqMessageCount=exsqMessageCount, exxlKSize=exxlKSize, exqxmSize=exqxmSize, exxlMSize=exxlMSize, exchangeServerTable=exchangeServerTable, exsExchangeVersion=exsExchangeVersion, exq2GlobalStop=exq2GlobalStop, exqCanEnumNOldestMessages=exqCanEnumNOldestMessages, exqsmSize=exqsmSize, exqsmQueueName=exqsmQueueName, exslTypeInternal=exslTypeInternal, exmtMessageID=exmtMessageID, exssName=exssName, exssServicesState=exssServicesState, exlStateRemote=exlStateRemote, exqxmStateFlags=exqxmStateFlags, exq2Index=exq2Index, exqmLowPriority=exqmLowPriority, exxqMsgEnumFlagsSupported=exxqMsgEnumFlagsSupported, exslStateRemote=exslStateRemote, exqQueueName=exqQueueName, exsqMSize=exsqMSize, exlTypeLocalDelivery=exlTypeLocalDelivery, exmtClientIP=exmtClientIP, exslSupportedLinkActions=exslSupportedLinkActions, exq2Version=exq2Version, exqmHighPriority=exqmHighPriority, exqsmLinkId=exqsmLinkId, exqCanEnumNLargestMessages=exqCanEnumNLargestMessages, expfIsSecureInSite=expfIsSecureInSite, exqmLinkId=exqmLinkId, exchangeQueueX400VirtualSrvEntry=exchangeQueueX400VirtualSrvEntry, expfReplicationStyle=expfReplicationStyle, exloAdapterSpeed=exloAdapterSpeed, exqsmSubject=exqsmSubject, exq2QueueName=exq2QueueName, exxlVirtualMachine=exxlVirtualMachine, exssQueuesState=exssQueuesState, exqCanEnumRecipient=exqCanEnumRecipient, exqCanEnumFrozen=exqCanEnumFrozen, exlSupportedLinkActions=exlSupportedLinkActions, exxlStateActive=exxlStateActive, 
exl2TypeInternal=exl2TypeInternal, exxlTypePendingSubmission=exxlTypePendingSubmission, exlActionKick=exlActionKick, exloLatency=exloLatency, exlTypePendingRouting=exlTypePendingRouting, exxqLinkId=exxqLinkId, expfIsMailEnabled=expfIsMailEnabled, exxlTypeDeferredDelivery=exxlTypeDeferredDelivery, exqIndex=exqIndex, exqCanEnumFirstNMessages=exqCanEnumFirstNMessages, exl2StateRemote=exl2StateRemote, expfCategorizationCount=expfCategorizationCount, exlTypeRemoteDelivery=exlTypeRemoteDelivery, exxlOldestMessage=exxlOldestMessage, exqCanEnumOlderThan=exqCanEnumOlderThan, exvsIndex=exvsIndex, expfIsNormalFolder=expfIsNormalFolder, exvsGlobalStop=exvsGlobalStop, exloCodePageID=exloCodePageID, exssServerStateString=exssServerStateString, excrName=excrName, exqCanEnumSender=exqCanEnumSender, expfStorageLimitStyle=expfStorageLimitStyle, exl2GlobalStop=exl2GlobalStop, exloIndex=exloIndex, exxlActionFreeze=exxlActionFreeze, exl2TypeLocalDelivery=exl2TypeLocalDelivery, exdsIsFast=exdsIsFast, exslTypeLocalDelivery=exslTypeLocalDelivery, exloRPCSucceeded=exloRPCSucceeded, exlStateRetry=exlStateRetry, exftMapiFolderTree=exftMapiFolderTree, exsSubjectLoggingEnabled=exsSubjectLoggingEnabled, exloTransferOperationRate=exloTransferOperationRate, exsIsFrontEndServer=exsIsFrontEndServer, exmtKeyID=exmtKeyID, exqMsgEnumFlagsSupported=exqMsgEnumFlagsSupported, exsiStopTime=exsiStopTime, exxqQueueId=exxqQueueId, exchangeQueueSMTPVirtualSrvEntry=exchangeQueueSMTPVirtualSrvEntry, exqmActionDeleteNoNDR=exqmActionDeleteNoNDR, exqmSubject=exqmSubject, exloLastOperationTime=exloLastOperationTime, exsAdministrativeNote=exsAdministrativeNote, exsqCanEnumAll=exsqCanEnumAll, exchangeLinkTable=exchangeLinkTable, exqsvsGlobalActionsSupported=exqsvsGlobalActionsSupported, exqmLinkName=exqmLinkName, exqxmNormalPriority=exqxmNormalPriority, expfContainsRules=expfContainsRules, exqVirtualMachine=exqVirtualMachine, expfPath=expfPath, exmLastLogoffTime=exmLastLogoffTime, 
expfAdminSecurityDescriptor=expfAdminSecurityDescriptor, exslStateFlags=exslStateFlags, expfTargetAddress=expfTargetAddress, exlLinkDN=exlLinkDN, exxqIndex=exxqIndex, exmServerName=exmServerName, exqsmSender=exqsmSender, expfHasLocalReplica=expfHasLocalReplica, exchangeQueueV2Table=exchangeQueueV2Table, exchangeQueueCacheReloadEvtTable=exchangeQueueCacheReloadEvtTable, exqmRecipients=exqmRecipients, exchangeScheduleIntervalEntry=exchangeScheduleIntervalEntry, exslMSize=exslMSize, exlTypeInternal=exlTypeInternal, exqSizeOfQueue=exqSizeOfQueue, exqxmSender=exqxmSender, exssServerMaintenance=exssServerMaintenance, exmSize=exmSize, exdsIndex=exdsIndex, exqxmQueueName=exqxmQueueName, exsqQueueId=exsqQueueId, exchangeQueueV2Entry=exchangeQueueV2Entry, exftIndex=exftIndex, exchangeServerStateEntry=exchangeServerStateEntry, exqLinkName=exqLinkName, exchangeQueueTable=exchangeQueueTable, exlTypePendingCategorization=exlTypePendingCategorization, exlStateFrozen=exlStateFrozen, exl2ExtendedStateInfo=exl2ExtendedStateInfo, exlIncreasingTime=exlIncreasingTime, exlExtendedStateInfo=exlExtendedStateInfo, expfCreationTime=expfCreationTime, exlSizeOfQueue=exlSizeOfQueue, exsName=exsName, exlActionFreeze=exlActionFreeze, expfLastModificationTime=expfLastModificationTime, exqxmSubject=exqxmSubject, exssMemoryStateString=exssMemoryStateString, exmtSubject=exmtSubject, exqGlobalStop=exqGlobalStop, exchangeQueueX400VirtualSrvTable=exchangeQueueX400VirtualSrvTable, exslTypePendingRouting=exslTypePendingRouting, exmLastLoggedOnUserAccount=exmLastLoggedOnUserAccount, expfMaximumItemSize=expfMaximumItemSize, exqxmReceived=exqxmReceived, exsqGlobalStop=exsqGlobalStop, exqmRecipientCount=exqmRecipientCount, expfLastAccessTime=expfLastAccessTime, exq2MessageCount=exq2MessageCount, exqsmProtocolName=exqsmProtocolName, exqsmMessageId=exqsmMessageId, exmtRecipientAddress=exmtRecipientAddress, exlVirtualServerName=exlVirtualServerName, exmtRecipientCount=exmtRecipientCount, 
exqsmStateFrozen=exqsmStateFrozen, exqsmSubmission=exqsmSubmission, exchangeLogonEntry=exchangeLogonEntry, exqsmActionThaw=exqsmActionThaw, exqmVirtualServerName=exqmVirtualServerName, exchangeClusterResourceTable=exchangeClusterResourceTable, exxqLinkName=exxqLinkName, exl2StateScheduled=exl2StateScheduled, exqsmIndex=exqsmIndex, exsqProtocolName=exsqProtocolName, exftName=exftName, exloClientIP=exloClientIP, exslIndex=exslIndex, exloMessagingOperationRate=exloMessagingOperationRate, exslTypeCurrentlyUnreachable=exslTypeCurrentlyUnreachable, exchangeFolderTreeTable=exchangeFolderTreeTable, exslStateRetry=exslStateRetry, exqsmLinkName=exqsmLinkName, exxlLinkName=exxlLinkName, exssCPUState=exssCPUState, expfRestrictionCount=expfRestrictionCount, exqsmActionDeleteNoNDR=exqsmActionDeleteNoNDR, exslGlobalStop=exslGlobalStop, exxlTypeInternal=exxlTypeInternal, exmtSenderAddress=exmtSenderAddress, exqxvsIndex=exqxvsIndex, exqxmSubmission=exqxmSubmission, exmtServerName=exmtServerName, exlLinkName=exlLinkName, exqsmRecipientCount=exqsmRecipientCount, exchangeQueueVirtualServerEntry=exchangeQueueVirtualServerEntry, exmMailboxDisplayName=exmMailboxDisplayName, expfUsePublicStoreDelItemLifetm=expfUsePublicStoreDelItemLifetm, exslKSize=exslKSize, exq2CanEnumAll=exq2CanEnumAll, exslLinkName=exslLinkName, exsqIndex=exsqIndex, exxlStateReady=exxlStateReady, exchangeQueueEntry=exchangeQueueEntry, exchangeSMTPLinkTable=exchangeSMTPLinkTable, expfReplicaAgeLimit=expfReplicaAgeLimit, exmStorageLimitInfo=exmStorageLimitInfo, expfContactCount=expfContactCount, exloServerName=exloServerName, exftGUID=exftGUID, exslVersion=exslVersion, exslActionKick=exslActionKick, exloStoreName=exloStoreName, exchangeQueuedMessageTable=exchangeQueuedMessageTable, exslTypeRemoteDelivery=exslTypeRemoteDelivery, exqmReceived=exqmReceived, exl2ProtocolName=exl2ProtocolName, exxlVirtualServerName=exxlVirtualServerName, exxlTypeLocalDelivery=exxlTypeLocalDelivery, exvsProtocolName=exvsProtocolName, 
exxqProtocolName=exxqProtocolName, exqxvsGlobalActionsSupported=exqxvsGlobalActionsSupported, exssServicesStateString=exssServicesStateString, exchangeX400LinkTable=exchangeX400LinkTable, exl2TypePendingCategorization=exl2TypePendingCategorization, exftAdministrativeGroup=exftAdministrativeGroup, exchangeQueueCacheReloadEvtEntry=exchangeQueueCacheReloadEvtEntry, expfTotalMessageSize=expfTotalMessageSize, exqxmQueueId=exqxmQueueId, exmDateDiscoveredAbsentInDS=exmDateDiscoveredAbsentInDS, exlStateFlags=exlStateFlags, exloStreamOperationRate=exloStreamOperationRate, exssMemoryState=exssMemoryState, exlNextScheduledConnection=exlNextScheduledConnection, exsRoutingGroup=exsRoutingGroup, excrType=excrType, exssDisksState=exssDisksState, exmtOriginationTime=exmtOriginationTime, exqCanEnumLargerThan=exqCanEnumLargerThan, exl2StateFlags=exl2StateFlags, exmtSubjectID=exmtSubjectID, exqxmLinkName=exqxmLinkName, exssClusterState=exssClusterState, exchangeQueuedSMTPMessageTable=exchangeQueuedSMTPMessageTable, exsqQueueName=exsqQueueName, exqxmLowPriority=exqxmLowPriority, exsIndex=exsIndex, exchangeMailboxEntry=exchangeMailboxEntry, exqxmVirtualMachine=exqxmVirtualMachine, exxlActionThaw=exxlActionThaw, exslVirtualMachine=exslVirtualMachine, expfUrl=expfUrl, exqxmVirtualServerName=exqxmVirtualServerName, exqxvsGlobalStop=exqxvsGlobalStop, exqCanEnumInvertSense=exqCanEnumInvertSense, exdsConfigurationType=exdsConfigurationType)
mibBuilder.exportSymbols("INFORMANT-WMI-EXCHANGE", exl2TypeDeferredDelivery=exl2TypeDeferredDelivery, expfRecipientCountOnAssociateMsg=expfRecipientCountOnAssociateMsg, exqxmRecipientCount=exqxmRecipientCount, exsAdministrativeGroup=exsAdministrativeGroup, exqmProtocolName=exqmProtocolName, exxlProtocolName=exxlProtocolName, exloClientVersion=exloClientVersion, exsLastModificationTime=exsLastModificationTime, exqxmProtocolName=exqxmProtocolName, exqcreReloadTime=exqcreReloadTime, excsGroupDN=excsGroupDN, exmtTimeLogged=exmtTimeLogged, exchangeX400LinkEntry=exchangeX400LinkEntry, exqIncreasingTime=exqIncreasingTime, exlGlobalStop=exlGlobalStop, exchangeQueueVirtualServerTable=exchangeQueueVirtualServerTable, exqmVersion=exqmVersion, exlVirtualMachine=exlVirtualMachine, exmtServerIP=exmtServerIP, exslStateActive=exslStateActive, excsDN=excsDN, exqsmActionFreeze=exqsmActionFreeze, exxlTypeCurrentlyUnreachable=exxlTypeCurrentlyUnreachable, exsDN=exsDN, expfNormalMessageSize=expfNormalMessageSize, exslExtendedStateInfo=exslExtendedStateInfo, exsqVirtualMachine=exsqVirtualMachine, exloLoggedOnUsersMailboxLegacyDN=exloLoggedOnUsersMailboxLegacyDN, expfIsSearchFolder=expfIsSearchFolder, exmtAttemptedPartnerServer=exmtAttemptedPartnerServer, exchangeServerStateTable=exchangeServerStateTable, exl2TypePendingRouting=exl2TypePendingRouting, exsMessageTrackingEnabled=exsMessageTrackingEnabled, exslNextScheduledConnection=exslNextScheduledConnection, exxqMSize=exxqMSize, exchangeSMTPQueueEntry=exchangeSMTPQueueEntry, exloTableOperationRate=exloTableOperationRate, exmtPriority=exmtPriority, exmTotalItems=exmTotalItems, exchangeQueuedSMTPMessageEntry=exchangeQueuedSMTPMessageEntry, exsGUID=exsGUID, exl2StateFrozen=exl2StateFrozen, expfFriendlyUrl=expfFriendlyUrl, exqxmLinkId=exqxmLinkId, exlTypeDeferredDelivery=exlTypeDeferredDelivery, expfAdministrativeNote=expfAdministrativeNote, expfMessageCount=expfMessageCount, exsMTADataPath=exsMTADataPath, expfHasChildren=expfHasChildren, 
exqsvsIndex=exqsvsIndex, exqsmLowPriority=exqsmLowPriority, expfReplicaList=expfReplicaList, exl2OldestMessage=exl2OldestMessage, exl2TypePendingSubmission=exl2TypePendingSubmission)
| 113.100701
| 9,365
| 0.763427
|
4a09ea63be4800f9d4aa4de9bf21541b651becbb
| 35,079
|
py
|
Python
|
scenedetect/video_manager.py
|
markbaumgarten/PySceneDetect
|
73ab482cb31e284295af73cb3d41c4e8f0e004dc
|
[
"BSD-3-Clause"
] | null | null | null |
scenedetect/video_manager.py
|
markbaumgarten/PySceneDetect
|
73ab482cb31e284295af73cb3d41c4e8f0e004dc
|
[
"BSD-3-Clause"
] | null | null | null |
scenedetect/video_manager.py
|
markbaumgarten/PySceneDetect
|
73ab482cb31e284295af73cb3d41c4e8f0e004dc
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# PySceneDetect: Python-Based Video Scene Detector
# ---------------------------------------------------------------
# [ Site: http://www.bcastell.com/projects/PySceneDetect/ ]
# [ Github: https://github.com/Breakthrough/PySceneDetect/ ]
# [ Documentation: http://pyscenedetect.readthedocs.org/ ]
#
# Copyright (C) 2014-2019 Brandon Castellano <http://www.bcastell.com>.
#
# PySceneDetect is licensed under the BSD 3-Clause License; see the included
# LICENSE file, or visit one of the following pages for details:
# - https://github.com/Breakthrough/PySceneDetect/
# - http://www.bcastell.com/projects/PySceneDetect/
#
# This software uses Numpy, OpenCV, click, tqdm, simpletable, and pytest.
# See the included LICENSE files or one of the above URLs for more information.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
""" Module: ``scenedetect.video_manager``
This module contains the :py:class:`VideoManager` class, which provides a consistent
interface to reading videos, specific exceptions raised upon certain error
conditions, and some global helper functions to open/close multiple videos,
as well as validate their parameters.
The :py:class:`VideoManager` can be constructed with a path to a video (or sequence of
videos) and a start and end time/duration, then passed to a `SceneManager`
object for performing scene detection analysis. If the start time is modified,
then it also needs to be reflected in the `SceneManager`.
The :py:class:`VideoManager` class attempts to emulate some methods of the OpenCV
cv2.VideoCapture object, and can be used interchangeably with one with
respect to a SceneManager object.
"""
# There also used to be an asynchronous implementation in addition to the
# synchronous VideoManager, but the performance was poor. In the future, I may
# consider rewriting an asynchronous frame grabber in C++ and write a C-API to
# interface with the Python ctypes module. - B.C.
# Standard Library Imports
from __future__ import print_function
import os
import math
import shutil
import subprocess
# Third-Party Library Imports
import cv2
import numpy as np
# PySceneDetect Library Imports
from scenedetect.platform import STRING_TYPE
import scenedetect.frame_timecode
from scenedetect.frame_timecode import FrameTimecode
##
## VideoManager Exceptions
##
class VideoOpenFailure(Exception):
    """ Raised when an OpenCV VideoCapture fails to open a video, i.e. the
    capture's isOpened() method returns a falsy value. """
    def __init__(self, file_list=None,
                 message="OpenCV VideoCapture object failed to return True when calling isOpened()."):
        # type: (Iterable[(str, str)], str)
        # Hand the human-readable message to the Exception base class.
        super(VideoOpenFailure, self).__init__(message)
        # Each entry is a (file name, file path) pair for a capture that failed to open.
        self.file_list = file_list
class VideoFramerateUnavailable(Exception):
    """ Raised when a video's framerate cannot be read from the capture and no
    override framerate was forced on the VideoManager. """
    def __init__(self, file_name=None, file_path=None,
                 message="OpenCV VideoCapture object failed to return framerate when calling get(cv2.CAP_PROP_FPS)."):
        # type: (str, str, str)
        # Hand the human-readable message to the Exception base class.
        super(VideoFramerateUnavailable, self).__init__(message)
        # Record which video the framerate could not be determined for.
        self.file_name = file_name
        self.file_path = file_path
class VideoParameterMismatch(Exception):
    """ Raised when multiple videos opened by a VideoManager disagree on one of
    the validated parameters (frame width, frame height, or framerate/FPS). """
    def __init__(self, file_list=None,
                 message="OpenCV VideoCapture object parameters do not match."):
        # type: (Iterable[Tuple[int, float, float, str, str]], str)
        # Hand the human-readable message to the Exception base class.
        super(VideoParameterMismatch, self).__init__(message)
        # Each entry is (param_mismatch_type, parameter value, expected value,
        # file name, file path), where param_mismatch_type is an OpenCV
        # CAP_PROP constant (e.g. CAP_PROP_FPS).
        self.file_list = file_list
class VideoDecodingInProgress(RuntimeError):
    """ VideoDecodingInProgress: Raised when attempting to call certain VideoManager methods that
    must be called *before* start() has been called (e.g. set_duration(), reset()). """
    pass
class VideoDecoderNotStarted(RuntimeError):
    """ VideoDecoderNotStarted: Raised when attempting to call certain VideoManager methods that
    must be called *after* start() has been called (e.g. grab(), retrieve(), read()). """
    pass
class InvalidDownscaleFactor(ValueError):
    """ InvalidDownscaleFactor: Raised when trying to set invalid downscale factor,
    i.e. the supplied downscale factor was not a positive integer greater than zero. """
    pass
##
## VideoManager Constants & Helper Functions
##
DEFAULT_DOWNSCALE_FACTORS = {
    3200: 12,   # ~4k
    2100: 8,    # ~2k
    1700: 6,    # ~1080p
    1200: 5,
    900: 4,     # ~720p
    600: 3,
    400: 2      # ~480p
}
"""Dict[int, int]: The default downscale factor for a video of size W x H,
which enforces the constraint that W >= 200 to ensure an adequate amount
of pixels for scene detection while providing a speedup in processing. """


def compute_downscale_factor(frame_width):
    # type: (int) -> int
    """ Compute Downscale Factor: Returns the optimal default downscale factor based on
    a video's resolution (specifically, the width parameter).

    Returns:
        int: The default downscale factor to use with a video of frame_height x frame_width.
    """
    # Walk the thresholds from widest to narrowest and pick the first one
    # the video meets; anything under the smallest threshold is not downscaled.
    for min_width, factor in sorted(DEFAULT_DOWNSCALE_FACTORS.items(), reverse=True):
        if frame_width >= min_width:
            return factor
    return 1
def get_video_name(video_file):
    # type: (str) -> Tuple[str, str]
    """ Get Video Name: Returns a string representing the video file/device name.

    Returns:
        str: Video file name or device ID. In the case of a video, only the file
            name is returned, not the whole path. For a device, the string format
            is 'Device 123', where 123 is the integer ID of the capture device.
    """
    # Integer inputs denote capture devices rather than files on disk.
    if isinstance(video_file, int):
        device_label = 'Device %d' % video_file
        return (device_label, video_file)
    # For a file path, report only the trailing path component.
    file_name = os.path.split(video_file)[1]
    return (file_name, video_file)
def get_num_frames(cap_list):
    # type: (List[cv2.VideoCapture]) -> int
    """ Get Number of Frames: Returns total number of frames in the cap_list.

    Calls get(CAP_PROP_FRAME_COUNT) and returns the sum for all VideoCaptures.
    """
    total = 0
    for cap in cap_list:
        # CAP_PROP_FRAME_COUNT is reported as a float; truncate to an int.
        total += math.trunc(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    return total
def get_rotation(file_path):
    # type: (str) -> Optional[str]
    """ Get Rotation: Returns the number of degrees a video has been rotated,
    as the string reported by ffprobe's stream rotate tag, or None.

    Uses subprocess to call the external program ffprobe on the video, and
    returns None when ffprobe is not installed, reports no rotation, or fails.

    Arguments:
        file_path (str): Path to the video file to probe.

    Returns:
        Optional[str]: Rotation in degrees as a string (e.g. "90"), or None.
    """
    try:
        # Bug fix: the original bound a lambda to `ffprobe_exists` and then
        # tested the lambda object itself (always truthy), so the availability
        # check never fired. Query shutil.which() directly instead.
        if shutil.which('ffprobe') is None:
            # TODO: Inform the user that ffprobe should be installed to handle
            # rotated videos?
            return None
        # NOTE(review): file_path is interpolated into a shell command line; a
        # path containing a double quote could break the command (shell
        # injection risk). Consider subprocess.run with a list argument.
        cmd = '''ffprobe -loglevel error -select_streams v:0 -show_entries stream_tags=rotate \
            -of default=nw=1:nk=1 -i "%s"''' % file_path
        rotation = subprocess.getoutput(cmd)
        # An empty string means the stream carries no rotate tag.
        return rotation or None
    except Exception:
        # Any failure (e.g. OSError launching the shell) is treated as
        # "rotation unknown" rather than aborting video loading.
        # TODO: Logging of exception?
        return None
def open_captures(video_files, framerate=None, validate_parameters=True):
    # type: (Iterable[str], float, bool) -> Tuple[List[VideoCapture], float, Tuple[int, int], List[Optional[str]]]
    """ Open Captures - helper function to open all capture objects, set the framerate,
    and ensure that all open captures have been opened and the framerates match on a list
    of video file paths, or a list containing a single device ID.

    Arguments:
        video_files (list of str(s)/int): A list of one or more paths (str), or a list
            of a single integer device ID, to open as an OpenCV VideoCapture object.
            A ValueError will be raised if the list does not conform to the above.
        framerate (float, optional): Framerate to assume when opening the video_files.
            If not set, the first open video is used for deducing the framerate of
            all videos in the sequence.
        validate_parameters (bool, optional): If true, will ensure that the frame sizes
            (width, height) and frame rate (FPS) of all passed videos is the same.
            A VideoParameterMismatch is raised if the framerates do not match.

    Returns:
        A tuple of form (cap_list, framerate, framesize, rotation_list) where cap_list
        is a list of open OpenCV VideoCapture objects in the same order as the
        video_files list, framerate is a float of the video(s) framerate(s), framesize
        is a tuple of (width, height) where width and height are integers representing
        the frame size in pixels, and rotation_list contains the rotation metadata
        string (or None) returned by get_rotation() for each video, in the same order.

    Raises:
        ValueError: No video file(s) specified, or invalid/multiple device IDs specified.
        TypeError: `framerate` must be type `float`.
        IOError: Video file(s) not found.
        VideoFramerateUnavailable: Video framerate could not be obtained and `framerate`
            was not set manually.
        VideoParameterMismatch: All videos in `video_files` do not have equal parameters.
            Set `validate_parameters=False` to skip this check.
        VideoOpenFailure: Video(s) could not be opened.
    """
    is_device = False
    if not video_files:
        raise ValueError("Expected at least 1 video file or device ID.")
    if isinstance(video_files[0], int):
        # A device ID must be the only element, and must be non-negative.
        if len(video_files) > 1:
            raise ValueError("If device ID is specified, no video sources may be appended.")
        elif video_files[0] < 0:
            raise ValueError("Invalid/negative device ID specified.")
        is_device = True
    elif not all([isinstance(video_file, (str, STRING_TYPE)) for video_file in video_files]):
        raise ValueError("Unexpected element type in video_files list (expected str(s)/int).")
    elif framerate is not None and not isinstance(framerate, float):
        # NOTE(review): because this check sits on the elif chain, it is skipped
        # entirely when a device ID is supplied — confirm this is intentional.
        raise TypeError("Expected type float for parameter framerate.")
    # Check if files exist.
    if not is_device and any([not os.path.exists(video_file) for video_file in video_files]):
        raise IOError("Video file(s) not found.")
    cap_list = []
    try:
        cap_list = [cv2.VideoCapture(video_file) for video_file in video_files]
        rotation_list = [get_rotation(video_file) for video_file in video_files]
        video_names = [get_video_name(video_file) for video_file in video_files]
        # Collect the (name, path) pair of every capture that failed to open.
        closed_caps = [video_names[i] for i, cap in
                       enumerate(cap_list) if not cap.isOpened()]
        if closed_caps:
            raise VideoOpenFailure(closed_caps)
        cap_framerates = [cap.get(cv2.CAP_PROP_FPS) for cap in cap_list]
        cap_framerate, check_framerate = validate_capture_framerate(
            video_names, cap_framerates, framerate)
        # Store frame sizes as integers (VideoCapture.get() returns float).
        cap_frame_sizes = [(math.trunc(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                            math.trunc(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
                           for cap in cap_list]
        cap_frame_size = cap_frame_sizes[0]
        # If we need to validate the parameters, we check that the FPS and width/height
        # of all open captures is identical (or almost identical in the case of FPS).
        if validate_parameters:
            validate_capture_parameters(
                video_names=video_names, cap_frame_sizes=cap_frame_sizes,
                check_framerate=check_framerate, cap_framerates=cap_framerates)
    except:
        # On any failure, release every capture opened so far before re-raising,
        # so no OS video handles are leaked.
        release_captures(cap_list)
        raise
    return (cap_list, cap_framerate, cap_frame_size, rotation_list)
def release_captures(cap_list):
    # type: (Iterable[VideoCapture]) -> None
    """ Release Captures: invokes the release() method on every capture in cap_list. """
    for capture in cap_list:
        capture.release()
def close_captures(cap_list):
    # type: (Iterable[VideoCapture]) -> None
    """ Close Captures: invokes the close() method on every capture in cap_list. """
    for capture in cap_list:
        capture.close()
def validate_capture_framerate(video_names, cap_framerates, framerate=None):
    # type: (List[Tuple[str, str]], List[float], Optional[float]) -> Tuple[float, bool]
    """ Validate Capture Framerate: Ensures that the passed capture framerates are valid and equal.

    Returns a tuple (framerate to use, whether per-capture framerates still need
    to be cross-checked by the caller).

    Raises:
        ValueError: Invalid framerate (must be positive non-zero value).
        TypeError: Framerate must be of type float.
        VideoFramerateUnavailable: Framerate for video could not be obtained,
            and `framerate` was not set.
    """
    # Default: trust the first capture's framerate and ask the caller to verify
    # the remaining captures against it.
    cap_framerate = cap_framerates[0]
    if framerate is None:
        # No override supplied: every capture must report a usable framerate.
        missing = [(name, path) for (name, path), fps
                   in zip(video_names, cap_framerates)
                   if fps < scenedetect.frame_timecode.MINIMUM_FRAMES_PER_SECOND_FLOAT]
        if missing:
            raise VideoFramerateUnavailable(missing)
        return (cap_framerate, True)
    # An override was supplied: it must be a valid, positive float, and it
    # replaces the capture-reported framerate (no further checking required).
    if not isinstance(framerate, float):
        raise TypeError("Expected float for framerate, got %s." % type(framerate).__name__)
    if framerate < scenedetect.frame_timecode.MINIMUM_FRAMES_PER_SECOND_FLOAT:
        raise ValueError("Invalid framerate (must be a positive non-zero value).")
    return (framerate, False)
def validate_capture_parameters(video_names, cap_frame_sizes, check_framerate=False,
                                cap_framerates=None):
    # type: (List[Tuple[str, str]], List[Tuple[int, int]], Optional[bool],
    #        Optional[List[float]]) -> None
    """ Validate Capture Parameters: Ensures that all passed capture frame sizes and (optionally)
    framerates are equal. Raises VideoParameterMismatch if there is a mismatch.

    Raises:
        VideoParameterMismatch
    """
    mismatches = []
    fps_tolerance = scenedetect.frame_timecode.MINIMUM_FRAMES_PER_SECOND_FLOAT
    # The first capture's parameters act as the reference values.
    ref_width, ref_height = cap_frame_sizes[0]
    # Mismatch entries are grouped by property: widths first, then heights,
    # then (optionally) framerates.
    for i, frame_size in enumerate(cap_frame_sizes):
        if frame_size[0] != ref_width:
            mismatches.append((cv2.CAP_PROP_FRAME_WIDTH, frame_size[0], ref_width,
                               video_names[i][0], video_names[i][1]))
    for i, frame_size in enumerate(cap_frame_sizes):
        if frame_size[1] != ref_height:
            mismatches.append((cv2.CAP_PROP_FRAME_HEIGHT, frame_size[1], ref_height,
                               video_names[i][0], video_names[i][1]))
    if check_framerate:
        ref_fps = cap_framerates[0]
        for i, fps in enumerate(cap_framerates):
            # Framerates only need to match within a small tolerance.
            if math.fabs(fps - ref_fps) > fps_tolerance:
                mismatches.append((cv2.CAP_PROP_FPS, fps, ref_fps,
                                   video_names[i][0], video_names[i][1]))
    if mismatches:
        raise VideoParameterMismatch(mismatches)
##
## VideoManager Class Implementation
##
class VideoManager(object):
""" Provides a cv2.VideoCapture-like interface to a set of one or more video files,
or a single device ID. Supports seeking and setting end time/duration. """
def __init__(self, video_files, framerate=None, logger=None):
# type: (List[str], Optional[float])
""" VideoManager Constructor Method (__init__)
Arguments:
video_files (list of str(s)/int): A list of one or more paths (str), or a list
of a single integer device ID, to open as an OpenCV VideoCapture object.
framerate (float, optional): Framerate to assume when storing FrameTimecodes.
If not set (i.e. is None), it will be deduced from the first open capture
in video_files, else raises a VideoFramerateUnavailable exception.
Raises:
ValueError: No video file(s) specified, or invalid/multiple device IDs specified.
TypeError: `framerate` must be type `float`.
IOError: Video file(s) not found.
VideoFramerateUnavailable: Video framerate could not be obtained and `framerate`
was not set manually.
VideoParameterMismatch: All videos in `video_files` do not have equal parameters.
Set `validate_parameters=False` to skip this check.
VideoOpenFailure: Video(s) could not be opened.
"""
if not video_files:
raise ValueError("At least one string/integer must be passed in the video_files list.")
# These VideoCaptures are only open in this process.
self._cap_list, self._cap_framerate, self._cap_framesize, self._rotation_list = \
open_captures(video_files=video_files, framerate=framerate)
self._end_of_video = False
self._start_time = self.get_base_timecode()
self._end_time = None
self._curr_time = self.get_base_timecode()
self._last_frame = None
self._curr_cap, self._curr_cap_idx = None, None
self._video_file_paths = video_files
self._logger = logger
self._rotates = {"270": 1, "180": 2, "90": 3}
if self._logger is not None:
self._logger.info(
'Loaded %d video%s, framerate: %.2f FPS, resolution: %d x %d',
len(self._cap_list), 's' if len(self._cap_list) > 1 else '',
self.get_framerate(), *self.get_framesize())
self._started = False
self._downscale_factor = 1
self._frame_length = get_num_frames(self._cap_list)
def set_downscale_factor(self, downscale_factor=None):
# type: (Optional[int]) -> None
""" Set Downscale Factor - sets the downscale/subsample factor of returned frames.
If N is the downscale_factor, the size of the frames returned becomes
frame_width/N x frame_height/N via subsampling.
If downscale_factor is None, the downscale factor is computed automatically
based on the current video's resolution. A downscale_factor of 1 indicates
no downscaling.
"""
if downscale_factor is None:
self._downscale_factor = compute_downscale_factor(self.get_framesize()[0])
else:
if not downscale_factor > 0:
raise InvalidDownscaleFactor()
self._downscale_factor = downscale_factor
if self._logger is not None:
effective_framesize = self.get_framesize_effective()
self._logger.info(
'Downscale factor set to %d, effective resolution: %d x %d',
self._downscale_factor, effective_framesize[0], effective_framesize[1])
def get_num_videos(self):
# type: () -> int
""" Get Number of Videos - returns the length of the capture list (self._cap_list),
representing the number of videos the VideoManager has opened.
Returns:
int: Number of videos, equal to length of capture list.
"""
return len(self._cap_list)
def get_video_paths(self):
# type: () -> List[str]
""" Get Video Paths - returns list of strings containing paths to the open video(s).
Returns:
List[str]: List of paths to the video files opened by the VideoManager.
"""
return list(self._video_file_paths)
def get_framerate(self):
# type: () -> float
""" Get Framerate - returns the framerate the VideoManager is assuming for all
open VideoCaptures. Obtained from either the capture itself, or the passed
framerate parameter when the VideoManager object was constructed.
Returns:
float: Framerate, in frames/sec.
"""
return self._cap_framerate
def get_base_timecode(self):
# type: () -> FrameTimecode
""" Get Base Timecode - returns a FrameTimecode object at frame 0 / time 00:00:00.
The timecode returned by this method can be used to perform arithmetic (e.g.
addition), passing the resulting values back to the VideoManager (e.g. for the
set_duration() method), as the framerate of the returned FrameTimecode object
matches that of the VideoManager.
As such, this method is equivalent to creating a FrameTimecode at frame 0 with
the VideoManager framerate, for example, given a VideoManager called obj,
the following expression will evaluate as True:
obj.get_base_timecode() == FrameTimecode(0, obj.get_framerate())
Furthermore, the base timecode object returned by a particular VideoManager
should not be passed to another one, unless you first verify that their
framerates are the same.
Returns:
FrameTimecode object set to frame 0/time 00:00:00 with the video(s) framerate.
"""
return FrameTimecode(timecode=0, fps=self._cap_framerate)
def get_current_timecode(self):
# type: () -> FrameTimecode
""" Get Current Timecode - returns a FrameTimecode object at current VideoManager position.
Returns:
FrameTimecode: Timecode at the current VideoManager position.
"""
return self._curr_time
def get_framesize(self):
# type: () -> Tuple[int, int]
""" Get Frame Size - returns the frame size of the video(s) open in the
VideoManager's capture objects.
Returns:
Tuple[int, int]: Video frame size in the form (width, height) where width
and height represent the size of the video frame in pixels.
"""
return self._cap_framesize
def get_framesize_effective(self):
# type: () -> Tuple[int, int]
""" Get Frame Size - returns the frame size of the video(s) open in the
VideoManager's capture objects, divided by the current downscale factor.
Returns:
Tuple[int, int]: Video frame size in the form (width, height) where width
and height represent the size of the video frame in pixels.
"""
return [num_pixels / self._downscale_factor for num_pixels in self._cap_framesize]
def set_duration(self, duration=None, start_time=None, end_time=None):
# type: (Optional[FrameTimecode], Optional[FrameTimecode], Optional[FrameTimecode]) -> None
""" Set Duration - sets the duration/length of the video(s) to decode, as well as
the start/end times. Must be called before start() is called, otherwise a
VideoDecodingInProgress exception will be thrown. May be called after reset()
as well.
Arguments:
duration (Optional[FrameTimecode]): The (maximum) duration in time to
decode from the opened video(s). Mutually exclusive with end_time
(i.e. if duration is set, end_time must be None).
start_time (Optional[FrameTimecode]): The time/first frame at which to
start decoding frames from. If set, the input video(s) will be
seeked to when start() is called, at which point the frame at
start_time can be obtained by calling retrieve().
end_time (Optional[FrameTimecode]): The time at which to stop decoding
frames from the opened video(s). Mutually exclusive with duration
(i.e. if end_time is set, duration must be None).
Raises:
VideoDecodingInProgress: Must call before start().
"""
if self._started:
raise VideoDecodingInProgress()
# Ensure any passed timecodes have the proper framerate.
if ((duration is not None and not duration.equal_framerate(self._cap_framerate)) or
(start_time is not None and not start_time.equal_framerate(self._cap_framerate)) or
(end_time is not None and not end_time.equal_framerate(self._cap_framerate))):
raise ValueError("FrameTimecode framerate does not match.")
if duration is not None and end_time is not None:
raise TypeError("Only one of duration and end_time may be specified, not both.")
if start_time is not None:
self._start_time = start_time
if end_time is not None:
if end_time < start_time:
raise ValueError("end_time is before start_time in time.")
self._end_time = end_time
elif duration is not None:
self._end_time = self._start_time + duration
if self._end_time is not None:
self._frame_length = min(self._frame_length, self._end_time.get_frames() + 1)
self._frame_length -= self._start_time.get_frames()
if self._logger is not None:
self._logger.info(
'Duration set, start: %s, duration: %s, end: %s.',
start_time.get_timecode() if start_time is not None else start_time,
duration.get_timecode() if duration is not None else duration,
end_time.get_timecode() if end_time is not None else end_time)
def get_duration(self):
# type: () -> FrameTimecode
""" Get Duration - gets the duration/length of the video(s) to decode, as well as
the start/end times.
If the end time was not set by set_duration(), the end timecode is calculated
as the start timecode + total duration.
Returns:
Tuple[FrameTimecode, FrameTimecode, FrameTimecode]: The current video(s)
total duration, start timecode, and end timecode.
"""
frame_length = self.get_base_timecode() + self._frame_length
end_time = self._end_time
if end_time is None:
end_time = self.get_base_timecode() + frame_length
return (frame_length, self._start_time, end_time)
def start(self):
# type: () -> None
""" Start - starts video decoding and seeks to start time. Raises
exception VideoDecodingInProgress if the method is called after the
decoder process has already been started.
Raises:
VideoDecodingInProgress: Must call stop() before this method if
start() has already been called after initial construction.
"""
if self._started:
raise VideoDecodingInProgress()
self._started = True
self._get_next_cap()
self.seek(self._start_time)
def seek(self, timecode):
# type: (FrameTimecode) -> bool
""" Seek - seeks forwards to the passed timecode.
Only supports seeking forwards (i.e. timecode must be greater than the
current VideoManager position). Can only be used after the start()
method has been called.
Arguments:
timecode (FrameTimecode): Time in video to seek forwards to.
Returns:
bool: True if seeking succeeded, False if no more frames / end of video.
Raises:
VideoDecoderNotStarted: Must call start() before this method.
"""
while self._curr_time < timecode:
if not self.grab(): # raises VideoDecoderNotStarted if start() was not called
return False
return True
def release(self):
# type: () -> None
""" Release (cv2.VideoCapture method), releases all open capture(s). """
release_captures(self._cap_list)
self._cap_list = []
self._started = False
def reset(self):
# type: () -> None
""" Reset - Reopens captures passed to the constructor of the VideoManager.
Can only be called after the release() method has been called.
Raises:
VideoDecodingInProgress: Must call release() before this method.
"""
if self._started:
raise VideoDecodingInProgress()
self._started = False
self._end_of_video = False
self._curr_time = self.get_base_timecode()
self._cap_list, self._cap_framerate, self._cap_framesize, self._rotation_list = \
open_captures(video_files=self._video_file_paths,
framerate=self._curr_time.get_framerate())
self._curr_cap, self._curr_cap_idx = None, None
def get(self, capture_prop, index=None):
# type: (int, Optional[int]) -> Union[float, int]
""" Get (cv2.VideoCapture method) - obtains capture properties from the current
VideoCapture object in use. Index represents the same index as the original
video_files list passed to the constructor. Getting/setting the position (POS)
properties has no effect; seeking is implemented using VideoDecoder methods.
Note that getting the property CAP_PROP_FRAME_COUNT will return the integer sum of
the frame count for all VideoCapture objects if index is not specified (or is None),
otherwise the frame count for the given VideoCapture index is returned instead.
Arguments:
capture_prop: OpenCV VideoCapture property to get (i.e. CAP_PROP_FPS).
index (int, optional): Index in file_list of capture to get property from (default
is zero). Index is not checked and will raise exception if out of bounds.
Returns:
float: Return value from calling get(property) on the VideoCapture object.
"""
if capture_prop == cv2.CAP_PROP_FRAME_COUNT and index is None:
return self._frame_length
elif capture_prop == cv2.CAP_PROP_POS_FRAMES:
return self._curr_time
elif index is None:
index = 0
return self._cap_list[index].get(capture_prop)
def grab(self):
# type: () -> bool
""" Grab (cv2.VideoCapture method) - retrieves a frame but does not return it.
Returns:
bool: True if a frame was grabbed, False otherwise.
Raises:
VideoDecoderNotStarted: Must call start() before this method.
"""
if not self._started:
raise VideoDecoderNotStarted()
grabbed = False
if self._curr_cap is not None and self._end_of_video != True:
while not grabbed:
grabbed = self._curr_cap.grab()
if not grabbed and not self._get_next_cap():
break
else:
self._curr_time += 1
if self._end_time is not None and self._curr_time > self._end_time:
grabbed = False
self._last_frame = None
return grabbed
def retrieve(self):
# type: () -> Tuple[bool, Union[None, numpy.ndarray]]
""" Retrieve (cv2.VideoCapture method) - retrieves and returns a frame.
Frame returned corresponds to last call to get().
Returns:
Tuple[bool, Union[None, numpy.ndarray]]: Returns tuple of
(True, frame_image) if a frame was grabbed during the last call
to grab(), and where frame_image is a numpy ndarray of the
decoded frame, otherwise returns (False, None).
Raises:
VideoDecoderNotStarted: Must call start() before this method.
"""
if not self._started:
raise VideoDecoderNotStarted()
retrieved = False
if self._curr_cap is not None and self._end_of_video != True:
while not retrieved:
retrieved, self._last_frame = self._curr_cap.retrieve()
self._rotate()
if not retrieved and not self._get_next_cap():
break
if self._downscale_factor > 1:
self._last_frame = self._last_frame[
::self._downscale_factor, ::self._downscale_factor, :]
if self._end_time is not None and self._curr_time > self._end_time:
retrieved = False
self._last_frame = None
return (retrieved, self._last_frame)
def _rotate(self):
    # type: () -> None
    """Rotate the last decoded frame per the active capture's configured rotation.

    Reads the rotation (in degrees) registered for the current capture index;
    a falsy value (0 / None) means no rotation is applied.
    """
    degrees = self._rotation_list[self._curr_cap_idx]
    if not degrees:
        return
    # self._rotates maps a degree value to the number of 90-degree turns.
    self._last_frame = np.rot90(self._last_frame, self._rotates[degrees])
def read(self):
    # type: () -> Tuple[bool, Union[None, numpy.ndarray]]
    """ Read (cv2.VideoCapture method) - retrieves and returns a frame.

    Returns:
        Tuple[bool, Union[None, numpy.ndarray]]: Returns tuple of
            (True, frame_image) if a frame was grabbed, where frame_image
            is a numpy ndarray of the decoded frame, otherwise (False, None).

    Raises:
        VideoDecoderNotStarted: Must call start() before this method.
    """
    if not self._started:
        raise VideoDecoderNotStarted()
    read_frame = False
    if self._curr_cap is not None and self._end_of_video != True:
        read_frame, self._last_frame = self._curr_cap.read()
        # Switch to the next capture when the current one is over
        # (a single retry; if the next capture is also empty, we report False).
        if not read_frame and self._get_next_cap():
            read_frame, self._last_frame = self._curr_cap.read()
        # Downscale frame if there was any
        if read_frame and self._downscale_factor > 1:
            self._last_frame = self._last_frame[
                ::self._downscale_factor, ::self._downscale_factor, :]
        # Past the configured end time: report failure and drop the frame.
        if self._end_time is not None and self._curr_time > self._end_time:
            read_frame = False
            self._last_frame = None
        # Frame position only advances on a successful read.
        if read_frame:
            self._curr_time += 1
    return (read_frame, self._last_frame)
def _get_next_cap(self):
    # type: () -> bool
    """Advance self._curr_cap to the next VideoCapture in self._cap_list.

    On the very first call (index still None) the first capture is selected.
    When the list is exhausted, _end_of_video is set and False is returned.

    Returns:
        bool: True if a capture is now active, False if all are consumed.
    """
    self._curr_cap = None
    if self._curr_cap_idx is None:
        # First invocation: start with capture 0.
        self._curr_cap_idx = 0
        self._curr_cap = self._cap_list[0]
        return True
    next_idx = self._curr_cap_idx + 1
    if next_idx >= len(self._cap_list):
        self._end_of_video = True
        return False
    self._curr_cap_idx = next_idx
    self._curr_cap = self._cap_list[next_idx]
    return True
| 42.831502
| 99
| 0.654865
|
4a09ead0646f73f25e4ff8f4c4e3c5918c22fdd4
| 854
|
py
|
Python
|
setup_1.py
|
KwonJiHun1/openSW
|
606cc3fb56d5afd8b89baa9c3bfef5f945a52348
|
[
"MIT"
] | 243
|
2021-04-25T22:28:24.000Z
|
2022-03-31T13:15:35.000Z
|
setup_1.py
|
KwonJiHun1/openSW
|
606cc3fb56d5afd8b89baa9c3bfef5f945a52348
|
[
"MIT"
] | 30
|
2021-06-30T18:57:24.000Z
|
2022-03-28T10:24:49.000Z
|
setup_1.py
|
KwonJiHun1/openSW
|
606cc3fb56d5afd8b89baa9c3bfef5f945a52348
|
[
"MIT"
] | 40
|
2021-05-03T01:58:29.000Z
|
2022-03-27T13:22:12.000Z
|
# Packaging script for the retina-face distribution (RetinaFace face detector).
import setuptools

# The PyPI long description is taken verbatim from the project README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="retina-face", #pip install retina-face
    version="0.0.10",
    author="Sefik Ilkin Serengil",
    author_email="serengil@gmail.com",
    description="RetinaFace: Deep Face Detection Framework in TensorFlow for Python",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/serengil/retinaface",
    # Automatically include every package under the repository root.
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent"
    ],
    python_requires='>=3.5.5',
    # Runtime dependencies; gdown is used to fetch pretrained weights.
    install_requires=["numpy>=1.14.0", "gdown>=3.10.1", "Pillow>=5.2.0", "opencv-python>=3.4.4", "tensorflow>=1.9.0"]
)
| 35.583333
| 117
| 0.669789
|
4a09eaf77c619fbec9c723f52cabb94b338bf79f
| 10,978
|
py
|
Python
|
plot_LAMR.py
|
ciodar/task-conditioned
|
be53eec1409932aeba59dd366471171fada3246b
|
[
"MIT"
] | 21
|
2020-08-18T11:01:59.000Z
|
2022-03-12T22:49:26.000Z
|
plot_LAMR.py
|
java235567/yolo3-kaist
|
bf4f3154641af1674430bb209e23d0d4ebb2de9c
|
[
"MIT"
] | 4
|
2021-01-02T04:06:29.000Z
|
2022-01-16T02:08:06.000Z
|
plot_LAMR.py
|
java235567/yolo3-kaist
|
bf4f3154641af1674430bb209e23d0d4ebb2de9c
|
[
"MIT"
] | 9
|
2020-10-02T07:19:19.000Z
|
2022-03-25T10:12:15.000Z
|
import os
import numpy as np
import brambox.boxes as bbb
import matplotlib.pyplot as plt
import scipy.interpolate
import glob

filename_detection = 'results/detection_results.json'
# Map an annotation file path to its id: the last three path components
# ("set/video/frame") with the extension stripped.
identify = lambda f: os.path.splitext("/".join(f.rsplit('/')[-3:]))[0]

# parse ground truth from all videos in all sets
ground_truth = bbb.parse('anno_dollar', 'annotations/*/*/*.txt', identify, occlusion_tag_map=[0.0, 0.25, 0.75])
print(len(ground_truth))
# print(identify)

# filter ground truth by marking boxes with the ignore flag
bbb.filter_ignore(ground_truth, [bbb.ClassLabelFilter(['person']),  # only consider 'person' objects
                                 bbb.HeightRangeFilter((50, float('Inf'))),  # select instances of 50 pixels or higher
                                 bbb.OcclusionAreaFilter(
                                     (0.65, float('Inf')))])  # only include objects that are 65% visible or more

# Collapse every remaining annotation to a single 'person' class label.
for _, annos in ground_truth.items():
    for i in range(len(annos)):
        annos[i].class_label = 'person'

# modify ground truth aspect ratio
bbb.modify(ground_truth, [bbb.AspectRatioModifier(.41, modify_ignores=False)]);

# split and copy to day and night ground truth
# (KAIST test convention: sets 06-08 are daytime, sets 09-11 are nighttime)
ground_truth_day = {key: values for key, values in ground_truth.items() if
                    key.startswith('set06') or key.startswith('set07') or key.startswith('set08')}
ground_truth_night = {key: values for key, values in ground_truth.items() if
                      key.startswith('set09') or key.startswith('set10') or key.startswith('set11')}
def parse_detections(format, input, identify_fun=identify, clslabelmap=None):
    """Parse a detection result file and normalize it for evaluation.

    Applies the same aspect-ratio modification as the ground truth and
    discards detections below the evaluation height threshold.

    Args:
        format (str): brambox parser format name (e.g. 'det_coco').
        input (str): path/glob of the detection file(s) to parse.
        identify_fun (callable): maps a file path to an image id.
        clslabelmap (list, optional): class label map; defaults to ['person'].

    Returns:
        dict: image id -> list of detection boxes.
    """
    # Bug fix: avoid a shared mutable default argument; None stands in
    # for the previous default of ['person'].
    if clslabelmap is None:
        clslabelmap = ['person']
    dets = bbb.parse(format, input, identify_fun, class_label_map=clslabelmap)
    bbb.modify(dets, [bbb.AspectRatioModifier(.41)])
    bbb.filter_discard(dets, [bbb.HeightRangeFilter((50 / 1.25, float('Inf')))])
    return dets
# Detector name -> parsed detections. Keys double as legend labels.
detections_all = {}

### some state-of-the-art collection from internet (not confirm from all authors).
# detections_all['MSDS'] = parse_detections('det_coco','results/SOTA/MSDS.json')
# detections_all['MSDS_sanitized'] = parse_detections('det_coco','results/SOTA/MSDS_sanitized.json')
# detections_all['IAF'] = parse_detections('det_coco','results/SOTA/IAF.json')
# detections_all['Early fusion'] = parse_detections('det_coco','results/SOTA/early_fusion.json')
# detections_all['Late fusion'] = parse_detections('det_coco','results/SOTA/late_fusion.json')
# detections_all['RCNN thermal'] = parse_detections('det_coco','results/SOTA/KAIST_thermal.json')
# detections_all['RCNN rgb'] = parse_detections('det_coco','results/SOTA/KAIST_rgb.json')
# detections_all['YOLO TLV'] = parse_detections('det_coco','results/SOTA/yolov2_VLT.json')
# detections_all['Bottom-up'] = parse_detections('det_coco','results/SOTA/bottom_up.json')
# detections_all['Ours visible'] = parse_detections('det_coco','results/ours/ours_visible.json')
# detections_all['Ours TC Det'] = parse_detections('det_coco','results/ours/tc_det.json')
# ###TC_CB_V only for compare overall
detections_all['No Conditioning'] = parse_detections('det_coco','results/ours/no_condition.json')
detections_all['TC Det'] = parse_detections('det_coco','results/ours/tc_det.json')
detections_all['TC Res Group + Det'] = parse_detections('det_coco','results/ours/tc_res_group_det.json')
detections_all['TC Res Group'] = parse_detections('det_coco','results/ours/tc_res_group.json')
detections_all['TC Res All'] = parse_detections('det_coco','results/ours/tc_res_all.json')
# detections_all['Ours newest'] = parse_detections('det_coco','results/ours/ours_newest_mix_80_20.json')

# split and copy to day and night detections (same set split as the ground truth)
detections_day = {}
detections_night = {}
for label, detections in detections_all.items():
    detections_day[label] = {key: values for key, values in detections.items() if
                             key.startswith('set06') or key.startswith('set07') or key.startswith('set08')}
    detections_night[label] = {key: values for key, values in detections.items() if
                               key.startswith('set09') or key.startswith('set10') or key.startswith('set11')}

# Subset of detections_all keys actually drawn by generate_curves().
detectors_to_plot = [
    # ### Among Ours
    'No Conditioning',
    'TC Res Group',
    'TC Det',
    'TC Res All',
    'TC Res Group + Det',
    # 'Ours newest',
    ### this is compare SOTA, comment out this if (and comment above) if you want to see compare with the state-of-the-art
    # 'Ours visible',
    # 'Ours TC Det',
    # 'Bottom-up',
    # 'MSDS',
    # 'MSDS_sanitized',
    # 'IAF',
    # 'Early fusion',
    # 'Late fusion',
    # 'RCNN thermal',
    # 'RCNN rgb',
    # 'YOLO TLV',
]
def lamr(miss_rate, fppi, num_of_samples=9):
    """ Compute the log average miss-rate from a given MR-FPPI curve.

    The log average miss-rate is defined as the average of a number of evenly spaced log miss-rate samples
    on the :math:`{log}(FPPI)` axis within the range :math:`[10^{-2}, 10^{0}]`

    Args:
        miss_rate (list): miss-rate values
        fppi (list): FPPI values
        num_of_samples (int, optional): Number of samples to take from the curve to measure the average precision; Default **9**

    Returns:
        Number: log average miss-rate
    """
    sample_points = np.logspace(-2., 0., num_of_samples)
    # Linear interpolation of the MR-FPPI curve; below-range samples clamp to
    # a miss rate of 1, above-range samples to 0.
    curve = scipy.interpolate.interp1d(np.array(fppi), np.array(miss_rate),
                                       fill_value=(1., 0.), bounds_error=False)
    sampled = curve(sample_points)
    # Replace non-positive samples (log undefined) with their predecessor.
    for idx in range(len(sampled)):
        if sampled[idx] <= 0:
            sampled[idx] = sampled[idx - 1]
    # Geometric mean of the sampled miss rates.
    return np.exp(np.log(sampled).mean())
def generate_curves(ground_truth, results, pr=True, title="", filename="", overlap=0.5, only_plot=None,
                    linewidth=2, figsize=(8, 6), legendloc=3):
    """Evaluate every detector in `results` and plot PR or MR-FPPI curves.

    Args:
        ground_truth (dict): image id -> ground-truth boxes.
        results (dict): detector label -> detections dict.
        pr (bool): True for precision/recall (AP score), False for
            miss-rate/FPPI (log-average miss rate, lower is better).
        title (str): figure title.
        filename (str): if non-empty, the figure is saved as `<filename>.png`.
        overlap (float): IoU threshold for matching.
        only_plot (list|None): labels to draw; None draws everything.

    Returns:
        tuple: ({label: score}, current pyplot figure manager).
    """
    curves = []
    scores = {}
    # colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    # colors = ['#1919ff', '#ff7f0e', '#ff1919', '#ff19ff', '#19ff19', '#19ff19']
    colors = ['#1919ff', '#ff7f0e', '#ff1919', '#ff19ff', '#19ff19']
    i = 0
    linestyles = ['-','-','-','--', '--', '--', '-.', ':']
    for label, detections in results.items():
        if pr:
            ys, xs = bbb.pr(detections, ground_truth, overlap)
            score = round(bbb.ap(ys, xs) * 100, 2)
        else:
            ys, xs = bbb.mr_fppi(detections, ground_truth, overlap)
            score = round(lamr(ys, xs) * 100, 2)
        # Color/linestyle advance only for curves that will be drawn.
        color = colors[i % len(colors)]
        linestyle = linestyles[i % len(linestyles)]
        if only_plot is None or label in only_plot:
            i += 1
            curves += [(label, ys, xs, score, color, linestyle)]
        # print(score)
        # score = round(score,2)
        # print(score)
        scores[label] = score

    if pr:
        # sort from highest ap to lowest
        sorted_curves = sorted(curves, key=lambda curve: curve[3], reverse=True)
    else:
        # sort from lowest to highest
        sorted_curves = sorted(curves, key=lambda curve: curve[3])

    fig, ax = plt.subplots(figsize=figsize)
    for label, ys, xs, score, color, linestyle in sorted_curves:
        # skip curves not mensioned in only_plot
        if only_plot is not None and label not in only_plot:
            continue
        if pr:
            plt.plot(xs, ys, color=color, linestyle=linestyle, label=f"{score:.2f}% {label}", linewidth=linewidth)
        else:
            # log-log axes for the MR-FPPI representation
            plt.loglog(xs, ys, color=color, linestyle=linestyle, label=f"{score:.2f}% {label}", linewidth=linewidth)
    plt.legend(loc=legendloc)
    plt.gcf().suptitle(title, weight='bold',x=0.5,y=0.95)
    if pr:
        plt.grid(which='major')
        plt.gca().set_ylabel('Precision')
        plt.gca().set_xlabel('Recall')
        plt.gca().set_xlim([0, 1])
        plt.gca().set_ylim([0, 1])
        plt.tight_layout(pad=0)
    else:
        # modify the y axis a bit
        from matplotlib.ticker import FormatStrFormatter, LogLocator
        subs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.4, 8.0]  # ticks to show per decade
        ax.yaxis.set_minor_locator(LogLocator(subs=subs))
        ax.yaxis.set_minor_formatter(FormatStrFormatter("%.2f"))
        ax.yaxis.grid(which='minor')
        ax.xaxis.grid(which='major')
        plt.setp(ax.get_ymajorticklabels(), visible=False)  # disable major labels
        plt.gca().set_ylabel('Miss rate')
        plt.gca().set_xlabel('FPPI')
        plt.gca().set_ylim([0.1, 1])
        # NOTE(review): the two lines below duplicate the label calls above —
        # harmless (idempotent) but could be removed.
        plt.gca().set_ylabel('Miss rate')
        plt.gca().set_xlabel('FPPI')
        plt.tight_layout(pad=0)
        # plt.gca().set_xlim([0, 10])
    if filename:
        # plt.savefig(filename+'.pdf', format='pdf', bbox_inches = 'tight',pad_inches=0, transparent=True)
        plt.savefig(filename+'.png', format='png')
    return scores,plt.get_current_fig_manager()
# Produce the three miss-rate plots (all / day / night); the AP variants are
# kept commented out for reference.
#scores_all_ap,curves_all_ap = generate_curves(ground_truth, detections_all, True, title="Day and night time",
#                                               filename="all_precision", only_plot=detectors_to_plot, figsize=(6,4))
scores_all_lamr,curves_all_mr = generate_curves(ground_truth, detections_all, False, title="Day and night time",
                                                filename="all_missrate", only_plot=detectors_to_plot, figsize=(8,6))
#scores_day_ap,curves_day_ap = generate_curves(ground_truth_day, detections_day, True, title="Day time",
#                                               filename="day_precision", only_plot=detectors_to_plot, figsize=(6,4))
scores_day_lamr,curves_day_mr = generate_curves(ground_truth_day, detections_day, False, title="Day time",
                                                filename="day_missrate", only_plot=detectors_to_plot, figsize=(8,6))
#scores_night_ap,curves_night_ap = generate_curves(ground_truth_night, detections_night, True, title="Night time",
#                                                   filename="night_precision", only_plot=detectors_to_plot, figsize=(6,4))
scores_night_lamr,curves_night_mr = generate_curves(ground_truth_night, detections_night, False, title="Night time",
                                                    filename="night_missrate", only_plot=detectors_to_plot, figsize=(8,6), legendloc='lower left')
# plt.show()

# ###this is for ablation studies on TC-Det for LaTeX
# print('Thermal \t& %.2f \t& %.2f \t& %.2f \t \\\\'%(scores_all_lamr['Thermal'],scores_day_lamr['Thermal'],scores_night_lamr['Thermal']))
# print('Fine-tune \t& %.2f \t& %.2f \t& %.2f \t \\\\'%(scores_all_lamr['Finetuning'],scores_day_lamr['Finetuning'],scores_night_lamr['Finetuning']))
# print('TC-Det-Y \t& %.2f \t& %.2f \t& %.2f \t \\\\'%(scores_all_lamr['TC_Det_Y'],scores_day_lamr['TC_Det_Y'],scores_night_lamr['TC_Det_Y']))
# print('TC-Det-V \t& %.2f \t& %.2f \t& %.2f \t \\\\'%(scores_all_lamr['TC_Det_V'],scores_day_lamr['TC_Det_V'],scores_night_lamr['TC_Det_V']))
# print('TC-Det-CV \t& %.2f \t& %.2f \t& %.2f \t \\\\'%(scores_all_lamr['TC_Det_CV'],scores_day_lamr['TC_Det_CV'],scores_night_lamr['TC_Det_CV']))
| 45.741667
| 149
| 0.655493
|
4a09ebf2f57e5bba6bfbb2656275b34e45e4c869
| 1,116
|
py
|
Python
|
examples/callback_buttons.py
|
markelovstyle/vkbottle
|
b31da756a94b42836bcdc5aa88aaa4d868b5e929
|
[
"MIT"
] | null | null | null |
examples/callback_buttons.py
|
markelovstyle/vkbottle
|
b31da756a94b42836bcdc5aa88aaa4d868b5e929
|
[
"MIT"
] | null | null | null |
examples/callback_buttons.py
|
markelovstyle/vkbottle
|
b31da756a94b42836bcdc5aa88aaa4d868b5e929
|
[
"MIT"
] | null | null | null |
# Example of sending and receiving an event after pressing the Callback button
# Documentation: https://vk.cc/aC9JG2
import os
import logging

from vkbottle import Keyboard, Callback, GroupTypes, GroupEventType
from vkbottle.bot import Bot, Message

# Bot token is read from the environment; raises KeyError if TOKEN is unset.
bot = Bot(os.environ["TOKEN"])
logging.basicConfig(level=logging.INFO)

# Persistent (non-one-time) keyboard with one Callback button; the payload
# identifies the command when the button-press event arrives.
KEYBOARD = Keyboard(one_time=False).add(Callback("Callback-кнопка", payload={"cmd": "callback"})).get_json()
@bot.on.private_message(text="/callback")
async def send_callback_button(message: Message):
    """Reply to the '/callback' command with the callback-button keyboard."""
    await message.answer("Лови!", keyboard=KEYBOARD)
@bot.on.raw_event(GroupEventType.MESSAGE_EVENT, dataclass=GroupTypes.MessageEvent)
async def handle_message_event(event: GroupTypes.MessageEvent):
    """Answer a Callback-button press with a snackbar popup."""
    # The event_data parameter accepts three object types
    # In this example we will consider "show_snackbar" type
    await bot.api.messages.send_message_event_answer(
        event_id=event.object.event_id,
        user_id=event.object.user_id,
        peer_id=event.object.peer_id,
        event_data='{"type":"show_snackbar", "text":"Сейчас я исчезну"}'
    )


# Start long polling (blocks forever).
bot.run_forever()
| 31.885714
| 108
| 0.757168
|
4a09ec06be6063fa8ec930e61dbb45ac7f02f8a7
| 1,850
|
py
|
Python
|
active_directory_ldap/komand_active_directory_ldap/connection/schema.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | null | null | null |
active_directory_ldap/komand_active_directory_ldap/connection/schema.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2021-02-23T23:57:37.000Z
|
2021-02-23T23:57:37.000Z
|
active_directory_ldap/komand_active_directory_ldap/connection/schema.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | null | null | null |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json


class Input:
    # Connection input parameter names (keys into the connection schema below).
    HOST = "host"
    PORT = "port"
    USE_SSL = "use_ssl"
    USERNAME_PASSWORD = "username_password"
class ConnectionSchema(komand.Input):
    # JSON schema describing the LDAP connection inputs (host, port, use_ssl,
    # username_password). Generated by the Komand SDK — do not hand-edit.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "host": {
      "type": "string",
      "title": "Host",
      "description": "Server Host, e.g. example.com",
      "order": 1
    },
    "port": {
      "type": "integer",
      "title": "Port",
      "description": "Port, e.g. 389",
      "default": 389,
      "order": 2
    },
    "use_ssl": {
      "type": "boolean",
      "title": "Use SSL",
      "description": "Use SSL?",
      "order": 3
    },
    "username_password": {
      "$ref": "#/definitions/credential_username_password",
      "title": "Username and Password",
      "description": "Username and password",
      "order": 4
    }
  },
  "required": [
    "host",
    "port",
    "use_ssl",
    "username_password"
  ],
  "definitions": {
    "credential_username_password": {
      "id": "credential_username_password",
      "type": "object",
      "title": "Credential: Username and Password",
      "description": "A username and password combination",
      "properties": {
        "password": {
          "type": "string",
          "title": "Password",
          "displayType": "password",
          "description": "The password",
          "format": "password",
          "order": 2
        },
        "username": {
          "type": "string",
          "title": "Username",
          "description": "The username to log in with",
          "order": 1
        }
      },
      "required": [
        "username",
        "password"
      ]
    }
  }
}
    """)

    def __init__(self):
        # NOTE(review): super(self.__class__, ...) is fragile under further
        # subclassing, but is the SDK-generated form — left as is.
        super(self.__class__, self).__init__(self.schema)
| 22.02381
| 59
| 0.510811
|
4a09ec2d2e3a8ffdcff4739418f912dd97ed6a06
| 15,876
|
py
|
Python
|
pytorch/function/train_partnet.py
|
densechen/CloserLook3D
|
3463aad1b5f20fd9cc10f9eb7cc1e781f36d68ea
|
[
"MIT"
] | null | null | null |
pytorch/function/train_partnet.py
|
densechen/CloserLook3D
|
3463aad1b5f20fd9cc10f9eb7cc1e781f36d68ea
|
[
"MIT"
] | null | null | null |
pytorch/function/train_partnet.py
|
densechen/CloserLook3D
|
3463aad1b5f20fd9cc10f9eb7cc1e781f36d68ea
|
[
"MIT"
] | null | null | null |
"""
Distributed training script for part segmentation with PartNet dataset
"""
import argparse
import os
import sys
import time
import json
import random
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
import torch
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
import datasets.data_utils as d_utils
from models import build_multi_part_segmentation
from datasets import PartNetSeg
from utils.util import AverageMeter, partnet_metrics
from utils.lr_scheduler import get_scheduler
from utils.logger import setup_logger
from utils.config import config, update_config
def parse_option():
    """Parse CLI options and merge them into the global training config.

    Returns:
        tuple: (args, config) — the raw argparse namespace and the updated
        global config object from utils.config.
    """
    parser = argparse.ArgumentParser('PartNet part-segmentation training')
    parser.add_argument('--cfg', type=str, required=True, help='config file')
    parser.add_argument('--data_root', type=str, default='data', help='root director of dataset')
    parser.add_argument('--num_workers', type=int, default=4, help='num of workers to use')
    parser.add_argument('--batch_size', type=int, help='batch_size')
    parser.add_argument('--base_learning_rate', type=float, help='base learning rate')
    parser.add_argument('--weight_decay', type=float, help='weight_decay')
    parser.add_argument('--epochs', type=int, help='number of training epochs')
    parser.add_argument('--start_epoch', type=int, help='used for resume')
    parser.add_argument('--grid_clip_norm', type=float, help='grid_clip_norm')

    # io
    parser.add_argument('--load_path', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--print_freq', type=int, default=10, help='print frequency')
    parser.add_argument('--save_freq', type=int, default=10, help='save frequency')
    parser.add_argument('--val_freq', type=int, default=10, help='val frequency')
    parser.add_argument('--log_dir', type=str, default='log', help='log dir [default: log]')

    # misc
    parser.add_argument("--rng_seed", type=int, default=0, help='manual seed')

    args, unparsed = parser.parse_known_args()

    # Load the YAML config first, then layer the CLI values on top of it.
    update_config(args.cfg)

    config.data_root = args.data_root
    config.num_workers = args.num_workers
    config.load_path = args.load_path
    config.print_freq = args.print_freq
    config.save_freq = args.save_freq
    config.val_freq = args.val_freq
    config.rng_seed = args.rng_seed

    # Log directory is derived from the config file stem plus a timestamp.
    cfg_stem = args.cfg.split('.')[-2].split('/')[-1]
    config.log_dir = os.path.join(args.log_dir, 'partnet', f'{cfg_stem}_{int(time.time())}')

    # Optional hyper-parameter overrides: applied only when given a truthy
    # value on the command line, otherwise the config file value stands.
    for opt_name in ('batch_size', 'base_learning_rate', 'weight_decay',
                     'epochs', 'start_epoch', 'grid_clip_norm'):
        value = getattr(args, opt_name)
        if value:
            setattr(config, opt_name, value)

    print(args)
    print(config)

    # Seed every RNG source for reproducibility.
    torch.manual_seed(args.rng_seed)
    torch.cuda.manual_seed_all(args.rng_seed)
    random.seed(args.rng_seed)
    np.random.seed(args.rng_seed)

    return args, config
def get_loader(args):
    """Build train/val/test DataLoaders for PartNet part segmentation.

    Args:
        args: object exposing data_root, batch_size and num_workers
            (the global config is used for transform/feature settings).

    Returns:
        tuple: (train_loader, val_loader, test_loader).
    """
    # Training data is augmented with scale + jitter; val/test only tensorize.
    train_transforms = transforms.Compose([
        d_utils.PointcloudToTensor(),
        d_utils.PointcloudScaleAndJitter(scale_low=config.scale_low, scale_high=config.scale_high,
                                         std=config.noise_std, clip=config.noise_clip),
    ])
    test_transforms = transforms.Compose([
        d_utils.PointcloudToTensor()
    ])

    def _dataset(split, tfm):
        # One PartNetSeg dataset per split, sharing the same feature config.
        return PartNetSeg(input_features_dim=config.input_features_dim,
                          data_root=args.data_root, transforms=tfm,
                          split=split)

    def _loader(dataset, shuffle, drop_last):
        # Shared DataLoader settings; only shuffle/drop_last vary per split.
        return torch.utils.data.DataLoader(dataset,
                                           batch_size=args.batch_size,
                                           shuffle=shuffle,
                                           num_workers=args.num_workers,
                                           pin_memory=True,
                                           drop_last=drop_last)

    train_loader = _loader(_dataset('train', train_transforms), True, True)
    val_loader = _loader(_dataset('val', test_transforms), False, False)
    test_loader = _loader(_dataset('test', test_transforms), False, False)

    return train_loader, val_loader, test_loader
def load_checkpoint(config, model, optimizer, scheduler):
    """Restore model/optimizer/scheduler state from config.load_path.

    Also sets config.start_epoch to resume right after the saved epoch.
    """
    logger.info("=> loading checkpoint '{}'".format(config.load_path))

    ckpt = torch.load(config.load_path, map_location='cpu')
    config.start_epoch = ckpt['epoch'] + 1
    model.load_state_dict(ckpt['model'])
    optimizer.load_state_dict(ckpt['optimizer'])
    scheduler.load_state_dict(ckpt['scheduler'])

    logger.info("=> loaded successfully '{}' (epoch {})".format(config.load_path, ckpt['epoch']))

    # Drop the checkpoint tensors immediately to free GPU/host memory.
    del ckpt
    torch.cuda.empty_cache()
def save_checkpoint(config, epoch, model, optimizer, scheduler):
    """Persist the full training state.

    Always overwrites 'current.pth'; additionally keeps a per-epoch snapshot
    every config.save_freq epochs.
    """
    logger.info('==> Saving...')
    state = {
        'config': config,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
        'epoch': epoch,
    }
    torch.save(state, os.path.join(config.log_dir, 'current.pth'))
    if epoch % config.save_freq == 0:
        snapshot_path = os.path.join(config.log_dir, f'ckpt_epoch_{epoch}.pth')
        torch.save(state, snapshot_path)
        logger.info("Saved in {}".format(snapshot_path))
def main(config):
    """End-to-end training driver: data, model, optimizer, epochs, logging."""
    train_loader, val_loader, test_loader = get_loader(config)
    n_data = len(train_loader.dataset)
    logger.info(f"length of training dataset: {n_data}")
    n_data = len(val_loader.dataset)
    logger.info(f"length of validation dataset: {n_data}")
    n_data = len(test_loader.dataset)
    logger.info(f"length of testing dataset: {n_data}")

    model, criterion = build_multi_part_segmentation(config)
    logger.info(str(model))
    model.cuda()
    criterion.cuda()

    # Optimizer selection from the config; note 'adamW' is case-sensitive.
    if config.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr= config.base_learning_rate,
                                    momentum=config.momentum,
                                    weight_decay=config.weight_decay)
    elif config.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=config.base_learning_rate,
                                     weight_decay=config.weight_decay)
    elif config.optimizer == 'adamW':
        optimizer = torch.optim.AdamW(model.parameters(),
                                      lr=config.base_learning_rate,
                                      weight_decay=config.weight_decay)
    else:
        raise NotImplementedError(f"Optimizer {config.optimizer} not supported")

    scheduler = get_scheduler(optimizer, len(train_loader), config)

    # optionally resume from a checkpoint (and sanity-check it by validating)
    if config.load_path:
        assert os.path.isfile(config.load_path)
        load_checkpoint(config, model, optimizer, scheduler)
        logger.info("==> checking loaded ckpt")
        validate('resume', 'val', val_loader, model, criterion, config)
        validate('resume', 'test', test_loader, model, criterion, config)

    # tensorboard
    summary_writer = SummaryWriter(log_dir=config.log_dir)

    # routine: full voting validation every val_freq epochs, single-vote otherwise
    for epoch in range(config.start_epoch, config.epochs + 1):
        tic = time.time()
        loss = train(epoch, train_loader, model, criterion, optimizer, scheduler, config)
        logger.info('epoch {}, total time {:.2f}, lr {:.5f}'.format(epoch,
                                                                    (time.time() - tic),
                                                                    optimizer.param_groups[0]['lr']))
        if epoch % config.val_freq == 0:
            validate(epoch, 'val', val_loader, model, criterion, config)
            validate(epoch, 'test', test_loader, model, criterion, config)
        else:
            validate(epoch, 'val', val_loader, model, criterion, config, num_votes=1)
            validate(epoch, 'test', test_loader, model, criterion, config, num_votes=1)
        save_checkpoint(config, epoch, model, optimizer, scheduler)
        if summary_writer is not None:
            # tensorboard logger
            summary_writer.add_scalar('ins_loss', loss, epoch)
            summary_writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
def train(epoch, train_loader, model, criterion, optimizer, scheduler, config):
    """
    One epoch training.

    Returns:
        float: average instance loss over the epoch.
    """
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()

    end = time.time()
    for idx, (points, mask, features, points_labels, shape_labels) in enumerate(train_loader):
        data_time.update(time.time() - end)
        bsz = points.size(0)
        # forward
        points = points.cuda(non_blocking=True)
        mask = mask.cuda(non_blocking=True)
        features = features.cuda(non_blocking=True)
        points_labels = points_labels.cuda(non_blocking=True)
        shape_labels = shape_labels.cuda(non_blocking=True)

        pred = model(points, mask, features)
        loss = criterion(pred, points_labels, shape_labels)

        optimizer.zero_grad()
        loss.backward()
        # Optional gradient clipping (disabled when grid_clip_norm <= 0).
        if config.grid_clip_norm > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), config.grid_clip_norm)
        optimizer.step()
        # The LR scheduler is stepped per-iteration, not per-epoch.
        scheduler.step()

        # update meters
        loss_meter.update(loss.item(), bsz)
        batch_time.update(time.time() - end)
        end = time.time()

        # print info
        if idx % config.print_freq == 0:
            logger.info(f'Train: [{epoch}/{config.epochs + 1}][{idx}/{len(train_loader)}]\t'
                        f'T {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        f'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
                        f'loss {loss_meter.val:.3f} ({loss_meter.avg:.3f})')

    return loss_meter.avg
def validate(epoch, split, test_loader, model, criterion, config, num_votes=10):
    """
    One epoch validating with multi-vote test-time augmentation.

    Vote 0 runs on the unaugmented data; subsequent votes re-run the loader
    with scale/jitter augmentation and the per-sample logits are folded into
    a running mean. Metrics are computed and logged after every vote.

    Returns:
        tuple: (mmsIoU, mmpIoU) from the final vote.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()

    model.eval()
    with torch.no_grad():
        end = time.time()
        vote_logits = None
        vote_points_labels = None
        vote_shape_labels = None
        TS = d_utils.BatchPointcloudScaleAndJitter(scale_low=config.scale_low,
                                                   scale_high=config.scale_high,
                                                   std=config.noise_std,
                                                   clip=config.noise_clip)
        for v in range(num_votes):
            all_logits = []
            all_points_labels = []
            all_shape_labels = []
            for idx, (points, mask, features, points_labels, shape_labels) in enumerate(test_loader):
                # augment for voting (features must be rebuilt from the
                # jittered points so they stay consistent)
                if v > 0:
                    points = TS(points)
                    if config.input_features_dim == 3:
                        features = points
                        features = features.transpose(1, 2).contiguous()
                    elif config.input_features_dim == 4:
                        # constant-one channel prepended to xyz
                        features = torch.ones(size=(points.shape[0], points.shape[1], 1), dtype=torch.float32)
                        features = torch.cat([features, points], -1)
                        features = features.transpose(1, 2).contiguous()
                    else:
                        raise NotImplementedError(
                            f"input_features_dim {config.input_features_dim} in voting not supported")
                # forward
                points = points.cuda(non_blocking=True)
                mask = mask.cuda(non_blocking=True)
                features = features.cuda(non_blocking=True)
                points_labels = points_labels.cuda(non_blocking=True)
                shape_labels = shape_labels.cuda(non_blocking=True)

                pred = model(points, mask, features)
                loss = criterion(pred, points_labels, shape_labels)
                losses.update(loss.item(), points.size(0))

                # collect per-sample logits for the shape's own prediction head
                bsz = points.shape[0]
                for ib in range(bsz):
                    sl = shape_labels[ib]
                    logits = pred[sl][ib]
                    pl = points_labels[ib]
                    all_logits.append(logits.cpu().numpy())
                    all_points_labels.append(pl.cpu().numpy())
                    all_shape_labels.append(sl.cpu().numpy())

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
                if idx % config.print_freq == 0:
                    logger.info(
                        f'Test: [{idx}/{len(test_loader)}]\t'
                        f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        f'Loss {losses.val:.4f} ({losses.avg:.4f})')
            if vote_logits is None:
                vote_logits = all_logits
                vote_points_labels = all_points_labels
                vote_shape_labels = all_shape_labels
            else:
                # incremental (running) mean of logits across votes
                for i in range(len(vote_logits)):
                    vote_logits[i] = vote_logits[i] + (all_logits[i] - vote_logits[i]) / (v + 1)

            msIoU, mpIoU, mmsIoU, mmpIoU = partnet_metrics(config.num_classes, config.num_parts,
                                                           vote_shape_labels,
                                                           vote_logits,
                                                           vote_points_labels)
            logger.info(f'E{epoch} V{v} {split} * mmsIoU {mmsIoU:.3%} mmpIoU {mmpIoU:.3%}')
            logger.info(f'E{epoch} V{v} {split} * msIoU {msIoU}')
            logger.info(f'E{epoch} V{v} {split} * mpIoU {mpIoU}')

    return mmsIoU, mmpIoU
if __name__ == "__main__":
opt, config = parse_option()
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
os.makedirs(opt.log_dir, exist_ok=True)
os.environ["JOB_LOG_DIR"] = config.log_dir
logger = setup_logger(output=config.log_dir, distributed_rank=0, name="partnet")
path = os.path.join(config.log_dir, "config.json")
with open(path, 'w') as f:
json.dump(vars(opt), f, indent=2)
json.dump(vars(config), f, indent=2)
os.system('cp %s %s' % (opt.cfg, config.log_dir))
logger.info("Full config saved to {}".format(path))
main(config)
| 41.669291
| 110
| 0.587176
|
4a09ece810422d1f75a6151d247458ea183962c9
| 120
|
py
|
Python
|
examples/djangoapp/urls.py
|
gelnior/couchdbkit
|
8277d6ffd00553ae0b0b2368636460d40f8d8225
|
[
"MIT"
] | 51
|
2015-04-01T14:53:46.000Z
|
2022-03-16T09:16:10.000Z
|
examples/djangoapp/urls.py
|
gelnior/couchdbkit
|
8277d6ffd00553ae0b0b2368636460d40f8d8225
|
[
"MIT"
] | 17
|
2015-02-04T11:25:02.000Z
|
2021-07-10T10:17:53.000Z
|
examples/djangoapp/urls.py
|
gelnior/couchdbkit
|
8277d6ffd00553ae0b0b2368636460d40f8d8225
|
[
"MIT"
] | 40
|
2015-01-13T23:38:01.000Z
|
2022-02-26T22:08:01.000Z
|
# URL routes for the example app.
# NOTE(review): uses the legacy patterns()/string-view API from
# django.conf.urls.defaults, which only exists on Django < 1.6.
from django.conf.urls.defaults import *

urlpatterns = patterns('',
    # Root URL -> home view of the greeting app.
    url(r'^$', 'djangoapp.greeting.views.home'),
)
| 17.142857
| 48
| 0.675
|
4a09ed3fca752841c743ad16cdb77ab603b1c442
| 2,378
|
py
|
Python
|
Cuatrimestres/1/TeoriadeControlI/python/superficie3d.py
|
chelizalde/DCA
|
34fd4d500117a9c0a75b979b8b0f121c1992b9dc
|
[
"MIT"
] | null | null | null |
Cuatrimestres/1/TeoriadeControlI/python/superficie3d.py
|
chelizalde/DCA
|
34fd4d500117a9c0a75b979b8b0f121c1992b9dc
|
[
"MIT"
] | null | null | null |
Cuatrimestres/1/TeoriadeControlI/python/superficie3d.py
|
chelizalde/DCA
|
34fd4d500117a9c0a75b979b8b0f121c1992b9dc
|
[
"MIT"
] | 1
|
2021-03-20T12:44:13.000Z
|
2021-03-20T12:44:13.000Z
|
#!/usr/bin/env python
# Illustration figure: plots the surface f1(v) = (r**2 - 1)**2 over a unit
# disc centred at (1, 1) and annotates a base point v0, a perturbation
# direction eta, and the perturbed point v0 + alpha*eta.
from pylab import *
from numpy import exp, abs, meshgrid, linspace, array, sin, cos, pi, sqrt
from mpl_toolkits.mplot3d import Axes3D

# Base point v0 and perturbation direction eta (z components unused in 2D part).
v0 = array([1, 1, 0])
eta = array([0.2, -0.2, 0])

# Polar parameterisation of the unit disc, translated so it is centred at (1, 1).
r = linspace(0, 1, 300)
p = linspace(0, 2*pi, 300)
R, P = meshgrid(r, p)
f = lambda r, p: r*cos(p) + 1
g = lambda r, p: r*sin(p) + 1
# Height of the plotted surface as a function of the radius only.
h = lambda r: (r**2 - 1)**2
X, Y, Z = f(R, P), g(R, P), h(R)

# Translucent 3D surface.
a3d = gca(projection='3d')
a3d.plot_surface(X, Y, Z, alpha=0.15)

# Hand-drawn coordinate axes from the origin o with labels v1, v2, f1(v).
o = array([0, 0, 0])
ex = array([1.75, 0, 0]) + o
ey = array([0, 1.75, 0]) + o
ez = array([0, 0, 1.0]) + o
a3d.text(ex[0] + 0.1, ex[1], ex[2], r'$v_1$', fontsize=20)
a3d.text(ey[0] + 0.05, ey[1], ey[2], r'$v_2$', fontsize=20)
a3d.text(ez[0], ez[1] - 0.05, ez[2] + 0.1, r'$f_1(v)$', fontsize=20)
xs, ys, zs = zip(o, ex)
plot(xs, ys, zs)
xs, ys, zs = zip(o, ey)
plot(xs, ys, zs)
xs, ys, zs = zip(o, ez)
plot(xs, ys, zs)

# The two marked points: (v0, f1(v0)) and (v0+eta, f1(v0+eta)).
# NOTE(review): h(0) == 1, so v0[2] + 1 is presumably f1(v0) — confirm.
xs, ys, zs = [v0[0], v0[0] + eta[0]], [v0[1], v0[1] + eta[1]], [v0[2] + 1, h(sqrt(eta[0]**2 + eta[1]**2+ eta[2]**2))]
a3d.scatter(xs, ys, zs)

# Dashed projection lines from each marked point down to the coordinate axes.
xs1, ys1, zs1 = zip([0, v0[1], 0], v0, [v0[0], 0, 0])
plot(xs1, ys1, zs1, "--")
plot([xs[0], xs1[1]], [ys[0], ys1[1]], [zs[0], zs1[1]], "--")
xs1, ys1, zs1 = zip([0, v0[1] + eta[1], 0], v0 + eta, [v0[0] + eta[0], 0, 0])
plot(xs1, ys1, zs1, "--")
plot([xs[1], xs1[1]], [ys[1], ys1[1]], [zs[1], zs1[1]], "--")

# Arrows showing the direction eta at the origin and at the perturbed point.
a3d.quiver( o[0] + eta[0], o[0] + eta[1], o[0] + eta[2], 1, -1, 0, linewidth=2, alpha=0.3, length=0.3)
a3d.quiver(v0[0] + eta[0], v0[1] + eta[1], v0[2] + eta[2], 1, -1, 0, linewidth=2, alpha=0.3, length=0.3)
a3d.text( o[0] + eta[0] + 0.05, o[0] + eta[1], o[0] + eta[2], r'$\eta$', fontsize=20)
a3d.text(v0[0] + eta[0] + 0.1, v0[1] + eta[1] - 0.05, v0[2] + eta[2], r'$v_0 + \alpha \eta$', fontsize=20)
a3d.text(v0[0] - 0.05, v0[1] + 0.1, v0[2] , r'$v_0$', fontsize=20)
a3d.text(v0[0], v0[1] + 0.05, v0[2] + 1, r'$f_1(v_0)$', fontsize=20)
a3d.text(v0[0] + eta[0], v0[1] + eta[1] + 0.05, zs[1], r'$f_1(v_0 + \alpha \eta)$', fontsize=20)

# Strip all chrome: grid, ticks, labels, and the default axes box.
grid(False)
a = gca()
xticks([])
yticks([])
a.set_zticks([])
a.set_zlim3d([0, 1])
a.set_ylim3d([0, 2])
a.set_xlim3d([0, 2])
a.set_xticklabels([])
a.set_yticklabels([])
a.set_zticklabels([])
a.set_axis_off()
# The figure is saved in the same folder.
savefig("superficie3d.pdf", bbox_inches='tight', pad_inches=0, transparent="True")
| 32.135135
| 117
| 0.530698
|
4a09ef96e341062ad13c68763d68eb151bf9f569
| 8,455
|
py
|
Python
|
sdk/marketplaceordering/azure-mgmt-marketplaceordering/azure/mgmt/marketplaceordering/models/_models_py3.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 2
|
2019-08-23T21:14:00.000Z
|
2021-09-07T18:32:34.000Z
|
sdk/marketplaceordering/azure-mgmt-marketplaceordering/azure/mgmt/marketplaceordering/models/_models_py3.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 2
|
2021-11-03T06:10:36.000Z
|
2021-12-01T06:29:39.000Z
|
sdk/marketplaceordering/azure-mgmt-marketplaceordering/azure/mgmt/marketplaceordering/models/_models_py3.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 1
|
2021-05-19T02:55:10.000Z
|
2021-05-19T02:55:10.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import List, Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Resource(msrest.serialization.Model):
"""ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class AgreementTerms(Resource):
    """Terms properties for provided Publisher/Offer/Plan tuple.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param publisher: Publisher identifier string of image being deployed.
    :type publisher: str
    :param product: Offer identifier string of image being deployed.
    :type product: str
    :param plan: Plan identifier string of image being deployed.
    :type plan: str
    :param license_text_link: Link to HTML with Microsoft and Publisher terms.
    :type license_text_link: str
    :param privacy_policy_link: Link to the privacy policy of the publisher.
    :type privacy_policy_link: str
    :param retrieve_datetime: Date and time in UTC of when the terms were accepted. This is empty
     if Accepted is false.
    :type retrieve_datetime: str
    :param signature: Terms signature.
    :type signature: str
    :param accepted: If any version of the terms have been accepted, otherwise false.
    :type accepted: bool
    """

    # Inherited ARM identity fields stay read-only (see Resource).
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # 'properties.*' keys tell msrest these fields nest under the ARM
    # "properties" envelope on the wire.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'publisher': {'key': 'properties.publisher', 'type': 'str'},
        'product': {'key': 'properties.product', 'type': 'str'},
        'plan': {'key': 'properties.plan', 'type': 'str'},
        'license_text_link': {'key': 'properties.licenseTextLink', 'type': 'str'},
        'privacy_policy_link': {'key': 'properties.privacyPolicyLink', 'type': 'str'},
        'retrieve_datetime': {'key': 'properties.retrieveDatetime', 'type': 'str'},
        'signature': {'key': 'properties.signature', 'type': 'str'},
        'accepted': {'key': 'properties.accepted', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        publisher: Optional[str] = None,
        product: Optional[str] = None,
        plan: Optional[str] = None,
        license_text_link: Optional[str] = None,
        privacy_policy_link: Optional[str] = None,
        retrieve_datetime: Optional[str] = None,
        signature: Optional[str] = None,
        accepted: Optional[bool] = None,
        **kwargs
    ):
        super(AgreementTerms, self).__init__(**kwargs)
        self.publisher = publisher
        self.product = product
        self.plan = plan
        self.license_text_link = license_text_link
        self.privacy_policy_link = privacy_policy_link
        self.retrieve_datetime = retrieve_datetime
        self.signature = signature
        self.accepted = accepted
class ErrorResponse(msrest.serialization.Model):
    """Error response indicates Microsoft.MarketplaceOrdering service is not able to process the incoming request. The reason is provided in the error message.

    :param error: The details of the error.
    :type error: ~azure.mgmt.marketplaceordering.models.ErrorResponseError
    """

    # Wire-format mapping used by msrest (de)serialization.
    _attribute_map = {
        'error': {'key': 'error', 'type': 'ErrorResponseError'},
    }

    def __init__(
        self,
        *,
        error: Optional["ErrorResponseError"] = None,
        **kwargs
    ):
        super(ErrorResponse, self).__init__(**kwargs)
        self.error = error
class ErrorResponseError(msrest.serialization.Model):
    """The details of the error.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Error code.
    :vartype code: str
    :ivar message: Error message indicating why the operation failed.
    :vartype message: str
    """

    # Both fields are server-populated, hence read-only.
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ErrorResponseError, self).__init__(**kwargs)
        # Filled in from the service response during deserialization.
        self.code = None
        self.message = None
class Operation(msrest.serialization.Model):
    """Microsoft.MarketplaceOrdering REST API operation.

    :param name: Operation name: {provider}/{resource}/{operation}.
    :type name: str
    :param display: The object that represents the operation.
    :type display: ~azure.mgmt.marketplaceordering.models.OperationDisplay
    """

    # Wire-format mapping used by msrest (de)serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        display: Optional["OperationDisplay"] = None,
        **kwargs
    ):
        super(Operation, self).__init__(**kwargs)
        self.name = name
        self.display = display
class OperationDisplay(msrest.serialization.Model):
    """The object that represents the operation.

    :param provider: Service provider: Microsoft.MarketplaceOrdering.
    :type provider: str
    :param resource: Resource on which the operation is performed: Agreement, virtualmachine, etc.
    :type resource: str
    :param operation: Operation type: Get Agreement, Sign Agreement, Cancel Agreement etc.
    :type operation: str
    """

    # Wire-format mapping used by msrest (de)serialization.
    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        provider: Optional[str] = None,
        resource: Optional[str] = None,
        operation: Optional[str] = None,
        **kwargs
    ):
        super(OperationDisplay, self).__init__(**kwargs)
        self.provider = provider
        self.resource = resource
        self.operation = operation
class OperationListResult(msrest.serialization.Model):
    """Result of the request to list MarketplaceOrdering operations. It contains a list of operations and a URL link to get the next set of results.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: List of Microsoft.MarketplaceOrdering operations supported by the
     Microsoft.MarketplaceOrdering resource provider.
    :type value: list[~azure.mgmt.marketplaceordering.models.Operation]
    :ivar next_link: URL to get the next set of operation list results if there are any.
    :vartype next_link: str
    """

    # Pagination link is server-populated, hence read-only.
    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["Operation"]] = None,
        **kwargs
    ):
        super(OperationListResult, self).__init__(**kwargs)
        self.value = value
        # Set by the service when more result pages exist.
        self.next_link = None
| 32.270992
| 159
| 0.613838
|
4a09f0cabf541c3ee4479837273728e0026d672a
| 113
|
py
|
Python
|
gammapy/makers/tests/test_core.py
|
Rishank2610/gammapy
|
3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76
|
[
"BSD-3-Clause"
] | 155
|
2015-02-25T12:38:02.000Z
|
2022-03-13T17:54:30.000Z
|
gammapy/makers/tests/test_core.py
|
Rishank2610/gammapy
|
3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76
|
[
"BSD-3-Clause"
] | 3,131
|
2015-01-06T15:36:23.000Z
|
2022-03-31T17:30:57.000Z
|
gammapy/makers/tests/test_core.py
|
Rishank2610/gammapy
|
3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76
|
[
"BSD-3-Clause"
] | 158
|
2015-03-16T20:36:44.000Z
|
2022-03-30T16:05:37.000Z
|
from gammapy.makers import MAKER_REGISTRY
def test_maker_registry():
    """The registry's string representation must mention the Maker base class."""
    registry_text = str(MAKER_REGISTRY)
    assert "Maker" in registry_text
| 18.833333
| 41
| 0.787611
|
4a09f1f27ddc875c4a4559827eff48a95ce172e6
| 459
|
py
|
Python
|
homework/day01/login.py
|
kellysan/oldboy
|
dc9aaa53b1af6007742cb09fb26b712a11e43bda
|
[
"Apache-2.0"
] | null | null | null |
homework/day01/login.py
|
kellysan/oldboy
|
dc9aaa53b1af6007742cb09fb26b712a11e43bda
|
[
"Apache-2.0"
] | 1
|
2019-05-21T22:53:13.000Z
|
2019-05-21T22:53:13.000Z
|
homework/day01/login.py
|
kellysan/oldboy
|
dc9aaa53b1af6007742cb09fb26b712a11e43bda
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# File Name: login
# Description :
# Author : SanYapeng
# date: 2019-04-25
# Change Activity: 2019-04-25:
import getpass
# Simple 3-attempt console login.
count = 0          # number of failed attempts so far
MAX_TRIES = 3

while count < MAX_TRIES:
    user_name = input("请输入您的姓名")
    # getpass hides the typed password instead of echoing it (the module was
    # imported but previously unused).
    user_password = getpass.getpass("请输入您的密码")
    # input()/getpass() return strings, so the password must be compared to
    # the string "123" — the original compared against the int 123, which
    # could never match and made login impossible.
    if user_name == "sanyapeng" and user_password == "123":
        print("登录成功")
        break          # original kept prompting even after a success
    count += 1
    print("您还剩余 %d" % (MAX_TRIES - count))
| 19.956522
| 57
| 0.540305
|
4a09f22666f0840d806a6edb464d01416a3a5872
| 17,748
|
py
|
Python
|
ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
|
emcvipr/ambari
|
045e0d2ac94b8b81819d2efbfd7e1bddc67a7756
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-05-06T06:24:04.000Z
|
2021-05-06T06:24:04.000Z
|
ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
|
emcvipr/ambari
|
045e0d2ac94b8b81819d2efbfd7e1bddc67a7756
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
|
emcvipr/ambari
|
045e0d2ac94b8b81819d2efbfd7e1bddc67a7756
|
[
"Apache-2.0",
"MIT"
] | 3
|
2017-10-31T11:42:31.000Z
|
2021-04-26T07:17:53.000Z
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os
from unittest import TestCase
class TestHDP21StackAdvisor(TestCase):
    def setUp(self):
        """Load the HDP 2.1 stack advisor from the Ambari resource tree and
        instantiate it for the tests.

        The stack advisors are plain ``.py`` resources rather than installed
        packages, so they are loaded with ``imp.load_module``.  The HDP 2.0.6
        advisor is loaded first under the name ``stack_advisor_impl`` and then
        the HDP 2.1 module is loaded under the same name, so that the 2.1
        class resolves against its 2.0.6 base.
        """
        import imp
        self.testDirectory = os.path.dirname(os.path.abspath(__file__))
        # Paths are relative to this test file inside the Ambari source tree.
        stackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/stack_advisor.py')
        hdp206StackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
        hdp21StackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/HDP/2.1/services/stack_advisor.py')
        hdp21StackAdvisorClassName = 'HDP21StackAdvisor'
        with open(stackAdvisorPath, 'rb') as fp:
            imp.load_module('stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
        with open(hdp206StackAdvisorPath, 'rb') as fp:
            imp.load_module('stack_advisor_impl', fp, hdp206StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
        with open(hdp21StackAdvisorPath, 'rb') as fp:
            stack_advisor_impl = imp.load_module('stack_advisor_impl', fp, hdp21StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
        clazz = getattr(stack_advisor_impl, hdp21StackAdvisorClassName)
        # Fresh advisor instance for each test method.
        self.stackAdvisor = clazz()
def test_recommendOozieConfigurations_noFalconServer(self):
configurations = {}
clusterData = {
"components" : []
}
expected = {
"oozie-site": {"properties":{}},
"oozie-env": {"properties":{}}
}
self.stackAdvisor.recommendOozieConfigurations(configurations, clusterData, {"configurations":{}}, None)
self.assertEquals(configurations, expected)
def test_recommendOozieConfigurations_withFalconServer(self):
configurations = {
"falcon-env" : {
"properties" : {
"falcon_user" : "falcon"
}
}
}
services = {
"services": [
{
"StackServices": {
"service_name": "FALCON"
}, "components": []
},],
"configurations": configurations
}
clusterData = {
"components" : ["FALCON_SERVER"]
}
expected = {
"oozie-site": {
"properties": {
"oozie.services.ext": "org.apache.oozie.service.JMSAccessorService," +
"org.apache.oozie.service.PartitionDependencyManagerService," +
"org.apache.oozie.service.HCatAccessorService",
"oozie.service.ProxyUserService.proxyuser.falcon.groups" : "*",
"oozie.service.ProxyUserService.proxyuser.falcon.hosts" : "*"
}
},
"falcon-env" : {
"properties" : {
"falcon_user" : "falcon"
}
},
"oozie-env": {
"properties": {}
}
}
self.stackAdvisor.recommendOozieConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations, expected)
def test_recommendHiveConfigurations_mapMemoryLessThan2048(self):
configurations = {}
clusterData = {
"mapMemory": 567,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 1024
}
expected = {
"hive-site": {
"properties": {
"hive.auto.convert.join.noconditionaltask.size": "718624085",
"hive.tez.java.opts": "-server -Xmx1645m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps",
"hive.tez.container.size": "2056"
}
},
"hive-env": {
"properties": {}
}
}
self.stackAdvisor.recommendHiveConfigurations(configurations, clusterData, {"configurations": {}, "services": []}, None)
self.maxDiff = None
self.assertEquals(configurations, expected)
def test_recommendHiveConfigurations_mapMemoryMoreThan2048(self):
configurations = {}
clusterData = {
"mapMemory": 3000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 1024
}
expected = {
"hive-site": {
"properties": {
"hive.auto.convert.join.noconditionaltask.size": "1048576000",
"hive.tez.java.opts": "-server -Xmx2401m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps",
"hive.tez.container.size": "3000"
}
},
"hive-env": {
"properties": {}
}
}
self.stackAdvisor.recommendHiveConfigurations(configurations, clusterData, {"configurations":{}, "services": []}, None)
self.assertEquals(configurations, expected)
def test_createComponentLayoutRecommendations_mastersIn10nodes(self):
services = json.load(open(os.path.join(self.testDirectory, 'services.json')))
hosts = json.load(open(os.path.join(self.testDirectory, 'hosts.json')))
expected_layout = [
[u'NAMENODE', u'GANGLIA_SERVER', u'ZOOKEEPER_SERVER', u'DRPC_SERVER', u'NIMBUS', u'STORM_REST_API', u'STORM_UI_SERVER', u'MYSQL_SERVER'],
[u'SECONDARY_NAMENODE', u'HISTORYSERVER', u'APP_TIMELINE_SERVER', u'RESOURCEMANAGER', u'ZOOKEEPER_SERVER'],
[u'HIVE_METASTORE', u'HIVE_SERVER', u'WEBHCAT_SERVER', u'HBASE_MASTER', u'OOZIE_SERVER', u'ZOOKEEPER_SERVER', u'FALCON_SERVER']
]
masterComponents = [component['StackServiceComponents']['component_name'] for service in services["services"] for component in service["components"]
if self.stackAdvisor.isMasterComponent(component)]
recommendation = self.stackAdvisor.createComponentLayoutRecommendations(services, hosts)
groups = []
for host_group in recommendation['blueprint']['host_groups']:
components = [component['name'] for component in host_group['components'] if component['name'] in masterComponents]
if len(components) > 0:
groups.append(components)
def sort_nested_lists(list):
result_list = []
for sublist in list:
result_list.append(sorted(sublist))
return sorted(result_list)
self.assertEquals(sort_nested_lists(expected_layout), sort_nested_lists(groups))
def test_recommendHiveConfigurations_jdbcUrl(self):
services = {
"services" : [
{
"StackServices" : {
"service_name" : "HIVE",
},
"components" : [ {
"StackServiceComponents" : {
"component_name" : "HIVE_METASTORE",
"service_name" : "HIVE",
"hostnames" : ["example.com"]
}
}]
}
],
"configurations": {}
}
hosts = json.load(open(os.path.join(self.testDirectory, 'hosts.json')))
clusterData = {
"mapMemory": 3000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
configurations = {
"hive-site": {
"properties": {
"javax.jdo.option.ConnectionDriverName": "",
"ambari.hive.db.schema.name": "hive_name",
"javax.jdo.option.ConnectionURL": ""
}
},
"hive-env": {
"properties": {
"hive_database": "New MySQL Database"
}
}
}
services['configurations'] = configurations
hosts = {
"items": [
{
"Hosts": {
"host_name": "example.com"
}
}
]
}
# new mysql
self.stackAdvisor.recommendHiveConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations['hive-site']['properties']['javax.jdo.option.ConnectionURL'], "jdbc:mysql://example.com/hive_name?createDatabaseIfNotExist=true")
self.assertEquals(configurations['hive-site']['properties']['javax.jdo.option.ConnectionDriverName'], "com.mysql.jdbc.Driver")
# existing Mysql
services['configurations']['hive-env']['properties']['hive_database'] = 'Existing MySQL Database'
self.stackAdvisor.recommendHiveConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations['hive-site']['properties']['javax.jdo.option.ConnectionURL'], "jdbc:mysql://example.com/hive_name")
self.assertEquals(configurations['hive-site']['properties']['javax.jdo.option.ConnectionDriverName'], "com.mysql.jdbc.Driver")
# existing postgres
services['configurations']['hive-env']['properties']['hive_database'] = 'Existing PostgreSQL Database'
self.stackAdvisor.recommendHiveConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations['hive-site']['properties']['javax.jdo.option.ConnectionURL'], "jdbc:postgresql://example.com:5432/hive_name")
self.assertEquals(configurations['hive-site']['properties']['javax.jdo.option.ConnectionDriverName'], "org.postgresql.Driver")
# existing oracle
services['configurations']['hive-env']['properties']['hive_database'] = 'Existing Oracle Database'
self.stackAdvisor.recommendHiveConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations['hive-site']['properties']['javax.jdo.option.ConnectionURL'], "jdbc:oracle:thin:@//example.com:1521/hive_name")
self.assertEquals(configurations['hive-site']['properties']['javax.jdo.option.ConnectionDriverName'], "oracle.jdbc.driver.OracleDriver")
# existing sqla
services['configurations']['hive-env']['properties']['hive_database'] = 'Existing SQL Anywhere Database'
self.stackAdvisor.recommendHiveConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations['hive-site']['properties']['javax.jdo.option.ConnectionURL'], "jdbc:sqlanywhere:host=example.com;database=hive_name")
self.assertEquals(configurations['hive-site']['properties']['javax.jdo.option.ConnectionDriverName'], "sap.jdbc4.sqlanywhere.IDriver")
# existing Mysql / MariaDB
services['configurations']['hive-env']['properties']['hive_database'] = 'Existing MySQL / MariaDB Database'
self.stackAdvisor.recommendHiveConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations['hive-site']['properties']['javax.jdo.option.ConnectionURL'], "jdbc:mysql://example.com/hive_name")
self.assertEquals(configurations['hive-site']['properties']['javax.jdo.option.ConnectionDriverName'], "com.mysql.jdbc.Driver")
def test_recommendHiveConfigurations_containersRamIsLess(self):
configurations = {}
clusterData = {
"mapMemory": 3000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
"hive-site": {
"properties": {
"hive.auto.convert.join.noconditionaltask.size": "268435456",
"hive.tez.java.opts": "-server -Xmx615m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps",
"hive.tez.container.size": "768"
}
},
"hive-env": {
"properties": {}
}
}
self.stackAdvisor.recommendHiveConfigurations(configurations, clusterData, {"configurations":{}, "services": []}, None)
self.assertEquals(configurations, expected)
def test_recommendHbaseConfigurations(self):
servicesList = ["HBASE"]
configurations = {}
components = []
host_item = {
"Hosts" : {
"cpu_count" : 6,
"total_mem" : 50331648,
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
]
}
}
hosts = {
"items" : [host_item for i in range(1, 600)]
}
services = {
"services" : [
],
"configurations": {
"hbase-site": {
"properties": {
"hbase.superuser": "hbase"
}
},
"hbase-env": {
"properties": {
"hbase_user": "hbase123"
}
}
}
}
expected = {
'hbase-site': {
'properties': {
'hbase.superuser': 'hbase123'
}
},
"hbase-env": {
"properties": {
"hbase_master_heapsize": "8192",
"hbase_regionserver_heapsize": "8192",
}
}
}
clusterData = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
self.assertEquals(clusterData['hbaseRam'], 8)
self.stackAdvisor.recommendHbaseConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_recommendHDFSConfigurations(self):
configurations = {
"hadoop-env": {
"properties": {
"hdfs_user": "hdfs"
}
}
}
hosts = {
"items": [
{
"Hosts": {
"disk_info": [{
"size": '8',
"mountpoint": "/"
}]
}
}]}
services = {
"services": [
{
"StackServices": {
"service_name": "HDFS"
}, "components": []
}],
"configurations": configurations,
"ambari-server-properties": {"ambari-server.user":"ambari_user"}
}
clusterData = {
"totalAvailableRam": 2048
}
expected = {
'hadoop-env': {
'properties': {
'namenode_heapsize': '1024',
'namenode_opt_newsize' : '256',
'namenode_opt_maxnewsize' : '256',
'hdfs_user' : "hdfs"
}
},
"core-site": {
"properties": {
"hadoop.proxyuser.hdfs.hosts": "*",
"hadoop.proxyuser.hdfs.groups": "*",
"hadoop.proxyuser.ambari_user.hosts": "*",
"hadoop.proxyuser.ambari_user.groups": "*"
}
},
"hdfs-site": {
"properties": {
'dfs.datanode.data.dir': '/hadoop/hdfs/data',
'dfs.datanode.du.reserved': '1024'
}
}
}
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_validateHDFSConfigurationsEnv(self):
configurations = {}
# 1) ok: namenode_heapsize > recommended
recommendedDefaults = {'namenode_heapsize': '1024',
'namenode_opt_newsize' : '256',
'namenode_opt_maxnewsize' : '256'}
properties = {'namenode_heapsize': '2048',
'namenode_opt_newsize' : '300',
'namenode_opt_maxnewsize' : '300'}
res_expected = []
res = self.stackAdvisor.validateHDFSConfigurationsEnv(properties, recommendedDefaults, configurations, '', '')
self.assertEquals(res, res_expected)
# 2) fail: namenode_heapsize, namenode_opt_maxnewsize < recommended
properties['namenode_heapsize'] = '1022'
properties['namenode_opt_maxnewsize'] = '255'
res_expected = [{'config-type': 'hadoop-env',
'message': 'Value is less than the recommended default of 1024',
'type': 'configuration',
'config-name': 'namenode_heapsize',
'level': 'WARN'},
{'config-name': 'namenode_opt_maxnewsize',
'config-type': 'hadoop-env',
'level': 'WARN',
'message': 'Value is less than the recommended default of 256',
'type': 'configuration'}]
res = self.stackAdvisor.validateHDFSConfigurationsEnv(properties, recommendedDefaults, configurations, '', '')
self.assertEquals(res, res_expected)
def test_validateHiveConfigurations(self):
configurations = {'yarn-site': {'properties': {'yarn.scheduler.maximum-allocation-mb': '4096'}}}
# 1) ok: hive.tez.container.size > recommended
recommendedDefaults = {'hive.tez.container.size': '1024',
'hive.tez.java.opts': '-Xmx256m',
'hive.auto.convert.join.noconditionaltask.size': '1000000000'}
properties = {'hive.tez.container.size': '2048',
'hive.tez.java.opts': '-Xmx300m',
'hive.auto.convert.join.noconditionaltask.size': '1100000000'}
res_expected = []
res = self.stackAdvisor.validateHiveConfigurations(properties, recommendedDefaults, configurations, '', '')
self.assertEquals(res, res_expected)
# 2) fail: yarn.scheduler.maximum-allocation-mb < hive.tez.container.size
configurations = {'yarn-site': {'properties': {'yarn.scheduler.maximum-allocation-mb': '256'}}}
res_expected = [{'config-type': 'hive-site',
'message': 'hive.tez.container.size is greater than the maximum container size specified in yarn.scheduler.maximum-allocation-mb',
'type': 'configuration',
'config-name': 'hive.tez.container.size',
'level': 'WARN'},
]
res = self.stackAdvisor.validateHiveConfigurations(properties, recommendedDefaults, configurations, '', '')
self.assertEquals(res, res_expected)
| 38.332613
| 186
| 0.628691
|
4a09f2de4df0c81dcdacdb539f4371b75f9073b3
| 967
|
py
|
Python
|
Scraping/SiteDownloader/site_downloader.py
|
Shokr/Python_Playbook
|
e29ba874890349742f198982cc40763b30a58842
|
[
"MIT"
] | 12
|
2020-04-05T10:01:11.000Z
|
2022-02-05T08:11:54.000Z
|
Scraping/SiteDownloader/site_downloader.py
|
Shokr/Python_Playbook
|
e29ba874890349742f198982cc40763b30a58842
|
[
"MIT"
] | null | null | null |
Scraping/SiteDownloader/site_downloader.py
|
Shokr/Python_Playbook
|
e29ba874890349742f198982cc40763b30a58842
|
[
"MIT"
] | 6
|
2020-04-03T22:24:34.000Z
|
2022-01-28T12:22:34.000Z
|
import requests
from bs4 import BeautifulSoup
from requests import RequestException, HTTPError, ConnectionError, URLRequired, TooManyRedirects
def download(url, tries=3, timeout=10):
    """
    Download *url* and return it parsed as a BeautifulSoup document.

    Retries the request up to *tries* more times on failure and returns
    ``None`` once the retries are exhausted.

    :param url: address of the page to fetch.
    :param tries: retries remaining (used as a recursion counter).
    :param timeout: per-request timeout in seconds; without one a hung
        server would block the call forever.
    :return: ``BeautifulSoup`` document, or ``None`` on failure.
    """
    # Send a browser-like User-Agent; some sites reject the default one.
    headers = requests.utils.default_headers()
    headers.update({
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
    })
    try:
        site = requests.get(url, headers=headers, timeout=timeout)
        # Without raise_for_status(), a 4xx/5xx error page would be parsed
        # as if it were real content and the HTTPError handler below could
        # never fire.
        site.raise_for_status()
        soup = BeautifulSoup(site.text, 'lxml')
    except (RequestException, HTTPError, ConnectionError, URLRequired, TooManyRedirects) as e:
        print('Download error: {}'.format(e))
        if tries > 0:
            # Recursive retry until the counter reaches zero.
            return download(url, tries - 1, timeout)
        soup = None
    return soup
| 32.233333
| 101
| 0.66908
|
4a09f3cd1bef3099c80dd932898789f087462a3b
| 19,591
|
py
|
Python
|
dev/Tools/Python/2.7.12/windows/Lib/site.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 2,557
|
2016-07-19T22:20:45.000Z
|
2022-01-25T10:53:35.000Z
|
build/cmd/win32/Python27/Lib/site.py
|
IamBaoMouMou/AliOS-Things
|
195a9160b871b3d78de6f8cf6c2ab09a71977527
|
[
"Apache-2.0"
] | 1,360
|
2016-07-20T02:06:42.000Z
|
2021-07-27T12:46:40.000Z
|
build/cmd/win32/Python27/Lib/site.py
|
IamBaoMouMou/AliOS-Things
|
195a9160b871b3d78de6f8cf6c2ab09a71977527
|
[
"Apache-2.0"
] | 645
|
2015-04-21T21:53:02.000Z
|
2022-03-29T05:36:14.000Z
|
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix (including Mac OSX), it starts with sys.prefix and
sys.exec_prefix (if different) and appends
lib/python<version>/site-packages as well as lib/site-python.
On other platforms (such as Windows), it tries each of the
prefixes directly, as well as with lib/site-packages appended. The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.5/site-packages/bar
/usr/local/lib/python2.5/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
import __builtin__
import traceback
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
# None means "undecided": main() resolves it via check_enableusersite().
ENABLE_USER_SITE = None
# for distutils.commands.install
# These values are initialized by the getuserbase() and getusersitepackages()
# functions, through the main() function when Python starts.
USER_SITE = None
USER_BASE = None
def makepath(*paths):
    """Join *paths*, absolutize the result when possible, and return the
    pair (path, case-normalized path)."""
    joined = os.path.join(*paths)
    try:
        joined = os.path.abspath(joined)
    except OSError:
        # The cwd may be unavailable; keep the non-absolute form.
        pass
    return joined, os.path.normcase(joined)
def abs__file__():
    """Set all modules' __file__ attribute to an absolute path"""
    for m in sys.modules.values():
        if hasattr(m, '__loader__'):
            continue   # don't mess with a PEP 302-supplied __file__
        try:
            m.__file__ = os.path.abspath(m.__file__)
        except (AttributeError, OSError):
            # Module without a __file__ (e.g. builtins) or a path that
            # cannot be absolutized: leave it untouched.
            pass
def removeduppaths():
    """ Remove duplicate entries from sys.path along with making them
    absolute"""
    # The interpreter's initial path may hold relative entries and, on
    # case-insensitive filesystems, case-variant duplicates.  Rebuild it
    # in place with absolute, unique entries, preserving order.
    unique = []
    seen = set()
    for entry in sys.path:
        entry, normed = makepath(entry)
        if normed not in seen:
            seen.add(normed)
            unique.append(entry)
    sys.path[:] = unique
    return seen
def _init_pathinfo():
    """Return a set containing all existing directory entries from sys.path"""
    existing = set()
    for entry in sys.path:
        try:
            if os.path.isdir(entry):
                # Record the case-normalized absolute form only.
                existing.add(makepath(entry)[1])
        except TypeError:
            # Non-string entries (e.g. importer objects) are skipped.
            continue
    return existing
def addpackage(sitedir, name, known_paths):
    """Process a .pth file within the site-packages directory:
    For each line in the file, either combine it with sitedir to a path
    and add that to known_paths, or execute it if it starts with 'import '.
    """
    if known_paths is None:
        # NOTE(review): the result of _init_pathinfo() is discarded here,
        # so known_paths stays None — looks like an upstream quirk; confirm.
        _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname, "rU")   # universal-newline mode (Python 2)
    except IOError:
        # Unreadable .pth files are silently skipped.
        return
    with f:
        for n, line in enumerate(f):
            if line.startswith("#"):
                continue
            try:
                # Executable lines: anything starting with "import" runs
                # in this function's namespace (Python 2 exec statement).
                if line.startswith(("import ", "import\t")):
                    exec line
                    continue
                line = line.rstrip()
                dir, dircase = makepath(sitedir, line)
                if not dircase in known_paths and os.path.exists(dir):
                    sys.path.append(dir)
                    known_paths.add(dircase)
            except Exception as err:
                # Report the offending .pth line with a traceback, then
                # ignore the remainder of the file.
                print >>sys.stderr, "Error processing line {:d} of {}:\n".format(
                    n+1, fullname)
                for record in traceback.format_exception(*sys.exc_info()):
                    for line in record.splitlines():
                        print >>sys.stderr, '  '+line
                print >>sys.stderr, "\nRemainder of file ignored"
                break
    if reset:
        known_paths = None
    return known_paths
def addsitedir(sitedir, known_paths=None):
    """Add 'sitedir' argument to sys.path if missing and handle .pth files in
    'sitedir'"""
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    sitedir, sitedircase = makepath(sitedir)
    if sitedircase not in known_paths:
        sys.path.append(sitedir)        # Add path component
    try:
        names = os.listdir(sitedir)
    except os.error:
        # Directory vanished or is unreadable: nothing to do.
        return
    dotpth = os.extsep + "pth"
    # Process .pth files in deterministic (sorted) order.
    for name in sorted(n for n in names if n.endswith(dotpth)):
        addpackage(sitedir, name, known_paths)
    if reset:
        known_paths = None
    return known_paths
def check_enableusersite():
    """Check if user site directory is safe for inclusion

    The function tests for the command line flag (including environment var),
    process uid/gid equal to effective uid/gid.

    None: Disabled for security reasons
    False: Disabled by user (command line option)
    True: Safe and enabled
    """
    if sys.flags.no_user_site:
        return False
    # Refuse (None) when running setuid/setgid: real and effective ids differ.
    if hasattr(os, "getuid") and hasattr(os, "geteuid") and \
            os.geteuid() != os.getuid():
        return None
    if hasattr(os, "getgid") and hasattr(os, "getegid") and \
            os.getegid() != os.getgid():
        return None
    return True
def getuserbase():
    """Returns the `user base` directory path.

    The `user base` directory can be used to store data. If the global
    variable ``USER_BASE`` is not initialized yet, this function will also set
    it.
    """
    global USER_BASE
    if USER_BASE is None:
        # Compute lazily on first call and cache in the module global.
        from sysconfig import get_config_var
        USER_BASE = get_config_var('userbase')
    return USER_BASE
def getusersitepackages():
    """Returns the user-specific site-packages directory path.

    If the global variable ``USER_SITE`` is not initialized yet, this
    function will also set it.
    """
    global USER_SITE
    user_base = getuserbase()  # side effect: ensures USER_BASE is set
    if USER_SITE is None:
        from sysconfig import get_path
        import os
        if sys.platform == 'darwin':
            from sysconfig import get_config_var
            if get_config_var('PYTHONFRAMEWORK'):
                # Framework builds on macOS use a dedicated scheme.
                USER_SITE = get_path('purelib', 'osx_framework_user')
                return USER_SITE
        USER_SITE = get_path('purelib', '%s_user' % os.name)
    return USER_SITE
def addusersitepackages(known_paths):
    """Add a per user site-package to sys.path

    Each user has its own python directory with site-packages in the
    home directory.
    """
    # get the per user site-package path
    # this call will also make sure USER_BASE and USER_SITE are set
    user_site = getusersitepackages()
    # Only honor the directory when the feature is enabled AND it exists;
    # addsitedir mutates sys.path in place, its return value is not needed.
    if ENABLE_USER_SITE and os.path.isdir(user_site):
        addsitedir(user_site, known_paths)
    return known_paths
def getsitepackages():
    """Returns a list containing all global site-packages directories
    (and possibly site-python).

    For each directory present in the global ``PREFIXES``, this function
    will find its `site-packages` subdirectory depending on the system
    environment, and will return a list of full paths.
    """
    dirs = []
    seen = set()
    for prefix in PREFIXES:
        # Skip empty entries and duplicates.
        if not prefix or prefix in seen:
            continue
        seen.add(prefix)
        if sys.platform in ('os2emx', 'riscos'):
            dirs.append(os.path.join(prefix, "Lib", "site-packages"))
        elif os.sep == '/':
            # POSIX layout: versioned site-packages plus legacy site-python.
            pyver = "python" + sys.version[:3]
            dirs.append(os.path.join(prefix, "lib", pyver, "site-packages"))
            dirs.append(os.path.join(prefix, "lib", "site-python"))
        else:
            # Windows and friends: prefix itself plus lib/site-packages.
            dirs.append(prefix)
            dirs.append(os.path.join(prefix, "lib", "site-packages"))
        if sys.platform == "darwin":
            # for framework builds *only* we add the standard Apple
            # locations.
            from sysconfig import get_config_var
            framework = get_config_var("PYTHONFRAMEWORK")
            if framework:
                dirs.append(
                    os.path.join("/Library", framework,
                                 sys.version[:3], "site-packages"))
    return dirs
def addsitepackages(known_paths):
    """Add site-packages (and possibly site-python) to sys.path"""
    # Only directories that actually exist are registered.
    for candidate in getsitepackages():
        if os.path.isdir(candidate):
            addsitedir(candidate, known_paths)
    return known_paths
def setBEGINLIBPATH():
    """The OS/2 EMX port has optional extension modules that do double duty
    as DLLs (and must use the .DLL file extension) for other extensions.

    The library search path needs to be amended so these will be found
    during module import.  Use BEGINLIBPATH so that these are at the start
    of the library search path.
    """
    dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
    parts = os.environ['BEGINLIBPATH'].split(';')
    # An empty final element means the value ended with ';'; reuse that
    # empty slot instead of appending a new element.
    if parts[-1]:
        parts.append(dllpath)
    else:
        parts[-1] = dllpath
    os.environ['BEGINLIBPATH'] = ';'.join(parts)
def setquit():
    """Define new builtins 'quit' and 'exit'.

    These are objects which make the interpreter exit when called.
    The repr of each object contains a hint at how it works.
    """
    # Map path separator to the platform's EOF key chord.
    eof_hints = {':': 'Cmd-Q', '\\': 'Ctrl-Z plus Return'}
    eof = eof_hints.get(os.sep, 'Ctrl-D (i.e. EOF)')

    class Quitter(object):
        def __init__(self, name):
            self.name = name
        def __repr__(self):
            return 'Use %s() or %s to exit' % (self.name, eof)
        def __call__(self, code=None):
            # Shells like IDLE catch the SystemExit, but listen when their
            # stdin wrapper is closed.
            try:
                sys.stdin.close()
            except:
                pass
            raise SystemExit(code)

    __builtin__.quit = Quitter('quit')
    __builtin__.exit = Quitter('exit')
class _Printer(object):
    """interactive prompt objects for printing the license text, a list of
    contributors and the copyright notice."""

    MAXLINES = 23  # screenful used by the interactive pager in __call__

    def __init__(self, name, data, files=(), dirs=()):
        # *data* is the fallback text; *files* looked up within *dirs* are
        # tried first so distributions can ship the full text on disk.
        self.__name = name
        self.__data = data
        self.__files = files
        self.__dirs = dirs
        self.__lines = None  # populated lazily by __setup()

    def __setup(self):
        # Load the text on first use: prefer the first readable file from
        # __dirs/__files, fall back to the built-in __data string.
        if self.__lines:
            return
        data = None
        for dir in self.__dirs:
            for filename in self.__files:
                filename = os.path.join(dir, filename)
                try:
                    fp = file(filename, "rU")   # Python 2 builtin file()
                    data = fp.read()
                    fp.close()
                    break
                except IOError:
                    pass
            if data:
                break
        if not data:
            data = self.__data
        self.__lines = data.split('\n')
        self.__linecnt = len(self.__lines)

    def __repr__(self):
        self.__setup()
        # Short texts are shown inline; long ones only advertise the call.
        if len(self.__lines) <= self.MAXLINES:
            return "\n".join(self.__lines)
        else:
            return "Type %s() to see the full %s text" % ((self.__name,)*2)

    def __call__(self):
        self.__setup()
        prompt = 'Hit Return for more, or q (and Return) to quit: '
        lineno = 0
        # Page through the text MAXLINES at a time (Python 2 print/raw_input).
        while 1:
            try:
                for i in range(lineno, lineno + self.MAXLINES):
                    print self.__lines[i]
            except IndexError:
                break
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    key = raw_input(prompt)
                    if key not in ('', 'q'):
                        key = None
                if key == 'q':
                    break
def setcopyright():
    """Set 'copyright' and 'credits' in __builtin__"""
    __builtin__.copyright = _Printer("copyright", sys.copyright)
    if sys.platform[:4] == 'java':
        __builtin__.credits = _Printer(
            "credits",
            "Jython is maintained by the Jython developers (www.jython.org).")
    else:
        __builtin__.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
    here = os.path.dirname(os.__file__)
    # The license text is loaded lazily by _Printer from the files below,
    # falling back to the short "See ..." pointer.
    __builtin__.license = _Printer(
        "license", "See https://www.python.org/psf/license/",
        ["LICENSE.txt", "LICENSE"],
        [os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the builtin 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
    # Install the interactive help() builtin, backed by pydoc via _Helper.
    __builtin__.help = _Helper()
def aliasmbcs():
    """On Windows, some default encodings are not provided by Python,
    while they are always available as "mbcs" in each locale. Make
    them usable by aliasing to "mbcs" in such a case."""
    if sys.platform == 'win32':
        import locale, codecs
        enc = locale.getdefaultlocale()[1]
        # Bugfix: getdefaultlocale() may return (None, None); guard before
        # testing the prefix to avoid an AttributeError during startup.
        if enc is not None and enc.startswith('cp'):            # "cp***" ?
            try:
                codecs.lookup(enc)
            except LookupError:
                # Unknown codepage: register it as an alias for mbcs.
                import encodings
                encodings._cache[enc] = encodings._unknown
                encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
    """Set the string encoding used by the Unicode implementation.  The
    default is 'ascii', but if you're willing to experiment, you can
    change this."""
    # NOTE: the two "if 0:" blocks below are deliberately dead code kept
    # as documented switches for experimenters; do not remove.
    encoding = "ascii" # Default value set by _PyUnicode_Init()
    if 0:
        # Enable to support locale aware default string encodings.
        import locale
        loc = locale.getdefaultlocale()
        if loc[1]:
            encoding = loc[1]
    if 0:
        # Enable to switch off string to Unicode coercion and implicit
        # Unicode to string conversion.
        encoding = "undefined"
    if encoding != "ascii":
        # On Non-Unicode builds this will raise an AttributeError...
        sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
    """Run custom site specific code, if available."""
    try:
        import sitecustomize
    except ImportError:
        # No sitecustomize module installed: perfectly normal.
        pass
    except Exception:
        # Any other failure is reported but never aborts startup.
        if sys.flags.verbose:
            sys.excepthook(*sys.exc_info())
        else:
            print >>sys.stderr, \
                "'import sitecustomize' failed; use -v for traceback"
def execusercustomize():
    """Run custom user specific code, if available."""
    try:
        import usercustomize
    except ImportError:
        # No usercustomize module installed: perfectly normal.
        pass
    except Exception:
        # Any other failure is reported but never aborts startup.
        if sys.flags.verbose:
            sys.excepthook(*sys.exc_info())
        else:
            print>>sys.stderr, \
                "'import usercustomize' failed; use -v for traceback"
def main():
    """Perform all of site.py's startup configuration.

    The order of the calls below matters: path setup runs first so that
    the sitecustomize/usercustomize imports at the end can be satisfied
    from the directories just added.
    """
    global ENABLE_USER_SITE
    abs__file__()
    known_paths = removeduppaths()
    if ENABLE_USER_SITE is None:
        ENABLE_USER_SITE = check_enableusersite()
    known_paths = addusersitepackages(known_paths)
    known_paths = addsitepackages(known_paths)
    if sys.platform == 'os2emx':
        setBEGINLIBPATH()
    setquit()
    setcopyright()
    sethelper()
    aliasmbcs()
    setencoding()
    execsitecustomize()
    if ENABLE_USER_SITE:
        execusercustomize()
    # Remove sys.setdefaultencoding() so that users cannot change the
    # encoding after initialization.  The test for presence is needed when
    # this module is run as a script, because this code is executed twice.
    if hasattr(sys, "setdefaultencoding"):
        del sys.setdefaultencoding
# Startup configuration runs at import time.
main()
def _script():
    """Diagnostic entry point used when site.py is run as a script.

    Without arguments it dumps sys.path and the user-site globals; with
    --user-base/--user-site it prints those paths and exits with a status
    encoding whether the user site directory is enabled (Python 2 prints).
    """
    help = """\
    %s [--user-base] [--user-site]

    Without arguments print some useful information
    With arguments print the value of USER_BASE and/or USER_SITE separated
    by '%s'.

    Exit codes with --user-base or --user-site:
      0 - user site directory is enabled
      1 - user site directory is disabled by user
      2 - uses site directory is disabled by super user
          or for security reasons
     >2 - unknown error
    """
    args = sys.argv[1:]
    if not args:
        # Informational dump mode.
        print "sys.path = ["
        for dir in sys.path:
            print "    %r," % (dir,)
        print "]"
        print "USER_BASE: %r (%s)" % (USER_BASE,
            "exists" if os.path.isdir(USER_BASE) else "doesn't exist")
        print "USER_SITE: %r (%s)" % (USER_SITE,
            "exists" if os.path.isdir(USER_SITE) else "doesn't exist")
        print "ENABLE_USER_SITE: %r" %  ENABLE_USER_SITE
        sys.exit(0)
    buffer = []
    if '--user-base' in args:
        buffer.append(USER_BASE)
    if '--user-site' in args:
        buffer.append(USER_SITE)
    if buffer:
        print os.pathsep.join(buffer)
        # Exit status mirrors the tri-state ENABLE_USER_SITE value.
        if ENABLE_USER_SITE:
            sys.exit(0)
        elif ENABLE_USER_SITE is False:
            sys.exit(1)
        elif ENABLE_USER_SITE is None:
            sys.exit(2)
        else:
            sys.exit(3)
    else:
        import textwrap
        print textwrap.dedent(help % (sys.argv[0], os.pathsep))
        sys.exit(10)
if __name__ == '__main__':
    # Allow "python site.py [--user-base] [--user-site]" diagnostics.
    _script()
| 32.597338
| 84
| 0.613292
|
4a09f44bffdda96d18c0b7d56ab905d373164906
| 5,977
|
py
|
Python
|
archai/common/yaml_utils.py
|
shatadru99/archai
|
8501080f8ecc73327979c02387e02011efb4c335
|
[
"MIT"
] | 344
|
2020-06-12T22:12:56.000Z
|
2022-03-29T06:48:20.000Z
|
archai/common/yaml_utils.py
|
shatadru99/archai
|
8501080f8ecc73327979c02387e02011efb4c335
|
[
"MIT"
] | 29
|
2020-06-13T19:56:49.000Z
|
2022-03-30T20:26:48.000Z
|
archai/common/yaml_utils.py
|
shatadru99/archai
|
8501080f8ecc73327979c02387e02011efb4c335
|
[
"MIT"
] | 68
|
2020-06-12T19:32:43.000Z
|
2022-03-05T06:58:40.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Mapping, MutableMapping, Any, Optional
_PREFIX_NODE = '_copy' # for copy node content command (must be dict)
_PREFIX_PATH = '_copy:' # for copy node value command (must be scalar)
def resolve_all(root_d:MutableMapping):
    """Resolve every '_copy' directive in *root_d* in place, starting
    from the document root ('/')."""
    _resolve_all(root_d, root_d, '/', set())
def _resolve_all(root_d:MutableMapping, cur:MutableMapping, cur_path:str, prev_paths:set):
    """Recursively resolve '_copy' directives in *cur* (located at
    *cur_path* within *root_d*); *prev_paths* tracks already-visited
    node paths to break reference cycles."""
    assert is_proper_path(cur_path)
    if cur_path in prev_paths:
        return # else we get in to infinite recursion
    prev_paths.add(cur_path)
    # if cur dict has '_copy' node with path in it
    child_path = cur.get(_PREFIX_NODE, None)
    if child_path and isinstance(child_path, str):
        # resolve this path to get source dict
        child_d = _resolve_path(root_d, _rel2full_path(cur_path, child_path), prev_paths)
        # we expect target path to point to dict so we can merge its keys
        if not isinstance(child_d, Mapping):
            raise RuntimeError(f'Path "{child_path}" should be dictionary but its instead "{child_d}"')
        # replace keys that have not been overriden
        _merge_source(child_d, cur)
        # remove command key
        del cur[_PREFIX_NODE]
    for k in cur.keys():
        # if this key needs path resolution, get target and replace the value
        rpath = _req_resolve(cur[k])
        if rpath:
            cur[k] = _resolve_path(root_d,
                _rel2full_path(_join_path(cur_path, k), rpath), prev_paths)
        # if replaced value is again dictionary, recurse on it
        if isinstance(cur[k], MutableMapping):
            _resolve_all(root_d, cur[k], _join_path(cur_path, k), prev_paths)
def _merge_source(source:Mapping, dest:MutableMapping)->None:
# for anything that source has but dest doesn't, just do copy
for sk in source:
if sk not in dest:
dest[sk] = source[sk]
else:
sv = source[sk]
dv = dest[sk]
# recursively merge child nodes
if isinstance(sv, Mapping) and isinstance(dv, MutableMapping):
_merge_source(source[sk], dest[sk])
# else at least dest value is not dict and should not be overriden
def _req_resolve(v:Any)->Optional[str]:
    """If the value is actually a path we need resolve then return that path or return None"""
    if not isinstance(v, str):
        return None
    if not v.startswith(_PREFIX_PATH):
        return None
    # Strip the '_copy:' prefix and any surrounding whitespace.
    return v[len(_PREFIX_PATH):].strip()
def _join_path(path1:str, path2:str):
    """Concatenate two path fragments with exactly one '/' between them."""
    left_slash = path1.endswith('/')
    right_slash = path2.startswith('/')
    if left_slash and right_slash:
        joined = path1[:-1] + path2
    elif left_slash or right_slash:
        joined = path1 + path2
    else:
        joined = path1 + '/' + path2
    return _norm_ended(joined)
def _norm_ended(path:str)->str:
if len(path) > 1 and path.endswith('/'):
path = path[:-1]
return path
def is_proper_path(path:str)->bool:
    """A proper path is absolute and, except for the root, has no trailing '/'."""
    if not path.startswith('/'):
        return False
    return len(path) == 1 or not path.endswith('/')
def _rel2full_path(cwd:str, rel_path:str)->str:
    """Return the absolute path for *rel_path* interpreted relative to *cwd*.

    For example cwd='/a/b/c' and rel_path='../d/e' yields '/a/b/d/e'.
    If *rel_path* is itself absolute (starts with '/'), *cwd* is ignored.
    """
    assert len(cwd) > 0 and cwd.startswith('/'), 'cwd must be absolute path'
    # Absolute rel_path discards cwd entirely.
    base = [] if rel_path.startswith('/') else cwd.split('/')
    resolved = []
    for raw in base + rel_path.split('/'):
        part = raw.strip()
        if not part or part == '.':
            continue  # drop empty segments and '.'
        if part == '..':
            if not resolved:
                raise RuntimeError(f'cannot create abs path for cwd={cwd} and rel_path={rel_path}')
            resolved.pop()
        else:
            resolved.append(part)
    final = '/' + '/'.join(resolved)  # correct even when resolved is empty
    assert not '..' in final and is_proper_path(final) # make algo indeed worked
    return final
def _resolve_path(root_d:MutableMapping, path:str, prev_paths:set)->Any:
    """For given path returns value or node from root_d"""
    assert is_proper_path(path)
    # traverse path in root dict hierarchy
    cur_path = '/' # path at each iteration of for loop
    d = root_d
    for part in path.split('/'):
        if not part:
            continue # there will be blank vals at start
        # For each part, we need to be able find key in dict but some dics may not
        # be fully resolved yet. For last key, d will be either dict or other value.
        if isinstance(d, Mapping):
            # for this section, make sure everything is resolved
            # before we prob for the key
            _resolve_all(root_d, d, cur_path, prev_paths)
            if part in d:
                # "cd" into child node
                d = d[part]
                cur_path = _join_path(cur_path, part)
            else:
                raise RuntimeError(f'Path {path} could not be found in specified dictionary at "{part}"')
        else:
            raise KeyError(f'Path "{path}" cannot be resolved because "{cur_path}" is not a dictionary so "{part}" cannot exist in it')
    # last child is our answer
    # The final node may itself be a '_copy:' value; resolve it too, but
    # guard against a directive that points straight back at itself.
    rpath = _req_resolve(d)
    if rpath:
        next_path = _rel2full_path(cur_path, rpath)
        if next_path == path:
            raise RuntimeError(f'Cannot resolve path "{path}" because it is circular reference')
        d = _resolve_path(root_d, next_path, prev_paths)
    return d
| 38.56129
| 136
| 0.613518
|
4a09f466dc439e416bd2c8d09fa2d9a28b6b94e5
| 1,168
|
py
|
Python
|
aldryn_people/migrations/0005_auto_20150723_1508.py
|
what-digital/aldryn-people
|
0c00f7a7f1c259471959ae88088849386a245432
|
[
"BSD-3-Clause"
] | 11
|
2015-01-01T18:31:02.000Z
|
2018-04-27T10:34:39.000Z
|
aldryn_people/migrations/0005_auto_20150723_1508.py
|
what-digital/aldryn-people
|
0c00f7a7f1c259471959ae88088849386a245432
|
[
"BSD-3-Clause"
] | 139
|
2015-01-14T15:33:56.000Z
|
2018-12-18T15:49:49.000Z
|
aldryn_people/migrations/0005_auto_20150723_1508.py
|
what-digital/aldryn-people
|
0c00f7a7f1c259471959ae88088849386a245432
|
[
"BSD-3-Clause"
] | 42
|
2015-01-16T17:25:03.000Z
|
2018-12-06T11:42:20.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a slug to group translations
    # and redefines Person.group / Person.user as optional foreign keys
    # with explicit CASCADE deletion.  Do not edit field definitions by
    # hand; generate a follow-up migration instead.

    dependencies = [
        ('aldryn_people', '0004_auto_20150622_1606'),
    ]

    operations = [
        migrations.AddField(
            model_name='grouptranslation',
            name='slug',
            field=models.SlugField(default='', max_length=255, verbose_name='slug'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='person',
            name='group',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='persons', verbose_name='group', blank=True, to='aldryn_people.Group', null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='person',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='persons', null=True, blank=True, to=settings.AUTH_USER_MODEL, unique=True),
            preserve_default=True,
        ),
    ]
| 33.371429
| 176
| 0.638699
|
4a09f4b9df48867624b0c9191af9e1dc05874d32
| 1,563
|
py
|
Python
|
bot.py
|
CALO77103/spotify_download_bot
|
604032b4e18af859e7f6e1dad249014d29993e6a
|
[
"MIT"
] | null | null | null |
bot.py
|
CALO77103/spotify_download_bot
|
604032b4e18af859e7f6e1dad249014d29993e6a
|
[
"MIT"
] | null | null | null |
bot.py
|
CALO77103/spotify_download_bot
|
604032b4e18af859e7f6e1dad249014d29993e6a
|
[
"MIT"
] | null | null | null |
import logging
import os
from command_handlers import send_spotify_songs
from telegram import Update
from telegram.ext import CallbackContext, CommandHandler, Updater
logger = logging.getLogger(__name__)
def setup_logging():
    """Configure root logging with a timestamped, INFO-level format.

    NOTE(review): this helper is never invoked in this module — confirm
    whether main() should call it.
    """
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(format=log_format, level=logging.INFO)
def start(update: Update, context: CallbackContext):
    """Handle the /start command: greet the user, then advertise the
    download channel in a second message."""
    update.effective_message.reply_text("Hi, welcome to my bot!, this is a simple bot when u can download song only with /spotify [url]. Thank you for using me and happy downloading!")
    context.bot.send_message(
        chat_id=update.effective_chat.id, text="join here for download songs: @spotifymusicforfree"
    )
def error(update: Update, context: CallbackContext, error=None):
    """Log errors raised while processing an update.

    Registered via ``dp.add_error_handler``; with ``use_context=True`` the
    dispatcher invokes error handlers as ``handler(update, context)``, so
    the original three-argument signature raised a TypeError whenever an
    error actually occurred.  The third parameter is kept (defaulted) for
    backward compatibility; when absent, the exception is taken from
    ``context.error``.
    """
    if error is None:
        error = getattr(context, "error", None)
    logger.warning('Update "%s" caused error "%s"', update, error)
def main():
    """Wire up the Telegram bot's handlers and run it with long polling."""
    TOKEN = os.environ.get('TOKEN')
    APP_NAME = os.environ.get('APP_NAME')
    # Port is given by Heroku
    PORT = os.environ.get('PORT')
    # NOTE(review): APP_NAME and PORT are read but never used below —
    # presumably leftovers from a Heroku webhook setup; confirm before
    # removing (start_polling() does not need them).
    # Set up the Updater
    updater = Updater(
        TOKEN,
        use_context=True,
    )
    dp = updater.dispatcher
    # Add handlers
    dp.add_handler(CommandHandler('start', start))
    dp.add_error_handler(error)
    dp.add_handler(
        CommandHandler(
            'spotify',
            send_spotify_songs.send_spotify_songs,
            pass_args=True,
            pass_job_queue=True,
            pass_chat_data=True
        )
    )
    # Poll Telegram for updates, then block until interrupted.
    updater.start_polling()
    updater.idle()
if __name__ == "__main__":
    # Run the bot when executed directly.
    main()
| 24.809524
| 184
| 0.666027
|
4a09f4e45d55851fecea13344cc8a676bdf31864
| 3,643
|
py
|
Python
|
cvtlib/drawing.py
|
raikel/cvtlib
|
75a4b1922558491f4367ae655d1c386a5a5a6ccd
|
[
"MIT"
] | null | null | null |
cvtlib/drawing.py
|
raikel/cvtlib
|
75a4b1922558491f4367ae655d1c386a5a5a6ccd
|
[
"MIT"
] | null | null | null |
cvtlib/drawing.py
|
raikel/cvtlib
|
75a4b1922558491f4367ae655d1c386a5a5a6ccd
|
[
"MIT"
] | null | null | null |
import cv2
from typing import Tuple
import numpy as np
class Drawer:
    """Draws labeled bounding boxes and standalone text on images via OpenCV.

    ``text_loc`` selects where a box's label is rendered: ``TEXT_TOP``
    places it above the box, ``TEXT_BOTTOM`` below it.
    """

    TEXT_TOP = 0
    # Bugfix: TEXT_BOTTOM was also 0, which made the TEXT_BOTTOM branch in
    # draw_labeled_box unreachable (text_loc == TEXT_TOP always matched
    # first).  It must be a distinct value; literal 0 still selects TOP,
    # so existing callers are unaffected.
    TEXT_BOTTOM = 1

    def __init__(
        self,
        box_color: Tuple[int, int, int] = (255, 0, 0),
        box_linewidth: int = 2,
        font_type: int = cv2.FONT_HERSHEY_SIMPLEX,
        font_color: Tuple[int, int, int] = (255, 255, 255),
        font_scale: int = 2,
        font_linewidth: int = 2,
        text_margins: tuple = (2, 2, 2, 2),
        text_line_sep: float = 1.3,
        text_loc: int = 0,
        text_background: tuple = (255, 0, 0)
    ):
        """Store drawing style options.

        text_margins is in CSS order (top, right, bottom, left), in pixels.
        text_line_sep is the line-height multiplier between label lines.
        text_background of None disables the filled label background.
        """
        self.box_color: tuple = box_color
        self.box_linewidth: int = box_linewidth
        self.font_type: int = font_type
        self.font_color: tuple = font_color
        self.font_scale: int = font_scale
        self.font_linewidth: int = font_linewidth
        self.text_margins = text_margins
        self.text_line_sep = text_line_sep
        self.text_loc = text_loc
        self.text_background = text_background

    def draw_labeled_box(
        self,
        frame: np.ndarray,
        label: str,
        rect: Tuple[int, int, int, int]
    ):
        """Draw *rect* (x0, y0, x1, y1) on *frame* with a multi-line *label*.

        The label may contain '\\n'; each line is measured separately and
        stacked with text_line_sep spacing.  Returns *frame* (modified in
        place).
        """
        lines = [line.strip() for line in label.split('\n')]

        # Measure every line to get per-line vertical offsets and the
        # widest line (used for the background rectangle).
        offsets = []
        current_offset = 0
        text_width = 0
        for line in lines:
            line_size, _ = cv2.getTextSize(
                text=line,
                fontFace=self.font_type,
                fontScale=self.font_scale,
                thickness=self.font_linewidth
            )
            current_offset += int(self.text_line_sep * line_size[1])
            offsets.append(current_offset)
            text_width = max(text_width, line_size[0])
        text_height = current_offset

        # Anchor the text block above or below the box.
        if self.text_loc == self.TEXT_TOP:
            text_corner = (
                rect[0] + self.text_margins[3],
                rect[1] - text_height - self.text_margins[2]
            )
        elif self.text_loc == self.TEXT_BOTTOM:
            text_corner = (
                rect[0] + self.text_margins[3],
                rect[3] + self.text_margins[0]
            )
        else:
            raise ValueError(f'Invalid text location "{self.text_loc}"')

        # Background rectangle padded by the margins around the text block.
        background_rect = (
            text_corner[0] - self.text_margins[3],
            text_corner[1] - self.text_margins[0],
            text_corner[0] + text_width + self.text_margins[1],
            text_corner[1] + text_height + self.text_margins[2],
        )

        cv2.rectangle(
            frame,
            (rect[0], rect[1]),
            (rect[2], rect[3]),
            self.box_color,
            self.box_linewidth
        )
        if self.text_background is not None:
            # Filled (-1 thickness) backdrop behind the label.
            cv2.rectangle(
                frame,
                (background_rect[0], background_rect[1]),
                (background_rect[2], background_rect[3]),
                self.text_background,
                -1
            )
        for i, line in enumerate(lines):
            cv2.putText(
                frame,
                line,
                (text_corner[0], text_corner[1] + offsets[i]),
                self.font_type,
                self.font_scale,
                self.font_color,
                self.font_linewidth,
                cv2.LINE_AA
            )
        return frame

    def draw_text(
        self,
        frame: np.ndarray,
        text: str,
        pos: Tuple[int, int]
    ):
        """Draw a single line of *text* at *pos* using the configured font.

        Returns *frame* (modified in place).
        """
        cv2.putText(
            frame,
            text,
            pos,
            self.font_type,
            self.font_scale,
            self.font_color,
            self.font_linewidth,
            cv2.LINE_AA
        )
        return frame
| 28.24031
| 72
| 0.513039
|
4a09f674a28026583ead63ae4b7abbff1e423d58
| 2,699
|
py
|
Python
|
examples/tour_examples/introjs_google_tour.py
|
adityasarin/SeleniumBase
|
419e4c52a9cffd140e01070eaae0e8326cfd6d8e
|
[
"MIT"
] | null | null | null |
examples/tour_examples/introjs_google_tour.py
|
adityasarin/SeleniumBase
|
419e4c52a9cffd140e01070eaae0e8326cfd6d8e
|
[
"MIT"
] | 1
|
2021-06-01T23:17:18.000Z
|
2021-06-01T23:17:18.000Z
|
examples/tour_examples/introjs_google_tour.py
|
adityasarin/SeleniumBase
|
419e4c52a9cffd140e01070eaae0e8326cfd6d8e
|
[
"MIT"
] | null | null | null |
from seleniumbase import BaseCase
class MyTourClass(BaseCase):
    """SeleniumBase demo: guided IntroJS tours over Google Search and Maps."""

    def test_google_tour(self):
        # Tour 1: search box introduction on the Google home page.
        self.open('https://google.com')
        self.wait_for_element('input[title="Search"]')
        self.create_introjs_tour()  # OR self.create_tour(theme="introjs")
        self.add_tour_step(
            "Click to begin the Google Tour!", title="SeleniumBase Tours")
        self.add_tour_step(
            "Type in your search query here.", 'input[title="Search"]')
        self.play_tour()
        # Tour 2: shown once autocomplete suggestions appear.
        self.highlight_update_text('input[title="Search"]', "Google")
        self.wait_for_element('[role="listbox"]')  # Wait for autocomplete
        self.create_introjs_tour()
        self.add_tour_step(
            "Then click here to search.", 'input[value="Google Search"]')
        self.add_tour_step(
            "Or press [ENTER] after typing a query here.", '[title="Search"]')
        self.play_tour()
        # Tour 3: results page, auto-advancing every 5 seconds.
        self.highlight_update_text('input[title="Search"]', "GitHub\n")
        self.wait_for_element("#search")
        self.create_introjs_tour()
        self.add_tour_step(
            "Search results appear here!", title="(5-second autoplay on)")
        self.add_tour_step("Let's take another tour:")
        self.play_tour(interval=5)  # Tour automatically continues after 5 sec
        # Tour 4: Google Maps UI walkthrough (search, zoom, menus).
        self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
        self.wait_for_element('input#searchboxinput')
        self.create_introjs_tour()
        self.add_tour_step("Welcome to Google Maps!")
        self.add_tour_step(
            "Type in a location here.", "#searchboxinput", title="Search Box")
        self.add_tour_step(
            "Then click here to show it on the map.",
            "#searchbox-searchbutton", alignment="bottom")
        self.add_tour_step(
            "Or click here to get driving directions.",
            "#searchbox-directions", alignment="bottom")
        self.add_tour_step(
            "Use this button to switch to Satellite view.",
            "div.widget-minimap", alignment="right")
        self.add_tour_step(
            "Click here to zoom in.", "#widget-zoom-in", alignment="left")
        self.add_tour_step(
            "Or click here to zoom out.", "#widget-zoom-out", alignment="left")
        self.add_tour_step(
            "Use the Menu button to see more options.",
            ".searchbox-hamburger-container", alignment="right")
        self.add_tour_step(
            "Or click here to see more Google apps.", '[title="Google apps"]',
            alignment="left")
        self.add_tour_step(
            "Thanks for trying out SeleniumBase Tours!",
            title="End of Guided Tour")
        self.play_tour()
| 40.893939
| 79
| 0.615413
|
4a09f72c2f52a8ddda828f4b1ba487e14903da27
| 37,206
|
py
|
Python
|
saleor/graphql/attribute/tests/test_utils.py
|
victor-abz/saleor
|
f8e2b49703d995d4304d5a690dbe9c83631419d0
|
[
"CC-BY-4.0"
] | 1,392
|
2021-10-06T15:54:28.000Z
|
2022-03-31T20:50:55.000Z
|
saleor/graphql/attribute/tests/test_utils.py
|
victor-abz/saleor
|
f8e2b49703d995d4304d5a690dbe9c83631419d0
|
[
"CC-BY-4.0"
] | 888
|
2021-10-06T10:48:54.000Z
|
2022-03-31T11:00:30.000Z
|
saleor/graphql/attribute/tests/test_utils.py
|
victor-abz/saleor
|
f8e2b49703d995d4304d5a690dbe9c83631419d0
|
[
"CC-BY-4.0"
] | 538
|
2021-10-07T16:21:27.000Z
|
2022-03-31T22:58:57.000Z
|
import graphene
import pytest
from ....attribute import AttributeInputType
from ....page.error_codes import PageErrorCode
from ....product.error_codes import ProductErrorCode
from ...product.mutations.products import AttrValuesInput
from ..utils import AttributeAssignmentMixin, validate_attributes_input
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_input_for_product(
    creation, weight_attribute, color_attribute, product_type
):
    """Happy path: both required attributes receive a value -> no errors."""
    # given
    for attribute in (color_attribute, weight_attribute):
        attribute.value_required = True
        attribute.save(update_fields=["value_required"])

    def values_for(attribute, values):
        # Build the mutation-style values payload for one attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=None,
            content_type=None,
            references=[],
        )

    input_data = [
        (weight_attribute, values_for(weight_attribute, ["a"])),
        (color_attribute, values_for(color_attribute, ["b"])),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        product_type.product_attributes.all(),
        is_page_attributes=False,
        creation=creation,
    )

    # then
    assert not errors
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_input_for_product_no_values_given(
    creation, weight_attribute, color_attribute, product_type
):
    """Required attributes with empty value lists -> one REQUIRED error
    naming both attributes."""
    # given
    for attribute in (color_attribute, weight_attribute):
        attribute.value_required = True
        attribute.save(update_fields=["value_required"])

    def empty_values_for(attribute):
        # Payload with no values at all for the given attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=[],
            file_url=None,
            content_type=None,
            references=[],
        )

    input_data = [
        (weight_attribute, empty_values_for(weight_attribute)),
        (color_attribute, empty_values_for(color_attribute)),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        product_type.product_attributes.all(),
        is_page_attributes=False,
        creation=creation,
    )

    # then
    assert len(errors) == 1
    error = errors[0]
    assert error.code == ProductErrorCode.REQUIRED.value
    assert set(error.params["attributes"]) == {
        graphene.Node.to_global_id("Attribute", attr.pk)
        for attr in [weight_attribute, color_attribute]
    }
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_input_for_product_too_many_values_given(
    creation, weight_attribute, color_attribute, product_type
):
    """A dropdown attribute given two values -> one INVALID error for it;
    the multiselect attribute accepts multiple values."""
    # given
    color_attribute.input_type = AttributeInputType.DROPDOWN
    color_attribute.value_required = True
    color_attribute.save(update_fields=["value_required", "input_type"])

    weight_attribute.value_required = True
    weight_attribute.input_type = AttributeInputType.MULTISELECT
    weight_attribute.save(update_fields=["value_required", "input_type"])

    def values_for(attribute, values):
        # Build the mutation-style values payload for one attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=None,
            content_type=None,
            references=[],
        )

    input_data = [
        (weight_attribute, values_for(weight_attribute, ["abc", "efg"])),
        (color_attribute, values_for(color_attribute, ["a", "b"])),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        product_type.product_attributes.all(),
        is_page_attributes=False,
        creation=creation,
    )

    # then
    assert len(errors) == 1
    error = errors[0]
    assert error.code == ProductErrorCode.INVALID.value
    assert error.params["attributes"] == [
        graphene.Node.to_global_id("Attribute", color_attribute.pk)
    ]
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_input_for_product_empty_values_given(
    creation, weight_attribute, color_attribute, product_type
):
    """Blank (None / whitespace-only) values count as missing: REQUIRED error."""
    # given
    for attribute, input_type in (
        (color_attribute, AttributeInputType.DROPDOWN),
        (weight_attribute, AttributeInputType.MULTISELECT),
    ):
        attribute.value_required = True
        attribute.input_type = input_type
        attribute.save(update_fields=["value_required", "input_type"])

    def _values(attribute, values):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=None,
            content_type=None,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, ["a", None])),
        (color_attribute, _values(color_attribute, [" "])),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        product_type.product_attributes.all(),
        is_page_attributes=False,
        creation=creation,
    )

    # then
    assert len(errors) == 1
    error = errors[0]
    assert error.code == ProductErrorCode.REQUIRED.value
    expected_ids = {
        graphene.Node.to_global_id("Attribute", attribute.pk)
        for attribute in (weight_attribute, color_attribute)
    }
    assert set(error.params["attributes"]) == expected_ids
def test_validate_attributes_input_for_product_lack_of_required_attribute(
    weight_attribute, color_attribute, product_type
):
    """Omitting a required product-type attribute on creation raises REQUIRED."""
    # given
    product_attributes = product_type.product_attributes.all()
    attr = product_attributes.first()
    attr.value_required = True
    attr.save(update_fields=["value_required"])

    weight_attribute.value_required = True
    weight_attribute.save(update_fields=["value_required"])

    # Only the weight attribute is provided; the required product-type
    # attribute is deliberately left out.
    input_data = [
        (
            weight_attribute,
            AttrValuesInput(
                global_id=graphene.Node.to_global_id("Attribute", weight_attribute.pk),
                values=["a"],
                file_url=None,
                content_type=None,
                references=[],
            ),
        ),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        product_attributes,
        is_page_attributes=False,
        creation=True,
    )

    # then
    assert len(errors) == 1
    error = errors[0]
    assert error.code == ProductErrorCode.REQUIRED.value
    assert set(error.params["attributes"]) == {
        graphene.Node.to_global_id("Attribute", attr.pk)
    }
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_input_for_product_creation_multiple_errors(
    creation, weight_attribute, color_attribute, product_type
):
    """A blank multiselect value and too many dropdown values produce both
    a REQUIRED and an INVALID error covering the two attributes."""
    # given
    for attribute, input_type in (
        (color_attribute, AttributeInputType.DROPDOWN),
        (weight_attribute, AttributeInputType.MULTISELECT),
    ):
        attribute.value_required = True
        attribute.input_type = input_type
        attribute.save(update_fields=["value_required", "input_type"])

    def _values(attribute, values):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=None,
            content_type=None,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, [None])),
        (color_attribute, _values(color_attribute, ["a", "b"])),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        product_type.product_attributes.all(),
        is_page_attributes=False,
        creation=creation,
    )

    # then
    assert len(errors) == 2
    assert {error.code for error in errors} == {
        ProductErrorCode.INVALID.value,
        ProductErrorCode.REQUIRED.value,
    }
    reported = {attr for error in errors for attr in error.params["attributes"]}
    assert reported == {
        graphene.Node.to_global_id("Attribute", attribute.pk)
        for attribute in (weight_attribute, color_attribute)
    }
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_input_for_page(
    creation, weight_attribute, color_attribute, page_type
):
    """Two required page attributes, one value each: no errors raised."""
    # given
    for attribute in (color_attribute, weight_attribute):
        attribute.value_required = True
        attribute.save(update_fields=["value_required"])

    def _values(attribute, values):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=None,
            content_type=None,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, ["a"])),
        (color_attribute, _values(color_attribute, ["b"])),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        page_type.page_attributes.all(),
        is_page_attributes=True,
        creation=creation,
    )

    # then
    assert not errors
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_input_for_page_no_values_given(
    creation, weight_attribute, color_attribute, page_type
):
    """Required page attributes with empty values produce one REQUIRED error."""
    # given
    for attribute in (color_attribute, weight_attribute):
        attribute.value_required = True
        attribute.save(update_fields=["value_required"])

    def _values(attribute, values):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=None,
            content_type=None,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, [])),
        (color_attribute, _values(color_attribute, [])),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        page_type.page_attributes.all(),
        is_page_attributes=True,
        creation=creation,
    )

    # then: page validation reports PageErrorCode rather than ProductErrorCode
    assert len(errors) == 1
    error = errors[0]
    assert error.code == PageErrorCode.REQUIRED.value
    expected_ids = {
        graphene.Node.to_global_id("Attribute", attribute.pk)
        for attribute in (weight_attribute, color_attribute)
    }
    assert set(error.params["attributes"]) == expected_ids
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_input_for_page_too_many_values_given(
    creation, weight_attribute, color_attribute, page_type
):
    """Multiple values on a dropdown page attribute raise INVALID for it only."""
    # given
    for attribute, input_type in (
        (color_attribute, AttributeInputType.DROPDOWN),
        (weight_attribute, AttributeInputType.MULTISELECT),
    ):
        attribute.value_required = True
        attribute.input_type = input_type
        attribute.save(update_fields=["value_required", "input_type"])

    def _values(attribute, values):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=None,
            content_type=None,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, ["abc", "efg"])),
        (color_attribute, _values(color_attribute, ["a", "b"])),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        page_type.page_attributes.all(),
        is_page_attributes=True,
        creation=creation,
    )

    # then: only the dropdown attribute is invalid; multiselect allows many
    assert len(errors) == 1
    error = errors[0]
    assert error.code == PageErrorCode.INVALID.value
    assert error.params["attributes"] == [
        graphene.Node.to_global_id("Attribute", color_attribute.pk)
    ]
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_input_for_page_empty_values_given(
    creation, weight_attribute, color_attribute, page_type
):
    """Blank (None / whitespace-only) page values raise a REQUIRED error."""
    # given
    for attribute, input_type in (
        (color_attribute, AttributeInputType.DROPDOWN),
        (weight_attribute, AttributeInputType.MULTISELECT),
    ):
        attribute.value_required = True
        attribute.input_type = input_type
        attribute.save(update_fields=["value_required", "input_type"])

    def _values(attribute, values):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=None,
            content_type=None,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, ["a", None])),
        (color_attribute, _values(color_attribute, [" "])),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        page_type.page_attributes.all(),
        is_page_attributes=True,
        creation=creation,
    )

    # then
    assert len(errors) == 1
    error = errors[0]
    assert error.code == PageErrorCode.REQUIRED.value
    expected_ids = {
        graphene.Node.to_global_id("Attribute", attribute.pk)
        for attribute in (weight_attribute, color_attribute)
    }
    assert set(error.params["attributes"]) == expected_ids
def test_validate_attributes_input_for_page_lack_of_required_attribute(
    weight_attribute, color_attribute, page_type
):
    """Omitting a required page-type attribute on creation raises REQUIRED."""
    # given
    page_attributes = page_type.page_attributes.all()
    attr = page_attributes.first()
    attr.value_required = True
    attr.save(update_fields=["value_required"])

    weight_attribute.value_required = True
    weight_attribute.save(update_fields=["value_required"])

    # Only the weight attribute is provided; the required page-type
    # attribute is deliberately left out.
    input_data = [
        (
            weight_attribute,
            AttrValuesInput(
                global_id=graphene.Node.to_global_id("Attribute", weight_attribute.pk),
                values=["a"],
                file_url=None,
                content_type=None,
                references=[],
            ),
        ),
    ]

    # when
    errors = validate_attributes_input(
        input_data, page_attributes, is_page_attributes=True, creation=True
    )

    # then
    assert len(errors) == 1
    error = errors[0]
    assert error.code == PageErrorCode.REQUIRED.value
    assert set(error.params["attributes"]) == {
        graphene.Node.to_global_id("Attribute", attr.pk)
    }
def test_validate_attributes_input_for_page_multiple_errors(
    weight_attribute, color_attribute, page_type
):
    """A blank multiselect value and too many dropdown values on a page
    produce both a REQUIRED and an INVALID error."""
    # given
    for attribute, input_type in (
        (color_attribute, AttributeInputType.DROPDOWN),
        (weight_attribute, AttributeInputType.MULTISELECT),
    ):
        attribute.value_required = True
        attribute.input_type = input_type
        attribute.save(update_fields=["value_required", "input_type"])

    def _values(attribute, values):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=None,
            content_type=None,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, [None])),
        (color_attribute, _values(color_attribute, ["a", "b"])),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        page_type.page_attributes.all(),
        is_page_attributes=True,
        creation=True,
    )

    # then
    assert len(errors) == 2
    assert {error.code for error in errors} == {
        PageErrorCode.INVALID.value,
        PageErrorCode.REQUIRED.value,
    }
    reported = {attr for error in errors for attr in error.params["attributes"]}
    assert reported == {
        graphene.Node.to_global_id("Attribute", attribute.pk)
        for attribute in (weight_attribute, color_attribute)
    }
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_input(
    creation, weight_attribute, color_attribute, product_type
):
    """Required variant attributes with one value each: no errors raised."""
    # given
    for attribute in (color_attribute, weight_attribute):
        attribute.value_required = True
        attribute.save(update_fields=["value_required"])

    def _values(attribute, values):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=None,
            content_type=None,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, ["a"])),
        (color_attribute, _values(color_attribute, ["b"])),
    ]
    attributes = product_type.variant_attributes.all()

    # when
    errors = validate_attributes_input(
        input_data, attributes, is_page_attributes=False, creation=creation
    )

    # then
    assert not errors
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_input_no_values_given(
    creation, weight_attribute, color_attribute, product_type
):
    """Required variant attributes with empty values raise a REQUIRED error."""
    # given
    for attribute in (color_attribute, weight_attribute):
        attribute.value_required = True
        attribute.save(update_fields=["value_required"])

    def _values(attribute, values):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=None,
            content_type=None,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, [])),
        (color_attribute, _values(color_attribute, [])),
    ]
    attributes = product_type.variant_attributes.all()

    # when
    errors = validate_attributes_input(
        input_data, attributes, is_page_attributes=False, creation=creation
    )

    # then
    assert len(errors) == 1
    error = errors[0]
    assert error.code == ProductErrorCode.REQUIRED.value
    expected_ids = {
        graphene.Node.to_global_id("Attribute", attribute.pk)
        for attribute in (weight_attribute, color_attribute)
    }
    assert set(error.params["attributes"]) == expected_ids
@pytest.mark.parametrize("creation", [True, False])
def test_validate_not_required_variant_selection_attributes_input_no_values_given(
    creation, weight_attribute, color_attribute, product_type
):
    """Optional multiselect variant attributes may be sent with no values."""
    # given
    for attribute in (color_attribute, weight_attribute):
        attribute.value_required = False
        attribute.input_type = AttributeInputType.MULTISELECT
        attribute.save(update_fields=["value_required", "input_type"])

    # Ensure both attributes are attached to the product type as variant
    # attributes before validation runs.
    product_type.variant_attributes.add(weight_attribute)
    product_type.variant_attributes.add(color_attribute)

    def _values(attribute, values):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=None,
            content_type=None,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, [])),
        (color_attribute, _values(color_attribute, [])),
    ]
    attributes = product_type.variant_attributes.all()

    # when
    errors = validate_attributes_input(
        input_data,
        attributes,
        is_page_attributes=False,
        creation=creation,
    )

    # then
    assert not errors
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_input_too_many_values_given(
    creation, weight_attribute, color_attribute, product_type
):
    """Variant selection attributes accept a single value only; passing two
    values for each attribute raises one INVALID error naming both."""
    # given
    for attribute in (color_attribute, weight_attribute):
        attribute.value_required = True
        attribute.save(update_fields=["value_required"])

    def _values(attribute, values):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=None,
            content_type=None,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, ["abc", "efg"])),
        (color_attribute, _values(color_attribute, ["a", "b"])),
    ]
    attributes = product_type.variant_attributes.all()

    # when
    errors = validate_attributes_input(
        input_data,
        attributes,
        is_page_attributes=False,
        creation=creation,
    )

    # then
    assert len(errors) == 1
    error = errors[0]
    assert error.code == ProductErrorCode.INVALID.value
    expected_ids = {
        graphene.Node.to_global_id("Attribute", attribute.pk)
        for attribute in (weight_attribute, color_attribute)
    }
    assert set(error.params["attributes"]) == expected_ids
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_input_empty_values_given(
    creation, weight_attribute, color_attribute, product_type
):
    """Blank values (None / whitespace-only) for required variant attributes
    are rejected with a single REQUIRED error listing both attributes.
    """
    # given
    color_attribute.value_required = True
    # Only ``value_required`` is modified here, so only it is saved.
    # (The previous code also listed "input_type" in update_fields although
    # that field was never changed — a copy-paste leftover from the
    # multiselect variants of this test.)
    color_attribute.save(update_fields=["value_required"])
    weight_attribute.value_required = True
    weight_attribute.save(update_fields=["value_required"])
    input_data = [
        (
            weight_attribute,
            AttrValuesInput(
                global_id=graphene.Node.to_global_id("Attribute", weight_attribute.pk),
                values=[None],
                file_url=None,
                content_type=None,
                references=[],
            ),
        ),
        (
            color_attribute,
            AttrValuesInput(
                global_id=graphene.Node.to_global_id("Attribute", color_attribute.pk),
                values=[" "],
                file_url=None,
                content_type=None,
                references=[],
            ),
        ),
    ]
    attributes = product_type.variant_attributes.all()
    # when
    errors = validate_attributes_input(
        input_data, attributes, is_page_attributes=False, creation=creation
    )
    # then
    assert len(errors) == 1
    error = errors[0]
    assert error.code == ProductErrorCode.REQUIRED.value
    assert set(error.params["attributes"]) == {
        graphene.Node.to_global_id("Attribute", attr.pk)
        for attr in [weight_attribute, color_attribute]
    }
def test_validate_attributes_input_multiple_errors(
    weight_attribute, color_attribute, product_type
):
    """A blank value and a two-value selection on required variant
    attributes raise both a REQUIRED and an INVALID error.
    """
    # given
    color_attribute.value_required = True
    # Only ``value_required`` is modified here, so only it is saved.
    # (The previous code also listed "input_type" in update_fields although
    # that field was never changed — a copy-paste leftover from the
    # multiselect variants of this test.)
    color_attribute.save(update_fields=["value_required"])
    weight_attribute.value_required = True
    weight_attribute.save(update_fields=["value_required"])
    input_data = [
        (
            weight_attribute,
            AttrValuesInput(
                global_id=graphene.Node.to_global_id("Attribute", weight_attribute.pk),
                values=[None],
                file_url=None,
                content_type=None,
                references=[],
            ),
        ),
        (
            color_attribute,
            AttrValuesInput(
                global_id=graphene.Node.to_global_id("Attribute", color_attribute.pk),
                values=["a", "b"],
                file_url=None,
                content_type=None,
                references=[],
            ),
        ),
    ]
    attributes = product_type.variant_attributes.all()
    # when
    errors = validate_attributes_input(
        input_data, attributes, is_page_attributes=False, creation=True
    )
    # then
    assert len(errors) == 2
    assert {error.code for error in errors} == {
        ProductErrorCode.INVALID.value,
        ProductErrorCode.REQUIRED.value,
    }
    assert {attr for error in errors for attr in error.params["attributes"]} == {
        graphene.Node.to_global_id("Attribute", attr.pk)
        for attr in [weight_attribute, color_attribute]
    }
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_with_file_input_type_for_product(
    creation, weight_attribute, file_attribute, product_type
):
    """A required file attribute with a file URL passes validation."""
    # given
    for attribute in (file_attribute, weight_attribute):
        attribute.value_required = True
        attribute.save(update_fields=["value_required"])

    def _values(attribute, values, file_url=None, content_type=None):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=file_url,
            content_type=content_type,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, ["a"])),
        (
            file_attribute,
            _values(file_attribute, [], "test_file.jpeg", "image/jpeg"),
        ),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        product_type.product_attributes.all(),
        is_page_attributes=False,
        creation=creation,
    )

    # then
    assert not errors
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_with_file_input_type_for_product_no_file_given(
    creation, weight_attribute, file_attribute, product_type
):
    """An empty file URL on a required file attribute raises REQUIRED."""
    # given
    for attribute in (file_attribute, weight_attribute):
        attribute.value_required = True
        attribute.save(update_fields=["value_required"])

    def _values(attribute, values, file_url=None, content_type=None):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=file_url,
            content_type=content_type,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, ["a"])),
        (file_attribute, _values(file_attribute, [], "", "image/jpeg")),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        product_type.product_attributes.all(),
        is_page_attributes=False,
        creation=creation,
    )

    # then: only the file attribute is reported
    assert len(errors) == 1
    error = errors[0]
    assert error.code == ProductErrorCode.REQUIRED.value
    assert set(error.params["attributes"]) == {
        graphene.Node.to_global_id("Attribute", file_attribute.pk)
    }
@pytest.mark.parametrize("creation", [True, False])
def test_validate_not_required_attrs_with_file_input_type_for_product_no_file_given(
    creation, weight_attribute, file_attribute, product_type
):
    """Optional file attribute without a file URL passes validation."""
    # given
    for attribute in (file_attribute, weight_attribute):
        attribute.value_required = False
        attribute.save(update_fields=["value_required"])

    def _values(attribute, values, file_url=None, content_type=None):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=file_url,
            content_type=content_type,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, ["a"])),
        (file_attribute, _values(file_attribute, [], "", "image/jpeg")),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        product_type.product_attributes.all(),
        is_page_attributes=False,
        creation=creation,
    )

    # then
    assert not errors
@pytest.mark.parametrize("creation", [True, False])
def test_validate_attributes_with_file_input_type_for_product_empty_file_value(
    creation, weight_attribute, file_attribute, product_type
):
    """A whitespace-only file URL on a required file attribute raises REQUIRED."""
    # given
    for attribute in (file_attribute, weight_attribute):
        attribute.value_required = True
        attribute.save(update_fields=["value_required"])

    def _values(attribute, values, file_url=None, content_type=None):
        # Build the raw values payload for a single attribute.
        return AttrValuesInput(
            global_id=graphene.Node.to_global_id("Attribute", attribute.pk),
            values=values,
            file_url=file_url,
            content_type=content_type,
            references=[],
        )

    input_data = [
        (weight_attribute, _values(weight_attribute, ["a"])),
        (file_attribute, _values(file_attribute, [], " ", "image/jpeg")),
    ]

    # when
    errors = validate_attributes_input(
        input_data,
        product_type.product_attributes.all(),
        is_page_attributes=False,
        creation=creation,
    )

    # then: only the file attribute is reported
    assert len(errors) == 1
    error = errors[0]
    assert error.code == ProductErrorCode.REQUIRED.value
    assert set(error.params["attributes"]) == {
        graphene.Node.to_global_id("Attribute", file_attribute.pk)
    }
@pytest.mark.parametrize("creation", [True, False])
def test_validate_numeric_attributes_input_for_product(
    creation, numeric_attribute, product_type
):
    """A single parseable numeric value passes validation."""
    # given
    numeric_attribute.value_required = True
    numeric_attribute.save(update_fields=["value_required"])

    values_input = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", numeric_attribute.pk),
        values=["12.34"],
        file_url=None,
        content_type=None,
        references=[],
    )

    # when
    errors = validate_attributes_input(
        [(numeric_attribute, values_input)],
        product_type.product_attributes.all(),
        is_page_attributes=False,
        creation=creation,
    )

    # then
    assert not errors
@pytest.mark.parametrize("value", ["qvd", "12.se", " "])
@pytest.mark.parametrize("creation", [True, False])
def test_validate_numeric_attributes_input_for_product_not_numeric_value_given(
    creation, value, numeric_attribute, product_type
):
    """Non-numeric values for a numeric attribute raise an INVALID error."""
    # given
    numeric_attribute.value_required = True
    numeric_attribute.save(update_fields=["value_required"])

    values_input = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", numeric_attribute.pk),
        values=[value],
        file_url=None,
        content_type=None,
        references=[],
    )

    # when
    errors = validate_attributes_input(
        [(numeric_attribute, values_input)],
        product_type.product_attributes.all(),
        is_page_attributes=False,
        creation=creation,
    )

    # then
    assert len(errors) == 1
    error = errors[0]
    assert error.code == ProductErrorCode.INVALID.value
    assert set(error.params["attributes"]) == {
        graphene.Node.to_global_id("Attribute", numeric_attribute.pk)
    }
@pytest.mark.parametrize("creation", [True, False])
def test_validate_numeric_attributes_input_for_product_blank_value(
    creation, numeric_attribute, product_type
):
    """A ``None`` value for a required numeric attribute raises REQUIRED."""
    # given
    numeric_attribute.value_required = True
    numeric_attribute.save(update_fields=["value_required"])

    values_input = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", numeric_attribute.pk),
        values=[None],
        file_url=None,
        content_type=None,
        references=[],
    )

    # when
    errors = validate_attributes_input(
        [(numeric_attribute, values_input)],
        product_type.product_attributes.all(),
        is_page_attributes=False,
        creation=creation,
    )

    # then
    assert len(errors) == 1
    error = errors[0]
    assert error.code == ProductErrorCode.REQUIRED.value
    assert set(error.params["attributes"]) == {
        graphene.Node.to_global_id("Attribute", numeric_attribute.pk)
    }
@pytest.mark.parametrize("creation", [True, False])
def test_validate_numeric_attributes_input_for_product_more_than_one_value_given(
    creation, numeric_attribute, product_type
):
    """Numeric attributes accept exactly one value; several raise INVALID."""
    # given
    numeric_attribute.value_required = True
    numeric_attribute.save(update_fields=["value_required"])

    values_input = AttrValuesInput(
        global_id=graphene.Node.to_global_id("Attribute", numeric_attribute.pk),
        values=["12", 1, 123],
        file_url=None,
        content_type=None,
        references=[],
    )

    # when
    errors = validate_attributes_input(
        [(numeric_attribute, values_input)],
        product_type.product_attributes.all(),
        is_page_attributes=False,
        creation=creation,
    )

    # then
    assert len(errors) == 1
    error = errors[0]
    assert error.code == ProductErrorCode.INVALID.value
    assert set(error.params["attributes"]) == {
        graphene.Node.to_global_id("Attribute", numeric_attribute.pk)
    }
@pytest.mark.parametrize(
    "file_url, expected_value",
    [
        ("http://localhost:8000/media/Test.jpg", "Test.jpg"),
        ("/media/Test.jpg", "Test.jpg"),
        ("Test.jpg", "Test.jpg"),
        ("", ""),
        ("/ab/cd.jpg", "/ab/cd.jpg"),
    ],
)
def test_clean_file_url_in_attribute_assignment_mixin(file_url, expected_value):
    """_clean_file_url strips the media-path prefix from recognized URLs."""
    assert AttributeAssignmentMixin._clean_file_url(file_url) == expected_value
| 29.181176
| 88
| 0.604204
|
4a09f73491ccc0996b1d89b7accb0e4abbbe7470
| 3,567
|
py
|
Python
|
nailgun/nailgun/utils/expression_parser.py
|
Axam/nsx-web
|
4f60d71c05e08740cbdf19b6c9bb0c4cb1e29ad5
|
[
"Apache-2.0"
] | 1
|
2021-04-06T16:13:35.000Z
|
2021-04-06T16:13:35.000Z
|
nailgun/nailgun/utils/expression_parser.py
|
Axam/nsx-web
|
4f60d71c05e08740cbdf19b6c9bb0c4cb1e29ad5
|
[
"Apache-2.0"
] | null | null | null |
nailgun/nailgun/utils/expression_parser.py
|
Axam/nsx-web
|
4f60d71c05e08740cbdf19b6c9bb0c4cb1e29ad5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ply.lex
import ply.yacc
from nailgun.errors import errors
# Token names recognized by the lexer. ply requires this module-level
# ``tokens`` tuple in order to wire up the ``t_*`` rules defined below.
tokens = (
    'NUMBER', 'STRING', 'TRUE', 'FALSE', 'NULL', 'AND', 'OR', 'NOT', 'IN',
    'EQUALS', 'NOT_EQUALS', 'LPAREN', 'RPAREN',
    'MODELPATH',
)
def t_NUMBER(t):
    # Integer literal, optionally negative. NOTE: the docstring below is the
    # token's regex (ply convention) and must not be edited as documentation.
    r'-?\d+'
    t.value = int(t.value)  # convert the lexeme to int for the parser
    return t
def t_STRING(t):
    # Quoted string literal; the named group forces the closing quote to
    # match the opening one (either ' or "). Docstring is the ply regex.
    r'(?P<openingquote>["\']).*?(?P=openingquote)'
    t.value = t.value[1:-1]  # strip the surrounding quotes
    return t
def t_TRUE(t):
    # Keyword ``true`` -> Python True. Docstring is the ply regex.
    r'true'
    t.value = True
    return t
def t_FALSE(t):
    # Keyword ``false`` -> Python False. Docstring is the ply regex.
    r'false'
    t.value = False
    return t
def t_NULL(t):
    # Keyword ``null`` -> Python None. Docstring is the ply regex.
    r'null'
    t.value = None
    return t
# Simple tokens defined as plain regex strings (ply convention: ``t_<NAME>``).
t_AND = r'and'
t_OR = r'or'
t_NOT = r'not'
t_IN = r'in'
# Model path: optional model name, ':', a dotted attribute path, and an
# optional trailing '?' marking non-strict lookup (missing attrs -> None).
t_MODELPATH = r'\w*?\:[\w\.\-]+\??'
t_EQUALS = r'=='
t_NOT_EQUALS = r'!='
t_LPAREN = r'\('
t_RPAREN = r'\)'
# Whitespace (including newlines) is skipped between tokens.
t_ignore = ' \t\r\n'
def t_error(t):
    """Abort lexing on a character no token rule matches.

    The previous code constructed the ``LexError`` without raising it and
    then silently skipped the offending character, so malformed expressions
    lexed as if the illegal character were not there. Raising makes bad
    input fail loudly, consistent with ``p_error`` below.
    """
    raise errors.LexError("Illegal character '%s'" % t.value[0])
# Build the lexer from the t_* rules above, at import time.
ply.lex.lex()
# Evaluation context shared with the grammar actions below; ``evaluate``
# swaps in the caller's models mapping before each parse.
context = {
    'models': {}
}
# Operator precedence, lowest-binding first; all operators left-associative.
precedence = (
    ('left', 'OR'),
    ('left', 'AND'),
    ('left', 'EQUALS', 'NOT_EQUALS'),
    ('left', 'IN', 'NOT'),
)
def p_expression_binop(p):
"""expression : expression EQUALS expression
| expression NOT_EQUALS expression
| expression OR expression
| expression AND expression
| expression IN expression
"""
if p[2] == '==':
p[0] = p[1] == p[3]
elif p[2] == '!=':
p[0] = p[1] != p[3]
elif p[2] == 'or':
p[0] = p[1] or p[3]
elif p[2] == 'and':
p[0] = p[1] and p[3]
elif p[2] == 'in':
p[0] = p[1] in p[3]
def p_not_expression(p):
    # Logical negation. The docstring is the grammar rule.
    """expression : NOT expression
    """
    p[0] = not p[2]
def p_expression_group(p):
    # Parenthesized expression: pass the inner value through unchanged.
    """expression : LPAREN expression RPAREN
    """
    p[0] = p[2]
def p_expression_scalar(p):
    # Scalar literal: the token functions above already converted the value.
    """expression : NUMBER
    | STRING
    | NULL
    | TRUE
    | FALSE
    """
    p[0] = p[1]
def p_expression_modelpath(p):
    # Resolve a "model:dotted.path" reference against context['models'].
    # A trailing '?' makes the lookup non-strict: a missing attribute/key
    # evaluates to None instead of propagating the error.
    """expression : MODELPATH
    """
    model_name, attribute = p[1].split(':', 1)
    try:
        model = context['models'][model_name]
    except KeyError:
        raise errors.UnknownModel("Unknown model '%s'" % model_name)
    strict = True
    if attribute.endswith('?'):
        strict = False
        attribute = attribute[:-1]  # drop the '?' marker before traversal

    def get_attribute_value(model, path):
        # Walk the dotted path one segment at a time via item access.
        value = model[path.pop(0)]
        return get_attribute_value(value, path) if len(path) else value
    try:
        p[0] = get_attribute_value(model, attribute.split('.'))
    except (KeyError, AttributeError) as e:
        if strict:
            raise e
        else:
            p[0] = None  # non-strict lookup: missing path resolves to None
def p_error(p):
    # ``p`` is None when the error occurs at end of input, hence getattr.
    raise errors.ParseError("Syntax error at '%s'" % getattr(p, 'value', ''))
# Build the parser at import time; suppress debug output and parser.out files.
parser = ply.yacc.yacc(debug=False, write_tables=False)
def evaluate(expression, models=None):
    """Parse *expression* and return its evaluated value.

    ``models`` is a mapping of model name -> object that MODELPATH tokens
    resolve against; it is installed into the shared context for the
    duration of this parse (an empty mapping when omitted).
    """
    context['models'] = {} if models is None else models
    return parser.parse(expression)
| 21.359281
| 78
| 0.565461
|
4a09f7d7e35316cfe9edbefc4f79b5a1b8b158c7
| 3,864
|
py
|
Python
|
AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_cookie.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 2,757
|
2018-04-28T21:41:36.000Z
|
2022-03-29T06:33:36.000Z
|
AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_cookie.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 20
|
2019-07-23T15:29:32.000Z
|
2022-01-21T12:53:04.000Z
|
AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_cookie.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 449
|
2018-05-09T05:54:05.000Z
|
2022-03-30T14:54:18.000Z
|
# Simple test suite for Cookie.py
from test.test_support import run_unittest, run_doctest, check_warnings
import unittest
import Cookie
class CookieTests(unittest.TestCase):
    """Tests for the (Python 2) ``Cookie`` module's SimpleCookie class."""
    # Currently this only tests SimpleCookie
    def test_basic(self):
        # Each case pins load(), repr() and output() for one cookie string.
        cases = [
            { 'data': 'chips=ahoy; vienna=finger',
              'dict': {'chips':'ahoy', 'vienna':'finger'},
              'repr': "<SimpleCookie: chips='ahoy' vienna='finger'>",
              'output': 'Set-Cookie: chips=ahoy\nSet-Cookie: vienna=finger',
            },
            { 'data': 'keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"',
              'dict': {'keebler' : 'E=mc2; L="Loves"; fudge=\012;'},
              'repr': '''<SimpleCookie: keebler='E=mc2; L="Loves"; fudge=\\n;'>''',
              'output': 'Set-Cookie: keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"',
            },
            # Check illegal cookies that have an '=' char in an unquoted value
            { 'data': 'keebler=E=mc2',
              'dict': {'keebler' : 'E=mc2'},
              'repr': "<SimpleCookie: keebler='E=mc2'>",
              'output': 'Set-Cookie: keebler=E=mc2',
            }
        ]
        for case in cases:
            C = Cookie.SimpleCookie()
            C.load(case['data'])
            self.assertEqual(repr(C), case['repr'])
            self.assertEqual(C.output(sep='\n'), case['output'])
            # iteritems: Python 2 dict iteration
            for k, v in sorted(case['dict'].iteritems()):
                self.assertEqual(C[k].value, v)
    def test_load(self):
        # Loading a cookie with morsel attributes (Version, Path).
        C = Cookie.SimpleCookie()
        C.load('Customer="WILE_E_COYOTE"; Version=1; Path=/acme')
        self.assertEqual(C['Customer'].value, 'WILE_E_COYOTE')
        self.assertEqual(C['Customer']['version'], '1')
        self.assertEqual(C['Customer']['path'], '/acme')
        # output() restricted to a subset of attributes.
        self.assertEqual(C.output(['path']),
                         'Set-Cookie: Customer="WILE_E_COYOTE"; Path=/acme')
        # js_output() wraps the cookie in a document.cookie assignment.
        self.assertEqual(C.js_output(), r"""
        <script type="text/javascript">
        <!-- begin hiding
        document.cookie = "Customer=\"WILE_E_COYOTE\"; Path=/acme; Version=1";
        // end hiding -->
        </script>
        """)
        self.assertEqual(C.js_output(['path']), r"""
        <script type="text/javascript">
        <!-- begin hiding
        document.cookie = "Customer=\"WILE_E_COYOTE\"; Path=/acme";
        // end hiding -->
        </script>
        """)
        # loading 'expires'
        C = Cookie.SimpleCookie()
        C.load('Customer="W"; expires=Wed, 01-Jan-2010 00:00:00 GMT')
        self.assertEqual(C['Customer']['expires'],
                         'Wed, 01-Jan-2010 00:00:00 GMT')
        C = Cookie.SimpleCookie()
        C.load('Customer="W"; expires=Wed, 01-Jan-98 00:00:00 GMT')
        self.assertEqual(C['Customer']['expires'],
                         'Wed, 01-Jan-98 00:00:00 GMT')
    def test_extended_encode(self):
        # Issue 9824: some browsers don't follow the standard; we now
        # encode , and ; to keep them from tripping up.
        C = Cookie.SimpleCookie()
        C['val'] = "some,funky;stuff"
        self.assertEqual(C.output(['val']),
            'Set-Cookie: val="some\\054funky\\073stuff"')
    def test_quoted_meta(self):
        # Try cookie with quoted meta-data
        C = Cookie.SimpleCookie()
        C.load('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
        self.assertEqual(C['Customer'].value, 'WILE_E_COYOTE')
        self.assertEqual(C['Customer']['version'], '1')
        self.assertEqual(C['Customer']['path'], '/acme')
def test_main():
    """Run the SimpleCookie unit tests, then the module doctests.

    Exercising the Cookie classes raises a DeprecationWarning about the
    class being insecure; that warning is expected and suppressed here.
    """
    run_unittest(CookieTests)
    expected_warning = ('.+Cookie class is insecure; do not use it',
                        DeprecationWarning)
    with check_warnings(expected_warning):
        run_doctest(Cookie)


if __name__ == '__main__':
    test_main()
| 39.030303
| 84
| 0.534679
|
4a09f88aa9d824501046708dc194bf9c00360ca0
| 3,712
|
py
|
Python
|
python/jittor/test/test_memory_profiler.py
|
Exusial/jittor
|
eca21d5bba5098bce4f492fa44908677b6e76588
|
[
"Apache-2.0"
] | 2,571
|
2020-03-20T03:38:35.000Z
|
2022-03-31T08:20:05.000Z
|
python/jittor/test/test_memory_profiler.py
|
Exusial/jittor
|
eca21d5bba5098bce4f492fa44908677b6e76588
|
[
"Apache-2.0"
] | 197
|
2020-03-20T04:11:47.000Z
|
2022-03-31T10:14:24.000Z
|
python/jittor/test/test_memory_profiler.py
|
Exusial/jittor
|
eca21d5bba5098bce4f492fa44908677b6e76588
|
[
"Apache-2.0"
] | 284
|
2020-03-20T03:53:15.000Z
|
2022-03-28T07:20:32.000Z
|
# ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers:
# Guoye Yang <498731903@qq.com>
# Dun Liang <randonlang@gmail.com>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import jittor as jt
from jittor import nn, Module
from jittor.models import resnet
import numpy as np
import sys, os
import random
import math
import unittest
from jittor.test.test_reorder_tuner import simple_parser
from jittor.test.test_log import find_log_with_re
from jittor.dataset.mnist import MNIST
import jittor.transform as trans
import time
skip_this_test = False
class MnistNet(Module):
    """ResNet-18 backbone followed by a 1000 -> 10 linear classification head."""

    def __init__(self):
        self.model = resnet.Resnet18()
        self.layer = nn.Linear(1000, 10)

    def execute(self, x):
        features = self.model(x)
        logits = self.layer(features)
        return logits
@unittest.skipIf(skip_this_test, "skip_this_test")
class TestMemoryProfiler(unittest.TestCase):
    """Exercises jittor's memory profiler on a ResNet-18 MNIST training loop."""

    @classmethod
    def setUpClass(self):
        # hyper-parameters
        self.batch_size = 100
        self.weight_decay = 0.0001
        self.momentum = 0.9
        self.learning_rate = 0.1
        # mnist dataset
        self.train_loader = MNIST(train=True, transform=trans.Resize(224)) \
            .set_attrs(batch_size=self.batch_size, shuffle=True)
        self.train_loader.num_workers = 4

    # setup random seed
    def setup_seed(self, seed):
        np.random.seed(seed)
        random.seed(seed)
        jt.seed(seed)

    @unittest.skipIf(not jt.has_cuda, "Cuda not found")
    @jt.flag_scope(use_cuda=1, use_stat_allocator=1, trace_py_var=3, profile_memory_enable=1)
    def test_resnet(self):
        self.setup_seed(1)
        loss_list=[]
        acc_list=[]
        mnist_net = MnistNet()
        # `prev` is shared with the async fetch callback below via `global`.
        global prev
        prev = time.time()
        SGD = nn.SGD(mnist_net.parameters(), self.learning_rate, self.momentum, self.weight_decay)
        iters = 10
        for batch_idx, (data, target) in enumerate(self.train_loader):
            if (batch_idx > iters):
                break
            jt.display_memory_info()
            output = mnist_net(data)
            loss = nn.cross_entropy_loss(output, target)
            SGD.step(loss)
            # Callback runs asynchronously once the fetched values are ready.
            def callback(batch_idx, loss, output, target):
                global prev
                pred = np.argmax(output, axis=1)
                acc = np.mean(target==pred)
                loss_list.append(loss[0])
                acc_list.append(acc)
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAcc: {:.6f} \tTime:{:.3f}'
                      .format(0, batch_idx, iters,1. * batch_idx / 6.0, loss[0], acc, time.time()-prev))
            jt.fetch(batch_idx, loss, output, target, callback)
        jt.sync_all(True)
        jt.display_max_memory_info()
        # Check the shape of the textual treemap report for both build modes.
        _, out = jt.get_max_memory_treemap()
        out_ = out.split('\n')
        assert(out_[0] == 'root()')
        assert(out_[3].endswith('(_run_module_as_main)'))
        assert(out_[7].endswith('(_run_code)'))

        _, out = jt.get_max_memory_treemap(build_by=1)
        out_ = out.split('\n')
        assert(out_[0] == 'root()')
        assert(out_[4].endswith('(_run_module_as_main)'))
        assert(out_[8].endswith('(_run_code)'))

    def test_sample(self):
        # Smoke test: profiling a single forward pass must not crash.
        net = jt.models.resnet18()
        with jt.flag_scope(trace_py_var=3, profile_memory_enable=1):
            imgs = jt.randn((1,3,224,224))
            net(imgs).sync()
            jt.get_max_memory_treemap()
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 34.691589
| 102
| 0.592403
|
4a09f8abde0dff4bdf15dbd3512b26c3e3a4ee9f
| 569
|
py
|
Python
|
talk_code.py
|
ADGEfficiency/energy-py-talk
|
c0a52bfaf64fac0ec9113736bd3ff9ba35dca3a5
|
[
"MIT"
] | null | null | null |
talk_code.py
|
ADGEfficiency/energy-py-talk
|
c0a52bfaf64fac0ec9113736bd3ff9ba35dca3a5
|
[
"MIT"
] | null | null | null |
talk_code.py
|
ADGEfficiency/energy-py-talk
|
c0a52bfaf64fac0ec9113736bd3ff9ba35dca3a5
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import energy_py
with tf.Session() as sess:
    # Battery environment over the bundled example dataset,
    # with 288-step episodes.
    env = energy_py.make_env(
        env_id='battery',
        episode_length=288,
        dataset='example',
    )

    # DQN agent bound to the TensorFlow session.
    agent = energy_py.make_agent(
        sess=sess,
        agent_id='dqn',
        env=env,
        total_steps=1000000,
    )

    # Standard act -> step -> learn interaction loop for one episode.
    obs = env.reset()
    done = False
    while not done:
        action = agent.act(obs)
        next_obs, reward, done, info = env.step(action)
        training_info = agent.learn()
        obs = next_obs
| 21.074074
| 63
| 0.602812
|
4a09f8c7dba02a05cde4d531c8346b01006dc6c2
| 5,775
|
py
|
Python
|
PythonClient/agents/CAL_agent/perception/cal_network.py
|
NextTechLab/Conditional-Affordance-Learning
|
9460901717d3b1b0963bb64c3218f3d3d54695b6
|
[
"MIT"
] | 9
|
2019-06-28T04:55:34.000Z
|
2021-04-08T20:41:23.000Z
|
PythonClient/agents/CAL_agent/perception/cal_network.py
|
indranil1997/Conditional-Affordance-Learning
|
9460901717d3b1b0963bb64c3218f3d3d54695b6
|
[
"MIT"
] | 1
|
2019-07-12T01:33:20.000Z
|
2019-07-12T01:33:20.000Z
|
PythonClient/agents/CAL_agent/perception/cal_network.py
|
indranil1997/Conditional-Affordance-Learning
|
9460901717d3b1b0963bb64c3218f3d3d54695b6
|
[
"MIT"
] | 5
|
2019-06-28T04:55:43.000Z
|
2020-11-28T03:48:42.000Z
|
import numpy as np
import os, sys
from PIL import Image
import json
import torch
from torchvision import transforms
from torch.autograd import Variable
from .net import get_model
#import imgaug as ia
#import imgaug.augmenters as iaa
# Paths to the serialized perception model; resolved relative to the
# process working directory.
BASE_PATH = os.path.abspath(os.path.join('.', '.'))
MODEL_PATH = BASE_PATH + "/agents/CAL_agent/perception/model_data/models/"

# classes of the categorical affordances
CAT_DICT = {
    'red_light': [False, True],
    'hazard_stop': [False, True],
    'speed_sign': [-1, 30, 60, 90],
}

# normalizing constants of the continuous affordances
REG_DICT = {
    'center_distance': 1.6511945645500001,
    'veh_distance': 50.0,
    'relative_angle': 0.759452569632
}
def get_augmentations():
    """Build the imgaug augmentation pipeline used for training images.

    NOTE(review): the ``import imgaug as ia`` / ``import imgaug.augmenters
    as iaa`` lines at the top of this module are commented out, so calling
    this function currently raises NameError on ``iaa`` — confirm whether
    the imports should be restored or this function removed.
    """
    # applies the given augmenter in 50% of all cases,
    sometimes = lambda aug: iaa.Sometimes(0.5, aug)

    # Define our sequence of augmentation steps that will be applied to every image
    seq = iaa.Sequential([
        # execute 0 to 5 of the following (less important) augmenters per image
        iaa.SomeOf((0, 5),
            [
                iaa.OneOf([
                    iaa.GaussianBlur((0, 3.0)),
                    iaa.AverageBlur(k=(2, 7)),
                    iaa.MedianBlur(k=(3, 11)),
                ]),
                iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),
                iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),
                # search either for all edges or for directed edges,
                # blend the result with the original image using a blobby mask
                iaa.SimplexNoiseAlpha(iaa.OneOf([
                    iaa.EdgeDetect(alpha=(0.5, 1.0)),
                    iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),
                ])),
                iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),
                iaa.OneOf([
                    iaa.Dropout((0.01, 0.1), per_channel=0.5), # randomly remove up to 10% of the pixels
                    iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                ]),
                iaa.Add((-10, 10), per_channel=0.5), # change brightness of images (by -10 to 10 of original value)
                iaa.AddToHueAndSaturation((-20, 20)), # change hue and saturation
                # either change the brightness of the whole image (sometimes
                # per channel) or change the brightness of subareas
                iaa.OneOf([
                    iaa.Multiply((0.5, 1.5), per_channel=0.5),
                    iaa.FrequencyNoiseAlpha(
                        exponent=(-4, 0),
                        first=iaa.Multiply((0.5, 1.5), per_channel=True),
                        second=iaa.ContrastNormalization((0.5, 2.0))
                    )
                ]),
                iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5), # improve or worsen the contrast
                sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths)
            ],
            random_order=True
        )
    ],
    random_order=True
    )
    return seq
### helper functions
def load_json(path):
    """Read and deserialize the JSON document stored at ``path + '.json'``."""
    with open(path + '.json', 'r') as handle:
        return json.load(handle)
def to_np(t):
    """Return a numpy copy of a torch tensor, moving it to host memory first."""
    host_tensor = t.data.cpu()
    return np.array(host_tensor)
def softmax(x):
    """Numerically stable softmax over all elements of ``x``.

    Shifting by the max before exponentiating avoids overflow; the result
    sums to 1.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / shifted.sum()
### data transforms
class Rescale(object):
    """Callable transform that resizes a PIL image by a constant scale factor."""

    def __init__(self, scalar):
        self.scalar = scalar

    def __call__(self, im):
        # im.size is (width, height); Resize expects (height, width).
        new_w, new_h = (int(side * self.scalar) for side in im.size)
        return transforms.Resize((new_h, new_w))(im)
class Crop(object):
    """Callable transform that crops an image to a fixed 4-tuple box
    (left, upper, right, lower)."""

    def __init__(self, box):
        assert len(box) == 4
        self.box = box

    def __call__(self, im):
        return im.crop(self.box)
def get_transform():
    """Inference-time preprocessing: crop off the top 120 image rows,
    downscale to 40%, convert to a tensor and ImageNet-normalize."""
    pipeline = [
        Crop((0, 120, 800, 480)),
        Rescale(0.4),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    return transforms.Compose(pipeline)
### network
class CAL_network(object):
    """Wraps the trained affordance-prediction model: loads weights from
    MODEL_PATH and turns image sequences into affordance predictions."""

    def __init__(self, name='gru'):
        # `name` is currently unused — TODO confirm whether it should
        # select a model variant.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self._transform = get_transform()

        # get the model
        params = load_json(MODEL_PATH + "params")
        self.model, _ = get_model(params)
        self.model.load_state_dict(torch.load(MODEL_PATH + "test.pth"))
        self.model.eval().to(self.device);

    def predict(self, sequence, direction):
        """Run the model on a list of image tensors plus a direction flag and
        return a dict of post-processed affordances."""
        print('cal_network sequence',len(sequence))
        #print(torch.cat(sequence).size())
        inputs = {
            'sequence': torch.cat(sequence).unsqueeze(0).to(self.device),
            'direction': torch.Tensor([direction]).to(dtype=torch.int),
        }
        preds = self.model(inputs)
        # Move every prediction back to host memory as numpy arrays.
        preds = {k: to_np(v) for k,v in preds.items()}
        #print("before cat",preds)
        out = {}
        out.update({k: self.cat_process(k, preds[k]) for k in CAT_DICT})
        out.update({k: self.reg_process(k, preds[k]) for k in REG_DICT})
        return out

    def preprocess(self, arr):
        # Numpy image -> normalized tensor with a leading batch dimension.
        im = self._transform(Image.fromarray(arr))
        return im.unsqueeze(0)

    @staticmethod
    def cat_process(cl, arr):
        # Categorical affordance: softmax logits, return (class, probability).
        arr=softmax(arr)
        max_idx = np.argmax(arr)
        pred_class = CAT_DICT[cl][max_idx]
        pred_prob = np.max(arr)
        print("probability:",pred_prob)
        return (pred_class, pred_prob)

    @staticmethod
    def reg_process(cl, arr):
        # Continuous affordance: clamp to [-1, 1], then undo normalization.
        arr = np.clip(arr, -1, 1)
        return arr*REG_DICT[cl]
| 32.8125
| 140
| 0.559654
|
4a09f9b6cc6fc44a6d3ca36465bcb42dc2af344b
| 1,114
|
py
|
Python
|
components/contrib/_converters/ApacheParquet/to_TSV/component.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | 2,860
|
2018-05-24T04:55:01.000Z
|
2022-03-31T13:49:56.000Z
|
components/contrib/_converters/ApacheParquet/to_TSV/component.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | 7,331
|
2018-05-16T09:03:26.000Z
|
2022-03-31T23:22:04.000Z
|
components/contrib/_converters/ApacheParquet/to_TSV/component.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | 1,359
|
2018-05-15T11:05:41.000Z
|
2022-03-31T09:42:09.000Z
|
from kfp.components import InputPath, OutputPath, create_component_from_func
def convert_apache_parquet_to_tsv(
    data_path: InputPath('ApacheParquet'),
    output_data_path: OutputPath('TSV'),
):
    '''Converts Apache Parquet to TSV.

    [Apache Parquet](https://parquet.apache.org/)

    Annotations:
        author: Alexey Volkov <alexey.volkov@ark-kun.com>
    '''
    from pyarrow import parquet

    # Materialize the Parquet data as a pandas DataFrame, then dump it
    # tab-separated without the index column.
    frame = parquet.read_pandas(data_path).to_pandas()
    frame.to_csv(
        output_data_path,
        sep='\t',
        index=False,
    )
if __name__ == '__main__':
    # When executed as a script, compile the function above into a reusable
    # KFP component definition (component.yaml).
    convert_apache_parquet_to_tsv_op = create_component_from_func(
        convert_apache_parquet_to_tsv,
        output_component_file='component.yaml',
        base_image='python:3.7',
        packages_to_install=['pyarrow==0.17.1', 'pandas==1.0.3'],
        annotations={
            "author": "Alexey Volkov <alexey.volkov@ark-kun.com>",
            "canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/ApacheParquet/to_TSV/component.yaml",
        },
    )
| 31.828571
| 164
| 0.684022
|
4a09f9bb6eed38db667b52080f5e4bed7f23377f
| 18,719
|
py
|
Python
|
pyscf/dft/test/test_xcfun.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 1
|
2017-03-03T02:12:08.000Z
|
2017-03-03T02:12:08.000Z
|
pyscf/dft/test/test_xcfun.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | null | null | null |
pyscf/dft/test/test_xcfun.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy
from pyscf import gto, scf
from pyscf import dft
from pyscf import lib
# Module-level fixtures shared by every test below: a 4-H molecule, a
# pre-converged density matrix, an RKS grid, and the density on that grid.
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = 'h 0 0 0; h 1 .5 0; h 0 4 1; h 1 0 .2'
mol.basis = 'aug-ccpvdz'
mol.build()

#dm = scf.RHF(mol).run(conv_tol=1e-14).make_rdm1()
# Density matrix loaded from disk to keep the test setup fast.
dm = numpy.load(os.path.realpath(os.path.join(__file__, '..', 'dm_h4.npy')))

mf = dft.RKS(mol)
mf.grids.atom_grid = {"H": (50, 110)}
mf.prune = None
mf.grids.build(with_non0tab=False)
nao = mol.nao_nr()
ao = dft.numint.eval_ao(mol, mf.grids.coords, deriv=1)
rho = dft.numint.eval_rho(mol, ao, dm, xctype='GGA')
def tearDownModule():
    # Drop the module-level fixtures so they can be garbage-collected
    # once the test run finishes.
    global mol, mf, ao, rho
    del mol, mf, ao, rho
def finger(a):
    """Reduce an array to a deterministic scalar fingerprint.

    The flattened entries are dotted with cosine weights, so reordering
    or changing any element changes the result.
    """
    weights = numpy.cos(numpy.arange(a.size))
    return weights @ a.ravel()
class KnownValues(unittest.TestCase):
    """Pins down xcfun behavior: XC-code parsing, reference energies and
    derivatives, and agreement with the libxc backend."""

    def test_parse_xc(self):
        hyb, fn_facs = dft.xcfun.parse_xc('.5*HF+.5*B3LYP,VWN*.5')
        self.assertAlmostEqual(hyb[0], .6, 12)
        self.assertEqual([x[0] for x in fn_facs], [0,6,16,3])
        self.assertTrue(numpy.allclose([x[1] for x in fn_facs],
                                       (0.04, 0.36, 0.405, 0.595)))
        hyb, fn_facs = dft.xcfun.parse_xc('HF,')
        self.assertEqual(hyb[0], 1)
        self.assertEqual(fn_facs, [])

        hyb, fn_facs = dft.libxc.parse_xc('B88 - SLATER')
        self.assertEqual(fn_facs, [(106, 1), (1, -1)])
        hyb, fn_facs = dft.libxc.parse_xc('B88 -SLATER*.5')
        self.assertEqual(fn_facs, [(106, 1), (1, -0.5)])

        hyb, fn_facs = dft.xcfun.parse_xc('0.5*B3LYP+0.25*B3LYP')
        self.assertTrue(numpy.allclose(hyb, [.15, 0, 0]))
        hyb = dft.libxc.hybrid_coeff('0.5*B3LYP+0.25*B3LYP')
        self.assertAlmostEqual(hyb, .15, 12)

        # Range-separated hybrids: hyb is (alpha+beta, alpha..., omega).
        hyb, fn_facs = dft.xcfun.parse_xc('CAM_B3LYP')
        self.assertTrue(numpy.allclose(hyb, [0.19, 0.65, 0.33]))

        hyb, fn_facs = dft.xcfun.parse_xc('0.6*CAM_B3LYP+0.4*B3P86')
        self.assertTrue(numpy.allclose(hyb, [.08+0.19*.6, 0.65*.6, 0.33]))
        self.assertTrue(numpy.allclose(fn_facs,
            [(9, 0.6), (3, 0.19), (16, 0.486), (0, 0.032), (6, 0.288), (46, 0.324)]))
        rsh = dft.xcfun.rsh_coeff('0.6*CAM_B3LYP+0.4*B3P86')
        self.assertTrue(numpy.allclose(rsh, (0.33, 0.39, -0.196)))

        hyb, fn_facs = dft.xcfun.parse_xc('0.4*B3P86+0.6*CAM_B3LYP')
        self.assertTrue(numpy.allclose(hyb, [.08+0.19*.6, 0.65*.6, 0.33]))
        self.assertTrue(numpy.allclose(fn_facs,
            [(0, 0.032), (6, 0.288), (46, 0.324), (3, 0.19), (9, 0.6), (16, 0.486)]))
        rsh = dft.xcfun.rsh_coeff('0.4*B3P86+0.6*CAM_B3LYP')
        self.assertTrue(numpy.allclose(rsh, (0.33, 0.39, -0.196)))

        # SR/LR HF components can appear in any order.
        hyb, fn_facs = dft.xcfun.parse_xc('0.5*SR-HF(0.3) + .8*HF + .22*LR_HF')
        self.assertEqual(hyb, [1.3, 1.02, 0.3])
        hyb, fn_facs = dft.xcfun.parse_xc('0.5*SR-HF + .22*LR_HF(0.3) + .8*HF')
        self.assertEqual(hyb, [1.3, 1.02, 0.3])
        hyb, fn_facs = dft.xcfun.parse_xc('0.5*SR-HF + .8*HF + .22*LR_HF(0.3)')
        self.assertEqual(hyb, [1.3, 1.02, 0.3])
        hyb, fn_facs = dft.xcfun.parse_xc('0.5*RSH(2.04;0.56;0.3) + 0.5*BP86')
        self.assertEqual(hyb, [1.3, 1.02, 0.3])
        self.assertEqual(fn_facs, [(6, 0.5), (46, 0.5)])

        # Conflicting omega values must be rejected.
        self.assertRaises(ValueError, dft.xcfun.parse_xc, 'SR_HF(0.3) + LR_HF(.5)')
        self.assertRaises(ValueError, dft.xcfun.parse_xc, 'LR-HF(0.3) + SR-HF(.5)')

        hyb = dft.xcfun.hybrid_coeff('M05')
        self.assertAlmostEqual(hyb, 0.28, 9)

        hyb, fn_facs = dft.xcfun.parse_xc('APBE,')
        self.assertEqual(fn_facs[0][0], 58)

        hyb, fn_facs = dft.xcfun.parse_xc('TF,')
        self.assertEqual(fn_facs, [(24, 1)])

        # Equivalent ways of spelling the same XC combination.
        ref = [(0, 1), (3, 1)]
        self.assertEqual(dft.xcfun.parse_xc_name('LDA,VWN'), (0,3))
        self.assertEqual(dft.xcfun.parse_xc(('LDA','VWN'))[1], ref)
        self.assertEqual(dft.xcfun.parse_xc((0, 3))[1], ref)
        self.assertEqual(dft.xcfun.parse_xc('0, 3')[1], ref)
        self.assertEqual(dft.xcfun.parse_xc(3)[1], [(3,1)])

        #self.assertEqual(dft.xcfun.parse_xc('M11-L')[1], [(226,1),(75,1)])
        #self.assertEqual(dft.xcfun.parse_xc('M11L' )[1], [(226,1),(75,1)])
        #self.assertEqual(dft.xcfun.parse_xc('M11-L,M11L' )[1], [(226,1),(75,1)])
        #self.assertEqual(dft.xcfun.parse_xc('M11_L,M11-L')[1], [(226,1),(75,1)])
        #self.assertEqual(dft.xcfun.parse_xc('M11L,M11_L' )[1], [(226,1),(75,1)])

        #self.assertEqual(dft.xcfun.parse_xc('Xpbe,')[1], [(123,1)])
        #self.assertEqual(dft.xcfun.parse_xc('pbe,' )[1], [(101,1)])

        # Classification predicates.
        self.assertTrue (dft.xcfun.is_meta_gga('m05'))
        self.assertFalse(dft.xcfun.is_meta_gga('pbe0'))
        self.assertFalse(dft.xcfun.is_meta_gga('tf,'))
        self.assertFalse(dft.xcfun.is_meta_gga('vv10'))
        self.assertTrue (dft.xcfun.is_gga('PBE0'))
        self.assertFalse(dft.xcfun.is_gga('m05'))
        self.assertFalse(dft.xcfun.is_gga('tf,'))
        self.assertTrue (dft.xcfun.is_lda('tf,'))
        self.assertFalse(dft.xcfun.is_lda('vv10'))
        self.assertTrue (dft.xcfun.is_hybrid_xc('m05'))
        self.assertTrue (dft.xcfun.is_hybrid_xc('pbe0,'))
        self.assertFalse(dft.xcfun.is_hybrid_xc('m05,'))
        self.assertFalse(dft.xcfun.is_hybrid_xc('vv10'))
        self.assertTrue (dft.xcfun.is_hybrid_xc(('b3lyp',4,'vv10')))

    def test_nlc_coeff(self):
        self.assertEqual(dft.xcfun.nlc_coeff('vv10'), [5.9, 0.0093])

    def test_lda(self):
        # LDA energies/derivatives up to 3rd order, restricted vs unrestricted.
        e,v,f,k = dft.xcfun.eval_xc('lda,', rho[0][:3], deriv=3)
        self.assertAlmostEqual(lib.finger(e)   , -0.4720562542635522, 8)
        self.assertAlmostEqual(lib.finger(v[0]), -0.6294083390180697, 8)
        self.assertAlmostEqual(lib.finger(f[0]), -1.1414693830969338, 8)
        self.assertAlmostEqual(lib.finger(k[0]),  4.1402447248393921, 8)

        e,v,f,k = dft.xcfun.eval_xc('lda,', [rho[0][:3]*.5]*2, spin=1, deriv=3)
        self.assertAlmostEqual(lib.finger(e)   , -0.4720562542635522, 8)
        self.assertAlmostEqual(lib.finger(v[0].T[0]), -0.6294083390180697, 8)
        self.assertAlmostEqual(lib.finger(v[0].T[1]), -0.6294083390180697, 8)
        self.assertAlmostEqual(lib.finger(f[0].T[0]), -1.1414693830969338*2, 8)
        self.assertAlmostEqual(lib.finger(f[0].T[2]), -1.1414693830969338*2, 8)
        self.assertAlmostEqual(lib.finger(k[0].T[0]),  4.1402447248393921*4, 7)
        self.assertAlmostEqual(lib.finger(k[0].T[3]),  4.1402447248393921*4, 7)

    def test_lyp(self):
        e,v,f = dft.xcfun.eval_xc(',LYP', rho, deriv=3)[:3]
        self.assertAlmostEqual(numpy.dot(rho[0],e), -62.114576182676615, 8)
        self.assertAlmostEqual(numpy.dot(rho[0],v[0]),-81.771670866308455, 8)
        self.assertAlmostEqual(numpy.dot(rho[0],v[1]), 27.485383255125743, 8)
        self.assertAlmostEqual(numpy.dot(rho[0],f[0]), 186.823806251777, 7)
        self.assertAlmostEqual(numpy.dot(rho[0],f[1]), -3391.2428894571085, 6)
        self.assertAlmostEqual(numpy.dot(rho[0],f[2]), 0, 9)

    def test_beckex(self):
        rho =(numpy.array([1.    , 1., 0., 0.]).reshape(-1,1),
              numpy.array([    .8, 1., 0., 0.]).reshape(-1,1))
        e,v,f = dft.xcfun.eval_xc('b88,', rho, spin=1, deriv=3)[:3]
        self.assertAlmostEqual(lib.finger(e)   ,-0.9061911523772116   , 9)
        self.assertAlmostEqual(lib.finger(v[0]),-1.8531364353196298   , 9)
        self.assertAlmostEqual(lib.finger(v[1]),-0.0018308066137967724, 9)
        self.assertAlmostEqual(lib.finger(f[0]),-0.21602284426026866  , 9)
        self.assertAlmostEqual(lib.finger(f[1]), 0.0072053520662545617, 9)
        self.assertAlmostEqual(lib.finger(f[2]), 0.0002275350850255538, 9)

    def test_m05x(self):
        rho =(numpy.array([1., 1., 0., 0., 0., 0.165 ]).reshape(-1,1),
              numpy.array([.8, 1., 0., 0., 0., 0.1050]).reshape(-1,1))
        test_ref = numpy.array([-1.57876583, -2.12127045,-2.11264351,-0.00315462,
                                0.00000000, -0.00444560, 3.45640232, 4.4349756])
        exc, vxc, fxc, kxc = dft.xcfun.eval_xc('m05,', rho, 1, deriv=3)
        self.assertAlmostEqual(float(exc)*1.8, test_ref[0], 5)
        self.assertAlmostEqual(abs(vxc[0]-test_ref[1:3]).max(), 0, 6)
        self.assertAlmostEqual(abs(vxc[1]-test_ref[3:6]).max(), 0, 6)
        self.assertAlmostEqual(abs(vxc[3]-test_ref[6:8]).max(), 0, 5)

        exc, vxc, fxc, kxc = dft.xcfun.eval_xc('m05,', rho[0], 0, deriv=3)
        self.assertAlmostEqual(float(exc), -0.5746231988116002, 5)
        self.assertAlmostEqual(float(vxc[0]), -0.8806121005703862, 6)
        self.assertAlmostEqual(float(vxc[1]), -0.0032300155406846756, 7)
        self.assertAlmostEqual(float(vxc[3]), 0.4474953100487698, 5)

    def test_camb3lyp(self):
        rho = numpy.array([1., 1., 0.1, 0.1]).reshape(-1,1)
        exc, vxc, fxc, kxc = dft.xcfun.eval_xc('camb3lyp', rho, 0, deriv=1)
        # FIXME, xcfun and libxc do not agree on camb3lyp
        #        self.assertAlmostEqual(float(exc), -0.5752559666317147, 5)
        #        self.assertAlmostEqual(float(vxc[0]), -0.7709812578936763, 5)
        #        self.assertAlmostEqual(float(vxc[1]), -0.0029862221286189846, 7)
        self.assertEqual(dft.xcfun.rsh_coeff('camb3lyp'), (0.33, 0.65, -0.46))

    def test_define_xc(self):
        def eval_xc(xc_code, rho, spin=0, relativity=0, deriv=1, verbose=None):
            exc = vxc = fxc = kxc = None
            return exc, vxc, fxc, kxc

        mf = dft.RKS(mol)
        ni = dft.xcfun.define_xc(mf._numint, eval_xc, 'GGA', hyb=0.2)
        ni = dft.xcfun.define_xc(mf._numint, 'b3lyp+vwn', 'GGA', hyb=0.2)
        self.assertRaises(ValueError, dft.xcfun.define_xc, mf._numint, 0.1)

    def test_vs_libxc_rks(self):
        # Cross-check the xcfun backend against libxc on the same density,
        # restricted case. The *_place arguments are the per-quantity
        # agreement thresholds (decimal places).
        ao = dft.numint.eval_ao(mol, mf.grids.coords[:200], deriv=2)
        rho = dft.numint.eval_rho(mol, ao, dm, xctype='MGGA')
        rhoa = rho[:,:200]
        def check(xc_code, deriv=3, e_place=9, v_place=9, f_place=9, k_place=9):
            exc0, vxc0, fxc0, kxc0 = dft.libxc.eval_xc(xc_code, rhoa, 0, deriv=deriv)
            exc1, vxc1, fxc1, kxc1 = dft.xcfun.eval_xc(xc_code, rhoa, 0, deriv=deriv)
            self.assertAlmostEqual(abs(exc0-exc1).max(), 0, e_place)
            if deriv > 0:
                for v0, v1 in zip(vxc0, vxc1):
                    if v0 is not None and v1 is not None:
                        self.assertAlmostEqual(abs(v0-v1).max(), 0, v_place)
            if deriv > 1:
                for f0, f1 in zip(fxc0, fxc1):
                    if f0 is not None and f1 is not None:
                        self.assertAlmostEqual(abs(f0-f1).max(), 0, f_place)
            if deriv > 2:
                for k0, k1 in zip(kxc0, kxc1):
                    if k0 is not None and k1 is not None:
                        self.assertAlmostEqual(abs(k0-k1).max(), 0, k_place)

        check('lda,')
        check('pw86,')
        check('pbe,', e_place=6, v_place=6, f_place=5, k_place=4)
        #?check('becke,')
        #?check('br,')
        #?check('LDAERF,')
        check('optx,')
        check('OPTXCORR,')
        check('RPBE,')
        check('TF,'    )
        check('PW91,'  , e_place=6, v_place=4, f_place=2, k_place=-1)
        check('m05,'   , deriv=1, e_place=6, v_place=6)
        check('m052x,', deriv=1, e_place=6, v_place=6)
        check('m06,'   , deriv=1, e_place=6, v_place=6)
        check('m062x,', deriv=1, e_place=6, v_place=6)
        check('m06l,'  , deriv=1, e_place=6, v_place=6)
        check('TPSS,'  , k_place=-4)
        #?check('REVTPSS,', deriv=1)  # xcfun crash
        check('APBE,')
        check('BLOC,'  , k_place=-5)
        check('PBEINT,', e_place=7, v_place=6, f_place=5, k_place=4)

        check(',vwn3')
        check(',vwn5')
        check(',pbe'   , deriv=2)
        #?check(',br')
        #?check(',LDAERF')
        check(',lyp'   , deriv=2)
        check(',SPBE'  , deriv=2, e_place=1, v_place=1, f_place=0)
        check(',PW91'  , deriv=2, f_place=3)
        check(',m052x', deriv=1)
        check(',m05'   , deriv=1)
        check(',m06'   , deriv=1)
        check(',m062x', deriv=1)
        check(',m06l'  , deriv=1)
        check(',TPSS'  , deriv=1, v_place=1)
        check(',REVTPSS', deriv=1, e_place=2, v_place=1)
        check(',p86'   , deriv=2, e_place=5, v_place=5, f_place=3)
        check(',APBE'  , deriv=2)
        check(',PBEINT' , deriv=1)
        check(',TPSSLOC', deriv=1, e_place=1, v_place=0)

        #?check('br')
        check('revpbe', deriv=2, e_place=6, v_place=6, f_place=5)
        check('b97'   , deriv=2, e_place=6, v_place=5, f_place=3)
        #?check('b97_1')
        #?check('b97_2')
        check('SVWN')
        check('BLYP'  , deriv=2)
        check('BP86'  , deriv=2, e_place=5, v_place=5, f_place=3)
        check('OLYP'  , deriv=2)
        check('KT1'   , deriv=1)
        check('KT2'   , deriv=1)
        #?check('KT3')
        check('PBE0'  , deriv=2, e_place=6, v_place=6, f_place=5)
        check('B3P86' , deriv=2, e_place=5, v_place=5, f_place=3)
        check('B3P86G' , deriv=2, e_place=5, v_place=5, f_place=3)
        check('B3PW91' , deriv=2, f_place=4)
        check('B3PW91G', deriv=2, e_place=2, v_place=2, f_place=2)
        check('B3LYP' , deriv=2)
        check('B3LYP5' , deriv=2)
        check('B3LYPG' , deriv=2)
        check('O3LYP' , deriv=2)
        check('X3LYP' , deriv=2, e_place=7, v_place=5, f_place=2)
        check('CAMB3LYP', deriv=1)
        check('B97_1' , deriv=2, e_place=6, v_place=5, f_place=3)
        check('B97_2' , deriv=2, e_place=6, v_place=5, f_place=3)
        check('TPSSH' , deriv=1, v_place=1)

    def test_vs_libxc_uks(self):
        # Same cross-check as above, unrestricted (two spin densities).
        ao = dft.numint.eval_ao(mol, mf.grids.coords[:400], deriv=2)
        rho = dft.numint.eval_rho(mol, ao, dm, xctype='MGGA')
        rhoa = rho[:,:200]
        rhob = rhoa + rho[:,200:400]
        def check(xc_code, deriv=3, e_place=9, v_place=9, f_place=9, k_place=9):
            exc0, vxc0, fxc0, kxc0 = dft.libxc.eval_xc(xc_code, (rhoa, rhob), 1, deriv=deriv)
            exc1, vxc1, fxc1, kxc1 = dft.xcfun.eval_xc(xc_code, (rhoa, rhob), 1, deriv=deriv)
            self.assertAlmostEqual(abs(exc0-exc1).max(), 0, e_place)
            if deriv > 0:
                for v0, v1 in zip(vxc0, vxc1):
                    if v0 is not None and v1 is not None:
                        self.assertAlmostEqual(abs(v0-v1).max(), 0, v_place)
            if deriv > 1:
                for f0, f1 in zip(fxc0, fxc1):
                    if f0 is not None and f1 is not None:
                        self.assertAlmostEqual(abs(f0-f1).max(), 0, f_place)
            if deriv > 2 and kxc0 is not None:
                for k0, k1 in zip(kxc0, kxc1):
                    if k0 is not None and k1 is not None:
                        self.assertAlmostEqual(abs(k0-k1).max(), 0, k_place)

        check('lda,')
        check('pw86,')
        check('pbe,', e_place=6, v_place=6, f_place=5, k_place=4)
        #?check('becke,')
        #?check('br,')
        #?check('LDAERF,')
        check('optx,')
        check('OPTXCORR,')
        check('RPBE,')
        check('TF,'    , e_place=0, v_place=-1, f_place=-2, k_place=-2)
        check('PW91,'  , e_place=6, v_place=4, f_place=2, k_place=-1)
        check('m05,'   , deriv=1, e_place=6, v_place=6)
        check('m052x,', deriv=1, e_place=6, v_place=6)
        check('m06,'   , deriv=1, e_place=6, v_place=6)
        check('m062x,', deriv=1, e_place=6, v_place=6)
        check('m06l,'  , deriv=1, e_place=6, v_place=6)
        check('TPSS,'  , k_place=-4)
        #?check('REVTPSS,', deriv=1)  # libxc crash
        check('APBE,')
        check('BLOC,'  , k_place=-5)
        check('PBEINT,', e_place=7, v_place=6, f_place=5, k_place=4)

        check(',vwn3', e_place=2, v_place=1, f_place=1, k_place=0)
        check(',vwn5')
        check(',pbe'   , deriv=2)
        #?check(',br')
        #?check(',LDAERF')
        check(',lyp'   , deriv=2)
        check(',SPBE'  , deriv=2, e_place=1, v_place=1, f_place=0)
        check(',PW91'  , deriv=2, f_place=3)
        check(',m052x', deriv=1)
        check(',m05'   , deriv=1)
        check(',m06'   , deriv=1)
        check(',m062x', deriv=1)
        check(',m06l'  , deriv=1)
        check(',TPSS'  , deriv=1, v_place=1)
        check(',REVTPSS', deriv=1, e_place=2, v_place=1)
        check(',p86'   , deriv=2, e_place=5, v_place=5, f_place=3)
        check(',APBE'  , deriv=2)
        check(',PBEINT' , deriv=1)
        check(',TPSSLOC', deriv=1, e_place=1, v_place=0)

        #?check('br')
        check('revpbe', deriv=2, e_place=6, v_place=6, f_place=5)
        check('b97'   , deriv=2, e_place=6, v_place=5, f_place=3)
        #?check('b97_1')
        #?check('b97_2')
        check('SVWN')
        check('BLYP'  , deriv=2)
        check('BP86'  , deriv=2, e_place=5, v_place=5, f_place=3)
        check('OLYP'  , deriv=2)
        check('KT1'   , deriv=1)
        check('KT2'   , deriv=1)
        #?check('KT3')
        check('PBE0'  , deriv=2, e_place=6, v_place=6, f_place=5)
        check('B3P86' , deriv=2, e_place=5, v_place=5, f_place=3)
        check('B3P86G' , deriv=2, e_place=3, v_place=2, f_place=2)
        check('B3PW91' , deriv=2, f_place=4)
        check('B3PW91G', deriv=2, e_place=2, v_place=2, f_place=2)
        check('B3LYP' , deriv=2)
        check('B3LYP5' , deriv=2)
        check('B3LYPG' , deriv=2, e_place=3, v_place=2, f_place=2)
        check('O3LYP' , deriv=2, e_place=3, v_place=2, f_place=1)
        check('X3LYP' , deriv=2, e_place=7, v_place=5, f_place=2)
        check('CAMB3LYP', deriv=1)
        check('B97_1' , deriv=2, e_place=6, v_place=5, f_place=3)
        check('B97_2' , deriv=2, e_place=6, v_place=5, f_place=3)
        check('TPSSH' , deriv=1, v_place=1)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    print("Test xcfun")
    unittest.main()
| 46.219753
| 112
| 0.566002
|
4a09f9ddb409d70b1aa006dd1d507c8e69ad7d96
| 6,476
|
py
|
Python
|
sdk/python/pulumi_azure_native/servicefabricmesh/v20180901preview/secret_value.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/servicefabricmesh/v20180901preview/secret_value.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/servicefabricmesh/v20180901preview/secret_value.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['SecretValue']
class SecretValue(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
secret_resource_name: Optional[pulumi.Input[str]] = None,
secret_value_resource_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
This type describes a value of a secret resource. The name of this resource is the version identifier corresponding to this secret value.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] resource_group_name: Azure resource group name
:param pulumi.Input[str] secret_resource_name: The name of the secret resource.
:param pulumi.Input[str] secret_value_resource_name: The name of the secret resource value which is typically the version identifier for the value.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] value: The actual value of the secret.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if secret_resource_name is None and not opts.urn:
raise TypeError("Missing required property 'secret_resource_name'")
__props__['secret_resource_name'] = secret_resource_name
__props__['secret_value_resource_name'] = secret_value_resource_name
__props__['tags'] = tags
__props__['value'] = value
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:servicefabricmesh/v20180901preview:SecretValue"), pulumi.Alias(type_="azure-native:servicefabricmesh:SecretValue"), pulumi.Alias(type_="azure-nextgen:servicefabricmesh:SecretValue")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SecretValue, __self__).__init__(
'azure-native:servicefabricmesh/v20180901preview:SecretValue',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SecretValue':
"""
Get an existing SecretValue resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["tags"] = None
__props__["type"] = None
__props__["value"] = None
return SecretValue(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        The geo-location where the resource lives (read-only output resolved by the provider).
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource (read-only output resolved by the provider).
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        State of the resource (read-only; maps to the provider's ``provisioningState``).
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags, if any were set on the resource.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def value(self) -> pulumi.Output[Optional[str]]:
        """
        The actual value of the secret (only present when supplied at creation).
        """
        return pulumi.get(self, "value")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 41.780645
| 270
| 0.643144
|
4a09f9f11cac2da434e858d9ad15cde7dda97591
| 61
|
py
|
Python
|
eurostatapiclient/__init__.py
|
ckauth/eurostat-api-client
|
70c859881d50b3eca275434e2590ff7d76b290e9
|
[
"Apache-2.0"
] | null | null | null |
eurostatapiclient/__init__.py
|
ckauth/eurostat-api-client
|
70c859881d50b3eca275434e2590ff7d76b290e9
|
[
"Apache-2.0"
] | null | null | null |
eurostatapiclient/__init__.py
|
ckauth/eurostat-api-client
|
70c859881d50b3eca275434e2590ff7d76b290e9
|
[
"Apache-2.0"
] | null | null | null |
# Package entry point: re-export the client class so callers can simply write
# ``from eurostatapiclient import EurostatAPIClient``.
from .client import EurostatAPIClient

# Package version string.
__version__ = "0.2.6"
| 15.25
| 37
| 0.770492
|
4a09fa11eea1291249d7bd8c21ec04f2968412f8
| 11,557
|
py
|
Python
|
sdk/search/azure-search-documents/azure/search/documents/_internal/_search_index_document_batching_client.py
|
sima-zhu/azure-sdk-for-python
|
a413dc783f0df7dc65e9c2ef9762fabff1708c4e
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/search/azure-search-documents/azure/search/documents/_internal/_search_index_document_batching_client.py
|
sima-zhu/azure-sdk-for-python
|
a413dc783f0df7dc65e9c2ef9762fabff1708c4e
|
[
"MIT"
] | null | null | null |
sdk/search/azure-search-documents/azure/search/documents/_internal/_search_index_document_batching_client.py
|
sima-zhu/azure-sdk-for-python
|
a413dc783f0df7dc65e9c2ef9762fabff1708c4e
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import cast, List, TYPE_CHECKING
import time
import threading
from azure.core.tracing.decorator import distributed_trace
from azure.core.exceptions import ServiceResponseTimeoutError
from ._utils import is_retryable_status_code
from ._search_index_document_batching_client_base import SearchIndexDocumentBatchingClientBase
from ._generated import SearchIndexClient
from ..indexes import SearchIndexClient as SearchServiceClient
from ._generated.models import IndexBatch, IndexingResult
from ._search_documents_error import RequestEntityTooLargeError
from ._index_documents_batch import IndexDocumentsBatch
from .._headers_mixin import HeadersMixin
from .._version import SDK_MONIKER
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
from typing import Any, Union
from azure.core.credentials import AzureKeyCredential
class SearchIndexDocumentBatchingClient(SearchIndexDocumentBatchingClientBase, HeadersMixin):
    """A client to do index document batching.

    :param endpoint: The URL endpoint of an Azure search service
    :type endpoint: str
    :param index_name: The name of the index to connect to
    :type index_name: str
    :param credential: A credential to authorize search client requests
    :type credential: ~azure.core.credentials.AzureKeyCredential
    :keyword bool auto_flush: if the auto flush mode is on. Default to True.
    :keyword int window: how many seconds if there is no changes that triggers auto flush.
        Default to 60 seconds
    :keyword hook: hook. If it is set, the client will call corresponding methods when status changes
    :paramtype hook: IndexingHook
    :keyword str api_version: The Search API version to use for requests.
    """
    # pylint: disable=too-many-instance-attributes

    def __init__(self, endpoint, index_name, credential, **kwargs):
        # type: (str, str, AzureKeyCredential, **Any) -> None
        super(SearchIndexDocumentBatchingClient, self).__init__(
            endpoint=endpoint,
            index_name=index_name,
            credential=credential,
            **kwargs)
        # Queue of pending index actions, drained by _process()/flush().
        self._index_documents_batch = IndexDocumentsBatch()
        self._client = SearchIndexClient(
            endpoint=endpoint, index_name=index_name, sdk_moniker=SDK_MONIKER, **kwargs
        )  # type: SearchIndexClient
        self._reset_timer()

    def _cleanup(self, flush=True):
        # type: (bool) -> None
        """Clean up the client.

        :param bool flush: flush the actions queue before shutting down the client.
            Default to True.
        """
        if flush:
            self.flush()
        if self._auto_flush:
            self._timer.cancel()

    def __repr__(self):
        # type: () -> str
        return "<SearchIndexDocumentBatchingClient [endpoint={}, index={}]>".format(
            repr(self._endpoint), repr(self._index_name)
        )[:1024]

    @property
    def actions(self):
        # type: () -> List[IndexAction]
        """The list of currently index actions in queue to index.

        :rtype: List[IndexAction]
        """
        return self._index_documents_batch.actions

    def close(self):
        # type: () -> None
        """Close the client session, flushing any queued actions first."""
        self._cleanup(flush=True)
        return self._client.close()

    @distributed_trace
    def flush(self, timeout=86400, **kwargs):  # pylint:disable=unused-argument
        # type: (int) -> bool
        """Flush the batch.

        :param int timeout: time out setting. Default is 86400s (one day)
        :return: True if there are errors. Else False
        :rtype: bool
        :raises ~azure.core.exceptions.ServiceResponseTimeoutError: if the queue
            cannot be drained before *timeout* elapses.
        """
        has_error = False
        begin_time = int(time.time())
        # Keep sending until the queue is empty (retries re-enqueue actions).
        while len(self.actions) > 0:
            now = int(time.time())
            remaining = timeout - (now - begin_time)
            if remaining < 0:
                raise ServiceResponseTimeoutError("Service response time out")
            result = self._process(timeout=remaining, raise_error=False)
            if result:
                has_error = True
        return has_error

    def _process(self, timeout=86400, **kwargs):
        # type: (int, **Any) -> bool
        """Dequeue all pending actions and send them to the service once.

        :param int timeout: timeout budget (seconds) for this round trip.
        :keyword bool raise_error: re-raise request failures (default True);
            when False, failures only re-queue the actions and return True.
        :return: True if any action failed or was re-queued for retry.
        :rtype: bool
        """
        raise_error = kwargs.pop("raise_error", True)
        actions = self._index_documents_batch.dequeue_actions()
        has_error = False

        # Lazily discover the index's key field so results can be matched back
        # to the actions that produced them. Best-effort: failures are ignored
        # and matching simply degrades.
        if not self._index_key:
            try:
                client = SearchServiceClient(self._endpoint, self._credential)
                result = client.get_index(self._index_name)
                if result:
                    for field in result.fields:
                        if field.key:
                            self._index_key = field.name
                            break
            except Exception:  # pylint: disable=broad-except
                pass

        try:
            results = self._index_documents_actions(actions=actions, timeout=timeout)
            for result in results:
                try:
                    action = next(
                        x for x in actions
                        if x.additional_properties.get(self._index_key) == result.key)
                    if result.succeeded:
                        self._succeed_callback(action)
                    elif is_retryable_status_code(result.status_code):
                        self._retry_action(action)
                        has_error = True
                    else:
                        self._fail_callback(action)
                        has_error = True
                except StopIteration:
                    # No queued action matches this result's key; nothing to report.
                    pass
            return has_error
        except Exception:  # pylint: disable=broad-except
            # The whole request failed: re-queue everything for a later retry.
            for action in actions:
                self._retry_action(action)
            if raise_error:
                raise
            return True

    def _process_if_needed(self):
        # type: () -> None
        """Called every time a new action is queued.

        Flushes the queued actions when:
        1. Auto-flush is on, and
        2. at least ``self._batch_size`` actions are queued.
        """
        if not self._auto_flush:
            return
        # Any new activity postpones the auto-flush window.
        self._reset_timer()
        if len(self._index_documents_batch.actions) < self._batch_size:
            return
        self._process(raise_error=False)

    def _reset_timer(self):
        # pylint: disable=access-member-before-definition
        """(Re)start the auto-flush timer; safe to call before any timer exists."""
        try:
            self._timer.cancel()
        except AttributeError:
            # First call from __init__: no timer created yet.
            pass
        self._timer = threading.Timer(self._window, self._process)
        if self._auto_flush:
            self._timer.start()

    def add_upload_actions(self, documents):
        # type: (List[dict]) -> None
        """Queue upload documents actions.

        :param documents: A list of documents to upload.
        :type documents: List[dict]
        """
        actions = self._index_documents_batch.add_upload_actions(documents)
        self._new_callback(actions)
        self._process_if_needed()

    def add_delete_actions(self, documents):
        # type: (List[dict]) -> None
        """Queue delete documents actions.

        :param documents: A list of documents to delete.
        :type documents: List[dict]
        """
        actions = self._index_documents_batch.add_delete_actions(documents)
        self._new_callback(actions)
        self._process_if_needed()

    def add_merge_actions(self, documents):
        # type: (List[dict]) -> None
        """Queue merge documents actions.

        :param documents: A list of documents to merge.
        :type documents: List[dict]
        """
        actions = self._index_documents_batch.add_merge_actions(documents)
        self._new_callback(actions)
        self._process_if_needed()

    def add_merge_or_upload_actions(self, documents):
        # type: (List[dict]) -> None
        """Queue merge documents or upload documents actions.

        :param documents: A list of documents to merge or upload.
        :type documents: List[dict]
        """
        actions = self._index_documents_batch.add_merge_or_upload_actions(documents)
        self._new_callback(actions)
        self._process_if_needed()

    def _index_documents_actions(self, actions, **kwargs):
        # type: (List[IndexAction], **Any) -> List[IndexingResult]
        """Send *actions* in one request, recursively bisecting the batch
        whenever the service rejects it with 413 (payload too large).

        :raises ~azure.core.exceptions.ServiceResponseTimeoutError: if the
            timeout budget is exhausted while splitting.
        """
        error_map = {413: RequestEntityTooLargeError}
        timeout = kwargs.pop('timeout', 86400)
        begin_time = int(time.time())
        kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
        try:
            index_documents = IndexBatch(actions=actions)
            batch_response = self._client.documents.index(
                batch=index_documents, error_map=error_map, **kwargs)
            return cast(List[IndexingResult], batch_response.results)
        except RequestEntityTooLargeError:
            if len(actions) == 1:
                # Cannot split a single action any further.
                raise
            pos = round(len(actions) / 2)

            now = int(time.time())
            remaining = timeout - (now - begin_time)
            if remaining < 0:
                raise ServiceResponseTimeoutError("Service response time out")
            # BUG FIX: the recursive call already returns List[IndexingResult],
            # so no ``.results`` access is needed, and ``error_map`` must not be
            # passed explicitly (the callee defines its own and would receive it
            # twice via **kwargs).
            first_half = self._index_documents_actions(
                actions=actions[:pos],
                timeout=remaining,
                **kwargs
            )

            now = int(time.time())
            remaining = timeout - (now - begin_time)
            if remaining < 0:
                raise ServiceResponseTimeoutError("Service response time out")
            second_half = self._index_documents_actions(
                actions=actions[pos:],
                timeout=remaining,
                **kwargs
            )
            # BUG FIX: list.extend() returns None; concatenate with ``+`` so the
            # caller receives the combined results.
            return first_half + second_half

    def __enter__(self):
        # type: () -> SearchIndexDocumentBatchingClient
        self._client.__enter__()  # pylint:disable=no-member
        return self

    def __exit__(self, *args):
        # type: (*Any) -> None
        self.close()
        self._client.__exit__(*args)  # pylint:disable=no-member

    def _retry_action(self, action):
        # type: (IndexAction) -> None
        """Re-queue *action* unless it has exhausted its retry budget.

        Without a known index key the action cannot be tracked, so it is
        failed immediately.
        """
        if not self._index_key:
            self._fail_callback(action)
            return
        key = action.additional_properties.get(self._index_key)
        counter = self._retry_counter.get(key)
        if not counter:
            # First failure for this document.
            self._retry_counter[key] = 1
            self._index_documents_batch.enqueue_action(action)
        elif counter < self._RETRY_LIMIT - 1:
            # Retry budget not exhausted yet.
            self._retry_counter[key] = counter + 1
            self._index_documents_batch.enqueue_action(action)
        else:
            self._fail_callback(action)
| 39.043919
| 115
| 0.612962
|
4a09fa3d2d61095134b449bb18eef42cdbce6927
| 4,000
|
py
|
Python
|
kubernetes_asyncio/client/models/v1_secret_reference.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1_secret_reference.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1_secret_reference.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.13.5
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1SecretReference(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'name': 'str',
        'namespace': 'str'
    }

    attribute_map = {
        'name': 'name',
        'namespace': 'namespace'
    }

    def __init__(self, name=None, namespace=None):  # noqa: E501
        """V1SecretReference - a model defined in OpenAPI"""  # noqa: E501
        self._name = None
        self._namespace = None
        self.discriminator = None
        # Only assign through the setters when a value was provided, so unset
        # attributes stay None.
        if name is not None:
            self.name = name
        if namespace is not None:
            self.namespace = namespace

    @property
    def name(self):
        """Gets the name of this V1SecretReference.  # noqa: E501

        Name is unique within a namespace to reference a secret resource.  # noqa: E501

        :return: The name of this V1SecretReference.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this V1SecretReference.

        Name is unique within a namespace to reference a secret resource.  # noqa: E501

        :param name: The name of this V1SecretReference.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def namespace(self):
        """Gets the namespace of this V1SecretReference.  # noqa: E501

        Namespace defines the space within which the secret name must be unique.  # noqa: E501

        :return: The namespace of this V1SecretReference.  # noqa: E501
        :rtype: str
        """
        return self._namespace

    @namespace.setter
    def namespace(self, namespace):
        """Sets the namespace of this V1SecretReference.

        Namespace defines the space within which the secret name must be unique.  # noqa: E501

        :param namespace: The namespace of this V1SecretReference.  # noqa: E501
        :type: str
        """
        self._namespace = namespace

    def to_dict(self):
        """Returns the model properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and dicts.
        """
        result = {}
        # Plain dict iteration replaces the six.iteritems() Python-2 shim;
        # this module targets Python 3 only.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1SecretReference):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.972028
| 124
| 0.575
|
4a09fb45b80e3c52a94cc553e9a4d0183123fd8c
| 4,138
|
py
|
Python
|
dmb/modeling/stereo/layers/bilateral_filter.py
|
jiaw-z/DenseMatchingBenchmark
|
177c56ca1952f54d28e6073afa2c16981113a2af
|
[
"MIT"
] | 160
|
2019-11-16T13:59:21.000Z
|
2022-03-28T07:52:59.000Z
|
dmb/modeling/stereo/layers/bilateral_filter.py
|
jiaw-z/DenseMatchingBenchmark
|
177c56ca1952f54d28e6073afa2c16981113a2af
|
[
"MIT"
] | 22
|
2019-11-22T02:14:18.000Z
|
2022-01-24T10:16:14.000Z
|
dmb/modeling/stereo/layers/bilateral_filter.py
|
jiaw-z/DenseMatchingBenchmark
|
177c56ca1952f54d28e6073afa2c16981113a2af
|
[
"MIT"
] | 38
|
2019-12-27T14:01:01.000Z
|
2022-03-12T11:40:11.000Z
|
import torch
import torch.nn as nn
import math
eps = 1e-12
class bilateralFilter(nn.Module):
    """Bilateral filter that refines a disparity map guided by the gray image.

    Args:
        kernel_size(int): bilateral filter kernel size (odd)
        sigma_image(int, float): the deviation of the image-difference Gaussian
        sigma_gaussian(int, float): the deviation of the spatial-distance Gaussian
        leftImage(tensor): in [BatchSize, 1, Height, Width] layout, gray image
        estDisp(tensor): in [BatchSize, 1, Height, Width] layout, the estimated disparity map
    Outputs:
        fineDisp(tensor): in [BatchSize, 1, Height, Width] layout, the refined disparity map
    """

    def __init__(self, kernel_size, sigma_image, sigma_gaussian):
        super(bilateralFilter, self).__init__()
        self.kernel_size = kernel_size
        self.sigma_image = sigma_image
        self.sigma_gaussian = sigma_gaussian

        # One fixed (non-trainable) conv per neighbor offset computing the
        # intensity difference I(center) - I(neighbor); the center offset is
        # excluded because its difference is identically zero.
        self.image_conv = []
        self.image_kernel = self.create_image_kernel(self.kernel_size)
        for i in range(len(self.image_kernel)):
            self.image_conv.append(
                nn.Conv2d(1, 1, kernel_size=kernel_size, stride=1, padding=kernel_size // 2, bias=False))
            self.image_conv[i].weight.data = self.image_kernel[i]
            self.image_conv[i].weight.requires_grad = False

        # One fixed conv per neighbor offset (center included) that shifts the
        # disparity map by that offset.
        self.disp_conv = []
        self.disp_kernel = self.create_disparity_kernel(self.kernel_size)
        for i in range(len(self.disp_kernel)):
            self.disp_conv.append(
                nn.Conv2d(1, 1, kernel_size=kernel_size, stride=1, padding=kernel_size // 2, bias=False))
            self.disp_conv[i].weight.data = self.disp_kernel[i]
            self.disp_conv[i].weight.requires_grad = False

    def forward(self, leftImage, estDisp):
        assert leftImage.shape == estDisp.shape
        assert estDisp.shape[1] == 1
        eps = 1e-12  # guards the final division where the total weight vanishes
        for i in range(len(self.disp_conv)):
            self.disp_conv[i] = self.disp_conv[i].to(leftImage.device)
        for i in range(len(self.image_conv)):
            self.image_conv[i] = self.image_conv[i].to(leftImage.device)

        index_image_conv = 0
        index_disp_conv = 0
        fineDisp = None
        weight = None
        half = self.kernel_size // 2
        for i in range(-half, half + 1):
            for j in range(-half, half + 1):
                # Range kernel: per-pixel weight from intensity difference.
                if i == 0 and j == 0:
                    image_diff_weight = torch.ones_like(estDisp)
                else:
                    image_diff_weight = (
                        (-self.image_conv[index_image_conv](leftImage).pow(2.0) / (2 * self.sigma_image ** 2)).exp())
                    index_image_conv += 1

                # Spatial kernel: constant weight from the offset distance.
                dist = math.exp(-float(i ** 2 + j ** 2) / float(2 * self.sigma_gaussian ** 2))
                dist_diff_weight = torch.full_like(estDisp, dist)

                disp = self.disp_conv[index_disp_conv](estDisp)
                if index_disp_conv == 0:
                    weight = dist_diff_weight * image_diff_weight
                    fineDisp = disp * dist_diff_weight * image_diff_weight
                else:
                    weight += dist_diff_weight * image_diff_weight
                    fineDisp += disp * dist_diff_weight * image_diff_weight
                # BUG FIX: advance to the next shift kernel. Previously this
                # index was never incremented, so only disp_conv[0] was used and
                # the accumulators were overwritten on every iteration instead
                # of summed over the neighborhood.
                index_disp_conv += 1

        # Normalized weighted average of the shifted disparities.
        fineDisp = (fineDisp + eps) / (weight + eps)

        return fineDisp

    def create_disparity_kernel(self, kernel_size):
        # One one-hot kernel per neighborhood position: convolving with it
        # shifts the input by that offset.
        total_direction = kernel_size * kernel_size
        kernel = []
        for i in range(total_direction):
            kernel.append(torch.zeros(1, 1, total_direction))
            kernel[i][:, :, i] = 1
            kernel[i] = kernel[i].reshape(1, 1, kernel_size, kernel_size)
        return kernel

    def create_image_kernel(self, kernel_size):
        # One kernel per neighborhood position computing center minus neighbor;
        # the center position itself is dropped (always-zero difference).
        total_direction = kernel_size * kernel_size
        kernel = []
        for i in range(total_direction):
            kernel.append(torch.zeros(1, 1, total_direction))
            kernel[i][:, :, i] = -1
            kernel[i][:, :, total_direction // 2] = 1
            kernel[i] = kernel[i].reshape(1, 1, kernel_size, kernel_size)
        return kernel[:total_direction // 2] + kernel[total_direction // 2 + 1:]
| 41.79798
| 117
| 0.604157
|
4a09fb85a44d52cf38f077b25e027e6f65960266
| 3,895
|
py
|
Python
|
SoyNLP/soynlp/word/_pmi.py
|
ChoiSeEun/Korean-NLP-Visual
|
adee5e45f3472b969f45e6e2e991d88df0e49dcd
|
[
"Apache-2.0"
] | 1
|
2021-11-05T10:18:43.000Z
|
2021-11-05T10:18:43.000Z
|
SoyNLP/soynlp/word/_pmi.py
|
ChoiSeEun/Korean-NLP-Visual
|
adee5e45f3472b969f45e6e2e991d88df0e49dcd
|
[
"Apache-2.0"
] | 5
|
2021-11-04T10:23:39.000Z
|
2021-12-13T13:03:31.000Z
|
SoyNLP/soynlp/word/_pmi.py
|
ChoiSeEun/Korean-NLP-Visual
|
adee5e45f3472b969f45e6e2e991d88df0e49dcd
|
[
"Apache-2.0"
] | 2
|
2021-09-29T10:52:05.000Z
|
2021-09-29T11:05:33.000Z
|
import numpy as np
from scipy.sparse import diags
from scipy.sparse import dok_matrix
from sklearn.metrics import pairwise_distances
from soynlp.utils import get_process_memory
from soynlp.vectorizer import sent_to_word_context_matrix
def pmi(x, min_pmi=0, alpha=0.0001, verbose=False):
    """Compute a thresholded PMI matrix from a co-occurrence count matrix.

    PMI(x, y) = log( p(x, y) / ( p(x) * (p(y) + alpha) ) ), kept only where
    it exceeds ``min_pmi``; all other cells stay zero in the returned
    scipy.sparse.dok_matrix.

    NOTE(review): assumes ``x.sum(axis=...)`` returns a row matrix whose
    ``tolist()[0]`` is the dense marginal vector (true for scipy sparse
    matrices / numpy.matrix) -- confirm for other input types.

    :param x: word-context co-occurrence count matrix (rows = words, cols = contexts)
    :param min_pmi: threshold; only PMI values strictly above it are stored
    :param alpha: smoothing added to the context marginal p(y)
    :param verbose: print progress (uses soynlp's get_process_memory)
    :return: sparse dok_matrix of thresholded PMI values
    """
    # convert x to probability matrix & marginal probability
    px = (x.sum(axis=1) / x.sum()).reshape(-1)
    py = (x.sum(axis=0) / x.sum()).reshape(-1)
    pxy = x / x.sum()
    # transform px and py to diagonal matrix
    # using scipy.sparse.diags
    px_diag = diags(px.tolist()[0])
    py_diag = diags((py).tolist()[0])
    # Invert the diagonals in place so the two dot products below compute
    # p(x,y) / (p(x) * (p(y) + alpha)); zero marginals map to zero, not inf.
    # pmi_alpha (x,y) = p(x,y) / ( p(x) x (p(y) + alpha) )
    px_diag.data[0] = np.asarray([0 if v == 0 else 1/v for v in px_diag.data[0]])
    py_diag.data[0] = np.asarray([0 if v == 0 else 1/(v + alpha) for v in py_diag.data[0]])
    exp_pmi = px_diag.dot(pxy).dot(py_diag)
    # PPMI using threshold: compare in exp-space to avoid taking log of every cell
    min_exp_pmi = 1 if min_pmi == 0 else np.exp(min_pmi)
    # because exp_pmi is sparse matrix and type of exp_pmi.data is numpy.ndarray
    indices = np.where(exp_pmi.data > min_exp_pmi)[0]
    pmi_dok = dok_matrix(exp_pmi.shape)
    # prepare data (rows, cols, data)
    # NOTE(review): relies on nonzero() ordering matching the .data ordering
    # of the CSR result of the dot products -- holds for canonical CSR.
    rows, cols = exp_pmi.nonzero()
    data = exp_pmi.data
    # enumerate function for printing status
    for _n_idx, idx in enumerate(indices):
        # print current status
        if verbose and _n_idx % 10000 == 0:
            print('\rcomputing pmi {:.3} % mem={} Gb '.format(
                100 * _n_idx / indices.shape[0], '%.3f' % get_process_memory())
                , flush=True, end='')
        # apply logarithm only to the surviving cells
        pmi_dok[rows[idx], cols[idx]] = np.log(data[idx])
    if verbose:
        print('\rcomputing pmi was done{}'.format(' '*30), flush=True)
    return pmi_dok
class PMI:
    """Word-context PMI model.

    Builds a word-context co-occurrence matrix from sentences, derives a PMI
    matrix from it, and answers similarity / related-context queries.
    """

    def __init__(self, windows=3, min_tf=10, verbose=True,
                 tokenizer=lambda x:x.split(), min_pmi=0, alpha=0.0001):
        self.windows = windows
        self.min_tf = min_tf
        self.verbose = verbose
        self.tokenizer = tokenizer
        self.min_pmi = min_pmi
        self.alpha = alpha

    def train(self, sents):
        # Build the word-context co-occurrence matrix and vocabulary maps.
        self.x, self.idx2vocab = sent_to_word_context_matrix(
            sents, self.windows, self.min_tf, self.tokenizer, self.verbose)
        self.vocab2idx = {word: i for i, word in enumerate(self.idx2vocab)}

        # Derive the PMI matrix from the co-occurrence counts.
        self.pmi_ = pmi(self.x, min_pmi=self.min_pmi, alpha=self.alpha,
                        verbose=self.verbose)
        return self

    def most_similar_words(self, query, topk=10, filter_func=lambda x:True):
        """Return up to ``topk`` (word, cosine similarity) pairs nearest to ``query``."""
        assert topk > 0

        if query not in self.vocab2idx:
            return []
        query_idx = self.vocab2idx[query]

        # Cosine distance from the query row to every vocabulary row.
        cosine_dist = pairwise_distances(
            self.x[query_idx,:], self.x, metric='cosine')[0]

        similars = []
        for candidate_idx in cosine_dist.argsort():
            if len(similars) >= topk:
                break
            if candidate_idx == query_idx:
                continue
            word = self.idx2vocab[candidate_idx]
            if not filter_func(word):
                continue
            similars.append((word, 1 - cosine_dist[candidate_idx]))
        return similars

    def most_related_contexts(self, query, topk=10, filter_func=lambda x:True):
        """Return up to ``topk`` (context word, PMI) pairs with highest PMI to ``query``."""
        assert topk > 0

        if query not in self.vocab2idx:
            return []
        query_idx = self.vocab2idx[query]

        # Nonzero PMI entries of the query's row, highest score first.
        row = self.pmi_[query_idx,:].tocsr()
        scored = sorted(zip(row.nonzero()[1], row.data),
                        key=lambda pair: -pair[1])
        return [(self.idx2vocab[idx], score) for idx, score in scored[:topk]]
| 34.776786
| 93
| 0.615918
|
4a09fbb997c6a2bdd5ffe8ed6de36cdfb52fd65f
| 11,673
|
py
|
Python
|
pysnmp/P8541-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/P8541-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/P8541-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module P8541-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/P8541-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:26:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, ModuleIdentity, MibIdentifier, Counter32, Gauge32, Bits, TimeTicks, ObjectIdentity, iso, Counter64, IpAddress, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "ModuleIdentity", "MibIdentifier", "Counter32", "Gauge32", "Bits", "TimeTicks", "ObjectIdentity", "iso", "Counter64", "IpAddress", "enterprises", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "NotificationType")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# NOTE: pysmi-generated MIB module — the statement order below matters
# (identifiers must exist before scalars reference them); do not reorder.
# This local DisplayString deliberately shadows the one imported from
# SNMPv2-TC above: the MIB defines its own textual convention of that name.
class DisplayString(OctetString):
    pass
# OID tree: enterprises.comet(22626).products(1).p8541(5) and its subtrees.
comet = MibIdentifier((1, 3, 6, 1, 4, 1, 22626))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 22626, 1))
p8541 = MibIdentifier((1, 3, 6, 1, 4, 1, 22626, 1, 5))
settings = MibIdentifier((1, 3, 6, 1, 4, 1, 22626, 1, 5, 1))
channels = MibIdentifier((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2))
channel1 = MibIdentifier((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 1))
channel2 = MibIdentifier((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 2))
channel3 = MibIdentifier((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 3))
channel4 = MibIdentifier((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 4))
traps = MibIdentifier((1, 3, 6, 1, 4, 1, 22626, 1, 5, 3))
tables = MibIdentifier((1, 3, 6, 1, 4, 1, 22626, 1, 5, 4))
# Device-wide settings.
sensorName = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sensorName.setStatus('mandatory')
# Channel 1 scalars: name, formatted value, integer value, alarm flag,
# high/low limits, hysteresis, alarm delay. Channels 2-4 repeat the pattern.
ch1Name = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch1Name.setStatus('mandatory')
ch1Val = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch1Val.setStatus('mandatory')
ch1IntVal = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-550, 1250))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch1IntVal.setStatus('mandatory')
ch1Alarm = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch1Alarm.setStatus('mandatory')
ch1LimHi = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch1LimHi.setStatus('mandatory')
ch1LimLo = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch1LimLo.setStatus('mandatory')
ch1LimHyst = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch1LimHyst.setStatus('mandatory')
ch1Delay = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65534))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch1Delay.setStatus('mandatory')
# Channel 2.
ch2Name = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 2, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch2Name.setStatus('mandatory')
ch2Val = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 2, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch2Val.setStatus('mandatory')
ch2IntVal = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-550, 1250))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch2IntVal.setStatus('mandatory')
ch2Alarm = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch2Alarm.setStatus('mandatory')
ch2LimHi = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 2, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch2LimHi.setStatus('mandatory')
ch2LimLo = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 2, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch2LimLo.setStatus('mandatory')
ch2LimHyst = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 2, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch2LimHyst.setStatus('mandatory')
ch2Delay = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 2, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65534))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch2Delay.setStatus('mandatory')
# Channel 3.
ch3Name = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 3, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch3Name.setStatus('mandatory')
ch3Val = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 3, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch3Val.setStatus('mandatory')
ch3IntVal = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 3, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-550, 1250))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch3IntVal.setStatus('mandatory')
ch3Alarm = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 3, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch3Alarm.setStatus('mandatory')
ch3LimHi = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 3, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch3LimHi.setStatus('mandatory')
ch3LimLo = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 3, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch3LimLo.setStatus('mandatory')
ch3LimHyst = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 3, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch3LimHyst.setStatus('mandatory')
ch3Delay = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 3, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65534))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch3Delay.setStatus('mandatory')
# Channel 4.
ch4Name = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 4, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch4Name.setStatus('mandatory')
ch4Val = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 4, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch4Val.setStatus('mandatory')
ch4IntVal = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 4, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-550, 1250))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch4IntVal.setStatus('mandatory')
ch4Alarm = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 4, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch4Alarm.setStatus('mandatory')
ch4LimHi = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 4, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch4LimHi.setStatus('mandatory')
ch4LimLo = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 4, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch4LimLo.setStatus('mandatory')
ch4LimHyst = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 4, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch4LimHyst.setStatus('mandatory')
ch4Delay = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 2, 4, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65534))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch4Delay.setStatus('mandatory')
# Trap payload string.
messageString = MibScalar((1, 3, 6, 1, 4, 1, 22626, 1, 5, 3, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: messageString.setStatus('mandatory')
# History table with one temperature column per channel, indexed by ch1temperature.
historyTable = MibTable((1, 3, 6, 1, 4, 1, 22626, 1, 5, 4, 1), )
if mibBuilder.loadTexts: historyTable.setStatus('mandatory')
historyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 22626, 1, 5, 4, 1, 1), ).setIndexNames((0, "P8541-MIB", "ch1temperature"))
if mibBuilder.loadTexts: historyEntry.setStatus('optional')
ch1temperature = MibTableColumn((1, 3, 6, 1, 4, 1, 22626, 1, 5, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch1temperature.setStatus('mandatory')
ch2temperature = MibTableColumn((1, 3, 6, 1, 4, 1, 22626, 1, 5, 4, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch2temperature.setStatus('mandatory')
ch3temperature = MibTableColumn((1, 3, 6, 1, 4, 1, 22626, 1, 5, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch3temperature.setStatus('mandatory')
ch4temperature = MibTableColumn((1, 3, 6, 1, 4, 1, 22626, 1, 5, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ch4temperature.setStatus('mandatory')
# Export every public symbol so other MIB modules can import from "P8541-MIB".
mibBuilder.exportSymbols("P8541-MIB", ch3LimHi=ch3LimHi, ch3Val=ch3Val, ch3IntVal=ch3IntVal, ch4temperature=ch4temperature, ch4Name=ch4Name, ch2Val=ch2Val, ch1temperature=ch1temperature, ch1Delay=ch1Delay, ch2Delay=ch2Delay, ch4LimHyst=ch4LimHyst, ch3LimHyst=ch3LimHyst, products=products, ch4LimHi=ch4LimHi, channel2=channel2, ch2Alarm=ch2Alarm, ch2LimLo=ch2LimLo, ch4IntVal=ch4IntVal, ch3Name=ch3Name, ch3LimLo=ch3LimLo, channel1=channel1, comet=comet, p8541=p8541, settings=settings, ch4Alarm=ch4Alarm, messageString=messageString, historyEntry=historyEntry, DisplayString=DisplayString, ch3Delay=ch3Delay, ch2IntVal=ch2IntVal, ch1Name=ch1Name, ch4LimLo=ch4LimLo, channel3=channel3, ch1Val=ch1Val, ch1Alarm=ch1Alarm, ch2LimHyst=ch2LimHyst, ch4Val=ch4Val, historyTable=historyTable, ch3temperature=ch3temperature, ch1LimHi=ch1LimHi, ch1LimLo=ch1LimLo, sensorName=sensorName, channel4=channel4, tables=tables, ch4Delay=ch4Delay, channels=channels, ch2temperature=ch2temperature, traps=traps, ch1LimHyst=ch1LimHyst, ch2Name=ch2Name, ch2LimHi=ch2LimHi, ch3Alarm=ch3Alarm, ch1IntVal=ch1IntVal)
| 107.091743
| 1,091
| 0.738799
|
4a09fc582e485334b41917768169097061f75475
| 1,199
|
py
|
Python
|
solutions/428_serialize_and_deserialize_n_ary_tree.py
|
YiqunPeng/leetcode_pro
|
7e6376984f9baec49a5e827d98330fe3d1b656f0
|
[
"MIT"
] | null | null | null |
solutions/428_serialize_and_deserialize_n_ary_tree.py
|
YiqunPeng/leetcode_pro
|
7e6376984f9baec49a5e827d98330fe3d1b656f0
|
[
"MIT"
] | null | null | null |
solutions/428_serialize_and_deserialize_n_ary_tree.py
|
YiqunPeng/leetcode_pro
|
7e6376984f9baec49a5e827d98330fe3d1b656f0
|
[
"MIT"
] | null | null | null |
"""
# Definition for a Node.
class Node(object):
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
class Codec:
    """Serialize/deserialize an N-ary tree using a preorder encoding.

    Each node is written as "<val> <child_count>", so the stream can be
    rebuilt recursively without explicit child delimiters.
    """

    def serialize(self, root: 'Node') -> str:
        """Encodes a tree to a single string.

        :type root: Node
        :rtype: str
        """
        res = []
        self._preorder(root, res)
        return ' '.join(res)

    def _preorder(self, root, res):
        # Append "<val> <num_children>" for this node, then recurse.
        if not root:
            return
        # Tolerate children=None (the Node definition defaults children to None).
        children = root.children or []
        res.extend([str(root.val), str(len(children))])
        for child in children:
            self._preorder(child, res)

    def deserialize(self, data: str) -> 'Node':
        """Decodes your encoded data to tree.

        :type data: str
        :rtype: Node
        """
        # BUG FIX: deque was used without ever being imported (NameError).
        from collections import deque
        if not data:
            return None
        splits = data.split(' ')
        return self._rebuild(deque(splits))

    def _rebuild(self, q):
        # Consume "<val> <child_count>" and recursively rebuild each child.
        val = int(q.popleft())
        num = int(q.popleft())
        children = [self._rebuild(q) for _ in range(num)]
        return Node(val, children)
| 24.979167
| 60
| 0.506255
|
4a09fe1be7c50afd52b56e71dad2abb7af2299f9
| 1,224
|
py
|
Python
|
airflow_ml_dags/dags/email_alert.py
|
made-ml-in-prod-2021/muzaffarsoliyev
|
9b6c1bd4ec003a8dbf9b375df0e302f58a3e324b
|
[
"MIT"
] | null | null | null |
airflow_ml_dags/dags/email_alert.py
|
made-ml-in-prod-2021/muzaffarsoliyev
|
9b6c1bd4ec003a8dbf9b375df0e302f58a3e324b
|
[
"MIT"
] | 1
|
2021-06-14T17:49:21.000Z
|
2021-06-18T07:33:12.000Z
|
airflow_ml_dags/dags/email_alert.py
|
made-ml-in-prod-2021/muzaffarsoliyev
|
9b6c1bd4ec003a8dbf9b375df0e302f58a3e324b
|
[
"MIT"
] | null | null | null |
from airflow.models import DAG
from airflow.operators.python import PythonOperator
from airflow.utils.dates import days_ago
from airflow.utils.email import send_email
from datetime import timedelta
import os
def notify_email(contextDict, **kwargs):
    """Send a failure-alert e-mail; intended as an Airflow on_failure_callback.

    :param contextDict: Airflow task context passed by the callback machinery
        (currently unused by the message text).
    """
    # BUG FIX: the original called .format(**contextDict) on strings that
    # contain no placeholders — a no-op that would raise KeyError the moment
    # literal braces were ever added to the text. Use the plain strings.
    title = "Airflow alert: file does not exist."
    body = """
    Hi Everyone, <br>
    <br>
    There's been an error in the data_exist job. No data available. <br>
    <br>Please, fix it.
    """
    send_email('muzaffar.soliyev97@gmail.com', title, body)
# Task-level defaults applied to every task of the DAG defined below.
default_args = {
    'owner': 'airflow',
    'description': 'email_alert',
    'start_date': days_ago(1),
}
def _data_exist():
assert True == os.path.exists("/opt/airflow/data/raw/{{ ds }}/data.csv")
# Daily DAG with a single task that fails (triggering notify_email) when the
# raw data file for the run date is missing.
with DAG(
    "email_alert_dag",
    default_args=default_args,
    description="This DAG sends email alert",
    schedule_interval=timedelta(days=1),
) as dag:
    data_exist = PythonOperator(task_id='email_alert_task',
                                python_callable=_data_exist,
                                on_failure_callback=notify_email,
                                dag=dag)  # redundant inside `with DAG`, kept as-is
    # Bare reference: a no-op — the task is already registered with the DAG.
    data_exist
| 28.465116
| 77
| 0.626634
|
4a09ff5a5dd67b16c23be323f0b1e19190c0accc
| 12,842
|
py
|
Python
|
Packs/JoeSecurity/Integrations/JoeSecurity/JoeSecurity.py
|
ddi-danielsantander/content
|
67e2edc404f50c332d928dbdbce00a447bb5532f
|
[
"MIT"
] | 1
|
2020-04-19T11:05:42.000Z
|
2020-04-19T11:05:42.000Z
|
Packs/JoeSecurity/Integrations/JoeSecurity/JoeSecurity.py
|
ddi-danielsantander/content
|
67e2edc404f50c332d928dbdbce00a447bb5532f
|
[
"MIT"
] | 9
|
2021-02-08T20:51:18.000Z
|
2021-09-23T23:27:38.000Z
|
Packs/JoeSecurity/Integrations/JoeSecurity/JoeSecurity.py
|
ddi-danielsantander/content
|
67e2edc404f50c332d928dbdbce00a447bb5532f
|
[
"MIT"
] | 1
|
2020-05-27T15:26:48.000Z
|
2020-05-27T15:26:48.000Z
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import time
import shutil
import requests
from distutils.util import strtobool
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBAL VARS '''
# Integration parameters configured on the Demisto instance.
BASE_URL = urljoin(demisto.params().get('url'), 'api/')  # Joe Sandbox API root
USE_SSL = not demisto.params().get('insecure', False)  # verify TLS unless 'insecure'
MAX_POLLS = int(demisto.params().get('maxpolls', 300))  # max 1-second polls while waiting
USE_PROXY = demisto.params().get('proxy', True)
# Exact server error message meaning an uploaded email contained nothing scannable;
# http_post compares against it to return a sentinel instead of raising.
nothing_to_analyze_message = 'We found nothing to analyze in your uploaded email' \
                             '(possibly all elements where whitelisted, check Input filtering in your Settings).'
# Canned entry returned to the War Room for that case.
nothing_to_analyze_output = {
    'Type': entryTypes['note'],
    'ContentsFormat': formats['markdown'],
    'Contents': 'We found nothing to analyze in your uploaded email',
    'HumanReadable': 'We found nothing to analyze in your uploaded email'
}
''' HELPER FUNCTIONS '''
def http_post(url_suffix, data=None, files=None, parse_json=True):
    """POST to the Joe Sandbox API.

    :param url_suffix: path appended to BASE_URL (e.g. 'v2/analysis/info')
    :param data: form fields; the configured API key is always added
    :param files: optional files dict for a multipart upload
    :param parse_json: return parsed JSON when True, raw bytes otherwise
    :return: parsed JSON dict, raw body, or the sentinel 'nothing_to_analyze'
        when the server reports an empty email submission
    :raises Exception: on HTTP 403 (bad API key) or any other non-200 reply
    """
    data = {} if data is None else data
    LOG('running request with url=%s\n\tdata=%s\n\tfiles=%s' % (BASE_URL + url_suffix,
                                                                data, files, ))
    data.setdefault('apikey', demisto.params()['api_key'])
    res = requests.post(BASE_URL + url_suffix, verify=USE_SSL, data=data, files=files)
    if res.status_code == 403:
        raise Exception('API Key is incorrect')
    if res.status_code != 200:
        # Parse the error body once (the original called res.json() three
        # times and computed error_msg twice).
        body = res.json()
        error_msg = body['errors'][0]['message']
        if error_msg == nothing_to_analyze_message:
            return 'nothing_to_analyze'
        LOG('result is: %s' % (body, ))
        raise Exception('Your request failed with the following error: %s.\n%s' % (res.reason, error_msg, ))
    if parse_json:
        return res.json()
    return res.content
def analysis_to_entry(title, info):
    """Convert one or more raw analysis-info dicts into a Demisto entry.

    :param title: human-readable title for the markdown table
    :param info: a single analysis dict, or a list of them, as returned under
        the 'data' key by info_request
    :return: Demisto entry dict carrying Joe.Analysis and DBotScore context
    """
    if not isinstance(info, list):
        info = [info]
    context = []
    table = []
    dbot_scores = []
    for analysis in info:
        analysis_info = {
            'ID': analysis['webid'],  # for detonate generic polling
            'WebID': analysis['webid'],
            'SampleName': analysis['filename'],
            'Status': analysis['status'],
            'Comments': analysis['comments'],
            'Time': analysis['time'],
            'MD5': analysis['md5'],
            'SHA1': analysis['sha1'],
            'SHA256': analysis['sha256'],
            'Systems': list(set([run['system'] for run in analysis['runs']])),
            'Result': ', '.join([run['detection'] for run in analysis['runs']]),
            'Errors': [run['error'] for run in analysis['runs']],
        }
        # Context keeps the full per-run data; the table gets a flattened copy.
        analysis_context = dict(analysis_info)
        analysis_context['Runs'] = analysis['runs']
        analysis_table = dict(analysis_info)
        if not any(analysis_table['Errors']):
            analysis_table['Errors'] = None
        # Map the joined detection strings onto a DBot score:
        # 3 = malicious, 2 = suspicious, 1 = clean, 0 = unknown.
        dbot_score = 0
        malicious = None
        if 'malicious' in analysis_info['Result']:
            dbot_score = 3
            malicious = {
                'Vendor': 'JoeSecurity',
                'Detections': ', '.join(set([run['detection'] for run in analysis['runs']])),
                'SHA1': analysis_info['SHA1'],
            }
        elif 'suspicious' in analysis_info['Result']:
            dbot_score = 2
        elif 'clean' in analysis_info['Result']:
            dbot_score = 1
        dbot_scores.append({
            'Vendor': 'JoeSecurity',
            'Indicator': analysis_info['MD5'],
            'Type': 'file' if analysis_info['MD5'] else 'url',
            'Score': dbot_score,
            'Malicious': malicious,
        })
        context.append(analysis_context)
        table.append(analysis_table)
    entry = {
        'ContentsFormat': formats['json'],
        'Type': entryTypes['note'],
        'Contents': context,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(title, table, removeNull=True),
        'EntryContext': {'Joe.Analysis(val.ID && val.ID == obj.ID)': createContext(context, removeNull=True),
                         'DBotScore': createContext(dbot_scores, removeNull=True), }
    }
    return entry
def poll_webid(web_id):
    """Poll analysis *web_id* once per second until it finishes or MAX_POLLS
    attempts are exhausted; return the corresponding War Room entry."""
    result = {'data': {'status': 'pending'}}
    remaining = MAX_POLLS
    while remaining >= 0:
        if result['data']['status'] == 'finished':
            break
        if result['data']['status'] != 'pending':
            LOG('error while polling: result is %s' % (result, ))
        result = info_request(web_id)
        time.sleep(1)
        remaining -= 1
    LOG('reached max_polls #%d' % (remaining, ))
    if remaining < 0:
        return analysis_to_entry('Polling timeout on Analysis #' + web_id, result['data'])
    return analysis_to_entry('Analysis #' + web_id, result['data'])
''' FUNCTIONS '''
def is_online():
    """Return True when the Joe Security server reports itself online."""
    response = http_post('v2/server/online')
    return response['data']['online']
def list_analysis():
    """Fetch full info for every analysis on the server and return one entry."""
    listing = http_post('v2/analysis/list')
    details = []
    for item in listing['data']:
        details.append(info_request(item['webid'])['data'])
    return analysis_to_entry('All Analyses:', details)
def analysis_info():
    """Return entries for the analyses named by the 'webid' command argument.

    'webid' may be a single id or a comma-separated list of ids.
    """
    ids = demisto.args().get('webid')
    # isinstance (rather than `type(ids) in STRING_TYPES`) also accepts
    # str subclasses and is the idiomatic type check.
    if isinstance(ids, STRING_TYPES):
        ids = ids.split(',')
    LOG('info: web_id = %s' % (ids, ))
    res = [info_request(webid)['data'] for webid in ids]
    return analysis_to_entry('Analyses:', res)
def info_request(web_id):
    """Fetch the raw analysis-info payload for *web_id* from the API."""
    return http_post('v2/analysis/info', data={'webid': web_id})
def search():
    """Search analyses matching the 'query' argument; return an entry or a
    plain 'no results' message."""
    term = demisto.args().get('query')
    matches = http_post('v2/analysis/search', data={'q': term})
    if not matches['data']:
        return 'No Result was found.'
    details = [info_request(item['webid'])['data'] for item in matches['data']]
    return analysis_to_entry('Analysis Search Results:', details)
def analyse_url():
    """Command wrapper: submit the 'url' argument for analysis."""
    args = demisto.args()
    target = args.get('url')
    allow_internet = bool(strtobool(args.get('internet-access', 'true')))
    wait = bool(strtobool(demisto.get(args, 'should_wait')))
    return analyse_url_request(target, wait, allow_internet,
                               args.get('comments'), args.get('systems'))
def analyse_url_request(url, should_wait, internet_access, comments='', systems=''):
    """Submit *url* for analysis; poll for the result when *should_wait*."""
    payload = {
        'accept-tac': 1,
        'url': url,
        'internet-access': 1 if internet_access else 0,
    }
    if comments != '':
        payload['comments'] = comments
    if systems != '':
        payload['systems[]'] = [system.strip() for system in systems.split(',')]
    res = http_post('v2/analysis/submit', data=payload)
    if 'errors' in res:
        LOG('Error! in command analyse_url: url=%s' % (url, ))
        LOG('got the following errors:\n' + '\n'.join(e['message'] for e in res['errors']))
        raise Exception('command failed to run.')
    web_id = res['data']['webids'][0]
    if should_wait:
        return poll_webid(web_id)
    return analysis_to_entry('Analysis #%s' % (web_id, ), info_request(web_id)['data'])
def analyse_sample():
    """Submit file entries or sample URLs for analysis.

    Exactly one of the 'file_id' / 'sample_url' arguments must be supplied
    (each may be a comma-separated list); returns one entry per submission.
    """
    args = demisto.args()
    file_entry = args.get('file_id', '')
    # isinstance is subclass-safe, unlike the original `type(x) in STRING_TYPES`.
    if isinstance(file_entry, STRING_TYPES):
        file_entry = [f for f in file_entry.split(',') if f != '']
    sample_url = args.get('sample_url', '')
    if isinstance(sample_url, STRING_TYPES):
        sample_url = [f for f in sample_url.split(',') if f != '']
    internet_access = bool(strtobool(args.get('internet-access', 'true')))
    should_wait = bool(strtobool(demisto.get(args, 'should_wait')))
    comments = args.get('comments', '')
    systems = args.get('systems', '')
    # Error when both inputs are empty, or when both were supplied.
    if (len(file_entry) == 0 and len(sample_url) == 0) or ([] not in [file_entry, sample_url]):
        raise ValueError('You must specify one (and only one) of the following: sample_url, file_id.')
    LOG('analysing sample')
    if len(file_entry) != 0:
        return [analyse_sample_file_request(f, should_wait, internet_access, comments, systems) for f in file_entry]
    return [analyse_sample_url_request(s, should_wait, internet_access, comments, systems) for s in sample_url]
def analyse_sample_file_request(file_entry, should_wait, internet_access, comments='', systems=''):
    """Submit the file behind a War Room entry id as a sample for analysis.

    The file is copied to its original name (so the API sees a meaningful
    filename), submitted, and the temporary copy is removed afterwards.
    """
    import os  # local import: 'os' is not imported at this file's top level

    data = {
        'accept-tac': 1,
        'internet-access': 1 if internet_access else 0,
    }
    if comments != '':
        data['comments'] = comments
    if systems != '':
        data['systems[]'] = [s.strip() for s in systems.split(',')]  # type: ignore
    file_info = demisto.getFilePath(file_entry)
    shutil.copy(file_info['path'], file_info['name'])
    try:
        with open(file_info['name'], 'rb') as f:
            res = http_post('v2/analysis/submit', data=data, files={'sample': f})
    finally:
        # BUG FIX: the original called shutil.rmtree() on a regular file, which
        # always fails (and was silenced by ignore_errors=True), leaking the
        # temporary copy; it was also skipped on early returns. Clean up here.
        try:
            os.remove(file_info['name'])
        except OSError:
            pass
    if res == 'nothing_to_analyze':
        return nothing_to_analyze_output
    if 'errors' in res:
        LOG('Error! in command sample file: file_entry=%s' % (file_entry, ))
        LOG('got the following errors:\n' + '\n'.join(e['message'] for e in res['errors']))
        raise Exception('command failed to run.')
    if should_wait:
        return poll_webid(res['data']['webids'][0])
    web_id = res['data']['webids'][0]
    result = info_request(web_id)
    return analysis_to_entry('Analysis #%s' % (web_id, ), result['data'])
def analyse_sample_url_request(sample_url, should_wait, internet_access, comments, systems):
    """Submit a sample located at *sample_url* for analysis.

    Mirrors analyse_sample_file_request but points the server at a URL to
    download the sample from.
    """
    data = {
        'accept-tac': 1,
        'sample-url': sample_url,
        'internet-access': 1 if internet_access else 0,
    }
    if comments != '':
        data['comments'] = comments
    if systems != '':
        data['systems[]'] = [s.strip() for s in systems.split(',')]
    res = http_post('v2/analysis/submit', data=data)
    if res == 'nothing_to_analyze':
        return nothing_to_analyze_output
    if 'errors' in res:
        LOG('Error! in command sample file: file url=%s' % (sample_url, ))
        LOG('got the following errors:\n' + '\n'.join(e['message'] for e in res['errors']))
        raise Exception('command failed to run.')
    if should_wait:
        return poll_webid(res['data']['webids'][0])
    web_id = res['data']['webids'][0]
    # Consistency fix: use the web_id variable instead of re-indexing the
    # response (the original computed web_id and then ignored it).
    result = info_request(web_id)
    return analysis_to_entry('Analysis #%s' % (web_id, ), result['data'])
def download_report():
    """Command wrapper: download the report of type 'type' for 'webid'."""
    args = demisto.args()
    return download_request(args.get('webid'), args.get('type'))
def download_sample():
    """Command wrapper: download the original sample for 'webid'."""
    return download_request(demisto.args().get('webid'), 'sample')
def download_request(webid, rsc_type):
    """Download a resource ('sample' or a report type) for analysis *webid*
    and return it as a War Room file entry."""
    res = http_post('v2/analysis/download', data={'webid': webid, 'type': rsc_type.lower()}, parse_json=False)
    # BUG FIX: info_request returns {'data': {...}}; the filename lives under
    # the 'data' key, so the original info.get('filename', webid) always fell
    # back to the webid.
    info = info_request(webid).get('data', {})
    name = info.get('filename', webid)
    if rsc_type == 'sample':
        # '.dontrun' suffix prevents accidental execution of the sample.
        return fileResult('%s.dontrun' % (name, ), res)
    return fileResult('%s_report.%s' % (name, rsc_type, ), res, entryTypes['entryInfoFile'])
''' EXECUTION CODE '''
# Top-level command dispatch: map the invoked Demisto command to its handler.
LOG('command is %s' % (demisto.command(), ))
try:
    handle_proxy()
    if demisto.command() in ['test-module', 'joe-is-online']:
        # This is the call made when pressing the integration test button.
        if is_online():
            demisto.results('ok')
        else:
            demisto.results('not online')
    elif demisto.command() == 'joe-list-analysis':
        demisto.results(list_analysis())
    elif demisto.command() == 'joe-analysis-info':
        demisto.results(analysis_info())
    elif demisto.command() == 'joe-analysis-submit-url':
        demisto.results(analyse_url())
    elif demisto.command() == 'joe-detonate-url':
        demisto.args()['should_wait'] = 'True'
        demisto.results(analyse_url())
    elif demisto.command() == 'joe-analysis-submit-sample':
        demisto.results(analyse_sample())
    elif demisto.command() == 'joe-detonate-file':
        demisto.args()['should_wait'] = 'True'
        demisto.results(analyse_sample())
    elif demisto.command() == 'joe-download-report':
        demisto.results(download_report())
    elif demisto.command() == 'joe-download-sample':
        demisto.results(download_sample())
    elif demisto.command() == 'joe-search':
        demisto.results(search())
except Exception as e:
    # BUG FIX: Exception.message does not exist in Python 3 — accessing it
    # raised AttributeError inside the error handler. Use str(e).
    if demisto.params().get('verbose'):
        LOG(str(e))
    if demisto.command() != 'test-module':
        LOG.print_log()
        demisto.results({
            'Type': entryTypes['error'],
            'ContentsFormat': formats['text'],
            'Contents': 'error has occurred: %s' % (str(e), ),
        })
| 34.336898
| 119
| 0.616493
|
4a09ff7028017c9e2450b61afdbace6aa49157d5
| 3,186
|
py
|
Python
|
tests/test_upsampling.py
|
aoifemcdonagh/MINC_model_testing
|
87d72c9e75635afe3a5a5999141e0c6ecb087cac
|
[
"MIT"
] | 1
|
2021-02-18T18:35:28.000Z
|
2021-02-18T18:35:28.000Z
|
tests/test_upsampling.py
|
aoifemcdonagh/MINC_model_testing
|
87d72c9e75635afe3a5a5999141e0c6ecb087cac
|
[
"MIT"
] | null | null | null |
tests/test_upsampling.py
|
aoifemcdonagh/MINC_model_testing
|
87d72c9e75635afe3a5a5999141e0c6ecb087cac
|
[
"MIT"
] | 1
|
2019-12-07T12:12:11.000Z
|
2019-12-07T12:12:11.000Z
|
import caffe
import sys
import skimage
from material_segmentation import segment as resize, minc_plotting as minc_plot, minc_classify as minc_utils
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.image as mpimg
SCALES = [1.0/np.sqrt(2), 1.0, np.sqrt(2)] # Define scales as per MINC paper
def upsample(im):
    """
    Function for performing upscaling of probability maps generated by MINC model
    :param im: input image array (H x W x C, as loaded by caffe.io.load_image)
    :return: scaled, upsampled and averaged probability maps for all classes
    """
    # NOTE(review): the semantics of resize.resize_images / minc_utils.classify /
    # get_probability_maps are assumed from their names — confirm against
    # material_segmentation before relying on this docstring.
    resized_images = resize.resize_images(im)  # perform image resizing
    outputs = [minc_utils.classify(image) for image in resized_images]  # Perform classification on images
    prob_maps = [minc_utils.get_probability_maps(out) for out in outputs]  # Get probability maps for each class for each image
    # Upsampling probability maps to be same dimensions as original image (plus padding)
    upsampled_prob_maps = np.array([[skimage.transform.resize(prob_map,
                                                              output_shape=(im.shape[0], im.shape[1]),
                                                              mode='constant',
                                                              cval=0,
                                                              preserve_range=True)
                                     for prob_map in prob_maps_single_image]
                                    for prob_maps_single_image in prob_maps])
    # Probability maps for each class, averaged from resized images probability maps
    # (axis 0 averages over the different scales).
    averaged_prob_maps = np.average(upsampled_prob_maps, axis=0)
    return averaged_prob_maps
def plot_simple(im, data):
    """
    Show the input image and a classification result side by side.
    :param im: path to the input image (readable by matplotlib.image.imread)
    :param data: array to display next to the image (e.g. a confidence map)
    :return:
    """
    figure, axes = plt.subplots(ncols=2, figsize=(30, 10))
    figure.subplots_adjust(hspace=0.5, left=0.07, right=0.93)
    left_axis, right_axis = axes
    left_axis.imshow(mpimg.imread(im))
    left_axis.set_title("Input image")
    image_handle = right_axis.imshow(data)
    right_axis.set_title("data/result")
    figure.colorbar(image_handle, ax=right_axis)
    plt.show()
def add_padding(im, pad):
    """
    Symmetrically pad an H x W x C image by *pad* pixels on each spatial edge.
    :param im: image (preloaded with caffe.io.load_image)
    :param pad: number of pixels to mirror-pad on top/bottom/left/right
    :return: image with padding (channel axis untouched)
    """
    spatial_padding = ((pad, pad), (pad, pad), (0, 0))
    return np.pad(im, pad_width=spatial_padding, mode='symmetric')
def remove_padding(im, pad):
    """
    Strip *pad* pixels from each spatial edge of *im*.
    :param im: image to remove padding from
    :param pad: number of pixels of padding to remove
    :return: the unpadded image
    """
    # BUG FIX: with pad == 0 the original slice im[0:-0] is empty, returning
    # an empty array instead of the image unchanged.
    if pad == 0:
        return im
    return im[pad:-pad, pad:-pad]
if __name__ == "__main__":
    # CLI: python test_upsampling.py <image_path> <padding_pixels>
    caffe.set_mode_gpu()
    image_path = sys.argv[1]  # path to image to be segmented
    padding = int(sys.argv[2])  # number of pixels to pad (unused while the line below is commented)
    image = caffe.io.load_image(image_path)  # Must load images with this method!
    #image = add_padding(image, padding)  # add padding
    av_prob_maps = upsample(image)
    # Per-pixel confidence = highest class probability at that pixel.
    confidence_map = av_prob_maps.max(axis=0)
    plot_simple(image_path, confidence_map)
    minc_plot.plot_class_map(av_prob_maps)
| 32.845361
| 127
| 0.630885
|
4a09ff93b016bcb5b07031f8d333958d28355816
| 274
|
py
|
Python
|
t_Modefied_file.py
|
iloghyr/easy_python
|
b750f6817d54562b23630e2419bace19da0abf8b
|
[
"Apache-2.0"
] | 1
|
2018-03-01T02:42:52.000Z
|
2018-03-01T02:42:52.000Z
|
t_Modefied_file.py
|
iloghyr/easy_python
|
b750f6817d54562b23630e2419bace19da0abf8b
|
[
"Apache-2.0"
] | null | null | null |
t_Modefied_file.py
|
iloghyr/easy_python
|
b750f6817d54562b23630e2419bace19da0abf8b
|
[
"Apache-2.0"
] | null | null | null |
# _*_coding=utf-8 _*_
# Python 2 script: rewrite a.txt in place, appending an extra blank line after
# any line containing a Baidu URL (http://www.baidu.com or the https variant).
import fileinput
import re
p = re.compile(r'http[s]?://[w]{3}\.baidu\.com')
# inplace=1 redirects stdout into the file, so each `print` writes back to a.txt.
for line in fileinput.input(r'a.txt',inplace=1):
    if re.search(p,line) is not None:
        # trailing comma suppresses print's own newline (Python 2 syntax)
        print line.strip()+'\n',
    else:
        print line,
# After the loop, fileinput restores stdout, so this goes to the console.
print 'hello world'
| 16.117647
| 49
| 0.605839
|
4a0a0151efa3ae1ef2340bc318f0de5200ff4f00
| 300
|
py
|
Python
|
juriscraper/opinions/united_states/state/hawapp.py
|
EvandoBlanco/juriscraper
|
3d16af258620d4ba1b4827f66ef69e8a2c5a0484
|
[
"BSD-2-Clause"
] | 228
|
2015-01-23T04:41:39.000Z
|
2022-03-30T09:52:20.000Z
|
juriscraper/opinions/united_states/state/hawapp.py
|
EvandoBlanco/juriscraper
|
3d16af258620d4ba1b4827f66ef69e8a2c5a0484
|
[
"BSD-2-Clause"
] | 331
|
2015-01-05T18:53:40.000Z
|
2022-03-29T23:43:30.000Z
|
juriscraper/opinions/united_states/state/hawapp.py
|
EvandoBlanco/juriscraper
|
3d16af258620d4ba1b4827f66ef69e8a2c5a0484
|
[
"BSD-2-Clause"
] | 84
|
2015-01-03T01:19:21.000Z
|
2022-03-01T08:09:32.000Z
|
# Author: Michael Lissner
# Date created: 2013-05-23
from juriscraper.opinions.united_states.state import haw
class Site(haw.Site):
    """Scraper for the Hawaii Intermediate Court of Appeals: reuses the
    Hawaii Supreme Court scraper (haw.Site) and only retargets the court."""
    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.court_id = self.__module__
        # "ICA" selects Intermediate Court of Appeals content on the source site.
        self.target_id = "ICA"
| 23.076923
| 56
| 0.676667
|
4a0a018829b2712a72dc8382ed9fba5b3608941e
| 284
|
py
|
Python
|
async/python/thread.py
|
walk8243/language-study
|
9625cb1a25c2d9fa35ade53b7861aa6d59601196
|
[
"MIT"
] | null | null | null |
async/python/thread.py
|
walk8243/language-study
|
9625cb1a25c2d9fa35ade53b7861aa6d59601196
|
[
"MIT"
] | null | null | null |
async/python/thread.py
|
walk8243/language-study
|
9625cb1a25c2d9fa35ade53b7861aa6d59601196
|
[
"MIT"
] | null | null | null |
import concurrent.futures
import func
# Submit ten func.sleep jobs to a 3-worker thread pool and print the results
# in submission order.
tpe = concurrent.futures.ThreadPoolExecutor(max_workers=3)
futures = []
for i in range(10):
    # NOTE(review): assumes func.sleep(name, seconds) returns a value — confirm.
    future = tpe.submit(func.sleep, 'sleep' + str(i+1), 3)
    futures.append(future)
# result() blocks until each future completes, preserving submit order.
print([ future.result() for future in futures ])
tpe.shutdown()
| 20.285714
| 58
| 0.714789
|
4a0a01b8a6bcdd897adee1d5cf6cba982c20edba
| 1,832
|
py
|
Python
|
mediaviz/rotation.py
|
Tahsin-Mayeesha/network-viz
|
cb51a20071948b9b53a50224fbd10d8be42e8683
|
[
"MIT"
] | 16
|
2018-07-02T18:18:37.000Z
|
2021-08-09T16:22:30.000Z
|
mediaviz/rotation.py
|
Tahsin-Mayeesha/network-viz
|
cb51a20071948b9b53a50224fbd10d8be42e8683
|
[
"MIT"
] | 1
|
2019-02-28T20:37:30.000Z
|
2019-02-28T20:37:30.000Z
|
mediaviz/rotation.py
|
Tahsin-Mayeesha/network-viz
|
cb51a20071948b9b53a50224fbd10d8be42e8683
|
[
"MIT"
] | 7
|
2018-06-11T16:37:22.000Z
|
2020-11-05T21:55:40.000Z
|
def _rotate(point, angle, origin = (0,0),unit = 'degree'):
"""Rotate a point counterclockwise by a given angle around a given origin.
Angle can be both in radian or degree. Helper function for rotating a layout.
Parameters
----------
point : tuple
position in (x,y) form
angle : float
angle to rotate the point
origin : tuple in (x,y) form
point will rotate with respect to the origin.
unit : 'degree'/'radian' to indicate if the angle is in degrees or radians.
if given in degrees angle is converted to radians.
Returns
-------
tuple
rotated point as (x,y) tuple.
"""
import math
ox, oy = origin
px, py = point
if unit == 'degree':
angle = math.radians(angle)
if unit == 'radian':
angle = angle
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return qx, qy
# rotation example : pos2 = {k:rotate(v,45,(0.5,0.5)) for k,v in pos2.items()}
def rotation_layout(pos, angle, origin=(0, 0), unit="degree"):
    """Rotate every position in ``pos`` by ``angle`` around ``origin``.

    Parameters
    ----------
    pos : dict
        A dictionary with nodes as keys and positions as values.
    angle : float
        angle in degree or radian
    origin : tuple
        the point will rotate with respect to the origin.
    unit : str
        'degree'/'radian' to indicate if the angle is in degrees or radians.
        If given in degrees the angle is converted to radians.

    Returns
    -------
    dict
        The pos dict with every position rotated by the given angle with
        respect to the given origin.
    """
    rotated = {}
    for node, position in pos.items():
        rotated[node] = _rotate(position, angle, origin, unit)
    return rotated
| 30.032787
| 104
| 0.599345
|
4a0a03d43af80c76fb59ddd6c46987ace1d51e64
| 316
|
py
|
Python
|
HW3/hshioi_cloudmesh_ex3.py
|
futuresystems/465-git4hiroaki
|
bfd9068e0d074d7b6132844dc0f92780bf63bcb9
|
[
"Apache-2.0"
] | null | null | null |
HW3/hshioi_cloudmesh_ex3.py
|
futuresystems/465-git4hiroaki
|
bfd9068e0d074d7b6132844dc0f92780bf63bcb9
|
[
"Apache-2.0"
] | null | null | null |
HW3/hshioi_cloudmesh_ex3.py
|
futuresystems/465-git4hiroaki
|
bfd9068e0d074d7b6132844dc0f92780bf63bcb9
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import cloudmesh

# Boot one VM and delete another on the 'india' cloud; keep both shell outputs.
start_txt = cloudmesh.shell("vm start --cloud=india --image=futuresystems/ubuntu-14.04 --flavor=m1.small")
terminate_txt = cloudmesh.shell("vm delete --cloud=india hshioi_7 --force")
# Use a context manager so the log file is closed even if the write raises
# (the original open()/close() pair leaked the handle on error).
with open('hshioi_cloudmesh_ex3.txt', 'w') as f:
    f.write(str(start_txt) + "\n" + str(terminate_txt))
| 39.5
| 106
| 0.740506
|
4a0a046d560aea4aef8cb1a159121711de8c8757
| 382
|
py
|
Python
|
kill_child_process_ejemplo.py
|
learsi1911/GAMA_pygmo_v13
|
227ff23a4fba78a14f45ea4058dc107e236281d8
|
[
"Apache-2.0"
] | null | null | null |
kill_child_process_ejemplo.py
|
learsi1911/GAMA_pygmo_v13
|
227ff23a4fba78a14f45ea4058dc107e236281d8
|
[
"Apache-2.0"
] | null | null | null |
kill_child_process_ejemplo.py
|
learsi1911/GAMA_pygmo_v13
|
227ff23a4fba78a14f45ea4058dc107e236281d8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 16 14:03:15 2021
@author: 20210595
"""
import psutil


def on_terminate(proc):
    # NOTE(review): psutil documents that wait_procs attaches ``returncode``
    # to each process it reaps before invoking the callback.
    print("process {} terminated with exit code {}".format(proc, proc.returncode))


# Politely ask every child of this process to exit...
children = psutil.Process().children()
for child in children:
    child.terminate()
# ...wait up to 3 seconds for them, then force-kill any stragglers.
gone, alive = psutil.wait_procs(children, timeout=3, callback=on_terminate)
for child in alive:
    child.kill()
| 21.222222
| 82
| 0.683246
|
4a0a048c6bd31460b929fb3dbb9bab8590774703
| 4,661
|
py
|
Python
|
pyabc/transition/local_transition.py
|
wdwang/pyABC
|
65f85d97f20ac47df6a6d95cb373adba35738f57
|
[
"BSD-3-Clause"
] | null | null | null |
pyabc/transition/local_transition.py
|
wdwang/pyABC
|
65f85d97f20ac47df6a6d95cb373adba35738f57
|
[
"BSD-3-Clause"
] | null | null | null |
pyabc/transition/local_transition.py
|
wdwang/pyABC
|
65f85d97f20ac47df6a6d95cb373adba35738f57
|
[
"BSD-3-Clause"
] | null | null | null |
import logging

import numpy as np
import numpy.linalg as la
import pandas as pd
import scipy as sp
from scipy.spatial import cKDTree

from .base import Transition
from .exceptions import NotEnoughParticles
from .util import smart_cov
logger = logging.getLogger("LocalTransition")
class LocalTransition(Transition):
    """
    Local KDE fit. Takes into account only the k
    nearest neighbors, similar to [Filippi]_.

    Parameters
    ----------
    k: int
        Number of nearest neighbors for local covariance
        calculation.
    scaling: float
        Scaling factor for the local covariance matrices.
    k_fraction: float, optional
        Calculate number of nearest neighbors to use according to
        ``k = k_fraction * population_size`` (and rounds it).

    Attributes
    ----------
    EPS: float
        Scaling of the identity matrix to be added to the covariance
        in case the covariances are not invertible.

    .. [Filippi] Filippi, Sarah, Chris P. Barnes, Julien Cornebise,
                 and Michael P.H. Stumpf. "On Optimality of Kernels
                 for Approximate Bayesian Computation Using Sequential
                 Monte Carlo." Statistical Applications in Genetics and
                 Molecular Biology 12, no. 1 (2013):
                 87-107. doi:10.1515/sagmb-2012-0069.
    """
    EPS = 1e-3
    MIN_K = 10

    def __init__(self, k=None, k_fraction=1/4, scaling=1):
        if k_fraction is not None:
            self.k_fraction = k_fraction
            self._k = None
        else:
            self.k_fraction = None
            self._k = k
        self.scaling = scaling

    @property
    def k(self):
        # Effective neighbor count: at least MIN_K and at least the parameter
        # dimension, so local covariances have a chance of being invertible.
        if self.k_fraction is not None:
            # NOTE(review): self.w appears to be populated by the Transition
            # base class machinery before this property is queried.
            if self.w is None:
                k_ = 0
            else:
                k_ = int(self.k_fraction * len(self.w))
        else:
            k_ = self._k
        try:
            dim = self.X_arr.shape[1]
        except AttributeError:
            # Not fitted yet.
            dim = 0
        return max([k_, self.MIN_K, dim])

    def fit(self, X, w):
        """Fit one local covariance per support point.

        Parameters
        ----------
        X: pd.DataFrame
            Parameter samples, one row per particle.
        w: array-like
            Particle weights.

        Raises
        ------
        NotEnoughParticles
            If ``X`` is empty.
        """
        if len(X) == 0:
            raise NotEnoughParticles("Fitting not possible.")
        # DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
        # supported replacement.
        self.X_arr = X.to_numpy()

        # k+1 because each point's nearest neighbor is the point itself.
        ctree = cKDTree(X)
        _, indices = ctree.query(X, k=min(self.k + 1, X.shape[0]))

        covs, inv_covs, dets = list(zip(*[self._cov_and_inv(n, indices)
                                          for n in range(X.shape[0])]))
        # The numpy aliases formerly re-exported by scipy (sp.array, ...)
        # have been removed from scipy; use numpy directly.
        self.covs = np.array(covs)
        self.inv_covs = np.array(inv_covs)
        self.determinants = np.array(dets)

        # Gaussian normalization constants, one per local kernel.
        self.normalization = np.sqrt(
            (2 * np.pi) ** self.X_arr.shape[1] * self.determinants)

        if not np.isreal(self.normalization).all():
            raise Exception("Normalization not real")
        self.normalization = np.real(self.normalization)

    def pdf(self, x):
        """Evaluate the fitted density at ``x`` (Series or DataFrame)."""
        x = x[self.X.columns].to_numpy()
        if len(x.shape) == 1:
            return self._pdf_single(x)
        else:
            return np.array([self._pdf_single(x) for x in x])

    def _pdf_single(self, x):
        distance = self.X_arr - x
        # Squared Mahalanobis distance to every support point, each with its
        # own local covariance.
        cov_distance = np.einsum("ij,ijk,ik->i",
                                 distance, self.inv_covs, distance)
        return np.average(np.exp(-.5 * cov_distance) / self.normalization,
                          weights=self.w)

    def _cov_and_inv(self, n, indices):
        """
        Calculate covariance around local support vector
        and also the inverse
        """
        cov = self._cov(indices, n)
        det = la.det(cov)
        # Regularize with scaled identity until the covariance is invertible.
        while det <= 0:
            cov += np.identity(cov.shape[0]) * self.EPS
            det = la.det(cov)
        inv_cov = la.inv(cov)
        return cov, inv_cov, det

    def _cov(self, indices, n):
        if len(indices) > 1:
            # Skip column 0: it is the point itself (distance 0).
            surrounding_indices = indices[n, 1:]
            nearest_vector_deltas = (self.X_arr[surrounding_indices]
                                     - self.X_arr[n])
            local_weights = self.w[surrounding_indices]
        else:
            nearest_vector_deltas = np.absolute(self.X_arr)
            local_weights = np.array([1])
        cov = smart_cov(nearest_vector_deltas,
                        local_weights / local_weights.sum())
        if np.absolute(cov.sum()) == 0:
            # Degenerate neighborhood (all deltas zero): fall back to a scale
            # derived from the first support point.
            for k in range(cov.shape[0]):
                cov[k, k] = np.absolute(self.X_arr[0, k])
        return cov * self.scaling

    def rvs_single(self):
        """Draw one sample: pick a support point by weight, then perturb it
        with that point's local Gaussian kernel."""
        support_index = np.random.choice(self.w.shape[0], p=self.w)
        sample = np.random.multivariate_normal(self.X_arr[support_index],
                                               self.covs[support_index])
        return pd.Series(sample, index=self.X.columns)
| 31.924658
| 74
| 0.566188
|
4a0a049ee39412240a6e411b990c176ae816125a
| 2,997
|
py
|
Python
|
configs/eftnet/E1_eft53_hmctrd27_whheatmap_v2l_2d5lr_wd4e4_s123_nos_04b_wm200_1x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
configs/eftnet/E1_eft53_hmctrd27_whheatmap_v2l_2d5lr_wd4e4_s123_nos_04b_wm200_1x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
configs/eftnet/E1_eft53_hmctrd27_whheatmap_v2l_2d5lr_wd4e4_s123_nos_04b_wm200_1x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
# model settings
# CenterNet-style detector with a DarkNet-53 backbone and a CXTHead bbox head.
model = dict(
    type='CenterNet',
    pretrained='./pretrain/darknet53.pth',
    backbone=dict(
        type='DarknetV3',
        layers=[1, 2, 8, 8, 4],
        inplanes=[3, 32, 64, 128, 256, 512],
        planes=[32, 64, 128, 256, 512, 1024],
        norm_cfg=dict(type='BN'),
        out_indices=(1, 2, 3, 4),
        frozen_stages=1,
        norm_eval=False),
    neck=dict(type='None'),
    bbox_head=dict(
        type='CXTHead',
        inplanes=(128, 256, 512, 1024),
        head_conv=128,
        wh_conv=64,
        use_deconv=False,
        norm_after_upsample=False,
        hm_head_conv_num=2,
        wh_head_conv_num=2,
        ct_head_conv_num=1,
        fovea_hm=False,
        num_classes=81,
        use_exp_wh=False,
        wh_offset_base=16,
        wh_agnostic=True,
        wh_heatmap=True,
        shortcut_cfg=(1, 2, 3),
        shortcut_attention=(False, False, False),
        norm_cfg=dict(type='BN'),
        norm_wh=False,
        avg_wh_weightv2=True,
        hm_center_ratio=0.27,
        hm_init_value=None,
        giou_weight=5.,
        merge_weight=1.,
        hm_weight=1.,
        ct_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
    vis_every_n_iters=100,
    debug=False)
test_cfg = dict(
    score_thr=0.01,
    max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# NOTE(review): ``train_pipeline`` and ``test_pipeline`` are referenced below
# but never defined in this file — presumably they are merged in from a base
# config by the framework's config loader. Confirm before loading this config
# standalone, otherwise it raises NameError.
data = dict(
    imgs_per_gpu=12,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0004,
                 paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
# Step schedule with a short (200-iter) linear warmup; decays at epochs 9, 11.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=200,
    warmup_ratio=1.0 / 10,
    step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
bbox_head_hist_config = dict(
    model_type=['ConvModule', 'DeformConvPack'],
    sub_modules=['bbox_head'],
    save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'eft53_htct027_whheatmap_v2l_2d5lr_wd4e4_s123_nos_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 29.97
| 86
| 0.643977
|
4a0a051d0208e7fb8ec1d6b6f32da5bc9b9fd251
| 1,850
|
py
|
Python
|
generator/contact.py
|
seredyan/AddressBook
|
155779116ed7e8ba27d472bce7799a40db3780ac
|
[
"Apache-2.0"
] | null | null | null |
generator/contact.py
|
seredyan/AddressBook
|
155779116ed7e8ba27d472bce7799a40db3780ac
|
[
"Apache-2.0"
] | null | null | null |
generator/contact.py
|
seredyan/AddressBook
|
155779116ed7e8ba27d472bce7799a40db3780ac
|
[
"Apache-2.0"
] | null | null | null |
import random
import string
import os.path
import json
import jsonpickle
from model.contact import Contact
import getopt
import sys
import re
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
    # Bug fix: the getopt module has no usage() function — the old
    # ``getopt.usage()`` call raised AttributeError instead of reporting
    # the parse error. Print the error plus a usage line and exit.
    print(err, file=sys.stderr)
    print("Usage: contact.py [-n <number of contacts>] [-f <output file>]",
          file=sys.stderr)
    sys.exit(2)
# Defaults: 3 contacts, written to data/contacts.json.
n = 3
f = "data/contacts.json"
for o, a in opts:
    if o == "-n":
        n = int(a)
    elif o == "-f":
        f = a
def random_string(prefix, maxlen):
    """Return ``prefix`` plus up to ``maxlen - 1`` random alphanumeric chars.

    Repeating the digits three times in the alphabet makes digits three
    times as likely to be drawn as any single letter.
    """
    alphabet = string.ascii_letters + string.digits * 3
    length = random.randrange(maxlen)
    return prefix + "".join(random.choice(alphabet) for _ in range(length))
def random_char_email(maxlen):
    """Return up to ``maxlen - 1`` random chars followed by a random domain."""
    random_emails = ["a@gmail.com", "b@ya.ru", "c@mail.ru", "d@icloud.com", "e@company.com", "f@yahoo.com", "g@outlook.com"]
    length = random.randrange(maxlen)
    local_part = "".join(random.choice(string.ascii_letters + string.digits)
                         for _ in range(length))
    return local_part + random.choice(random_emails)
def random_digits_phone(maxlen):
    """Return exactly ``maxlen`` random digit characters."""
    return "".join(random.choice(string.digits) for _ in range(maxlen))
# Build n random contacts and persist them next to the project root as
# pretty-printed JSON (jsonpickle keeps the Contact type information).
testdata = [
    Contact(name=random_string("NAME", 2), lastname=random_string("LASTNAME", 2),
            address=random_string("countryX", 2),
            landline=random_digits_phone(3),
            mobile=random_digits_phone(3), workphone=random_digits_phone(3),
            second_landline=random_digits_phone(3),
            email=random_char_email(3), email2=random_char_email(3),
            email3=random_char_email(3))
    for i in range(n)
]
out_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(out_path, "w") as file_out:
    jsonpickle.set_encoder_options("json", indent=2)
    file_out.write(jsonpickle.encode(testdata))
| 30.833333
| 127
| 0.671351
|
4a0a0595710110f0aff019429b30774d74484794
| 1,005
|
py
|
Python
|
djangorestproject/gameplay/migrations/0005_auto_20181016_1041.py
|
debu999/djangofundamentals
|
15a7a3d6e40a2faf3359a151bb0bb44dd7d669ff
|
[
"Apache-2.0"
] | null | null | null |
djangorestproject/gameplay/migrations/0005_auto_20181016_1041.py
|
debu999/djangofundamentals
|
15a7a3d6e40a2faf3359a151bb0bb44dd7d669ff
|
[
"Apache-2.0"
] | null | null | null |
djangorestproject/gameplay/migrations/0005_auto_20181016_1041.py
|
debu999/djangofundamentals
|
15a7a3d6e40a2faf3359a151bb0bb44dd7d669ff
|
[
"Apache-2.0"
] | 1
|
2019-08-22T09:13:41.000Z
|
2019-08-22T09:13:41.000Z
|
# Generated by Django 2.1.1 on 2018-10-16 02:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration (Django 2.1.1): adjusts two fields on the
    # ``Invitation`` model — the optional message text and the invited-user
    # foreign key (help text, verbose names, related_name, CASCADE delete).

    dependencies = [
        ('gameplay', '0004_invitation'),
    ]
    operations = [
        migrations.AlterField(
            model_name='invitation',
            name='message',
            field=models.CharField(blank=True, help_text='Its always good to add a friendly message.', max_length=300,
                                   verbose_name='Optional Message'),
        ),
        migrations.AlterField(
            model_name='invitation',
            name='tuser',
            field=models.ForeignKey(help_text='Please select the user you want to play game with.',
                                    on_delete=django.db.models.deletion.CASCADE, related_name='invitation_received',
                                    to=settings.AUTH_USER_MODEL, verbose_name='User to Invite'),
        ),
    ]
| 35.892857
| 118
| 0.599005
|
4a0a05d031edfe25ea94ef33a837b65519fd4e18
| 7,008
|
py
|
Python
|
GANs/Pix2Pix_Isola_et_al_2017/model.py
|
gonzrubio/ML_Papers
|
562f85c81b0afb8771708ff31063f722d838b9d2
|
[
"MIT"
] | 6
|
2021-11-05T05:09:22.000Z
|
2022-03-10T03:32:30.000Z
|
GANs/Pix2Pix_Isola_et_al_2017/model.py
|
gonzrubio/ML_Papers
|
562f85c81b0afb8771708ff31063f722d838b9d2
|
[
"MIT"
] | null | null | null |
GANs/Pix2Pix_Isola_et_al_2017/model.py
|
gonzrubio/ML_Papers
|
562f85c81b0afb8771708ff31063f722d838b9d2
|
[
"MIT"
] | 1
|
2021-11-05T05:09:26.000Z
|
2021-11-05T05:09:26.000Z
|
"""Image-to-Image Translation with Conditional Adversarial Networks.
Paper: https://arxiv.org/abs/1611.07004
Created on Thu Nov 18 17:34:38 2021
@author: gonzr
"""
import torch
import torch.nn as nn
# Use the first GPU when available; all tensors/modules below go to DEVICE.
DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
# Release cached GPU allocator memory at import time (no-op on CPU).
torch.cuda.empty_cache()
def make_conv(in_size, out_size, encode, batch_norm, activation, drop_out):
    """Build one Convolution-[BatchNorm]-Activation-[Dropout] block.

    Let Ck denote a Convolution-BatchNorm-ReLU block with k filters and CDk
    the same block with 50% dropout. All convolutions are 4 x 4 spatial
    filters with stride 2: the encoder/discriminator variant downsamples by
    a factor of 2, the decoder variant upsamples by a factor of 2.
    """
    # Bias is redundant when BatchNorm immediately follows the convolution.
    use_bias = not batch_norm
    if encode:
        conv = nn.Conv2d(in_size, out_size,
                         kernel_size=4, stride=2, padding=1,
                         padding_mode="reflect", bias=use_bias)
    else:
        conv = nn.ConvTranspose2d(in_size, out_size,
                                  kernel_size=4, stride=2, padding=1,
                                  bias=use_bias)
    layers = [conv]
    if batch_norm:
        layers.append(nn.BatchNorm2d(out_size))
    activations = {
        "leaky": lambda: nn.LeakyReLU(0.2),
        "sigmoid": nn.Sigmoid,
        "tanh": nn.Tanh,
        "relu": nn.ReLU,
    }
    if activation in activations:
        layers.append(activations[activation]())
    if drop_out:
        layers.append(nn.Dropout(0.5))
    return nn.Sequential(*layers)
def init_weights(model, mean=0.0, std=0.02):
    """Re-initialize all Conv2d/BatchNorm2d weights from N(mean, std)."""
    targets = (nn.Conv2d, nn.BatchNorm2d)
    for layer in model.modules():
        if isinstance(layer, targets):
            nn.init.normal_(layer.weight.data, mean=mean, std=std)
class Generator(nn.Module):
    """UNet Generator architecture.
    encoder:
        C64-C128-C256-C512-C512-C512-C512-C512
    decoder:
        CD512-CD1024-CD1024-C1024-C1024-C512-C256-C128
    After the C128 block in the decoder, a convolution is applied to map to the
    number of output channels, followed by a Tanh function. BatchNorm is not
    applied to the C64 block in the encoder. All ReLUs in the encoder are
    leaky with slope 0.2, while ReLUs in the decoder are not leaky.
    """
    def __init__(self, in_channels=3, out_channels=3):
        super(Generator, self).__init__()
        # Channel widths along the contracting path; consecutive pairs are the
        # (in, out) sizes of one downsampling block.
        encoder = [in_channels, 64, 128, 256, 512, 512, 512, 512, 512]
        encoder = zip(encoder, encoder[1:])
        self.encoder = nn.ModuleList()
        for idx, (input_size, output_size) in enumerate(encoder):
            if idx == 0:
                # The first block sees the image concatenated with the noise
                # map (see forward), hence doubled input channels and no BN.
                input_size *= 2
                batch_norm = False
            else:
                batch_norm = True
            self.encoder.append(make_conv(in_size=input_size,
                                          out_size=output_size,
                                          encode=True,
                                          batch_norm=batch_norm,
                                          activation="leaky",
                                          drop_out=False))
        # Expanding path. The listed widths are post-skip-concat input sizes;
        # every block but the last halves its nominal output (output_size //= 2)
        # because the skip connection doubles the next block's input again.
        decoder = [512, 1024, 1024, 1024, 1024, 512, 256, 128, out_channels]
        layers_decoder = len(decoder)
        decoder = zip(decoder, decoder[1:])
        self.decoder = nn.ModuleList()
        for idx, (input_size, output_size) in enumerate(decoder):
            if idx < layers_decoder - 2:
                batch_norm = True
                activation = "relu"
                output_size //= 2
            else:
                # Final block maps to the image channels with Tanh, no BN.
                batch_norm = False
                activation = "tanh"
            self.decoder.append(make_conv(in_size=input_size,
                                          out_size=output_size,
                                          encode=False,
                                          batch_norm=batch_norm,
                                          activation=activation,
                                          drop_out=True if idx < 3 else False))
        # Weights drawn from N(0, 0.02).
        init_weights(self, mean=0.0, std=0.02)
    def forward(self, x, z):
        """Generate a translation of x conditioned on the noise z."""
        # Condition on the noise map by channel-wise concatenation.
        x = torch.cat((x, z), dim=1)
        # Encoder activations stored so that skip[0] is the deepest
        # (bottleneck) feature and skip[-1] the shallowest.
        skip = [None]*len(self.encoder)
        for idx, block in zip(range(len(skip)-1, -1, -1), self.encoder):
            x = block(x)
            skip[idx] = x
        for idx, block in enumerate(self.decoder):
            if idx > 0:
                # U-Net skip connection: concat the matching encoder features.
                x = torch.cat((x, skip[idx]), dim=1)
            x = block(x)
        return x
class Discriminator(nn.Module):
    """C64-C128-C256-C512 PatchGAN Discriminator architecture.

    A final convolution maps the C512 block to a 1-channel patch map,
    followed by a Sigmoid. BatchNorm is skipped on the C64 block and
    all ReLUs are leaky with slope 0.2.
    """
    def __init__(self, in_channels=3):
        super(Discriminator, self).__init__()
        widths = [in_channels, 64, 128, 256, 512, 1]
        n_layers = len(widths)
        self.blocks = nn.ModuleList()
        for idx, (w_in, w_out) in enumerate(zip(widths, widths[1:])):
            first = idx == 0
            last = idx >= n_layers - 2
            self.blocks.append(make_conv(
                # The first block sees both images concatenated (see forward).
                in_size=w_in * 2 if first else w_in,
                out_size=w_out,
                encode=True,
                batch_norm=not (first or last),
                activation="sigmoid" if last else "leaky",
                drop_out=False))
        init_weights(self, mean=0.0, std=0.02)

    def forward(self, x, y):
        """Return a nxn tensor of patch probabilities."""
        joint = torch.cat((x, y), dim=1)
        for block in self.blocks:
            joint = block(joint)
        return joint
if __name__ == '__main__':
    # Smoke test: push random images through both networks, report parameter
    # counts and output shapes.
    batch_size = 8
    channels = 3
    height = 256
    width = 256
    shape = (batch_size, channels, height, width)
    x = torch.randn(shape, device=DEVICE)
    y = torch.randn(shape, device=DEVICE)
    z = torch.randn(shape, device=DEVICE)

    generator = Generator().to(DEVICE)
    total_params = sum(p.numel() for p in generator.parameters())
    print(f"Number of parameters in Generator: {total_params:,}")
    G_z = generator(x, z)
    print(G_z.shape)

    discriminator = Discriminator().to(DEVICE)
    total_params = sum(p.numel() for p in discriminator.parameters())
    print(f"Number of parameters in Discriminator: {total_params:,}")
    D_x = discriminator(x, y)
    print(D_x.shape)
| 34.865672
| 79
| 0.560502
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.