content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#reads each line and adds the entire document
s = 0
try:
while(True):
line = input()
if line != "":
line = (" ".join(line.split())).split(' ')
for i in range(0,len(line)):
s += int(line[i])
else:
break
except EOFError:
pass
print(s)
| [
2,
40779,
1123,
1627,
290,
6673,
262,
2104,
3188,
198,
198,
82,
796,
657,
198,
28311,
25,
198,
220,
220,
220,
981,
7,
17821,
2599,
198,
220,
220,
220,
220,
220,
220,
220,
1627,
796,
5128,
3419,
198,
220,
220,
220,
220,
220,
220,
... | 1.927273 | 165 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Reduces the size of a model file by stripping the optimizer.
Assumes we are working with a TorchAgent
"""
import os
import torch
from parlai.core.params import ParlaiParser
from parlai.core.script import ParlaiScript, register_script
from parlai.utils.torch import atomic_save
from parlai.utils.io import PathManager
import parlai.utils.pickle
import parlai.utils.logging as logging
@register_script("vacuum", hidden=True)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
201,
198,
201,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
201,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
201,
198,
2,
38... | 3.09434 | 212 |
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from apps.authentication.models import OnlineUser as User
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
198,
6738,
6725,
13,
41299,
3299,
13,
27530,
1330,
7467,
12982,
355,
11787,
628
] | 3.261905 | 42 |
import os
import numpy as np
from datetime import date, datetime, timedelta
from GPRIBatchProcessFunctions import *
import pickle
import time
import pylab as plt
## In Python3 Shell: exec(open('Main.py').read())
main()
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
11,
28805,
12514,
198,
6738,
402,
4805,
9865,
963,
18709,
24629,
2733,
1330,
1635,
198,
11748,
2298,
293,
198,
11748,
640,
198,
11748,
... | 3.169014 | 71 |
#!/usr/bin/python
"""Given a regtest result tree, prints an HTML summary to a file.
See HTML skeleton in tests/regtest.html.
"""
import os
import re
import sys
import numpy as np
import matplotlib.pyplot as plt
SUMMARY_ROW = """\
<tfoot style="font-weight: bold; text-align: right">
<tr>
<td>
%(name)s
</td>
<!-- input params -->
<td></td>
<td></td>
<td></td>
<td></td>
<!-- RAPPOR params -->
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<!-- MAP params -->
<td></td>
<td></td>
<!-- Result metrics -->
<td></td>
<td></td>
<td>%(mean_fpr)s</td>
<td>%(mean_fnr)s</td>
<td>%(mean_tv)s</td>
<td>%(mean_am)s</td>
<td>%(mean_time)s</td>
</tr>
</tfoot>
"""
# Navigation and links to plot.
DETAILS = """\
<p style="text-align: right">
<a href="#top">Up</a>
</p>
<a id="%(anchor)s"></a>
<p style="text-align: center">
<img src="%(instance_dir)s/dist.png"/>
</p>
<p>
<a href="%(instance_dir)s">%(name)s files</a>
</p>
"""
# Plots comparing simulations
PLOTS = """ \
<h2>Plots</h2>
<h3 style="text-align: center">Total variation distance</h3>
<p style="text-align: center">
<img src="plots/tv.png"/>
</p>
<h3 style="text-align: center">False negative rate</h3>
<p style="text-align: center">
<img src="plots/fnr.png"/>
</p>
<h3 style="text-align: center">False positive rate</h3>
<p style="text-align: center">
<img src="plots/fpr.png"/>
</p>
<h3 style="text-align: center">Allocated mass</h3>
<p style="text-align: center">
<img src="plots/am.png"/>
</p>
<h3 style="text-align: center">Time</h3>
<p style="text-align: center">
<img src="plots/time.png"/>
</p>
"""
def FormatFloat(x, percent):
"""Formats a floating-point number."""
if percent:
return '{:.1f}%'.format(x * 100.0)
else:
return '{:.3f}'.format(x)
def FormatMeanWithSem(m_std_error, percent=False):
"""Formats an estimate with standard error."""
if m_std_error is None:
return ''
m, std_error = m_std_error
if std_error is None:
return FormatFloat(m, percent)
else:
return '{}±{}'.format(
FormatFloat(m, percent),
FormatFloat(std_error, percent))
def Mean(l):
"""Computes the mean (average) for a list of numbers."""
if l:
return float(sum(l)) / len(l)
else:
return None
def SampleVar(l):
"""Computes the sample variance for a list of numbers."""
if len(l) > 1:
mean = Mean(l)
var = sum([(x - mean) ** 2 for x in l]) / (len(l) - 1)
return var
else:
return None
def StandardErrorEstimate(l):
"""Returns the standard error estimate for a list of numbers.
For a singleton the standard error is assumed to be 10% of its value.
"""
if len(l) > 1:
return (SampleVar(l) / len(l)) ** .5
elif l:
return l[0] / 10.0
else:
return None
def MeanOfMeans(dict_of_lists):
"""Returns the average of averages with the standard error of the estimate.
"""
means = [Mean(dict_of_lists[key]) for key in dict_of_lists
if dict_of_lists[key]]
if means:
# Compute variances of the estimate for each sublist.
se = [StandardErrorEstimate(dict_of_lists[key]) ** 2 for key
in dict_of_lists if dict_of_lists[key]]
return (Mean(means), # Mean over all sublists
sum(se) ** .5 / len(se)) # Standard deviation of the mean
else:
return None
def ParseSpecFile(spec_filename):
"""Parses the spec (parameters) file.
Returns:
An integer and a string. The integer is the number of bogus candidates
and the string is parameters in the HTML format.
"""
with open(spec_filename) as s:
spec_row = s.readline().split()
# Second to last column is 'num_additional' -- the number of bogus
# candidates added
num_additional = int(spec_row[-2])
spec_in_html = ' '.join('<td>%s</td>' % cell for cell in spec_row[1:])
return num_additional, spec_in_html
def ExtractTime(log_filename):
"""Extracts the elapsed time information from the log file.
Returns:
Elapsed time (in seconds) or None in case of failure.
"""
if os.path.isfile(log_filename):
with open(log_filename) as log:
log_str = log.read()
# Matching a line output by analyze.R.
match = re.search(r'Inference took ([0-9.]+) seconds', log_str)
if match:
return float(match.group(1))
return None
def ParseMetrics(metrics_file, log_file, num_additional):
"""Processes the metrics file.
Args:
metrics_file: name of the metrics file
log_file: name of the log.txt file
num_additional: A number of bogus candidates added to the candidate list.
Returns a pair:
- A dictionary of metrics (some can be []).
- An HTML-formatted portion of the report row.
"""
if not os.path.isfile(metrics_file):
metrics_row_str = ['', '', '', '', '', '']
metrics_row_dict = {}
else:
with open(metrics_file) as m:
m.readline()
metrics_row = m.readline().split(',')
(num_actual, num_rappor, num_false_pos, num_false_neg, total_variation,
allocated_mass) = metrics_row
num_actual = int(num_actual)
num_rappor = int(num_rappor)
num_false_pos = int(num_false_pos)
num_false_neg = int(num_false_neg)
total_variation = float(total_variation)
allocated_mass = float(allocated_mass)
# e.g. if there are 20 additional candidates added, and 1 false positive,
# the false positive rate is 5%.
fp_rate = float(num_false_pos) / num_additional if num_additional else 0
# e.g. if there are 100 strings in the true input, and 80 strings
# detected by RAPPOR, then we have 20 false negatives, and a false
# negative rate of 20%.
fn_rate = float(num_false_neg) / num_actual
metrics_row_str = [
str(num_actual),
str(num_rappor),
'%.1f%% (%d)' % (fp_rate * 100, num_false_pos) if num_additional
else '',
'%.1f%% (%d)' % (fn_rate * 100, num_false_neg),
'%.3f' % total_variation,
'%.3f' % allocated_mass,
]
metrics_row_dict = {
'tv': [total_variation],
'fpr': [fp_rate] if num_additional else [],
'fnr': [fn_rate],
'am': [allocated_mass],
}
elapsed_time = ExtractTime(log_file)
if elapsed_time is not None:
metrics_row_str = metrics_row_str + ['%.2f' % elapsed_time]
metrics_row_dict['time'] = [elapsed_time]
# return metrics formatted as HTML table entries
return (metrics_row_dict,
' '.join('<td>%s</td>' % cell for cell in metrics_row_str))
def FormatCell1(test_case, test_instance, metrics_file, log_file, plot_file,
link_to_plots):
"""Outputs an HTML table entry for the first cell of the row.
The row is filled if the metrics file exist. The first cell contains a link
that for short tables points to a plot file inline, for large tables to an
external file.
If the metrics file is missing, the link points to the log file (if one
exists)
"""
relpath_report = '{}/{}_report'.format(test_case, test_instance)
if os.path.isfile(metrics_file):
external_file = plot_file
if link_to_plots:
link = '#{}_{}'.format(test_case, test_instance) # anchor
else:
link = os.path.join(relpath_report, 'dist.png')
else: # no results likely due to an error, puts a link to the log file
external_file = log_file
link = os.path.join(relpath_report, 'log.txt')
if os.path.isfile(external_file):
return '<td><a href="{}">{}</a></td>'.format(link, test_case)
else: # if no file to link to
return '<td>{}</td>'.format(test_case)
def FormatSummaryRow(metrics_lists):
"""Outputs an HTML-formatted summary row."""
means_with_sem = {} # SEM - standard error of the mean
for key in metrics_lists:
means_with_sem[key] = MeanOfMeans(metrics_lists[key])
# If none of the lists is longer than one element, drop the SEM component.
if means_with_sem[key] and max([len(l) for l in metrics_lists[key]]) < 2:
means_with_sem[key] = [means_with_sem[key][0], None]
summary = {
'name': 'Means',
'mean_fpr': FormatMeanWithSem(means_with_sem['fpr'], percent=True),
'mean_fnr': FormatMeanWithSem(means_with_sem['fnr'], percent=True),
'mean_tv': FormatMeanWithSem(means_with_sem['tv'], percent=True),
'mean_am': FormatMeanWithSem(means_with_sem['am'], percent=True),
'mean_time': FormatMeanWithSem(means_with_sem['time']),
}
return SUMMARY_ROW % summary
def FormatPlots(base_dir, test_instances):
"""Outputs HTML-formatted plots."""
result = ''
for instance in test_instances:
# A test instance is identified by the test name and the test run.
test_case, test_instance, _ = instance.split(' ')
instance_dir = test_case + '/' + test_instance + '_report'
if os.path.isfile(os.path.join(base_dir, instance_dir, 'dist.png')):
result += DETAILS % {'anchor': test_case + '_' + test_instance,
'name': '{} (instance {})'.format(test_case,
test_instance),
'instance_dir': instance_dir}
return result
if __name__ == '__main__':
try:
main(sys.argv)
except RuntimeError, e:
print >>sys.stderr, 'FATAL: %s' % e
sys.exit(1)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
37811,
15056,
257,
842,
9288,
1255,
5509,
11,
20842,
281,
11532,
10638,
284,
257,
2393,
13,
198,
198,
6214,
11532,
18328,
287,
5254,
14,
2301,
9288,
13,
6494,
13,
198,
37811,
198,
198,
11748,... | 2.483481 | 3,723 |
# -*- coding: utf-8 -*-
from tccli.services.cii.cii_client import action_caller
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
256,
535,
4528,
13,
30416,
13,
979,
72,
13,
979,
72,
62,
16366,
1330,
2223,
62,
13345,
263,
198,
220,
220,
220,
220
] | 2.179487 | 39 |
from simple.gRPC.Fields import Fixed
import unittest | [
6738,
2829,
13,
70,
49,
5662,
13,
15878,
82,
1330,
10832,
198,
11748,
555,
715,
395
] | 3.25 | 16 |
import numpy as np
from deepscratch.models.layers.activations.activation import Activation
# https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/activation_functions.py
# https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/activation_functions.py
# https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/activation_functions.py
# https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/activation_functions.py
# https://arxiv.org/abs/1706.02515,
# https://github.com/bioinf-jku/SNNs/blob/master/SelfNormalizingNetworks_MLP_MNIST.ipynb
| [
11748,
299,
32152,
355,
45941,
198,
6738,
2769,
1416,
36722,
13,
27530,
13,
75,
6962,
13,
15791,
602,
13,
48545,
1330,
13144,
341,
628,
220,
220,
220,
1303,
3740,
1378,
12567,
13,
785,
14,
263,
1134,
75,
521,
1142,
29578,
14,
5805,
... | 2.627306 | 271 |
courseJson=[
{
'Course_Name': 'Introduction to Engineering Design',
'Code': 'DES130',
'Course_Text': 'Introduction to Engineering Design is a core, multidisciplinary course offered with an aim to ignite the young minds with concepts in design and innovation. Using the tools and skills learnt in the lab, the students participate in a project challenge to build functional prototypes in the field of intelligent buildings, automotive, and robotics which will provide solutions to real life problems.'
},
{
'Course_Name': 'Design Drawing and Visualization',
'Code': 'DES101',
'Course_Text': 'This course fosters understanding of drawing and sketching as a means to develop observational skills through the study of the environment and as a tool for visual representation, ideation/conceptualization, visualization and communication or presentation of design ideas through sketching and drawing from both observation and memory.'
},
{
'Course_Name': 'Visual Design & Communication',
'Code': 'DES202',
'Course_Text': 'For a designer to communicate more concisely and in a visually appropriate manner, it is necessary to use commonly understood principles, perspective and design layout standards. Together, these conventions constitute a visual language, and help to ensure that the drawing is clear and relatively easy to understand.'
},
{
'Course_Name': 'Design Processes and Perspectives',
'Code': 'DES201',
'Course_Text': 'Broadly, the course gives students the opportunity to develop essential design thinking skills such as exploring the designed space to identify problem, applying the design thinking process to problems, visualizing design solutions, refining final designs and communicating ideas in visually appropriate form through assignments and projects.'
},
{
'Course_Name': 'Animation & Graphics',
'Code': 'DES302',
'Course_Text': 'This course will take you right through the fundamentals of Graphic Design from photorealism up to the point where fantasy and imagination begins. You will understand usage of the colour wheel and its role in creating Digital Art.'
},
{
'Course_Name': 'Film Making and Radio Podcasting',
'Code': 'DES303',
'Course_Text': 'This course will not only give you the basic technical skills but will also hand hold you into making a aesthetically correct decisions in assembling a film.'
},
{
'Course_Name': 'Wearable Applications, Research, Devices, Interactions (WARDI)',
'Code': 'DES513',
'Course_Text': 'This is a course about the current paradigm of Wearable Computing. In this course, we will cover the origins, pioneering contributions, and principles of Wearable Computing. With this foundation, we will initiate our exploration into the space by learning how to design physical (device form factor), digital (applications) as well as human (interaction techniques) aspects of Wearables.'
},
{
'Course_Name': 'Digital Audio - (Procedural Game Audio, Algorithmic Composition & Sound Synthesis)',
'Code': 'DES514',
'Course_Text': 'This hands-on project-based course will introduce students to the world of digital audio. Topics include real-time sound synthesis, machine listening, procedural game audio, algorithmic composition, digital instrument design and sound design. '
},
{
'Course_Name': 'Information systems in Public Health',
'Code': 'DES5XX',
'Course_Text': 'This course will give an understanding of public health information systems. It will include key concepts of public health, sources of public health information, ethics in public health practice and research, and an understanding of various public health information systems in use.'
},
{
'Course_Name': 'Game Development & Design',
'Code': 'DES512',
'Course_Text': 'This hands-on project-based course will introduce students to the fundamentals of game development & design using the Unreal 4 game engine. Topics include level design, lighting, materials, particle effects, game AI, game logic, user input mappings, audio, physics and motion.'
},
{
'Course_Name': ' Introduction to 3D Animation',
'Code': 'DES5XX',
'Course_Text': 'This course introduces students to: (i) Basics and fundamental principles of animation (ii) Workflow of animation (iii) Introduction to 3D Animation'
}
] | [
17319,
41,
1559,
41888,
198,
220,
220,
220,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
705,
49046,
62,
5376,
10354,
705,
21906,
284,
14044,
8495,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
705,
10669,
10354,
705,
30910,
12952,
... | 3.682848 | 1,236 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PIL import Image
for i in range(10):
image = Image.open('output{:08d}.png'.format(i))
newImage = []
for item in image.getdata():
if item[:4] == (0, 0, 0 , 0): #將透明區(0,0,0,0)轉成(255,255,255)
newImage.append((255, 255, 255))
else:
newImage.append(item)
image.putdata(newImage)
image = image.convert('RGB')#RGBA轉RGB
image.save('output{:08d}_removebg.png'.format(i))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
350,
4146,
1330,
7412,
198,
1640,
1312,
287,
2837,
7,
940,
2599,
198,
220,
220,
220,
2939,
796,
7412,
13,
9654,... | 1.911197 | 259 |
from datetime import datetime, date, timedelta
import os
import sys; sys.path += ['/var/canvas/common', '../../common']
import yaml
from boto.s3.connection import S3Connection
from configuration import aws
# results format
# {
# 'start_time': start_time,
# 'end_time': end_time,
# 'time': (end_time - start_time),
# 'stored': stored,
# 'skipped': skipped,
# 'failed': failed,
# 'size': backup_size_str,
# }
results_bucket_name = 'canvas-ugc-backup-logging'
key_format_str = 'ugc-backup-results-{0}'
if __name__ == '__main__':
check_backups()
| [
6738,
4818,
8079,
1330,
4818,
8079,
11,
3128,
11,
28805,
12514,
198,
11748,
28686,
198,
11748,
25064,
26,
25064,
13,
6978,
15853,
685,
26488,
7785,
14,
5171,
11017,
14,
11321,
3256,
705,
40720,
40720,
11321,
20520,
198,
11748,
331,
43695,... | 2.32567 | 261 |
#!/usr/bin/env python3
import sys
from threading import Thread, Event
print("INIT 1 0")
run = 1
pool = []
while run or pool:
command, id, *name = sys.stdin.readline().split()
if command == "RESOLVE":
Resolve(id, *name)
#print("print", pool)
#pool[-1].start()
elif command == "CANCEL":
pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
25064,
198,
6738,
4704,
278,
1330,
14122,
11,
8558,
198,
198,
4798,
7203,
1268,
2043,
352,
657,
4943,
198,
5143,
796,
352,
198,
7742,
796,
17635,
198,
198,
4514,
1057,
393,
... | 2.285714 | 147 |
"""
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import brownie
from brownie import web3
from brownie.network.contract import Contract, ContractTx, ContractCall
_BROWNIE_RESERVED_NAMES = [
'abi', 'at', 'bytecode', 'deploy', 'get_method', 'info', 'remove', 'selectors', 'signatures', 'topics', 'tx'
]
"""
Brownieコントラクトに定義済みのプロパティ名。
同名の公開関数がスマートコントラクトに存在するとBrownieでのデプロイ時にエラーになる。
"""
def force_deploy(deployer, contract, *deploy_args):
"""
Brownieだとエラーが発生するコントラクトを強制的にデプロイする。
Brownieでは brownie.network.contract.Contract に定義済みプロパティと
同名の公開関数を持つコントラクトはエラーとなりデプロイできない。
この関数はBrownieを利用せずweb3で直接デプロイすることでエラーを回避する。
なお、この関数により生成したContractオブジェクトではBrownieが提供する一部のDebug機能は使用できない。
使用例
>>> returned_contract = force_deploy(deployer, contract, *deploy_args)
>>> # 普通の関数はそのまま使用できる。
>>> returned_contract.nameOfFunction.transact({'from': deployer})
>>> # エラーの原因となる関数は `.functions` 経由でアクセスする。
>>> returned_contract.functions.signatures()
>>> returned_contract.functions.remove.transact({'from': deployer})
:param deployer: コントラクトをデプロイするアカウント
:param contract: Brownieのコントラクトオブジェクト
:param deploy_args: コントラクトのコンストラクタ引数
:return: Brownieのコントラクトインスタンス
"""
# 引数の型変換 (Note: web3.pyとBrownieでは型変換規則が異なる)
constructor_abi = list(filter(lambda entry: entry['type'] == 'constructor', contract.abi))
if len(constructor_abi) == 1:
deploy_args = brownie.convert.normalize.format_input(constructor_abi[0], deploy_args)
# web3を用いてデプロイする
web3_contract = web3.eth.contract(abi=contract.abi, bytecode=contract.bytecode)
txn_hash = web3_contract.constructor(*deploy_args).transact({'from': deployer.address})
receipt = web3.eth.waitForTransactionReceipt(txn_hash)
contract_address = receipt['contractAddress']
# Brownieでエラーを発生させるメソッドを取り除いたABIを作成する
# このABIを用いることでBrownieのContractオブジェクトが作成できるようになる
brownie_safe_abi = []
excluded_function_abi = []
for abi_entry in contract.abi:
if abi_entry['type'] == 'function' and abi_entry['name'] in _BROWNIE_RESERVED_NAMES:
excluded_function_abi.append(abi_entry)
else:
brownie_safe_abi.append(abi_entry)
contract_name = _resolve_contract_name(contract) + '__brownie_utils'
brownie_contract = Contract.from_abi(contract_name, contract_address, brownie_safe_abi)
# ABIから削除したメソッドを復元する
# (オーバロードには未対応)
brownie_contract.functions = _BrownieUnsafeFunctionContainer()
for abi_entry in excluded_function_abi:
name = abi_entry['name']
if _is_constant(abi_entry):
recovered_function = ContractCall(contract_address, abi_entry, name, None)
else:
recovered_function = ContractTx(contract_address, abi_entry, name, None)
setattr(brownie_contract.functions, name, recovered_function)
return brownie_contract
class _BrownieUnsafeFunctionContainer:
"""Brownieでエラーとなるスマートコントラクトの関数を保持するクラス"""
pass
| [
37811,
198,
15269,
347,
6684,
2257,
18276,
1766,
1539,
12052,
13,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
1... | 2.103118 | 1,668 |
# Define a function that will check if the given
# number is perfect and return a boolean value.
# Ask they user how many perfect numbers they want.
count = input("How many perfect numbers do you want? (Recommend less than 4, as any higher can take a loooong time): ")
try:
# Try to convert the user input to an integer (from string).
count = int(count)
except ValueError:
# If the user did not input a valid integer value, tell them.
print("That was not a valid number.")
else:
# Declare a list to store the perfect numbers in.
perfects = []
# Define a stating number.
current_number = 1
# Start a loop that will run as long as the number of found
# perfect numbers is less than the amount the user asked for.
while len(perfects) < count:
# If the current number is perfect, append it
# to the list of perfect numbers.
if is_perfect(current_number):
perfects.append(current_number)
# Increment the current number by one.
current_number += 1
# Print the final list of perfect numbers.
print(perfects)
| [
2,
2896,
500,
257,
2163,
326,
481,
2198,
611,
262,
1813,
198,
2,
1271,
318,
2818,
290,
1441,
257,
25131,
1988,
13,
628,
198,
2,
16981,
484,
2836,
703,
867,
2818,
3146,
484,
765,
13,
198,
9127,
796,
5128,
7203,
2437,
867,
2818,
314... | 3.129213 | 356 |
"""
Contraction Clustering (RASTER):
Reference Implementation in Python with an Example
(c) 2016 - 2020 Fraunhofer-Chalmers Centre for Industrial Mathematics
Algorithm development and implementation:
Gregor Ulm (gregor.ulm@fcc.chalmers.se)
Requirements:
. Python 3
For a description of the algorithm including relevant theory, please
consult our paper on Contraction Clustering (RASTER).
To run this script, type
> python3 raster.py
"""
import os
import clustering as c
if __name__ == "__main__":
# load input data
with open("input/sample.csv", "r") as f:
content = f.readlines()
all_points = []
for line in content:
line = line.strip()
(x, y) = line.split(",")
x = float(x)
y = float(y)
all_points.append((x, y))
"""
RASTER clusters:
RASTER projects points to tiles and disregards the former after the
projection has been performed. Thus, it requires merely constant
space, assuming bounded integers or a bounded coordinate system like
the GPS coordinate system for our planet.
Input is projected to points that represent tiles.
"""
precision = 1
tau = 5 # threshold
min_size = 5
clusters, scalar = raster(all_points, precision, tau, min_size)
print("Number of clusters: ", len(clusters))
output = []
count = 1
for cluster in clusters:
for (x, y) in cluster:
x = x / scalar
y = y / scalar
output.append((count, x, y))
count += 1
f = open("output/clustered.csv", "w")
f.write("Cluster Number, X-Position, Y-Position\n")
for (num, x, y) in output:
f.write(str(num) + ", " + str(x) + ", " + str(y) + "\n")
f.close()
| [
37811,
198,
4264,
7861,
1012,
436,
1586,
357,
49,
1921,
5781,
2599,
198,
26687,
46333,
287,
11361,
351,
281,
17934,
198,
7,
66,
8,
1584,
532,
12131,
39313,
403,
71,
30288,
12,
1925,
282,
11056,
9072,
329,
19034,
39448,
198,
198,
2348,... | 2.605926 | 675 |
from .output import population_status,infected
from .utils import colorscale_okabe_ito
import matplotlib.pyplot as plt
__all__ = ["population_status", "infected"]
# covid 19 specific parameters
plt.rcParams.update({
"axes.prop_cycle": plt.cycler('color',
list(colorscale_okabe_ito.values())),
"font.size": 15,
"lines.linewidth" : 3,
"axes.spines.top": False,
"axes.spines.right": False,
"ytick.major.left": True,
"axes.grid": True
}) | [
6738,
764,
22915,
1330,
3265,
62,
13376,
11,
27816,
276,
198,
6738,
764,
26791,
1330,
7577,
38765,
62,
482,
11231,
62,
10094,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
834,
439,
834,
796,
14631,
39748,
62... | 2.300926 | 216 |
from .fortiosapi import FortiOSAPI
| [
6738,
764,
3319,
4267,
15042,
1330,
6401,
35742,
17614,
198
] | 3.5 | 10 |
from PyZ3950 import asn1
# Nested registry of the ASN.1 Object Identifiers (OIDs) used by the
# Z39.50 / ISO 23950 protocol.  Each entry is a dict holding both the
# asn1.OidVal wrapper ('oid') and the raw integer arc list ('val');
# child arcs of the OID tree are stored as extra keys inside the
# parent entry's dict (e.g. oids['Z3950']['ATTRS']['BIB1']).
# This table is generated data — do not hand-edit individual arcs.
oids = {}
# Root: ANSI/NISO Z39.50 arc {1 2 840 10003} and its top-level branches.
oids['Z3950'] = {'oid': asn1.OidVal([1, 2, 840, 10003]), 'val': [1, 2, 840, 10003]}
oids['Z3950']['ATTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3]), 'val': [1, 2, 840, 10003, 3]}
oids['Z3950']['DIAG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4]), 'val': [1, 2, 840, 10003, 4]}
oids['Z3950']['RECSYN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5]), 'val': [1, 2, 840, 10003, 5]}
oids['Z3950']['TRANSFER'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 6]), 'val': [1, 2, 840, 10003, 6]}
oids['Z3950']['RRF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7]), 'val': [1, 2, 840, 10003, 7]}
oids['Z3950']['ACCESS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8]), 'val': [1, 2, 840, 10003, 8]}
oids['Z3950']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9]), 'val': [1, 2, 840, 10003, 9]}
oids['Z3950']['USR'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10]), 'val': [1, 2, 840, 10003, 10]}
oids['Z3950']['SPEC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11]), 'val': [1, 2, 840, 10003, 11]}
oids['Z3950']['VAR'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 12]), 'val': [1, 2, 840, 10003, 12]}
oids['Z3950']['SCHEMA'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13]), 'val': [1, 2, 840, 10003, 13]}
oids['Z3950']['TAGSET'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14]), 'val': [1, 2, 840, 10003, 14]}
oids['Z3950']['NEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15]), 'val': [1, 2, 840, 10003, 15]}
oids['Z3950']['QUERY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16]), 'val': [1, 2, 840, 10003, 16]}
# Attribute sets (branch 3).
oids['Z3950']['ATTRS']['BIB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 1]), 'val': [1, 2, 840, 10003, 3, 1]}
oids['Z3950']['ATTRS']['EXP1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 2]), 'val': [1, 2, 840, 10003, 3, 2]}
oids['Z3950']['ATTRS']['EXT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 3]), 'val': [1, 2, 840, 10003, 3, 3]}
oids['Z3950']['ATTRS']['CCL1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 4]), 'val': [1, 2, 840, 10003, 3, 4]}
oids['Z3950']['ATTRS']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 5]), 'val': [1, 2, 840, 10003, 3, 5]}
oids['Z3950']['ATTRS']['STAS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 6]), 'val': [1, 2, 840, 10003, 3, 6]}
oids['Z3950']['ATTRS']['COLLECTIONS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 7]), 'val': [1, 2, 840, 10003, 3, 7]}
oids['Z3950']['ATTRS']['CIMI1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 8]), 'val': [1, 2, 840, 10003, 3, 8]}
oids['Z3950']['ATTRS']['GEO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 9]), 'val': [1, 2, 840, 10003, 3, 9]}
oids['Z3950']['ATTRS']['ZBIG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 10]), 'val': [1, 2, 840, 10003, 3, 10]}
oids['Z3950']['ATTRS']['UTIL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 11]), 'val': [1, 2, 840, 10003, 3, 11]}
oids['Z3950']['ATTRS']['XD1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 12]), 'val': [1, 2, 840, 10003, 3, 12]}
oids['Z3950']['ATTRS']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 13]), 'val': [1, 2, 840, 10003, 3, 13]}
oids['Z3950']['ATTRS']['FIN1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 14]), 'val': [1, 2, 840, 10003, 3, 14]}
oids['Z3950']['ATTRS']['DAN1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 15]), 'val': [1, 2, 840, 10003, 3, 15]}
oids['Z3950']['ATTRS']['HOLD'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 16]), 'val': [1, 2, 840, 10003, 3, 16]}
oids['Z3950']['ATTRS']['MARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 17]), 'val': [1, 2, 840, 10003, 3, 17]}
oids['Z3950']['ATTRS']['BIB2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 18]), 'val': [1, 2, 840, 10003, 3, 18]}
oids['Z3950']['ATTRS']['ZEEREX'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 19]), 'val': [1, 2, 840, 10003, 3, 19]}
# Diagnostic sets (branch 4).
oids['Z3950']['DIAG']['BIB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 1]), 'val': [1, 2, 840, 10003, 4, 1]}
oids['Z3950']['DIAG']['DIAG1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 2]), 'val': [1, 2, 840, 10003, 4, 2]}
oids['Z3950']['DIAG']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 3]), 'val': [1, 2, 840, 10003, 4, 3]}
oids['Z3950']['DIAG']['GENERAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 4]), 'val': [1, 2, 840, 10003, 4, 4]}
# Record syntaxes (branch 5) — mostly national MARC variants.
oids['Z3950']['RECSYN']['UNIMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 1]), 'val': [1, 2, 840, 10003, 5, 1]}
oids['Z3950']['RECSYN']['INTERMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 2]), 'val': [1, 2, 840, 10003, 5, 2]}
oids['Z3950']['RECSYN']['CCF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 3]), 'val': [1, 2, 840, 10003, 5, 3]}
oids['Z3950']['RECSYN']['USMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10]), 'val': [1, 2, 840, 10003, 5, 10]}
oids['Z3950']['RECSYN']['USMARC']['BIBLIO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 1]), 'val': [1, 2, 840, 10003, 5, 10, 1]}
oids['Z3950']['RECSYN']['USMARC']['AUTH'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 2]), 'val': [1, 2, 840, 10003, 5, 10, 2]}
oids['Z3950']['RECSYN']['USMARC']['HOLD'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 3]), 'val': [1, 2, 840, 10003, 5, 10, 3]}
oids['Z3950']['RECSYN']['USMARC']['COMMUNITY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 4]), 'val': [1, 2, 840, 10003, 5, 10, 4]}
oids['Z3950']['RECSYN']['USMARC']['CLASS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 5]), 'val': [1, 2, 840, 10003, 5, 10, 5]}
oids['Z3950']['RECSYN']['UKMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 11]), 'val': [1, 2, 840, 10003, 5, 11]}
oids['Z3950']['RECSYN']['NORMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 12]), 'val': [1, 2, 840, 10003, 5, 12]}
oids['Z3950']['RECSYN']['LIBRISMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 13]), 'val': [1, 2, 840, 10003, 5, 13]}
oids['Z3950']['RECSYN']['DANMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 14]), 'val': [1, 2, 840, 10003, 5, 14]}
oids['Z3950']['RECSYN']['FINMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 15]), 'val': [1, 2, 840, 10003, 5, 15]}
oids['Z3950']['RECSYN']['MAB'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 16]), 'val': [1, 2, 840, 10003, 5, 16]}
oids['Z3950']['RECSYN']['CANMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 17]), 'val': [1, 2, 840, 10003, 5, 17]}
oids['Z3950']['RECSYN']['SBNMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 18]), 'val': [1, 2, 840, 10003, 5, 18]}
oids['Z3950']['RECSYN']['PICAMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 19]), 'val': [1, 2, 840, 10003, 5, 19]}
oids['Z3950']['RECSYN']['AUSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 20]), 'val': [1, 2, 840, 10003, 5, 20]}
oids['Z3950']['RECSYN']['IBERMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 21]), 'val': [1, 2, 840, 10003, 5, 21]}
oids['Z3950']['RECSYN']['CATMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 22]), 'val': [1, 2, 840, 10003, 5, 22]}
oids['Z3950']['RECSYN']['MALMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 23]), 'val': [1, 2, 840, 10003, 5, 23]}
oids['Z3950']['RECSYN']['JPMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 24]), 'val': [1, 2, 840, 10003, 5, 24]}
oids['Z3950']['RECSYN']['SWEMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 25]), 'val': [1, 2, 840, 10003, 5, 25]}
oids['Z3950']['RECSYN']['SIGLEMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 26]), 'val': [1, 2, 840, 10003, 5, 26]}
oids['Z3950']['RECSYN']['ISDSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 27]), 'val': [1, 2, 840, 10003, 5, 27]}
oids['Z3950']['RECSYN']['RUSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 28]), 'val': [1, 2, 840, 10003, 5, 28]}
oids['Z3950']['RECSYN']['HUNMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 29]), 'val': [1, 2, 840, 10003, 5, 29]}
oids['Z3950']['RECSYN']['NACSISCATP'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 30]), 'val': [1, 2, 840, 10003, 5, 30]}
oids['Z3950']['RECSYN']['FINMARC2000'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 31]), 'val': [1, 2, 840, 10003, 5, 31]}
oids['Z3950']['RECSYN']['MARC21FIN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 32]), 'val': [1, 2, 840, 10003, 5, 32]}
oids['Z3950']['RECSYN']['COMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 33]), 'val': [1, 2, 840, 10003, 5, 33]}
oids['Z3950']['RECSYN']['EXPLAIN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 100]), 'val': [1, 2, 840, 10003, 5, 100]}
oids['Z3950']['RECSYN']['SUTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 101]), 'val': [1, 2, 840, 10003, 5, 101]}
oids['Z3950']['RECSYN']['OPAC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 102]), 'val': [1, 2, 840, 10003, 5, 102]}
oids['Z3950']['RECSYN']['SUMMARY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 103]), 'val': [1, 2, 840, 10003, 5, 103]}
oids['Z3950']['RECSYN']['GRS0'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 104]), 'val': [1, 2, 840, 10003, 5, 104]}
oids['Z3950']['RECSYN']['GRS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 105]), 'val': [1, 2, 840, 10003, 5, 105]}
oids['Z3950']['RECSYN']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 106]), 'val': [1, 2, 840, 10003, 5, 106]}
oids['Z3950']['RECSYN']['FRAGMENT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 107]), 'val': [1, 2, 840, 10003, 5, 107]}
oids['Z3950']['RECSYN']['MIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109]), 'val': [1, 2, 840, 10003, 5, 109]}
oids['Z3950']['RECSYN']['MIME']['PDF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 1]), 'val': [1, 2, 840, 10003, 5, 109, 1]}
oids['Z3950']['RECSYN']['MIME']['POSTSCRIPT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 2]), 'val': [1, 2, 840, 10003, 5, 109, 2]}
oids['Z3950']['RECSYN']['MIME']['HTML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 3]), 'val': [1, 2, 840, 10003, 5, 109, 3]}
oids['Z3950']['RECSYN']['MIME']['TIFF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 4]), 'val': [1, 2, 840, 10003, 5, 109, 4]}
oids['Z3950']['RECSYN']['MIME']['GIF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 5]), 'val': [1, 2, 840, 10003, 5, 109, 5]}
oids['Z3950']['RECSYN']['MIME']['JPEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 6]), 'val': [1, 2, 840, 10003, 5, 109, 6]}
oids['Z3950']['RECSYN']['MIME']['PNG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 7]), 'val': [1, 2, 840, 10003, 5, 109, 7]}
oids['Z3950']['RECSYN']['MIME']['MPEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 8]), 'val': [1, 2, 840, 10003, 5, 109, 8]}
oids['Z3950']['RECSYN']['MIME']['SGML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 9]), 'val': [1, 2, 840, 10003, 5, 109, 9]}
oids['Z3950']['RECSYN']['MIME']['XML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 10]), 'val': [1, 2, 840, 10003, 5, 109, 10]}
oids['Z3950']['RECSYN']['ZMIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110]), 'val': [1, 2, 840, 10003, 5, 110]}
oids['Z3950']['RECSYN']['ZMIME']['TIFFB'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110, 1]), 'val': [1, 2, 840, 10003, 5, 110, 1]}
oids['Z3950']['RECSYN']['ZMIME']['WAV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110, 2]), 'val': [1, 2, 840, 10003, 5, 110, 2]}
oids['Z3950']['RECSYN']['SQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 111]), 'val': [1, 2, 840, 10003, 5, 111]}
# Resource report formats (branch 7).
oids['Z3950']['RRF']['RESOURCE1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7, 1]), 'val': [1, 2, 840, 10003, 7, 1]}
oids['Z3950']['RRF']['RESOURCE2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7, 2]), 'val': [1, 2, 840, 10003, 7, 2]}
# Access control formats (branch 8).
oids['Z3950']['ACCESS']['PROMPT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 1]), 'val': [1, 2, 840, 10003, 8, 1]}
oids['Z3950']['ACCESS']['DES1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 2]), 'val': [1, 2, 840, 10003, 8, 2]}
oids['Z3950']['ACCESS']['KRB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 3]), 'val': [1, 2, 840, 10003, 8, 3]}
# Extended services (branch 9).
oids['Z3950']['ES']['PERSISTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 1]), 'val': [1, 2, 840, 10003, 9, 1]}
oids['Z3950']['ES']['PERSISTQRY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 2]), 'val': [1, 2, 840, 10003, 9, 2]}
oids['Z3950']['ES']['PERIODQRY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 3]), 'val': [1, 2, 840, 10003, 9, 3]}
oids['Z3950']['ES']['ITEMORDER'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 4]), 'val': [1, 2, 840, 10003, 9, 4]}
oids['Z3950']['ES']['DBUPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5]), 'val': [1, 2, 840, 10003, 9, 5]}
oids['Z3950']['ES']['DBUPDATE']['REV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5, 1]), 'val': [1, 2, 840, 10003, 9, 5, 1]}
oids['Z3950']['ES']['DBUPDATE']['REV']['1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5, 1, 1]), 'val': [1, 2, 840, 10003, 9, 5, 1, 1]}
oids['Z3950']['ES']['EXPORTSPEC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 6]), 'val': [1, 2, 840, 10003, 9, 6]}
oids['Z3950']['ES']['EXPORTINV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 7]), 'val': [1, 2, 840, 10003, 9, 7]}
# User information formats (branch 10).
oids['Z3950']['USR']['SEARCHRES1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1]), 'val': [1, 2, 840, 10003, 10, 1]}
oids['Z3950']['USR']['CHARSETNEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 2]), 'val': [1, 2, 840, 10003, 10, 2]}
oids['Z3950']['USR']['INFO1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 3]), 'val': [1, 2, 840, 10003, 10, 3]}
oids['Z3950']['USR']['SEARCHTERMS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 4]), 'val': [1, 2, 840, 10003, 10, 4]}
oids['Z3950']['USR']['SEARCHTERMS2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 5]), 'val': [1, 2, 840, 10003, 10, 5]}
oids['Z3950']['USR']['DATETIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 6]), 'val': [1, 2, 840, 10003, 10, 6]}
oids['Z3950']['USR']['INSERTACTIONQUAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 9]), 'val': [1, 2, 840, 10003, 10, 9]}
oids['Z3950']['USR']['EDITACTIONQUAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 10]), 'val': [1, 2, 840, 10003, 10, 10]}
oids['Z3950']['USR']['AUTHFILE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 11]), 'val': [1, 2, 840, 10003, 10, 11]}
oids['Z3950']['USR']['PRIVATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000]), 'val': [1, 2, 840, 10003, 10, 1000]}
oids['Z3950']['USR']['PRIVATE']['OCLC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17]), 'val': [1, 2, 840, 10003, 10, 1000, 17]}
oids['Z3950']['USR']['PRIVATE']['OCLC']['INFO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17, 1]), 'val': [1, 2, 840, 10003, 10, 1000, 17, 1]}
# Element specification formats (branch 11).
oids['Z3950']['SPEC']['ESPEC1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 1]), 'val': [1, 2, 840, 10003, 11, 1]}
oids['Z3950']['SPEC']['ESPEC2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 2]), 'val': [1, 2, 840, 10003, 11, 2]}
oids['Z3950']['SPEC']['ESPECQ'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 3]), 'val': [1, 2, 840, 10003, 11, 3]}
# Variant sets (branch 12).
oids['Z3950']['VAR']['VARIANT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 12, 1]), 'val': [1, 2, 840, 10003, 12, 1]}
# Database schemas (branch 13).
oids['Z3950']['SCHEMA']['WAIS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 2]), 'val': [1, 2, 840, 10003, 13, 2]}
oids['Z3950']['SCHEMA']['COLLECTIONS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 3]), 'val': [1, 2, 840, 10003, 13, 3]}
oids['Z3950']['SCHEMA']['GEO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 4]), 'val': [1, 2, 840, 10003, 13, 4]}
oids['Z3950']['SCHEMA']['CIMI'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 5]), 'val': [1, 2, 840, 10003, 13, 5]}
oids['Z3950']['SCHEMA']['UPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 6]), 'val': [1, 2, 840, 10003, 13, 6]}
oids['Z3950']['SCHEMA']['HOLDINGS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7]), 'val': [1, 2, 840, 10003, 13, 7]}
oids['Z3950']['SCHEMA']['HOLDINGS']['11'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 1]), 'val': [1, 2, 840, 10003, 13, 7, 1]}
oids['Z3950']['SCHEMA']['HOLDINGS']['12'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 2]), 'val': [1, 2, 840, 10003, 13, 7, 2]}
oids['Z3950']['SCHEMA']['HOLDINGS']['14'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 4]), 'val': [1, 2, 840, 10003, 13, 7, 4]}
# NOTE(review): ZTHES/INSERT/EDIT below all reuse WAIS's arc
# [1, 2, 840, 10003, 13, 1] — these look like placeholder values in the
# generated table; verify against the official Z39.50 OID register.
oids['Z3950']['SCHEMA']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['INSERT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['EDIT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
# Tag sets (branch 14).
oids['Z3950']['TAGSET']['M'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 1]), 'val': [1, 2, 840, 10003, 14, 1]}
oids['Z3950']['TAGSET']['G'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 2]), 'val': [1, 2, 840, 10003, 14, 2]}
oids['Z3950']['TAGSET']['STAS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 3]), 'val': [1, 2, 840, 10003, 14, 3]}
oids['Z3950']['TAGSET']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 4]), 'val': [1, 2, 840, 10003, 14, 4]}
oids['Z3950']['TAGSET']['COLLECTIONS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 5]), 'val': [1, 2, 840, 10003, 14, 5]}
oids['Z3950']['TAGSET']['CIMI'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 6]), 'val': [1, 2, 840, 10003, 14, 6]}
oids['Z3950']['TAGSET']['UPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 7]), 'val': [1, 2, 840, 10003, 14, 7]}
oids['Z3950']['TAGSET']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 8]), 'val': [1, 2, 840, 10003, 14, 8]}
# Negotiation records (branch 15).
oids['Z3950']['NEG']['CHARSET2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1]), 'val': [1, 2, 840, 10003, 15, 1]}
oids['Z3950']['NEG']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 2]), 'val': [1, 2, 840, 10003, 15, 2]}
oids['Z3950']['NEG']['CHARSET3'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 3]), 'val': [1, 2, 840, 10003, 15, 3]}
oids['Z3950']['NEG']['PRIVATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000]), 'val': [1, 2, 840, 10003, 15, 1000]}
oids['Z3950']['NEG']['PRIVATE']['INDEXDATA'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81]), 'val': [1, 2, 840, 10003, 15, 1000, 81]}
oids['Z3950']['NEG']['PRIVATE']['INDEXDATA']['CHARSETNAME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81, 1]), 'val': [1, 2, 840, 10003, 15, 1000, 81, 1]}
# Query types (branch 16).
oids['Z3950']['QUERY']['SQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16, 1]), 'val': [1, 2, 840, 10003, 16, 1]}
oids['Z3950']['QUERY']['CQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16, 2]), 'val': [1, 2, 840, 10003, 16, 2]}
# ISO 10646 (Unicode) character-set arcs {1 0 10646}.
oids['UNICODE'] = {'oid': asn1.OidVal([1, 0, 10646]), 'val': [1, 0, 10646]}
oids['UNICODE']['PART1'] = {'oid': asn1.OidVal([1, 0, 10646, 1]), 'val': [1, 0, 10646, 1]}
oids['UNICODE']['PART1']['XFERSYN'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0]), 'val': [1, 0, 10646, 1, 0]}
oids['UNICODE']['PART1']['XFERSYN']['UCS2'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 2]), 'val': [1, 0, 10646, 1, 0, 2]}
oids['UNICODE']['PART1']['XFERSYN']['UCS4'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 4]), 'val': [1, 0, 10646, 1, 0, 4]}
oids['UNICODE']['PART1']['XFERSYN']['UTF16'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 5]), 'val': [1, 0, 10646, 1, 0, 5]}
oids['UNICODE']['PART1']['XFERSYN']['UTF8'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 8]), 'val': [1, 0, 10646, 1, 0, 8]}
# Flat module-level aliases for the same OID tree as `oids` above:
# for every node, NAME is the raw integer arc list and NAME_ov is the
# matching asn1.OidVal, so callers can reference OIDs directly without
# walking the nested dict.  Generated data — do not hand-edit arcs.
# ISO 10646 (Unicode) transfer syntaxes.
UNICODE = [1, 0, 10646]
UNICODE_ov = asn1.OidVal([1, 0, 10646])
UNICODE_PART1 = [1, 0, 10646, 1]
UNICODE_PART1_ov = asn1.OidVal([1, 0, 10646, 1])
UNICODE_PART1_XFERSYN = [1, 0, 10646, 1, 0]
UNICODE_PART1_XFERSYN_ov = asn1.OidVal([1, 0, 10646, 1, 0])
UNICODE_PART1_XFERSYN_UCS2 = [1, 0, 10646, 1, 0, 2]
UNICODE_PART1_XFERSYN_UCS2_ov = asn1.OidVal([1, 0, 10646, 1, 0, 2])
UNICODE_PART1_XFERSYN_UCS4 = [1, 0, 10646, 1, 0, 4]
UNICODE_PART1_XFERSYN_UCS4_ov = asn1.OidVal([1, 0, 10646, 1, 0, 4])
UNICODE_PART1_XFERSYN_UTF16 = [1, 0, 10646, 1, 0, 5]
UNICODE_PART1_XFERSYN_UTF16_ov = asn1.OidVal([1, 0, 10646, 1, 0, 5])
UNICODE_PART1_XFERSYN_UTF8 = [1, 0, 10646, 1, 0, 8]
UNICODE_PART1_XFERSYN_UTF8_ov = asn1.OidVal([1, 0, 10646, 1, 0, 8])
# ANSI/NISO Z39.50 arcs {1 2 840 10003}, alphabetical by branch.
Z3950 = [1, 2, 840, 10003]
Z3950_ov = asn1.OidVal([1, 2, 840, 10003])
Z3950_ACCESS = [1, 2, 840, 10003, 8]
Z3950_ACCESS_ov = asn1.OidVal([1, 2, 840, 10003, 8])
Z3950_ACCESS_DES1 = [1, 2, 840, 10003, 8, 2]
Z3950_ACCESS_DES1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 2])
Z3950_ACCESS_KRB1 = [1, 2, 840, 10003, 8, 3]
Z3950_ACCESS_KRB1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 3])
Z3950_ACCESS_PROMPT1 = [1, 2, 840, 10003, 8, 1]
Z3950_ACCESS_PROMPT1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 1])
Z3950_ATTRS = [1, 2, 840, 10003, 3]
Z3950_ATTRS_ov = asn1.OidVal([1, 2, 840, 10003, 3])
Z3950_ATTRS_BIB1 = [1, 2, 840, 10003, 3, 1]
Z3950_ATTRS_BIB1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 1])
Z3950_ATTRS_BIB2 = [1, 2, 840, 10003, 3, 18]
Z3950_ATTRS_BIB2_ov = asn1.OidVal([1, 2, 840, 10003, 3, 18])
Z3950_ATTRS_CCL1 = [1, 2, 840, 10003, 3, 4]
Z3950_ATTRS_CCL1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 4])
Z3950_ATTRS_CIMI1 = [1, 2, 840, 10003, 3, 8]
Z3950_ATTRS_CIMI1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 8])
Z3950_ATTRS_COLLECTIONS1 = [1, 2, 840, 10003, 3, 7]
Z3950_ATTRS_COLLECTIONS1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 7])
Z3950_ATTRS_DAN1 = [1, 2, 840, 10003, 3, 15]
Z3950_ATTRS_DAN1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 15])
Z3950_ATTRS_EXP1 = [1, 2, 840, 10003, 3, 2]
Z3950_ATTRS_EXP1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 2])
Z3950_ATTRS_EXT1 = [1, 2, 840, 10003, 3, 3]
Z3950_ATTRS_EXT1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 3])
Z3950_ATTRS_FIN1 = [1, 2, 840, 10003, 3, 14]
Z3950_ATTRS_FIN1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 14])
Z3950_ATTRS_GEO = [1, 2, 840, 10003, 3, 9]
Z3950_ATTRS_GEO_ov = asn1.OidVal([1, 2, 840, 10003, 3, 9])
Z3950_ATTRS_GILS = [1, 2, 840, 10003, 3, 5]
Z3950_ATTRS_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 3, 5])
Z3950_ATTRS_HOLD = [1, 2, 840, 10003, 3, 16]
Z3950_ATTRS_HOLD_ov = asn1.OidVal([1, 2, 840, 10003, 3, 16])
Z3950_ATTRS_MARC = [1, 2, 840, 10003, 3, 17]
Z3950_ATTRS_MARC_ov = asn1.OidVal([1, 2, 840, 10003, 3, 17])
Z3950_ATTRS_STAS = [1, 2, 840, 10003, 3, 6]
Z3950_ATTRS_STAS_ov = asn1.OidVal([1, 2, 840, 10003, 3, 6])
Z3950_ATTRS_UTIL = [1, 2, 840, 10003, 3, 11]
Z3950_ATTRS_UTIL_ov = asn1.OidVal([1, 2, 840, 10003, 3, 11])
Z3950_ATTRS_XD1 = [1, 2, 840, 10003, 3, 12]
Z3950_ATTRS_XD1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 12])
Z3950_ATTRS_ZBIG = [1, 2, 840, 10003, 3, 10]
Z3950_ATTRS_ZBIG_ov = asn1.OidVal([1, 2, 840, 10003, 3, 10])
Z3950_ATTRS_ZEEREX = [1, 2, 840, 10003, 3, 19]
Z3950_ATTRS_ZEEREX_ov = asn1.OidVal([1, 2, 840, 10003, 3, 19])
Z3950_ATTRS_ZTHES = [1, 2, 840, 10003, 3, 13]
Z3950_ATTRS_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 3, 13])
Z3950_DIAG = [1, 2, 840, 10003, 4]
Z3950_DIAG_ov = asn1.OidVal([1, 2, 840, 10003, 4])
Z3950_DIAG_BIB1 = [1, 2, 840, 10003, 4, 1]
Z3950_DIAG_BIB1_ov = asn1.OidVal([1, 2, 840, 10003, 4, 1])
Z3950_DIAG_DIAG1 = [1, 2, 840, 10003, 4, 2]
Z3950_DIAG_DIAG1_ov = asn1.OidVal([1, 2, 840, 10003, 4, 2])
Z3950_DIAG_ES = [1, 2, 840, 10003, 4, 3]
Z3950_DIAG_ES_ov = asn1.OidVal([1, 2, 840, 10003, 4, 3])
Z3950_DIAG_GENERAL = [1, 2, 840, 10003, 4, 4]
Z3950_DIAG_GENERAL_ov = asn1.OidVal([1, 2, 840, 10003, 4, 4])
Z3950_ES = [1, 2, 840, 10003, 9]
Z3950_ES_ov = asn1.OidVal([1, 2, 840, 10003, 9])
Z3950_ES_DBUPDATE = [1, 2, 840, 10003, 9, 5]
Z3950_ES_DBUPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5])
Z3950_ES_DBUPDATE_REV = [1, 2, 840, 10003, 9, 5, 1]
Z3950_ES_DBUPDATE_REV_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5, 1])
Z3950_ES_DBUPDATE_REV_1 = [1, 2, 840, 10003, 9, 5, 1, 1]
Z3950_ES_DBUPDATE_REV_1_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5, 1, 1])
Z3950_ES_EXPORTINV = [1, 2, 840, 10003, 9, 7]
Z3950_ES_EXPORTINV_ov = asn1.OidVal([1, 2, 840, 10003, 9, 7])
Z3950_ES_EXPORTSPEC = [1, 2, 840, 10003, 9, 6]
Z3950_ES_EXPORTSPEC_ov = asn1.OidVal([1, 2, 840, 10003, 9, 6])
Z3950_ES_ITEMORDER = [1, 2, 840, 10003, 9, 4]
Z3950_ES_ITEMORDER_ov = asn1.OidVal([1, 2, 840, 10003, 9, 4])
Z3950_ES_PERIODQRY = [1, 2, 840, 10003, 9, 3]
Z3950_ES_PERIODQRY_ov = asn1.OidVal([1, 2, 840, 10003, 9, 3])
Z3950_ES_PERSISTQRY = [1, 2, 840, 10003, 9, 2]
Z3950_ES_PERSISTQRY_ov = asn1.OidVal([1, 2, 840, 10003, 9, 2])
Z3950_ES_PERSISTRS = [1, 2, 840, 10003, 9, 1]
Z3950_ES_PERSISTRS_ov = asn1.OidVal([1, 2, 840, 10003, 9, 1])
Z3950_NEG = [1, 2, 840, 10003, 15]
Z3950_NEG_ov = asn1.OidVal([1, 2, 840, 10003, 15])
Z3950_NEG_CHARSET2 = [1, 2, 840, 10003, 15, 1]
Z3950_NEG_CHARSET2_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1])
Z3950_NEG_CHARSET3 = [1, 2, 840, 10003, 15, 3]
Z3950_NEG_CHARSET3_ov = asn1.OidVal([1, 2, 840, 10003, 15, 3])
Z3950_NEG_ES = [1, 2, 840, 10003, 15, 2]
Z3950_NEG_ES_ov = asn1.OidVal([1, 2, 840, 10003, 15, 2])
Z3950_NEG_PRIVATE = [1, 2, 840, 10003, 15, 1000]
Z3950_NEG_PRIVATE_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000])
Z3950_NEG_PRIVATE_INDEXDATA = [1, 2, 840, 10003, 15, 1000, 81]
Z3950_NEG_PRIVATE_INDEXDATA_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81])
Z3950_NEG_PRIVATE_INDEXDATA_CHARSETNAME = [1, 2, 840, 10003, 15, 1000, 81, 1]
Z3950_NEG_PRIVATE_INDEXDATA_CHARSETNAME_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81, 1])
Z3950_QUERY = [1, 2, 840, 10003, 16]
Z3950_QUERY_ov = asn1.OidVal([1, 2, 840, 10003, 16])
Z3950_QUERY_CQL = [1, 2, 840, 10003, 16, 2]
Z3950_QUERY_CQL_ov = asn1.OidVal([1, 2, 840, 10003, 16, 2])
Z3950_QUERY_SQL = [1, 2, 840, 10003, 16, 1]
Z3950_QUERY_SQL_ov = asn1.OidVal([1, 2, 840, 10003, 16, 1])
Z3950_RECSYN = [1, 2, 840, 10003, 5]
Z3950_RECSYN_ov = asn1.OidVal([1, 2, 840, 10003, 5])
Z3950_RECSYN_AUSMARC = [1, 2, 840, 10003, 5, 20]
Z3950_RECSYN_AUSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 20])
Z3950_RECSYN_CANMARC = [1, 2, 840, 10003, 5, 17]
Z3950_RECSYN_CANMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 17])
Z3950_RECSYN_CATMARC = [1, 2, 840, 10003, 5, 22]
Z3950_RECSYN_CATMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 22])
Z3950_RECSYN_CCF = [1, 2, 840, 10003, 5, 3]
Z3950_RECSYN_CCF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 3])
Z3950_RECSYN_COMARC = [1, 2, 840, 10003, 5, 33]
Z3950_RECSYN_COMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 33])
Z3950_RECSYN_DANMARC = [1, 2, 840, 10003, 5, 14]
Z3950_RECSYN_DANMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 14])
Z3950_RECSYN_ES = [1, 2, 840, 10003, 5, 106]
Z3950_RECSYN_ES_ov = asn1.OidVal([1, 2, 840, 10003, 5, 106])
Z3950_RECSYN_EXPLAIN = [1, 2, 840, 10003, 5, 100]
Z3950_RECSYN_EXPLAIN_ov = asn1.OidVal([1, 2, 840, 10003, 5, 100])
Z3950_RECSYN_FINMARC = [1, 2, 840, 10003, 5, 15]
Z3950_RECSYN_FINMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 15])
Z3950_RECSYN_FINMARC2000 = [1, 2, 840, 10003, 5, 31]
Z3950_RECSYN_FINMARC2000_ov = asn1.OidVal([1, 2, 840, 10003, 5, 31])
Z3950_RECSYN_FRAGMENT = [1, 2, 840, 10003, 5, 107]
Z3950_RECSYN_FRAGMENT_ov = asn1.OidVal([1, 2, 840, 10003, 5, 107])
Z3950_RECSYN_GRS0 = [1, 2, 840, 10003, 5, 104]
Z3950_RECSYN_GRS0_ov = asn1.OidVal([1, 2, 840, 10003, 5, 104])
Z3950_RECSYN_GRS1 = [1, 2, 840, 10003, 5, 105]
Z3950_RECSYN_GRS1_ov = asn1.OidVal([1, 2, 840, 10003, 5, 105])
Z3950_RECSYN_HUNMARC = [1, 2, 840, 10003, 5, 29]
Z3950_RECSYN_HUNMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 29])
Z3950_RECSYN_IBERMARC = [1, 2, 840, 10003, 5, 21]
Z3950_RECSYN_IBERMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 21])
Z3950_RECSYN_INTERMARC = [1, 2, 840, 10003, 5, 2]
Z3950_RECSYN_INTERMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 2])
Z3950_RECSYN_ISDSMARC = [1, 2, 840, 10003, 5, 27]
Z3950_RECSYN_ISDSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 27])
Z3950_RECSYN_JPMARC = [1, 2, 840, 10003, 5, 24]
Z3950_RECSYN_JPMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 24])
Z3950_RECSYN_LIBRISMARC = [1, 2, 840, 10003, 5, 13]
Z3950_RECSYN_LIBRISMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 13])
Z3950_RECSYN_MAB = [1, 2, 840, 10003, 5, 16]
Z3950_RECSYN_MAB_ov = asn1.OidVal([1, 2, 840, 10003, 5, 16])
Z3950_RECSYN_MALMARC = [1, 2, 840, 10003, 5, 23]
Z3950_RECSYN_MALMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 23])
Z3950_RECSYN_MARC21FIN = [1, 2, 840, 10003, 5, 32]
Z3950_RECSYN_MARC21FIN_ov = asn1.OidVal([1, 2, 840, 10003, 5, 32])
Z3950_RECSYN_MIME = [1, 2, 840, 10003, 5, 109]
Z3950_RECSYN_MIME_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109])
Z3950_RECSYN_MIME_GIF = [1, 2, 840, 10003, 5, 109, 5]
Z3950_RECSYN_MIME_GIF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 5])
Z3950_RECSYN_MIME_HTML = [1, 2, 840, 10003, 5, 109, 3]
Z3950_RECSYN_MIME_HTML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 3])
Z3950_RECSYN_MIME_JPEG = [1, 2, 840, 10003, 5, 109, 6]
Z3950_RECSYN_MIME_JPEG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 6])
Z3950_RECSYN_MIME_MPEG = [1, 2, 840, 10003, 5, 109, 8]
Z3950_RECSYN_MIME_MPEG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 8])
Z3950_RECSYN_MIME_PDF = [1, 2, 840, 10003, 5, 109, 1]
Z3950_RECSYN_MIME_PDF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 1])
Z3950_RECSYN_MIME_PNG = [1, 2, 840, 10003, 5, 109, 7]
Z3950_RECSYN_MIME_PNG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 7])
Z3950_RECSYN_MIME_POSTSCRIPT = [1, 2, 840, 10003, 5, 109, 2]
Z3950_RECSYN_MIME_POSTSCRIPT_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 2])
Z3950_RECSYN_MIME_SGML = [1, 2, 840, 10003, 5, 109, 9]
Z3950_RECSYN_MIME_SGML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 9])
Z3950_RECSYN_MIME_TIFF = [1, 2, 840, 10003, 5, 109, 4]
Z3950_RECSYN_MIME_TIFF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 4])
Z3950_RECSYN_MIME_XML = [1, 2, 840, 10003, 5, 109, 10]
Z3950_RECSYN_MIME_XML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 10])
Z3950_RECSYN_NACSISCATP = [1, 2, 840, 10003, 5, 30]
Z3950_RECSYN_NACSISCATP_ov = asn1.OidVal([1, 2, 840, 10003, 5, 30])
Z3950_RECSYN_NORMARC = [1, 2, 840, 10003, 5, 12]
Z3950_RECSYN_NORMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 12])
Z3950_RECSYN_OPAC = [1, 2, 840, 10003, 5, 102]
Z3950_RECSYN_OPAC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 102])
Z3950_RECSYN_PICAMARC = [1, 2, 840, 10003, 5, 19]
Z3950_RECSYN_PICAMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 19])
Z3950_RECSYN_RUSMARC = [1, 2, 840, 10003, 5, 28]
Z3950_RECSYN_RUSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 28])
Z3950_RECSYN_SBNMARC = [1, 2, 840, 10003, 5, 18]
Z3950_RECSYN_SBNMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 18])
Z3950_RECSYN_SIGLEMARC = [1, 2, 840, 10003, 5, 26]
Z3950_RECSYN_SIGLEMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 26])
Z3950_RECSYN_SQL = [1, 2, 840, 10003, 5, 111]
Z3950_RECSYN_SQL_ov = asn1.OidVal([1, 2, 840, 10003, 5, 111])
Z3950_RECSYN_SUMMARY = [1, 2, 840, 10003, 5, 103]
Z3950_RECSYN_SUMMARY_ov = asn1.OidVal([1, 2, 840, 10003, 5, 103])
Z3950_RECSYN_SUTRS = [1, 2, 840, 10003, 5, 101]
Z3950_RECSYN_SUTRS_ov = asn1.OidVal([1, 2, 840, 10003, 5, 101])
Z3950_RECSYN_SWEMARC = [1, 2, 840, 10003, 5, 25]
Z3950_RECSYN_SWEMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 25])
Z3950_RECSYN_UKMARC = [1, 2, 840, 10003, 5, 11]
Z3950_RECSYN_UKMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 11])
Z3950_RECSYN_UNIMARC = [1, 2, 840, 10003, 5, 1]
Z3950_RECSYN_UNIMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 1])
Z3950_RECSYN_USMARC = [1, 2, 840, 10003, 5, 10]
Z3950_RECSYN_USMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10])
Z3950_RECSYN_USMARC_AUTH = [1, 2, 840, 10003, 5, 10, 2]
Z3950_RECSYN_USMARC_AUTH_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 2])
Z3950_RECSYN_USMARC_BIBLIO = [1, 2, 840, 10003, 5, 10, 1]
Z3950_RECSYN_USMARC_BIBLIO_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 1])
Z3950_RECSYN_USMARC_CLASS = [1, 2, 840, 10003, 5, 10, 5]
Z3950_RECSYN_USMARC_CLASS_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 5])
Z3950_RECSYN_USMARC_COMMUNITY = [1, 2, 840, 10003, 5, 10, 4]
Z3950_RECSYN_USMARC_COMMUNITY_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 4])
Z3950_RECSYN_USMARC_HOLD = [1, 2, 840, 10003, 5, 10, 3]
Z3950_RECSYN_USMARC_HOLD_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 3])
Z3950_RECSYN_ZMIME = [1, 2, 840, 10003, 5, 110]
Z3950_RECSYN_ZMIME_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110])
Z3950_RECSYN_ZMIME_TIFFB = [1, 2, 840, 10003, 5, 110, 1]
Z3950_RECSYN_ZMIME_TIFFB_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110, 1])
Z3950_RECSYN_ZMIME_WAV = [1, 2, 840, 10003, 5, 110, 2]
Z3950_RECSYN_ZMIME_WAV_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110, 2])
Z3950_RRF = [1, 2, 840, 10003, 7]
Z3950_RRF_ov = asn1.OidVal([1, 2, 840, 10003, 7])
Z3950_RRF_RESOURCE1 = [1, 2, 840, 10003, 7, 1]
Z3950_RRF_RESOURCE1_ov = asn1.OidVal([1, 2, 840, 10003, 7, 1])
Z3950_RRF_RESOURCE2 = [1, 2, 840, 10003, 7, 2]
Z3950_RRF_RESOURCE2_ov = asn1.OidVal([1, 2, 840, 10003, 7, 2])
Z3950_SCHEMA = [1, 2, 840, 10003, 13]
Z3950_SCHEMA_ov = asn1.OidVal([1, 2, 840, 10003, 13])
Z3950_SCHEMA_CIMI = [1, 2, 840, 10003, 13, 5]
Z3950_SCHEMA_CIMI_ov = asn1.OidVal([1, 2, 840, 10003, 13, 5])
Z3950_SCHEMA_COLLECTIONS = [1, 2, 840, 10003, 13, 3]
Z3950_SCHEMA_COLLECTIONS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 3])
# NOTE(review): EDIT/INSERT/WAIS/ZTHES below all carry the same arc
# [1, 2, 840, 10003, 13, 1], mirroring the duplicates in the `oids`
# dict above — verify against the official Z39.50 OID register.
Z3950_SCHEMA_EDIT = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_EDIT_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_GEO = [1, 2, 840, 10003, 13, 4]
Z3950_SCHEMA_GEO_ov = asn1.OidVal([1, 2, 840, 10003, 13, 4])
Z3950_SCHEMA_GILS = [1, 2, 840, 10003, 13, 2]
Z3950_SCHEMA_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 2])
Z3950_SCHEMA_HOLDINGS = [1, 2, 840, 10003, 13, 7]
Z3950_SCHEMA_HOLDINGS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7])
Z3950_SCHEMA_HOLDINGS_11 = [1, 2, 840, 10003, 13, 7, 1]
Z3950_SCHEMA_HOLDINGS_11_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 1])
Z3950_SCHEMA_HOLDINGS_12 = [1, 2, 840, 10003, 13, 7, 2]
Z3950_SCHEMA_HOLDINGS_12_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 2])
Z3950_SCHEMA_HOLDINGS_14 = [1, 2, 840, 10003, 13, 7, 4]
Z3950_SCHEMA_HOLDINGS_14_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 4])
Z3950_SCHEMA_INSERT = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_INSERT_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_UPDATE = [1, 2, 840, 10003, 13, 6]
Z3950_SCHEMA_UPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 13, 6])
Z3950_SCHEMA_WAIS = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_WAIS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_ZTHES = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SPEC = [1, 2, 840, 10003, 11]
Z3950_SPEC_ov = asn1.OidVal([1, 2, 840, 10003, 11])
Z3950_SPEC_ESPEC1 = [1, 2, 840, 10003, 11, 1]
Z3950_SPEC_ESPEC1_ov = asn1.OidVal([1, 2, 840, 10003, 11, 1])
Z3950_SPEC_ESPEC2 = [1, 2, 840, 10003, 11, 2]
Z3950_SPEC_ESPEC2_ov = asn1.OidVal([1, 2, 840, 10003, 11, 2])
Z3950_SPEC_ESPECQ = [1, 2, 840, 10003, 11, 3]
Z3950_SPEC_ESPECQ_ov = asn1.OidVal([1, 2, 840, 10003, 11, 3])
Z3950_TAGSET = [1, 2, 840, 10003, 14]
Z3950_TAGSET_ov = asn1.OidVal([1, 2, 840, 10003, 14])
Z3950_TAGSET_CIMI = [1, 2, 840, 10003, 14, 6]
Z3950_TAGSET_CIMI_ov = asn1.OidVal([1, 2, 840, 10003, 14, 6])
Z3950_TAGSET_COLLECTIONS = [1, 2, 840, 10003, 14, 5]
Z3950_TAGSET_COLLECTIONS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 5])
Z3950_TAGSET_G = [1, 2, 840, 10003, 14, 2]
Z3950_TAGSET_G_ov = asn1.OidVal([1, 2, 840, 10003, 14, 2])
Z3950_TAGSET_GILS = [1, 2, 840, 10003, 14, 4]
Z3950_TAGSET_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 4])
Z3950_TAGSET_M = [1, 2, 840, 10003, 14, 1]
Z3950_TAGSET_M_ov = asn1.OidVal([1, 2, 840, 10003, 14, 1])
Z3950_TAGSET_STAS = [1, 2, 840, 10003, 14, 3]
Z3950_TAGSET_STAS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 3])
Z3950_TAGSET_UPDATE = [1, 2, 840, 10003, 14, 7]
Z3950_TAGSET_UPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 14, 7])
Z3950_TAGSET_ZTHES = [1, 2, 840, 10003, 14, 8]
Z3950_TAGSET_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 14, 8])
Z3950_TRANSFER = [1, 2, 840, 10003, 6]
Z3950_TRANSFER_ov = asn1.OidVal([1, 2, 840, 10003, 6])
Z3950_USR = [1, 2, 840, 10003, 10]
Z3950_USR_ov = asn1.OidVal([1, 2, 840, 10003, 10])
Z3950_USR_AUTHFILE = [1, 2, 840, 10003, 10, 11]
Z3950_USR_AUTHFILE_ov = asn1.OidVal([1, 2, 840, 10003, 10, 11])
Z3950_USR_CHARSETNEG = [1, 2, 840, 10003, 10, 2]
Z3950_USR_CHARSETNEG_ov = asn1.OidVal([1, 2, 840, 10003, 10, 2])
Z3950_USR_DATETIME = [1, 2, 840, 10003, 10, 6]
Z3950_USR_DATETIME_ov = asn1.OidVal([1, 2, 840, 10003, 10, 6])
Z3950_USR_EDITACTIONQUAL = [1, 2, 840, 10003, 10, 10]
Z3950_USR_EDITACTIONQUAL_ov = asn1.OidVal([1, 2, 840, 10003, 10, 10])
Z3950_USR_INFO1 = [1, 2, 840, 10003, 10, 3]
Z3950_USR_INFO1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 3])
Z3950_USR_INSERTACTIONQUAL = [1, 2, 840, 10003, 10, 9]
Z3950_USR_INSERTACTIONQUAL_ov = asn1.OidVal([1, 2, 840, 10003, 10, 9])
Z3950_USR_PRIVATE = [1, 2, 840, 10003, 10, 1000]
Z3950_USR_PRIVATE_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000])
Z3950_USR_PRIVATE_OCLC = [1, 2, 840, 10003, 10, 1000, 17]
Z3950_USR_PRIVATE_OCLC_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17])
Z3950_USR_PRIVATE_OCLC_INFO = [1, 2, 840, 10003, 10, 1000, 17, 1]
Z3950_USR_PRIVATE_OCLC_INFO_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17, 1])
Z3950_USR_SEARCHRES1 = [1, 2, 840, 10003, 10, 1]
Z3950_USR_SEARCHRES1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1])
Z3950_USR_SEARCHTERMS1 = [1, 2, 840, 10003, 10, 4]
Z3950_USR_SEARCHTERMS1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 4])
Z3950_USR_SEARCHTERMS2 = [1, 2, 840, 10003, 10, 5]
Z3950_USR_SEARCHTERMS2_ov = asn1.OidVal([1, 2, 840, 10003, 10, 5])
Z3950_VAR = [1, 2, 840, 10003, 12]
Z3950_VAR_ov = asn1.OidVal([1, 2, 840, 10003, 12])
Z3950_VAR_VARIANT1 = [1, 2, 840, 10003, 12, 1]
Z3950_VAR_VARIANT1_ov = asn1.OidVal([1, 2, 840, 10003, 12, 1])
| [
6738,
9485,
57,
2670,
1120,
1330,
355,
77,
16,
198,
10994,
796,
23884,
198,
10994,
17816,
57,
2670,
1120,
20520,
796,
1391,
6,
1868,
10354,
355,
77,
16,
13,
46,
312,
7762,
26933,
16,
11,
362,
11,
48777,
11,
8576,
18,
46570,
705,
2... | 1.841856 | 19,811 |
# Make sure to have CoppeliaSim running, with followig scene loaded:
#
# scenes/movementViaRemoteApi.ttt
#
# Do not launch simulation, then run this script
#
# The client side (i.e. this script) depends on:
#
# sim.py, simConst.py, and the remote API library available
# in programming/remoteApiBindings/lib/lib
# Additionally you will need the python math and msgpack modules
try:
import sim
except:
print ('--------------------------------------------------------------')
print ('"sim.py" could not be imported. This means very probably that')
print ('either "sim.py" or the remoteApi library could not be found.')
print ('Make sure both are in the same folder as this file,')
print ('or appropriately adjust the file "sim.py"')
print ('--------------------------------------------------------------')
print ('')
import math
import msgpack
with Client() as client:
    print("running")

    if client.id != -1:
        print('Connected to remote API server')

        targetArm1 = 'threadedBlueArm'
        targetArm2 = 'nonThreadedRedArm'
        arms = (targetArm1, targetArm2)

        client.stringSignalName1 = targetArm1 + '_executedMovId'
        client.stringSignalName2 = targetArm2 + '_executedMovId'

        def send_movement_data(movement_data):
            """msgpack-encode one movement spec and push it to both arm scripts."""
            packed = msgpack.packb(movement_data)
            for arm in arms:
                sim.simxCallScriptFunction(
                    client.id, arm, sim.sim_scripttype_childscript,
                    'legacyRapiMovementDataFunction', [], [], [], packed,
                    sim.simx_opmode_oneshot)

        def execute_movement(mov_id):
            """Ask both arm scripts to execute a previously sent sequence."""
            for arm in arms:
                sim.simxCallScriptFunction(
                    client.id, arm, sim.sim_scripttype_childscript,
                    'legacyRapiExecuteMovement', [], [], [], mov_id,
                    sim.simx_opmode_oneshot)

        def wait_for_both(mov_id):
            # NOTE(review): the original waited twice through
            # waitForMovementExecuted1 -- presumably once per arm signal.
            # Confirm whether a waitForMovementExecuted2 (second signal) was
            # intended; the duplicated call is preserved as-is.
            waitForMovementExecuted1(mov_id)
            waitForMovementExecuted1(mov_id)

        # Start streaming the two executedMovId string signals:
        sim.simxGetStringSignal(client.id, client.stringSignalName1, sim.simx_opmode_streaming)
        sim.simxGetStringSignal(client.id, client.stringSignalName2, sim.simx_opmode_streaming)

        # Set-up some movement variables (velocities/accelerations in rad/s):
        mVel = 100 * math.pi / 180
        mAccel = 150 * math.pi / 180
        maxVel = [mVel, mVel, mVel, mVel, mVel, mVel]
        maxAccel = [mAccel, mAccel, mAccel, mAccel, mAccel, mAccel]
        targetVel = [0, 0, 0, 0, 0, 0]

        # Start simulation:
        sim.simxStartSimulation(client.id, sim.simx_opmode_blocking)

        # Wait until ready:
        wait_for_both('ready')

        # Send first movement sequence:
        targetConfig = [90*math.pi/180, 90*math.pi/180, -90*math.pi/180,
                        90*math.pi/180, 90*math.pi/180, 90*math.pi/180]
        send_movement_data({"id": "movSeq1", "type": "mov",
                            "targetConfig": targetConfig, "targetVel": targetVel,
                            "maxVel": maxVel, "maxAccel": maxAccel})

        # Execute first movement sequence:
        execute_movement('movSeq1')

        # Wait until above movement sequence finished executing:
        wait_for_both('movSeq1')

        # Send second and third movement sequence, where third one should
        # execute immediately after the second one:
        targetConfig = [-90*math.pi/180, 45*math.pi/180, 90*math.pi/180,
                        135*math.pi/180, 90*math.pi/180, 90*math.pi/180]
        targetVel = [-60*math.pi/180, -20*math.pi/180, 0, 0, 0, 0]
        send_movement_data({"id": "movSeq2", "type": "mov",
                            "targetConfig": targetConfig, "targetVel": targetVel,
                            "maxVel": maxVel, "maxAccel": maxAccel})

        targetConfig = [0, 0, 0, 0, 0, 0]
        targetVel = [0, 0, 0, 0, 0, 0]
        send_movement_data({"id": "movSeq3", "type": "mov",
                            "targetConfig": targetConfig, "targetVel": targetVel,
                            "maxVel": maxVel, "maxAccel": maxAccel})

        # Execute second and third movement sequence:
        execute_movement('movSeq2')
        execute_movement('movSeq3')

        # Wait until above 2 movement sequences finished executing:
        wait_for_both('movSeq3')

        sim.simxStopSimulation(client.id, sim.simx_opmode_blocking)
        sim.simxGetStringSignal(client.id, client.stringSignalName1, sim.simx_opmode_discontinue)
        sim.simxGetStringSignal(client.id, client.stringSignalName2, sim.simx_opmode_discontinue)
        sim.simxGetPingTime(client.id)

        # Now close the connection to CoppeliaSim:
        sim.simxFinish(client.id)
    else:
        print('Failed connecting to remote API server')
| [
2,
6889,
1654,
284,
423,
1766,
381,
25418,
8890,
2491,
11,
351,
1061,
328,
3715,
9639,
25,
198,
2,
198,
2,
8188,
14,
21084,
434,
30754,
36510,
32,
14415,
13,
926,
83,
198,
2,
198,
2,
2141,
407,
4219,
18640,
11,
788,
1057,
428,
4... | 2.535852 | 2,329 |
# test modified version of MulticoreTSNE
from MulticoreTSNE import MulticoreTSNE as TSNE
from sklearn.datasets import load_digits
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import numpy as np
from matplotlib import pyplot as plt
import multiprocessing

print(TSNE.__version__)

# Use three quarters of the available cores for the embedding.
total_cores = multiprocessing.cpu_count()
worker_count = int(total_cores * 0.75)

# Standardize the digits dataset before embedding.
features, labels = load_digits(return_X_y=True)
features = StandardScaler().fit_transform(features)

embedder = TSNE(
    n_jobs=worker_count,
    n_iter_without_progress=100,
    min_grad_norm=1e-04,
    perplexity=65,
    verbose=1,
)
embedding = embedder.fit_transform(features)
print("KL loss", embedder.kl_divergence_)

# Keep only iterations where a loss was actually recorded.
iter_losses = embedder.progress_errors_
iter_losses = iter_losses[np.where(iter_losses > 0)]
print("Loss by iter", iter_losses)

plt.figure(figsize=(5, 2))
plt.plot(iter_losses)
plt.savefig("temp_test_installed_loss.png")

# Scale per-point errors into a marker-size range for the scatter plot.
point_errors = embedder.error_per_point_
marker_sizes = (
    MinMaxScaler(feature_range=(32, 160))
    .fit_transform(point_errors.reshape(-1, 1))
    .reshape(1, -1)
)

plt.figure(figsize=(6, 6))
plt.scatter(embedding[:, 0], embedding[:, 1], c=labels, s=marker_sizes, alpha=0.4, cmap="jet")
plt.savefig("temp_test_installed_scatter.png")
2,
1332,
9518,
2196,
286,
7854,
291,
382,
4694,
12161,
198,
198,
6738,
7854,
291,
382,
4694,
12161,
1330,
7854,
291,
382,
4694,
12161,
355,
26136,
12161,
198,
198,
6738,
1341,
35720,
13,
19608,
292,
1039,
1330,
3440,
62,
12894,
896,
1... | 2.457557 | 483 |
#-*- coding: utf8 -*-
import sys
from lib import SimpleDBTool, DBTool, DBFactory
if __name__ == "__main__":
sql = 'SELECT username FROM core_customer Limit 1;'
res = SimpleDBTool.query('edm_web', sql)
print res
# (('test',),)
res = SimpleDBTool.redis.incr('test:123', 2)
print res
# 2
###########################################
dbkit = DBTool.DBToolKit()
res = dbkit.init_pool('edm_web')
if not res: sys.exit(1)
res = do_query('edm_web', sql)
print res
# (('test',),)
redis = dbkit.get_redis_connection()
res = redis.incr('test:123', 2)
print res
# 4
res = dbkit.init_mongo()
res = mongo_find_one({"addr": '1@qq.com'})
print res
# None
#############################################
mysql_edm_web_obj = DBFactory.getDBObject('mysql', 'edm_web')
sql = 'SELECT username FROM core_customer Limit 1;'
res = mysql_edm_web_obj.query(sql)
print res
pgsql_edm_web_obj = DBFactory.getDBObject('postgresql', 'mail_relay')
sql = 'SELECT username FROM auth_user Limit 1;'
res = pgsql_edm_web_obj.query(sql)
print res
# [('test22',)]
mongo_obj = DBFactory.getDBObject('mongo', 'mongo')
mongo = mongo_obj.get_mongo_collection('mail')
res = mongo.find_one({"addr": '1@qq.com'})
print res
# None
#############################################
pgsql_edm_web_obj2 = DBFactory.getDBObject('postgresql', 'mail_relay')
sql = 'SELECT username FROM auth_user Limit 1;'
res = pgsql_edm_web_obj2.query(sql)
print res
# [('test22',)]
print id(pgsql_edm_web_obj), id(pgsql_edm_web_obj2)
# 140476245351888 140476245351888
| [
2,
12,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
201,
198,
201,
198,
11748,
25064,
201,
198,
6738,
9195,
1330,
17427,
11012,
25391,
11,
360,
19313,
970,
11,
20137,
22810,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624... | 2.225602 | 789 |
from django.conf.urls import url
from . import views
# URL routes for the HTTP test-case views. Each route captures the test-case
# name from the ``.html`` path segment and passes it to the matching view as
# the ``test_case`` keyword argument.
urlpatterns = [
    url(r'^request_type_method/(?P<test_case>\w+).html$', views.request_type_method),
    url(r'^header_body_data/(?P<test_case>\w+).html$', views.header_body_data),
    url(r'^form_data/(?P<test_case>\w+).html$', views.form_data),
    url(r'^cookies/(?P<test_case>\w+).html$', views.cookies),
]
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
764,
1330,
5009,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
25927,
62,
4906,
62,
24396,
29006,
30,
47,
27,
9288,
62,
7442... | 2.251534 | 163 |
from datetime import datetime
import subprocess
from time import sleep
from models.result import Result
import json
from database import Base, db_session, engine
from schedule import every, repeat, run_pending
# Fix: a decorator cannot be applied to an ``if`` statement -- the original
# ``@repeat(every(30).minutes)`` sat directly above ``if __name__ ...`` and
# was a SyntaxError. Register the periodic job on a wrapper function instead.
@repeat(every(30).minutes)
def scheduled_speed_test():
    """Scheduled job: re-run the speed test every 30 minutes."""
    run_speed_test()


if __name__ == '__main__':
    # Run one measurement immediately, then service the scheduler forever.
    run_speed_test()
    while True:
        run_pending()
        sleep(1)
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
850,
14681,
198,
6738,
640,
1330,
3993,
198,
6738,
4981,
13,
20274,
1330,
25414,
198,
11748,
33918,
198,
6738,
6831,
1330,
7308,
11,
20613,
62,
29891,
11,
3113,
198,
6738,
7269,
1330,
790,... | 2.949153 | 118 |
from sublime_plugin import WindowCommand
from ..libraries.tools import save_setting
from ..libraries.paths import folder_explorer
class DeviotChangeBuildFolderCommand(WindowCommand):
"""
Adds extra libraries folder path from the settings
""" | [
6738,
41674,
62,
33803,
1330,
26580,
21575,
198,
6738,
11485,
75,
11127,
13,
31391,
1330,
3613,
62,
33990,
198,
6738,
11485,
75,
11127,
13,
6978,
82,
1330,
9483,
62,
20676,
11934,
198,
198,
4871,
6245,
5151,
19400,
15580,
41092,
21575,
... | 3.848485 | 66 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
from ss_baselines.common.utils import Flatten
from habitat_sim.utils.common import d3_40_colors_rgb
class VisualCNN(nn.Module):
r"""A Simple 3-Conv CNN followed by a fully connected layer
Takes in observations and produces an embedding of the rgb and/or depth components
Args:
observation_space: The observation_space of the agent
output_size: The size of the embedding vector
"""
def _conv_output_dim(
self, dimension, padding, dilation, kernel_size, stride
):
r"""Calculates the output height and width based on the input
height and width to the convolution layer.
ref: https://pytorch.org/docs/master/nn.html#torch.nn.Conv2d
"""
assert len(dimension) == 2
out_dimension = []
for i in range(len(dimension)):
out_dimension.append(
int(
np.floor(
(
(
dimension[i]
+ 2 * padding[i]
- dilation[i] * (kernel_size[i] - 1)
- 1
)
/ stride[i]
)
+ 1
)
)
)
return tuple(out_dimension)
@property
@property
@property
@property
def convert_semantics_to_rgb(semantics):
r"""Converts semantic IDs to RGB images.
"""
semantics = semantics.long() % 40
mapping_rgb = torch.from_numpy(d3_40_colors_rgb).to(semantics.device)
semantics_r = torch.take(mapping_rgb[:, 0], semantics)
semantics_g = torch.take(mapping_rgb[:, 1], semantics)
semantics_b = torch.take(mapping_rgb[:, 2], semantics)
semantics_rgb = torch.stack([semantics_r, semantics_g, semantics_b], -1)
return semantics_rgb | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
1439,
2489,
10395,
13,
198,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
5964,
1043,
287,
262,
1... | 2.111538 | 1,040 |
# django imports
from django.contrib.contenttypes.models import ContentType
# permissions imports
import permissions.utils
from permissions.models import Role
# workflows imports
from workflows.conf import settings
from workflows.models import (StateInheritanceBlock, State, StateObjectHistory, StateObjectRelation,
StatePermissionRelation, Transition, Workflow, WorkflowModelRelation,
WorkflowObjectRelation, WorkflowPermissionRelation)
from workflows.signals import before_transition, after_transition, before_state_change, after_state_change
def get_objects_for_workflow(workflow):
    """Return every object governed by *workflow*.

    ``workflow`` may be a Workflow instance or a workflow name; an unknown
    name yields an empty list.
    """
    if isinstance(workflow, Workflow):
        return workflow.get_objects()
    try:
        found = Workflow.objects.get(name=workflow)
    except Workflow.DoesNotExist:
        return []
    return found.get_objects()
def remove_workflow(ctype_or_obj):
    """Detach the workflow from a content type or from a single object.

    A ContentType argument strips the workflow from the type itself (not
    from instances that carry their own workflow); any other model instance
    loses only its object-level workflow.
    """
    handler = (
        remove_workflow_from_model
        if isinstance(ctype_or_obj, ContentType)
        else remove_workflow_from_object
    )
    handler(ctype_or_obj)
def remove_workflow_from_model(ctype):
    """Remove the workflow from the passed content type.

    After this function has been called the content type has no workflow
    anymore (instances might still have their own ones).

    **Parameters:**

    ctype
        The content type from which the workflow should be removed. Must be
        a ContentType instance.
    """
    # First delete all state relations and reset permissions on instances of
    # ctype which are governed by that workflow.
    workflow = get_workflow_for_model(ctype)
    for obj in get_objects_for_workflow(workflow):
        # Only take care of the given ctype.
        obj_ctype = ContentType.objects.get_for_model(obj)
        if ctype != obj_ctype:
            continue
        # Fix: the original re-assigned the ``ctype`` parameter inside this
        # loop (shadowing the argument); reuse the already-computed
        # ``obj_ctype`` instead -- the guard above makes them equal.
        try:
            sor = StateObjectRelation.objects.get(content_id=obj.id, content_type=obj_ctype)
        except StateObjectRelation.DoesNotExist:
            pass
        else:
            sor.delete()
        # Reset all permissions
        permissions.utils.reset(obj)
    try:
        wmr = WorkflowModelRelation.objects.get(content_type=ctype)
    except WorkflowModelRelation.DoesNotExist:
        pass
    else:
        wmr.delete()
def remove_workflow_from_object(obj):
    """Remove *obj*'s own workflow.

    Afterwards the object has no object-level workflow anymore (it may still
    inherit one from its content type); permissions are reset and the
    content-type workflow's initial state is applied, if any.
    """
    try:
        # NOTE(review): lookup by ``content_type=obj`` mirrors the original;
        # verify it should not be a content_id/content_type pair like the
        # other relation lookups in this module.
        relation = WorkflowObjectRelation.objects.get(content_type=obj)
    except WorkflowObjectRelation.DoesNotExist:
        pass
    else:
        relation.delete()
    # Reset all permissions
    permissions.utils.reset(obj)
    # Set initial state of the content type's workflow (if there is one).
    set_initial_state(obj)
def set_workflow(ctype_or_obj, workflow):
    """Attach *workflow* to a content type or to a single object.

    **Parameters:**

    workflow
        The Workflow instance to attach.

    ctype_or_obj
        Either a ContentType instance or any Django model instance.
    """
    return workflow.set_to(ctype_or_obj)
def set_workflow_for_object(obj, workflow):
    """Assign *workflow* to *obj* and move it into the workflow's initial state.

    ``workflow`` may be a Workflow instance or a workflow name; an unknown
    name makes the function return False without touching the object. If the
    object already carries the given workflow nothing happens.
    """
    if isinstance(workflow, Workflow):
        resolved = workflow
    else:
        try:
            resolved = Workflow.objects.get(name=workflow)
        except Workflow.DoesNotExist:
            return False
    resolved.set_to_object(obj)
def set_workflow_for_model(ctype, workflow):
    """Assign *workflow* to the content type *ctype*, replacing any existing one.

    Objects that carried the old workflow must be updated explicitly.
    ``workflow`` may be a Workflow instance or a workflow name; an unknown
    name makes the function return False.
    """
    if isinstance(workflow, Workflow):
        resolved = workflow
    else:
        try:
            resolved = Workflow.objects.get(name=workflow)
        except Workflow.DoesNotExist:
            return False
    resolved.set_to_model(ctype)
def get_workflow(obj):
    """Return the workflow governing *obj*.

    Looks for an object-specific workflow first and falls back to the
    workflow attached to the object's ContentType.
    """
    own_workflow = get_workflow_for_object(obj)
    if own_workflow is not None:
        return own_workflow
    model_ctype = ContentType.objects.get_for_model(obj)
    return get_workflow_for_model(model_ctype)
def get_workflow_for_object(obj):
    """Return the workflow assigned directly to *obj*, or None."""
    ctype = ContentType.objects.get_for_model(obj)
    try:
        relation = WorkflowObjectRelation.objects.get(content_id=obj.id, content_type=ctype)
    except WorkflowObjectRelation.DoesNotExist:
        return None
    return relation.workflow
def get_workflow_for_model(ctype):
    """Return the workflow assigned to the content type *ctype*, or None.

    Anything other than a ContentType instance is normalized to its
    ContentType first.
    """
    if not isinstance(ctype, ContentType):
        ctype = ContentType.objects.get_for_model(ctype)
    try:
        relation = WorkflowModelRelation.objects.get(content_type=ctype)
    except WorkflowModelRelation.DoesNotExist:
        return None
    return relation.workflow
def get_state(obj):
    """Return the current workflow state of *obj*, or None if it has none."""
    ctype = ContentType.objects.get_for_model(obj)
    try:
        relation = StateObjectRelation.objects.get(content_type=ctype, content_id=obj.id)
    except StateObjectRelation.DoesNotExist:
        return None
    return relation.state
def set_state(obj, state):
    """
    Sets the state for ``obj`` to ``state`` and updates
    the permissions for the object.
    **Parameters:**
    obj
        The object for which the workflow state should be set. Can be any
        Django model instance.
    state
        The state which should be set to the passed object. May be a State
        instance or a state codename string.
    """
    ctype = ContentType.objects.get_for_model(obj)
    # Accept a codename as well as a State instance.
    # (py2-era code: ``basestring`` covers str and unicode.)
    if isinstance(state, basestring):
        state = State.objects.get(codename=state)
    try:
        # Object already has a state: update the relation in place, firing
        # before/after signals around the change and optionally recording
        # the transition in the state history.
        sor = StateObjectRelation.objects.get(content_type=ctype, content_id=obj.id)
        initial_state = sor.state
        before_state_change.send(sender=obj, from_state=initial_state, to_state=state)
        sor.state = state
        sor.save()
        if settings.WORKFLOWS_ENABLE_STATE_HISTORY:
            StateObjectHistory.objects.create(content_type=ctype, content_id=obj.id, state=state)
        after_state_change.send(sender=obj, from_state=initial_state, to_state=state)
    except StateObjectRelation.DoesNotExist:
        # First state for this object: signals fire with from_state=None.
        before_state_change.send(sender=obj, from_state=None, to_state=state)
        StateObjectRelation.objects.create(content=obj, state=state)
        if settings.WORKFLOWS_ENABLE_STATE_HISTORY:
            StateObjectHistory.objects.create(content_type=ctype, content_id=obj.id, state=state)
        after_state_change.send(sender=obj, from_state=None, to_state=state)
    # Re-sync object permissions with the new state.
    update_permissions(obj)
def set_initial_state(obj):
    """Move *obj* into the initial state of its workflow, if it has one."""
    workflow = get_workflow(obj)
    if workflow is None:
        return
    set_state(obj, workflow.get_initial_state())
def get_allowed_transitions(obj, user):
    """Return the transitions *user* may apply to *obj* in its current state.

    An object without a state yields an empty list.
    """
    current = get_state(obj)
    if current is None:
        return []
    return current.get_allowed_transitions(obj, user)
def do_transition(obj, transition, user):
    """Apply *transition* to *obj* on behalf of *user*, if permitted.

    ``transition`` may be a Transition instance or a codename. Returns True
    when the transition was performed, False otherwise.
    """
    if not isinstance(transition, Transition):
        try:
            transition = Transition.objects.get(codename=transition)
        except Transition.DoesNotExist:
            return False
    if transition not in get_allowed_transitions(obj, user):
        return False
    initial_state = get_state(obj)
    before_transition.send(sender=obj, from_state=initial_state, transition=transition, user=user)
    # Only move the object when the transition actually changes the state.
    if transition.destination is not None and transition.destination != initial_state:
        set_state(obj, transition.destination)
    after_transition.send(sender=obj, from_state=initial_state, transition=transition, user=user)
    return True
def update_permissions(obj):
    """Updates the permissions of the passed object according to the object's
    current workflow state.
    """
    workflow = get_workflow(obj)
    state = get_state(obj)
    # Remove all permissions for the workflow.
    # The per-role loop below was replaced by a single bulk call that removes
    # all workflow permissions for all roles at once; kept for reference:
    # for role in Role.objects.all():
    #     for wpr in WorkflowPermissionRelation.objects.filter(workflow=workflow):
    #         permissions.utils.remove_permission(obj, role, wpr.permission)
    perms = [wpr.permission for wpr in WorkflowPermissionRelation.objects.filter(workflow=workflow)]
    permissions.utils.remove_permission(obj, Role.objects.all(), perms)
    # Grant permission for the state
    for spr in StatePermissionRelation.objects.filter(state=state):
        permissions.utils.grant_permission(obj, spr.role, spr.permission)
    # Inheritance-block handling is currently disabled; kept for reference.
    # Remove all inheritance blocks from the object
    # for wpr in WorkflowPermissionRelation.objects.filter(workflow=workflow):
    #     permissions.utils.remove_inheritance_block(obj, wpr.permission)
    # Add inheritance blocks of this state to the object
    # for sib in StateInheritanceBlock.objects.filter(state=state):
    #     permissions.utils.add_inheritance_block(obj, sib.permission)
| [
2,
42625,
14208,
17944,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
13,
27530,
1330,
14041,
6030,
198,
198,
2,
21627,
17944,
198,
11748,
21627,
13,
26791,
198,
6738,
21627,
13,
27530,
1330,
20934,
198,
2,
670,
44041,
17944... | 2.776536 | 4,296 |
"""
This file compares DictConfig methods with the corresponding
methods of standard python's dict.
The following methods are compared:
__contains__
__delitem__
__eq__
__getitem__
__setitem__
get
pop
keys
values
items
We have separate test classes for the following cases:
TestUntypedDictConfig: for DictConfig without a set key_type
TestPrimitiveTypeDunderMethods: for DictConfig where key_type is primitive
TestEnumTypeDunderMethods: for DictConfig where key_type is Enum
"""
from copy import deepcopy
from enum import Enum
from typing import Any, Dict, Optional
from pytest import fixture, mark, param, raises
from omegaconf import DictConfig, OmegaConf
from omegaconf.errors import ConfigKeyError, ConfigTypeError, KeyValidationError
from tests import Enum1
@fixture(params=["str", 1, 3.1415, True, Enum1.FOO])
def key(request: Any) -> Any:
    """One candidate DictConfig key per supported primitive/enum type."""
    return request.param
@fixture
def python_dict(data: Dict[Any, Any]) -> Dict[Any, Any]:
    """A plain-dict copy of *data*, used as the reference for DictConfig."""
    return deepcopy(data)
@fixture(params=[None, False, True])
def struct_mode(request: Any) -> Optional[bool]:
    """Struct-mode flag applied to each config (None leaves the default).

    Fix: the fixture function itself was missing -- the decorator dangled
    directly onto the following parametrize decorator. The ``cfg`` and
    ``cfg_typed`` fixtures request ``struct_mode``, so it must exist.
    """
    return request.param
# Every test in this class runs once per ``data`` dict below (one per key
# type) and once per ``key`` fixture value, comparing DictConfig behavior
# against the plain ``python_dict`` reference.
@mark.parametrize(
    "data",
    [
        param({"a": 10}, id="str"),
        param({1: "a"}, id="int"),
        param({123.45: "a"}, id="float"),
        param({True: "a"}, id="bool"),
        param({Enum1.FOO: "foo"}, id="Enum1"),
    ],
)
class TestUntypedDictConfig:
    """Compare DictConfig with python dict in the case where key_type is not set."""
    # ``cfg`` mirrors ``python_dict``; tests mutate both and compare results.
    # It also requests the module-level ``struct_mode`` fixture.
    @fixture
    def cfg(self, python_dict: Any, struct_mode: Optional[bool]) -> DictConfig:
        """Create a DictConfig instance from the given data"""
        cfg: DictConfig = DictConfig(content=python_dict)
        OmegaConf.set_struct(cfg, struct_mode)
        return cfg
    def test__setitem__(
        self, python_dict: Any, cfg: DictConfig, key: Any, struct_mode: Optional[bool]
    ) -> None:
        """Ensure that __setitem__ has same effect on python dict and on DictConfig."""
        # In struct mode, inserting a brand-new key must be rejected.
        if struct_mode and key not in cfg:
            with raises(ConfigKeyError):
                cfg[key] = "sentinel"
        else:
            python_dict[key] = "sentinel"
            cfg[key] = "sentinel"
            assert python_dict == cfg
    def test__getitem__(self, python_dict: Any, cfg: DictConfig, key: Any) -> None:
        """Ensure that __getitem__ has same result with python dict as with DictConfig."""
        try:
            result = python_dict[key]
        except KeyError:
            # Missing key: dict raises KeyError, DictConfig ConfigKeyError.
            with raises(ConfigKeyError):
                cfg[key]
        else:
            assert result == cfg[key]
    @mark.parametrize("struct_mode", [False, None])
    def test__delitem__(self, python_dict: Any, cfg: DictConfig, key: Any) -> None:
        """Ensure that __delitem__ has same result with python dict as with DictConfig."""
        try:
            del python_dict[key]
            assert key not in python_dict
        except KeyError:
            with raises(ConfigKeyError):
                del cfg[key]
        else:
            del cfg[key]
            assert key not in cfg
    @mark.parametrize("struct_mode", [True])
    def test__delitem__struct_mode(
        self, python_dict: Any, cfg: DictConfig, key: Any
    ) -> None:
        """Ensure that __delitem__ fails in struct_mode"""
        with raises(ConfigTypeError):
            del cfg[key]
    def test__contains__(self, python_dict: Any, cfg: Any, key: Any) -> None:
        """Ensure that __contains__ has same result with python dict as with DictConfig."""
        assert (key in python_dict) == (key in cfg)
    def test_get(self, python_dict: Any, cfg: DictConfig, key: Any) -> None:
        """Ensure that get has same result with python dict as with DictConfig."""
        assert python_dict.get(key) == cfg.get(key)
    def test_get_with_default(
        self, python_dict: Any, cfg: DictConfig, key: Any
    ) -> None:
        """Ensure that get(key, default) has same result with python dict as with DictConfig."""
        assert python_dict.get(key, "DEFAULT") == cfg.get(key, "DEFAULT")
    @mark.parametrize("struct_mode", [False, None])
    def test_pop(
        self,
        python_dict: Any,
        cfg: DictConfig,
        key: Any,
    ) -> None:
        """Ensure that pop has same result with python dict as with DictConfig."""
        try:
            result = python_dict.pop(key)
        except KeyError:
            with raises(ConfigKeyError):
                cfg.pop(key)
        else:
            # A successful pop must leave both mappings with identical keys.
            assert result == cfg.pop(key)
            assert python_dict.keys() == cfg.keys()
    @mark.parametrize("struct_mode", [True])
    def test_pop_struct_mode(
        self,
        python_dict: Any,
        cfg: DictConfig,
        key: Any,
    ) -> None:
        """Ensure that pop fails in struct mode."""
        with raises(ConfigTypeError):
            cfg.pop(key)
    @mark.parametrize("struct_mode", [False, None])
    def test_pop_with_default(
        self,
        python_dict: Any,
        cfg: DictConfig,
        key: Any,
    ) -> None:
        """Ensure that pop(..., DEFAULT) has same result with python dict as with DictConfig."""
        assert python_dict.pop(key, "DEFAULT") == cfg.pop(key, "DEFAULT")
        assert python_dict.keys() == cfg.keys()
    @mark.parametrize("struct_mode", [True])
    def test_pop_with_default_struct_mode(
        self,
        python_dict: Any,
        cfg: DictConfig,
        key: Any,
    ) -> None:
        """Ensure that pop(..., DEFAULT) fails in struct mode."""
        with raises(ConfigTypeError):
            cfg.pop(key, "DEFAULT")
@fixture
def cfg_typed(
python_dict: Any, cfg_key_type: Any, struct_mode: Optional[bool]
) -> DictConfig:
"""Create a DictConfig instance that has strongly-typed keys"""
cfg_typed: DictConfig = DictConfig(content=python_dict, key_type=cfg_key_type)
OmegaConf.set_struct(cfg_typed, struct_mode)
return cfg_typed
@mark.parametrize(
"cfg_key_type,data",
[(str, {"a": 10}), (int, {1: "a"}), (float, {123.45: "a"}), (bool, {True: "a"})],
)
class TestPrimitiveTypeDunderMethods:
"""Compare DictConfig with python dict in the case where key_type is a primitive type."""
def test__setitem__primitive_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
struct_mode: Optional[bool],
) -> None:
"""When DictConfig keys are strongly typed,
ensure that __setitem__ has same effect on python dict and on DictConfig."""
if struct_mode and key not in cfg_typed:
if isinstance(key, cfg_key_type) or (
cfg_key_type == bool and key in (0, 1)
):
with raises(ConfigKeyError):
cfg_typed[key] = "sentinel"
else:
with raises(KeyValidationError):
cfg_typed[key] = "sentinel"
else:
python_dict[key] = "sentinel"
if isinstance(key, cfg_key_type) or (
cfg_key_type == bool and key in (0, 1)
):
cfg_typed[key] = "sentinel"
assert python_dict == cfg_typed
else:
with raises(KeyValidationError):
cfg_typed[key] = "sentinel"
def test__getitem__primitive_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""When Dictconfig keys are strongly typed,
ensure that __getitem__ has same result with python dict as with DictConfig."""
try:
result = python_dict[key]
except KeyError:
if isinstance(key, cfg_key_type) or (
cfg_key_type == bool and key in (0, 1)
):
with raises(ConfigKeyError):
cfg_typed[key]
else:
with raises(KeyValidationError):
cfg_typed[key]
else:
assert result == cfg_typed[key]
@mark.parametrize("struct_mode", [False, None])
def test__delitem__primitive_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""When Dictconfig keys are strongly typed,
ensure that __delitem__ has same result with python dict as with DictConfig."""
try:
del python_dict[key]
assert key not in python_dict
except KeyError:
if isinstance(key, cfg_key_type) or (
cfg_key_type == bool and key in (0, 1)
):
with raises(ConfigKeyError):
del cfg_typed[key]
else:
with raises(KeyValidationError):
del cfg_typed[key]
else:
del cfg_typed[key]
assert key not in cfg_typed
@mark.parametrize("struct_mode", [True])
def test__delitem__primitive_typed_struct_mode(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""Ensure ensure that struct-mode __delitem__ raises ConfigTypeError or KeyValidationError"""
if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
with raises(ConfigTypeError):
del cfg_typed[key]
else:
with raises(KeyValidationError):
del cfg_typed[key]
def test__contains__primitive_typed(
self, python_dict: Any, cfg_typed: Any, key: Any
) -> None:
"""Ensure that __contains__ has same result with python dict as with DictConfig."""
assert (key in python_dict) == (key in cfg_typed)
def test_get_primitive_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that __getitem__ has same result with python dict as with DictConfig."""
if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
assert python_dict.get(key) == cfg_typed.get(key)
else:
with raises(KeyValidationError):
cfg_typed.get(key)
def test_get_with_default_primitive_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that __getitem__ has same result with python dict as with DictConfig."""
if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
assert python_dict.get(key, "DEFAULT") == cfg_typed.get(key, "DEFAULT")
else:
with raises(KeyValidationError):
cfg_typed.get(key, "DEFAULT")
@mark.parametrize("struct_mode", [False, None])
def test_pop_primitive_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that pop has same result with python dict as with DictConfig."""
if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
try:
result = python_dict.pop(key)
except KeyError:
with raises(ConfigKeyError):
cfg_typed.pop(key)
else:
assert result == cfg_typed.pop(key)
assert python_dict.keys() == cfg_typed.keys()
else:
with raises(KeyValidationError):
cfg_typed.pop(key)
@mark.parametrize("struct_mode", [True])
def test_pop_primitive_typed_struct_mode(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that pop fails in struct mode."""
with raises(ConfigTypeError):
cfg_typed.pop(key)
@mark.parametrize("struct_mode", [False, None])
def test_pop_with_default_primitive_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that pop(..., DEFAULT) has same result with python dict as with DictConfig."""
if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
assert python_dict.pop(key, "DEFAULT") == cfg_typed.pop(key, "DEFAULT")
assert python_dict.keys() == cfg_typed.keys()
else:
with raises(KeyValidationError):
cfg_typed.pop(key, "DEFAULT")
@mark.parametrize("struct_mode", [True])
def test_pop_with_default_primitive_typed_struct_mode(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that pop(..., DEFAULT) fails in struct mode"""
with raises(ConfigTypeError):
cfg_typed.pop(key)
@mark.parametrize("cfg_key_type,data", [(Enum1, {Enum1.FOO: "foo"})])
class TestEnumTypeDunderMethods:
"""Compare DictConfig with python dict in the case where key_type is an Enum type."""
@fixture
def key_coerced(self, key: Any, cfg_key_type: Any) -> Any:
"""
This handles key coersion in the special case where DictConfig key_type
is a subclass of Enum: keys of type `str` or `int` are coerced to `key_type`.
See https://github.com/omry/omegaconf/pull/484#issuecomment-765772019
"""
assert issubclass(cfg_key_type, Enum)
if type(key) == str and key in [e.name for e in cfg_key_type]:
return cfg_key_type[key]
elif type(key) == int and key in [e.value for e in cfg_key_type]:
return cfg_key_type(key)
else:
return key
def test__setitem__enum_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
key_coerced: Any,
cfg_key_type: Any,
struct_mode: Optional[bool],
) -> None:
"""When DictConfig keys are strongly typed,
ensure that __setitem__ has same effect on python dict and on DictConfig."""
if struct_mode and key_coerced not in cfg_typed:
if isinstance(key_coerced, cfg_key_type):
with raises(ConfigKeyError):
cfg_typed[key] = "sentinel"
else:
with raises(KeyValidationError):
cfg_typed[key] = "sentinel"
else:
python_dict[key_coerced] = "sentinel"
if isinstance(key_coerced, cfg_key_type):
cfg_typed[key] = "sentinel"
assert python_dict == cfg_typed
else:
with raises(KeyValidationError):
cfg_typed[key] = "sentinel"
def test__getitem__enum_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
key_coerced: Any,
cfg_key_type: Any,
) -> None:
"""When Dictconfig keys are strongly typed,
ensure that __getitem__ has same result with python dict as with DictConfig."""
try:
result = python_dict[key_coerced]
except KeyError:
if isinstance(key_coerced, cfg_key_type):
with raises(ConfigKeyError):
cfg_typed[key]
else:
with raises(KeyValidationError):
cfg_typed[key]
else:
assert result == cfg_typed[key]
@mark.parametrize("struct_mode", [False, None])
def test__delitem__enum_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
key_coerced: Any,
cfg_key_type: Any,
) -> None:
"""When Dictconfig keys are strongly typed,
ensure that __delitem__ has same result with python dict as with DictConfig."""
try:
del python_dict[key_coerced]
assert key_coerced not in python_dict
except KeyError:
if isinstance(key_coerced, cfg_key_type):
with raises(ConfigKeyError):
del cfg_typed[key]
else:
with raises(KeyValidationError):
del cfg_typed[key]
else:
del cfg_typed[key]
assert key not in cfg_typed
@mark.parametrize("struct_mode", [True])
def test__delitem__enum_typed_struct_mode(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
key_coerced: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that __delitem__ errors in struct mode"""
if isinstance(key_coerced, cfg_key_type):
with raises(ConfigTypeError):
del cfg_typed[key]
else:
with raises(KeyValidationError):
del cfg_typed[key]
def test__contains__enum_typed(
self, python_dict: Any, cfg_typed: Any, key: Any, key_coerced: Any
) -> None:
"""Ensure that __contains__ has same result with python dict as with DictConfig."""
assert (key_coerced in python_dict) == (key in cfg_typed)
def test_get_enum_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
key_coerced: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that __getitem__ has same result with python dict as with DictConfig."""
if isinstance(key_coerced, cfg_key_type):
assert python_dict.get(key_coerced) == cfg_typed.get(key)
else:
with raises(KeyValidationError):
cfg_typed.get(key)
def test_get_with_default_enum_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
key_coerced: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that __getitem__ has same result with python dict as with DictConfig."""
if isinstance(key_coerced, cfg_key_type):
assert python_dict.get(key_coerced, "DEFAULT") == cfg_typed.get(
key, "DEFAULT"
)
else:
with raises(KeyValidationError):
cfg_typed.get(key, "DEFAULT")
@mark.parametrize("struct_mode", [False, None])
def test_pop_enum_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
key_coerced: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that pop has same result with python dict as with DictConfig."""
if isinstance(key_coerced, cfg_key_type):
try:
result = python_dict.pop(key_coerced)
except KeyError:
with raises(ConfigKeyError):
cfg_typed.pop(key)
else:
assert result == cfg_typed.pop(key)
assert python_dict.keys() == cfg_typed.keys()
else:
with raises(KeyValidationError):
cfg_typed.pop(key)
@mark.parametrize("struct_mode", [True])
def test_pop_enum_typed_struct_mode(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
key_coerced: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that pop fails in struct mode"""
with raises(ConfigTypeError):
cfg_typed.pop(key)
@mark.parametrize("struct_mode", [False, None])
def test_pop_with_default_enum_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
key_coerced: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that pop(..., DEFAULT) has same result with python dict as with DictConfig."""
if isinstance(key_coerced, cfg_key_type):
assert python_dict.pop(key_coerced, "DEFAULT") == cfg_typed.pop(
key, "DEFAULT"
)
assert python_dict.keys() == cfg_typed.keys()
else:
with raises(KeyValidationError):
cfg_typed.pop(key, "DEFAULT")
@mark.parametrize("struct_mode", [True])
def test_pop_with_default_enum_typed_struct_mode(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
key_coerced: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that pop(..., DEFAULT) errors in struct mode"""
with raises(ConfigTypeError):
cfg_typed.pop(key)
| [
37811,
198,
1212,
2393,
23008,
360,
713,
16934,
5050,
351,
262,
11188,
198,
24396,
82,
286,
3210,
21015,
338,
8633,
13,
198,
464,
1708,
5050,
389,
3688,
25,
198,
220,
220,
220,
11593,
3642,
1299,
834,
198,
220,
220,
220,
11593,
12381,... | 2.053817 | 10,034 |
from connect import *
import clr
import wpf
clr.AddReference("PresentationFramework")
clr.AddReference("PresentationCore")
from System.Collections.Generic import List, Dictionary
from System.Windows import MessageBox
import sys, os
import json
RayStationScriptsPath = os.environ["USERPROFILE"] + r"\DeskTop\RayStationScripts" + "\\"
dllsPath = RayStationScriptsPath + "Dlls"
print "Dlls path: " + dllsPath
sys.path.append(dllsPath)
scriptsPath = RayStationScriptsPath + "Scripts"
print "Scripts path: " + scriptsPath
sys.path.append(scriptsPath)
clr.AddReference("BrainDoseIndices")
from BrainDoseIndices.Views import MainWindow
from BrainDoseIndices.Models import StructureDetail
from Helpers import GetStructureSet, GetRoiDetails
from Helpers import MakeMarginAddedRoi, MakeRingRoi, MakeRoiSubtractedRoi
try:
plan = get_current("Plan")
except:
MessageBox.Show("Plan is not selected. Select Plan")
sys.exit()
structureSet = plan.GetStructureSet()
roiDetails = GetRoiDetails(structureSet)
structureDetails = List[StructureDetail]()
for key, value in roiDetails.items():
if value["HasContours"]:
structureDetail = StructureDetail();
structureDetail.Name = key
structureDetail.Volume = value["Volume"]
structureDetails.Add(structureDetail)
mainWindow = MainWindow(structureDetails)
mainWindow.ShowDialog();
| [
6738,
2018,
1330,
1635,
198,
198,
11748,
537,
81,
198,
11748,
266,
79,
69,
198,
565,
81,
13,
4550,
26687,
7203,
34695,
341,
21055,
6433,
4943,
198,
565,
81,
13,
4550,
26687,
7203,
34695,
341,
14055,
4943,
198,
198,
6738,
4482,
13,
5... | 3.002198 | 455 |
import struct
import subprocess
import threading
from time import sleep
from scapy.layers.dot11 import Dot11, Dot11Elt, sendp, Dot11Deauth, RadioTap
from termcolor import cprint
from pywiface.models import Station, AP
from pywiface.threads import ScannerThread
| [
11748,
2878,
198,
11748,
850,
14681,
198,
11748,
4704,
278,
198,
6738,
640,
1330,
3993,
198,
198,
6738,
629,
12826,
13,
75,
6962,
13,
26518,
1157,
1330,
22875,
1157,
11,
22875,
1157,
36,
2528,
11,
3758,
79,
11,
22875,
1157,
5005,
1843... | 3.3125 | 80 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
# TOOD(Eric Ayers): There is no task or goal named 'jvm' as used in the config section where these parameters are located.
# We might need to rename these whem merging together the config and the new options system.
class JvmDebugConfig(object):
"""A utility class to consolodate fetching JVM flags needed for debugging from the configuration."""
@staticmethod
@staticmethod
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
1946,
41689,
1628,
20420,
357,
3826,
27342,
9865,
3843,
20673,
13,
9132,
737,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
3826,
38559,
24290,
737,
198,
198,
6738,
1... | 3.507692 | 195 |
# static qstrs, should be sorted
# extracted from micropython/py/makeqstrdata.py
static_qstr_list = [
"",
"__dir__", # Put __dir__ after empty qstr for builtin dir() to work
"\n",
" ",
"*",
"/",
"<module>",
"_",
"__call__",
"__class__",
"__delitem__",
"__enter__",
"__exit__",
"__getattr__",
"__getitem__",
"__hash__",
"__init__",
"__int__",
"__iter__",
"__len__",
"__main__",
"__module__",
"__name__",
"__new__",
"__next__",
"__qualname__",
"__repr__",
"__setitem__",
"__str__",
"ArithmeticError",
"AssertionError",
"AttributeError",
"BaseException",
"EOFError",
"Ellipsis",
"Exception",
"GeneratorExit",
"ImportError",
"IndentationError",
"IndexError",
"KeyError",
"KeyboardInterrupt",
"LookupError",
"MemoryError",
"NameError",
"NoneType",
"NotImplementedError",
"OSError",
"OverflowError",
"RuntimeError",
"StopIteration",
"SyntaxError",
"SystemExit",
"TypeError",
"ValueError",
"ZeroDivisionError",
"abs",
"all",
"any",
"append",
"args",
"bool",
"builtins",
"bytearray",
"bytecode",
"bytes",
"callable",
"chr",
"classmethod",
"clear",
"close",
"const",
"copy",
"count",
"dict",
"dir",
"divmod",
"end",
"endswith",
"eval",
"exec",
"extend",
"find",
"format",
"from_bytes",
"get",
"getattr",
"globals",
"hasattr",
"hash",
"id",
"index",
"insert",
"int",
"isalpha",
"isdigit",
"isinstance",
"islower",
"isspace",
"issubclass",
"isupper",
"items",
"iter",
"join",
"key",
"keys",
"len",
"list",
"little",
"locals",
"lower",
"lstrip",
"main",
"map",
"micropython",
"next",
"object",
"open",
"ord",
"pop",
"popitem",
"pow",
"print",
"range",
"read",
"readinto",
"readline",
"remove",
"replace",
"repr",
"reverse",
"rfind",
"rindex",
"round",
"rsplit",
"rstrip",
"self",
"send",
"sep",
"set",
"setattr",
"setdefault",
"sort",
"sorted",
"split",
"start",
"startswith",
"staticmethod",
"step",
"stop",
"str",
"strip",
"sum",
"super",
"throw",
"to_bytes",
"tuple",
"type",
"update",
"upper",
"utf-8",
"value",
"values",
"write",
"zip",
]
| [
2,
9037,
10662,
2536,
82,
11,
815,
307,
23243,
198,
2,
21242,
422,
12314,
1773,
7535,
14,
9078,
14,
15883,
80,
2536,
7890,
13,
9078,
198,
198,
12708,
62,
80,
2536,
62,
4868,
796,
685,
198,
220,
220,
220,
366,
1600,
198,
220,
220,
... | 1.915066 | 1,354 |
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import random
from ryu.base import app_manager
from ryu.lib import hub
from ryu.lib import mac as lib_mac
from ryu.lib.packet import vrrp
from ryu.services.protocols.vrrp import api as vrrp_api
from ryu.services.protocols.vrrp import event as vrrp_event
_VRID = 7
_PRIMARY_IP_ADDRESS0 = '10.0.0.2'
_PRIMARY_IP_ADDRESS1 = '10.0.0.3'
| [
2,
15069,
357,
34,
8,
2211,
399,
3974,
261,
21821,
290,
44735,
10501,
13,
198,
2,
15069,
357,
34,
8,
2211,
1148,
8719,
14063,
993,
1045,
1279,
88,
321,
993,
1045,
379,
1188,
259,
2821,
763,
474,
79,
29,
198,
2,
198,
2,
49962,
73... | 3.115502 | 329 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
parLaser.py: Defines a class for writing laser strings into a flash.par file.
Created by Scott Feister on Wed Feb 14 13:39:38 2018
See two examples towards the bottom of this document.
Note: Make sure to use FLASH setup flags to increase beam and pulse count as needed.
E.g. ./setup LaserSlab -auto ed_maxPulses=60 ed_maxBeams=20 ed_maxPulseSections=60
############# EXAMPLE SCRIPT #############
from flsuite.parLaser import parLaser, parLasers
import numpy as np
# Example 1: Three lasers, each with different parameters
las1 = parLaser(1, laslbl="Second Laser, 808 nm")
las1.lens = [20, 20, 30]
las1.targ = [20, 30, 40]
las1.powers = np.array([1,2,3,4,5])
las1.times = np.array([10,11,12,13,14])
las1.wavelength = 0.808
las2 = parLaser(2, laslbl="Second Laser, Many rays")
las2.lens = [15, 15, 23]
las2.targ = [22, 22, 41]
las2.powers = np.array([1,2,3,4,5,6,7])
las2.times = np.array([10,11,12,13,14,15,16])
las2.numberOfRays = 10000
las3 = parLaser(3, laslbl="Third Laser, Gaussian profile")
las3.lens = [14, 14, 16]
las3.targ = [40, 50, 52]
las3.powers = np.array([2,2.5,3])
las3.times = np.array([10,11,12])
las3.crossSectionFunctionType = "gaussian2D" # 2D Gaussian Beam
las3.gaussianExponent = 4.0 # 4.0 for supergaussian profile
las3.gaussianRadiusMajor = 0.048
las3.gaussianRadiusMinor = 0.048
las1.write('laser1.txt', 'w')
las2.write('laser2.txt', 'w')
las3.write('laser3.txt', 'w')
print("Three lasers written to 'laser1.txt', 'laser2.txt', 'laser3.txt'")
## Example 2: Make a whole bunch of the same laser, but with lens at varying x value
# Uses the "parlasers" class
l = parLasers(10) # Laser list
for i in range(len(l)):
l[i].lens = [i*10, 0, 0] # This is the only thing changing between the ten lasers!
l[i].targ = [5, 5, 5]
l[i].powers = np.array([1,2,3,4,5])
l[i].times = np.array([10,11,12,13,14])
l[i].numberOfRays = 10000
l[i].crossSectionFunctionType = "gaussian2D" # 2D Gaussian Beam
l[i].gaussianExponent = 4.0 # 4.0 for supergaussian profile
l[i].gaussianRadiusMajor = 0.048
l[i].gaussianRadiusMinor = 0.048
l.write('tenlasers.txt', 'w')
print("Ten lasers written to 'tenlasers.txt'")
######### END OF EXAMPLE SCRIPT #########
"""
import numpy as np
class parLasers(list):
""" A list of parLaser objects """
def __str__(self):
""" String representation of the parLaser object """
return self.makePar()
def maxPulseSections(self):
""" Compute the maximum number of pulse sections among the lasers """
self.validate()
maxPS = 0
for i in range(len(self)):
maxPS = max(maxPS, len(self[i].powers))
return maxPS
def validate(self):
""" Check the parLasers object for simple mistakes, raising Exceptions.
"""
for i in range(len(self)):
self[i].validate()
def write(self, file, mode="a"):
""" Open file and write the par string from this parLaser.
Input parameters:
filename String, filename of to be written
mode String, file open mode to be passed into Python's open() call. Options include 'a' for append (if file exists), 'w' for truncate/overwrite, 'x' for exclusive creation, failing if file exists.
"""
with open(file, mode) as f:
f.write(str(self))
class parLaser:
""" A class containing the parameters of a single beam and pulse flash.par (runtime parameters) input """
def __init__(self, lasnum, laslbl=None):
""" Initialize a parLaser object.
Input parameters:
lasnum Integer, any number >0 to identify this pulse/beam combination in the flash.par file
mode String, file open mode to be passed into Python's open() call. Options include 'a' for append (if file exists), 'w' for truncate/overwrite, 'x' for exclusive creation, failing if file exists.
Almost all items available to define a laser pulse and beam in a flash.par file are replicated here.
'None' values are initialized for most parameters. This is deliberate.
Change values from 'None' and they be written into the flash.par file string.
For values that are left as 'None', we avoid writing these parameters to the flash.par file string.
"""
# Special variables for this class
self.lasnum = int(lasnum) # A number for the laser, considered both the beam-number and pulse-number
self.laslbl = laslbl # A label for the laser, e.g. 'Quad24', which is put into the title
self.lens = None # A 3-element list or array with values for [lensx, lensy, lensz]
self.targ = None # A 3-element list or array with values for [targetx, targety, targetz]
self.times = None # Will become an array of times for the laser pulse
self.powers = None # Will become an array of powers for the laser pulse
# Basically initializes everything else found under the "ed_XXXX_1" header
self.gridNAngularTics = None
self.gridNSemiAxisMajorTics = None
self.gridNSemiAxisMinorTics = None
self.numberOfRays = None
self.gaussianCenterMajor = None
self.gaussianCenterMinor = None
self.gaussianExponent = None
self.gaussianRadiusMajor = None
self.gaussianRadiusMinor = None
self.gridDeltaSemiAxisMajor = None
self.gridDeltaSemiAxisMinor = None
self.initialRaySpeed = None
self.lensSemiAxisMajor = None
self.wavelength = None
self.semiAxisMajorTorsionAngle = None
self.targetSemiAxisMajor = None
self.targetSemiAxisMinor = None
self.crossSectionFunctionType = None
self.gridType = None
self.semiAxisMajorTorsionAxis = None
self.ignoreBoundaryCondition = None
def __str__(self):
""" String representation of the parLaser object """
return self.makePar()
def validate(self):
""" Check the parLaser object for simple mistakes, raising Exceptions.
These mistakes would otherwise result in invalid flash.par strings.
Mistakes checked for include:
* Using a laser number less than 1 (flash.par requires pulse/beam numbers to be 1 or greater)
* Specifying unequal numbers of powers and times
* Leaving out x,y, or z position values in the lens and target definitions
"""
# Check for valid laser number (Should be an integer greater than 0)
if self.lasnum < 1:
raise Exception("Problem with pulse or beam with 'lasnum' of {}: 'lasnum' must be an integer greater than zero.".format(self.lasnum))
# Check that variables 'powers' and 'times' were both set as 1D arrays (or lists)
if not (hasattr(self.powers, '__len__') and (not isinstance(self.powers, str))):
raise Exception('Problem with pulse {}: Powers are not specified as a list or 1D array.'.format(self.lasnum))
if not (hasattr(self.times, '__len__') and (not isinstance(self.times, str))):
raise Exception('Problem with pulse {}: Times are not specified as a list or 1D array.'.format(self.lasnum))
# Check that equal numbers of powers and times are specified (one-to-one between powers and times)
if len(self.powers) != len(self.times):
raise Exception("Problem with pulse {}: Powers and times have different numbers of elements.".format(self.lasnum))
# Do some checks for lens and target arrays
if not (hasattr(self.lens, '__len__') and (not isinstance(self.lens, str))):
raise Exception('Problem with beam: Lens is not specified as a 3-element list or array.')
if not (hasattr(self.targ, '__len__') and (not isinstance(self.targ, str))):
raise Exception('Problem with beam: Targ is not specified as a 3-element list or array.')
if len(self.lens) != 3:
raise Exception('Problem with beam: Lens has less or more than 3 elements.')
if len(self.targ) != 3:
raise Exception('Problem with beam: Targ has less or more than 3 elements.')
def makePar(self):
""" Generate a string which can be copied and pasted into a flash.par file. """
## PERFORM CHECK
self.validate() # Don't move forward without validating the laser beam and pulse parameters
## INITIALIZE PAR STRING
par = ''
par += "\n"
if self.laslbl is not None:
par += "###### BEAM/PULSE COMBO #{}: {}\n".format(self.lasnum, self.laslbl)
else:
par += "###### BEAM/PULSE COMBO #{}\n".format(self.lasnum)
par += "#### Automatically-generated by parLaser.py version 0.0.1\n"
## ADD PULSE TO PAR STRING
# Write a pulse header for human readability
if self.laslbl is not None:
par += "## Begin pulse {} ({}):\n".format(self.lasnum, self.laslbl)
else:
par += "## Begin pulse {}:\n".format(self.lasnum)
# Write powers and times
par += "ed_numberOfSections_{} = {}\n".format(self.lasnum, len(self.powers))
for i, power in enumerate(self.powers, start=1):
par += "ed_power_{}_{} = {}\n".format(self.lasnum, i, power)
for i, time in enumerate(self.times, start=1):
par += "ed_time_{}_{} = {}\n".format(self.lasnum, i, time)
## ADD BEAM TO PAR STRING
# Write a beam header for human readability
par += "\n"
if self.laslbl is not None:
par += "## Begin beam {} ({}):\n".format(self.lasnum, self.laslbl)
else:
par += "## Begin beam {}:\n".format(self.lasnum)
# Associate the pulse with this beam
par += "ed_pulseNumber_{} = {}\n".format(self.lasnum, self.lasnum)
# Write lens and target parameters
for i, dim in enumerate(["X", "Y", "Z"]):
par += "ed_lens{}_{} = {}\n".format(dim, self.lasnum, self.lens[i])
for i, dim in enumerate(["X", "Y", "Z"]):
par += "ed_target{}_{} = {}\n".format(dim, self.lasnum, self.targ[i])
# Write all remaining beam parameters (anything not set to 'None')
keys_remaining = set(self.__dict__.keys()) - set(["lasnum", "laslbl", "lens", "targ", "powers", "times"]) # A list of properties of the parLaser object, excluding those items that we just wrote into the par string
for key in keys_remaining:
if getattr(self, key) is not None:
if isinstance(getattr(self,key), str):
par += 'ed_{}_{} = "{}"\n'.format(key, self.lasnum, getattr(self, key))
else:
par += 'ed_{}_{} = {}\n'.format(key, self.lasnum, getattr(self, key))
return par
def write(self, file, mode="a"):
""" Open file and write the par string from this parLaser.
Input parameters:
filename String, filename of to be written
mode String, file open mode to be passed into Python's open() call. Options include 'a' for append (if file exists), 'w' for truncate/overwrite, 'x' for exclusive creation, failing if file exists.
"""
with open(file, mode) as f:
f.write(str(self))
if __name__ == "__main__":
pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
1845,
43,
6005,
13,
9078,
25,
2896,
1127,
257,
1398,
329,
3597,
12855,
13042,
656,
257,
7644,
13,
184... | 2.404948 | 4,850 |
import os
import re
import sys
import time
from subprocess import PIPE, run
from types import ModuleType
from typing import Union
import docker
import requests
import storm.__main__ as storm
from lazycluster import Runtime, RuntimeGroup, RuntimeManager, RuntimeTask
from .config import RUNTIME_DOCKER_IMAGE, RUNTIME_NAMES, WORKSPACE_PORT
def setup_module(module: ModuleType) -> None:
""" setup any state specific to the execution of the given module."""
docker_client = docker.from_env()
for runtime_name in RUNTIME_NAMES:
_start_runtime_container(runtime_name, docker_client)
# Sleep a moment to give all processes time to start within the Workspace containers
time.sleep(15)
for runtime_name in RUNTIME_NAMES:
_setup_ssh_connection_to_runtime(runtime_name)
def teardown_module(module: ModuleType) -> None:
"""teardown any state that was previously setup with a setup_module
method.
"""
_remove_runtimes()
# -------------------------------------------------------------------------
| [
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
640,
198,
6738,
850,
14681,
1330,
350,
4061,
36,
11,
1057,
198,
6738,
3858,
1330,
19937,
6030,
198,
6738,
19720,
1330,
4479,
198,
198,
11748,
36253,
198,
11748,
7007,
198,
11... | 3.375796 | 314 |
# -*- coding:Utf-8 -*-
"""
.. currentmodule:: pylayers.antprop.channelc
VectChannel Class
=================
.. autosummary::
:toctree: generated/
VectChannel.__init__
VectChannel.show3_old
VectChannel.show3
ScalChannel Class
=================
.. autosummary::
:toctree: generated/
ScalChannel.__init__
ScalChannel.info
ScalChannel.imshow
ScalChannel.apply
ScalChannel.applywavC
ScalChannel.applywavB
ScalChannel.applywavA
ScalChannel.doddoa
ScalChannel.wavefig
ScalChannel.rayfig
VectLOS Class
=============
.. autosummary::
:toctree: generated/
VectLOS.__init__
VectLOS.cir
"""
import doctest
import pdb
import numpy as np
import scipy as sp
import pylab as plt
import struct as stru
from pylayers.antprop.channel import *
import pylayers.util.pyutil as pyu
import pylayers.signal.bsignal as bs
import pylayers.util.geomutil as geu
from pylayers.antprop.raysc import GrRay3D
from pylayers.util.project import *
class VectChannel(Ctilde):
    """ container for a vector representation of the propagation channel

    Attributes
    ----------
    Ctt, Cpp, Cpt, Ctp : FUsignal
        polarimetric channel terms (Nray x Nf), built in vec2scal1
    Frt, Frp, Ftt, Ftp : FUsignal
        antenna transfer terms (Nray x Nf)
    fGHz : frequency axis
    tauk : ray delays
    tang : direction of departure (DoD)
    rang : direction of arrival (DoA)

    Methods
    -------
    __init__(S, itx, irx)
        S is a simulation object, itx and irx are indices of tx and rx
    show3_old(id)
        legacy geomview ray visualization, discrete color classes
    show3(seuildb)
        geomview ray visualization, gray level coded by energy in dB
    """
    def __init__(self, S, itx, irx, transpose=False):
        """ build the vector channel for one (tx, rx) link

        Parameters
        ----------
        S : Simulation
            simulation object holding the per-link file dictionaries
        itx : int
            tx number (index into S.dfield / S.dtauk / S.dtang / S.drang)
        irx : int
            rx number
        transpose : bool
            antenna transposition indicator, forwarded to Ctilde.load
        """
        # ..  todo::
        #
        #    a verifier -ravel-
        self.fail = False
        # Resolve the per-link data files (field, delays, DoD, DoA) from the
        # simulation dictionaries into absolute paths under DIRTUD.
        _filefield = S.dfield[itx][irx]
        filefield = pyu.getlong(_filefield,pstruc['DIRTUD'])
        _filetauk = S.dtauk[itx][irx]
        filetauk = pyu.getlong(_filetauk,pstruc['DIRTUD'])
        _filetang = S.dtang[itx][irx]
        filetang = pyu.getlong(_filetang,pstruc['DIRTUD'])
        _filerang = S.drang[itx][irx]
        filerang = pyu.getlong(_filerang,pstruc['DIRTUD'])
        """
        .. todo::
             Revoir Freq
        """
        # old version
        #freq = S.freq()
        #self.freq = freq
        self.fGHz = S.fGHz
        #
        # show3 of gr needs filetra and indoor (not pretty)
        #
        self.filetra = S.dtra[itx][irx]
        self.L = S.L
        #try:
        #    fo = open(filetauk, "rb")
        #except:
        #    self.fail=True
        #    print "file ",filetauk, " is unreachable"
        # decode filetauk
        #if not self.fail:
        #    nray_tauk = unpack('i',fo.read(4))[0]
        #    print "nb rayons dans .tauk : ",nray_tauk
        #    buf = fo.read()
        #    fo.close()
        #    nray = len(buf)/8
        #    print "nb rayons 2: ",nray
        #    self.tauk = ndarray(shape=nray,buffer=buf)
        #    if nray_tauk != nray:
        #        print itx , irx
        #        print nray_tauk - nray
        #self.tauk = self.tauk
        Ctilde.__init__(self)
        self.load(filefield, transpose)
        # decode the angular files (.tang and .rang)
        # #try:
        #     fo = open(filetang, "rb")
        # except:
        #     self.fail=True
        #     print "file ",filetang, " is unreachable"
        # if not self.fail:
        #     nray_tang = unpack('i',fo.read(4))[0]
        #     buf = fo.read()
        #     fo.close()
        #     # coorectif Bug evalfield
        #     tmp = ndarray(shape=(nray_tang,2),buffer=buf)
        #     self.tang = tmp[0:nray,:]
        # try:
        #     fo = open(filerang, "rb")
        # except:
        #     self.fail=True
        #     print "file ",filerang, " is unreachable"
        #
        # if not self.fail:
        #     nray_rang = stru.unpack('i',fo.read(4))[0]
        #     buf = fo.read()
        #     fo.close()
        #     # correctif Bug evalfield
        #     tmp = ndarray(shape=(nray_rang,2),buffer=buf)
        #     self.rang = tmp[0:nray,:]
        #sh = shape(self.Ctt.y)
        """
        .. todo::
            Express Ftt and Ftp in global frame from Tt and ant_tx
            Express Frt and Frp in global frame from Tt and ant_tx
        """
        #self.Ftt = FUsignal(fGHz,np.ones(sh))
        #self.Ftp = FUsignal(fGHz,np.zeros(sh))
        #self.Frt = FUsignal(fGHz,np.ones(sh))
        #self.Frp = FUsignal(fGHz,np.zeros(sh))
    def show3_old(self, id=0):
        """ geomview visualization (old version)

        This function provides a complete ray tracing visualization
        of the channel structure. The rays are color coded in five
        discrete classes as a function of their energy.

        Parameters
        ----------
        id : int
            index of filetra, used to name the generated .list file
        """
        # total polarimetric energy per ray
        E = self.Ctt.energy() + self.Ctp.energy() + \
            self.Cpt.energy() + self.Cpp.energy()
        # sort rays by decreasing energy
        # NOTE(review): argsort/where below are not qualified with np.;
        # presumably provided by the star import from pylayers -- confirm.
        u = argsort(E)
        v = u[-1::-1]
        Es = E[v]
        gr = GrRay3D()
        gr.load(self.filetra, self.L)
        filename = pyu.getlong("grRay" + str(id) + "_col.list",pstruc['DIRGEOM'])
        fo = open(filename, "w")
        fo.write("LIST\n")
        fo.write("{<strucTxRx.off}\n")
        Emax = Es[0]
        # NOTE(review): Es[0] is a scalar energy value, so len(Emax) looks
        # like it raises TypeError -- confirm before relying on this path.
        rayset = len(Emax)
        for i in range(rayset):
            j = v[i]
            r = gr.ray3d[j]
            col = np.array([1, 0, 0])  # red
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        k = i + 1
        # rays between 10% and 50% of Emax -> blue
        rayset = len(where((Es >= 0.1 * Emax) & (Es < 0.5 * Emax))[0])
        for i in range(rayset):
            j = v[i + k]
            r = gr.ray3d[j]
            col = np.array([0, 0, 1])  # blue
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        k = i + 1
        # rays between 1% and 10% of Emax -> cyan
        rayset = len(where((Es >= 0.01 * Emax) & (Es < 0.1 * Emax))[0])
        for i in range(rayset):
            j = v[i + k]
            r = gr.ray3d[j]
            col = np.array([0, 1, 1])  # cyan
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        k = i + 1
        # rays between 0.1% and 1% of Emax -> green
        rayset = len(where((Es >= 0.001 * Emax) & (Es < 0.01 * Emax))[0])
        for i in range(rayset):
            j = v[i + k]
            r = gr.ray3d[j]
            col = np.array([0, 1, 0])  # green
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        k = i + 1
        # remaining rays below 0.1% of Emax -> yellow
        rayset = len(where(Es < 0.001 * Emax)[0])
        for i in range(rayset):
            j = v[i + k]
            r = gr.ray3d[j]
            col = np.array([1, 1, 0])  # yellow
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        fo.close()
        # launch geomview on the generated list file in the background
        chaine = "geomview " + filename + " 2>/dev/null &"
        os.system(chaine)
    def show3(self, seuildb=100):
        """ geomview visualization

        This function provides a complete ray tracing visualization
        of the radio channel. Rays are gray-level coded as a function of
        their energy in dB, and line width follows the order of magnitude.

        Parameters
        ----------
        seuildb : float
            dB dynamic below the maximum used for the gray scale,
            default 100
        """
        E = self.Ctt.energy() + self.Ctp.energy() + \
            self.Cpt.energy() + self.Cpp.energy()
        u = argsort(E)
        v = u[-1::-1]
        Es = E[v]
        gr = GrRay3D()
        gr.load(self.filetra, self.L)
        filename = pyu.getlong("grRay" + str(seuildb) + "_col.list", pstruc['DIRGEOM'])
        fo = open(filename, "w")
        fo.write("LIST\n")
        fo.write("{<strucTxRx.off}\n")
        Emax = Es[0]
        rayset = len(v)
        # gray level from energy in dB, clipped at -seuildb
        db = 20 * np.log10(Es)
        c = 1 - (db > -seuildb) * (db + seuildb) / seuildb
        # line width from the order of magnitude relative to the strongest ray
        app = round(np.log10(Es / Emax))
        lw = app - min(app)
        for i in range(rayset):
            j = v[i]
            r = gr.ray3d[j]
            col = np.array([c[i], c[i], c[i]])
            l = int(lw[i])
            fileray = r.show3(False, False, col, j, l)
            #fileray =r.show3(False,False,col,j)
            fo.write("{< " + fileray + " }\n")
        fo.close()
        chaine = "geomview -nopanel -b 1 1 1 " + filename + " 2>/dev/null &"
        os.system(chaine)
class ScalChannel(object):
    """
    DEPRECATED

    ScalChannel Class :

    The ScalChannel is obtained from combination of the propagation
    channel and the antenna transfer function from both transmitting
    and receiving antennas

    Members
    -------
    H : FUDsignal
        ray transfer functions (nray, nfreq)
    dod :
        direction of departure (rad) [theta_t, phi_t]  nray x 2
    doa :
        direction of arrival (rad) [theta_r, phi_r]  nray x 2
    tauk :
        delay of ray k in ns
    """
    def info(self):
        """ display delay spread bounds and information about H
        """
        #print 'Ftt,Ftp,Frt,Frp'
        #print 'dod,doa,tau'
        #print 'H - FUDsignal '
        print ('tau min , tau max :', min(self.tau), max(self.tau))
        self.H.info()
    def imshow(self):
        """ imshow visualization of the modulus of H
        """
        # NOTE(review): self.H, sh and itau below have no effect / are unused;
        # kept as-is since this class is deprecated.
        self.H
        sh = np.shape(self.H.y)
        itau = np.arange(len(self.tau))
        plt.imshow(abs(self.H.y))
        plt.show()
    def apply(self, W):
        """ Apply a FUsignal W to the ScalChannel.

        Parameters
        ----------
        W : Bsignal.FUsignal

        Returns
        -------
        V : bs.FUDsignal
            product H * W, carrying the original ray delays H.tau0

        Notes
        -----
        It exploits multigrid convolution from Bsignal.

        + W may have a more important number of points and a smaller frequency band.
        + If the frequency band of the waveform exceeds the one of the ScalChannel, a warning is sent.
        + W is a FUsignal whose shape doesn't need to be homogeneous with FUDsignal H
        """
        H = self.H
        U = H * W
        V = bs.FUDsignal(U.x, U.y, H.tau0)
        return(V)
    def applywavC(self, w, dxw):
        """ apply waveform method C

        Parameters
        ----------
        w :
            waveform (time domain)
        dxw :
            time step of the waveform

        Notes
        -----
        The overall received signal is built in time domain
        w is applied on the overall CIR
        """
        H = self.H
        h = H.ft1(500, 1)
        dxh = h.dx()
        # align the time steps: resample whichever signal is coarser
        if (abs(dxh - dxw) > 1e-10):
            if (dxh < dxw):
                # reinterpolate w
                # NOTE(review): interp1d/arange/TUsignal are unqualified;
                # presumably provided by the star imports above -- confirm.
                f = interp1d(w.x, w.y)
                x_new = arange(w.x[0], w.x[-1], dxh)[0:-1]
                y_new = f(x_new)
                w = TUsignal(x_new, y_new)
            else:
                # reinterpolate h
                f = interp1d(h.x, h.y)
                x_new = arange(h.x[0], h.x[-1], dxw)[0:-1]
                y_new = f(x_new)
                h = TUsignal(x_new, y_new)
        ri = h.convolve(w)
        return(ri)
    def applywavB(self, Wgam):
        """ apply waveform method B (time domain)

        Parameters
        ----------
        Wgam :
            waveform including gamma factor

        Returns
        -------
        ri  : TUDsignal
            impulse response for each ray separately

        Notes
        ------
        The overall received signal is built in time domain
        Wgam is applied on each Ray Transfer function

        See Also
        --------
        pylayers.signal.bsignal.TUDsignal.ft1
        """
        #
        # return a FUDsignal
        #
        Y = self.apply(Wgam)
        #ri      = Y.ft1(500,0)
        # fftshift is enabled
        ri = Y.ft1(500, 1)
        return(ri)
    def applywavA(self, Wgam, Tw):
        """ apply waveform method A (frequency domain)

        Parameters
        ----------
        Wgam :
            waveform including gamma factor
        Tw :
            time shift applied to the resulting impulse response

        Notes
        -----
        The overall received signal is built in frequency domain
        """
        Hab = self.H.ft2(0.001)
        HabW = Hab * Wgam
        RI = HabW.symHz(10000)
        ri = RI.ifft(0, 'natural')
        ri.translate(-Tw)
        return(ri)
    def doddoa(self):
        """ DoD / DoA scatter diagram, gray-coded by ray energy in dB
        """
        dod = self.dod
        doa = self.doa
        #
        #col  = 1 - (10*np.log10(Etot)-Emin)/(Emax-Emin)
        Etot = self.H.energy()
        Etot = Etot / max(Etot)
        # rad -> degree conversion factor
        al = 180 / np.pi
        col = 10 * np.log10(Etot)
        print (len(dod[:, 0]), len(dod[:, 1]), len(col[:]))
        plt.subplot(121)
        plt.scatter(dod[:, 0] * al, dod[:, 1] * al, s=15, c=col,
                    cmap=plt.cm.gray_r, edgecolors='none')
        # NOTE(review): colorbar/title are unqualified (not plt.);
        # presumably provided by a star import -- confirm.
        a = colorbar()
        #a.set_label('dB')
        plt.xlabel("$\\theta_t(\degree)$", fontsize=18)
        plt.ylabel('$\phi_t(\degree)$', fontsize=18)
        title('DoD')
        plt.subplot(122)
        plt.scatter(doa[:, 0] * al, doa[:, 1] * al, s=15, c=col,
                    cmap=plt.cm.gray_r, edgecolors='none')
        b = colorbar()
        b.set_label('dB')
        plt.title('DoA')
        plt.xlabel("$\\theta_r(\degree)$", fontsize=18)
        plt.ylabel("$\phi_r (\degree)$", fontsize=18)
        plt.show()
    def wavefig(self, w, Nray=5):
        """ display the response of the first Nray rays to waveform w

        Parameters
        ----------
        w : waveform
        Nray : int
            number of rays to be displayed
        """
        # build W (frequency domain waveform)
        W = w.ft()
        # apply W to the channel
        Y = self.apply(W)
        #r.require('graphics')
        #r.postscript('fig.eps')
        #r('par(mfrow=c(2,2))')
        #Y.fig(Nray)
        y = Y.iftd(100, 0, 50, 0)
        y.fig(Nray)
        #r.dev_off()
        #os.system("gv fig.eps ")
        #y.fidec()
        # On the returned FUsignal:
        #   left: the signal on each ray
        #   right: the same signal, delayed
        #   bottom right: the resulting signal
    def rayfig(self, k, W, col='red'):
        """ build a figure for the k-th ray

        Parameters
        ----------
        k : ray index
        W : waveform    (FUsignal)

        Notes
        -----
        W is applied on the k-th ray and the received signal is built in time domain
        """
        # get the kth Ray Transfer function
        Hk = bs.FUDsignal(self.H.x, self.H.y[k, :])
        dxh = Hk.dx()
        dxw = W.dx()
        w0 = W.x[0]    # fmin W
        hk0 = Hk.x[0]  # fmin Hk
        # zero-pad Hk on the left so that its start matches (or slightly
        # precedes) the start of W
        if w0 < hk0:
            # NOTE(review): this rebinds the (presumably numpy) name 'np'
            # locally to a sample count -- confirm this shadowing is intended.
            np = ceil((hk0 - w0) / dxh)
            hk0_new = hk0 - np * dxh
            x = arange(hk0_new, hk0 + dxh, dxh)[0:-1]
            Hk.x = hstack((x, Hk.x))
            Hk.y = hstack((zeros(np), Hk.y))
        # align the frequency steps of Hk and W
        # NOTE(review): if the steps are already equal, Wk is never assigned
        # and the code below raises NameError -- confirm.
        if (abs(dxh - dxw) > 1e-10):
            if (dxh < dxw):
                # reinterpolate w
                print (" resampling w")
                x_new = arange(W.x[0], W.x[-1] + dxh, dxh)[0:-1]
                Wk = W.resample(x_new)
                dx = dxh
            else:
                # reinterpolate h
                print (" resampling h")
                x_new = arange(Hk.x[0], Hk.x[-1] + dxw, dxw)[0:-1]
                Hk = Hk.resample(x_new)
                dx = dxw
                Wk = W
        # make sure Hk.x[0] == Wk.x[0]
        #  if Wk.x[0]!=Hk.x[0]:
        #      x=arange(Wk.x[0],Hk.x[0],dx)
        #      if Hk.x[0]!=x[0]:
        #          Hk.x=hstack((x,Hk.x[1:]))
        #          nz=len(x)
        #          Hk.y=hstack((zeros(nz),Hk.y))
        #      else:
        #          Hk.x=hstack((x,Hk.x[0:]))
        #          nz=len(x)
        #          Hk.y=hstack((zeros(nz),Hk.y))
        #
        self.Hk = Hk
        self.Wk = Wk
        Rk = Hk * Wk
        self.Rk = Rk
        rk = Rk.iftshift()
        plot(rk.x, rk.y, col)
        return(rk)
| [
2,
532,
9,
12,
19617,
25,
18274,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
492,
1459,
21412,
3712,
279,
2645,
6962,
13,
415,
22930,
13,
17620,
66,
198,
198,
53,
478,
29239,
5016,
198,
4770,
28,
628,
198,
492,
44619,
388,
6874,
37... | 1.796992 | 8,911 |
class Error(Exception):
    """Root exception type for errors raised by this module."""
class InputError(Error):
    """Raised for errors in the input.

    Attributes:
        expression -- input expression in which the error occurred
        message -- explanation of the error
    """
#The MIT License
#
#Copyright (c) 2011, Alexandria Consulting LLC
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE. | [
4871,
13047,
7,
16922,
2599,
198,
220,
220,
220,
37227,
14881,
1398,
329,
13269,
287,
428,
8265,
526,
15931,
198,
220,
220,
220,
1208,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
4871,
23412,
12331,
7,
12331,
2599,
198,
220,
2... | 3.558897 | 399 |
import json
import click
import pickle
from ledgertools.read import read_file
from ledgertools.version import __version__
@click.group(help='CLI tools for working with ledger')
@click.version_option(version=__version__, prog_name='Ledger Tools')
@cli.command(help='Import ledger style file')
@click.option('-f', '--file', 'in_file', help='Input file name', prompt='Input file name')
@click.option('-n', '--name', default='transactions.json', help='Output file name')
@click.option('-p', '--pickle', 'as_pickle', is_flag=True, help='Output as pickle file')
@click.option('--run-checks', 'run_checks', is_flag=True, help='Run standard checks on data')
@click.option('--stdout', is_flag=True, help='Output to stdout, supresses output files')
| [
11748,
33918,
198,
11748,
3904,
198,
11748,
2298,
293,
198,
198,
6738,
2957,
70,
861,
10141,
13,
961,
1330,
1100,
62,
7753,
198,
6738,
2957,
70,
861,
10141,
13,
9641,
1330,
11593,
9641,
834,
198,
198,
31,
12976,
13,
8094,
7,
16794,
... | 3.152542 | 236 |
#!/usr/bin/env python3
import psycopg2
import os
# Database Name
DB_NAME = "news"

# Report output filename
FILENAME = "log_analysis.txt"

# SQL queries, run against pre-built views in the news database
first_query = "select title,views from view_article limit 3"
second_query = "select * from view_author"
third_query = "select * from view_error_log where percent_error > 1"

# Report sections: each dict holds a section title and, later, its rows.
first_query_dict = dict()
first_query_dict['title'] = """\n1. The 3 most popular articles \
of all time are:\n"""
second_query_dict = dict()
second_query_dict['title'] = """\n2. The most popular article \
authors of all time are:\n"""
third_query_dict = dict()
# FIX: the original used four quotes (""""), which injected a stray
# literal '"' at the beginning of this section title.
third_query_dict['title'] = """\n3. Days with more than 1% of \
request that lead to an error:\n"""
def connect_db_get_query_result(query):
    """Open a connection to the news database, run *query*, return all rows."""
    connection = psycopg2.connect(database=DB_NAME)
    cursor = connection.cursor()
    cursor.execute(query)
    rows = cursor.fetchall()
    connection.close()
    return rows
def display_query_result(query_result):
    """Print one report section and append it to FILENAME.

    Expects a dict with 'title' (str) and 'results' (rows whose first two
    columns are a label and a view count).
    """
    title = query_result['title']
    print(title)
    with open(FILENAME, 'a') as report:
        report.write(title)
        for row in query_result['results']:
            line = '\t' + str(row[0]) + ' ---> ' + str(row[1]) + ' views' + '\n'
            report.write(line)
            print(line, end='')
def display_request_error_result(query_result):
    """Print the error-rate report section and append it to FILENAME.

    Expects a dict with 'title' (str) and 'results' (rows whose first two
    columns are a day and an error percentage).
    """
    title = query_result['title']
    print(title)
    with open(FILENAME, 'a') as report:
        report.write(title)
        for row in query_result['results']:
            line = '\t' + str(row[0]) + ' ---> ' + str(row[1]) + ' %' + '\n'
            report.write(line)
            print(line, end='')
# main starts
if __name__ == "__main__":
    print("Fetching the data from the Database...")
    # remove any stale report so the append-mode writers start fresh
    if os.path.isfile(FILENAME):
        os.remove(FILENAME)
    # stores query result
    first_query_dict['results'] = connect_db_get_query_result(first_query)
    second_query_dict['results'] = connect_db_get_query_result(second_query)
    third_query_dict['results'] = connect_db_get_query_result(third_query)
    # print formatted output (also appended to FILENAME)
    display_query_result(first_query_dict)
    display_query_result(second_query_dict)
    display_request_error_result(third_query_dict)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
17331,
22163,
70,
17,
198,
11748,
28686,
198,
198,
2,
24047,
6530,
198,
11012,
62,
20608,
796,
366,
10827,
1,
198,
198,
2,
7066,
12453,
198,
46700,
1677,
10067,
796,
... | 2.610405 | 865 |
#!/usr/bin/python 
# NOTE: this script is Python 2 (print statements, dict.iteritems).
import sqlite3, json, os
import logging, sys
from collections import defaultdict
from nameparser import HumanName

#
# This script moves candidate information from filename.json into the sqlite3 database
#
# !!! UPDATE HERE WHENEVER THE DATABASE TABLE SCHEMA CHANGE !!!
#
# The order matter when we want to insert the value, current schema:
# CREATE TABLE candidates (firstName TEXT, lastName TEXT, prefix TEXT, suffix TEXT, party TEXT, chamber TEXT, state TEXT, district INTEGER, incumbent INTEGER, source TEXT, bioguideId TEXT PRIMARY KEY UNIQUE, fecId TEXT UNIQUE, website TEXT, email TEXT UNIQUE, facebook TEXT UNIQUE, twitter TEXT UNIQUE, youtube TEXT UNIQUE, img_src TEXT, questionnaire_response TEXT, gen_election_candidate INTEGER DEFAULT (0), duplicate INTEGER, candidate_url TEXT UNIQUE);

logging.basicConfig(stream=sys.stderr,level=logging.DEBUG)

# Locate the sqlite database relative to this script's working directory.
dbpath = '../../db/db.sqlite3'
if not (dbpath and os.path.isfile(dbpath)):
    print 'db file not found'
    exit()
try:
    db = sqlite3.connect(dbpath)
    c = db.cursor()
except sqlite3.Error:
    # NOTE(review): if connect() itself failed, 'db' is unbound here and
    # db.close() raises NameError -- confirm intended error handling.
    print 'sqlite3 error'
    db.close()

# Path of the scraped-candidates JSON comes from the command line.
#jsonpath = '/root/CongressionalGuide/app/candidates/import.json'
jsonpath = str(sys.argv[1])
if not (jsonpath and os.path.isfile(jsonpath)):
    print 'candidates json file not found'
    exit()
congressman = json.load(open(jsonpath))

# check first/last name pair
# if exists, update_query
# else insert_query
update_query = 'UPDATE candidates SET candidate_url = ?, img_src = ?, facebook = ?, twitter = ?, website = ?, youtube = ?, gen_election_candidate = ?, incumbent = ?, district = ? where firstName like ? and lastName like ? and state = ?'
#update_query = 'UPDATE candidates SET candidate_url = ?, img_src = ?, facebook = ?, twitter = ?, website = ?, youtube = ?, source = ?, gen_election_candidate = ?, incumbent = ? where firstName like ? and lastName like ? and state = ? and district = ?'
# !!! UPDATE HERE WHENEVER THE DATABASE TABLE SCHEMA CHANGE !!!
insert_query = 'INSERT INTO candidates VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)'

for human in congressman:
    # Per-candidate defaults; each value is a 1-tuple so the fields can be
    # concatenated directly into the query parameter tuples below.
    firstName=(None,)
    lastName=(None,)
    prefix=(None,)
    suffix=(None,)
    party=(None,)
    chamber=(None,)
    state=(None,)
    district=(0,)
    incumbent=(None,)
    bioguideId=(None,)
    fecId=(None,)
    source=('ballotpedia',)
    website=(None,)
    email=(None,)
    facebook=(None,)
    twitter=(None,)
    youtube=(None,)
    img_src=(None,)
    questionnaire_response=(None,)
    #TODO: NH primary election 9/13, their candidate will have null value here
    gen_election_candidate=(None,)
    candidate_url=(None,)
    duplicate=(None,)
    mesg=''
    # Map the scraped JSON keys onto the table columns.
    for k,v in human.iteritems():
        mesg += '(k,v)=(' + k + ' ,' + str(v) + ')\n'
        if k == 'name':
            v = v.replace('%27','\'') #clean up scraped single quote issue
            if v.endswith(')'): #handle name like 'Bill Otto (Missouri)'
                lp = v.find('(')
                v = v[:lp-1]
            v = v.replace('%22','\"') #change nickname parenthesis to quotes
            fullName = HumanName(v)
            prefix = fullName.title,
            if len(fullName.first) < 3: # if only 1st initial, then need to include middle name
                firstName = fullName.first + ' ' + fullName.middle,
            else:
                firstName = fullName.first,
            lastName = fullName.last,
            suffix = fullName.suffix,
        elif k == 'party':
            # keep only the first letter of the party name
            party = v[0],
        elif k == 'dist':
            # keep only the digits of the district string
            dl = [int(d) for d in v if d.isdigit()]
            if len(dl) != 0:
                district = int(''.join(map(str, dl))),
        elif k == 'camp':
            website = v,
        elif k == 'twtr':
            # strip everything up to and including 'twitter.com/'
            twitter = v[v.find('twitter.com')+len('twitter.com')+1:],
        elif k == 'fb':
            facebook = v,
        elif k == 'state':
            # NOTE(review): getStateAbbr is not defined in this file --
            # presumably defined/imported elsewhere; confirm.
            state = getStateAbbr(v),
        elif k == 'pic':
            img_src = v,
        elif k == 'chamber':
            chamber = v,
        elif k == 'youtube':
            youtube = v,
        elif k == 'incumbent':
            incumbent = v,
        elif k == 'gen_election_candidate':
            gen_election_candidate = v,
        elif k == 'url':
            candidate_url = v,
    logging.debug(mesg)
    # LIKE patterns for fuzzy first/last name matching
    match_firstName = '%'+firstName[0]+'%',
    match_lastName = '%'+lastName[0]+'%',
    # !!! UPDATE HERE WHENEVER THE DATABASE TABLE SCHEMA CHANGE !!!
    insert_values = (firstName + lastName + prefix + suffix + party + chamber + state + district + incumbent + source + bioguideId + fecId + website + email + facebook + twitter + youtube + img_src + questionnaire_response + gen_election_candidate + duplicate + candidate_url)
    update_values = (candidate_url + img_src + facebook + twitter + website + youtube + gen_election_candidate + incumbent + district + match_firstName + match_lastName + state)
    #update_values = (candidate_url + img_src + facebook + twitter + website + youtube + source + gen_election_candidate + incumbent + match_firstName + match_lastName + state + district)
    # Match with existing Sunlight data: lastName, first word of firstName, state and district
    # no district for senate
    c.execute('SELECT count(*) FROM candidates where firstName like ? and lastName like ? and state = ? ;', match_firstName + match_lastName + state )
    #c.execute('SELECT count(*) FROM candidates where firstName like ? and lastName like ? and state = ? and district = ?;', match_firstName + match_lastName + state + district)
    obj = c.fetchone()
    if obj[0]:
        logging.info('update_values: %s', update_values)
        c.execute(update_query, update_values)
    else:
        logging.info('insert_values: %s', insert_values)
        c.execute(insert_query, insert_values)
    logging.info('[OK]\n\n')
db.commit()
db.close()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
220,
198,
11748,
44161,
578,
18,
11,
33918,
11,
28686,
198,
11748,
18931,
11,
25064,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
1438,
48610,
1330,
5524,
5376,
198,
198,
2,
198,
2,
770,
4226,
... | 2.839487 | 1,950 |
# Python solution for 'Find all non-consecutive numbers' codewars question.
# Level: 7 kyu
# Tags: FUNDAMENTALS AND ARRAYS.
# Author: Jack Brokenshire
# Date: 05/08/2020
import unittest
def all_non_consecutive(arr):
    """
    Find every element of *arr* that is not exactly one larger than its
    predecessor. The first element gets a pass and is never reported.

    :param arr: An array of integers.
    :return: A list of dicts, one per offender, with keys ``i`` (the index
        of the non-consecutive number) and ``n`` (the number itself).
    """
    offenders = []
    for idx, (prev, cur) in enumerate(zip(arr, arr[1:]), start=1):
        if cur != prev + 1:
            offenders.append({'i': idx, 'n': cur})
    return offenders
class TestAllNonConsecutive(unittest.TestCase):
    """Unit tests for the 'all_non_consecutive' function.

    FIX: the original class declared no test methods, so the suite always
    passed vacuously; real assertions are added below.
    """

    def test_empty_and_single(self):
        """Empty input and a single element report nothing."""
        self.assertEqual(all_non_consecutive([]), [])
        self.assertEqual(all_non_consecutive([7]), [])

    def test_fully_consecutive(self):
        """A strictly consecutive run has no offenders."""
        self.assertEqual(all_non_consecutive([1, 2, 3, 4]), [])

    def test_reports_index_and_value(self):
        """Each gap is reported with the offender's index and value."""
        self.assertEqual(
            all_non_consecutive([1, 2, 3, 4, 6, 7, 8, 15, 16]),
            [{'i': 4, 'n': 6}, {'i': 7, 'n': 15}])
if __name__ == "__main__":
unittest.main()
| [
2,
11361,
4610,
329,
705,
16742,
477,
1729,
12,
1102,
4552,
425,
3146,
6,
14873,
413,
945,
1808,
13,
198,
2,
5684,
25,
767,
479,
24767,
198,
2,
44789,
25,
29397,
35,
2390,
3525,
23333,
5357,
5923,
3861,
16309,
13,
198,
2,
6434,
25... | 2.927215 | 316 |
# Copyright 2016 OVH SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.linux import tc_lib
from neutron.services.qos import qos_consts
from neutron.tests import base
DEVICE_NAME = "tap_device"
KERNEL_HZ_VALUE = 1000
BW_LIMIT = 2000 # [kbps]
BURST = 100 # [kbit]
LATENCY = 50 # [ms]
TC_QDISC_OUTPUT = (
'qdisc tbf 8011: root refcnt 2 rate %(bw)skbit burst %(burst)skbit '
'lat 50.0ms \n') % {'bw': BW_LIMIT, 'burst': BURST}
TC_FILTERS_OUTPUT = (
'filter protocol all pref 49152 u32 \nfilter protocol all pref '
'49152 u32 fh 800: ht divisor 1 \nfilter protocol all pref 49152 u32 fh '
'800::800 order 2048 key ht 800 \n match 00000000/00000000 at 0\n '
'police 0x1e rate %(bw)skbit burst %(burst)skbit mtu 2Kb action \n'
'drop overhead 0b \n ref 1 bind 1'
) % {'bw': BW_LIMIT, 'burst': BURST}
| [
2,
15069,
1584,
440,
53,
39,
35516,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
42... | 2.714559 | 522 |
import pygame
import cmg
from cmg.color import Colors
from cmg.color import Color
from cmg import math
from study_tool import card_attributes
from study_tool.card import Card
from study_tool.card_attributes import *
from study_tool.entities.entity import Entity
from study_tool.russian.word import AccentedText
from study_tool.russian.word import Word
class CardAttributeBox(Entity):
    """A small labeled box that displays a single card attribute."""

    def __init__(self, attribute, short=False, font=None):
        """Create the box.

        :param attribute: value convertible to CardAttributes.
        :param short: if True, show the attribute's short value instead of
            its display name.
        :param font: optional cmg.Font; defaults to a 24pt font.
        """
        super().__init__()
        self.__attribute = CardAttributes(attribute)
        self.__short = short
        self.__text = ""
        self.__font = font if font is not None else cmg.Font(24)
        self.__padding = cmg.Vec2(8, 6)

    def on_create(self):
        """Resolve the display text and size the box to fit it."""
        if self.__short:
            label = self.__attribute.value
        else:
            label = card_attributes.get_card_attribute_display_name(
                self.__attribute)
        self.__text = label
        self.set_size(self.__font.measure(label) + (self.__padding * 2))

    def update(self, dt):
        """No per-frame logic for this entity."""

    def draw(self, g):
        """Render the colored background and the centered attribute text."""
        background = Colors.BLACK
        if self.__attribute in card_attributes.ATTRIBUTE_COLORS:
            background = card_attributes.ATTRIBUTE_COLORS[self.__attribute]
        g.fill_rect(self.get_rect(), color=background)
        center = self.get_center()
        g.draw_accented_text(center.x,
                             center.y,
                             text=self.__text,
                             font=self.__font,
                             color=Colors.WHITE,
                             align=cmg.Align.Centered)
| [
11748,
12972,
6057,
198,
11748,
269,
11296,
198,
6738,
269,
11296,
13,
8043,
1330,
29792,
198,
6738,
269,
11296,
13,
8043,
1330,
5315,
198,
6738,
269,
11296,
1330,
10688,
198,
6738,
2050,
62,
25981,
1330,
2657,
62,
1078,
7657,
198,
6738... | 2.132479 | 936 |
import os
import json
from pprint import pprint, pformat
from mpi4py import MPI
import math
from nas4candle.nasapi.evaluator import Evaluator
from nas4candle.nasapi.search import util, Search
from nas4candle.nasapi.search.nas.agent import nas_ppo_sync_a3c
logger = util.conf_logger('nas4candle.nasapi.search.nas.ppo_a3c_sync')
LAUNCHER_NODES = int(os.environ.get('BALSAM_LAUNCHER_NODES', 1))
WORKERS_PER_NODE = int(os.environ.get('nas4candle.nasapi_WORKERS_PER_NODE', 1))
class NasPPOSyncA3C(Search):
"""Neural Architecture search using proximal policy gradient with synchronous optimization.
"""
@staticmethod
if __name__ == "__main__":
args = NasPPOSyncA3C.parse_args()
search = NasPPOSyncA3C(**vars(args))
search.main()
| [
11748,
28686,
198,
11748,
33918,
198,
6738,
279,
4798,
1330,
279,
4798,
11,
279,
18982,
198,
6738,
285,
14415,
19,
9078,
1330,
4904,
40,
198,
11748,
10688,
198,
198,
6738,
25221,
19,
46188,
293,
13,
24716,
15042,
13,
18206,
84,
1352,
... | 2.6 | 290 |
# GetScraped V2.5.1
# github.com/kendalled
### possible regexp: [^\s@<>]+@[^\s@<>]+\.[^\s@<>]+
### Backup regexp: '[\w.]+@[\w.]+'
import requests
import re
import unicodecsv as csv
import pandas as pd
# Negative Email Endings
#TODO: remove %20 from beginning
# NOTE(review): 'negatives' is not referenced in this file -- presumably
# consumed by get_email (not defined here); confirm.
negatives = ['domain.net','group.calendar.google','youremail.com','sample.com','yoursite.com','internet.com','companysite.com','sentry.io','domain.xxx','sentry.wixpress.com', 'example.com', 'domain.com', 'address.com', 'xxx.xxx', 'email.com', 'yourdomain.com']
# Reads website column, initializes counter variable
df = pd.read_csv('./Arab.csv')
# de-duplicate the website list while preserving order
urls = list(dict.fromkeys(df['website']))
counter = 0
final_list = []
print_list = []
# Set Response Headers
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
if __name__ == "__main__":
    for link in urls:
        print(link)
        # NOTE(review): get_email is not defined in this file --
        # presumably lost when the file was extracted; confirm.
        email = get_email(link)
        if(email):
            for mail in [elem.lower() for elem in email]:
                final_list.append(mail)
            counter += len(email)
        # stop once roughly 2000 addresses have been collected
        if(counter >= 2001):
            break
        print('------------------------')
        print(str(counter) + ' Email(s) found so far.')
        print('------------------------')
    # write the de-duplicated addresses to CSV, one per row
    with open('Anaheim-CA-Emails.csv', 'wb') as csvfile:
        final_list = list(set(final_list))
        for i in final_list:
            print_list.append({'email': i})
        fieldnames = ['email']
        writer = csv.DictWriter(csvfile, fieldnames = fieldnames, quoting=csv.QUOTE_ALL)
        writer.writeheader()
        for data in print_list:
            writer.writerow(data)
    print('File written!')
| [
2,
3497,
3351,
31951,
569,
17,
13,
20,
13,
16,
198,
2,
33084,
13,
785,
14,
74,
437,
4262,
198,
21017,
1744,
40364,
79,
25,
685,
61,
59,
82,
31,
27,
37981,
10,
31,
58,
61,
59,
82,
31,
27,
37981,
10,
59,
3693,
61,
59,
82,
31... | 2.361528 | 733 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628,
628,
628,
628,
628,
628,
628,
628,
628,
198
] | 2.466667 | 15 |
"""Test the code status enum."""
from astoria.common.code_status import CodeStatus
def test_code_status_enum() -> None:
    """Test that CodeStatus defines exactly the expected members."""
    expected = {
        "STARTING": "code_starting",
        "RUNNING": "code_running",
        "KILLED": "code_killed",
        "FINISHED": "code_finished",
        "CRASHED": "code_crashed",
    }
    assert {member.name: member.value for member in CodeStatus} == expected
| [
37811,
14402,
262,
2438,
3722,
33829,
526,
15931,
198,
198,
6738,
6468,
7661,
13,
11321,
13,
8189,
62,
13376,
1330,
6127,
19580,
628,
198,
4299,
1332,
62,
8189,
62,
13376,
62,
44709,
3419,
4613,
6045,
25,
198,
220,
220,
220,
37227,
14... | 3 | 160 |
import time
from pylmcp import Object
from pylmcp.server import Server
from pylmcp.uxas import AutomationRequestValidator, UxASConfig
# Create bridge configuration
bridge_cfg = UxASConfig()
bridge_cfg += AutomationRequestValidator()
with Server(bridge_cfg=bridge_cfg) as server:
    try:
        # Build a randomized ServiceStatus LMCP message and send it
        # through the running UxAS bridge.
        obj = Object(class_name='ServiceStatus', StatusType=2,
                     randomize=True)
        server.send_msg(obj)
        time.sleep(1)
        print("OK")
    finally:
        print("Here")
| [
11748,
640,
198,
6738,
279,
2645,
76,
13155,
1330,
9515,
198,
6738,
279,
2645,
76,
13155,
13,
15388,
1330,
9652,
198,
6738,
279,
2645,
76,
13155,
13,
2821,
292,
1330,
17406,
341,
18453,
47139,
1352,
11,
471,
87,
1921,
16934,
198,
198,... | 2.48 | 200 |
import pytest
import numpy as np
import numpy.testing as npt
from lenstronomy.Util import constants as const
from hierarc.Likelihood.LensLikelihood.td_mag_likelihood import TDMagLikelihood
from hierarc.Likelihood.LensLikelihood.td_mag_magnitude_likelihood import TDMagMagnitudeLikelihood
from lenstronomy.Util.data_util import magnitude2cps
# Allow running this test module directly as a script via pytest.
if __name__ == '__main__':
    pytest.main()
| [
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
299,
32152,
13,
33407,
355,
299,
457,
198,
6738,
18896,
301,
1313,
9145,
13,
18274,
346,
1330,
38491,
355,
1500,
198,
6738,
13550,
5605,
13,
7594,
11935,
13,
49479,
759... | 3.137097 | 124 |
import numpy as np
from math import log
day_in_seconds = 24. * 60. * 60.
def ecdf(vals, x, eps=1e-12):
    """
    Empirical CDF: P(X <= x) estimated over the sample ``vals``.

    ``eps`` guards the division against an empty sample.
    """
    hits = np.sum(vals <= x, dtype=np.float32)
    total = np.shape(vals)[0] + eps
    return hits / total
def format_runtime(runtime):
    """Render a duration given in seconds as seconds/minutes/hours/days."""
    minutes = runtime / 60
    hours = runtime / 3600
    days = runtime / (3600 * 24)
    return '{}s = {}m = {}h = {}d'.format(runtime, minutes, hours, days)
11748,
299,
32152,
355,
45941,
198,
6738,
10688,
1330,
2604,
198,
198,
820,
62,
259,
62,
43012,
796,
1987,
13,
1635,
3126,
13,
1635,
3126,
13,
628,
198,
198,
4299,
9940,
7568,
7,
12786,
11,
2124,
11,
304,
862,
28,
16,
68,
12,
1065... | 2.421687 | 166 |
import collections
import pandas
import numpy
import itertools
if __name__ == "__main__":
main() | [
11748,
17268,
201,
198,
11748,
19798,
292,
201,
198,
11748,
299,
32152,
201,
198,
11748,
340,
861,
10141,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
201,
198,
220,
220,
220,
1388,
3419
] | 2.74359 | 39 |
# This part will be responsible for downloading the zotero collected PDFs | [
2,
770,
636,
481,
307,
4497,
329,
22023,
262,
1976,
313,
3529,
7723,
12960,
82
] | 4.866667 | 15 |
from flask import Flask
import requests, json
from flask import render_template
from RepeatedTimer import RepeatedTimer
from flask_socketio import SocketIO, emit
from threading import Thread
from gevent import monkey as curious_george
import redis
import datetime as dt
from rejson import Client, Path
from SetEncoder import SetEncoder
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
r1 = redis.StrictRedis(host='localhost', port=6379, db=1, charset="utf-8", decode_responses=True)
r2 = redis.StrictRedis(host='localhost', port=6379, db=2, charset="utf-8", decode_responses=True)
r3 = redis.StrictRedis(host='localhost', port=6379, db=3, charset="utf-8", decode_responses=True)
r4 = redis.StrictRedis(host='localhost', port=6379, db=4, charset="utf-8", decode_responses=True)
async_mode = "gevent"
curious_george.patch_all(ssl=False)
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socket_ = SocketIO(app,async_mode=async_mode)
thread = None
@app.route("/")
@socket_.on('start_process', namespace='/start')
if __name__ == "__main__":
socket_.run(app, debug=True) | [
6738,
42903,
1330,
46947,
198,
11748,
7007,
11,
33918,
198,
6738,
42903,
1330,
8543,
62,
28243,
198,
6738,
30558,
515,
48801,
1330,
30558,
515,
48801,
198,
6738,
42903,
62,
44971,
952,
1330,
47068,
9399,
11,
27588,
198,
6738,
4704,
278,
... | 2.905512 | 381 |
'''Navier-Stokes and Euler equations solver implemented in Python
Triple periodic spectral method in space
4th order Runge–Kutta method in time
Author: Jia Cheng Hu (University of Waterloo, Canada)
'''
from pysif.spectral_method import *
| [
7061,
6,
45,
19492,
12,
1273,
3369,
290,
412,
18173,
27490,
1540,
332,
9177,
287,
11361,
198,
198,
14824,
1154,
27458,
37410,
2446,
287,
2272,
198,
19,
400,
1502,
5660,
469,
1906,
42,
315,
8326,
2446,
287,
640,
198,
198,
32,
12866,
... | 3.394366 | 71 |
import os
import sys
import site
# Add virtualenv site packages
site.addsitedir('/home/attendance/.virtualenvs/attendance/lib/python3.5/site-packages')
sys.path.insert(0, '/var/www/html/class-list')
# Fired up virtualenv before include application
activate_this = '/home/attendance/.virtualenvs/attendance/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
from app import create_app
config_name = os.getenv('FLASK_CONFIG')
if not config_name:
config_name = 'development'
application = create_app(config_name)
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
2524,
198,
198,
2,
3060,
7166,
24330,
2524,
10392,
198,
15654,
13,
2860,
82,
863,
343,
10786,
14,
11195,
14,
1078,
437,
590,
11757,
32844,
268,
14259,
14,
1078,
437,
590,
14,
8019,
14,
2941... | 3 | 181 |
#
# Autogenerated by Thrift Compiler (0.12.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
# HELPER FUNCTIONS AND STRUCTURES
class StoreReview_args(object):
"""
Attributes:
- req_id
- review
- carrier
"""
all_structs.append(StoreReview_args)
StoreReview_args.thrift_spec = (
None, # 0
(1, TType.I64, 'req_id', None, None, ), # 1
(2, TType.STRUCT, 'review', [Review, None], None, ), # 2
(3, TType.MAP, 'carrier', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
)
class StoreReview_result(object):
"""
Attributes:
- se
"""
all_structs.append(StoreReview_result)
StoreReview_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'se', [ServiceException, None], None, ), # 1
)
class ReadReviews_args(object):
"""
Attributes:
- req_id
- review_ids
- carrier
"""
all_structs.append(ReadReviews_args)
ReadReviews_args.thrift_spec = (
None, # 0
(1, TType.I64, 'req_id', None, None, ), # 1
(2, TType.LIST, 'review_ids', (TType.I64, None, False), None, ), # 2
(3, TType.MAP, 'carrier', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
)
class ReadReviews_result(object):
"""
Attributes:
- success
- se
"""
all_structs.append(ReadReviews_result)
ReadReviews_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [Review, None], False), None, ), # 0
(1, TType.STRUCT, 'se', [ServiceException, None], None, ), # 1
)
fix_spec(all_structs)
del all_structs
| [
2,
198,
2,
5231,
519,
877,
515,
416,
16283,
2135,
3082,
5329,
357,
15,
13,
1065,
13,
15,
8,
198,
2,
198,
2,
8410,
5626,
48483,
4725,
48481,
7013,
15986,
311,
11335,
14603,
7013,
35876,
25003,
7013,
15986,
8410,
2751,
198,
2,
198,
... | 2.467598 | 787 |
from __future__ import division
import cPickle
import numpy as np
import math
import random
import os as os
from scipy import misc
from skimage import color
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
#import matplotlib.pyplot as plot1
#def graph_plot(x, y, xlab, ylab):
#plot1.figure(num = 1, figsize =(15,10), dpi = 72)
#plot1.subplot(321)
#plot1.scatter(CS_Score,Res_OH)
# plot1.plot(x, y, 'g^')
# plot1.xlabel(xlab)
# plot1.ylabel(ylab)
# plot1.show()
if __name__ == "__main__":
print ("UBitName = jruvikam")
print ("personNumber = 50207613")
pickleFile = open('mnist.pkl','rb')
train_set_MNIST, valid_set_MNIST, test_set_MNIST = cPickle.load(pickleFile)
train_x_MNIST = train_set_MNIST[0]
train_target_MNIST = train_set_MNIST[1]
train_t_MNIST = oneHotEncoding(train_target_MNIST)
valid_x_MNIST = valid_set_MNIST[0]
valid_target_MNIST = valid_set_MNIST[1]
test_x_MNIST = test_set_MNIST[0]
test_target_MNIST = test_set_MNIST[1]
b = 1
# TUNE HYPERPARAMETER ETA
w_logRegress_MNIST = logRegression(train_x_MNIST, train_t_MNIST, b)
yOneHot_validate_MNIST, y_value_validate_MNIST, accuracy_validate_MNIST = logRegressionValidate(valid_x_MNIST, valid_target_MNIST, w_logRegress_MNIST, b)
yOneHot_test_MNIST, y_value_test_MNIST = logRegressionTest(test_x_MNIST, w_logRegress_MNIST, b)
print ("accuracy MNIST validation:")
print (accuracy_validate_MNIST)
path = "USPSdata/Numerals/"
count = 0
validate_x_USPS = np.zeros((1,784))
target_set_USPS = np.zeros((1,1))
print (np.shape(validate_x_USPS))
for i in range(10):
new_path = path
new_path = new_path + str(i) + "/"
for name in os.listdir(new_path):
final_path = new_path
final_path = final_path + name
# print count
#print final_path
if ".list" not in name:
if (name != "Thumbs.db"):
# if count < 5:
img = misc.imread(final_path)
gray_img = color.rgb2gray(img)
resized_img = misc.imresize(gray_img,(28,28))
# print "resized img:"
# print len(resized_img)
# print np.shape(resized_img)
flat_img = np.ravel(resized_img)
validate_x_USPS = np.insert(validate_x_USPS,len(validate_x_USPS),flat_img,axis=0)
target_set_USPS = np.insert(target_set_USPS,len(target_set_USPS),int(i),axis=0)
#print "resized img:"
#print len(flat_img)
#print np.shape(flat_img)
count = count + 1
if((count%1000) == 0):
print (count)
# else:
# break
print ("count:")
print (count)
validate_x_USPS = np.delete(validate_x_USPS,0,axis=0)
target_set_USPS = np.delete(target_set_USPS,0,axis=0)
yOneHot_validate_USPS, y_value_validate_USPS, accuracy_validate_USPS = logRegressionValidate(validate_x_USPS, target_set_USPS, w_logRegress_MNIST, b)
path = "USPSdata/Test/"
count = 0
test_x_USPS = np.zeros((1,784))
for i in range(10):
new_path = path
for name in os.listdir(new_path):
final_path = new_path
final_path = final_path + name
# print count
#print final_path
if ".list" not in name:
if (name != "Thumbs.db"):
# if count < 5:
img = misc.imread(final_path)
gray_img = color.rgb2gray(img)
resized_img = misc.imresize(gray_img,(28,28))
# print "resized img:"
# print len(resized_img)
# print np.shape(resized_img)
flat_img = np.ravel(resized_img)
test_x_USPS = np.insert(test_x_USPS,len(validate_x_USPS),flat_img,axis=0)
#print "resized img:"
#print len(flat_img)
#print np.shape(flat_img)
count = count + 1
if((count%1000) == 0):
print (count)
# else:
# break
print ("count:")
print (count)
test_x_USPS = np.delete(test_x_USPS,0,axis=0)
yOneHot_test_USPS, y_value_test_USPS = logRegressionTest(test_x_USPS, w_logRegress_MNIST, b)
cnn()
print ("accuracy USPS validation:")
print (accuracy_validate_USPS)
print ("accuracy MNIST validation:")
print (accuracy_validate_MNIST)
# w1_nn_MNIST, w2_nn_MNIST = neuralnetwork(train_x_MNIST, train_t_MNIST, b)
# yOneHot_nn_MNIST, y_value_nn_MNIST, accuracy_nn_MNIST = neuralnetwork(valid_x_MNIST, valid_target_MNIST, w1_nn_MNIST, w2_nn_MNIST, b)
# yOneHot_test_nn_MNIST, y_value_test_nn_MNIST = neuralnetwork(test_x_MNIST, w1_nn_MNIST, w2_nn_MNIST, b)
# yOneHot_nn_USPS, y_value_nn_USPS, accuracy_nn_USPS = neuralnetwork(validate_x_USPS, target_set_USPS, w1_nn_MNIST, w2_nn_MNIST, b)
# yOneHot_test_nn_USPS, y_value_test_nn_USPS = neuralnetwork(test_x_USPS, w1_nn_MNIST, w2_nn_MNIST, b)
print ("PROGRAM COMPLETED")
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
11748,
269,
31686,
293,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
4738,
198,
11748,
28686,
355,
28686,
198,
6738,
629,
541,
88,
1330,
12747,
198,
6738,
1341,
9060,
1330,
... | 1.861055 | 2,958 |
#!/usr/bin/env python
# encoding: utf-8
"""
@author: 'Administrator'
@contact:
@time:
"""
#!/usr/bin/python
# encoding: utf-8
# ================ 直接创建DataFrame
from pyspark.sql import SparkSession
from pyspark.sql import Row
spark = SparkSession.builder.appName('test').getOrCreate()
sc = spark.sparkContext
# spark.conf.set("spark.sql.shuffle.partitions", 6)
# ================直接创建==========================
l = [('Ankit',25),('Jalfaizy',22),('saurabh',20),('Bala',26)]
rdd = sc.parallelize(l)
people = rdd.map(lambda x: Row(name=x[0], age=int(x[1])))
# schemaPeople = sqlContext.createDataFrame(people)
schemaPeople = spark.createDataFrame(people)
# ==================从csv读取======================
df = spark.read.format("csv"). \
option("header", "true") \
.load("iris.csv")
df.printSchema()
df.show(10)
df.count()
df.columns
# ===============增加一列(或者替换) withColumn===========
# Column name which we want add /replace.
# Expression on column.
df.withColumn('newWidth',df.SepalWidth * 2).show()
# ==========删除一列 drop=========================
df.drop('Name').show()
#================ 统计信息 describe================
df.describe().show()
df.describe('Name').show() #分类变量
# ===============提取部分列 select==============
df.select('Name','SepalLength').show()
# ==================基本统计功能 distinct count=====
df.select('Name').distinct().count()
# 分组统计 groupby(colname).agg({'col':'fun','col2':'fun2'})
df.groupby('Name').agg({'SepalWidth':'mean','SepalLength':'max'}).show()
# avg(), count(), countDistinct(), first(), kurtosis(),
# max(), mean(), min(), skewness(), stddev(), stddev_pop(),
# stddev_samp(), sum(), sumDistinct(), var_pop(), var_samp() and variance()
# 自定义的汇总方法
import pyspark.sql.functions as fn
df.agg(fn.count('SepalWidth').alias('width_count'),
fn.countDistinct('id').alias('distinct_id_count')).collect()
#====================数据集拆成两部分 randomSplit ===========
trainDF, testDF = df.randomSplit([0.6, 0.4])
# ================采样数据 sample===========
# withReplacement = True or False to select a observation with or without replacement.
# fraction = x, where x = .5 shows that we want to have 50% data in sample DataFrame.
# seed for reproduce the result
sdf = df.sample(False,0.2,100)
#查看两个数据集在类别上的差异 subtract,确保训练数据集覆盖了所有分类
diff_in_train_test = testDF.select('Name').subtract(trainDF.select('Name'))
diff_in_train_test.distinct().count()
# ================交叉表 crosstab=============
df.crosstab('Name','SepalLength').show()
# ===============sql 功能 ==============
df.registerAsTable('train_table')
spark.sql("").show()
#================== 综合案例,+ udf================
# 测试数据集中有些类别在训练集中是不存在的,把这些数据集应该从测试集中删除
trainDF,testDF = df.randomSplit([0.01,0.98])
diff_in_train_test = testDF.select('Name').subtract(trainDF.select('Name')).distinct().show()
首先找到这些类,整理到一个列表
not_exist_cats = testDF.select('Name').subtract(trainDF.select('Name')).distinct().rdd.map(lambda x :x[0]).collect()
定义一个方法,用于检测
创建udf,udf函数需要两个参数:
# Function
# Return type (in my case StringType())
from pyspark.sql.types import StringType
from pyspark.sql.functions import udf
check = udf(should_remove,StringType())
testDF2 = testDF.withColumn('New_name',check(testDF['Name'])).filter('New_name <> -1')
testDF2.show()
# ==================过滤行 filter ==================
import pyspark.sql.functions as fun
estDF2 = df.withColumn('New_name',check(testDF['Name'])).filter('New_name <> -1')
df.withColumn('New_name',check(testDF['Name'])).filter(fun.col('Name')<>-1).show() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
628,
198,
37811,
198,
31,
9800,
25,
220,
705,
41862,
12392,
6,
198,
31,
32057,
25,
220,
220,
198,
31,
2435,
25,
220,
198,
37811,
198,
198,
2,
4844... | 2.257566 | 1,553 |
from .pgcb import Pgcb, PgcbHome, PgcbFilter | [
6738,
764,
6024,
21101,
1330,
350,
70,
21101,
11,
350,
70,
21101,
16060,
11,
350,
70,
21101,
22417
] | 2.444444 | 18 |
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# analyze.py
#
# Copyright © 214 Intel Corporation
#
# Author: Quanxian Wang <quanxian.wang@intel.com>
# Zhang Xiaoyan <zhang.xiaoyanx@intel.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import re
import math
import xml.etree.ElementTree as ET
import collections
import copy
import sys
import os
from cairographic import Graphic
#Define macro
START_TIME = 999999999
TOTAL_INTERVAL = 0
GROUP_NUM = 3
X_AXIS_INTERVAL = 120
MAX_LEN = 1000000
class Analyzer:
"""
Profile Analyzer
It's used to read from log file and visualize the record.
"""
def draw_smooth(self, name, show_start, show_end, width, height, output_dir=None):
"""
Note:draw frame summary graph
Args:
show_start: the start time to show
show_end: the end time to show
output_dir: the output directory of fps.txt
Input:self.smooth_events, self.comm_events
Output:Graphic object
"""
if len(self.smooth_events.keys()) == 0:
return None
st_dic = collections.OrderedDict()
for cid in self.client_id_list:
if 'client'+'_'+cid not in self.client_activate \
or self.client_activate['client'+'_'+cid] != True:
continue
st_dic[cid] = collections.OrderedDict()
data = []
color_index = 0
colors = []
x_labels = []
sum_total = 0
se_len = len(self.smooth_event_list)
for i in range(se_len):
total = 0
ename = self.smooth_event_list[i]
data_len = len(self.smooth_events[cid][ename])
for number in self.smooth_events[cid][ename]:
total += number
st_dic[cid][ename] = total/data_len
if i < se_len - 1:
cname = 'comm' + str(i)
comm_val = self.comm_events[cid][cname]
if comm_val > 0.1:
st_dic[cid][cname] = comm_val
# get sum_total
for ename in st_dic[cid]:
sum_total += st_dic[cid][ename]
sum_total = float("{0:.2f}".format(sum_total))
fps = float("{0:.2f}".format(1000 / sum_total))
str1 = 'total_time = ' + str(sum_total) + 'ms'\
+ '\n' + 'fps = ' + str(fps) + 'fps'
if output_dir == None:
output_dir = '.'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
fd = open(output_dir + '/fps.txt', 'w')
fd.write(str1)
fd.close()
for ename in st_dic[cid].keys():
x_labels.append(ename)
for i in range(len(x_labels)):
color_index = color_index % (len(self.color_table))
colors.append(self.color_table.values()[color_index])
color_index += 1
data = st_dic[cid]
smooth_chart = Graphic(name, data, width,
height, x_labels=x_labels,
axis=True, grid=True,
background="white", series_colors=colors)
smooth_chart.render()
smooth_chart.render_smooth()
return smooth_chart
def draw_fps(self, name, show_start, show_end, width, height, output_dir=None):
"""
Note:draw fps graph
Args:
show_start: the start time to show
show_end: the end time to show
Input:self.time_dic
Output:Graphic object
"""
if len(self.time_dic) == 0:
return None
# change to relative time
rel_start = show_start - self.start_time
rel_end = show_end - self.start_time
for cid in self.client_id_list:
if 'client' + '_' + cid not in self.client_activate or \
self.client_activate['client' + '_' + cid] == False:
continue
client_color = []
time_list = []
x_labels = []
time_list = self.time_dic[cid]
FPS = collections.OrderedDict()
x_axis_num = int(math.floor(width / X_AXIS_INTERVAL))
x_interval = int(math.floor((rel_end - rel_start)
/ x_axis_num))
for i in range(x_axis_num + 1):
x_labels.append("{0}ms".format(rel_start + i * x_interval))
for i in range(len(time_list)):
if time_list[i].start < rel_start:
continue
if time_list[i].end > rel_end:
break
if time_list[i].end == -1:
FPS[time_list[i].start] = -1
continue
# change ms value to FPS value
FPS[time_list[i].start] = 1000/time_list[i].end
client_color.append(self.color_table["blue"])
# FPS is defined for every client id
# lets calculate start, end, interval and labels.
fps_chart = Graphic(name, FPS, width, height, rel_end,
x_labels=x_labels, axis=True, grid=True,
background="white", series_colors=client_color)
fps_chart.render()
fps_chart.render_fps()
return fps_chart
def draw_fps_media(self, name, show_start, show_end, width, height, output_dir=None):
"""
Note:draw fps graph
Args:
show_start: the start time to show
show_end: the end time to show
Input:self.new_events
Output:Graphic object
"""
if len(self.time_dic) == 0:
return None
# change to relative time
rel_start = show_start - self.start_time
rel_end = show_end - self.start_time
for cid in self.client_id_list:
if 'client' + '_' + cid not in self.client_activate or \
self.client_activate['client' + '_' + cid] == False:
continue
client_color = []
x_labels = []
FPS = collections.OrderedDict()
offset = 0
time_old = 0
sum_total = 0
fps_len = len(self.fps_event_list)
event_len = len(self.new_events[cid])
event_name = self.fps_event_list[0]
x_axis_num = int(math.floor(width / X_AXIS_INTERVAL))
x_interval = int(math.floor((rel_end - rel_start)
/ x_axis_num))
for i in range(x_axis_num + 1):
x_labels.append("{0}ms".format(rel_start + i * x_interval))
for time in range(1000, int(rel_end) + 1000)[::1000]:
count = 0
for i in range(offset, event_len)[::fps_len]:
event1 = self.new_events[cid][i]
if event1[0] == event_name and time_old <= event1[1] < time:
if (i + fps_len - 1) < event_len:
event2 = self.new_events[cid][i + fps_len - 1]
if event2[2] < time:
count += 1
else:
break
offset = i
time_old = time
if count >= 1:
sum_total += count
FPS[time] = count
if sum_total > 0:
fps = int("{0:.0f}".format(sum_total / len(FPS)))
str1 = 'fps = ' + str(fps) + 'fps'
if output_dir == None:
output_dir = '.'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
fd = open(output_dir + '/fps.txt', 'w')
fd.write(str1)
fd.close()
client_color.append(self.color_table["blue"])
# FPS is defined for every client id
# lets calculate start, end, interval and labels.
fps_chart = Graphic(name, FPS, width, height, rel_end,
x_labels=x_labels, axis=True, grid=True,
background="white", series_colors=client_color)
fps_chart.render()
fps_chart.render_fps()
return fps_chart
def calculate_fps(self):
"""
Input:self.time_dic = {event_name:{id:time}}
Output:self.time_dic
Data Formate:
"""
for cid in self.client_id_list:
time_list = []
number = 0
index = 0
offset = 0
rate = 0
event1 = self.fps_event_list[0]
event2 = self.fps_event_list[-1]
seg_len = len(self.seg_point_time)
for event in self.new_events[cid]:
if event[0] == event1:
start = event[1]
continue
if event[0] == event2:
end = event[1]
itv = interval()
itv.start = start
itv.end = end - start
number += 1
if seg_len > 0 and seg_len > index:
seg_time = self.seg_point_time[index]
if start >= seg_time:
"""
Before insert segment point, sample the time data
"""
new_list = sorted(time_list[offset:len(time_list)], key=lambda e:e.end)
self.sample_data(new_list, 0, len(new_list))
new_list.sort(key=lambda e:e.start)
if offset > 0:
time_list = time_list[0:offset] + new_list
else:
time_list = new_list
itv2 = interval()
itv2.start = seg_time
itv2.end = -1
index += 1
time_list.append(itv2)
offset = len(time_list)
time_list.append(itv)
if seg_len == 0:
time_list.sort(key=lambda e:e.end)
self.sample_data(time_list, 0, len(time_list))
time_list.sort(key=lambda e:e.start)
if seg_len > 0 and self.seg_point_time[-1] not in [e.start for e in time_list]:
new_list = sorted(time_list[offset:len(time_list)], key=lambda e:e.end)
self.sample_data(new_list, 0, len(new_list))
new_list.sort(key=lambda e:e.start)
if offset > 0:
time_list = time_list[0:offset] + new_list
else:
time_list = new_list
itv = interval()
itv.start = self.seg_point_time[-1]
itv.end = -1
time_list.append(itv)
self.time_dic[cid] = time_list
def parse_log_file(self):
"""
parse log file.
Return:self.events_dic
"""
color_index = 0
for debug_file in self.log_files:
with open(debug_file) as inf:
for line in inf:
# Find the match
match = self.idregex.match(line)
if match is not None:
self.process_id(match)
continue
match = self.pregex.match(line)
if match is not None:
self.process_point(match)
continue
match = self.sregex.match(line)
if match is not None:
self.process_timestr(match, True)
continue
match = self.eregex.match(line)
if match is not None:
self.process_timestr(match, False)
continue
def parse_config_file(self, configfile, logfile):
"""
parse config xml file, it shows how to parse log file.
parse log file then according to the xml instruction.
"""
if configfile == None:
configfile = '../config/config.xml'
if not os.path.exists(configfile):
return
self.root = ET.parse(configfile).getroot()
config_tags = {"segmentation_point":("point", []),
"event_item":("event", []),
"fps_item":("fps", []),
"smooth_item":("smooth", []),
"sample_rate":("rate", []),
"action_type":("type", []),
"profile":("file", [])}
for key in config_tags.keys():
debug = self.root.find(key)
if debug is None:
continue
subitems = debug.getchildren()
for item in subitems:
if item.tag == config_tags[key][0]:
config_tags[key][1].append(item.text)
# convert config to global values
if len(config_tags["segmentation_point"][1]) > 0:
self.seg_point = config_tags["segmentation_point"][1][0]
self.event_list.extend(config_tags["event_item"][1])
self.fps_event_list.extend(config_tags["fps_item"][1])
self.smooth_event_list.extend(config_tags["smooth_item"][1])
if len(config_tags["sample_rate"][1]) == 0:
self.sample_rate = 0
else:
self.sample_rate = config_tags["sample_rate"][1][0]
if logfile != None:
self.log_files.append(logfile)
else:
self.log_files.extend(config_tags["profile"][1])
if len(config_tags["action_type"][1]) != 0:
self.action_type = config_tags["action_type"][1][0]
def get_smooth_time(self):
"""
Note:According to valid data(self.events_dic)
to generate the smooth data(self.smooth_events)
Input:self.events_dic
Return:self.smooth_events
Data Format:self.smooth_events = {event_name:{client_id:time}}
"""
event_len = MAX_LEN
for cid in self.client_id_list:
self.smooth_events[cid] = {}
for event in self.new_events[cid]:
name = event[0]
number = event[2] - event[1]
if name not in self.smooth_events[cid].keys():
self.smooth_events[cid][name] = []
self.smooth_events[cid][name].append(number)
# merge the data based on the sample rate
for name in self.smooth_events[cid].keys():
self.smooth_events[cid][name].sort()
self.sample_data(self.smooth_events[cid][name], 0, \
len(self.smooth_events[cid][name]))
def get_comm_time(self):
"""
Note:According to valid data(self.events_dic)
to generate the communication data(self.comm_events)
Input:self.events_dic
Return:self.comm_events
Data Format:self.comm_events = {client_id:{event_name:time}}
"""
for cid in self.client_id_list:
self.comm_events[cid] = collections.OrderedDict()
total = 0
comm_time = 0
comm_len = 0
for i in range(0, len(self.smooth_event_list) - 1):
fname = self.smooth_event_list[i]
sname = self.smooth_event_list[i+1]
fst_end = [e[2] for e in self.new_events[cid] \
if e[0] == fname]
sec_start = [e[1] for e in self.new_events[cid] \
if e[0] == sname]
comm_list = []
if len(fst_end) == 0 or len(sec_start) == 0:
print 'smooth invalid data!'
sys.exit(-1)
comm_len = len(fst_end) > len(sec_start) and \
len(sec_start) or len(fst_end)
for j in range(comm_len):
number = sec_start[j] - fst_end[j]
comm_list.append(number)
comm_list.sort()
self.sample_data(comm_list, 0, len(comm_list))
for number in comm_list:
total += number
if len(comm_list) > 0:
comm_time = total / len(comm_list)
self.comm_events[cid]['comm' + str(i)] = comm_time
def form_new_dic(self):
"""
Form new event dictionary
"""
for cid in self.client_id_list:
self.new_events[cid] = []
for cid in self.client_id_list:
for i in range(len(self.events_dic[cid])):
event = self.events_dic[cid][i]
if event['start'] == True:
new_event = (event['name'], event['time'], -1)
self.new_events[cid].append(new_event)
continue
if event['start'] == False:
# find the last event which end is -1
event_len = len(self.new_events[cid])
if event_len == 0:
continue
i = 1
while i < (event_len - 1):
e1 = self.new_events[cid][-i]
if e1[0] == event['name'] and e1[2] != -1:
break
i += 1
while i > 0:
e1 = self.new_events[cid][-i]
if e1[0] == event['name'] and e1[2] == -1:
new_event = (e1[0], e1[1], event['time'])
del self.new_events[cid][-i]
self.new_events[cid].append(new_event)
i -= 1
# sort self.new_events
self.new_events[cid].sort(key=lambda e:e[1])
def build_complete_dic(self):
"""
Form a complete event dictionary
"""
elen = len(self.event_list)
for cid in self.client_id_list:
ecount = len(self.new_events[cid])
j = 0
index = 0
while j < ecount:
if self.new_events[cid][j][0] == self.event_list[index]:
index += 1
index = index % elen
j += 1
else:
del self.new_events[cid][j]
ecount -= 1
for cid in self.new_events.keys():
if len(self.new_events[cid]) < elen:
del self.new_events[cid]
index = self.client_id_list.index(cid)
del self.client_id_list[index]
continue
for i in range(len(self.new_events[cid])):
event = self.new_events[cid][i]
if event[2] == -1:
del self.new_events[cid][i:]
break
def get_valid_data(self):
"""
Note:according to the first event in
self.event_list(like 'client', self.event_list
according to config.xml, the order of the list is
the order of the events), rule out the error data.
Input:original data:self.events_dic
Return:valid data:self.events_dic
data format:self.events_dic = {id:[event]}
event = {'name':event_name, 'start':start_time, 'end':end_time}
"""
self.clean_up()
self.merge_server()
self.form_new_dic()
# build a complate event dic
self.build_complete_dic()
self.init_client_activate()
self.get_startend_time()
def get_startend_time(self):
"""
Note:get the start time of log files.
Input:self.events_dic
Output:self.start_time
"""
for cid in self.client_id_list:
if len(self.new_events[cid]) <= 0:
continue
start_time = self.new_events[cid][0][1]
end_time = self.new_events[cid][-1][2]
if self.start_time > start_time:
self.start_time = start_time
if self.end_time < end_time:
self.end_time = end_time
for time in self.seg_point_time:
if time < self.start_time:
self.start_time = time
for time in self.seg_point_time:
if time > self.end_time:
self.end_time = time
self.total_interval = self.end_time
def update2rel(self):
"""
all event time is decreased by start time
"""
for cid in self.client_id_list:
time_list = self.new_events[cid]
for i in range(len(time_list)):
event = time_list[i]
event_new = (event[0], event[1] - self.start_time, \
event[2] - self.start_time)
time_list[i] = event_new
for i in range(len(self.seg_point_time)):
self.seg_point_time[i] -= self.start_time
def init(self, configfile, logfile):
"""initialize start time and parse config file"""
self.parse_config_file(configfile, logfile)
self.parse_log_file()
if len(self.client_id_list) == 0:
# self.client_id_list.append('0')
self.client_id_list.extend(self.events_dic.keys())
# filer the all data to be valid
if len(self.events_dic.keys()) == 0:
print 'logfile do not have valid data!'
sys.exit(-1)
self.get_valid_data()
self.update2rel()
if len(self.smooth_event_list) > 0:
self.get_smooth_time()
self.get_comm_time()
if len(self.fps_event_list) != 0:
self.time_dic = {}
self.calculate_fps()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
17,
13,
22,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
16602,
13,
9078,
198,
2,
198,
2,
15069,
10673,
28277,
8180,
10501,
198,
2,
198,
2,
6434,
25,
42312,
... | 1.812545 | 12,467 |
# shopping_cart.py
#from pprint import pprint
import pandas as pd
import datetime
import time
products = [
{"id":1, "name": "Chocolate Sandwich Cookies", "department": "snacks", "aisle": "cookies cakes", "price": 3.50},
{"id":2, "name": "All-Seasons Salt", "department": "pantry", "aisle": "spices seasonings", "price": 4.99},
{"id":3, "name": "Robust Golden Unsweetened Oolong Tea", "department": "beverages", "aisle": "tea", "price": 2.49},
{"id":4, "name": "Smart Ones Classic Favorites Mini Rigatoni With Vodka Cream Sauce", "department": "frozen", "aisle": "frozen meals", "price": 6.99},
{"id":5, "name": "Green Chile Anytime Sauce", "department": "pantry", "aisle": "marinades meat preparation", "price": 7.99},
{"id":6, "name": "Dry Nose Oil", "department": "personal care", "aisle": "cold flu allergy", "price": 21.99},
{"id":7, "name": "Pure Coconut Water With Orange", "department": "beverages", "aisle": "juice nectars", "price": 3.50},
{"id":8, "name": "Cut Russet Potatoes Steam N' Mash", "department": "frozen", "aisle": "frozen produce", "price": 4.25},
{"id":9, "name": "Light Strawberry Blueberry Yogurt", "department": "dairy eggs", "aisle": "yogurt", "price": 6.50},
{"id":10, "name": "Sparkling Orange Juice & Prickly Pear Beverage", "department": "beverages", "aisle": "water seltzer sparkling water", "price": 2.99},
{"id":11, "name": "Peach Mango Juice", "department": "beverages", "aisle": "refrigerated", "price": 1.99},
{"id":12, "name": "Chocolate Fudge Layer Cake", "department": "frozen", "aisle": "frozen dessert", "price": 18.50},
{"id":13, "name": "Saline Nasal Mist", "department": "personal care", "aisle": "cold flu allergy", "price": 16.00},
{"id":14, "name": "Fresh Scent Dishwasher Cleaner", "department": "household", "aisle": "dish detergents", "price": 4.99},
{"id":15, "name": "Overnight Diapers Size 6", "department": "babies", "aisle": "diapers wipes", "price": 25.50},
{"id":16, "name": "Mint Chocolate Flavored Syrup", "department": "snacks", "aisle": "ice cream toppings", "price": 4.50},
{"id":17, "name": "Rendered Duck Fat", "department": "meat seafood", "aisle": "poultry counter", "price": 9.99},
{"id":18, "name": "Pizza for One Suprema Frozen Pizza", "department": "frozen", "aisle": "frozen pizza", "price": 12.50},
{"id":19, "name": "Gluten Free Quinoa Three Cheese & Mushroom Blend", "department": "dry goods pasta", "aisle": "grains rice dried goods", "price": 3.99},
{"id":20, "name": "Pomegranate Cranberry & Aloe Vera Enrich Drink", "department": "beverages", "aisle": "juice nectars", "price": 4.25}
] # based on data from Instacart: https://www.instacart.com/datasets/grocery-shopping-2017
#print(products)
# pprint(products)
# TODO: write some Python code here to produce the desired output
products_list_csv = pd.read_csv('/Users/richiebubbs/Downloads/GitHub/shopping-cart/data/products.csv')
acceptable_inputs = [str(i["id"]) for i in products]
selected_products = []
#print(acceptable_inputs)
#I constructed this while loop with help from https://realpython.com/python-while-loop/
# I reconstructed the loop with some help form your screencast when I got stuck...
#https://www.youtube.com/watch?v=3BaGb-1cIr0&feature=youtu.be
# Interactive grocery-checkout script.
# NOTE(review): relies on `products`, `acceptable_inputs`, `time` and `datetime`
# being defined/imported earlier in this file (not visible here) — confirm.
total_price = 0
selected_ids = []  # product identifiers the user typed in, in order
a = False  # loop sentinel; set True once the user types DONE
while not a:
    print("Please enter a product identifier (or enter 'DONE' to exit): ")
    x = input()
    if x != "DONE" and x in acceptable_inputs:
        a = False
        #matching_products = [p for p in products if str(p["id"])==x]
        #matching_product = matching_products[0]
        #total_price = total_price + matching_product["price"]
        selected_ids.append(x)
        #print("..." + matching_product["name"] + "(" + str(matching_product["price"])+ ")")
        #print(type(x))
    elif x == "DONE":
        a = True
    else:
        # unknown identifier: re-prompt without recording anything
        print("I'm sorry, that is not a valid selection, please try again")
#print("Total Price: ", total_price)
#print(selected_products) i did this to make sure that the list was being properly appended
#breakpoint()
# time delay help from https://www.cyberciti.biz/faq/python-sleep-command-syntax-example/
# Cosmetic "printing the receipt" animation.
time.sleep(1)
print(" ")
print("Here is your receipt")
time.sleep(1)
print(".")
time.sleep(1)
print("..")
time.sleep(1)
print("...")
time.sleep(1)
print(" ")
print("--------------------------------------")
print(" ")
print("RichieBubbs Grocery Emporium")
print("WWW.RICHIEBUBBS-GROCERY-EMPORIUM.COM")
print(" ")
print("--------------------------------------")
# for date time I got some help from https://www.saltycrane.com/blog/2008/06/how-to-get-current-date-and-time-in/
# and for formatting: https://stackoverflow.com/questions/415511/how-to-get-the-current-time-in-python
# https://stackoverflow.com/questions/31487732/simple-way-to-drop-milliseconds-from-python-datetime-datetime-object
now = datetime.datetime.now().replace(microsecond=0)
print("CHECKOUT AT: ", now)
print(" ")
print("--------------------------------------")
print(" ")
print("SELECTED PRODUCTS:")
# Empty cart: zero out every total so the summary lines below still print.
if selected_ids == []:
    total_price = 0.00
    tax = 0.00
    grand_ttl_price_usd = 0.00
    ttl_price_usd = 0.00
    tax_price_usd = 0.00
    ttl_price_usd = 0.00  # NOTE(review): duplicate assignment, harmless
else:
    # Price each selected item and accumulate the subtotal.
    for y in selected_ids:
        matching_products = [p for p in products if str(p["id"])==y]
        matching_product = matching_products[0]
        #price_usd = "{0:.2f}".format(matching_product["price"])
        price_usd = "{0:.2f}".format(matching_product["price"])
        total_price = total_price + matching_product["price"]
        ttl_price_usd = "{0:.2f}".format(total_price)
        print("..." + matching_product["name"] + "($" + str(price_usd)+ ")")
    # 0.08875 is presumably NYC's 8.875% sales-tax rate — confirm
    tax = total_price * 0.08875
    tax_price_usd = "{0:.2f}".format(tax)
    grand_ttl = total_price + tax
    grand_ttl_price_usd = "{0:.2f}".format(grand_ttl)
print("--------------------------------------")
print(" ")
print("SUBTOTAL: $" + str(ttl_price_usd))
print("TAX: $" + str(tax_price_usd))
print("TOTAL: $" + str(grand_ttl_price_usd))
print(" ")
print("--------------------------------------")
print("THANK YOU, COME AGAIN!")
print("--------------------------------------")
#for y in selected_products:
#    matching_products_name = [p["name"] for p in products if p["id"]==y]
#    matching_products_price =[p['price'] for p in products if p['id']==y]
#print(final_product_selection, final_product_price)
#for p in selected_products:
#    print("..." + products["id"] == p)
#> ---------------------------------
#> GREEN FOODS GROCERY
#> WWW.GREEN-FOODS-GROCERY.COM
#> ---------------------------------
#> CHECKOUT AT: 2019-06-06 11:31 AM
#> ---------------------------------
#> SELECTED PRODUCTS:
#> ... Chocolate Sandwich Cookies ($3.50)
#> ... Cut Russet Potatoes Steam N' Mash ($4.25)
#> ... Dry Nose Oil ($21.99)
#> ... Cut Russet Potatoes Steam N' Mash ($4.25)
#> ... Cut Russet Potatoes Steam N' Mash ($4.25)
#> ... Mint Chocolate Flavored Syrup ($4.50)
#> ... Chocolate Fudge Layer Cake ($18.50)
#> ---------------------------------
#> SUBTOTAL: $61.24
#> TAX: $5.35
#> TOTAL: $66.59
#> ---------------------------------
#> THANKS, SEE YOU AGAIN SOON!
#> ---------------------------------
#print(products_list_csv)
| [
2,
9735,
62,
26674,
13,
9078,
198,
198,
2,
6738,
279,
4798,
1330,
279,
4798,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
4818,
8079,
198,
11748,
640,
628,
198,
198,
29498,
796,
685,
198,
220,
220,
220,
19779,
312,
1298,
16,
... | 2.52324 | 3,012 |
import os
import sys
import platform
import multiprocessing
from distutils.core import run_setup
from setuptools import find_packages
from numpy.distutils.command.build_ext import build_ext
from numpy.distutils.core import setup, Extension
from io import open
# Global constants
# Top-level setup script for the WEIS package: installs the pure-Python
# sub-packages first, then registers the CMake-built Fortran extensions.
ncpus = multiprocessing.cpu_count()  # available cores (unused below; kept for build config)
this_directory = os.path.abspath(os.path.dirname(__file__))
# Eagle environment
# Detect NREL "Eagle" HPC login/compute nodes (el0..el9 / ed0..ed9) by hostname.
eagle_nodes = ['el'+str(m) for m in range(10)] + ['ed'+str(m) for m in range(10)]
eagle_flag = platform.node() in eagle_nodes
# GitHub Actions CI runners have hostnames starting with "fv-az".
ci_flag = platform.node().find('fv-az') >= 0
if eagle_flag:
    # On Eagle, force the Intel toolchain for compiled extensions.
    os.environ["FC"] = "ifort"
    os.environ["CC"] = "icc"
    os.environ["CXX"] = "icpc"
    os.environ["LDSHARED"] = "icc -pthread -shared"
# For the CMake Extensions
# All of the extensions
# NOTE(review): CMakeExtension / CMakeBuildExt are defined elsewhere in this
# file (not visible here) — confirm before reusing this snippet standalone.
fastExt = CMakeExtension('openfast','OpenFAST')
roscoExt = CMakeExtension('rosco','ROSCO')
# OpenFAST is skipped on Windows; only ROSCO is built there.
extList = [roscoExt] if platform.system() == "Windows" else [roscoExt, fastExt]
# Setup content
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
CLASSIFIERS = '''
Development Status :: 1 - Planning
Intended Audience :: Science/Research
Intended Audience :: Developers
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: Unix
Operating System :: MacOS
'''
weis_pkgs = find_packages()
# Install the python sub-packages
print(sys.argv)
# Each vendored sub-package ships its own setup.py; run them in place.
for pkg in ['WISDEM','ROSCO_toolbox','pCrunch','pyHAMS','MoorPy','RAFT','pyoptsparse']:
    os.chdir(pkg)
    if pkg == 'pyoptsparse':
        # Build pyOptSparse specially
        run_setup('setup.py', script_args=['install'])
    else:
        run_setup('setup.py', script_args=sys.argv[1:], stop_after='run')
    # subprocess.check_call([sys.executable, "-m", "pip", "install", "-e", "."]) # This option runs `pip install -e .` on each package
    os.chdir('..')
# Now install WEIS and the Fortran packages
metadata = dict(
    name = 'WEIS',
    version = '0.2',
    description = 'Wind Energy with Integrated Servo-control',
    long_description = long_description,
    long_description_content_type = 'text/markdown',
    author = 'NREL',
    url = 'https://github.com/WISDEM/WEIS',
    install_requires = ['openmdao>=3.4','numpy','scipy','nlopt','dill','smt','control','jsonmerge','fatpack'],
    classifiers = [_f for _f in CLASSIFIERS.split('\n') if _f],
    packages = weis_pkgs,
    package_data = {'':['*.yaml','*.xlsx']},
    python_requires = '>=3.6',
    license = 'Apache License, Version 2.0',
    ext_modules = extList,
    cmdclass = {'build_ext': CMakeBuildExt},
    zip_safe = False,
)
setup(**metadata)
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
3859,
198,
11748,
18540,
305,
919,
278,
198,
6738,
1233,
26791,
13,
7295,
1330,
1057,
62,
40406,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
198,
6738,
299,
32152,
13,
17080,
26791,
... | 2.326967 | 1,309 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Tomasz Czaja'
__version__ = '0.0.1'
import sys
import time
from pathlib import Path
import signal
import RPi.GPIO as GPIO
from PIL import Image, ImageDraw, ImageFont
from ST7789 import ST7789
from audioplayer import AudioPlayer
# Press the green button in the gutter to run the script.
# Console loop: read a numeric song key and play it on the jukebox.
# NOTE(review): RfidJukebox is defined elsewhere in this file (not visible
# here) — confirm its play_song/player API before modifying.
if __name__ == '__main__':
    jukebox = RfidJukebox()
    while True:
        try:
            value = input("Enter song key:\n")
            if value.isdigit():
                # only numeric keys are treated as song selections
                jukebox.play_song(value)
            time.sleep(0.3)
        except KeyboardInterrupt:
            # Ctrl-C: stop playback (if any) and exit cleanly.
            if jukebox.player:
                jukebox.player.stop()
            print("Bye")
            sys.exit()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
705,
13787,
292,
89,
327,
89,
27792,
6,
198,
834,
9641,
834,
796,
705,
15,
13,
15,
13,
... | 2.165192 | 339 |
'''
This module parses the input arguments and extracts the necessary
data structures from it, then calls the appropriate functions to
process it.
'''
import sys
import getpass
from Subgraph import *
from Multiprocessing import *
def dispatch(json_file_name):
    '''
    Inspect an input JSON file and route it to the appropriate
    processing pipeline (serial or multiprocessing).

    Parameters
    ----------
    json_file_name : str
        Path to JSON file to be executed

    Returns
    -------
    None
    '''
    # Normalize the input into the agent-descriptor JSON format.
    descriptor_path = make_json(json_file_name)
    # Load the descriptor contents as a dictionary.
    with open(descriptor_path) as fh:
        contents = json.load(fh)
    if 'groups' in contents.keys():
        # Groups present -> split components into their processes and
        # execute them with multiprocessing.
        run_parallel(parallel_dict(contents))
    else:
        # No groups -> flatten nested subgraphs, then animate serially.
        make_js(unwrap_subgraph(descriptor_path))
###################################################
# If you're running from an IDE...
# Simple example with parameter arguments
var1 = 'JSON/multiplyparam.json'
# Example of an input JSON file that is already in the
# special agent descriptor dict format
var2 = 'JSON/agent_descriptor.json'
# Simple nested subgraph example
var3 = 'JSON/simplesubgraph.json'
# Graph with 3 nested subgraphs
var4 = 'JSON/doublenested.json'
# Multiprocessing example. Doesn't work yet!!
var5 = 'JSON/simplegroups.json'
# UNCOMMENT the following 3 lines to be prompted
# for a JSON file name at each run
# var = raw_input("Please enter path of JSON: ")
# var = str(var)
# dispatch(var)
# UNCOMMENT the following line to run the same
# file each run, replacing 'var1' with the
# path to the file you want
# dispatch(var1)
###################################################
# If you're running from terminal:
# Usage: navigate into the directory with this file
# type: python run.py NAME_OF_JSON_FILE
# Build an OS-specific default Downloads path for the current user.
user_os = sys.platform
user_name = getpass.getuser()
if user_os == 'darwin':
    path = '/Users/' + user_name + '/Downloads/'
elif user_os[:3] == 'win':
    path = 'C:/Users/' + user_name + '/Downloads/'
elif 'linux' in user_os:
    path = '/home/' + user_name + '/Downloads/'
else:
    # unknown platform: treat the argument as a full/relative path
    path = ''
# NOTE(review): raises IndexError if no file argument is supplied — confirm
# whether a usage message should be printed instead.
var = sys.argv
fullpath = path + var[1]
dispatch(fullpath)
| [
7061,
6,
198,
1212,
8265,
13544,
274,
262,
5128,
7159,
290,
32139,
262,
3306,
198,
7890,
8573,
422,
340,
11,
788,
3848,
262,
5035,
5499,
284,
198,
14681,
340,
13,
198,
198,
7061,
6,
198,
198,
11748,
25064,
198,
11748,
651,
6603,
198... | 2.943613 | 869 |
# Copyright (C) 2012 Ion Torrent Systems, Inc. All Rights Reserved
from django import http, template
from django.core import urlresolvers
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required, permission_required
from django.core.cache import cache
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render_to_response, get_object_or_404, redirect, render
from django.template import RequestContext
from django.template.loader import render_to_string
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.core.servers.basehttp import FileWrapper
from django.core.serializers.json import DjangoJSONEncoder
from django.forms.models import model_to_dict
import json
import cStringIO
import csv
import os
import tempfile
import shutil
import subprocess
import glob
import time
import traceback
from django.views.generic import ListView
from iondb.rundb.models import (
Experiment,
Results,
Project,
Location,
ReportStorage,
EventLog, GlobalConfig, ReferenceGenome, dnaBarcode, KitInfo, ContentType, Plugin, ExperimentAnalysisSettings, Sample,
DMFileSet, DMFileStat, FileServer, IonMeshNode, Chip)
from iondb.rundb.api import CompositeExperimentResource, ProjectResource
from iondb.rundb.report.analyze import build_result
from iondb.rundb.report.views import _report_started
from iondb.rundb.report import file_browse
from iondb.rundb import forms
from iondb.anaserve import client
from iondb.rundb.data import dmactions_types
from iondb.rundb.data import tasks as dmtasks
from iondb.rundb.data import dmactions
from iondb.rundb.data.data_management import update_files_in_use
from iondb.rundb.data import exceptions as DMExceptions
from iondb.rundb.data.data_import import find_data_to_import, data_import
from iondb.utils.files import get_disk_attributes_gb, is_mounted
from iondb.rundb.data.dmfilestat_utils import dm_category_stats, get_keepers_diskspace
from django.http import HttpResponse, HttpResponseServerError, HttpResponseNotFound
from datetime import datetime
import logging
from django.core.urlresolvers import reverse
from django.db.models.query_utils import Q
from urllib import unquote_plus
logger = logging.getLogger(__name__)
@login_required
def data(request):
    """Main entry point to the Data tab.

    The tab context is expensive to build, so it is cached for 29 seconds
    and reused across rapid page reloads.
    """
    # NOTE(review): the original stacked @login_required twice; a single
    # application is sufficient and behaviorally identical.
    context = cache.get("data_tab_context")
    if context is None:
        context = data_context(request)
        # 29s TTL — presumably just under a 30s UI refresh interval; confirm.
        cache.set("data_tab_context", context, 29)
    return render(request, "rundb/data/data.html", context)
class ExperimentListView(ListView):
    """This is a class based view using the Django ListView generic view.
    It shows Experiment objects and data from their representative report.
    """
    # Only experiments with a representative report, newest first.
    # select_related fetches the report plus its quality metrics and
    # analysis settings in one query to avoid N+1 lookups when rendering.
    queryset = Experiment.objects.select_related(
        "repResult", "repResult__qualitymetrics", "repResult__eas"
    ).exclude(repResult=None).order_by('-repResult__timeStamp')
    template_name = "rundb/data/fast.html"
    # page size for ListView's built-in pagination
    paginate_by = 30
class ResultsListView(ListView):
    """This ListView shows Results objects and is meant to be quick and light weight
    """
    # Newest results first; related experiment/metrics fetched in one query.
    queryset = Results.objects.select_related(
        "experiment", "qualitymetrics", "eas"
    ).order_by('-timeStamp')
    template_name = "rundb/data/results_list.html"
    # page size for ListView's built-in pagination
    paginate_by = 30
@login_required
@login_required
# NOTE(review): @login_required is applied twice — redundant but harmless;
# possibly an artifact of an edit. One application suffices.
def dm_action_selected(request, results_pks, action):
    '''
    Run a data-management action (export/archive/delete...) on one or more
    reports, dispatching the work to an async celery task group.

    file categories to process: data['categories']
    user log entry comment: data['comment']
    results_pks could contain more than 1 result
    '''
    logger = logging.getLogger('data_management')
    data = json.loads(request.body)
    logger.info("dm_action_selected: request '%s' on report(s): %s" % (action, results_pks))
    '''
    organize the dmfilestat objects by result_id, we make multiple dbase queries
    but it keeps them organized. Most times, this will be a single query anyway.
    '''
    dmfilestat_dict = {}
    try:
        # update any dmfilestats in use by running analyses
        update_files_in_use()
        backup_directory = data['backup_dir'] if data['backup_dir'] != 'default' else None
        for resultPK in results_pks.split(','):
            logger.debug("Matching dmfilestats contain %s reportpk" % resultPK)
            dmfilestat_dict[resultPK] = DMFileStat.objects.select_related() \
                .filter(dmfileset__type__in=data['categories'], result__id=int(resultPK))
            for dmfilestat in dmfilestat_dict[resultPK]:
                # validate export/archive destination folders
                if action in ['export', 'archive']:
                    dmactions.destination_validation(dmfilestat, backup_directory, manual_action=True)
                # validate files not in use
                try:
                    dmactions.action_validation(dmfilestat, action, data['confirmed'])
                except DMExceptions.FilesInUse as e:
                    # warn if exporting files currently in use, allow to proceed if confirmed
                    if action == 'export':
                        if not data['confirmed']:
                            return HttpResponse(json.dumps({'warning': str(e) + '<br>Exporting now may produce incomplete data set.'}), mimetype="application/json")
                    else:
                        raise e
                except DMExceptions.BaseInputLinked as e:
                    # warn if deleting basecaller files used in any other re-analysis started from BaseCalling
                    if not data['confirmed']:
                        return HttpResponse(json.dumps({'warning': str(e)}), mimetype="application/json")
                # warn if archiving data marked Keep
                if action == 'archive' and dmfilestat.getpreserved():
                    if not data['confirmed']:
                        return HttpResponse(json.dumps({'warning': '%s currently marked Keep.' % dmfilestat.dmfileset.type}), mimetype="application/json")
                    else:
                        dmfilestat.setpreserved(False)
            # if further processing an archived dataset, error if archive drive is not mounted
            # NOTE(review): `dmfilestat` here is the loop variable leaking out of
            # the inner for-loop; it is unbound (NameError) if the queryset was
            # empty — confirm whether this check should guard for that.
            if dmfilestat.isarchived() and not os.path.exists(dmfilestat.archivepath):
                return HttpResponseServerError("%s archive location %s is not available." % (dmfilestat.dmfileset.type, dmfilestat.archivepath))
        # hand off the actual work to celery; returns an AsyncResult
        async_task_result = dmtasks.action_group.delay(request.user.username, data[
            'categories'], action, dmfilestat_dict, data['comment'], backup_directory, data['confirmed'])
        if async_task_result:
            logger.debug(async_task_result)
    except DMExceptions.SrcDirDoesNotExist as e:
        # source already gone: mark the filestat Deleted and log it for the user
        dmfilestat.setactionstate('DD')
        # NOTE(review): `e.message` is a Python 2 idiom — confirm runtime version.
        msg = "Source directory %s no longer exists. Setting action_state to Deleted" % e.message
        logger.info(msg)
        EventLog.objects.add_entry(dmfilestat.result, msg, username=request.user.username)
    except Exception as e:
        logger.error("dm_action_selected: error: %s" % str(e))
        return HttpResponseServerError("%s" % str(e))
    # echo the request back to the client as acknowledgement
    test = {'pks': results_pks, 'action': action, 'data': data}
    return HttpResponse(json.dumps(test), mimetype="application/json")
@login_required
@permission_required('user.is_staff', raise_exception=True)
@login_required
# NOTE(review): @login_required appears twice around @permission_required —
# redundant but harmless.
def dm_list_files(request, resultPK, action):
    """Returns the list of files that are selected for the given file categories for the given Report"""
    data = json.loads(request.body)
    # first matching filestat for the requested categories on this report
    dmfilestat = DMFileStat.objects.select_related() \
        .filter(dmfileset__type__in=data['categories'], result__id=int(resultPK))
    dmfilestat = dmfilestat[0]
    # Hack - generate serialized json file for the DataXfer plugin
    dmactions.write_serialized_json(dmfilestat.result, dmfilestat.result.get_report_dir())
    to_process, to_keep = dmactions.get_file_list(dmfilestat)
    payload = {
        'files_to_transfer': to_process,
        'start_dirs': [dmfilestat.result.get_report_dir(), dmfilestat.result.experiment.expDir],
    }
    return HttpResponse(json.dumps(payload), mimetype="application/json")
@login_required
| [
2,
15069,
357,
34,
8,
2321,
36404,
43399,
11998,
11,
3457,
13,
1439,
6923,
33876,
198,
6738,
42625,
14208,
1330,
2638,
11,
11055,
198,
6738,
42625,
14208,
13,
7295,
1330,
19016,
411,
349,
690,
198,
6738,
42625,
14208,
13,
10414,
1330,
... | 2.630885 | 3,186 |
# Generated by Django 3.2.9 on 2021-12-31 13:38
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
24,
319,
33448,
12,
1065,
12,
3132,
1511,
25,
2548,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from django.db import models
import random
import numpy as np
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
628
] | 3.705882 | 17 |
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.recaptchaenterprise import key_pb2
from google3.cloud.graphite.mmv2.services.google.recaptchaenterprise import key_pb2_grpc
from typing import List
| [
2,
15069,
33448,
3012,
11419,
13,
1439,
6923,
33876,
13,
198,
2,
220,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 3.62069 | 232 |
import arcade
# Window dimensions in pixels.
SCREEN_WIDTH = 500
SCREEN_HEIGHT = 600
class MyApplication(arcade.Window):
    """
    Main application class.
    """
    def on_draw(self):
        """
        Render the screen.
        """
        # clear the window and begin a new frame (nothing drawn yet)
        arcade.start_render()
    def on_mouse_press(self, x, y, button, key_modifiers):
        """
        Called when the user presses a mouse button.
        """
        # no mouse handling implemented yet
        pass
# Create the window and start the arcade event loop (blocks until close).
window = MyApplication(SCREEN_WIDTH, SCREEN_HEIGHT)
arcade.run()
| [
11748,
27210,
198,
198,
6173,
2200,
1677,
62,
54,
2389,
4221,
796,
5323,
198,
6173,
2200,
1677,
62,
13909,
9947,
796,
10053,
628,
198,
4871,
2011,
23416,
7,
5605,
671,
13,
27703,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
... | 2.243902 | 205 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
#!/usr/bin/env python
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import os
from os.path import join
import sys
from pathlib import Path
import time
import importlib
from glob import iglob
import argparse
import click
from vframe.settings import app_cfg
from vframe.utils import log_utils
# -----------------------------------------------------------------------------
#
# Argparse pre-process
#
# -----------------------------------------------------------------------------
def choices_description(plugins):
  """Build the CLI help epilog: one colored, padded line per plugin.

  :param plugins: iterable of objects exposing `.name` and `.description`
  :returns: multi-line help text headed by 'Commands and plugins:'
  """
  highlight: str = '\033[1m\033[94m'
  reset: str = '\033[0m'
  # pad names to a 20-char column, compensating for invisible ANSI codes
  pad_width: int = 20 + len(highlight) + len(reset)
  lines = ['Commands and plugins:']
  for plugin in plugins:
    label = f'{highlight}{plugin.name}{reset}'
    gap = pad_width - len(label)
    lines.append(f'\t{label}{" " * gap}{plugin.description}')
  return "\n".join(lines)
# intercept first argument using argparse to select command group
# Temporarily truncate sys.argv so argparse only sees the command-group
# token; the full argv is restored afterwards for click to consume.
argv_tmp = sys.argv
sys.argv = sys.argv[:2]
help_desc = f"\033[1m\033[94mVFRAME CLI ({app_cfg.VERSION})\033[0m"
ap = argparse.ArgumentParser(usage="vf [command]",
  description=help_desc,
  formatter_class=argparse.RawDescriptionHelpFormatter,
  epilog=choices_description(app_cfg.plugins.plugins))
ap.add_argument('commands', choices=app_cfg.plugins.keys(), metavar='[command]')
# exit and how help if no command group supplied
if len(sys.argv) < 2:
  ap.print_help()
  sys.exit(1)
args = ap.parse_args()
sys.argv = argv_tmp
sys.argv.pop(1) # remove first argument (command group)
plugin_group = app_cfg.plugins.get(args.commands) # create plugin
# -----------------------------------------------------------------------------
#
# Click CLI
#
# -----------------------------------------------------------------------------
# @click.option('--pipe', 'opt_pipe', type=bool, default=plugin_group.pipe)
@click.group(chain=plugin_group.pipe, no_args_is_help=True, help=help_desc)
@click.pass_context
def cli(ctx, opt_pipe=True):
  """\033[1m\033[94mVFRAME\033[0m
  """
  # Root click group; chaining is enabled when the selected plugin group
  # declares itself pipeable.
  # print("plugin_group.pipe", plugin_group.pipe)
  # opt_pipe = plugin_group.pipe
  # verbosity 1-5, overridable via environment
  opt_verbosity = int(os.environ.get("VFRAME_VERBOSITY", 4)) # 1 - 5
  # store reference to opt_pipe for access in callback
  ctx.opts = {'opt_pipe': plugin_group.pipe}
  # store user object variables
  ctx.ensure_object(dict)
  ctx.obj['start_time'] = time.time()
  # init global logger
  log_utils.Logger.create(verbosity=opt_verbosity)
# def process_commands(processors, opt_pipe):
@cli.resultcallback()
def process_commands(processors):
  """This result callback is invoked with an iterable of all the chained
  subcommands. As in this example each subcommand returns a function
  we can chain them together to feed one into the other, similar to how
  a pipe on UNIX works. Copied from Click's docs.
  """
  # non-pipe plugin groups run their commands independently; nothing to chain
  if not plugin_group.pipe:
    return
  def sink():
    """This is the end of the pipeline
    """
    # terminal generator: swallows everything sent to it
    while True:
      yield
  sink = sink()
  sink.__next__()
  # Compose and prime processors
  # Build the pipeline back-to-front so each processor sends into the next.
  for processor in reversed(processors):
    sink = processor(sink)
    sink.__next__()
  sink.close()
# -----------------------------------------------------------------------------
#
# Setup commands
#
# -----------------------------------------------------------------------------
# append files to click groups
import vframe.utils.im_utils
# Discover every .py file in each plugin's command directory and register
# its `cli` object as a subcommand named after the file stem.
for plugin_script in plugin_group.scripts:
  fp_root = '/'.join(plugin_script.filepath.split('/')[:2]) # eg plugins/vframe_custom_plugin
  fp_root = join(app_cfg.DIR_SRC, fp_root)
  # print(fp_root)
  if not Path(fp_root).is_dir():
    print(f'{50 * "*"}\nWARNING: {fp_root} does not exist\n{50 * "*"}')
    continue
  # append plugin directory to import paths
  if fp_root not in sys.path:
    sys.path.append(fp_root)
  # glob for python files inside command directory
  fp_dir_glob = join(app_cfg.DIR_SRC, plugin_script.filepath, '*.py')
  for fp_py in iglob(fp_dir_glob):
    fn = Path(fp_py).stem
    # skip files starting with "_"
    if plugin_script.include_hidden is False and fn.startswith('_'):
      continue
    # convert the file path to a dotted module path for importlib
    fp_module = str(Path(fp_py).relative_to(Path(app_cfg.DIR_SRC)))
    fp_import = fp_module.replace('/', '.').replace('.py', '')
    try:
      module = importlib.import_module(fp_import)
      cli.add_command(module.cli, name=fn)
    except Exception as e:
      # a broken plugin must not take down the whole CLI; report and continue
      msg = f'Could not import "{fn}": {e}'
      print(f"{app_cfg.TERM_COLORS.FAIL}{msg}{app_cfg.TERM_COLORS.ENDC}")
# -----------------------------------------------------------------------------
#
# Start CLI application
#
# -----------------------------------------------------------------------------
cli()
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
29113,
29113,
7804,
4242,
2,
198,
2,
198,
2,
569,
10913,
10067,
198,
2,
17168,
13789,
198,
2,
15069,
357,
66,
8,
12131,
7244,
14943,
290,
569,
10913,
10067,
198,
2,
3740,
1378,
... | 2.974298 | 1,673 |
import os
import io
import json
from google.cloud import vision
from google.protobuf.json_format import MessageToJson
from joblib import Parallel, delayed
import argparse
# NOTE(review): credentials path is intentionally blank here — must be set
# before the Vision client below can authenticate.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = ""
client = vision.ImageAnnotatorClient()
if __name__ == '__main__':
    # construct the argument parse and parse the arguments
    parser = argparse.ArgumentParser(description='Page Detection')
    parser.add_argument('--imgdir', type=str)
    parser.add_argument('--outputdir', type=str)
    args = parser.parse_args()
    #create output file
    if not os.path.isdir(args.outputdir):
        os.mkdir(args.outputdir)
        print('creating directory ' + args.outputdir)
    # drop hidden entries such as .DS_Store
    clean_names = lambda x: [i for i in x if i[0] != '.']
    imgdir = os.listdir(args.imgdir)
    imgdir = sorted(clean_names(imgdir))
    # pair each input sub-directory with its output counterpart
    outputdir = [os.path.join(args.outputdir, dir) for dir in imgdir]
    imgdir = [os.path.join(args.imgdir, dir) for dir in imgdir]
    # NOTE(review): `main` is defined elsewhere in this module (not visible
    # here); n_jobs=1 runs the pairs sequentially.
    Parallel(n_jobs=1)(map(delayed(main), imgdir, outputdir))
11748,
28686,
198,
11748,
33245,
198,
11748,
33918,
198,
6738,
23645,
13,
17721,
1330,
5761,
198,
6738,
23645,
13,
11235,
672,
3046,
13,
17752,
62,
18982,
1330,
16000,
2514,
41,
1559,
198,
6738,
1693,
8019,
1330,
42945,
11,
11038,
198,
... | 2.77628 | 371 |
import discord
from discord.ext import commands
from discord.utils import get | [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
36446,
13,
26791,
1330,
651
] | 4.8125 | 16 |
import sys
from Util import write_list_to_file
from ProgramParser import ProgramParser
# Script entry point; main() is defined elsewhere in this module (not
# visible in this excerpt).
if __name__ == '__main__':
    main()
| [
11748,
25064,
198,
6738,
7273,
346,
1330,
3551,
62,
4868,
62,
1462,
62,
7753,
198,
6738,
6118,
46677,
1330,
6118,
46677,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 3.121951 | 41 |
from rest_framework.serializers import ModelSerializer
from .models import Hint
| [
6738,
1334,
62,
30604,
13,
46911,
11341,
1330,
9104,
32634,
7509,
198,
6738,
764,
27530,
1330,
367,
600,
628
] | 4.263158 | 19 |
"""
此脚本负责 - 手工运行游戏或者用AI运行游戏
"""
import sys
import cv2
from utils.util import create_image_from_state
from game.confs import Action_Type, Block_Type, Confs
from game.tetris_engine import tetris_engine
import json
import numpy as np
# Entry point: run the game with the AI agent using a pre-trained Q-table.
# Swap in human_play() for manual play.
if __name__ == "__main__":
    # human_play()
    ai_play("outputs/q_3600000.json")
    sys.exit(0)
| [
37811,
201,
198,
29826,
97,
164,
226,
248,
17312,
105,
164,
112,
253,
164,
112,
96,
532,
10545,
231,
233,
32432,
98,
32573,
238,
26193,
234,
162,
116,
116,
22755,
237,
22755,
244,
38519,
18796,
101,
20185,
32573,
238,
26193,
234,
162,... | 1.960894 | 179 |
from smpr3d.util import *
from smpr3d.algorithm import *
from smpr3d.setup import *
import torch as th
import os
import numpy as np
# salloc -C gpu -N 2 -t 30 -c 10 --gres=gpu:8 -A m1759 --ntasks-per-node=8
# srun -N 2 python ./admm_smatrix_dist_pytorch.py
# module purge
# module load pytorch/v1.4.0-gpu
# module list
# Currently Loaded Modulefiles:
# 1) esslurm 2) modules/3.2.11.1 3) cuda/10.1.168 4) nccl/2.5.6
# Top-level driver for an S-matrix ptychographic reconstruction.
# Configuration, data loading, variable setup and the solver call run in
# strict sequence; most helpers come from smpr3d (star-imported above).
args = Param()
args.io = Param()
args.io.path = '/home/philipp/drop/Public/nesap_hackathon/'
# args.io.path = '../Inputs/'
# Under SLURM (e.g. Cori), switch to the scratch filesystem.
if os.environ.get('SLURM_PROCID') is not None:
    args.io.path = '/global/cscratch1/sd/pelzphil/'
args.io.summary_log_dir = args.io.path + 'log/'
args.io.logname = 'atoms_aberrations52_2'
args.io.filename_data = 'atoms_aberrations52_2.h5'
summary = setup_logging(args.io.path, args.io.logname)
args.dist_backend = 'mpi' # 'mpi'
args.dist_init_method = f'file://{args.io.path}sharedfile'
args.node_config = configure_node(args.dist_backend, args.dist_init_method)
args.beam_threshold_percent = 5e-3
args.max_phase_error = np.pi / 64
args.use_full_smatrix = True
args.uniform_initial_intensity = False
dC1 = 30
# %% load data
i = 0
args.io.filename_results = f'random4_dC{dC1}perc_res_{i + 5:03d}.h5'
world_size = args.node_config.world_size
rank = args.node_config.rank
device = args.node_config.device
lam, alpha_rad, C, dx, specimen_thickness_angstrom, vacuum_probe, D, K, K_rank, MY, MX, NY, NX, \
fy, fx, detector_shape, r, I_target, y_max, x_max, y_min, x_min, S_sol, Psi_sol, r_sol = load_smatrix_data_list2(
    args.io.path + args.io.filename_data, device, rank, world_size, subset=[0, 1, 2, 3])
# dx = 1/2/dx
# convert wavelength to Angstrom (presumably loaded in meters — confirm)
lam *= 1e10
# %% define data-dependent variables
# Fourier space grid on detector
qnp = fourier_coordinates_2D([MY, MX], dx.numpy(), centered=False)
q = th.as_tensor(qnp, device=device)
q2 = th.as_tensor(np.linalg.norm(qnp, axis=0) ** 2, device=device)
# initial aperture amplitude
A_init = initial_probe_amplitude(vacuum_probe, I_target, world_size, rank)
# mask which beams to include in the S-matrix input channels
take_beams = vacuum_probe > args.beam_threshold_percent
B, B_tile, tile_order, beam_numbers, tile_map = prepare_beam_parameters(take_beams, q2, specimen_thickness_angstrom,
                                                                       alpha_rad * 1.1, lam, args.max_phase_error,
                                                                       args.use_full_smatrix, device)
# shape of reconstruction variables
S_shape = (B_tile, NY, NX)
Psi_shape = (D, MY, MX)
z_shape = tuple(I_target.shape)
# map of convergence angles
alpha = q.norm(dim=0) * lam
beam_alphas = th.zeros_like(take_beams, dtype=th.float32, device=device) * -1
beam_alphas[take_beams] = alpha[take_beams]
alpha_map = beam_alphas[take_beams]
# %%
print(specimen_thickness_angstrom)
S0, depth_init = initial_smatrix(S_shape, beam_numbers, device, is_unitary=True, include_plane_waves=B == B_tile,
                                 initial_depth=specimen_thickness_angstrom, lam=lam, q2=q2,
                                 is_pinned=False)
tile_numbers = beam_numbers[beam_numbers >= 0]
# rebuild beam_numbers on CPU: -1 marks excluded beams, 0..B-1 the kept ones
beam_numbers = th.ones_like(take_beams).cpu().long() * -1
beam_numbers[take_beams] = th.arange(B)
# %% define S-matrix forward and adjoint operators
from smpr3d.operators import A as A1, AH_S as AH_S1
r_min = th.zeros(2, device=device)
AH_Psi = None
AH_r = None
# measured amplitudes (sqrt of target intensities)
a = th.sqrt(I_target)
report_smatrix_parameters(rank, world_size, a, S0, B, D, K, MY, MX, NY, NX, fy, fx, B_tile, K_rank,
                          specimen_thickness_angstrom, depth_init, y_max, x_max, y_min, x_min)
if world_size == 1:
    plot(take_beams.cpu().float().numpy(), 'take_beams')
    plot(np.fft.fftshift(beam_numbers.cpu().float().numpy()), 'aperture_tiling', cmap='gist_ncar')
# else:
#     dist.barrier()
# %% define initial probes
C_target = C.to(device)
C_target[1] = 10
print('C_target:', C_target)
C_model = th.zeros(12, D).to(device)
C_model[:] = C_target
# define data-dependent variables
# Fourier space grid on detector
detector_shape = np.array([MY, MX])
qnp = fourier_coordinates_2D([MY, MX], dx.numpy(), centered=False)
q = th.as_tensor(qnp, device=device)
q2 = th.as_tensor(np.linalg.norm(qnp, axis=0) ** 2, device=device)
# initial aperture amplitude
Ap0 = vacuum_probe
# del I_target
# mask which beams to include in the S-matrix input channels
# take_beams = vacuum_probe > args.beam_threshold_percent / 100
Psi_gen = ZernikeProbe2(q, lam, fft_shifted=True)
Psi_target = Psi_gen(C_target, Ap0).detach()
Psi_model = Psi_gen(C_model, Ap0).detach()
psi_model = th.fft.ifft2(Psi_model, norm='ortho')
cb = fftshift_checkerboard(MY // 2, MX // 2)
fpr1 = Psi_target[0].cpu().numpy()
pr1 = np.fft.ifft2(fpr1, norm='ortho')
fpr2 = Psi_model[0].cpu().numpy()
pr2 = np.fft.ifft2(fpr2, norm='ortho')
from smpr3d.core import SMeta
s_meta = SMeta(take_beams, dx, S_shape, MY, MX, device)
print(s_meta.q_dft)
# report_initial_probes(summary, rank, world_size, Psi_model, psi_model, C_model, specimen_thickness_angstrom, q, lam,
#                      alpha_rad)
# %% perform reconstruction
# m = [MY, MX]
# plotAbsAngle(complex_numpy(S_sol[0, m[0]:-m[0], m[1]:-m[1]].cpu()), f'S_sol[{0}]')
# Solver hyper-parameters (step sizes tau_*, iteration budget, verbosity).
args.reconstruction_opts = Param()
args.reconstruction_opts.max_iters = 100
args.reconstruction_opts.beta = 1.0
args.reconstruction_opts.tau_S = 2e-3
args.reconstruction_opts.tau_Psi = 1e6
args.reconstruction_opts.tau_r = 8e-3
# probe optimization only kicks in after 1000 iterations (never, at max_iters=100)
args.reconstruction_opts.optimize_psi = lambda i: i > 1e3
args.reconstruction_opts.node_config = args.node_config
args.reconstruction_opts.verbose = 2
r0 = r
Psi0 = Psi_sol
(S_n, Psi_n, C_n, r_n), outs, opts = fasta2(s_meta, A, AH_S, AH_Psi, AH_r, prox_D_gaussian, Psi_gen, a, S0, Psi0,
                                            C_model, Ap0, r0, args.reconstruction_opts, S_sol=S_sol, Psi_sol=Psi_sol,
                                            r_sol=r_sol, summary=summary)
# save_results(rank, S_n, Psi_n, C_n, r_n, outs, S_sol, Psi_sol, r_sol, beam_numbers, tile_map, alpha_map, A.coords, A.inds,
#             take_beams, lam, alpha_rad, dx, specimen_thickness_angstrom, args.io.path + args.io.filename_results)
# if world_size > 1:
#     dist.barrier()
#     dist.destroy_process_group()
# %%
# plotcx(S_n[2])
| [
6738,
895,
1050,
18,
67,
13,
22602,
1330,
1635,
198,
6738,
895,
1050,
18,
67,
13,
282,
42289,
1330,
1635,
198,
6738,
895,
1050,
18,
67,
13,
40406,
1330,
1635,
198,
11748,
28034,
355,
294,
198,
11748,
28686,
198,
11748,
299,
32152,
3... | 2.170673 | 2,912 |
import csv
from collections import Counter
from crispy_forms.utils import render_crispy_form
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.db.models import Sum, Count
from django.forms.models import model_to_dict
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.template.context_processors import csrf
from django.urls import reverse_lazy
from django.views import View
from django.views.generic import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from django_countries import countries
from .models import *
from .forms import *
from .plots import cohort_countries, get_partner_counts, breakdown_per_partner, \
choropleth_map, get_partner_sum, meta_plot
| [
11748,
269,
21370,
198,
6738,
17268,
1330,
15034,
198,
198,
6738,
42807,
62,
23914,
13,
26791,
1330,
8543,
62,
66,
2442,
9078,
62,
687,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
... | 3.475096 | 261 |
import tensorflow as tf
import cv2
from imageio import mimsave
from IPython.display import display as display_fn
from IPython.display import Image
def load_img(path_to_img):
    """Read an image from disk and return it as a batched uint8 tensor
    whose longer side is scaled to 512 pixels.

    :param path_to_img (str): directory path to image
    :return: tf.Tensor of shape (1, H, W, 3), dtype uint8
    """
    target_long_side = 512

    # OpenCV loads BGR; convert to RGB before handing off to TensorFlow.
    bgr = cv2.imread(path_to_img)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    image = tf.image.convert_image_dtype(rgb, dtype=tf.float32)

    # Scale so the longer of (H, W) becomes target_long_side.
    hw = tf.cast(tf.shape(image)[:-1], tf.float32)
    resize_factor = target_long_side / max(hw)
    scaled_hw = tf.cast(hw * resize_factor, tf.int32)
    image = tf.image.resize(image, scaled_hw)

    # Prepend a batch dimension and go back to uint8.
    image = image[tf.newaxis, :]
    return tf.image.convert_image_dtype(image, tf.uint8)
def tensor_to_img(tensor):
    """Convert a (possibly singleton-batched) tensor into a PIL image.

    :param tensor: image tensor, optionally with a leading batch axis of size 1
    :return: PIL image produced by Keras' array_to_img
    """
    rank = tf.shape(tf.shape(tensor))
    if rank > 3:
        # Only singleton batches can be squeezed down to a single image.
        assert tf.shape(tensor)[0] == 1
        tensor = tensor[0]
    return tf.keras.preprocessing.image.array_to_img(tensor)
def display_gif(gif_path):
    """Display the generated images as an animated gif (inline, via IPython)."""
    with open(gif_path, 'rb') as handle:
        payload = handle.read()
    display_fn(Image(data=payload, format='png'))
def create_gif(gif_path, images):
    """Write *images* out as a 1-fps animated gif and return its path."""
    frames_per_second = 1
    mimsave(gif_path, images, fps=frames_per_second)
    return gif_path
11748,
11192,
273,
11125,
355,
48700,
201,
198,
11748,
269,
85,
17,
201,
198,
6738,
2939,
952,
1330,
285,
12078,
1015,
201,
198,
6738,
6101,
7535,
13,
13812,
1330,
3359,
355,
3359,
62,
22184,
201,
198,
6738,
6101,
7535,
13,
13812,
133... | 2.281065 | 676 |
import yaml
# Module-level configuration object. `Conf` is defined elsewhere in this
# project (not visible in this chunk) — presumably it loads the YAML settings
# imported above; TODO confirm.
c = Conf()
| [
11748,
331,
43695,
628,
198,
66,
796,
7326,
3419,
198
] | 2.5 | 10 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
import torchvision.models
from neuralnets.mobilenet_v1 import MobileNet
from neuralnets.modelcomponents import *
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
2,
460,
779,
262,
2174,
1330,
815,
345,
3853,
284,
41216,
262,
19590,
286,
534,
3433... | 3.690476 | 84 |
"""Data loader."""
import torch
from torch.utils.data.sampler import RandomSampler
from .kinetics import Kinetics
# Registry of supported datasets: maps the cfg.TRAIN.DATASET name string to
# the dataset class that constructs it.
_DATASETS = {
    "kinetics": Kinetics,
}
def construct_loader(cfg, split):
    """
    Construct the torch data loader for the configured dataset.

    Args:
        cfg (CfgNode): configs. Details can be found in
            vml/config/defaults.py
        split (str): the split of the data loader. Only `train` and `val`
            are supported; `train` is shuffled, `val` is not.
            (The original docstring also mentioned `test`, but the assert
            below has never allowed it.)

    Returns:
        torch.utils.data.DataLoader over the requested split.

    Raises:
        AssertionError: if `split` is not `train` or `val`.
        KeyError: if cfg.TRAIN.DATASET is not a registered dataset name.
    """
    assert split in ["train", "val"]
    dataset_name = cfg.TRAIN.DATASET
    batch_size = cfg.TRAIN.BATCH_SIZE
    # Only the training split is shuffled.
    shuffle = split == "train"
    # Construct the dataset
    dataset = _DATASETS[dataset_name](cfg, split)
    # Create a loader
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=cfg.DATA_LOADER.NUM_WORKERS,
        pin_memory=True,
        drop_last=False,
    )
    return loader
return loader
| [
37811,
6601,
40213,
526,
15931,
628,
198,
11748,
28034,
198,
198,
6738,
28034,
13,
26791,
13,
7890,
13,
37687,
20053,
1330,
14534,
16305,
20053,
198,
6738,
764,
5116,
14596,
1330,
16645,
14596,
198,
198,
62,
35,
1404,
1921,
32716,
796,
... | 2.370115 | 435 |
import sqlite3
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# Set the file paths
db_path ='D:/Workspace-Github/saproject/data/foursquare.db'

# connect and write to database
conn = sqlite3.connect(db_path)
try:
    c = conn.cursor()
    c.execute('SELECT tid, tip FROM tips;')
    results = c.fetchall()

    # The analyzer is stateless: build it once instead of once per row
    # (the original re-instantiated it inside the loop).
    sid = SentimentIntensityAnalyzer()
    for tid, tip in results:
        # VADER's compound score is in [-1, 1]; shift/scale to a 0-10 rating
        # (the original comment claimed 1-10, which was off by the lower bound).
        rating = round((sid.polarity_scores(tip)['compound'] + 1) * 5, 2)
        c.execute("""UPDATE tips SET senti_score = ? WHERE tid = ?;""", (rating, tid))
    conn.commit()
finally:
    # Close even if the SELECT/UPDATE raises, so the DB handle is released.
    conn.close()
| [
11748,
44161,
578,
18,
201,
198,
6738,
299,
2528,
74,
13,
34086,
3681,
13,
85,
5067,
1330,
11352,
3681,
5317,
6377,
37702,
9107,
201,
198,
201,
198,
2,
5345,
262,
2393,
13532,
201,
198,
9945,
62,
6978,
796,
6,
35,
14079,
23044,
1022... | 2.552632 | 228 |
import pyarrow as pa
import pyarrow.flight as flight
import base64
import cmd
import json
import struct
import sys
from time import sleep, time
# Re-enable Python signal handling inside blocking Arrow calls so Ctrl-C can
# interrupt long Flight operations — TODO confirm against pyarrow docs.
pa.enable_signal_handlers(True)
def wait_for_connection(client):
    """Perform a blocking check that a connection can be made to the server.

    Blocks for up to 5 seconds. Exits the process (status 1) if the server is
    unreachable; an unauthenticated error counts as a successful probe.
    """
    try:
        client.wait_for_available(5)
        print(f"Connected")
    except Exception as e:
        if type(e) is not flight.FlightUnauthenticatedError:
            # NOTE(review): `location` is a free variable bound only in the
            # __main__ block below — reaching this path from an importing
            # module would raise NameError. Consider passing it as a parameter.
            print(f"⁉ Failed to connect to {location}: {e.args}")
            sys.exit(1)
        else:
            # The server answered but demands credentials: connectivity is OK.
            print("Server requires auth, but connection possible")
def get_actions(client, options={}):
    """Discover available actions on the server.

    Accepts either a dict of header pairs or a prebuilt FlightCallOptions.
    """
    if type(options) is dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    actions = list(client.list_actions(options=options))
    if actions:
        print(f"💥 Found {len(actions)} actions!")
    else:
        print("Found zero actions 😕")
    return actions
def cypher_read(client, cypher, params={}, options={}):
    """Submit a cypherRead action and get a flight ticket."""
    if type(options) is dict:
        options = flight.FlightCallOptions(headers=list(options.items()))

    query_utf8 = cypher.encode("utf8")
    params_utf8 = json.dumps(params).encode("utf8")

    # Our CypherMessage format is simple:
    #  - 16 bit unsigned length of the cypher byte string
    #  - the cypher byte string payload
    #  - 16 bit unsigned length of the param json payload
    #  - the param json byte string payload
    layout = f"!H{len(query_utf8)}sH{len(params_utf8)}s"
    message = struct.pack(layout,
                          len(query_utf8), query_utf8,
                          len(params_utf8), params_utf8)

    try:
        responses = client.do_action(("cypherRead", message), options=options)
        return pa.flight.Ticket.deserialize(next(responses).body.to_pybytes())
    except Exception as e:
        print(f"⚠ submit_cypher_read: {e}")
        sys.exit(1)
def gds_read_node_prop(client, params={}, options={}):
    """Submit a gdsNodeProperties action and get a flight ticket.

    (The original docstring and error label said "cypherRead" /
    "submit_cypher_read" — copy-paste leftovers from cypher_read.)

    Args:
        client: a flight.FlightClient connected to the server.
        params: dict of request parameters, JSON-serialized onto the wire.
        options: dict of header pairs or a prebuilt FlightCallOptions.

    Returns:
        pa.flight.Ticket identifying the produced flight; exits the
        process with status 1 on failure.
    """
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    params_bytes = json.dumps(params).encode("utf8")
    ticket = None
    try:
        results = client.do_action(("gdsNodeProperties", params_bytes), options=options)
        ticket = pa.flight.Ticket.deserialize((next(results).body.to_pybytes()))
    except Exception as e:
        # Fixed: error message previously mislabeled this as submit_cypher_read.
        print(f"⚠ gds_read_node_prop: {e}")
        sys.exit(1)
    return ticket
def check_flight_status(client, ticket, options):
    """Check on a flight's status given a particular Ticket."""
    if type(options) is dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    # Accept either a Ticket object or its already-serialized bytes.
    payload = ticket.serialize() if type(ticket) is pa.flight.Ticket else ticket
    try:
        replies = client.do_action(("jobStatus", payload), options=options)
        return next(replies).body.to_pybytes().decode("utf8")
    except Exception as e:
        print(f"⚠ check_flight_status: {e}")
        sys.exit(1)
def list_flights(client, options={}):
    """List all available flights.

    TODO: unimplemented stub — it normalizes `options` but never calls
    client.list_flights(), and always returns None.
    """
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    pass
def get_flight_info(client, ticket, options):
    """Find a flight based on the given ticket.

    Returns the FlightInfo, or None (after printing a warning) on failure.
    """
    if type(options) is dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    # Accept either a Ticket object or its already-serialized bytes.
    if type(ticket) is pa.flight.Ticket:
        payload = ticket.serialize()
    else:
        payload = ticket
    descriptor = pa.flight.FlightDescriptor.for_command(payload)
    try:
        return client.get_flight_info(descriptor, options=options)
    except Exception as e:
        print(f"⚠ get_flight_info: {e}")
        return None
def stream_flight(client, ticket, options):
    """Stream back a given flight, assuming it's ready to stream.

    Prints per-chunk progress and a final row count / throughput summary.
    """
    if type(options) is dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    reader = client.do_get(ticket, options=options)
    started = time()
    total = 0
    for chunk, metadata in reader:
        total += chunk.num_rows
        print(f"Current Row @ {total:,}:\t[fields: {chunk.schema.names}, rows: {chunk.num_rows:,}]")
    elapsed = time() - started
    print(f"Done! Time Delta: {round(elapsed, 1):,}s")
    print(f"Count: {total:,} rows, Rate: {round(total / elapsed):,} rows/s")
##############################################################################
if __name__ == "__main__":
    location = build_location()
    client = flight.FlightClient(location)
    print(f"Trying to connect to location {location}")
    wait_for_connection(client)

    # TODO: username/password args? env?
    # NOTE(review): credentials are hard-coded; move to env vars/CLI args.
    options = flight.FlightCallOptions(headers=[
        (b'authorization', b'Basic ' + base64.b64encode(b'neo4j:password'))
    ])

    def _await_and_stream(ticket):
        """Poll the given ticket until the flight reports PRODUCING, fetch its
        FlightInfo, then stream the result. (Shared by both demo jobs below —
        previously duplicated verbatim.)"""
        print("Waiting for flight to be available...")
        for i in range(1, 10):
            status = check_flight_status(client, ticket, options)
            print(f"  status: {status}")
            if status == "PRODUCING":
                break
            else:
                sleep(3)
        print("Flight ready! Getting flight info...")
        info = None
        while info is None:
            sleep(3)
            try:
                info = get_flight_info(client, ticket, options)
            except Exception as e:
                print(f"failed to get flight info...retrying in 5s")
                sleep(5)
        print(f"Got info on our flight: {info}")
        print("Boarding flight and getting stream...")
        stream_flight(client, ticket, options)

    print(f"Enumerating available actions from location {location}")
    for action in get_actions(client, options):
        print(f"  {action}")

    # TODO: user-supplied cypher/params
    print("Submitting a read cypher action/job using:")
    cypher = """
    UNWIND range(1, $rows) AS row
    RETURN row, [_ IN range(1, $dimension) | rand()] as fauxEmbedding
    """
    params = {"rows": 1_000_000, "dimension": 128}
    print(f"  cypher: {cypher}")
    print(f"  params: {params}")

    ticket = cypher_read(client, cypher, params, options)
    print(f"Got ticket: {ticket}")
    _await_and_stream(ticket)

    ### GDS
    print("----------------------------------------------------------------")
    # NOTE(review): the "graphName:" key has a trailing colon — looks like a
    # typo, but it is what the server currently receives; confirm before fixing.
    gds_params = {
        "dbName": "neo4j",
        "graphName:": "mygraph",
        "filters": [],
        "properties": ["n"],
    }
    print(f"Submitting GDS node properties request:\n{gds_params}")
    ticket = gds_read_node_prop(client, gds_params, options)
    print(f"Got ticket: {ticket}")
    _await_and_stream(ticket)
| [
11748,
12972,
6018,
355,
14187,
198,
11748,
12972,
6018,
13,
22560,
355,
5474,
198,
11748,
2779,
2414,
198,
11748,
23991,
198,
11748,
33918,
198,
11748,
2878,
198,
11748,
25064,
198,
6738,
640,
1330,
3993,
11,
640,
198,
198,
8957,
13,
2... | 2.535268 | 2,963 |
# Generated by Django 4.0.3 on 2022-03-11 21:29
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
604,
13,
15,
13,
18,
319,
33160,
12,
3070,
12,
1157,
2310,
25,
1959,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
#!/usr/bin/env python3
import unittest
import numpy as np
from sympy import Eq, symbols
from main import sympy_simplex, LP
# Example LP instances passed as LP(A, b, c, initial-basis indices); LP and
# sympy_simplex come from the local `main` module — presumably coursework
# exercises for a simplex implementation; TODO confirm LP's exact signature.
aufgabe1 = LP( # Blatt 2 ("Blatt" = exercise sheet; "Aufgabe" = exercise)
    np.matrix('2 0 6; -2 8 4; 3 6 5'),
    np.matrix('10; 12; 20'),
    np.matrix('2; 1; 3; 0; 0; 0'),
    [4, 5, 6])

# Worked example from the course book, page 31.
kreise_example = LP( # Book Page 31
    np.matrix('-0.5 -5.5 -2.5 9; 0.5 -1.5 -0.5 1; 1 0 0 0'), # A
    np.matrix('0; 0; 1'), # b
    np.matrix('10; -57; -9; -24; 0; 0; 0'), # c
    [5, 6, 7]
)

if __name__ == '__main__':
    # NOTE(review): no TestCase subclasses are visible in this chunk;
    # unittest.main() presumably discovers tests defined above/imported.
    unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
10558,
88,
1330,
412,
80,
11,
14354,
198,
198,
6738,
1388,
1330,
10558,
88,
62,
14323,
11141,
11,
18470,
... | 1.950943 | 265 |
import copy
import json
import logging
import os
from xia2.Driver.DriverFactory import DriverFactory
from xia2.Handlers.Phil import PhilIndex
# Module-scoped logger, named after this wrapper's dotted module path.
logger = logging.getLogger("xia2.Wrappers.Dials.RefineBravaisSettings")
def RefineBravaisSettings(DriverType=None):
    """A factory for RefineBravaisSettingsWrapper classes."""
    # NOTE(review): DriverInstance is created but never used, and
    # RefineBravaisSettingsWrapper is not defined anywhere in this chunk.
    # Upstream xia2 factories of this shape define a wrapper class inside the
    # factory that inherits from DriverInstance.__class__ — this copy looks
    # truncated; calling it as-is would raise NameError. TODO confirm.
    DriverInstance = DriverFactory.Driver(DriverType)
    return RefineBravaisSettingsWrapper()
| [
11748,
4866,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
6738,
2124,
544,
17,
13,
32103,
13,
32103,
22810,
1330,
12434,
22810,
198,
6738,
2124,
544,
17,
13,
12885,
8116,
13,
18673,
1330,
4543,
15732,
198,
198,
640... | 3.271318 | 129 |
# Converted "manually" from EMSABTAG.H
from mapitags import PT_UNSPECIFIED, PT_NULL, PT_I2, PT_LONG, PT_R4, \
PT_DOUBLE, PT_CURRENCY, PT_APPTIME, PT_ERROR, \
PT_BOOLEAN, PT_OBJECT, PT_I8, PT_STRING8, PT_UNICODE, \
PT_SYSTIME, PT_CLSID, PT_BINARY, PT_SHORT, PT_I4, \
PT_FLOAT, PT_DOUBLE, PT_LONGLONG, PT_TSTRING, \
PT_MV_I2, PT_MV_LONG, PT_MV_R4, PT_MV_DOUBLE, \
PT_MV_CURRENCY, PT_MV_APPTIME, PT_MV_SYSTIME, \
PT_MV_STRING8, PT_MV_BINARY, PT_MV_UNICODE, \
PT_MV_CLSID, PT_MV_I8, PT_MV_SHORT, PT_MV_I4, \
PT_MV_FLOAT, PT_MV_R8, PT_MV_LONGLONG, PT_MV_TSTRING, \
PROP_TAG
# Address-book hierarchy display flags.
AB_SHOW_PHANTOMS = 2
AB_SHOW_OTHERS = 4
# Flags for ulFlag on ResolveNames
EMS_AB_ADDRESS_LOOKUP = 1
# Constructed, but externally visible.
# Property IDs 65528-65534 (0xFFF8-0xFFFE) are synthesized by the provider;
# _A = ANSI (PT_STRING8) and _W = wide (PT_UNICODE) variants of the base tag.
PR_EMS_AB_SERVER = PROP_TAG( PT_TSTRING, 65534)
PR_EMS_AB_SERVER_A = PROP_TAG( PT_STRING8, 65534)
PR_EMS_AB_SERVER_W = PROP_TAG( PT_UNICODE, 65534)
PR_EMS_AB_CONTAINERID = PROP_TAG( PT_LONG, 65533)
PR_EMS_AB_DOS_ENTRYID = PR_EMS_AB_CONTAINERID
PR_EMS_AB_PARENT_ENTRYID = PROP_TAG( PT_BINARY, 65532)
PR_EMS_AB_IS_MASTER = PROP_TAG(PT_BOOLEAN, 65531)
PR_EMS_AB_OBJECT_OID = PROP_TAG(PT_BINARY, 65530)
PR_EMS_AB_HIERARCHY_PATH = PROP_TAG(PT_TSTRING, 65529)
PR_EMS_AB_HIERARCHY_PATH_A = PROP_TAG(PT_STRING8, 65529)
PR_EMS_AB_HIERARCHY_PATH_W = PROP_TAG(PT_UNICODE, 65529)
PR_EMS_AB_CHILD_RDNS = PROP_TAG(PT_MV_STRING8, 65528)
# Lowest property ID in the constructed range above.
MIN_EMS_AB_CONSTRUCTED_PROP_ID = 65528
PR_EMS_AB_OTHER_RECIPS = PROP_TAG(PT_OBJECT, 61440)
# Prop tags defined in the schema.
PR_EMS_AB_DISPLAY_NAME_PRINTABLE = PROP_TAG(PT_TSTRING, 14847)
PR_EMS_AB_DISPLAY_NAME_PRINTABLE_A = PROP_TAG(PT_STRING8, 14847)
PR_EMS_AB_DISPLAY_NAME_PRINTABLE_W = PROP_TAG(PT_UNICODE, 14847)
PR_EMS_AB_ACCESS_CATEGORY = PROP_TAG( PT_LONG, 32836)
PR_EMS_AB_ACTIVATION_SCHEDULE = PROP_TAG( PT_BINARY, 32837)
PR_EMS_AB_ACTIVATION_STYLE = PROP_TAG( PT_LONG, 32838)
PR_EMS_AB_ADDRESS_ENTRY_DISPLAY_TABLE = PROP_TAG( PT_BINARY, 32791)
PR_EMS_AB_ADDRESS_ENTRY_DISPLAY_TABLE_MSDOS = PROP_TAG( PT_BINARY, 32839)
PR_EMS_AB_ADDRESS_SYNTAX = PROP_TAG( PT_BINARY, 32792)
PR_EMS_AB_ADDRESS_TYPE = PROP_TAG( PT_TSTRING, 32840)
PR_EMS_AB_ADDRESS_TYPE_A = PROP_TAG( PT_STRING8, 32840)
PR_EMS_AB_ADDRESS_TYPE_W = PROP_TAG( PT_UNICODE, 32840)
PR_EMS_AB_ADMD = PROP_TAG( PT_TSTRING, 32841)
PR_EMS_AB_ADMD_A = PROP_TAG( PT_STRING8, 32841)
PR_EMS_AB_ADMD_W = PROP_TAG( PT_UNICODE, 32841)
PR_EMS_AB_ADMIN_DESCRIPTION = PROP_TAG( PT_TSTRING, 32842)
PR_EMS_AB_ADMIN_DESCRIPTION_A = PROP_TAG( PT_STRING8, 32842)
PR_EMS_AB_ADMIN_DESCRIPTION_W = PROP_TAG( PT_UNICODE, 32842)
PR_EMS_AB_ADMIN_DISPLAY_NAME = PROP_TAG( PT_TSTRING, 32843)
PR_EMS_AB_ADMIN_DISPLAY_NAME_A = PROP_TAG( PT_STRING8, 32843)
PR_EMS_AB_ADMIN_DISPLAY_NAME_W = PROP_TAG( PT_UNICODE, 32843)
PR_EMS_AB_ADMIN_EXTENSION_DLL = PROP_TAG( PT_TSTRING, 32844)
PR_EMS_AB_ADMIN_EXTENSION_DLL_A = PROP_TAG( PT_STRING8, 32844)
PR_EMS_AB_ADMIN_EXTENSION_DLL_W = PROP_TAG( PT_UNICODE, 32844)
PR_EMS_AB_ALIASED_OBJECT_NAME = PROP_TAG( PT_TSTRING, 32845)
PR_EMS_AB_ALIASED_OBJECT_NAME_A = PROP_TAG( PT_STRING8, 32845)
PR_EMS_AB_ALIASED_OBJECT_NAME_W = PROP_TAG( PT_UNICODE, 32845)
PR_EMS_AB_ALIASED_OBJECT_NAME_O = PROP_TAG( PT_OBJECT, 32845)
PR_EMS_AB_ALIASED_OBJECT_NAME_T = PROP_TAG( PT_TSTRING, 32845)
PR_EMS_AB_ALT_RECIPIENT = PROP_TAG( PT_TSTRING, 32846)
PR_EMS_AB_ALT_RECIPIENT_A = PROP_TAG( PT_STRING8, 32846)
PR_EMS_AB_ALT_RECIPIENT_W = PROP_TAG( PT_UNICODE, 32846)
PR_EMS_AB_ALT_RECIPIENT_O = PROP_TAG( PT_OBJECT, 32846)
PR_EMS_AB_ALT_RECIPIENT_T = PROP_TAG( PT_TSTRING, 32846)
PR_EMS_AB_ALT_RECIPIENT_BL = PROP_TAG( PT_MV_TSTRING, 32847)
PR_EMS_AB_ALT_RECIPIENT_BL_A = PROP_TAG( PT_MV_STRING8, 32847)
PR_EMS_AB_ALT_RECIPIENT_BL_W = PROP_TAG( PT_MV_UNICODE, 32847)
PR_EMS_AB_ALT_RECIPIENT_BL_O = PROP_TAG( PT_OBJECT, 32847)
PR_EMS_AB_ALT_RECIPIENT_BL_T = PROP_TAG( PT_MV_TSTRING, 32847)
PR_EMS_AB_ANCESTOR_ID = PROP_TAG( PT_BINARY, 32848)
PR_EMS_AB_ASSOC_NT_ACCOUNT = PROP_TAG( PT_BINARY, 32807)
PR_EMS_AB_ASSOC_REMOTE_DXA = PROP_TAG( PT_MV_TSTRING, 32849)
PR_EMS_AB_ASSOC_REMOTE_DXA_A = PROP_TAG( PT_MV_STRING8, 32849)
PR_EMS_AB_ASSOC_REMOTE_DXA_W = PROP_TAG( PT_MV_UNICODE, 32849)
PR_EMS_AB_ASSOC_REMOTE_DXA_O = PROP_TAG( PT_OBJECT, 32849)
PR_EMS_AB_ASSOC_REMOTE_DXA_T = PROP_TAG( PT_MV_TSTRING, 32849)
PR_EMS_AB_ASSOCIATION_LIFETIME = PROP_TAG( PT_LONG, 32850)
PR_EMS_AB_AUTH_ORIG_BL = PROP_TAG( PT_MV_TSTRING, 32851)
PR_EMS_AB_AUTH_ORIG_BL_A = PROP_TAG( PT_MV_STRING8, 32851)
PR_EMS_AB_AUTH_ORIG_BL_W = PROP_TAG( PT_MV_UNICODE, 32851)
PR_EMS_AB_AUTH_ORIG_BL_O = PROP_TAG( PT_OBJECT, 32851)
PR_EMS_AB_AUTH_ORIG_BL_T = PROP_TAG( PT_MV_TSTRING, 32851)
PR_EMS_AB_AUTHORITY_REVOCATION_LIST = PROP_TAG( PT_MV_BINARY, 32806)
PR_EMS_AB_AUTHORIZED_DOMAIN = PROP_TAG( PT_TSTRING, 32852)
PR_EMS_AB_AUTHORIZED_DOMAIN_A = PROP_TAG( PT_STRING8, 32852)
PR_EMS_AB_AUTHORIZED_DOMAIN_W = PROP_TAG( PT_UNICODE, 32852)
PR_EMS_AB_AUTHORIZED_PASSWORD = PROP_TAG( PT_BINARY, 32853)
PR_EMS_AB_AUTHORIZED_USER = PROP_TAG( PT_TSTRING, 32854)
PR_EMS_AB_AUTHORIZED_USER_A = PROP_TAG( PT_STRING8, 32854)
PR_EMS_AB_AUTHORIZED_USER_W = PROP_TAG( PT_UNICODE, 32854)
PR_EMS_AB_AUTOREPLY = PROP_TAG( PT_BOOLEAN, 32779)
PR_EMS_AB_AUTOREPLY_MESSAGE = PROP_TAG( PT_TSTRING, 32778)
PR_EMS_AB_AUTOREPLY_MESSAGE_A = PROP_TAG( PT_STRING8, 32778)
PR_EMS_AB_AUTOREPLY_MESSAGE_W = PROP_TAG( PT_UNICODE, 32778)
PR_EMS_AB_AUTOREPLY_SUBJECT = PROP_TAG( PT_TSTRING, 32830)
PR_EMS_AB_AUTOREPLY_SUBJECT_A = PROP_TAG( PT_STRING8, 32830)
PR_EMS_AB_AUTOREPLY_SUBJECT_W = PROP_TAG( PT_UNICODE, 32830)
PR_EMS_AB_BRIDGEHEAD_SERVERS = PROP_TAG( PT_MV_TSTRING, 33140)
PR_EMS_AB_BRIDGEHEAD_SERVERS_A = PROP_TAG( PT_MV_STRING8, 33140)
PR_EMS_AB_BRIDGEHEAD_SERVERS_W = PROP_TAG( PT_MV_UNICODE, 33140)
PR_EMS_AB_BRIDGEHEAD_SERVERS_O = PROP_TAG( PT_OBJECT, 33140)
PR_EMS_AB_BRIDGEHEAD_SERVERS_T = PROP_TAG( PT_MV_TSTRING, 33140)
PR_EMS_AB_BUSINESS_CATEGORY = PROP_TAG( PT_MV_TSTRING, 32855)
PR_EMS_AB_BUSINESS_CATEGORY_A = PROP_TAG( PT_MV_STRING8, 32855)
PR_EMS_AB_BUSINESS_CATEGORY_W = PROP_TAG( PT_MV_UNICODE, 32855)
PR_EMS_AB_BUSINESS_ROLES = PROP_TAG( PT_BINARY, 32803)
PR_EMS_AB_CA_CERTIFICATE = PROP_TAG( PT_MV_BINARY, 32771)
PR_EMS_AB_CAN_CREATE_PF = PROP_TAG( PT_MV_TSTRING, 32856)
PR_EMS_AB_CAN_CREATE_PF_A = PROP_TAG( PT_MV_STRING8, 32856)
PR_EMS_AB_CAN_CREATE_PF_W = PROP_TAG( PT_MV_UNICODE, 32856)
PR_EMS_AB_CAN_CREATE_PF_O = PROP_TAG( PT_OBJECT, 32856)
PR_EMS_AB_CAN_CREATE_PF_T = PROP_TAG( PT_MV_TSTRING, 32856)
PR_EMS_AB_CAN_CREATE_PF_BL = PROP_TAG( PT_MV_TSTRING, 32857)
PR_EMS_AB_CAN_CREATE_PF_BL_A = PROP_TAG( PT_MV_STRING8, 32857)
PR_EMS_AB_CAN_CREATE_PF_BL_W = PROP_TAG( PT_MV_UNICODE, 32857)
PR_EMS_AB_CAN_CREATE_PF_BL_O = PROP_TAG( PT_OBJECT, 32857)
PR_EMS_AB_CAN_CREATE_PF_BL_T = PROP_TAG( PT_MV_TSTRING, 32857)
PR_EMS_AB_CAN_CREATE_PF_DL = PROP_TAG( PT_MV_TSTRING, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_A = PROP_TAG( PT_MV_STRING8, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_W = PROP_TAG( PT_MV_UNICODE, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_O = PROP_TAG( PT_OBJECT, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_T = PROP_TAG( PT_MV_TSTRING, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_BL = PROP_TAG( PT_MV_TSTRING, 32859)
PR_EMS_AB_CAN_CREATE_PF_DL_BL_A = PROP_TAG( PT_MV_STRING8, 32859)
PR_EMS_AB_CAN_CREATE_PF_DL_BL_W = PROP_TAG( PT_MV_UNICODE, 32859)
PR_EMS_AB_CAN_CREATE_PF_DL_BL_O = PROP_TAG( PT_OBJECT, 32859)
PR_EMS_AB_CAN_CREATE_PF_DL_BL_T = PROP_TAG( PT_MV_TSTRING, 32859)
PR_EMS_AB_CAN_NOT_CREATE_PF = PROP_TAG( PT_MV_TSTRING, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_A = PROP_TAG( PT_MV_STRING8, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_W = PROP_TAG( PT_MV_UNICODE, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_O = PROP_TAG( PT_OBJECT, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_T = PROP_TAG( PT_MV_TSTRING, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL = PROP_TAG( PT_MV_TSTRING, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL_A = PROP_TAG( PT_MV_STRING8, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL_W = PROP_TAG( PT_MV_UNICODE, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL_O = PROP_TAG( PT_OBJECT, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL_T = PROP_TAG( PT_MV_TSTRING, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL = PROP_TAG( PT_MV_TSTRING, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_A = PROP_TAG( PT_MV_STRING8, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_W = PROP_TAG( PT_MV_UNICODE, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_O = PROP_TAG( PT_OBJECT, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_T = PROP_TAG( PT_MV_TSTRING, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL = PROP_TAG( PT_MV_TSTRING, 32863)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL_A = PROP_TAG( PT_MV_STRING8, 32863)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL_W = PROP_TAG( PT_MV_UNICODE, 32863)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL_O = PROP_TAG( PT_OBJECT, 32863)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL_T = PROP_TAG( PT_MV_TSTRING, 32863)
PR_EMS_AB_CAN_PRESERVE_DNS = PROP_TAG( PT_BOOLEAN, 32864)
PR_EMS_AB_CERTIFICATE_REVOCATION_LIST = PROP_TAG( PT_BINARY, 32790)
PR_EMS_AB_CLOCK_ALERT_OFFSET = PROP_TAG( PT_LONG, 32865)
PR_EMS_AB_CLOCK_ALERT_REPAIR = PROP_TAG( PT_BOOLEAN, 32866)
PR_EMS_AB_CLOCK_WARNING_OFFSET = PROP_TAG( PT_LONG, 32867)
PR_EMS_AB_CLOCK_WARNING_REPAIR = PROP_TAG( PT_BOOLEAN, 32868)
PR_EMS_AB_COMPUTER_NAME = PROP_TAG( PT_TSTRING, 32869)
PR_EMS_AB_COMPUTER_NAME_A = PROP_TAG( PT_STRING8, 32869)
PR_EMS_AB_COMPUTER_NAME_W = PROP_TAG( PT_UNICODE, 32869)
PR_EMS_AB_CONNECTED_DOMAINS = PROP_TAG( PT_MV_TSTRING, 32870)
PR_EMS_AB_CONNECTED_DOMAINS_A = PROP_TAG( PT_MV_STRING8, 32870)
PR_EMS_AB_CONNECTED_DOMAINS_W = PROP_TAG( PT_MV_UNICODE, 32870)
PR_EMS_AB_CONTAINER_INFO = PROP_TAG( PT_LONG, 32871)
PR_EMS_AB_COST = PROP_TAG( PT_LONG, 32872)
PR_EMS_AB_COUNTRY_NAME = PROP_TAG( PT_TSTRING, 32873)
PR_EMS_AB_COUNTRY_NAME_A = PROP_TAG( PT_STRING8, 32873)
PR_EMS_AB_COUNTRY_NAME_W = PROP_TAG( PT_UNICODE, 32873)
PR_EMS_AB_CROSS_CERTIFICATE_PAIR = PROP_TAG( PT_MV_BINARY, 32805)
PR_EMS_AB_DELIV_CONT_LENGTH = PROP_TAG( PT_LONG, 32874)
PR_EMS_AB_DELIV_EITS = PROP_TAG( PT_MV_BINARY, 32875)
PR_EMS_AB_DELIV_EXT_CONT_TYPES = PROP_TAG( PT_MV_BINARY, 32876)
PR_EMS_AB_DELIVER_AND_REDIRECT = PROP_TAG( PT_BOOLEAN, 32877)
PR_EMS_AB_DELIVERY_MECHANISM = PROP_TAG( PT_LONG, 32878)
PR_EMS_AB_DESCRIPTION = PROP_TAG( PT_MV_TSTRING, 32879)
PR_EMS_AB_DESCRIPTION_A = PROP_TAG( PT_MV_STRING8, 32879)
PR_EMS_AB_DESCRIPTION_W = PROP_TAG( PT_MV_UNICODE, 32879)
PR_EMS_AB_DESTINATION_INDICATOR = PROP_TAG( PT_MV_TSTRING, 32880)
PR_EMS_AB_DESTINATION_INDICATOR_A = PROP_TAG( PT_MV_STRING8, 32880)
PR_EMS_AB_DESTINATION_INDICATOR_W = PROP_TAG( PT_MV_UNICODE, 32880)
PR_EMS_AB_DIAGNOSTIC_REG_KEY = PROP_TAG( PT_TSTRING, 32881)
PR_EMS_AB_DIAGNOSTIC_REG_KEY_A = PROP_TAG( PT_STRING8, 32881)
PR_EMS_AB_DIAGNOSTIC_REG_KEY_W = PROP_TAG( PT_UNICODE, 32881)
PR_EMS_AB_DISPLAY_NAME_OVERRIDE = PROP_TAG( PT_BOOLEAN, 32769)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL = PROP_TAG( PT_MV_TSTRING, 32882)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL_A = PROP_TAG( PT_MV_STRING8, 32882)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL_W = PROP_TAG( PT_MV_UNICODE, 32882)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL_O = PROP_TAG( PT_OBJECT, 32882)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL_T = PROP_TAG( PT_MV_TSTRING, 32882)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL = PROP_TAG( PT_MV_TSTRING, 32883)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL_A = PROP_TAG( PT_MV_STRING8, 32883)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL_W = PROP_TAG( PT_MV_UNICODE, 32883)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL_O = PROP_TAG( PT_OBJECT, 32883)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL_T = PROP_TAG( PT_MV_TSTRING, 32883)
PR_EMS_AB_DL_MEMBER_RULE = PROP_TAG( PT_MV_BINARY, 32884)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP = PROP_TAG( PT_TSTRING, 32885)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP_A = PROP_TAG( PT_STRING8, 32885)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP_W = PROP_TAG( PT_UNICODE, 32885)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP_O = PROP_TAG( PT_OBJECT, 32885)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP_T = PROP_TAG( PT_TSTRING, 32885)
PR_EMS_AB_DOMAIN_NAME = PROP_TAG( PT_TSTRING, 32886)
PR_EMS_AB_DOMAIN_NAME_A = PROP_TAG( PT_STRING8, 32886)
PR_EMS_AB_DOMAIN_NAME_W = PROP_TAG( PT_UNICODE, 32886)
PR_EMS_AB_DSA_SIGNATURE = PROP_TAG( PT_BINARY, 32887)
PR_EMS_AB_DXA_ADMIN_COPY = PROP_TAG( PT_BOOLEAN, 32888)
PR_EMS_AB_DXA_ADMIN_FORWARD = PROP_TAG( PT_BOOLEAN, 32889)
PR_EMS_AB_DXA_ADMIN_UPDATE = PROP_TAG( PT_LONG, 32890)
PR_EMS_AB_DXA_APPEND_REQCN = PROP_TAG( PT_BOOLEAN, 32891)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST = PROP_TAG( PT_MV_TSTRING, 32892)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST_A = PROP_TAG( PT_MV_STRING8, 32892)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST_W = PROP_TAG( PT_MV_UNICODE, 32892)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST_O = PROP_TAG( PT_OBJECT, 32892)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST_T = PROP_TAG( PT_MV_TSTRING, 32892)
PR_EMS_AB_DXA_CONF_REQ_TIME = PROP_TAG( PT_SYSTIME, 32893)
PR_EMS_AB_DXA_CONF_SEQ = PROP_TAG( PT_TSTRING, 32894)
PR_EMS_AB_DXA_CONF_SEQ_A = PROP_TAG( PT_STRING8, 32894)
PR_EMS_AB_DXA_CONF_SEQ_W = PROP_TAG( PT_UNICODE, 32894)
PR_EMS_AB_DXA_CONF_SEQ_USN = PROP_TAG( PT_LONG, 32895)
PR_EMS_AB_DXA_EXCHANGE_OPTIONS = PROP_TAG( PT_LONG, 32896)
PR_EMS_AB_DXA_EXPORT_NOW = PROP_TAG( PT_BOOLEAN, 32897)
PR_EMS_AB_DXA_FLAGS = PROP_TAG( PT_LONG, 32898)
PR_EMS_AB_DXA_IMP_SEQ = PROP_TAG( PT_TSTRING, 32899)
PR_EMS_AB_DXA_IMP_SEQ_A = PROP_TAG( PT_STRING8, 32899)
PR_EMS_AB_DXA_IMP_SEQ_W = PROP_TAG( PT_UNICODE, 32899)
PR_EMS_AB_DXA_IMP_SEQ_TIME = PROP_TAG( PT_SYSTIME, 32900)
PR_EMS_AB_DXA_IMP_SEQ_USN = PROP_TAG( PT_LONG, 32901)
PR_EMS_AB_DXA_IMPORT_NOW = PROP_TAG( PT_BOOLEAN, 32902)
PR_EMS_AB_DXA_IN_TEMPLATE_MAP = PROP_TAG( PT_MV_TSTRING, 32903)
PR_EMS_AB_DXA_IN_TEMPLATE_MAP_A = PROP_TAG( PT_MV_STRING8, 32903)
PR_EMS_AB_DXA_IN_TEMPLATE_MAP_W = PROP_TAG( PT_MV_UNICODE, 32903)
PR_EMS_AB_DXA_LOCAL_ADMIN = PROP_TAG( PT_TSTRING, 32904)
PR_EMS_AB_DXA_LOCAL_ADMIN_A = PROP_TAG( PT_STRING8, 32904)
PR_EMS_AB_DXA_LOCAL_ADMIN_W = PROP_TAG( PT_UNICODE, 32904)
PR_EMS_AB_DXA_LOCAL_ADMIN_O = PROP_TAG( PT_OBJECT, 32904)
PR_EMS_AB_DXA_LOCAL_ADMIN_T = PROP_TAG( PT_TSTRING, 32904)
PR_EMS_AB_DXA_LOGGING_LEVEL = PROP_TAG( PT_LONG, 32905)
PR_EMS_AB_DXA_NATIVE_ADDRESS_TYPE = PROP_TAG( PT_TSTRING, 32906)
PR_EMS_AB_DXA_NATIVE_ADDRESS_TYPE_A = PROP_TAG( PT_STRING8, 32906)
PR_EMS_AB_DXA_NATIVE_ADDRESS_TYPE_W = PROP_TAG( PT_UNICODE, 32906)
PR_EMS_AB_DXA_OUT_TEMPLATE_MAP = PROP_TAG( PT_MV_TSTRING, 32907)
PR_EMS_AB_DXA_OUT_TEMPLATE_MAP_A = PROP_TAG( PT_MV_STRING8, 32907)
PR_EMS_AB_DXA_OUT_TEMPLATE_MAP_W = PROP_TAG( PT_MV_UNICODE, 32907)
PR_EMS_AB_DXA_PASSWORD = PROP_TAG( PT_TSTRING, 32908)
PR_EMS_AB_DXA_PASSWORD_A = PROP_TAG( PT_STRING8, 32908)
PR_EMS_AB_DXA_PASSWORD_W = PROP_TAG( PT_UNICODE, 32908)
PR_EMS_AB_DXA_PREV_EXCHANGE_OPTIONS = PROP_TAG( PT_LONG, 32909)
PR_EMS_AB_DXA_PREV_EXPORT_NATIVE_ONLY = PROP_TAG( PT_BOOLEAN, 32910)
PR_EMS_AB_DXA_PREV_IN_EXCHANGE_SENSITIVITY = PROP_TAG( PT_LONG, 32911)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES = PROP_TAG( PT_TSTRING, 32912)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES_A = PROP_TAG( PT_STRING8, 32912)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES_W = PROP_TAG( PT_UNICODE, 32912)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES_O = PROP_TAG( PT_OBJECT, 32912)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES_T = PROP_TAG( PT_TSTRING, 32912)
PR_EMS_AB_DXA_PREV_REPLICATION_SENSITIVITY = PROP_TAG( PT_LONG, 32913)
PR_EMS_AB_DXA_PREV_TEMPLATE_OPTIONS = PROP_TAG( PT_LONG, 32914)
PR_EMS_AB_DXA_PREV_TYPES = PROP_TAG( PT_LONG, 32915)
PR_EMS_AB_DXA_RECIPIENT_CP = PROP_TAG( PT_TSTRING, 32916)
PR_EMS_AB_DXA_RECIPIENT_CP_A = PROP_TAG( PT_STRING8, 32916)
PR_EMS_AB_DXA_RECIPIENT_CP_W = PROP_TAG( PT_UNICODE, 32916)
PR_EMS_AB_DXA_REMOTE_CLIENT = PROP_TAG( PT_TSTRING, 32917)
PR_EMS_AB_DXA_REMOTE_CLIENT_A = PROP_TAG( PT_STRING8, 32917)
PR_EMS_AB_DXA_REMOTE_CLIENT_W = PROP_TAG( PT_UNICODE, 32917)
PR_EMS_AB_DXA_REMOTE_CLIENT_O = PROP_TAG( PT_OBJECT, 32917)
PR_EMS_AB_DXA_REMOTE_CLIENT_T = PROP_TAG( PT_TSTRING, 32917)
PR_EMS_AB_DXA_REQ_SEQ = PROP_TAG( PT_TSTRING, 32918)
PR_EMS_AB_DXA_REQ_SEQ_A = PROP_TAG( PT_STRING8, 32918)
PR_EMS_AB_DXA_REQ_SEQ_W = PROP_TAG( PT_UNICODE, 32918)
PR_EMS_AB_DXA_REQ_SEQ_TIME = PROP_TAG( PT_SYSTIME, 32919)
PR_EMS_AB_DXA_REQ_SEQ_USN = PROP_TAG( PT_LONG, 32920)
PR_EMS_AB_DXA_REQNAME = PROP_TAG( PT_TSTRING, 32921)
PR_EMS_AB_DXA_REQNAME_A = PROP_TAG( PT_STRING8, 32921)
PR_EMS_AB_DXA_REQNAME_W = PROP_TAG( PT_UNICODE, 32921)
PR_EMS_AB_DXA_SVR_SEQ = PROP_TAG( PT_TSTRING, 32922)
PR_EMS_AB_DXA_SVR_SEQ_A = PROP_TAG( PT_STRING8, 32922)
PR_EMS_AB_DXA_SVR_SEQ_W = PROP_TAG( PT_UNICODE, 32922)
PR_EMS_AB_DXA_SVR_SEQ_TIME = PROP_TAG( PT_SYSTIME, 32923)
PR_EMS_AB_DXA_SVR_SEQ_USN = PROP_TAG( PT_LONG, 32924)
PR_EMS_AB_DXA_TASK = PROP_TAG( PT_LONG, 32925)
PR_EMS_AB_DXA_TEMPLATE_OPTIONS = PROP_TAG( PT_LONG, 32926)
PR_EMS_AB_DXA_TEMPLATE_TIMESTAMP = PROP_TAG( PT_SYSTIME, 32927)
PR_EMS_AB_DXA_TYPES = PROP_TAG( PT_LONG, 32928)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST = PROP_TAG( PT_MV_TSTRING, 32929)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST_A = PROP_TAG( PT_MV_STRING8, 32929)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST_W = PROP_TAG( PT_MV_UNICODE, 32929)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST_O = PROP_TAG( PT_OBJECT, 32929)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST_T = PROP_TAG( PT_MV_TSTRING, 32929)
PR_EMS_AB_ENABLED_PROTOCOLS = PROP_TAG( PT_LONG, 33151)
PR_EMS_AB_ENCAPSULATION_METHOD = PROP_TAG( PT_LONG, 32930)
PR_EMS_AB_ENCRYPT = PROP_TAG( PT_BOOLEAN, 32931)
PR_EMS_AB_ENCRYPT_ALG_LIST_NA = PROP_TAG( PT_MV_TSTRING, 32832)
PR_EMS_AB_ENCRYPT_ALG_LIST_NA_A = PROP_TAG( PT_MV_STRING8, 32832)
PR_EMS_AB_ENCRYPT_ALG_LIST_NA_W = PROP_TAG( PT_MV_UNICODE, 32832)
PR_EMS_AB_ENCRYPT_ALG_LIST_OTHER = PROP_TAG( PT_MV_TSTRING, 32833)
PR_EMS_AB_ENCRYPT_ALG_LIST_OTHER_A = PROP_TAG( PT_MV_STRING8, 32833)
PR_EMS_AB_ENCRYPT_ALG_LIST_OTHER_W = PROP_TAG( PT_MV_UNICODE, 32833)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_NA = PROP_TAG( PT_TSTRING, 32835)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_NA_A = PROP_TAG( PT_STRING8, 32835)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_NA_W = PROP_TAG( PT_UNICODE, 32835)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_OTHER = PROP_TAG( PT_TSTRING, 32829)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_OTHER_A = PROP_TAG( PT_STRING8, 32829)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_OTHER_W = PROP_TAG( PT_UNICODE, 32829)
PR_EMS_AB_EXPAND_DLS_LOCALLY = PROP_TAG( PT_BOOLEAN, 32932)
PR_EMS_AB_EXPIRATION_TIME = PROP_TAG( PT_SYSTIME, 32808)
PR_EMS_AB_EXPORT_CONTAINERS = PROP_TAG( PT_MV_TSTRING, 32933)
PR_EMS_AB_EXPORT_CONTAINERS_A = PROP_TAG( PT_MV_STRING8, 32933)
PR_EMS_AB_EXPORT_CONTAINERS_W = PROP_TAG( PT_MV_UNICODE, 32933)
PR_EMS_AB_EXPORT_CONTAINERS_O = PROP_TAG( PT_OBJECT, 32933)
PR_EMS_AB_EXPORT_CONTAINERS_T = PROP_TAG( PT_MV_TSTRING, 32933)
PR_EMS_AB_EXPORT_CUSTOM_RECIPIENTS = PROP_TAG( PT_BOOLEAN, 32934)
PR_EMS_AB_EXTENDED_CHARS_ALLOWED = PROP_TAG( PT_BOOLEAN, 32935)
PR_EMS_AB_EXTENSION_ATTRIBUTE_1 = PROP_TAG( PT_TSTRING, 32813)
PR_EMS_AB_EXTENSION_ATTRIBUTE_1_A = PROP_TAG( PT_STRING8, 32813)
PR_EMS_AB_EXTENSION_ATTRIBUTE_1_W = PROP_TAG( PT_UNICODE, 32813)
PR_EMS_AB_EXTENSION_ATTRIBUTE_10 = PROP_TAG( PT_TSTRING, 32822)
PR_EMS_AB_EXTENSION_ATTRIBUTE_10_A = PROP_TAG( PT_STRING8, 32822)
PR_EMS_AB_EXTENSION_ATTRIBUTE_10_W = PROP_TAG( PT_UNICODE, 32822)
PR_EMS_AB_EXTENSION_ATTRIBUTE_2 = PROP_TAG( PT_TSTRING, 32814)
PR_EMS_AB_EXTENSION_ATTRIBUTE_2_A = PROP_TAG( PT_STRING8, 32814)
PR_EMS_AB_EXTENSION_ATTRIBUTE_2_W = PROP_TAG( PT_UNICODE, 32814)
PR_EMS_AB_EXTENSION_ATTRIBUTE_3 = PROP_TAG( PT_TSTRING, 32815)
PR_EMS_AB_EXTENSION_ATTRIBUTE_3_A = PROP_TAG( PT_STRING8, 32815)
PR_EMS_AB_EXTENSION_ATTRIBUTE_3_W = PROP_TAG( PT_UNICODE, 32815)
PR_EMS_AB_EXTENSION_ATTRIBUTE_4 = PROP_TAG( PT_TSTRING, 32816)
PR_EMS_AB_EXTENSION_ATTRIBUTE_4_A = PROP_TAG( PT_STRING8, 32816)
PR_EMS_AB_EXTENSION_ATTRIBUTE_4_W = PROP_TAG( PT_UNICODE, 32816)
PR_EMS_AB_EXTENSION_ATTRIBUTE_5 = PROP_TAG( PT_TSTRING, 32817)
PR_EMS_AB_EXTENSION_ATTRIBUTE_5_A = PROP_TAG( PT_STRING8, 32817)
PR_EMS_AB_EXTENSION_ATTRIBUTE_5_W = PROP_TAG( PT_UNICODE, 32817)
PR_EMS_AB_EXTENSION_ATTRIBUTE_6 = PROP_TAG( PT_TSTRING, 32818)
PR_EMS_AB_EXTENSION_ATTRIBUTE_6_A = PROP_TAG( PT_STRING8, 32818)
PR_EMS_AB_EXTENSION_ATTRIBUTE_6_W = PROP_TAG( PT_UNICODE, 32818)
PR_EMS_AB_EXTENSION_ATTRIBUTE_7 = PROP_TAG( PT_TSTRING, 32819)
PR_EMS_AB_EXTENSION_ATTRIBUTE_7_A = PROP_TAG( PT_STRING8, 32819)
PR_EMS_AB_EXTENSION_ATTRIBUTE_7_W = PROP_TAG( PT_UNICODE, 32819)
PR_EMS_AB_EXTENSION_ATTRIBUTE_8 = PROP_TAG( PT_TSTRING, 32820)
PR_EMS_AB_EXTENSION_ATTRIBUTE_8_A = PROP_TAG( PT_STRING8, 32820)
PR_EMS_AB_EXTENSION_ATTRIBUTE_8_W = PROP_TAG( PT_UNICODE, 32820)
PR_EMS_AB_EXTENSION_ATTRIBUTE_9 = PROP_TAG( PT_TSTRING, 32821)
PR_EMS_AB_EXTENSION_ATTRIBUTE_9_A = PROP_TAG( PT_STRING8, 32821)
PR_EMS_AB_EXTENSION_ATTRIBUTE_9_W = PROP_TAG( PT_UNICODE, 32821)
PR_EMS_AB_EXTENSION_DATA = PROP_TAG( PT_MV_BINARY, 32936)
PR_EMS_AB_EXTENSION_NAME = PROP_TAG( PT_MV_TSTRING, 32937)
PR_EMS_AB_EXTENSION_NAME_A = PROP_TAG( PT_MV_STRING8, 32937)
PR_EMS_AB_EXTENSION_NAME_W = PROP_TAG( PT_MV_UNICODE, 32937)
PR_EMS_AB_EXTENSION_NAME_INHERITED = PROP_TAG( PT_MV_TSTRING, 32938)
PR_EMS_AB_EXTENSION_NAME_INHERITED_A = PROP_TAG( PT_MV_STRING8, 32938)
PR_EMS_AB_EXTENSION_NAME_INHERITED_W = PROP_TAG( PT_MV_UNICODE, 32938)
PR_EMS_AB_FACSIMILE_TELEPHONE_NUMBER = PROP_TAG( PT_MV_BINARY, 32939)
PR_EMS_AB_FILE_VERSION = PROP_TAG( PT_BINARY, 32940)
PR_EMS_AB_FILTER_LOCAL_ADDRESSES = PROP_TAG( PT_BOOLEAN, 32941)
PR_EMS_AB_FOLDER_PATHNAME = PROP_TAG( PT_TSTRING, 32772)
PR_EMS_AB_FOLDER_PATHNAME_A = PROP_TAG( PT_STRING8, 32772)
PR_EMS_AB_FOLDER_PATHNAME_W = PROP_TAG( PT_UNICODE, 32772)
PR_EMS_AB_FOLDERS_CONTAINER = PROP_TAG( PT_TSTRING, 32942)
PR_EMS_AB_FOLDERS_CONTAINER_A = PROP_TAG( PT_STRING8, 32942)
PR_EMS_AB_FOLDERS_CONTAINER_W = PROP_TAG( PT_UNICODE, 32942)
PR_EMS_AB_FOLDERS_CONTAINER_O = PROP_TAG( PT_OBJECT, 32942)
PR_EMS_AB_FOLDERS_CONTAINER_T = PROP_TAG( PT_TSTRING, 32942)
PR_EMS_AB_GARBAGE_COLL_PERIOD = PROP_TAG( PT_LONG, 32943)
PR_EMS_AB_GATEWAY_LOCAL_CRED = PROP_TAG( PT_TSTRING, 32944)
PR_EMS_AB_GATEWAY_LOCAL_CRED_A = PROP_TAG( PT_STRING8, 32944)
PR_EMS_AB_GATEWAY_LOCAL_CRED_W = PROP_TAG( PT_UNICODE, 32944)
PR_EMS_AB_GATEWAY_LOCAL_DESIG = PROP_TAG( PT_TSTRING, 32945)
PR_EMS_AB_GATEWAY_LOCAL_DESIG_A = PROP_TAG( PT_STRING8, 32945)
PR_EMS_AB_GATEWAY_LOCAL_DESIG_W = PROP_TAG( PT_UNICODE, 32945)
PR_EMS_AB_GATEWAY_PROXY = PROP_TAG( PT_MV_TSTRING, 32946)
PR_EMS_AB_GATEWAY_PROXY_A = PROP_TAG( PT_MV_STRING8, 32946)
PR_EMS_AB_GATEWAY_PROXY_W = PROP_TAG( PT_MV_UNICODE, 32946)
PR_EMS_AB_GATEWAY_ROUTING_TREE = PROP_TAG( PT_BINARY, 32947)
PR_EMS_AB_GWART_LAST_MODIFIED = PROP_TAG( PT_SYSTIME, 32948)
PR_EMS_AB_HAS_FULL_REPLICA_NCS = PROP_TAG( PT_MV_TSTRING, 32949)
PR_EMS_AB_HAS_FULL_REPLICA_NCS_A = PROP_TAG( PT_MV_STRING8, 32949)
PR_EMS_AB_HAS_FULL_REPLICA_NCS_W = PROP_TAG( PT_MV_UNICODE, 32949)
PR_EMS_AB_HAS_FULL_REPLICA_NCS_O = PROP_TAG( PT_OBJECT, 32949)
PR_EMS_AB_HAS_FULL_REPLICA_NCS_T = PROP_TAG( PT_MV_TSTRING, 32949)
PR_EMS_AB_HAS_MASTER_NCS = PROP_TAG( PT_MV_TSTRING, 32950)
PR_EMS_AB_HAS_MASTER_NCS_A = PROP_TAG( PT_MV_STRING8, 32950)
PR_EMS_AB_HAS_MASTER_NCS_W = PROP_TAG( PT_MV_UNICODE, 32950)
PR_EMS_AB_HAS_MASTER_NCS_O = PROP_TAG( PT_OBJECT, 32950)
PR_EMS_AB_HAS_MASTER_NCS_T = PROP_TAG( PT_MV_TSTRING, 32950)
PR_EMS_AB_HELP_DATA16 = PROP_TAG( PT_BINARY, 32826)
PR_EMS_AB_HELP_DATA32 = PROP_TAG( PT_BINARY, 32784)
PR_EMS_AB_HELP_FILE_NAME = PROP_TAG( PT_TSTRING, 32827)
PR_EMS_AB_HELP_FILE_NAME_A = PROP_TAG( PT_STRING8, 32827)
PR_EMS_AB_HELP_FILE_NAME_W = PROP_TAG( PT_UNICODE, 32827)
PR_EMS_AB_HEURISTICS = PROP_TAG( PT_LONG, 32951)
PR_EMS_AB_HIDE_DL_MEMBERSHIP = PROP_TAG( PT_BOOLEAN, 32952)
PR_EMS_AB_HIDE_FROM_ADDRESS_BOOK = PROP_TAG( PT_BOOLEAN, 32953)
PR_EMS_AB_HOME_MDB = PROP_TAG( PT_TSTRING, 32774)
PR_EMS_AB_HOME_MDB_A = PROP_TAG( PT_STRING8, 32774)
PR_EMS_AB_HOME_MDB_W = PROP_TAG( PT_UNICODE, 32774)
PR_EMS_AB_HOME_MDB_O = PROP_TAG( PT_OBJECT, 32774)
PR_EMS_AB_HOME_MDB_T = PROP_TAG( PT_TSTRING, 32774)
PR_EMS_AB_HOME_MDB_BL = PROP_TAG( PT_MV_TSTRING, 32788)
PR_EMS_AB_HOME_MDB_BL_A = PROP_TAG( PT_MV_STRING8, 32788)
PR_EMS_AB_HOME_MDB_BL_W = PROP_TAG( PT_MV_UNICODE, 32788)
PR_EMS_AB_HOME_MDB_BL_O = PROP_TAG( PT_OBJECT, 32788)
PR_EMS_AB_HOME_MDB_BL_T = PROP_TAG( PT_MV_TSTRING, 32788)
PR_EMS_AB_HOME_MTA = PROP_TAG( PT_TSTRING, 32775)
PR_EMS_AB_HOME_MTA_A = PROP_TAG( PT_STRING8, 32775)
PR_EMS_AB_HOME_MTA_W = PROP_TAG( PT_UNICODE, 32775)
PR_EMS_AB_HOME_MTA_O = PROP_TAG( PT_OBJECT, 32775)
PR_EMS_AB_HOME_MTA_T = PROP_TAG( PT_TSTRING, 32775)
PR_EMS_AB_HOME_PUBLIC_SERVER = PROP_TAG( PT_TSTRING, 32831)
PR_EMS_AB_HOME_PUBLIC_SERVER_A = PROP_TAG( PT_STRING8, 32831)
PR_EMS_AB_HOME_PUBLIC_SERVER_W = PROP_TAG( PT_UNICODE, 32831)
PR_EMS_AB_HOME_PUBLIC_SERVER_O = PROP_TAG( PT_OBJECT, 32831)
PR_EMS_AB_HOME_PUBLIC_SERVER_T = PROP_TAG( PT_TSTRING, 32831)
PR_EMS_AB_IMPORT_CONTAINER = PROP_TAG( PT_TSTRING, 32954)
PR_EMS_AB_IMPORT_CONTAINER_A = PROP_TAG( PT_STRING8, 32954)
PR_EMS_AB_IMPORT_CONTAINER_W = PROP_TAG( PT_UNICODE, 32954)
PR_EMS_AB_IMPORT_CONTAINER_O = PROP_TAG( PT_OBJECT, 32954)
PR_EMS_AB_IMPORT_CONTAINER_T = PROP_TAG( PT_TSTRING, 32954)
PR_EMS_AB_IMPORT_SENSITIVITY = PROP_TAG( PT_LONG, 32955)
PR_EMS_AB_IMPORTED_FROM = PROP_TAG( PT_TSTRING, 32834)
PR_EMS_AB_IMPORTED_FROM_A = PROP_TAG( PT_STRING8, 32834)
PR_EMS_AB_IMPORTED_FROM_W = PROP_TAG( PT_UNICODE, 32834)
PR_EMS_AB_INBOUND_SITES = PROP_TAG( PT_MV_TSTRING, 32956)
PR_EMS_AB_INBOUND_SITES_A = PROP_TAG( PT_MV_STRING8, 32956)
PR_EMS_AB_INBOUND_SITES_W = PROP_TAG( PT_MV_UNICODE, 32956)
PR_EMS_AB_INBOUND_SITES_O = PROP_TAG( PT_OBJECT, 32956)
PR_EMS_AB_INBOUND_SITES_T = PROP_TAG( PT_MV_TSTRING, 32956)
PR_EMS_AB_INSTANCE_TYPE = PROP_TAG( PT_LONG, 32957)
PR_EMS_AB_INTERNATIONAL_ISDN_NUMBER = PROP_TAG( PT_MV_TSTRING, 32958)
PR_EMS_AB_INTERNATIONAL_ISDN_NUMBER_A = PROP_TAG( PT_MV_STRING8, 32958)
PR_EMS_AB_INTERNATIONAL_ISDN_NUMBER_W = PROP_TAG( PT_MV_UNICODE, 32958)
PR_EMS_AB_INVOCATION_ID = PROP_TAG( PT_BINARY, 32959)
PR_EMS_AB_IS_DELETED = PROP_TAG( PT_BOOLEAN, 32960)
PR_EMS_AB_IS_MEMBER_OF_DL = PROP_TAG( PT_OBJECT, 32776)
PR_EMS_AB_IS_MEMBER_OF_DL_A = PROP_TAG( PT_MV_STRING8, 32776)
PR_EMS_AB_IS_MEMBER_OF_DL_W = PROP_TAG( PT_MV_UNICODE, 32776)
PR_EMS_AB_IS_MEMBER_OF_DL_O = PROP_TAG( PT_OBJECT, 32776)
PR_EMS_AB_IS_MEMBER_OF_DL_T = PROP_TAG( PT_MV_TSTRING, 32776)
PR_EMS_AB_IS_SINGLE_VALUED = PROP_TAG( PT_BOOLEAN, 32961)
PR_EMS_AB_KCC_STATUS = PROP_TAG( PT_MV_BINARY, 32962)
PR_EMS_AB_KM_SERVER = PROP_TAG( PT_TSTRING, 32781)
PR_EMS_AB_KM_SERVER_A = PROP_TAG( PT_STRING8, 32781)
PR_EMS_AB_KM_SERVER_W = PROP_TAG( PT_UNICODE, 32781)
PR_EMS_AB_KM_SERVER_O = PROP_TAG( PT_OBJECT, 32781)
PR_EMS_AB_KM_SERVER_T = PROP_TAG( PT_TSTRING, 32781)
PR_EMS_AB_KNOWLEDGE_INFORMATION = PROP_TAG( PT_MV_TSTRING, 32963)
PR_EMS_AB_KNOWLEDGE_INFORMATION_A = PROP_TAG( PT_MV_STRING8, 32963)
PR_EMS_AB_KNOWLEDGE_INFORMATION_W = PROP_TAG( PT_MV_UNICODE, 32963)
PR_EMS_AB_LANGUAGE = PROP_TAG( PT_LONG, 33144)
PR_EMS_AB_LDAP_DISPLAY_NAME = PROP_TAG( PT_MV_TSTRING, 33137)
PR_EMS_AB_LDAP_DISPLAY_NAME_A = PROP_TAG( PT_MV_STRING8, 33137)
PR_EMS_AB_LDAP_DISPLAY_NAME_W = PROP_TAG( PT_MV_UNICODE, 33137)
PR_EMS_AB_LINE_WRAP = PROP_TAG( PT_LONG, 32964)
PR_EMS_AB_LINK_ID = PROP_TAG( PT_LONG, 32965)
PR_EMS_AB_LOCAL_BRIDGE_HEAD = PROP_TAG( PT_TSTRING, 32966)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_A = PROP_TAG( PT_STRING8, 32966)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_W = PROP_TAG( PT_UNICODE, 32966)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_ADDRESS = PROP_TAG( PT_TSTRING, 32967)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_ADDRESS_A = PROP_TAG( PT_STRING8, 32967)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_ADDRESS_W = PROP_TAG( PT_UNICODE, 32967)
PR_EMS_AB_LOCAL_INITIAL_TURN = PROP_TAG( PT_BOOLEAN, 32968)
PR_EMS_AB_LOCAL_SCOPE = PROP_TAG( PT_MV_TSTRING, 32969)
PR_EMS_AB_LOCAL_SCOPE_A = PROP_TAG( PT_MV_STRING8, 32969)
PR_EMS_AB_LOCAL_SCOPE_W = PROP_TAG( PT_MV_UNICODE, 32969)
PR_EMS_AB_LOCAL_SCOPE_O = PROP_TAG( PT_OBJECT, 32969)
PR_EMS_AB_LOCAL_SCOPE_T = PROP_TAG( PT_MV_TSTRING, 32969)
PR_EMS_AB_LOG_FILENAME = PROP_TAG( PT_TSTRING, 32970)
PR_EMS_AB_LOG_FILENAME_A = PROP_TAG( PT_STRING8, 32970)
PR_EMS_AB_LOG_FILENAME_W = PROP_TAG( PT_UNICODE, 32970)
PR_EMS_AB_LOG_ROLLOVER_INTERVAL = PROP_TAG( PT_LONG, 32971)
PR_EMS_AB_MAINTAIN_AUTOREPLY_HISTORY = PROP_TAG( PT_BOOLEAN, 32972)
PR_EMS_AB_MANAGER = PROP_TAG( PT_OBJECT, 32773)
PR_EMS_AB_MANAGER_A = PROP_TAG( PT_STRING8, 32773)
PR_EMS_AB_MANAGER_W = PROP_TAG( PT_UNICODE, 32773)
PR_EMS_AB_MANAGER_O = PROP_TAG( PT_OBJECT, 32773)
PR_EMS_AB_MANAGER_T = PROP_TAG( PT_TSTRING, 32773)
PR_EMS_AB_MAPI_DISPLAY_TYPE = PROP_TAG( PT_LONG, 32973)
PR_EMS_AB_MAPI_ID = PROP_TAG( PT_LONG, 32974)
PR_EMS_AB_MAXIMUM_OBJECT_ID = PROP_TAG( PT_BINARY, 33129)
PR_EMS_AB_MDB_BACKOFF_INTERVAL = PROP_TAG( PT_LONG, 32975)
PR_EMS_AB_MDB_MSG_TIME_OUT_PERIOD = PROP_TAG( PT_LONG, 32976)
PR_EMS_AB_MDB_OVER_QUOTA_LIMIT = PROP_TAG( PT_LONG, 32977)
PR_EMS_AB_MDB_STORAGE_QUOTA = PROP_TAG( PT_LONG, 32978)
PR_EMS_AB_MDB_UNREAD_LIMIT = PROP_TAG( PT_LONG, 32979)
PR_EMS_AB_MDB_USE_DEFAULTS = PROP_TAG( PT_BOOLEAN, 32980)
PR_EMS_AB_MEMBER = PROP_TAG( PT_OBJECT, 32777)
PR_EMS_AB_MEMBER_A = PROP_TAG( PT_MV_STRING8, 32777)
PR_EMS_AB_MEMBER_W = PROP_TAG( PT_MV_UNICODE, 32777)
PR_EMS_AB_MEMBER_O = PROP_TAG( PT_OBJECT, 32777)
PR_EMS_AB_MEMBER_T = PROP_TAG( PT_MV_TSTRING, 32777)
PR_EMS_AB_MESSAGE_TRACKING_ENABLED = PROP_TAG( PT_BOOLEAN, 32981)
PR_EMS_AB_MONITOR_CLOCK = PROP_TAG( PT_BOOLEAN, 32982)
PR_EMS_AB_MONITOR_SERVERS = PROP_TAG( PT_BOOLEAN, 32983)
PR_EMS_AB_MONITOR_SERVICES = PROP_TAG( PT_BOOLEAN, 32984)
PR_EMS_AB_MONITORED_CONFIGURATIONS = PROP_TAG( PT_MV_TSTRING, 32985)
PR_EMS_AB_MONITORED_CONFIGURATIONS_A = PROP_TAG( PT_MV_STRING8, 32985)
PR_EMS_AB_MONITORED_CONFIGURATIONS_W = PROP_TAG( PT_MV_UNICODE, 32985)
PR_EMS_AB_MONITORED_CONFIGURATIONS_O = PROP_TAG( PT_OBJECT, 32985)
PR_EMS_AB_MONITORED_CONFIGURATIONS_T = PROP_TAG( PT_MV_TSTRING, 32985)
PR_EMS_AB_MONITORED_SERVERS = PROP_TAG( PT_MV_TSTRING, 32986)
PR_EMS_AB_MONITORED_SERVERS_A = PROP_TAG( PT_MV_STRING8, 32986)
PR_EMS_AB_MONITORED_SERVERS_W = PROP_TAG( PT_MV_UNICODE, 32986)
PR_EMS_AB_MONITORED_SERVERS_O = PROP_TAG( PT_OBJECT, 32986)
PR_EMS_AB_MONITORED_SERVERS_T = PROP_TAG( PT_MV_TSTRING, 32986)
PR_EMS_AB_MONITORED_SERVICES = PROP_TAG( PT_MV_TSTRING, 32987)
PR_EMS_AB_MONITORED_SERVICES_A = PROP_TAG( PT_MV_STRING8, 32987)
PR_EMS_AB_MONITORED_SERVICES_W = PROP_TAG( PT_MV_UNICODE, 32987)
PR_EMS_AB_MONITORING_ALERT_DELAY = PROP_TAG( PT_LONG, 32988)
PR_EMS_AB_MONITORING_ALERT_UNITS = PROP_TAG( PT_LONG, 32989)
PR_EMS_AB_MONITORING_AVAILABILITY_STYLE = PROP_TAG( PT_LONG, 32990)
PR_EMS_AB_MONITORING_AVAILABILITY_WINDOW = PROP_TAG( PT_BINARY, 32991)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL = PROP_TAG( PT_MV_TSTRING, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL_A = PROP_TAG( PT_MV_STRING8, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL_W = PROP_TAG( PT_MV_UNICODE, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL_O = PROP_TAG( PT_OBJECT, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL_T = PROP_TAG( PT_MV_TSTRING, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC = PROP_TAG( PT_MV_TSTRING, 32993)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC_A = PROP_TAG( PT_MV_STRING8, 32993)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC_W = PROP_TAG( PT_MV_UNICODE, 32993)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC_O = PROP_TAG( PT_OBJECT, 32993)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC_T = PROP_TAG( PT_MV_TSTRING, 32993)
PR_EMS_AB_MONITORING_ESCALATION_PROCEDURE = PROP_TAG( PT_MV_BINARY, 32994)
PR_EMS_AB_MONITORING_HOTSITE_POLL_INTERVAL = PROP_TAG( PT_LONG, 32995)
PR_EMS_AB_MONITORING_HOTSITE_POLL_UNITS = PROP_TAG( PT_LONG, 32996)
PR_EMS_AB_MONITORING_MAIL_UPDATE_INTERVAL = PROP_TAG( PT_LONG, 32997)
PR_EMS_AB_MONITORING_MAIL_UPDATE_UNITS = PROP_TAG( PT_LONG, 32998)
PR_EMS_AB_MONITORING_NORMAL_POLL_INTERVAL = PROP_TAG( PT_LONG, 32999)
PR_EMS_AB_MONITORING_NORMAL_POLL_UNITS = PROP_TAG( PT_LONG, 33000)
PR_EMS_AB_MONITORING_RECIPIENTS = PROP_TAG( PT_MV_TSTRING, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_A = PROP_TAG( PT_MV_STRING8, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_W = PROP_TAG( PT_MV_UNICODE, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_O = PROP_TAG( PT_OBJECT, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_T = PROP_TAG( PT_MV_TSTRING, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR = PROP_TAG( PT_MV_TSTRING, 33002)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR_A = PROP_TAG( PT_MV_STRING8, 33002)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR_W = PROP_TAG( PT_MV_UNICODE, 33002)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR_O = PROP_TAG( PT_OBJECT, 33002)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR_T = PROP_TAG( PT_MV_TSTRING, 33002)
PR_EMS_AB_MONITORING_RPC_UPDATE_INTERVAL = PROP_TAG( PT_LONG, 33003)
PR_EMS_AB_MONITORING_RPC_UPDATE_UNITS = PROP_TAG( PT_LONG, 33004)
PR_EMS_AB_MONITORING_WARNING_DELAY = PROP_TAG( PT_LONG, 33005)
PR_EMS_AB_MONITORING_WARNING_UNITS = PROP_TAG( PT_LONG, 33006)
PR_EMS_AB_MTA_LOCAL_CRED = PROP_TAG( PT_TSTRING, 33007)
PR_EMS_AB_MTA_LOCAL_CRED_A = PROP_TAG( PT_STRING8, 33007)
PR_EMS_AB_MTA_LOCAL_CRED_W = PROP_TAG( PT_UNICODE, 33007)
PR_EMS_AB_MTA_LOCAL_DESIG = PROP_TAG( PT_TSTRING, 33008)
PR_EMS_AB_MTA_LOCAL_DESIG_A = PROP_TAG( PT_STRING8, 33008)
PR_EMS_AB_MTA_LOCAL_DESIG_W = PROP_TAG( PT_UNICODE, 33008)
PR_EMS_AB_N_ADDRESS = PROP_TAG( PT_BINARY, 33009)
PR_EMS_AB_N_ADDRESS_TYPE = PROP_TAG( PT_LONG, 33010)
PR_EMS_AB_NETWORK_ADDRESS = PROP_TAG( PT_MV_TSTRING, 33136)
PR_EMS_AB_NETWORK_ADDRESS_A = PROP_TAG( PT_MV_STRING8, 33136)
PR_EMS_AB_NETWORK_ADDRESS_W = PROP_TAG( PT_MV_UNICODE, 33136)
PR_EMS_AB_NNTP_CHARACTER_SET = PROP_TAG( PT_TSTRING, 33149)
PR_EMS_AB_NNTP_CHARACTER_SET_A = PROP_TAG( PT_STRING8, 33149)
PR_EMS_AB_NNTP_CHARACTER_SET_W = PROP_TAG( PT_UNICODE, 33149)
PR_EMS_AB_NNTP_CONTENT_FORMAT = PROP_TAG( PT_TSTRING, 33142)
PR_EMS_AB_NNTP_CONTENT_FORMAT_A = PROP_TAG( PT_STRING8, 33142)
PR_EMS_AB_NNTP_CONTENT_FORMAT_W = PROP_TAG( PT_UNICODE, 33142)
PR_EMS_AB_NT_MACHINE_NAME = PROP_TAG( PT_TSTRING, 33011)
PR_EMS_AB_NT_MACHINE_NAME_A = PROP_TAG( PT_STRING8, 33011)
PR_EMS_AB_NT_MACHINE_NAME_W = PROP_TAG( PT_UNICODE, 33011)
PR_EMS_AB_NT_SECURITY_DESCRIPTOR = PROP_TAG( PT_BINARY, 32787)
PR_EMS_AB_NUM_OF_OPEN_RETRIES = PROP_TAG( PT_LONG, 33012)
PR_EMS_AB_NUM_OF_TRANSFER_RETRIES = PROP_TAG( PT_LONG, 33013)
PR_EMS_AB_OBJ_DIST_NAME = PROP_TAG( PT_TSTRING, 32828)
PR_EMS_AB_OBJ_DIST_NAME_A = PROP_TAG( PT_STRING8, 32828)
PR_EMS_AB_OBJ_DIST_NAME_W = PROP_TAG( PT_UNICODE, 32828)
PR_EMS_AB_OBJ_DIST_NAME_O = PROP_TAG( PT_OBJECT, 32828)
PR_EMS_AB_OBJ_DIST_NAME_T = PROP_TAG( PT_TSTRING, 32828)
PR_EMS_AB_OBJECT_CLASS_CATEGORY = PROP_TAG( PT_LONG, 33014)
PR_EMS_AB_OBJECT_VERSION = PROP_TAG( PT_LONG, 33015)
PR_EMS_AB_OFF_LINE_AB_CONTAINERS = PROP_TAG( PT_MV_TSTRING, 33016)
PR_EMS_AB_OFF_LINE_AB_CONTAINERS_A = PROP_TAG( PT_MV_STRING8, 33016)
PR_EMS_AB_OFF_LINE_AB_CONTAINERS_W = PROP_TAG( PT_MV_UNICODE, 33016)
PR_EMS_AB_OFF_LINE_AB_CONTAINERS_O = PROP_TAG( PT_OBJECT, 33016)
PR_EMS_AB_OFF_LINE_AB_CONTAINERS_T = PROP_TAG( PT_MV_TSTRING, 33016)
PR_EMS_AB_OFF_LINE_AB_SCHEDULE = PROP_TAG( PT_BINARY, 33017)
PR_EMS_AB_OFF_LINE_AB_SERVER = PROP_TAG( PT_TSTRING, 33018)
PR_EMS_AB_OFF_LINE_AB_SERVER_A = PROP_TAG( PT_STRING8, 33018)
PR_EMS_AB_OFF_LINE_AB_SERVER_W = PROP_TAG( PT_UNICODE, 33018)
PR_EMS_AB_OFF_LINE_AB_SERVER_O = PROP_TAG( PT_OBJECT, 33018)
PR_EMS_AB_OFF_LINE_AB_SERVER_T = PROP_TAG( PT_TSTRING, 33018)
PR_EMS_AB_OFF_LINE_AB_STYLE = PROP_TAG( PT_LONG, 33019)
PR_EMS_AB_OID_TYPE = PROP_TAG( PT_LONG, 33020)
PR_EMS_AB_OM_OBJECT_CLASS = PROP_TAG( PT_BINARY, 33021)
PR_EMS_AB_OM_SYNTAX = PROP_TAG( PT_LONG, 33022)
PR_EMS_AB_OOF_REPLY_TO_ORIGINATOR = PROP_TAG( PT_BOOLEAN, 33023)
PR_EMS_AB_OPEN_RETRY_INTERVAL = PROP_TAG( PT_LONG, 33024)
PR_EMS_AB_ORGANIZATION_NAME = PROP_TAG( PT_MV_TSTRING, 33025)
PR_EMS_AB_ORGANIZATION_NAME_A = PROP_TAG( PT_MV_STRING8, 33025)
PR_EMS_AB_ORGANIZATION_NAME_W = PROP_TAG( PT_MV_UNICODE, 33025)
PR_EMS_AB_ORGANIZATIONAL_UNIT_NAME = PROP_TAG( PT_MV_TSTRING, 33026)
PR_EMS_AB_ORGANIZATIONAL_UNIT_NAME_A = PROP_TAG( PT_MV_STRING8, 33026)
PR_EMS_AB_ORGANIZATIONAL_UNIT_NAME_W = PROP_TAG( PT_MV_UNICODE, 33026)
PR_EMS_AB_ORIGINAL_DISPLAY_TABLE = PROP_TAG( PT_BINARY, 33027)
PR_EMS_AB_ORIGINAL_DISPLAY_TABLE_MSDOS = PROP_TAG( PT_BINARY, 33028)
PR_EMS_AB_OUTBOUND_SITES = PROP_TAG( PT_MV_TSTRING, 33029)
PR_EMS_AB_OUTBOUND_SITES_A = PROP_TAG( PT_MV_STRING8, 33029)
PR_EMS_AB_OUTBOUND_SITES_W = PROP_TAG( PT_MV_UNICODE, 33029)
PR_EMS_AB_OUTBOUND_SITES_O = PROP_TAG( PT_OBJECT, 33029)
PR_EMS_AB_OUTBOUND_SITES_T = PROP_TAG( PT_MV_TSTRING, 33029)
PR_EMS_AB_OWNER = PROP_TAG( PT_TSTRING, 32780)
PR_EMS_AB_OWNER_A = PROP_TAG( PT_STRING8, 32780)
PR_EMS_AB_OWNER_W = PROP_TAG( PT_UNICODE, 32780)
PR_EMS_AB_OWNER_O = PROP_TAG( PT_OBJECT, 32780)
PR_EMS_AB_OWNER_T = PROP_TAG( PT_TSTRING, 32780)
PR_EMS_AB_OWNER_BL = PROP_TAG( PT_TSTRING, 32804)
PR_EMS_AB_OWNER_BL_A = PROP_TAG( PT_STRING8, 32804)
PR_EMS_AB_OWNER_BL_W = PROP_TAG( PT_UNICODE, 32804)
PR_EMS_AB_OWNER_BL_O = PROP_TAG( PT_OBJECT, 32804)
PR_EMS_AB_OWNER_BL_T = PROP_TAG( PT_TSTRING, 32804)
PR_EMS_AB_P_SELECTOR = PROP_TAG( PT_BINARY, 33030)
PR_EMS_AB_P_SELECTOR_INBOUND = PROP_TAG( PT_BINARY, 33031)
PR_EMS_AB_PER_MSG_DIALOG_DISPLAY_TABLE = PROP_TAG( PT_BINARY, 33032)
PR_EMS_AB_PER_RECIP_DIALOG_DISPLAY_TABLE = PROP_TAG( PT_BINARY, 33033)
PR_EMS_AB_PERIOD_REP_SYNC_TIMES = PROP_TAG( PT_BINARY, 33034)
PR_EMS_AB_PERIOD_REPL_STAGGER = PROP_TAG( PT_LONG, 33035)
PR_EMS_AB_PF_CONTACTS = PROP_TAG( PT_MV_TSTRING, 32824)
PR_EMS_AB_PF_CONTACTS_A = PROP_TAG( PT_MV_STRING8, 32824)
PR_EMS_AB_PF_CONTACTS_W = PROP_TAG( PT_MV_UNICODE, 32824)
PR_EMS_AB_PF_CONTACTS_O = PROP_TAG( PT_OBJECT, 32824)
PR_EMS_AB_PF_CONTACTS_T = PROP_TAG( PT_MV_TSTRING, 32824)
PR_EMS_AB_POP_CHARACTER_SET = PROP_TAG( PT_TSTRING, 33145)
PR_EMS_AB_POP_CHARACTER_SET_A = PROP_TAG( PT_STRING8, 33145)
PR_EMS_AB_POP_CHARACTER_SET_W = PROP_TAG( PT_UNICODE, 33145)
PR_EMS_AB_POP_CONTENT_FORMAT = PROP_TAG( PT_TSTRING, 33143)
PR_EMS_AB_POP_CONTENT_FORMAT_A = PROP_TAG( PT_STRING8, 33143)
PR_EMS_AB_POP_CONTENT_FORMAT_W = PROP_TAG( PT_UNICODE, 33143)
PR_EMS_AB_POSTAL_ADDRESS = PROP_TAG( PT_MV_BINARY, 33036)
PR_EMS_AB_PREFERRED_DELIVERY_METHOD = PROP_TAG( PT_MV_LONG, 33037)
PR_EMS_AB_PRMD = PROP_TAG( PT_TSTRING, 33038)
PR_EMS_AB_PRMD_A = PROP_TAG( PT_STRING8, 33038)
PR_EMS_AB_PRMD_W = PROP_TAG( PT_UNICODE, 33038)
PR_EMS_AB_PROXY_ADDRESSES = PROP_TAG( PT_MV_TSTRING, 32783)
PR_EMS_AB_PROXY_ADDRESSES_A = PROP_TAG( PT_MV_STRING8, 32783)
PR_EMS_AB_PROXY_ADDRESSES_W = PROP_TAG( PT_MV_UNICODE, 32783)
PR_EMS_AB_PROXY_GENERATOR_DLL = PROP_TAG( PT_TSTRING, 33039)
PR_EMS_AB_PROXY_GENERATOR_DLL_A = PROP_TAG( PT_STRING8, 33039)
PR_EMS_AB_PROXY_GENERATOR_DLL_W = PROP_TAG( PT_UNICODE, 33039)
PR_EMS_AB_PUBLIC_DELEGATES = PROP_TAG( PT_OBJECT, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_A = PROP_TAG( PT_MV_STRING8, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_W = PROP_TAG( PT_MV_UNICODE, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_O = PROP_TAG( PT_OBJECT, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_T = PROP_TAG( PT_MV_TSTRING, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_BL = PROP_TAG( PT_MV_TSTRING, 33040)
PR_EMS_AB_PUBLIC_DELEGATES_BL_A = PROP_TAG( PT_MV_STRING8, 33040)
PR_EMS_AB_PUBLIC_DELEGATES_BL_W = PROP_TAG( PT_MV_UNICODE, 33040)
PR_EMS_AB_PUBLIC_DELEGATES_BL_O = PROP_TAG( PT_OBJECT, 33040)
PR_EMS_AB_PUBLIC_DELEGATES_BL_T = PROP_TAG( PT_MV_TSTRING, 33040)
PR_EMS_AB_QUOTA_NOTIFICATION_SCHEDULE = PROP_TAG( PT_BINARY, 33041)
PR_EMS_AB_QUOTA_NOTIFICATION_STYLE = PROP_TAG( PT_LONG, 33042)
PR_EMS_AB_RANGE_LOWER = PROP_TAG( PT_LONG, 33043)
PR_EMS_AB_RANGE_UPPER = PROP_TAG( PT_LONG, 33044)
PR_EMS_AB_RAS_CALLBACK_NUMBER = PROP_TAG( PT_TSTRING, 33045)
PR_EMS_AB_RAS_CALLBACK_NUMBER_A = PROP_TAG( PT_STRING8, 33045)
PR_EMS_AB_RAS_CALLBACK_NUMBER_W = PROP_TAG( PT_UNICODE, 33045)
PR_EMS_AB_RAS_PHONE_NUMBER = PROP_TAG( PT_TSTRING, 33046)
PR_EMS_AB_RAS_PHONE_NUMBER_A = PROP_TAG( PT_STRING8, 33046)
PR_EMS_AB_RAS_PHONE_NUMBER_W = PROP_TAG( PT_UNICODE, 33046)
PR_EMS_AB_RAS_PHONEBOOK_ENTRY_NAME = PROP_TAG( PT_TSTRING, 33047)
PR_EMS_AB_RAS_PHONEBOOK_ENTRY_NAME_A = PROP_TAG( PT_STRING8, 33047)
PR_EMS_AB_RAS_PHONEBOOK_ENTRY_NAME_W = PROP_TAG( PT_UNICODE, 33047)
PR_EMS_AB_RAS_REMOTE_SRVR_NAME = PROP_TAG( PT_TSTRING, 33048)
PR_EMS_AB_RAS_REMOTE_SRVR_NAME_A = PROP_TAG( PT_STRING8, 33048)
PR_EMS_AB_RAS_REMOTE_SRVR_NAME_W = PROP_TAG( PT_UNICODE, 33048)
PR_EMS_AB_REGISTERED_ADDRESS = PROP_TAG( PT_MV_BINARY, 33049)
PR_EMS_AB_REMOTE_BRIDGE_HEAD = PROP_TAG( PT_TSTRING, 33050)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_A = PROP_TAG( PT_STRING8, 33050)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_W = PROP_TAG( PT_UNICODE, 33050)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_ADDRESS = PROP_TAG( PT_TSTRING, 33051)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_ADDRESS_A = PROP_TAG( PT_STRING8, 33051)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_ADDRESS_W = PROP_TAG( PT_UNICODE, 33051)
PR_EMS_AB_REMOTE_OUT_BH_SERVER = PROP_TAG( PT_TSTRING, 33052)
PR_EMS_AB_REMOTE_OUT_BH_SERVER_A = PROP_TAG( PT_STRING8, 33052)
PR_EMS_AB_REMOTE_OUT_BH_SERVER_W = PROP_TAG( PT_UNICODE, 33052)
PR_EMS_AB_REMOTE_OUT_BH_SERVER_O = PROP_TAG( PT_OBJECT, 33052)
PR_EMS_AB_REMOTE_OUT_BH_SERVER_T = PROP_TAG( PT_TSTRING, 33052)
PR_EMS_AB_REMOTE_SITE = PROP_TAG( PT_TSTRING, 33053)
PR_EMS_AB_REMOTE_SITE_A = PROP_TAG( PT_STRING8, 33053)
PR_EMS_AB_REMOTE_SITE_W = PROP_TAG( PT_UNICODE, 33053)
PR_EMS_AB_REMOTE_SITE_O = PROP_TAG( PT_OBJECT, 33053)
PR_EMS_AB_REMOTE_SITE_T = PROP_TAG( PT_TSTRING, 33053)
PR_EMS_AB_REPLICATION_MAIL_MSG_SIZE = PROP_TAG( PT_LONG, 33128)
PR_EMS_AB_REPLICATION_SENSITIVITY = PROP_TAG( PT_LONG, 33054)
PR_EMS_AB_REPLICATION_STAGGER = PROP_TAG( PT_LONG, 33055)
PR_EMS_AB_REPORT_TO_ORIGINATOR = PROP_TAG( PT_BOOLEAN, 33056)
PR_EMS_AB_REPORT_TO_OWNER = PROP_TAG( PT_BOOLEAN, 33057)
PR_EMS_AB_REPORTS = PROP_TAG( PT_OBJECT, 32782)
PR_EMS_AB_REPORTS_A = PROP_TAG( PT_MV_STRING8, 32782)
PR_EMS_AB_REPORTS_W = PROP_TAG( PT_MV_UNICODE, 32782)
PR_EMS_AB_REPORTS_O = PROP_TAG( PT_OBJECT, 32782)
PR_EMS_AB_REPORTS_T = PROP_TAG( PT_MV_TSTRING, 32782)
PR_EMS_AB_REQ_SEQ = PROP_TAG( PT_LONG, 33058)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA = PROP_TAG( PT_TSTRING, 33059)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA_A = PROP_TAG( PT_STRING8, 33059)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA_W = PROP_TAG( PT_UNICODE, 33059)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA_O = PROP_TAG( PT_OBJECT, 33059)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA_T = PROP_TAG( PT_TSTRING, 33059)
PR_EMS_AB_RID_SERVER = PROP_TAG( PT_TSTRING, 33060)
PR_EMS_AB_RID_SERVER_A = PROP_TAG( PT_STRING8, 33060)
PR_EMS_AB_RID_SERVER_W = PROP_TAG( PT_UNICODE, 33060)
PR_EMS_AB_RID_SERVER_O = PROP_TAG( PT_OBJECT, 33060)
PR_EMS_AB_RID_SERVER_T = PROP_TAG( PT_TSTRING, 33060)
PR_EMS_AB_ROLE_OCCUPANT = PROP_TAG( PT_MV_TSTRING, 33061)
PR_EMS_AB_ROLE_OCCUPANT_A = PROP_TAG( PT_MV_STRING8, 33061)
PR_EMS_AB_ROLE_OCCUPANT_W = PROP_TAG( PT_MV_UNICODE, 33061)
PR_EMS_AB_ROLE_OCCUPANT_O = PROP_TAG( PT_OBJECT, 33061)
PR_EMS_AB_ROLE_OCCUPANT_T = PROP_TAG( PT_MV_TSTRING, 33061)
PR_EMS_AB_ROUTING_LIST = PROP_TAG( PT_MV_TSTRING, 33062)
PR_EMS_AB_ROUTING_LIST_A = PROP_TAG( PT_MV_STRING8, 33062)
PR_EMS_AB_ROUTING_LIST_W = PROP_TAG( PT_MV_UNICODE, 33062)
PR_EMS_AB_RTS_CHECKPOINT_SIZE = PROP_TAG( PT_LONG, 33063)
PR_EMS_AB_RTS_RECOVERY_TIMEOUT = PROP_TAG( PT_LONG, 33064)
PR_EMS_AB_RTS_WINDOW_SIZE = PROP_TAG( PT_LONG, 33065)
PR_EMS_AB_RUNS_ON = PROP_TAG( PT_MV_TSTRING, 33066)
PR_EMS_AB_RUNS_ON_A = PROP_TAG( PT_MV_STRING8, 33066)
PR_EMS_AB_RUNS_ON_W = PROP_TAG( PT_MV_UNICODE, 33066)
PR_EMS_AB_RUNS_ON_O = PROP_TAG( PT_OBJECT, 33066)
PR_EMS_AB_RUNS_ON_T = PROP_TAG( PT_MV_TSTRING, 33066)
PR_EMS_AB_S_SELECTOR = PROP_TAG( PT_BINARY, 33067)
PR_EMS_AB_S_SELECTOR_INBOUND = PROP_TAG( PT_BINARY, 33068)
PR_EMS_AB_SCHEMA_FLAGS = PROP_TAG( PT_LONG, 33139)
PR_EMS_AB_SCHEMA_VERSION = PROP_TAG( PT_MV_LONG, 33148)
PR_EMS_AB_SEARCH_FLAGS = PROP_TAG( PT_LONG, 33069)
PR_EMS_AB_SEARCH_GUIDE = PROP_TAG( PT_MV_BINARY, 33070)
PR_EMS_AB_SECURITY_PROTOCOL = PROP_TAG( PT_MV_BINARY, 32823)
PR_EMS_AB_SEE_ALSO = PROP_TAG( PT_MV_TSTRING, 33071)
PR_EMS_AB_SEE_ALSO_A = PROP_TAG( PT_MV_STRING8, 33071)
PR_EMS_AB_SEE_ALSO_W = PROP_TAG( PT_MV_UNICODE, 33071)
PR_EMS_AB_SEE_ALSO_O = PROP_TAG( PT_OBJECT, 33071)
PR_EMS_AB_SEE_ALSO_T = PROP_TAG( PT_MV_TSTRING, 33071)
PR_EMS_AB_SERIAL_NUMBER = PROP_TAG( PT_MV_TSTRING, 33072)
PR_EMS_AB_SERIAL_NUMBER_A = PROP_TAG( PT_MV_STRING8, 33072)
PR_EMS_AB_SERIAL_NUMBER_W = PROP_TAG( PT_MV_UNICODE, 33072)
PR_EMS_AB_SERVICE_ACTION_FIRST = PROP_TAG( PT_LONG, 33073)
PR_EMS_AB_SERVICE_ACTION_OTHER = PROP_TAG( PT_LONG, 33074)
PR_EMS_AB_SERVICE_ACTION_SECOND = PROP_TAG( PT_LONG, 33075)
PR_EMS_AB_SERVICE_RESTART_DELAY = PROP_TAG( PT_LONG, 33076)
PR_EMS_AB_SERVICE_RESTART_MESSAGE = PROP_TAG( PT_TSTRING, 33077)
PR_EMS_AB_SERVICE_RESTART_MESSAGE_A = PROP_TAG( PT_STRING8, 33077)
PR_EMS_AB_SERVICE_RESTART_MESSAGE_W = PROP_TAG( PT_UNICODE, 33077)
PR_EMS_AB_SESSION_DISCONNECT_TIMER = PROP_TAG( PT_LONG, 33078)
PR_EMS_AB_SITE_AFFINITY = PROP_TAG( PT_MV_TSTRING, 33079)
PR_EMS_AB_SITE_AFFINITY_A = PROP_TAG( PT_MV_STRING8, 33079)
PR_EMS_AB_SITE_AFFINITY_W = PROP_TAG( PT_MV_UNICODE, 33079)
PR_EMS_AB_SITE_FOLDER_GUID = PROP_TAG( PT_BINARY, 33126)
PR_EMS_AB_SITE_FOLDER_SERVER = PROP_TAG( PT_TSTRING, 33127)
PR_EMS_AB_SITE_FOLDER_SERVER_A = PROP_TAG( PT_STRING8, 33127)
PR_EMS_AB_SITE_FOLDER_SERVER_W = PROP_TAG( PT_UNICODE, 33127)
PR_EMS_AB_SITE_FOLDER_SERVER_O = PROP_TAG( PT_OBJECT, 33127)
PR_EMS_AB_SITE_FOLDER_SERVER_T = PROP_TAG( PT_TSTRING, 33127)
PR_EMS_AB_SITE_PROXY_SPACE = PROP_TAG( PT_MV_TSTRING, 33080)
PR_EMS_AB_SITE_PROXY_SPACE_A = PROP_TAG( PT_MV_STRING8, 33080)
PR_EMS_AB_SITE_PROXY_SPACE_W = PROP_TAG( PT_MV_UNICODE, 33080)
PR_EMS_AB_SPACE_LAST_COMPUTED = PROP_TAG( PT_SYSTIME, 33081)
PR_EMS_AB_STREET_ADDRESS = PROP_TAG( PT_TSTRING, 33082)
PR_EMS_AB_STREET_ADDRESS_A = PROP_TAG( PT_STRING8, 33082)
PR_EMS_AB_STREET_ADDRESS_W = PROP_TAG( PT_UNICODE, 33082)
PR_EMS_AB_SUB_REFS = PROP_TAG( PT_MV_TSTRING, 33083)
PR_EMS_AB_SUB_REFS_A = PROP_TAG( PT_MV_STRING8, 33083)
PR_EMS_AB_SUB_REFS_W = PROP_TAG( PT_MV_UNICODE, 33083)
PR_EMS_AB_SUB_REFS_O = PROP_TAG( PT_OBJECT, 33083)
PR_EMS_AB_SUB_REFS_T = PROP_TAG( PT_MV_TSTRING, 33083)
PR_EMS_AB_SUB_SITE = PROP_TAG( PT_TSTRING, 33147)
PR_EMS_AB_SUB_SITE_A = PROP_TAG( PT_STRING8, 33147)
PR_EMS_AB_SUB_SITE_W = PROP_TAG( PT_UNICODE, 33147)
PR_EMS_AB_SUBMISSION_CONT_LENGTH = PROP_TAG( PT_LONG, 33084)
PR_EMS_AB_SUPPORTED_APPLICATION_CONTEXT = PROP_TAG( PT_MV_BINARY, 33085)
PR_EMS_AB_SUPPORTING_STACK = PROP_TAG( PT_MV_TSTRING, 33086)
PR_EMS_AB_SUPPORTING_STACK_A = PROP_TAG( PT_MV_STRING8, 33086)
PR_EMS_AB_SUPPORTING_STACK_W = PROP_TAG( PT_MV_UNICODE, 33086)
PR_EMS_AB_SUPPORTING_STACK_O = PROP_TAG( PT_OBJECT, 33086)
PR_EMS_AB_SUPPORTING_STACK_T = PROP_TAG( PT_MV_TSTRING, 33086)
PR_EMS_AB_SUPPORTING_STACK_BL = PROP_TAG( PT_MV_TSTRING, 33087)
PR_EMS_AB_SUPPORTING_STACK_BL_A = PROP_TAG( PT_MV_STRING8, 33087)
PR_EMS_AB_SUPPORTING_STACK_BL_W = PROP_TAG( PT_MV_UNICODE, 33087)
PR_EMS_AB_SUPPORTING_STACK_BL_O = PROP_TAG( PT_OBJECT, 33087)
PR_EMS_AB_SUPPORTING_STACK_BL_T = PROP_TAG( PT_MV_TSTRING, 33087)
PR_EMS_AB_T_SELECTOR = PROP_TAG( PT_BINARY, 33088)
PR_EMS_AB_T_SELECTOR_INBOUND = PROP_TAG( PT_BINARY, 33089)
PR_EMS_AB_TARGET_ADDRESS = PROP_TAG( PT_TSTRING, 32785)
PR_EMS_AB_TARGET_ADDRESS_A = PROP_TAG( PT_STRING8, 32785)
PR_EMS_AB_TARGET_ADDRESS_W = PROP_TAG( PT_UNICODE, 32785)
PR_EMS_AB_TARGET_MTAS = PROP_TAG( PT_MV_TSTRING, 33090)
PR_EMS_AB_TARGET_MTAS_A = PROP_TAG( PT_MV_STRING8, 33090)
PR_EMS_AB_TARGET_MTAS_W = PROP_TAG( PT_MV_UNICODE, 33090)
PR_EMS_AB_TELEPHONE_NUMBER = PROP_TAG( PT_MV_TSTRING, 32786)
PR_EMS_AB_TELEPHONE_NUMBER_A = PROP_TAG( PT_MV_STRING8, 32786)
PR_EMS_AB_TELEPHONE_NUMBER_W = PROP_TAG( PT_MV_UNICODE, 32786)
PR_EMS_AB_TELETEX_TERMINAL_IDENTIFIER = PROP_TAG( PT_MV_BINARY, 33091)
PR_EMS_AB_TEMP_ASSOC_THRESHOLD = PROP_TAG( PT_LONG, 33092)
PR_EMS_AB_TOMBSTONE_LIFETIME = PROP_TAG( PT_LONG, 33093)
PR_EMS_AB_TRACKING_LOG_PATH_NAME = PROP_TAG( PT_TSTRING, 33094)
PR_EMS_AB_TRACKING_LOG_PATH_NAME_A = PROP_TAG( PT_STRING8, 33094)
PR_EMS_AB_TRACKING_LOG_PATH_NAME_W = PROP_TAG( PT_UNICODE, 33094)
PR_EMS_AB_TRANS_RETRY_MINS = PROP_TAG( PT_LONG, 33095)
PR_EMS_AB_TRANS_TIMEOUT_MINS = PROP_TAG( PT_LONG, 33096)
PR_EMS_AB_TRANSFER_RETRY_INTERVAL = PROP_TAG( PT_LONG, 33097)
PR_EMS_AB_TRANSFER_TIMEOUT_NON_URGENT = PROP_TAG( PT_LONG, 33098)
PR_EMS_AB_TRANSFER_TIMEOUT_NORMAL = PROP_TAG( PT_LONG, 33099)
PR_EMS_AB_TRANSFER_TIMEOUT_URGENT = PROP_TAG( PT_LONG, 33100)
PR_EMS_AB_TRANSLATION_TABLE_USED = PROP_TAG( PT_LONG, 33101)
PR_EMS_AB_TRANSPORT_EXPEDITED_DATA = PROP_TAG( PT_BOOLEAN, 33102)
PR_EMS_AB_TRUST_LEVEL = PROP_TAG( PT_LONG, 33103)
PR_EMS_AB_TURN_REQUEST_THRESHOLD = PROP_TAG( PT_LONG, 33104)
PR_EMS_AB_TWO_WAY_ALTERNATE_FACILITY = PROP_TAG( PT_BOOLEAN, 33105)
PR_EMS_AB_UNAUTH_ORIG_BL = PROP_TAG( PT_MV_TSTRING, 33106)
PR_EMS_AB_UNAUTH_ORIG_BL_A = PROP_TAG( PT_MV_STRING8, 33106)
PR_EMS_AB_UNAUTH_ORIG_BL_W = PROP_TAG( PT_MV_UNICODE, 33106)
PR_EMS_AB_UNAUTH_ORIG_BL_O = PROP_TAG( PT_OBJECT, 33106)
PR_EMS_AB_UNAUTH_ORIG_BL_T = PROP_TAG( PT_MV_TSTRING, 33106)
PR_EMS_AB_USE_SERVER_VALUES = PROP_TAG( PT_BOOLEAN, 33150)
PR_EMS_AB_USER_PASSWORD = PROP_TAG( PT_MV_BINARY, 33107)
PR_EMS_AB_USN_CHANGED = PROP_TAG( PT_LONG, 32809)
PR_EMS_AB_USN_CREATED = PROP_TAG( PT_LONG, 33108)
PR_EMS_AB_USN_DSA_LAST_OBJ_REMOVED = PROP_TAG( PT_LONG, 33109)
PR_EMS_AB_USN_INTERSITE = PROP_TAG( PT_LONG, 33146)
PR_EMS_AB_USN_LAST_OBJ_REM = PROP_TAG( PT_LONG, 33110)
PR_EMS_AB_USN_SOURCE = PROP_TAG( PT_LONG, 33111)
PR_EMS_AB_WWW_HOME_PAGE = PROP_TAG( PT_TSTRING, 33141)
PR_EMS_AB_WWW_HOME_PAGE_A = PROP_TAG( PT_STRING8, 33141)
PR_EMS_AB_WWW_HOME_PAGE_W = PROP_TAG( PT_UNICODE, 33141)
PR_EMS_AB_X121_ADDRESS = PROP_TAG( PT_MV_TSTRING, 33112)
PR_EMS_AB_X121_ADDRESS_A = PROP_TAG( PT_MV_STRING8, 33112)
PR_EMS_AB_X121_ADDRESS_W = PROP_TAG( PT_MV_UNICODE, 33112)
PR_EMS_AB_X25_CALL_USER_DATA_INCOMING = PROP_TAG( PT_BINARY, 33113)
PR_EMS_AB_X25_CALL_USER_DATA_OUTGOING = PROP_TAG( PT_BINARY, 33114)
PR_EMS_AB_X25_FACILITIES_DATA_INCOMING = PROP_TAG( PT_BINARY, 33115)
PR_EMS_AB_X25_FACILITIES_DATA_OUTGOING = PROP_TAG( PT_BINARY, 33116)
PR_EMS_AB_X25_LEASED_LINE_PORT = PROP_TAG( PT_BINARY, 33117)
PR_EMS_AB_X25_LEASED_OR_SWITCHED = PROP_TAG( PT_BOOLEAN, 33118)
PR_EMS_AB_X25_REMOTE_MTA_PHONE = PROP_TAG( PT_TSTRING, 33119)
PR_EMS_AB_X25_REMOTE_MTA_PHONE_A = PROP_TAG( PT_STRING8, 33119)
PR_EMS_AB_X25_REMOTE_MTA_PHONE_W = PROP_TAG( PT_UNICODE, 33119)
PR_EMS_AB_X400_ATTACHMENT_TYPE = PROP_TAG( PT_BINARY, 33120)
PR_EMS_AB_X400_SELECTOR_SYNTAX = PROP_TAG( PT_LONG, 33121)
PR_EMS_AB_X500_ACCESS_CONTROL_LIST = PROP_TAG( PT_BINARY, 33122)
PR_EMS_AB_XMIT_TIMEOUT_NON_URGENT = PROP_TAG( PT_LONG, 33123)
PR_EMS_AB_XMIT_TIMEOUT_NORMAL = PROP_TAG( PT_LONG, 33124)
PR_EMS_AB_XMIT_TIMEOUT_URGENT = PROP_TAG( PT_LONG, 33125)
| [
2,
43433,
366,
805,
935,
1,
422,
17228,
4090,
19313,
4760,
13,
39,
201,
198,
6738,
3975,
270,
3775,
1330,
19310,
62,
4944,
48451,
28343,
11,
19310,
62,
33991,
11,
19310,
62,
40,
17,
11,
19310,
62,
43,
18494,
11,
19310,
62,
49,
19,... | 1.612599 | 38,606 |
import textwrap
from .attribute_name import AttributeName
| [
11748,
2420,
37150,
198,
6738,
764,
42348,
62,
3672,
1330,
3460,
4163,
5376,
628
] | 4.214286 | 14 |
import os
import gensim
from gensim.utils import simple_preprocess
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from src.consts import (
SubtopicSummaryType,
LDA_SUBTOPIC_KEY_WORDS_PATH,
LDA_SUBTOPIC_KEY_PHRASES_PATH,
LDA_SUBTOPIC_N_WORDS_PATH,
LDA_SUBTOPIC_SENTENCE_PATH,
SUBTOPIC_SUMMARY_TYPE,
MAX_SUBTOPICS_NUM,
N_BASE_UNIGRAMS,
N_PARAMETER,
)
from src.utils import load
stemmer = SnowballStemmer("english")
topic2original_topic = {
"Arts & Entertainment": [
"Arts & Entertainment",
"Multimedia",
"Arts and Living",
"Entertainment",
"Arts and Living_Books",
"Arts and Living_Food and Dining",
"BookWorld",
"Arts and Living_Movies",
"Arts and Living_Home and Garden",
"Arts and Living_Music",
"Arts and Living_Travel",
"Style",
],
"Business": [
"Business",
"Business_U.S. Economy",
"Economy",
"Capital Business",
"National-Economy",
"Business_Metro Business",
"Economic Policy",
],
"By The Way - Travel": ["By The Way - Travel", "Travel"],
"Climate & Environment": ["Climate & Environment", "Capital Weather Gang", "Animals", "Climate Solutions"],
"D.C., Md. & Va.": ["D.C., Md. & Va."],
"Discussions": ["Discussions", "Live Discussions"],
"Education": [
"Education",
"Higher Education",
"High Schools",
"Colleges",
"KidsPost",
"The Answer Sheet",
"Parenting",
],
"Health": ["Health", "Health_Wires", "Health & Science", "Health-Environment-Science", "National/health-science"],
"History": ["History", "Made by History", "Retropolis"],
"Immigration": ["Immigration"],
"Lifestyle": [
"Lifestyle",
"LocalLiving",
"Lifestyle/food",
"Food",
"Local",
"Obituaries",
"Local-Enterprise",
"The Extras_Montgomery",
"The Extras_Southern Md.",
"The Extras_Fairfax",
"Morning Mix",
"Going Out Guide",
"Weekend",
"Lifestyle/magazine",
"Internet Culture",
"Pop Culture",
"Inspired Life",
"PostEverything",
"Magazine",
"Lifestyle/style",
"Brand-studio",
],
"Live Chats": ["Live Chats"],
"National": ["National", "Nation", "Nationals & MLB", "National-Enterprise"],
"National Security": ["National Security", "National-Security", "Crime", "Cops-Courts", "True Crime", "Military"],
"Opinions": [
"Opinions",
"Editorial-Opinion",
"Opinions_Columnists",
"Opinions_Feedback",
"Local Opinions",
"Global Opinions",
"Opinions_Columns and Blogs",
"Post Opinión",
"Opinions/global-opinions",
"The Plum Line",
"Fact Checker",
],
"Outlook": ["Outlook"],
"Photography": ["Photography"],
"Podcasts": ["Podcasts"],
"Politics": [
"Politics",
"National-Politics",
"Local-Politics",
"Politics_Federal Page",
"Monkey Cage",
"Politics_Elections",
"World_Middle East_Iraq",
"Powerpost",
"powerpost",
"The Fix",
],
"Public Relations": [
"Public Relations",
"The Extras_Prince William",
"The Extras_Prince George's",
"The Extras_Loudoun",
],
"Real Estate": ["Real Estate", "RealEstate"],
"Religion": ["Religion", "OnFaith"],
"Science": ["Science"],
"Sports": [
"Sports",
"Sports_High Schools",
"High School Sports",
"Sports_Redskins",
"Redskins",
"Sports_MLB",
"Sports_Nationals",
"Sports_Wizards",
"Sports_NFL",
"Sports_NBA",
"Sports_Capitals",
"NFL",
"NBA",
"College Sports",
"MLB",
"Washington Nationals",
"D.C. Sports Bog",
"Golf",
"Soccer",
"NHL",
"Fantasy Sports",
"Esports",
],
"Tablet": ["Tablet"],
"Technology": [
"Technology",
"Technology_Personal Tech",
"Technology_Special Reports_Satellite Radio",
"Tech Policy",
"Innovations",
],
"Topics": ["Topics"],
"Transportation": [
"Transportation",
"Metro_Obituaries",
"Metro_Virginia",
"Metro_The District",
"Gridlock",
"Metro_Crime",
"Metro_Maryland",
"Metro_Maryland_Montgomery",
"Future of Transportation",
"Metro_Maryland_Pr. George's",
"Metro",
"Cars",
"Development-Transportation",
],
"U.S. Policy": ["U.S. Policy"],
"Utils": ["Utils", "Express", "Print_A Section", "Print", "Print_Editorial Pages", "Print_Style Print_Weekend"],
"Video Games": ["Video Games", "Video Game News", "Video Gaming"],
"Washington Post Live": [
"Washington Post Live",
"Washington Post Magazine",
"Washington Post PR Blog",
"The Washington Post Magazine",
"Washington Wizards",
"Washington Capitals",
],
"World": ["World", "Foreign", "World_Asia/Pacific", "Europe", "Asia", "Africa"],
}
original_topic2topic = {v: k for k, vs in topic2original_topic.items() for v in vs}
| [
11748,
28686,
198,
198,
11748,
308,
641,
320,
198,
6738,
308,
641,
320,
13,
26791,
1330,
2829,
62,
3866,
14681,
198,
6738,
299,
2528,
74,
13,
927,
1330,
9678,
7934,
43,
368,
6759,
7509,
11,
7967,
1894,
1273,
368,
647,
198,
198,
6738... | 2.191899 | 2,444 |
import random
from sherlockpipe.scoring.SignalSelector import SignalSelector, SignalSelection
| [
11748,
4738,
198,
6738,
15059,
5354,
34360,
13,
46536,
13,
11712,
282,
17563,
273,
1330,
26484,
17563,
273,
11,
26484,
4653,
1564,
198
] | 4.086957 | 23 |
# Give a name to describe this model. The name should conform to python variable naming conventions, and should be
# only a single word.
model_name = 'ner_text'
# Tags are used to describe the performance of a model. These simple keywords can help people decide whether your model
# is appropriate to use for their situation. Some examples of tags are 'fast', 'accurate', or 'essential'. You should
# limit the number of tags your model has to only contain a few with relevant information.
model_tags = 'huggingface,ner,text'
# The model type determines what inputs your model will receive. The options are:
# - 'image' : Model receives a file name to an image file, opens it, and creates a prediction
# - 'text' : Model receives a string of text and uses it to create a prediction.
model_type = 'text' | [
2,
13786,
257,
1438,
284,
6901,
428,
2746,
13,
383,
1438,
815,
17216,
284,
21015,
7885,
19264,
21396,
11,
290,
815,
307,
198,
2,
691,
257,
2060,
1573,
13,
198,
19849,
62,
3672,
796,
705,
1008,
62,
5239,
6,
198,
198,
2,
44789,
389,... | 4.029851 | 201 |
import random
import requests
import json
from flask import Flask
from app import create_app, init_app
from app.models import *
from faker import Faker
faker = Faker()
valid_email = "matthewkantor@gmail.com"
valid_password = "password"
valid_user = "mattkantor"
app = create_app()
seed()
| [
11748,
4738,
198,
198,
11748,
7007,
198,
11748,
33918,
198,
198,
6738,
42903,
1330,
46947,
198,
198,
6738,
598,
1330,
2251,
62,
1324,
11,
2315,
62,
1324,
198,
6738,
598,
13,
27530,
1330,
1635,
198,
6738,
277,
3110,
1330,
376,
3110,
19... | 2.923077 | 104 |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 19:56:24 2019
@author: William
"""
import cv2
import winsound
import time
goal = 25
n = 0
cv2.VideoCapture(0).release
HAARPATH = "haarcascade/haarcascade_frontalface_default.xml"
cap=cv2.VideoCapture(0)
face_detect=cv2.CascadeClassifier(HAARPATH)
faces =[]
prevface = []
time.sleep(15) #Gives user 15 seconds to get into position
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
if n>=goal:
break
# Our operations on the frame go here
if ret is True:
prevface = faces
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces=face_detect.detectMultiScale(gray, 1.3, 7)
if len(faces) > 0 and len(prevface) == 0:
n = n+1
winsound.MessageBeep(winsound.MB_ICONHAND)
print (n)
for (x,y,w,h) in faces:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
print ("\a")
cap.release()
cv2.destroyAllWindows()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
2892,
5267,
220,
604,
678,
25,
3980,
25,
1731,
13130,
201,
198,
201,
198,
31,
9800,
25,
3977,
201,
198,
37811,
201,
198,
201,
198,
... | 1.97193 | 570 |
MESSAGE_DICT = {
'17':"Feed me!",
'33':"Things could be going better. :-(",
'65':"Do I want to do work: NOP. :-/",
'18':"This isn't the largest program I have every stored.",
'34':"I am happy!",
'66':"My processor is idling! :p",
'20':"Instructions! OM, NOP, NOP, NOP!",
'36':"I am very happy!",
'68':"I have a lot of energy! Electricity!!!",
'24':"Ooh, are all these instructions for me?",
'40':"I am ecstatic today! Brick hack is so exciting. :-)",
'72':"I feel like my processor speed increased tenfold!",
}
| [
44,
1546,
4090,
8264,
62,
35,
18379,
796,
1391,
198,
220,
220,
220,
705,
1558,
10354,
1,
18332,
502,
40754,
198,
220,
220,
220,
705,
2091,
10354,
1,
22248,
714,
307,
1016,
1365,
13,
1058,
30420,
1600,
198,
220,
220,
220,
705,
2996,
... | 2.540909 | 220 |
import json
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import api_view, permission_classes
from django.db.transaction import atomic
from django.shortcuts import get_object_or_404
from django.core.exceptions import ValidationError
from hub.models.types import METHOD_TYPES_DICT, RESP_TYPES_DICT, JSON
from hub.models import Project, Api, APIPermissions
from hub.api.serializers import ProjectSerializer
from hub.api.pagination import StandardResultsPagination
@permission_classes((IsAuthenticated,))
@api_view(['GET', 'POST', 'PUT', 'DELETE'])
| [
11748,
33918,
198,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
769,
198,
6738,
1334,
62,
30604,
13,
525,
8481,
1330,
1148,
47649,
... | 3.468293 | 205 |
import os
import csv
import shutil
from copy import deepcopy
from exceptions import TypeError
from django.db.models.signals import pre_save, post_save
from django.db import models
from django.utils import simplejson
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from jetpack import settings
from jetpack.managers import PackageManager
from jetpack.errors import SelfDependencyException, FilenameExistException, \
UpdateDeniedException, AddingModuleDenied, AddingAttachmentDenied
from jetpack.xpi_utils import sdk_copy, xpi_build, xpi_remove
PERMISSION_CHOICES = (
(0, 'private'),
(1, 'view'),
(2, 'do not copy'),
(3, 'edit')
)
TYPE_CHOICES = (
('l', 'Library'),
('a', 'Add-on')
)
class Package(models.Model):
"""
Holds the meta data shared across all PackageRevisions
"""
# identification
# it can be the same as database id, but if we want to copy the database
# some day or change to a document-oriented database it would be bad
# to have this relied on any database model
id_number = models.CharField(max_length=255, unique=True, blank=True)
# name of the Package
full_name = models.CharField(max_length=255)
# made from the full_name
# it is used to create Package directory for export
name = models.CharField(max_length=255, blank=True)
description = models.TextField(blank=True)
# type - determining ability to specific options
type = models.CharField(max_length=30, choices=TYPE_CHOICES)
# author is the first person who created the Package
author = models.ForeignKey(User, related_name='packages_originated')
# is the Package visible for public?
public_permission = models.IntegerField(
choices=PERMISSION_CHOICES,
default=1, blank=True)
# url for the Manifest
url = models.URLField(verify_exists=False, blank=True, default='')
# license on which this package is rekeased to the public
license = models.CharField(max_length=255, blank=True, default='')
# where to export modules
lib_dir = models.CharField(max_length=100, blank=True, null=True)
# this is set in the PackageRevision.set_version
version_name = models.CharField(max_length=250, blank=True, null=True,
default=settings.INITIAL_VERSION_NAME)
version = models.ForeignKey('PackageRevision', blank=True, null=True, related_name='package_deprecated')
latest = models.ForeignKey('PackageRevision', blank=True, null=True, related_name='package_deprecated2')
private_key = models.TextField(blank=True, null=True)
public_key = models.TextField(blank=True, null=True)
jid = models.CharField(max_length=255, blank=True, null=True)
program_id = models.CharField(max_length=255, blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
last_update = models.DateTimeField(auto_now=True)
objects = PackageManager()
##################
# Methods
def create_id_number(self):
"""
get the highest id number and increment it
"""
all_packages = Package.objects.all().order_by('-id_number')
return str(int(all_packages[0].id_number) + 1) if all_packages else str(settings.MINIMUM_PACKAGE_ID)
def generate_key(self):
"""
create keypair, program_id and jid
"""
from ecdsa import SigningKey, NIST256p
from cuddlefish.preflight import vk_to_jid, jid_to_programid, my_b32encode
sk = SigningKey.generate(curve=NIST256p)
sk_text = "private-jid0-%s" % my_b32encode(sk.to_string())
vk = sk.get_verifying_key()
vk_text = "public-jid0-%s" % my_b32encode(vk.to_string())
self.jid = vk_to_jid(vk)
self.program_id = jid_to_programid(self.jid)
self.private_key = sk_text
self.public_key = vk_text
def make_dir(self, packages_dir):
"""
create package directories inside packages
return package directory name
"""
package_dir = '%s/%s' % (packages_dir, self.get_unique_package_name())
os.mkdir(package_dir)
os.mkdir('%s/%s' % (package_dir, self.get_lib_dir()))
if not os.path.isdir('%s/%s' % (package_dir, self.get_data_dir())):
os.mkdir('%s/%s' % (package_dir, self.get_data_dir()))
return package_dir
def copy(self, author):
"""
create copy of the package
"""
new_p = Package(
full_name=self.get_copied_full_name(),
description=self.description,
type=self.type,
author=author,
public_permission=self.public_permission,
url=self.url,
license=self.license,
lib_dir=self.lib_dir
)
new_p.save()
return new_p
class PackageRevision(models.Model):
"""
contains data which may be changed and rolled back
"""
package = models.ForeignKey(Package, related_name='revisions')
# public version name
# this is a tag used to mark important revisions
version_name = models.CharField(max_length=250, blank=True, null=True,
default=settings.INITIAL_VERSION_NAME)
# this makes the revision unique across the same package/user
revision_number = models.IntegerField(blank=True, default=0)
# commit message
message = models.TextField(blank=True)
# Libraries on which current package depends
dependencies = models.ManyToManyField('self', blank=True, null=True,
symmetrical=False)
# from which revision this mutation was originated
origin = models.ForeignKey('PackageRevision', related_name='mutations',
blank=True, null=True)
# person who owns this revision
author = models.ForeignKey(User, related_name='package_revisions')
created_at = models.DateTimeField(auto_now_add=True)
#contributors for Manifest
contributors = models.CharField(max_length=255, blank=True, default='')
# main for the Manifest
module_main = models.CharField(max_length=100, blank=True, default='main')
######################
# Manifest
def get_full_description(self):
" return joined description "
description = self.package.description
if self.message:
description = "%s\n%s" % (description, self.message)
return description
def get_full_rendered_description(self):
" return description prepared for rendering "
return "<p>%s</p>" % self.get_full_description().replace("\n","<br/>")
def get_main_module(self):
" return executable Module for Add-ons "
if type == 'l': return None
# find main module
main = self.modules.filter(filename=self.module_main)
if not main:
raise Exception('Every Add-on needs to be linked with an executable Module')
return main[0]
######################
# revision save methods
def save(self, **kwargs):
"""
overloading save is needed to prevent from updating the same revision
use super(PackageRevision, self).save(**kwargs) if needed
"""
if self.id:
# create new revision
return self.save_new_revision(**kwargs)
return super(PackageRevision, self).save(**kwargs)
def save_new_revision(self, package=None, **kwargs):
" save self as new revision with link to the origin. "
origin = deepcopy(self)
if package:
self.package = package
self.author = package.author
self.id = None
self.version_name = None
self.origin = origin
self.revision_number = self.get_next_revision_number()
save_return = super(PackageRevision, self).save(**kwargs)
# reassign all dependencies
for dep in origin.dependencies.all():
self.dependencies.add(dep)
for mod in origin.modules.all():
self.modules.add(mod)
for att in origin.attachments.all():
self.attachments.add(att)
self.package.latest = self
self.package.save()
if package:
self.set_version('copy')
return save_return
def get_next_revision_number(self):
"""
find latest revision_number for the self.package and self.user
@return latest revisiion number or 1
"""
revision_numbers = PackageRevision.objects.filter(
author__username=self.author.username,
package__id_number=self.package.id_number
).order_by('-revision_number')
return revision_numbers[0].revision_number + 1 if revision_numbers else 1
def set_version(self, version_name, current=True):
"""
@param String version_name: name of the version
@param Boolean current: should the version become a current one
@returns result of save revision
Set the version_name
update the PackageRevision obeying the overload save
Set current Package:version_name and Package:version if current
"""
# check if there isn't a version with such a name
revisions = PackageRevision.objects.filter(package__pk=self.package.pk)
for revision in revisions:
if revision.version_name == version_name:
version_name = ''
#raise Exception("There is already a revision with that name")
self.version_name = version_name
if current:
self.package.version_name = version_name
self.package.version = self
self.package.save()
return super(PackageRevision, self).save()
def module_create(self, **kwargs):
" create module and add to modules "
# validate if given filename is valid
if not self.validate_module_filename(kwargs['filename']):
raise FilenameExistException(
'Sorry, there is already a module in your add-on with the name "%s". Each module in your add-on needs to have a unique name.' % kwargs['filename']
)
mod = Module.objects.create(**kwargs)
self.module_add(mod)
return mod
def module_add(self, mod):
" copy to new revision, add module "
# save as new version
# validate if given filename is valid
if not self.validate_module_filename(mod.filename):
raise FilenameExistException(
'Sorry, there is already a module in your add-on with the name "%s". Each module in your add-on needs to have a unique name.' % mod.filename
)
"""
I think it's not necessary
TODO: check integration
for rev in mod.revisions.all():
if rev.package.id_number != self.package.id_number:
raise AddingModuleDenied('this module is already assigned to other Library - %s' % rev.package.get_unique_package_name())
"""
self.save()
return self.modules.add(mod)
def module_remove(self, mod):
" copy to new revision, remove module "
# save as new version
self.save()
return self.modules.remove(mod)
def module_update(self, mod):
" to update a module, new package revision has to be created "
self.save()
self.modules.remove(mod)
mod.id = None
mod.save()
self.modules.add(mod)
def modules_update(self, modules):
" update more than one module "
self.save()
for mod in modules:
self.modules.remove(mod)
mod.id = None
mod.save()
self.modules.add(mod)
def attachment_create(self, **kwargs):
" create attachment and add to attachments "
# validate if given filename is valid
if not self.validate_attachment_filename(kwargs['filename'], kwargs['ext']):
raise FilenameExistException(
'Sorry, there is already an attachment in your add-on with the name "%s.%s". Each attachment in your add-on needs to have a unique name.' % (
kwargs['filename'], kwargs['ext']
)
)
att = Attachment.objects.create(**kwargs)
self.attachment_add(att)
return att
def attachment_add(self, att):
" copy to new revision, add attachment "
# save as new version
# validate if given filename is valid
if not self.validate_attachment_filename(att.filename, att.ext):
raise FilenameExistException(
'Attachment with filename %s.%s already exists' % (att.filename, att.ext)
)
"""
for rev in att.revisions.all():
if rev.package.id_number != self.package.id_number:
raise AddingAttachmentDenied('this attachment is already assigned to other Library - %s' % rev.package.get_unique_package_name())
"""
self.save()
return self.attachments.add(att)
def attachment_remove(self, dep):
" copy to new revision, remove attachment "
# save as new version
self.save()
return self.attachments.remove(dep)
def dependency_add(self, dep):
" copy to new revision, add dependency (existing Library - PackageVersion) "
# a PackageRevision has to depend on the LibraryRevision only
if dep.package.type != 'l':
raise TypeError('Dependency has to be a Library')
# a LibraryRevision can't depend on another LibraryRevision linked with the same
# Library
if dep.package.id_number == self.package.id_number:
raise SelfDependencyException('A Library can not depend on itself!')
# dependency have to be unique in the PackageRevision
deps = self.dependencies.all()
for d in deps:
if d.package.pk == dep.package.pk:
raise Exception('Your add-on is already using "%s" by %s.' % (dep.package.full_name, dep.package.author.get_profile()));
# save as new version
self.save()
return self.dependencies.add(dep)
def dependency_remove(self, dep):
" copy to new revision, remove dependency "
# save as new version
self.save()
return self.dependencies.remove(dep)
def dependency_remove_by_id_number(self, id_number):
" find dependency by its id_number call dependency_remove "
for dep in self.dependencies.all():
if dep.package.id_number == id_number:
self.dependency_remove(dep)
return True
raise Exception('There is no such library in this %s' % self.package.get_type_name())
def build_xpi(self):
" prepare and build XPI "
if self.package.type == 'l':
raise Exception('only Add-ons may build a XPI')
sdk_dir = self.get_sdk_dir()
# TODO: consider SDK staying per PackageRevision...
if os.path.isdir(sdk_dir):
xpi_remove(sdk_dir)
sdk_copy(sdk_dir)
self.export_keys(sdk_dir)
self.export_files_with_dependencies('%s/packages' % sdk_dir)
return (xpi_build(sdk_dir,
'%s/packages/%s' % (sdk_dir, self.package.get_unique_package_name()))
)
def build_xpi_test(self, modules):
" prepare and build XPI for test only (unsaved modules) "
if self.package.type == 'l':
raise Exception('only Add-ons may build a XPI')
sdk_dir = self.get_sdk_dir()
# TODO: consider SDK staying per PackageRevision...
if os.path.isdir(sdk_dir):
xpi_remove(sdk_dir)
sdk_copy(sdk_dir)
self.export_keys(sdk_dir)
packages_dir = '%s/packages' % sdk_dir
package_dir = self.package.make_dir(packages_dir)
self.export_manifest(package_dir)
# instead of export modules
lib_dir = '%s/%s' % (package_dir, self.package.get_lib_dir())
for mod in self.modules.all():
mod_edited = False
for e_mod in modules:
if e_mod.pk == mod.pk:
mod_edited = True
e_mod.export_code(lib_dir)
if not mod_edited:
mod.export_code(lib_dir)
self.export_attachments('%s/%s' % (package_dir, self.package.get_data_dir()))
self.export_dependencies(packages_dir)
return (xpi_build(sdk_dir,
'%s/packages/%s' % (sdk_dir, self.package.get_unique_package_name()))
)
def export_keys(self, sdk_dir):
" export private and public keys "
keydir = '%s/%s' % (sdk_dir, settings.KEYDIR)
if not os.path.isdir(keydir):
os.mkdir(keydir)
handle = open('%s/%s' % (keydir, self.package.jid), 'w')
handle.write('private-key:%s\n' % self.package.private_key)
handle.write('public-key:%s' % self.package.public_key)
handle.close()
class Module(models.Model):
" the only way to 'change' the module is to assign it to different PackageRequest "
revisions = models.ManyToManyField(PackageRevision,
related_name='modules', blank=True)
# name of the Module - it will be used as javascript file name
filename = models.CharField(max_length=255)
# Code of the module
code = models.TextField(blank=True)
# user who has written current revision of the module
author = models.ForeignKey(User, related_name='module_revisions')
#################################################################################
## Catching Signals
pre_save.connect(set_package_id_number, sender=Package)
pre_save.connect(make_name, sender=Package)
pre_save.connect(make_keypair_on_create, sender=Package)
def save_first_revision(instance, **kwargs):
"""
Create first PackageRevision
"""
if kwargs.get('raw', False): return
# only for the new Package
if not kwargs.get('created', False): return
revision = PackageRevision(package=instance, author=instance.author)
revision.save()
instance.version = revision
instance.latest = revision
if instance.is_addon():
mod = Module.objects.create(
filename=revision.module_main,
author=instance.author,
code="""// This is an active module of the %s Add-on
exports.main = function() {};""" % instance.full_name
)
revision.modules.add(mod)
instance.save()
post_save.connect(save_first_revision, sender=Package)
| [
11748,
28686,
198,
11748,
269,
21370,
198,
11748,
4423,
346,
198,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
13269,
1330,
5994,
12331,
198,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
12683,
874,
1330,
662,
62,
21928,
11,
1281... | 2.836332 | 5,780 |
import versioneer # https://github.com/warner/python-versioneer
from setuptools import setup
setup(name="finddata",
version=versioneer.get_version(), #"0.2.2",
cmdclass=versioneer.get_cmdclass(),
description = "Find data files using ONCat",
author = "Pete Peterson",
author_email = "petersonpf@ornl.gov",
url = "http://github.com/peterfpeterson/finddata/",
long_description = """This package uses ONCat at SNS to find NeXus files.""",
license = "The MIT License (MIT)",
scripts=["scripts/finddata"],
packages=["finddata"],
package_dir={},#'finddata': '.'},
data_files=[('/etc/bash_completion.d/', ['finddata.bashcomplete'])]
)
| [
11748,
2196,
28153,
1303,
3740,
1378,
12567,
13,
785,
14,
5767,
1008,
14,
29412,
12,
690,
7935,
263,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
3672,
2625,
19796,
7890,
1600,
198,
220,
220,
220,
220,
220,
2196,
28,
... | 2.573529 | 272 |
# ===============================================================================
# Copyright 2021 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from bq_etl_levels import default_args, get_sql, BigQueryETLLevels
with DAG('NMBGMR_MANUAL_ETL0.7',
# schedule_interval='*/10 * * * *',
schedule_interval='@daily',
max_active_runs=1,
catchup=False,
default_args=default_args) as dag:
gsm = PythonOperator(task_id='get_manual_sql', python_callable=get_sql_manual)
gm = BigQueryETLLevels('Water Well',
('Ground Water Levels', {'agency': 'NMBGMR'}),
('Manual', 'Manual measurement of groundwater depth by field technician'),
('Depth to Water Below Land Surface', 'depth to water below land surface'),
task_id='etl_manual_levels', sql_task_id='get_manual_sql')
gsm >> gm
# ============= EOF =============================================
| [
2,
38093,
25609,
855,
198,
2,
15069,
33448,
686,
824,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
1... | 2.959147 | 563 |
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Attempt(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_mode': 'str',
'api_version': 'str',
'automatic_fetch': 'bool',
'daily_refresh': 'bool',
'categorization': 'str',
'created_at': 'datetime',
'custom_fields': 'object',
'device_type': 'str',
'remote_ip': 'str',
'exclude_accounts': 'list[str]',
'user_present': 'bool',
'customer_last_logged_at': 'datetime',
'fail_at': 'datetime',
'fail_error_class': 'str',
'fail_message': 'str',
'fetch_scopes': 'list[str]',
'finished': 'bool',
'finished_recent': 'bool',
'from_date': 'date',
'id': 'str',
'interactive': 'bool',
'locale': 'str',
'partial': 'bool',
'store_credentials': 'bool',
'success_at': 'datetime',
'to_date': 'datetime',
'updated_at': 'datetime',
'show_consent_confirmation': 'bool',
'include_natures': 'list[str]',
'stages': 'list[Stage]'
}
attribute_map = {
'api_mode': 'api_mode',
'api_version': 'api_version',
'automatic_fetch': 'automatic_fetch',
'daily_refresh': 'daily_refresh',
'categorization': 'categorization',
'created_at': 'created_at',
'custom_fields': 'custom_fields',
'device_type': 'device_type',
'remote_ip': 'remote_ip',
'exclude_accounts': 'exclude_accounts',
'user_present': 'user_present',
'customer_last_logged_at': 'customer_last_logged_at',
'fail_at': 'fail_at',
'fail_error_class': 'fail_error_class',
'fail_message': 'fail_message',
'fetch_scopes': 'fetch_scopes',
'finished': 'finished',
'finished_recent': 'finished_recent',
'from_date': 'from_date',
'id': 'id',
'interactive': 'interactive',
'locale': 'locale',
'partial': 'partial',
'store_credentials': 'store_credentials',
'success_at': 'success_at',
'to_date': 'to_date',
'updated_at': 'updated_at',
'show_consent_confirmation': 'show_consent_confirmation',
'include_natures': 'include_natures',
'stages': 'stages'
}
def __init__(self, api_mode=None, api_version=None, automatic_fetch=None, daily_refresh=None, categorization='personal', created_at=None, custom_fields=None, device_type=None, remote_ip=None, exclude_accounts=None, user_present=None, customer_last_logged_at=None, fail_at=None, fail_error_class=None, fail_message=None, fetch_scopes=None, finished=None, finished_recent=None, from_date=None, id=None, interactive=None, locale=None, partial=None, store_credentials=None, success_at=None, to_date=None, updated_at=None, show_consent_confirmation=None, include_natures=None, stages=None): # noqa: E501
"""Attempt - a model defined in Swagger""" # noqa: E501
self._api_mode = None
self._api_version = None
self._automatic_fetch = None
self._daily_refresh = None
self._categorization = None
self._created_at = None
self._custom_fields = None
self._device_type = None
self._remote_ip = None
self._exclude_accounts = None
self._user_present = None
self._customer_last_logged_at = None
self._fail_at = None
self._fail_error_class = None
self._fail_message = None
self._fetch_scopes = None
self._finished = None
self._finished_recent = None
self._from_date = None
self._id = None
self._interactive = None
self._locale = None
self._partial = None
self._store_credentials = None
self._success_at = None
self._to_date = None
self._updated_at = None
self._show_consent_confirmation = None
self._include_natures = None
self._stages = None
self.discriminator = None
self.api_mode = api_mode
self.api_version = api_version
self.automatic_fetch = automatic_fetch
self.daily_refresh = daily_refresh
self.categorization = categorization
self.created_at = created_at
self.custom_fields = custom_fields
self.device_type = device_type
self.remote_ip = remote_ip
self.exclude_accounts = exclude_accounts
self.user_present = user_present
self.customer_last_logged_at = customer_last_logged_at
self.fail_at = fail_at
self.fail_error_class = fail_error_class
self.fail_message = fail_message
self.fetch_scopes = fetch_scopes
self.finished = finished
self.finished_recent = finished_recent
self.from_date = from_date
self.id = id
self.interactive = interactive
self.locale = locale
self.partial = partial
self.store_credentials = store_credentials
self.success_at = success_at
self.to_date = to_date
self.updated_at = updated_at
self.show_consent_confirmation = show_consent_confirmation
self.include_natures = include_natures
self.stages = stages
@property
def api_mode(self):
"""Gets the api_mode of this Attempt. # noqa: E501
the API mode of the customer that queried the API. # noqa: E501
:return: The api_mode of this Attempt. # noqa: E501
:rtype: str
"""
return self._api_mode
@api_mode.setter
def api_mode(self, api_mode):
"""Sets the api_mode of this Attempt.
the API mode of the customer that queried the API. # noqa: E501
:param api_mode: The api_mode of this Attempt. # noqa: E501
:type: str
"""
if api_mode is None:
raise ValueError("Invalid value for `api_mode`, must not be `None`") # noqa: E501
allowed_values = ["app", "service"] # noqa: E501
if api_mode not in allowed_values:
raise ValueError(
"Invalid value for `api_mode` ({0}), must be one of {1}" # noqa: E501
.format(api_mode, allowed_values)
)
self._api_mode = api_mode
@property
def api_version(self):
"""Gets the api_version of this Attempt. # noqa: E501
the API version in which the attempt was created # noqa: E501
:return: The api_version of this Attempt. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this Attempt.
the API version in which the attempt was created # noqa: E501
:param api_version: The api_version of this Attempt. # noqa: E501
:type: str
"""
if api_version is None:
raise ValueError("Invalid value for `api_version`, must not be `None`") # noqa: E501
self._api_version = api_version
@property
def automatic_fetch(self):
"""Gets the automatic_fetch of this Attempt. # noqa: E501
whether the connection related to the attempt can be automatically fetched # noqa: E501
:return: The automatic_fetch of this Attempt. # noqa: E501
:rtype: bool
"""
return self._automatic_fetch
@automatic_fetch.setter
def automatic_fetch(self, automatic_fetch):
"""Sets the automatic_fetch of this Attempt.
whether the connection related to the attempt can be automatically fetched # noqa: E501
:param automatic_fetch: The automatic_fetch of this Attempt. # noqa: E501
:type: bool
"""
if automatic_fetch is None:
raise ValueError("Invalid value for `automatic_fetch`, must not be `None`") # noqa: E501
self._automatic_fetch = automatic_fetch
@property
def daily_refresh(self):
"""Gets the daily_refresh of this Attempt. # noqa: E501
latest assigned value for `daily_refresh` in connection # noqa: E501
:return: The daily_refresh of this Attempt. # noqa: E501
:rtype: bool
"""
return self._daily_refresh
@daily_refresh.setter
def daily_refresh(self, daily_refresh):
"""Sets the daily_refresh of this Attempt.
latest assigned value for `daily_refresh` in connection # noqa: E501
:param daily_refresh: The daily_refresh of this Attempt. # noqa: E501
:type: bool
"""
if daily_refresh is None:
raise ValueError("Invalid value for `daily_refresh`, must not be `None`") # noqa: E501
self._daily_refresh = daily_refresh
@property
def categorization(self):
"""Gets the categorization of this Attempt. # noqa: E501
the type of categorization applied. # noqa: E501
:return: The categorization of this Attempt. # noqa: E501
:rtype: str
"""
return self._categorization
@categorization.setter
def categorization(self, categorization):
"""Sets the categorization of this Attempt.
the type of categorization applied. # noqa: E501
:param categorization: The categorization of this Attempt. # noqa: E501
:type: str
"""
if categorization is None:
raise ValueError("Invalid value for `categorization`, must not be `None`") # noqa: E501
allowed_values = ["none", "personal", "business"] # noqa: E501
if categorization not in allowed_values:
raise ValueError(
"Invalid value for `categorization` ({0}), must be one of {1}" # noqa: E501
.format(categorization, allowed_values)
)
self._categorization = categorization
@property
def created_at(self):
"""Gets the created_at of this Attempt. # noqa: E501
when the attempt was made # noqa: E501
:return: The created_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this Attempt.
when the attempt was made # noqa: E501
:param created_at: The created_at of this Attempt. # noqa: E501
:type: datetime
"""
if created_at is None:
raise ValueError("Invalid value for `created_at`, must not be `None`") # noqa: E501
self._created_at = created_at
@property
def custom_fields(self):
"""Gets the custom_fields of this Attempt. # noqa: E501
the custom fields that had been sent when creating connection/connect\\_session/oauth\\_provider # noqa: E501
:return: The custom_fields of this Attempt. # noqa: E501
:rtype: object
"""
return self._custom_fields
@custom_fields.setter
def custom_fields(self, custom_fields):
"""Sets the custom_fields of this Attempt.
the custom fields that had been sent when creating connection/connect\\_session/oauth\\_provider # noqa: E501
:param custom_fields: The custom_fields of this Attempt. # noqa: E501
:type: object
"""
if custom_fields is None:
raise ValueError("Invalid value for `custom_fields`, must not be `None`") # noqa: E501
self._custom_fields = custom_fields
@property
def device_type(self):
"""Gets the device_type of this Attempt. # noqa: E501
the type of the device that created the attempt. # noqa: E501
:return: The device_type of this Attempt. # noqa: E501
:rtype: str
"""
return self._device_type
@device_type.setter
def device_type(self, device_type):
"""Sets the device_type of this Attempt.
the type of the device that created the attempt. # noqa: E501
:param device_type: The device_type of this Attempt. # noqa: E501
:type: str
"""
if device_type is None:
raise ValueError("Invalid value for `device_type`, must not be `None`") # noqa: E501
allowed_values = ["desktop", "tablet", "mobile"] # noqa: E501
if device_type not in allowed_values:
raise ValueError(
"Invalid value for `device_type` ({0}), must be one of {1}" # noqa: E501
.format(device_type, allowed_values)
)
self._device_type = device_type
@property
def remote_ip(self):
"""Gets the remote_ip of this Attempt. # noqa: E501
the IP of the device that created the attempt # noqa: E501
:return: The remote_ip of this Attempt. # noqa: E501
:rtype: str
"""
return self._remote_ip
@remote_ip.setter
def remote_ip(self, remote_ip):
"""Sets the remote_ip of this Attempt.
the IP of the device that created the attempt # noqa: E501
:param remote_ip: The remote_ip of this Attempt. # noqa: E501
:type: str
"""
if remote_ip is None:
raise ValueError("Invalid value for `remote_ip`, must not be `None`") # noqa: E501
self._remote_ip = remote_ip
@property
def exclude_accounts(self):
"""Gets the exclude_accounts of this Attempt. # noqa: E501
the `ids` of accounts that do not need to be refreshed # noqa: E501
:return: The exclude_accounts of this Attempt. # noqa: E501
:rtype: list[str]
"""
return self._exclude_accounts
@exclude_accounts.setter
def exclude_accounts(self, exclude_accounts):
"""Sets the exclude_accounts of this Attempt.
the `ids` of accounts that do not need to be refreshed # noqa: E501
:param exclude_accounts: The exclude_accounts of this Attempt. # noqa: E501
:type: list[str]
"""
if exclude_accounts is None:
raise ValueError("Invalid value for `exclude_accounts`, must not be `None`") # noqa: E501
self._exclude_accounts = exclude_accounts
@property
def user_present(self):
"""Gets the user_present of this Attempt. # noqa: E501
whether the request was initiated by the end-user of your application # noqa: E501
:return: The user_present of this Attempt. # noqa: E501
:rtype: bool
"""
return self._user_present
@user_present.setter
def user_present(self, user_present):
"""Sets the user_present of this Attempt.
whether the request was initiated by the end-user of your application # noqa: E501
:param user_present: The user_present of this Attempt. # noqa: E501
:type: bool
"""
if user_present is None:
raise ValueError("Invalid value for `user_present`, must not be `None`") # noqa: E501
self._user_present = user_present
@property
def customer_last_logged_at(self):
"""Gets the customer_last_logged_at of this Attempt. # noqa: E501
the datetime when user was last active in your application # noqa: E501
:return: The customer_last_logged_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._customer_last_logged_at
@customer_last_logged_at.setter
def customer_last_logged_at(self, customer_last_logged_at):
"""Sets the customer_last_logged_at of this Attempt.
the datetime when user was last active in your application # noqa: E501
:param customer_last_logged_at: The customer_last_logged_at of this Attempt. # noqa: E501
:type: datetime
"""
if customer_last_logged_at is None:
raise ValueError("Invalid value for `customer_last_logged_at`, must not be `None`") # noqa: E501
self._customer_last_logged_at = customer_last_logged_at
@property
def fail_at(self):
"""Gets the fail_at of this Attempt. # noqa: E501
when the attempt failed to finish # noqa: E501
:return: The fail_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._fail_at
@fail_at.setter
def fail_at(self, fail_at):
"""Sets the fail_at of this Attempt.
when the attempt failed to finish # noqa: E501
:param fail_at: The fail_at of this Attempt. # noqa: E501
:type: datetime
"""
if fail_at is None:
raise ValueError("Invalid value for `fail_at`, must not be `None`") # noqa: E501
self._fail_at = fail_at
@property
def fail_error_class(self):
"""Gets the fail_error_class of this Attempt. # noqa: E501
class of error that triggered the fail for attempt # noqa: E501
:return: The fail_error_class of this Attempt. # noqa: E501
:rtype: str
"""
return self._fail_error_class
@fail_error_class.setter
def fail_error_class(self, fail_error_class):
"""Sets the fail_error_class of this Attempt.
class of error that triggered the fail for attempt # noqa: E501
:param fail_error_class: The fail_error_class of this Attempt. # noqa: E501
:type: str
"""
if fail_error_class is None:
raise ValueError("Invalid value for `fail_error_class`, must not be `None`") # noqa: E501
self._fail_error_class = fail_error_class
@property
def fail_message(self):
"""Gets the fail_message of this Attempt. # noqa: E501
message that describes the error class # noqa: E501
:return: The fail_message of this Attempt. # noqa: E501
:rtype: str
"""
return self._fail_message
@fail_message.setter
def fail_message(self, fail_message):
"""Sets the fail_message of this Attempt.
message that describes the error class # noqa: E501
:param fail_message: The fail_message of this Attempt. # noqa: E501
:type: str
"""
if fail_message is None:
raise ValueError("Invalid value for `fail_message`, must not be `None`") # noqa: E501
self._fail_message = fail_message
@property
def fetch_scopes(self):
"""Gets the fetch_scopes of this Attempt. # noqa: E501
fetching mode. # noqa: E501
:return: The fetch_scopes of this Attempt. # noqa: E501
:rtype: list[str]
"""
return self._fetch_scopes
@fetch_scopes.setter
def fetch_scopes(self, fetch_scopes):
"""Sets the fetch_scopes of this Attempt.
fetching mode. # noqa: E501
:param fetch_scopes: The fetch_scopes of this Attempt. # noqa: E501
:type: list[str]
"""
if fetch_scopes is None:
raise ValueError("Invalid value for `fetch_scopes`, must not be `None`") # noqa: E501
allowed_values = ["accounts", "holder_info", "transactions"] # noqa: E501
if not set(fetch_scopes).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `fetch_scopes` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(fetch_scopes) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._fetch_scopes = fetch_scopes
@property
def finished(self):
"""Gets the finished of this Attempt. # noqa: E501
whether the connection had finished fetching # noqa: E501
:return: The finished of this Attempt. # noqa: E501
:rtype: bool
"""
return self._finished
@finished.setter
def finished(self, finished):
"""Sets the finished of this Attempt.
whether the connection had finished fetching # noqa: E501
:param finished: The finished of this Attempt. # noqa: E501
:type: bool
"""
if finished is None:
raise ValueError("Invalid value for `finished`, must not be `None`") # noqa: E501
self._finished = finished
@property
def finished_recent(self):
"""Gets the finished_recent of this Attempt. # noqa: E501
whether the connection had finished data for recent range # noqa: E501
:return: The finished_recent of this Attempt. # noqa: E501
:rtype: bool
"""
return self._finished_recent
@finished_recent.setter
def finished_recent(self, finished_recent):
"""Sets the finished_recent of this Attempt.
whether the connection had finished data for recent range # noqa: E501
:param finished_recent: The finished_recent of this Attempt. # noqa: E501
:type: bool
"""
if finished_recent is None:
raise ValueError("Invalid value for `finished_recent`, must not be `None`") # noqa: E501
self._finished_recent = finished_recent
@property
def from_date(self):
"""Gets the from_date of this Attempt. # noqa: E501
date from which the data had been fetched # noqa: E501
:return: The from_date of this Attempt. # noqa: E501
:rtype: date
"""
return self._from_date
@from_date.setter
def from_date(self, from_date):
"""Sets the from_date of this Attempt.
date from which the data had been fetched # noqa: E501
:param from_date: The from_date of this Attempt. # noqa: E501
:type: date
"""
if from_date is None:
raise ValueError("Invalid value for `from_date`, must not be `None`") # noqa: E501
self._from_date = from_date
@property
def id(self):
"""Gets the id of this Attempt. # noqa: E501
`id` of the attempt # noqa: E501
:return: The id of this Attempt. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Attempt.
`id` of the attempt # noqa: E501
:param id: The id of this Attempt. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def interactive(self):
"""Gets the interactive of this Attempt. # noqa: E501
whether the connection related to the attempt is interactive # noqa: E501
:return: The interactive of this Attempt. # noqa: E501
:rtype: bool
"""
return self._interactive
@interactive.setter
def interactive(self, interactive):
"""Sets the interactive of this Attempt.
whether the connection related to the attempt is interactive # noqa: E501
:param interactive: The interactive of this Attempt. # noqa: E501
:type: bool
"""
if interactive is None:
raise ValueError("Invalid value for `interactive`, must not be `None`") # noqa: E501
self._interactive = interactive
@property
def locale(self):
"""Gets the locale of this Attempt. # noqa: E501
the language of the Connect widget or/and provider error message in the <a href='http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes' target=\"_blank\">ISO 639-1</a> format. Possible values are: `bg`, `cz`, `de`, `en`, `es-MX`, `es`, `fr`, `he`, `hu`, `it`, `nl`, `pl`, `pt-BR`, `pt`, `ro`, `ru`, `sk`, `tr`, `uk`, `zh-HK`(Traditional), `zh`(Simplified). Defaults to `en` # noqa: E501
:return: The locale of this Attempt. # noqa: E501
:rtype: str
"""
return self._locale
@locale.setter
def locale(self, locale):
"""Sets the locale of this Attempt.
the language of the Connect widget or/and provider error message in the <a href='http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes' target=\"_blank\">ISO 639-1</a> format. Possible values are: `bg`, `cz`, `de`, `en`, `es-MX`, `es`, `fr`, `he`, `hu`, `it`, `nl`, `pl`, `pt-BR`, `pt`, `ro`, `ru`, `sk`, `tr`, `uk`, `zh-HK`(Traditional), `zh`(Simplified). Defaults to `en` # noqa: E501
:param locale: The locale of this Attempt. # noqa: E501
:type: str
"""
if locale is None:
raise ValueError("Invalid value for `locale`, must not be `None`") # noqa: E501
self._locale = locale
@property
def partial(self):
"""Gets the partial of this Attempt. # noqa: E501
whether the connection was partially fetched # noqa: E501
:return: The partial of this Attempt. # noqa: E501
:rtype: bool
"""
return self._partial
@partial.setter
def partial(self, partial):
"""Sets the partial of this Attempt.
whether the connection was partially fetched # noqa: E501
:param partial: The partial of this Attempt. # noqa: E501
:type: bool
"""
if partial is None:
raise ValueError("Invalid value for `partial`, must not be `None`") # noqa: E501
self._partial = partial
@property
def store_credentials(self):
"""Gets the store_credentials of this Attempt. # noqa: E501
whether the credentials were stored on our side # noqa: E501
:return: The store_credentials of this Attempt. # noqa: E501
:rtype: bool
"""
return self._store_credentials
@store_credentials.setter
def store_credentials(self, store_credentials):
"""Sets the store_credentials of this Attempt.
whether the credentials were stored on our side # noqa: E501
:param store_credentials: The store_credentials of this Attempt. # noqa: E501
:type: bool
"""
if store_credentials is None:
raise ValueError("Invalid value for `store_credentials`, must not be `None`") # noqa: E501
self._store_credentials = store_credentials
@property
def success_at(self):
"""Gets the success_at of this Attempt. # noqa: E501
when the attempt succeeded and finished # noqa: E501
:return: The success_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._success_at
@success_at.setter
def success_at(self, success_at):
"""Sets the success_at of this Attempt.
when the attempt succeeded and finished # noqa: E501
:param success_at: The success_at of this Attempt. # noqa: E501
:type: datetime
"""
if success_at is None:
raise ValueError("Invalid value for `success_at`, must not be `None`") # noqa: E501
self._success_at = success_at
@property
def to_date(self):
"""Gets the to_date of this Attempt. # noqa: E501
date until which the data has been fetched # noqa: E501
:return: The to_date of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._to_date
@to_date.setter
def to_date(self, to_date):
"""Sets the to_date of this Attempt.
date until which the data has been fetched # noqa: E501
:param to_date: The to_date of this Attempt. # noqa: E501
:type: datetime
"""
if to_date is None:
raise ValueError("Invalid value for `to_date`, must not be `None`") # noqa: E501
self._to_date = to_date
@property
def updated_at(self):
"""Gets the updated_at of this Attempt. # noqa: E501
when last attempt update occurred # noqa: E501
:return: The updated_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this Attempt.
when last attempt update occurred # noqa: E501
:param updated_at: The updated_at of this Attempt. # noqa: E501
:type: datetime
"""
if updated_at is None:
raise ValueError("Invalid value for `updated_at`, must not be `None`") # noqa: E501
self._updated_at = updated_at
@property
def show_consent_confirmation(self):
"""Gets the show_consent_confirmation of this Attempt. # noqa: E501
whether any consent was given for this connection # noqa: E501
:return: The show_consent_confirmation of this Attempt. # noqa: E501
:rtype: bool
"""
return self._show_consent_confirmation
@show_consent_confirmation.setter
def show_consent_confirmation(self, show_consent_confirmation):
"""Sets the show_consent_confirmation of this Attempt.
whether any consent was given for this connection # noqa: E501
:param show_consent_confirmation: The show_consent_confirmation of this Attempt. # noqa: E501
:type: bool
"""
if show_consent_confirmation is None:
raise ValueError("Invalid value for `show_consent_confirmation`, must not be `None`") # noqa: E501
self._show_consent_confirmation = show_consent_confirmation
@property
def include_natures(self):
"""Gets the include_natures of this Attempt. # noqa: E501
the natures of the accounts that need to be fetched # noqa: E501
:return: The include_natures of this Attempt. # noqa: E501
:rtype: list[str]
"""
return self._include_natures
@include_natures.setter
def include_natures(self, include_natures):
"""Sets the include_natures of this Attempt.
the natures of the accounts that need to be fetched # noqa: E501
:param include_natures: The include_natures of this Attempt. # noqa: E501
:type: list[str]
"""
if include_natures is None:
raise ValueError("Invalid value for `include_natures`, must not be `None`") # noqa: E501
self._include_natures = include_natures
@property
def stages(self):
"""Gets the stages of this Attempt. # noqa: E501
information about [stages](#attempts-stages) through which the connection has passed # noqa: E501
:return: The stages of this Attempt. # noqa: E501
:rtype: list[Stage]
"""
return self._stages
@stages.setter
def stages(self, stages):
"""Sets the stages of this Attempt.
information about [stages](#attempts-stages) through which the connection has passed # noqa: E501
:param stages: The stages of this Attempt. # noqa: E501
:type: list[Stage]
"""
if stages is None:
raise ValueError("Invalid value for `stages`, must not be `None`") # noqa: E501
self._stages = stages
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Attempt, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Attempt):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
13754,
13113,
10781,
6188,
7824,
628,
220,
220,
220,
7824,
20984,
329,
2594,
220,
1303,
645,
20402,
25,
412,
33548,
628,
220,
220,
220,
4946,
17614,
1020,
2196,
25... | 2.353568 | 13,986 |
"""`oteapi.plugins` module."""
from .factories import create_strategy, load_strategies
__all__ = ("create_strategy", "load_strategies")
| [
37811,
63,
1258,
15042,
13,
37390,
63,
8265,
526,
15931,
198,
6738,
764,
22584,
1749,
1330,
2251,
62,
2536,
4338,
11,
3440,
62,
2536,
2397,
444,
198,
198,
834,
439,
834,
796,
5855,
17953,
62,
2536,
4338,
1600,
366,
2220,
62,
2536,
2... | 2.978261 | 46 |