text string | size int64 | token_count int64 |
|---|---|---|
# Copyright (c) 2021, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
try:
import simplejson as json
except ImportError:
import json
from rest_framework import serializers
from predictions_manager.models import Prediction, TissueFragmentsCollection, TissueFragment
from slides_manager.serializers import SlideSerializer
class PredictionSerializer(serializers.ModelSerializer):
    """Serializer for Prediction objects; 'provenance' must contain valid JSON."""

    class Meta:
        model = Prediction
        fields = ('id', 'label', 'creation_date', 'slide', 'type', 'omero_id', 'provenance')
        read_only_fields = ('id', 'creation_date')

    def validate_provenance(self, value):
        """Accept the value only if it parses as JSON."""
        try:
            json.loads(value)
        except ValueError:
            raise serializers.ValidationError("Not a valid JSON in 'provenance' field")
        return value
class PredictionDetailsSerializer(serializers.ModelSerializer):
    """Read-only detail serializer: embeds the related slide via its serializer."""
    slide = SlideSerializer(many=False, read_only=True)
    class Meta:
        model = Prediction
        # Every field is read-only: this serializer is for detail views only.
        fields = ('id', 'label', 'creation_date', 'slide', 'type', 'omero_id', 'provenance')
        read_only_fields = ('id', 'label', 'creation_date', 'slide', 'type', 'omero_id', 'provenance')
class TissueFragmentsCollectionSerializer(serializers.ModelSerializer):
    """Flat serializer for a TissueFragmentsCollection (prediction by id)."""
    class Meta:
        model = TissueFragmentsCollection
        fields = ('id', 'prediction', 'creation_date')
        read_only_fields = ('id', 'creation_date')
class TissueFragmentSerializer(serializers.ModelSerializer):
    """Serializer for TissueFragment; 'shape_json' must contain valid JSON."""

    class Meta:
        model = TissueFragment
        fields = ('id', 'collection', 'shape_json', 'creation_date')
        read_only_fields = ('id', 'creation_date')

    def validate_shape_json(self, value):
        """Accept the value only if it parses as JSON."""
        try:
            json.loads(value)
        except ValueError:
            raise serializers.ValidationError("Not a valid JSON in 'shape_json' field")
        return value
class TissueFragmentsCollectionDetailsSerializer(serializers.ModelSerializer):
    """Detail serializer: embeds the fragments and the owning prediction."""
    fragments = TissueFragmentSerializer(many=True, read_only=True)
    prediction = PredictionSerializer(many=False, read_only=True)
    class Meta:
        model = TissueFragmentsCollection
        fields = ('id', 'prediction', 'creation_date', 'fragments')
        read_only_fields = ('id', 'creation_date')
| 3,286 | 986 |
import os
import pytest
from src.modules.tweet_module import Twitter_api
class TestTweet:
    """Tests for the Twitter_api wrapper."""

    def test_tokens(self):
        """Constructing Twitter_api without valid credentials must raise ValueError."""
        expected = 'Provide the correct tokens and keys'
        with pytest.raises(ValueError) as excinfo:
            Twitter_api()
        assert excinfo.match(expected), 'All credentials are given and accepted'
| 327 | 99 |
import pygame, gameslib
class App():
    """Thin wrapper around a pygame window that hosts a single active scene."""

    def __init__(self, size: gameslib.Size = (600, 300), title: str = 'GamesLib'):
        self._screen = pygame.display.set_mode(size)
        self._scene = None
        pygame.display.set_caption(title)

    @property
    def screen(self) -> pygame.Surface:
        """The pygame display surface this app draws on."""
        return self._screen

    @property
    def size(self) -> gameslib.Size:
        """Current window size, as reported by the display surface."""
        return self.screen.get_size()

    @property
    def scene(self) -> object:
        """The currently active scene, or None when no scene is set."""
        return self._scene

    @scene.setter
    def scene(self, scene: object) -> None:
        # Stop the outgoing scene (if any) before starting the incoming one.
        outgoing = self._scene
        if outgoing is not None:
            outgoing.stop()
        self._scene = scene
        if scene is not None:
            scene.start(self)
| 657 | 268 |
"""Model wrapper class for performing GradCam visualization with a ShowAndTellModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from im2txt import show_and_tell_model
from im2txt.inference_utils import inference_wrapper_base
import numpy as np
import matplotlib
# Fix to run remotely (with no display)
# matplotlib.use('agg')
import tensorflow as tf
import PIL.Image
from matplotlib import pylab as P
import pickle
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.colors as mcolors
import os
import os.path as osp
slim=tf.contrib.slim
import scipy
import sys
sys.path.append('gradcam')
def transparent_cmap(cmap, N=255):
    """Return a copy of *cmap* whose alpha channel ramps linearly from 0 to 0.8.

    Bug fix: the previous implementation assigned ``mycmap = cmap`` and then
    mutated it, so every call permanently altered the caller's colormap
    (typically the shared global ``plt.cm.jet``). A shallow copy is enough,
    because ``_init()`` rebuilds the lookup table on the copy.
    """
    import copy
    mycmap = copy.copy(cmap)
    mycmap._init()
    # The lookup table has N+4 rows (N colors plus the under/over/bad entries);
    # the last column is alpha.
    mycmap._lut[:, -1] = np.linspace(0, 0.8, N + 4)
    return mycmap
class GradCamWrapper(inference_wrapper_base.InferenceWrapperBase):
    """Model wrapper class for performing inference with a ShowAndTellModel."""

    def __init__(self):
        super(GradCamWrapper, self).__init__()

    def build_model(self, model_config):
        # "gradcam" mode exposes the tensors needed for the gradient computation.
        model = show_and_tell_model.ShowAndTellModel(model_config, mode="gradcam")
        model.build()
        return model

    def process_image(self, sess, encoded_image, input_feed, filename, vocab, word_index=1, word_id=None, save_path=None):
        """Compute a Grad-CAM heatmap for one caption word and save or show it.

        Args:
            sess: active TF1 session with the gradcam graph loaded.
            encoded_image: encoded image bytes fed to "image_feed:0".
            input_feed: caption token ids fed to "input_feed:0".
            filename: path of the original image on disk.
            vocab: vocabulary with an id_to_word() mapping.
            word_index: position in the caption whose prediction is explained.
            word_id: if given, explain this vocabulary id instead of the argmax.
            save_path: directory prefix to save .npy mask and .jpg overlay;
                if None or '', the figure is shown instead.
        """
        graph = tf.get_default_graph()
        # Forward pass to get the softmax over the vocabulary at each position.
        softmax = sess.run(fetches=["softmax:0"], feed_dict={"image_feed:0": encoded_image, "input_feed:0": input_feed})
        logits = graph.get_tensor_by_name('softmax:0')
        # Select a single vocabulary neuron at the chosen caption position.
        neuron_selector = tf.placeholder(tf.int32)
        neuron_pred = logits[0,word_index][neuron_selector]
        pred_max = np.argmax(softmax[0][0][word_index])
        if word_id != None:
            # Explain the given word rather than the model's top prediction.
            print('%s\tpredicted: %s with prob %f , given: %s with prob %.10f' % (filename, vocab.id_to_word(pred_max), np.max(softmax[0][0][word_index]), vocab.id_to_word(word_id), softmax[0][0][word_index][word_id]))
            pred_max = word_id
        # Imported lazily: 'gradcam' dir is appended to sys.path at module load.
        from grad_cam import GradCam
        grad_cam = GradCam(graph, sess, neuron_pred, graph.get_tensor_by_name('concat:0'), conv_layer = graph.get_tensor_by_name('InceptionV3/InceptionV3/Mixed_7c/concat:0'))
        input_image = PIL.Image.open(filename)
        input_image = input_image.convert('RGB')
        im = np.asarray(input_image)
        # Resize to InceptionV3 input size and scale pixels to [-1, 1].
        # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3 —
        # this code requires an old SciPy (with PIL) to run.
        im_resized = scipy.misc.imresize(im, (299, 299), interp='bilinear', mode=None)
        im_resized = im_resized / 127.5 - 1.0
        grad_mask_2d = grad_cam.GetMask(im_resized, feed_dict = {neuron_selector: pred_max, "input_feed:0": input_feed}, should_resize = False, three_dims = False)
        # if np.min(grad_mask_2d) == np.max(grad_mask_2d): grad_mask_2d[0,0]=1.0000001 # Fix for a bug that happens very rarely
        mycmap = transparent_cmap(plt.cm.jet)
        w = im_resized.shape[0]
        h = im_resized.shape[1]
        y, x = np.mgrid[0:h, 0:w]
        # Normalize the mask, upscale it to image size, then contrast-stretch
        # against the 99th percentile so a few hot pixels don't wash it out.
        grad_mask_2d_norm = grad_mask_2d / np.max(grad_mask_2d)
        grad_mask_2d_upscaled = scipy.misc.imresize(grad_mask_2d_norm, (w, h), interp='bilinear', mode='F')
        percentile = 99
        vmax = np.percentile(grad_mask_2d_upscaled, percentile)
        vmin = np.min(grad_mask_2d_upscaled)
        mask_grayscale_upscaled = np.clip((grad_mask_2d_upscaled - vmin) / (vmax - vmin), 0, 1)
        fig, ax = plt.subplots(1, 1)
        plt.axis('off')
        # Undo the [-1, 1] scaling to display the original image underneath.
        ax.imshow( ((im_resized + 1.0) * 127.5)/255.0)
        cb = ax.contourf(x, y, mask_grayscale_upscaled, 15, cmap=mycmap)
        if save_path != None and save_path != '':
            np.save(save_path + osp.basename(filename)[0:-4] + '_' + vocab.id_to_word(pred_max) + '.npy', grad_mask_2d)
            plt.savefig(save_path + osp.basename(filename)[0:-4] + '_' + vocab.id_to_word(pred_max) + '.jpg', bbox_inches='tight')
            plt.close()
        else:
            plt.show()
| 3,764 | 1,459 |
import ops
import ops.cmd
import ops.cmd.safetychecks
import ops.env
import ops.security.auditing
from ops.cmd import getBoolOption, setBoolOption, getValueOption, setListOption, setStringOption
OpsCommandException = ops.cmd.OpsCommandException
VALID_OPTIONS = ['user', 'network', 'local', 'target']
class GroupsCommand(ops.cmd.DszCommand, ):
    """DSZ 'groups' command wrapper exposing plugin options as properties.

    Validation only checks that every supplied option name appears in
    VALID_OPTIONS; it does not check option values.
    """
    optgroups = {}
    reqgroups = []
    reqopts = []
    defopts = {}

    def __init__(self, plugin='groups', netmap_type=None, **optdict):
        # NOTE(review): netmap_type is accepted but never used here — confirm
        # whether it should be forwarded to the underlying command.
        ops.cmd.DszCommand.__init__(self, plugin, **optdict)

    def validateInput(self):
        """Return False if any supplied option is not a recognized option name."""
        for opt in self.optdict:
            if opt not in VALID_OPTIONS:
                return False
        return True

    local = property((lambda x: getBoolOption(x, 'local')), (lambda x, y: setBoolOption(x, y, 'local')))
    # NOTE(review): 'remote' is absent from VALID_OPTIONS (while 'network' is
    # present with no property), so setting it makes validateInput() fail —
    # confirm the intended option name.
    remote = property((lambda x: getBoolOption(x, 'remote')), (lambda x, y: setBoolOption(x, y, 'remote')))
    # Fixed: setStringOption was referenced but never imported, so assigning
    # 'target' or 'user' raised NameError (import added at top of file).
    target = property((lambda x: getValueOption(x, 'target')), (lambda x, y: setStringOption(x, y, 'target')))
    user = property((lambda x: getValueOption(x, 'user')), (lambda x, y: setStringOption(x, y, 'user')))

ops.cmd.command_classes['groups'] = GroupsCommand
ops.cmd.aliasoptions['groups'] = VALID_OPTIONS | 1,212 | 399 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
import os
import subprocess
import sys
def s10_f1_run():
    """Evaluate each SemEval-2010 system over 5 mapping/test folds; print mean F1."""
    systems = os.listdir('../../systems')
    # NOTE: Python 2 script (print statements below). The command is a shell
    # pipeline; grep extracts the final "0.xxx" score from sup_eval.sh output.
    command = "./sup_eval.sh ../../systems/%s . ../80_20/all/mapping.%d.key ../80_20/all/test.%d.key 2>/dev/null | tail -1 | grep -oP '0.\d+'"
    for system in systems:
        scores = []
        # 'filesToSystemsMap' is bookkeeping, not a system directory.
        if system != 'filesToSystemsMap':
            for i in range(1,6):
                c = command % (system, i, i)
                # shell=True is required for the pipeline; arguments are local names.
                s = subprocess.Popen(c, shell=True, stdout=subprocess.PIPE).stdout.read().strip()
                scores.append(float(s))
            # Per-fold scores go to stderr; the mean goes to stdout.
            print >> sys.stderr, system, scores
            print "%s\t%f" % (system, sum(scores) / len(scores))
def s07_f1_run():
    """Evaluate each SemEval-2007 system on the fixed 82/18 random split; print F1."""
    systems = os.listdir('../../s07/systems')
    # Single fixed train/test key pair (no folds, unlike s10_f1_run).
    command = "./sup_eval.sh ../../s07/systems/%s . ../../s07/keys/random_split/82_18/senseinduction.random82train.key ../../s07/keys/random_split/82_18/senseinduction.random82test.key 2>/dev/null | tail -1 | grep -oP '0.\d+'"
    for system in systems:
        c = command % (system)
        s = subprocess.Popen(c, shell=True, stdout=subprocess.PIPE).stdout.read().strip()
        s = float(s)
        print "%s\t%f" % (system, s)

s07_f1_run()
| 1,249 | 479 |
# SPDX-License-Identifier: Apache-2.0
from timemachines.skaters.tcn.tcninclusiontraining import using_tcntraining
if using_tcntraining:
from onnxruntime import InferenceSession
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers, Input
def test_keras_onnx_runtime():
    """
    Smoke-test the keras -> ONNX -> onnxruntime round trip.

    Builds a tiny untrained RNN model, converts it to ONNX in memory, runs
    the same random batch through keras and onnxruntime, and asserts the two
    outputs agree to float32 tolerance.
    """
    # adapted from https://github.com/microprediction/tensorflow-onnx/blob/master/examples/end2end_tfkeras.py
    # Creates the model.
    model = keras.Sequential()
    model.add(Input((4, 4)))
    model.add(layers.SimpleRNN(8))
    model.add(layers.Dense(2))
    print(model.summary())
    input_names = [n.name for n in model.inputs]
    output_names = [n.name for n in model.outputs]
    print('inputs:', input_names)
    print('outputs:', output_names)
    # Training is intentionally skipped: the comparison only needs identical
    # (randomly initialized) weights on both sides, not a trained model.
    # Run the keras side. (Renamed from `input`, which shadowed the builtin.)
    batch = np.random.randn(2, 4, 4).astype(np.float32)
    expected = model.predict(batch)
    print(expected)
    # Serialize the model to an in-memory ONNX byte string (never saved).
    from tf2onnx.keras2onnx_api import convert_keras
    onnx_model = convert_keras(model=model, name='example')
    onnx_model_as_byte_string = onnx_model.SerializeToString()
    # Run the same batch through onnxruntime.
    session = InferenceSession(onnx_model_as_byte_string)
    got = session.run(None, {'input_1': batch})
    print(got[0])
    # The two runtimes must agree to within float32 tolerance.
    assert (np.abs(got[0] - expected).max()) < 1e-5
| 1,851 | 549 |
import unittest
from robotide.controller.commands import *
from nose.tools import assert_true, assert_false, assert_equals
from base_command_test import TestCaseCommandTest
class TestRenameKeywords(TestCaseCommandTest):
    """Unit tests for gherkin-prefix handling in RenameKeywordOccurrences."""

    def _make_command(self):
        # Fresh command instance; the keyword names themselves are irrelevant here.
        return RenameKeywordOccurrences("Step 1", "My New Keyword", NullObserver())

    def test_test_is_gerkin_kw(self):
        cmd = self._make_command()
        # Each recognized gherkin prefix is detected and stripped from the value.
        for prefix in ("Given", "Then", "And", "When", "But"):
            is_gherkin, kw_value = cmd._get_gherkin(prefix + " a Keyword")
            assert_true(is_gherkin)
            assert_equals(kw_value, "a Keyword")
        # Only the first prefix word is stripped.
        is_gherkin, kw_value = cmd._get_gherkin("But Given a Keyword")
        assert_true(is_gherkin)
        assert_equals(kw_value, "Given a Keyword")
        # A non-gherkin leading word leaves the value untouched.
        is_gherkin, kw_value = cmd._get_gherkin("If a Keyword")
        assert_false(is_gherkin)
        assert_equals(kw_value, "If a Keyword")

    def test_check_gerkin_kw(self):
        cmd = self._make_command()
        # (new value, original value, expected new, expected original)
        cases = [
            ("Given a Keyword", "a Keyword", "Given a Keyword", "a Keyword"),
            ("a Keyword", "Given a Keyword", "a Keyword", "Given a Keyword"),
            ("When a Keyword", "Given a Keyword", "When a Keyword", "Given a Keyword"),
            ("My new Keyword", "Old Keyword", "My new Keyword", "Old Keyword"),
            ("But Given a new Keyword", "Given a new Keyword", "But Given a new Keyword", "Given a new Keyword"),
            ("Given a new Keyword", "Given an old Keyword", "a new Keyword", "an old Keyword"),
        ]
        for new_value, orig_value, expected_new, expected_orig in cases:
            original_kw, new_kw = cmd._check_gherkin(new_value, orig_value)
            assert_equals(new_kw, expected_new)
            assert_equals(original_kw, expected_orig)

if __name__ == "__main__":
    unittest.main()
| 2,905 | 969 |
import os
import json
from uber_rides.session import Session
from uber_rides.client import UberRidesClient
from pprint import pprint
# SECURITY(review): a server token is hard-coded in source; it should be
# revoked and loaded from the environment (e.g. os.environ) instead.
session = Session(server_token="P0xeLpEgkQct68R3USut3nBst62X83Tz4V8BT7CR")
client = UberRidesClient(session)
# Fetch available Uber products around a fixed lat/lon (San Francisco).
response = client.get_products(37.77, -122.41)
products = response.json.get('products')
# Persist the product list to disk, then read it back and pretty-print it.
with open('data.json','w') as outfile:
    json.dump(products, outfile)
with open('data.json') as inFile:
    data = json.load(inFile)
pprint(data)
| 488 | 192 |
#!/usr/bin/env python
# This file is part of Diamond.
#
# Diamond is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diamond is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diamond. If not, see <http://www.gnu.org/licenses/>.
import gobject
import gtk
class DataButtonsWidget(gtk.HBox):
    """Horizontal box with "Revert data" and "Store data" buttons.

    Button clicks are re-emitted as the widget-level custom signals
    "revert" and "store" so containers can connect to them directly.
    """
    # Custom GTK signals (no return value, no arguments).
    __gsignals__ = { "revert" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
                    "store"  : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ())}

    def __init__(self):
        # gobject-style init (rather than gtk.HBox.__init__) so the custom
        # signals declared above are registered for this subclass.
        gtk.HBox.__gobject_init__(self)
        revertButton = gtk.Button()
        revertButton.set_label("Revert data")
        revertButton.connect("clicked", self._revert)
        storeButton = gtk.Button()
        storeButton.set_label("Store data")
        storeButton.connect("clicked", self._store)
        self.pack_start(revertButton)
        self.pack_end(storeButton)
        return

    def _revert(self, widget = None):
        # Forward the button click as the widget's "revert" signal.
        self.emit("revert")

    def _store(self, widget = None):
        # Forward the button click as the widget's "store" signal.
        self.emit("store")

gobject.type_register(DataButtonsWidget)
| 1,476 | 494 |
class LinkTitle(object):
    """String constants used as link titles for artifact/metadata resources."""
    ARTIFACT_LIST = "artifact-list"
    ARTIFACT_ROOT = "artifact-root"
    ARTIFACT = "artifact"
    METADATA = "metadata"
| 149 | 56 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch script for running a full probabilistic iterative solver baseline."""
from absl import app
from absl import flags
import tensorflow as tf
import tensorflow_datasets as tfds
from metapose import data_utils
from metapose import inference_time_optimization as inf_opt
# Command-line flags consumed by main(). I/O paths, optimizer settings, and
# a set of ablation switches (fake ground-truth heatmaps / inits, etc.).
_INPUT_PATH = flags.DEFINE_string(
    'input_path', '',
    'path to an folder containing a tfrec file and a features.json file')
_OUTPUT_PATH = flags.DEFINE_string(
    'output_path', None,
    'path to the output a dataset with refined 3d poses')
_N_STEPS = flags.DEFINE_integer('n_steps', 100, 'optimizer (adam) steps')
_DEBUG_FIRST_N = flags.DEFINE_integer(
    'debug_first_n', None, 'read only first n records')
_LEARNING_RATE = flags.DEFINE_float(
    'learning_rate', 1e-2, 'optimizer (adam) learning rate')
_REPORT_N_APPROX = flags.DEFINE_integer(
    'report_n_approx', 50,
    'number of intermediate optimization results to report')
_CAM_SUBSET = flags.DEFINE_list(
    'cam_subset', list(map(str, range(4))),
    'comma-separated list of camera ids to use, e.g. 3,4,5')
_GT_HEATMAPS = flags.DEFINE_bool(
    'gt_heatmaps', False,
    'whether to replace heatmaps with fake ground truth heatmaps')
_FAKE_GT_HT_STD = flags.DEFINE_float(
    'fake_gt_ht_std', 0.0,
    'how much noise to add to positions of means of fake gt heatmaps')
_USE_WEAK_REPR = flags.DEFINE_bool(
    'use_weak_repr', False,
    'whether to use weak projection to get ground truth heatmaps')
_FAKE_GT_INIT = flags.DEFINE_bool(
    'fake_gt_init', False,
    'whether to use ground truth instead of monocular 3d predictions')
_RANDOM_INIT = flags.DEFINE_bool(
    'random_init', False,
    'whether to use random noise instead of monocular 3d predictions')
_EDGE_LENS_LAMBDA = flags.DEFINE_float(
    'edge_lens_lambda', 0.0,
    'weight of the normalized limb length loss during refinement')
flags.mark_flag_as_required('output_path')
def main(_):
    """Run the probabilistic iterative-solver baseline over the whole dataset.

    Reads the input tfrec dataset, runs per-record inference-time
    optimization, and writes a new dataset with refined 3D poses plus
    intermediate optimization results and metrics.
    """
    cam_subset = list(map(int, _CAM_SUBSET.value))
    n_cam = len(cam_subset)
    # Actual number of reported intermediate results implied by the step
    # count and the requested (approximate) report count.
    report_n = (
        _N_STEPS.value // (_N_STEPS.value // (_REPORT_N_APPROX.value - 1)) + 1)
    # Shapes/dtypes of every feature in an output record; 17 is the number
    # of pose joints used throughout (COCO-style skeleton — TODO confirm).
    output_shape_dtype = {
        # optimization results
        'loss': ([report_n], tf.float32),
        'iters': ([report_n], tf.int32),
        'pose3d_opt_preds': ([report_n, 17, 3], tf.float32),
        'cam_rot_opt_preds': ([report_n, n_cam, 3, 3], tf.float32),
        'scale_opt_preds': ([report_n, n_cam], tf.float32),
        'shift_opt_preds': ([report_n, n_cam, 3], tf.float32),
        # metrics
        'pose2d_opt_preds': ([report_n, n_cam, 17, 2], tf.float32),
        'pose3d_gt_aligned_pred_3d_proj': ([report_n, n_cam, 17, 2], tf.float32),
        'pose3d_pred_pmpjpe': ([report_n], tf.float32),
        'pose2d_pred_err': ([report_n], tf.float32),
        'pose2d_pred_vs_posenet_err': ([report_n], tf.float32),
        'pose2d_gt_posenet_err_mean': ([], tf.float32),
        'pose3d_gt_backaligned_pose2d_gt_err': ([report_n], tf.float32),
        # input data
        'pose3d': ([17, 3], tf.float64),
        'cam_pose3d': ([n_cam, 3], tf.float64),
        'cam_rot': ([n_cam, 3, 3], tf.float64),
        'cam_intr': ([n_cam, 4], tf.float64),
        'cam_kd': ([n_cam, 5], tf.float64),
        'pose2d_gt': ([n_cam, 17, 2], tf.float64),
        'pose2d_repr': ([n_cam, 17, 2], tf.float64),
        'heatmaps': ([n_cam, 17, 4, 4], tf.float64),
        # note! pose2d_pred is actually the "mean heatmap" 2D pred
        'pose2d_pred': ([n_cam, 17, 2], tf.float64),
        'keys': ([n_cam], tf.string),
        'bboxes': ([n_cam, 4], tf.int32),
        'pose3d_epi_pred': ([n_cam, 17, 3], tf.float32),
        'cam_subset': ([n_cam], tf.int32),
    }
    output_spec = tfds.features.FeaturesDict({
        k: tfds.features.Tensor(shape=s, dtype=d)
        for k, (s, d) in output_shape_dtype.items()
    })
    ds = data_utils.read_tfrec_feature_dict_ds(_INPUT_PATH.value)
    if _DEBUG_FIRST_N.value is not None:
        ds = ds.take(_DEBUG_FIRST_N.value)
    dataset = []
    # Optimize each record independently and collect the results in memory.
    for _, data_rec in ds:
        opt_stats = inf_opt.run_inference_optimization(
            data_rec=data_rec,
            opt_steps=_N_STEPS.value,
            report_n_results=_REPORT_N_APPROX.value,
            cam_subset=cam_subset,
            edge_lens_lambda=_EDGE_LENS_LAMBDA.value,
            fake_gt_heatmaps=_GT_HEATMAPS.value,
            fake_gt_ht_std=_FAKE_GT_HT_STD.value,
            fake_gt_init=_FAKE_GT_INIT.value,
            random_init=_RANDOM_INIT.value,
            recompute_weak_repr=_USE_WEAK_REPR.value,
            learning_rate=_LEARNING_RATE.value)
        # Log the final (last reported) PMPJPE for progress monitoring.
        print('pmpjpe', opt_stats['pose3d_pred_pmpjpe'][-1])
        dataset.append(opt_stats)
    data_utils.write_tfrec_feature_dict_ds(
        dataset, output_spec, _OUTPUT_PATH.value)

if __name__ == '__main__':
    app.run(main)
| 5,274 | 2,077 |
from abc import ABC, abstractmethod
import nltk
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
s = SentimentIntensityAnalyzer()
import flair
flair_sentiment = flair.models.TextClassifier.load('en-sentiment')
""" from azure.ai.textanalytics import TextAnalyticsClient
from azure.core.credentials import AzureKeyCredential
azureclient = TextAnalyticsClient(endpoint="https://textsentimentcheck.cognitiveservices.azure.com/", credential=AzureKeyCredential("")) """
# add an instance of your model to this once you have defined it
models = []
# all added sentiment analysis models must be wrapped
# in a class that inherits from this class to enforce
# a common api between different models
class baseSentimentModel(ABC):
    """Common wrapper API shared by all sentiment-analysis models.

    Subclasses must implement predict(), which maps each input text to a
    score in [-1, 1]: 1 is maximally positive, 0 is neutral, and -1 is
    maximally negative.
    """

    def __init__(self, name, model):
        self.name = name
        self.model = model

    @abstractmethod
    def predict(self, texts):
        """Return one sentiment score in [-1, 1] per text in *texts*."""
class nltkModel(baseSentimentModel):
    """Wraps NLTK's VADER analyzer behind the common sentiment API."""

    def predict(self, texts):
        polarity_scores = (self.model.polarity_scores(text) for text in texts)
        return [self.parsePolarity(scores) for scores in polarity_scores]

    def parsePolarity(self, polarity):
        """Collapse VADER's neg/neu/pos scores into -1.0, 0.0 or 1.0."""
        neg, neu, pos = polarity['neg'], polarity['neu'], polarity['pos']
        if neg > pos and neg > neu:
            return -1.0
        if pos > neg and pos > neu:
            return 1.0
        return 0.0

models.append(nltkModel('nltkVader', s))
class flairModel(baseSentimentModel):
    """Wraps a flair TextClassifier behind the common sentiment API."""

    def __init__(self, name, model):
        # Map flair's label strings onto the common [-1, 1] scale.
        self.sentMapping = {'NEGATIVE' : -1.0, 'NEUTRAL': 0.0, 'POSITIVE': 1.0}
        super().__init__(name, model)

    def predict(self, texts):
        sents = [flair.data.Sentence(text) for text in texts]
        self.model.predict(sents)
        result = []
        for i, t in enumerate(sents):
            # Fixed: a bare `except:` here swallowed *every* exception,
            # including KeyboardInterrupt/SystemExit. Catch only the expected
            # failures: no label on the sentence, or an unmapped label string.
            # NOTE(review): a failed text is still silently dropped, so
            # len(result) can be < len(texts) — confirm callers handle this.
            try:
                result.append(self.sentMapping[t.labels[0].value])
            except (IndexError, KeyError):
                print(texts[i])
        return result

models.append(flairModel('flair', flair_sentiment))
""" class azureModel(baseSentimentModel):
def predict(self, texts):
responses = self.model.analyze_sentiment(documents=texts)
return list(map(self.parseResponses, responses))
def parseResponses(self, responses):
totals = [0.0, 0.0, 0.0]
for response in responses:
totals[0] += response.confidence_scores.positive
totals[1] += response.confidence_scores.neutral
totals[2] += response.confidence_scores.negative
max_idx = 0
if totals[1] > totals[0]:
max_idx = 1
if totals[2] > totals[max_idx]:
max_idx = 2
return 1.0 - max_idx # this returns 1.0 for pos, 0.0 for neutral, and -1.0 for negative
models.append(azureModel('azureModel', azureclient)) """
"""
example of this:
class myModel(baseSentimentModel):
# this example is a categorical model
# so the values must be converted to numbers
def predict(self, text):
pred = self.model.evaluateSentiment(text)
if pred == 'positive':
return 1.0
        elif pred == 'neutral':
return 0.0
else:
return -1.0
models.append(myModel('example model', somePackage.model))
"""
| 3,450 | 1,086 |
# Generated by Django 3.0.14 on 2021-05-09 17:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Makes Tool.quantity nullable (IntegerField(null=True))."""

    dependencies = [
        ('tools', '0007_auto_20210429_1427'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tool',
            name='quantity',
            field=models.IntegerField(null=True),
        ),
    ]
| 384 | 140 |
import pytest
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch.utils.data.sampler import SequentialSampler
from energizer.data import ActiveDataModule
from energizer.data.datamodule import FixedLengthSampler
@pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True)
def test_len(dataset_arg):
    """Test that measures of length are consistent."""
    # no instances: everything starts in the pool, nothing is labelled
    ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg)
    ads.prepare_data()  # useless: just pass but for coverage
    ads.setup()  # useless: just pass but for coverage
    assert ads.total_labelled_size == ads.train_size + ads.val_size
    assert len(ads.train_dataset) == ads.train_size == ads.val_size == ads.total_labelled_size == 0
    assert len(dataset_arg) == len(ads.pool_dataset) == ads.pool_size
    assert len(dataset_arg) == ads.total_labelled_size + ads.pool_size
    # one instance in the train dataset (no val split yet, so val stays None)
    ads.label(0)
    assert ads.total_labelled_size == ads.train_size + ads.val_size
    assert len(ads.train_dataset) == ads.train_size == ads.total_labelled_size == 1
    assert ads.val_dataset is None
    assert len(dataset_arg) - ads.total_labelled_size == len(ads.pool_dataset) == ads.pool_size
    assert len(dataset_arg) == ads.total_labelled_size + ads.pool_size
    # one instance in the train dataset and one in the val dataset
    ads.val_split = 0.5  # hack: mutate split after construction
    ads.label([0, 1])
    assert ads.total_labelled_size == ads.train_size + ads.val_size
    assert len(ads.train_dataset) == ads.train_size == 2
    assert len(ads.val_dataset) == ads.val_size == 1
    assert len(dataset_arg) - ads.total_labelled_size == len(ads.pool_dataset) == ads.pool_size
    assert len(dataset_arg) == ads.total_labelled_size + ads.pool_size
@pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True)
def test_indexing(dataset_arg):
    """Test that ActiveDataModule is not indexable directly."""
    datamodule = ActiveDataModule(num_classes=2, train_dataset=dataset_arg)
    # __getitem__ is not part of the datamodule API.
    with pytest.raises(TypeError):
        assert datamodule[0]
@pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True)
def test_labelling(dataset_arg):
    """Test that labelling changes all the required states."""
    ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg)
    len_dataset_arg = len(dataset_arg)
    # initial state: empty train set, full pool
    assert ads.last_labelling_step == 0
    assert ads.train_size == 0
    assert ads.pool_size == len_dataset_arg
    assert ads.has_labelled_data is False
    assert ads.has_unlabelled_data is True
    assert ads.train_dataset.indices == []
    # label one instance per step until the pool is exhausted
    for i in range(1, len_dataset_arg + 1):
        ads.label(0)  # always label the first instance in the pool
        assert ads.last_labelling_step == i
        assert ads.train_size == i
        assert ads.pool_size == len_dataset_arg - ads.train_size
        assert ads.has_labelled_data is True
        if i < len_dataset_arg:
            assert ads.has_unlabelled_data is True
        else:
            assert ads.has_unlabelled_data is False
        assert ads.train_dataset.indices == list(range(i))
    # final state: everything labelled, pool empty
    assert ads.last_labelling_step == len_dataset_arg
    assert ads.train_size == len_dataset_arg
    assert ads.pool_size == len_dataset_arg - ads.train_size
    assert ads.has_labelled_data is True
    assert ads.has_unlabelled_data is False
    assert ads.train_dataset.indices == list(range(len_dataset_arg))
@pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True)
def test_labelling_multiple_indices(dataset_arg):
    """Test labelling multiple instances at once."""
    datamodule = ActiveDataModule(num_classes=2, train_dataset=dataset_arg)
    # These are the first instances ever labelled, so pool indices coincide
    # with oracle indices.
    requested = [0, 8, 7]
    datamodule.label(requested)
    assert datamodule.train_dataset.indices == sorted(requested)
@pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True)
def test_labelling_duplicates(dataset_arg):
    """Test that labelling duplicate indices results in a single instance to be labelled."""
    # check behaviour when batch of indices contains only duplicates
    ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg)
    pool_ids = [0, 0]  # they are the first to be labelled so correspond to ids in oracle
    ads.label(pool_ids)
    assert ads.train_size == 1
    # check behaviour when batch of indices contains duplicates plus a distinct id,
    # with a 50/50 train/val split
    ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, val_split=0.5)
    pool_ids = [0, 0, 1]  # they are the first to be labelled so correspond to ids in oracle
    ads.label(pool_ids)
    assert ads.train_size == ads.val_size == 1
@pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True)
def test_labelling_val_split(dataset_arg):
    """Test that labelling with val_split works."""
    # check split works: two labelled instances split evenly
    ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, val_split=0.5)
    pool_ids = [0, 1]  # they are the first to be labelled so correspond to ids in oracle
    ads.label(pool_ids)
    assert ads.train_size == ads.val_size == 1
    # check that val_split receives at least 1 instance when there are two labelled instances
    # and the probability is too small that it randomly would receive just one
    ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, val_split=0.0001)
    pool_ids = [0, 1]  # they are the first to be labelled so correspond to ids in oracle
    ads.label(pool_ids)
    assert ads.train_size == ads.val_size == 1
    # check behaviour when there is only one instance (bonus: using a duplicate):
    # the single instance must go to train even with a large val_split
    ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, val_split=0.99)
    pool_ids = [0, 0]  # they are the first to be labelled so correspond to ids in oracle
    ads.label(pool_ids)
    assert ads.train_size == 1
@pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True)
def test_reset_at_labelling_step(dataset_arg):
    """Test that resetting the labelling steps sets the correct states."""
    ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg)
    len_dataset_arg = len(dataset_arg)
    ads.label(0)  # label first
    assert ads.last_labelling_step == 1
    assert ads.train_size == 1
    assert ads.pool_size == len_dataset_arg - ads.train_size
    assert ads.has_labelled_data is True
    assert ads.has_unlabelled_data is True
    assert ads.train_dataset.indices == [0]
    ads.label(list(range(len_dataset_arg - 1)))  # label the rest
    assert ads.train_size == len_dataset_arg
    assert ads.pool_size == len_dataset_arg - ads.train_size
    assert ads.has_labelled_data is True
    assert ads.has_unlabelled_data is False
    assert ads.train_dataset.indices == list(range(len_dataset_arg))
    ads.reset_at_labelling_step(1)  # go back to when there was one instance
    assert ads.train_size == 1
    assert ads.pool_size == len_dataset_arg - ads.train_size
    assert ads.has_labelled_data is True
    assert ads.has_unlabelled_data is True
    assert ads.train_dataset.indices == [0]
    ads.reset_at_labelling_step(0)  # go back to when there was nothing labelled
    # NOTE(review): resetting rewinds the labelled data but not the step
    # counter — last_labelling_step remains at the highest step reached (2).
    assert ads.last_labelling_step == 2
    assert ads.train_size == 0
    assert ads.pool_size == len_dataset_arg - ads.train_size
    assert ads.has_labelled_data is False
    assert ads.has_unlabelled_data is True
    assert ads.train_dataset.indices == []
    ads.reset_at_labelling_step(ads.last_labelling_step)  # reset to the last step
    assert ads.train_size == len_dataset_arg
    assert ads.pool_size == len_dataset_arg - ads.train_size
    assert ads.has_labelled_data is True
    assert ads.has_unlabelled_data is False
    assert ads.train_dataset.indices == list(range(len_dataset_arg))
    # resetting to a step that never happened must fail
    with pytest.raises(ValueError):
        assert ads.reset_at_labelling_step(100)
@pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True)
def test_sample_pool_indices(dataset_arg):
    """Sampling validates the requested size and returns exactly that many ids."""
    datamodule = ActiveDataModule(num_classes=2, train_dataset=dataset_arg)
    # Out-of-range sizes are rejected.
    for bad_size in (-1, 0, datamodule.pool_size + 1):
        with pytest.raises(ValueError):
            assert datamodule.sample_pool_idx(bad_size)
    # Valid sizes return the requested number of pool indices.
    assert len(datamodule.sample_pool_idx(datamodule.pool_size)) == datamodule.pool_size
    assert len(datamodule.sample_pool_idx(1)) == 1
@pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True)
def test_curriculum(dataset_arg):
    """The curriculum dataset preserves the order in which instances were labelled."""
    datamodule = ActiveDataModule(num_classes=2, train_dataset=dataset_arg)
    # Label the first pool instance five times in a row.
    for _ in range(5):
        datamodule.label(0)
    assert datamodule.curriculum_dataset().indices == list(range(5))
@pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True)
def test_initial_labelling(dataset_arg):
    """initial_labels accepts nothing, an index list, or a count (with val split)."""
    # No initial labels -> empty training set.
    dm = ActiveDataModule(num_classes=2, train_dataset=dataset_arg)
    assert dm.train_size == 0
    # Explicit list of indices labels exactly those instances.
    dm = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, initial_labels=[0])
    assert dm.train_size == 1
    # An integer labels that many instances.
    dm = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, initial_labels=2)
    assert dm.train_size == 2
    # A 0.5 validation split divides the labelled pool evenly.
    dm = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, initial_labels=2, val_split=0.5)
    assert dm.train_size == dm.val_size == 1
@pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True)
def test_dataloader_len(dataset_arg):
    """Dataloader length must track its batch sampler for every batch size."""
    for batch_size in range(1, len(dataset_arg) + 1):
        ads = ActiveDataModule(
            num_classes=2,
            train_dataset=dataset_arg,
            initial_labels=2,
            batch_size=batch_size,
        )
        # Batching is delegated to the batch sampler, so the loader itself
        # reports no batch_size of its own.
        assert ads.train_dataloader().batch_size is None
        assert ads.train_dataloader().batch_sampler.batch_size == batch_size
        assert len(ads.train_dataloader().batch_sampler) == len(ads.train_dataloader())
    # min_steps_per_epoch
    # NOTE(review): _min_steps_per_epoch is a private attribute; these checks
    # assume len(loader) == max(min_steps, natural number of batches) — confirm.
    for shuffle in (True, False):
        ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, initial_labels=2, shuffle=shuffle)
        ads._min_steps_per_epoch = 1
        # Two labelled instances at default batch size -> two steps, even with min=1.
        assert len(ads.train_dataloader().batch_sampler) == len(ads.train_dataloader()) == 2
        for _ in range(2):
            assert next(iter(ads.train_dataloader()))
        ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, initial_labels=2, shuffle=shuffle)
        ads._min_steps_per_epoch = 10
        # The epoch is stretched to the requested minimum number of steps.
        assert len(ads.train_dataloader().batch_sampler) == len(ads.train_dataloader()) == 10
        for _ in range(10):
            assert next(iter(ads.train_dataloader()))
@pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True)
def test_sampler_type(dataset_arg):
    """Train split uses FixedLengthSampler; all other splits use SequentialSampler."""
    dm = ActiveDataModule(
        num_classes=2,
        train_dataset=dataset_arg,
        test_dataset=dataset_arg,
        predict_dataset=dataset_arg,
        val_dataset=dataset_arg,
        initial_labels=2,
        batch_size=1,
    )
    # Only the training loader draws samples through the fixed-length sampler.
    assert isinstance(dm.train_dataloader().batch_sampler.sampler, FixedLengthSampler)
    # Evaluation-style loaders all iterate sequentially.
    eval_loaders = (
        dm.pool_dataloader(),
        dm.val_dataloader(),
        dm.test_dataloader(),
        dm.predict_dataloader(),
    )
    for loader in eval_loaders:
        assert isinstance(loader.batch_sampler.sampler, SequentialSampler)
@pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True)
def test_raise_errors(dataset_arg):
    """Invalid val_split values and iterating with no labelled data must fail."""
    for bad_split in (-0.5, 1.0):
        # Out-of-range splits are rejected with or without an explicit val set.
        with pytest.raises(MisconfigurationException):
            ActiveDataModule(num_classes=2, train_dataset=dataset_arg, val_split=bad_split)
        with pytest.raises(MisconfigurationException):
            ActiveDataModule(num_classes=2, train_dataset=dataset_arg, val_split=bad_split, val_dataset=dataset_arg)
    # Iterating the train dataloader before anything is labelled is an error.
    with pytest.raises(RuntimeError):
        dm = ActiveDataModule(num_classes=2, train_dataset=dataset_arg)
        next(iter(dm.train_dataloader()))
| 12,141 | 4,141 |
# import os
# from typing import List
# # import aiofiles
# from fastapi import UploadFile
# from datetime import datetime
# import shutil
# # from app.model.emails import EmailRequest
# # from app.utils.mail_utils import send_mail
# def get_datetime():
# # return a current datetime string
# return datetime.now().strftime("%Y%m%d_%H%M%S")
# async def save_uploaded_files_to_wkdir(files):
# # Create temporary folder for storing uploaded files
# file_path = f"app/data/temp/upload_{get_datetime()}"
# os.mkdir(file_path)
# # save the file in local directory and get the list of files
# list_files = []
# for file in files:
# _file_name = os.path.join(file_path, file.filename)
# print("File Name: ", _file_name)
# async with aiofiles.open(_file_name, "wb") as out_file:
# content = await file.read() # async read
# await out_file.write(content) # async write
# list_files.append(_file_name)
# return {
# "path_to_folder": file_path,
# "list_files": list_files,
# }
| 1,086 | 347 |
import os
import pytest
from pyspark import SparkConf, SparkContext
from sagemaker_pyspark import classpath_jars
from sagemaker_pyspark.wrapper import Option, ScalaMap, ScalaList
@pytest.fixture(autouse=True)
def with_spark_context():
    """Provide (and afterwards stop) a SparkContext wired to the SageMaker jars.

    NOTE(review): if a context already exists, the freshly built `conf` is NOT
    applied to it — confirm no test relies on differing Spark configurations.
    """
    # Expose the SageMaker classpath to the JVM spawned by Spark.
    os.environ['SPARK_CLASSPATH'] = ":".join(classpath_jars())
    conf = (SparkConf()
            .set("spark.driver.extraClassPath", os.environ['SPARK_CLASSPATH']))
    if SparkContext._active_spark_context is None:
        SparkContext(conf=conf)
    yield SparkContext._active_spark_context
    # TearDown
    SparkContext.stop(SparkContext._active_spark_context)
def test_convert_dictionary():
    """ScalaMap must convert a Python dict into a Scala Map with the same entries."""
    dictionary = {"key": "value"}
    # Renamed from `map` so the builtin is not shadowed.
    scala_map = ScalaMap(dictionary)._to_java()
    assert scala_map.apply("key") == "value"
def test_convert_list():
    """ScalaList must preserve element order when converted to a Scala List."""
    # Renamed from `list` so the builtin is not shadowed.
    values = ["features", "label", "else"]
    scala_list = ScalaList(values)._to_java()
    for position, expected in enumerate(values):
        assert scala_list.apply(position) == expected
def test_convert_option():
    """Option must wrap a value that remains retrievable via get()."""
    # Renamed from `list` so the builtin is not shadowed.
    values = ["features", "label", "else"]
    option = Option(values)._to_java()
    assert option.get().apply(0) == "features"
| 1,139 | 382 |
import torch
import numpy as np
from .base import Module
class Dropout(Module):
    """Dropout layer whose binary mask is drawn once at construction time.

    NOTE(review): the mask is sampled a single time from a seeded RandomState
    and reused for every forward pass, and kept activations are NOT rescaled
    by 1/(1-p) — confirm this is intentional for this framework.
    """

    def __init__(self, p, input_size, seed=0):
        self.p = p
        self.generator = np.random.RandomState(seed)
        # Keep each unit with probability 1-p; 0 entries are dropped.
        mask = self.generator.binomial(size=input_size, n=1, p=1 - p)
        self.activation = torch.from_numpy(mask).float()
        self.train = True

    def set_training(self, b):
        # Toggle between training (masked) and evaluation (identity) mode.
        self.train = b

    def forward(self, input):
        # Apply the fixed mask only while training; pass input through otherwise.
        self.output = input * self.activation if self.train else input

    def backward(self, grad):
        # Gradient flows only through the units kept by the mask.
        return self.activation * grad
| 645 | 203 |
from datetime import datetime

import pglet
from pglet import DatePicker, Text

# Demo page: a DatePicker that reports value changes into a Text control.
with pglet.page("datepicker-with-change-event") as page:

    def datepicker_changed(e):
        # Reflect the newly picked date in the companion text control.
        t.value = f"DatePicker value changed to {dp.value}"
        t.update()

    now = datetime.now()
    t = Text()
    dp = DatePicker(label="Start date", value=now, width=150, on_change=datepicker_changed)
    page.add(dp, t)

# Block so the process (and hence the page connection) stays alive.
# BUG FIX: corrupted trailing tokens after input() (`| 401 | 135 |`) were a
# syntax error and have been removed.
input()
""" Multiplication Table """
def ex30():
    """Print a 13x13 multiplication table (factors 0 through 12)."""
    for left in range(13):
        for right in range(13):
            print(f"{left} X {right} = {left * right}")


if __name__ == '__main__':
    ex30()
| 250 | 83 |
""" Tests for form crawlers """
from unittest import TestCase, main
from unittest.mock import Mock
from functools import partial
from interactive_bots.commons.form_crawler import FormActionOptions, FormCrawler
class FormActionOptionsTestCase(TestCase):
    """ Test case for FormActionOptions class """

    def setUp(self):
        # Fresh driver and mocks for every test.
        self.driver_mock = Mock()
        self.form_action = FormActionOptions(self.driver_mock)
        self.navigate_mock = Mock()
        self.action_mock = Mock()
        self.data_mock = Mock()

    def test_set_actions_should_set_navigate(self):
        """ set_actions should take function for navigate and make partial with driver """
        self.navigate_mock.side_effect = lambda x: self.assertTrue(self.driver_mock is x)
        self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock)
        self.form_action.navigate()

    def test_set_actions_should_set_data(self):
        """ set_actions should take function for data and make partial with driver """
        self.data_mock.side_effect = lambda x: self.assertTrue(self.driver_mock is x)
        self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock)
        self.form_action.data()

    def test_set_actions_should_set_action(self):
        """ set_actions should take function for action and make partial with driver """
        self.action_mock = lambda x: self.assertTrue(self.driver_mock is x)
        # BUG FIX: the data slot previously received action_mock a second time;
        # pass data_mock so the call mirrors the sibling tests.
        self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock)
        self.form_action.action()

    def test_reset_accumulator_should_set_acc_to_0(self):
        """ reset_accumulator should set acc to 0 """
        self.form_action.acc = 12
        self.form_action.reset_accumulator()
        self.assertEqual(self.form_action.acc, 0)

    def test_iteration_should_stop_iteration_if_acc_is_False(self):
        """ Iteration through actions should stop if accumulator passed from action is false """
        self.navigate_mock.return_value = []
        self.action_mock.return_value = False
        self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock)
        self.assertRaises(StopIteration, partial(next, self.form_action))

    def test_iteration_should_pass_acc_to_data(self):
        """ acc should be passed to data if True """
        acc = ["stuff"]
        self.navigate_mock.return_value = [1]
        self.action_mock.return_value = acc
        self.data_mock.side_effect = lambda d, a: self.assertTrue(a is acc)
        self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock)
        next(self.form_action)

    def test_iteration_should_return_from_data(self):
        """ Iteration through FormActionOptions should return whatever data returned """
        val = 1
        self.navigate_mock.return_value = [1]
        self.data_mock.return_value = val
        self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock)
        self.assertEqual(next(self.form_action), val)
class FormCrawlerTestCase(TestCase):
    """ Test case for FormCrawler """
    def setUp(self):
        # A fresh crawler (with an empty actions list) for every test.
        self.form_crawler = FormCrawler()
    def test_add_action_should_add_action_to_list(self):
        """ add_action method should append action to actions list """
        act = Mock()
        self.form_crawler.add_action(act)
        self.assertTrue(act is self.form_crawler.actions[0])
    def test_remove_action_should_remove_action(self):
        """ remove_action should remove action from actions list by given index """
        act = Mock()
        self.form_crawler.add_action(act)
        self.form_crawler.remove_action(0)
        self.assertEqual(len(self.form_crawler.actions), 0)
    def test_crawl_should_set_header(self):
        """ crawl should call writeheader before writing anything else """
        writer = Mock()
        # An option whose action immediately returns False ends the crawl at once.
        option = FormActionOptions(Mock())
        option.set_actions(Mock(return_value=[]), Mock(return_value=False), Mock())
        self.form_crawler.add_action(option)
        self.form_crawler.crawl(writer)
        writer.writeheader.assert_called_once()
    def test_crawl_should_write_row_of_all_values(self):
        """ crawl should write row from dictionary with all the fields passed by actions data function """
        write_dict = {"foo": 1, "bar": 2}
        writer = Mock()
        # The assertion fires inside writerow: the merged row must contain both fields.
        writer.writerow = lambda d: self.assertEqual(d, write_dict)
        def counter(d, l, a):
            # Action stub: succeed on the first call of each option, then stop.
            if not a:
                return True
            else:
                return False
        # NOTE(review): option1's data returns a dict while option2's returns a
        # list of dicts — presumably crawl accepts both shapes; confirm.
        option1 = FormActionOptions(Mock())
        option2 = FormActionOptions(Mock())
        option1.set_actions(Mock(return_value=[1]), Mock(side_effect=counter), Mock(return_value={"foo": write_dict["foo"]}))
        option2.set_actions(Mock(return_value=[1]), Mock(side_effect=counter), Mock(return_value=[{"bar": write_dict["bar"]}]))
        self.form_crawler.add_action(option1)
        self.form_crawler.add_action(option2)
        self.form_crawler.crawl(writer)
    def test_crawl_should_throw_exception_if_actions_list_is_empty(self):
        """ crawl should throw IndexError if actions is empty """
        self.assertRaises(IndexError, partial(self.form_crawler.crawl, Mock()))
if __name__ == "__main__":
    main()
| 5,312 | 1,635 |
from django.urls import path
from django.contrib.auth.decorators import login_required
from .views import (
StockListView,
stockAddView,
stockDeleteView,
autoComplete
)
def _protected(view):
    """Wrap *view* so unauthenticated users are redirected to the login page."""
    return login_required(view, login_url='/user/login/')


urlpatterns = [
    path('', _protected(StockListView.as_view()), name='stock_list'),
    path('add/', _protected(stockAddView), name='stock_add'),
    path('add/<int:id>/', _protected(stockAddView), name='stock_update'),
    path('delete/<int:id>/', _protected(stockDeleteView), name='stock_delete'),
    path('autocomplete/', _protected(autoComplete), name='stock_autocomplete'),
]
"""Unit test for DataReader (public methods only)"""
import unittest
import numpy as np
import os
from dicom_data_preprocess import parsing
from dicom_data_preprocess.reader import DataReader
__author__ = 'Christine Hsu'
class TestReader(unittest.TestCase):
    """Exercise DataReader's public sample-loading API on the bundled batch set."""

    @classmethod
    def setUpClass(cls):
        # Shared fixture paths for every test in this class.
        # (Parameter renamed from `TestReader` to the conventional `cls`.)
        cls.download_data_path = 'tests/data/sample-batchset/'
        cls.data_basepath = 'tests/data/output_data/'
        # BUG FIX: a trailing comma previously turned logs_path into a 1-tuple.
        cls.logs_path = 'tests/logs/'
        cls.plots_path = 'tests/plots/'
        cls.contour_type = 'i-contours'
        cls.save_plot = False
        cls.dicoms_basepath = os.path.join(cls.download_data_path, 'dicoms')
        cls.contours_basepath = os.path.join(cls.download_data_path, 'contourfiles')
        cls.link_filepath = os.path.join(cls.download_data_path, 'link.csv')
        # NOTE(review): private DataReader helpers are invoked with the class
        # standing in for `self`; they appear not to touch instance state.
        link_tuples = DataReader._read_link(cls, cls.link_filepath)
        cls.sample_tuples = DataReader._assemble_link(cls, link_tuples)

    def test_load_samples(self):
        """Load the eight assembled samples and sanity-check the result types."""
        print('\nTesting the loading of eight assembled samples...')
        reader = DataReader(download_data_path=TestReader.download_data_path,
                            data_basepath=TestReader.data_basepath,
                            logs_path=TestReader.logs_path,
                            plots_path=TestReader.plots_path,
                            contour_type=TestReader.contour_type,
                            save_plot=TestReader.save_plot)
        images, masks, metadata = reader.load_samples(TestReader.sample_tuples)
        self.assertTrue(isinstance(images, list))
        self.assertTrue(isinstance(masks, list))
        self.assertTrue(isinstance(metadata, list))
        self.assertTrue(isinstance(images[0], np.ndarray))
        # BUG FIX: np.bool was removed in NumPy 1.24; np.bool_ is the dtype alias.
        self.assertEqual(masks[0].dtype, np.bool_)
        self.assertTrue(isinstance(metadata[0], str))
        reader.plot_samples(images, masks, metadata, 'test_load_samples.jpg')


if __name__ == "__main__":
    unittest.main()
| 1,828 | 648 |
# -*- coding: utf-8 -*-
import logging
import logging.config
import logging.handlers
class Logger:
    """Thin wrapper around logging.config.dictConfig.

    Merges a user-supplied configuration (its 'formatters', 'handlers' and
    'loggers' sections) into a stdout default, applies it, and registers the
    resulting logger names so getLogger() can hand them out by name.
    """

    # Fallback configuration: everything to stdout at DEBUG level.
    _default_conf = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "default": {
                "format": "[%(asctime)s][%(name)s][%(levelname)s][%(filename)s:%(lineno)d]: %(message)s",
                "datefmt": "%d-%M-%Y %H:%M:%S"
            }
        },
        "handlers": {
            "defaultHandler": {
                "class":"logging.StreamHandler",
                "level":"DEBUG",
                "formatter":"default",
                "stream":"ext://sys.stdout"
            }
        },
        "root": {
            "level": "DEBUG",
            "handlers": ['defaultHandler']
        }
    }
    _current_conf = None
    _logger = None
    _register_loggers = ['root']

    def __init__(self, conf=None):
        """Apply *conf* on top of the default configuration.

        :param conf: optional mapping providing 'formatters', 'handlers'
            and/or 'loggers' sections (anything with a .get method).
        :raises TypeError: if conf is given but has no .get method.
        """
        import copy  # local import: only needed to isolate the default conf

        # BUG FIX: getattr(conf, 'get') itself raised AttributeError when the
        # method was missing, so the intended TypeError was never reached.
        if conf is not None and not hasattr(conf, 'get'):
            raise TypeError("conf has no get method")
        # BUG FIX: deep-copy the class-level default instead of aliasing it;
        # previously every instance mutated the shared _default_conf dict.
        self._current_conf = copy.deepcopy(self._default_conf)
        # Instance-level registry so instances do not pollute each other.
        self._register_loggers = ['root']
        if conf is not None:
            self._current_conf['formatters'].update(conf.get('formatters', {}))
            self._current_conf['handlers'].update(conf.get('handlers', {}))
            self._current_conf['loggers'] = conf.get('loggers', {})
            # set default propagate = 0 so records are not duplicated by root
            for logger in self._current_conf['loggers'].values():
                logger['propagate'] = 0
        try:
            logging.config.dictConfig(self._current_conf)
        except ValueError:
            # The merged configuration was invalid: fall back to the default.
            self._current_conf = copy.deepcopy(self._default_conf)
            logging.config.dictConfig(self._current_conf)
            logging.getLogger("defaultLogger").exception("logger config error.")
        finally:
            self._logger = logging.getLogger("defaultLogger")
        for key in self._current_conf.get('loggers', {}).keys():
            self._register_loggers.append(key)

    def getLogger(self, name):
        """Return a configured logger by *name*; 'root' maps to the default logger.

        :raises NameError: if the name was never registered via the config.
        """
        if name == "root":
            return self._logger
        if name in self._register_loggers:
            return logging.getLogger(name)
        else:
            raise NameError("No this logger: {}".format(name))
if __name__ == "__main__":
conf = {
"formatters": {
"default": {
"format": "[%(asctime)s][%(name)s][%(levelname)s][%(filename)s:%(lineno)d]: %(message)s",
"datefmt": "%d-%M-%Y %H:%M:%S"
}
},
"handlers": {
"consoleHandler": {
"class":"logging.StreamHandler",
"level":"NOTSET",
"formatter":"default",
"stream":"ext://sys.stdout"
},
"fileHandler": {
"class": "logging.FileHandler",
"level": "NOTSET",
"formatter": "default",
"filename": "testHandler2.log"
}
},
"loggers": {
"testLogger1": {
"handlers": ["consoleHandler"],
"level": "INFO"
},
"testLogger2": {
"handlers": ["fileHandler"],
"level": "DEBUG"
}
}
}
loggerHome = Logger(conf)
#root = loggerHome.getLogger('root')
#root.debug('this is a debug message')
#root.info('this is a info message')
#root.warn('this is a warning message')
#root.error('this is a error message')
#root.fatal('this is a fatal message')
testLogger1 = loggerHome.getLogger('testLogger1')
testLogger1.debug('this is a debug message')
testLogger1.info('this is a info message')
testLogger1.warn('this is a warning message')
testLogger1.error('this is a error message')
testLogger1.fatal('this is a fatal message')
testLogger2 = loggerHome.getLogger('testLogger2')
testLogger2.debug('this is a debug message')
testLogger2.info('this is a info message')
testLogger2.warn('this is a warning message')
testLogger2.error('this is a error message')
testLogger2.fatal('this is a fatal message')
| 4,064 | 1,158 |
from tempfile import NamedTemporaryFile
import shutil
import csv
import datetime
import time
#filename = 'tmpEmployeeDatabase.csv'
# NOTE(review): this handle is created as an import-time side effect, shadows
# the stdlib `tempfile` module name, and (delete=False) leaves the file on
# disk — confirm all three are intentional.
tempfile = NamedTemporaryFile('w+t', newline='', delete=False)
class tempFile:
    """Rewrite the attendance CSV via a temporary file.

    BUG FIX: the original methods lacked ``self`` (instantiating or calling
    them raised TypeError), called ``datetime.date()`` without arguments
    (TypeError), and read globals that were never defined. The class now
    manages its own filename and temp-file handle.
    """

    def __init__(self):
        # Build the dated filename, e.g. "123-WildStang_Attendance.csv" style:
        # <month><day><year-2000>-WildStang_Attendance.csv
        today = datetime.date.today()
        self.filename = "{}{}{}-WildStang_Attendance.csv".format(
            today.month, today.day, today.year - 2000)
        self.tempfile = None

    def createTemp(self):
        """Create (and remember) a fresh temporary file for rewriting the CSV."""
        self.tempfile = NamedTemporaryFile('w+t', newline='', delete=False)
        return self.tempfile

    def findID(self):
        """Title-case the name column (index 1) of every row, then atomically
        replace the CSV with the rewritten temp file."""
        if self.tempfile is None:
            self.createTemp()
        with open(self.filename, 'r', newline='') as csv_file, self.tempfile:
            reader = csv.reader(csv_file, delimiter=',', quotechar='"')
            writer = csv.writer(self.tempfile, delimiter=',', quotechar='"')
            for row in reader:
                row[1] = row[1].title()
                writer.writerow(row)
        shutil.move(self.tempfile.name, self.filename)
        self.tempfile = None
| 846 | 277 |
from tkinter import *

# Minimal Tk demo: a window with a single "Click" button placed via grid.
janela = Tk()
botao = Button(janela, text="Click")
botao.grid(row=0, column=0)
# Enter the Tk event loop.
# BUG FIX: corrupted trailing tokens after mainloop() (`| 121 | 52 |`) were a
# syntax error and have been removed.
janela.mainloop()
###################################################################################################
#
#
# Copyright (C) by Shivani Kishnani & Andreas Zoglauer.
# All rights reserved.
#
# Please see the file License.txt in the main repository for the copyright-notice.
#
###################################################################################################
###################################################################################################
import os
import sys
import argparse
import itertools
from ToyModel3DCone import ToyModel3DCone
import signal
###################################################################################################
"""
This program loops over different layout and determines their performance
For all the command line options, try:
python3 explorelayouts.py --help
"""
parser = argparse.ArgumentParser(description='Passing in values to run ToyModel3DCone to test different layouts')
parser.add_argument('-f', '--file', default='changethis.txt', help='File name used for training/testing')
parser.add_argument('-o', '--output', default='output.txt', help='The output file name where the final results will be stored')
parser.add_argument('-l', '--hiddenlayers', default='3', help='Number of hidden layers. Default: 3')
parser.add_argument('-n', '--startingnode', default='10', help='Number of nodes to start with. Default: 50')
parser.add_argument('-m', '--multfactor', default='10', help='Number that is to be multiplied to starting nodes to get layers of new file')
parser.add_argument('-a', '--activation', default='relu', help='Name of default activation layer to be applied')
parser.add_argument('-mn', '--maxNode', default='50', help='Maximum number of nodes in a layer')
parser.add_argument('-t', '--time', default='600', help='Time in seconds to run the model for')
args = parser.parse_args()
hiddenLayers = int(args.hiddenlayers)
multFactor = int(args.multfactor)
startingNode = int(args.startingnode)
maxNode = int(args.maxNode)
LayoutList = []
output = args.output
filew = open(output,"w+")
#Step 0: Take care of Ctrl+C
Interrupted = False
NInterrupts = 0
def signal_handler(signum, frame):
    """SIGINT handler: flag the interruption; force-quit after three presses.

    :param signum: received signal number (unused; renamed so the ``signal``
        module is not shadowed).
    :param frame: current stack frame (unused).
    """
    print("You pressed Ctrl+C! inside explore_layouts!")
    global Interrupted
    Interrupted = True
    global NInterrupts
    NInterrupts += 1
    if NInterrupts >= 3:
        print("Aborting!")
        filew.close()
        # BUG FIX: `System.exit(0)` is Java and raised NameError here;
        # sys.exit is the Python call.
        sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Step 1: Create function to get layout
def create_layout(node, numLayers):
layer_list = [node]
while numLayers > 0 and node!= 0:
add = node*multFactor
layer_list.append(node*multFactor)
node = add
numLayers -= 1
return layer_list
# Step 2: Create list of layouts for NN
# One layout per starting width: startingNode, startingNode+10, ..., maxNode.
for Layout in list(create_layout(x, hiddenLayers) for x in range(startingNode, maxNode+1, 10)):
    LayoutList.append(Layout)
    print(Layout)
# Step 3: Loop over all layouts and record performance
# NOTE(review): ToyModel3DCone presumably writes its results into `filew`
# itself — the return value is discarded; confirm.
for Layout in LayoutList:
    ToyModel3DCone(filew, Layout, args.activation)
filew.close()
print("Finished!")
# END
###################################################################################################
| 3,205 | 927 |
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines data preprocessing pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import os
import random
import apache_beam as beam
from apache_beam.io import tfrecordio
from apache_beam.pvalue import TaggedOutput
from tensorflow import gfile
from tensorflow import logging
from tensorflow_transform.coders import example_proto_coder
from tensorflow_transform.tf_metadata import dataset_schema
from constants import constants
from utils import utils
class _DatasetType(enum.Enum):
  """Encodes integer values to differentiate train, validation, test sets.

  UNSPECIFIED (value 0) acts as a sentinel that the pipeline writer skips.
  """
  UNSPECIFIED = 0
  TRAIN = 1
  VAL = 2
class _SplitData(beam.DoFn):
  """DoFn that randomly splits records in training / validation sets."""

  def process(self, element, train_size, val_label):
    """Emits element on the main (train) output or on the tagged val output."""
    # A draw at or below train_size stays in the training set; the rest is
    # routed to the validation output tag.
    if random.random() <= train_size:
      yield element
    else:
      yield TaggedOutput(val_label, element)
class ReadFile(beam.DoFn):
  """DoFn to read and label files."""

  def process(self, element):
    """Yields one labelled record per line of a recognized review file."""
    labels = {
        constants.SUBDIR_POSITIVE: constants.POSITIVE_SENTIMENT_LABEL,
        constants.SUBDIR_NEGATIVE: constants.NEGATIVE_SENTIMENT_LABEL
    }
    # A path may match at most one sentiment sub-directory.
    found_labels = [sentiment for subdir, sentiment in labels.items()
                    if subdir in element]
    if len(found_labels) > 1:
      raise ValueError('Incompatible path: `{}`.'.format(element))
    if not found_labels:
      logging.debug('Label not found for file: `%s`.', element)
      return
    with gfile.GFile(element, 'r') as single_file:
      for line in single_file:
        yield {constants.LABELS: found_labels[0], constants.REVIEW: line}
@beam.ptransform_fn
def shuffle(p):
  """Shuffles data from PCollection.

  Args:
    p: PCollection.

  Returns:
    PCollection of shuffled data.
  """

  class _AddRandomKey(beam.DoFn):
    def process(self, element):
      yield random.random(), element

  shuffled_data = (
      p
      | 'PairWithRandom' >> beam.ParDo(_AddRandomKey())
      | 'GroupByRandom' >> beam.GroupByKey()
      # BUG FIX: `lambda (k, vs): vs` is Python-2-only tuple-parameter syntax
      # (a SyntaxError under Python 3); index the (key, values) pair instead.
      | 'DropRandom' >> beam.FlatMap(lambda kv: kv[1]))
  return shuffled_data
def run(p, params):
  """Defines Beam preprocessing pipeline.

  Performs the following:
    - Reads text files from pattern.
    - Split text files in train and validation sets.

  Args:
    p: PCollection, initial pipeline.
    params: Object holding a set of parameters as name-value pairs.
  """
  # Matches <input_dir>/<any subdir>/<anything><FILE_EXTENSION>.
  path_pattern = os.path.join(params.input_dir, '*', '*{}'.format(
      constants.FILE_EXTENSION))
  data = (
      p
      | 'ListFiles' >> beam.Create(gfile.Glob(path_pattern))
      | 'ReadFiles' >> beam.ParDo(ReadFile())
      | 'SplitData' >> beam.ParDo(
          _SplitData(),
          train_size=params.train_size,
          val_label=_DatasetType.VAL.name).with_outputs(
              _DatasetType.VAL.name, main=_DatasetType.TRAIN.name))
  schema = dataset_schema.from_feature_spec(utils.get_processed_data_schema())
  for dataset in _DatasetType:
    # Skip the UNSPECIFIED sentinel (value 0); write TRAIN and VAL shards.
    if not dataset.value:
      continue
    _ = (
        data[dataset.name]
        | 'Shuffle{}'.format(dataset.name) >> shuffle()  # pylint: disable=no-value-for-parameter
        | 'WriteFiles{}'.format(dataset.name) >> tfrecordio.WriteToTFRecord(
            os.path.join(params.output_dir, dataset.name + constants.TFRECORD),
            coder=example_proto_coder.ExampleProtoCoder(schema)))
| 4,013 | 1,264 |
import ast
import os
import subprocess
from pathlib import Path
from json2html import *
from inspect4py.parse_setup_files import inspect_setup
from inspect4py.structure_tree import DisplayablePath, get_directory_structure
def print_summary(json_dict):
    """
    Print a small summary of the entities recognized during the analysis:
    folders, files, dependencies, functions and classes.
    Only invoked when a directory with multiple files is passed.
    """
    folders = 0
    files = 0
    dependencies = 0
    functions = 0
    classes = 0
    for key, value in json_dict.items():
        # Keys containing a path separator correspond to (sub)folders.
        if "/" in key:
            folders += 1
        if not isinstance(value, list):
            continue
        files += len(value)
        for element in value:
            if "dependencies" in element:
                dependencies += len(element["dependencies"])
            if "functions" in element:
                functions += len(element["functions"])
            if "classes" in element:
                classes += len(element["classes"])
    print("Analysis completed")
    print("Total number of folders processed (root folder is considered a folder):", folders)
    print("Total number of files found: ", files)
    print("Total number of classes found: ", classes)
    print("Total number of dependencies found in those files", dependencies)
    print("Total number of functions parsed: ", functions)
def extract_directory_tree(input_path, ignore_dirs, ignore_files, visual=0):
    """
    Obtain the directory tree of a repository, skipping ignored entries.
    :param input_path: path of the repository to analyze
    :param ignore_dirs: directory names to ignore
    :param ignore_files: file names to ignore
    :param visual: when truthy, also print the tree to stdout
    :return: nested dict describing the directory structure
    """
    default_ignore = ['.git', '__pycache__', '.idea', '.pytest_cache']
    ignore_set = tuple(list(ignore_dirs) + list(ignore_files) + default_ignore)

    def keep(path):
        # Drop ignored names and compiled .pyc artifacts.
        if path.name in ignore_set:
            return False
        return not os.path.join("../", path.name).endswith(".pyc")

    if visual:
        for entry in DisplayablePath.make_tree(Path(input_path), criteria=keep):
            print(entry.displayable())
    return get_directory_structure(input_path, ignore_set)
def prune_json(json_dict):
    """
    Method that given a JSON object, removes all its empty fields.
    This method simplifies the resultant JSON.
    Falsy scalars are dropped except booleans, which are always kept.
    :param json_dict: input JSON file to prune
    :return: JSON file removing empty values
    """
    if not isinstance(json_dict, dict):
        # Only dicts are pruned; any other value is returned untouched.
        return json_dict
    final_dict = {}
    for key, value in json_dict.items():
        # Keep truthy values and booleans (False is a legitimate value).
        if not (value or isinstance(value, bool)):
            continue
        if isinstance(value, dict):
            pruned = prune_json(value)
            if pruned:  # Remove empty dicts
                final_dict[key] = pruned
        elif isinstance(value, list):
            # BUG FIX: filter(None, ...) also dropped False from lists while
            # the dict branch deliberately keeps booleans; apply the same
            # keep-rule to list elements for consistency.
            pruned_list = [item for item in (prune_json(i) for i in value)
                           if item or isinstance(item, bool)]
            if pruned_list:  # Remove empty lists
                final_dict[key] = pruned_list
        else:
            final_dict[key] = value
    return final_dict
def extract_requirements(input_path):
    """Run pigar over *input_path* and return a {package: version} dict.

    Returns None if pigar fails or its output cannot be parsed.
    """
    print("Finding the requirements with the pigar package for %s" % input_path)
    try:
        file_name = 'requirements_' + os.path.basename(input_path) + '.txt'
        # Attention: we can modify the output of pigar, if we use echo N.
        # Answering yes (echo y), we allow searching for PyPI
        # for the missing modules and filter some unnecessary modules.
        # NOTE(review): shell=True with an interpolated path — only safe for
        # trusted input paths.
        cmd = 'echo y | pigar -P ' + input_path + ' --without-referenced-comments -p ' + file_name
        proc = subprocess.Popen(cmd.encode('utf-8'), shell=True, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        req_dict = {}
        # Skip pigar's header line; the context manager closes the file.
        with open(file_name, "r") as req_file:
            lines = req_file.readlines()[1:]
        for line in lines:
            try:
                if line != "\n":
                    split_line = line.split(" == ")
                    req_dict[split_line[0]] = split_line[1].split("\n")[0]
            except IndexError:
                # Lines without a " == " separator are skipped.
                pass
        # Note: Pigar requirement file is being deleted
        # in the future we might want to keep it (just commenting the line below)
        os.remove(file_name)
        return req_dict
    except Exception:
        # BUG FIX: the original message applied `%` to a string with no
        # placeholder, which itself raised a TypeError inside the handler.
        print("Error finding the requirements in %s" % input_path)
def extract_software_invocation(dir_info, dir_tree_info, input_path, call_list, readme):
    """
    Method to detect the directory type of a software project. This method also detects tests
    We distinguish four main types: script, package, library and service. Some can be more than one.
    :param dir_info: json containing all the extracted information about the software repository
    :param dir_tree_info: json containing the directory information of the target repo
    :param input_path: path of the repository to analyze
    :param call_list: json file containing the list of calls per file and functions or methods.
    :param readme: content of the readme file of the project (if any)
    :return: list of dicts, one per detected invocation type
    """
    software_invocation_info = []
    setup_files = ("setup.py", "setup.cfg")
    server_dependencies = ("flask", "flask_restful", "falcon", "falcon_app", "aiohttp", "bottle", "django", "fastapi",
                           "locust", "pyramid", "hug", "eve", "connexion")
    # Note: other server dependencies are missing here. More testing is needed.
    flag_package_library = 0
    for directory in dir_tree_info:
        for elem in setup_files:  # first check setup.py, then cfg
            if elem in dir_tree_info[directory]:
                # 1. Exploration for package or library
                software_invocation_info.append(inspect_setup(input_path, elem))
                flag_package_library = 1
                break
    # We continue exploration to make sure we continue exploring mains even after detecting this is a
    # library
    # Looping across all mains
    # to decide if it is a service (main + server dep) or just a script (main without server dep)
    main_files = []
    # new list to store the "mains that have been previously classified as "test".
    test_files_main = []
    test_files_no_main = []
    # new list to store files without mains
    body_only_files = []
    flag_service_main = 0
    for key in dir_info:  # filter (lambda key: key not in "directory_tree", dir_info):
        if key!="requirements":
            for elem in dir_info[key]:
                if elem["main_info"]["main_flag"]:
                    flag_service_main = 0
                    flag_service = 0
                    main_stored = 0
                    if elem["is_test"]:
                        test_files_main.append(elem["file"]["path"])
                        main_stored = 1
                    else:
                        try:
                            # 2. Exploration for services in files with "mains"
                            flag_service, software_invocation_info = service_check(elem, software_invocation_info,
                                                                                   server_dependencies, "main", readme)
                        except:
                            main_files.append(elem["file"]["path"])
                    if flag_service:
                        flag_service_main = 1
                    # NOTE(review): when service_check raises, the path was
                    # already appended in the except branch and this condition
                    # appends it again — confirm the duplicate is intended.
                    if not flag_service and not main_stored:
                        main_files.append(elem["file"]["path"])
                elif elem["is_test"]:
                    test_files_no_main.append(elem["file"]["path"])
                # Filtering scripts with just body in software invocation
                elif elem['body']['calls']:
                    body_only_files.append(elem)
    m_secondary = [0] * len(main_files)
    flag_script_main = 0
    # this list (of lists) stores the mains that each main import
    import_mains = []
    # this list (of lists) stores the mains that each main is imported by
    imported_by = [None]*len(main_files)
    # 3. Exploration for main scripts
    for m in range(0, len(main_files)):
        m_calls = find_file_calls(main_files[m], call_list)
        # HERE I STORE WHICH OTHER MAIN FILES CALLS EACH "M" MAIN_FILE
        m_imports = extract_relations(main_files[m], m_calls, main_files, call_list)
        # storing those m_imports in the import_mains[m]
        import_mains.append(m_imports)
        for m_i in m_imports:
            m_secondary[main_files.index(m_i)] = 1
            if not imported_by[main_files.index(m_i)]:
                imported_by[main_files.index(m_i)] = []
            imported_by[main_files.index(m_i)].append(main_files[m])
    for m in range(0, len(main_files)):
        soft_info = {"type": "script", "run": "python " + main_files[m], "has_structure": "main",
                     "mentioned_in_readme": os.path.basename(os.path.normpath(main_files[m])) in readme,
                     "imports": import_mains[m], "imported_by": imported_by[m]}
        software_invocation_info.append(soft_info)
        flag_script_main = 1
    # tests with main.
    for t in range(0, len(test_files_main)):
        # Test files do not have help, they are usually run by themselves
        soft_info = {"type": "test", "run": "python " + test_files_main[t], "has_structure": "main",
                     "mentioned_in_readme": os.path.basename(os.path.normpath(test_files_main[t])) in readme}
        software_invocation_info.append(soft_info)
    # tests with no main.
    for t in range(0, len(test_files_no_main)):
        # Test files do not have help, they are usually run by themselves
        soft_info = {"type": "test", "run": "python " + test_files_no_main[t], "has_structure": "body",
                     "mentioned_in_readme": os.path.basename(os.path.normpath(test_files_no_main[t])) in readme}
        software_invocation_info.append(soft_info)
    flag_service_body = 0
    flag_script_body = 0
    for elem in body_only_files:
        # 4. Exploration for services in files with body
        flag_service, software_invocation_info = service_check(elem, software_invocation_info,
                                                               server_dependencies, "body", readme)
        if flag_service:
            flag_service_body = 1
        # Only adding this information if we haven't not found libraries, packages, services or scripts with mains.
        # 5. Exploration for script without main in files with body
        if not flag_service_main and not flag_service_body and not flag_package_library and not flag_script_main:
            soft_info = {"type": "script", "run": "python " + elem["file"]["path"], "has_structure": "body",
                         "mentioned_in_readme": elem["file"]["fileNameBase"] + "." + elem["file"][
                             "extension"] in readme}
            software_invocation_info.append(soft_info)
            flag_script_body = 1
    # Only adding this information if we haven't not found libraries, packages, services or scripts with mains
    # or bodies.
    # 6. Exploration for script without main or body in files with body
    if not flag_script_body and not flag_service_main and not flag_service_body and not flag_package_library \
            and not flag_script_main:
        python_files = []
        for directory in dir_tree_info:
            for elem in dir_tree_info[directory]:
                if ".py" in elem:
                    python_files.append(os.path.abspath(input_path + "/" + directory + "/" + elem))
        for f in range(0, len(python_files)):
            soft_info = {"type": "script without main", "import": python_files[f], "has_structure": "without_body",
                         "mentioned_in_readme": os.path.basename(os.path.normpath(python_files[f])) in readme}
            software_invocation_info.append(soft_info)
    return software_invocation_info
def generate_output_html(pruned_json, output_file_html):
    """Render the extracted JSON as a simple HTML view and save it.

    :pruned_json JSON structure to convert
    :output_file_html destination path for the generated HTML file
    """
    rendered = json2html.convert(json=pruned_json)
    with open(output_file_html, "w") as out_handle:
        out_handle.write(rendered)
def top_level_functions(body):
    """Yield the FunctionDef nodes appearing directly in *body* (no nesting)."""
    return (node for node in body if isinstance(node, ast.FunctionDef))
def top_level_classes(body):
    """Yield the ClassDef nodes appearing directly in *body* (no nesting)."""
    return (node for node in body if isinstance(node, ast.ClassDef))
def parse_module(filename):
    """Read *filename* as text and return its parsed AST module."""
    with open(filename, "rt") as source:
        return ast.parse(source.read(), filename=filename)
def list_functions_classes_from_module(m, path):
    """Return ([top-level function/class names], origin) for module *m*.

    The dotted module name is resolved as a ``.py`` file under the parent
    directory of *path*; when it can be parsed, its top-level function and
    class names are collected and the origin is ``"internal"``.  Any failure
    (file missing, syntax error, ...) yields ``([], "external")``.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt)
    is narrowed to ``Exception``, and the local ``type`` no longer shadows
    the builtin.

    :param m: dotted module name, e.g. ``"pkg.mod"``
    :param path: path of the file that imports the module
    :return: tuple ``(names, "internal" | "external")``
    """
    functions_classes = []
    try:
        # Dotted module names map to sub-directories of the repository root.
        repo_path = Path(path).parent.absolute()
        abs_repo_path = os.path.abspath(repo_path)
        file_module = abs_repo_path + "/" + m.replace(".", "/") + ".py"
        tree = parse_module(file_module)
        for func in top_level_functions(tree.body):
            functions_classes.append(func.name)
        for cl in top_level_classes(tree.body):
            functions_classes.append(cl.name)
        module_type = "internal"
    except Exception:
        # Resolution or parsing failed: treat as an external dependency.
        module_type = "external"
    return functions_classes, module_type
def type_module(m, i, path):
    """Classify import *i* (optionally from package *m*) as internal or external.

    An import is "internal" when a matching ``.py`` file exists under the
    parent directory of *path*; otherwise it is "external".
    """
    root = os.path.abspath(Path(path).parent.absolute())
    if m:
        candidate = root + "/" + m.replace(".", "/") + "/" + i + ".py"
    else:
        candidate = root + "/" + i + ".py"
    return "internal" if Path(candidate).is_file() else "external"
def extract_call_functions(funcs_info, body=0):
    """Collect the calls made by each function (or by the module body).

    With ``body=1``, *funcs_info* describes the module body and a flat
    ``{"local": [...]}`` dict is returned (empty when there are no calls).
    Otherwise one entry per calling function is produced, with calls of its
    nested functions recorded under ``"nested"``.
    """
    call_list = {}
    if body:
        body_calls = funcs_info["body"]["calls"]
        if body_calls:
            call_list["local"] = body_calls
        return call_list
    for funct_name, funct_info in funcs_info.items():
        if funct_info["calls"]:
            call_list[funct_name] = {"local": funct_info["calls"]}
        if funct_info["functions"]:
            # NOTE(review): mirrors the original structure -- this assumes an
            # entry was created above; a function with nested defs but no
            # calls of its own would raise KeyError here. Confirm inputs.
            call_list[funct_name]["nested"] = extract_call_functions(funct_info["functions"])
    return call_list
def extract_call_methods(classes_info):
    """Collect the calls made by each method, recursing into nested functions."""
    call_list = {}
    for method_name, method_info in classes_info.items():
        if method_info["calls"]:
            call_list[method_name] = {"local": method_info["calls"]}
        if method_info["functions"]:
            # NOTE(review): mirrors the original -- assumes the entry above
            # exists (KeyError if a method has nested defs but no calls).
            call_list[method_name]["nested"] = extract_call_methods(method_info["functions"])
    return call_list
def call_list_file(code_info):
    """Build the call index of one file: its functions, module body and classes."""
    call_list = {
        "functions": extract_call_functions(code_info.funcsInfo),
        "body": extract_call_functions(code_info.bodyInfo, body=1),
    }
    for class_name in code_info.classesInfo:
        call_list[class_name] = extract_call_methods(code_info.classesInfo[class_name]["methods"])
    return call_list
def call_list_dir(dir_info):
    """Build the call index of every file, grouped by directory."""
    call_list = {}
    for directory, files in dir_info.items():
        dir_entry = {}
        for file_info in files:
            path = file_info["file"]["path"]
            file_entry = extract_call_functions(file_info["functions"])
            for class_name, class_info in file_info["classes"].items():
                file_entry[class_name] = extract_call_methods(class_info["methods"])
            dir_entry[path] = file_entry
        call_list[directory] = dir_entry
    return call_list
def find_file_calls(file_name, call_list):
    """Return the call info of the first indexed path contained in *file_name*.

    Returns None when no indexed file matches.
    """
    for files in call_list.values():
        for indexed_path in files:
            if indexed_path in file_name:
                return files[indexed_path]
def find_module_calls(module, call_list):
    """Return the call info of the indexed file matching *module*.

    A file matches when its path contains ``"/<module>."``.
    NOTE(review): callers may pass a module name that already ends with a
    dot (see file_in_call), making the pattern ``"/mod.."`` -- confirm
    this is intended.
    """
    pattern = "/" + module + "."
    for files in call_list.values():
        for indexed_path in files:
            if pattern in indexed_path:
                return files[indexed_path]
# DFS algorithm - Allowing up to 2 levels of depth.
def file_in_call(base, call, file, m_imports, call_list, orig_base, level):
    """Depth-limited DFS step: does *call* (transitively) reach *file*?

    Appends *file* to m_imports and returns 1 on a hit; returns 0
    otherwise.  *base* is the "<module>." prefix of the candidate file;
    *orig_base* is the prefix of the file whose relations are being
    computed, so self-references are rejected.  *level* tracks the current
    recursion depth.
    """
    ### NOTE: LEVEL is a parameter very important here!
    ### It allows us to track how deep we are inside the recursive search.
    ### To modify the search depth, change level_depth.
    level_depth = 2
    ## For each call, we extract all its sub_calls (level 1),
    ## and for each sub_call we extract all its sub_sub_calls (level 2)
    if base in call and m_imports.count(file) == 0 and orig_base not in call:
        m_imports.append(file)
        return 1
    elif orig_base in call:
        return 0
    elif level < level_depth and call!="":
        m_calls_extern = {}
        # module_base keeps a trailing dot (e.g. "mod."); find_module_calls
        # then matches "/mod.." -- NOTE(review): confirm this is intended.
        module_base = call.split(".")[0]
        module_base = module_base + "."
        m_calls_extern = find_module_calls(module_base, call_list)
        # Note: Here is when we increase the level of recursion
        level += 1
        if m_calls_extern:
            for m_c in m_calls_extern:
                flag_found = extract_data(base, m_calls_extern[m_c], file, m_imports, 0, call_list, orig_base, level)
                if flag_found:
                    return 1
        return 0
    else:
        return 0
def extract_local_function(base, m_calls_local, file, m_imports, flag_found, call_list, orig_base, level):
    """Scan the direct ("local") calls of a scope, stopping at the first hit.

    Returns the last file_in_call result (1 on a hit), or the incoming
    *flag_found* when there are no calls to scan.
    """
    result = flag_found
    for local_call in m_calls_local:
        result = file_in_call(base, local_call, file, m_imports, call_list, orig_base, level)
        if result:
            return result
    return result
def extract_nested_function(base, m_calls_nested, file, m_imports, flag_found, call_list, orig_base, level):
    """Descend into the calls of nested functions.

    The original looped over every nested-function key but invoked
    extract_data with the *whole* nested dict each time -- i.e. the same
    arguments on every iteration.  Since extract_data already walks every
    key of the dict it receives, a single call is equivalent and avoids the
    redundant repeated traversals.
    """
    if m_calls_nested:
        flag_found = extract_data(base, m_calls_nested, file, m_imports, flag_found, call_list, orig_base, level)
    return flag_found
def extract_data(base, m_calls, file, m_imports, flag_found, call_list, orig_base, level):
    """Walk a (possibly nested) call-index dict looking for *file*.

    *m_calls* may map "local" -> list of calls, "nested" -> sub-dict, or a
    function name -> its own call dict; each shape is dispatched to the
    matching helper.  Returns a truthy flag as soon as a match is found,
    else the last helper result.
    """
    for elem in m_calls:
        if elem == "local":
            # Direct calls made in this scope.
            flag_found = extract_local_function(base, m_calls[elem], file, m_imports, flag_found, call_list, orig_base,
                                                level)
        elif elem == "nested":
            # Calls made by functions nested inside this scope.
            flag_found = extract_nested_function(base, m_calls[elem], file, m_imports, flag_found, call_list, orig_base,
                                                level)
        else:
            # elem is a function/method name: recurse into its call dict.
            flag_found = extract_data(base, m_calls[elem], file, m_imports, flag_found, call_list, orig_base, level)
        if flag_found:
            return flag_found
    return flag_found
# We will apply the DFS strategy later to find the external relationships.
def extract_relations(file_name, m_calls, main_files, call_list):
    """Find which of *main_files* are reachable from *file_name*'s calls.

    :param file_name: path of the file whose relations are computed
    :param m_calls: call index of *file_name* (see call_list_file)
    :param main_files: candidate files (those with a main) to look for
    :param call_list: global per-directory call index
    :return: list of matched files (m_imports)

    NOTE(review): the early return below exits after the first file whose
    calls match, so at most one entry is collected -- confirm whether
    collecting all matches was intended.
    """
    m_imports = []
    # "<name>." prefix of the file under analysis, used to reject self-hits.
    orig_base = os.path.basename(file_name)
    orig_base = os.path.splitext(orig_base)[0]
    orig_base = orig_base + "."
    for file in main_files:
        if file not in file_name:
            flag_found = 0
            # "<name>." prefix of the candidate file to search for.
            base = os.path.basename(file)
            base = os.path.splitext(base)[0]
            base = base + "."
            for m_c in m_calls:
                level = 0
                flag_found = extract_data(base, m_calls[m_c], file, m_imports, flag_found, call_list, orig_base, level)
            if flag_found:
                return m_imports
    return m_imports
def service_check(elem, software_invocation_info, server_dependencies, has_structure, readme):
    """Check whether *elem* imports a known server/service dependency.

    Both the plain imports and the from-imports of every dependency entry
    are tested; the search stops at the first hit.

    :return: (flag_service, updated software_invocation_info)
    """
    flag_service = 0
    for dep in elem["dependencies"]:
        for candidates in (dep["import"], dep["from_module"]):
            flag_service, software_invocation_info = service_in_set(
                candidates, server_dependencies, elem,
                software_invocation_info, has_structure, readme)
            if flag_service:
                return flag_service, software_invocation_info
    return flag_service, software_invocation_info
def service_in_set(data, server_dependencies, elem, software_invocation_info, has_structure, readme):
    """Record a "service" invocation entry if *data* names a server dependency.

    The entry literal was previously duplicated across the list and scalar
    branches; both now share a single construction point.

    :param data: one import name, or a list of import names
    :param server_dependencies: lowercase names of known server frameworks
    :param elem: file metadata ("file" -> path / fileNameBase / extension)
    :param software_invocation_info: accumulated invocation entries (mutated)
    :param has_structure: structure tag to attach ("main", "body", ...)
    :param readme: README text used to flag mentions of this file
    :return: (1 if a service import was detected else 0, updated list)
    """
    # Normalize to a list; a falsy scalar (None/"") yields no candidates.
    candidates = data if isinstance(data, list) else ([data] if data else [])
    flag_service = 0
    for candidate in candidates:
        if candidate.lower() in server_dependencies:
            soft_info = {"type": "service", "run": "python " + elem["file"]["path"],
                         "has_structure": has_structure,
                         "mentioned_in_readme": elem["file"]["fileNameBase"] + "." + elem["file"][
                             "extension"] in readme}
            flag_service = 1
            if soft_info not in software_invocation_info:
                software_invocation_info.append(soft_info)
    return flag_service, software_invocation_info
def rank_software_invocation(soft_invocation_info_list):
    """
    Function to create a ranking over the different ways of executing a program.
    If two elements have the same position in the ranking, it means that there is no priority among them.
    Heuristic to order the invocation list is as follows, in decreasing order of prioritization:
    - If package or library is detected, this will be always first.
    - If something (script or service) is mentioned in the readme file, it is considered a priority.
    - Services are prioritized over scripts
    - Scripts with main are prioritized over script with body.
    - Scripts with body are prioritized over scripts with no body.
    TO DOs:
    - If a script imports other scripts (or service), it gets prioritized (TO DO when examples are available)
    - If several scripts are available, those at root level are prioritized (TO DO when examples are available)
    :param soft_invocation_info_list JSON list with the different ways to execute a program.
    """
    if not soft_invocation_info_list:
        return soft_invocation_info_list
    # Calculate score for every entry in the list.
    # The bare try/except blocks are replaced with dict.get(): a missing
    # "mentioned_in_readme" contributes nothing, and a missing/None
    # "has_structure" skips both structure bonuses (same net behavior).
    for entry in soft_invocation_info_list:
        score = 0
        if "library" in entry["type"] or "package" in entry["type"]:
            score += 100
        if entry.get("mentioned_in_readme"):
            score += 10
        if "service" in entry["type"]:
            score += 5
        structure = entry.get("has_structure") or ""
        if "main" in structure:
            score += 2
        if "body" in structure:
            score += 1
        entry["ranking"] = score
    # Reorder vector and assign ranking
    soft_invocation_info_list.sort(key=lambda x: x["ranking"], reverse=True)
    # Replace score by position (entries with equal score share a ranking)
    position = 1
    previous_score = soft_invocation_info_list[0]["ranking"]
    for entry in soft_invocation_info_list:
        current_score = entry["ranking"]
        if previous_score > current_score:  # Ordered in descending order
            position += 1
            previous_score = current_score
        entry["ranking"] = position
    return soft_invocation_info_list
| 24,252 | 7,020 |
class Bird(object):
    """Base class of the inheritance demo below."""
    have_feather = True
    way_of_reproduction = 'egg'

    def move(self, dx, dy):
        """Return the position reached after moving (dx, dy) from the origin."""
        start = [0, 0]
        return [start[0] + dx, start[1] + dy]
class Chicken(Bird):
    """A walking bird (tutorial subclass of Bird)."""
    way_of_move = 'walk'
    possible_in_KFC = True
class Oriole(Bird):
    """A flying bird (tutorial subclass of Bird)."""
    way_of_move = 'fly'
    possible_in_KFC = False
class happyBird(Bird):
    """Bird subclass that greets on construction (Python 2 print statement)."""
    def __init__(self,more_words):
        print 'We are happy birds: ', more_words
# Demo (Python 2): exercise the classes defined above.
winter = happyBird('Happy')  # prints its greeting on construction
summer = Chicken()
print summer.have_feather
print summer.move(5,8)
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Show a list of users in an Apps Domain.
Tool to show usage of Admin SDK Directory APIs.
APIs Used:
Admin SDK Directory API: user management
"""
import sys
# setup_path required to allow imports from component dirs (e.g. utils)
# and lib (where the OAuth and Google API Python Client modules reside).
import setup_path # pylint: disable=unused-import,g-bad-import-order
from admin_sdk_directory_api import users_api
from utils import admin_api_tool_errors
from utils import auth_helper
from utils import common_flags
from utils import file_manager
from utils import log_utils
FILE_MANAGER = file_manager.FILE_MANAGER
def AddFlags(arg_parser):
  """Handle command line flags unique to this script.

  Args:
    arg_parser: object from argparse.ArgumentParser() to accumulate flags.
  """
  # Shared flags (apps_domain, --force, --verbose) come from common_flags;
  # registration order is preserved because it determines --help output.
  common_flags.DefineAppsDomainFlagWithDefault(arg_parser)
  common_flags.DefineForceFlagWithDefaultFalse(arg_parser)
  common_flags.DefineVerboseFlagWithDefaultFalse(arg_parser)
  arg_parser.add_argument('--json', action='store_true', default=False,
                          help='Output results to a json file.')
  arg_parser.add_argument('--first_n', type=int, default=0,
                          help='Show the first n users in the list.')
def main(argv):
  """A script to test Admin SDK Directory APIs.

  Lists users in the domain (optionally only the first n), printing them
  or writing them to a json file when --json is supplied.  Exits non-zero
  on API or file errors.  NOTE: Python 2 source (print statement below).
  """
  flags = common_flags.ParseFlags(argv, 'List domain users.', AddFlags)
  if flags.json:
    # Fail early if the output file exists and --force was not supplied.
    FILE_MANAGER.ExitIfCannotOverwriteFile(FILE_MANAGER.USERS_FILE_NAME,
                                           overwrite_ok=flags.force)
  http = auth_helper.GetAuthorizedHttp(flags)
  api_wrapper = users_api.UsersApiWrapper(http)
  # first_n <= 0 means "no limit".
  max_results = flags.first_n if flags.first_n > 0 else None
  try:
    if flags.json:
      user_list = api_wrapper.GetDomainUsers(flags.apps_domain,
                                             max_results=max_results)
    else:
      api_wrapper.PrintDomainUsers(flags.apps_domain,
                                   max_results=max_results)
  except admin_api_tool_errors.AdminAPIToolUserError as e:
    log_utils.LogError(
        'Unable to enumerate users from domain %s.' % flags.apps_domain, e)
    sys.exit(1)
  if flags.json:
    try:
      filename_path = FILE_MANAGER.WriteJsonFile(FILE_MANAGER.USERS_FILE_NAME,
                                                 user_list,
                                                 overwrite_ok=flags.force)
    except admin_api_tool_errors.AdminAPIToolFileError as e:
      # This usually means the file already exists and --force not supplied.
      log_utils.LogError('Unable to write the domain users file.', e)
      sys.exit(1)
    print 'Users list written to %s.' % filename_path
if __name__ == '__main__':
  main(sys.argv[1:])
| 3,353 | 1,000 |
from functools import lru_cache
from typing import List
from days import AOCDay, day
@day(10)
class Day10(AOCDay):
    """Advent of Code 2020, day 10: chains of joltage adapters."""
    print_debug = "c12"
    # Sample inputs from the puzzle description.
    test_input = """16
10
15
5
1
11
7
19
6
12
4""".split("\n")
    test_input2 = """28
33
18
42
31
14
46
20
48
47
24
23
49
45
19
38
39
11
1
32
25
35
8
17
7
9
4
2
34
10
3""".split("\n")
    def common(self, input_data):
        """Parse the puzzle input into a list of ints (shared by both parts)."""
        # input_data = self.test_input2
        self.input_data = list(map(int, input_data))
    def check_smallest_adapter_recurse(self, current_rating, target_rating, adapters_left) -> List[int]:
        """Chain adapters one step at a time, returning the joltage differences.

        NOTE(review): implicitly returns None when no adapter within
        +1..+3 of current_rating remains -- assumes the input always forms
        a complete chain (true for AoC inputs); confirm before reuse.
        """
        options = [current_rating + i for i in range(1, 4)]
        for option in options:
            if option in adapters_left:
                difference = option - current_rating
                current_rating = option
                if current_rating + 3 == target_rating:
                    # Final hop to the built-in adapter is always +3.
                    return [difference, 3]
                new_adapters = adapters_left[:]
                new_adapters.remove(option)
                return self.check_smallest_adapter_recurse(current_rating, target_rating, new_adapters) + [difference]
    def part1(self, input_data):
        """Yield (#1-jolt differences) * (#3-jolt differences) over the full chain."""
        current_rating = 0
        target_rating = max(self.input_data) + 3
        adapters_left = self.input_data[:]
        differences = self.check_smallest_adapter_recurse(current_rating, target_rating, adapters_left)
        yield len([x for x in differences if x == 1]) * len([x for x in differences if x == 3])
    # Bare @lru_cache requires Python 3.8+.  The cache key includes `self`,
    # so the cache keeps the instance alive for the process lifetime --
    # acceptable for a one-shot puzzle run.
    @lru_cache
    def check_adapter_recurse(self, current_rating, target_rating, adapters) -> int:
        """Count the distinct adapter orderings from current_rating up to target_rating."""
        if current_rating == target_rating:
            return 1
        options = [i for i in adapters if 1 <= i - current_rating <= 3]
        count = 0
        for option in options:
            count += self.check_adapter_recurse(option, target_rating, adapters)
        return count
    def part2(self, input_data):
        """Yield the total number of distinct adapter arrangements."""
        current_rating = 0
        target_rating = max(self.input_data) + 3
        # adapters must be a tuple: lru_cache needs hashable arguments.
        adapters_plus_builtin = tuple(self.input_data[:] + [target_rating])
        differences = self.check_adapter_recurse(current_rating, target_rating, adapters_plus_builtin)
        yield differences
| 2,151 | 747 |
'''
URL: https://leetcode.com/problems/find-the-duplicate-number/
Time complexity: O(nlogn)
Space complexity: O(1)
'''
class Solution(object):
    """Find the duplicate in nums (values in 1..n, length n+1) in O(1) space.

    Binary-searches the *value* range rather than the array: for a
    candidate mid, counting how many entries are <= mid tells (by the
    pigeonhole principle) which half of the range holds the duplicate.
    O(n log n) time, O(1) extra space; the array is never modified.
    """
    def findDuplicate(self, nums):
        """
        :type nums: List[int]
        :rtype: int (or -1 when nums has fewer than two elements)
        """
        if len(nums) < 2:
            return -1
        low, high = 1, len(nums) - 1
        while low < high:
            candidate = (low + high) // 2
            below_or_equal = sum(1 for value in nums if value <= candidate)
            if below_or_equal > candidate:
                # Too many values in [1, candidate]: duplicate is there.
                high = candidate
            else:
                low = candidate + 1
        return low
| 642 | 208 |
def includeme(config):
    """Register static views and routes on the Pyramid configurator.

    Route registration order is significant for URL dispatch and is
    preserved as-is.
    """
    config.add_static_view('static', 'static', cache_max_age=3600)
    config.add_route('root', '/')
    config.add_route('ui', '/ui*path')
    # Incoming webhook endpoints (POST only).
    config.add_route('webhooks.github', '/webhooks/github', request_method='POST')
    config.add_route('webhooks.gitlab', '/webhooks/gitlab', request_method='POST')
    config.add_route('config', '/config')
    config.add_route('config.ui', '/config/ui')
    config.add_route('config.tei_schema', '/config/tei-schema')
    # Theme assets: generated CSS plus arbitrary theme files.
    config.add_route('theme.css', '/theme/theme.css')
    config.add_route('theme.files', '/theme/*path')
    config.include('.views.api')
| 626 | 218 |
import pprint
import logging
from contextlib import contextmanager
import pytest
import pandas as pd
from peerscout.shared.database import populated_in_memory_database
from peerscout.server.services.ManuscriptModel import ManuscriptModel
from peerscout.server.services.DocumentSimilarityModel import DocumentSimilarityModel
from peerscout.server.services.manuscript_person_relationship_service import RelationshipTypes
from peerscout.server.services.RecommendReviewers import RecommendReviewers, set_debugv_enabled
from .test_data import (
PERSON_ID,
PERSON_ID1, PERSON_ID2, PERSON_ID3,
PERSON1, PERSON2, PERSON3,
MANUSCRIPT_VERSION1,
MANUSCRIPT_ID1, MANUSCRIPT_ID2,
MANUSCRIPT_ID_FIELDS1, MANUSCRIPT_ID_FIELDS2, MANUSCRIPT_ID_FIELDS3,
MANUSCRIPT_ID_FIELDS4, MANUSCRIPT_ID_FIELDS5,
MANUSCRIPT_VERSION_ID1, MANUSCRIPT_VERSION_ID2, MANUSCRIPT_VERSION_ID3,
MANUSCRIPT_TITLE2, MANUSCRIPT_TITLE3,
MANUSCRIPT_KEYWORD1,
VALID_DECISIONS, VALID_MANUSCRIPT_TYPES,
PUBLISHED_DECISIONS, PUBLISHED_MANUSCRIPT_TYPES,
Decisions,
KEYWORD1
)
MANUSCRIPT_ID = 'manuscript_id'
VERSION_ID = 'version_id'
MANUSCRIPT_ID_COLUMNS = [VERSION_ID]
PERSON_ID_COLUMNS = [PERSON_ID]
LDA_DOCVEC_COLUMN = 'lda_docvec'
EMAIL_1 = 'email1'
ROLE_1 = 'role1'
PERSON1_RESULT = {
**PERSON1,
'memberships': [],
'dates_not_available': [],
'stats': {
'overall': None,
'last_12m': None
}
}
PERSON2_RESULT = {
**PERSON1_RESULT,
**PERSON2
}
PERSON3_RESULT = {
**PERSON1_RESULT,
**PERSON3
}
MEMBERSHIP1_RESULT = {
'member_type': 'memberme',
'member_id': '12345'
}
MEMBERSHIP1 = {
**MEMBERSHIP1_RESULT,
PERSON_ID: PERSON_ID1,
}
MANUSCRIPT_VERSION1_RESULT = {
**MANUSCRIPT_VERSION1,
'authors': [],
'senior_editors': [],
'subject_areas': [],
'is_published': True
}
MANUSCRIPT_VERSION2_RESULT = {
**MANUSCRIPT_VERSION1_RESULT,
**MANUSCRIPT_ID_FIELDS2,
'title': MANUSCRIPT_TITLE2
}
MANUSCRIPT_VERSION2 = MANUSCRIPT_VERSION2_RESULT
MANUSCRIPT_VERSION3_RESULT = {
**MANUSCRIPT_VERSION1_RESULT,
**MANUSCRIPT_ID_FIELDS3,
'title': MANUSCRIPT_TITLE3
}
MANUSCRIPT_VERSION3 = MANUSCRIPT_VERSION3_RESULT
MANUSCRIPT_VERSION4_RESULT = {
**MANUSCRIPT_VERSION1_RESULT,
**MANUSCRIPT_ID_FIELDS4
}
MANUSCRIPT_VERSION4 = MANUSCRIPT_VERSION4_RESULT
MANUSCRIPT_VERSION5_RESULT = {
**MANUSCRIPT_VERSION1_RESULT,
**MANUSCRIPT_ID_FIELDS5
}
MANUSCRIPT_VERSION5 = MANUSCRIPT_VERSION5_RESULT
SUBJECT_AREA1 = 'Subject Area 1'
SUBJECT_AREA2 = 'Subject Area 2'
MANUSCRIPT_SUBJECT_AREA1 = {
**MANUSCRIPT_ID_FIELDS1,
'subject_area': SUBJECT_AREA1
}
MANUSCRIPT_SUBJECT_AREA2 = {
**MANUSCRIPT_ID_FIELDS1,
'subject_area': SUBJECT_AREA2
}
DOCVEC1 = [1, 1]
DOCVEC2 = [2, 2]
ABSTRACT_DOCVEC1 = {
**MANUSCRIPT_ID_FIELDS1,
LDA_DOCVEC_COLUMN: DOCVEC1
}
ABSTRACT_DOCVEC2 = {
**MANUSCRIPT_ID_FIELDS2,
LDA_DOCVEC_COLUMN: DOCVEC2
}
DOI1 = 'doi/1'
AUTHOR1 = {
**MANUSCRIPT_ID_FIELDS1,
PERSON_ID: PERSON_ID1,
'seq': 0,
'is_corresponding_author': False
}
AUTHOR2 = {
**AUTHOR1,
**MANUSCRIPT_ID_FIELDS1,
PERSON_ID: PERSON_ID2
}
AUTHOR3 = {
**AUTHOR1,
**MANUSCRIPT_ID_FIELDS1,
PERSON_ID: PERSON_ID3
}
STAGE_CONTACTING_REVIEWERS = 'Contacting Reviewers'
STAGE_REVIEW_ACCEPTED = 'Reviewers Accept'
STAGE_REVIEW_DECLINE = 'Reviewers Decline'
STAGE_REVIEW_COMPLETE = 'Review Received'
MANUSCRIPT_HISTORY_REVIEW_COMPLETE1 = {
**MANUSCRIPT_ID_FIELDS1,
'stage_name': STAGE_REVIEW_COMPLETE,
'stage_timestamp': pd.Timestamp('2017-01-01'),
PERSON_ID: PERSON_ID1
}
KEYWORD_SEARCH1 = {
'keywords': [KEYWORD1]
}
EARLY_CAREER_RESEARCHER_WITH_SUBJECT_AREAS_DATASET = {
'person': [{
**PERSON1,
'is_early_career_researcher': True
}, {
**PERSON2,
'is_early_career_researcher': True
}],
'person_subject_area': [{
'person_id': PERSON_ID1,
'subject_area': SUBJECT_AREA1
}, {
'person_id': PERSON_ID2,
'subject_area': SUBJECT_AREA2
}]
}
class PersonRoles:
    """Role-name constants used when building person fixtures."""
    SENIOR_EDITOR = 'Senior Editor'
    OTHER = 'Other'
# Shared pretty-printer for debug output of recommendation results.
PP = pprint.PrettyPrinter(indent=2, width=40)
def setup_module():
    """Enable debug-level logging before any test in this module runs."""
    logging.basicConfig(level=logging.DEBUG)
    set_debugv_enabled(True)
    logging.getLogger().setLevel(logging.DEBUG)
def get_logger():
    """Return the logger shared by the tests in this module."""
    test_logger = logging.getLogger('test')
    return test_logger
@pytest.fixture(name='logger')
def _logger_fixture():
    """Pytest fixture exposing the shared test logger as ``logger``."""
    return get_logger()
@contextmanager
def create_recommend_reviewers(dataset, filter_by_subject_area_enabled=False):
    """Yield a RecommendReviewers wired to an in-memory DB populated from *dataset*.

    The database and dependent models are created on entry and torn down
    on exit of the context.
    """
    logger = get_logger()
    with populated_in_memory_database(dataset) as db:
        # Dump the derived review views to help debug failing expectations.
        logger.debug("view manuscript_person_review_times:\n%s",
                     db.manuscript_person_review_times.read_frame())
        logger.debug("view person_review_stats_overall:\n%s",
                     db.person_review_stats_overall.read_frame())
        manuscript_model = ManuscriptModel(
            db,
            valid_decisions=VALID_DECISIONS,
            valid_manuscript_types=VALID_MANUSCRIPT_TYPES,
            published_decisions=PUBLISHED_DECISIONS,
            published_manuscript_types=PUBLISHED_MANUSCRIPT_TYPES
        )
        similarity_model = DocumentSimilarityModel(
            db,
            manuscript_model=manuscript_model
        )
        yield RecommendReviewers(
            db, manuscript_model=manuscript_model, similarity_model=similarity_model,
            filter_by_subject_area_enabled=filter_by_subject_area_enabled
        )
def recommend_for_dataset(dataset, filter_by_subject_area_enabled=False, **kwargs):
    """Run one recommendation query against *dataset* and return the result dict.

    Extra keyword arguments are forwarded to RecommendReviewers.recommend.
    """
    with create_recommend_reviewers(
        dataset,
        filter_by_subject_area_enabled=filter_by_subject_area_enabled) as recommend_reviewers:
        result = recommend_reviewers.recommend(**kwargs)
        get_logger().debug("result: %s", PP.pformat(result))
        return result
def _potential_reviewers_person_ids(potential_reviewers):
    """Extract every potential reviewer's person id, preserving order."""
    person_ids = []
    for reviewer in potential_reviewers:
        person_ids.append(reviewer['person'][PERSON_ID])
    return person_ids
def _potential_reviewer_scores_by_person_id(potential_reviewers):
    """Map each potential reviewer's person id to their keyword score."""
    scores = {}
    for reviewer in potential_reviewers:
        scores[reviewer['person'][PERSON_ID]] = reviewer['scores']['keyword']
    return scores
def _potential_reviewer_related_version_ids(potential_reviewers, relationship_type):
    """Map each reviewer's person id to the set of version ids related via *relationship_type*."""
    result = {}
    for reviewer in potential_reviewers:
        by_type = reviewer.get('related_manuscript_version_ids_by_relationship_type', {})
        result[reviewer['person'][PERSON_ID]] = set(by_type.get(relationship_type, []))
    return result
def _review_complete_stages(id_fields, contacted, accepted, reviewed):
    """Build the three manuscript stages of a review that ran to completion."""
    stage_times = [
        (STAGE_CONTACTING_REVIEWERS, contacted),
        (STAGE_REVIEW_ACCEPTED, accepted),
        (STAGE_REVIEW_COMPLETE, reviewed),
    ]
    return [
        dict(id_fields, stage_name=name, stage_timestamp=timestamp)
        for name, timestamp in stage_times
    ]
def _declined_stages(id_fields, contacted, declined):
    """Build the two manuscript stages of a declined review request."""
    return [
        dict(id_fields, stage_name=STAGE_CONTACTING_REVIEWERS, stage_timestamp=contacted),
        dict(id_fields, stage_name=STAGE_REVIEW_DECLINE, stage_timestamp=declined),
    ]
def _awaiting_accept_stages(id_fields, contacted):
    """Build the single stage of a review request still awaiting acceptance."""
    return [
        dict(id_fields, stage_name=STAGE_CONTACTING_REVIEWERS, stage_timestamp=contacted),
    ]
def _awaiting_review_stages(id_fields, contacted, accepted):
    """Build the two stages of an accepted review whose report is still pending."""
    return [
        dict(id_fields, stage_name=STAGE_CONTACTING_REVIEWERS, stage_timestamp=contacted),
        dict(id_fields, stage_name=STAGE_REVIEW_ACCEPTED, stage_timestamp=accepted),
    ]
@pytest.mark.slow
class TestRecommendReviewers:
class TestRecommendReviewersRegular:
def test_no_match(self):
dataset = {
'person': [PERSON1],
'manuscript_version': [MANUSCRIPT_VERSION1],
'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
}
result = recommend_for_dataset(dataset, keywords='', manuscript_no='unknown')
assert result['matching_manuscripts'] == []
assert result['potential_reviewers'] == []
def test_matching_manuscript(self):
dataset = {
'person': [PERSON1],
'manuscript_version': [MANUSCRIPT_VERSION1],
'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
}
result = recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1)
assert result == {
'potential_reviewers': [],
'related_manuscript_by_version_id': {},
'matching_manuscripts': [{
**MANUSCRIPT_VERSION1_RESULT
}]
}
def test_matching_manuscript_should_include_subject_areas(self):
dataset = {
'person': [PERSON1],
'manuscript_version': [MANUSCRIPT_VERSION1],
'manuscript_keyword': [MANUSCRIPT_KEYWORD1],
'manuscript_subject_area': [
MANUSCRIPT_SUBJECT_AREA1,
MANUSCRIPT_SUBJECT_AREA2
]
}
result = recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1)
subject_areas = result['matching_manuscripts'][0]['subject_areas']
assert subject_areas == [SUBJECT_AREA1, SUBJECT_AREA2]
def test_should_not_fail_for_manuscript_with_docvecs(self):
dataset = {
'person': [PERSON1],
'manuscript_version': [MANUSCRIPT_VERSION1],
'manuscript_keyword': [MANUSCRIPT_KEYWORD1],
'ml_manuscript_data': [ABSTRACT_DOCVEC1]
}
recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1)
def test_should_not_fail_for_manuscript_with_partial_docvecs(self):
dataset = {
'person': [PERSON1],
'manuscript_version': [MANUSCRIPT_VERSION1],
'manuscript_keyword': [MANUSCRIPT_KEYWORD1],
'ml_manuscript_data': [
ABSTRACT_DOCVEC1, {
**ABSTRACT_DOCVEC2,
LDA_DOCVEC_COLUMN: None
}
]
}
recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1)
def test_search_should_filter_early_career_reviewer_by_subject_area(self):
dataset = EARLY_CAREER_RESEARCHER_WITH_SUBJECT_AREAS_DATASET
result = recommend_for_dataset(
dataset,
subject_area=SUBJECT_AREA1, keywords=None, manuscript_no=None
)
recommended_person_ids = [
(r['person'][PERSON_ID], r['person'].get('is_early_career_researcher'))
for r in result['potential_reviewers']
]
assert recommended_person_ids == [(PERSON_ID1, True)]
def test_search_should_not_filter_early_career_reviewer_by_subject_area_if_blank(self):
dataset = EARLY_CAREER_RESEARCHER_WITH_SUBJECT_AREAS_DATASET
result = recommend_for_dataset(
dataset,
subject_area=None, keywords=KEYWORD1, manuscript_no=None
)
recommended_person_ids = [
(r['person'][PERSON_ID], r['person'].get('is_early_career_researcher'))
for r in result['potential_reviewers']
]
assert (
set(recommended_person_ids) ==
{(PERSON_ID1, True), (PERSON_ID2, True)}
)
def test_matching_manuscript_should_filter_early_career_reviewer_by_subject_area(self):
dataset = {
**EARLY_CAREER_RESEARCHER_WITH_SUBJECT_AREAS_DATASET,
'person': (
EARLY_CAREER_RESEARCHER_WITH_SUBJECT_AREAS_DATASET['person'] +
[PERSON3]
),
'manuscript_version': [MANUSCRIPT_VERSION1],
'manuscript_author': [{**AUTHOR3, **MANUSCRIPT_ID_FIELDS1}],
'manuscript_subject_area': [MANUSCRIPT_SUBJECT_AREA1]
}
result = recommend_for_dataset(
dataset, filter_by_subject_area_enabled=False,
subject_area=None, keywords=None, manuscript_no=MANUSCRIPT_ID1
)
recommended_person_ids = [
(r['person'][PERSON_ID], r['person'].get('is_early_career_researcher'))
for r in result['potential_reviewers']
]
assert recommended_person_ids == [(PERSON_ID1, True)]
def test_matching_manuscript_should_return_draft_version_with_authors(self):
dataset = {
'person': [PERSON1],
'manuscript_version': [{
**MANUSCRIPT_VERSION1,
'decision': Decisions.REJECTED
}],
'manuscript_author': [AUTHOR1],
'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
}
result = recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1)
assert [m[MANUSCRIPT_ID] for m in result['matching_manuscripts']] == [MANUSCRIPT_ID1]
assert [p[PERSON_ID]
for p in result['matching_manuscripts'][0]['authors']] == [PERSON_ID1]
def test_matching_manuscript_should_return_multiple_authors(self):
dataset = {
'person': [PERSON1, PERSON2],
'manuscript_version': [MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2],
'manuscript_author': [
AUTHOR1,
{**AUTHOR1, **MANUSCRIPT_ID_FIELDS2},
{**AUTHOR2, **MANUSCRIPT_ID_FIELDS1}
]
}
result = recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1)
author_person_ids = [p[PERSON_ID] for p in result['matching_manuscripts'][0]['authors']]
assert set(author_person_ids) == set([PERSON_ID1, PERSON_ID2])
def test_matching_manuscript_should_indicate_corresponding_authors(self):
dataset = {
'person': [PERSON1, PERSON2],
'manuscript_version': [MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2],
'manuscript_author': [
{
**AUTHOR1,
'is_corresponding_author': True
},
{
**AUTHOR2,
**MANUSCRIPT_ID_FIELDS1,
'is_corresponding_author': False
},
{
# make author1 not the corresponding author of another manuscript
**AUTHOR1,
**MANUSCRIPT_ID_FIELDS2,
'is_corresponding_author': False
}
]
}
result = recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1)
authors = sorted(result['matching_manuscripts'][0]
['authors'], key=lambda p: p[PERSON_ID])
author_summary = [(p[PERSON_ID], p.get('is_corresponding_author')) for p in authors]
assert author_summary == [(PERSON_ID1, True), (PERSON_ID2, False)]
def test_matching_manuscript_should_not_recommend_its_authors(self):
dataset = {
'person': [PERSON1, PERSON2],
'manuscript_version': [MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2],
'manuscript_keyword': [
MANUSCRIPT_KEYWORD1,
{**MANUSCRIPT_KEYWORD1, **MANUSCRIPT_ID_FIELDS2}
],
'manuscript_author': [
AUTHOR1,
{**AUTHOR1, **MANUSCRIPT_ID_FIELDS2},
{**AUTHOR2, **MANUSCRIPT_ID_FIELDS2}
]
}
result = recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1)
recommended_person_ids = [r['person'][PERSON_ID] for r in result['potential_reviewers']]
assert recommended_person_ids == [PERSON_ID2]
def _do_test_matching_manuscript_should_filter_by_subject_areas_if_enabled(
self, filter_by_subject_area_enabled):
dataset = {
'person': [PERSON1, PERSON2, PERSON3],
'manuscript_version': [
MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2, MANUSCRIPT_VERSION3
],
'manuscript_keyword': [
MANUSCRIPT_KEYWORD1,
{**MANUSCRIPT_KEYWORD1, **MANUSCRIPT_ID_FIELDS2},
{**MANUSCRIPT_KEYWORD1, **MANUSCRIPT_ID_FIELDS3}
],
'manuscript_subject_area': [
MANUSCRIPT_SUBJECT_AREA1,
{**MANUSCRIPT_SUBJECT_AREA2, **MANUSCRIPT_ID_FIELDS2},
{**MANUSCRIPT_SUBJECT_AREA1, **MANUSCRIPT_ID_FIELDS3}
],
'manuscript_author': [
AUTHOR1,
{**AUTHOR2, **MANUSCRIPT_ID_FIELDS2},
{**AUTHOR3, **MANUSCRIPT_ID_FIELDS3}
]
}
result = recommend_for_dataset(
dataset, filter_by_subject_area_enabled=filter_by_subject_area_enabled,
keywords='', manuscript_no=MANUSCRIPT_ID1
)
recommended_person_ids = [r['person'][PERSON_ID] for r in result['potential_reviewers']]
if filter_by_subject_area_enabled:
assert recommended_person_ids == [PERSON_ID3]
else:
assert set(recommended_person_ids) == {PERSON_ID2, PERSON_ID3}
def test_matching_manuscript_should_filter_by_subject_areas_if_enabled(self):
self._do_test_matching_manuscript_should_filter_by_subject_areas_if_enabled(
filter_by_subject_area_enabled=True
)
def test_matching_manuscript_should_not_filter_by_subject_areas_if_disabled(self):
self._do_test_matching_manuscript_should_filter_by_subject_areas_if_enabled(
filter_by_subject_area_enabled=False
)
def test_matching_manuscript_should_filter_by_search_subject_area_only(self):
dataset = {
'person': [PERSON2, PERSON3],
'manuscript_version': [MANUSCRIPT_VERSION2, MANUSCRIPT_VERSION3],
'manuscript_subject_area': [
MANUSCRIPT_SUBJECT_AREA1,
{
**MANUSCRIPT_SUBJECT_AREA2,
**MANUSCRIPT_ID_FIELDS2
},
{
**MANUSCRIPT_SUBJECT_AREA1,
**MANUSCRIPT_ID_FIELDS3
}
],
'manuscript_author': [
{
**AUTHOR2,
**MANUSCRIPT_ID_FIELDS2
},
{
**AUTHOR3,
**MANUSCRIPT_ID_FIELDS3
}
]
}
result = recommend_for_dataset(
dataset, filter_by_subject_area_enabled=False,
keywords='', subject_area=SUBJECT_AREA1
)
recommended_person_ids = [r['person'][PERSON_ID] for r in result['potential_reviewers']]
assert recommended_person_ids == [PERSON_ID3]
def test_matching_one_keyword_author_should_return_author(self):
dataset = {
'person': [PERSON1],
'manuscript_version': [MANUSCRIPT_VERSION1],
'manuscript_author': [AUTHOR1],
'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
}
result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='')
assert [r['person'][PERSON_ID] for r in result['potential_reviewers']] == [PERSON_ID1]
def test_matching_one_keyword_should_not_fail_on_unset_first_and_last_name(self):
# Note: use two persons to trigger sort
dataset = {
'person': [
PERSON1,
{**PERSON2, 'first_name': None, 'last_name': None}
],
'manuscript_version': [MANUSCRIPT_VERSION1],
'manuscript_author': [AUTHOR1, AUTHOR2],
'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
}
result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='')
assert (
{r['person'][PERSON_ID] for r in result['potential_reviewers']} ==
{PERSON_ID1, PERSON_ID2}
)
    def test_matching_one_keyword_author_should_not_suggest_authors_of_rejected_manuscripts(
            self):
        """Authors of rejected, unpublished manuscripts are excluded."""
        dataset = {
            'person': [PERSON1],
            'manuscript_version': [{
                **MANUSCRIPT_VERSION1,
                'decision': Decisions.REJECTED,
                'is_published': None
            }],
            'manuscript_author': [AUTHOR1],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
        }
        result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='')
        assert result['potential_reviewers'] == []
    def test_matching_one_keyword_author_should_suggest_reviewers_of_rejected_manuscripts(self):
        """Rejection does not disqualify the people who reviewed the manuscript."""
        dataset = {
            'person': [PERSON1],
            'manuscript_version': [{
                **MANUSCRIPT_VERSION1,
                'decision': Decisions.REJECTED
            }],
            'manuscript_stage': _review_complete_stages(
                {**MANUSCRIPT_ID_FIELDS1, PERSON_ID: PERSON_ID1},
                contacted=pd.Timestamp('2017-01-01'),
                accepted=pd.Timestamp('2017-01-02'),
                reviewed=pd.Timestamp('2017-01-03')
            ),
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
        }
        result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='')
        assert _potential_reviewers_person_ids(result['potential_reviewers']) == [PERSON_ID1]
    def test_matching_author_should_suggest_authors_with_unknown_decision_if_published(
            self):
        """A published manuscript counts even when the decision field is unset."""
        dataset = {
            'person': [PERSON1],
            'manuscript_version': [{
                **MANUSCRIPT_VERSION1,
                'decision': None,
                'is_published': True
            }],
            'manuscript_author': [AUTHOR1],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
        }
        result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='')
        assert _potential_reviewers_person_ids(result['potential_reviewers']) == [PERSON_ID1]
    def test_should_return_manuscript_scores_by_version_id(self):
        """Per-manuscript combined/keyword/similarity scores are exposed."""
        dataset = {
            'person': [PERSON1],
            'manuscript_version': [MANUSCRIPT_VERSION1],
            'manuscript_author': [AUTHOR1],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
        }
        result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='')
        assert _potential_reviewers_person_ids(result['potential_reviewers']) == [PERSON_ID1]
        related_manuscript_by_version_id = result['related_manuscript_by_version_id']
        # one keyword out of one matches -> keyword and combined scores are 1.0
        assert related_manuscript_by_version_id[MANUSCRIPT_VERSION_ID1].get('score') == {
            'combined': 1.0,
            'keyword': 1.0,
            'similarity': None
        }
    def test_should_return_decision_timestamp_as_published_timestamp(self):
        """``decision_timestamp`` is surfaced as ``published_timestamp``."""
        dataset = {
            'person': [PERSON1],
            'manuscript_version': [{
                **MANUSCRIPT_VERSION1,
                'decision_timestamp': pd.Timestamp('2017-01-01')
            }],
            'manuscript_author': [AUTHOR1],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
        }
        result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='')
        assert _potential_reviewers_person_ids(result['potential_reviewers']) == [PERSON_ID1]
        related_manuscript_by_version_id = result['related_manuscript_by_version_id']
        assert (
            related_manuscript_by_version_id[
                MANUSCRIPT_VERSION_ID1
            ].get('published_timestamp') ==
            pd.Timestamp('2017-01-01')
        )
    def test_should_return_created_timestamp_as_published_timestamp(self):
        """``created_timestamp`` also maps to ``published_timestamp``."""
        dataset = {
            'person': [PERSON1],
            'manuscript_version': [{
                **MANUSCRIPT_VERSION1,
                'created_timestamp': pd.Timestamp('2017-01-01')
            }],
            'manuscript_author': [AUTHOR1],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
        }
        result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='')
        assert _potential_reviewers_person_ids(result['potential_reviewers']) == [PERSON_ID1]
        related_manuscript_by_version_id = result['related_manuscript_by_version_id']
        assert (
            related_manuscript_by_version_id[
                MANUSCRIPT_VERSION_ID1
            ].get('published_timestamp') ==
            pd.Timestamp('2017-01-01')
        )
    def test_matching_one_keyword_author_should_return_stats(self, logger):
        """Review-duration and workload stats are aggregated per reviewer."""
        dataset = {
            'person': [PERSON1],
            'manuscript_version': [
                MANUSCRIPT_VERSION1,
                MANUSCRIPT_VERSION2,
                MANUSCRIPT_VERSION3,
                MANUSCRIPT_VERSION4,
                MANUSCRIPT_VERSION5
            ],
            'manuscript_author': [AUTHOR1],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1],
            # add two review durations (two stages each)
            # also add an open review (accepted)
            'manuscript_stage': (
                _review_complete_stages(
                    {
                        **MANUSCRIPT_ID_FIELDS1,
                        PERSON_ID: PERSON_ID1
                    },
                    contacted=pd.Timestamp('2017-01-01'),
                    accepted=pd.Timestamp('2017-01-02'),
                    reviewed=pd.Timestamp('2017-01-03')
                ) +
                _review_complete_stages(
                    {
                        **MANUSCRIPT_ID_FIELDS2,
                        PERSON_ID: PERSON_ID1
                    },
                    contacted=pd.Timestamp('2017-02-01'),
                    accepted=pd.Timestamp('2017-02-02'),
                    reviewed=pd.Timestamp('2017-02-04')
                ) +
                _awaiting_accept_stages(
                    {
                        **MANUSCRIPT_ID_FIELDS3,
                        PERSON_ID: PERSON_ID1
                    },
                    contacted=pd.Timestamp('2017-02-01')
                ) +
                _awaiting_review_stages(
                    {
                        **MANUSCRIPT_ID_FIELDS4,
                        PERSON_ID: PERSON_ID1
                    },
                    contacted=pd.Timestamp('2017-02-01'),
                    accepted=pd.Timestamp('2017-02-02')
                ) +
                _declined_stages(
                    {
                        **MANUSCRIPT_ID_FIELDS5,
                        PERSON_ID: PERSON_ID1
                    },
                    contacted=pd.Timestamp('2017-02-01'),
                    declined=pd.Timestamp('2017-02-02')
                )
            )
        }
        result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='')
        # expected durations: 1 day (Jan review) and 2 days (Feb review)
        review_duration = {
            'min': 1.0,
            'mean': 1.5,
            'max': 2,
            'count': 2
        }
        overall_stats = {
            'review_duration': review_duration,
            'reviews_in_progress': 1,
            'waiting_to_be_accepted': 1,
            'declined': 1
        }
        result_person = result['potential_reviewers'][0]['person']
        logger.debug("result_person: %s", PP.pformat(result_person))
        assert result_person['stats'] == {
            'overall': overall_stats,
            'last_12m': overall_stats
        }
    def test_matching_one_keyword_author_should_return_memberships(self, logger):
        """Person membership records are included in the recommendation output."""
        dataset = {
            'person': [PERSON1],
            'person_membership': [MEMBERSHIP1],
            'manuscript_version': [MANUSCRIPT_VERSION1],
            'manuscript_author': [AUTHOR1],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
        }
        result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='')
        result_person = result['potential_reviewers'][0]['person']
        logger.debug("result_person: %s", PP.pformat(result_person))
        assert result_person.get('memberships') == [MEMBERSHIP1_RESULT]
    def test_matching_one_keyword_author_should_return_other_accepted_papers(self, logger):
        """Other accepted manuscripts by the same author are listed as related."""
        dataset = {
            'person': [PERSON1],
            'manuscript_version': [
                MANUSCRIPT_VERSION1, {
                    **MANUSCRIPT_VERSION2,
                    'decision': Decisions.ACCEPTED
                }
            ],
            'manuscript_author': [
                AUTHOR1, {
                    **AUTHOR1,
                    **MANUSCRIPT_ID_FIELDS2
                }
            ],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
        }
        result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='')
        potential_reviewers = result['potential_reviewers']
        author_of_manuscript_ids_by_person_id = _potential_reviewer_related_version_ids(
            potential_reviewers, RelationshipTypes.AUTHOR
        )
        logger.debug("author_of_manuscript_ids_by_person_id: %s",
                     author_of_manuscript_ids_by_person_id)
        assert author_of_manuscript_ids_by_person_id == {
            PERSON_ID1: {
                MANUSCRIPT_VERSION_ID1,
                MANUSCRIPT_VERSION_ID2
            }
        }
        assert result['related_manuscript_by_version_id'].keys() == {
            MANUSCRIPT_VERSION_ID1, MANUSCRIPT_VERSION_ID2
        }
    def test_matching_one_keyword_author_should_not_return_other_draft_papers(self, logger):
        """Rejected, unpublished manuscripts are not listed as related papers."""
        dataset = {
            'person': [PERSON1],
            'manuscript_version': [
                MANUSCRIPT_VERSION1, {
                    **MANUSCRIPT_VERSION2,
                    'decision': Decisions.REJECTED,
                    'is_published': None
                }
            ],
            'manuscript_author': [
                AUTHOR1, {
                    **AUTHOR1,
                    **MANUSCRIPT_ID_FIELDS2
                }
            ],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
        }
        result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='')
        potential_reviewers = result['potential_reviewers']
        author_of_manuscript_ids_by_person_id = _potential_reviewer_related_version_ids(
            potential_reviewers, RelationshipTypes.AUTHOR
        )
        logger.debug(
            "author_of_manuscript_ids_by_person_id: %s", author_of_manuscript_ids_by_person_id
        )
        assert author_of_manuscript_ids_by_person_id == {
            PERSON_ID1: {
                MANUSCRIPT_VERSION_ID1
            }
        }
        assert result['related_manuscript_by_version_id'].keys() == {
            MANUSCRIPT_VERSION_ID1
        }
def test_should_consider_previous_reviewer_as_potential_reviewer(self):
dataset = {
'person': [PERSON1],
'manuscript_version': [MANUSCRIPT_VERSION1],
'manuscript_stage': [MANUSCRIPT_HISTORY_REVIEW_COMPLETE1],
'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
}
result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='')
assert [r['person'][PERSON_ID] for r in result['potential_reviewers']] == [PERSON_ID1]
    def test_should_return_reviewer_as_potential_reviewer_only_once(self):
        """Multiple review-complete stages for one person must de-duplicate."""
        dataset = {
            'person': [PERSON1],
            'manuscript_version': [MANUSCRIPT_VERSION1],
            'manuscript_stage': [
                {
                    **MANUSCRIPT_HISTORY_REVIEW_COMPLETE1,
                    'stage_timestamp': pd.Timestamp('2017-01-01'),
                },
                {
                    **MANUSCRIPT_HISTORY_REVIEW_COMPLETE1,
                    'stage_timestamp': pd.Timestamp('2017-01-02'),
                }
            ],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
        }
        result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='')
        assert [
            r['person'][PERSON_ID] for r in result['potential_reviewers']
        ] == [PERSON_ID1]
class TestRecommendReviewersByRole:
    """Recommendations restricted to persons holding a given role
    (e.g. senior editor), searched via keywords or manuscript number.
    """
    def test_should_not_recommend_regular_reviewer_when_searching_for_senior_editor_via_keyword(
            self):
        """A person without any role must not match a senior-editor search."""
        # a regular reviewer doesn't have a role
        dataset = {
            'person': [PERSON1],
            'manuscript_version': [MANUSCRIPT_VERSION1],
            'manuscript_author': [AUTHOR1],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
        }
        result = recommend_for_dataset(
            dataset, keywords=KEYWORD1, manuscript_no=None,
            role=PersonRoles.SENIOR_EDITOR
        )
        person_ids = _potential_reviewers_person_ids(result['potential_reviewers'])
        assert person_ids == []
    def test_should_not_recommend_regular_reviewer_when_searching_for_senior_editor_via_man_no(
            self):
        """Same exclusion when searching via manuscript number."""
        # a regular reviewer doesn't have a role
        dataset = {
            'person': [PERSON1],
            'manuscript_version': [MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2],
            'manuscript_author': [AUTHOR1],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1, {
                **MANUSCRIPT_KEYWORD1,
                **MANUSCRIPT_ID_FIELDS2
            }]
        }
        result = recommend_for_dataset(
            dataset, keywords=None, manuscript_no=MANUSCRIPT_ID2,
            role=PersonRoles.SENIOR_EDITOR
        )
        person_ids = _potential_reviewers_person_ids(result['potential_reviewers'])
        assert person_ids == []
    def test_should_not_recommend_reviewer_with_other_role_when_searching_for_senior_editor(
            self):
        """A person with a different role must not match a senior-editor search."""
        dataset = {
            'person': [PERSON1],
            'person_role': [{PERSON_ID: PERSON_ID1, 'role': PersonRoles.OTHER}],
            'manuscript_version': [MANUSCRIPT_VERSION1],
            'manuscript_author': [AUTHOR1],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
        }
        result = recommend_for_dataset(
            dataset, keywords=KEYWORD1, manuscript_no=None,
            role=PersonRoles.SENIOR_EDITOR
        )
        person_ids = _potential_reviewers_person_ids(result['potential_reviewers'])
        assert person_ids == []
    def test_should_recommend_senior_editor_based_on_manuscript_keyword(self):
        """A senior editor authoring a keyword-matching manuscript is recommended."""
        dataset = {
            'person': [PERSON1],
            'person_role': [{PERSON_ID: PERSON_ID1, 'role': PersonRoles.SENIOR_EDITOR}],
            'manuscript_version': [MANUSCRIPT_VERSION1],
            'manuscript_author': [AUTHOR1],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
        }
        result = recommend_for_dataset(
            dataset, keywords=KEYWORD1, manuscript_no=None,
            role=PersonRoles.SENIOR_EDITOR
        )
        person_ids = _potential_reviewers_person_ids(result['potential_reviewers'])
        assert person_ids == [PERSON_ID1]
    def test_should_recommend_senior_editor_based_on_manuscript_keyword_via_manuscript_no(self):
        """Senior editor is found when searching by manuscript number."""
        dataset = {
            'person': [PERSON1],
            'person_role': [{PERSON_ID: PERSON_ID1, 'role': PersonRoles.SENIOR_EDITOR}],
            'manuscript_version': [MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2],
            'manuscript_author': [AUTHOR1],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1, {
                **MANUSCRIPT_KEYWORD1,
                **MANUSCRIPT_ID_FIELDS2
            }]
        }
        result = recommend_for_dataset(
            dataset, keywords=None, manuscript_no=MANUSCRIPT_ID2,
            role=PersonRoles.SENIOR_EDITOR
        )
        person_ids = _potential_reviewers_person_ids(result['potential_reviewers'])
        assert person_ids == [PERSON_ID1]
    def test_should_recommend_previous_senior_editors_and_reflect_in_score(self):
        """Having been senior editor of a related manuscript yields a full score."""
        dataset = {
            'person': [PERSON1],
            'person_role': [{PERSON_ID: PERSON_ID1, 'role': PersonRoles.SENIOR_EDITOR}],
            'manuscript_version': [MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2],
            'manuscript_senior_editor': [{**MANUSCRIPT_ID_FIELDS1, 'person_id': PERSON_ID1}],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1, {
                **MANUSCRIPT_KEYWORD1,
                **MANUSCRIPT_ID_FIELDS2
            }]
        }
        result = recommend_for_dataset(
            dataset, keywords=None, manuscript_no=MANUSCRIPT_ID2,
            role=PersonRoles.SENIOR_EDITOR,
            recommend_relationship_types=[
                RelationshipTypes.AUTHOR,
                RelationshipTypes.EDITOR,
                RelationshipTypes.SENIOR_EDITOR,
                RelationshipTypes.REVIEWER
            ]
        )
        potential_reviewers = result['potential_reviewers']
        person_ids = _potential_reviewers_person_ids(potential_reviewers)
        assert person_ids == [PERSON_ID1]
        assert _potential_reviewer_scores_by_person_id(potential_reviewers) == {
            PERSON_ID1: 1.0
        }
    def test_should_return_manuscript_ids_person_has_senior_editor_of(self):
        """Related manuscripts are reported per relationship type."""
        dataset = {
            'person': [PERSON1, PERSON2, PERSON3],
            'person_role': [{PERSON_ID: PERSON_ID1, 'role': PersonRoles.SENIOR_EDITOR}],
            'manuscript_version': [
                MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2, MANUSCRIPT_VERSION3
            ],
            'manuscript_author': [{**MANUSCRIPT_ID_FIELDS3, 'person_id': PERSON_ID1}],
            'manuscript_editor': [{**MANUSCRIPT_ID_FIELDS2, 'person_id': PERSON_ID1}],
            'manuscript_senior_editor': [{**MANUSCRIPT_ID_FIELDS1, 'person_id': PERSON_ID1}],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1, {
                **MANUSCRIPT_KEYWORD1,
                **MANUSCRIPT_ID_FIELDS2
            }]
        }
        result = recommend_for_dataset(
            dataset, keywords=KEYWORD1, manuscript_no=None,
            role=PersonRoles.SENIOR_EDITOR,
            recommend_relationship_types=[
                RelationshipTypes.SENIOR_EDITOR
            ],
            return_relationship_types=[
                RelationshipTypes.SENIOR_EDITOR,
                RelationshipTypes.EDITOR,
                RelationshipTypes.AUTHOR
            ]
        )
        potential_reviewers = result['potential_reviewers']
        person_ids = _potential_reviewers_person_ids(potential_reviewers)
        assert person_ids == [PERSON_ID1]
        assert _potential_reviewer_related_version_ids(
            potential_reviewers, RelationshipTypes.SENIOR_EDITOR
        ) == {PERSON_ID1: {MANUSCRIPT_VERSION_ID1}}
        assert _potential_reviewer_related_version_ids(
            potential_reviewers, RelationshipTypes.EDITOR
        ) == {PERSON_ID1: {MANUSCRIPT_VERSION_ID2}}
        assert _potential_reviewer_related_version_ids(
            potential_reviewers, RelationshipTypes.AUTHOR
        ) == {PERSON_ID1: {MANUSCRIPT_VERSION_ID3}}
        assert result['related_manuscript_by_version_id'].keys() == {
            MANUSCRIPT_VERSION_ID1, MANUSCRIPT_VERSION_ID2, MANUSCRIPT_VERSION_ID3
        }
    def test_should_recommend_based_on_stage_name(self):
        """Custom stage names can be configured as a recommendation source."""
        custom_stage = 'custom_stage'
        dataset = {
            'person': [PERSON1],
            'person_role': [{PERSON_ID: PERSON_ID1, 'role': PersonRoles.SENIOR_EDITOR}],
            'manuscript_version': [MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2],
            'manuscript_stage': [
                {
                    **MANUSCRIPT_ID_FIELDS1,
                    'person_id': PERSON_ID1,
                    'stage_timestamp': pd.Timestamp('2017-01-01'),
                    'stage_name': custom_stage
                }
            ],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1, {
                **MANUSCRIPT_KEYWORD1,
                **MANUSCRIPT_ID_FIELDS2
            }]
        }
        result = recommend_for_dataset(
            dataset, keywords=None, manuscript_no=MANUSCRIPT_ID2,
            role=PersonRoles.SENIOR_EDITOR,
            recommend_stage_names=[
                custom_stage
            ]
        )
        potential_reviewers = result['potential_reviewers']
        person_ids = _potential_reviewers_person_ids(potential_reviewers)
        assert person_ids == [PERSON_ID1]
    def test_should_recommend_senior_editor_based_on_person_keyword_and_reflect_in_score(self):
        """Person-level keywords alone can drive a full-score recommendation."""
        dataset = {
            'person': [PERSON1],
            'person_role': [{PERSON_ID: PERSON_ID1, 'role': PersonRoles.SENIOR_EDITOR}],
            'person_keyword': [{PERSON_ID: PERSON_ID1, 'keyword': KEYWORD1}]
        }
        result = recommend_for_dataset(
            dataset, keywords=KEYWORD1, manuscript_no=None,
            role=PersonRoles.SENIOR_EDITOR
        )
        potential_reviewers = result['potential_reviewers']
        person_ids = _potential_reviewers_person_ids(potential_reviewers)
        assert person_ids == [PERSON_ID1]
        assert _potential_reviewer_scores_by_person_id(potential_reviewers) == {
            PERSON_ID1: 1.0
        }
class TestAllKeywords:
    """Tests for the get_all_keywords() accessor."""
    def test_should_include_manuscript_keywords_in_all_keywords(self):
        """Keywords attached to manuscripts appear in the global keyword list."""
        dataset = {
            'manuscript_version': [MANUSCRIPT_VERSION1],
            'manuscript_keyword': [MANUSCRIPT_KEYWORD1]
        }
        with create_recommend_reviewers(dataset) as recommend_reviewers:
            assert recommend_reviewers.get_all_keywords() == [KEYWORD1]
    def test_should_include_person_keywords_in_all_keywords(self):
        """Keywords attached to persons appear in the global keyword list."""
        dataset = {
            'person': [PERSON1],
            'person_keyword': [{PERSON_ID: PERSON_ID1, 'keyword': KEYWORD1}]
        }
        with create_recommend_reviewers(dataset) as recommend_reviewers:
            assert recommend_reviewers.get_all_keywords() == [KEYWORD1]
class TestUserHasRoleByEmail:
    """Tests for the user_has_role_by_email() accessor."""
    def test_should_return_wether_user_has_role(self):
        """Only the exact (email, role) pair yields True; any mismatch is False."""
        dataset = {
            'person': [{**PERSON1, 'email': EMAIL_1}],
            'person_role': [{PERSON_ID: PERSON_ID1, 'role': ROLE_1}]
        }
        with create_recommend_reviewers(dataset) as recommend_reviewers:
            assert recommend_reviewers.user_has_role_by_email(
                email=EMAIL_1, role=ROLE_1) is True
            assert recommend_reviewers.user_has_role_by_email(
                email=EMAIL_1, role='other') is False
            assert recommend_reviewers.user_has_role_by_email(
                email='other', role=ROLE_1) is False
class TestGetUserRolesByEmail:
    """Tests for the get_user_roles_by_email() accessor."""
    def test_should_return_roles_of_existing_user(self):
        """The roles of a known email are returned as a set."""
        dataset = {
            'person': [{**PERSON1, 'email': EMAIL_1}],
            'person_role': [{PERSON_ID: PERSON_ID1, 'role': ROLE_1}]
        }
        with create_recommend_reviewers(dataset) as recommend_reviewers:
            assert recommend_reviewers.get_user_roles_by_email(email=EMAIL_1) == {ROLE_1}
class TestGetManuscriptDetails:
    """Tests for the get_manuscript_details() accessor."""
    def test_should_return_none_if_version_id_is_invalid(self):
        """Unknown version ids yield None rather than raising."""
        dataset = {
            'manuscript_version': [MANUSCRIPT_VERSION2]
        }
        with create_recommend_reviewers(dataset) as recommend_reviewers:
            assert recommend_reviewers.get_manuscript_details(MANUSCRIPT_VERSION_ID1) is None
    def test_should_return_details_if_version_id_is_valid(self):
        """A known version id yields a mapping with version/manuscript id and title."""
        dataset = {
            'manuscript_version': [MANUSCRIPT_VERSION1]
        }
        with create_recommend_reviewers(dataset) as recommend_reviewers:
            manuscript_details = recommend_reviewers.get_manuscript_details(
                MANUSCRIPT_VERSION_ID1)
            assert manuscript_details is not None
            assert manuscript_details.get(VERSION_ID) == MANUSCRIPT_VERSION_ID1
            assert manuscript_details.get('manuscript_id') == MANUSCRIPT_ID1
            assert manuscript_details.get('title') == MANUSCRIPT_VERSION1['title']
| 46,926 | 14,562 |
# Tuple of uppercase words to scan for vowels.
WORDS = 'MACACO', 'ARROZ', 'AZEITONA', 'LASANHA', 'PIZZA', 'CANETA', 'PARALELEPIPEDO', 'ONZE', 'FERNANDO', 'CAIO'
for word in WORDS:
    # Print the header for this word; vowels follow on the same line.
    print(f'\nNA PALAVRA {word} TEMOS AS VOGAIS ', end='')
    for char in word:
        if char in 'AEIOU':
            print(char, end=' ')
| 263 | 122 |
import uuid
from autoslug import AutoSlugField
from django.db import models
from sms.apps.accounts.models import TimeStampedModel
# from django_resized import ResizedImageField
# Create your models here.
class Product(models.Model):
    """Catalogue product with pricing, media files and category links."""
    # Stock status values used by the `status` field.
    IN_STOCK = 1
    OUT_OF_STOCK = 0
    STATUS_CHOICES = (
        (IN_STOCK, 'In stock'),
        (OUT_OF_STOCK, 'Out of stock')
    )
    # Single-letter display label codes (bootstrap-style names).
    LABEL_CHOICES = (
        ('P', 'primary'),
        ('S', 'secondary'),
        ('D', 'danger'),
    )
    status = models.PositiveIntegerField(
        choices=STATUS_CHOICES, default=IN_STOCK
    )
    # NOTE(review): no unique=True/editable=False — duplicate UUIDs are not
    # prevented at the DB level; confirm whether that is intended.
    product_id = models.UUIDField(default=uuid.uuid4)
    name = models.CharField(max_length=200,)
    description = models.TextField(blank=True, null=True)
    label = models.CharField(choices=LABEL_CHOICES, max_length=1)
    category = models.ForeignKey('Category', null=True, blank=True, on_delete=models.CASCADE)
    sub_category = models.ForeignKey('SubCategory', null=True, blank=True, on_delete=models.CASCADE)
    product_image = models.FileField(upload_to='products', null=True, blank=True)
    product_video = models.FileField(upload_to='products', null=True, blank=True)
    tax = models.IntegerField(null=True, blank=True)
    # NOTE(review): monetary amounts as FloatField — DecimalField is the usual
    # choice for money; changing it would require a migration, so only flagging.
    price = models.FloatField(null=True, blank=True)
    shipping_charge = models.FloatField(null=True, blank=True)
    net_amount = models.FloatField(null=True, blank=True)
    offer_percentage = models.IntegerField(null=True, blank=True)
    # category = models.ForeignKey(category)
    def __str__(self):
        return self.name
class Category(TimeStampedModel):
    """Top-level product category (timestamped via TimeStampedModel)."""
    title = models.CharField(max_length=200, blank=True, null=True)
    # NOTE(review): AutoSlugField without populate_from — confirm the intended
    # slug source field.
    slug = AutoSlugField(unique=True,)
    class Meta:
        # newest first; created_date comes from TimeStampedModel
        ordering = ('-created_date',)
        verbose_name = 'category'
        verbose_name_plural = 'categories'
    def __str__(self):
        return self.title
class SubCategory(TimeStampedModel):
    """Second-level product category (timestamped via TimeStampedModel)."""
    title = models.CharField(max_length=200, blank=True, null=True)
    # NOTE(review): AutoSlugField without populate_from — confirm the intended
    # slug source field.
    slug = AutoSlugField(unique=True,)
    class Meta:
        # newest first; created_date comes from TimeStampedModel
        ordering = ('-created_date',)
        verbose_name = 'sub-category'
        verbose_name_plural = 'sub-categories'
    def __str__(self):
        return self.title
| 2,218 | 736 |
import argparse
import logging
import os
import sys
from certifire import app, auth, config, database, db, get_version
from certifire.errors import CertifireError
from certifire.plugins.acme import crypto
from certifire.plugins.acme.models import Account, Certificate, Order
from certifire.plugins.acme.plugin import (create_order, register, reorder,
revoke_certificate)
from certifire.plugins.destinations.models import Destination
from certifire import app
# Module logger; handlers are attached in certifire_main().
logger = logging.getLogger(__name__)
# Text
# Help/description strings rendered verbatim by the argparse CLI below.
DESCRIPTION = \
    """
Certifire {}.
Interact with ACME certification authorities such as Let's Encrypt.
No idea what you're doing? Register an account, authorize your domains and
issue a certificate or two. Call a command with -h for more instructions.
""".format(get_version())
DESCRIPTION_REGISTER = \
    """
Creates a new account key and registers on the server. The resulting --account
is saved in the database, and required for most other operations.
Takes email as required argument
You can pass arguments like organization, organizational_unit, country, state,
and location for csr generations from this account. if not provided, default
values from the config file will be used
You can also pass your own RSA private key if needed
(Provide key size 2048 and above, otherwise the server won't accept it.)
You only have to do this once.
"""
DESCRIPTION_ISSUE = \
    """
Issues a certificate for one or more domains. Firstly, domains passed will be
authorized by the type of authentication specified. If dns authentication
is used, also provide the dns provider. If type and dns provider not passed,
default values will be used from the config file
Takes account_id as required argument
You can pass arguments like organization, organizational_unit, country, state,
and location for csr generations from this account. if not provided, default
values from the account will be used
This will generate a new RSA key and CSR for you. But if you want, you can
bring your own with the --key-file and --csr-file attributes.
(Provide key size 2048 and above, otherwise the server won't accept it.)
The resulting key and certificate are written into the database.
A chained certificate with the intermediate included is also written to databse.
(If you're passing your own CSR, the given domains can be whatever you want.)
Note that unlike many other certification authorities, ACME does not add a
non-www or www alias to certificates. If you want this to happen, add it
yourself. You need to authorize both as well.
Certificate issuance has a server-side rate limit. Don't overdo it.
"""
DESCRIPTION_REVOKE = \
    """
Revokes a certificate. The certificate must have been issued using the
current account.
Takes account_id and certificate_id as required arguments
"""
# Command handlers
def _register(args):
    """Handle the ``register`` sub-command: create an ACME account and
    register it with the server, printing the resulting account id.
    """
    key = None
    if args.key_file:
        # bring-your-own RSA private key; otherwise register() generates one
        with open(args.key_file, 'rb') as f:
            key = crypto.load_private_key(f.read())
    with app.app_context():
        ret, act_id = register(
            user_id=1,
            email=args.email,
            server=args.server,
            rsa_key=key,
            organization=args.organization,
            organizational_unit=args.organizational_unit,
            country=args.country,
            state=args.state,
            location=args.location)
    # ret is falsy when an account with the same email already exists
    if ret:
        print("Account created with account id: {}".format(act_id))
        print("Pass this account id for issue, revoke, etc...")
    else:
        print("Account with same email exists: account id: {}".format(act_id))
def _issue(args):
    """Handle the ``issue`` sub-command: authorize domains and create a
    certificate order, printing the resulting order id.

    Bug fix: a ``--csr-file``, when given, was loaded into ``key`` instead of
    ``csr`` — the CSR was silently discarded (``csr`` stayed None) and any
    ``--key-file`` key was clobbered by the parsed CSR object.
    """
    key = None
    if args.key_file:
        with open(args.key_file, 'rb') as f:
            key = crypto.load_private_key(f.read())
    csr = None
    if args.csr_file:
        with open(args.csr_file, 'rb') as f:
            # was: key = crypto.load_csr(...) — wrong target variable
            csr = crypto.load_csr(f.read())
    with app.app_context():
        ret, order_id = create_order(
            account_id=args.account,
            destination_id=args.destination,
            domains=args.domains,
            type=args.type,
            provider=args.provider,
            email=args.email,
            organization=args.organization,
            organizational_unit=args.organizational_unit,
            country=args.country,
            state=args.state,
            location=args.location,
            reissue=args.reissue,
            csr=csr,
            key=key)
    if ret:
        print("Order created with order id: {}".format(order_id))
    else:
        print("Order creation failed.")
def _revoke(args):
    """Handle the ``revoke`` sub-command: revoke a previously issued certificate."""
    with app.app_context():
        certificate = Certificate.query.get(args.certificate)
        if not certificate:
            print("There is no such certificate {}".format(args.certificate))
            return
        parent_order = Order.query.get(certificate.order_id)
        if not parent_order:
            print("Order for this certificate not found")
            return
        # revocation is performed under the account that owns the order
        revoke_certificate(parent_order.account_id, certificate.id)
def _create_dest(args):
    """Handle ``destination create``: store a new SSH push destination."""
    pkey = None
    if args.pkey:
        # optional SSH private key used instead of / alongside the password
        with open(args.pkey, 'rb') as f:
            pkey = crypto.load_private_key(f.read())
    with app.app_context():
        dest = Destination(user_id=1,
                           host=args.host,
                           port=args.port,
                           user=args.user,
                           password=args.pwd,
                           ssh_priv_key=pkey,
                           ssh_priv_key_pass=args.pkeypass,
                           challengeDestinationPath=args.challengePath,
                           certDestinationPath=args.certPath,
                           exportFormat=args.exportFormat,
                           no_check=args.nocheck)
        # create() returns falsy when validation/connection checks fail
        if dest.create():
            print("Destination: {} created".format(dest.id))
            print(dest.json)
        else:
            print("Error creating destination with given data. Check hostname, password, private key")
            print(dest.json)
def _update_dest(args):
    """Handle ``destination update``: modify an existing SSH push destination."""
    with app.app_context():
        dest = Destination.query.get(args.id)
        if not dest:
            print("There is no such destination {}".format(args.id))
            return
        # CLI operations always run as the admin user (user_id 1)
        if dest.user_id != 1:
            print("This destination does not belong to the admin")
            return
        pkey = None
        if args.pkey:
            with open(args.pkey, 'rb') as f:
                pkey = crypto.load_private_key(f.read())
        # update() returns falsy when validation/connection checks fail
        if dest.update(user_id=1,
                       host=args.host,
                       port=args.port,
                       user=args.user,
                       password=args.pwd,
                       ssh_priv_key=pkey,
                       ssh_priv_key_pass=args.pkeypass,
                       challengeDestinationPath=args.challengePath,
                       certDestinationPath=args.certPath,
                       exportFormat=args.exportFormat,
                       no_check=args.nocheck):
            print("Destination: {} updated".format(dest.id))
            print(dest.json)
        else:
            print("Error updating destination with given data. Check hostname, password, private key")
            print(dest.json)
def _delete_dest(args):
    """Handle ``destination delete``: remove a destination from the database.

    Fix: the original did ``dest = dest.delete()`` and then printed
    ``dest.id``, which raises AttributeError unless ``delete()`` happens to
    return the deleted object. Capture the id before deleting instead.
    """
    with app.app_context():
        dest = Destination.query.get(args.id)
        if not dest:
            print("There is no such destination {}".format(args.id))
            return
        # CLI operations always run as the admin user (user_id 1)
        if dest.user_id != 1:
            print("This destination does not belong to the admin")
            return
        dest_id = dest.id
        dest.delete()
        print("Destination {} deleted from database".format(dest_id))
class Formatter(argparse.ArgumentDefaultsHelpFormatter,
                argparse.RawDescriptionHelpFormatter):
    """Help formatter that keeps description text raw and shows argument defaults."""
def certifire_main():
    """Entry point for the ``certifire`` command line tool.

    Builds the argparse sub-command tree (register / issue / revoke /
    destination / version), configures logging, then dispatches to the
    handler each sub-parser registered via ``set_defaults(func=...)``.
    """
    parser = argparse.ArgumentParser(
        description=DESCRIPTION,
        formatter_class=Formatter,
    )
    subparsers = parser.add_subparsers()
    # Account creation
    # NOTE(review): this local name shadows the imported ``register`` function
    # inside this function's scope (the handler _register still resolves the
    # module-level import at call time).
    register = subparsers.add_parser(
        'register',
        help="Create a new account and register",
        description=DESCRIPTION_REGISTER,
        formatter_class=Formatter,
    )
    register.add_argument('email', type=str, help="Account email address")
    register.add_argument('--server', '-i', help="ACME Server url")
    register.add_argument('--key-file', '-k',
                          help="Existing key file to use for the account")
    register.add_argument('--organization', '-o', help="Name of organization")
    register.add_argument('--organizational_unit', '-u',
                          help="Name of organizational unit")
    register.add_argument('--country', '-c', help="Name of country")
    register.add_argument('--state', '-s', help="Name of state")
    register.add_argument('--location', '-l', help="Name of location")
    register.set_defaults(func=_register)
    # Certificate issuance
    issue = subparsers.add_parser(
        'issue',
        help="Authorize and Request a new certificate",
        description=DESCRIPTION_ISSUE,
        formatter_class=Formatter,
    )
    issue.add_argument('--account', '-a',
                       help="The acme account id to use", required=True)
    issue.add_argument('--destination',
                       help="Destination to authorize/push certificates")
    issue.add_argument('--domains',
                       help="One or more domain names to authorize", nargs='+')
    issue.add_argument('--type',
                       '-t',
                       help="Authorization type",
                       choices=('dns', 'sftp'),
                       default='dns')
    issue.add_argument('--provider',
                       '-p',
                       help="DNS Provider",
                       choices=config.VALID_DNS_PROVIDERS,
                       default=config.VALID_DNS_PROVIDERS[0])
    issue.add_argument('--key-file', '-k',
                       help="Existing key file to use for the certificate")
    issue.add_argument('--csr-file', help="Existing signing request to use")
    issue.add_argument('--email', '-e', help="email address for CSR")
    issue.add_argument('--organization', '-o', help="Name of organization")
    issue.add_argument('--organizational_unit', '-u',
                       help="Name of organizational unit")
    issue.add_argument('--country', '-c', help="Name of country")
    issue.add_argument('--state', '-s', help="Name of state")
    issue.add_argument('--location', '-l', help="Name of location")
    issue.add_argument('--reissue',
                       dest='reissue',
                       help="Reissue certificate",
                       action='store_true')
    issue.set_defaults(func=_issue, reissue=False)
    # Certificate revocation
    revoke = subparsers.add_parser(
        'revoke',
        help="Revoke an issued certificate",
        description=DESCRIPTION_REVOKE,
        formatter_class=Formatter,
    )
    revoke.add_argument("certificate", help="The certificate id to revoke")
    revoke.add_argument('--account', '-a',
                        help="The acme account id to use", required=True)
    revoke.set_defaults(func=_revoke)
    destination = subparsers.add_parser(
        'destination',
        help="Manage Destinations",
        # description=DESCRIPTION_REVOKE, #TODO: Destinations description
        formatter_class=Formatter,
    )
    destination_subparsers = destination.add_subparsers()
    create_dest = destination_subparsers.add_parser(
        'create',
        help='Create a Destination',
        formatter_class=Formatter
    )
    create_dest.add_argument("host", help="Host FQDN. eg: api.certifire.xyz")
    create_dest.add_argument('--port', '-p', help="SSH port", default=22)
    create_dest.add_argument('--user', '-u', help="SSH user", default='root')
    create_dest.add_argument('--pwd', '-s', help="SSH password")
    create_dest.add_argument('--pkey', '-k', help="SSH private key file")
    create_dest.add_argument('--pkeypass', '-c', help="SSH private key password")
    create_dest.add_argument('--challengePath', help="HTTP-01 Challenge destination path", default='/var/www/html')
    create_dest.add_argument('--certPath', help="Certificate push destination path", default='/etc/nginx/certs')
    create_dest.add_argument('--exportFormat', help="Certificate export format", choices=('NGINX', 'Apache'),default='NGINX')
    create_dest.add_argument('--nocheck', help="Pass this flag to skip SSH initial checks", dest='nocheck', action='store_true')
    create_dest.set_defaults(func=_create_dest, nocheck=False)
    update_dest = destination_subparsers.add_parser(
        'update',
        help='Update a Destination',
        formatter_class=Formatter
    )
    update_dest.add_argument("id", help="Destination id")
    update_dest.add_argument("--host", '-f', help="Host FQDN. eg: api.certifire.xyz")
    update_dest.add_argument('--port', '-p', help="SSH port")
    update_dest.add_argument('--user', '-u', help="SSH user")
    update_dest.add_argument('--pwd', '-s', help="SSH password")
    update_dest.add_argument('--pkey', '-k', help="SSH private key file")
    update_dest.add_argument('--pkeypass', '-c', help="SSH private key password")
    update_dest.add_argument('--challengePath', help="HTTP-01 Challenge destination path")
    update_dest.add_argument('--certPath', help="Certificate push destination path")
    update_dest.add_argument('--exportFormat', help="Certificate export format", choices=('NGINX', 'Apache'))
    update_dest.add_argument('--nocheck', help="Pass this flag to skip SSH initial checks", dest='nocheck', action='store_true')
    update_dest.set_defaults(func=_update_dest, nocheck=False)
    delete_dest = destination_subparsers.add_parser(
        'delete',
        help='Delete a Destination',
        formatter_class=Formatter
    )
    delete_dest.add_argument("id", help="Destination id")
    delete_dest.set_defaults(func=_delete_dest)
    # Version
    version = subparsers.add_parser("version", help="Show the version number")
    version.set_defaults(func=lambda *args: print(
        "certifire {}\n".format(get_version())))
    # Parse
    args = parser.parse_args()
    # no sub-command given: show help and exit
    if not hasattr(args, 'func'):
        parser.print_help()
        sys.exit()
    # Set up logging
    root = logging.getLogger('certifire')
    root.setLevel(logging.INFO)
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter("%(message)s"))
    root.addHandler(handler)
    # Let's encrypt
    try:
        args.func(args)
    except CertifireError as e:
        # domain errors may carry an empty message; only log non-empty ones
        if str(e):
            logger.error(e)
        sys.exit()
    except KeyboardInterrupt:
        logger.error("")
        logger.error("Interrupted.")
        sys.exit()
    except Exception as e:
        logger.error("Oops! An unhandled error occurred. Please file a bug.")
        logger.exception(e)
        sys.exit()
# Script entry point.
if __name__ == "__main__":
    certifire_main()
| 14,927 | 4,256 |
import argparse, os
from urllib.parse import unquote
import os.path
from os import path
import pickle

# CLI: the only option is the wiki language code, used to locate files.
ap = argparse.ArgumentParser()
ap.add_argument("-l", "--language", default='en', type=str,
                help="path")
args = ap.parse_args()

# Load the project helpers (extract_text_and_hyp, get_ent_name_from_wikiid, ...)
# into this module's namespace; the pipeline distributes them via exec.
exec(open("utils/utils.py").read())
exec(open("data_gen/parse_wiki_dump/parse_wiki_dump_tools.py").read())

print('Computing Wikipedia p_e_m')

# mention string -> {entity wikiid -> hyperlink count}
wiki_e_m_counts = {}
num_lines = 0
parsing_errors = 0
list_ent_errors = 0
diez_ent_errors = 0
disambiguation_ent_errors = 0
num_valid_hyperlinks = 0

dump_path = 'wiki_data/' + args.language + '/' + args.language + '-wikidataid-TextWithAnchorsFromAllWikipedia.txt'
with open(dump_path, encoding="utf-8") as f:
    for line in f:
        line = unquote(line.strip())
        num_lines += 1
        if num_lines % 5000000 == 0:
            # Periodic progress report.
            print('Processed ' + str(num_lines) + ' lines. Parsing errs = ' +\
                  str(parsing_errors) + ' List ent errs = ' + \
                  str(list_ent_errors) + ' diez errs = ' + str(diez_ent_errors) +\
                  ' disambig errs = ' + str(disambiguation_ent_errors) + \
                  ' . Num valid hyperlinks = ' + str(num_valid_hyperlinks))
        if '<doc id="' in line:
            continue  # skip document-header lines
        list_hyp, text, le_errs, p_errs, dis_errs, diez_errs = extract_text_and_hyp(line, False)
        parsing_errors += p_errs
        list_ent_errors += le_errs
        disambiguation_ent_errors += dis_errs
        diez_ent_errors += diez_errs
        # Tally every valid (mention, entity) hyperlink pair.
        for mention, hyp in list_hyp.items():
            ent_wikiid = hyp['ent_wikiid']
            num_valid_hyperlinks += 1
            per_entity = wiki_e_m_counts.setdefault(mention, {})
            per_entity[ent_wikiid] = per_entity.get(ent_wikiid, 0) + 1

print(' Done computing Wikipedia p(e|m). Num valid hyperlinks = ', num_valid_hyperlinks)
print('Now sorting and writing ..')
out_path = 'generated/' + args.language + '/wikipedia_p_e_m.txt'
with open(out_path, "w", encoding="utf-8") as f:
    # One line per mention: "<mention>\t<total freq>\t<id,freq,name>\t..." with
    # entities ordered by descending hyperlink count.
    for mention, entity_counts in wiki_e_m_counts.items():
        ranked = sorted(entity_counts.items(), key=lambda item: item[1], reverse=True)
        total_freq = 0
        text = ''
        for ent_wikiid, freq in ranked:
            text += str(ent_wikiid) + ',' + str(freq)
            text += ',' + get_ent_name_from_wikiid(ent_wikiid).replace(' ', '_') + '\t'
            total_freq = total_freq + freq
        f.write(mention + '\t' + str(total_freq) + '\t' + text + '\n')
print(' Done sorting and writing.')
| 2,783 | 973 |
"""Homie Models module"""
from .homie_device import HomieDevice
from .homie_node import HomieNode
from .homie_property import HomieProperty
| 141 | 44 |
############################################################################################
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
############################################################################################
from setuptools import setup, find_packages
def readme():
    """Return the contents of README.md (used as the package long description)."""
    with open("README.md") as handle:
        return handle.read()
#with open("requirements.txt") as f:
# required = f.read().splitlines()
#with open("requirements-optional.txt") as f:
# optional_required = f.read().splitlines()
# Package metadata for distribution.
# NOTE(review): the file header above carries an Apache-2.0 notice while the
# license/classifiers below say MIT — confirm which license actually applies.
setup(
    name="allthingsnlp",
    version="0.0.4",
    description="All things NLP - An open source, low-code NLP library in Python.",
    long_description=readme(),
    long_description_content_type="text/markdown",
    url="https://github.com/Pranavj94/all-things-nlp",
    author="Pranav J",
    author_email="pranavj13594@gmail.com",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    include_package_data=True,
    # Runtime dependencies; the optional-extras block below is currently disabled.
    install_requires=['pandas','numpy','tqdm','nltk','wordcloud','matplotlib','IPython']
    #extras_require={"full": optional_required,},
)
# Generated by Django 2.2.12 on 2020-05-07 09:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2.12): allow Application.applicant to be blank.

    Keeps CASCADE deletion and the 'applications' reverse accessor on
    accounts.Professional. Do not hand-edit generated operations.
    """
    dependencies = [
        ('jobs', '0004_application_qualifications'),
    ]
    operations = [
        migrations.AlterField(
            model_name='application',
            name='applicant',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='applications', to='accounts.Professional'),
        ),
    ]
| 533 | 178 |
# Реализуйте абстракцию (набор функций) для работы с прямоугольниками,
# стороны которого всегда параллельны осям. Прямоугольник может
# располагаться в любом месте координатной плоскости.
#
# При такой постановке, достаточно знать только три параметра для однозначного
# задания прямоугольника на плоскости: координаты левой-верхней точки, ширину
# и высоту. Зная их, мы всегда можем построить прямоугольник
# одним единственным способом.
#
# |
# 4 | точка ширина
# | *-------------
# 3 | | |
# | | | высота
# 2 | | |
# | --------------
# 1 |
# |
# ------|---------------------------
# 0 | 1 2 3 4 5 6 7
# |
# |
# |
# Основной интерфейс:
#
# make_rectangle (конструктор) – создает прямоугольник.
# Принимает параметры: левую-верхнюю точку, ширину и высоту.
# Ширина и высота – положительные числа.
#
# Селекторы get_start_point, get_width и get_height
#
# contains_origin – проверяет, принадлежит ли центр координат прямоугольнику
# (не лежит на границе прямоугольника, а находится внутри).
# Чтобы в этом убедиться, достаточно проверить,
# что все вершины прямоугольника лежат в разных квадрантах
# (их можно высчитать в момент проверки).
#
# # Создание прямоугольника:
# # p - левая верхняя точка
# # 4 - ширина
# # 5 - высота
# #
# # p 4
# # -----------
# # | |
# # | | 5
# # | |
# # -----------
#
# >>> p = make_decart_point(0, 1)
# >>> rectangle = make_rectangle(p, 4, 5)
#
# >>> contains_origin(rectangle)
# False
#
# >>> rectangle2 = make_rectangle(make_decart_point(-4, 3), 5, 4)
# >>> contains_origin(rectangle2)
# True
# Подсказки
# Квадрант плоскости — любая из 4 областей (углов),
# на которые плоскость делится двумя взаимно перпендикулярными прямыми,
# принятыми в качестве осей координат.
# Для определения квадранта, в которой лежит точка,
# используйте функцию get_quadrant.
def make_rectangle(start_point, width, height):
    """Build a rectangle record from its top-left point, width and height."""
    rectangle = dict(start_point=start_point, width=width, height=height)
    return rectangle
def get_start_point(rectangle):
    """Selector: the rectangle's top-left corner point."""
    return rectangle["start_point"]
def get_width(rectangle):
    """Selector: the rectangle's width (positive number)."""
    return rectangle["width"]
def get_height(rectangle):
    """Selector: the rectangle's height (positive number)."""
    return rectangle["height"]
def get_ur_rectangle_point(rectangle):
    """Upper-right corner: the start point shifted right by the width."""
    start = get_start_point(rectangle)
    return make_decart_point(get_x(start) + get_width(rectangle), get_y(start))
def get_dl_rectangle_point(rectangle):
    """Lower-left corner: the start point shifted down by the height."""
    start = get_start_point(rectangle)
    return make_decart_point(get_x(start), get_y(start) - get_height(rectangle))
def get_dr_rectangle_point(rectangle):
    """Lower-right corner: shifted right by the width and down by the height."""
    start = get_start_point(rectangle)
    return make_decart_point(get_x(start) + get_width(rectangle),
                             get_y(start) - get_height(rectangle))
def contains_origin(rectangle):
    """True when the origin (0, 0) lies strictly inside the rectangle.

    The origin is interior exactly when the four corners fall into four
    distinct quadrants.
    """
    corners = (
        get_start_point(rectangle),
        get_ur_rectangle_point(rectangle),
        get_dl_rectangle_point(rectangle),
        get_dr_rectangle_point(rectangle),
    )
    quadrants = {get_quadrant(corner) for corner in corners}
    return len(quadrants) == 4
# ______________________________________________________________________
def make_decart_point(x, y):
    """Build a Cartesian point record."""
    return dict(x=x, y=y)
def get_x(point):
    """Selector: the point's x coordinate."""
    return point["x"]
def get_y(point):
    """Selector: the point's y coordinate."""
    return point["y"]
def get_quadrant(point):
    """Quadrant number (1-4) for *point*, or None when it lies on an axis."""
    x, y = get_x(point), get_y(point)
    if x == 0 or y == 0:
        return None
    if y > 0:
        return 1 if x > 0 else 2
    return 3 if x < 0 else 4
def test_rectangle():
    """Smoke test: origin containment for rectangles anchored at (-4, 3)."""
    p = make_decart_point(-4, 3)
    rectangle1 = make_rectangle(p, 5, 4)
    assert contains_origin(rectangle1)
    rectangle2 = make_rectangle(p, 5, 2)
    assert not contains_origin(rectangle2)
    rectangle3 = make_rectangle(p, 2, 2)
    assert not contains_origin(rectangle3)
    # Width/height of exactly 4/3 put a corner on an axis: not strictly inside.
    rectangle4 = make_rectangle(p, 4, 3)
    assert not contains_origin(rectangle4)
def test_cross_zero():
    """A rectangle whose left edge passes through x=0 does not contain the origin."""
    point = make_decart_point(0, 1)
    rectangle = make_rectangle(point, 4, 5)
    assert not contains_origin(rectangle)
# Run the smoke tests when the module is executed/imported.
test_cross_zero()
test_rectangle()
| 4,257 | 1,579 |
from masonite.testing import TestCase
from masonite.routes import Get, Post
from src.masonite.js_routes.routes import Routes as JsRoutes
# Canonical route-name -> {uri, methods} mapping that JsRoutes is expected to
# produce for the fixture routes registered in TestRoutes.setUp.
all_expected_routes = {
    "home": {"uri": "home", "methods": ["GET"]},
    "posts.show": {"uri": "posts/{post}", "methods": ["GET"]},
    "posts.store": {"uri": "posts", "methods": ["POST"]},
    "posts.index": {"uri": "posts", "methods": ["GET"]},
    "postComments.index": {"uri": "posts/{post}/comments", "methods": ["GET"]},
    "postComments.show": {
        "uri": "posts/{post}/comments/{comment}",
        "methods": ["GET"],
    },
    "admin.users.index": {"uri": "admin/users", "methods": ["GET"]},
}
class TestRoutes(TestCase):
    """Tests for JsRoutes: route export plus only/except/group filtering."""

    sqlite = False

    def setUp(self):
        """Register a fixed set of named routes and build the test container."""
        super().setUp()
        self.routes(
            only=[
                Get("home", "tests.TestController@show").name("home"),
                Get("posts", "tests.TestController@show").name("posts.index"),
                Get("posts/@post", "tests.TestController@show").name("posts.show"),
                Get("posts/@post/comments", "tests.TestController@show").name(
                    "postComments.index"
                ),
                Get(
                    "posts/@post/comments/@comment:int", "tests.TestController@show"
                ).name("postComments.show"),
                Post("posts", "tests.TestController@show").name("posts.store"),
                Get("admin/users", "tests.TestController@show").name(
                    "admin.users.index"
                ),
            ]
        )
        self.buildOwnContainer()

    def test_basic_routes_generation(self):
        """An unfiltered export contains every named route."""
        js_routes = JsRoutes()
        routes = js_routes.routes
        self.assertEqual(all_expected_routes, routes)

    def test_can_filter_to_only_include_routes_matching_a_pattern(self):
        js_routes = JsRoutes()
        routes = js_routes.filter_routes(["posts.s*", "home"])
        expected = {
            "home": {"uri": "home", "methods": ["GET"]},
            "posts.show": {"uri": "posts/{post}", "methods": ["GET"]},
            "posts.store": {"uri": "posts", "methods": ["POST"]},
        }
        self.assertEqual(expected, routes)

    def test_can_filter_to_exclude_routes_matching_a_pattern(self):
        js_routes = JsRoutes()
        routes = js_routes.filter_routes(["posts.s*", "home", "admin.*"], False)
        expected = {
            "posts.index": {"uri": "posts", "methods": ["GET"]},
            "postComments.index": {"uri": "posts/{post}/comments", "methods": ["GET"]},
            "postComments.show": {
                "uri": "posts/{post}/comments/{comment}",
                "methods": ["GET"],
            },
        }
        self.assertEqual(expected, routes)

    def test_can_set_included_routes_using_only_config(self):
        from config.js_routes import FILTERS
        FILTERS["except"] = []
        FILTERS["only"] = ["posts.s*", "home"]
        routes = JsRoutes().to_dict()["routes"]
        expected = {
            "home": {"uri": "home", "methods": ["GET"]},
            "posts.show": {"uri": "posts/{post}", "methods": ["GET"]},
            "posts.store": {"uri": "posts", "methods": ["POST"]},
        }
        self.assertEqual(expected, routes)

    def test_can_set_included_routes_using_except_config(self):
        from config.js_routes import FILTERS
        FILTERS["only"] = []
        FILTERS["except"] = ["posts.s*", "home"]
        routes = JsRoutes().to_dict()["routes"]
        expected = {
            "posts.index": {"uri": "posts", "methods": ["GET"]},
            "postComments.index": {"uri": "posts/{post}/comments", "methods": ["GET"]},
            "postComments.show": {
                "uri": "posts/{post}/comments/{comment}",
                "methods": ["GET"],
            },
            "admin.users.index": {"uri": "admin/users", "methods": ["GET"]},
        }
        self.assertEqual(expected, routes)

    def test_returns_unfiltered_routes_when_both_only_and_except_configs_set(self):
        from config.js_routes import FILTERS
        FILTERS["except"] = ["posts.s*", "home"]
        FILTERS["only"] = ["some.other.routes"]
        routes = JsRoutes().to_dict()["routes"]
        self.assertEqual(all_expected_routes, routes)

    def test_can_set_included_routes_using_groups_config(self):
        from config.js_routes import FILTERS
        FILTERS["groups"] = {"posts": ["posts.s*"]}
        routes = JsRoutes("posts").to_dict()["routes"]
        expected = {
            "posts.show": {"uri": "posts/{post}", "methods": ["GET"]},
            "posts.store": {"uri": "posts", "methods": ["POST"]},
        }
        self.assertEqual(expected, routes)

    def test_can_set_included_routes_using_groups_array_config(self):
        from config.js_routes import FILTERS
        FILTERS["groups"] = {"posts": ["posts.s*"], "admin": ["admin.*"]}
        routes = JsRoutes(["posts", "admin"]).to_dict()["routes"]
        expected = {
            "posts.show": {"uri": "posts/{post}", "methods": ["GET"]},
            "posts.store": {"uri": "posts", "methods": ["POST"]},
            "admin.users.index": {"uri": "admin/users", "methods": ["GET"]},
        }
        self.assertEqual(expected, routes)

    # BUGFIX: this test was missing the ``test_`` prefix, so the test runner
    # never collected or executed it.
    def test_can_ignore_passed_group_not_set_in_config(self):
        from config.js_routes import FILTERS
        FILTERS["groups"] = {"posts": ["posts.s*"]}
        routes = JsRoutes(["unknown_group"]).to_dict()["routes"]
        self.assertEqual(all_expected_routes, routes)

    # TODO: placeholder — intentionally left without the ``test_`` prefix
    # until implemented, so it is not collected as an empty passing test.
    def can_include_middleware(self):
        pass

    # TODO: placeholder, see above.
    def can_include_only_middleware_set_in_config(self):
        pass
| 5,622 | 1,737 |
"""Dam break past an obstacle with data from SPHysics. (40 minutes)
For benchmarking, we use the input geometry and discretization as the
SPHYSICS Case 5
(https://wiki.manchester.ac.uk/sphysics/index.php/SPHYSICS_Home_Page)
We only require the INDAT and IPART files generated by SPHysics. These
define respectively, the numerical parameters and the initial particle
data used for the run. The rest of the problem is set-up in the usual
way.
"""
import os
import numpy
from pysph.sph.equation import Group
from pysph.base.kernels import CubicSpline
from pysph.sph.wc.basic import TaitEOS, TaitEOSHGCorrection, MomentumEquation
from pysph.sph.basic_equations import ContinuityEquation, XSPHCorrection
from pysph.solver.solver import Solver
from pysph.solver.application import Application
from pysph.sph.integrator import EPECIntegrator, PECIntegrator
from pysph.sph.integrator_step import WCSPHStep
from pysph.tools.sphysics import sphysics2pysph
# SPHysics input files (gzipped) shipped alongside this script.
MY_DIR = os.path.dirname(__file__)
INDAT = os.path.join(MY_DIR, 'INDAT.gz')
IPART = os.path.join(MY_DIR, 'IPART.gz')
# problem dimensionality
dim = 3
# suggested initial time step and final time
dt = 1e-5
tf = 2.0
# physical constants for the run loaded from SPHysics INDAT
# NOTE(review): the fixed indices below assume the SPHYSICS Case 5 INDAT
# layout — confirm against the SPHysics manual if the input file changes.
indat = numpy.loadtxt(INDAT)
H = float( indat[10] )
B = float( indat[11] )
gamma = float( indat[12] )
eps = float( indat[14] )
rho0 = float( indat[15] )
alpha = float( indat[16] )
beta = 0.0
# reference sound speed implied by the Tait EOS parameters (c0^2 = B*gamma/rho0)
c0 = numpy.sqrt( B*gamma/rho0 )
class DamBreak3DSPhysics(Application):
    """3D dam-break-past-obstacle benchmark driven by SPHysics input files."""
    def add_user_options(self, group):
        """Add --test: use the PEC integrator with a fixed time step for result checks."""
        group.add_argument(
            "--test", action="store_true", dest="test", default=False,
            help="For use while testing of results, uses PEC integrator."
        )
    def create_particles(self):
        # Build the fluid/boundary particle arrays directly from the SPHysics files.
        return sphysics2pysph(IPART, INDAT, vtk=False)
    def create_solver(self):
        """WCSPH solver: EPEC + adaptive dt normally, PEC + fixed dt under --test."""
        kernel = CubicSpline(dim=3)
        if self.options.test:
            integrator = PECIntegrator(fluid=WCSPHStep(),boundary=WCSPHStep())
            adaptive, n_damp = False, 0
        else:
            integrator = EPECIntegrator(fluid=WCSPHStep(),boundary=WCSPHStep())
            adaptive, n_damp = True, 0
        solver = Solver(dim=dim, kernel=kernel, integrator=integrator,
                        adaptive_timestep=adaptive, tf=tf, dt=dt,
                        n_damp=n_damp)
        return solver
    def create_equations(self):
        """Equation groups; the EOS group is evaluated before continuity/momentum."""
        equations = [
            # Equation of state
            Group(equations=[
                TaitEOS(dest='fluid', sources=None,
                        rho0=rho0, c0=c0, gamma=gamma),
                TaitEOSHGCorrection(dest='boundary', sources=None,
                                    rho0=rho0, c0=c0, gamma=gamma),
            ], real=False),
            # Continuity Momentum and XSPH equations
            Group(equations=[
                ContinuityEquation(dest='fluid',
                                   sources=['fluid', 'boundary']),
                ContinuityEquation(dest='boundary', sources=['fluid']),
                MomentumEquation(
                    dest='fluid', sources=['fluid', 'boundary'], c0=c0,
                    alpha=alpha, beta=beta, gz=-9.81,
                    tensile_correction=True),
                # Position step with XSPH
                XSPHCorrection(dest='fluid', sources=['fluid'], eps=eps)
            ])
        ]
        return equations
# Script entry point: build and run the benchmark application.
if __name__ == '__main__':
    app = DamBreak3DSPhysics()
    app.run()
| 3,532 | 1,161 |
# -*- coding: utf-8 -*-
'''
Session 1
Write a program that repeats an LED blink three times.
Make a version with three LEDs.
'''
import RPi.GPIO as GPIO
import time

# BCM pin numbers of the three LEDs.
PINS = [10, 11, 12]

# Standard setup boilerplate.
GPIO.setmode(GPIO.BCM)
GPIO.setup(PINS, GPIO.OUT)

try:
    # Blink all three LEDs three times (2 s on, 2 s off).
    for x in range(3):
        GPIO.output(PINS, GPIO.HIGH)  # drive pins 10, 11, 12 high
        time.sleep(2)
        GPIO.output(PINS, GPIO.LOW)   # drive pins 10, 11, 12 low
        time.sleep(2)
finally:
    # BUGFIX: always release the GPIO pins, even when the loop is
    # interrupted (e.g. Ctrl-C); previously cleanup() could be skipped.
    GPIO.cleanup()
| 396 | 276 |
# NOTE(review): this fragment relies on ``np`` (numpy) and ``plt``
# (matplotlib.pyplot) being imported earlier in the file — confirm upstream.
# Plot the line Y = 2*X + 1 for X in [-100, 100) with 0.1 spacing.
X = np.arange(-100, 100, 0.1)
Y = 2*X + 1
plt.plot(X,Y)
plt.xlabel('X')
plt.ylabel('Y')
import pytest
from amqp_mock import Message
from ._test_utils.fixtures import amqp_client, mock_client, mock_server
from ._test_utils.helpers import random_uuid, to_binary
from ._test_utils.steps import given, then, when
__all__ = ("mock_client", "mock_server", "amqp_client",)
@pytest.mark.asyncio
async def test_reset_exchanges(*, mock_server, mock_client, amqp_client):
    """reset() must drop messages previously published to an exchange."""
    with given:
        exchange = "test_exchange"
        message = {"id": random_uuid()}
        await amqp_client.publish(to_binary(message), exchange)
    with when:
        result = await mock_client.reset()
    with then:
        assert result is None
        # The exchange must be empty after the reset.
        messages = await mock_client.get_exchange_messages(exchange)
        assert len(messages) == 0
@pytest.mark.asyncio
async def test_reset_queues(*, mock_server, mock_client, amqp_client):
    """reset() must drop messages waiting in a queue."""
    with given:
        queue = "test_queue"
        await mock_client.publish_message(queue, Message("text"))
    with when:
        result = await mock_client.reset()
    with then:
        assert result is None
        # A consumer started after the reset must receive nothing.
        await amqp_client.consume(queue)
        await amqp_client.wait(seconds=0.1)
        assert len(amqp_client.get_consumed_messages()) == 0
@pytest.mark.asyncio
async def test_reset_history(*, mock_server, mock_client, amqp_client):
    """reset() must clear the per-queue message history."""
    with given:
        queue = "test_queue"
        await mock_client.publish_message(queue, Message("text"))
        await amqp_client.consume(queue)
    with when:
        result = await mock_client.reset()
    with then:
        assert result is None
        history = await mock_client.get_queue_message_history(queue)
        assert len(history) == 0
| 1,654 | 526 |
"""
Band implementations for storing data in Karta Grid instances
Overview
--------
`BandIndexer` interface for accessing data from one or more bands
`SimpleBand` use numpy arrays for data storage
`CompressedBand` uses blosc compression to reduce in-memory footprint
Implementation
--------------
Bands are expected to implement the following methods:
- `__init__(self, size, dtype, initval=None)`
- `getblock(self, yoff, xoff, ny, nx)`
- `setblock(self, yoff, xoff, array)`
Attributes:
- `dtype`
- `size`
The following methods are deprecated:
- `__getitem__(self, key)`, accepting as *key* any of
- an int
- a slice
- a 2-tuple of ints
- a 2-tuple of slices
- `__setitem__(self, key, value)`, accepting as *key* the same
possibilities as __getitem__
"""
import blosc
import numpy as np
from numbers import Real, Integral
from math import ceil
class BandIndexer(object):
    """numpy-style indexing adapter over a list of band storage objects.

    Translates boolean-mask, slice, and (row, col[, band]) tuple keys into
    getblock/setblock calls on the underlying bands.
    """
    def __init__(self, bands):
        # bands: sequence of objects implementing getblock/setblock/size/dtype
        self.bands = bands
    def __getitem__(self, key):
        """Read values; *key* is a boolean mask array, a row slice, or a tuple
        of ints/slices in (row, col[, band]) order. Integer key items collapse
        the corresponding output axis, mirroring numpy semantics."""
        if isinstance(key, np.ndarray):
            return self._get_from_array_mask(key)
        if isinstance(key, slice):
            # A bare slice selects rows; take every column and every band.
            key = (key, slice(None, None, None), slice(None, None, None))
        if not isinstance(key, tuple):
            raise TypeError("key should be an array or a tuple")
        collapse_rows = collapse_cols = collapse_bands = False
        ny, nx = self.bands[0].size
        if isinstance(key[0], Integral):
            collapse_rows = True
            # Modulo maps negative indices onto their positive equivalents.
            r = key[0] % ny
            ystart, yend, ystep = (r, r+1, 1)
        elif isinstance(key[0], slice):
            ystart, yend, ystep = key[0].indices(ny)
        else:
            raise TypeError("first key item should be an integer or a slice")
        if isinstance(key[1], Integral):
            collapse_cols = True
            r = key[1] % nx
            xstart, xend, xstep = (r, r+1, 1)
        elif isinstance(key[1], slice):
            xstart, xend, xstep = key[1].indices(nx)
        else:
            raise TypeError("second key item should be an integer or a slice")
        if len(key) == 2:
            # No band item given: read all bands.
            bands = list(range(len(self.bands)))
        elif len(key) == 3 and isinstance(key[2], Integral):
            collapse_bands = True
            bands = [key[2] % len(self.bands)]
        elif len(key) == 3 and isinstance(key[2], slice):
            bands = list(range(*key[2].indices(len(self.bands))))
        else:
            raise TypeError("third key item should be an integer or a slice")
        # Negative steps: normalize to a forward block read; the direction is
        # re-applied when slicing the retrieved block below.
        if ystep < 0:
            ystart, yend = yend+1, ystart+1
        if xstep < 0:
            xstart, xend = xend+1, xstart+1
        shape = [1 + (yend-ystart-1) // abs(ystep),
                 1 + (xend-xstart-1) // abs(xstep),
                 len(bands)]
        out = np.empty(shape, dtype = self.bands[0].dtype)
        for i, iband in enumerate(bands):
            band = self.bands[iband]
            band_values = band.getblock(ystart, xstart, yend-ystart, xend-xstart)
            out[:,:,i] = band_values[::ystep,::xstep]
        # Collapse the axes that were addressed with integer key items.
        if collapse_bands:
            out = out[:,:,0]
        if collapse_cols:
            out = out[:,0]
        if collapse_rows:
            out = out[0]
        return out
    def __setitem__(self, key, value):
        """Write *value* at *key* (same key forms as __getitem__).

        Raises NotImplementedError for stepped (non-unit-stride) slices."""
        if isinstance(key, np.ndarray):
            return self._set_from_array_mask(key, value)
        if isinstance(key, slice):
            key = (key, slice(None, None, None), slice(None, None, None))
        if not isinstance(key, tuple):
            raise TypeError("key should be an array or a tuple")
        ny, nx = self.bands[0].size
        if isinstance(key[0], Integral):
            r = key[0] % ny
            ystart, yend, ystep = (r, r+1, 1)
        elif isinstance(key[0], slice):
            ystart, yend, ystep = key[0].indices(ny)
        else:
            raise TypeError("first key item should be an integer or a slice")
        if isinstance(key[1], Integral):
            r = key[1] % nx
            xstart, xend, xstep = (r, r+1, 1)
        elif isinstance(key[1], slice):
            xstart, xend, xstep = key[1].indices(nx)
        else:
            raise TypeError("second key item should be an integer or a slice")
        if len(key) == 2:
            bands = list(range(len(self.bands)))
        elif len(key) == 3 and isinstance(key[2], Integral):
            collapse_bands = True  # NOTE(review): assigned but never used here
            bands = [key[2] % len(self.bands)]
        elif len(key) == 3 and isinstance(key[2], slice):
            bands = list(range(*key[2].indices(len(self.bands))))
        else:
            raise TypeError("third key item should be an integer or a slice")
        if not (xstep == ystep == 1):
            raise NotImplementedError("setting band values with stepped slices")
        #if ystep < 0:
        #    ystart, yend = yend+1, ystart+1
        #if xstep < 0:
        #    xstart, xend = xend+1, xstart+1
        shape = [1 + (yend-ystart-1) // abs(ystep),
                 1 + (xend-xstart-1) // abs(xstep),
                 len(bands)]
        # Broadcast scalars/arrays up to the full target shape before writing.
        if isinstance(value, np.ndarray) and (value.ndim == 1) and (shape[0] == shape[1] == 1):
            val_array = np.reshape(np.atleast_3d(value), shape)
        else:
            val_array = np.broadcast_to(np.atleast_3d(value), shape)
        for i, iband in enumerate(bands):
            band = self.bands[iband]
            band.setblock(ystart, xstart, val_array[:,:,i])
        return
    def _get_from_array_mask(self, mask):
        # The mask is assumed to be in (row, column[, band]) order
        # TODO: make this memory efficient
        # NOTE(review): the 2-d and 3-d branches are identical; numpy masking
        # on the fully materialized array handles both cases the same way.
        if mask.ndim == 2:
            return self[:,:,:][mask]
        elif mask.ndim == 3:
            return self[:,:,:][mask]
        else:
            raise IndexError("masking array must have two or three dimensions")
    def _set_from_array_mask(self, mask, value):
        # The mask is assumed to be in (row, column[, band]) order
        # TODO: make this memory efficient
        for i, band in enumerate(self.bands):
            # A 3-d mask selects per band; a 2-d mask applies to every band.
            if mask.ndim == 3:
                mask_ = mask[:,:,i]
            else:
                mask_ = mask
            # Read the whole band, update the masked cells, write it back.
            tmp = band.getblock(0, 0, *band.size)
            if isinstance(value, Real) or (value.ndim == 1):
                tmp[mask_] = value
            else:
                tmp[mask_] = value[:,i]
            band.setblock(0, 0, tmp)
    def __iter__(self):
        """Yield one row at a time; multi-band rows are vertically stacked."""
        nx = self.bands[0].size[1]
        for i in range(self.bands[0].size[0]):
            if len(self.bands) == 1:
                yield self.bands[0].getblock(i, 0, 1, nx)
            else:
                yield np.vstack([b.getblock(i, 0, 1, nx) for b in self.bands])
    @property
    def shape(self):
        """ Returns the dimensions of raster bands. If there is a single
        (m x n) band, output is a tuple (m, n). If there are N>1 bands, output
        is a tuple (N, m, n).

        NOTE(review): the code below actually returns bands[0].size, i.e.
        (m, n), regardless of the band count — the N>1 case described above
        is not implemented; confirm which behavior is intended.
        """
        if len(self.bands) == 0:
            raise ValueError("no bands")
        else:
            return self.bands[0].size
    @property
    def dtype(self):
        """ Returns bands' dtype """
        return self.bands[0].dtype
class SimpleBand(object):
    """In-memory band backed directly by a numpy.ndarray."""

    def __init__(self, size, dtype, initval=None):
        """Allocate a *size*-shaped array; fill with *initval* when given."""
        self.size = size
        self.dtype = dtype
        if initval is not None:
            self._array = np.full(size, initval, dtype=dtype)
        else:
            self._array = np.empty(size, dtype=dtype)

    def getblock(self, yoff, xoff, ny, nx):
        """Return the (ny, nx) sub-array anchored at (yoff, xoff)."""
        rows = slice(yoff, yoff + ny)
        cols = slice(xoff, xoff + nx)
        return self._array[rows, cols]

    def setblock(self, yoff, xoff, array):
        """Write *array* into the band with its corner at (yoff, xoff)."""
        ny, nx = array.shape
        self._array[yoff:yoff + ny, xoff:xoff + nx] = array
class CompressedBand(object):
    """ CompressedBand is a chunked, blosc-compressed array. """

    # Chunk status flags
    CHUNKSET = 1
    CHUNKUNSET = 0

    def __init__(self, size, dtype, chunksize=(256, 256), initval=0):
        """ Initialize a CompressedBand instance.

        Parameters
        ----------
        size : tuple of two ints
            size of band in pixels
        dtype : type
            data type of pixel values
        chunksize : tuple of two ints, optional
            size of compressed chunks, default (256, 256)
        initval : value, optional
            if set, the entire grid is initialized with this value, which should
            be of *dtype*
        """
        assert len(size) == 2
        self.size = size
        self.dtype = dtype
        self._chunksize = chunksize
        self._initval = initval
        self.nchunkrows = int(ceil(float(size[0])/float(chunksize[0])))
        self.nchunkcols = int(ceil(float(size[1])/float(chunksize[1])))
        nchunks = self.nchunkrows * self.nchunkcols
        # Data store: one compressed byte string per chunk, populated lazily
        self._data = [None for i in range(nchunks)]
        # 0 => unset, 1 => set
        self.chunkstatus = np.zeros(nchunks, dtype=np.int8)
        return

    def _store(self, array, index):
        """ Compress *array* and save it as chunk *index*. """
        # BUGFIX: ndarray.tostring() was deprecated and removed in NumPy 1.23;
        # tobytes() returns the identical byte string.
        self._data[index] = blosc.compress(array.tobytes(),
                                           np.dtype(self.dtype).itemsize)
        self.chunkstatus[index] = self.CHUNKSET
        return

    def _retrieve(self, index):
        """ Decompress chunk *index* back into a chunksize-shaped ndarray. """
        bytestr = blosc.decompress(self._data[index], as_bytearray=True)
        return np.frombuffer(bytestr, dtype=self.dtype).reshape(self._chunksize)

    def _getchunks(self, yoff, xoff, ny, nx):
        """ Return a generator returning tuples identifying chunks covered by a
        range. The tuples contain (chunk_number, ystart, yend, xstart, xend)
        for each chunk touched by a region defined by corner indices and region
        size. """
        chunksize = self._chunksize
        ystart = yoff // chunksize[0]
        yend = ceil(float(yoff+ny) / chunksize[0])
        xstart = xoff // chunksize[1]
        xend = ceil(float(xoff+nx) / chunksize[1])
        nxchunks = int(ceil(float(self.size[1])/float(chunksize[1])))
        i = ystart
        while i < yend:
            j = xstart
            while j < xend:
                chunk_number = i*nxchunks + j
                chunk_ystart = i*chunksize[0]
                chunk_xstart = j*chunksize[1]
                # Edge chunks may be truncated by the band boundary
                chunk_yend = min((i+1)*chunksize[0], self.size[0])
                chunk_xend = min((j+1)*chunksize[1], self.size[1])
                yield (chunk_number, chunk_ystart, chunk_yend,
                       chunk_xstart, chunk_xend)
                j += 1
            i += 1

    def setblock(self, yoff, xoff, array):
        """ Store block of values in *array* starting at offset *yoff*, *xoff*.
        """
        size = array.shape[:2]
        chunksize = self._chunksize
        for i, yst, yen, xst, xen in self._getchunks(yoff, xoff, *size):
            # Get from data store, or materialize a fresh initval-filled chunk
            if self.chunkstatus[i] == self.CHUNKSET:
                chunkdata = self._retrieve(i)
            else:
                chunkdata = np.full(self._chunksize, self._initval, dtype=self.dtype)
            # Compute region within chunk to place data in
            cy0 = max(0, yoff-yst)
            cy1 = min(chunksize[0], yoff+size[0]-yst)
            cx0 = max(0, xoff-xst)
            cx1 = min(chunksize[1], xoff+size[1]-xst)
            # Compute region to slice from data
            dy0 = max(0, yst-yoff)
            dy1 = min(size[0], yen-yoff)
            dx0 = max(0, xst-xoff)
            dx1 = min(size[1], xen-xoff)
            chunkdata[cy0:cy1, cx0:cx1] = array[dy0:dy1, dx0:dx1]
            # Return to data store
            self._store(chunkdata, i)
        return

    def getblock(self, yoff, xoff, ny, nx):
        """ Retrieve values with dimensions (*ny*, *nx*), starting at offset
        *yoff*, *xoff*.
        """
        result = np.empty([ny, nx], self.dtype)
        for i, yst, yen, xst, xen in self._getchunks(yoff, xoff, ny, nx):
            # Compute the bounds in the output
            oy0 = max(0, yst-yoff)
            oy1 = min(ny, yen-yoff)
            ox0 = max(0, xst-xoff)
            ox1 = min(nx, xen-xoff)
            if self.chunkstatus[i] == self.CHUNKUNSET:
                # Untouched chunk: the whole region still holds initval
                result[oy0:oy1, ox0:ox1] = np.full((oy1-oy0, ox1-ox0),
                                                   self._initval,
                                                   dtype=self.dtype)
            else:
                # Compute the extents from the chunk to retain
                cy0 = max(yoff, yst) - yst
                cy1 = min(yoff+ny, yen) - yst
                cx0 = max(xoff, xst) - xst
                cx1 = min(xoff+nx, xen) - xst
                result[oy0:oy1, ox0:ox1] = self._retrieve(i)[cy0:cy1, cx0:cx1]
        return result
| 12,749 | 4,168 |
from math import floor
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden
from django.shortcuts import get_object_or_404
from django.template.context_processors import csrf
from django.views import generic
from django.urls import reverse_lazy
from engine.utils import paginator
from django.utils.html import strip_tags
from .forms import *
from .models import *
from rest_framework import viewsets, permissions
from rest_framework.permissions import IsAdminUser, IsAuthenticatedOrReadOnly
from engine.serializers import *
# TODO hide user email from api and from profile
# TODO fix password_reset_confirm, post moderation, images upload
# TODO convert notifications to socket
# Mixin views
class StaffRequiredMixin(LoginRequiredMixin):
    """Restrict a view to staff accounts; everyone else is refused."""

    raise_exception = True

    def dispatch(self, request, *args, **kwargs):
        if request.user.is_staff:
            return super().dispatch(request, *args, **kwargs)
        return self.handle_no_permission()
class AuthorRequiredMixin(LoginRequiredMixin):
    """Restrict a view to the object's author or a staff member."""

    raise_exception = True

    def dispatch(self, request, *args, **kwargs):
        target = self.get_object()
        allowed = target.author == self.request.user or self.request.user.is_staff
        if not allowed:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
# Register view
class RegisterView(generic.CreateView):
    """Sign-up view: create a new User via RegisterForm."""
    model = User
    form_class = RegisterForm
    template_name = "registration/_register.html"
    def get_success_url(self):
        # Redirect to the "registration complete" page after a successful sign-up.
        return reverse_lazy("accounts:register_done")
# Logs views
class LogMixin(object):
    """Persist a Log row for every request handled by the mixed-in view."""
    def dispatch(self, request, *args, **kwargs):
        # NOTE(review): REMOTE_ADDR takes precedence over X-Forwarded-For here;
        # behind a reverse proxy REMOTE_ADDR is the proxy's address — confirm
        # this matches the deployment.
        ip = request.META.get('REMOTE_ADDR', '') or request.META.get('HTTP_X_FORWARDED_FOR', '')
        Log.objects.create(ip=ip,
                           author=request.user,
                           method=request.method,
                           path=request.path,
                           body=str(request.body).strip(),
                           cookies=str(request.COOKIES),
                           meta=str(request.META),
                           date=timezone.now()
                           )
        return super(LogMixin, self).dispatch(request, *args, **kwargs)
class LogsView(StaffRequiredMixin, generic.TemplateView):
    """Staff-only log-browser page; echoes the filter query params to the template."""

    template_name = 'engine/logs_view.html'

    def get_context_data(self, **kwargs):
        context = super(LogsView, self).get_context_data()
        for param in ('filter', 'path', 'ip', 'author', 'data'):
            context[param] = self.request.GET.get(param)
        return context
class LogsListView(StaffRequiredMixin, generic.ListView):
    """Staff-only list of the 500 most recent Log rows, optionally filtered
    by one of: path, ip, author, data (chosen via the 'filter' query param)."""

    model = Log
    context_object_name = 'logs'
    template_name = 'engine/logs_list.html'

    def get_queryset(self):
        filters = self.request.GET.get('filter')
        queryset = self.model.objects.all()
        if filters in ('path', 'ip', 'author', 'data'):
            # NOTE(review): 'data' is filtered verbatim although Log rows above
            # are created with 'body' — confirm the model field exists.
            queryset = self.model.objects.filter(**{filters: self.request.GET.get(filters)})
        return queryset.order_by('-date')[0:500]
class LogDetailsView(StaffRequiredMixin, generic.DetailView):
    """Staff-only detail page for a single Log row."""
    model = Log
    context_object_name = 'log'
    template_name = 'engine/logs_detail.html'
# Feedback views
class FeedbackSendView(LogMixin, generic.CreateView):
    """Public form for submitting feedback; redirects home on success."""
    model = Feedback
    fields = ['email', 'subject', 'message']
    template_name = "engine/form_default.html"
    success_url = "/"
class FeedbackListView(StaffRequiredMixin, LogMixin, generic.ListView):
    """Staff-only list of the 100 most recent feedback items."""
    model = Feedback
    context_object_name = 'feedback_list'
    template_name = "engine/feedback_list.html"
    def get_queryset(self):
        return self.model.objects.all().order_by('-date')[0:100]
class FeedbackDetailsView(StaffRequiredMixin, LogMixin, generic.DetailView):
    """Staff-only detail page for a single feedback item."""
    model = Feedback
    context_object_name = 'feedback'
    template_name = "engine/feedback_detail.html"
class FeedbackAnsweredView(StaffRequiredMixin, LogMixin, generic.UpdateView):
    """Toggle a Feedback item's answered status; the pk comes from POST data."""
    model = Feedback
    fields = ['status']
    success_url = "/"
    template_name = "engine/base.html"
    def get_object(self, queryset=None):
        return get_object_or_404(self.model, pk=self.request.POST.get('pk'))
    def post(self, *args, **kwargs):
        feedback = self.get_object()
        feedback.status = not feedback.status
        feedback.save()
        # NOTE(review): `self` is passed where get() expects the request object;
        # it presumably works because the base view reads self.request — confirm.
        return self.get(self, *args, **kwargs)
# Users views
class UserDetailsView(LogMixin, generic.DetailView):
    """Public profile page for a user, looked up by username."""
    model = User
    context_object_name = 'user'
    template_name = 'engine/user.html'

    def get_context_data(self, **kwargs):
        context = super(UserDetailsView, self).get_context_data(**kwargs)
        username = self.kwargs['username']
        context['posts'] = Post.objects.filter(author__username=username)
        viewer = self.request.user
        # Flag the template when the viewer already follows this author.
        if viewer.is_authenticated and viewer.author_subscriber.filter(
                author__username=username):
            context['subscribe'] = True
        return context

    def get_object(self):
        return get_object_or_404(self.model, username=self.kwargs['username'])
class UserEditView(LoginRequiredMixin, LogMixin, generic.UpdateView):
    """Let the logged-in user edit their own profile description/image."""
    model = Profile
    fields = ['description', 'img']
    template_name = 'engine/form_default.html'

    def get_object(self, queryset=None):
        # Profile of the current user; request.user is coerced to its
        # username string by the lookup.
        return self.model.objects.get(user__username=self.request.user)

    def get_success_url(self):
        # Back to the freshly edited profile page.
        return reverse('user_detail', args=(self.object.user.username,))
class UserChangeEmailView(LoginRequiredMixin, LogMixin, generic.UpdateView):
    """Let the logged-in user change their own e-mail address."""
    model = User
    fields = ['email']
    template_name = 'engine/form_default.html'

    def get_object(self, queryset=None):
        # Always edits the requesting user, never another account.
        return self.model.objects.get(username=self.request.user)

    def get_success_url(self):
        return reverse('user_detail', args=(self.object.username,))
# Notifications views
class SubscribeOnUserNotificationsView(LoginRequiredMixin, LogMixin, generic.View):
    """Subscribe the current user to another author's notifications."""

    def post(self, *args, **kwargs):
        author_name = self.request.POST.get('author')
        if not self.request.user.author_subscriber.filter(
                author__username=author_name).exists():
            # Fix: ``create()`` only accepts concrete field values, not
            # ``__`` lookups, so the original
            # ``create(author__username=...)`` raised a TypeError.
            # Resolve the author User first.
            author = get_object_or_404(User, username=author_name)
            AuthorSubscriber.objects.create(
                author=author,
                subscriber=self.request.user
            )
        return HttpResponseRedirect('/')
class UnSubscribeFromUserNotificationsView(LoginRequiredMixin, LogMixin, generic.DeleteView):
    """Remove the current user's subscription to an author."""
    model = AuthorSubscriber
    success_url = '/'

    def get_object(self, queryset=None):
        # Author's username arrives in the POST body; 404 if not subscribed.
        return get_object_or_404(AuthorSubscriber, subscriber=self.request.user,
                                 author__username=self.request.POST.get('author'))
class SubscribeOnPostNotificationsView(LoginRequiredMixin, LogMixin, generic.View):
    """Subscribe the current user to comment notifications for a post."""

    def post(self, *args, **kwargs):
        post_pk = self.request.POST.get('pk')
        if not self.request.user.post_subscriber.filter(
                post__pk=post_pk).exists():
            # Fix: ``create()`` only accepts concrete field values, not
            # ``__`` lookups, so the original ``create(post__pk=...)``
            # raised a TypeError. Resolve the Post first.
            post = get_object_or_404(Post, pk=post_pk)
            PostSubscriber.objects.create(
                post=post,
                subscriber=self.request.user
            )
        return HttpResponseRedirect('/')
class UnSubscribeFromPostNotificationsView(LoginRequiredMixin, LogMixin, generic.DeleteView):
    """Remove the current user's subscription to a post's comments."""
    model = PostSubscriber
    success_url = '/'

    def get_object(self, queryset=None):
        # Post pk arrives in the POST body; 404 if not subscribed.
        return get_object_or_404(PostSubscriber, subscriber=self.request.user,
                                 post__pk=self.request.POST.get('pk'))
class NotificationsListView(LoginRequiredMixin, LogMixin, generic.ListView):
    """Latest 100 notifications for the current user's subscriptions."""
    model = Notification
    context_object_name = 'notifications'
    template_name = 'engine/notifications.html'

    def get_queryset(self):
        user = self.request.user
        # Notifications from followed authors and from followed posts,
        # OR-combined into a single queryset.
        by_author = self.model.objects.filter(
            author_subscriber__subscriber=user).order_by('-pk')
        by_post = self.model.objects.filter(
            post_subscriber__subscriber=user).order_by('-pk')
        return (by_author | by_post)[:100]
class NotificationsCountView(LoginRequiredMixin, generic.View):
    """Return the number of unread notifications for the current user."""

    def post(self, *args, **kwargs):
        user = self.request.user
        mine = (Q(post_subscriber__subscriber=user)
                | Q(author_subscriber__subscriber=user))
        # status=False marks a notification as not yet viewed.
        unread = Notification.objects.filter(status=False).filter(mine).count()
        return HttpResponse(unread)
class NotificationViewedView(LoginRequiredMixin, LogMixin, generic.UpdateView):
    """Mark a notification as viewed (status=True) via POST."""
    model = Notification
    fields = ['status']
    success_url = "/"
    template_name = "engine/base.html"

    def get_object(self, queryset=None):
        """Return the notification if it belongs to the requesting user,
        else an HttpResponseForbidden (kept for interface compatibility).
        """
        notification = get_object_or_404(Notification, pk=self.request.POST.get("pk"))
        # Fix: default avoids a NameError when the notification has
        # neither a post nor a comment attached.
        owner = None
        if notification.post:
            owner = notification.author_subscriber.subscriber.username
        if notification.comment:
            owner = notification.post_subscriber.subscriber.username
        if owner == self.request.user.username:
            return notification
        return HttpResponseForbidden()

    def post(self, request, *args, **kwargs):
        notification = self.get_object()
        if isinstance(notification, HttpResponseForbidden):
            # Fix: the original tried to ``.save()`` the 403 response,
            # crashing instead of denying access.
            return notification
        notification.status = True
        notification.save()
        # Fix: forward the request, not ``self``, to get().
        return self.get(request, *args, **kwargs)
class NotificationDeleteView(LoginRequiredMixin, LogMixin, generic.DeleteView):
    """Delete a notification owned by the requesting user."""
    model = Notification
    success_url = '/'

    def get_object(self, queryset=None):
        """Return the notification if owned by the requesting user,
        else an HttpResponseForbidden (kept for interface compatibility).
        """
        notification = get_object_or_404(Notification, pk=self.request.POST.get("pk"))
        # Fix: default avoids a NameError when the notification has
        # neither a post nor a comment attached.
        owner = None
        if notification.post:
            owner = notification.author_subscriber.subscriber.username
        if notification.comment:
            owner = notification.post_subscriber.subscriber.username
        if owner == self.request.user.username:
            return notification
        return HttpResponseForbidden()
# Comments views
class CommentsListView(generic.ListView):
    """All comments for one post, newest first."""
    model = Comment
    context_object_name = 'comments'
    template_name = 'engine/comments.html'

    def get_queryset(self):
        # Post id comes from the URL kwargs.
        post_pk = self.kwargs['post_id']
        return self.model.objects.filter(post__pk=post_pk).order_by('-pk')
class CommentAddView(LoginRequiredMixin, LogMixin, generic.CreateView):
    """Attach a new comment, authored by the current user, to a post."""
    model = Comment
    fields = ['text']
    template_name = "engine/comments.html"
    success_url = '/'

    def form_valid(self, form):
        # Author and target post are set server-side, never from the form.
        comment = form.instance
        comment.author = self.request.user
        comment.post = get_object_or_404(Post, pk=self.kwargs['post_id'])
        return super(CommentAddView, self).form_valid(form)
class CommentDeleteView(AuthorRequiredMixin, LogMixin, generic.DeleteView):
    """Delete a comment; AuthorRequiredMixin restricts this to its author."""
    model = Comment
    success_url = '/'

    def get_object(self, queryset=None):
        # Comment id arrives in the POST body under "id".
        return get_object_or_404(Comment, pk=self.request.POST.get("id"))
# Posts views
class PostMixin:
    """Shared model/form configuration for the Post views below."""
    form_class = PostForm
    model = Post
class PostCreateView(PostMixin, LoginRequiredMixin, LogMixin, generic.CreateView):
    """Create a new post authored by the current user."""
    template_name = 'engine/form_default.html'

    def get_context_data(self, **kwargs):
        context = super(PostCreateView, self).get_context_data()
        # Inject the CSRF token for the form template.
        context.update(csrf(self.request))
        return context

    def form_valid(self, form):
        # The author is always the requesting user.
        new_post = form.instance
        new_post.author = self.request.user
        return super(PostCreateView, self).form_valid(form)
class PostEditView(PostMixin, AuthorRequiredMixin, LogMixin, generic.UpdateView):
    """Edit an existing post (author only)."""
    template_name = 'engine/form_default.html'

    def get_context_data(self, **kwargs):
        context = super(PostEditView, self).get_context_data(**kwargs)
        # Tells the template to render a delete button next to the form.
        context['button_delete_show'] = True
        return context

    def get_success_url(self):
        # Posts are addressed by their URL slug, not their pk.
        return reverse('post_detail', args=(self.object.url,))
class PostDeleteView(PostMixin, AuthorRequiredMixin, LogMixin, generic.DeleteView):
    """Confirm-and-delete flow for a post (author only)."""
    success_url = '/'

    def post(self, request, *args, **kwargs):
        """Delete on confirmation, return to the post on cancel,
        otherwise re-render the confirmation page.
        """
        post = self.get_object()
        if self.request.POST.get("confirm_delete"):
            post.delete()
            return HttpResponseRedirect(self.success_url)
        elif self.request.POST.get("cancel"):
            return HttpResponseRedirect(post.get_absolute_url())
        # Fix: the original forwarded ``self`` instead of the request.
        return self.get(request, *args, **kwargs)

    def get_object(self, queryset=None):
        return get_object_or_404(Post, pk=self.kwargs['pk'])
# Main page
class PostsListView(PostMixin, LogMixin, generic.ListView):
    """Main page: paginated posts, optionally restricted to one category."""
    context_object_name = 'posts'
    template_name = 'engine/post_list.html'

    def get_context_data(self, **kwargs):
        context = super(PostsListView, self).get_context_data(**kwargs)
        category_name = self.kwargs.get('category_name', 'all')
        page = self.kwargs.get('pk', 1)
        posts = []
        if category_name == "all":
            if page != 1:
                context['category'] = "all"
            posts = Post.objects.filter(
                created_date__lte=timezone.now()).order_by('-created_date')
        else:
            context['category'] = category_name
            # 404s when the category does not exist.
            if get_object_or_404(Category, name=category_name):
                posts = Post.objects.filter(
                    category__name=category_name,
                    created_date__lte=timezone.now()
                ).order_by('category', '-created_date')
        # 15 posts per page.
        context['posts'] = paginator(posts, page, 15)
        return context
class PostDetailsView(PostMixin, LogMixin, generic.DetailView):
    """Single post page; bumps the view counter on every hit."""
    context_object_name = 'post'
    template_name = 'engine/post_detail.html'

    def get_context_data(self, **kwargs):
        post = self.get_object()
        post.update_views()
        context = super(PostDetailsView, self).get_context_data()
        # Rough reading-time estimate from text length (move to models).
        context['read_time'] = floor(len(post.text_big) * 0.075 / 60) + 1
        context['user'] = self.request.user
        context['text_big'] = strip_tags(post.text_big).replace('\r\n', '<br>')
        viewer = self.request.user
        # Flag the template when the viewer already follows this post.
        if viewer.is_authenticated and viewer.post_subscriber.filter(
                post__pk=post.pk):
            context['subscribe'] = True
        return context

    def get_object(self):
        return get_object_or_404(Post, url=serialize_url(self.kwargs['name']))
# Search view
class SearchListView(LogMixin, generic.ListView):
    """Full-text search over posts via the ``?q=`` parameter."""
    model = Post
    context_object_name = 'posts'
    template_name = "engine/search.html"

    def get_context_data(self, **kwargs):
        context = super(SearchListView, self).get_context_data()
        query = self.request.GET.get('q', '')
        context['search_text'] = query
        # Guard clauses: bad length, then empty result, then hits.
        if not (3 <= len(query) <= 120):
            context['text'] = "Search query should be from 3 to 120 characters"
            return context
        posts = Index.find(query)
        if not posts:
            context['text'] = "Nothing found"
            return context
        # Reuse the normal post-list template for search hits.
        self.template_name = "engine/post_list.html"
        context['posts'] = paginator(posts, self.request.GET.get('pk', 1), 15)
        context['query'] = query
        return context
# API
class IsOwnerOrIsStaffOrReadOnly(permissions.BasePermission):
    """Allow reads to anyone; writes only to the object's author or staff."""

    def has_object_permission(self, request, view, obj):
        if request.method not in permissions.SAFE_METHODS:
            # Write access: object author or staff only.
            return obj.author == request.user or request.user.is_staff
        return True
class UserViewSet(viewsets.ModelViewSet):
    """CRUD API for users, newest sign-ups first."""
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
    """CRUD API for auth groups, restricted to admin users."""
    permission_classes = (IsAdminUser,)
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
class PostViewSet(viewsets.ModelViewSet):
    """CRUD API for posts; writes restricted to the author or staff."""
    permission_classes = (IsOwnerOrIsStaffOrReadOnly, IsAuthenticatedOrReadOnly,)
    queryset = Post.objects.all().order_by('-pk')
    serializer_class = PostSerializer

    def perform_create(self, serializer):
        # Author is always the requesting user, never client-supplied.
        serializer.save(author=self.request.user)

    def perform_update(self, serializer):
        # NOTE(review): this reassigns the post's author to whoever edits
        # it (staff included) -- confirm this is intended.
        serializer.save(author=self.request.user)
class CategoryViewSet(viewsets.ModelViewSet):
    """CRUD API for post categories, newest first."""
    queryset = Category.objects.all().order_by('-pk')
    serializer_class = CategorySerializer
| 16,580 | 4,987 |
import InputOutput
# Data-set files for the photo-slideshow optimisation problem.
files = ["a_example.txt",
         "b_lovely_landscape.txt",
         "c_memorable_moments.txt",
         "d_pet_pictures.txt",
         "e_shiny_selfies.txt"]
# Parse the first (smallest, example) data set.
data = InputOutput.Data("input_data/" + files[0])
"""
data.photos[photo_id][0] - photo id
data.photos[photo_id][1] - 'h' or 'v'
data.photos[photo_id][2] - number of tags of this photo
data.photos[photo_id][3] - a list, all the tags of that photo
data.num_photos - the number of photos in the collection in total
data.tags[my_tag][0] - unique id for each tag
data.tags[my_tag][1] - counter for how often that tag occurs
"""
# Collector for the slideshow being built.
output = InputOutput.Output()
"""
output.add_slide(index_of_slide_in_show, [photo_id_0, optional_id_1])
output.write(my_output_file_name)
"""
# Boyd's solution
def get_score(slide1, slide2):
| 798 | 301 |
import tensorflow as tf
import matplotlib.pyplot as plt
import pdb
import numpy as np
import pandas as pd
from tensorflow.keras import layers
# Number of synthetic training samples.
sample_num = 500000
# coeff = tf.cast(4*np.pi*np.pi/(6.673*10**-11), dtype = tf.float32)
# Kepler's-third-law constant, set to 1 for this toy experiment.
coeff = tf.cast(1, dtype = tf.float32)
#try the range of 10**5 ~10**7 for both mass and radius
# radius = tf.random.normal(shape = [sample_num,1], mean = 0, dtype = tf.float32)
# massinv = tf.random.normal(shape = [sample_num,1], mean = 0, dtype = tf.float32)
# Synthetic inputs: orbit radius and inverse central mass, sampled
# from truncated normals around 2.
radius = tf.random.truncated_normal(shape = [sample_num,1], mean = 2, stddev = 0.5, dtype = tf.float32)
massinv = tf.random.truncated_normal(shape = [sample_num,1], mean = 2, stddev = 0.5, dtype = tf.float32)
# Target: period^2 ~ r^3 / M (Kepler's third law) with coeff folded in.
period = radius ** 3 * massinv * coeff
def normalize(data):
    """Standardize *data* to zero mean, unit variance; returns float64."""
    values = data.numpy() if isinstance(data, tf.Tensor) else data
    centered = (values - np.mean(values)) / np.std(values)
    return tf.cast(centered, dtype=tf.float64)
def denorm(data, denorm_factor):
    """Undo normalize(): *denorm_factor* is a (mean, std) pair."""
    mean, std = denorm_factor
    return data * std + mean
# Stack the two features into a (sample_num, 2) input matrix.
data = tf.stack([radius, massinv], axis = 1)
data = tf.squeeze(data)
# Labels are standardized; keep (mean, std) so predictions can be undone.
normed_label = normalize(period)
denorm_factor = (np.mean(period.numpy()), np.std(period.numpy()))
def build_model():
    """Two batch-normed sigmoid hidden layers (17 units) -> scalar output,
    compiled with Adam(1e-4) on MSE loss.
    """
    model = tf.keras.Sequential()
    model.add(layers.Dense(17))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('sigmoid'))
    model.add(layers.Dense(17))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('sigmoid'))
    model.add(layers.Dense(1))
    model.compile(optimizer=tf.keras.optimizers.Adam(0.0001),
                  loss='mse',
                  metrics=['mape', 'mae', 'mse'])
    return model
model = build_model()
# 50 epochs, 20% of samples held out for validation.
history = model.fit(data, normed_label, epochs = 50, validation_split = 0.2, batch_size = 64, verbose =1)
def plot_history(history):
    """Plot train/validation MAE and MSE curves over the epochs."""
    hist = pd.DataFrame(history.history)
    hist['epochs'] = history.epoch
    # One figure per metric, each with a train and a validation curve.
    for metric in ('mae', 'mse'):
        plt.figure()
        plt.xlabel('epochs')
        plt.ylabel(metric)
        plt.plot(hist['epochs'], hist[metric], label='train_' + metric)
        plt.plot(hist['epochs'], hist['val_' + metric], label='val_' + metric)
        plt.legend()
    plt.show()
plot_history(history)
# Radii (m) and inverse masses (1/kg) for Mercury, Mars and Venus.
# NOTE(review): these inputs are ~10**6..10**-24 while training data was
# sampled around 2 -- confirm the intended scale before trusting output.
sun_earth = {'radius': [2440*10**6, 3390*10**6, 6052*10**6],'mass':[(3.3*10**23)**-1, (6.4*10**23)**-1, (4.87*10**24)**-1]}
sun_earth_data = np.stack([sun_earth['radius'], sun_earth['mass']], axis = 1)
result1 = model.predict(sun_earth_data)
result = denorm(result1,denorm_factor)
print(result)
# Mercury, Mars, Venus (reference orbital periods; translated from Korean)
# Mercury 0.2409
# Mars 1.8809
# Venus 0.6102
# Earth 1.0000
import sys, getopt, struct, time, termios, fcntl, sys, os, colorsys, threading, time, datetime, subprocess, random, os.path, math, json
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/fbtft')
from RenderManager import RenderManager
from WanemManager import WanemManager
from ScBase import ScBase
from gfx import Rect
from DataAsset import CTX
from SeekManager import SeekManager
class ScPlayback(ScBase):
    """Replay scene for the WAN-emulation touch UI (Python 2).

    Shows a three-panel browser over recorded data files, a seek bar with
    progress dots, and transport buttons (stop/play/pause/repeat). While
    playing it feeds the recorded per-frame delay values to the WAN
    emulator via pWanem.DirectApply().
    """

    def __init__(self, pCTX, pRender, pWanem):
        super(ScPlayback, self).__init__(pCTX, pRender, pWanem)
        # Scene state-machine values; STATE_TERM is presumably defined in
        # ScBase -- TODO confirm.
        self.STATE_IDLE = 1
        self.STATE_PLAY = 2
        self.STATE_PAUSE = 3
        # Touch regions: (name, x, y, w, h, handler).
        self.ptDef.insert(
            0, self.CreateTocuhDef("BtBack", 468, 29, 62, 42, self.BtHandler))
        self.ptDef.insert(
            1, self.CreateTocuhDef("BtPrev", 470, 95, 43, 90, self.BtHandler))
        self.ptDef.insert(
            2, self.CreateTocuhDef("BtNext", 65, 95, 43, 90, self.BtHandler))
        #self.ptDef.insert(3, self.CreateTocuhDef("BtAuto", 460, 268, 80, 50, self.BtHandler))
        self.ptDef.insert(
            4, self.CreateTocuhDef("BtStop", 370, 268, 80, 50, self.BtHandler))
        self.ptDef.insert(
            5, self.CreateTocuhDef("BtPlay", 280, 268, 80, 50, self.BtHandler))
        self.ptDef.insert(
            6, self.CreateTocuhDef("BtPause", 190, 268, 80, 50,
                                   self.BtHandler))
        self.ptDef.insert(
            7, self.CreateTocuhDef("BtRepeat", 100, 268, 80, 50,
                                   self.BtHandler))
        #self.ptDef.insert(3, self.CreateTocuhDef("BtTargetL", 430, 95, 120, 90, self.BtHandler))
        #self.ptDef.insert(4, self.CreateTocuhDef("BtTargetC", 430, 95, 120, 90, self.BtHandler))
        #self.ptDef.insert(5, self.CreateTocuhDef("BtTargetR", 430, 95, 120, 90, self.BtHandler))

    def BtHandler(self, key):
        # Dispatch a touch-button press; most buttons only react in
        # specific states.
        print "BtHandler" + key + " @ " + str(self.state)
        if key == "BtBack":
            if self.state == self.STATE_IDLE:
                self.pWanem.Clear()
                self.nextScene = "Replay"
                self.state = self.STATE_TERM
        elif key == "BtPrev":
            if self.state == self.STATE_IDLE:
                self.UpdatePanel(-1)
        elif key == "BtNext":
            if self.state == self.STATE_IDLE:
                self.UpdatePanel(1)
        elif key == "BtStop":
            if self.state == self.STATE_PLAY or self.state == self.STATE_PAUSE:
                self.StopHandler()
        elif key == "BtPlay":
            if self.state == self.STATE_IDLE:
                self.RenderCurrentInfo("PLAYING")
                self.PlayHandler()
        elif key == "BtPause":
            # Set a flag only; the Update() loop performs the state change.
            if self.state == self.STATE_PLAY:
                self.seekManager.isPause = True
            elif self.state == self.STATE_PAUSE:
                self.seekManager.isPause = False
        elif key == "BtRepeat":
            self.seekManager.isRepeat = not self.seekManager.isRepeat
            self.RenderToggleFocus(4, self.seekManager.isRepeat)

    def RenderPanel(self, panelIdx, isActive, isFocus=False, datPath=""):
        # Draw one of the three data-file panels; inactive slots become
        # empty boxes. The focused panel also loads its data for playback.
        offsetX = 128 * panelIdx
        if isActive == False:
            c = self.pRender.ConvRgb(0.31, 0.2, 0.2)
            self.pRender.fb.draw.rect(c, Rect(52 + offsetX, 84, 120, 90), 0)
            return
        targetPath = self.pCTX.currentReplayData + "/" + datPath
        file = open(targetPath)
        dat = json.load(file)
        file.close()
        # Panel shows the file's modification date.
        mtime = os.path.getmtime(targetPath)
        t = datetime.datetime.fromtimestamp(mtime)
        datMtime = t.strftime("%y/%m/%d")
        c = self.pRender.ConvRgb(0.31, 0.2, 0.8)
        self.pRender.fb.draw.rect(c, Rect(52 + offsetX, 84, 120, 90), 0)
        c = self.pRender.ConvRgb(0.31, 0.2, 0.1)
        self.pRender.fb.putstr(52 + 10 + offsetX, 84 + 10, datPath[0:8], c, 2)
        c = self.pRender.ConvRgb(0.31, 0.2, 0.1)
        self.pRender.fb.putstr(52 + 10 + offsetX, 84 + 10 + 12 * 2, "Modify",
                               c, 1)
        self.pRender.fb.putstr(52 + 10 + 70 + offsetX, 84 + 10 + 12 * 2,
                               "Time", c, 1)
        self.pRender.fb.putstr(52 + 10 + offsetX, 84 + 10 + 12 * 3, datMtime,
                               c, 1)
        self.pRender.fb.putstr(
            52 + 10 + 70 + offsetX, 84 + 10 + 12 * 3,
            self.seekManager.Conv2FormatedTime(dat["dps"], dat["duration"]), c,
            1)
        self.pRender.fb.putstr(52 + 10 + offsetX, 84 + 10 + 12 * 4, "Memo", c,
                               1)
        self.pRender.fb.putstr(52 + 10 + offsetX, 84 + 10 + 12 * 5,
                               dat["memo"][0:17], c, 1)
        if isFocus:
            # Load graph, seek timing and the frame data for playback.
            self.RenderGraph(dat["graph"])
            self.seekManager.Setup(dat["dps"], dat["duration"])
            self.RenderSeekInfo()
            self.dat = dat["dat"]

    def UpdatePanel(self, vec, forceClear=False):
        # Move focus by `vec` panels, paging through the file list three
        # entries at a time.
        prevPageIdx = self.datPageIdx
        prevFocusIdx = self.datFocusIdx
        isPageSwitch = forceClear
        # Clamp at both ends of the file list.
        if (self.datPageIdx * 3 + self.datFocusIdx) == 0 and vec == -1:
            return
        if (self.datPageIdx * 3 + self.datFocusIdx) == (self.datNr -
                                                        1) and vec == 1:
            return
        if (self.datFocusIdx % 3) == 0 and vec == -1:
            self.datPageIdx -= 1
            isPageSwitch = True
        elif (self.datFocusIdx % 3) == 2 and vec == 1:
            self.datPageIdx += 1
            isPageSwitch = True
        self.datFocusIdx = (self.datFocusIdx + vec) % 3
        self.ClearFocus(prevFocusIdx)
        if isPageSwitch:
            self.datFocusIdx = 0
            # Render List
            # currentIdx = self.datPageIdx * 3 + self.datFocusIdx
            currentIdxTop = self.datPageIdx * 3
            focusIdx = 0
            for file in self.datList[currentIdxTop:currentIdxTop + 3]:
                if focusIdx == self.datFocusIdx:
                    self.RenderPanel(focusIdx, True, True, file)
                else:
                    self.RenderPanel(focusIdx, True, False, file)
                focusIdx += 1
            # Remaining slots on a partial last page are drawn inactive.
            for idx in range(focusIdx, 3):
                self.RenderPanel(idx, False)
        else:
            currentIdxTop = self.datPageIdx * 3
            focusIdx = 0
            for file in self.datList[currentIdxTop:currentIdxTop + 3]:
                if focusIdx == self.datFocusIdx:
                    targetPath = self.pCTX.currentReplayData + "/" + file
                    # NOTE: `file` is rebound here from the filename to the
                    # open file object.
                    file = open(targetPath)
                    dat = json.load(file)
                    file.close()
                    self.RenderGraph(dat["graph"])
                    self.seekManager.Setup(dat["dps"], dat["duration"])
                    self.RenderSeekInfo()
                    self.dat = dat["dat"]
                focusIdx += 1
        self.RenderFocus(self.datFocusIdx)

    def RenderFocus(self, idx):
        # Draw the highlight frame around the focused panel.
        c = self.pRender.ConvRgb(1.00, 0.9, 0.8)
        self.pRender.fb.draw.rect(c, Rect(48 + 128 * idx, 84 - 4, 128, 4), 0)
        self.pRender.fb.draw.rect(
            c, Rect(48 + 128 * idx, 84 - 4 + 90 + 4, 128, 4), 0)
        self.pRender.fb.draw.rect(c, Rect(48 + 128 * idx, 84, 4, 90), 0)
        self.pRender.fb.draw.rect(c, Rect(48 + 128 * idx + 124, 84, 4, 90), 0)

    def RenderToggleFocus(self, idx, isActivey):
        # Highlight frame around a toggle button (3 = pause, 4 = repeat).
        if idx == 3:
            xoffset = 0
        elif idx == 4:
            xoffset = 90
        else:
            return
        if isActivey:
            c = self.pRender.ConvRgb(1.00, 0.9, 0.8)
        else:
            c = self.pRender.N
        self.pRender.fb.draw.rect(c, Rect(288 + xoffset, 264 - 2, 84, 2), 0)
        self.pRender.fb.draw.rect(c,
                                  Rect(288 + xoffset, 264 - 2 + 50 + 2, 84, 2),
                                  0)
        self.pRender.fb.draw.rect(c, Rect(288 + xoffset, 264, 2, 50), 0)
        self.pRender.fb.draw.rect(c, Rect(288 + xoffset + 82, 264, 2, 50), 0)

    def ClearFocus(self, idx):
        # Erase the highlight frame (draws it in black).
        c = self.pRender.ConvRgb(0, 0, 0)
        self.pRender.fb.draw.rect(c, Rect(48 + 128 * idx, 84 - 4, 128, 4), 0)
        self.pRender.fb.draw.rect(
            c, Rect(48 + 128 * idx, 84 - 4 + 90 + 4, 128, 4), 0)
        self.pRender.fb.draw.rect(c, Rect(48 + 128 * idx, 84, 4, 90), 0)
        self.pRender.fb.draw.rect(c, Rect(48 + 128 * idx + 124, 84, 4, 90), 0)

    def RenderFootBt(self, idx, label, h):
        # Draw one footer transport button; h is the HSV hue.
        if idx == 0:
            x = 200 - 180
        elif idx == 1:
            x = 200 - 90
        elif idx == 2:
            x = 200 + 0
        elif idx == 3:
            x = 200 + 90
        elif idx == 4:
            x = 200 + 180
        c = self.pRender.ConvRgb(h, 0.6, 0.6)
        self.pRender.fb.draw.rect(c, Rect(x, 264, 80, 44), 0)
        c = self.pRender.ConvRgb(h, 0.6, 0.2)
        self.pRender.fb.draw.rect(c, Rect(x, 264 + 44, 80, 6), 0)
        if idx == 3:
            self.pRender.fb.putstr(x + 4 + 7, 278, label, c, 2)
        else:
            self.pRender.fb.putstr(x + 4, 278, label, c, 2)

    def RenderSeekInfo(self):
        # Draw the total-duration text next to the seek bar.
        self.pRender.fb.draw.rect(self.pRender.N, Rect(445, 219, 30, 7), 0)
        self.pRender.fb.putstr(445, 240 - 21,
                               self.seekManager.GetTotalFormatTime(),
                               self.pRender.W, 1)

    ################################################################################
    def Update(self):
        # Per-tick scene update, driven by the main loop.
        isRender = False
        if self.pCTX.tick == 1:
            isRender = True
        if self.state == self.STATE_PLAY:
            #######################################
            if isRender:
                if self.seekManager.isPause:
                    self.RenderToggleFocus(3, self.seekManager.isPause)
                    self.state = self.STATE_PAUSE
                    return
                self.seekManager.seekSec += 1
                if self.seekManager.seekSec < 0:
                    return
                if self.seekManager.IsTerm():
                    # End of data: restart when repeat is on, else stop.
                    if self.seekManager.isRepeat:
                        self.RenderDotAll()
                        self.PlayHandler()
                        self.UpdateSeekTime()
                    else:
                        self.StopHandler()
                    return
            #######################################
            # check Seek diff and force loop and apply.
            # @todo variable fps
            if self.pCTX.tick % self.seekManager.updateInterval == 0:
                #datSeek = self.seekSec * 30 + int(self.pCTX.tick / 2)
                #if self.pCTX.tick >= 60:
                #    print str(self.pCTX.tick) + ":" + str(self.seekManager.seekFrame) + ":" + str(self.seekManager.updateInterval)
                self.pWanem.DirectApply(self.dat[self.seekManager.seekFrame])
                if (self.pCTX.tick % 15) == 0:
                    self.RenderCurrentInfo(
                        "", self.dat[self.seekManager.seekFrame])
                self.seekManager.Update(isRender)
            # nnn....
            if isRender:
                self.UpdateSeekTime()
        elif self.state == self.STATE_PAUSE:
            if not self.seekManager.isPause:
                self.RenderToggleFocus(3, self.seekManager.isPause)
                self.state = self.STATE_PLAY
            return

    ################################################################################
    def RenderDotAll(self):
        # Reset every progress dot to its unlit state.
        for idx in range(0, self.seekManager.progressBarResolution):
            self.RenderDot(idx, False)

    def RenderDot(self, idx, isFlush):
        # Draw a single progress dot; isFlush selects the lit colour.
        w = 10
        h = 10
        if isFlush:
            c = self.pRender.ConvRgb(0.4, 1, 1)
        else:
            c = self.pRender.ConvRgb(0.4, 0.3, 0.3)
        xoffset = 11 * idx + 20
        self.pRender.fb.draw.rect(c, Rect(xoffset, 238, w, h), 0)

    def RenderGraph(self, graphDat):
        # Draw the 440-column delay histogram above the seek bar.
        c = self.pRender.ConvRgb(0, 0, 0)
        self.pRender.fb.draw.rect(c, Rect(20, 186, 440, 30), 0)
        for idx in range(0, 440):
            xoffset = idx + 20
            h = graphDat[idx]
            #c = self.pRender.ConvRgb(1.0/440.0*idx,0.8,0.8)
            c = self.pRender.ConvRgb(1.0 / 30.0 * h, 0.8, 0.8)
            self.pRender.fb.draw.rect(c, Rect(xoffset, 216 - h, 1, h), 0)

    # Update block and seek string
    def UpdateSeekTime(self):
        # Refresh the current-time text and advance the progress dots.
        if self.seekManager.seekLap >= self.seekManager.progressBarResolution:
            return
        self.pRender.fb.draw.rect(self.pRender.N, Rect(224, 219, 30, 7), 0)
        self.pRender.fb.putstr(224, 219,
                               self.seekManager.GetCurrentFormatTime(),
                               self.pRender.W, 1)
        while self.seekManager.IsSeekSecOverCurrentLap():
            self.RenderDot(self.seekManager.seekLap, True)
            self.seekManager.seekLap += 1
            if self.seekManager.seekLap >= self.seekManager.progressBarResolution:
                return
        # Blink the dot at the current lap position.
        self.isBlockFlash = not self.isBlockFlash
        self.RenderDot(self.seekManager.seekLap, self.isBlockFlash)

    def PlayHandler(self):
        # Begin (or restart) playback from the seek manager's start point.
        self.seekManager.Start()
        self.state = self.STATE_PLAY
        self.RenderDotAll()

    def StopHandler(self):
        # Halt playback, reset the UI and zero the emulator delay.
        self.seekManager.Stop()
        self.state = self.STATE_IDLE
        self.UpdateSeekTime()
        self.RenderDotAll()
        self.RenderToggleFocus(3, self.seekManager.isPause)
        self.RenderCurrentInfo("STOP", 0)
        self.pWanem.DirectApply(0)

    def Start(self):
        super(ScPlayback, self).Start()
        ##[ INIT STATE ]################################################################
        self.progressBarResolution = 40
        self.seekManager = SeekManager(self.pCTX, self.progressBarResolution)
        self.state = self.STATE_IDLE
        self.isBlockFlash = False
        self.dat = None
        ##[ Get DataDir Info ]######################################################
        self.datList = os.listdir(self.pCTX.currentReplayData)
        self.datList.sort()
        self.datPageIdx = 0
        self.datNr = len(self.datList)
        self.datFocusIdx = 0
        ##[ RENDER ]################################################################
        self.pRender.UpdateTitle("WAN Emulation - Replay")
        self.pRender.UpdateSubTitle("Dat Path : " +
                                    self.pCTX.currentReplayData)
        c = yellow = self.pRender.fb.rgb(255, 255, 0)
        self.pRender.fb.draw.rect(c, Rect(0, 54, self.pRender.xres, 1), 0)
        self.pRender.fb.draw.rect(c, Rect(0, 74, self.pRender.xres, 1), 0)
        self.pRender.fb.draw.rect(c, Rect(0, 54, 10 + 60, 20), 0)
        self.pRender.fb.draw.rect(c, Rect(480 - 10, 54, 10, 20), 0)
        self.pRender.fb.putstr(26, 54 + 7, ">>>", self.pRender.N, 1)
        ######################
        # Initial panel rendering plus prev/next arrow boxes.
        self.UpdatePanel(0, True)
        c = self.pRender.ConvRgb(0.31, 0.2, 0.2)
        self.pRender.fb.draw.rect(c, Rect(1, 84, 43, 90), 0)
        self.pRender.fb.draw.rect(c, Rect(480 - 44, 84, 43, 90), 0)
        self.pRender.fb.putstr(10, 84 + 29, '<', 0, 4)
        self.pRender.fb.putstr(480 - 34, 84 + 29, '>', 0, 4)
        ######################
        c = self.pRender.ConvRgb(0.16, 1, 0.6)
        #self.pRender.fb.draw.rect(c, Rect(1, 240 - 54, self.pRender.xres-2, 1), 0)
        self.pRender.fb.putstr(5, 240 - 21, "00:00", self.pRender.W, 1)
        self.UpdateSeekTime()
        self.pRender.fb.draw.rect(c, Rect(1, 240 - 12, self.pRender.xres - 2,
                                          1), 0)
        self.pRender.fb.draw.rect(c, Rect(1, 240 + 18, self.pRender.xres - 2,
                                          1), 0)
        #self.RenderFootBt(0, " Auto", 0.16)
        self.RenderFootBt(1, " Stop", 0.36)
        self.RenderFootBt(2, " Play", 0.36)
        self.RenderFootBt(3, "Pause", 0.16)
        self.RenderFootBt(4, "Repeat", 0.16)
        self.RenderDotAll()
        self.RenderBackBt(True)
        self.RenderCurrentInfo("STOP", 0)
        self.pRender.fb.putstr(12 + 54, 268 + 24 + 6, "msec", self.pRender.W,
                               1)
        self.pWanem.InitSingle()

    def RenderCurrentInfo(self, state="", delay=-1):
        # Draw the transport state label and the current delay value (ms).
        if state != "":
            self.pRender.fb.draw.rect(self.pRender.N, Rect(12, 268, 84, 16), 0)
            self.pRender.fb.putstr(12, 268, state, self.pRender.W, 2)
        if delay >= 0:
            self.pRender.fb.draw.rect(self.pRender.N,
                                      Rect(12, 268 + 24, 50, 16), 0)
            self.pRender.fb.putstr(12, 268 + 24, "%04d" % delay,
                                   self.pRender.W, 2)
| 16,727 | 6,299 |
# CMSSW job configuration: tests PoolOutputModule together with
# unscheduled (Task-driven) production and EDAlias product dropping.
import FWCore.ParameterSet.Config as cms
process = cms.Process("TESTOUTPUT")
# Make framework exceptions fatal so any error fails the test job.
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
# Process 20 events from the EmptySource defined below.
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(20)
)
# Producers; they run unscheduled via the Task 'process.t' below.
process.Thing = cms.EDProducer("ThingProducer")
process.OtherThing = cms.EDProducer("OtherThingProducer")
process.thingWithMergeProducer = cms.EDProducer("ThingWithMergeProducer")
process.intProducer1 = cms.EDProducer("IntProducer",
ivalue = cms.int32(7)
)
process.intProducer2 = cms.EDProducer("IntProducer",
ivalue = cms.int32(11)
)
# Aliases that expose the int products under alternate module labels.
process.aliasForInt1 = cms.EDAlias(
intProducer1 = cms.VPSet(
cms.PSet(type = cms.string('edmtestIntProduct'))
)
)
process.aliasForInt2 = cms.EDAlias(
intProducer2 = cms.VPSet(
cms.PSet(type = cms.string('edmtestIntProduct'))
)
)
process.output = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('file:PoolOutputTestUnscheduled.root'),
# Keep everything except the raw int products and the first alias;
# only aliasForInt2's view of intProducer2 survives in the file.
outputCommands = cms.untracked.vstring(
'keep *',
'drop *_intProducer1_*_*',
'drop *_aliasForInt1_*_*',
'drop *_intProducer2_*_*'
)
)
# Reads aliasForInt1 in-process: 20 events * ivalue 7 = expectedSum 140.
process.getInt = cms.EDAnalyzer("TestFindProduct",
inputTags = cms.untracked.VInputTag(
cms.InputTag("aliasForInt1"),
),
expectedSum = cms.untracked.int32(140)
)
process.source = cms.Source("EmptySource")
# Task makes the producers available for unscheduled execution.
process.t = cms.Task(process.Thing, process.OtherThing, process.thingWithMergeProducer, process.intProducer1, process.intProducer2)
process.path1 = cms.Path(process.getInt, process.t)
process.ep = cms.EndPath(process.output)
| 1,589 | 612 |
#
# PySNMP MIB module A3COM-HUAWEI-SYS-MAN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/A3COM-HUAWEI-SYS-MAN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:07:12 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# --- Symbol imports from prerequisite MIB modules (pysmi-generated) ---
h3cCommon, = mibBuilder.importSymbols("A3COM-HUAWEI-OID-MIB", "h3cCommon")
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
SnmpTagValue, SnmpTagList = mibBuilder.importSymbols("SNMP-TARGET-MIB", "SnmpTagValue", "SnmpTagList")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
IpAddress, NotificationType, Counter32, Gauge32, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, iso, Integer32, ModuleIdentity, MibIdentifier, Counter64, ObjectIdentity, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "NotificationType", "Counter32", "Gauge32", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "iso", "Integer32", "ModuleIdentity", "MibIdentifier", "Counter64", "ObjectIdentity", "Bits")
TextualConvention, DisplayString, RowStatus, DateAndTime = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus", "DateAndTime")
# Module identity for A3COM-HUAWEI-SYS-MAN-MIB (system management:
# clock, current config/image status, reload scheduling, bootrom update).
h3cSystemMan = ModuleIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3))
h3cSystemMan.setRevisions(('2004-04-08 13:45',))
# Fix: the guarded call below had lost its indentation, leaving the 'if'
# statement without a body (a SyntaxError). It must be nested under the
# pysmi version check, since setRevisionsDescriptions only exists there.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: h3cSystemMan.setRevisionsDescriptions((' ',))
if mibBuilder.loadTexts: h3cSystemMan.setLastUpdated('200906070000Z')
if mibBuilder.loadTexts: h3cSystemMan.setOrganization('Hangzhou H3C Tech. Co., Ltd.')
if mibBuilder.loadTexts: h3cSystemMan.setContactInfo('Platform Team Hangzhou H3C Tech. Co., Ltd. Hai-Dian District Beijing P.R. China http://www.h3c.com Zip:100085')
if mibBuilder.loadTexts: h3cSystemMan.setDescription('This MIB contains objects to manage the system. It focuses on the display of current configure file and image file,and the definition of reloading image. Add the support for XRN. ')
# --- Object subtree roots and clock objects (...10.2.3.1 / ...1.1) ---
h3cSystemManMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1))
h3cSysClock = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1))
# Read-write local system time (DateAndTime textual convention).
h3cSysLocalClock = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 1), DateAndTime()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cSysLocalClock.setStatus('current')
if mibBuilder.loadTexts: h3cSysLocalClock.setDescription(' This node gives the current local time of the system. The unit of it is DateAndTime. ')
# Summer-time (DST) configuration branch.
h3cSysSummerTime = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 2))
# enable(1)/disable(2); note this scalar is read-only.
h3cSysSummerTimeEnable = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysSummerTimeEnable.setStatus('current')
if mibBuilder.loadTexts: h3cSysSummerTimeEnable.setDescription('This node indicates the status of summer time. If the value of this node is enable, means that summer time is enabled. If the value is disable, means that summer time is disabled. ')
h3cSysSummerTimeZone = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 2, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cSysSummerTimeZone.setStatus('current')
if mibBuilder.loadTexts: h3cSysSummerTimeZone.setDescription(' This node describes the name of time zone in summer. The string is only used to display in local time when summer time is running. That the value of h3cSysLocalClock has the time zone information means that summer time is running. ')
# oneOff(1) = takes effect once at a specific time; repeating(2) = yearly.
h3cSysSummerTimeMethod = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("oneOff", 1), ("repeating", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cSysSummerTimeMethod.setStatus('current')
if mibBuilder.loadTexts: h3cSysSummerTimeMethod.setDescription(' This node provides the execute method of summer time. oneOff(1): means that summer time only takes effect at specified time. repeating(2): means that summer time takes effect in specified month/day once a year. ')
# Fixed-length (8-octet) DateAndTime start/end of the DST window.
h3cSysSummerTimeStart = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 2, 4), DateAndTime().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cSysSummerTimeStart.setStatus('current')
if mibBuilder.loadTexts: h3cSysSummerTimeStart.setDescription(' This node provides the start time of summer time. ')
h3cSysSummerTimeEnd = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 2, 5), DateAndTime().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cSysSummerTimeEnd.setStatus('current')
if mibBuilder.loadTexts: h3cSysSummerTimeEnd.setDescription(' This node provides the end time of summer time. The end time must be more than start time one day and less than start time one year. ')
# DST offset in seconds (0..86399 = up to one day minus one second).
h3cSysSummerTimeOffset = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 2, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 86399))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cSysSummerTimeOffset.setStatus('current')
if mibBuilder.loadTexts: h3cSysSummerTimeOffset.setDescription(' This node provides the offset time of summer time. The offset time(in seconds) means that how much time need to be appended to the local time. ')
# String form of the local clock (16..24 octets).
h3cSysLocalClockString = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(16, 24))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cSysLocalClockString.setStatus('current')
if mibBuilder.loadTexts: h3cSysLocalClockString.setDescription('This node gives the current local time of the system. For example, Tuesday May 26, 2002 at 1:30:15 would be displayed as: 2002-5-26T13:30:15.0Z')
# --- Current-status table (...1.2): per-entity running config/image/bootrom ---
h3cSysCurrent = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2))
h3cSysCurTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2, 1), )
if mibBuilder.loadTexts: h3cSysCurTable.setStatus('current')
if mibBuilder.loadTexts: h3cSysCurTable.setDescription(' The current status of system. A configuration file, an image file and bootrom information are used to describe the current status. ')
# Row indexed by the physical-entity index defined below.
h3cSysCurEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2, 1, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCurEntPhysicalIndex"))
if mibBuilder.loadTexts: h3cSysCurEntry.setStatus('current')
if mibBuilder.loadTexts: h3cSysCurEntry.setDescription(' An entry of h3cSysCurTable. ')
# Index column (not-accessible by convention: no setMaxAccess call).
h3cSysCurEntPhysicalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: h3cSysCurEntPhysicalIndex.setStatus('current')
if mibBuilder.loadTexts: h3cSysCurEntPhysicalIndex.setDescription('The value of this object is the entity index which depends on the implementation of ENTITY-MIB. If ENTITY-MIB is not supported, the value for this object is the unit ID for XRN devices , 0 for non-XRN device which has only one mainboard, the board number for non-XRN device which have several mainboards. ')
# Cross-reference into h3cSysCFGFileTable; 0 = no configuration file in use.
h3cSysCurCFGFileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysCurCFGFileIndex.setStatus('current')
if mibBuilder.loadTexts: h3cSysCurCFGFileIndex.setDescription(' The startup configuration file currently used by the specified entity. If the value of it is zero, no configuration file is used. It will be the value of corresponding h3cSysCFGFileIndex in h3cSysCFGFileTable. ')
# Cross-reference into h3cSysImageTable.
h3cSysCurImageIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysCurImageIndex.setStatus('current')
if mibBuilder.loadTexts: h3cSysCurImageIndex.setDescription('The image file currently used by the specified entity. It will be the value of corresponding h3cSysImageIndex in h3cSysImageTable.')
h3cSysCurBtmFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2, 1, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysCurBtmFileName.setStatus('current')
if mibBuilder.loadTexts: h3cSysCurBtmFileName.setDescription('The bootrom file currently used by the specified entity.')
h3cSysCurUpdateBtmFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2, 1, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysCurUpdateBtmFileName.setStatus('current')
if mibBuilder.loadTexts: h3cSysCurUpdateBtmFileName.setDescription(' The default value of this object is the same as the value of h3cSysCurBtmFileName. The value will be changed after updating the bootrom successfully. This bootrom will take effect on next startup. ')
# --- Reload subtree (...1.3): schedule selection, action trigger, schedule table ---
h3cSysReload = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3))
# Points at one row of h3cSysReloadScheduleTable (0 = none selected).
h3cSysReloadSchedule = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cSysReloadSchedule.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadSchedule.setDescription(' The object points one row in h3cSysReloadScheduleTable. Its value is equal to the value of h3cSysReloadScheduleIndex. When a reload action is finished, the value of it would be zero which means no any reload schedule is selected. ')
# Write-to-trigger control: reloadOnSchedule(2), reloadAtOnce(3), reloadCancel(4).
h3cSysReloadAction = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("reloadUnavailable", 1), ("reloadOnSchedule", 2), ("reloadAtOnce", 3), ("reloadCancel", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cSysReloadAction.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadAction.setDescription(" Writing reloadOnSchedule(2) to this object performs the reload operation on schedule. If h3cSysReloadScheduleTime is not set, the value of h3cSysReloadAction can't be set to 'reloadOnSchedule(2)'. Writing reloadAtOnce(3)to this object performs the reload operation at once, regardless of the h3cSysReloadScheduleTime. When reloadCancel(4)is set, the scheduled reload action will be cancelled and the value of h3cSysReloadAction will be 'reloadUnavailable(1)',the value of h3cSysReloadSchedule will be 0, h3cSysReloadTag will be given a value of zero length, but the content of h3cSysReloadScheduleTable will remain. The h3cSysReloadSchedule and h3cSysReloadTag determine the reload entity(ies) in mutually exclusive way. And the h3cSysReloadSchedule will be handled at first. If the value of h3cSysReloadSchedule is invalid, then the h3cSysReloadTag will be handled. If the value of h3cSysReloadSchedule is valid, the value of h3cSysReloadTag is ignored and a reload action will be implemented to the entity specified by h3cSysReloadEntity in the entry pointed by h3cSysReloadSchedule. If h3cSysReloadSchedule is valid, but the entry h3cSysReloadSchedule pointing to is not active, the reload action will be ignored , and an inconsistent value will be returned. If multiple entities are required to be reloaded at the same time, the value of h3cSysReloadTag must be specified to select the reload parameters in the h3cSysReloadSceduelTable, and h3cSysReloadSchedule must have the value of '0'. If the whole fabric is to be reloaded in an XRN device, all the units in the fabric must have at least one entry in the h3cSysReloadSceduelTable with the same tag in h3cSysReloadSceduelTagList. When a reload action is done, or there is no reload action, the value should be reloadUnavailable(1). ")
# Table of reload parameter sets, one row per entity/schedule.
h3cSysReloadScheduleTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3), )
if mibBuilder.loadTexts: h3cSysReloadScheduleTable.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadScheduleTable.setDescription(' A reload parameters set table. The table is exclusively used for reloading. When reloading action finished, the value of the table may be empty or still exist. If the mainboard in non-XRN device or all the units of the fabric in XRN device are reloaded,then the table will be refreshed. ')
h3cSysReloadScheduleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadScheduleIndex"))
if mibBuilder.loadTexts: h3cSysReloadScheduleEntry.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadScheduleEntry.setDescription('Entry of h3cSysReloadScheduleTable.')
# Composite index: high 16 bits = physical index, low 16 bits = user/random index.
h3cSysReloadScheduleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: h3cSysReloadScheduleIndex.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadScheduleIndex.setDescription('The index of h3cSysReloadScheduleTable. There are two parts for this index depicted as follows: 31 15 0 +++++++++++++++++++++++++++++++++++++++++ + physical index + random index + ( bit 16..31 ) ( bit 0..15 ) +++++++++++++++++++++++++++++++++++++++++ From bit0 to bit15 (two bytes), if the row is automatic created, the value is zero, and if the row is created by users, then the value is determined by the users. From bit16 to bit31 (two bytes) is the physical index the same as the entPhysicalIndex specified in ENTITY-MIB. For XRN devices, physical index is the value of a chassis entPhysicalIndex. 0 for non-XRN device which has only one main board, the board number for non-XRN device which have multiple main boards.')
h3cSysReloadEntity = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysReloadEntity.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadEntity.setDescription(' The value of h3cSysReloadEntity indicates an entry in entPhysicalTable, which is the physical entity to be reloaded. If ENTITY-MIB is not supported,the value for this object is the unit ID for XRN devices , 0 for non-XRN device which has only one mainboard, the board number for non-XRN device which have several mainboards. Each entity has only one row in h3cSysReloadScheduleTable. ')
h3cSysReloadCfgFile = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysReloadCfgFile.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadCfgFile.setDescription(' The value indicates an entry in h3cSysCFGFileTable. It defines a configuration file for reload action. It is the value of corresponding h3cSysCFGFileIndex in h3cSysCFGFileTable. The zero value means no configuration file has been set for this entry, and no configuration file is used during system reloading. ')
h3cSysReloadImage = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysReloadImage.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadImage.setDescription(' The value indicates an entry in h3cSysImageTable. It defines an image file for reload action. It is the value of corresponding h3cSysImageIndex in h3cSysImageTable. If dual image is supported, the main image attribute can be set through this object or by h3cSysImageType of h3cSysImageTable of the entity. It is strongly suggested to set this attribute by the latter. If main image attribute is set here, the h3cSysImageType in h3cSysImageTable of the corresponding entity will be updated, and vice versa. Before reboot, the device will check the validation of the entry. If the file does not exist, the device will not reboot and a trap will be send to NMS. ')
h3cSysReloadReason = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysReloadReason.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadReason.setDescription(" The reason of system's reloading. It is a zero length octet string when not set. ")
# Fixed 8-octet DateAndTime for the scheduled reload moment.
h3cSysReloadScheduleTime = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 6), DateAndTime().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysReloadScheduleTime.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadScheduleTime.setDescription(' Specify the local time at which the reload action will occur. we will only take octet strings with length 8 for this object which indicates the local time of the switch. The maximum scheduled interval between the specified time and the current system clock time is 24 days . field octets contents range ----- ------ -------- ----- 1 1-2 year 0..65536 2 3 month 1..12 3 4 day 1..31 4 5 hour 0..23 5 6 minutes 0..59 6 7 seconds 0..60 For example, Tuesday May 26, 1992 at 1:30:15 PM would be displayed as: 1992-5-26,13:30:15 If the set value is less than the value of h3cSysLocalClock or beyond the maximum scheduled time limit, a bad value error occurred. The value of all-zero octet strings indicates system reload at once if the reload action is reloadOnSchedule(2). ')
h3cSysReloadRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysReloadRowStatus.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadRowStatus.setDescription(' If one of the value of h3cSysReloadEntity,h3cSysReloadImage is invalid, the value of h3cSysReloadRowStatus can not be set to the value of ACTIVE. A valid entry means the specified element is available in current system. ')
h3cSysReloadScheduleTagList = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 8), SnmpTagList()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysReloadScheduleTagList.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadScheduleTagList.setDescription(' It specifies a tag list for the entry. ')
# Single tag value used to select matching rows by tag-list membership.
h3cSysReloadTag = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 4), SnmpTagValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cSysReloadTag.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadTag.setDescription("This object contains a single tag value which is used to select entries in the h3cSysReloadScheduleTable. In the h3cSysReloadScheduleTable,any entry that contains a tag value which is equal to the value of this object is selected. For example, the value of h3cSysReloadTag is 'TOM',and the h3cSysReloadScheduleTagList of each h3cSysReloadScheduleTable entry are as follows: 1)'TOM,ROBERT,MARY' 2)'TOM,DAVE' 3)'DAVE,MARY' Since there are 'TOM' in 1) and 2),so 1) and 2) are selected. If this object contains a value of zero length, no entries are selected. ")
# --- Image subtree (...1.4): count and per-image table ---
h3cSysImage = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4))
h3cSysImageNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysImageNum.setStatus('current')
if mibBuilder.loadTexts: h3cSysImageNum.setDescription(' The number of system images. It indicates the total entries of h3cSysImageTable. ')
h3cSysImageTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 2), )
if mibBuilder.loadTexts: h3cSysImageTable.setStatus('current')
if mibBuilder.loadTexts: h3cSysImageTable.setDescription("The system image management table. When 'copy srcfile destfile' is executed via the CLI, if destfile is not existed, then h3cSysImageType of the new file will be 'none'; otherwise h3cSysImageType keeps its current value. When 'move srcfile destfile' is executed via the CLI, h3cSysImageType and h3cSysImageIndex remain the same while h3cSysImageLocation changes. When 'rename srcfile' is executed via the CLI,h3cSysImageType and h3cSysImageIndex remain the same while h3cSysImageName changes. When 'delete srcfile' is executed via the CLI, the file is deleted from h3cSysImageTable while index of the file keeps and will not be allocated. ")
h3cSysImageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 2, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageIndex"))
if mibBuilder.loadTexts: h3cSysImageEntry.setStatus('current')
if mibBuilder.loadTexts: h3cSysImageEntry.setDescription(' An entity image entry. Each entry consists of information of an entity image. The h3cSysImageIndex exclusively defines an image file. ')
# Composite index: high 16 bits = physical index, low 16 bits = image index.
h3cSysImageIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: h3cSysImageIndex.setStatus('current')
if mibBuilder.loadTexts: h3cSysImageIndex.setDescription("There are two parts for the index depicted as follows: 31 15 0 +++++++++++++++++++++++++++++++++++ + physical index + image index + +++++++++++++++++++++++++++++++++++ From bit0 to bit15 (two bytes) is the image index;Image file Index is a monotonically increasing integer for the sole purpose of indexing events. When it reaches the maximum value, an extremely unlikely event, the agent wraps the value back to 1 and may flush existing entries. From bit16 to bit31 (two bytes) is the physical index the same as the entPhysicalIndex specified in ENTITY-MIB. If ENTITY-MIB is not supported,the value for this object is the unit ID for XRN devices ,0 for non-XRN device which has only one main board,the board number for non-XRN device which have several main boards. Any index beyond the above range will not be supported. If a file is added in, its h3cSysImageIndex will be the maximum image index plus one. If the image file is removed, renamed, or moved from one place to another, its h3cSysImageIndex is not reallocated. If the image file's content is replaced, its h3cSysImageIndex will not change. ")
# File name only; the directory lives in h3cSysImageLocation.
h3cSysImageName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysImageName.setStatus('current')
if mibBuilder.loadTexts: h3cSysImageName.setDescription('The file name of the image. It MUST NOT contain the path of the file.')
h3cSysImageSize = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysImageSize.setStatus('current')
if mibBuilder.loadTexts: h3cSysImageSize.setDescription(' Size of the file in bytes. ')
h3cSysImageLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 2, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysImageLocation.setStatus('current')
if mibBuilder.loadTexts: h3cSysImageLocation.setDescription(' The directory path of the image. Its form should be the same as what defined in file system. Currently it is defined as follows: For mainboard: flash:/ For slave mainboard and subboards: slotN#flash:/ For XRN devices: unitN>slotN#flash:/ ')
# Reload-sequence attribute of an image: main/backup/secure combinations.
h3cSysImageType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("main", 1), ("backup", 2), ("none", 3), ("secure", 4), ("main-backup", 5), ("main-secure", 6), ("backup-secure", 7), ("main-backup-secure", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cSysImageType.setStatus('current')
# Fix: the description string literal below had been broken across two
# physical lines (a SyntaxError for a single-quoted string); the two
# halves are rejoined onto one line, content unchanged.
if mibBuilder.loadTexts: h3cSysImageType.setDescription(" It indicates the reloading sequence attribute of the image. For devices which support dual image: If the value is 'main(1)',the image will be the first image in the next reloading procedure. If the value is 'backup(2)', the image will be used if the main image fails. If the value is 'secure(4)', the image will be used if the main image and backup image both fail. If the value is 'none(3)',the image will not be used in the next reloading procedure. At the same time,you also can specify the main image by h3cSysReloadImage in h3cSysReloadScheduleTable. If the image is different from previous main image, the previous main image will not be main image again. And the image table will update with this variation. Vice versa, if you have defined the reload schedule, and then you define a new main image through h3cSysImageType when you are waiting the reload schedule to be executed, the real main image will be the latest one. It is strongly suggested to define the main image here, not by h3cSysReloadImage in h3cSysReloadScheduleTable. There are some rules for setting the value of h3cSysImageType: a)When a new image file is defined as 'main' or 'backup' file,the h3cSysImageType of old 'main' or 'backup' file will automatically be 'none'. b)It is forbidden to set 'none' attribute manually. c)It is forbidden to set 'secure' attribute manually. d)If 'main' image is set to 'backup', the file keeps 'main'. And vice versa. At this time, the file has 'main-backup' property. e)If the secure image is set to 'main' or 'backup', the file has 'main-secure' or 'backup-secure'property. f)If the secure image is set to 'main' and 'backup', the file has the 'main-backup-secure' property. g)If the none image is set to 'main' or 'backup', the file has the 'main' or 'backup' property. The following table describes whether it is ok to set to another state directly from original state. +--------------+-----------+-------------+-------------+ | set to | set to | set to | set to | | | | | | original | 'main' | 'backup' | 'none' | 'secure' | state | | | | | --------------+--------------+-----------+-------------+-------------+ | | | | | main | --- | yes | no | no | | | | | | | | | | | --------------+--------------+-----------+-------------|-------------+ | | | | | backup | yes | --- | no | no | | | | | | --------------+--------------+-----------+-------------|-------------+ | | | | | | | | | | none | yes | yes | --- | no | | | | | | --------------+--------------+-----------+-------------+-------------+ | | | | | secure | yes | yes | no | --- | | | | | | | | | | | --------------+--------------+-----------+-------------+-------------+ If there is one main image in the system, one row of H3cSysReloadScheduleEntry whose h3cSysReloadImage is equal to the main image's h3cSysImageIndex will be created automatically. But if any row is deleted, it will not be created automatically in h3cSysReloadScheduleTable. For the device which doesn't support dual image(main/backup): Only 'main' and 'none' is supported and it only can be set from none to main. When a new image file is defined as 'main' file,the h3cSysImageType of old 'main' file will automatically be 'none'. ")
# --- Configuration-file subtree (...1.5): count and per-file table ---
h3cSysCFGFile = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5))
h3cSysCFGFileNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysCFGFileNum.setStatus('current')
if mibBuilder.loadTexts: h3cSysCFGFileNum.setDescription(' The number of the configuration files in the system. It indicates the total entries of h3cSysCFGFileTable. ')
h3cSysCFGFileTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5, 2), )
if mibBuilder.loadTexts: h3cSysCFGFileTable.setStatus('current')
if mibBuilder.loadTexts: h3cSysCFGFileTable.setDescription("A table of configuration files in this system. At present, the system doesn't support dual configure file, it should act as 'dual image' if dual configure file is supported. ")
h3cSysCFGFileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5, 2, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCFGFileIndex"))
if mibBuilder.loadTexts: h3cSysCFGFileEntry.setStatus('current')
if mibBuilder.loadTexts: h3cSysCFGFileEntry.setDescription(' A configuration file entry. Each entry consists of information of a configuration file. h3cSysCFGFileIndex exclusively decides a configuration file. ')
# Composite index: high 16 bits = physical index, low 16 bits = cfg-file index.
h3cSysCFGFileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: h3cSysCFGFileIndex.setStatus('current')
if mibBuilder.loadTexts: h3cSysCFGFileIndex.setDescription('There are two parts for the index depicted as follows: 31 15 0 +++++++++++++++++++++++++++++++++++ + physical index + cfgFile index + +++++++++++++++++++++++++++++++++++ From bit0 to bit15 (two bytes) is the configuration file index; the configuration file index is a monotonically increasing integer for the sole purpose of indexing events. When it reaches the maximum value, an extremely unlikely event, the agent wraps the value back to 1 and may flush existing entries. From bit16 to bit31 (two bytes) is the physical index the same as the entPhysicalIndex specified in ENTITY-MIB. If ENTITY-MIB is not supported, the value for this object is the unit ID for XRN devices ,0 for non-XRN device which has only one slot,the board number for non-XRN device which have several slots. Any index beyond the above range will not be supported. ')
h3cSysCFGFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysCFGFileName.setStatus('current')
if mibBuilder.loadTexts: h3cSysCFGFileName.setDescription(' Configuration file name. The name should not include the colon (:) character as it is a special separator character used to delineate the device name, partition name and the file name. ')
h3cSysCFGFileSize = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysCFGFileSize.setStatus('current')
if mibBuilder.loadTexts: h3cSysCFGFileSize.setDescription(' Size of the file in bytes. Note that it does not include the size of the filesystem file header. File size will always be non-zero. ')
h3cSysCFGFileLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5, 2, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysCFGFileLocation.setStatus('current')
if mibBuilder.loadTexts: h3cSysCFGFileLocation.setDescription(' The directory path of the image. Its form should be the same as what defined in filesystem. Currently it is defined as follows: For mainboard: flash:/ For slave mainboard and subboards: slotN#flash:/ For XRN devices: unitN>slotN#flash:/ ')
# ---------------------------------------------------------------------------
# h3cSysBtmFile: bootrom file management subtree (...2.3.1.6).
# Rows of h3cSysBtmLoadTable are created via h3cSysBtmRowStatus to trigger a
# bootrom update; the result is read back through h3cSysBtmErrorStatus.
# ---------------------------------------------------------------------------
h3cSysBtmFile = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6))
h3cSysBtmFileLoad = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 1))
# Upper bound on the number of rows in h3cSysBtmLoadTable per device/unit.
h3cSysBtmLoadMaxNumber = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysBtmLoadMaxNumber.setStatus('current')
if mibBuilder.loadTexts: h3cSysBtmLoadMaxNumber.setDescription(' This object shows the maximum number of h3cSysBtmLoadEntry in each device/unit. ')
h3cSysBtmLoadTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2), )
if mibBuilder.loadTexts: h3cSysBtmLoadTable.setStatus('current')
if mibBuilder.loadTexts: h3cSysBtmLoadTable.setDescription(' This table is used to update the bootrom and show the results of the update operation. The bootrom files are listed at the h3cFlhFileTable. These files are used to update bootrom. ')
h3cSysBtmLoadEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysBtmLoadIndex"))
if mibBuilder.loadTexts: h3cSysBtmLoadEntry.setStatus('current')
if mibBuilder.loadTexts: h3cSysBtmLoadEntry.setDescription(' Entries in the h3cSysBtmLoadTable are created and deleted using the h3cSysBtmRowStatus object. When a new row is being created and the number of entries is h3cSysBtmLoadMaxNumber, the row with minimal value of h3cSysBtmLoadTime and the value of h3cSysBtmFileType is none(2), should be destroyed automatically. ')
# Index: bits 16..31 = physical index, bits 0..15 = caller-chosen (SNMP) or
# system-chosen (CLI) sub-index — see DESCRIPTION.
h3cSysBtmLoadIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: h3cSysBtmLoadIndex.setStatus('current')
if mibBuilder.loadTexts: h3cSysBtmLoadIndex.setDescription(' The index of h3cSysBtmLoadTable. There are two parts for this index depicted as follows: 31 15 0 +++++++++++++++++++++++++++++++++++++++++ + physical index + random index + ( bit 16..31 ) ( bit 0..15 ) +++++++++++++++++++++++++++++++++++++++++ From bit0 to bit15 (two bytes), if the row is created by command line, the value is determined by system, and if the row is created by SNMP, the value is determined by users. From bit16 to bit31 (two bytes) is the physical index the same as the entPhysicalIndex specified in ENTITY-MIB. If ENTITY-MIB is not supported, the value of this object is the unit ID for XRN devices, 0 for non-XRN device which has only one main board, the board number for non-XRN device which has multiple main boards. ')
# Writable-at-creation columns (read-create): file name and type.
h3cSysBtmFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysBtmFileName.setStatus('current')
if mibBuilder.loadTexts: h3cSysBtmFileName.setDescription(' The bootrom file name is determined by the users. The file must exist in corresponding entity. The validity of the bootrom file will be identified by system. If the file is invalid, the bootrom should fail to be updated, and the value of h3cSysBtmErrorStatus should be failed(4). ')
h3cSysBtmFileType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("main", 1), ("none", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysBtmFileType.setStatus('current')
if mibBuilder.loadTexts: h3cSysBtmFileType.setDescription(' main(1) - The effective bootrom file. none(2) - The noneffective file. When bootrom is being updated, this object must be set to main(1). When bootrom is updated successfully, this object should be main(1), and the former object with the same physical index should be none(2). When bootrom failed to be updated, this object should be none(2). ')
# Row control plus read-only result/timestamp columns.
h3cSysBtmRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysBtmRowStatus.setStatus('current')
if mibBuilder.loadTexts: h3cSysBtmRowStatus.setDescription(' Only support active(1), createAndGo(4), destroy(6). When a row is created successfully, the value of this object should be active(1), the value of h3cSysBtmFileName and h3cSysBtmFileType can not be modified by users. When bootrom is being updated, the value of h3cSysBtmErrorStatus is inProgress(2). When bootrom failed to be updated, the value of h3cSysBtmErrorStatus should be failed(4). When bootrom is updated successfully, the value of h3cSysBtmErrorStatus should be success(3). The value of h3cSysCurUpdateBtmFileName should change to the new bootrom file name. When another row is created successfully with the same physical index, and the update is successful, then the value of former h3cSysBtmFileType should be none(2) automatically. If a row is destroyed, h3cSysCurUpdateBtmFileName should not change. If a device/unit reboots, h3cSysBtmLoadTable should be empty. ')
h3cSysBtmErrorStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("invalidFile", 1), ("inProgress", 2), ("success", 3), ("failed", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysBtmErrorStatus.setStatus('current')
if mibBuilder.loadTexts: h3cSysBtmErrorStatus.setDescription(' This object shows the status of the specified operation after creating a row. invalidFile(1) - file is invalid. inProgress(2) - the operation is in progress. success(3) - the operation was done successfully. failed(4) - the operation failed. ')
h3cSysBtmLoadTime = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2, 1, 6), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysBtmLoadTime.setStatus('current')
if mibBuilder.loadTexts: h3cSysBtmLoadTime.setDescription(' This object indicates operation time. ')
# ---------------------------------------------------------------------------
# h3cSysPackage: software package inventory subtree (...2.3.1.7).
# h3cSysPackageTable lists installed package files; h3cSysPackageNum gives
# its row count.
# ---------------------------------------------------------------------------
h3cSysPackage = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7))
h3cSysPackageNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysPackageNum.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageNum.setDescription(' The number of software packages. It indicates the total entries of h3cSysPackageTable. ')
h3cSysPackageTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2), )
if mibBuilder.loadTexts: h3cSysPackageTable.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageTable.setDescription('The system package management table. ')
h3cSysPackageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysPackageIndex"))
if mibBuilder.loadTexts: h3cSysPackageEntry.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageEntry.setDescription(' An software package entry. Each entry consists of information of an software package. ')
# Index: bits 16..31 = physical index, bits 0..15 = package index; stable
# across rename/move of the file (see DESCRIPTION).
h3cSysPackageIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: h3cSysPackageIndex.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageIndex.setDescription("There are two parts for the index depicted as follows: 31 15 0 +++++++++++++++++++++++++++++++++++ + physical index + package index + +++++++++++++++++++++++++++++++++++ From bit0 to bit15 (two bytes) is the Package index; Package file Index is a monotonically increasing integer for the sole purpose of indexing events. When it reaches the maximum value, an extremely unlikely event, the agent wraps the value back to 1 and may flush existing entries. From bit16 to bit31 (two bytes) is the physical index the same as the entPhysicalIndex specified in ENTITY-MIB. Any index beyond the above range will not be supported. If a file is added in, its h3cSysPackageIndex will be the maximum image index plus one. If the package file is removed, renamed, or moved from one place to another, its h3cSysPackageIndex is not reallocated. If the package file's content is replaced, its h3cSysPackageIndex will not change. ")
# Read-only descriptive columns.
h3cSysPackageName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysPackageName.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageName.setDescription(' The file name of the package. It MUST NOT contain the path of the file. ')
h3cSysPackageSize = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysPackageSize.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageSize.setDescription(' Size of the file in bytes. ')
h3cSysPackageLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysPackageLocation.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageLocation.setDescription(' The directory path of the package. Its form should be the same as what defined in file system. Currently it is defined as follows: For mainboard: flash:/ For slave mainboard and subboards: slotN#flash:/ ')
h3cSysPackageType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("boot", 1), ("system", 2), ("feature", 3), ("patch", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysPackageType.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageType.setDescription(' It indicates the type of the package file. boot : kernel, file system, memory management and other core components. system : interface management, configuration management and other basic system package. feature : feature packages, providing different services. patch : patch file contains fixes for a specific defect. ')
# Only writable column: attribute controlling use in the next reload.
h3cSysPackageAttribute = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("primary", 2), ("secondary", 3), ("primarySecondary", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cSysPackageAttribute.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageAttribute.setDescription(" It indicates the attribute of the package file. If the value is 'primary', the package will be the first package in the next reloading procedure. If the value is 'secondary', the package will be used if the primary package fails. If the value is 'none', it will not be used in the next reloading procedure. ")
h3cSysPackageStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("inactive", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysPackageStatus.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageStatus.setDescription(" It indicates the status of the package file. If this file is used in the current system, its status is 'active'. ")
h3cSysPackageDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysPackageDescription.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageDescription.setDescription(' It is the description of the package. ')
h3cSysPackageFeature = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysPackageFeature.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageFeature.setDescription(' Indicate the feature of the package. Different package files could be the same feature. ')
h3cSysPackageVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysPackageVersion.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageVersion.setDescription(' Indicate the version of the package. ')
# ---------------------------------------------------------------------------
# h3cSysPackageOperate: activate/deactivate packages listed in
# h3cSysPackageTable. Rows are created via h3cSysPackageOperateRowStatus,
# reference a package by h3cSysPackageOperatePackIndex, and report the
# outcome in h3cSysPackageOperateResult.
# ---------------------------------------------------------------------------
h3cSysPackageOperateEntryLimit = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cSysPackageOperateEntryLimit.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageOperateEntryLimit.setDescription(' The maximum number of the entries in h3cSysPackageOperateTable. ')
h3cSysPackageOperateTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 4), )
if mibBuilder.loadTexts: h3cSysPackageOperateTable.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageOperateTable.setDescription('A table of package file operate.')
h3cSysPackageOperateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 4, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysPackageOperateIndex"))
if mibBuilder.loadTexts: h3cSysPackageOperateEntry.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageOperateEntry.setDescription(' An operate request entry. ')
h3cSysPackageOperateIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: h3cSysPackageOperateIndex.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageOperateIndex.setDescription(' The unique index value of a row in this table. ')
# Foreign key into h3cSysPackageTable (read-create).
h3cSysPackageOperatePackIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysPackageOperatePackIndex.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageOperatePackIndex.setDescription(' Specify the package file in the h3cSysPackageTable. ')
h3cSysPackageOperateStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("inactive", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysPackageOperateStatus.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageOperateStatus.setDescription(' activate or deactivate a package in the h3cSysPackageTable. ')
h3cSysPackageOperateRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 4, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysPackageOperateRowStatus.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageOperateRowStatus.setDescription(" the status of this table entry. When the status is active all the object's value in the entry is not allowed to modified. ")
h3cSysPackageOperateResult = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("opInProgress", 1), ("opSuccess", 2), ("opUnknownFailure", 3), ("opInvalidFile", 4), ("opNotSupport", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysPackageOperateResult.setStatus('current')
if mibBuilder.loadTexts: h3cSysPackageOperateResult.setDescription(' the result of the operation. ')
# ---------------------------------------------------------------------------
# h3cSysIpeFile: IPE (Image Package Envelope) file subtree (...2.3.1.8).
# h3cSysIpeFileTable lists IPE container files present on the device.
# NOTE(review): "maximum image ndex" in the index DESCRIPTION is a typo
# carried over verbatim from the MIB source; it must not be altered here
# because the string is runtime data emitted by the SMI framework.
# ---------------------------------------------------------------------------
h3cSysIpeFile = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8))
h3cSysIpeFileNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysIpeFileNum.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpeFileNum.setDescription(' The number of software IPE(Image Package Envelop) files. It indicates the total entries of h3cSysIpeFileTable. ')
h3cSysIpeFileTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 2), )
if mibBuilder.loadTexts: h3cSysIpeFileTable.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpeFileTable.setDescription('The system IPE file manage table. ')
h3cSysIpeFileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 2, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysIpeFileIndex"))
if mibBuilder.loadTexts: h3cSysIpeFileEntry.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpeFileEntry.setDescription(' An IPE package file entry. Each entry consists of information of an IPE package file. h3cSysIpeFileIndex exclusively decides an IPE file. ')
# Index: same bit-split scheme as the package/CFG tables above.
h3cSysIpeFileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: h3cSysIpeFileIndex.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpeFileIndex.setDescription("There are two parts for the index depicted as follows: 31 15 0 +++++++++++++++++++++++++++++++++++ + physical index + IPE index + +++++++++++++++++++++++++++++++++++ From bit0 to bit15 (two bytes) is the IPE file index; IPE file Index is a monotonically increasing integer for the sole purpose of indexing events. When it reaches the maximum value, an extremely unlikely event, the agent wraps the value back to 1 and may flush existing entries. From bit16 to bit31 (two bytes) is the physical index the same as the entPhysicalIndex specified in ENTITY-MIB. Any index beyond the above range will not be supported. If a file is added in, its h3cSysIpeFileIndex will be the maximum image ndex plus one. If the IPE file is removed, renamed, or moved from one place to another, its h3cSysIpeFileIndex is not reallocated. If the IPE file's content is replaced, its h3cSysIpeFileIndex will not change. ")
h3cSysIpeFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysIpeFileName.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpeFileName.setDescription(' The file name of the IPE file. It MUST NOT contain the path of the file. ')
h3cSysIpeFileSize = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 2, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysIpeFileSize.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpeFileSize.setDescription(' Size of the file in bytes. ')
h3cSysIpeFileLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 2, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysIpeFileLocation.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpeFileLocation.setDescription(' The directory path of the IPE file. Its form should be the same as what defined in file system. Currently it is defined as follows: For mainboard: flash:/ For slave mainboard and subboards: slotN#flash:/ ')
# ---------------------------------------------------------------------------
# h3cSysIpePackageTable: packages contained inside each IPE file.
# Doubly indexed: (h3cSysIpeFileIndex, h3cSysIpePackageIndex) — one row per
# package per IPE file. All columns are read-only.
# ---------------------------------------------------------------------------
h3cSysIpePackageTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3), )
if mibBuilder.loadTexts: h3cSysIpePackageTable.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpePackageTable.setDescription(' The IPE package file table. It shows the package files in the IPE file. ')
h3cSysIpePackageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysIpeFileIndex"), (0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysIpePackageIndex"))
if mibBuilder.loadTexts: h3cSysIpePackageEntry.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpePackageEntry.setDescription(' An entry of the h3cIpePackageTable. Indexed by h3cSysIpeFileIndex and h3cSysIpePackageIndex. ')
h3cSysIpePackageIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: h3cSysIpePackageIndex.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpePackageIndex.setDescription(' It is the IPE package index; IPE Package Index is a monotonically increasing integer for the sole purpose of indexing events. When it reaches the maximum value, an extremely unlikely event, the agent wraps the value back to 1 and may flush existing entries. Any index beyond the above range will not be supported. ')
h3cSysIpePackageName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysIpePackageName.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpePackageName.setDescription('The file name of the package file. ')
h3cSysIpePackageSize = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysIpePackageSize.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpePackageSize.setDescription(' Size of the package file in bytes. ')
# Same boot/system/feature/patch enumeration as h3cSysPackageType.
h3cSysIpePackageType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("boot", 1), ("system", 2), ("feature", 3), ("patch", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysIpePackageType.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpePackageType.setDescription(' It indicates the type of the package file. boot : kernel, file system, memory management and other core components. system : interface management, configuration management and other basic system package. feature : feature packages, providing different services. patch : patch file contains fixes for a specific defect. ')
h3cSysIpePackageDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysIpePackageDescription.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpePackageDescription.setDescription(' It is the description of the package. ')
h3cSysIpePackageFeature = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysIpePackageFeature.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpePackageFeature.setDescription(' Indicate the feature of the package. ')
h3cSysIpePackageVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysIpePackageVersion.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpePackageVersion.setDescription(' The version of the package. ')
# ---------------------------------------------------------------------------
# h3cSysIpeFileOperate: unpack an IPE file (and optionally mark its packages
# for the next reload). Rows created via h3cSysIpeFileOperateRowStatus;
# outcome reported in h3cSysIpeFileOperateResult.
# ---------------------------------------------------------------------------
h3cSysIpeFileOperateTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 4), )
if mibBuilder.loadTexts: h3cSysIpeFileOperateTable.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpeFileOperateTable.setDescription('A table of IPE file operate.')
h3cSysIpeFileOperateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 4, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysIpeFileOperateIndex"))
if mibBuilder.loadTexts: h3cSysIpeFileOperateEntry.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpeFileOperateEntry.setDescription(' An operate request entry. ')
h3cSysIpeFileOperateIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: h3cSysIpeFileOperateIndex.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpeFileOperateIndex.setDescription(' The unique index value of a row in this table. ')
# Foreign key into h3cSysIpeFileTable (read-create).
h3cSysIpeFileOperateFileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysIpeFileOperateFileIndex.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpeFileOperateFileIndex.setDescription(' Specify the IPE file in the h3cSysIpeFileTable. This IPE file will be unpacked to package files. ')
# Same none/primary/secondary/primarySecondary enumeration as
# h3cSysPackageAttribute, applied to the unpacked packages.
h3cSysIpeFileOperateAttribute = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("primary", 2), ("secondary", 3), ("primarySecondary", 4)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysIpeFileOperateAttribute.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpeFileOperateAttribute.setDescription(" It indicates the attribute of the IPE file when it is used in the reloading. If the value is 'primary', the packages in the IPE file will be the first packages in the next reloading procedure. If the value is 'secondary', the package in the IPE file will be used if the primary packages fails. If the value is 'none', the IPE file is only unpacked, will not be used in the reloading procedure. ")
h3cSysIpeFileOperateRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 4, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cSysIpeFileOperateRowStatus.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpeFileOperateRowStatus.setDescription(" the status of this table entry. When the status is active all the object's value in the entry is not allowed to modified. ")
h3cSysIpeFileOperateResult = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("opInProgress", 1), ("opSuccess", 2), ("opUnknownFailure", 3), ("opInvalidFile", 4), ("opDeviceFull", 5), ("opFileOpenError", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSysIpeFileOperateResult.setStatus('current')
if mibBuilder.loadTexts: h3cSysIpeFileOperateResult.setDescription(' the result of the operation. ')
# ---------------------------------------------------------------------------
# Notifications (...2.3.2): clock change, reload, and startup traps, each
# bound to the varbind objects listed in setObjects().
# ---------------------------------------------------------------------------
h3cSystemManMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 2))
h3cSysClockChangedNotification = NotificationType((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 2, 1)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysLocalClock"))
if mibBuilder.loadTexts: h3cSysClockChangedNotification.setStatus('current')
if mibBuilder.loadTexts: h3cSysClockChangedNotification.setDescription(' A clock changed notification is generated when the current local date and time for the system has been manually changed. The value of h3cSysLocalClock reflects new date and time. ')
h3cSysReloadNotification = NotificationType((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 2, 2)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadImage"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadCfgFile"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadReason"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadScheduleTime"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadAction"))
if mibBuilder.loadTexts: h3cSysReloadNotification.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadNotification.setDescription(' A h3cSysReloadNotification will be sent before the corresponding entity is rebooted. It will also be sent if the entity fails to reboot because the clock has changed. ')
h3cSysStartUpNotification = NotificationType((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 2, 3)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageType"))
if mibBuilder.loadTexts: h3cSysStartUpNotification.setStatus('current')
if mibBuilder.loadTexts: h3cSysStartUpNotification.setDescription(" a h3cSysStartUpNotification trap will be sent when the system starts up with 'main' image file failed, a trap will be sent to indicate which type the current image file (I.e backup or secure)is. ")
# ---------------------------------------------------------------------------
# Conformance (...2.3.3): module compliance statement referencing the object
# and notification groups defined below.
# ---------------------------------------------------------------------------
h3cSystemManMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3))
h3cSystemManMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 1))
h3cSystemManMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 1, 1)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysClockGroup"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadGroup"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageGroup"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCFGFileGroup"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSystemManNotificationGroup"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCurGroup"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSystemBtmLoadGroup"))
# pysmi compatibility shim: presumably newer pysnmp returns a new object from
# setStatus() (requiring rebinding) while older releases mutate in place —
# TODO confirm against the pysnmp version pinned by this project.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cSystemManMIBCompliance = h3cSystemManMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: h3cSystemManMIBCompliance.setDescription(' The compliance statement for entities which implement the system management MIB. ')
# ---------------------------------------------------------------------------
# Conformance groups (...2.3.3.2): collections of related objects and
# notifications cited by the compliance statement. Each group rebinds after
# setStatus() under the version-guarded pysnmp-compatibility pattern used
# throughout this generated module.
# (h3cSystemBtmLoadGroup's own setStatus/setDescription lines follow this
# chunk in the file.)
# ---------------------------------------------------------------------------
h3cSystemManMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2))
h3cSysClockGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2, 1)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysLocalClock"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysSummerTimeEnable"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysSummerTimeZone"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysSummerTimeMethod"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysSummerTimeStart"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysSummerTimeEnd"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysSummerTimeOffset"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cSysClockGroup = h3cSysClockGroup.setStatus('current')
if mibBuilder.loadTexts: h3cSysClockGroup.setDescription('A collection of objects providing mandatory system clock information.')
h3cSysReloadGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2, 2)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadSchedule"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadAction"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadImage"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadCfgFile"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadReason"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadScheduleTagList"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadTag"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadScheduleTime"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadEntity"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cSysReloadGroup = h3cSysReloadGroup.setStatus('current')
if mibBuilder.loadTexts: h3cSysReloadGroup.setDescription('A collection of objects providing mandatory system reload.')
h3cSysImageGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2, 3)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageNum"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageName"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageSize"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageLocation"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cSysImageGroup = h3cSysImageGroup.setStatus('current')
if mibBuilder.loadTexts: h3cSysImageGroup.setDescription('A collection of objects providing mandatory system image information.')
h3cSysCFGFileGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2, 4)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCFGFileNum"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCFGFileName"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCFGFileSize"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCFGFileLocation"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cSysCFGFileGroup = h3cSysCFGFileGroup.setStatus('current')
if mibBuilder.loadTexts: h3cSysCFGFileGroup.setDescription(' A collection of objects providing mandatory system configuration file information. ')
h3cSysCurGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2, 5)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCurCFGFileIndex"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCurImageIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cSysCurGroup = h3cSysCurGroup.setStatus('current')
if mibBuilder.loadTexts: h3cSysCurGroup.setDescription('A collection of system current status.')
h3cSystemManNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2, 6)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysClockChangedNotification"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadNotification"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysStartUpNotification"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cSystemManNotificationGroup = h3cSystemManNotificationGroup.setStatus('current')
if mibBuilder.loadTexts: h3cSystemManNotificationGroup.setDescription('A collection of notifications.')
h3cSystemBtmLoadGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2, 7)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCurBtmFileName"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCurUpdateBtmFileName"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysBtmLoadMaxNumber"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysBtmFileName"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysBtmFileType"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysBtmRowStatus"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysBtmErrorStatus"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysBtmLoadTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
h3cSystemBtmLoadGroup = h3cSystemBtmLoadGroup.setStatus('current')
if mibBuilder.loadTexts: h3cSystemBtmLoadGroup.setDescription('A collection of objects providing system update bootrom information.')
mibBuilder.exportSymbols("A3COM-HUAWEI-SYS-MAN-MIB", h3cSysPackageOperateEntry=h3cSysPackageOperateEntry, h3cSysPackageOperateStatus=h3cSysPackageOperateStatus, h3cSysPackageFeature=h3cSysPackageFeature, h3cSysIpeFileIndex=h3cSysIpeFileIndex, h3cSysIpeFileOperateRowStatus=h3cSysIpeFileOperateRowStatus, h3cSysIpePackageIndex=h3cSysIpePackageIndex, h3cSysCurEntry=h3cSysCurEntry, h3cSysReload=h3cSysReload, h3cSysClockChangedNotification=h3cSysClockChangedNotification, h3cSysPackageStatus=h3cSysPackageStatus, h3cSysIpeFileOperateTable=h3cSysIpeFileOperateTable, h3cSysPackageType=h3cSysPackageType, h3cSysReloadReason=h3cSysReloadReason, h3cSysReloadImage=h3cSysReloadImage, h3cSysCurImageIndex=h3cSysCurImageIndex, h3cSysImageGroup=h3cSysImageGroup, h3cSysPackageDescription=h3cSysPackageDescription, h3cSysSummerTimeZone=h3cSysSummerTimeZone, h3cSysClockGroup=h3cSysClockGroup, h3cSysIpeFile=h3cSysIpeFile, h3cSysPackageOperateTable=h3cSysPackageOperateTable, h3cSysIpeFileOperateFileIndex=h3cSysIpeFileOperateFileIndex, h3cSysPackageOperateEntryLimit=h3cSysPackageOperateEntryLimit, h3cSystemManMIBCompliance=h3cSystemManMIBCompliance, h3cSysIpePackageSize=h3cSysIpePackageSize, h3cSysSummerTimeMethod=h3cSysSummerTimeMethod, h3cSysBtmLoadIndex=h3cSysBtmLoadIndex, h3cSysIpeFileNum=h3cSysIpeFileNum, h3cSysClock=h3cSysClock, h3cSystemManMIBNotifications=h3cSystemManMIBNotifications, h3cSysReloadSchedule=h3cSysReloadSchedule, h3cSysImageTable=h3cSysImageTable, h3cSysBtmRowStatus=h3cSysBtmRowStatus, h3cSysIpePackageEntry=h3cSysIpePackageEntry, h3cSysBtmErrorStatus=h3cSysBtmErrorStatus, h3cSysSummerTimeOffset=h3cSysSummerTimeOffset, h3cSysReloadGroup=h3cSysReloadGroup, h3cSysIpeFileOperateIndex=h3cSysIpeFileOperateIndex, h3cSysCFGFileIndex=h3cSysCFGFileIndex, h3cSysCFGFileName=h3cSysCFGFileName, h3cSysReloadScheduleTime=h3cSysReloadScheduleTime, h3cSysPackageTable=h3cSysPackageTable, h3cSysReloadScheduleIndex=h3cSysReloadScheduleIndex, 
h3cSysReloadScheduleTable=h3cSysReloadScheduleTable, h3cSysReloadEntity=h3cSysReloadEntity, h3cSysLocalClockString=h3cSysLocalClockString, h3cSysPackageOperateIndex=h3cSysPackageOperateIndex, h3cSystemManMIBConformance=h3cSystemManMIBConformance, h3cSystemBtmLoadGroup=h3cSystemBtmLoadGroup, h3cSysPackageOperatePackIndex=h3cSysPackageOperatePackIndex, h3cSysIpeFileOperateAttribute=h3cSysIpeFileOperateAttribute, h3cSysPackage=h3cSysPackage, h3cSysIpePackageName=h3cSysIpePackageName, h3cSysPackageNum=h3cSysPackageNum, h3cSysCurEntPhysicalIndex=h3cSysCurEntPhysicalIndex, h3cSysReloadCfgFile=h3cSysReloadCfgFile, h3cSysCFGFileNum=h3cSysCFGFileNum, h3cSystemManNotificationGroup=h3cSystemManNotificationGroup, h3cSysSummerTimeStart=h3cSysSummerTimeStart, h3cSysIpePackageVersion=h3cSysIpePackageVersion, h3cSystemManMIBObjects=h3cSystemManMIBObjects, h3cSysSummerTime=h3cSysSummerTime, h3cSysReloadAction=h3cSysReloadAction, h3cSysImageEntry=h3cSysImageEntry, h3cSystemManMIBCompliances=h3cSystemManMIBCompliances, h3cSysIpeFileTable=h3cSysIpeFileTable, h3cSysCFGFileSize=h3cSysCFGFileSize, h3cSysImageSize=h3cSysImageSize, h3cSysStartUpNotification=h3cSysStartUpNotification, h3cSysBtmLoadTable=h3cSysBtmLoadTable, h3cSysIpePackageDescription=h3cSysIpePackageDescription, PYSNMP_MODULE_ID=h3cSystemMan, h3cSysReloadNotification=h3cSysReloadNotification, h3cSysPackageVersion=h3cSysPackageVersion, h3cSysIpeFileOperateResult=h3cSysIpeFileOperateResult, h3cSysReloadTag=h3cSysReloadTag, h3cSysPackageOperateRowStatus=h3cSysPackageOperateRowStatus, h3cSysPackageAttribute=h3cSysPackageAttribute, h3cSysImage=h3cSysImage, h3cSysPackageOperateResult=h3cSysPackageOperateResult, h3cSysReloadRowStatus=h3cSysReloadRowStatus, h3cSysReloadScheduleTagList=h3cSysReloadScheduleTagList, h3cSysCurrent=h3cSysCurrent, h3cSysBtmFileName=h3cSysBtmFileName, h3cSysCFGFileTable=h3cSysCFGFileTable, h3cSysBtmFile=h3cSysBtmFile, h3cSysIpePackageType=h3cSysIpePackageType, h3cSystemManMIBGroups=h3cSystemManMIBGroups, 
h3cSysPackageSize=h3cSysPackageSize, h3cSysBtmLoadEntry=h3cSysBtmLoadEntry, h3cSysImageName=h3cSysImageName, h3cSysIpeFileOperateEntry=h3cSysIpeFileOperateEntry, h3cSysImageIndex=h3cSysImageIndex, h3cSysCurCFGFileIndex=h3cSysCurCFGFileIndex, h3cSysCurBtmFileName=h3cSysCurBtmFileName, h3cSysCFGFileEntry=h3cSysCFGFileEntry, h3cSysPackageEntry=h3cSysPackageEntry, h3cSysIpeFileName=h3cSysIpeFileName, h3cSysBtmFileType=h3cSysBtmFileType, h3cSysImageType=h3cSysImageType, h3cSysCurUpdateBtmFileName=h3cSysCurUpdateBtmFileName, h3cSysCFGFile=h3cSysCFGFile, h3cSysCurTable=h3cSysCurTable, h3cSysPackageLocation=h3cSysPackageLocation, h3cSysBtmLoadTime=h3cSysBtmLoadTime, h3cSysReloadScheduleEntry=h3cSysReloadScheduleEntry, h3cSysIpeFileEntry=h3cSysIpeFileEntry, h3cSysCFGFileGroup=h3cSysCFGFileGroup, h3cSysIpeFileLocation=h3cSysIpeFileLocation, h3cSysPackageIndex=h3cSysPackageIndex, h3cSysLocalClock=h3cSysLocalClock, h3cSysCurGroup=h3cSysCurGroup, h3cSysCFGFileLocation=h3cSysCFGFileLocation, h3cSysBtmFileLoad=h3cSysBtmFileLoad, h3cSysImageLocation=h3cSysImageLocation, h3cSysImageNum=h3cSysImageNum, h3cSysSummerTimeEnable=h3cSysSummerTimeEnable, h3cSysIpePackageTable=h3cSysIpePackageTable, h3cSysIpeFileSize=h3cSysIpeFileSize, h3cSysPackageName=h3cSysPackageName, h3cSystemMan=h3cSystemMan, h3cSysSummerTimeEnd=h3cSysSummerTimeEnd, h3cSysIpePackageFeature=h3cSysIpePackageFeature, h3cSysBtmLoadMaxNumber=h3cSysBtmLoadMaxNumber)
| 70,390 | 28,128 |
# -*- coding: utf-8 -*-
"""This module contains everything that is related to flows.
This module contains one class : Flow.
"""
from typing import Any, Dict
from pineapple_core.utils.serialization import make_value_serializable
class Flow:
    """Class that represents a flow.

    Attributes
    ==========
    name: Any
        Name of the Flow, it can be of any type
    priority: int
        Number that defines the order of execution of the flow in a set of
        flows. If the number is positive, the flow is enabled, otherwise it
        is disabled
    node: Node
        The next node to execute with this flow
    """

    @staticmethod
    def from_reference(flow: "Flow") -> "Flow":
        """Create a fake exact copy of a Flow by usurping its hash.

        Parameters
        ==========
        flow: Flow
            Reference to the flow you want to create an exact copy of
        """
        new_flow = flow.copy()
        new_flow.inject_reference(flow)
        return new_flow

    def __init__(self, node: "Node", name: Any, priority: int):
        """Flow constructor.

        Parameters
        ==========
        node: Node
            Node that the flow will trigger
        name: Any
            Name of the flow
        priority: int
            Priority of the flow
        """
        self.node = node
        self.priority = priority
        self.name = name
        self.pretty_name = name
        # Flow whose hash this one usurps (see from_reference);
        # None while this flow stands for itself.
        self._ref = None

    def toggle(self):
        """Toggle the flow (enable it if it was disabled, disable it otherwise)."""
        self.priority *= -1

    def disable(self):
        """Disable the flow (make its priority negative)."""
        if self.priority > 0:
            self.toggle()

    def enable(self):
        """Enable the flow (make its priority positive via absolute value)."""
        self.priority = abs(self.priority)

    def increase_priority(self):
        """Increase the priority of the flow by one."""
        self.priority += 1

    def decrease_priority(self):
        """Decrease the priority of the flow by one."""
        self.priority -= 1

    def inject_reference(self, ref: "Flow"):
        """Store a reference to the Flow to usurp.

        Parameters
        ==========
        ref: Flow
            Reference to the Flow you want to usurp
        """
        self._ref = ref

    def __eq__(self, other) -> bool:
        # Equality is deliberately hash-based: a copy produced by
        # from_reference compares equal to the flow it usurps.
        return hash(self) == hash(other)

    def __hash__(self) -> int:
        """Implementation of special method __hash__.

        It allows a Flow to usurp another one: when ``_ref`` is set, the
        hash of the referenced Flow is returned instead of this object's.

        Returns
        =======
        int:
            Hash of the Flow object
        """
        # Explicit ``is None`` check -- a referenced Flow must never be
        # mistaken for "absent" through truthiness.
        if self._ref is None:
            return super().__hash__()
        return hash(self._ref)

    def copy(self) -> "Flow":
        """Copy a Flow: name and priority are copied while the Node inside
        the flow stays the same reference.

        Returns
        =======
        Flow:
            Reference to the newly copied Flow
        """
        flow_copy = Flow(self.node, self.name, self.priority)
        flow_copy.pretty_name = self.pretty_name
        return flow_copy

    def dump(self) -> Dict[str, Any]:
        """Dump a flow.

        Returns
        =======
        dict:
            A dictionary of attributes representing its state
        """
        return {
            "name": make_value_serializable(self.name),
            "node": str(self.node.id),
            "priority": self.priority,
        }

    def __repr__(self):
        return f"Flow(name='{self.name}', priority={self.priority}, node={self.node})"
| 3,748 | 1,025 |
import datetime
import os
import subprocess
from django.apps import apps
from django.core.management import call_command
import attr
from freezegun import freeze_time
import pytest
import pytz
from downloader.exceptions import (
NoFilesCreatedError,
TooManyFilesCreatedError,
YoutubeDLError,
)
from playlists.models import Playlist, Video
def test_server_starts(client):
    """Smoke test: the root URL responds without crashing (``client`` is
    presumably the pytest-django test client fixture -- confirm in conftest)."""
    client.get('/')
def test_checks_pass():
    """Django's system check framework must report no errors."""
    call_command('check')
def test_get_playlist_info_raises_for_garbage_playlist():
    """A nonsense playlist id must surface as YoutubeDLError."""
    downloader_app = apps.get_app_config('downloader')
    with pytest.raises(YoutubeDLError):
        downloader_app.get_playlist_info('asdf')
# Small public YouTube playlist/video used as live fixtures by the tests below.
_TEST_PLAYLIST_ID = 'PL59FEE129ADFF2B12'
_TEST_VIDEO_ID = '007VM8NZxkI'
def test_get_playlist_info_returns_iterable():
    """The playlist info object must support iteration."""
    downloader_app = apps.get_app_config('downloader')
    iter(downloader_app.get_playlist_info(_TEST_PLAYLIST_ID))
def test_get_playlist_info_returns_id_and_title_for_all_results():
    """Every playlist entry must carry at least an 'id' and a 'title' key."""
    downloader_app = apps.get_app_config('downloader')
    for entry in downloader_app.get_playlist_info(_TEST_PLAYLIST_ID):
        assert 'id' in entry
        assert 'title' in entry
def test_download_video_raises_for_garbage_video(tmp_path):
    """A nonsense video id must surface as YoutubeDLError."""
    downloader_app = apps.get_app_config('downloader')
    with pytest.raises(YoutubeDLError):
        downloader_app.download_video('asdf', tmp_path)
def test_download_video_creates_a_file(tmp_path):
    """download_video must materialise exactly the file name it reports."""
    downloader_app = apps.get_app_config('downloader')
    reported_name = downloader_app.download_video(_TEST_VIDEO_ID, tmp_path)
    created = os.path.join(tmp_path, reported_name)
    assert os.path.exists(created)
    os.remove(created)
def test_download_video_raises_when_youtube_dl_misbehaves(tmp_path, mocker):
    """youtube-dl creating zero or multiple files must raise the matching error."""
    downloader_app = apps.get_app_config('downloader')

    def make_fake_run(file_count):
        # Stand-in for subprocess.run that drops *file_count* files into cwd.
        def fake_run(*args, cwd, **kwargs):
            for index in range(file_count):
                open(os.path.join(cwd, str(index)), 'w').close()
        return fake_run

    mocker.patch.object(subprocess, 'run', make_fake_run(0))
    with pytest.raises(NoFilesCreatedError):
        downloader_app.download_video(_TEST_VIDEO_ID, tmp_path)

    mocker.patch.object(subprocess, 'run', make_fake_run(2))
    with pytest.raises(TooManyFilesCreatedError):
        downloader_app.download_video(_TEST_VIDEO_ID, tmp_path)
@attr.s
class Params(object):
    """Fixture bundle for test_create_and_update_videos."""
    preexisting = attr.ib()    # Video rows created before the sync runs
    playlist_info = attr.ib()  # canned get_playlist_info() payload
    expected = attr.ib()       # per-youtube_id attribute values expected after sync
# Reference instants; the test freezes wall-clock time at ``now``.
now = datetime.datetime(2018, 12, 2, 0, 0, 0, tzinfo=pytz.UTC)
yesterday = datetime.datetime(2018, 12, 1, 0, 0, 0, tzinfo=pytz.UTC)
@freeze_time('2018-12-02 00:00:00.0')
@pytest.mark.django_db
@pytest.mark.parametrize(
    'params',
    [
        Params( # None preexisting, none new.
            preexisting=[],
            playlist_info=[],
            expected=[],
        ),
        Params( # None preexisting, one new.
            preexisting=[],
            playlist_info=[{'id': 'testID', 'title': 'Test Title'}],
            expected=[
                {
                    'youtube_id': 'testID',
                    'title': 'Test Title',
                    'added': now,
                    'removed': None,
                },
            ]
        ),
        Params( # None preexisting, some new.
            preexisting=[],
            playlist_info=[
                {'id': 'testID1', 'title': 'Test Title 1'},
                {'id': 'testID2', 'title': 'Test Title 2'},
            ],
            expected=[
                {
                    'youtube_id': 'testID1',
                    'title': 'Test Title 1',
                    'added': now,
                    'removed': None,
                },
                {
                    'youtube_id': 'testID2',
                    'title': 'Test Title 2',
                    'added': now,
                    'removed': None,
                },
            ],
        ),
        Params( # Some preexisting, none new.
            preexisting=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'added': now,
                'removed': None,
            }],
            playlist_info=[{'id': 'testID', 'title': 'Test Title'}],
            expected=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'added': now,
                'removed': None,
            }],
        ),
        Params( # Some preexisting, one new.
            preexisting=[{
                'youtube_id': 'testID1',
                'title': 'Test Title 1',
                'added': yesterday,
                'removed': None,
            }],
            playlist_info=[
                {'id': 'testID1', 'title': 'Test Title 1'},
                {'id': 'testID2', 'title': 'Test Title 2'},
            ],
            expected=[
                {
                    'youtube_id': 'testID1',
                    'title': 'Test Title 1',
                    'added': yesterday,
                    'removed': None,
                },
                {
                    'youtube_id': 'testID2',
                    'title': 'Test Title 2',
                    'added': now,
                    'removed': None,
                },
            ],
        ),
        Params( # Some preexisting, one removed.
            preexisting=[
                {
                    'youtube_id': 'testID1',
                    'title': 'Test Title 1',
                    'added': yesterday,
                    'removed': None,
                },
                {
                    'youtube_id': 'testID2',
                    'title': 'Test Title 2',
                    'added': yesterday,
                    'removed': None,
                },
            ],
            playlist_info=[{'id': 'testID1', 'title': 'Test Title 1'}],
            expected=[
                {
                    'youtube_id': 'testID1',
                    'title': 'Test Title 1',
                    'added': yesterday,
                    'removed': None,
                },
                {
                    'youtube_id': 'testID2',
                    'title': 'Test Title 2',
                    'added': yesterday,
                    'removed': now,
                },
            ],
        ),
        Params( # Some preexisting, one new, one removed.
            preexisting=[
                {
                    'youtube_id': 'testID1',
                    'title': 'Test Title 1',
                    'added': yesterday,
                    'removed': None,
                },
                {
                    'youtube_id': 'testID2',
                    'title': 'Test Title 2',
                    'added': yesterday,
                    'removed': None,
                },
            ],
            playlist_info=[
                {'id': 'testID1', 'title': 'Test Title 1'},
                {'id': 'testID3', 'title': 'Test Title 3'},
            ],
            expected=[
                {
                    'youtube_id': 'testID1',
                    'title': 'Test Title 1',
                    'added': yesterday,
                    'removed': None,
                },
                {
                    'youtube_id': 'testID2',
                    'title': 'Test Title 2',
                    'added': yesterday,
                    'removed': now,
                },
                {
                    'youtube_id': 'testID3',
                    'title': 'Test Title 3',
                    'added': now,
                    'removed': None,
                },
            ],
        ),
        Params( # Some preexisting, one renamed.
            preexisting=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'added': yesterday,
                'removed': None,
            }],
            playlist_info=[{'id': 'testID', 'title': 'Renamed'}],
            expected=[{
                'youtube_id': 'testID',
                'title': 'Renamed',
                'added': yesterday,
                'removed': None,
            }],
        ),
        Params( # Some preexisting, one deleted.
            preexisting=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'added': yesterday,
                'removed': None,
                'deleted': False,
            }],
            playlist_info=[{'id': 'testID', 'title': '[Deleted video]'}],
            expected=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'deleted': True,
                'privated': False,
            }],
        ),
        Params( # Some preexisting, one made private.
            preexisting=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'added': yesterday,
                'removed': None,
                'privated': False,
            }],
            playlist_info=[{'id': 'testID', 'title': '[Private video]'}],
            expected=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'deleted': False,
                'privated': True,
            }],
        ),
        Params( # Some preexisting private, one made public.
            preexisting=[{
                'youtube_id': 'testID',
                'title': '[Private video]',
                'added': yesterday,
                'removed': None,
                'privated': True,
            }],
            playlist_info=[{'id': 'testID', 'title': 'Test Title'}],
            expected=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'deleted': False,
                'privated': False,
            }],
        ),
        Params( # None preexisting, one new private, one new deleted.
            preexisting=[],
            playlist_info=[
                {'id': 'testID1', 'title': '[Private video]'},
                {'id': 'testID2', 'title': '[Deleted video]'},
            ],
            expected=[
                {
                    'youtube_id': 'testID1',
                    'title': '[Private video]',
                    'added': now,
                    'removed': None,
                    'deleted': False,
                    'privated': True,
                },
                {
                    'youtube_id': 'testID2',
                    'title': '[Deleted video]',
                    'added': now,
                    'removed': None,
                    'deleted': True,
                    'privated': False,
                },
            ],
        ),
    ],
)
def test_create_and_update_videos(params, mocker):
    """End-to-end playlist sync with time frozen at ``now``: new videos are
    added, missing ones get ``removed`` timestamped, renames propagate, and
    '[Deleted video]'/'[Private video]' titles flip the matching flags.
    """
    playlist = Playlist.objects.create(youtube_id='playlistID')
    for details in params.preexisting:
        Video.objects.create(playlist=playlist, **details)
    downloader = apps.get_app_config('downloader')
    # Replace the real downloader call with the canned playlist payload;
    # a generator proves the code only needs an iterable, not a list.
    mocker.patch.object(downloader, 'get_playlist_info')
    downloader.get_playlist_info.return_value = (
        item for item in params.playlist_info
    )
    playlist.create_and_update_videos()
    videos = playlist.videos.all()
    for details in params.expected:
        video = videos.get(youtube_id=details['youtube_id'])
        for attr_name, value in details.items():
            assert getattr(video, attr_name) == value
    # No extra rows beyond the expected ones.
    assert playlist.videos.count() == len(params.expected)
| 11,435 | 3,261 |
import spacy
import sys
from spacy.matcher import Matcher
from src.core.document import Document
from src.core.relation_extraction import *
def test_one_to_one_replacement():
    """Swap one single-token span for a single-token span from another doc."""
    target = Document("Python code is the cleanest code around unlike C++ which is garbage nonsense")
    donor = Document("word word word word word word hello")
    target.swap(target.span(2, 3), donor.span(6, 7))
    assert target.print() == (
        "Python code hello the cleanest code around unlike C++ which is garbage nonsense"
    )
def test_multiple_replacements():
    """Three swaps into the same target at scattered positions.

    All spans are captured against the pristine documents before any swap,
    mirroring the indices the swaps are expressed in.
    """
    target = Document("Python code is the cleanest code around unlike C++ which is garbage nonsense")
    donor_a = Document("the joker fights guys batman is evil")
    donor_b = Document("highlighting textbooks for fun")
    first, second, third = target.span(5, 6), target.span(12, 13), target.span(8, 10)
    repl_first = donor_a.span(4, 5)
    repl_second = donor_b.span(0, 1)
    repl_third = donor_b.span(1, 3)
    target.swap(first, repl_first)
    target.swap(second, repl_second)
    target.swap(third, repl_third)
    assert target.print() == (
        "Python code is the cleanest batman around unlike textbooks for is garbage highlighting"
    )
def test_one_to_one_replacement_same_start_end():
    """Single-token swap where the replaced span sits mid-sentence."""
    target = Document("Python code is the cleanest code around unlike C++ which is garbage nonsense")
    donor = Document("the joker fights guys batman is evil")
    target.swap(target.span(5, 6), donor.span(4, 5))
    assert target.print() == (
        "Python code is the cleanest batman around unlike C++ which is garbage nonsense"
    )
def test_n_to_n_replacement():
    """Replace a multi-token prefix with an equally long multi-token span."""
    target = Document("Python code is the cleanest code around unlike C++ which is garbage nonsense")
    donor = Document("my adult nephew enjoys crafts and cutting paper like a two year old")
    target.swap(target.span(0, 7), donor.span(6, 14))
    assert target.print() == (
        "cutting paper like a two year old unlike C++ which is garbage nonsense"
    )
def test_n_to_n_replacement_same_start_end():
    """Multi-token swap in the middle of the sentence."""
    target = Document("Python code is the cleanest code around unlike C++ which is garbage nonsense")
    donor = Document("my mom told me to button up jacket young man its cold outside")
    target.swap(target.span(6, 9), donor.span(5, 9))
    assert target.print() == (
        "Python code is the cleanest code button up jacket young which is garbage nonsense"
    )
def test_shorter_replacement():
    """Replacement span shorter than the replaced span shrinks the text."""
    target = Document("Python code is the cleanest code around unlike C++ which is garbage nonsense")
    donor = Document("down dog, get off of the couch")
    target.swap(target.span(4, 9), donor.span(0, 2))
    assert target.print() == "Python code is the down dog which is garbage nonsense"
def test_longer_replacement():
    """Replacement span longer than the replaced span grows the text."""
    target = Document("Python code is the cleanest code around unlike C++ which is garbage nonsense")
    donor = Document("my mom told me to button up jacket young man its cold outside")
    target.swap(target.span(8, 10), donor.span(5, 12))
    assert target.print() == (
        "Python code is the cleanest code around unlike button up jacket young man its cold is garbage nonsense"
    )
def test_original_still_prints_after_replacement():
    """print(True) must still return the pre-swap text after a swap."""
    original_text = "Python code is the cleanest code around unlike C++ which is garbage nonsense"
    target = Document(original_text)
    donor = Document("my mom told me to button up jacket young man its cold outside")
    target.swap(target.span(8, 10), donor.span(5, 12))
    assert target.print(True) == original_text
| 4,012 | 1,318 |
from kigo.bpmn.elements.element import Element
class BpmnDefinitions(Element):
    """Element for the top-level ``bpmn:definitions`` tag of a BPMN document."""
    # Tag name this element maps to; presumably used by the Element base
    # class for lookup/serialisation -- confirm against kigo.bpmn Element.
    item_name = "bpmn:definitions"
| 116 | 42 |
"""A parser for reading radio source coordinates from VASCC apriori crf
Description:
------------
Reads radio source coordinates from VASCC (VLBI Software Analysis Comparison Campaign) apriori file.
"""
# Midgard imports
from midgard.dev import plugins
from midgard.parsers._parser_line import LineParser
@plugins.register
class VasccCrfParser(LineParser):
    """Parser for VASCC apriori CRF files with radio source coordinates."""

    def setup_parser(self):
        """Return the line-parser options: source name plus ra/dec columns.

        Columns 0, 3 and 4 are read as an 8-char string and two floats,
        skipping the single header line.
        """
        return dict(usecols=(0, 3, 4), dtype="U8, f8, f8", skip_header=1)

    def structure_data(self):
        """Re-key the parsed array as ``{source_name: coordinate dict}``.

        Every source starts with the same flag set: only ``undefined`` is
        True; the ICRF-style categories are all False for VASCC sources.
        """
        sources = {}
        for name, ra, dec in self._array:
            sources[name] = {
                "ra": ra,
                "dec": dec,
                "special": False,
                "undefined": True,
                "non_vcs": False,
                "vcs": False,
                "defining": False,
            }
        self.data = sources
| 900 | 258 |
import asyncio
import sys
# sys.path.insert(0, "..")
import logging
from asyncua import Client, Node, ua
logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger('asyncua')
async def main():
    """Connect to the demo OPC UA server, browse root and read MyVariable."""
    url = 'opc.tcp://localhost:4840/freeopcua/server/'
    # url = 'opc.tcp://commsvr.com:51234/UA/CAS_UA_Server'
    async with Client(url=url) as client:
        # The client exposes well-known nodes (Root, Objects, ...) directly;
        # Node objects can read/write attributes and browse the address space.
        _logger.info('Children of root are: %r', await client.nodes.root.get_children())

        # Resolve the example namespace, then walk the browse path down to
        # the variable node and read its current value.
        uri = 'http://examples.freeopcua.github.io'
        idx = await client.get_namespace_index(uri)
        var = await client.nodes.root.get_child(["0:Objects", f"{idx}:MyObject", f"{idx}:MyVariable"])
        print("My variable", var, await var.read_value())


if __name__ == '__main__':
    asyncio.run(main())
| 1,520 | 488 |
from typing import Type
from elliptic.Kernel.Context import ContextDelegate
from elliptic_meshql.Selector import SelectorImplementationBase
class LoopDelegate(ContextDelegate):
    """Base delegate for code-generation loops over mesh entities.

    Subclasses set ``loop_name``; the prefix ``loop_name + unique_id``
    namespaces every context variable this loop declares, so nested loops
    do not collide.
    """
    loop_name = ''
    def __init__(self, context, unique_id):
        super().__init__(context, unique_id)
        # Unique per-instance prefix for all context keys of this loop.
        self.loop_var_prefix = self.loop_name + str(self.unique_id)
    def template_kwargs(self):
        """Return the template variables shared by all loop templates.

        NOTE(review): mixes ``get_value`` (top-of-stack) with direct
        ``context.context[...]`` access for the reduced/mapped/nested keys
        -- presumably those are single-valued; confirm against
        ContextDelegate's storage semantics.
        """
        return {'current_entity': self.context.get_value('current_entity_name'),
                'current_range': self.context.get_value('current_range_name'),
                'current_index': self.context.get_value('current_index_name'),
                'reduced_variables': self.context.context[self.loop_var_prefix + 'reduced'],
                'mapped_variables': self.context.context[self.loop_var_prefix + 'mapped'],
                'reduce_nested_children': self.context.context[self.loop_var_prefix + 'nested_children']}
    def context_enter(self):
        # Push this loop's range/entity/index names so nested delegates can
        # reference them; each *declare_* key pairs with a *current_* key.
        self.context.put_value('current_loop', self.loop_var_prefix)
        self.context.put_value('declare_range', self.loop_var_prefix + 'range')
        self.context.put_value('current_range_name', self.loop_var_prefix + 'range')
        self.context.put_value('declare_entityhandle', self.loop_var_prefix + 'entity')
        self.context.put_value('current_entity_name', self.loop_var_prefix + 'entity')
        self.context.put_value('declare_index', self.loop_var_prefix + 'index')
        self.context.put_value('current_index_name', self.loop_var_prefix + 'index')
    def context_exit(self):
        # Pop only the *current_* names pushed in context_enter; the
        # declare_* and current_loop keys are left for the outer context.
        self.context.pop_value('current_range_name')
        self.context.pop_value('current_entity_name')
        self.context.pop_value('current_index_name')
class SelectorImplementation(SelectorImplementationBase):
    """Factory of ContextDelegate classes for entity-selection loops."""
    def by_ent_delegate(self, dim: int) -> Type[ContextDelegate]:
        """Build a delegate looping over all entities of dimension *dim*."""
        class ByEntDelegate(LoopDelegate):
            loop_name = 'by_ent'
            def get_template_file(self):
                return 'Selector/by_ent.pyx.etp'
            def template_kwargs(self):
                # Extend the generic loop variables with the fixed dimension.
                return {'dim': dim,
                        **super().template_kwargs()}
        return ByEntDelegate
    def by_adj_delegate(self, bridge_dim: int, to_dim: int) -> Type[ContextDelegate]:
        """Build a delegate looping over adjacencies via *bridge_dim* to *to_dim*."""
        class ByAdjDelegate(LoopDelegate):
            loop_name = 'by_adj'
            def get_template_file(self):
                return 'Selector/by_adj.pyx.etp'
            def template_kwargs(self):
                # NOTE(review): '[-2]' reads the second-to-last pushed entity
                # name, i.e. presumably the enclosing loop's entity -- confirm
                # that 'current_entity_name' is stored as a stack/list here.
                return {'bridge_dim': bridge_dim,
                        'to_dim': to_dim,
                        'old_entity': self.context.context['current_entity_name'][-2],
                        **super().template_kwargs()}
        return ByAdjDelegate
    def where_delegate(self, conditions):
        # Not implemented yet: deliberately returns None (callers must not
        # rely on receiving a delegate class from this method).
        pass
| 2,783 | 806 |
def lengthOfLastWord_(s):
    """Return the length of the last whitespace-delimited word in *s*.

    Returns 0 when *s* contains no words (empty or all-whitespace input).
    """
    words = s.split()
    # Truthiness instead of ``len(words) == 0`` (PEP 8 idiom); behavior is
    # unchanged.
    return len(words[-1]) if words else 0
| 100 | 38 |
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import get_language
from cms.menu_bases import CMSAttachMenu
from menus.base import Menu, NavigationNode
from menus.menu_pool import menu_pool
from cmskit.recipes.models import Recipe
class RecipesMenu(CMSAttachMenu):
    """CMS-attachable menu listing every published recipe as a navigation node."""

    name = _("Recipes menu")

    def get_nodes(self, request):
        """Build one NavigationNode per published recipe.

        Best-effort: a recipe whose node construction fails (e.g. a broken
        ``get_absolute_url``) is skipped so one bad entry cannot take the
        whole menu down.
        """
        nodes = []
        for recipe in Recipe.objects.published().select_related():
            try:
                node = NavigationNode(
                    recipe.title,
                    recipe.get_absolute_url(),
                    recipe.pk
                )
                nodes.append(node)
            except Exception:
                # Was a bare ``except:``, which also swallowed SystemExit and
                # KeyboardInterrupt; narrowed to Exception, still best-effort.
                pass
        return nodes


menu_pool.register_menu(RecipesMenu)
| 822 | 234 |
"""
"""
import re
import os
import matplotlib.pyplot as plt
import re
import numpy as np
import math
from scipy.stats.stats import pearsonr
from scipy.stats.stats import kendalltau
import scipy
from matplotlib.patches import Rectangle
from scipy import stats
import seaborn as sns
import pandas as pd
# Global plotting state: one figure/axes pair reused by draw_graph().
sns.set(font_scale = 1.2)
fig = plt.figure()
ax = fig.add_subplot(111)
# Input locations (relative paths from this script's directory).
PROJECTS_LIST = "../../info/settings-project.txt"
RESULT_PATH="../../data/complexity-and-change-data/"
# Line/marker styling tables, indexed per change type in draw_graph().
styles=['-', '--','-.',':']
colors = ['r', 'g','b','y']
# NOTE(review): this second assignment shadows the 4-element ``styles``
# above -- presumably intentional (8 slots); confirm before removing either.
styles=["-", "--","-.", ":", "-", "--","-.", ":"]
marks=["^", "d", "o", "v", "p", "s", "<", ">"]
#marks_size=[15, 17, 10, 15, 17, 10, 12,15]
marks_size=[15, 17, 10, 15, 17, 10, 12,15]
marker_color=['#0F52BA','#ff7518','#6CA939','#e34234','#756bb1','brown','#c994c7', '#636363']
gap = [5,5,3,4,4,3]
# Accumulators filled by list_projects() / parse_data() / correlation().
PROJECTS = {}
STATS = {}
correl_type = {}
def list_projects():
    """Populate the global PROJECTS dict with the first tab-separated field
    of every line in PROJECTS_LIST (the dict acts as an ordered set).
    """
    # ``with`` guarantees the handle closes even on error; the original
    # opened/closed manually and kept dead locals (a counter and an unused
    # ``projects`` list).
    with open(PROJECTS_LIST, "r") as fr:
        lines = fr.readlines()
    for line in lines:
        name = re.findall("[^\t]+", line.strip())[0]
        PROJECTS[name] = 1
### to help step2
def find_index(feature, project):
    """Return the column index of *feature* in the header row of the
    project's result file, or None when the column is absent.
    """
    # ``with`` closes the handle; the original leaked the file descriptor.
    with open(RESULT_PATH + project + ".txt") as fr:
        header = fr.readline().strip()
    for index, column in enumerate(re.findall("[^\t]+", header)):
        if column == feature:
            return index
    return None
def parse_data():
    """Fill the global STATS nest: STATS[project][method][feature_value]
    accumulates the four change counters for every method revision whose
    age passes the optional restriction.

    Relies on module globals set in __main__ (``feature``,
    ``age_restriction``, ``apply_age_restriction``) and on the column
    indexes cached by list_indexes().
    """
    global STATS
    for project in PROJECTS:
        # NOTE(review): header indexes are always looked up in the
        # "checkstyle" file, not in ``project`` -- presumably all result
        # files share one header layout; confirm, otherwise pass ``project``.
        list_indexes(feature,"checkstyle")
        STATS[project]={}
        fr = open(RESULT_PATH+project+".txt")
        line = fr.readline() ## header
        lines = fr.readlines()
        fr.close()
        for line in lines:
            line = line.strip()
            data = re.findall("[^\t]+",line)
            age = int(data[0])
            if apply_age_restriction == 1 and age < age_restriction:
                continue
            method = data[len(data)-1]
            if method not in STATS[project]:
                STATS[project][method]={}
            # Each column holds a comma-separated time series per method.
            feature_values = re.findall("[^,]+",data[feature_index])
            date_values = re.findall("[^,]+",data[date_index])
            diff_values = re.findall("[^,]+",data[diff_index])
            addition_values = re.findall("[^,]+",data[addition_index])
            edit_values = re.findall("[^,]+",data[edit_index])
            track = 0
            for i in range(1, len(diff_values)):
                if int(date_values[i]) > age_restriction: ## change not within time
                    break
                if int(diff_values[i]) == 0: ## no change in content
                    continue
                track = 1
                feature_value = int(feature_values[i-1]) ## current change happened because of the previous state
                if feature_value not in STATS[project][method]:
                    STATS[project][method][feature_value]=build_dic()
                update_stats(project, method, feature_value, 1, int(addition_values[i]), int(diff_values[i]), int(edit_values[i]))
            if track == 0: ## there was no change
                # Record a zero-change observation against the initial state.
                feature_value = int(feature_values[0]) ##
                if feature_value not in STATS[project][method]:
                    STATS[project][method][feature_value]=build_dic()
                update_stats(project, method, feature_value, 0, 0, 0, 0)
def update_stats(project, method, feature_value, rev, add, diff, edit):
    """Fold one observed change into the STATS bucket for this feature value.

    The four amounts line up positionally with changeTypes
    (#Revisions, NewAdditions, DiffSizes, EditDistances).
    """
    bucket = STATS[project][method][feature_value]
    for position, amount in enumerate((rev, add, diff, edit)):
        bucket[changeTypes[position]] += amount
def build_dic():
    """Return a fresh counter dict with one zeroed slot per change type."""
    return {change_type: 0 for change_type in changeTypes}
def list_indexes(feature, project):
    """Cache the column indexes of the selected feature and of the four
    change-history columns as module globals, so parse_data() can slice
    result rows positionally.
    """
    global feature_index
    global date_index
    global diff_index
    global addition_index
    global edit_index
    feature_index = find_index(feature, project)
    date_index = find_index("ChangeDates", project)
    diff_index = find_index("DiffSizes", project)
    addition_index = find_index("NewAdditions", project)
    edit_index = find_index("EditDistances", project)
def correlation():
    """For every project and change type, compute the Kendall tau between
    feature values and accumulated change amounts, appending the tau
    coefficient to the global correl_type[type] list.
    """
    for project in STATS:
        for type in changeTypes:
            X=[]
            Y=[]
            for method in STATS[project]:
                for feature_value in STATS[project][method]:
                    X.append(feature_value)
                    Y.append(STATS[project][method][feature_value][type])
            # kendalltau returns (correlation, p-value); only the
            # coefficient is kept.
            cr = kendalltau(X, Y)
            #print project, type, cr, cr[0]
            if type not in correl_type:
                correl_type[type] = []
            correl_type[type].append(float(cr[0]))
def draw_graph():
    """Plot one CDF curve per change type over the per-project correlation
    coefficients collected by correlation(), then show the figure.
    Uses the module-level styling tables and the global fig/ax pair.
    """
    index = 0
    for type in changeTypes:
        X,Y = build_cdf(correl_type[type])
        #print Y
        line=(plt.plot(X,Y))
        # Style each curve from the parallel style/marker/color tables.
        plt.setp(line, linewidth=3,ls=styles[index], marker=marks[index],
        markerfacecolor=marker_color[index], markersize = 12, color=marker_color[index],markevery=gap[index])
        index += 1
    plt.legend(changeTypes,loc=0,fontsize=17)
    plt.xlabel("Correlation",fontsize=20)
    plt.ylabel("CDF",fontsize=18)
    for label in ax.get_xticklabels():
        label.set_fontsize(19)
    for label in ax.get_yticklabels():
        label.set_fontsize(18)
    plt.tight_layout()
    plt.show()
def build_cdf(ls):
    """Build an empirical CDF from *ls*.

    Returns (X, Y) where X holds the distinct values of *ls* in ascending
    order and Y the cumulative fraction of elements <= each value.
    """
    total = len(ls)
    counts = {}
    for value in ls:
        counts[value] = counts.get(value, 0.0) + 1.0
    xs, ys = [], []
    cumulative = 0.0
    seen = set()
    for value in sorted(ls):
        if value in seen:
            continue
        seen.add(value)
        cumulative += counts[value] / total
        xs.append(value)
        ys.append(cumulative)
    return xs, ys
if __name__ == "__main__":
    # NOTE(review): ``global`` at module level is a no-op -- these names are
    # already module globals; kept for documentation of intent only.
    global feature
    global age_restriction
    global changeTypes
    global risks
    # Restrict analysis to changes within the first two years of a method.
    apply_age_restriction = 1
    age_restriction = 730
    risks =["Low", "Medium", "High", "Very High"]
    changeTypes =["#Revisions", "NewAdditions", "DiffSizes", "EditDistances"]
    ### will change based on feature
    feature = "SLOCStandard" ### changeDates for #revisions
    # Pipeline: enumerate projects, aggregate stats, correlate, plot CDFs.
    list_projects()
    #list_indexes(feature)
    parse_data()
    correlation()
    draw_graph()
| 6,218 | 2,276 |
import pandas as pd
from os import path, listdir
import pickle
from nltk import word_tokenize
import json
import re
import emoji
import string
# Per-state tweet cleaning: read JSONL tweet dumps, normalize the text, and
# bucket the cleaned tweets by their creation date.
TARGET_STATE = 'OR'
# NOTE(review): source_dir is empty — presumably it should point at the
# directory holding the per-state tweet files; confirm before running.
source_dir = f''
twitter_files = [i for i in list(listdir(source_dir)) if TARGET_STATE in i]
# Tokens to drop after tokenization: ASCII punctuation plus common
# multi-character and typographic quote/ellipsis variants.
punctuation_list = list(string.punctuation) + ['....','...', '..', '\"', '\'', '“','”','`','``','…']
tweets_by_date = {}  # 'YYYY-MM-DD' -> list of cleaned tweet texts
#Collect and clean tweets
for idx, tfile in enumerate(twitter_files):
    with open(path.join(source_dir, tfile), 'r') as i:
        # Each line is a standalone JSON tweet object (JSONL format)
        t_f = [json.loads(line) for line in i]
    en_tweets = []
    for tweet in t_f:
        # Keep only (id, text, date); created_at is truncated to YYYY-MM-DD
        en_tweets.append((tweet['id'],tweet['text'],tweet['created_at'][:10]))
    cleaned_string = {}  # tweet id -> cleaned text; used to skip duplicate ids
    for tweet in en_tweets:
        if tweet[0] in cleaned_string:
            continue
        refined_tweet = tweet[1].lower()
        # Strip emoji.  NOTE(review): emoji.get_emoji_regexp() was removed in
        # emoji >= 2.0 — confirm which version of the emoji package is pinned.
        refined_tweet = re.sub(emoji.get_emoji_regexp(), r'', refined_tweet)
        refined_tweet = re.sub(r'http\S+', '', refined_tweet)  # URLs
        refined_tweet = re.sub(r'@\S+', '', refined_tweet)  # @mentions
        refined_tweet = re.sub(r'#', '', refined_tweet)  # keep hashtag word, drop '#'
        # NOTE(review): this substitution is a no-op as written; it looks like
        # a mangled HTML-unescape of '&amp;' — confirm against the original.
        refined_tweet = re.sub(r'&', '&', refined_tweet)
        refined_tweet = re.sub(r'\s+', ' ', refined_tweet)  # collapse whitespace
        refined_tweet = re.sub(r'^rts*\s+', '', refined_tweet)  # leading rt/rts marker
        refined_tweet = re.sub(r'^\s+', '', refined_tweet)
        refined_tweet = re.sub(r'\S+…','',refined_tweet)  # tokens truncated by Twitter
        refined_tweet = ' '.join([i for i in word_tokenize(refined_tweet) if i not in punctuation_list])
        refined_tweet = refined_tweet.replace(' \' ','\'')
        if tweet[2] in tweets_by_date:
            tweets_by_date[tweet[2]].append(refined_tweet)
        else:
            tweets_by_date[tweet[2]] = [refined_tweet]
        cleaned_string[tweet[0]] = refined_tweet
    print(idx)  # progress indicator: one line per processed file
with open(f'E:\\state_corpora\\tweets_by_date\\{TARGET_STATE}_tweets_by_date.pkl','wb') as pkl_writer:
    pickle.dump(tweets_by_date,pkl_writer)
#Obtain ground truth dates
# Expand each public-health-measure [start, end] interval into the explicit
# list of ISO 'YYYY-MM-DD' day strings it covers.
covid_timeline = pd.read_csv(f'Public_Health_Measures.csv')
start_dates = covid_timeline['Start_Date'].tolist()
# De-duplicate while preserving order (dict keys keep insertion order)
start_dates = list({date:'' for date in start_dates}.keys())
end_dates = covid_timeline['End_Date'].tolist()
end_dates = list({date:'' for date in end_dates}.keys())
# Map start date -> [start, end].  NOTE(review): assumes the de-duplicated
# start and end lists still pair up by position — confirm against the CSV.
start_end_dict = {start_dates[idx]:[start_dates[idx],end_dates[idx]] for idx in range(len(start_dates))}
for key in start_end_dict.keys():
    # Dates are sliced as ISO 'YYYY-MM-DD' strings
    start_year_int = int(key[:4])
    start_month_int = int(key[5:7])
    start_day_int = int(key[-2:])
    end_year_int = int(start_end_dict[key][-1][:4])
    end_month_int = int(start_end_dict[key][-1][5:7])
    end_day_int = int(start_end_dict[key][-1][-2:])
    if start_month_int == end_month_int:
        # Interval stays within one month: enumerate the days directly
        days_to_add = [i for i in range(start_day_int,end_day_int+1)]
        start_end_dict[key] = [f'{start_year_int}-{start_month_int}-{idx}' for idx in days_to_add]
    else:
        # NOTE(review): assumes every month has 31 days and the interval spans
        # at most two months.  Nonexistent dates (e.g. day 31 of a 30-day
        # month) appear harmless because they never match a tweets_by_date
        # key later on — confirm this is intended.
        month1_to_add = [f'{start_year_int}-{start_month_int}-{idx}' for idx in range(start_day_int,32)]
        month2_to_add = [f'{end_year_int}-{end_month_int}-{idx}' for idx in range(1,end_day_int+1)]
        start_end_dict[key] = month1_to_add + month2_to_add
    # Zero-pad single-digit days back to two characters ('-D' -> '-0D')
    for idx, date in enumerate(start_end_dict[key]):
        if date[-2] == '-':
            start_end_dict[key][idx] = date[:-1] + '0' + date[-1]
    # Zero-pad single-digit months back to two characters
    for idx, date in enumerate(start_end_dict[key]):
        if date[-5] == '-':
            start_end_dict[key][idx] = date[:5] + '0' + date[5:]
#Process with newlines for PPMI
# Rewrite each tweet as one token per line — presumably the downstream PPMI
# tooling expects newline-separated tokens; confirm with the consumer.
for key in tweets_by_date.keys():
    tweets = tweets_by_date[key]
    tweets_by_date[key] = ['\n'.join(i.split(' ')) for i in tweets]
#Join into time period corpora
tweets_by_time_period = {}  # period start date -> flat list of tweets in that period
for key in start_end_dict.keys():
    time_range_tweets = []
    for date in start_end_dict[key]:
        if date in tweets_by_date:
            # NOTE(review): the first match assigns the list object stored in
            # tweets_by_date, so subsequent extend() calls also mutate that
            # shared entry — confirm the aliasing is acceptable.
            if time_range_tweets:
                time_range_tweets.extend(tweets_by_date[date])
            else:
                time_range_tweets = tweets_by_date[date]
    tweets_by_time_period[key] = time_range_tweets
    if time_range_tweets:
        # Persist both the raw list (pickle) and a text corpus in which
        # tweets are separated by a run of blank lines.
        with open(f'E:\\state_corpora\\strict_divisions\\corpus_lists\\{TARGET_STATE}_{key}_tweet_list.pkl','wb') as pkl_writer:
            pickle.dump(time_range_tweets,pkl_writer)
        corpus_string = '\n\n\n\n\n\n\n\n'.join(time_range_tweets)
        with open(f'E:\\state_corpora\\strict_divisions\\corpus_strings\\{TARGET_STATE}_{key}_tweet_corpus.txt','w',encoding='utf8') as writer:
            writer.write(corpus_string)
from collections import OrderedDict # OrderedDict deve ser importado!
def enesimo_nao_repetido(entrada: str, n: int):
    """Return the n-th character of ``entrada`` that occurs exactly once.

    Characters are considered in order of first appearance, and every
    character counts (including spaces).  Returns -1 when there are fewer
    than ``n`` non-repeated characters.
    """
    # Ordered mapping: character -> number of occurrences
    contagem = OrderedDict()
    for caractere in entrada:
        contagem[caractere] = contagem.get(caractere, 0) + 1
    # Characters that appear exactly once, in first-appearance order
    unicos = [caractere for caractere, total in contagem.items() if total == 1]
    if n > len(unicos):
        # Not enough non-repeated characters
        return -1
    return unicos[n-1]
if __name__ == "__main__":
    # Demo: the 5th non-repeated character of the sample text.
    # Expected output: N
    amostra = "AA BB CC DD EE F GG H II JJ KK L M N OO"
    posicao = 5
    print(enesimo_nao_repetido(amostra, posicao))
import wiringpi2 as wiringpi
import time
# Pulse a GPIO pin high for four seconds, then release it back to input mode.
PIN = 17  # BCM numbering (wiringPiSetupGpio selects the Broadcom scheme)
wiringpi.wiringPiSetupGpio()
wiringpi.pinMode(PIN, 1)       # 1 = output
wiringpi.digitalWrite(PIN, 1)  # drive high
time.sleep(4)
wiringpi.digitalWrite(PIN, 0)  # drive low
wiringpi.pinMode(PIN, 0)       # 0 = input (release the pin)
| 187 | 88 |
# coding: utf-8
from sqlalchemy import Numeric, Column
from .fundamental_base_sql import FundamentalBase
class AnaStkFinIdx(FundamentalBase):
    """Declarative SQLAlchemy model of per-stock fundamental financial
    indicators.

    Every attribute maps to a fixed-point numeric column (precision 18-21,
    scale 4).  The table name, primary key and any shared columns are
    presumably supplied by FundamentalBase — confirm in
    fundamental_base_sql.py.
    """
    # --- Per-share metrics ---
    earnings_per_share = Column(Numeric(18, 4))
    fully_diluted_earnings_per_share = Column(Numeric(18, 4))
    diluted_earnings_per_share = Column(Numeric(18, 4))
    adjusted_earnings_per_share = Column(Numeric(18, 4))
    adjusted_fully_diluted_earnings_per_share = Column(Numeric(18, 4))
    adjusted_diluted_earnings_per_share = Column(Numeric(18, 4))
    book_value_per_share = Column(Numeric(18, 4))
    operating_cash_flow_per_share = Column(Numeric(18, 4))
    operating_total_revenue_per_share = Column(Numeric(18, 4))
    operating_revenue_per_share = Column(Numeric(18, 4))
    capital_reserve_per_share = Column(Numeric(18, 4))
    earned_reserve_per_share = Column(Numeric(18, 4))
    undistributed_profit_per_share = Column(Numeric(18, 4))
    retained_earnings_per_share = Column(Numeric(18, 4))
    cash_flow_from_operations_per_share = Column(Numeric(18, 4))
    ebit_per_share = Column(Numeric(18, 4))
    free_cash_flow_company_per_share = Column(Numeric(18, 4))
    free_cash_flow_equity_per_share = Column(Numeric(18, 4))
    dividend_per_share = Column(Numeric(18, 4))
    # --- Profitability / return ratios ---
    return_on_equity = Column(Numeric(18, 4))
    return_on_equity_weighted_average = Column(Numeric(18, 4))
    return_on_equity_diluted = Column(Numeric(18, 4))
    adjusted_return_on_equity_average = Column(Numeric(18, 4))
    adjusted_return_on_equity_weighted_average = Column(Numeric(18, 4))
    adjusted_return_on_equity_diluted = Column(Numeric(18, 4))
    return_on_asset = Column(Numeric(18, 4))
    return_on_asset_net_profit = Column(Numeric(18, 4))
    return_on_invested_capital = Column(Numeric(18, 4))
    annual_return_on_equity = Column(Numeric(18, 4))
    annual_return_on_asset = Column(Numeric(18, 4))
    annual_return_on_asset_net_profit = Column(Numeric(18, 4))
    # --- Margin / income-composition ratios ---
    net_profit_margin = Column(Numeric(18, 4))
    gross_profit_margin = Column(Numeric(18, 4))
    cost_to_sales = Column(Numeric(18, 4))
    net_profit_to_revenue = Column(Numeric(18, 4))
    profit_from_operation_to_revenue = Column(Numeric(18, 4))
    ebit_to_revenue = Column(Numeric(18, 4))
    expense_to_revenue = Column(Numeric(18, 4))
    operating_profit_to_profit_before_tax = Column(Numeric(18, 4))
    invesment_profit_to_profit_before_tax = Column(Numeric(18, 4))
    non_operating_profit_to_profit_before_tax = Column(Numeric(18, 4))
    income_tax_to_profit_before_tax = Column(Numeric(18, 4))
    adjusted_profit_to_total_profit = Column(Numeric(18, 4))
    # --- Capital structure / solvency ratios ---
    debt_to_asset_ratio = Column(Numeric(18, 4))
    equity_multiplier = Column(Numeric(18, 4))
    current_asset_to_total_asset = Column(Numeric(18, 4))
    non_current_asset_to_total_asset = Column(Numeric(18, 4))
    interest_bearing_debt_to_capital = Column(Numeric(18, 4))
    current_debt_to_total_debt = Column(Numeric(18, 4))
    non_current_debt_to_total_debt = Column(Numeric(18, 4))
    current_ratio = Column(Numeric(18, 4))
    quick_ratio = Column(Numeric(18, 4))
    super_quick_ratio = Column(Numeric(18, 4))
    debt_to_equity_ratio = Column(Numeric(18, 4))
    equity_to_debt_ratio = Column(Numeric(18, 4))
    equity_to_interest_bearing_debt = Column(Numeric(18, 4))
    ebit_to_debt = Column(Numeric(18, 4))
    ocf_to_debt = Column(Numeric(18, 4))
    ocf_to_interest_bearing_debt = Column(Numeric(18, 4))
    ocf_to_current_ratio = Column(Numeric(18, 4))
    ocf_to_net_debt = Column(Numeric(18, 4))
    time_interest_earned_ratio = Column(Numeric(18, 4))
    long_term_debt_to_working_capital = Column(Numeric(18, 4))
    # --- Turnover / efficiency ratios ---
    account_payable_turnover_rate = Column(Numeric(18, 4))
    account_payable_turnover_days = Column(Numeric(18, 4))
    account_receivable_turnover_days = Column(Numeric(18, 4))
    inventory_turnover = Column(Numeric(18, 4))
    account_receivable_turnover_rate = Column(Numeric(18, 4))
    current_asset_turnover = Column(Numeric(18, 4))
    fixed_asset_turnover = Column(Numeric(18, 4))
    total_asset_turnover = Column(Numeric(18, 4))
    # --- Growth metrics (inc_ prefix) ---
    inc_earnings_per_share = Column(Numeric(18, 4))
    inc_diluted_earnings_per_share = Column(Numeric(18, 4))
    inc_revenue = Column(Numeric(18, 4))
    inc_operating_revenue = Column(Numeric(18, 4))
    inc_gross_profit = Column(Numeric(18, 4))
    inc_profit_before_tax = Column(Numeric(18, 4))
    inc_net_profit = Column(Numeric(18, 4))
    inc_adjusted_net_profit = Column(Numeric(18, 4))
    inc_cash_from_operations = Column(Numeric(18, 4))
    inc_return_on_equity = Column(Numeric(18, 4))
    inc_book_per_share = Column(Numeric(18, 4))
    inc_total_asset = Column(Numeric(18, 4))
    # --- DuPont decomposition (du_ prefix) ---
    du_return_on_equity = Column(Numeric(18, 4))
    du_equity_multiplier = Column(Numeric(18, 4))
    du_asset_turnover_ratio = Column(Numeric(18, 4))
    du_profit_margin = Column(Numeric(18, 4))
    du_return_on_sales = Column(Numeric(18, 4))
    # --- Derived absolute figures ---
    non_recurring_profit_and_loss = Column(Numeric(18, 4))
    adjusted_net_profit = Column(Numeric(18, 4))
    ebit = Column(Numeric(18, 4))
    ebitda = Column(Numeric(18, 4))
    invested_capital = Column(Numeric(18, 4))
    working_capital = Column(Numeric(18, 4))
    net_working_capital = Column(Numeric(18, 4))
    retained_earnings = Column(Numeric(18, 4))
    interest_bearing_debt = Column(Numeric(18, 4))
    net_debt = Column(Numeric(18, 4))
    non_interest_bearing_current_debt = Column(Numeric(18, 4))
    non_interest_bearing_non_current_debt = Column(Numeric(18, 4))
    fcff = Column(Numeric(18, 4))
    fcfe = Column(Numeric(18, 4))
    depreciation_and_amortization = Column(Numeric(18, 4))
    # --- Valuation metrics (wider precision for enterprise value) ---
    ev = Column(Numeric(21, 4))
    ev_2 = Column(Numeric(21, 4))
    ev_to_ebit = Column(Numeric(18, 4))
    ev_to_ebitda = Column(Numeric(19, 4))
    tangible_assets = Column(Numeric(19, 4))
    tangible_asset_to_debt = Column(Numeric(19, 4))
    tangible_asset_to_interest_bearing_debt = Column(Numeric(19, 4))
| 5,946 | 2,532 |
"""Ice-liquid water equilibrium functions.
This module provides thermodynamic properties of ice and liquid water in
equilibrium, e.g. the enthalpy of melting.
:Examples:
>>> pressure(temp=270.)
39313338.8825
>>> densityliq(temp=270.)
1019.05568894
>>> enthalpymelt(temp=270.)
325166.686739
>>> entropymelt(temp=270.)
1204.32106199
>>> volumemelt(temp=270.)
-1.04052121182e-4
>>> temperature(pres=1e7)
272.401648868
>>> densityliq(pres=1e7)
1004.79353660
>>> enthalpymelt(pres=1e7)
331548.910815
>>> entropymelt(pres=1e7)
1217.13254010
>>> volumemelt(pres=1e7)
-9.4217890326e-05
:Functions:
* :func:`eq_tp`: Calculate ice-liquid water equilibrium properties at
either temperature or pressure.
* :func:`temperature`: Temperature at ice-liquid water equilibrium.
* :func:`pressure`: Pressure at ice-liquid water equilibrium.
* :func:`densityliq`: Liquid water density at ice-liquid water
equilibrium.
* :func:`chempot`: Chemical potential at ice-liquid water equilibrium.
* :func:`densityice`: Ice density at ice-liquid water equilibrium.
* :func:`enthalpyice`: Ice enthalpy at ice-liquid water equilibrium.
* :func:`enthalpyliq`: Liquid water enthalpy at ice-liquid water
equilibrium.
* :func:`enthalpymelt`: Enthalpy of melting.
* :func:`entropyice`: Ice entropy at ice-liquid water equilibrium.
* :func:`entropyliq`: Liquid water entropy at ice-liquid water
equilibrium.
* :func:`entropymelt`: Entropy of melting.
* :func:`volumemelt`: Specific volume of melting.
"""
__all__ = ['eq_tp','temperature','pressure','densityliq','chempot','densityice',
'enthalpyice','enthalpyliq','enthalpymelt','entropyice','entropyliq',
'entropymelt','volumemelt']
import warnings
import numpy
from teospy import constants0
from teospy import ice1
from teospy import flu2
from teospy import ice2
from teospy import maths3
# Module-level shorthands for shared constants and helpers; presumably the
# TEOS-10 triple-point values — confirm in constants0.
_CHKTOL = constants0.CHKTOL  # default tolerance for value-consistency checks
_TTP = constants0.TTP    # triple-point temperature (K), used to form reduced tau
_PTPI = constants0.PTPI  # triple-point pressure (Pa), used to form reduced psi
_DLTP = constants0.DLTP  # liquid water density at the triple point (kg/m3)
_LILTP = constants0.LILTP  # NOTE(review): not referenced in this module's visible code
_chkflubnds = constants0.chkflubnds
_chkicebnds = constants0.chkicebnds
_ice_g = ice1.ice_g
_eq_chempot = flu2.eq_chempot
_eq_pressure = flu2.eq_pressure
_newton = maths3.newton
# Empirical coefficients for the initial-guess polynomials:
# _C_APPS[0] -> density deviation as a cubic in reduced temperature tau;
# _C_APPS[1] -> reduced temperature as a quadratic in reduced pressure psi.
_C_APPS = ((-1.78582981492113,-12.2325084306734,-52.8236936433529),
    (-1.67329759176351e-7,-2.02262929999658e-13))
## Equilibrium functions
def _approx_t(temp):
    """Approximate P and Dl at temperature T.

    Seed values for the Newton solver: the liquid water density comes from
    an empirical cubic in the reduced temperature, and the pressure is the
    fluid pressure evaluated at that density.

    :arg float temp: Temperature in K.
    :returns: Pressure in Pa and liquid water density in kg/m3.
    """
    tau = temp/_TTP - 1
    deviation = 0.
    for power, coeff in enumerate(_C_APPS[0], start=1):
        deviation += coeff * tau**power
    dliq = _DLTP * (1 + deviation)
    return flu2.pressure(temp, dliq), dliq
def _approx_p(pres):
    """Approximate T and Dl at pressure P.

    Seed values for the Newton solver: an empirical quadratic maps the
    reduced pressure to a reduced temperature, and the same density cubic
    as in _approx_t maps that to a liquid water density.

    :arg float pres: Pressure in Pa.
    :returns: Temperature in K and liquid water density in kg/m3.
    """
    psi = pres/_PTPI - 1
    coeff1, coeff2 = _C_APPS[1]
    tau = coeff1*psi + coeff2*psi**2
    deviation = 0.
    for power, coeff in enumerate(_C_APPS[0], start=1):
        deviation += coeff * tau**power
    return _TTP * (1 + tau), _DLTP * (1 + deviation)
def _diff_t(p,dl,temp):
    """Ice-liquid disequilibrium residuals at fixed temperature.

    The equilibrium conditions at temperature ``temp`` are::

        p == pressure of liquid water at (temp, dl)
        chemical potential of ice at (temp, p) == potential of liquid at (temp, dl)

    Both sides of these equations and their Jacobians with respect to the
    unknowns (p, dl) are returned for the Newton solver.

    :arg float p: Pressure in Pa.
    :arg float dl: Liquid water density in kg/m3.
    :arg float temp: Temperature in K.
    :returns: Left-hand sides, right-hand sides, Jacobian of the LHS, and
        Jacobian of the RHS.
    :rtype: tuple(array(float))
    """
    lhs = numpy.array([p, _ice_g(0,0,temp,p)])
    rhs = numpy.array([_eq_pressure(0,0,temp,dl), _eq_chempot(0,0,temp,dl)])
    # d(lhs)/d(p,dl): only p itself and the ice potential depend on p
    dlhs = numpy.array([[1., 0.], [_ice_g(0,1,temp,p), 0.]])
    # d(rhs)/d(p,dl): the fluid quantities depend only on dl
    drhs = numpy.array([[0., _eq_pressure(0,1,temp,dl)],
        [0., _eq_chempot(0,1,temp,dl)]])
    return lhs, rhs, dlhs, drhs
def _diff_p(t,dl,pres):
    """Ice-liquid disequilibrium residuals at fixed pressure.

    The equilibrium conditions at pressure ``pres`` are::

        pres == pressure of liquid water at (t, dl)
        chemical potential of ice at (t, pres) == potential of liquid at (t, dl)

    Both sides of these equations and their Jacobians with respect to the
    unknowns (t, dl) are returned for the Newton solver.

    :arg float t: Temperature in K.
    :arg float dl: Liquid water density in kg/m3.
    :arg float pres: Pressure in Pa.
    :returns: Left-hand sides, right-hand sides, Jacobian of the LHS, and
        Jacobian of the RHS.
    :rtype: tuple(array(float))
    """
    lhs = numpy.array([pres, _ice_g(0,0,t,pres)])
    rhs = numpy.array([_eq_pressure(0,0,t,dl), _eq_chempot(0,0,t,dl)])
    # d(lhs)/d(t,dl): pres is constant; the ice potential depends only on t
    dlhs = numpy.array([[0., 0.], [_ice_g(1,0,t,pres), 0.]])
    # d(rhs)/d(t,dl): the fluid quantities depend on both unknowns
    drhs = numpy.array([[_eq_pressure(1,0,t,dl), _eq_pressure(0,1,t,dl)],
        [_eq_chempot(1,0,t,dl), _eq_chempot(0,1,t,dl)]])
    return lhs, rhs, dlhs, drhs
def eq_tp(temp=None,pres=None,dliq=None,chkvals=False,chktol=_CHKTOL,
    temp0=None,pres0=None,dliq0=None,chkbnd=False,mathargs=None):
    """Get primary ice-liquid variables at T or P.

    Get the values of all primary variables for ice and liquid water in
    equilibrium at either of a given temperature or pressure.

    If the calculation has already been done, the results can be passed
    to avoid unnecessary repeat calculations. If enough values are
    passed, they will be checked for consistency if chkvals is True.

    :arg temp: Temperature in K.
    :type temp: float or None
    :arg pres: Pressure in Pa.
    :type pres: float or None
    :arg dliq: Liquid water density in kg/m3. If unknown, pass None
        (default) and it will be calculated.
    :type dliq: float or None
    :arg bool chkvals: If True (default False) and all values are given,
        this function will calculate the disequilibrium and raise a
        warning if the results are not within a given tolerance.
    :arg float chktol: Tolerance to use when checking values (default
        _CHKTOL).
    :arg temp0: Initial guess for the temperature in K. If None
        (default) then `_approx_p` is used.
    :type temp0: float or None
    :arg pres0: Initial guess for the pressure in Pa. If None (default)
        then `_approx_t` is used.
    :type pres0: float or None
    :arg dliq0: Initial guess for the liquid water density in kg/m3. If
        None (default) then `_approx_t` or `_approx_p` is used.
    :type dliq0: float or None
    :arg bool chkbnd: If True then warnings are raised when the given
        values are valid but outside the recommended bounds (default
        False).
    :arg mathargs: Keyword arguments to the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
        (default) then no arguments are passed and default parameters
        will be used.
    :returns: Temperature, pressure, and liquid water density (all in SI
        units).
    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.
    """
    if temp is None and pres is None:
        errmsg = 'One of temp or pres must be provided'
        raise ValueError(errmsg)
    if temp is not None:
        # Temperature fixed: solve for (pres, dliq) unless the caller
        # supplied both already.  None entries in x0 make _newton fall back
        # on the _approx_t initial guesses.
        if any(val is None for val in (pres,dliq)):
            x0 = (pres0,dliq0)
            fargs = (temp,)
            if mathargs is None:
                mathargs = dict()
            x1 = _newton(_diff_t,x0,_approx_t,fargs=fargs,**mathargs)
            pres, dliq = x1
    else:
        # Pressure fixed: solve for (temp, dliq) with _approx_p guesses.
        x0 = (temp0,dliq0)
        fargs = (pres,)
        if mathargs is None:
            mathargs = dict()
        x1 = _newton(_diff_p,x0,_approx_p,fargs=fargs,**mathargs)
        temp, dliq = x1
    # Warn (if chkbnd) when the solution is outside the recommended bounds
    _chkflubnds(temp,dliq,chkbnd=chkbnd)
    _chkicebnds(temp,pres,chkbnd=chkbnd)
    if not chkvals:
        return temp, pres, dliq
    # Consistency check: re-evaluate the residuals and compare relative
    # (or absolute, for near-zero RHS) disagreement against chktol.
    lhs, rhs, __, __ = _diff_p(temp,dliq,pres)
    errs = list()
    for (l,r) in zip(lhs,rhs):
        if abs(r) >= chktol:
            errs.append(abs(l/r-1))
        else:
            errs.append(abs(l-r))
    if max(errs) > chktol:
        warnmsg = ('Given values {0} and solutions {1} disagree to more than '
            'the tolerance {2}').format(lhs,rhs,chktol)
        warnings.warn(warnmsg,RuntimeWarning)
    return temp, pres, dliq
## Thermodynamic properties
def temperature(temp=None,pres=None,dliq=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False,
    mathargs=None):
    """Calculate the ice-liquid equilibrium temperature.

    One of temp or pres must be given; any missing primary variables are
    solved for by :func:`eq_tp`, which also documents every keyword
    argument (initial guesses, tolerances, bound checks, solver options).

    :returns: Temperature in K.
    :raises ValueError: If neither temp nor pres is provided.
    :raises RuntimeWarning: If chkvals is True, all values are given, and
        the relative disequilibrium exceeds chktol.

    :Examples:

    >>> temperature(pres=1e7)
    272.40164887
    """
    equil = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd,
        mathargs=mathargs)
    # equil is (temp, pres, dliq)
    return equil[0]
def pressure(temp=None,pres=None,dliq=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False,
    mathargs=None):
    """Calculate the ice-liquid equilibrium (melting) pressure.

    One of temp or pres must be given; any missing primary variables are
    solved for by :func:`eq_tp`, which also documents every keyword
    argument (initial guesses, tolerances, bound checks, solver options).

    :returns: Pressure in Pa.
    :raises ValueError: If neither temp nor pres is provided.
    :raises RuntimeWarning: If chkvals is True, all values are given, and
        the relative disequilibrium exceeds chktol.

    :Examples:

    >>> pressure(temp=270.)
    39313338.8825
    """
    equil = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd,
        mathargs=mathargs)
    # equil is (temp, pres, dliq)
    return equil[1]
def densityliq(temp=None,pres=None,dliq=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False,
    mathargs=None):
    """Calculate the liquid water density at ice-liquid equilibrium.

    One of temp or pres must be given; any missing primary variables are
    solved for by :func:`eq_tp`, which also documents every keyword
    argument (initial guesses, tolerances, bound checks, solver options).

    :returns: Liquid water density in kg/m3.
    :raises ValueError: If neither temp nor pres is provided.
    :raises RuntimeWarning: If chkvals is True, all values are given, and
        the relative disequilibrium exceeds chktol.

    :Examples:

    >>> densityliq(pres=1e7)
    1004.79353660
    >>> densityliq(temp=270.)
    1019.05568894
    """
    equil = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd,
        mathargs=mathargs)
    # equil is (temp, pres, dliq)
    return equil[2]
def chempot(temp=None,pres=None,dliq=None,chkvals=False,chktol=_CHKTOL,
    temp0=None,pres0=None,dliq0=None,chkbnd=False,mathargs=None):
    """Calculate the chemical potential at ice-liquid equilibrium.

    One of temp or pres must be given; any missing primary variables are
    solved for by :func:`eq_tp`, which also documents every keyword
    argument (initial guesses, tolerances, bound checks, solver options).

    :returns: Chemical potential in J/kg.
    :raises ValueError: If neither temp nor pres is provided.
    :raises RuntimeWarning: If chkvals is True, all values are given, and
        the relative disequilibrium exceeds chktol.

    :Examples:

    >>> chempot(pres=1e7)
    9972.8817069
    >>> chempot(temp=270.)
    38870.0605192
    """
    temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd,
        mathargs=mathargs)
    # The two phases share one potential at equilibrium; evaluate the ice side.
    return _ice_g(0,0,temp,pres)
def densityice(temp=None,pres=None,dliq=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False,
    mathargs=None):
    """Calculate the ice density at ice-liquid equilibrium.

    One of temp or pres must be given; any missing primary variables are
    solved for by :func:`eq_tp`, which also documents every keyword
    argument (initial guesses, tolerances, bound checks, solver options).

    :returns: Ice density in kg/m3.
    :raises ValueError: If neither temp nor pres is provided.
    :raises RuntimeWarning: If chkvals is True, all values are given, and
        the relative disequilibrium exceeds chktol.

    :Examples:

    >>> densityice(pres=1e7)
    917.896690830
    >>> densityice(temp=270.)
    921.359428514
    """
    temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd,
        mathargs=mathargs)
    return ice2.density(temp,pres)
def enthalpyice(temp=None,pres=None,dliq=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False,
    mathargs=None):
    """Calculate the specific enthalpy of ice at ice-liquid equilibrium.

    One of temp or pres must be given; any missing primary variables are
    solved for by :func:`eq_tp`, which also documents every keyword
    argument (initial guesses, tolerances, bound checks, solver options).

    :returns: Enthalpy in J/kg.
    :raises ValueError: If neither temp nor pres is provided.
    :raises RuntimeWarning: If chkvals is True, all values are given, and
        the relative disequilibrium exceeds chktol.

    :Examples:

    >>> enthalpyice(pres=1e7)
    -324602.983822
    >>> enthalpyice(temp=270.)
    -299055.938629
    """
    temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd,
        mathargs=mathargs)
    return ice2.enthalpy(temp,pres)
def enthalpyliq(temp=None,pres=None,dliq=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False,
    mathargs=None):
    """Calculate the specific enthalpy of liquid water at ice-liquid
    equilibrium.

    One of temp or pres must be given; any missing primary variables are
    solved for by :func:`eq_tp`, which also documents every keyword
    argument (initial guesses, tolerances, bound checks, solver options).

    :returns: Enthalpy in J/kg.
    :raises ValueError: If neither temp nor pres is provided.
    :raises RuntimeWarning: If chkvals is True, all values are given, and
        the relative disequilibrium exceeds chktol.

    :Examples:

    >>> enthalpyliq(pres=1e7)
    6945.9269937
    >>> enthalpyliq(temp=270.)
    26110.7481094
    """
    temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd,
        mathargs=mathargs)
    return flu2.enthalpy(temp,dliq)
def enthalpymelt(temp=None,pres=None,dliq=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False,
    mathargs=None):
    """Calculate the specific enthalpy of melting (J/kg), i.e. the liquid
    enthalpy minus the ice enthalpy at equilibrium.

    Provide at least one of temp (K) or pres (Pa); any missing equilibrium
    values, including the liquid water density dliq (kg/m3), are solved for
    by `eq_tp`. temp0/pres0/dliq0 seed the root-finder; chkvals/chktol
    control disequilibrium warnings; chkbnd enables bounds warnings;
    mathargs is forwarded to the Newton solver.

    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium exceeds chktol,
        when chkvals is True and all values are given.

    :Examples:

    >>> enthalpymelt(pres=1e7)
    331548.910815
    >>> enthalpymelt(temp=270.)
    325166.686739
    """
    temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd,
        mathargs=mathargs)
    # Latent heat of melting: liquid minus ice enthalpy.
    return flu2.enthalpy(temp,dliq) - ice2.enthalpy(temp,pres)
def entropyice(temp=None,pres=None,dliq=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False,
    mathargs=None):
    """Calculate the specific entropy (J/kg/K) of ice for ice and liquid
    water in equilibrium.

    Provide at least one of temp (K) or pres (Pa); any missing equilibrium
    values, including the liquid water density dliq (kg/m3), are solved for
    by `eq_tp`. temp0/pres0/dliq0 seed the root-finder; chkvals/chktol
    control disequilibrium warnings; chkbnd enables bounds warnings;
    mathargs is forwarded to the Newton solver.

    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium exceeds chktol,
        when chkvals is True and all values are given.

    :Examples:

    >>> entropyice(pres=1e7)
    -1228.24464139
    >>> entropyice(temp=270.)
    -1251.57777462
    """
    temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd,
        mathargs=mathargs)
    return ice2.entropy(temp,pres)
def entropyliq(temp=None,pres=None,dliq=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False,
    mathargs=None):
    """Calculate the specific entropy (J/kg/K) of liquid water for ice and
    liquid water in equilibrium.

    Provide at least one of temp (K) or pres (Pa); any missing equilibrium
    values, including the liquid water density dliq (kg/m3), are solved for
    by `eq_tp`. temp0/pres0/dliq0 seed the root-finder; chkvals/chktol
    control disequilibrium warnings; chkbnd enables bounds warnings;
    mathargs is forwarded to the Newton solver.

    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium exceeds chktol,
        when chkvals is True and all values are given.

    :Examples:

    >>> entropyliq(pres=1e7)
    -11.11210129
    >>> entropyliq(temp=270.)
    -47.2567126291
    """
    temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd,
        mathargs=mathargs)
    return flu2.entropy(temp,dliq)
def entropymelt(temp=None,pres=None,dliq=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False,
    mathargs=None):
    """Calculate the specific entropy of melting (J/kg/K), i.e. the liquid
    entropy minus the ice entropy at equilibrium.

    Provide at least one of temp (K) or pres (Pa); any missing equilibrium
    values, including the liquid water density dliq (kg/m3), are solved for
    by `eq_tp`. temp0/pres0/dliq0 seed the root-finder; chkvals/chktol
    control disequilibrium warnings; chkbnd enables bounds warnings;
    mathargs is forwarded to the Newton solver.

    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium exceeds chktol,
        when chkvals is True and all values are given.

    :Examples:

    >>> entropymelt(pres=1e7)
    1217.13254010
    >>> entropymelt(temp=270.)
    1204.32106199
    """
    temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd,
        mathargs=mathargs)
    # Entropy of melting: liquid minus ice entropy.
    return flu2.entropy(temp,dliq) - ice2.entropy(temp,pres)
def volumemelt(temp=None,pres=None,dliq=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False,
    mathargs=None):
    """Calculate the specific volume of melting (m3/kg), i.e. the liquid
    specific volume minus the ice specific volume at equilibrium.

    Provide at least one of temp (K) or pres (Pa); any missing equilibrium
    values, including the liquid water density dliq (kg/m3), are solved for
    by `eq_tp`. temp0/pres0/dliq0 seed the root-finder; chkvals/chktol
    control disequilibrium warnings; chkbnd enables bounds warnings;
    mathargs is forwarded to the Newton solver.

    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium exceeds chktol,
        when chkvals is True and all values are given.

    :Examples:

    >>> volumemelt(pres=1e7)
    -9.4217890326e-05
    >>> volumemelt(temp=270.)
    -1.04052121182e-4
    """
    temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd,
        mathargs=mathargs)
    # Ice specific volume is the pressure derivative of the ice Gibbs function.
    vice = _ice_g(0,1,temp,pres,chkbnd=chkbnd)
    vliq = dliq**(-1)
    return vliq - vice
#encoding:utf-8
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import heapq
def _decorate_source(source):
for message in source:
yield ((message.dt, message.source_id), message)
def date_sorted_sources(*sources):
    """Merge several message sources into one stream ordered by date.

    Each source is decorated with a (dt, source_id) key, the decorated
    streams are merged lazily with heapq, and the key is stripped off
    before yielding each message.
    """
    decorated = [_decorate_source(source) for source in sources]
    for _key, message in heapq.merge(*decorated):
        yield message
# Generated by Django 2.2.5 on 2019-11-28 10:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``edit_groups`` and ``read_groups`` many-to-many relations from
    ExpressionStudy to ``auth.Group`` (group-based access control, per the
    field and related names)."""

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
        ('studies', '0025_auto_20191128_0958'),
    ]

    operations = [
        migrations.AddField(
            model_name='expressionstudy',
            name='edit_groups',
            field=models.ManyToManyField(blank=True, related_name='edit_access_to', to='auth.Group'),
        ),
        migrations.AddField(
            model_name='expressionstudy',
            name='read_groups',
            field=models.ManyToManyField(blank=True, related_name='read_access_to', to='auth.Group'),
        ),
    ]
from nmigen import *
from nmigen.build import Platform, ResourceError
from nmigen.back.pysim import Simulator, Delay, Settle
# Small helper classes to simulate the structure of the platform device
class _OutputSimulator():
def __init__(self, signal):
self.signal = signal
self.eq = self.signal.eq
class _SegmentSimulator():
    """Stands in for one display-segment resource: exposes an ``.o`` output
    wrapping the given signal, mirroring the platform device structure."""
    def __init__(self, signal):
        self.o = _OutputSimulator(signal)
class FourteenSegmentDisplay(Elaboratable):
    """
    Shows the provided ASCII character on a 14 segment display.

    The lower seven bits of ``data`` select the character (printable ASCII,
    0x20..0x7E); the eighth bit switches the dot segment on or off.
    """
    def __init__(self, deviceType="alnum_led", deviceId=0, simulation=False):
        # Public
        self.data = Signal(8, reset=0)  # ASCII code in bits 0-6, dot in bit 7.
        self.simulation = simulation  # When True, drive plain Signals instead of platform pins.
        self.simSignals = []  # Per-segment Signals, populated in elaborate() when simulating.
        # Private
        self._device = None  # Platform resource (or simulator stand-ins), bound in elaborate().
        self._deviceType = deviceType
        self._deviceId = deviceId
        # Segment names of the requested resource; order matches the LUT columns.
        self._segments = ['a','b','c','d','e','f','g','h','j','k','l','m','n','p']
        self._dotSegment = 'dp'
        # Character -> segment lookup table, indexed by (ASCII code - 0x20).
        # Each row gives the on/off state of the 14 segments in
        # self._segments order.
        self._lut = [
            [0,0,0,0,0,0, 0,0,0, 0, 0,0,0, 0], #   (0x20)
            [0,0,0,0,1,1, 0,0,0, 0, 0,0,0, 0], # ! (0x21)
            [0,1,0,0,0,1, 0,0,0, 0, 0,0,0, 0], # " (0x22)
            [0,1,1,1,0,0, 0,1,0, 1, 0,1,0, 1], # # (0x23)
            [1,0,1,1,0,1, 0,1,0, 1, 0,1,0, 1], # $ (0x24)
            [0,0,1,0,0,1, 0,0,1, 0, 0,0,1, 0], # % (0x25)
            [1,0,0,1,1,0, 1,0,1, 0, 1,0,0, 1], # & (0x26)
            [0,1,0,0,0,0, 0,1,0, 0, 0,0,0, 0], # ' (0x27)
            [1,0,0,1,1,1, 0,0,0, 0, 0,0,0, 0], # ( (0x28)
            [1,1,1,1,0,0, 0,0,0, 0, 0,0,0, 0], # ) (0x29)
            [0,0,0,0,0,0, 1,1,1, 1, 1,1,1, 1], # * (0x2A)
            [0,0,0,0,0,0, 0,1,0, 1, 0,1,0, 1], # + (0x2B)
            [0,0,0,0,0,0, 0,0,0, 0, 1,0,0, 0], # , (0x2C)
            [0,0,0,0,0,0, 0,0,0, 1, 0,0,0, 1], # - (0x2D)
            [0,0,0,0,0,0, 0,0,0, 0, 0,1,0, 0], # . (0x2E)
            [0,0,0,0,0,0, 0,0,1, 0, 0,0,1, 0], # / (0x2F)
            [1,1,1,1,1,1, 0,0,0, 0, 0,0,0, 0], # 0 (0x30)
            [0,1,1,0,0,0, 0,0,1, 0, 0,0,0, 0], # 1 (0x31)
            [1,1,0,1,1,0, 0,0,0, 1, 0,0,0, 1], # 2 (0x32)
            [1,1,1,1,0,0, 0,0,0, 1, 0,0,0, 0], # 3 (0x33)
            [0,1,1,0,0,1, 0,0,0, 1, 0,0,0, 1], # 4 (0x34)
            [1,0,1,1,0,1, 0,0,0, 1, 0,0,0, 1], # 5 (0x35)
            [1,0,1,1,1,1, 0,0,0, 1, 0,0,0, 1], # 6 (0x36)
            [1,0,0,0,0,0, 0,0,1, 0, 0,1,0, 0], # 7 (0x37)
            [1,1,1,1,1,1, 0,0,0, 1, 0,0,0, 1], # 8 (0x38)
            [1,1,1,0,0,1, 0,0,0, 1, 0,0,0, 1], # 9 (0x39)
            [0,0,0,0,0,0, 0,1,0, 0, 0,1,0, 0], # : (0x3A)
            [0,0,0,0,0,0, 0,1,0, 0, 0,0,1, 0], # ; (0x3B)
            [0,0,0,0,0,0, 0,0,1, 0, 1,0,0, 0], # < (0x3C)
            [0,0,0,1,0,0, 0,0,0, 1, 0,0,0, 1], # = (0x3D)
            [0,0,0,0,0,0, 1,0,0, 0, 0,0,1, 0], # > (0x3E)
            [1,0,0,0,0,1, 0,0,1, 0, 0,1,0, 0], # ? (0x3F)
            [1,1,1,1,1,1, 1,0,1, 0, 1,0,1, 0], # @ (0x40)
            [1,1,1,0,1,1, 0,0,0, 1, 0,0,0, 1], # A (0x41)
            [1,1,1,1,0,0, 0,1,0, 1, 0,1,0, 0], # B (0x42)
            [1,0,0,1,1,1, 0,0,0, 0, 0,0,0, 0], # C (0x43)
            [1,1,1,1,0,0, 0,1,0, 0, 0,1,0, 0], # D (0x44)
            [1,0,0,1,1,1, 0,0,0, 1, 0,0,0, 1], # E (0x45)
            [1,0,0,0,1,1, 0,0,0, 1, 0,0,0, 1], # F (0x46)
            [1,0,1,1,1,1, 0,0,0, 1, 0,0,0, 0], # G (0x47)
            [0,1,1,0,1,1, 0,0,0, 1, 0,0,0, 1], # H (0x48)
            [1,0,0,1,0,0, 0,1,0, 0, 0,1,0, 0], # I (0x49)
            [0,1,1,1,1,0, 0,0,0, 0, 0,0,0, 0], # J (0x4A)
            [0,0,0,0,1,1, 0,0,1, 0, 1,0,0, 1], # K (0x4B)
            [0,0,0,1,1,1, 0,0,0, 0, 0,0,0, 0], # L (0x4C)
            [0,1,1,0,1,1, 1,0,1, 0, 0,0,0, 0], # M (0x4D)
            [0,1,1,0,1,1, 1,0,0, 0, 1,0,0, 0], # N (0x4E)
            [1,1,1,1,1,1, 0,0,0, 0, 0,0,0, 0], # O (0x4F)
            [1,1,0,0,1,1, 0,0,0, 1, 0,0,0, 1], # P (0x50)
            [1,1,1,1,1,1, 0,0,0, 0, 1,0,0, 0], # Q (0x51)
            [1,1,0,0,1,1, 0,0,0, 1, 1,0,0, 1], # R (0x52)
            [1,0,1,1,0,0, 1,0,0, 1, 0,0,0, 0], # S (0x53)
            [1,0,0,0,0,0, 0,1,0, 0, 0,1,0, 0], # T (0x54)
            [0,1,1,1,1,1, 0,0,0, 0, 0,0,0, 0], # U (0x55)
            [0,0,0,0,1,1, 0,0,1, 0, 0,0,1, 0], # V (0x56)
            [0,1,1,0,1,1, 0,0,0, 0, 1,0,1, 0], # W (0x57)
            [0,0,0,0,0,0, 1,0,1, 0, 1,0,1, 0], # X (0x58)
            [0,0,0,0,0,0, 1,0,1, 0, 0,1,0, 0], # Y (0x59)
            [1,0,0,1,0,0, 0,0,1, 0, 0,0,1, 0], # Z (0x5A)
            [1,0,0,1,1,1, 0,0,0, 0, 0,0,0, 0], # [ (0x5B)
            [0,0,0,0,0,0, 1,0,0, 0, 1,0,0, 0], # \ (0x5C)
            [1,1,1,1,0,0, 0,0,0, 0, 0,0,0, 0], # ] (0x5D)
            [1,1,0,0,0,1, 0,0,0, 0, 0,0,0, 0], # ^ (0x5E)
            [0,0,0,1,0,0, 0,0,0, 0, 0,0,0, 0], # _ (0x5F)
            [0,0,0,0,0,0, 1,0,0, 0, 0,0,0, 0], # ` (0x60)
            [1,1,1,1,1,0, 0,0,0, 1, 0,0,0, 1], # a (0x61)
            [0,0,0,1,1,1, 0,0,0, 0, 1,0,0, 1], # b (0x62)
            [0,0,0,1,1,0, 0,0,0, 1, 0,0,0, 1], # c (0x63)
            [0,1,1,1,0,0, 0,0,0, 1, 0,0,1, 0], # d (0x64)
            [1,0,0,1,1,1, 0,0,0, 0, 0,0,0, 1], # e (0x65)
            [1,0,0,0,1,1, 0,0,0, 0, 0,0,0, 1], # f (0x66)
            [1,1,1,1,0,0, 1,0,0, 1, 0,0,0, 0], # g (0x67)
            [0,0,1,0,1,1, 0,0,0, 1, 0,0,0, 1], # h (0x68)
            [0,0,0,0,0,0, 0,0,0, 0, 0,1,0, 0], # i (0x69)
            [0,1,1,1,0,0, 0,0,0, 0, 0,0,0, 0], # j (0x6A)
            [0,0,0,0,1,1, 0,0,1, 0, 1,0,0, 0], # k (0x6B)
            [0,0,0,0,0,0, 0,1,0, 0, 0,1,0, 0], # l (0x6C)
            [0,0,1,0,1,0, 0,0,0, 1, 0,1,0, 1], # m (0x6D)
            [0,0,0,0,1,0, 0,0,0, 0, 1,0,0, 1], # n (0x6E)
            [0,0,1,1,1,0, 0,0,0, 1, 0,0,0, 1], # o (0x6F)
            [1,0,0,0,1,1, 0,0,1, 0, 0,0,0, 1], # p (0x70)
            [1,1,0,0,0,1, 0,0,0, 1, 1,0,0, 1], # q (0x71)
            [0,0,0,0,1,0, 0,0,0, 0, 0,0,0, 1], # r (0x72)
            [1,0,1,1,0,0, 1,0,0, 1, 0,0,0, 0], # s (0x73)
            [0,0,0,1,1,1, 0,0,0, 0, 0,0,0, 1], # t (0x74)
            [0,0,1,1,1,0, 0,0,0, 0, 0,0,0, 0], # u (0x75)
            [0,0,0,0,1,0, 0,0,0, 0, 0,0,1, 0], # v (0x76)
            [0,0,1,0,1,0, 0,0,0, 0, 1,0,1, 0], # w (0x77)
            [0,0,0,0,0,0, 1,0,1, 0, 1,0,1, 0], # x (0x78)
            [0,1,1,1,0,0, 0,1,0, 1, 0,0,0, 0], # y (0x79)
            [1,0,0,1,0,0, 0,0,1, 0, 0,0,1, 0], # z (0x7A)
            [1,0,0,1,0,0, 1,0,0, 0, 0,0,1, 1], # { (0x7B)
            [0,0,0,0,0,0, 0,1,0, 0, 0,1,0, 0], # | (0x7C)
            [1,0,0,1,0,0, 0,0,1, 1, 1,0,0, 0], # } (0x7D)
            [0,0,0,0,0,0, 0,0,0, 1, 0,0,0, 1], # ~ (0x7E)
        ]

    def elaborate(self, platform: Platform) -> Module:
        """Build the combinational decode logic driving the segments."""
        m = Module()
        if self.simulation:
            # Build stand-ins mimicking platform.request(...).<segment>.o
            self._device = {}
            for segment in self._segments + [self._dotSegment]:
                s = Signal(1)
                s.name = segment
                self.simSignals.append(s)
                self._device[segment] = _SegmentSimulator(s)
        else:
            self._device = platform.request(self._deviceType, self._deviceId)
        # Remove the eighth bit from the data signal and map the seven remaining bits onto the LUT
        data7 = Signal(unsigned(7))
        with m.If(self.data[0:7] < 0x20): # Out of range
            m.d.comb += data7.eq(0) # Set to SPACE (0x20), 0 in our LUT, when data is out of range
        with m.Else():
            m.d.comb += data7.eq(self.data[0:7]-0x20)
        # Drive the dot segment using the eighth bit of the data signal
        m.d.comb += self._device[self._dotSegment].o.eq(self.data[7])
        # Drive the other fourteen segments using the LUT
        with m.Switch(data7):
            for i in range(len(self._lut)):
                with m.Case(i): # (SPACE to ~)
                    for j in range(len(self._segments)):
                        m.d.comb += self._device[self._segments[j]].o.eq(self._lut[i][j])
            with m.Default(): # (0x7F / DEL)
                # Codes above the table light every segment.
                for j in range(len(self._segments)):
                    m.d.comb += self._device[self._segments[j]].o.eq(1)
        return m

    def ports(self):
        """Return the signals of interest for simulation traces."""
        ports = [self.data]
        if self.simulation:
            ports.extend(self.simSignals)
        return ports
if __name__ == "__main__":
    # Self-test: exhaustively sweep the 8-bit input in simulation mode.
    dut = FourteenSegmentDisplay(simulation = True)
    sim = Simulator(dut)
    def stimulus():
        # The design is purely combinational, so we simply step through
        # every possible input value and let it settle.
        for value in range(256):
            yield dut.data.eq(value)
            yield Delay(1e-6)
            yield Settle()
    sim.add_process(stimulus)
    with sim.write_vcd("test.vcd", "test.gtkw", traces=dut.ports()):
        sim.run()
import json
import boto.kinesis
from motorway.intersection import Intersection
class KinesisInsertIntersection(Intersection):
    """Motorway intersection that pushes each message into an AWS Kinesis stream.

    Subclasses must set ``stream_name`` and may override
    :meth:`connection_parameters` to change region/credentials.
    """
    stream_name = None  # Target Kinesis stream; must be set by subclasses.

    def __init__(self, **kwargs):
        super(KinesisInsertIntersection, self).__init__(**kwargs)
        # Fail fast: validate configuration *before* opening the AWS
        # connection (previously the assert ran after connecting, wasting a
        # connection attempt on a misconfigured subclass).
        assert self.stream_name, "Please define attribute stream_name on your KinesisInsertIntersection"
        self.conn = boto.kinesis.connect_to_region(**self.connection_parameters())

    def connection_parameters(self):
        """Return kwargs for ``boto.kinesis.connect_to_region``.

        Override to change the region or to pass explicit credentials.
        """
        return {
            'region_name': 'eu-west-1',
            # Add this or use ENV VARS
            # 'aws_access_key_id': '',
            # 'aws_secret_access_key': ''
        }

    def process(self, message):
        """Serialize the message content to JSON and put it on the stream,
        partitioned by the message's grouping value, then ack it."""
        self.conn.put_record(
            self.stream_name,
            json.dumps(message.content),
            message.grouping_value
        )
        self.ack(message)
        yield  # process() must be a generator for the motorway pipeline.
from skimage.io import imread, imsave
from numpy import ones
from scipy.signal import convolve2d
import warnings

# Presumably silences imsave dtype-conversion warnings on the integer result.
warnings.filterwarnings("ignore")

# Apply a 5x5 box (mean) filter: sum each 5x5 window, then integer-divide
# by the window size. 'valid' mode shrinks the output by the kernel margin.
source = imread('img.png')
kernel = ones((5, 5), dtype=int)
smoothed = convolve2d(source, kernel, mode='valid') // 25
imsave('out_img.png', smoothed)
from .knowledgebase import KnowledgeBase
from .dataset import Dataset
from .dataset import SmartNegativeSampling, NegativeSampling
def get_data(data_path, batch_size):
    """Load train/valid/test knowledge bases and datasets rooted at data_path.

    The training KB is built from raw triples and defines the id mappings;
    the validation and test KBs are derived from it so that all three share
    the same entity/relation vocabulary.
    """
    # Training data: loaded raw, then converted to id-based triples.
    train_kb = KnowledgeBase.load_from_raw_data(data_path + 'train.txt')
    train_kb.convert_triples()
    train_set = Dataset(train_kb, batch_size=batch_size)

    # Validation data shares the training vocabulary.
    valid_kb = KnowledgeBase.derive_from(train_kb)
    valid_kb.load_raw_triples(data_path + 'valid.txt')
    valid_kb.convert_triples()
    valid_set = Dataset(valid_kb, batch_size=batch_size)

    # Test data shares the training vocabulary as well.
    test_kb = KnowledgeBase.derive_from(train_kb)
    test_kb.load_raw_triples(data_path + 'test.txt')
    test_kb.convert_triples()
    test_set = Dataset(test_kb, batch_size=batch_size)

    return train_kb, train_set, valid_kb, valid_set, test_kb, test_set
def load_saved_data(folder, batch_size=32, data_path='../data/Release/'):
    """Rebuild train/valid/test datasets from a previously saved training KB.

    Only the training knowledge base was saved to disk (converted triples
    plus id mappings); the validation and test KBs are re-derived from the
    raw text files under ``data_path`` so they share the saved vocabulary.

    :param folder: Directory containing ``triples.npy``, ``entity2id.json``
        and ``relation2id.json``.
    :param batch_size: Mini-batch size for all three datasets. Defaults to
        32, the value that was previously hard-coded.
    :param data_path: Root of the raw data release holding ``valid.txt`` and
        ``test.txt``. Defaults to the previously hard-coded path.
    """
    kb_train = KnowledgeBase()
    kb_train.load_converted_triples(folder + '/triples.npy')
    kb_train.load_mappings_from_json(folder + '/entity2id.json',
                                    folder + '/relation2id.json')
    dset_train = Dataset(kb_train, batch_size=batch_size)

    valid_path = data_path + 'valid.txt'
    test_path = data_path + 'test.txt'

    # derive a knowledge base of validation data
    kb_val = KnowledgeBase.derive_from(kb_train)
    kb_val.load_raw_triples(valid_path)
    kb_val.convert_triples()
    dset_val = Dataset(kb_val, batch_size=batch_size)

    # derive a knowledge base of testing data
    kb_test = KnowledgeBase.derive_from(kb_train)
    kb_test.load_raw_triples(test_path)
    kb_test.convert_triples()
    dset_test = Dataset(kb_test, batch_size=batch_size)

    return kb_train, dset_train, kb_val, dset_val, kb_test, dset_test
import json
import time
import web3
import sha3
from os import environ
from web3 import Web3, HTTPProvider
from web3.contract import ConciseContract
from contract import IDENTITY_STORE_JSON
# Configuration comes entirely from the environment; the endpoint targets
# the Ropsten testnet through Infura.
API_KEY = environ.get('API_KEY')
PRIVATE_KEY = environ.get('PRIVATE_KEY')
CONTRACT_ADDRESS = environ.get('CONTRACT_ADDRESS')
NETWORK_ENDPOINT = "https://ropsten.infura.io/v3/{}".format(API_KEY)
w3 = Web3(HTTPProvider(NETWORK_ENDPOINT))
# NOTE(review): presumably required to unlock the local-account signing API
# used below (older web3.py gated it behind this flag) -- confirm.
w3.eth.enable_unaudited_features()
#known_nonce = set()
def setTenant(hashObject, address, timestamp, tenantId):
    """Submit a setTenant transaction to the IdentityStore contract.

    Encodes the call, signs it locally with PRIVATE_KEY and sends it as a
    raw transaction, retrying on common nonce/price errors.

    :param hashObject: Hash of the identity object to store.
    :param address: Subject's Ethereum address.
    :param timestamp: Unix timestamp to record.
    :param tenantId: Tenant identifier.
    :returns: The submitted transaction dict, or None if every attempt failed.
    """
    #global known_nonce
    contract = load_contract()
    account = w3.eth.account.privateKeyToAccount(PRIVATE_KEY)
    get_data = contract.encodeABI(
        fn_name='setTenant',
        args=[
            hashObject,
            address,
            timestamp,
            tenantId
        ])
    trans_count = w3.eth.getTransactionCount(account.address)
    nonce = trans_count
    #while nonce in known_nonce:
    #    nonce += 1
    print("transaction count=%d nonce=%d" %(trans_count, nonce))
    price = w3.toWei('21', 'gwei')
    success = False
    retry = 100
    # Fix: txn was previously unbound when every attempt raised (retries
    # exhausted), causing an UnboundLocalError at the final `return txn`.
    txn = None
    while not success and retry > 0:
        retry -= 1
        try:
            transaction = {
                'to': contract.address,
                'data': get_data,
                'gas': 1728712,
                'gasPrice': price,
                'nonce': nonce
            }
            signed = w3.eth.account.signTransaction(transaction, PRIVATE_KEY)
            txn_hash = w3.eth.sendRawTransaction(signed.rawTransaction)
            txn = w3.eth.getTransaction(txn_hash)
            print('Contract Transaction Hash {}'.format(txn_hash))
            print('Transaction {}'.format(txn))
            #known_nonce.add(nonce)
            success = True
        except ValueError as err:
            err_msg = err.args[0]['message']
            print('web3 error:: %s' % err_msg)
            if 'replacement transaction underpriced' in err_msg:
                # NOTE(review): this bumps the gas price by 1 wei only; nodes
                # typically require a ~10% increase to replace a pending
                # transaction -- confirm this is intended.
                price += 1
                retry += 1 # underprice doesn't count for retrying
                print('increase price to %d' % price)
            elif 'nonce too low' in err_msg or 'known transaction' in err_msg:
                #known_nonce.add(nonce)
                nonce += 1
                print('increase nonce to %d' % nonce)
            else:
                raise err
    if retry <= 0:
        print('stop retrying')
    return txn
def get_deloyed_contract(contract_definition, contract_address):
    """Bind a deployed contract from its JSON definition and on-chain address.

    NOTE(review): the name keeps the original 'deloyed' spelling (presumably
    'deployed') for caller compatibility.
    """
    return w3.eth.contract(abi=contract_definition['abi'],
                           address=contract_address)
def load_contract():
    """Parse the bundled IdentityStore ABI and bind it to CONTRACT_ADDRESS."""
    definition = json.loads(IDENTITY_STORE_JSON)
    return get_deloyed_contract(definition, CONTRACT_ADDRESS)
def is_valid(tenant_id, user_address):
    """Ask the contract whether (tenant_id, user_address) is valid right now.

    Read-only call (no transaction); uses the current Unix time.
    """
    now = int(time.time())
    contract = load_contract()
    return contract.functions.isValid(tenant_id, user_address, now).call()
# NOTE(review): looks like an IPython/bluesky profile startup file -- this
# banner announces which file is being loaded.
print(f'Loading {__file__}')
def configure_area_det(det,acq_time,acq_period=None,exposure=None,num_exposures=1):
    """Bluesky plan: configure an area detector's timing and frame counts.

    NOTE(review): `bps` (presumably bluesky.plan_stubs) and `np` (numpy) are
    expected to exist in the enclosing profile namespace -- they are not
    imported in this file chunk; confirm.

    :param det: area detector device; behavior is special-cased by
        ``det.name`` for 'prosilica', 'dexela', 'blackfly' and 'emergent'.
    :param acq_time: requested per-frame acquire time (capped at 25 for the
        prosilica; floored at 0.1 for the dexela).
    :param acq_period: acquire period; when None, a per-detector default is
        derived from the acquire-time readback.
    :param exposure: total exposure per set; defaults to 10x the acquire-time
        readback.
    :param num_exposures: number of sets to take.
    """
    if det.name == 'prosilica':
        acq_time = min(acq_time,25)
        # Ensure the camera is actively acquiring before reconfiguring.
        if det.cam.acquire.get() == 0:
            yield from bps.abs_set(det.cam.acquire, 1, wait=True)
    # Set the acquire time, then read back the value the IOC actually took.
    if det.name == 'dexela':
        yield from bps.abs_set(det.cam.acquire_time, max(acq_time,0.1), wait=True)
        acq_time_rbv = det.cam.acquire_time.get()
    else:
        yield from bps.abs_set(det.cam.acquire_time, acq_time, wait=True)
        acq_time_rbv = det.cam.acquire_time.get()
    # Set the acquire period; dexela needs a small fixed margin over the
    # acquire time, blackfly uses non-waiting sets with clamped values.
    if det.name == 'dexela':
        yield from bps.abs_set(det.cam.acquire_period, acq_time_rbv+0.005, wait=True)
        acq_period_rbv = det.cam.acquire_period.get()
    else:
        if acq_period is None:
            if det.name == 'blackfly':
                yield from bps.abs_set(det.cam.acquire_period, 0.1, wait=False)
            else:
                yield from bps.abs_set(det.cam.acquire_period, acq_time_rbv, wait=True)
            acq_period_rbv = det.cam.acquire_period.get()
        else:
            if det.name == 'blackfly':
                yield from bps.abs_set(det.cam.acquire_period, min(1,acq_period), wait=False)
            else:
                yield from bps.abs_set(det.cam.acquire_period, acq_period, wait=True)
            acq_period_rbv = det.cam.acquire_period.get()
    if exposure is None:
        exposure = acq_time_rbv*10
    # Number of frames needed to cover the requested exposure.
    num_frames = np.ceil(exposure / acq_time_rbv)
    yield from bps.abs_set(det.images_per_set, num_frames, wait=True)
    yield from bps.abs_set(det.number_of_sets, num_exposures, wait=True)
    # Report the final configuration (emergent's units are reported in msec).
    if det.name == 'emergent':
        print(">>>%s is configured as:\n acq_time = %.3fmsec; acq_period = %.3fmsec; exposure = %.3fmsec \
(num frames = %.2f); num_exposures = %d"%(det.name,acq_time_rbv,acq_period_rbv,exposure,num_frames,num_exposures))
    else:
        print(">>>%s is configured as:\n acq_time = %.3fsec; acq_period = %.3fsec; exposure = %.3fsec \
(num frames = %.2f); num_exposures = %d"%(det.name,acq_time_rbv,acq_period_rbv,exposure,num_frames,num_exposures))
    return
#!/usr/bin/env python3
# Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.
import re
from argparse import ArgumentParser
from difflib import SequenceMatcher
from itertools import chain, groupby
from os import environ
from sys import stderr, stdout
from typing import *
from typing import Match
class DiffLine:
    """One parsed line of git-diff output, plus bookkeeping for rendering."""

    def __init__(self, kind:str, match:Match, rich_text:str) -> None:
        self.kind = kind # The name from `diff_pat` named capture groups.
        self.match = match # The `diff_pat` match against the decolorized text.
        self.rich_text = rich_text # Original colorized text from git.
        self.old_num = 0 # Old-file line number; 1-indexed.
        self.new_num = 0 # New-file line number; ".
        self.chunk_idx = 0 # Positive for rem/add.
        self.is_src = False # True for ctx/rem/add.
        self.text = '' # Final text for ctx/rem/add.

    @property
    def plain_text(self) -> str:
        # The string the match was run against, i.e. the decolorized line.
        return self.match.string # type: ignore
def main() -> None:
    """Entry point: read a colorized git diff on stdin and re-render it,
    buffering and processing one file section at a time."""
    arg_parser = ArgumentParser(prog='same-same', description='Git diff filter.')
    arg_parser.add_argument('-interactive', action='store_true', help="Accommodate git's interactive mode.")
    args = arg_parser.parse_args()
    # Git can generate utf8-illegal sequences; ignore them.
    stdin = open(0, errors='replace')
    if 'SAME_SAME_OFF' in environ:
        # Escape hatch: act as a pure pass-through filter.
        for line in stdin:
            stdout.write(line)
        exit(0)
    dbg = ('SAME_SAME_DBG' in environ)
    buffer:List[DiffLine] = [] # Lines of the current file section.
    def flush_buffer() -> None:
        # Render the buffered lines for one file section, then reset.
        nonlocal buffer
        if buffer:
            handle_file_lines(buffer, interactive=args.interactive)
        buffer = []
    try:
        for rich_text in stdin:
            rich_text = rich_text.rstrip('\n')
            plain_text = sgr_pat.sub('', rich_text) # remove colors.
            match = diff_pat.match(plain_text)
            assert match is not None
            kind = match.lastgroup
            assert kind is not None, match
            if dbg:
                # Debug mode: dump the classification instead of rendering.
                print(kind, ':', repr(plain_text))
                continue
            if kind == 'diff':
                # A new file section starts; flush the previous one first.
                flush_buffer()
            buffer.append(DiffLine(kind, match, rich_text))
        flush_buffer()
    except BrokenPipeError:
        stderr.close() # Prevents warning message.
def handle_file_lines(lines:List[DiffLine], interactive:bool) -> None:
    """Render all diff lines belonging to a single file section:
    index lines, detect moved blocks, token-diff rem/add chunks, print."""
    first = lines[0]
    kind = first.kind
    skip = False
    # Detect if we should skip these lines (not a plain file diff, or part
    # of a commit graph).
    if kind not in ('diff', 'loc'): skip = True
    elif graph_pat.match(first.plain_text).end(): skip = True # type: ignore
    if skip:
        for line in lines: print(line.rich_text)
        return
    old_ctx_nums:Set[int] = set() # Line numbers of context lines.
    new_ctx_nums:Set[int] = set() # ".
    old_lines:Dict[int, DiffLine] = {} # Maps of line numbers to line structs.
    new_lines:Dict[int, DiffLine] = {} # ".
    old_uniques:Dict[str, Optional[int]] = {} # Maps unique line bodies to line numbers.
    new_uniques:Dict[str, Optional[int]] = {} # ".
    old_num = 0 # 1-indexed source line number.
    new_num = 0 # ".
    chunk_idx = 0 # Counter to differentiate chunks; becomes part of the groupby key.
    # Accumulate source lines into structures.
    old_path = '<OLD_PATH>'
    new_path = '<NEW_PATH>'
    is_prev_add_rem = False
    for line in lines:
        match = line.match
        kind = line.kind
        is_add_rem = (kind in ('rem', 'add'))
        # A new chunk starts at every ctx/other -> rem/add transition.
        if not is_prev_add_rem and is_add_rem: chunk_idx += 1
        is_prev_add_rem = is_add_rem
        if kind in ('ctx', 'rem', 'add'):
            line.is_src = True
            if kind == 'ctx':
                line.text = match['ctx_text']
            elif kind == 'rem':
                line.text = match['rem_text']
                line.chunk_idx = chunk_idx
                insert_unique_line(old_uniques, line.text, old_num)
            elif kind == 'add':
                line.text = match['add_text']
                line.chunk_idx = chunk_idx
                insert_unique_line(new_uniques, line.text, new_num)
            # ctx/rem lines exist in the old file; ctx/add in the new file.
            if kind in ('ctx', 'rem'):
                assert old_num not in old_lines
                assert old_num not in old_ctx_nums
                line.old_num = old_num
                old_lines[old_num] = line
                old_ctx_nums.add(old_num)
                old_num += 1
            if kind in ('ctx', 'add'):
                assert new_num not in new_lines
                assert new_num not in new_ctx_nums
                line.new_num = new_num
                new_lines[new_num] = line
                new_ctx_nums.add(new_num)
                new_num += 1
        elif kind == 'loc':
            # Hunk header: resynchronize the running line counters.
            o = int(match['old_num'])
            if o > 0:
                assert o > old_num, (o, old_num, match.string)
                old_num = o
            n = int(match['new_num'])
            if n > 0:
                assert n > new_num
                new_num = n
        elif kind == 'old': old_path = vscode_path(match['old_path'].rstrip('\t'))
        elif kind == 'new': new_path = vscode_path(match['new_path'].rstrip('\t')) # Not sure why this trailing tab appears.
    # Detect moved lines.
    def diff_lines_match(old_idx:int, new_idx:int) -> bool:
        # True when both sides exist, neither is a context line, and the
        # whitespace-stripped bodies are equal.
        if old_idx in old_ctx_nums or new_idx in new_ctx_nums: return False
        try: return old_lines[old_idx].text.strip() == new_lines[new_idx].text.strip()
        except KeyError: return False
    old_moved_nums:Set[int] = set()
    new_moved_nums:Set[int] = set()
    for body, new_idx in new_uniques.items():
        if new_idx is None: continue
        old_idx = old_uniques.get(body)
        if old_idx is None: continue
        # Grow the matched region in both directions around the unique anchor.
        p_o = old_idx
        p_n = new_idx
        while diff_lines_match(p_o-1, p_n-1):
            p_o -= 1
            p_n -= 1
        e_o = old_idx + 1
        e_n = new_idx + 1
        while diff_lines_match(e_o, e_n):
            e_o += 1
            e_n += 1
        old_moved_nums.update(range(p_o, e_o))
        new_moved_nums.update(range(p_n, e_n))
    # Break lines into rem/add chunks.
    # While a "hunk" is a series of (possibly many) ctx/rem/add lines provided by git diff,
    # a "chunk" is either a contiguous block of rem/add lines, or else any other single line.
    # This approach simplifies the token diffing process so that it is a reasonably
    # straightforward comparison of a rem block to an add block.
    def chunk_key(line:DiffLine) -> Tuple[bool, int, bool]:
        return (line.is_src, line.chunk_idx, (line.old_num in old_moved_nums or line.new_num in new_moved_nums))
    for ((is_src, chunk_idx, is_moved), _chunk) in groupby(lines, key=chunk_key):
        chunk = list(_chunk) # We iterate over the sequence several times.
        if chunk_idx and not is_moved: # Chunk should be diffed by tokens.
            # We must ensure that the same number of lines is output, at least for `-interactive` mode.
            # Currently, we do not reorder lines at all, but that is an option for the future.
            rem_lines = [l for l in chunk if l.old_num]
            add_lines = [l for l in chunk if l.new_num]
            add_token_diffs(rem_lines, add_lines)
        elif is_src: # ctx or moved.
            for l in chunk:
                l.text = highlight_strange_chars(l.text)
        # Print lines.
        for line in chunk:
            kind = line.kind
            match = line.match
            text = line.text
            if kind == 'ctx':
                print(text)
            elif kind == 'rem':
                m = C_REM_MOVED if line.old_num in old_moved_nums else ''
                print(C_REM_LINE, m, text, C_END, sep='')
            elif kind == 'add':
                m = C_ADD_MOVED if line.new_num in new_moved_nums else ''
                print(C_ADD_LINE, m, text, C_END, sep='')
            elif kind == 'loc':
                new_num = match['new_num']
                snippet = match['parent_snippet']
                s = ' ' + C_SNIPPET if snippet else ''
                print(C_LOC, new_path, ':', new_num, ':', s, snippet, C_END, sep='')
            elif kind == 'diff':
                msg = new_path if (old_path == new_path) else '{} -> {}'.format(old_path, new_path)
                print(C_FILE, msg, ':', C_END, sep='')
            elif kind == 'meta':
                print(C_MODE, new_path, ':', RST, ' ', line.rich_text, sep='')
            elif kind in dropped_kinds:
                if interactive: # cannot drop lines, because interactive mode slices the diff by line counts.
                    print(C_DROPPED, line.plain_text, RST, sep='')
            elif kind in pass_kinds:
                print(line.rich_text)
            else:
                raise Exception('unhandled kind: {}\n{!r}'.format(kind, text))
def insert_unique_line(d:Dict[str, Optional[int]], line:str, idx:int) -> None:
    'For the purpose of movement detection, lines are tested for uniqueness after stripping leading and trailing whitespace.'
    stripped = line.strip()
    # A body seen more than once is poisoned with None so it can never be
    # matched as a unique movement candidate; first sight records its index.
    d[stripped] = None if stripped in d else idx
def add_token_diffs(rem_lines:List[DiffLine], add_lines:List[DiffLine]) -> None:
    '''
    Rewrite DiffLine.text values to include per-token diff highlighting.
    The rem and add lines of a chunk are tokenized into two flat token streams
    (newlines included as tokens) and diffed with SequenceMatcher; differing
    tokens get rem/add highlights, matching tokens get the reset style.
    '''
    # Get lists of tokens for the entire chunk.
    r_tokens = tokenize_difflines(rem_lines)
    a_tokens = tokenize_difflines(add_lines)
    m = SequenceMatcher(isjunk=is_token_junk, a=r_tokens, b=a_tokens, autojunk=True)
    r_frags:List[List[str]] = [[] for _ in rem_lines] # Accumulate highlighted tokens, one list per line.
    a_frags:List[List[str]] = [[] for _ in add_lines]
    r_line_idx = 0 # Step through the accumulators.
    a_line_idx = 0
    r_d = 0 # Token index of previous/next diff.
    a_d = 0
    # TODO: r_lit, a_lit flags could slightly reduce emission of color sequences.
    # Note: the final matching block is the sentinel (len(a), len(b), 0), which
    # flushes any trailing differing tokens. (A previously unused duplicate call
    # to get_matching_blocks() was removed here.)
    for r_p, a_p, l in m.get_matching_blocks():
        # Highlight the differing tokens between the previous match and this one.
        r_line_idx = append_frags(r_frags, r_tokens, r_line_idx, r_d, r_p, C_REM_TOKEN)
        a_line_idx = append_frags(a_frags, a_tokens, a_line_idx, a_d, a_p, C_ADD_TOKEN)
        r_d = r_p+l # update to end of match / beginning of next diff.
        a_d = a_p+l
        # Do not highlight the matching tokens.
        r_line_idx = append_frags(r_frags, r_tokens, r_line_idx, r_p, r_d, C_RST_TOKEN)
        a_line_idx = append_frags(a_frags, a_tokens, a_line_idx, a_p, a_d, C_RST_TOKEN)
    for rem_line, frags in zip(rem_lines, r_frags):
        rem_line.text = ''.join(frags)
    for add_line, frags in zip(add_lines, a_frags):
        add_line.text = ''.join(frags)
def tokenize_difflines(lines:List[DiffLine]) -> List[str]:
    'Convert the list of line texts into a single list of tokens, including newline tokens.'
    tokens:List[str] = []
    for idx, diff_line in enumerate(lines):
        if idx > 0:
            # Lines are joined by an explicit newline token so the token diff
            # respects line boundaries.
            tokens.append('\n')
        for match in token_pat.finditer(diff_line.text):
            tokens.append(match[0])
    return tokens
def is_token_junk(token:str) -> bool:
    '''
    Treat newlines as tokens, but all other whitespace as junk.
    This forces the diff algorithm to respect line breaks but not get distracted aligning to whitespace.
    '''
    if token == '\n':
        return False
    return token.isspace()
def append_frags(frags:List[List[str]], tokens:List[str], line_idx:int, pos:int, end:int, highlight:str) -> int:
    'Append highlighted fragments for tokens[pos:end] to the per-line accumulators; returns the updated line index.'
    for token in tokens[pos:end]:
        if token == '\n':
            # Newline tokens emit nothing; they advance to the next line's accumulator.
            line_idx += 1
            continue
        frags[line_idx].extend((highlight, highlight_strange_chars(token)))
    return line_idx
def highlight_strange_chars(string:str) -> str:
    'Wrap any control/invisible characters in inverse-video escapes, rendered via their escaped names.'
    def _mark(match):
        escaped = match[0].translate(strange_char_trans_table)
        return C_STRANGE + escaped + C_RST_STRANGE
    return strange_char_pat.sub(_mark, string)
# Line kinds that are normally omitted from the output entirely.
dropped_kinds = {
    'idx', 'old', 'new'
}
# Line kinds that are printed through without additional styling.
pass_kinds = {
    'empty', 'other'
}
sgr_pat = re.compile(r'\x1B\[[0-9;]*m')
graph_pat = re.compile(r'(?x) [ /\*\|\\]*') # space is treated as literal inside of brackets, even in extended mode.
diff_pat = re.compile(r'''(?x)
(?:
(?P<empty> $)
| (?P<commit> commit\ [0-9a-z]{40} )
| (?P<author> Author: )
| (?P<date> Date: )
| (?P<diff> diff\ --git )
| (?P<idx> index )
| (?P<old> --- \ (?P<old_path>.+) )
| (?P<new> \+\+\+ \ (?P<new_path>.+) )
| (?P<loc> @@\ -(?P<old_num>\d+)(?P<old_len>,\d+)?\ \+(?P<new_num>\d+)(?P<new_len>,\d+)?\ @@\ ?(?P<parent_snippet>.*) )
| (?P<ctx> \ (?P<ctx_text>.*) )
| (?P<rem> - (?P<rem_text>.*) )
| (?P<add> \+ (?P<add_text>.*) )
| (?P<meta>
( old\ mode
| new\ mode
| deleted\ file\ mode
| new\ file\ mode
| copy\ from
| copy\ to
| rename\ from
| rename\ to
| similarity\ index
| dissimilarity\ index ) )
| (?P<other> .* )
)
''')
token_pat = re.compile(r'''(?x)
\w[\w\d]* # Symbol token.
| \d+ # Number token.
| \ + # Spaces; distinct from other whitespace.
| \t+ # Tabs; distinct from other whitespace.
| \s+ # Other whitespace.
| . # Any other single character; newlines are never present so DOTALL is irrelevant.
''')
# Unicode ranges for strange characters:
#  C0:     \x00 - \x1F
#  \n:     \x0A
#  C0 !\n: [ \x00-\x09 \x0B-\x1F ]
#  SP:     \x20
#  DEL:    \x7F
#  C1:     \x80 - \x9F
#  NBSP:   \xA0 (nonbreaking space)
#  SHY:    \xAD (soft hyphen)
strange_char_re = r'(?x) [\x00-\x09\x0B-\x1F\x7F\x80-\x9F\xA0\xAD]+'
strange_char_pat = re.compile(strange_char_re)
assert not strange_char_pat.match(' ')
# BUG FIX: this was an itertools.chain iterator; the `ord(' ') not in` assert
# below exhausted it, so the subsequent dict comprehension built an EMPTY
# \xNN-name table and only the short escape names survived. Materialize it.
strange_char_ords = list(chain(range(0, 0x09+1), range(0x0B, 0x1F+1), range(0x7F, 0x7F+1),
    range(0x80, 0x9F+1), range(0xA0, 0xA0+1), range(0xAD, 0xAD+1)))
assert ord(' ') not in strange_char_ords
strange_char_names = { i : '\\x{:02x}'.format(i) for i in strange_char_ords }
# Friendlier names for the common escapes; these string keys are inserted after
# the ordinal entries, so they override them in the translation table.
strange_char_names.update({
    '\0' : '\\0',
    '\a' : '\\a',
    '\b' : '\\b',
    '\f' : '\\f',
    '\r' : '\\r',
    '\t' : '\\t',
    '\v' : '\\v',
})
strange_char_trans_table = str.maketrans(strange_char_names)
# ANSI control sequence indicator (ECMA-48 CSI prefix).
CSI = '\x1b['
ERASE_LINE_F = CSI + 'K' # Sending erase line forward while background color is set colors to end of line.
def sgr(*codes:Any) -> str:
    'Select Graphic Rendition control sequence string.'
    # Codes are joined with ';' per ECMA-48; zero codes yields the reset sequence.
    return '\x1b[' + ';'.join(map(str, codes)) + 'm'
RST = sgr() # Empty SGR: reset all attributes.
# SGR codes that reset a single attribute.
RST_BOLD, RST_ULINE, RST_BLINK, RST_INVERT, RST_TXT, RST_BG = (22, 24, 25, 27, 39, 49)
# SGR codes that set a single attribute.
BOLD, ULINE, BLINK, INVERT = (1, 4, 5, 7)
# xterm-256 sequence initiators; these should be followed by a single color index.
# both text and background can be specified in a single sgr call.
TXT = '38;5'
BG = '48;5'
# RGB6 color cube: 6x6x6, from black to white.
K = 16 # black.
W = 231 # white.
# Grayscale: the 24 palette values have a suggested 8 bit grayscale range of [8, 238].
middle_gray_indices = range(232, 256)

def gray26(n:int) -> int:
    'Map n in [0, 25] to a palette index: 0 is black, 25 is white, 1..24 are the xterm grayscale band.'
    assert 0 <= n < 26
    if n == 0:
        return K
    if n == 25:
        return W
    # 231+1 .. 231+24 lands exactly on the 232..255 grayscale entries.
    return W + n
def rgb6(r:int, g:int, b:int) -> int:
    'index RGB triples into the 256-color palette (returns 16 for black, 231 for white).'
    for component in (r, g, b):
        assert 0 <= component < 6
    # Row-major index into the 6x6x6 cube, offset past the 16 base colors.
    return 16 + (36 * r) + (6 * g) + b
# same-same colors.
C_FILE = sgr(BG, rgb6(1, 0, 1)) # File header line background.
C_MODE = sgr(BG, rgb6(1, 0, 1)) # File mode/meta line background.
C_LOC = sgr(BG, rgb6(0, 1, 2)) # Hunk location (@@) line background.
C_UNKNOWN = sgr(BG, rgb6(5, 0, 5)) # Unrecognized line background.
C_SNIPPET = sgr(TXT, gray26(22)) # Hunk parent snippet text.
C_DROPPED = sgr(TXT, gray26(10)) # Normally-dropped lines kept in interactive mode.
C_REM_LINE = sgr(BG, rgb6(1, 0, 0)) # Removed line background.
C_ADD_LINE = sgr(BG, rgb6(0, 1, 0)) # Added line background.
C_REM_MOVED = sgr(TXT, rgb6(4, 2, 0)) # Text color marking a removed-but-moved line.
C_ADD_MOVED = sgr(TXT, rgb6(2, 4, 0)) # Text color marking an added-but-moved line.
C_REM_TOKEN = sgr(TXT, rgb6(5, 2, 3), BOLD) # Differing token within a removed line.
C_ADD_TOKEN = sgr(TXT, rgb6(2, 5, 3), BOLD) # Differing token within an added line.
C_RST_TOKEN = sgr(RST_TXT, RST_BOLD) # Return to the line's base style after a token highlight.
C_STRANGE = sgr(INVERT) # Inverse video marks control/invisible characters.
C_RST_STRANGE = sgr(RST_INVERT)
C_END = ERASE_LINE_F + RST # Extend the background color to end of line, then reset.
def vscode_path(path:str) -> str:
    'VSCode will only recognize source locations if the path contains a slash; add "./" to plain file names.'
    # '<' / '>' guard pseudo-names like <stdin>, which must not be altered.
    already_ok = any(c in path for c in '/<>')
    return path if already_ok else './' + path
def errL(*items:Any) -> None:
    'Print items to stderr with no separator.'
    print(*items, sep='', file=stderr)

def errSL(*items:Any) -> None:
    'Print items to stderr separated by single spaces.'
    print(*items, file=stderr)
if __name__ == '__main__': main()
| 15,085 | 5,940 |
"""Validate command configuration variables."""
# Local
from .frr import FRRCommands
from .bird import BIRDCommands
from .tnsr import TNSRCommands
from .vyos import VyosCommands
from ..main import HyperglassModelExtra
from .common import CommandGroup
from .huawei import HuaweiCommands
from .juniper import JuniperCommands
from .cisco_xr import CiscoXRCommands
from .cisco_ios import CiscoIOSCommands
from .arista_eos import AristaEOSCommands
from .cisco_nxos import CiscoNXOSCommands
from .nokia_sros import NokiaSROSCommands
from .mikrotik_routeros import MikrotikRouterOS
from .mikrotik_switchos import MikrotikSwitchOS
# NOS identifier (as written in user YAML) -> command-group model class.
# Identifiers not listed here fall back to the generic CommandGroup
# in Commands.import_params().
_NOS_MAP = {
    "arista_eos": AristaEOSCommands,
    "bird": BIRDCommands,
    "cisco_ios": CiscoIOSCommands,
    "cisco_nxos": CiscoNXOSCommands,
    "cisco_xr": CiscoXRCommands,
    "frr": FRRCommands,
    "huawei": HuaweiCommands,
    "juniper": JuniperCommands,
    "mikrotik_routeros": MikrotikRouterOS,
    "mikrotik_switchos": MikrotikSwitchOS,
    "nokia_sros": NokiaSROSCommands,
    "tnsr": TNSRCommands,
    "vyos": VyosCommands,
}
class Commands(HyperglassModelExtra):
    """Base class for command definitions."""

    # Default per-NOS command groups; each may be overridden from user YAML
    # via import_params().
    arista_eos: CommandGroup = AristaEOSCommands()
    bird: CommandGroup = BIRDCommands()
    cisco_ios: CommandGroup = CiscoIOSCommands()
    cisco_nxos: CommandGroup = CiscoNXOSCommands()
    cisco_xr: CommandGroup = CiscoXRCommands()
    frr: CommandGroup = FRRCommands()
    huawei: CommandGroup = HuaweiCommands()
    juniper: CommandGroup = JuniperCommands()
    mikrotik_routeros: CommandGroup = MikrotikRouterOS()
    mikrotik_switchos: CommandGroup = MikrotikSwitchOS()
    nokia_sros: CommandGroup = NokiaSROSCommands()
    tnsr: CommandGroup = TNSRCommands()
    vyos: CommandGroup = VyosCommands()

    @classmethod
    def import_params(cls, **input_params):
        """Import loaded YAML, initialize per-command definitions.

        Dynamically set attributes for the command class.

        Arguments:
            input_params {dict} -- Unvalidated command definitions

        Returns:
            {object} -- Validated commands object
        """
        # Use cls() rather than a hard-coded Commands() so subclasses are honored.
        obj = cls()
        for nos, cmds in input_params.items():
            # Unknown NOS identifiers validate against the generic CommandGroup.
            nos_cmd_set = _NOS_MAP.get(nos, CommandGroup)
            nos_cmds = nos_cmd_set(**cmds)
            setattr(obj, nos, nos_cmds)
        return obj

    class Config:
        """Override pydantic config."""

        validate_all = False
| 2,443 | 838 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
Created on Oct. 2019
ED_Chapter4
@author: ZYW @ BNU
'''
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from scipy import interpolate
from mpl_toolkits.mplot3d import Axes3D
import os
from matplotlib import font_manager as fm, rcParams
import astropy.units as u
##------------parameters settings-----------------##
pixel_sides = 10  # pixels per cm
N = np.array([3, 3, 3])  # wave node numbers
L = np.array([100, 100, 100])  # unit: cm
A = np.array([2, 12, 5])  # initial intensities
pi = np.pi
K_0 = np.array([N[0]*pi/L[0], N[1]*pi/L[1], N[2]*pi/L[2]])/pixel_sides  # wave vector (per pixel)
# NOTE(review): rcParams["datapath"] is a legacy matplotlib key -- confirm it
# exists in the installed matplotlib version.
fpath = os.path.join(rcParams["datapath"], "fonts/ttf/cmr10.ttf")
prop = fm.FontProperties(fname=fpath)
xx = np.linspace(0, L[0]*pixel_sides, L[0]*pixel_sides)
yy = np.linspace(0, L[1]*pixel_sides, L[1]*pixel_sides)
# BUG FIX: np.zeros(0, L[1]*pixel_sides, L[1]*pixel_sides) passed the grid size
# as the dtype argument and raised TypeError at import. Allocate the grid with a
# proper shape tuple. (zz is later replaced by a scalar slice height below.)
zz = np.zeros((L[1]*pixel_sides, L[1]*pixel_sides))
##------------functions settings-----------------##
'''
def E_x(x,y,z):
return A[0]*np.cos(x*K_0[0])*np.sin(y*K_0[1])*np.sin(z*K_0[2])
def E_y(x,y,z):
return A[1]*np.sin(x*K_0[0])*np.cos(y*K_0[1])*np.sin(z*K_0[2])
'''
def E_z(x, y, z):
    """z-component of the standing-wave field at (x, y, z); uses module-level A and K_0."""
    kx, ky, kz = K_0
    return A[2] * np.sin(x * kx) * np.sin(y * ky) * np.cos(z * kz)
#Intensities of 3 directions in Cartesian coordinate
xx, yy= np.meshgrid(xx, yy) # Expand the 1-D axes into a 2-D sampling grid.
zz = 11 # Fixed z position (pixel units) of the sampled slice.
E = E_z(xx,yy,zz) # E_z field evaluated over the slice.
def draw3D(X, Y, Z, angle):
    """Render Z as a flat heat map (left) and a 3-D surface with a contour
    projection (right), save the figure as 'ED_4.png' at 600 dpi, and show it.

    X, Y, Z are 2-D arrays of equal shape; angle is (elevation, azimuth) for
    the 3-D view.
    """
    fig = plt.figure(figsize=(15, 7))
    flat_ax = fig.add_subplot(121)
    flat_ax.imshow(Z, cmap='YlGnBu')
    surf_ax = fig.add_subplot(122, projection='3d')
    surf_ax.view_init(angle[0], angle[1])
    surf_ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='rainbow', alpha=0.8)
    # Contour projection below the surface gives the "hologram" effect.
    surf_ax.contourf(X, Y, Z, zdir='z', offset=-5, cmap='rainbow')
    flat_ax.set_title(r'$E_z-plane-figure$')
    surf_ax.set_title(r'$E_z-hologram$')
    plt.tight_layout()
    plt.savefig('ED_4.png', dpi=600)
    plt.show()
##------------data writing & figures making-----------------##
draw3D(xx,yy,E,(45,45)) # Render the sampled slice with a 45/45 degree view.
exit() # NOTE(review): redundant at the very end of the script; harmless here.
| 1,912 | 921 |
from .voc1 import VOC
| 25 | 14 |
#!/bin/env python

import inspect
import sys
import os.path
import threading
from logging import warning

# Put the project's lib directory at the front of the import path so the
# in-tree dramatis package is imported instead of any installed copy.
sys.path[0:0] = [ os.path.join( os.path.dirname( inspect.getabsfile( inspect.currentframe() ) ), '..', '..', 'lib' ) ]

from inspect import currentframe
from inspect import getframeinfo
from traceback import format_list
from traceback import extract_tb
from traceback import print_exc
from sys import exc_info

import dramatis
import dramatis.error
from dramatis import interface

# Convenience alias used throughout the tests.
Actor = dramatis.Actor

# Also expose the test-helper directory (two levels up) on the path.
sys.path[0:0] = [ os.path.join( os.path.dirname( inspect.getabsfile( inspect.currentframe() ) ), '..', '..' ) ]

from test_helper import DramatisTestHelper
class Name_Test ( DramatisTestHelper ):
    """Behavioral tests for dramatis.Actor names (binding, dispatch, continuations).

    FIX: the two `except AttributeError, ae:` clauses used Python-2-only syntax;
    `except ... as ...` is valid on Python 2.6+ and Python 3.
    """

    def teardown(self):
        self.runtime_check()

    def test_attribute_error_no_atts(self):
        "should return AttributeError as appropriate"
        actor = dramatis.Actor( object() )
        okay = False
        try:
            actor.foo()
            raise Exception("should not be reached")
        except AttributeError as ae:
            assert str(ae) == "'object' object has no attribute 'foo'"
            okay = True
        assert okay

    def test_attribute_error(self):
        "should return AttributeError as appropriate"
        o = object()
        actor = dramatis.Actor( o )
        okay = False
        try:
            actor.foo()
            raise Exception("should not be reached")
        except AttributeError as ae:
            assert str(ae) == "'object' object has no attribute 'foo'"
            okay = True
        assert okay

    def test_recreate_errors(self):
        "should recreate errors rather just forward them(?)"

    def test_block_methods_during_cont(self):
        "should block other methods during a continuation"

    def test_unbound(self):
        "should be creatable unbound"
        dramatis.Actor()

    def test_msg_unbound(self):
        "should allow messages to unbound"
        okay = False
        try:
            dramatis.Actor().foo()
            raise Exception("should not be reached")
        except dramatis.Deadlock: okay = True
        assert okay

    def test_creatable_bound(self):
        "should be creatable bound"
        name = dramatis.Actor( object() )
        assert isinstance(name,dramatis.Actor.Name)

    def test_allow_and_exec_msgs(self):
        "should allow and execute messages to bound names"
        class o ( object ):
            def foo(self,arg):
                assert arg == "bar"
                return "foobar"
        name = dramatis.Actor( o() )
        result = name.foo("bar")
        assert result == "foobar"

    def test_delv_releases(self):
        class O (object):
            def foo(self,arg):
                assert arg == "bar"
        name = dramatis.Actor( O() )
        dramatis.interface( name ).continuation(None).foo("bar")

    def test_short_release(self):
        "should have a nice short method for casts"
        class O (object):
            def foo(self,arg):
                assert arg == "bar"
        name = dramatis.Actor( O() )
        dramatis.release( name ).foo( "bar" )

    def test_release_from_interface(self):
        "should support cast from the object interface"

    def test_no_double_binding(self):
        "shouldn't be possible to bind twice"
        name = dramatis.Actor()
        dramatis.interface( name ).bind( object() )
        okay = False
        try:
            dramatis.interface( name ).bind( object() )
            raise Exception("should not be reached")
        except dramatis.error.Bind: okay = True
        assert okay

    def test_allow_exec_blocks(self):
        "should allow and execute block continuations"
        class O (object):
            def foo(self,arg):
                assert arg == "bar"
                return "foobar"
        actor = O()
        name = dramatis.Actor(actor)
        result = []
        def block(value):
            result[:] = [value]
        retval = dramatis.interface( name ).continuation(block).foo( "bar" )
        assert retval == None
        assert result == []
        assert result == []
        dramatis.Runtime.current.quiesce()
        assert result == ["foobar"]

    def test_exec_tasks_after_binding(self):
        "should execute messages to unbound names once bound"
        name = dramatis.Actor()
        class O(object):
            def foo(self,arg):
                assert arg == "bar"
                return "foobar"
        result = []
        def block(value):
            result[:] = [ value ]
        retval = dramatis.interface( name ).continuation(block).foo("bar")
        assert retval == None
        assert result == []
        dramatis.Runtime.current.quiesce()
        assert result == []
        dramatis.interface( name ).bind( O() )
        dramatis.Runtime.current.quiesce()
        assert result == [ "foobar" ]

    def test_rpc_binds_return_name(self):
        "rpc binds should return an actor name"
        name = dramatis.Actor()
        retval = dramatis.interface( name ).bind( dict() )
        assert isinstance(retval,dramatis.Actor.Name)

    def test_bind_with_release(self):
        "should be possible to bind with a non-rpc continuation"
        name = dramatis.Actor()
        result = []
        def block(v):
            result[:] = [ v ]
        name = dramatis.interface( name ).continuation(block)
        retval = dramatis.interface( name ).bind( object() )
        assert retval == None
        assert result == []
        dramatis.Runtime.current.quiesce()
        assert result != []

    def test_url(self):
        "should provide a url, if asked"

    def test_unboudn_queue_ordered(self):
        "unbound names should queue messages and deliver them in order"

    def test_sometimes_out_of_order(self):
        "messages should be delivered out of order sometimes"

    def test_flush_quarantees_order(self):
        "flushing should guarantee message order"

    def test_can_use_call_sytanx(self):
        class Foo( dramatis.Actor ):
            def __call__( self, arg, foo, bar ):
                assert arg == "foobar"
                assert foo == "foo"
                assert bar == "bar"
                return "okay"
        actor = Foo()
        assert actor("foobar", "foo", bar = "bar" ) == "okay"

    def test_can_use_left_shift_sytanx(self):
        class Foo( dramatis.Actor ):
            def __lshift__( self, arg ):
                assert arg == "foobar"
                return "okay"
        actor = Foo()
        assert actor << "foobar" == "okay"
| 6,618 | 1,877 |
#!/usr/bin/env python
"""
Background:
--------
NARR_RetrieveLocation_Variable.py
Purpose:
--------
Routines to retrieve, output NARR data from a single point over time to combine for analysis
History:
--------
2016-09-20 : Bell - simplify existing multiple routines for various locations into one package
"""
#System Stack
import datetime
import sys
#Science Stack
import numpy as np
from netCDF4 import num2date
#User Stack
from io_utils.EcoFOCI_netCDF_read import EcoFOCI_netCDF
from calc.EPIC2Datetime import EPIC2Datetime, get_UDUNITS, Datetime2EPIC
import calc.haversine as sphered
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2016, 9, 20)
__modified__ = datetime.datetime(2016, 9, 20)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NARR'
"---"
def rotate_coord(angle_rot, mag, dir):
    """ converts math coords to along/cross shelf.
    + onshore / along coast with land to right (right handed)
    - offshore / along coast with land to left

    Todo: convert met standard for winds (left handed coordinate system
    """
    # Rotate the direction by the shelf angle, then decompose the magnitude.
    theta = np.deg2rad(dir - angle_rot)
    along = mag * np.sin(theta)
    cross = mag * np.cos(theta)
    return (along, cross)
def triangle_smoothing(data_in):
    """Smooth a 1-D series with a [0.25, 0.5, 0.25] triangle kernel.

    Output has the same length as the input ('same' mode); the first and last
    points are biased by the implicit zero padding (edge effects).
    """
    kernel = np.array([0.25, 0.5, 0.25])
    return np.convolve(data_in, kernel, 'same')
def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind):
    """ Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
    # NOTE(review): Python 2 print statements below; this module requires Python 2.
    ###nc readin/out
    df = EcoFOCI_netCDF(infile)
    nchandle = df._getnchandle_()
    params = df.get_vars() #gets all of them
    print "Parameters available: "
    #print params
    # Slice every variable at the chosen grid point (and height, when given).
    ncdata = ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind)
    df.close()
    return ncdata
def get_geocoords(infile, lat='lat', lon='lon'):
    """Read the full latitude/longitude coordinate arrays from a netCDF file.

    Returns a dict keyed by the coordinate variable names.
    """
    df = EcoFOCI_netCDF(infile)
    nchandle = df._getnchandle_()
    coords = {}
    for var_name in (lat, lon):
        coords[var_name] = nchandle.variables[var_name][:]
    df.close()
    return (coords)
def ncreadfile_dic_slice(nchandle, params, height_ind=None, lat_ind=None, lon_ind=None):
    """returns slice of data for all times but for specified height/lat/lon indicies

    Variables whose dimensionality does not match the chosen slice are read whole.
    """
    data = {}
    # Build the index tuple once instead of duplicating the whole loop per case.
    # (Also: compare to None with `is`, not `==`.)
    if height_ind is None:
        index = (slice(None), lat_ind, lon_ind)        # (time, lat, lon)
    else:
        index = (slice(None), slice(None), lat_ind, lon_ind)  # (time, height, lat, lon)
    for v in params:
        try:  # check for nc variable
            data[v] = nchandle.variables[v][index]
        except ValueError:  # if parameter is not of expected dimensions
            data[v] = nchandle.variables[v][:]
    return data
"""--------------------------------main Routines---------------------------------------"""
""" currently hard coded - variables and ranges """
### Grab grid points for future slicing - assume grid is same in all model output
NARR = '/Volumes/WDC_internal/Users/bell/Data_Local/Reanalysis_Files/NARR/daily/'
infile = [NARR + 'uwnd.10m.2016.nc']
lat_lon = get_geocoords(infile[0])
#stn ['1','2']
station_name = ['UP stn_1']
sta_lat = [54.5]
sta_long = [161.0]
#Find NARR nearest point to moorings - haversine formula
# NARR data is -180->180 (positive east), Moorings are usually expressed +W for FOCI
station_1 = sphered.nearest_point([sta_lat[0],-1 * sta_long[0]],lat_lon['lat'],lat_lon['lon'], '2d')
stn1_modelpt = [lat_lon['lat'][station_1[3],station_1[4]],lat_lon['lon'][station_1[3],station_1[4]]]
print "stn1 nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[0], sta_long[0], stn1_modelpt[0], stn1_modelpt[1])
"""
#loop over all requested data
years = range(2010,2017)
years = ['mon.mean']
for yy in years:
# retrieve only these location's data
# uwnd
infile = NARR + 'uwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
stn1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
#filter data
stn1u_f = triangle_smoothing(stn1_data['uwnd'])
stn1u = stn1_data['uwnd']
# retrieve only these location's data
# vwnd
infile = NARR + 'vwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
stn1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
#filter data
stn1v_f = triangle_smoothing(stn1_data['vwnd'])
stn1v = stn1_data['vwnd']
#convert to EPIC time
#epic_time, epic_time1 = Datetime2EPIC(num2date(stn1_data['time'], "hours since 1800-1-1 00:00:0.0"))
Datetime2EPIC(num2date(x, "hours since 1800-1-1 00:00:0.0")) for x in stn1_data['time']
###
#output 0,6,12,18 UTC
#subsample data
# time_ind = np.where(pydate%0.25 == 0)[0]
# output u,v wind components from model grid points
save_to_nc = False
if save_to_nc:
# write to NetCDF
outfile = 'data/NARR_stn1_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
# write2epic( outfile, station_name[1], [epic_time[time_ind], epic_time1[time_ind]], stn1_modelpt, [stn1u_f[time_ind], stn1v_f[time_ind]])
write2epic( outfile, station_name[1], [epic_time, epic_time1], stn1_modelpt, [stn1u, stn1v])
"""
"""-----------using xarray---------"""
import pandas as pd
import xarray as xa
#index = [station_1[3],station_1[4]]
index=[195,76]
ufilein='/Volumes/WDC_internal/Users/bell/Data_Local/Reanalysis_Files/NARR/daily/uwnd.10m.2016.nc'
udata = xa.open_dataset(ufilein, decode_cf=False)
udata = xa.decode_cf(udata,mask_and_scale=False)
dum = udata.uwnd[:443,195,76].resample('D', udata.time, how='mean')
print dum.to_pandas().to_csv()
vfilein='/Volumes/WDC_internal/Users/bell/Data_Local/Reanalysis_Files/NARR/daily/vwnd.10m.2016.nc'
vdata = xa.open_dataset(vfilein, decode_cf=False)
vdata = xa.decode_cf(vdata,mask_and_scale=False)
dvm = vdata.vwnd[:443,195,76].resample('D', vdata.time, how='mean')
print dvm.to_pandas().to_csv()
| 6,281 | 2,477 |
# $Header: /nfs/slac/g/glast/ground/cvs/GlastRelease-scons/rootUtil/rootUtilLib.py,v 1.3 2008/10/27 17:49:11 ecephas Exp $
def generate(env, **kw):
    """SCons tool hook: add the rootUtil library and its ROOT dependencies to env.

    With kw['depsOnly'] truthy, only the dependency libraries are added.
    """
    if not kw.get('depsOnly', 0):
        env.Tool('addLibrary', library = ['rootUtil'])
        # Windows GlastRelease builds need the package path located explicitly.
        if env['PLATFORM']=='win32' and env.get('CONTAINERNAME','')=='GlastRelease':
            env.Tool('findPkgPath', package = 'rootUtil')
    # Dependency libraries (added in all cases).
    env.Tool('addLibrary', library = env['rootLibs'])
    env.Tool('addLibrary', library = env['minuitLibs'])
    env.Tool('addLibrary', library = env['rootGuiLibs'])
    if env['PLATFORM']=='win32' and env.get('CONTAINERNAME','')=='GlastRelease':
        env.Tool('findPkgPath', package = 'facilities')
def exists(env):
    """SCons tool hook: this tool is always available."""
    return 1
| 710 | 276 |
import os
import torch
import argparse
import yaml
import utils
import matplotlib.pyplot as plt
import seaborn as sns
import math
import numpy as np
from datetime import date
def plot_matrix_as_heatmap(matrix, show=False, title='', xlabel='', ylabel='', save_path=''):
    '''plots the cosine similariy matrix of a number of models
    or model configurations'''
    size = np.shape(np.array(matrix))[0]
    # Tick every quarter of the axis to keep labels sparse.
    tick_step = math.floor(size / 4)
    sns.set_theme()
    heat_ax = sns.heatmap(matrix, xticklabels=tick_step, yticklabels=tick_step, cmap='bwr')
    heat_ax.invert_yaxis()
    heat_ax.set_xlabel(xlabel)
    heat_ax.set_ylabel(ylabel)
    heat_ax.set_title(title)
    if save_path:
        plt.savefig(save_path)
    if show:
        plt.show()
# Runs live under the exman logs directory; the first two runs are excluded.
logs_path = os.path.join('logs', 'exman-train-net.py', 'runs')
runs = [os.path.join(logs_path, run) for run in os.listdir(logs_path) if run[:6] not in ['000001', '000002']]

INIT_NAMES = [['vae'], ['ghn_default']]
SAVE_PATH = os.path.join('..', '..', 'small-results', str(date.today()), 'prediction_similarity')
if not os.path.exists(SAVE_PATH):
    os.makedirs(SAVE_PATH)

parser = argparse.ArgumentParser()
parser.add_argument('--init', type=str)
parser.add_argument('--device')
parser.add_argument('--sim', choices=['pred', 'logits'])
args = parser.parse_args()

# BUG FIX: the parser defines --device, not --gpu_id; args.gpu_id raised
# AttributeError at startup.
os.environ['CUDA_VISIBLE_DEVICES'] = args.device
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.cuda.manual_seed_all(42)
torch.manual_seed(42)

# Collect checkpoints whose recorded init mode matches the requested init.
init = args.init
model_paths = []
for run in runs:
    file = os.path.join(run, 'net_params.torch')
    yaml_p = os.path.join(run, 'params.yaml')
    with open(yaml_p) as f:
        run_params = yaml.full_load(f)  # renamed from `dict` to stop shadowing the builtin
    if not 'mult_init_prior' in run_params:
        if run_params['mult_init_mode'] == init:
            model_paths.append(file)

_, testloader = utils.load_dataset(data='cifar', train_bs=64, test_bs=500, num_examples=None, seed=42, augmentation=False)

if args.sim == 'pred':
    # Fraction of test examples on which each pair of models predicts the same class.
    all_predictions = []
    for model_path in model_paths:
        if init == 'vae':
            model = utils.load_vae(model_path, device)
        predictions = []
        for x, _ in testloader:
            x = x.to(device)
            p = model(x)
            predictions.append(p.max(1)[1])
        predictions = torch.cat(predictions)
        all_predictions.append(predictions)
    all_predictions = torch.stack(all_predictions)  # (n_models, n_examples)
    # BUG FIX: the matrix is model-by-model, so it must be sized/looped by
    # shape[0]; shape[1] is the number of test examples and previously caused
    # out-of-range indexing on the model axis.
    n_models = all_predictions.shape[0]
    n_examples = all_predictions.shape[1]
    matrix = torch.zeros(n_models, n_models)
    for i in range(n_models):
        for j in range(i + 1):
            pred_sim = torch.sum(all_predictions[i] == all_predictions[j]) / n_examples
            matrix[i, j] = matrix[j, i] = pred_sim

if args.sim == 'logits':
    # Cosine similarity between the models' flattened logit vectors over the test set.
    CosineSimilarity = torch.nn.CosineSimilarity(dim=0)
    all_predictions = []
    for model_path in model_paths:
        if init == 'vae':
            model = utils.load_vae(model_path, device)
        predictions = []
        for x, _ in testloader:
            x = x.to(device)
            p = model(x)
            predictions.append(torch.flatten(p))
        predictions = torch.cat(predictions)
        all_predictions.append(predictions)
    all_predictions = torch.stack(all_predictions)  # (n_models, n_examples * n_classes)
    # BUG FIX: same model-count sizing fix as the 'pred' branch.
    n_models = all_predictions.shape[0]
    matrix = torch.zeros(n_models, n_models)
    for i in range(n_models):
        for j in range(i + 1):
            cos_sim = CosineSimilarity(all_predictions[i], all_predictions[j])
            matrix[i, j] = matrix[j, i] = cos_sim

title = f'{args.sim} Similarity of {args.init} inits'
save_path = os.path.join(SAVE_PATH, title)
plot_matrix_as_heatmap(matrix, title=title, save_path=save_path)
from datetime import datetime
from flask_login import UserMixin
from flask import Flask, current_app
from website.routes import get_db
db = get_db()  # Shared SQLAlchemy handle provided by the application module.

class User(db.Model, UserMixin):
    """User account row; UserMixin supplies the Flask-Login session hooks."""
    id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    username = db.Column(db.String(20), unique=True, nullable=False)  # unique login handle
    email = db.Column(db.String(120), unique=True, nullable=False)  # unique contact address
    # 60 chars suggests a bcrypt digest is stored here -- TODO confirm hashing at the call sites.
    password = db.Column(db.String(60), nullable=False)
| 425 | 144 |
import sublime
import sublime_plugin
# Related Reading:
# https://forum.sublimetext.com/t/find-for-a-macro/57387/
#
# This example command allows you to jump the cursor to the next or previous
# location of a given pattern of text, which can be either a regex or not and
# case sensitive or not based on command arguments.
#
# A use case for this is implementing a specific Find operation in a macro in
# a repeatable way.
class PatternNavigateCommand(sublime_plugin.TextCommand):
    """
    Jump the selection in the file to the next or previous location of the
    given textual pattern based on the current cursor location. The search
    direction is controlled by the forward argument, and will wrap around the
    ends of the buffer.
    """
    def run(self, edit, pattern, literal=True, ignorecase=False, forward=True):
        # Translate the boolean arguments into Sublime search flags.
        search_flags = 0
        if literal:
            search_flags |= sublime.LITERAL
        if ignorecase:
            search_flags |= sublime.IGNORECASE

        # All occurrences of the pattern; nothing to do when there are none.
        matches = self.view.find_all(pattern, search_flags)
        if not matches:
            return

        # Search starts from the active end of the first selection; remember
        # where to wrap to if nothing lies beyond it.
        caret = self.view.sel()[0].b
        wrap_target = matches[0] if forward else matches[-1]

        self.view.sel().clear()

        # Scan matches in the travel direction for the first one strictly
        # beyond the caret, and jump there if found.
        ordered = matches if forward else list(reversed(matches))
        for region in ordered:
            beyond = (caret < region.a) if forward else (caret > region.a)
            if beyond:
                self.jump(region.a)
                return

        # No match in the travel direction: wrap around to the far end.
        self.jump(wrap_target.a)

    def jump(self, point):
        # Select the target position and scroll it into view.
        self.view.sel().add(sublime.Region(point))
        self.view.show(point, True)
| 2,116 | 569 |
import argparse
from pathlib import Path
import markdown2
import jinja2
import os
import shutil
# CLI: -i input file/folder, -o output folder, -t jinja2 template,
# -s flag = treat input as a folder of sources. (Help strings are in French.)
parser = argparse.ArgumentParser()
parser.add_argument("-i", help="Chemin du/des source.", type=str)
parser.add_argument("-o", help="Chemin du dossier des fichiers générés.", type=str)
parser.add_argument("-t", help="Chemin du dossier des fichiers modeles.", type=str)
parser.add_argument(
    "-s", help="type de sources (fichier ou dossier).", action="store_true"
)
parser.add_argument("-v", "--verbose", help="Verbose mode.", action="store_true")
args = parser.parse_args()

VERBOSE = args.verbose
if VERBOSE:
    print("input :", args.i)
    print("output :", args.o)
    print("template:", args.t)
    print("type input", args.s)
def add_image(html_test):
    """Copy every image referenced by an <img> tag into <output>/src and
    rewrite the tag's src attribute to point at the copy.

    Returns the rewritten HTML (one trailing newline is appended, matching the
    line-by-line reassembly). Relies on the module-level CLI args for paths.
    """
    src_path = Path(args.o + "/src")
    if not os.path.exists(src_path):
        os.makedirs(src_path)
    # Images are resolved relative to the input folder (or the input file's folder).
    if args.s:
        input_path = os.fspath(args.i)
    else:
        input_path = os.path.dirname(args.i)
    rewritten_lines = []
    for line in html_test.split("\n"):
        line = str(line)
        if "<img " in line:
            image_path = line.split('src="')[1].split('" ')[0]
            if VERBOSE:
                print("image_path", image_path)
            # Bare file name, tolerating both / and \ separators.
            image_name = image_path.split("/")[-1].split("\\")[-1]
            if VERBOSE:
                print("image_name", image_name)
            shutil.copyfile(
                str(input_path) + "/" + image_path, str(src_path) + "/" + image_name
            )
            # Re-point the src attribute at the copied file.
            line = (
                line.split('src="')[0]
                + 'src="./src/'
                + image_name
                + '" '
                + line.split('src="')[-1].split('" ')[-1]
            )
        rewritten_lines.append(line)
    return "\n".join(rewritten_lines) + "\n"
if __name__ == "__main__":
if args.s:
if args.i != None and args.o != None:
with Path(args.i) as directory:
for file in list(directory.glob("*_main.md")):
config_dict = {}
with open(file, "r") as input_file:
if VERBOSE:
print("intput file :", input_file.name)
file_name = (
input_file.name.split(".")[-2]
.split("/")[-1]
.split("\\")[-1]
.split("_main")[0]
)
with open(
str(args.o) + "/" + str(file_name) + ".html", "w"
) as output_file:
if VERBOSE:
print("output file :", output_file.name)
html = markdown2.markdown(input_file.read())
config_dict["main"] = html
if args.t != None:
for config_file in list(
directory.glob(file_name + "*.md")
):
config_name = (
config_file.name.split(".")[-2]
.split(file_name + "_")[-1]
.lower()
)
if config_name != "main":
with open(config_file, "r") as open_config_file:
config_dict[
config_name
] = open_config_file.read()
with open(args.t) as template_file:
resutl = jinja2.Template(
template_file.read()
).render(config_dict)
else:
resutl = html
if VERBOSE:
print("template file :", args.t)
resutl = add_image(resutl)
output_file.write(resutl)
else:
if args.i != None and args.o != None:
config_dict = {}
with open(args.i, "r") as input_file:
if VERBOSE:
print("intput file :", input_file.name)
file_name = (
input_file.name.split(".")[-2]
.split("/")[-1]
.split("\\")[-1]
.split("_main")[0]
)
with open(
str(args.o) + "/" + str(file_name) + ".html", "w"
) as output_file:
if VERBOSE:
print("output file :", output_file.name)
html = markdown2.markdown(input_file.read())
config_dict["main"] = html
if args.t != None:
path_directory = args.i.split(file_name + "_main.md")[0]
with Path(path_directory) as directory:
# recupe le dossier ou est le fichier
for config_file in list(directory.glob(file_name + "*.md")):
config_name = (
config_file.name.split(".")[-2]
.split(file_name + "_")[-1]
.lower()
)
if config_name != "main":
with open(config_file, "r") as open_config_file:
config_dict[
config_name
] = open_config_file.read()
with open(args.t) as template_file:
resutl = jinja2.Template(template_file.read()).render(
config_dict
)
else:
resutl = html
if VERBOSE:
print("template file :", args.t)
resutl = add_image(resutl)
output_file.write(resutl)
| 6,440 | 1,708 |
from enum import Enum
from typing import Tuple, Union
# CardinalityLiteral = Literal["1:1", "1:N", "N:1", "M:N"]
CardinalityT = Union[str, "Cardinality"]
class Cardinality(Enum):
    """Enumeration type for cardinality relationships.
    Cardinalities are comparable using numerical operators, and can be thought of as comparing "preciseness". The less
    ambiguity there is for a given cardinality, the smaller it is in comparison to the others. The hierarchy is given by
    ``1:1 < 1:N = N:1 < M:N``. Note that ``1:N`` and ``N:1`` are considered equally precise.
    Examples:
        Comparing cardinalities
        >>> from rics.cardinality import Cardinality
        >>> Cardinality.ManyToOne
        <Cardinality.ManyToOne: 'N:1'>
        >>> Cardinality.OneToOne
        <Cardinality.OneToOne: '1:1'>
        >>> Cardinality.ManyToOne < Cardinality.OneToOne
        False
    """
    OneToOne = "1:1"
    OneToMany = "1:N"
    ManyToOne = "N:1"
    ManyToMany = "M:N"
    @property
    def many_left(self) -> bool:
        """Many-relationship on the left, True for ``N:1`` and ``M:N``."""
        return self == Cardinality.ManyToMany or self == Cardinality.ManyToOne  # pragma: no cover
    @property
    def many_right(self) -> bool:
        """Many-relationship on the right, True for ``1:N`` and ``M:N``."""
        return self == Cardinality.ManyToMany or self == Cardinality.OneToMany  # pragma: no cover
    @property
    def one_left(self) -> bool:
        """One-relationship on the left, True for ``1:1`` and ``1:N``."""
        return not self.many_left  # pragma: no cover
    @property
    def one_right(self) -> bool:
        """One-relationship on the right, True for ``1:1`` and ``N:1``."""
        return not self.many_right  # pragma: no cover
    @property
    def inverse(self) -> "Cardinality":
        """Inverse cardinality. For symmetric cardinalities, ``self.inverse == self``.
        Returns:
            Inverse cardinality.
        See Also:
            :attr:`symmetric`
        """
        if self == Cardinality.OneToMany:
            return Cardinality.ManyToOne
        if self == Cardinality.ManyToOne:
            return Cardinality.OneToMany
        return self
    @property
    def symmetric(self) -> bool:
        """Symmetry flag. For symmetric cardinalities, ``self.inverse == self``.
        Returns:
            Symmetry flag.
        See Also:
            :attr:`inverse`
        """
        return self == Cardinality.OneToOne or self == Cardinality.ManyToMany
    def __ge__(self, other: "Cardinality") -> bool:
        """Equivalent to :meth:`set.issuperset`."""
        return _is_superset(self, other)
    def __lt__(self, other: "Cardinality") -> bool:
        """Negation of :meth:`__ge__` (strictly less precise)."""
        return not self >= other
    @classmethod
    def from_counts(cls, left_count: int, right_count: int) -> "Cardinality":
        """Derive a `Cardinality` from counts.
        Args:
            left_count: Number of elements on the left-hand side.
            right_count: Number of elements on the right-hand side.
        Returns:
            A :class:`Cardinality`.
        Raises:
            ValueError: For counts < 1.
        """
        return _from_counts(left_count, right_count)
    @classmethod
    def parse(cls, arg: CardinalityT, strict: bool = False) -> "Cardinality":
        """Convert to cardinality.
        Args:
            arg: Argument to parse.
            strict: If True, `arg` must match exactly when it is given as a string.
        Returns:
            A :class:`Cardinality`.
        Raises:
            ValueError: If the argument could not be converted.
        """
        return arg if isinstance(arg, Cardinality) else _from_generous_string(arg, strict)
########################################################################################################################
# Supporting functions
#
# Would rather have this in a "friend module", but that's not practical (before 3.10?)
########################################################################################################################
def _parsing_failure_message(arg: str, strict: bool) -> str:
    """Build the error message for a failed cardinality parse.

    Args:
        arg: The string that could not be converted.
        strict: Whether parsing was attempted in strict mode.

    Returns:
        A human-readable message listing valid inputs. When a non-strict
        parse of the same input would have succeeded, a hint is appended.
    """
    options = tuple(c.value for c in Cardinality)
    alternatively = tuple(c.name for c in Cardinality)
    strict_hint = "."
    if strict:
        try:
            # Re-parse leniently without reassigning the `strict` parameter
            # (the original shadowed it, which obscured the hint logic).
            Cardinality.parse(arg, strict=False)
            strict_hint = ". Hint: set strict=False to allow this input."
        except ValueError:
            pass
    return f"Could not convert {arg=} to Cardinality{strict_hint} Correct input {options=} or {repr(alternatively)}"
# 2x2 lookup table used by _pos() and _from_counts().
# Row index is 1 when the left side has exactly one element, column index is 1
# when the right side does; e.g. _MATRIX[1][1] is OneToOne, _MATRIX[0][0] M:N.
_MATRIX = (
    (Cardinality.ManyToMany, Cardinality.ManyToOne),
    (Cardinality.OneToMany, Cardinality.OneToOne),
)
def _is_superset(c0: Cardinality, c1: Cardinality) -> bool:
    """True when `c0` covers every relationship allowed by `c1`."""
    if c0 == c1:
        return True
    row0, col0 = _pos(c0)
    row1, col1 = _pos(c1)
    # Smaller matrix coordinates mean "more permissive" on that side.
    return row0 <= row1 and col0 <= col1
def _pos(cardinality: Cardinality) -> Tuple[int, int]:
    """Locate `cardinality` in the 2x2 ``_MATRIX`` lookup table."""
    for row in range(2):
        row_values = _MATRIX[row]
        for col in range(2):
            if row_values[col] == cardinality:
                return row, col
    raise AssertionError("This should be impossible.")
def _from_counts(left_count: int, right_count: int) -> Cardinality:
    """Translate per-side element counts into a :class:`Cardinality`.

    Raises:
        ValueError: For counts < 1.
    """
    if left_count < 1:
        raise ValueError(f"{left_count=} < 1")
    if right_count < 1:
        raise ValueError(f"{right_count=} < 1")
    # A count of exactly one selects the "one" row/column of the table.
    return _MATRIX[1 if left_count == 1 else 0][1 if right_count == 1 else 0]
def _from_generous_string(s: str, strict: bool) -> Cardinality:
    """Parse a cardinality string, normalizing lenient input unless strict.

    Raises:
        ValueError: If no member's value matches the (normalized) string.
    """
    if not strict:
        # Accept e.g. " 1-n " or "*:*" by normalizing to the canonical form.
        s = s.strip().upper().replace("-", ":", 1).replace("*", "N", 2)
    if s == "N:N":
        s = "M:N"
    member = next((c for c in Cardinality if c.value == s), None)
    if member is None:
        raise ValueError(_parsing_failure_message(s, strict))
    return member
| 5,837 | 1,846 |
#!/usr/bin/env python3
import argparse
import os
import sys
import re
import yaml
import jinja2
# {{{1 Parse arguments
# Command-line interface. Both roots are required and repeatable; extra
# Jinja-templated files may be linted alongside the states via -f.
parser = argparse.ArgumentParser(description="Lints Salt states to ensure " +
                                 "pillars are used correctly")
parser.prog = 'salt-pillar-linter'
parser.add_argument('-p',
                    action='append',
                    metavar='PILLARS_ROOT',
                    required=True,
                    dest='pillar_roots',
                    help="Directories where pillars are present, can be " +
                    "specified multiple times")
parser.add_argument('-s',
                    action='append',
                    metavar='STATES_ROOT',
                    required=True,
                    dest='state_roots',
                    help="Directories where states are located, can be " +
                    "specified multiple times")
parser.add_argument('-f',
                    action='append',
                    metavar='TMPL_FILE',
                    dest='template_files',
                    help="Non state files which uses Jinja templating to " +
                    "check, can be specified multiple times")
parser.add_argument('-d',
                    action='store_true',
                    default=False,
                    dest='debug',
                    help="Print additional debug information")
args = parser.parse_args()
# {{{1 Locate all state and pillar files
def gather_sls_files(initial_dirs):
    """Recursively collect all ``.sls`` files under the given root directories.

    Args:
        initial_dirs: Iterable of directory paths to search.

    Returns:
        set: Paths of every ``*.sls`` file found, excluding ``top.sls``.
    """
    sls_files = set()
    for root in initial_dirs:
        # os.walk already descends into every sub-directory, so one walk per
        # root suffices. (The original re-queued each sub-directory and
        # walked it again, re-visiting directories many times over.)
        for top_dir, _sub_dirs, files in os.walk(root):
            sls_files.update(
                os.path.join(top_dir, f) for f in files
                if f != 'top.sls' and os.path.splitext(f)[1] == '.sls')
    return sls_files
# Collect every .sls file under the pillar and state roots; the optional
# -f template files are linted together with the states.
pillar_files = gather_sls_files(args.pillar_roots)
state_files = gather_sls_files(args.state_roots)
if args.template_files:
    state_files.update(args.template_files)
# {{{1 Get all pillar keys
def flatten_dict(d, parent_key=''):
    """Return a list of dotted key paths for every leaf of a nested dict.

    Args:
        d: Dictionary to flatten (values may themselves be dicts).
        parent_key: Dotted prefix accumulated during recursion.

    Returns:
        list: Keys such as ``'a'`` or ``'b.c.d'`` in iteration order.
    """
    keys = []
    for k, v in d.items():
        combined_key = "{}.{}".format(parent_key, k) if parent_key else k
        # isinstance also recurses into dict subclasses (e.g. OrderedDict),
        # which the original `type(...) == dict` check treated as leaves.
        if isinstance(v, dict):
            keys.extend(flatten_dict(v, parent_key=combined_key))
        else:
            keys.append(combined_key)
    return keys
# Render every pillar file through Jinja, parse the YAML, and record the
# flattened key paths that states are allowed to reference.
pillar_keys = {}
loader = jinja2.FileSystemLoader(searchpath=os.getcwd())
env = jinja2.Environment(loader=loader)
if args.debug:
    print("###################")
    print("# PARSING PILLARS #")
    print("###################")
for pillar_file in pillar_files:
    template = env.get_template(pillar_file)
    try:
        template_str = template.render()
    except Exception as e:
        raise ValueError("Failed to render Jinja template: {}".format(e))
    # safe_load: a bare yaml.load() without a Loader is deprecated since
    # PyYAML 5.1 and an error in 6.0, and can construct arbitrary objects.
    value = yaml.safe_load(template_str)
    flat_keys = flatten_dict(value)
    if args.debug:
        print()
        print("{} keys:".format(pillar_file))
        print()
        for k in flat_keys:
            print("  {}".format(k))
    for k in flat_keys:
        pillar_keys[k] = True
if args.debug:
    print()
# {{{1 Lint states
# Scan every state/template file for {{ pillar.<dotted.key> }} usages and
# report any key that was not found in the parsed pillar data.
if args.debug:
    print("##################")
    print("# LINTING STATES #")
    print("##################")
# Captures the dotted key inside a Jinja expression like {{ pillar.a.b }}
jinja_pattern = re.compile(r"{{\s*pillar\.([0-9a-zA-Z\._]*)\s*}}")
for state_file in state_files:
    with open(state_file, 'r') as f:
        line_num = 1
        # Maps line number -> list of unknown pillar keys used on that line
        not_keys = {}
        if args.debug:
            print("{} keys used by state:".format(state_file))
            print()
        # For each line in a state
        for line in f:
            # For each Jinja pillar usage in state
            for match in re.finditer(jinja_pattern, line):
                # Get groups from match
                for pillar_str in match.groups():
                    if args.debug:
                        print("  {}".format(pillar_str))
                    # Check if pillar key used exists
                    if pillar_str not in pillar_keys:
                        # Create entry in not_keys dict for line if this is the
                        # first item on this line
                        if line_num not in not_keys:
                            not_keys[line_num] = []
                        # Add pillar key to dict so we can tell user about
                        # improper usage later
                        not_keys[line_num].append(pillar_str)
            # Increment line number so we can keep track of where errors are
            line_num += 1
        if args.debug:
            print()
        # If any errors
        if not_keys:
            common_prefix = os.path.commonprefix([os.getcwd(), state_file])
            pretty_file_name = os.path.relpath(state_file, common_prefix)
            print("{} uses pillar keys which do not exist".format(pretty_file_name))
            for line_num in not_keys:
                print("  Line {}:".format(line_num))
                for k in not_keys[line_num]:
                    print("    {}".format(k))
            print()
| 5,443 | 1,594 |
from tests.unit.dataactcore.factories.staging import AwardFinancialFactory
from tests.unit.dataactcore.factories.domain import ProgramActivityFactory
from tests.unit.dataactcore.factories.job import SubmissionFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'b9_award_financial'
def test_column_headers(database):
    """The rule B9 validation query must expose the columns used in reports."""
    expected_subset = {'row_number', 'agency_identifier', 'main_account_code',
                       'program_activity_name', 'program_activity_code'}
    actual = set(query_columns(_FILE, database))
    assert (actual & expected_subset) == expected_subset
def test_success(database):
    """ Testing valid program activity name for the corresponding TAS/TAFS as defined in Section 82 of OMB Circular
    A-11. """
    # Both award financial rows match the reference program activity record.
    af_1 = AwardFinancialFactory(row_number=1, agency_identifier='test', main_account_code='test',
                                 program_activity_name='test', program_activity_code='test')
    af_2 = AwardFinancialFactory(row_number=2, agency_identifier='test', main_account_code='test',
                                 program_activity_name='test', program_activity_code='test')
    pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                account_number='test', program_activity_name='test', program_activity_code='test')
    assert number_of_errors(_FILE, database, models=[af_1, af_2, pa]) == 0
def test_success_null(database):
    """Rows with null program activity name/code are not flagged by rule B9."""
    af = AwardFinancialFactory(row_number=1, agency_identifier='test', main_account_code='test',
                               program_activity_name=None, program_activity_code=None)
    pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                account_number='test')
    assert number_of_errors(_FILE, database, models=[af, pa]) == 0
def test_success_fiscal_year(database):
    """ Testing valid name for FY that matches with budget_year"""
    # NOTE(review): a FY2017 submission validates against reference records
    # with budget_year 2016 and 2017 here, while 2019 fails below -- the rule
    # appears to accept a window of budget years; confirm against the SQL.
    af_1 = AwardFinancialFactory(row_number=1, submission_id='1', agency_identifier='test',
                                 main_account_code='test', program_activity_name='test',
                                 program_activity_code='test')
    af_2 = AwardFinancialFactory(row_number=1, submission_id='1', agency_identifier='test2',
                                 main_account_code='test2', program_activity_name='test2',
                                 program_activity_code='test2')
    pa_1 = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                  account_number='test', program_activity_name='test', program_activity_code='test')
    pa_2 = ProgramActivityFactory(budget_year=2017, agency_id='test2', allocation_transfer_id='test2',
                                  account_number='test2', program_activity_name='test2', program_activity_code='test2')
    submission = SubmissionFactory(submission_id='1', reporting_fiscal_year='2017')
    assert number_of_errors(_FILE, database, models=[af_1, af_2, pa_1, pa_2], submission=submission) == 0
def test_failure_fiscal_year(database):
    """ Testing invalid name for FY, not matches with budget_year"""
    # The only matching reference record (pa_4) has budget_year 2019, which is
    # outside the FY2017 submission's accepted range, so one error is raised.
    af = AwardFinancialFactory(row_number=1, submission_id='1', agency_identifier='test4',
                               main_account_code='test4', program_activity_name='test4',
                               program_activity_code='test4')
    pa_1 = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                  account_number='test', program_activity_name='test', program_activity_code='test')
    pa_2 = ProgramActivityFactory(budget_year=2017, agency_id='test2', allocation_transfer_id='test2',
                                  account_number='test2', program_activity_name='test2', program_activity_code='test2')
    pa_3 = ProgramActivityFactory(budget_year=2018, agency_id='test3', allocation_transfer_id='test3',
                                  account_number='test3', program_activity_name='test3', program_activity_code='test3')
    pa_4 = ProgramActivityFactory(budget_year=2019, agency_id='test4', allocation_transfer_id='test4',
                                  account_number='test4', program_activity_name='test4', program_activity_code='test4')
    submission = SubmissionFactory(submission_id='1', reporting_fiscal_year='2017')
    assert number_of_errors(_FILE, database, models=[af, pa_1, pa_2, pa_3, pa_4], submission=submission) == 1
def test_success_ignore_case(database):
    """ Testing program activity validation to ignore case """
    # 'TEST' vs 'test' in program_activity_name must still match.
    af = AwardFinancialFactory(row_number=1, agency_identifier='test', main_account_code='test',
                               program_activity_name='TEST', program_activity_code='test')
    pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                account_number='test', program_activity_name='test', program_activity_code='test')
    assert number_of_errors(_FILE, database, models=[af, pa]) == 0
def test_failure_program_activity_name(database):
    """ Testing invalid program activity name for the corresponding TAS/TAFS as defined in Section 82 of OMB Circular
    A-11. """
    # af_1 has a wrong name with a valid code; af_2 has a wrong name with the
    # special '0000' code. Only one of the two rows is counted as an error.
    af_1 = AwardFinancialFactory(row_number=1, agency_identifier='test',
                                 main_account_code='test', program_activity_name='test_wrong',
                                 program_activity_code='test')
    af_2 = AwardFinancialFactory(row_number=1, agency_identifier='test',
                                 main_account_code='test', program_activity_name='test_wrong',
                                 program_activity_code='0000')
    pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                account_number='test', program_activity_name='test', program_activity_code='test')
    assert number_of_errors(_FILE, database, models=[af_1, af_2, pa]) == 1
def test_failure_program_activity_code(database):
    """Failure where the program_activity_code does not match"""
    # af_1 has a mismatched code; af_2 uses the 'Unknown/Other' name, which is
    # apparently exempt -- only one error is expected.
    af_1 = AwardFinancialFactory(row_number=1, agency_identifier='test',
                                 main_account_code='test', program_activity_name='test',
                                 program_activity_code='test_wrong')
    af_2 = AwardFinancialFactory(row_number=1, agency_identifier='test', main_account_code='test',
                                 program_activity_name='Unknown/Other', program_activity_code='12345')
    pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                account_number='test', program_activity_name='test', program_activity_code='test')
    assert number_of_errors(_FILE, database, models=[af_1, af_2, pa]) == 1
def test_success_null_program_activity(database):
    """Null name/code passes even when the row matches no reference record."""
    af = AwardFinancialFactory(row_number=1, agency_identifier='test_wrong',
                               main_account_code='test', program_activity_name=None, program_activity_code=None)
    pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test',
                                account_number='test')
    assert number_of_errors(_FILE, database, models=[af, pa]) == 0
| 7,493 | 2,174 |
'''
Optimal hyperparameters for CM + Laplacian kernel
Ea: alpha 1e-11, gamma 1e-4
polarizability: alpha 1e-3, gamma 1e-4
HOMO-LUMO gap: alpha 1e-2, gamma 1e-4
Dipole moment: alpha 1e-1, gamma 1e-3
Optimal hyperparameters for BoB + Laplacian kernel
Ea: alpha 1e-11, gamma 1e-5
polarizability: alpha 1e-3, gamma 1e-4
HOMO-LUMO gap: alpha 1e-3, gamma 1e-4
Dipole moment: alpha 1e-1, gamma 1e-3
Optimal hyperparameters for MBTR + Gaussian kernel
Ea: alpha 1e-7, gamma 1e-8
polarizability: alpha 1e-6, gamma 1e-7
HOMO-LUMO gap: alpha 1e-3, gamma 1e-6
Dipole moment: alpha 1e-2, gamma 1e-5
Results for CM + Laplacian kernel
Ea: MAE 0.38, RMSE 0.55, R2 0.9977
polarizability: MAE 0.12, RMSE 0.18, R2 0.9828
HOMO-LUMO gap: MAE 0.56, RMSE 0.70, R2 0.7203
Dipole moment: MAE 0.14, RMSE 0.19, R2 0.5901
Results for BoB + Laplacian kernel
Ea: MAE 0.08, RMSE 0.13, R2 0.9998
polarizability: MAE 0.06, RMSE 0.09, R2 0.9952
HOMO-LUMO gap: MAE 0.23, RMSE 0.31, R2 0.9465
Dipole moment: MAE 0.11, RMSE 0.16, R2 0.7327
Results for MBTR + Gaussian kernel
Ea: MAE 0.04, RMSE 0.06, R2 0.9999
polarizability: MAE 0.02, RMSE 0.04, R2 0.9993
HOMO-LUMO gap: MAE 0.17, RMSE 0.23, R2 0.9686
Dipole moment: MAE 0.08, RMSE 0.11, R2 0.8508
'''
import numpy as np
import matplotlib.pyplot as plt
import os
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import mean_absolute_error as MAE
from sklearn.metrics import mean_squared_error as MSE
from sklearn.metrics import r2_score as R2
from scipy.special import comb
from itertools import combinations, permutations
## This part of the code reads the raw data (.xyz files) and returns the central quantities stored in arrays
def preprocess(datasize,atoms):
    """Read QM9-style .xyz files and return per-molecule property arrays.

    Args:
        datasize: number of molecules to load; all 3963 molecules with <= 7
            non-H atoms plus (datasize - 3963) random 8-non-H-atom molecules.
        atoms: element symbols ['H','C','N','O','F'], in the order matching
            the `atomref` single-atom energies below.

    Returns:
        Tuple of numpy arrays: (natoms, Ea, dipmom, polar, gap, atomlist,
        coords, nonHatoms).

    NOTE(review): assumes the dataset lives in ./xyz/ with the
    dsgdb9nsd_XXXXXX.xyz naming of the GDB-9 set -- confirm data layout.
    """
    # Selects all molecules with 7 or fewer non-H atoms (3963) and (datasize - 3963) molecules with 8 non-H atoms at random.
    # This compensates the underrepresentation of small molecules (molecules with 9 non-H atoms are excluded)
    ind = np.concatenate((np.arange(1,3964),np.random.randint(3964,21989,size=datasize-3963)))
    # Initialize the variables as empty lists
    # natoms = number of atoms in a given molecule
    # nonHatoms = number of non-H atoms in a given molecule
    # Ea = Atomization energy (Ha)
    # dipmom = Dipole moment (D)
    # polar = Isotropic polarizability (bohr^3)
    # atomlist = list of the atoms constituting a given molecule (e.g. ['C','H','H','H'] for methane)
    # coords = xyz coordinates of each atom in a given molecule
    natoms,nonHatoms,Ea,polar,dipmom,gap,atomlist,coords=[],[],[],[],[],[],[],[]
    # Energies (Ha) of single atoms [H,C,N,O,F]
    atomref=[-0.500273,-37.846772,-54.583861,-75.064579,-99.718730]
    # Loop over all selected indices (molecules)
    for i in ind:
        # Initialize list that will contain coordinates and element types of ith molecule
        xyz,elemtype,mulliken,nnonH=[],[],[],0
        # This pads the index with zeros so that all contain 6 digits (e.g. index 41 -> 000041)
        i = str(i).zfill(6)
        # Define the path to the .xyz file of ith molecule. Here it is assumed that the dataset is stored in a
        # subdirectory "xyz" within the one containing machine.py
        # xyz/*.xyz
        fpath = os.path.join('xyz',"dsgdb9nsd_%s.xyz" % i)
        # Open the file and loop over the lines
        with open(fpath) as f:
            for j, line in enumerate(f):
                if j == 0:
                    # Number of atoms in molecule
                    na = int(line)
                    natoms.append(na)
                elif j == 1:
                    # Properties written on second line. Atomization energy, dipole moment, polarizability, HOMO-LUMO gap
                    # (conversion factors: 0.20819 D -> e*Angstrom, 0.14818 bohr^3 -> Angstrom^3, 27.21139 Ha -> eV)
                    E = float(line.split()[12])
                    dipmom.append(float(line.split()[5])*0.20819)
                    polar.append(float(line.split()[6])*0.14818)
                    gap.append(float(line.split()[9])*27.21139)
                elif 2 <= j <= na+1:
                    # Lines 2 -> na+1 contains element types, coordinates and charges
                    parts = line.split()
                    # Index 0 = element type, 1 = x, 2 = y, 3 = z
                    elemtype.append(parts[0])
                    # Subtract energy of isolated atom from total energy
                    E = E - atomref[atoms.index(parts[0])]
                    if parts[0] != 'H':
                        nnonH += 1
                    xyz.append(np.array([float(parts[1]),float(parts[2]),float(parts[3])]))
        Ea.append(-E*27.21139)
        atomlist.append(elemtype)
        coords.append(xyz)
        nonHatoms.append(nnonH)
    # Return all lists in the form of numpy arrays
    return np.array(natoms),np.array(Ea),np.array(dipmom),np.array(polar),np.array(gap), \
        np.array(atomlist),np.array(coords),np.array(nonHatoms)
def gauss(x, weight, sigma, mu):
    """Weighted Gaussian of `x` with mean `mu` and standard deviation `sigma`."""
    prefactor = weight / (sigma * np.sqrt(2 * np.pi))
    return prefactor * np.exp(-((x - mu) ** 2) / (2 * sigma ** 2))
# The many-body tensor representation (MBTR) descriptor
def mbtr(atomlist, coords, atoms, Z):
    """Compute the many-body tensor representation (MBTR) per molecule.

    Args:
        atomlist: per-molecule lists of element symbols, e.g. ['C','H',...].
        coords: per-molecule lists of xyz coordinate arrays (one per atom).
        atoms: element symbols, e.g. ['H','C','N','O','F'], in the order
            matching `Z`.
        Z: nuclear charges corresponding to `atoms`.

    Returns:
        List of 1-D numpy arrays, one concatenated MBTR vector per molecule.
    """
    # Decay factor (d) and sigmas are roughly optimal
    d = 0.5
    w1 = 1
    sigma1, sigma2, sigma3 = 0.1, 0.01, 0.05
    x1 = np.linspace(0, 10, 201)
    x2 = np.linspace(0, 1.25, 201)
    x3 = np.linspace(-1, 1, 201)
    mbtr_output = []
    # BUGFIX: use a separate name for the bag keys instead of rebinding the
    # `atoms` parameter. The original shadowed `atoms` with an arbitrarily
    # ordered set, so the `Z[atoms.index(...)]` lookup below picked the wrong
    # nuclear charge for each element.
    elements = list(set([''.join(p) for p in combinations('CHONF', 1)]))
    pairs = list(set([''.join(p) for p in combinations('CCHHOONNFF', 2)]))
    triples = list(set([''.join(p) for p in permutations('CCCHHHOOONNNFFF', 3)]))
    for i in range(len(atomlist)):
        bag1 = dict((k, np.zeros(len(x1))) for k in elements)
        bag2 = dict((k, np.zeros(len(x2))) for k in pairs)
        bag3 = dict((k, np.zeros(len(x3))) for k in triples)
        MBTRvec = np.array([])
        for j in range(len(atomlist[i])):
            # One-body term: Gaussian centred on the nuclear charge.
            g1 = Z[atoms.index(atomlist[i][j])]
            bag1[atomlist[i][j]] += gauss(x1, w1, sigma1, g1)
            for k in range(len(atomlist[i])):
                if k > j:
                    # Two-body term: inverse distance, weighted by exp(-d*R).
                    Rjk = np.linalg.norm(coords[i][j] - coords[i][k])
                    w2 = np.exp(-d * Rjk)
                    g2 = 1 / Rjk
                    try:
                        bag2[atomlist[i][j] + atomlist[i][k]] += gauss(x2, w2, sigma2, g2)
                    except KeyError:
                        # Pair key may be stored in the reversed element order.
                        bag2[atomlist[i][k] + atomlist[i][j]] += gauss(x2, w2, sigma2, g2)
                    for l in range(len(atomlist[i])):
                        if l > k:
                            # Three-body term: cosine of the angle at atom l.
                            Rjl = np.linalg.norm(coords[i][j] - coords[i][l])
                            Rkl = np.linalg.norm(coords[i][k] - coords[i][l])
                            w3 = np.exp(-d * (Rjk + Rjl + Rkl))
                            g3 = np.dot(coords[i][j] - coords[i][l], coords[i][k] - coords[i][l]) / (Rjl * Rkl)
                            try:
                                bag3[atomlist[i][j] + atomlist[i][l] + atomlist[i][k]] += gauss(x3, w3, sigma3, g3)
                            except KeyError:
                                bag3[atomlist[i][k] + atomlist[i][l] + atomlist[i][j]] += gauss(x3, w3, sigma3, g3)
        for atom in bag1:
            MBTRvec = np.concatenate((MBTRvec, bag1[atom]))
        for pair in bag2:
            MBTRvec = np.concatenate((MBTRvec, bag2[pair]))
        for triple in bag3:
            MBTRvec = np.concatenate((MBTRvec, bag3[triple]))
        mbtr_output.append(MBTRvec)
    return mbtr_output
## The bag-of-bonds (BOB) descriptor
def bob(atomlist, coords, atoms, Z):
    """Bag-of-bonds descriptor: per-pair-type, descending-sorted Coulomb terms.

    Each molecule becomes one vector: for every element-pair type, a
    fixed-length bag of Z_i*Z_j/R_ij terms padded with zeros.
    """
    bob_output = []
    # 18 H atoms in octane -> comb(18,2) H-H pairs (max. size of a bond
    # vector in a bag of bonds)
    dim = int(comb(18, 2))
    pair_types = list(set([''.join(p) for p in combinations('CCHHOONNFF', 2)]))
    for idx in range(len(atomlist)):
        elems = atomlist[idx]
        xyz = coords[idx]
        bag = dict((key, dim * [0]) for key in pair_types)
        vec = np.array([])
        for j in range(len(elems)):
            for k in range(j):
                # Coulomb term for the (j, k) bond.
                term = Z[atoms.index(elems[j])] * Z[atoms.index(elems[k])] / np.linalg.norm(xyz[j] - xyz[k])
                key = elems[j] + elems[k]
                if key not in bag:
                    # The pair type may be stored under the reversed order.
                    key = elems[k] + elems[j]
                # Push the term to the front, dropping one padding zero to
                # keep every bag at its fixed length.
                bag[key].insert(0, term)
                del bag[key][-1]
        for pair in bag:
            vec = np.concatenate((vec, np.array(sorted(bag[pair], reverse=True))))
        bob_output.append(vec)
    return bob_output
## The following function takes the number of atoms in each molecule, the atom types and corresponding coordinates
## and returns an array of corresponding Coulomb matrices (CM)
def coulomb(natoms, atomlist, coords, atoms, Z):
    """Sorted Coulomb-matrix descriptor, flattened to the lower triangle.

    Matrices are zero-padded to the size of the largest molecule, and rows
    are reordered by descending row norm before flattening.
    """
    # Pad every matrix to the largest molecule's atom count.
    dim = natoms.max()
    CMvec = []
    for i, elems in enumerate(atomlist):
        mat = np.zeros((dim, dim))
        for j in range(len(elems)):
            zj = Z[atoms.index(elems[j])]
            for k in range(len(elems)):
                if j == k:
                    # Diagonal: self-interaction term 0.5 * Z^2.4
                    mat[j][k] = 0.5 * zj ** 2.4
                else:
                    mat[j][k] = zj * Z[atoms.index(elems[k])] / \
                        np.linalg.norm(coords[i][j] - coords[i][k])
        # Reorder rows by descending norm, then keep the lower triangle
        # (incl. diagonal) as the final 1-D descriptor.
        order = np.argsort(-np.linalg.norm(mat, axis=1))
        mat = mat[order]
        CMvec.append(mat[np.tril_indices(dim, k=0)])
    return CMvec
## Do grid search (if optimal hyperparameters are not known), then training and prediction using KRR
## If doing grid search for optimal parameters use small training set size, like 1k (takes forever otherwise)
def krr(x, y, nonHatoms):
    """Kernel ridge regression: optional grid search, then train and evaluate.

    Args:
        x: list/array of descriptor vectors.
        y: target property values.
        nonHatoms: per-molecule non-H atom counts, used to stratify the split.

    Returns:
        (y_pred, y_test): predictions and reference values for the test set.
    """
    # Python 3 fix: the original mixed Python 2 eval-style input() and
    # raw_input(); input() now returns a string, so compare text explicitly.
    do_search = input('Do grid search for optimal hyperparameters? [True/False]\n').strip().lower() == 'true'
    if do_search:
        inp5 = input('Provide kernel. [laplacian/rbf]\n').split()
        # Keep the training set small (10%): the grid search is expensive.
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.9, stratify=nonHatoms)
        kr = GridSearchCV(KernelRidge(kernel=inp5[0]), cv=5,
                          param_grid={"alpha": np.logspace(-11, -1, 11),
                                      "gamma": np.logspace(-9, -3, 7)})
        kr.fit(x_train, y_train)
        print(kr.best_params_)
    else:
        inp5 = input('Provide kernel and hyperparameters. [kernel alpha gamma]\n').split()
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, stratify=nonHatoms)
        kr = KernelRidge(kernel=inp5[0], alpha=float(inp5[1]), gamma=float(inp5[2]))
        kr.fit(x_train, y_train)
    y_pred = kr.predict(x_test)
    mae = MAE(y_test, y_pred)
    rmse = np.sqrt(MSE(y_test, y_pred))
    r2 = R2(y_test, y_pred)
    # Print mean absolute error and root mean squared error
    print('Mean absolute error: ' + repr(mae) + ', Root mean squared error: ' + repr(rmse) +
          ', R2-score: ' + repr(r2))
    return y_pred, y_test
def learning_curve(x, y, nonHatoms):
    """Train at several training-set sizes and record test-set errors.

    Args:
        x: list/array of descriptor vectors.
        y: target property values.
        nonHatoms: per-molecule non-H atom counts, used to stratify the split.

    Returns:
        (y_pred, y_test, mae, rmse, sample_sizes) where predictions come from
        the largest sample-size run and the error lists follow sample_sizes.
    """
    # Python 3 fix: raw_input() no longer exists; input() returns a string.
    inp5 = input('Provide kernel and hyperparameters. [kernel alpha gamma]\n').split()
    mae, rmse, r2 = [], [], []
    sample_sizes = [50, 200, 1000, 3000, 9000]
    kr = KernelRidge(kernel=inp5[0], alpha=float(inp5[1]), gamma=float(inp5[2]))
    for size in sample_sizes:
        # Hold out everything beyond `size` training samples.
        x_train, x_test, y_train, y_test = train_test_split(
            x, y, test_size=1 - float(size) / len(y), stratify=nonHatoms)
        kr.fit(x_train, y_train)
        y_pred = kr.predict(x_test)
        mae.append(MAE(y_test, y_pred))
        rmse.append(np.sqrt(MSE(y_test, y_pred)))
        r2.append(R2(y_test, y_pred))
        print('Mean absolute error: ' + repr(mae[-1]) + ', Root mean squared error: '
              + repr(rmse[-1]) + ', R2-score: ' + repr(r2[-1]))
    return y_pred, y_test, mae, rmse, sample_sizes
## The main routine and plotting
def main():
    """Interactive driver: build a descriptor, train KRR, and plot results."""
    # Just some plot settings
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif', size=14)
    plt.rc('xtick', direction='in')
    # Preprocess data
    datasize = 10000
    atoms = ['H', 'C', 'N', 'O', 'F']
    Z = [1, 6, 7, 8, 9]
    natoms, Ea, dipmom, polar, gap, atomlist, coords, nonHatoms = preprocess(datasize, atoms)
    # Python 3 fix: the original used the removed raw_input() and compared
    # eval-style input() against True/False; read plain strings instead.
    inp1 = input('Which descriptor? [CM/BoB/MBTR]\n').strip()
    if inp1 == 'CM':
        descriptor = coulomb(natoms, atomlist, coords, atoms, Z)
    elif inp1 == 'BoB':
        descriptor = bob(atomlist, coords, atoms, Z)
    elif inp1 == 'MBTR':
        descriptor = mbtr(atomlist, coords, atoms, Z)
    inp2 = input('Which property? [Ea/gap/polar/dipmom]\n').strip()
    plt.figure()
    if inp2 == 'Ea':
        prop = Ea
        plt.title(r'Atomization energy (eV)')
        plt.xlabel(r'$\Delta_\mathrm{at}E^\mathrm{DFT}$ (eV)')
        plt.ylabel(r'$\Delta_\mathrm{at}E^\mathrm{KRR}$ (eV)')
    elif inp2 == 'gap':
        prop = gap
        plt.title(r'HOMO-LUMO gap (eV)')
        plt.xlabel(r'$\Delta\varepsilon^\mathrm{DFT}$ (eV)')
        plt.ylabel(r'$\Delta\varepsilon^\mathrm{KRR}$ (eV)')
    elif inp2 == 'polar':
        prop = polar
        plt.title(r'Isotropic polarizability (\r{A}$^3$)')
        plt.xlabel(r'$\alpha^\mathrm{DFT}$ (\r{A}$^3$)')
        plt.ylabel(r'$\alpha^\mathrm{KRR}$ (\r{A}$^3$)')
    elif inp2 == 'dipmom':
        prop = dipmom
        plt.title(r'Dipole moment (e\r{A})')
        plt.xlabel(r'$\mu^\mathrm{DFT}$ (e\r{A})')
        plt.ylabel(r'$\mu^\mathrm{KRR}$ (e\r{A})')
    plot_curve = input('Plot learning curve? [True/False]\n').strip().lower() == 'true'
    if plot_curve:
        # Train
        y_pred, y_test, mae, rmse, sample_sizes = learning_curve(descriptor, prop, nonHatoms)
        # NOTE: output filenames are hard-coded regardless of the chosen
        # descriptor/property (kept from the original).
        np.savetxt('dipmom_BoB.dat', np.c_[y_test, y_pred])
        np.savetxt('dipmom_BoB_lc.dat', np.c_[sample_sizes, mae])
        # Plot learning curve
        plt.semilogx(sample_sizes, mae, 'o-', color='blue')
        plt.xlabel(r'Training set size')
        plt.ylabel(r'MAE')
    else:
        # Train, then plot predicted vs reference values with a y=x guide.
        y_pred, y_test = krr(descriptor, prop, nonHatoms)
        plt.plot(y_test, y_pred, '.', color='blue')
        plt.plot(np.linspace(y_test.min(), y_test.max(), 1000),
                 np.linspace(y_test.min(), y_test.max(), 1000), 'k--')
    plt.show()
if __name__ == '__main__':
main()
| 15,107 | 5,862 |
from Core.Logger import log
class MethodException(Exception):
    """Raised when a request uses an unsupported HTTP method."""

    def __init__(self, method):
        # Build the message once so it is both logged and carried by the
        # exception (the original never called super().__init__, leaving
        # str(exc) empty).
        message = 'Request method error, not support {} method, please choose ["GET", "POST","PUT","DELETE"]'.format(method)
        super().__init__(message)
        self.method = method
        log(message)
from rinse import NS_MAP
from rinse.util import safe_parse_string
from soap import SoapFault
class SoapResponse:
    """Parsed SOAP response envelope.

    Parses the response body, raises :class:`SoapFault` for unparsable or
    fault responses, and collects the document's extra namespaces under
    ``ns0``, ``ns1``, ... keys.
    """

    def __init__(self, response):
        self._response = response
        # Parse response
        try:
            self._doc = safe_parse_string(response.content)
            self._body = self._doc.xpath(
                "/soapenv:Envelope/soapenv:Body", namespaces=NS_MAP)[0]
        except Exception as err:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # propagate; chain the parser error for easier debugging.
            raise SoapFault("ResponseParseError", "Cannot parse response") from err
        self._fault = self._body.find("soapenv:Fault", NS_MAP)
        if self._fault is not None:
            raise SoapFault(self._fault.find("faultcode").text,
                            self._fault.find("faultstring").text)
        # Get and set Allegro API namespaces
        self._ns = NS_MAP.copy()
        for i, uri in enumerate(self._doc.nsmap.values()):
            if uri != NS_MAP["soapenv"]:
                self._ns["ns{}".format(i)] = uri
from soap.response.item_list import *
| 981 | 287 |
import json
import time
import argparse
import os
import datetime
from os import path
import numpy as np
import torch
from torch import autograd, optim, nn
from torch.autograd import Variable
import torch.nn.functional as F
import nets
import sampling
import rewards_lib
import alive_sieve
def render_action(t, s, prop, term):
    """Pretty-print one negotiation turn (first game of the batch only).

    Args:
        t: turn index; even turns belong to agent A, odd turns to agent B.
        s: State with pool, utilities, N and the previous message m_prev.
        prop: proposal tensor for the acting agent.
        term: batch of termination decisions; term[0][0] truthy means the
            agent accepted the standing proposal.
    """
    agent = t % 2
    speaker = 'A' if agent == 0 else 'B'
    utility = s.utilities[:, agent]
    print('  ', end='')
    if speaker == 'B':
        # Indent agent B's output so the two agents form separate columns.
        print('                                   ', end='')
    if term[0][0]:
        print(' ACC')
    else:
        # Previous message token string, e.g. "001122"
        print(' ' + ''.join([str(v) for v in s.m_prev[0].view(-1).tolist()]), end='')
        # utility:proposal/pool for each of the three item types
        print(' %s:%s/%s %s:%s/%s %s:%s/%s' % (
            utility[0][0], prop[0][0], s.pool[0][0],
            utility[0][1], prop[0][1], s.pool[0][1],
            utility[0][2], prop[0][2], s.pool[0][2],
        ), end='')
        print('')
    if t + 1 == s.N[0]:
        print('  [out of time]')
def save_model(model_file, agent_models, agent_opts, start_time, episode):
    """Atomically checkpoint both agents' models and optimizers.

    Writes to ``model_file + '.tmp'`` first and then renames over the target,
    so a crash mid-write cannot corrupt an existing checkpoint.

    Args:
        model_file: destination path of the checkpoint.
        agent_models: two-element sequence of agent models.
        agent_opts: two-element sequence of matching optimizers.
        start_time: wall-clock time training started (elapsed time is stored).
        episode: current episode number.
    """
    state = {}
    for i in range(2):
        state['agent%s' % i] = {}
        state['agent%s' % i]['model_state'] = agent_models[i].state_dict()
        state['agent%s' % i]['opt_state'] = agent_opts[i].state_dict()
    state['episode'] = episode
    state['elapsed_time'] = time.time() - start_time
    with open(model_file + '.tmp', 'wb') as f:
        torch.save(state, f)
    # os.replace (unlike os.rename) also overwrites an existing target on
    # Windows, keeping the checkpoint swap atomic on every platform.
    os.replace(model_file + '.tmp', model_file)
def load_model(model_file, agent_models, agent_opts):
    """Restore both agents' model/optimizer state from a checkpoint file.

    Returns:
        (episode, start_time): the saved episode counter and a synthetic
        start time such that ``time.time() - start_time`` equals the
        recorded elapsed training time.
    """
    with open(model_file, 'rb') as f:
        state = torch.load(f)
    for idx in range(2):
        agent_state = state['agent%s' % idx]
        agent_models[idx].load_state_dict(agent_state['model_state'])
        agent_opts[idx].load_state_dict(agent_state['opt_state'])
    # create a kind of 'virtual' start_time
    start_time = time.time() - state['elapsed_time']
    return state['episode'], start_time
class State(object):
    """Holds the evolving state of a batch of negotiation games.

    Tensor attributes (all long): N (batch,), pool (batch, 3),
    utilities (batch, 2, 3), last_proposal (batch, 3), m_prev (batch, 6).
    """

    def __init__(self, N, pool, utilities):
        """Build the initial state from sampled games.

        Args:
            N: (batch,) tensor of per-game turn limits.
            pool: (batch, 3) tensor of item counts.
            utilities: pair of (batch, 3) tensors, one per agent.
        """
        batch_size = N.size()[0]
        self.N = N
        self.pool = pool
        self.utilities = torch.zeros(batch_size, 2, 3).long()
        self.utilities[:, 0] = utilities[0]
        self.utilities[:, 1] = utilities[1]
        self.last_proposal = torch.zeros(batch_size, 3).long()
        self.m_prev = torch.zeros(batch_size, 6).long()

    def cuda(self):
        """Move every tensor attribute onto the GPU, in place."""
        for name in ('N', 'pool', 'utilities', 'last_proposal', 'm_prev'):
            setattr(self, name, getattr(self, name).cuda())

    def sieve_(self, still_alive_idxes):
        """Keep only the rows of still-running games (in-place filter)."""
        for name in ('N', 'pool', 'utilities', 'last_proposal', 'm_prev'):
            setattr(self, name, getattr(self, name)[still_alive_idxes])
def run_episode(
        batch,
        enable_cuda,
        enable_comms,
        enable_proposal,
        prosocial,
        agent_models,
        # batch_size,
        testing,
        render=False):
    """
    Play one batch of negotiation games to completion (hard cap of 10 turns,
    agents alternating each turn).

    turning testing on means, we disable stochasticity: always pick the argmax

    Returns: per-timestep action nodes (for REINFORCE), final rewards,
    per-game step counts, the alive masks needed to replay the sieving during
    the backward pass, per-agent entropy losses, and argmax-match statistics
    for the termination / utterance / proposal heads.
    """
    type_constr = torch.cuda if enable_cuda else torch
    batch_size = batch['N'].size()[0]
    s = State(**batch)
    if enable_cuda:
        s.cuda()
    # sieve tracks which games in the batch are still running, so dead games
    # are dropped from all per-step computation
    sieve = alive_sieve.AliveSieve(batch_size=batch_size, enable_cuda=enable_cuda)
    actions_by_timestep = []
    alive_masks = []
    # next two tensors wont be sieved, they will stay same size throughout
    # entire batch, we will update them using sieve.out_idxes[...]
    rewards = type_constr.FloatTensor(batch_size, 3).fill_(0)
    # default 10 = ran out of time without agreement
    num_steps = type_constr.LongTensor(batch_size).fill_(10)
    term_matches_argmax_count = 0
    utt_matches_argmax_count = 0
    utt_stochastic_draws = 0
    num_policy_runs = 0
    prop_matches_argmax_count = 0
    prop_stochastic_draws = 0
    # one scalar entropy-regularization loss accumulator per agent
    entropy_loss_by_agent = [
        Variable(type_constr.FloatTensor(1).fill_(0)),
        Variable(type_constr.FloatTensor(1).fill_(0))
    ]
    if render:
        print(' ')
    for t in range(10):
        # agents alternate turns: agent 0 on even t, agent 1 on odd t
        agent = t % 2
        agent_model = agent_models[agent]
        if enable_comms:
            _prev_message = s.m_prev
        else:
            # we dont strictly need to blank them, since they'll be all zeros anyway,
            # but defense in depth and all that :)
            _prev_message = type_constr.LongTensor(sieve.batch_size, 6).fill_(0)
        if enable_proposal:
            _prev_proposal = s.last_proposal
        else:
            # we do need to blank this one though :)
            _prev_proposal = type_constr.LongTensor(sieve.batch_size, 3).fill_(0)
        # NOTE(review): _prev_message is computed above but the call below
        # passes s.m_prev directly — when comms are disabled the blanked
        # message is never used; verify whether m_prev=Variable(_prev_message)
        # was intended.
        nodes, term_a, s.m_prev, this_proposal, _entropy_loss, \
            _term_matches_argmax_count, _utt_matches_argmax_count, _utt_stochastic_draws, \
            _prop_matches_argmax_count, _prop_stochastic_draws = agent_model(
            pool=Variable(s.pool),
            utility=Variable(s.utilities[:, agent]),
            m_prev=Variable(s.m_prev),
            prev_proposal=Variable(_prev_proposal),
            testing=testing
        )
        entropy_loss_by_agent[agent] += _entropy_loss
        actions_by_timestep.append(nodes)
        term_matches_argmax_count += _term_matches_argmax_count
        num_policy_runs += sieve.batch_size
        utt_matches_argmax_count += _utt_matches_argmax_count
        utt_stochastic_draws += _utt_stochastic_draws
        prop_matches_argmax_count += _prop_matches_argmax_count
        prop_stochastic_draws += _prop_stochastic_draws
        # only render when game 0 of the original batch is still alive
        if render and sieve.out_idxes[0] == 0:
            render_action(
                t=t,
                s=s,
                term=term_a,
                prop=this_proposal
            )
        new_rewards = rewards_lib.calc_rewards(
            t=t,
            s=s,
            term=term_a
        )
        # scatter the sieved rewards back into the full-size rewards tensor
        rewards[sieve.out_idxes] = new_rewards
        s.last_proposal = this_proposal
        # a game ends when an agent terminates (accepts) or its turn limit N is hit
        sieve.mark_dead(term_a)
        sieve.mark_dead(t + 1 >= s.N)
        alive_masks.append(sieve.alive_mask.clone())
        sieve.set_dead_global(num_steps, t + 1)
        if sieve.all_dead():
            break
        # shrink state and sieve down to the games still running
        s.sieve_(sieve.alive_idxes)
        sieve.self_sieve_()
    if render:
        print('  r: %.2f' % rewards[0].mean())
        print(' ')
    return actions_by_timestep, rewards, num_steps, alive_masks, entropy_loss_by_agent, \
        term_matches_argmax_count, num_policy_runs, utt_matches_argmax_count, utt_stochastic_draws, \
        prop_matches_argmax_count, prop_stochastic_draws
def safe_div(a, b):
    """
    returns a / b, unless b is zero, in which case returns 0
    this is primarily for usage in cases where b might be systemtically zero, eg because comms are disabled or similar
    """
    if b == 0:
        return 0
    return a / b
def run(enable_proposal, enable_comms, seed, prosocial, logfile, model_file, batch_size,
        term_entropy_reg, utterance_entropy_reg, proposal_entropy_reg, enable_cuda,
        no_load, testing, test_seed, render_every_seconds):
    """
    Train two negotiation agents against each other with REINFORCE,
    periodically evaluating on held-out test batches, logging stats as JSON
    lines, and checkpointing every ~30 seconds.

    testing option will:
    - use argmax, ie disable stochastic draws
    - not run optimizers
    - not save model
    """
    type_constr = torch.cuda if enable_cuda else torch
    if seed is not None:
        np.random.seed(seed)
        torch.manual_seed(seed)
        train_r = np.random.RandomState(seed)
    else:
        train_r = np.random
    test_r = np.random.RandomState(test_seed)
    # held-out evaluation games; training batches are filtered against these hashes
    test_batches = sampling.generate_test_batches(batch_size=batch_size, num_batches=5, random_state=test_r)
    test_hashes = sampling.hash_batches(test_batches)
    episode = 0
    start_time = time.time()
    agent_models = []
    agent_opts = []
    for i in range(2):
        model = nets.AgentModel(
            enable_comms=enable_comms,
            enable_proposal=enable_proposal,
            term_entropy_reg=term_entropy_reg,
            utterance_entropy_reg=utterance_entropy_reg,
            proposal_entropy_reg=proposal_entropy_reg
        )
        if enable_cuda:
            model = model.cuda()
        agent_models.append(model)
        agent_opts.append(optim.Adam(params=agent_models[i].parameters()))
    if path.isfile(model_file) and not no_load:
        episode, start_time = load_model(
            model_file=model_file,
            agent_models=agent_models,
            agent_opts=agent_opts)
        print('loaded model')
    elif testing:
        print('')
        print('ERROR: must have loadable model to use --testing option')
        print('')
        return
    last_print = time.time()
    rewards_sum = type_constr.FloatTensor(3).fill_(0)
    steps_sum = 0
    count_sum = 0
    for d in ['logs', 'model_saves']:
        if not path.isdir(d):
            os.makedirs(d)
    f_log = open(logfile, 'w')
    f_log.write('meta: %s\n' % json.dumps({
        'enable_proposal': enable_proposal,
        'enable_comms': enable_comms,
        'prosocial': prosocial,
        'seed': seed
    }))
    last_save = time.time()
    # exponential moving average of rewards, used as the REINFORCE baseline
    baseline = type_constr.FloatTensor(3).fill_(0)
    term_matches_argmax_count = 0
    num_policy_runs = 0
    utt_matches_argmax_count = 0
    utt_stochastic_draws = 0
    prop_matches_argmax_count = 0
    prop_stochastic_draws = 0
    while True:
        render = time.time() - last_print >= render_every_seconds
        batch = sampling.generate_training_batch(batch_size=batch_size, test_hashes=test_hashes, random_state=train_r)
        actions, rewards, steps, alive_masks, entropy_loss_by_agent, \
            _term_matches_argmax_count, _num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws, \
            _prop_matches_argmax_count, _prop_stochastic_draws = run_episode(
                batch=batch,
                enable_cuda=enable_cuda,
                enable_comms=enable_comms,
                enable_proposal=enable_proposal,
                agent_models=agent_models,
                prosocial=prosocial,
                render=render,
                testing=testing)
        term_matches_argmax_count += _term_matches_argmax_count
        utt_matches_argmax_count += _utt_matches_argmax_count
        utt_stochastic_draws += _utt_stochastic_draws
        num_policy_runs += _num_policy_runs
        prop_matches_argmax_count += _prop_matches_argmax_count
        prop_stochastic_draws += _prop_stochastic_draws
        if not testing:
            for i in range(2):
                agent_opts[i].zero_grad()
            reward_loss_by_agent = [0, 0]
            baselined_rewards = rewards - baseline
            rewards_by_agent = []
            for i in range(2):
                if prosocial:
                    # both agents optimize the shared (column 2) reward
                    rewards_by_agent.append(baselined_rewards[:, 2])
                else:
                    rewards_by_agent.append(baselined_rewards[:, i])
            # replay the per-timestep sieving so each stored action node is
            # matched with the reward of the game it belonged to
            sieve_playback = alive_sieve.SievePlayback(alive_masks, enable_cuda=enable_cuda)
            for t, global_idxes in sieve_playback:
                agent = t % 2
                if len(actions[t]) > 0:
                    for action in actions[t]:
                        _rewards = rewards_by_agent[agent]
                        _reward = _rewards[global_idxes].float().contiguous().view(
                            sieve_playback.batch_size, 1)
                        # REINFORCE: maximize log-prob weighted by baselined reward
                        _reward_loss = - (action * Variable(_reward))
                        _reward_loss = _reward_loss.sum()
                        reward_loss_by_agent[agent] += _reward_loss
            for i in range(2):
                loss = entropy_loss_by_agent[i] + reward_loss_by_agent[i]
                loss.backward()
                agent_opts[i].step()
        rewards_sum += rewards.sum(0)
        steps_sum += steps.sum()
        baseline = 0.7 * baseline + 0.3 * rewards.mean(0)
        count_sum += batch_size
        if render:
            # run the held-out test batches (argmax, no learning) and report
            test_rewards_sum = 0
            for test_batch in test_batches:
                actions, test_rewards, steps, alive_masks, entropy_loss_by_agent, \
                    _term_matches_argmax_count, _num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws, \
                    _prop_matches_argmax_count, _prop_stochastic_draws = run_episode(
                        batch=test_batch,
                        enable_cuda=enable_cuda,
                        enable_comms=enable_comms,
                        enable_proposal=enable_proposal,
                        agent_models=agent_models,
                        prosocial=prosocial,
                        render=True,
                        testing=True)
                test_rewards_sum += test_rewards[:, 2].mean()
            print('test reward=%.3f' % (test_rewards_sum / len(test_batches)))
            time_since_last = time.time() - last_print
            if prosocial:
                baseline_str = '%.2f' % baseline[2]
            else:
                baseline_str = '%.2f,%.2f' % (baseline[0], baseline[1])
            rewards_str = '%.2f,%.2f,%.2f' % (rewards_sum[0] / count_sum, rewards_sum[1] / count_sum, rewards_sum[2] / count_sum)
            # FIX: prop draws are zero when proposals are disabled or when
            # running with --testing (argmax only); use safe_div like the
            # utterance stat does, instead of dividing by zero
            print('e=%s train=%s b=%s games/sec %s avg steps %.4f argmaxp term=%.4f utt=%.4f prop=%.4f' % (
                episode,
                rewards_str,
                baseline_str,
                int(count_sum / time_since_last),
                steps_sum / count_sum,
                term_matches_argmax_count / num_policy_runs,
                safe_div(utt_matches_argmax_count, utt_stochastic_draws),
                safe_div(prop_matches_argmax_count, prop_stochastic_draws)
            ))
            f_log.write(json.dumps({
                'episode': episode,
                'avg_reward_0': rewards_sum[2] / count_sum,
                'test_reward': test_rewards_sum / len(test_batches),
                'avg_steps': steps_sum / count_sum,
                'games_sec': count_sum / time_since_last,
                'elapsed': time.time() - start_time,
                'argmaxp_term': (term_matches_argmax_count / num_policy_runs),
                'argmaxp_utt': safe_div(utt_matches_argmax_count, utt_stochastic_draws),
                'argmaxp_prop': safe_div(prop_matches_argmax_count, prop_stochastic_draws)
            }) + '\n')
            f_log.flush()
            last_print = time.time()
            steps_sum = 0
            rewards_sum.fill_(0)
            term_matches_argmax_count = 0
            num_policy_runs = 0
            utt_matches_argmax_count = 0
            utt_stochastic_draws = 0
            prop_matches_argmax_count = 0
            prop_stochastic_draws = 0
            count_sum = 0
        if not testing and time.time() - last_save >= 30.0:
            save_model(
                model_file=model_file,
                agent_models=agent_models,
                agent_opts=agent_opts,
                start_time=start_time,
                episode=episode)
            print('saved model')
            last_save = time.time()
        episode += 1
    f_log.close()
if __name__ == '__main__':
    # Command-line interface: exposes *negative* switches (--disable-...)
    # while run() takes the corresponding positive flags.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model-file', type=str, default='model_saves/model.dat')
    parser.add_argument('--batch-size', type=int, default=128)
    parser.add_argument('--test-seed', type=int, default=123, help='used for generating test game set')
    parser.add_argument('--seed', type=int, help='optional')
    parser.add_argument('--term-entropy-reg', type=float, default=0.05)
    parser.add_argument('--utterance-entropy-reg', type=float, default=0.001)
    parser.add_argument('--proposal-entropy-reg', type=float, default=0.05)
    parser.add_argument('--disable-proposal', action='store_true')
    parser.add_argument('--disable-comms', action='store_true')
    parser.add_argument('--disable-prosocial', action='store_true')
    parser.add_argument('--render-every-seconds', type=int, default=30)
    parser.add_argument('--testing', action='store_true', help='turn off learning; always pick argmax')
    parser.add_argument('--enable-cuda', action='store_true')
    parser.add_argument('--no-load', action='store_true')
    parser.add_argument('--name', type=str, default='', help='used for logfile naming')
    parser.add_argument('--logfile', type=str, default='logs/log_%Y%m%d_%H%M%S{name}.log')
    args = parser.parse_args()
    args.enable_comms = not args.disable_comms
    args.enable_proposal = not args.disable_proposal
    args.prosocial = not args.disable_prosocial
    params = vars(args)
    # substitute {name} (and any other argument) into the logfile template
    # first, then expand the strftime codes like %Y%m%d
    params['logfile'] = params['logfile'].format(**params)
    params['logfile'] = datetime.datetime.now().strftime(params['logfile'])
    # drop the CLI-only keys that run() does not accept
    for key in ('disable_comms', 'disable_proposal', 'disable_prosocial', 'name'):
        params.pop(key)
    run(**params)
| 16,969 | 5,634 |
"""Resources for making Nexar requests."""
import os, requests, re
from typing import Callable, Dict, Iterator
from requests_toolbelt import MultipartEncoder
# Nexar GraphQL endpoint and workflow-attachment upload endpoint.
NEXAR_URL = "https://api.nexar.com/graphql"
NEXAR_FILE_URL = "https://files.nexar.com/Upload/WorkflowAttachment"
class NexarClient:
    """Thin client for the Nexar GraphQL API and its file-upload endpoint."""

    def __init__(self, token) -> None:
        self.s = requests.session()
        self.s.headers.update({"token": token})
        # NOTE(review): requests.Session has no keep_alive attribute, so this
        # line is effectively a no-op — verify whether connection reuse was
        # meant to be disabled some other way (e.g. a Connection: close header).
        self.s.keep_alive = False

    def get_query(self, query: str, variables: Dict) -> dict:
        """Return the "data" section of the Nexar response for the query.

        Prints each GraphQL error message and exits (SystemExit) if the
        response carries errors; raises on transport failure.
        """
        try:
            r = self.s.post(
                NEXAR_URL,
                json={"query": query, "variables": variables},
            )
        except Exception as e:
            print(e)
            raise Exception("Error while getting Nexar response")
        response = r.json()
        if ("errors" in response):
            for error in response["errors"]: print(error["message"])
            raise SystemExit
        return response["data"]

    def upload_file(self, workspaceUrl: str, path: str, container: str) -> str:
        """Upload the file at *path* into a workspace container.

        Returns the raw response text from the upload endpoint.
        """
        try:
            # FIX: open the file in a with-block so the handle is always
            # closed (previously the file object was leaked)
            with open(path, 'rb') as file_obj:
                multipart_data = MultipartEncoder(
                    fields = {
                        'file': (os.path.basename(path), file_obj, 'text/plain'),
                        'workspaceUrl': workspaceUrl,
                        'container': container,
                    }
                )
                r = self.s.post(
                    NEXAR_FILE_URL,
                    data = multipart_data,
                    headers = {
                        'Content-Type': multipart_data.content_type,
                    }
                )
        except Exception as e:
            print(e)
            raise Exception("Error while uploading file to Nexar")
        return r.text

    class Node:
        """Iterator over the pages of a cursor-paginated GraphQL connection.

        Each iteration step issues one query and yields that page's "nodes";
        the cursor variable (the one used in "after: $<name>") is advanced
        automatically from pageInfo.endCursor.
        """

        def __init__(self, client, query: str, variables: Dict, f: Callable) -> None:
            self.client = client
            self.query = query
            self.variables = variables
            # f extracts the connection object ({"pageInfo", "nodes"}) from a response
            self.f = f
            # FIX: raw string — the pattern needs literal \s / \w regex
            # classes, not Python string escapes (SyntaxWarning otherwise)
            self.name = re.search(r"after[\s]*:[\s]*\$([\w]*)", query).group(1)

        def __iter__(self) -> Iterator:
            self.pageInfo = {"hasNextPage": True}
            return self

        def __next__(self):
            if (not self.pageInfo["hasNextPage"]): raise StopIteration
            data = self.client.get_query(self.query, self.variables)
            self.pageInfo = self.f(data)["pageInfo"]
            self.variables[self.name] = self.pageInfo["endCursor"]
            return self.f(data)["nodes"]

    def NodeIter(self, query: str, variables: dict, f: Callable) -> Iterator:
        """Return a page iterator over *query* bound to this client."""
        return NexarClient.Node(self, query, variables, f)
| 2,724 | 774 |
from django.contrib.auth import authenticate
from django.test import TestCase, override_settings
class TestAdminBackend(TestCase):
    """Tests for the settings-based admin authentication backend."""

    @override_settings(
        AUTHENTICATION_BACKENDS=[
            "utils.backend.admin_backends.SettingsBackend",
            "django.contrib.auth.backends.ModelBackend",
        ],
        ADMIN_LOGIN="admin",
        ADMIN_PASSWORD="admin",
    )
    def test_local_settings_admin_login(self):
        """The credentials from settings must yield a superuser/staff account."""
        user = authenticate(username="admin", password="admin")
        self.assertIsNotNone(user, "The user must be returned.")
        # FIX: assertIsNotNone passes even when the flag is False (False is
        # not None) — assertTrue actually verifies the permission bits.
        self.assertTrue(
            user.is_superuser, "User must have is_superuser permission"
        )
        self.assertTrue(user.is_staff, "User must have is_staff permission")
| 756 | 217 |
import numpy as np
import sys
import time
import mblas
# Python matrix multiplication (deliberately naive, used as the slow baseline)
def sgemm_py(A, B, C, n):
    """Accumulate A @ B into C; all three are flat row-major n*n buffers."""
    for row in range(n):
        row_off = row * n
        for inner in range(n):
            a_val = A[row_off + inner]
            inner_off = inner * n
            for col in range(n):
                C[row_off + col] += a_val * B[inner_off + col]
# Time measurement of Python sgemm
def measure_py(A, B, n):
    """Return the wall-clock seconds taken by one sgemm_py call."""
    out = np.zeros((n * n,), dtype=np.float32)
    start = time.perf_counter()
    sgemm_py(A, B, out, n)
    return time.perf_counter() - start
# Time measurement of Numpy sgemm
def measure_np(A, B, n):
    """Return the wall-clock seconds taken by one NumPy matmul of the flat n*n buffers."""
    lhs = A.reshape(n, n)
    rhs = B.reshape(n, n)
    start = time.perf_counter()
    _ = np.dot(lhs, rhs)
    return time.perf_counter() - start
# Time measurement of C sgemm
def measure_c(A, B, n):
    """Return the wall-clock seconds taken by one mblas.sgemm_c call."""
    out = np.zeros((n * n,), dtype=np.float32)
    start = time.perf_counter()
    mblas.sgemm_c(A, B, out, n)
    return time.perf_counter() - start
# Main function
def main():
    """Benchmark the Python, NumPy and C SGEMM variants and print speed ratios."""
    size = int(sys.argv[1])
    A = np.random.rand(size * size).astype(np.float32)
    B = np.random.rand(size * size).astype(np.float32)
    elapsed_py = measure_py(A, B, size)
    elapsed_np = measure_np(A, B, size)
    elapsed_c = measure_c(A, B, size)
    print("py / c: ", elapsed_py / elapsed_c)
    print("py / np: ", elapsed_py / elapsed_np)
    print("np / c: ", elapsed_np / elapsed_c)

if __name__ == "__main__":
    main()
| 1,560 | 642 |
import pytest
@pytest.fixture
def hello_world_fn():
    """Fixture: a zero-argument callable returning the canonical greeting."""
    message = 'hello world'
    def fn():
        return message
    return fn
@pytest.fixture
def greetings_fn():
    """Fixture: a callable that greets a required positional name."""
    def fn(name):
        greeting = 'hello %s' % name
        return greeting
    return fn
@pytest.fixture
def greetings_default_fn():
    """Fixture: a greeter whose name argument defaults to 'nobody'."""
    def fn(name='nobody'):
        greeting = 'hello %s' % name
        return greeting
    return fn
@pytest.fixture
def complex_fn():
    """Fixture: a callable mixing positional, defaulted and keyword arguments."""
    def fn(name, age=32, **kwargs):
        country = kwargs.get('country', 'nowhere')
        return '%s is %s years old and lives in %s' % (name, age, country)
    return fn
| 488 | 187 |