index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
11,600 | fc27f6db5b90e0be5617d3d735a5b718cab51c94 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Package metadata, handed to setup() as keyword arguments.
config = {
    'author': 'Jonah Glover',
    'author_email': 'me@jonah.wtf',
    'version': '0.1',
    'install_requires': ['nose'],
    'packages': ['baffin'],
    'scripts': [],
    'name': 'baffin',
}

setup(**config)
|
11,601 | edff4b461f82cad7a6461a2abc43010e8cf868bf |
import math
# Read an integer from the user; int() raises ValueError on non-numeric input,
# so the isinstance() check below is always True when this line succeeds.
var1 = int(input("Enter Non-Decimal Digit: ",))
if isinstance(var1, int):
    print("Rounded By 2: \n", round(var1, 2))
    print("Absolute Value: \n", abs(var1))
    # NOTE(review): math.sqrt raises ValueError for negative input -- unguarded.
    print("Square Root: \n", math.sqrt(var1))
|
11,602 | 46b11dbc8b940ce750ecff3859f77c8f0bf77dbd | import torch
import torchvision
import matplotlib.pyplot as plt
import numpy as np
def imshow(img):
    """Show a (C, H, W) image tensor with matplotlib (transposed to H, W, C)."""
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()
# The 20 PASCAL VOC categories; a box's class label is its index in this list.
classes = ['aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat',
           'chair', 'cow', 'diningtable', 'dog',
           'horse', 'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor']


def VOC_DataLoader(dataset, batch_size=1, shuffle=False):
    """Batch a VOC detection dataset into YOLO-style (7x7 grid) targets.

    Args:
        dataset: sequence of (image_tensor, annotation_dict) pairs following
            torchvision's VOCDetection layout: annotation_dict['annotation']
            holds 'object' (a list of dicts with 'bndbox' and 'name') and
            'size' (original image 'width'/'height').
            NOTE(review): torchvision may return a single dict instead of a
            list when an image has exactly one object -- confirm upstream.
        batch_size: number of images per emitted batch.
        shuffle: accepted for API compatibility; currently unused.

    Returns:
        [batched_images, batched_annotations]: batched_images is a list of
        stacked image tensors; batched_annotations is a parallel list of
        per-image lists of float32 tensors
        [grid_row, grid_col, x_offset, y_offset, width, height, class_index],
        all spatial values normalized to the original image size.
    """
    return_anno = []
    return_image = []
    return_loader = []
    t_image = []
    t_out_annotation = []
    for i, data in enumerate(dataset, 0):
        images, annotations = data
        objects = annotations['annotation']['object']
        t_image.append(images)
        per_image_targets = []
        for obj in objects:
            bbox = obj['bndbox']
            name = obj['name']
            # -1 marks an unrecognized category name.
            label_index = classes.index(name) if name in classes else -1
            size = annotations['annotation']['size']
            width = int(size['width'])
            height = int(size['height'])
            x_min = int(bbox['xmin'])
            x_max = int(bbox['xmax'])
            y_min = int(bbox['ymin'])
            y_max = int(bbox['ymax'])
            # Box center and size, normalized by the original image size.
            x_c = (x_min + x_max) / 2 / width
            y_c = (y_min + y_max) / 2 / height
            w_s = (x_max - x_min) / width
            h_s = (y_max - y_min) / height
            # Responsible cell in the 7x7 grid and the center offset
            # within that cell (both derived from the normalized center).
            grid_r = int(y_c * 7)
            grid_c = int(x_c * 7)
            y_off = y_c * 7 - grid_r
            x_off = x_c * 7 - grid_c
            target = torch.tensor(
                [grid_r, grid_c, x_off, y_off, w_s, h_s, label_index],
                dtype=torch.float32)
            per_image_targets.append(target)
        # Append exactly once per image. (The original used
        # objects.index(obj) == len(objects) - 1, which misfires when two
        # boxes compare equal and silently skips images with no objects.)
        t_out_annotation.append(per_image_targets)
        # Flush a batch when full, or at the end of the dataset.
        if len(t_image) == batch_size or i == len(dataset) - 1:
            t_b_image = torch.stack(t_image, dim=0)
            return_image.append(t_b_image)
            return_anno.append(t_out_annotation)
            t_image = []
            t_out_annotation = []
        if i % 500 == 499:
            print('Dataloader processed: {}'.format(i + 1))
    return_loader.append(return_image)
    return_loader.append(return_anno)
    return return_loader
|
11,603 | 08da2a3c445b3e542da048e38e250eae542a60a7 | """
Author: Tris1702
Github: https://github.com/Tris1702
Gmail: phuonghoand2001@gmail.com
Thank you so much!
"""
# Read N integers (possibly across several lines); reorder so that odd
# values (sorted descending) fill the odd-valued positions and even values
# (sorted ascending) fill the even-valued positions, preserving the parity
# pattern of the original sequence.
N = int(input())
le = []      # odd values ("le" = "lẻ")
chan = []    # even values ("chẵn")
sl = 0       # how many numbers have been read so far
arr = []     # original sequence, in input order
while sl < N:
    arrtmp = list(int(i) for i in input().split())
    for so in arrtmp:
        sl = sl + 1
        if so % 2 != 0:
            le.append(so)
        else:
            chan.append(so)
    arr.extend(arrtmp)
le.sort(reverse=True)
chan.sort()
i = 0
j = 0
for x in arr:
    if x % 2 != 0:
        print(le[i], end=' ')
        i = i + 1
    else:
        print(chan[j], end=' ')
        j = j + 1
print() |
11,604 | b220347accb4879f753248dfcd691e69885b1d38 | """
Python package `librusapi` provides functions
and objects for interfacing with Librus Synergia.
All of this is achieved using the `requests`
library for fetching data and `BeautifulSoup4`
for scraping the data
.. include:: ../README-pypi.rst
""" |
11,605 | 786314f0225cdcce75b80def15f17cda19c8c832 | import numpy as np
import npquad
# Demo / smoke test for the npquad extension dtype: construction from
# strings, coercion to and from native numeric types, array creation,
# elementwise arithmetic and ufuncs.
print("declaring quad from string '1.7'")
q1 = np.quad('1.7')
print("result: {0}".format(str(q1)))

f = float("1.7")
f32 = np.float32("1.7")
f64 = np.float64("1.7")
print("coercing floating point values 1.7 to quad")
q3 = np.quad(f)
print("python float: {0}".format(str(q3)))
q4 = np.quad(f32)
print("numpy float32: {0}".format(str(q4)))
q5 = np.quad(f64)
print("numpy float64: {0}".format(str(q5)))

i = int(2)
i8 = np.int8(2)
i16 = np.int16(2)
i32 = np.int32(2)
i64 = np.int64(2)
# Fixed: this section coerces the integer values declared above, but the
# original message claimed "floating point values 1.7".
print("coercing integer values 2 to quad")
q = np.quad("1.7")
q6 = np.quad(i)
print("python int: {0}".format(str(q6)))
q7 = np.quad(i8)
print("numpy int8: {0}".format(str(q7)))
q8 = np.quad(i16)
print("numpy int16: {0}".format(str(q8)))
q9 = np.quad(i32)
print("numpy int32: {0}".format(str(q9)))
q10 = np.quad(i64)
print("numpy int64: {0}".format(str(q10)))

print("coercing quad values 1.7")
f = float(q)
print("python float: {0}".format(str(f)))
f32 = np.float32(q)
print("numpy float32: {0}".format(str(f32)))
f64 = np.float64(q)
print("numpy float64: {0}".format(str(f64)))
i8 = np.int8(q)
print("numpy int8: {0}".format(str(i8)))
i16 = np.int16(q)
print("numpy int16: {0}".format(str(i16)))
i32 = np.int32(q)
print("numpy int32: {0}".format(str(i32)))
i64 = np.int64(q)
print("numpy int64: {0}".format(str(i64)))
q_ = np.quad(q)
print("numpy quad: {0}".format(str(q_)))

print("declaring empty array")
ar = np.empty(4, dtype=np.quad)
print(ar)
print("filling it with 1.7")
ar.fill(np.quad("1.7"))
print(ar)
print("assigning value to single elements:")
print("np.float64 2.3 at position 0")
ar[0] = np.float64(2.3)
print(ar)
print("np.int32 4 at position 1")
ar[1] = np.int32(4)
print(ar)
print("declaring zeros array")
# Fixed: the original called np.empty here, which leaves the memory
# uninitialized despite the "zeros" message.
ar = np.zeros(4, dtype=np.quad)
print(ar)
print("declaring ones array")
ar = np.ones(4, dtype=np.quad)
print(ar)

print("creating np.float64 array")
far = np.empty(4, dtype=np.float64)
far.fill(1.7)
print(far)
print("coercing to quad")
far_toq = far.astype(np.quad)
print(far_toq)
print("dtype:", far_toq.dtype)
print("size:", far_toq.nbytes)
print("creating np.int32 array")
# Fixed: the original declared this array with dtype=np.float64, which
# contradicts the message and the int->quad coercion being demonstrated.
iar = np.empty(4, dtype=np.int32)
iar.fill(2)
print(iar)
print("coercing to quad")
iar_toq = iar.astype(np.quad)
print(iar_toq)
print("dtype:", iar_toq.dtype)
print("size:", iar_toq.nbytes)

print("creating two random 2x2 quad matrices")
mat1 = np.random.rand(2, 2).astype(np.quad)
mat2 = np.random.rand(2, 2).astype(np.quad)
print(mat1)
print(mat2)
print("adding")
print(mat1 + mat2)
print(mat1.astype(np.float64) + mat2.astype(np.float64))
print("multiplying")
print(mat1 * mat2)
print(mat1.astype(np.float64) * mat2.astype(np.float64))
print("exponentiating")
print(mat1 ** mat2)
print(mat1.astype(np.float64) ** mat2.astype(np.float64))
print("matrix multiplying")
print(np.dot(mat1, mat2))
print(np.dot(mat1.astype(np.float64), mat2.astype(np.float64)))
print("multiplying 1st matrix by 2")
print(mat1 * 2)
print("squaring 1st matrix")
print(mat1 ** 2)

print("creating two random 2x2 matrices, quad and float64")
mat1 = np.random.rand(2, 2).astype(np.quad)
mat2 = np.random.rand(2, 2).astype(np.float64)
print(mat1)
print(mat2)
print("adding")
print(mat1 + mat2)
print("multiplying")
print(mat1 * mat2)
print("exponentiating")
print(mat1 ** mat2)

print("creating quad value 1.7")
q = np.quad("1.7")
print("floor")
print(np.floor(q))
print("ceil")
print(np.ceil(q))
print("rint")
print(np.rint(q))
print("square")
print(np.square(q))
print("sqrt")
print(np.sqrt(q))
print("exp")
print(np.exp(q))
print("log")
print(np.log(q))
|
11,606 | c7e9bebcc501ce0b661916fea5a58d57d4c30a79 | #!/usr/bin/env python
"""
Update the entire set of notes with frequency data.
"""
import glob
from typing import Dict, Tuple
from analysis import load_word_frequency_map
from library import DynamicInlineTableDict
from library import INDEX_NAME
from library import NoteLibrary
from library import write_toml
# Field name under which per-source frequency scores are stored in a note.
FREQUENCY_FIELD = 'frequency_scores'
# Sub-field name used for the anime frequency list.
ANIME_FREQUENCY_SUBFIELD = 'anime'
# Legacy field names stripped from every note during processing.
# TODO: Remove after running migrations
DEPRECATED_FIELDS = [
'frequency_highest',
'frequency_highest_source',
'highest_frequency',
'highest_frequency_source',
]
def calculate_highest_frequency(frequency_data: Dict[str, int]) -> Dict[str, int]:
    """
    From a source->score dict, find the lowest score (highest frequency) source-score pair.

    Returns None when frequency_data is empty. (The original used a
    1000000000 sentinel as "infinity", which silently dropped any score
    at or above that value; min() has no such limit.)
    """
    if not frequency_data:
        return None
    source, score = min(frequency_data.items(), key=lambda item: item[1])
    return {'source': source, 'score': score}
def main():
    """Attach word-frequency scores to every note in the TOML note files."""
    # Several { Word => frequency } maps.
    frequencies = {
        'anime_45k': load_word_frequency_map('lists/anime_45k_relevant_words.txt'),
        'leeds_15k': load_word_frequency_map('lists/leeds_15k_frequency.txt'),
        'novel_3k': load_word_frequency_map('lists/Japanese-Word-Frequency-List-1-3000.txt'),
        'wikipedia_10k': load_word_frequency_map('lists/wikipedia_10k.txt'),
    }
    # Human-readable sub-field name for each frequency list.
    frequency_list_names = {
        'anime_45k': ANIME_FREQUENCY_SUBFIELD,
        'leeds_15k': 'leeds',
        'novel_3k': 'novels',
        'wikipedia_10k': 'wikipedia',
    }
    print('==== Notes files ==== ')
    for filename in glob.glob('**/*.toml', recursive=True):
        if 'cardgen' in filename or 'temp/' in filename:
            continue  # XXX: Things here shouldn't be processed for now.
        try:
            notes = NoteLibrary.read_notes_from_toml_file(filename)
            note_count = len(notes[INDEX_NAME])
            freq_count = 0
            for note in notes[INDEX_NAME]:
                # First we clean the note of deprecated frequency fields.
                # These were fields that were renamed or discarded.
                for deprecated_field in DEPRECATED_FIELDS:
                    note.pop(deprecated_field, None)
                # Now we attach all of the word frequencies we know about the note.
                frequency_scores = {}
                for freq_name, freq_map in frequencies.items():
                    # Look up by kanji first, falling back to kana.
                    if note['kanji'] in freq_map:
                        current_score = freq_map[note['kanji']]
                    elif note['kana'] in freq_map:
                        current_score = freq_map[note['kana']]
                    else:
                        continue
                    human_name = frequency_list_names[freq_name].lower()
                    frequency_scores[human_name] = current_score
                if frequency_scores:
                    note[FREQUENCY_FIELD] = DynamicInlineTableDict(frequency_scores)
                    freq_count += 1
            print('{0: <50} : {2} / {1} notes'.format(filename, note_count, freq_count))
            write_toml(notes, filename)
        except Exception as e:
            # Best-effort per file: report and keep processing the rest.
            print('Error processing file: {0}'.format(filename))
            print(e)


if __name__ == '__main__':
    main()
|
11,607 | 030efd8fbe2c5272f1f106e4ab32f956ac112cf0 | # Copyright 2019 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Pull entry point.
This command will pull the log files from a remote instance for AVD troubleshooting.
"""
from __future__ import print_function
import logging
import os
import subprocess
import tempfile
from acloud import errors
from acloud.internal import constants
from acloud.internal.lib import utils
from acloud.internal.lib.ssh import Ssh
from acloud.internal.lib.ssh import IP
from acloud.list import list as list_instances
from acloud.public import config
from acloud.public import report
logger = logging.getLogger(__name__)
# Shell command used on the remote instance to enumerate log files.
_FIND_LOG_FILE_CMD = "find -L %s -type f" % constants.REMOTE_LOG_FOLDER
# Blacklist of files to skip when pulling logs (see FilterLogfiles).
_KERNEL = "kernel"
_IMG_FILE_EXTENSION = ".img"
def PullFileFromInstance(cfg, instance, file_name=None, no_prompts=False):
    """Pull file from remote CF instance.

    1. Download log files to temp folder.
    2. If only one file selected, display it on screen.
    3. Show the download folder for users.

    Args:
        cfg: AcloudConfig object.
        instance: list.Instance() object.
        file_name: String of file name.
        no_prompts: Boolean, True to skip the prompt about file streaming.

    Returns:
        A Report instance.
    """
    ssh = Ssh(ip=IP(ip=instance.ip),
              user=constants.GCE_USER,
              ssh_private_key_path=cfg.ssh_private_key_path,
              extra_args_ssh_tunnel=cfg.extra_args_ssh_tunnel)
    log_files = SelectLogFileToPull(ssh, file_name)
    download_folder = GetDownloadLogFolder(instance.name)
    PullLogs(ssh, log_files, download_folder)
    if len(log_files) == 1:
        DisplayLog(ssh, log_files[0], no_prompts)
    return report.Report(command="pull")
def PullLogs(ssh, log_files, download_folder):
    """Pull log files from remote instance via scp.

    Args:
        ssh: Ssh object.
        log_files: List of file paths in the remote instance.
        download_folder: String of download folder path.
    """
    for log_file in log_files:
        target_file = os.path.join(download_folder, os.path.basename(log_file))
        ssh.ScpPullFile(log_file, target_file)
    _DisplayPullResult(download_folder)
def DisplayLog(ssh, log_file, no_prompts=False):
    """Stream the content of a remote log file to the screen.

    Args:
        ssh: Ssh object.
        log_file: String of the log file path.
        no_prompts: Boolean, True to skip all prompts.
    """
    warning_msg = ("It will stream log to show on screen. If you want to stop "
                   "streaming, please press CTRL-C to exit.\nPress 'y' to show "
                   "log or read log by myself[y/N]:")
    if no_prompts or utils.GetUserAnswerYes(warning_msg):
        # tail -f -n +1 streams the whole file from the first line onward.
        ssh.Run("tail -f -n +1 %s" % log_file, show_output=True)
def _DisplayPullResult(download_folder):
    """Display messages to user after pulling log files.

    Args:
        download_folder: String of download folder path.
    """
    utils.PrintColorString(
        "Download logs to folder: %s \nYou can look into log files to check "
        "AVD issues." % download_folder)
def GetDownloadLogFolder(instance):
    """Get the download log folder according to instance name.

    Creates the folder under the system temp directory if needed.

    Args:
        instance: String, the name of instance.

    Returns:
        String of the download folder path.
    """
    log_folder = os.path.join(tempfile.gettempdir(), instance)
    if not os.path.exists(log_folder):
        os.makedirs(log_folder)
    logger.info("Download logs to folder: %s", log_folder)
    return log_folder
def SelectLogFileToPull(ssh, file_name=None):
    """Select one log file or all log files to download.

    1. Get all log file paths as selection list.
    2. Get user selected file path or user provided file name.

    Args:
        ssh: Ssh object.
        file_name: String of file name.

    Returns:
        List of selected file paths.

    Raises:
        errors.CheckPathError: Can't find log files.
    """
    log_files = GetAllLogFilePaths(ssh)
    if file_name:
        file_path = os.path.join(constants.REMOTE_LOG_FOLDER, file_name)
        if file_path in log_files:
            return [file_path]
        raise errors.CheckPathError("Can't find this log file(%s) from remote "
                                    "instance." % file_path)
    if len(log_files) == 1:
        return log_files
    if len(log_files) > 1:
        print("Multiple log files detected, choose any one to proceed:")
        return utils.GetAnswerFromList(log_files, enable_choose_all=True)
    raise errors.CheckPathError("Can't find any log file in folder(%s) from "
                                "remote instance." % constants.REMOTE_LOG_FOLDER)
def GetAllLogFilePaths(ssh):
    """Get the file paths of all log files on the remote instance.

    Args:
        ssh: Ssh object.

    Returns:
        List of all log file paths; empty when the remote folder is missing.
    """
    ssh_cmd = [ssh.GetBaseCmd(constants.SSH_BIN), _FIND_LOG_FILE_CMD]
    log_files = []
    try:
        files_output = utils.CheckOutput(" ".join(ssh_cmd), shell=True)
        log_files = FilterLogfiles(files_output.splitlines())
    except subprocess.CalledProcessError:
        # find(1) fails when the log folder does not exist; treat as no logs.
        logger.debug("The folder(%s) that running launch_cvd doesn't exist.",
                     constants.REMOTE_LOG_FOLDER)
    return log_files
def FilterLogfiles(files):
    """Filter out files that are not useful logs.

    Two rules filter out a file:
    1. File name is "kernel".
    2. File type is image "*.img".

    Args:
        files: List of file paths in the remote instance.

    Returns:
        List of log files.
    """
    # A comprehension avoids the original copy-then-remove pattern.
    return [file_path for file_path in files
            if os.path.basename(file_path) != _KERNEL
            and not file_path.endswith(_IMG_FILE_EXTENSION)]
def Run(args):
    """Run pull.

    After the pull command executes, return one Report instance; if there is
    no instance to pull from, the Report is empty.

    Args:
        args: Namespace object from argparse.parse_args.

    Returns:
        A Report instance.
    """
    cfg = config.GetAcloudConfig(args)
    if args.instance_name:
        instance = list_instances.GetInstancesFromInstanceNames(
            cfg, [args.instance_name])
        return PullFileFromInstance(cfg, instance[0], args.file_name, args.no_prompt)
    return PullFileFromInstance(cfg,
                                list_instances.ChooseOneRemoteInstance(cfg),
                                args.file_name,
                                args.no_prompt)
|
11,608 | d7328e7c91a66bce421e7551da9f0c1a62763624 | """
Main file for distributed training
"""
import sys
import torch
import fire
from functools import partial
from vidqa_code.extended_config import (
get_default_cfg,
get_ch_cfg,
key_maps,
CN,
update_from_dict,
post_proc_config,
)
from vidqa_code.dat_loader import get_data
from vidqa_code.mdl_selector import get_mdl_loss_eval
from utils.trn_utils import Learner, synchronize
import resource
# Raise the soft open-file limit to 2048 (hard limit unchanged); presumably
# to keep many dataloader workers from hitting EMFILE -- TODO confirm.
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
def get_name_from_inst(inst):
    """Return the class name of *inst* (recorded in cfg for bookkeeping)."""
    return inst.__class__.__name__
def learner_init(uid: str, cfg: CN) -> Learner:
    """Build data, model, loss, eval and optimizer, and wire them into a Learner."""
    mdl_loss_eval = get_mdl_loss_eval(cfg)
    get_default_net = mdl_loss_eval["mdl"]
    get_default_loss = mdl_loss_eval["loss"]
    get_default_eval = mdl_loss_eval["eval"]
    device = torch.device("cuda")
    data = get_data(cfg)
    comm = data.train_dl.dataset.comm
    mdl = get_default_net(cfg=cfg, comm=comm)
    loss_fn = get_default_loss(cfg, comm)
    loss_fn.to(device)
    eval_fn = get_default_eval(cfg, comm, device)
    eval_fn.to(device)
    opt_fn = partial(torch.optim.Adam, betas=(0.9, 0.99))
    # Unfreeze cfg to record the concrete component names.
    cfg.defrost()
    module_name = mdl
    cfg.mdl_data_names = CN(
        {
            "trn_data": get_name_from_inst(data.train_dl.dataset),
            "val_data": get_name_from_inst(data.valid_dl.dataset),
            "trn_collator": get_name_from_inst(data.train_dl.collate_fn),
            "val_collator": get_name_from_inst(data.valid_dl.collate_fn),
            "mdl_name": get_name_from_inst(module_name),
            "loss_name": get_name_from_inst(loss_fn),
            "eval_name": get_name_from_inst(eval_fn),
            "opt_name": opt_fn.func.__name__,
        }
    )
    cfg.freeze()
    learn = Learner(
        uid=uid,
        data=data,
        mdl=mdl,
        loss_fn=loss_fn,
        opt_fn=opt_fn,
        eval_fn=eval_fn,
        device=device,
        cfg=cfg,
    )
    if cfg.do_dist:
        mdl.to(device)
        mdl = torch.nn.parallel.DistributedDataParallel(
            mdl,
            device_ids=[cfg.local_rank],
            output_device=cfg.local_rank,
            broadcast_buffers=True,
            find_unused_parameters=True,
        )
    elif cfg.do_dp:
        mdl = torch.nn.DataParallel(mdl)
    # NOTE(review): indentation reconstructed from a flattened source; it is
    # ambiguous whether this .to(device) belonged inside the elif branch.
    mdl = mdl.to(device)
    return learn
def run_job(local_rank, num_proc, func, init_method, shard_id, num_shards, backend, cfg):
    """
    Runs a function from a child process.

    Args:
        local_rank (int): rank of the current process on the current machine.
        num_proc (int): number of processes per machine.
        func (function): function to execute on each of the process.
        init_method (string): method to initialize the distributed training.
            TCP initialization: requires a network address reachable from all
            processes followed by the port.
            Shared file-system initialization: makes use of a file system that
            is shared and visible from all machines. The URL should start with
            file:// and contain a path to a non-existent file on a shared file
            system.
        shard_id (int): the rank of the current machine.
        num_shards (int): number of overall machines for the distributed
            training job.
        backend (string): three distributed backends ('nccl', 'gloo', 'mpi') are
            supported, each with different capabilities. Details can be found
            here: https://pytorch.org/docs/stable/distributed.html
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    """
    # Initialize the process group. Fixed: the original signature lacked
    # shard_id/num_shards (documented above and passed by launch_job), and
    # computed rank = num_proc + local_rank, which is outside
    # [0, world_size) and would make init_process_group fail.
    world_size = num_proc * num_shards
    rank = shard_id * num_proc + local_rank
    torch.distributed.init_process_group(
        backend=backend, init_method=init_method, world_size=world_size, rank=rank,
    )
    torch.cuda.set_device(local_rank)
    func(cfg)
def launch_job(cfg, init_method, func, daemon=False):
    """
    Run 'func' on one or more GPUs, specified in cfg.

    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        init_method (str): initialization method to launch the job with multiple
            devices.
        func (function): job to run on GPU(s)
        daemon (bool): The spawned processes' daemon flag. If set to True,
            daemonic processes will be created
    """
    if cfg.NUM_GPUS > 1:
        # spawn() prepends local_rank; the remaining args must match
        # run_job's signature.
        torch.multiprocessing.spawn(
            run_job,
            nprocs=cfg.NUM_GPUS,
            args=(
                cfg.NUM_GPUS,
                func,
                init_method,
                cfg.SHARD_ID,
                cfg.NUM_SHARDS,
                cfg.DIST_BACKEND,
                cfg,
            ),
            daemon=daemon,
        )
    else:
        func(cfg=cfg)
def main_dist(uid: str, **kwargs):
    """
    uid is a unique identifier for the experiment name.
    Can be kept same as a previous run; by default will start executing
    from the latest saved model.
    **kwargs: allows arbitrary keys of cfg to be changed.
    """
    assert "ds_to_use" in kwargs
    ds_to_use = kwargs["ds_to_use"]
    assert ds_to_use in ["asrl_qa", "ch_qa"]
    if ds_to_use == "asrl_qa":
        cfg = get_default_cfg()
    elif ds_to_use == "ch_qa":
        cfg = get_ch_cfg()
    else:
        raise NotImplementedError
    num_gpus = torch.cuda.device_count()
    cfg.num_gpus = num_gpus
    cfg.uid = uid
    cfg.cmd = sys.argv
    if num_gpus > 1:
        if "local_rank" in kwargs:
            # We are doing distributed parallel
            cfg.do_dist = True
            torch.distributed.init_process_group(backend="nccl", init_method="env://")
            torch.cuda.set_device(kwargs["local_rank"])
            synchronize()
        else:
            # We are doing data parallel
            cfg.do_dist = False
    # Update the config file depending on the command line args
    cfg = update_from_dict(cfg, kwargs, key_maps)
    cfg = post_proc_config(cfg)
    # Freeze the cfg, can no longer be changed
    cfg.freeze()
    # Initialize learner
    learn = learner_init(uid, cfg)
    # Train or Test
    if not (cfg.only_val or cfg.only_test or cfg.overfit_batch):
        learn.fit(epochs=cfg.train.epochs, lr=cfg.train.lr)
        if cfg.run_final_val:
            print("Running Final Validation using best model")
            learn.load_model_dict(resume_path=learn.model_file, load_opt=False)
            val_loss, val_acc, _ = learn.validate(
                db={"valid": learn.data.valid_dl}, write_to_file=True
            )
            print(val_loss)
            print(val_acc)
    else:
        if cfg.overfit_batch:
            learn.overfit_batch(cfg.train.epochs, 1e-4)
        if cfg.only_val:
            # Prefer an explicitly configured checkpoint over the default one.
            if cfg.train.resume_path != "":
                resume_path = cfg.train.resume_path
            else:
                resume_path = learn.model_file
            learn.load_model_dict(resume_path=resume_path)
            val_loss, val_acc, _ = learn.validate(
                db={"valid": learn.data.valid_dl}, write_to_file=True
            )
            print(val_loss)
            print(val_acc)
        if cfg.only_test:
            learn.load_model_dict(resume_path=learn.model_file, load_opt=False)
            test_loss, test_acc, _ = learn.validate(db=learn.data.test_dl)
            print(test_loss)
            print(test_acc)
    return


if __name__ == "__main__":
    fire.Fire(main_dist)
|
11,609 | eff8bdaa42dbd35c9189e1ac652c363a6ae9f7a8 | import numpy as np
import pandas as pd
import csv
df = pd.read_csv("/data/training/blackfriday.csv")
a = df.isnull().any()
c=0
b=[]
for col in df.columns:
if a[col] == True:
c+=1
b.append([col])
with open("/code/output/output.csv", "w",newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',')
writer.writerow(str(c))
for i in range(0,len(b)):
writer.writerow(b[i]) |
11,610 | 36bbc6eef108de032c0e8b75cc1d65a18c9bc179 | #!/usr/bin/python
# NOTE: Python 2 syntax (print statement). Three small string exercises.
##1st
# len(a)-1 is the index of a's last character; b[5:] slices from index 5 on.
a="kiran"
b="kriankumar"
len(a)-1
b[5:]
##2nd
# strip("\n") removes the trailing newline characters.
a="hello \n"
b=a.strip("\n")
b
##3rd q's
# replace() returns a new string; the original is unchanged (immutability).
a =" Hello World"
print a.replace("Hello", "Good Bye")
|
11,611 | 6d571d56804c3572afaf3d7893bbdde3792bc536 | # Libraries
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from scipy.stats import ranksums
# --- experiment configuration ---------------------------------------------
directory = 'com_range1_8n'  # root folder for inputs and saved figures
start = 1                    # first run index
qt = 100                     # number of runs per configuration
time = 300000                # last time-step included in the plots
boxplot = True               # True: median + IQR bands; False: means
sizes = [25, 50, 100, 200]   # population sizes, one figure column each

fig, axes = plt.subplots(ncols=len(sizes), nrows=2, figsize=[16, 5], sharex=True)
for i in range(len(sizes)):
    axes[1][i].set_xlabel('Time-steps')
# Show the x axis in thousands ("300k").
axes[1][0].xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: ('%.dk') % (x * 1e-3)))

if boxplot:
    col = ['#601A4A', '#63ACBE', '#EE442F', '#F9F4EC']
    ls = ["-", "-", "-", "--"]
    lab = ["PA-CE m=0.01", "PA-CE m=0.001", "Fixed optimal"]
    folders = ['highDensity2/103wpSIZEm10/', 'highDensity2/103wpSIZEm1/', 'optiHigh2/101pSIZEopti/']
else:
    col = ["green", "blue", "red", "orange"]
    ls = ["-", "-", "-", "--"]
    lab = ["m=0", "m=10^(-2)", "m=10^(-10)"]
    folders = ['highDensity/103wpSIZEm0/', 'highDensity/103wpSIZEm2/', 'highDensity/103wpSIZEm10/']

# Legend proxies for the two line styles drawn in the bottom row.
dummyLines = [plt.Line2D([], [], color="black", linestyle="-"),
              plt.Line2D([], [], color="black", linestyle=":")]
def drawFolder(folder, i, size, color, ls):
    """Aggregate the stat_<nb> runs found in *folder* per time-step and draw
    them into column *i* of the global `axes` grid.

    Returns the raw per-run values of column 4 (cluster metric) at the final
    time-step, used later for the rank-sum tests.
    """
    x = list(range(0, time + 1, 200))
    # NOTE(review): [[]]*n aliases a single list n times; it is safe here
    # only because entries are rebound (lst[c] = lst[c] + [...]) rather
    # than mutated in place.
    y1 = [[]] * len(x)
    y2 = [[]] * len(x)
    y3 = [[]] * len(x)
    w = [[]] * len(x)
    print(folder)
    for nb in range(start, start + qt):
        try:
            with open(folder + 'stat_' + str(nb)) as f:
                lines = f.readlines()
                c = 0
                for line in lines:
                    arr = np.fromstring(line, dtype=float, sep='\t')
                    tempX = arr[0]
                    if tempX <= time:
                        w[c] = w[c] + [arr[2]]
                        y1[c] = y1[c] + [arr[1]]
                        y2[c] = y2[c] + [arr[4]]
                        y3[c] = y3[c] + [arr[5]]
                        c += 1
        except Exception as e:
            # Best-effort: report the unreadable run and keep going.
            print(e, " ", folder + 'stat_' + str(nb))
    a = []
    b = []
    b_err = []
    b_med = []
    b_Q1 = []
    b_Q3 = []
    c = []
    wa = []
    for z in range(len(y1)):
        a.append(np.mean(y1[z]))
        b.append(np.mean(y2[z]))
        b_err.append(np.std(y2[z]))
        b_med.append(np.median(y2[z]))
        b_Q1.append(np.percentile(y2[z], 25))
        b_Q3.append(np.percentile(y2[z], 75))
        c.append(np.mean(y3[z]))
        wa.append(np.mean(w[z]))
    x = np.array(sorted(x))
    a = np.array(a)
    b = np.array(b)
    b_err = np.array(b_err)
    b_med = np.array(b_med)
    b_Q1 = np.array(b_Q1)
    b_Q3 = np.array(b_Q3)
    c = np.array(c)
    wa = np.array(wa)
    # NOTE(review): uses the global loop variable `s` rather than the local
    # `i`; all call sites pass i == s, so behavior is unchanged -- confirm.
    axes[0][i].set_title("N=" + str(sizes[s]))
    if not boxplot:
        axes[0][i].plot(x, b, color=color, linestyle=ls)
    else:
        axes[0][i].plot(x, b_med, color=color, linestyle=ls)
        axes[0][i].fill_between(x, b_Q1, b_Q3, alpha=0.2, facecolor=color)
    axes[1][i].plot(x, a, color=color, linestyle="-")
    axes[1][i].plot(x, c, color=color, linestyle=":")
    axes[0][i].set_ylim([0, 1.1])
    axes[1][i].set_ylim([0, size / 4])
    return np.array(y2[-1])
# Draw every (folder, size) combination and keep the final-time-step raw
# values for the statistical tests below.
final_arrays = {}
for f in range(len(folders)):
    final_arrays[folders[f]] = []
    for s in range(len(sizes)):
        folder = directory + '/' + folders[f].replace('SIZE', str(sizes[s]))
        final_arrays[folders[f]].append(drawFolder(folder, s, sizes[s], col[f], ls[f]))

# Emit a LaTeX table of Wilcoxon rank-sum tests (adaptive vs fixed optimal).
print("\\begin{tabular}{|c|c|cc|}")
print("\hline")
print("m & N & statistic & p-value\\\\")
print("\hline")
print("\multirow{4}{*}{0.01}")
for s in range(len(sizes)):
    stati, pval = ranksums(final_arrays[folders[0]][s], final_arrays[folders[2]][s])
    print("&", sizes[s], "&", stati, "&", pval, "\\\\")
print("\hline")
print("\multirow{4}{*}{0.001}")
for s in range(len(sizes)):
    stati, pval = ranksums(final_arrays[folders[1]][s], final_arrays[folders[2]][s])
    print("&", sizes[s], "&", stati, "&", pval, "\\\\")
print("\hline")
print("\end{tabular}")

axes[0][0].set_ylabel('Cluster metric')
axes[1][0].set_ylabel('Lexicon size')
axes[1][len(sizes)-1].legend(dummyLines, ["Clusters", "Free Agents"], loc='lower left', bbox_to_anchor=(0.55, 0.75), ncol=1, fancybox=True, shadow=True, fontsize=10)
lgd = axes[1][1].legend(axes[0][0].get_lines(), lab, loc='lower right', bbox_to_anchor=(2.2, -0.5), ncol=4, fancybox=True, shadow=True, fontsize=12)
art = [lgd]
if boxplot:
    plt.savefig(directory + '/timeEvol_high_paper.png', additional_artists=art, bbox_inches='tight')
else:
    plt.savefig(directory + '/timeEvol.png', additional_artists=art, bbox_inches='tight')
|
11,612 | 56d7bc93cbc8f440b546361885c413cfcd8090a4 | #
# Python Module with Class
# for Vectorized Backtesting
# of Momentum-based Strategies
#
# Python for Algorithmic Trading
# (c) Dr. Yves J. Hilpisch
# The Python Quants GmbH
#
from MomVectorBacktester import *
class MRVectorBacktester(MomVectorBacktester):
    ''' Class for the vectorized backtesting of
    Mean Reversion-based trading strategies.

    Attributes
    ==========
    symbol: str
        Google Finance symbol with which to work
    start: str
        start date for data retrieval
    end: str
        end date for data retrieval
    amount: int, float
        amount to be invested at the beginning
    tc: float
        proportional transaction costs (e.g. 0.5% = 0.005) per trade

    Methods
    =======
    get_data:
        retrieves and prepares the base data set
    run_strategy:
        runs the backtest for the mean reversion-based strategy
    plot_results:
        plots the performance of the strategy compared to the symbol
    '''

    def run_strategy(self, SMA, threshold):
        ''' Backtests the trading strategy.

        Returns (absolute performance, out-/underperformance), both rounded.
        '''
        data = self.data.copy()
        data['sma'] = data['price'].rolling(SMA).mean()
        data['distance'] = data['price'] - data['sma']
        # sell signals
        data['position'] = np.where(data['distance'] > threshold, -1, np.nan)
        # buy signals
        data['position'] = np.where(data['distance'] < -threshold,
                                    1, data['position'])
        # crossing of current price and SMA (zero distance)
        data['position'] = np.where(data['distance'] *
                                    data['distance'].shift(1) < 0,
                                    0, data['position'])
        data['position'] = data['position'].ffill().fillna(0)
        data['strategy'] = data['position'].shift(1) * data['return']
        # determine when a trade takes place
        trades = data['position'].diff().fillna(0) != 0
        # subtract transaction costs from return when a trade takes place
        # (fixed: chained-indexing assignment replaced by .loc)
        data.loc[trades, 'strategy'] -= self.tc
        data['creturns'] = self.amount * data['return'].cumsum().apply(np.exp)
        data['cstrategy'] = self.amount * \
            data['strategy'].cumsum().apply(np.exp)
        self.results = data
        # absolute performance of the strategy
        # (fixed: .ix was removed from pandas; use positional .iloc)
        aperf = self.results['cstrategy'].iloc[-1]
        # out-/underperformance of strategy
        operf = aperf - self.results['creturns'].iloc[-1]
        return round(aperf, 2), round(operf, 2)
if __name__ == '__main__':
    # Run the three demo backtests (same symbols/parameters as before).
    for symbol, tc, sma, thr in (
            ('GLD', 0.0, 50, 10),
            ('GLD', 0.0025, 50, 10),
            ('GDX', 0.0025, 50, 5)):
        backtester = MRVectorBacktester(symbol, '2010-1-1', '2016-10-31',
                                        10000, tc)
        print(backtester.run_strategy(SMA=sma, threshold=thr))
|
11,613 | 71e27e7832f8e90f9b6d9eef7944d7219306bd2d | # -*- coding: utf-8 -*-
import logging
_logger = logging.getLogger(__name__)
from odoo import models, fields, api, exceptions, _
class DocumentationManualIndex(models.Model):
    # One chapter/section entry in a manual's table of contents.
    _name = 'documentation.manual.index'
    _description = _('Index')
    # Section heading shown in the manual's index.
    name = fields.Char(string=_('Name'), required=True)
    # Free-form body text of the section.
    content = fields.Text(string=_('Content'))
    # Position of this entry within the manual's index.
    order = fields.Integer(string=_('Order'))
    # Whether this entry launches an app or a script (optional).
    execute_type = fields.Selection([('app',_('App')), ('script',_('Script'))], string=_('Execute type'))
    app_id = fields.Many2one('documentation.app', string=_('App'))
    script_id = fields.Many2one('documentation.script', string=_('Script'))
    # Parent manual; index entries are deleted with their manual.
    documentation_id = fields.Many2one('documentation.manual', ondelete='cascade', string=_('Manual'))
class DocumentationManual(models.Model):
    # A documentation manual: metadata plus an ordered list of index entries.
    # Inherits chatter (mail.thread) and activity tracking.
    _name = 'documentation.manual'
    _description = _('Manual')
    _inherit = ['mail.thread', 'mail.activity.mixin']
    name = fields.Char(string=_('Name'), required=True)
    # Thumbnail stored as an attachment.
    image_thumbnail = fields.Binary(string=_('Image'), attachment=True)
    category_id = fields.Many2one('documentation.category', string=_('Category'))
    tag_ids = fields.Many2many('documentation.tag', 'manual_tags_rel', 'documentation_id', 'tag_id', string=_('Tags'))
    # Defaults to the user creating the record.
    owner_id = fields.Many2one('res.users', default=lambda self: self.env.user, string=_('Owner'))
    description = fields.Html(string=_('Description'))
    # Table of contents (see DocumentationManualIndex).
    index_ids = fields.One2many('documentation.manual.index', 'documentation_id', string=_('Indexes'))
11,614 | 4784726844911d815c210b329cbf86d136f0acb9 | # Generated by Django 2.1.2 on 2018-11-11 08:55
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames Advert.names -> Advert.name
    # and Item.name -> Item.item. Generated files should not be hand-edited
    # beyond comments.
    dependencies = [
        ('advert', '0002_auto_20181111_0854'),
    ]
    operations = [
        migrations.RenameField(
            model_name='advert',
            old_name='names',
            new_name='name',
        ),
        migrations.RenameField(
            model_name='item',
            old_name='name',
            new_name='item',
        ),
    ]
|
def ArrayCompare(arr, arr2):
    """Return True if arr and arr2 contain the same elements with the same
    multiplicities (multiset equality), False otherwise.

    Unlike the previous version, the inputs are NOT mutated: the original
    code called arr.sort()/arr2.sort(), silently reordering the caller's
    lists as a side effect.
    """
    # Different lengths can never be equal as multisets.
    if len(arr) != len(arr2):
        return False
    # Compare sorted copies instead of sorting in place.
    return sorted(arr) == sorted(arr2)
if __name__ == '__main__':
    # Demo: print 'yes'/'no' for each pair depending on multiset equality.
    pairs = (
        ([1, 2, 5, 4, 0], [2, 4, 5, 0, 1]),
        ([1, 2, 5, 4, 0, 2, 1], [2, 4, 5, 0, 1, 1, 2]),
        ([1, 7, 1], [7, 7, 1]),
        ([2, 3, 4, 5], [2, 3, 4]),
    )
    for left, right in pairs:
        print('yes' if ArrayCompare(left, right) else 'no')
|
11,616 | c3346255f66064669eba98cec3c158a27d8d33bd | import libraries, json, random
import settings
import humongolus.widget as widget
import system.util as util
import datetime
class DateCtrl(widget.Date):
    # Date input widget rendered with a jQuery-UI datepicker; values are
    # displayed and parsed in 'Mon-DD-YYYY' (e.g. 'Jan-05-2014') format.
    def render(self, **kwargs):
        """Render the input with a unique element id plus the datepicker
        initialisation script prepended to the base widget markup."""
        # Random suffix keeps ids unique when the widget appears repeatedly.
        rand_id = random.randint(2000, 9999999)
        kwargs['value'] = self.__field__.__value__.strftime("%b-%d-%Y") if self.__field__.__value__ else ""
        kwargs['id'] = "id_%s%s" % (self.__field__.name, rand_id)
        r = super(DateCtrl, self).render(**kwargs)
        script = """
            <script>
                $(function() {
                    $( "#%s" ).datepicker({dateFormat:'M-dd-yy'});
                });
            </script> """ % self.__args__['id']
        return script + r
    def __clean__(self, value):
        """Parse *value* via the base widget; fall back to the datepicker's
        'Mon-DD-YYYY' display format. Raises and records an Exception when
        neither parse succeeds."""
        try:
            return super(DateCtrl, self).__clean__(value)
        except Exception:
            # Fixed: bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; narrow to Exception.
            try:
                return datetime.datetime.strptime(value, "%b-%d-%Y")
            except Exception:
                ex = Exception(u'The value given is not a valid datetime')
                self.__error__ = ex
                raise ex
|
11,617 | e4227687c6447358c2847fb2c9891305d8754cac | # Placeholder for future weather data processing
|
11,618 | 03e2e0d86302c3afcc25611184fbc895ce189ad3 | from InstagramAPI import InstagramAPI
from PIL import Image
from resizeimage import resizeimage
import schedule
import time, datetime
import random, os
from getdata import getQuotes, getPhotos, getTags, set_smart_hashtags
def prep_post(keywords, photo_key, quote_key, foldername):
    """Assemble the caption and photo path for the next Instagram post.

    Picks a random keyword, fetches hashtags/photo/quote via the getdata
    helpers, and returns (caption, photo_path). Removed the dead
    nowtime/year/month/day locals and the commented-out scheduling code
    that referenced them.
    """
    # set keyword
    keyword = random.sample(keywords, 1)[0]
    # get data
    smart_hashtags = True
    mytags = getTags(keyword, smart_hashtags, keywords)
    myphoto = getPhotos(photo_key, foldername)
    myquote = getQuotes(quote_key)
    # create caption: quote, five spacer dots, then the hashtag block
    mycaption = myquote + '\n.\n.\n.\n.\n.\n' + mytags
    print('\nYour Caption: ' , mycaption)
    print('\nThe path for your photo: ' , myphoto)
    return mycaption, myphoto
def upload(username, password, myproxy, mycaption, myphoto):
    """Resize the photo to 640x640 and post it to Instagram.

    Both steps are best-effort: failures are reported on stdout rather
    than raised, preserving the original behaviour. The bare `except:`
    clauses were narrowed to `except Exception` so SystemExit and
    KeyboardInterrupt are no longer swallowed.
    """
    # lets make sure the image is the correct demensions
    # If the image is already 640x640 then it does nothing
    # If the image is not, it crops/scales to 640x640 by the middle
    # TODO: prevent resizing period if image is already the correct ratio to save resources
    # TODO: Figure out a better way of resizing to make sure the entire picture is included
    # TODO: Perhaps create an option on how to handle images out of spec?
    print("Resizing the image!")
    try:
        with open(myphoto, 'r+b') as f:
            with Image.open(f) as image:
                cover = resizeimage.resize_cover(image, [640, 640])
                cover.save(myphoto, image.format)
                print("Successfully resized the image!")
    except Exception:
        print("Resizing Unsuccessful")
    # upload
    try:
        api = InstagramAPI(username, password)
        # If they have set a proxy, lets use it... otherwise
        # Lets notify them and then continue without it.
        if myproxy:  # proxy is not blank
            print("Using a proxy!")
            api.setProxy(proxy=myproxy)
        else:
            print("You are not using a proxy, this is not recommended!")
        api.login()
        api.uploadPhoto(myphoto, caption=mycaption)
        print('SUCCESS! Your photo has been posted!')
    except Exception:
        print('FAILURE! Your photo was not posted!')
|
11,619 | 2772f4c81e6766b7a07db1ffb723455447a6f4dd | #This is where a previously trained model is used to generate audio
#load model
#generate audio
#save audio |
11,620 | 62dce7e814a1f72dbbb3f640a7ae64643d784977 | __author__ = '@author'
from PythonCalendar.Moment import *
from PythonCalendar.TimeError import *
class TimeSpan:
    """An interval between two Moments, inclusive of both endpoints."""
    def __init__(self, start, stop):
        """
        Constructor
        :param start: Moment at which the span begins
        :param stop: Moment at which the span ends (must not precede start)
        :raises TimeError: if stop is before start
        """
        if stop.before(start):
            raise TimeError("The stop moment {0} cannot be before start moment {1}".format(stop, start))
        else:
            self.start = start
            self.stop = stop
    def __str__(self):
        return "Timespan({0}, {1})".format(self.start, self.stop)
    def __repr__(self):
        return "Timespan({0}, {1})".format(repr(self.start), repr(self.stop))
    def during_moment(self, moment):
        """
        Checks if the moment is during the timespan (endpoints count as inside)
        :param moment:
        :return: True/False
        """
        if self.start.before(moment) and self.stop.after(moment):
            return True
        elif self.start.equal(moment) or self.stop.equal(moment):
            return True
        else:
            return False
    def overlaps(self, other):
        """
        Checks if the two time spans overlap.
        Fixed: the original check was asymmetric and missed the case where
        `other` fully contains `self` (e.g. self=[5,6], other=[0,10]).
        :param other:
        :return: True/False
        """
        if self.start.equal(other.start) or self.stop.equal(other.stop):
            return True
        if self.start.before(other.start) and self.stop.after(other.start):
            return True
        # Symmetric case: other starts first and is still running when
        # self starts (covers other containing self).
        if other.start.before(self.start) and other.stop.after(self.start):
            return True
        if other.stop.after(self.start) and other.stop.before(self.stop):
            return True
        return False
    def set_duration(self, year=0, month=0, day=0, hour=0, minute=0):
        """
        Shifts the stop moment by the given offsets (via Moment.delta)
        :param year:
        :param month:
        :param day:
        :param hour:
        :param minute:
        :return: True on success
        :raises TimeError: if the shifted stop precedes start
        """
        self.stop.delta(year, month, day, hour, minute)
        if self.stop.before(self.start):
            raise TimeError("The stop Moment is before the start Moment")
        else:
            return True
|
11,621 | 0830d70649c7f91185704c8d0ca65fd793e8d479 | import socket
from geopy.distance import vincenty
from lmcp import LMCPFactory
from afrl.cmasi import EntityState
from afrl.cmasi import AirVehicleState
from afrl.cmasi import AirVehicleConfiguration
from afrl.cmasi import Play
from afrl.cmasi.SessionStatus import SessionStatus
from PyMASE import UAV, Location, get_args
from Shield import Shield
import string
from random import choice
# Latest received LMCP objects, keyed by vehicle id (stateMap/configMap)
# or by the play's UAV id (playMap); filled in by message_received().
stateMap = dict()
playMap = dict()
configMap = dict()
# Waypoint id -> (latitude, longitude) for the scenario map.
int_loc_map = { 1: (1.5467, -132.556), 2: (1.5467, -132.5623), 3: (1.5361, -132.5480), 4: (1.5467, -132.5479),\
                5: (1.5362, -132.5635), 6: (1.5361, -132.5550), 7: (1.5203, -132.5648), 8: (1.5196, -132.5562),\
                9: (1.5557, -132.5167), 10: (1.5484, -132.5121), 12: (1.5270, -132.5167), 13: (1.5300, -132.5091),\
                11: (1.5300, -132.5240), 15: (1.5400, -132.5240), 14: (1.5370, -132.5100) }
def int_to_bin(num):
    """Split num (expected 0..15) into four bits, most significant first.

    Returns the tuple (l4, l3, l2, l1) where l4 is the 8s bit.
    """
    bits = format(num, '04b')
    return (int(bits[0]), int(bits[1]), int(bits[2]), int(bits[3]))
def bin_to_int(l4, l3, l2, l1):
    """Combine four bit flags (MSB first) into an integer 0..15.

    Accepts booleans or 0/1 ints; a bit contributes its weight only when
    it compares equal to True.
    """
    total = 0
    for weight, bit in zip((8, 4, 2, 1), (l4, l3, l2, l1)):
        if bit == True:
            total += weight
    return total
def get_wp(play_map, wp):
    # Poll the LMCP socket until a (new) operator play/waypoint click for
    # UAV 1 arrives, then return its PlayID; fall back to the current `wp`
    # if no newer click appears within 400000 ms of scenario time.
    # NOTE(review): the `play_map` parameter is unused — the function reads
    # the module-level playMap instead; confirm this is intended.
    global time, playMap, message, sc_time
    # Wait for the very first play message for UAV 1.
    while playMap.get(1) is None:
        message = msg.getObject(sock.recv(2224))
        message_received(message)
        if sc_time - time > 400000:
            time = sc_time
            return wp
    message = msg.getObject(sock.recv(2224))
    message_received(message)
    click_time = playMap.get(1).get_Time()
    if click_time > time:
        # A click newer than our last handled one: take its waypoint.
        time = sc_time
        wp = playMap.get(1).get_PlayID()
        return wp
    # Otherwise keep polling until a newer click arrives (or we time out).
    while click_time < time:
        message = msg.getObject(sock.recv(2224))
        message_received(message)
        if sc_time - time > 400000:
            time = sc_time
            return wp
        click_time = playMap.get(1).get_Time()
    time = sc_time
    wp = playMap.get(1).get_PlayID()
    return wp
def move(uav, loc):
    """Send the UAV on a point search at (latitude, longitude) = loc."""
    uav.point_search(Location(loc[0], loc[1], 0, 0, 'A'))
def Fuel_monitor(uav):
    """Return 1 when the UAV's fuel level is low (0 < level <= 90), else 0.

    Fixed: the original left `fuel` unbound when get_energy() returned
    exactly 0 (neither branch ran), raising UnboundLocalError. A zero
    reading — presumably 'no data' given the original `!= 0` guard — now
    reports 0 (no low-fuel alarm); confirm that interpretation.
    """
    fuel_ = uav.get_energy()
    if fuel_ <= 90 and fuel_ != 0:
        return 1
    # fuel above 90, or an exact-zero reading: no alarm
    return 0
def detect_area(uav, coverage_location):
    """Return 1 if the UAV is within +/-0.002 deg of coverage_location.

    coverage_location is a (latitude, longitude) tuple; the UAV's current
    position is read from the module-level stateMap.
    Fixed: the longitude check tested '>= lower bound' twice instead of
    also testing '<= upper bound', so the detection box extended without
    limit in one direction.
    """
    uav_location = (stateMap.get(uav.id).get_Location().get_Latitude(), stateMap.get(uav.id).get_Location().get_Longitude())
    if (uav_location[0] <= coverage_location[0] + 0.002 and uav_location[0] >= coverage_location[0] - 0.002
            and uav_location[1] >= coverage_location[1] - 0.002 and uav_location[1] <= coverage_location[1] + 0.002):
        return 1
    else:
        return 0
def u1_sensor(uav):
    """Ground-sensor check for waypoint 1's area: 1 if the UAV is inside."""
    return detect_area(uav, (int_loc_map[1][0], int_loc_map[1][1]))
def u2_sensor(uav):
    """Ground-sensor check for a fixed area: 1 if the UAV is inside."""
    return detect_area(uav, (1.5263, -132.56))
def message_received(obj):
    """Dispatch an incoming LMCP object into the module-level state maps.

    Removed the unused `global commandMap` declaration and a duplicated
    SessionStatus branch that only assigned a dead local (`ss = obj`).
    """
    global stateMap
    global configMap
    global playMap
    global sc_time
    if isinstance(obj, SessionStatus):
        # Track the simulation clock from session status updates.
        sc_time = obj.get_ScenarioTime()
    if isinstance(obj, AirVehicleConfiguration.AirVehicleConfiguration):
        configMap[obj.get_ID()] = obj
    if isinstance(obj, AirVehicleState.AirVehicleState):
        stateMap[obj.get_ID()] = obj
    if isinstance(obj, Play.Play):
        playMap[obj.get_UAVID()] = obj
def connect():
    """Open a TCP connection to the local LMCP/AMASE server, return the socket."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_address = ("localhost", 5555)
    # Fixed: the original applied % to print's return value
    # (`print(...) % (...)`), which crashes on Python 3; format the
    # string first. Works identically on Python 2.
    print("connecting to %s port %s" % server_address)
    sock.connect(server_address)
    print("connected")
    return sock
# ---- module-level script: connect, build UAV handles, run mission loop ----
sock = connect()
msg = LMCPFactory.LMCPFactory()
time = 0
uav_n = 2
UAVs = []
for i in range(0,uav_n):
    UAVs.append(UAV(i+1,sock,stateMap))
# Block until an AirVehicleState has been received for every UAV.
flag = 0
while flag != 1:
    flag = 1
    message = msg.getObject(sock.recv(2224))
    message_received(message)
    for i in range(0,uav_n):
        UAVs[i].stateMap = stateMap
        if UAVs[i].stateMap.get(UAVs[i].id) is None:
            flag = 0
current_wp = int_loc_map[1]
current_wp_advers = (stateMap.get(2).get_Location().get_Latitude(), stateMap.get(2).get_Location().get_Longitude())
wp_advers = current_wp_advers
corrected_next_wp = 1
shield = Shield()
# Waypoints the adversary UAV (UAVs[1]) wanders between at random.
advers = [7, 8, 3, 4]
try:
    while True:
        message = msg.getObject(sock.recv(2224))
        message_received(message)
        for i in range(0,uav_n):
            UAVs[i].stateMap = stateMap
        # UAVs[0] = Fuel_monitor(UAVs[0],0,0)
        # When UAV 1 reaches its current target (within +/-0.002 deg),
        # fetch the next operator waypoint and run it through the shield.
        if (current_wp[0] <= int_loc_map[corrected_next_wp][0] + 0.002 and current_wp[0] >= int_loc_map[corrected_next_wp][0] - 0.002) and \
            (current_wp[1] <= int_loc_map[corrected_next_wp][1] + 0.002 and current_wp[1] <= int_loc_map[corrected_next_wp][1] + 0.002):
            next_wp = get_wp(playMap, corrected_next_wp)
            UAVs[0].state = 0
            fuel_sensor = Fuel_monitor(UAVs[0])
            ugs_1 = u1_sensor(UAVs[1])
            ugs_2 = u2_sensor(UAVs[1])
            # print next_wp
            print 'actual: ', next_wp
            # The shield works on 4-bit waypoint encodings (ids are 1-based).
            next_wp = int_to_bin(next_wp - 1)
            print 'actual: ', (fuel_sensor, ugs_1, next_wp[0], next_wp[1], next_wp[2], next_wp[3])
            corrected_next_wp = shield.move(fuel_sensor, ugs_1, next_wp[3], next_wp[2], next_wp[1], next_wp[0])
            print 'correction:', corrected_next_wp
            corrected_next_wp = bin_to_int(corrected_next_wp[3], corrected_next_wp[2], corrected_next_wp[1], corrected_next_wp[0])+1
            wp = int_loc_map[corrected_next_wp]
            print 'correction: ', corrected_next_wp
            move(UAVs[0], wp)
        # When the adversary reaches its target, send it to a random one.
        if ((current_wp_advers[0] <= wp_advers[0] + 0.002 and current_wp_advers[0] >= wp_advers[0] - 0.002) and \
            (current_wp_advers[1] <= wp_advers[1] + 0.002 and current_wp_advers[1] >= wp_advers[1] - 0.002)):
            UAVs[1].state = 0
            wp_advers = int_loc_map[choice(advers)]
            move(UAVs[1], wp_advers)
        current_wp = (stateMap.get(1).get_Location().get_Latitude(), stateMap.get(1).get_Location().get_Longitude())
        current_wp_advers = (stateMap.get(2).get_Location().get_Latitude(), stateMap.get(2).get_Location().get_Longitude())
finally:
    print("closing socket")
    sock.close()
|
11,622 | 5d2312090583e0c5d667051efce92274f5595a29 | # This makes the model pickle file for making predictions. I ran this here to
# use the project's sklearn version and avoid this error:
# "ValueError: Buffer dtype mismatch, expected 'ITYPE_t' but got 'long long'"
# I used a different file to find the model params.
import pandas as pd
import pickle
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from joblib import dump, load
# Train a KNN happiness classifier on the Somerville survey and persist it.
# NOTE: the original passed encoding="'UTF-16'" (with embedded quotes); that
# only worked because codecs silently strips the punctuation when
# normalising encoding names — use the plain name instead.
df = pd.read_csv(r'https://archive.ics.uci.edu/ml/machine-learning-databases/00479/' +
                 r'SomervilleHappinessSurvey2015.csv',
                 encoding='UTF-16')
df = df.rename(columns={'D': 'happy', 'X1': 'info', 'X2': 'housing', 'X3': 'schools',
                        'X4': 'police', 'X5': 'streets', 'X6': 'events'})
# Every column except the target is a model input.
feature_cols = list(df.columns)
feature_cols.remove('happy')
x = df[feature_cols]  # Features (inputs)
y = df.happy  # Response (prediction, output)
model = KNeighborsClassifier(n_neighbors=4)
model.fit(x, y)
# save the fitted model (joblib replaced the older pickle-based code)
dump(model, r'ratings\static\ratings\happy_somerville.joblib')
# reload and smoke-test the saved model with one survey response
model = load(r'ratings\static\ratings\happy_somerville.joblib')
unit = {'info': '1', 'housing': '1', 'schools': '4', 'police': '5', 'streets': '5', 'events': '5'}
unit = [int(e) for e in unit.values()]
unit = np.array(unit).reshape(1, -1)
y_pred_prob = model.predict_proba(unit)
# Probability (in %) that this respondent is happy.
print(f'{y_pred_prob[0][1]*100:.2f}')
|
11,623 | 49a26a84b7cf5110b0b568701b289978ab9b9630 | '''
Verify the domain against the list of most popular domains from OpenDNS
(https://github.com/opendns/public-domain-lists). Let's see how useful
it is to prevent phishing domains.
'''
from enum import Enum
import re
import tldextract
import wordsegment
from nostril import nonsense
import idna
from confusable_homoglyphs import confusables
import ahocorasick
from .base import Analyser
# Take a histogram here and find out the suitable value for this
# Records carrying at least this many SAN names are flagged as bulk
# registrations (see BulkDomainMarker).
BULK_DOMAIN_THRESHOLD = 15
# pylint: disable=too-few-public-methods
class AhoCorasickDomainMatching(Analyser):
    '''
    The domain and its SAN will be compared against the list of domains, for
    example, the most popular domains from OpenDNS.
    '''
    # Get this number from the histogram of the length of all top domains
    MIN_MATCHING_LENGTH = 3
    # Some domains that don't work too well with tldextract and generate too
    # many FPs
    EXCLUDED_DOMAINS = {
        'www': 1,
        'web': 1,
    }
    # Some common domain parts that cause too many FP
    IGNORED_PARTS = r'^(autodiscover\.|cpanel\.)'
    def __init__(self, domains):
        '''
        Use Aho-Corasick to find the matching domain so we construct its Trie
        here. Thought: How the f**k is com.com in the list?

        `domains` is an iterable of reference domain names (e.g. the OpenDNS
        top list); only the registered-domain part of each entry is indexed.
        '''
        self.automaton = ahocorasick.Automaton()
        # Maps the indexed registered-domain part back to its full entry.
        self.domains = {}
        for index, domain in enumerate(domains):
            # Processing only the domain part. All sub-domains or TLDs will
            # be ignored, for example:
            # - www.google.com becomes google
            # - www.google.co.uk becomes google
            # - del.icio.us becomes icio
            ext = tldextract.extract(domain)
            if ext.domain in AhoCorasickDomainMatching.EXCLUDED_DOMAINS:
                continue
            self.automaton.add_word(ext.domain, (index, ext.domain))
            self.domains[ext.domain] = domain
        self.automaton.make_automaton()
    def run(self, record):
        '''
        Use Aho-Corasick to find the matching domain. Check the time complexity
        of this function later.
        Tricky situation #1: When the string (domain) in the Trie is too short,
        it could match many domains, for example, g.co or t.co. So they need
        to be ignored somehow. Looking at the histogram of the length of all
        domains in the list, there are only less than 100 domains with the
        length of 2 or less. So we choose to ignore those. Also, we will
        prefer longer match than a shorter one for now.
        '''
        if 'analysers' not in record:
            record['analysers'] = []
        results = {}
        # Check the domain and all its SAN
        for domain in record['all_domains']:
            # Remove wildcard
            domain = re.sub(r'^\*\.', '', domain)
            # Remove some FP-prone parts
            domain = re.sub(AhoCorasickDomainMatching.IGNORED_PARTS, '', domain)
            # Similar to all domains in the list, the TLD will be stripped off
            ext = tldextract.extract(domain)
            # The match will be a tuple in the following format: (5, (0, 'google'))
            matches = [m[1][1] for m in self.automaton.iter('.'.join(ext[:2]))
                       if len(m[1][1]) >= AhoCorasickDomainMatching.MIN_MATCHING_LENGTH]
            if matches:
                matches.sort(key=len)
                match = matches[-1]
                # We only keep the longest match of the first matching domain
                # for now; the break below stops scanning the remaining SANs
                # as soon as one domain matches anything.
                results[domain] = [self.domains[match]] if match in self.domains else match
                break
        if results:
            record['analysers'].append({
                'analyser': type(self).__name__,
                'output': results,
            })
        return record
class WordSegmentation(Analyser):
    '''
    Perform word segmentation of all the SAN domains as an attempt to make sense
    of their names. For example, both arch.mappleonline.com and apple-verifyupdate.serveftp.com
    domains have 'apple' inside but only the second one is an actual Apple phishing
    page. Intuitively, a good word segmentation algorithm will return:
    - arch + mapple + online + com
    - apple + verify + update + serve + ftp + com
    Thus, it's much easier to spot the second phishing domain.
    Implementation-wise, there are several existing packages around to do this, for
    example:
    - https://github.com/grantjenks/python-wordsegment
    - https://github.com/keredson/wordninja
    Let's see what they can do, take it away!
    '''
    # Some common stop words that are in the list of most popular domains
    STOPWORDS = {
        'app': 1,
        'inc': 1,
        'box': 1,
        'health': 1,
        'home': 1,
        'space': 1,
        'cars': 1,
        'nature': 1,
    }
    def __init__(self):
        '''
        Just load the wordsegment package, whatever it is.
        '''
        wordsegment.load()
    def run(self, record):
        '''
        Apply word segment to all the SAN domain names. Let's see if it makes
        any sense.

        Appends {'analyser': 'WordSegmentation', 'output': {domain: [tokens]}}
        to record['analysers'] when any domain produced tokens.
        '''
        if 'analysers' not in record:
            record['analysers'] = []
        results = {}
        # Check the domain and all its SAN
        for domain in record['all_domains']:
            # Remove wildcard
            domain = re.sub(r'^\*\.', '', domain)
            # The TLD will be stripped off cause it does not contribute anything here
            ext = tldextract.extract(domain)
            words = []
            # We choose to segment the TLD here as well, for example, .co.uk
            # will become ['co', 'uk']. Let see if this works out.
            for part in ext[:]:
                for token in part.split('.'):
                    segmented = [w for w in wordsegment.segment(token) if w not in WordSegmentation.STOPWORDS]
                    if segmented:
                        words.extend(segmented)
                    elif token:
                        # For some IDNA domain like xn--wgbfq3d.xn--ngbc5azd, the segmentation
                        # won't work and an empty array is returned. So we choose to just keep
                        # the original token
                        words.append(token)
            results[domain] = words
        if results:
            record['analysers'].append({
                'analyser': type(self).__name__,
                'output': results,
            })
        return record
class DomainMatchingOption(Enum):
    '''
    Control how strict we want to do our matching.
    DomainMatching maps these to a collection type: set for subset
    comparison, list when the token order must be preserved.
    '''
    # For example applefake.it will match with apple.com case ['apple'] is
    # a subset of ['apple', 'fake']
    SUBSET_MATCH = 0
    # Similar but use in instead of issubset so that the order is preserved
    ORDER_MATCH = 1
class DomainMatching(Analyser):
    '''
    This is the first example of the new group of meta analysers which are used
    to combine the result of other analysers.
    '''
    def __init__(self, include_tld=True, option=DomainMatchingOption.ORDER_MATCH):
        '''
        Just load the wordsegment package, whatever it is.

        include_tld: also require the TLD tokens to appear in the match.
        option: see DomainMatchingOption; decides set- vs order-matching.
        '''
        wordsegment.load()
        # Save the matching option here so we can refer to it later
        self.include_tld = include_tld
        # The chosen collection type (set or list) drives _match() below.
        self.option = {
            DomainMatchingOption.SUBSET_MATCH: set,
            DomainMatchingOption.ORDER_MATCH: list,
        }[option]
    def run(self, record):
        '''
        Note that a meta-analyser will need to run after other analysers have
        finished so that their outputs are available.
        '''
        if 'analysers' not in record:
            return record
        # Collect the outputs of the analysers we depend on.
        analysers = {
            AhoCorasickDomainMatching.__name__: {},
            WordSegmentation.__name__: {},
            BulkDomainMarker.__name__: {},
        }
        for analyser in record['analysers']:
            name = analyser['analyser']
            if name not in analysers:
                continue
            if name == BulkDomainMarker.__name__ and analyser['output']:
                # Skip bulk record and deal with it later, with such large
                # number of SAN name, it's bound to be a match
                continue
            analysers[name] = analyser['output']
        # Check that all outputs are there before continuing
        if not analysers[AhoCorasickDomainMatching.__name__] or not analysers[WordSegmentation.__name__]:
            return record
        results = self._match(analysers[AhoCorasickDomainMatching.__name__],
                              analysers[WordSegmentation.__name__])
        if results:
            record['analysers'].append({
                'analyser': type(self).__name__,
                'output': results,
            })
        return record
    def _match(self, ahocorasick_output, segmentation_output):
        '''
        Use internally by the run function to combine AhoCorasick and WordSegmentation
        results. Returns {matched_domain: [suspected_reference_domains]}.
        '''
        results = {}
        # Check all the matching domains reported by AhoCorasick analyser
        for match, domains in ahocorasick_output.items():
            # The result of AhoCorasick matcher is a list of matching domains, for example,
            #
            # {
            #     'analyser': 'AhoCorasickDomainMatching',
            #     'output': {
            #         'login-appleid.apple.com.managesuppport.co': ['apple.com', 'support.com'],
            #     },
            # },
            #
            if match not in segmentation_output:
                continue
            phish = self.option(segmentation_output[match])
            match_ext = tldextract.extract(match)
            for domain in domains:
                ext = tldextract.extract(domain)
                # This record is from a legitimate source, for example, agrosupport.zendesk.com
                # will match with zendesk.com. In our case, we don't really care about this so
                # it will be ignored and not reported as a match.
                if ext[1:] == match_ext[1:]:
                    continue
                tmp = []
                # Intuitively, it will be more accurate if we choose to include the TLD here.
                # For example, if both 'apple' and 'com' appear in the matching domain, it's
                # very likely that something phishing is going on here. On the other hand,
                # if only 'apple' occurs, we are not so sure and it's better left for more
                # advance analysers to have their says in that
                for part in ext[:] if self.include_tld else ext[:2]:
                    for token in part.split('.'):
                        tmp.extend(wordsegment.segment(token))
                legit = self.option(tmp)
                if (isinstance(phish, set) and legit.issubset(phish)) or \
                   (isinstance(phish, list) and '.{}'.format('.'.join(legit)) in '.'.join(phish)):
                    # Found a possible phishing domain
                    if match not in results:
                        results[match] = []
                    results[match].append(domain)
        return results
class BulkDomainMarker(Analyser):
    '''
    Mark the record that has tons of SAN domains in it. Most of the time, they are
    completely unrelated domains and probably the result of some bulk registration
    process. Benign or not, they are still suspicious and probably spam. We can also
    verify the similarity among these domains. A lower similarity score means these
    domains are totally unrelated.
    '''
    def __init__(self, threshold=BULK_DOMAIN_THRESHOLD):
        '''
        Set the threshold to mark the record as a bulk record.
        '''
        self.threshold = threshold
    def run(self, record):
        '''
        See if the record is a bulk record. We will just use the threshold as
        the indicator for now. So if a record has more SAN names than the
        threshold, it is a bulk record. Appends a boolean 'output' entry to
        record['analysers'] and returns the record.
        '''
        if 'analysers' not in record:
            record['analysers'] = []
        # The comparison already yields a bool; the previous
        # `True if ... else False` wrapper was redundant.
        is_bulked = len(record['all_domains']) >= self.threshold
        record['analysers'].append({
            'analyser': type(self).__name__,
            'output': is_bulked,
        })
        return record
class IDNADecoder(Analyser):
    '''
    Decode all domains in IDNA format.
    '''
    def run(self, record):
        '''
        Replace every IDNA-encoded name in record['all_domains'] with its
        Unicode form; names that fail to decode are kept verbatim, and a
        leading wildcard ('*.') is preserved across the conversion.
        '''
        converted = []
        for name in record['all_domains']:
            has_wildcard = bool(re.match(r'^\*\.', name))
            if has_wildcard:
                # The wildcard prefix interferes with the IDNA module;
                # strip it now and restore it after decoding.
                name = re.sub(r'^\*\.', '', name)
            try:
                name = idna.decode(name)
            except (idna.core.InvalidCodepoint, UnicodeError):
                # Failed to decode — keep the name as it is for now.
                pass
            if has_wildcard:
                name = '*.{}'.format(name)
            converted.append(name)
        record['all_domains'] = converted
        return record
class HomoglyphsDecoder(Analyser):
    '''
    Smartly convert domains whose names include some suspicious homoglyphs to
    ASCII. This will probably need to be right done after IDNA conversion and
    before other analysers so that they can get benefits from it.
    '''
    def __init__(self, greedy=False):
        '''
        We rely on the confusable-homoglyphs at https://github.com/vhf/confusable_homoglyphs
        to do its magic.
        If the greedy flag is set, all alternative domains will be returned. Otherwise, only
        the first one will be available.
        '''
        self.greedy = greedy
    def run(self, record):
        '''
        Using the confusable-homoglyphs, we are going to generate all alternatives ASCII
        names of a domain. It's a bit of a brute force though.
        Replaces record['all_domains'] with the generated alternatives.
        '''
        decoded = []
        # For our specific case, we will only care about latin character
        lower_s = range(ord('a'), ord('z') + 1)
        upper_s = range(ord('A'), ord('Z') + 1)
        for domain in record['all_domains']:
            wildcard = False
            if re.match(r'^\*\.', domain):
                wildcard = True
                # Remove wildcard to simplify the domain name a bit and we'll put it back later
                domain = re.sub(r'^\*\.', '', domain)
            # character -> homoglyph info for every confusable character.
            hg_map = {hg['character']: hg for hg in confusables.is_confusable(domain, greedy=True)}
            # Per-character list of candidate ASCII replacements.
            decoded_domain_c = []
            for domain_c in domain:
                # Confusable homoglyphs could not find any homoglyphs for this character
                # so we decide to keep the original character as it is
                if domain_c not in hg_map:
                    decoded_domain_c.append([domain_c])
                    continue
                found = []
                hglyph = hg_map[domain_c]
                if hglyph['alias'] == 'LATIN':
                    # The character is latin, we don't need to do anything here
                    found.append(hglyph['character'])
                for alt in hglyph['homoglyphs']:
                    is_latin = True
                    # We need to check the length of the homoglyph here cause
                    # confusable_homoglyphs library nicely returns multi-character
                    # match as well, for example, 'rn' has an alternative of 'm'
                    for alt_c in alt['c']:
                        if ord(alt_c) not in lower_s and ord(alt_c) not in upper_s:
                            is_latin = False
                            break
                    if is_latin:
                        found.append(alt['c'].lower())
                # If nothing is found, we keep the original character
                if not found:
                    found.append(hglyph['character'])
                decoded_domain_c.append(found)
            for alt in self._generate_alternatives(decoded_domain_c):
                if wildcard:
                    alt = '*.{}'.format(alt)
                decoded.append(alt)
                # Non-greedy mode keeps only the first alternative per domain.
                if not self.greedy:
                    break
        record['all_domains'] = decoded
        return record
    def _generate_alternatives(self, alt_characters, index=0, current=''):
        '''
        Generate all alternative ASCII names of a domain using the list of all
        alternative characters. Yields the cartesian product recursively.
        '''
        if index == len(alt_characters):
            yield current
        else:
            for alt_c in alt_characters[index]:
                yield from self._generate_alternatives(alt_characters,
                                                       index + 1,
                                                       current + alt_c)
class FeaturesGenerator(Analyser):
    '''
    Generate features to detect outliers in the stream. In our case, the outliers is
    the 'suspicious' phishing domains.
    '''
    # Minimum token length for the nostril randomness check.
    NOSTRIL_LENGTH_LIMIT = 6
    # pylint: disable=invalid-name
    def run(self, record):
        '''
        The list of features will be:
        - The number of domain parts, for example, www.google.com is 3.
        - The overall length in characters.
        - The length of the longest domain part.
        - The length of the TLD, e.g. .online or .download is longer than .com.
        - The randomness level of the domain.
        Consumes the first WordSegmentation output found in the record.
        '''
        if 'analysers' not in record:
            record['analysers'] = []
        x_samples = []
        # NOTE(review): Y_samples (labels) is computed but never emitted in
        # the output below — confirm whether that is intentional.
        Y_samples = []
        for analyser in record['analysers']:
            if analyser['analyser'] != 'WordSegmentation':
                continue
            for domain, segments in analyser['output'].items():
                # Remove wildcard domain
                domain = re.sub(r'^\*\.', '', domain)
                parts = domain.split('.')
                x = []
                # Compute the number of domain parts
                x.append(len(parts))
                # Compute the length of the whole domain
                x.append(len(domain))
                longest = ''
                # Compute the length of the longest domain parts
                for part in parts:
                    if len(part) > len(longest):
                        longest = part
                x.append(len(longest))
                # Compute the length of the TLD
                x.append(len(parts[-1]))
                randomness_count = 0
                # The nostril package which we are using to detect non-sense words
                # in the domain only returns a boolean verdict so may be we need to
                # think of how we want to quantify this
                for w in segments:
                    try:
                        if len(w) >= FeaturesGenerator.NOSTRIL_LENGTH_LIMIT and nonsense(w):
                            randomness_count += 1
                    except ValueError:
                        continue
                x.append(randomness_count / len(segments))
                x_samples.append(x)
                Y_samples.append(True if 'usual_suspect' in record else False)
            break
        record['analysers'].append({
            'analyser': type(self).__name__,
            'output': x_samples,
        })
        return record
|
11,624 | e4ea894fe772eacb69faa0d455a1d497c6046927 | import json
import unittest
from app import app
from assign_bands import *
from assign_dorms import *
from collections import defaultdict
class BandsTestCase(unittest.TestCase):
    """Gray- and black-box tests for the band-assignment pipeline."""
    @classmethod
    def setUpClass(cls):
        # Fixed: was misspelled 'setupClass', so unittest never invoked it.
        pass
    @classmethod
    def tearDownClass(cls):
        pass
    def setUp(self):
        """Load fixture inputs and the generated band assignments.

        File handles are now closed deterministically via `with`.
        """
        with open('./data/members_test.json', 'r') as fh:
            self.test_data = json.load(fh)
        with open('./data/bands.json', 'r') as fh:
            self.result = json.load(fh)
        with open('./data/bands2.json', 'r') as fh:
            self.backup_data = json.load(fh)
    # gray box
    def test_gendergrade(self):
        """male_female_grade must reproduce the expected fixture output."""
        test_data_input = self.test_data['gender_test'][0]
        test_data_output = self.test_data['gender_test'][1]
        self.assertEqual(test_data_output, male_female_grade(test_data_input))
    def test_average_variance(self):
        """average()/variance() must match the precomputed fixture values."""
        ave_inputs = self.test_data['trank_test_ave'][0]
        ave_outputs = self.test_data['trank_test_ave'][1]
        var_inputs = self.test_data['trank_test_var'][0]
        var_outputs = self.test_data['trank_test_var'][1]
        for i in range(len(ave_inputs)):
            mean = average(ave_inputs[i])
            self.assertEqual(ave_outputs[i], mean)
            self.assertEqual(var_outputs[i], variance(var_inputs[i], mean))
    # black box: assign_bands
    def test_assign_bands(self):
        """Each generated band must be gender-balanced with bounded variance."""
        talent_type = ['Singer', 'Guitarist', 'Drummer',
                       'Bassist', 'Keyboardist', 'Instrumentalist']
        for band in self.result:
            males = 0
            ranks = []
            for role in talent_type:
                # Member entries look like 'name || gender || ... || rank'.
                item = band[role].split(' || ')
                if item[1] == 'male':
                    males += 1
                ranks.append(int(item[-1]))
            # 1..5 males out of 6 counts as gender-balanced.
            self.assertEqual(1, 1 if abs(males - 3) < 3 else 0)
            mean = average(ranks)
            # Talent-rank variance must stay within (0, 2.5).
            self.assertEqual(1, 1 if abs(variance(ranks, mean) - 1.25) < 1.25 else 0)
    def tearDown(self):
        pass
class DormsTestCase(unittest.TestCase):
    """Tests for the generated dorm assignments in dorms.json."""

    @classmethod
    def setUpClass(cls):
        # FIX: was misspelled ``setupClass``, so unittest never invoked it.
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        with open('./data/dorms.json', 'r') as db:
            dorms_data = json.loads(db.read())
        # FIX: members.json was previously opened and never closed.
        with open('./data/members.json', 'r') as db:
            self.member = json.loads(db.read())
        # Each dorm member entry is "memberId || age"; split into parallel lists.
        ids = []
        ages = []
        for dorm in dorms_data:
            ids_temp = []
            ages_temp = []
            for entry in dorm['members']:
                item = entry.split(' || ')
                ids_temp.append(item[0])
                ages_temp.append(int(item[1]))
            ids.append(ids_temp)
            ages.append(ages_temp)
        self.dorms_data = dorms_data
        self.ids = ids
        self.ages = ages

    def test_length(self):
        # Every dorm must hold exactly 8 members.
        for dorm in self.dorms_data:
            self.assertEqual(8, len(dorm['members']))

    def test_gender(self):
        # Every member assigned to a dorm must match the dorm's declared gender.
        ids = self.ids
        dorms = self.dorms_data
        for i in range(len(dorms)):
            gender = dorms[i]['gender']
            for j in range(len(dorms[i]['members'])):
                for person in self.member:
                    if person['memberId'] == ids[i][j]:
                        self.assertEqual(gender, person['gender'])

    def test_age(self):
        # Average age per dorm must be within 2 years of 15.5.
        # FIX: renamed the accumulator; it previously shadowed builtin ``sum``.
        for dorm_ages in self.ages:
            total = 0
            count = 0
            for age in dorm_ages:
                total += age
                count += 1
            ave = total / count
            if abs(ave - 15.5) < 2:
                s = 1
            else:
                s = 0
            self.assertEqual(1, s)

    def tearDown(self):
        pass
class ViewsTestCase(unittest.TestCase):
    """Smoke tests for the Flask routes: each view should answer 200/404."""

    @classmethod
    def setUpClass(cls):
        # FIX: was misspelled ``setupClass``, so unittest never invoked it.
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        self.app = app.test_client()
        self.app.testing = True

    def test_404(self):
        response = self.app.get('/wrong/url')
        self.assertEqual(response.status_code, 404)

    def test_index_view(self):
        response = self.app.get('/index', follow_redirects=True)
        self.assertEqual(response.status_code, 200)

    def test_bands_view(self):
        response = self.app.get('/bands', follow_redirects=True)
        self.assertEqual(response.status_code, 200)

    def test_dorms_view(self):
        response = self.app.get('/dorms', follow_redirects=True)
        self.assertEqual(response.status_code, 200)

    def test_members_view(self):
        response = self.app.get('/members', follow_redirects=True)
        self.assertEqual(response.status_code, 200)

    def test_members_add_view(self):
        response = self.app.get('/members/add', follow_redirects=True)
        self.assertEqual(response.status_code, 200)

    def test_members_add(self):
        # Submit a complete member record through the add form endpoint.
        response = self.app.post('/members/submit', data={
            'memberId': '99999',
            'type': 'member',
            'firstName': 'James',
            'lastName': 'Testman',
            'gender': 'male',
            'age': 15,
            'street': '123 Fake St',
            'city': 'Testville',
            'state': 'CA',
            'zipCode': 91000,
            'phone': '000-000-0000',
            'talent': 'Guitarist',
            'cohort': 'first',
            'status': 'Pending',
            'checkin': False,
            'forms': False,
            'payment': False
        }, follow_redirects=True)
        self.assertEqual(response.status_code, 200)

    def test_members_delete(self):
        response = self.app.post('/members/delete', data={
            'memberId': '99999'
        }, follow_redirects=True)
        self.assertEqual(response.status_code, 200)

    def test_members_checkout(self):
        response = self.app.post('/members/checkout', data={
            'memberId': '99999'
        }, follow_redirects=True)
        self.assertEqual(response.status_code, 200)

    def test_members_checkin(self):
        response = self.app.post('/members/checkin', data={
            'memberId': '99999'
        }, follow_redirects=True)
        self.assertEqual(response.status_code, 200)

    # Disabled until the edit/update/email endpoints are implemented:
    '''def test_members_edit(self):
        response = self.app.post('/members/edit', data={
            'memberId': '99999'
        }, follow_redirects=True)
        self.assertEqual(response.status_code, 200)
    def test_members_update(self):
        response = self.app.post(
            '/members/update',
            data=dict(
                memberId=99999,
                type="member",
                firstName="James",
                lastName="Testman",
                gender="Male",
                age=15,
                street="123 Fake St",
                city="Faketown",
                state="CA",
                zipCode=91000,
                talent="Guitarist",
                status="Pending",
                checkin=False,
                forms=False,
                payment=False
            ),
            follow_redirects=True
        )
        self.assertIn(b"Testing Member update", response.data)
    def test_email_send(self):
        response = self.app.post(
            '/email/send',
            data=dict(
                memberId=99999,
                type="Rejected"
            ),
            follow_redirects=True
        )
        self.assertIn(b"Testing Email send", response.data)'''

    def tearDown(self):
        pass
# Allow running this module directly: discover and run all test cases above.
if __name__ == '__main__':
    unittest.main()
|
11,625 | dadb624524da9668388838457efe65768d981fbd | #Final Database Project - Luke Ding and Pat Rademacher
# Charles Winstead
# June 4, 2019
# CS 586
import psycopg2 as p
import pandas as pd
import io
from sqlalchemy import create_engine
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Date
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import ForeignKey
from sqlalchemy import TEXT, MetaData, Table, Column, Float
import csv

# Initialize engine to connect to the class database.
# SECURITY NOTE(review): credentials are hard-coded in source; consider
# loading them from an environment variable or config file instead.
engine = create_engine('postgresql://s19wdb28:q4gB6xah*z@dbclass.cs.pdx.edu:5432/s19wdb28')
conn = engine.connect()

# Read in all Excel and CSV sheets through pandas.
# FIX: ``Sheetname=0`` is not a valid read_excel keyword (it raises a
# TypeError); the correct parameter name is ``sheet_name``.
Stores = pd.read_excel('Datasheet.xlsx', sheet_name=0)
Cars = pd.read_csv('Datasheet.csv')
service_centers = pd.read_csv('service_centers.csv', encoding = 'latin-1')
superchargers = pd.read_csv('superchargers.csv', encoding = 'latin-1')
customers = pd.read_csv('customers.csv', encoding = 'latin-1')
employees = pd.read_csv('employees.csv', encoding = 'latin-1')
purchhistory = pd.read_csv('purchasehistory.csv', encoding = 'latin-1')
storeemp = pd.read_csv('storeemp.csv', encoding = 'latin-1')
serveemp = pd.read_csv('servemp.csv', encoding = 'latin-1')
appointments = pd.read_csv('REALAPPOINTMENTS.csv', encoding = 'latin-1')
waittimes = pd.read_csv('waittimes.csv', encoding = 'latin-1')
# Initialize proper list sizes for transferring CSV and Excel sheets to lists.
superchargers_array = [''] * 10
cust = [''] * 5
emp = [''] * 5
ph = [''] * 5
ste = [''] * 2
sve = [''] * 2
app = [''] * 8
wt = [''] * 5
newemp = [''] * 5
# One list per source column.
for i in range(10):
    superchargers_array[i] = list(superchargers.iloc[:, i])
for i in range(5):
    cust[i] = list(customers.iloc[:, i])
    #cust[i].strip("ร")
    # NOTE: only the first 199 employee rows are loaded (iloc[:199]).
    emp[i] = list(employees.iloc[:199, i])
    #emp[i].strip("ร")
    ph[i] = list(purchhistory.iloc[:, i])
    wt[i] = list(waittimes.iloc[:, i])
for i in range(2):
    ste[i] = list(storeemp.iloc[:, i])
    sve[i] = list(serveemp.iloc[:, i])
for i in range(8):
    app[i] = list(appointments.iloc[:, i])
# Had a strange instance when reading in CSV files from home through PSU's
# server - had to eliminate the following stray character from names/emails.
cust[1] = [s.replace('ร', ' ') for s in cust[1]]
cust[4] = [s.replace('ร', '') for s in cust[4]]
emp[1] = [s.replace('ร', ' ') for s in emp[1]]
emp[4] = [s.replace('ร', '') for s in emp[4]]
# Column-wise views of the Stores sheet (each list holds a single Series).
Store_StoreID = []
Store_Country = []
Store_Name = []
Store_Address = []
Store_Extended = []
Store_Local = []
Store_Phone = []
Store_GoogleReviewRating = []
Store_StoreID.append(Stores.iloc[:, 0])
Store_Country.append(Stores.iloc[:, 1])
Store_Name.append(Stores.iloc[:, 2])
Store_Address.append(Stores.iloc[:, 3])
Store_Extended.append(Stores.iloc[:, 4])
Store_Local.append(Stores.iloc[:, 5])
Store_Phone.append(Stores.iloc[:, 6])
Store_GoogleReviewRating.append(Stores.iloc[:, 7])
Cars_Model = []
Cars_Subtype = []
Cars_Range = []
Cars_Zto60 = []
Cars_Model.append(Cars.iloc[:, 0])
Cars_Subtype.append(Cars.iloc[:, 1])
Cars_Range.append(Cars.iloc[:, 2])
Cars_Zto60.append(Cars.iloc[:, 3])
service_id = []
service_country = []
service_name = []
service_address = []
service_extended = []
service_local = []
service_phone = []
service_google = []
service_id.append(service_centers.iloc[:, 0])
service_country.append(service_centers.iloc[:, 1])
service_name.append(service_centers.iloc[:, 2])
service_address.append(service_centers.iloc[:, 3])
service_extended.append(service_centers.iloc[:, 4])
service_local.append(service_centers.iloc[:, 5])
service_phone.append(service_centers.iloc[:, 6])
service_google.append(service_centers.iloc[:, 7])
# Convert lists to pandas' DataFrame data type.
df_stores = pd.DataFrame(data = {'store_id' : Store_StoreID[0], 'country' : Store_Country[0], 'name' : Store_Name[0], 'address' : Store_Address[0], 'extended' : Store_Extended[0], 'local' : Store_Local[0], 'phone' : Store_Phone[0], 'google_review_rating' : Store_GoogleReviewRating[0]})
df_cars = pd.DataFrame(data = {'model' : Cars_Model[0], 'subtype' : Cars_Subtype[0], 'range' : Cars_Range[0], 'zto60' : Cars_Zto60[0]})
df_service = pd.DataFrame(data = {'service_id' : service_id[0], 'country' : service_country[0], 'name': service_name[0], 'address': service_address[0], 'extended': service_extended[0], 'local': service_local[0], 'phone': service_phone[0], 'google_review_rating': service_google[0]})
df_superchargers = pd.DataFrame(data = {'supercharger_id': superchargers_array[0], 'country': superchargers_array[1], 'name': superchargers_array[2], 'address': superchargers_array[3], 'extended': superchargers_array[4], 'local': superchargers_array[5], 'phone': superchargers_array[6], 'google_review_rating': superchargers_array[7], 'stalls': superchargers_array[8], 'charge_rate': superchargers_array[9]})
df_customers = pd.DataFrame(data = {'customer_id': cust[0], 'name': cust[1], 'phone': cust[2], 'country': cust[3], 'email': cust[4]})
df_employees = pd.DataFrame(data ={'employee_id': emp[0], 'name': emp[1], 'role': emp[2], 'phone': emp[3], 'email': emp[4]})
df_ph = pd.DataFrame(data = {'customer_id': ph[0], 'model': ph[1], 'subtype': ph[2], 'date': ph[3], 'vin': ph[4]})
df_ste = pd.DataFrame(data = {'employee_id': ste[0], 'store_id': ste[1]})
df_sve = pd.DataFrame(data = {'service_id': sve[0], 'employee_id': sve[1]})
df_wt = pd.DataFrame(data = {'model': wt[0], 'subtype': wt[1], 'country': wt[2], 'wait_time': wt[3], 'price': wt[4]})
df_app = pd.DataFrame(data = {'appointment_id': app[0], 'model': app[1], 'subtype': app[2], 'service_id': app[3], 'employee_id': app[4], 'customer_id': app[5], 'date': app[6], 'time': app[7]})
# Use pandas' 'to_sql' to load each DataFrame into Postgres, replacing any
# existing table.
# NOTE(review): in the stores dtype mapping, "address" is passed the String
# class rather than an instance (String vs String()); pandas accepts both.
df_stores.to_sql(name='stores', con=engine, if_exists = 'replace', index=False, dtype = {"store_id": Integer(), "country": String(), "name" : String(), "address" : String, "extended" : String(), "local" : String(), "phone" : String(), "google_review_rating": Integer()})
df_cars.to_sql(name='cars', con=engine, if_exists = 'replace', index=False, dtype = {"model" : String(), "subtype" : String(), "range" : String(), "zto60" : Float()})
df_service.to_sql(name='service_centers', con=engine, if_exists = 'replace', index=False, dtype = {"service_id": Integer(), "country": String(), "name": String(), "address": String(), "extended": String(), "local": String(), "phone": String(), "google_review_rating": Integer()})
df_superchargers.to_sql(name= 'superchargers', con=engine, if_exists = 'replace', index=False, dtype = {"supercharger_id": Integer(), "country": String(), "name" : String(), "address" : String, "extended" : String(), "local" : String(), "phone" : String(), "google_review_rating": Integer(), "stalls":Integer(), "charge_rate": String()})
df_customers.to_sql(name= 'customers', con=engine, if_exists = 'replace', index=False, dtype = {"customer_id": Integer(), "name": String(), "phone" : String(), "country": String(), "email": String()})
df_employees.to_sql(name= 'employees', con=engine, if_exists = 'replace', index=False, dtype = {"employee_id": Integer(), "name": String(), "role" : String(), "phone": String(), "email": String()})
df_ph.to_sql(name= 'purchase_history', con=engine, if_exists = 'replace', index=False, dtype = {'customer_id': Integer(), 'model': String(), 'subtype': String(), 'date': Date(), 'vin': String()})
df_ste.to_sql(name= 'store_employees', con=engine, if_exists = 'replace', index=False, dtype = {'employee_id': Integer(), 'store_id': Integer()})
df_sve.to_sql(name= 'service_employees', con=engine, if_exists = 'replace', index=False, dtype = {'service_id': Integer(), 'employee_id': Integer()})
df_app.to_sql(name= 'appointments', con=engine, if_exists = 'replace', index=False, dtype = {'appointment_id': Integer(), 'model': String(), 'subtype': String(), 'service_id': Integer(), 'employee_id': Integer(), 'customer_id': Integer(), 'date': String(), 'time': Integer()})
df_wt.to_sql(name = 'wait_time', con=engine, if_exists = 'replace', index=False, dtype = {'model': String(), 'subtype': String(), 'country': String(), 'wait_time': Integer(), 'price': Integer()})
# Add primary/foreign keys after the load ('replace' recreates bare tables).
conn.execute('ALTER TABLE stores ADD PRIMARY KEY (store_id);')
conn.execute('ALTER TABLE cars ADD PRIMARY KEY (model, subtype);')
conn.execute('ALTER TABLE customers ADD PRIMARY KEY (customer_id);')
conn.execute('ALTER TABLE employees ADD PRIMARY KEY (employee_id);')
conn.execute('ALTER TABLE purchase_history ADD PRIMARY KEY (vin), ADD CONSTRAINT ph_cust FOREIGN KEY (customer_id) REFERENCES customers(customer_id), ADD CONSTRAINT ph_car FOREIGN KEY(model, subtype) REFERENCES cars(model, subtype);')
conn.execute('ALTER TABLE service_centers ADD PRIMARY KEY (service_id);')
conn.execute('ALTER TABLE wait_time ADD PRIMARY KEY (model, subtype, country), ADD CONSTRAINT wait_car FOREIGN KEY (model, subtype) REFERENCES cars(model, subtype);')
conn.execute('ALTER TABLE service_employees ADD PRIMARY KEY (service_id, employee_id), ADD CONSTRAINT serv_emp FOREIGN KEY(service_id) REFERENCES service_centers(service_id), ADD CONSTRAINT emp_serv FOREIGN KEY (employee_id) REFERENCES employees(employee_id);')
conn.execute('ALTER TABLE store_employees ADD PRIMARY KEY (store_id, employee_id), ADD CONSTRAINT store_employee_id FOREIGN KEY (store_id) REFERENCES stores(store_id), ADD CONSTRAINT employee_store_id FOREIGN KEY (employee_id) REFERENCES employees(employee_id);')
conn.execute('ALTER TABLE superchargers ADD PRIMARY KEY (supercharger_id);')
conn.execute('ALTER TABLE appointments ADD PRIMARY KEY (appointment_id), ADD CONSTRAINT app_empl FOREIGN KEY (employee_id) REFERENCES employees(employee_id), ADD CONSTRAINT app_car FOREIGN KEY(model, subtype) REFERENCES cars(model, subtype), ADD CONSTRAINT app_cust FOREIGN KEY(customer_id) REFERENCES customers(customer_id), ADD CONSTRAINT app_serv FOREIGN KEY (service_id) REFERENCES service_centers(service_id);')
conn.close()
#References
# https://docs.sqlalchemy.org/en/13/core/type_basics.html
# https://robertdavidwest.com/2014/10/12/python-pandas-%E2%86%92-mysql-using-sqlalchemy-a-k-a-sqlalchemy-for-pandas-users-who-dont-know-sql-the-brave-and-the-foolhardy/
# https://docs.sqlalchemy.org/en/13/orm/join_conditions.html
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_excel.html
|
11,626 | 625165c6210a6d17e7863f6ff5433de42c358ad7 | from sekizai.context import SekizaiContext
from django.utils.translation import ugettext as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
import datetime
import calendar
from models import EventListPlugin, EventRegistryPlugin, Event, EventRegistry
class EventList(CMSPluginBase):
    """Render the list of ongoing events, optionally narrowed to a category."""
    model = EventListPlugin
    name = _('Event list')
    render_template = 'cmsplugin_events/event_list_plugin.html'
    module = 'events'

    def render(self, context, instance, placeholder):
        events = Event.ongoing.all()
        category = instance.category
        if category:
            events = events.filter(category=category)
        context.update({'object_list': events, 'instance': instance})
        return super(EventList, self).render(context, instance, placeholder)


plugin_pool.register_plugin(EventList)
class CalendarPlugin(CMSPluginBase):
    """Render a month-view calendar populated with this month's events."""
    name = _('Events Calendar')
    render_template = 'cmsplugin_events/calendar.html'
    module = 'events'
    cache = False

    # Month number -> untranslated name; translated lazily on lookup.
    _MONTH_NAMES = {
        1: 'January', 2: 'February', 3: 'March', 4: 'April',
        5: 'May', 6: 'June', 7: 'July', 8: 'August',
        9: 'September', 10: 'October', 11: 'November', 12: 'December',
    }

    def render(self, context, instance, placeholder):
        now = datetime.datetime.now()
        month = now.month
        year = now.year
        # monthrange -> (weekday of the 1st, number of days in the month)
        first_weekday, days_in_month = calendar.monthrange(year, month)
        context.update(SekizaiContext({
            'month': month,
            'month_name': self.get_month_name(month),
            'year': year,
            'first_day_of_week': first_weekday,
            # the template iterates over this string, one char per day
            'number_of_days_in_month': 'X' * days_in_month,
            'events': Event.month.find(month),
        }))
        return super(CalendarPlugin, self).render(context, instance, placeholder)

    def get_month_name(self, month):
        """Return the translated name for *month* (1-12)."""
        try:
            return _(self._MONTH_NAMES[month])
        except KeyError:
            return _('Unknown Month')


plugin_pool.register_plugin(CalendarPlugin)
class EventRegistrationPlugin(CMSPluginBase):
    """Registration form plugin: handles its own POST and lists events."""
    model = EventRegistryPlugin
    name = _('Event Registration Form')
    render_template = 'cmsplugin_events/register.html'
    module = 'events'

    def _event_queryset(self, instance):
        # Pick the event list according to the plugin instance's filters.
        if instance.current_month_filter:
            if instance.category_filter:
                return Event.month.find_by_category(instance.category_filter)
            return Event.month.find()
        if instance.month_filter:
            if instance.category_filter:
                return Event.month.find_by_category(instance.category_filter, instance.month_filter)
            return Event.month.find(instance.month_filter)
        if instance.category_filter:
            return Event.by_category.find(instance.category_filter)
        return Event.ongoing.all()

    def render(self, context, instance, placeholder):
        request = context['request']
        submit_key = "cmsplugin_events_register_" + str(instance.id)
        if request.method == 'POST' and submit_key in request.POST.keys():
            # This plugin's own form was submitted: record the registration.
            EventRegistry.from_request(request)
            context.update({'submitted': True})
        else:
            context.update(SekizaiContext({'events': self._event_queryset(instance)}))
        return super(EventRegistrationPlugin, self).render(context, instance, placeholder)


plugin_pool.register_plugin(EventRegistrationPlugin)
|
11,627 | f4f80a2a18df7395236180928fe7f673b736909b | from django.contrib import admin
from database.models import PagesContent, State
class PageAdmin(admin.ModelAdmin):
    """Admin list view for editable page content."""
    list_display = ('name', 'link', 'content')


class StateAdmin(admin.ModelAdmin):
    """Admin options for State (no customisation yet)."""
    pass


admin.site.register(PagesContent, PageAdmin)
# FIX: StateAdmin was defined but never passed to register().
admin.site.register(State, StateAdmin)
11,628 | 5b3ccf5f5d77465cc60d9816cb27c4a7d0962611 | import scrapy
from scrapy.spiders import Spider
from seCrawler.common.searResultPages import searResultPages
from seCrawler.common.searchEngines import SearchEngineResultSelectors
from seCrawler.common.searchEngines import SearchEngines
from scrapy.selector import Selector
class keywordSpider(Spider):
    """Crawl a search engine's result pages for a keyword, yielding URLs."""
    name = 'keywordSpider'
    allowed_domains = ['bing.com', 'google.com', 'baidu.com']
    start_urls = []
    keyword = None
    searchEngine = None
    selector = None
    maxpage = None

    def __init__(self, keyword, se='bing', maxpage=10):
        self.keyword = keyword.lower()
        self.searchEngine = se.lower()
        self.selector = SearchEngineResultSelectors[self.searchEngine]
        # FIX: assign a fresh instance list; appending to the shared
        # class-level ``start_urls`` leaked URLs across spider instances.
        self.start_urls = [SearchEngines[se].format(keyword, 1)]
        # FIX: scrapy's -a arguments arrive as strings; coerce so the
        # page-limit comparison in parse() works.
        self.maxpage = int(maxpage)

    def parse(self, response):
        """Yield result URLs, then follow pagination up to ``maxpage``."""
        for url in response.xpath(self.selector).re('^https?://.*'):
            yield {'url': url}
        # Bing-specific pagination selectors (current page / next-page link).
        currentPage = int(response.css('a.sb_pagS::text').extract_first())
        nextUrl = response.css('a.sb_pagN::attr("href")').extract_first()
        if nextUrl and currentPage < self.maxpage:
            nextUrl = response.urljoin(nextUrl)
            yield scrapy.Request(nextUrl, callback=self.parse)
11,629 | 36ec3b2c42245dba9112b3f23f988114c2a32990 | /usr/share/pyshared/openerp/addons/survey/survey.py |
11,630 | c392d8ea4f9027c7c29ecde524f24916ff6a04ea | import RPi.GPIO as GPIO
import time
from picamera import PiCamera
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
camera = PiCamera()
# Mail account credentials and recipient (placeholders).
user = "yourmail"
pwd = "password"
to = "yourmail"
# Pre-built alert message; the photo attachment is added in the main loop.
msg = MIMEMultipart()
msg["Subject"] = "Warning"
msg["From"] = user
msg["To"] = to
part = MIMEText("FXCK difficult project!")
msg.attach(part)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(12, GPIO.IN)  # Read input from the PIR motion sensor (BCM pin 12)
seg = (4, 17, 27, 22, 23, 24, 25)  # BCM pins driving the 7-segment display
# Per-digit segment on/off patterns for digits 0-9, in ``seg`` order.
# NOTE(review): assumes active-high segments — confirm against the wiring.
digits = {
    '0': (1, 1, 1, 0, 1, 1, 1),
    '1': (1, 0, 0, 0, 1, 0, 0),
    '2': (1, 1, 0, 1, 0, 1, 1),
    '3': (1, 1, 0, 1, 1, 1, 0),
    '4': (1, 0, 1, 1, 1, 0, 0),
    '5': (0, 1, 1, 1, 1, 1, 0),
    '6': (0, 1, 1, 1, 1, 1, 1),
    '7': (1, 1, 0, 0, 1, 0, 0),
    '8': (1, 1, 1, 1, 1, 1, 1),
    '9': (1, 1, 1, 1, 1, 1, 0)
}
# Configure every segment pin as an output.
for n in range(0, 7):
    GPIO.setup(seg[n], GPIO.OUT)
c = 0
# Main loop: poll the PIR sensor; on motion, count down on the display,
# take a photo, and email it.
while True:
    i = GPIO.input(12)
    if i == 0:  # no motion: poll again shortly
        time.sleep(.1)
    elif i == 1:  # motion detected
        c = 0
        for x in range(0, 4):  # show 3, 2, 1, 0 on the display, 1s apart
            for n in range(0, 7):
                GPIO.output(seg[n], digits[str(3 - c % 10)][n])
            time.sleep(1)
            c += 1
        camera.capture('/home/pi/Desktop/image.jpg')  # take a picture
        # FIX: build a fresh message per event; re-using the module-level
        # ``msg`` attached every previous photo again on each send.
        alert = MIMEMultipart()
        alert["Subject"] = "Warning"
        alert["From"] = user
        alert["To"] = to
        alert.attach(MIMEText("FXCK difficult project!"))
        # FIX: close the image file handle (was leaked by a bare open()).
        with open("/home/pi/Desktop/image.jpg", 'rb') as img:
            att1 = MIMEApplication(img.read())
        att1.add_header('Content-Disposition', 'attachment', filename="test.jpg")
        alert.attach(att1)
        smtpObj = smtplib.SMTP('smtp.gmail.com', 587)
        smtpObj.starttls()
        # FIX: use the credential variables instead of duplicated literals.
        smtpObj.login(user, pwd)
        smtpObj.sendmail(user, to, alert.as_string())  # send the alert email
        # FIX: was ``smtpObj.close`` (missing parens — never actually called).
        smtpObj.quit()
        time.sleep(5)
11,631 | 3519c5c976e7131de22e8b9870557fe24f832b82 | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class User(models.Model):
    # Registered user account.
    # NOTE(review): password is a plain CharField; no hashing is visible in
    # this model — confirm it is hashed before save elsewhere.
    email = models.CharField(max_length = 255)
    first_name = models.CharField(max_length = 255)
    last_name = models.CharField(max_length = 255)
    password = models.CharField(max_length = 255)
    # Optional role marker and profile text.
    admin = models.CharField(max_length = 255, null= True, blank = True)
    description = models.CharField(max_length = 255, null = True, blank = True)
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
class Message(models.Model):
    # A message left on a user's wall by a visitor.
    # NOTE(review): Django appends "_id" to ForeignKey column names, so these
    # fields produce "user_id_id" / "visitor_id_id" database columns.
    content = models.CharField(max_length = 255, null = True, blank = True)
    user_id = models.ForeignKey(User, related_name="usermessage", on_delete = models.CASCADE)
    visitor_id = models.ForeignKey(User, related_name="visitormessage", on_delete = models.CASCADE)
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
class Comment(models.Model):
    # A comment attached to a wall Message, written by a User.
    content = models.CharField(max_length = 255, null = True, blank = True)
    message_id = models.ForeignKey(Message, related_name="messagecomment", on_delete = models.CASCADE)
    user_id = models.ForeignKey(User, related_name="commentuser", on_delete = models.CASCADE)
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
|
11,632 | 661b44a6fb7c4880e5ad6126f437cb583fdb73b1 | #!/usr/bin/env python
from BeautifulSoup import BeautifulSoup
import re
import os
import glob
import sys
# Maps each anchor symbol name -> (source HTML path, parsed soup).
g_symbol2data = {}
# Maps each symbol name -> {section title: extracted section text}.
g_symbol2sections = {}
# Section headings recognised in a HyperSpec entry page.
g_section_tags = [ "Syntax:"
                  ,"Description:"
                  ,"Method Signatures:"
                  ,"Examples:"
                  ,"Affected By:"
                  ,"Exceptional Situations:"
                  ,"See Also:"
                  ,"Side Effects:"
                  ,"Notes."
                  ]
def read_file(path):
    """Parse the HTML file at *path* and return its BeautifulSoup tree."""
    # FIX: close the handle explicitly; the original ``open(path).readlines()``
    # leaked the file descriptor.
    f = open(path)
    try:
        lines = f.readlines()
    finally:
        f.close()
    return BeautifulSoup(''.join(lines))
def vim_as_tag(s):
    """Return *s* unchanged.

    Intended to wrap known symbols in |tag| markers, but the naive replace
    produced bad output, so the implementation stays disabled.
    """
    # XXX: Fixme. Doesn't work.
    # for symbol in g_symbol2sections.keys():
    #     s = s.replace(symbol, '|' + symbol + '|')
    return s
def xml_markup_fixup(s):
    """Undo HTML entity escapes and drop stray spaces before punctuation."""
    # Replacement order matters: entities first, then the space cleanups.
    for old, new in (("&amp;", "&"), ("&quot;", '"'), ("&lt;", "<"),
                     ("&gt;", ">"), (" .", "."), (" ;", ";"), (" ,", ",")):
        s = s.replace(old, new)
    return s
def vim_description(s, convert_symbol_to_tags=False):
    """Word-wrap *s* (one paragraph per input line) and indent by 8 spaces.

    Earmuffs (``*``) are escaped so vim's help syntax does not treat them
    as tag markers.
    """
    if convert_symbol_to_tags:
        s = vim_as_tag(s)
    text = s.replace('*', '\\*')
    wrapped = []
    for para in text.split('\n'):
        current = []
        for word in para.split():
            # start a new output line once the current one would reach 79 cols
            if len(' '.join(current)) + len(word) < 79:
                current.append(word)
            else:
                wrapped.append(' '.join(current))
                current = [word]
        wrapped.append(' '.join(current))
    indent = ' ' * 8
    return indent + ('\n' + indent).join(wrapped)
def extract_section(soup, symbol):
    """Return (next soup node, current section contents, current section name)"""
    section = []
    # assume this only happens at the end of the file
    if soup.contents[0] == u'\n':
        return None, [], ""
    if len(soup.contents) == 2:
        if soup.contents[1].strip() == u'None.':
            # the section is noted as empty, forward to next section
            return soup.nextSibling.nextSibling, [], ""
    # it's most likely the title is here, but not guaranteed. oh well!
    title = soup.contents[0].string
    #print >> sys.stderr, "SYMBOL:", symbol, "[", title, "]"
    soup = soup.nextSibling.nextSibling
    lines = []
    # Collect sibling text until the next "Something:" heading appears.
    while soup and len(soup.findAll(text=re.compile("[A-Z][a-z]+:"))) == 0:
        # flatten every unicode descendant of this node into one line
        # (fix for Examples)
        line = [e.strip() for e in soup.recursiveChildGenerator()
                if isinstance(e, unicode)]
        lines.append(' '.join(line))
        soup = soup.nextSibling
    if len(lines):
        soup_data = '\n'.join(lines)
        # xml-ish markup fixup
        section = xml_markup_fixup(soup_data)
    return soup, section, title
def extract_symbol_info(symbol, soup):
    "Expects a lower-case unicode symbol name"
    #
    # read through the sections, populating g_symbol2sections[symbol]
    #
    g_symbol2sections[symbol] = {}
    # special-case Syntax (first item) to start from a known location
    soup = soup.find(text=u'Syntax:').parent.parent
    soup, section, title = extract_section(soup, symbol)
    if title and section:
        g_symbol2sections[symbol][title] = section
    # rest of the sections
    while soup:
        soup, section, title = extract_section(soup, symbol)
        if title and section:
            g_symbol2sections[symbol][title] = section
#
# ==================================================================================
#
# -4:-3 bombs on f_upper_.htm (more than one syntax item)
# -8:-5 works
functions = glob.glob("/usr/share/doc/hyperspec/Body/f_*")
macros = glob.glob("/usr/share/doc/hyperspec/Body/m_*")
# Pass 1: parse every function/macro page and record each anchor symbol.
for path in functions + macros:
    print >> sys.stderr, "Reading", path
    soup = read_file(path)
    symbols = soup.findAll(lambda tag: tag.name == u"a" and tag.get('name', None) != None)
    for symbol in symbols:
        s = symbol.get('name')
        g_symbol2data[s] = (path, soup)
# Pass 2: extract the per-section text for every discovered symbol.
for symbol, (path, soup) in g_symbol2data.items():
    print >> sys.stderr, " +", symbol
    extract_symbol_info(symbol, soup)
symbols = g_symbol2sections.keys()
symbols.sort()
# Pass 3: emit vim help-file formatted documentation on stdout.
for symbol in symbols:
    # right-aligned *tag* marker for vim's help tag index
    print " "*(79-len(symbol)-3), "*%s*" % symbol.lower()
    s = g_symbol2sections[symbol]
    if "Syntax:" in s:
        syntax = s["Syntax:"]
        lines = syntax.split('\n')
        for line in lines:
            line = line.strip().lower()
            if line.startswith(symbol.lower()+" "):
                parts = line.split()
                form = [parts[0]]
                i = 1
                # wrap each argument: &keywords pass through, others in {braces}
                for arg in parts[i:]:
                    arg.strip()
                    if arg == u"=>":
                        break
                    if arg.startswith(u'&'):
                        fmt = '%s'
                    else:
                        fmt = '{%s}'
                    form.append(fmt % arg)
                    i += 1
                print " ".join(form + parts[i:])
    if "Arguments and Values:" in s:
        print vim_description(s["Arguments and Values:"])
    if "Description:" in s:
        print " "*8 + "Description:\n", vim_description(s["Description:"], True)
    #if "Examples:" in s:
    #    print " "*8 + "Examples:\n", s["Examples:"] #vim_description(s["Examples:"])
    if "Exceptional Situations:" in s:
        print " "*8 + "Exceptional Situations:\n", vim_description(s["Exceptional Situations:"], True)
    if "See Also:" in s:
        print " "*8 + "See Also:\n", vim_description(s["See Also:"], True)
|
11,633 | 89b5489bb04c73f0d8c9e0694f06fe5e1f25392f | from . base import * # NOQA
# FIX: import unittest explicitly; the entry point previously relied on one
# of the star-imported test modules re-exporting it.
import unittest

from . info import *  # NOQA
from . peers import *  # NOQA
from . signal import *  # NOQA

if __name__ == "__main__":
    unittest.main()
11,634 | debfdf6f15ce16ec2700e02fa997e5f6aeacc927 | from urllib.request import urlopen
from urllib.error import HTTPError
import json
from statbank import config
from statbank.url import URL
from statbank.error import RequestError, StatbankError
class Request:
    """Represent a request to the statbank api.

    After initializing the request, one can read an appropriate property to
    parse the response as the corresponding format.
    """
    def __init__(self, *segments, **params):
        """Build the request URL.

        Positional arguments become url segments appended to the base url,
        while keyword arguments turn into query parameters.
        """
        self.url = URL(*segments, **params)
    @property
    def raw(self):
        """Make request to url and return the raw response object.

        NOTE: every access performs a fresh network round-trip.
        """
        try:
            return urlopen(str(self.url))
        except HTTPError as error:
            try:
                # parse error body as json and use message property as error message
                parsed = self._parsejson(error)
                raise RequestError(parsed['message']) from None
            except ValueError:
                # when error body is not valid json, error might be caused by server
                raise StatbankError() from None
    @property
    def json(self):
        """Parse raw response as json and return nested dicts/lists.
        """
        return self._parsejson(self.raw)
    @property
    def csv(self):
        """Parse raw response as csv and lazily yield one dict per data row.

        The first csv line supplies the keys (header row).
        """
        lines = self._parsecsv(self.raw)
        # set keys from header line (first line)
        keys = next(lines)
        for line in lines:
            yield dict(zip(keys, line))
    @staticmethod
    def _parsecsv(x):
        """Deserialize file-like object containing csv to a Python generator.

        NOTE(review): naive split — assumes config.DELIMITER never occurs
        inside quoted fields; confirm against the API's csv dialect.
        """
        for line in x:
            # decode as utf-8, whitespace-strip and split on delimiter
            yield line.decode('utf-8').strip().split(config.DELIMITER)
    @staticmethod
    def _parsejson(x):
        """Deserialize file-like object containing json to a Python object.
        """
        return json.loads(x.read().decode('utf-8'))
11,635 | 9a68ca4c50d0961fc7641ab64b336f1ca4b084cf | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author hongbin@youzan.com
import os
import math
from pyspark import SQLContext
from pyspark import SparkContext
import ta.adx as tr
local_path = os.path.dirname(__file__)
import bintrade_tests.sparklib as slib
def test_tr():
dSC = [
{"symbol":"xxxx", "date":"39855", "high":30.1983,"low":29.4072,"close":29.8720},
{"symbol":"xxxx", "date":"39856", "high":30.2776,"low":29.3182,"close":30.2381},
{"symbol":"xxxx", "date":"39857", "high":30.4458,"low":29.9611,"close":30.0996},
{"symbol":"xxxx", "date":"39861", "high":29.3478,"low":28.7443,"close":28.9028},
{"symbol":"xxxx", "date":"39862", "high":29.3477,"low":28.5566,"close":28.9225},
{"symbol":"xxxx", "date":"39863", "high":29.2886,"low":28.4081,"close":28.4775},
{"symbol":"xxxx", "date":"39864", "high":28.8334,"low":28.0818,"close":28.5566},
{"symbol":"xxxx", "date":"39867", "high":28.7346,"low":27.4289,"close":27.5576},
{"symbol":"xxxx", "date":"39868", "high":28.6654,"low":27.6565,"close":28.4675},
{"symbol":"xxxx", "date":"39869", "high":28.8532,"low":27.8345,"close":28.2796},
{"symbol":"xxxx", "date":"39870", "high":28.6356,"low":27.3992,"close":27.4882},
{"symbol":"xxxx", "date":"39871", "high":27.6761,"low":27.0927,"close":27.2310},
{"symbol":"xxxx", "date":"39874", "high":27.2112,"low":26.1826,"close":26.3507},
{"symbol":"xxxx", "date":"39875", "high":26.8651,"low":26.1332,"close":26.3309},
{"symbol":"xxxx", "date":"39876", "high":27.4090,"low":26.6277,"close":27.0333},
{"symbol":"xxxx", "date":"39877", "high":26.9441,"low":26.1332,"close":26.2221},
{"symbol":"xxxx", "date":"39878", "high":26.5189,"low":25.4307,"close":26.0144},
{"symbol":"xxxx", "date":"39881", "high":26.5189,"low":25.3518,"close":25.4605},
{"symbol":"xxxx", "date":"39882", "high":27.0927,"low":25.8760,"close":27.0333},
{"symbol":"xxxx", "date":"39883", "high":27.6860,"low":26.9640,"close":27.4487},
{"symbol":"xxxx", "date":"39884", "high":28.4477,"low":27.1421,"close":28.3586},
{"symbol":"xxxx", "date":"39885", "high":28.5267,"low":28.0123,"close":28.4278},
{"symbol":"xxxx", "date":"39888", "high":28.6654,"low":27.8840,"close":27.9530},
{"symbol":"xxxx", "date":"39889", "high":29.0116,"low":27.9928,"close":29.0116},
{"symbol":"xxxx", "date":"39890", "high":29.8720,"low":28.7643,"close":29.3776},
{"symbol":"xxxx", "date":"39891", "high":29.8028,"low":29.1402,"close":29.3576},
{"symbol":"xxxx", "date":"39892", "high":29.7529,"low":28.7127,"close":28.9107},
{"symbol":"xxxx", "date":"39895", "high":30.6546,"low":28.9290,"close":30.6149},
{"symbol":"xxxx", "date":"39896", "high":30.5951,"low":30.0304,"close":30.0502},
{"symbol":"xxxx", "date":"39897", "high":30.7635,"low":29.3863,"close":30.1890},
{"symbol":"xxxx", "date":"39898", "high":31.1698,"low":30.1365,"close":31.1202},
{"symbol":"xxxx", "date":"39899", "high":30.8923,"low":30.4267,"close":30.5356},
{"symbol":"xxxx", "date":"39902", "high":30.0402,"low":29.3467,"close":29.7827},
{"symbol":"xxxx", "date":"39903", "high":30.6645,"low":29.9906,"close":30.0402},
{"symbol":"xxxx", "date":"39904", "high":30.5951,"low":29.5152,"close":30.4861},
{"symbol":"xxxx", "date":"39905", "high":31.9724,"low":30.9418,"close":31.4670},
{"symbol":"xxxx", "date":"39906", "high":32.1011,"low":31.5364,"close":32.0515},
{"symbol":"xxxx", "date":"39909", "high":32.0317,"low":31.3580,"close":31.9724},
{"symbol":"xxxx", "date":"39910", "high":31.6255,"low":30.9220,"close":31.1302},
{"symbol":"xxxx", "date":"39911", "high":31.8534,"low":31.1994,"close":31.6551},
{"symbol":"xxxx", "date":"39912", "high":32.7055,"low":32.1308,"close":32.6360},
{"symbol":"xxxx", "date":"39916", "high":32.7648,"low":32.2298,"close":32.5866},
{"symbol":"xxxx", "date":"39917", "high":32.5766,"low":31.9724,"close":32.1903},
{"symbol":"xxxx", "date":"39918", "high":32.1308,"low":31.5562,"close":32.1011},
{"symbol":"xxxx", "date":"39919", "high":33.1215,"low":32.2101,"close":32.9335},
{"symbol":"xxxx", "date":"39920", "high":33.1909,"low":32.6262,"close":33.0027},
{"symbol":"xxxx", "date":"39923", "high":32.5172,"low":31.7642,"close":31.9425},
{"symbol":"xxxx", "date":"39924", "high":32.4379,"low":31.7840,"close":32.3883},
{"symbol":"xxxx", "date":"39925", "high":33.2207,"low":32.0912,"close":32.4875},
{"symbol":"xxxx", "date":"39926", "high":32.8343,"low":32.1903,"close":32.8046},
{"symbol":"xxxx", "date":"39927", "high":33.6169,"low":32.7648,"close":33.3792},
{"symbol":"xxxx", "date":"39930", "high":33.7459,"low":33.0423,"close":33.4188},
{"symbol":"xxxx", "date":"39931", "high":33.5971,"low":33.0522,"close":33.1711},
{"symbol":"xxxx", "date":"39932", "high":34.0825,"low":33.3297,"close":33.6268},
{"symbol":"xxxx", "date":"39933", "high":34.5780,"low":33.7260,"close":33.9638},
{"symbol":"xxxx", "date":"39934", "high":34.2214,"low":33.6962,"close":34.0529},
{"symbol":"xxxx", "date":"39937", "high":34.7663,"low":34.2015,"close":34.7266},
{"symbol":"xxxx", "date":"39938", "high":34.7364,"low":34.3105,"close":34.6969},
{"symbol":"xxxx", "date":"39939", "high":35.0140,"low":34.1420,"close":34.7067},
{"symbol":"xxxx", "date":"39940", "high":34.9447,"low":33.5674,"close":33.8944},
{"symbol":"xxxx", "date":"39941", "high":34.4194,"low":33.5674,"close":33.9142},
{"symbol":"xxxx", "date":"39944", "high":34.3995,"low":33.3692,"close":34.0331},
{"symbol":"xxxx", "date":"39945", "high":34.1619,"low":33.2108,"close":33.6169},
{"symbol":"xxxx", "date":"39946", "high":33.3396,"low":32.6560,"close":32.7154},
{"symbol":"xxxx", "date":"39947", "high":33.3892,"low":32.7747,"close":33.0819},
{"symbol":"xxxx", "date":"39948", "high":33.5079,"low":32.9235,"close":33.0621},
{"symbol":"xxxx", "date":"39951", "high":33.9638,"low":33.0820,"close":33.9240},
{"symbol":"xxxx", "date":"39952", "high":34.4194,"low":33.6368,"close":34.0825},
{"symbol":"xxxx", "date":"39953", "high":34.7167,"low":33.8647,"close":33.9638},
{"symbol":"xxxx", "date":"39954", "high":33.9440,"low":33.0027,"close":33.3396},
{"symbol":"xxxx", "date":"39955", "high":33.6567,"low":33.0127,"close":33.2306},
{"symbol":"xxxx", "date":"39959", "high":34.5086,"low":32.8738,"close":34.4691},
{"symbol":"xxxx", "date":"39960", "high":34.8653,"low":34.1124,"close":34.2312},
{"symbol":"xxxx", "date":"39961", "high":34.7464,"low":33.8944,"close":34.6275},
{"symbol":"xxxx", "date":"39962", "high":35.1725,"low":34.4393,"close":35.0536},
{"symbol":"xxxx", "date":"39965", "high":36.1633,"low":35.2816,"close":36.0543},
{"symbol":"xxxx", "date":"39966", "high":36.4504,"low":35.7768,"close":36.1038},
{"symbol":"xxxx", "date":"39967", "high":36.0344,"low":35.5985,"close":35.9948},
{"symbol":"xxxx", "date":"39968", "high":36.4504,"low":36.0048,"close":36.4011},
{"symbol":"xxxx", "date":"39969", "high":36.7380,"low":36.0839,"close":36.4407},
{"symbol":"xxxx", "date":"39972", "high":36.6091,"low":35.7868,"close":36.3318},
{"symbol":"xxxx", "date":"39973", "high":36.8270,"low":36.3318,"close":36.6091},
{"symbol":"xxxx", "date":"39974", "high":36.8369,"low":35.9552,"close":36.4803},
{"symbol":"xxxx", "date":"39975", "high":36.8865,"low":36.4110,"close":36.4803},
{"symbol":"xxxx", "date":"39976", "high":36.3802,"low":35.8659,"close":36.3119},
{"symbol":"xxxx", "date":"39979", "high":35.9948,"low":35.2516,"close":35.5688},
{"symbol":"xxxx", "date":"39980", "high":35.8561,"low":35.1923,"close":35.2220},
{"symbol":"xxxx", "date":"39981", "high":35.8759,"low":35.1230,"close":35.5578},
{"symbol":"xxxx", "date":"39982", "high":35.7273,"low":35.2418,"close":35.4896},
{"symbol":"xxxx", "date":"39983", "high":36.0688,"low":35.6225,"close":35.8703},
{"symbol":"xxxx", "date":"39986", "high":35.6025,"low":34.7394,"close":34.7990},
{"symbol":"xxxx", "date":"39987", "high":34.9775,"low":34.4915,"close":34.7196},
{"symbol":"xxxx", "date":"39988", "high":35.5827,"low":34.9974,"close":35.3049},
{"symbol":"xxxx", "date":"39989", "high":36.0688,"low":34.9974,"close":35.9993},
{"symbol":"xxxx", "date":"39990", "high":36.2076,"low":35.7612,"close":36.0786},
{"symbol":"xxxx", "date":"39993", "high":36.4555,"low":35.8307,"close":36.1580},
{"symbol":"xxxx", "date":"39994", "high":36.4359,"low":35.8207,"close":36.0885},
{"symbol":"xxxx", "date":"39995", "high":36.5448,"low":36.0985,"close":36.1084},
{"symbol":"xxxx", "date":"39996", "high":35.8107,"low":35.2156,"close":35.3149},
{"symbol":"xxxx", "date":"40000", "high":35.2552,"low":34.7593,"close":35.1263},
{"symbol":"xxxx", "date":"40001", "high":35.2058,"low":34.2335,"close":34.2534},
{"symbol":"xxxx", "date":"40002", "high":34.5908,"low":34.0252,"close":34.4320},
{"symbol":"xxxx", "date":"40003", "high":34.7296,"low":34.3725,"close":34.4915},
{"symbol":"xxxx", "date":"40004", "high":34.8586,"low":34.2833,"close":34.6403},
{"symbol":"xxxx", "date":"40007", "high":35.3149,"low":34.2038,"close":35.3049},
{"symbol":"xxxx", "date":"40008", "high":35.5034,"low":35.1165,"close":35.4338},
{"symbol":"xxxx", "date":"40009", "high":36.6342,"low":35.8505,"close":36.6243},
{"symbol":"xxxx", "date":"40010", "high":37.1401,"low":36.4259,"close":37.0608},
{"symbol":"xxxx", "date":"40011", "high":37.2691,"low":36.8722,"close":37.2592},
{"symbol":"xxxx", "date":"40014", "high":37.6956,"low":37.3087,"close":37.6162},
{"symbol":"xxxx", "date":"40015", "high":37.8742,"low":37.3385,"close":37.8742},
{"symbol":"xxxx", "date":"40016", "high":38.3801,"low":37.8245,"close":38.1917},
{"symbol":"xxxx", "date":"40017", "high":39.1736,"low":38.0825,"close":39.0347},
{"symbol":"xxxx", "date":"40018", "high":39.0546,"low":38.4693,"close":38.7372},
{"symbol":"xxxx", "date":"40021", "high":39.0944,"low":38.5586,"close":39.0347},
{"symbol":"xxxx", "date":"40022", "high":39.2729,"low":38.6181,"close":39.1538},
{"symbol":"xxxx", "date":"40023", "high":39.1141,"low":38.6876,"close":39.0249},
{"symbol":"xxxx", "date":"40024", "high":39.8581,"low":39.1935,"close":39.2530},
{"symbol":"xxxx", "date":"40025", "high":39.5330,"low":39.0944,"close":39.1339},
{"symbol":"xxxx", "date":"40028", "high":39.7392,"low":39.3225,"close":39.7193},
{"symbol":"xxxx", "date":"40029", "high":39.8680,"low":39.4513,"close":39.7193},
{"symbol":"xxxx", "date":"40030", "high":39.8185,"low":39.1439,"close":39.4117},
{"symbol":"xxxx", "date":"40031", "high":39.6102,"low":38.9257,"close":39.0644},
{"symbol":"xxxx", "date":"40032", "high":39.7491,"low":39.2729,"close":39.5605},
{"symbol":"xxxx", "date":"40035", "high":39.5309,"low":39.0249,"close":39.2827},
{"symbol":"xxxx", "date":"40036", "high":39.1836,"low":38.7372,"close":38.9454},
{"symbol":"xxxx", "date":"40037", "high":39.8978,"low":38.8860,"close":39.5506},
{"symbol":"xxxx", "date":"40038", "high":39.8383,"low":39.3225,"close":39.7689},
{"symbol":"xxxx", "date":"40039", "high":39.6399,"low":38.9653,"close":39.3126},
{"symbol":"xxxx", "date":"40042", "high":38.6975,"low":38.1520,"close":38.1717},
{"symbol":"xxxx", "date":"40043", "high":38.7968,"low":38.3007,"close":38.7174},
{"symbol":"xxxx", "date":"40044", "high":39.0844,"low":38.2312,"close":38.9852},
{"symbol":"xxxx", "date":"40045", "high":39.5209,"low":38.8860,"close":39.4415},
{"symbol":"xxxx", "date":"40046", "high":40.0368,"low":39.4217,"close":39.9671},
{"symbol":"xxxx", "date":"40049", "high":40.2450,"low":39.7788,"close":39.9276},
{"symbol":"xxxx", "date":"40050", "high":40.4335,"low":39.9276,"close":40.0468},
{"symbol":"xxxx", "date":"40051", "high":40.2757,"low":39.7589,"close":39.9771},
{"symbol":"xxxx", "date":"40052", "high":40.1458,"low":39.3522,"close":40.0763},
{"symbol":"xxxx", "date":"40053", "high":40.7509,"low":39.8383,"close":40.1161},
{"symbol":"xxxx", "date":"40056", "high":39.8780,"low":39.4713,"close":39.7093},
{"symbol":"xxxx", "date":"40057", "high":40.2649,"low":38.8464,"close":38.9653},
{"symbol":"xxxx", "date":"40058", "high":39.1538,"low":38.7272,"close":38.9356},
{"symbol":"xxxx", "date":"40059", "high":39.2532,"low":38.7075,"close":39.1935},
{"symbol":"xxxx", "date":"40060", "high":40.0566,"low":39.2035,"close":40.0368},
{"symbol":"xxxx", "date":"40064", "high":40.4534,"low":40.1359,"close":40.4137},
{"symbol":"xxxx", "date":"40065", "high":40.9593,"low":40.2649,"close":40.7609},
{"symbol":"xxxx", "date":"40066", "high":41.1973,"low":40.6717,"close":41.1478},
{"symbol":"xxxx", "date":"40067", "high":41.2867,"low":40.8898,"close":41.1874},
{"symbol":"xxxx", "date":"40070", "high":41.3759,"low":40.8799,"close":41.3461},
{"symbol":"xxxx", "date":"40071", "high":41.6140,"low":41.2371,"close":41.4750},
{"symbol":"xxxx", "date":"40072", "high":42.1397,"low":41.5049,"close":42.0802},
{"symbol":"xxxx", "date":"40073", "high":42.3183,"low":41.8719,"close":42.0703},
{"symbol":"xxxx", "date":"40074", "high":42.3193,"low":41.9122,"close":42.1406},
{"symbol":"xxxx", "date":"40077", "high":42.3988,"low":41.8625,"close":42.2696},
{"symbol":"xxxx", "date":"40078", "high":42.5279,"low":42.1404,"close":42.3492},
{"symbol":"xxxx", "date":"40079", "high":42.8655,"low":42.0781,"close":42.1505},
{"symbol":"xxxx", "date":"40080", "high":42.3888,"low":41.4752,"close":41.7732},
{"symbol":"xxxx", "date":"40081", "high":41.7931,"low":41.2867,"close":41.4059},
{"symbol":"xxxx", "date":"40084", "high":42.3591,"low":41.5944,"close":42.1107},
{"symbol":"xxxx", "date":"40085", "high":42.3194,"low":41.7335,"close":41.9221},
{"symbol":"xxxx", "date":"40086", "high":42.3019,"low":41.3065,"close":41.9519},
{"symbol":"xxxx", "date":"40087", "high":41.8328,"low":40.7107,"close":40.7107},
{"symbol":"xxxx", "date":"40088", "high":40.9590,"low":40.4327,"close":40.5916},
{"symbol":"xxxx", "date":"40091", "high":41.1079,"low":40.5419,"close":40.9192},
{"symbol":"xxxx", "date":"40092", "high":41.8427,"low":40.9298,"close":41.6440},
{"symbol":"xxxx", "date":"40093", "high":41.7832,"low":41.4752,"close":41.7633},
{"symbol":"xxxx", "date":"40094", "high":42.3193,"low":41.8427,"close":41.9420},
{"symbol":"xxxx", "date":"40095", "high":42.2498,"low":41.7633,"close":42.1803},
{"symbol":"xxxx", "date":"40098", "high":42.5676,"low":41.9817,"close":42.2696},
{"symbol":"xxxx", "date":"40099", "high":42.4485,"low":42.0711,"close":42.2795},
{"symbol":"xxxx", "date":"40100", "high":42.9876,"low":42.5476,"close":42.8555},
{"symbol":"xxxx", "date":"40101", "high":42.8158,"low":42.5676,"close":42.7561},
{"symbol":"xxxx", "date":"40102", "high":42.6867,"low":42.1803,"close":42.4781},
{"symbol":"xxxx", "date":"40105", "high":42.9647,"low":42.2895,"close":42.9052},
{"symbol":"xxxx", "date":"40106", "high":43.1533,"low":42.6370,"close":42.9151},
{"symbol":"xxxx", "date":"40107", "high":43.5109,"low":42.7561,"close":42.8356},
{"symbol":"xxxx", "date":"40108", "high":43.1832,"low":42.4881,"close":43.0044},
{"symbol":"xxxx", "date":"40109", "high":43.4215,"low":42.7165,"close":42.8257},
{"symbol":"xxxx", "date":"40112", "high":43.4513,"low":42.4881,"close":42.6867},
{"symbol":"xxxx", "date":"40113", "high":42.8059,"low":41.8825,"close":42.0413},
{"symbol":"xxxx", "date":"40114", "high":42.0214,"low":41.0086,"close":41.0980},
{"symbol":"xxxx", "date":"40115", "high":41.8925,"low":41.2470,"close":41.7931},
{"symbol":"xxxx", "date":"40116", "high":41.8526,"low":40.6313,"close":40.6711},
{"symbol":"xxxx", "date":"40119", "high":41.2767,"low":40.3532,"close":40.8399},
{"symbol":"xxxx", "date":"40120", "high":41.0285,"low":40.5519,"close":40.9689},
{"symbol":"xxxx", "date":"40121", "high":41.5747,"low":40.9888,"close":41.0384},
{"symbol":"xxxx", "date":"40122", "high":42.1009,"low":41.4852,"close":42.0512},
{"symbol":"xxxx", "date":"40123", "high":42.3492,"low":41.7832,"close":42.2995},
{"symbol":"xxxx", "date":"40126", "high":43.2129,"low":42.5775,"close":43.2030},
{"symbol":"xxxx", "date":"40127", "high":43.4627,"low":43.0938,"close":43.3122},
{"symbol":"xxxx", "date":"40128", "high":43.8286,"low":43.3024,"close":43.5903},
{"symbol":"xxxx", "date":"40129", "high":43.8484,"low":43.2428,"close":43.3421},
{"symbol":"xxxx", "date":"40130", "high":43.8286,"low":43.3221,"close":43.6995},
{"symbol":"xxxx", "date":"40133", "high":44.3350,"low":43.8088,"close":44.1464},
{"symbol":"xxxx", "date":"40134", "high":44.2854,"low":43.9378,"close":44.2854},
{"symbol":"xxxx", "date":"40135", "high":44.1761,"low":43.7392,"close":44.0401},
{"symbol":"xxxx", "date":"40136", "high":43.7690,"low":43.0442,"close":43.3520},
{"symbol":"xxxx", "date":"40137", "high":43.2428,"low":42.9747,"close":43.1336},
{"symbol":"xxxx", "date":"40140", "high":44.0868,"low":43.5506,"close":43.8286},
{"symbol":"xxxx", "date":"40141", "high":43.8384,"low":43.4215,"close":43.6796},
{"symbol":"xxxx", "date":"40142", "high":43.9080,"low":43.6995,"close":43.8683},
{"symbol":"xxxx", "date":"40144", "high":43.5208,"low":42.5973,"close":43.2030},
{"symbol":"xxxx", "date":"40147", "high":43.3122,"low":42.8059,"close":43.2528},
{"symbol":"xxxx", "date":"40148", "high":43.9876,"low":43.5704,"close":43.6995},
{"symbol":"xxxx", "date":"40149", "high":44.1761,"low":43.6697,"close":43.7591},
{"symbol":"xxxx", "date":"40150", "high":44.1861,"low":43.5704,"close":43.5803},
{"symbol":"xxxx", "date":"40151", "high":44.4144,"low":43.3520,"close":43.8088},
{"symbol":"xxxx", "date":"40154", "high":43.9776,"low":43.5109,"close":43.6002},
{"symbol":"xxxx", "date":"40155", "high":43.6896,"low":43.0540,"close":43.3341},
{"symbol":"xxxx", "date":"40156", "high":43.8088,"low":43.0143,"close":43.7690},
{"symbol":"xxxx", "date":"40157", "high":44.2257,"low":43.9279,"close":43.9876},
{"symbol":"xxxx", "date":"40158", "high":44.2754,"low":43.6400,"close":43.8188},
{"symbol":"xxxx", "date":"40161", "high":44.2854,"low":43.8782,"close":44.2357},
{"symbol":"xxxx", "date":"40162", "high":44.3549,"low":43.8584,"close":43.9876},
{"symbol":"xxxx", "date":"40163", "high":44.3846,"low":43.9776,"close":44.0469},
{"symbol":"xxxx", "date":"40164", "high":43.8981,"low":43.4513,"close":43.5109},
{"symbol":"xxxx", "date":"40165", "high":44.2446,"low":43.7671,"close":44.2246},
{"symbol":"xxxx", "date":"40168", "high":44.8911,"low":44.4335,"close":44.7220},
{"symbol":"xxxx", "date":"40169", "high":45.0502,"low":44.7916,"close":44.9905},
{"symbol":"xxxx", "date":"40170", "high":45.3287,"low":44.9707,"close":45.3188},
{"symbol":"xxxx", "date":"40171", "high":45.7366,"low":45.3586,"close":45.7366},
{"symbol":"xxxx", "date":"40175", "high":46.0548,"low":45.6968,"close":45.9753},
{"symbol":"xxxx", "date":"40176", "high":46.0052,"low":45.7466,"close":45.7863},
{"symbol":"xxxx", "date":"40177", "high":46.0151,"low":45.7167,"close":45.9256},
{"symbol":"xxxx", "date":"40178", "high":46.0301,"low":45.5078,"close":45.5078},
{"symbol":"xxxx", "date":"40182", "high":46.2389,"low":46.0251,"close":46.1743},
{"symbol":"xxxx", "date":"40183", "high":46.2538,"low":45.9157,"close":46.1743},
{"symbol":"xxxx", "date":"40184", "high":46.3036,"low":45.8262,"close":45.8957},
{"symbol":"xxxx", "date":"40185", "high":46.0251,"low":45.6769,"close":45.9256},
{"symbol":"xxxx", "date":"40186", "high":46.3036,"low":45.6868,"close":46.3036},
{"symbol":"xxxx", "date":"40189", "high":46.3931,"low":45.8759,"close":46.1146},
{"symbol":"xxxx", "date":"40190", "high":45.8957,"low":45.2890,"close":45.5376},
{"symbol":"xxxx", "date":"40191", "high":46.2439,"low":45.3685,"close":46.1046},
{"symbol":"xxxx", "date":"40192", "high":46.2737,"low":45.9753,"close":46.1444},
{"symbol":"xxxx", "date":"40193", "high":46.3036,"low":45.4083,"close":45.6073},
{"symbol":"xxxx", "date":"40197", "high":46.3931,"low":45.7068,"close":46.3434},
{"symbol":"xxxx", "date":"40198", "high":46.3572,"low":45.1896,"close":45.6769},
{"symbol":"xxxx", "date":"40199", "high":46.1046,"low":45.0602,"close":45.2492},
{"symbol":"xxxx", "date":"40200", "high":45.2392,"low":43.8069,"close":43.9263},
{"symbol":"xxxx", "date":"40203", "high":44.3639,"low":43.8865,"close":44.0755},
{"symbol":"xxxx", "date":"40204", "high":44.6524,"low":43.8169,"close":44.1152},
{"symbol":"xxxx", "date":"40205", "high":44.6126,"low":43.7771,"close":44.4635},
{"symbol":"xxxx", "date":"40206", "high":44.1949,"low":43.0908,"close":43.3194},
{"symbol":"xxxx", "date":"40207", "high":43.7869,"low":42.4044,"close":42.5635},
{"symbol":"xxxx", "date":"40210", "high":43.0515,"low":42.6530,"close":43.0310},
{"symbol":"xxxx", "date":"40211", "high":43.5483,"low":42.8022,"close":43.4189},
{"symbol":"xxxx", "date":"40212", "high":43.7372,"low":43.1901,"close":43.6576},
{"symbol":"xxxx", "date":"40213", "high":43.4289,"low":42.3944,"close":42.3944},
{"symbol":"xxxx", "date":"40214", "high":42.7923,"low":41.8969,"close":42.7524},
{"symbol":"xxxx", "date":"40217", "high":42.9514,"low":42.4143,"close":42.4442},
{"symbol":"xxxx", "date":"40218", "high":43.2797,"low":42.5337,"close":42.8818},
{"symbol":"xxxx", "date":"40219", "high":43.0808,"low":42.5237,"close":42.7923},
{"symbol":"xxxx", "date":"40220", "high":43.5582,"low":42.5337,"close":43.4388},
{"symbol":"xxxx", "date":"40221", "high":43.6478,"low":42.9315,"close":43.5283},
{"symbol":"xxxx", "date":"40225", "high":44.1152,"low":43.6179,"close":44.0854},
{"symbol":"xxxx", "date":"40226", "high":44.3341,"low":44.0257,"close":44.3341},
{"symbol":"xxxx", "date":"40227", "high":44.6921,"low":44.2146,"close":44.6126},
{"symbol":"xxxx", "date":"40228", "high":44.8115,"low":44.3838,"close":44.5927},
{"symbol":"xxxx", "date":"40231", "high":44.7916,"low":44.3241,"close":44.5032},
{"symbol":"xxxx", "date":"40232", "high":44.5032,"low":43.7174,"close":43.9263},
{"symbol":"xxxx", "date":"40233", "high":44.5529,"low":44.0854,"close":44.3739},
{"symbol":"xxxx", "date":"40234", "high":44.4635,"low":43.5980,"close":44.3639},
{"symbol":"xxxx", "date":"40235", "high":44.6225,"low":44.1649,"close":44.5230},
{"symbol":"xxxx", "date":"40238", "high":45.2492,"low":44.7121,"close":45.1696},
{"symbol":"xxxx", "date":"40239", "high":45.6073,"low":45.2094,"close":45.3089},
{"symbol":"xxxx", "date":"40240", "high":45.5775,"low":45.1896,"close":45.3586},
{"symbol":"xxxx", "date":"40241", "high":45.5675,"low":45.1596,"close":45.5078},
{"symbol":"xxxx", "date":"40242", "high":46.2937,"low":45.7466,"close":46.1942},
{"symbol":"xxxx", "date":"40245", "high":46.3931,"low":46.1942,"close":46.2837},
{"symbol":"xxxx", "date":"40246", "high":46.8009,"low":46.1543,"close":46.5423},
{"symbol":"xxxx", "date":"40247", "high":47.0298,"low":46.5523,"close":46.9203},
{"symbol":"xxxx", "date":"40248", "high":47.0993,"low":46.7313,"close":47.0993},
{"symbol":"xxxx", "date":"40249", "high":47.2784,"low":46.8507,"close":47.1093},
{"symbol":"xxxx", "date":"40252", "high":47.1292,"low":46.6617,"close":46.9899},
{"symbol":"xxxx", "date":"40253", "high":47.3579,"low":46.9800,"close":47.2884},
{"symbol":"xxxx", "date":"40254", "high":47.6464,"low":47.2784,"close":47.4177},
{"symbol":"xxxx", "date":"40255", "high":47.6265,"low":47.3480,"close":47.5768},
{"symbol":"xxxx", "date":"40256", "high":47.7476,"low":47.1203,"close":47.2896},
{"symbol":"xxxx", "date":"40259", "high":47.9069,"low":47.0505,"close":47.7177},
{"symbol":"xxxx", "date":"40260", "high":48.0961,"low":47.5683,"close":48.0488},
{"symbol":"xxxx", "date":"40261", "high":47.9667,"low":47.7078,"close":47.8173},
{"symbol":"xxxx", "date":"40262", "high":48.3949,"low":47.6978,"close":47.7476},
{"symbol":"xxxx", "date":"40263", "high":48.1260,"low":47.5385,"close":47.7974},
{"symbol":"xxxx", "date":"40266", "high":48.1956,"low":47.8969,"close":48.0264},
{"symbol":"xxxx", "date":"40267", "high":48.3451,"low":47.8571,"close":48.1858},
{"symbol":"xxxx", "date":"40268", "high":48.2454,"low":47.8671,"close":47.9567},
{"symbol":"xxxx", "date":"40269", "high":48.4944,"low":47.5883,"close":47.9567},
{"symbol":"xxxx", "date":"40273", "high":48.5143,"low":47.9367,"close":48.4048},
{"symbol":"xxxx", "date":"40274", "high":48.6936,"low":48.1858,"close":48.5442},
{"symbol":"xxxx", "date":"40275", "high":48.6637,"low":48.1658,"close":48.4247},
{"symbol":"xxxx", "date":"40276", "high":48.6139,"low":48.0363,"close":48.5343},
{"symbol":"xxxx", "date":"40277", "high":48.8429,"low":48.4297,"close":48.8230},
{"symbol":"xxxx", "date":"40280", "high":48.9923,"low":48.7334,"close":48.8629},
{"symbol":"xxxx", "date":"40281", "high":49.1416,"low":48.6538,"close":49.1118},
{"symbol":"xxxx", "date":"40282", "high":49.7093,"low":49.2910,"close":49.6993},
{"symbol":"xxxx", "date":"40283", "high":49.9781,"low":49.6594,"close":49.9184},
{"symbol":"xxxx", "date":"40284", "high":49.9084,"low":48.9923,"close":49.3209},
{"symbol":"xxxx", "date":"40287", "high":49.4503,"low":48.6936,"close":49.2910},
{"symbol":"xxxx", "date":"40288", "high":49.6694,"low":49.2213,"close":49.5400},
{"symbol":"xxxx", "date":"40289", "high":49.9781,"low":49.5151,"close":49.8188},
{"symbol":"xxxx", "date":"40290", "high":50.1474,"low":49.0520,"close":50.0976},
{"symbol":"xxxx", "date":"40291", "high":50.3565,"low":49.8785,"close":50.3067},
{"symbol":"xxxx", "date":"40294", "high":50.4362,"low":50.0876,"close":50.1972},
{"symbol":"xxxx", "date":"40295", "high":50.2171,"low":49.0023,"close":49.1318},
{"symbol":"xxxx", "date":"40296", "high":49.4205,"low":48.7732,"close":49.1616},
{"symbol":"xxxx", "date":"40297", "high":50.1176,"low":49.4005,"close":50.0180},
{"symbol":"xxxx", "date":"40298", "high":50.0778,"low":48.9923,"close":49.0296},
{"symbol":"xxxx", "date":"40301", "high":49.9582,"low":49.2213,"close":49.7192},
{"symbol":"xxxx", "date":"40302", "high":49.1118,"low":47.8770,"close":48.2256},
{"symbol":"xxxx", "date":"40303", "high":48.2952,"low":47.4389,"close":47.9767},
{"symbol":"xxxx", "date":"40304", "high":48.1161,"low":41.3746,"close":46.3734},
{"symbol":"xxxx", "date":"40305", "high":46.6025,"low":44.0964,"close":45.2183},
{"symbol":"xxxx", "date":"40308", "high":47.5982,"low":47.1103,"close":47.5683},
{"symbol":"xxxx", "date":"40309", "high":48.1858,"low":47.0007,"close":47.5185},
{"symbol":"xxxx", "date":"40310", "high":48.4545,"low":47.6978,"close":48.4147},
{"symbol":"xxxx", "date":"40311", "high":48.5840,"low":47.5286,"close":47.6480},
{"symbol":"xxxx", "date":"40312", "high":47.3194,"low":46.1842,"close":46.7319},
{"symbol":"xxxx", "date":"40315", "high":47.0705,"low":45.8756,"close":46.8812},
{"symbol":"xxxx", "date":"40316", "high":47.3791,"low":46.0249,"close":46.2340},
{"symbol":"xxxx", "date":"40317", "high":46.4332,"low":45.3577,"close":45.8656},
{"symbol":"xxxx", "date":"40318", "high":45.0889,"low":44.0632,"close":44.1628},
{"symbol":"xxxx", "date":"40319", "high":45.4474,"low":43.3065,"close":44.6507},
{"symbol":"xxxx", "date":"40322", "high":45.1337,"low":44.4516,"close":44.4714},
{"symbol":"xxxx", "date":"40323", "high":44.5710,"low":43.0476,"close":44.5113},
{"symbol":"xxxx", "date":"40324", "high":45.3776,"low":43.9338,"close":44.0134},
{"symbol":"xxxx", "date":"40325", "high":45.6963,"low":44.8996,"close":45.6763},
{"symbol":"xxxx", "date":"40326", "high":45.7958,"low":45.0092,"close":45.4075},
{"symbol":"xxxx", "date":"40330", "high":46.0547,"low":44.9394,"close":44.9892},
{"symbol":"xxxx", "date":"40331", "high":46.0847,"low":44.9394,"close":46.0547},
{"symbol":"xxxx", "date":"40332", "high":46.5725,"low":45.8854,"close":46.4929},
{"symbol":"xxxx", "date":"40333", "high":46.0945,"low":44.7478,"close":44.9021},
{"symbol":"xxxx", "date":"40336", "high":45.2681,"low":44.0334,"close":44.0831},
{"symbol":"xxxx", "date":"40337", "high":44.2923,"low":43.4060,"close":44.0034},
{"symbol":"xxxx", "date":"40338", "high":44.7205,"low":43.5056,"close":43.6350},
{"symbol":"xxxx", "date":"40339", "high":44.9296,"low":44.0532,"close":44.8798},
{"symbol":"xxxx", "date":"40340", "high":45.3478,"low":44.4216,"close":45.3079},
{"symbol":"xxxx", "date":"40343", "high":46.0448,"low":45.2183,"close":45.2980},
{"symbol":"xxxx", "date":"40344", "high":46.5725,"low":45.2613,"close":46.5128},
{"symbol":"xxxx", "date":"40345", "high":46.9410,"low":46.3037,"close":46.7020},
{"symbol":"xxxx", "date":"40346", "high":46.9809,"low":46.4431,"close":46.8514},
{"symbol":"xxxx", "date":"40347", "high":47.2298,"low":46.7509,"close":46.8906},
{"symbol":"xxxx", "date":"40350", "high":47.5690,"low":46.2221,"close":46.4915},
{"symbol":"xxxx", "date":"40351", "high":47.0303,"low":46.0426,"close":46.1324},
{"symbol":"xxxx", "date":"40352", "high":46.3618,"low":45.5836,"close":45.9428},
{"symbol":"xxxx", "date":"40353", "high":45.8829,"low":45.0948,"close":45.2444},
{"symbol":"xxxx", "date":"40354", "high":45.5537,"low":44.8454,"close":45.1646},
{"symbol":"xxxx", "date":"40357", "high":45.4440,"low":44.7057,"close":45.0050},
{"symbol":"xxxx", "date":"40358", "high":44.3565,"low":42.9697,"close":43.2690},
{"symbol":"xxxx", "date":"40359", "high":43.5683,"low":42.5407,"close":42.6106},
{"symbol":"xxxx", "date":"40360", "high":42.8600,"low":41.6728,"close":42.4909},
{"symbol":"xxxx", "date":"40361", "high":42.7218,"low":41.9920,"close":42.3711},
{"symbol":"xxxx", "date":"40365", "high":43.2690,"low":42.1516,"close":42.5008},
{"symbol":"xxxx", "date":"40366", "high":43.9075,"low":42.6006,"close":43.8577},
{"symbol":"xxxx", "date":"40367", "high":44.2767,"low":43.5783,"close":44.0971},
{"symbol":"xxxx", "date":"40368", "high":44.5261,"low":43.9774,"close":44.5161},
{"symbol":"xxxx", "date":"40371", "high":44.9252,"low":44.3565,"close":44.6458},
{"symbol":"xxxx", "date":"40372", "high":45.3941,"low":44.6957,"close":45.2245},
{"symbol":"xxxx", "date":"40373", "high":45.7034,"low":45.1347,"close":45.4539},
{"symbol":"xxxx", "date":"40374", "high":45.6335,"low":44.8853,"close":45.4938},
{"symbol":"xxxx", "date":"40375", "high":45.5219,"low":44.1969,"close":44.2368},
{"symbol":"xxxx", "date":"40378", "high":44.7057,"low":43.9973,"close":44.6159},
{"symbol":"xxxx", "date":"40379", "high":45.1546,"low":43.7579,"close":45.1546},
{"symbol":"xxxx", "date":"40380", "high":45.6535,"low":44.4563,"close":44.5361},
{"symbol":"xxxx", "date":"40381", "high":45.8730,"low":45.1347,"close":45.6635},
{"symbol":"xxxx", "date":"40382", "high":45.9927,"low":45.2744,"close":45.9528},
{"symbol":"xxxx", "date":"40385", "high":46.3518,"low":45.8031,"close":46.3319},
{"symbol":"xxxx", "date":"40386", "high":46.6112,"low":46.1024,"close":46.3119},
{"symbol":"xxxx", "date":"40387", "high":46.4716,"low":45.7732,"close":45.9428},
{"symbol":"xxxx", "date":"40388", "high":46.3020,"low":45.1447,"close":45.6036},
{"symbol":"xxxx", "date":"40389", "high":45.9827,"low":44.9651,"close":45.7034},
{"symbol":"xxxx", "date":"40392", "high":46.6811,"low":46.1025,"close":46.5614},
{"symbol":"xxxx", "date":"40393", "high":46.5913,"low":46.1423,"close":46.3618},
{"symbol":"xxxx", "date":"40394", "high":46.8806,"low":46.3918,"close":46.8287},
{"symbol":"xxxx", "date":"40395", "high":46.8108,"low":46.4117,"close":46.7210},
{"symbol":"xxxx", "date":"40396", "high":46.7409,"low":45.9428,"close":46.6511},
{"symbol":"xxxx", "date":"40399", "high":47.0801,"low":46.6811,"close":46.9704},
{"symbol":"xxxx", "date":"40400", "high":46.8407,"low":46.1723,"close":46.5639},
{"symbol":"xxxx", "date":"40401", "high":45.8131,"low":45.1048,"close":45.2943},
{"symbol":"xxxx", "date":"40402", "high":45.1347,"low":44.3465,"close":44.9352},
{"symbol":"xxxx", "date":"40403", "high":44.9551,"low":44.6059,"close":44.6159},
{"symbol":"xxxx", "date":"40406", "high":45.0050,"low":44.1969,"close":44.6957},
{"symbol":"xxxx", "date":"40407", "high":45.6734,"low":44.9252,"close":45.2664},
{"symbol":"xxxx", "date":"40408", "high":45.7133,"low":45.0050,"close":45.4440},
{"symbol":"xxxx", "date":"40409", "high":45.3542,"low":44.4563,"close":44.7556},
{"symbol":"xxxx", "date":"40410", "high":44.9252,"low":44.4363,"close":44.8154},
{"symbol":"xxxx", "date":"40413", "high":45.2345,"low":44.3565,"close":44.3765},
{"symbol":"xxxx", "date":"40414", "high":44.0173,"low":43.3638,"close":43.5484},
{"symbol":"xxxx", "date":"40415", "high":44.1570,"low":43.1693,"close":43.9674},
{"symbol":"xxxx", "date":"40416", "high":44.2168,"low":43.3987,"close":43.4386},
{"symbol":"xxxx", "date":"40417", "high":44.0572,"low":42.8700,"close":43.9674},
{"symbol":"xxxx", "date":"40420", "high":44.1470,"low":43.4985,"close":43.5085},
{"symbol":"xxxx", "date":"40421", "high":43.7479,"low":43.0795,"close":43.3588},
{"symbol":"xxxx", "date":"40422", "high":44.8055,"low":43.9674,"close":44.6558},
{"symbol":"xxxx", "date":"40423", "high":45.1746,"low":44.6259,"close":45.1546},
{"symbol":"xxxx", "date":"40424", "high":45.9129,"low":45.4440,"close":45.9029},
{"symbol":"xxxx", "date":"40428", "high":45.9228,"low":45.5188,"close":45.5936},
{"symbol":"xxxx", "date":"40429", "high":46.3419,"low":45.7133,"close":46.1423},
{"symbol":"xxxx", "date":"40430", "high":46.5913,"low":46.2122,"close":46.3219},
{"symbol":"xxxx", "date":"40431", "high":46.5614,"low":46.1423,"close":46.4915},
{"symbol":"xxxx", "date":"40434", "high":47.2597,"low":46.8307,"close":47.1400},
{"symbol":"xxxx", "date":"40435", "high":47.5890,"low":46.9704,"close":47.3395},
{"symbol":"xxxx", "date":"40436", "high":47.6887,"low":47.0801,"close":47.6388},
{"symbol":"xxxx", "date":"40437", "high":47.8683,"low":47.4293,"close":47.8284},
{"symbol":"xxxx", "date":"40438", "high":48.1400,"low":47.7500,"close":47.9950},
{"symbol":"xxxx", "date":"40441", "high":48.9300,"low":48.1101,"close":48.8299},
{"symbol":"xxxx", "date":"40442", "high":49.1700,"low":48.6100,"close":48.8200},
{"symbol":"xxxx", "date":"40443", "high":49.0210,"low":48.4200,"close":48.6900},
{"symbol":"xxxx", "date":"40444", "high":49.1600,"low":48.3200,"close":48.6700},
{"symbol":"xxxx", "date":"40445", "high":49.6900,"low":49.1500,"close":49.6600},
{"symbol":"xxxx", "date":"40448", "high":49.7500,"low":49.3500,"close":49.3900},
{"symbol":"xxxx", "date":"40449", "high":49.5400,"low":48.5900,"close":49.3700},
{"symbol":"xxxx", "date":"40450", "high":49.5300,"low":49.1100,"close":49.2900},
{"symbol":"xxxx", "date":"40451", "high":49.8400,"low":48.7500,"close":49.0700},
{"symbol":"xxxx", "date":"40452", "high":49.5300,"low":48.7810,"close":49.0100},
{"symbol":"xxxx", "date":"40455", "high":49.0500,"low":48.2000,"close":48.4800},
{"symbol":"xxxx", "date":"40456", "high":49.7600,"low":49.0000,"close":49.6600},
{"symbol":"xxxx", "date":"40457", "high":49.7100,"low":48.9100,"close":49.2300},
{"symbol":"xxxx", "date":"40458", "high":49.5400,"low":49.0000,"close":49.4100},
{"symbol":"xxxx", "date":"40459", "high":49.8700,"low":49.0800,"close":49.7500},
{"symbol":"xxxx", "date":"40462", "high":50.0200,"low":49.6200,"close":49.7700},
{"symbol":"xxxx", "date":"40463", "high":50.2100,"low":49.2600,"close":50.1100},
{"symbol":"xxxx", "date":"40464", "high":50.7500,"low":50.2800,"close":50.5200},
{"symbol":"xxxx", "date":"40465", "high":50.6400,"low":50.1700,"close":50.4200},
{"symbol":"xxxx", "date":"40466", "high":51.5000,"low":50.6300,"close":51.4900},
{"symbol":"xxxx", "date":"40469", "high":51.7200,"low":51.3000,"close":51.3000},
{"symbol":"xxxx", "date":"40470", "high":51.3000,"low":50.4200,"close":50.8200},
{"symbol":"xxxx", "date":"40471", "high":51.5700,"low":50.8700,"close":51.1900},
{"symbol":"xxxx", "date":"40472", "high":51.7101,"low":50.7900,"close":51.2900},
{"symbol":"xxxx", "date":"40473", "high":51.6900,"low":51.2100,"close":51.6400},
{"symbol":"xxxx", "date":"40476", "high":52.2300,"low":51.8500,"close":51.8900},
{"symbol":"xxxx", "date":"40477", "high":52.1500,"low":51.4200,"close":52.0300},
{"symbol":"xxxx", "date":"40478", "high":52.2300,"low":51.6600,"close":52.1900},
{"symbol":"xxxx", "date":"40479", "high":52.4500,"low":51.8400,"close":52.3000},
{"symbol":"xxxx", "date":"40480", "high":52.4900,"low":52.1698,"close":52.1800},
{"symbol":"xxxx", "date":"40483", "high":52.7500,"low":51.9800,"close":52.2200},
{"symbol":"xxxx", "date":"40484", "high":52.9300,"low":52.5750,"close":52.7800},
{"symbol":"xxxx", "date":"40485", "high":53.0400,"low":52.3600,"close":53.0200},
{"symbol":"xxxx", "date":"40486", "high":53.8614,"low":53.5000,"close":53.6700},
{"symbol":"xxxx", "date":"40487", "high":53.8100,"low":53.5100,"close":53.6700},
{"symbol":"xxxx", "date":"40490", "high":53.8300,"low":53.4499,"close":53.7375},
{"symbol":"xxxx", "date":"40491", "high":54.0401,"low":53.2100,"close":53.4500},
{"symbol":"xxxx", "date":"40492", "high":53.7700,"low":53.1000,"close":53.7150},
{"symbol":"xxxx", "date":"40493", "high":53.4800,"low":52.6600,"close":53.3850},
{"symbol":"xxxx", "date":"40494", "high":53.3672,"low":52.1100,"close":52.5100},
{"symbol":"xxxx", "date":"40497", "high":52.8800,"low":52.2900,"close":52.3150},
{"symbol":"xxxx", "date":"40498", "high":52.2462,"low":50.8500,"close":51.4500},
{"symbol":"xxxx", "date":"40499", "high":51.8700,"low":51.3500,"close":51.6000},
{"symbol":"xxxx", "date":"40500", "high":52.7900,"low":52.1300,"close":52.4300},
{"symbol":"xxxx", "date":"40501", "high":52.5900,"low":52.1400,"close":52.4700},
{"symbol":"xxxx", "date":"40504", "high":52.9100,"low":52.1700,"close":52.9100},
{"symbol":"xxxx", "date":"40505", "high":52.4500,"low":51.7700,"close":52.0700},
{"symbol":"xxxx", "date":"40506", "high":53.2500,"low":52.5600,"close":53.1200},
{"symbol":"xxxx", "date":"40508", "high":53.1300,"low":52.6700,"close":52.7700},
{"symbol":"xxxx", "date":"40511", "high":52.9000,"low":52.1000,"close":52.7300},
{"symbol":"xxxx", "date":"40512", "high":52.7355,"low":51.8800,"close":52.0850},
{"symbol":"xxxx", "date":"40513", "high":53.4600,"low":52.8400,"close":53.1900},
{"symbol":"xxxx", "date":"40514", "high":53.8100,"low":53.2100,"close":53.7300},
{"symbol":"xxxx", "date":"40515", "high":53.9400,"low":53.5000,"close":53.8700},
{"symbol":"xxxx", "date":"40518", "high":53.9500,"low":53.6800,"close":53.8450},
{"symbol":"xxxx", "date":"40519", "high":54.5200,"low":53.8200,"close":53.8800},
{"symbol":"xxxx", "date":"40520", "high":54.1500,"low":53.6899,"close":54.0800},
{"symbol":"xxxx", "date":"40521", "high":54.4400,"low":53.9500,"close":54.1350},
{"symbol":"xxxx", "date":"40522", "high":54.5500,"low":54.0900,"close":54.4950},
{"symbol":"xxxx", "date":"40525", "high":54.7400,"low":54.2700,"close":54.3000},
{"symbol":"xxxx", "date":"40526", "high":54.6200,"low":54.2300,"close":54.3950},
{"symbol":"xxxx", "date":"40527", "high":54.7000,"low":54.0300,"close":54.1600},
{"symbol":"xxxx", "date":"40528", "high":54.6600,"low":54.0600,"close":54.5800},
{"symbol":"xxxx", "date":"40529", "high":54.6800,"low":54.4100,"close":54.5200},
{"symbol":"xxxx", "date":"40532", "high":54.7600,"low":54.1600,"close":54.5600},
{"symbol":"xxxx", "date":"40533", "high":54.8900,"low":54.6200,"close":54.8900},
{"symbol":"xxxx", "date":"40534", "high":54.9600,"low":54.7900,"close":54.8850},
{"symbol":"xxxx", "date":"40535", "high":54.8700,"low":54.6100,"close":54.7420},
{"symbol":"xxxx", "date":"40539", "high":54.8600,"low":54.2100,"close":54.7700},
{"symbol":"xxxx", "date":"40540", "high":54.9200,"low":54.5500,"close":54.6700},
{"symbol":"xxxx", "date":"40541", "high":54.9000,"low":54.7300,"close":54.7900},
{"symbol":"xxxx", "date":"40542", "high":54.8000,"low":54.5500,"close":54.6600},
{"symbol":"xxxx", "date":"40543", "high":54.6200,"low":54.2100,"close":54.4600},
{"symbol":"xxxx", "date":"40546", "high":55.6900,"low":54.9500,"close":55.3100},
{"symbol":"xxxx", "date":"40547", "high":55.5500,"low":54.9200,"close":55.2650},
{"symbol":"xxxx", "date":"40548", "high":55.7600,"low":55.0700,"close":55.7400},
{"symbol":"xxxx", "date":"40549", "high":55.9600,"low":55.6800,"close":55.9200},
{"symbol":"xxxx", "date":"40550", "high":56.0500,"low":55.3200,"close":55.8700},
{"symbol":"xxxx", "date":"40553", "high":56.1800,"low":55.5800,"close":56.0800},
{"symbol":"xxxx", "date":"40554", "high":56.3600,"low":55.9500,"close":56.1600},
{"symbol":"xxxx", "date":"40555", "high":56.5600,"low":56.2000,"close":56.5550},
{"symbol":"xxxx", "date":"40556", "high":56.7300,"low":56.4100,"close":56.5750},
{"symbol":"xxxx", "date":"40557", "high":57.0200,"low":56.4600,"close":57.0000},
{"symbol":"xxxx", "date":"40561", "high":57.2300,"low":56.4900,"close":57.1600},
{"symbol":"xxxx", "date":"40562", "high":57.2600,"low":56.3200,"close":56.5100},
{"symbol":"xxxx", "date":"40563", "high":56.3500,"low":55.6800,"close":56.1100},
{"symbol":"xxxx", "date":"40564", "high":56.4900,"low":55.6500,"close":55.6800},
{"symbol":"xxxx", "date":"40567", "high":56.4600,"low":55.6800,"close":56.4500},
{"symbol":"xxxx", "date":"40568", "high":56.5500,"low":56.0500,"close":56.5325},
{"symbol":"xxxx", "date":"40569", "high":56.9800,"low":56.4500,"close":56.8300},
{"symbol":"xxxx", "date":"40570", "high":57.3500,"low":56.9200,"close":57.1800},
{"symbol":"xxxx", "date":"40571", "high":57.2200,"low":55.4700,"close":55.7300},
{"symbol":"xxxx", "date":"40574", "high":56.1600,"low":55.3900,"close":56.0000},
{"symbol":"xxxx", "date":"40575", "high":57.1800,"low":56.3600,"close":57.0500},
{"symbol":"xxxx", "date":"40576", "high":57.1700,"low":56.8400,"close":56.9550},
{"symbol":"xxxx", "date":"40577", "high":57.1400,"low":56.4000,"close":57.0575},
{"symbol":"xxxx", "date":"40578", "high":57.4200,"low":56.9000,"close":57.3750},
{"symbol":"xxxx", "date":"40581", "high":57.9700,"low":57.4000,"close":57.6500},
{"symbol":"xxxx", "date":"40582", "high":58.0700,"low":57.5600,"close":58.0250},
{"symbol":"xxxx", "date":"40583", "high":58.1200,"low":57.7500,"close":57.9300},
]
# Build a Spark DataFrame from the high/low/close rows above and run the
# indicator pipeline on it (column names tr/dx/adx suggest an ADX /
# directional-movement calculation -- TODO confirm against tr.cal).
dfSC = slib.sqlContext.createDataFrame(dSC)
rdd = tr.cal(slib.sc, slib.sqlContext, dfSC)
# NOTE(review): 'i' is assigned but never used afterwards.
i = 0
ladx = rdd.collect()
for each in ladx:
    # Alternative debug outputs for the intermediate indicator columns:
    #print each["date"], each["tr"], each["pdm1"], each["mdm1"], each["tr14"], each["adx"]
    #print each["date"], each["pdm14"], each["mdm14"]
    #print each["date"], each["pdi14"], each["mdi14"]
    #print each["di14diff"], each["di14sum"]
    print each["date"], each["dx"], each["adx"]
11,636 | 847017250000d22c70d67fa4cfb8cab00797a3d9 | # Create a counter of cards. Sort the keys. For each card, next W cards should have a count greater than it's count. Subtract curr count from next W cards.
class Solution(object):
    def isNStraightHand(self, hand, W):
        """Return True when *hand* can be split into groups of W consecutive cards.

        Counts card occurrences, then walks the distinct values in ascending
        order: every remaining copy of the lowest card must start a run of W
        consecutive values, so those counts are decremented in one pass.

        :type hand: List[int]
        :type W: int
        :rtype: bool
        """
        # A clean partition is impossible unless W divides the hand size.
        if len(hand) % W != 0:
            return False
        counts = {}
        for card in hand:
            counts[card] = counts.get(card, 0) + 1
        for low in sorted(counts):
            need = counts[low]
            if not need:
                continue  # already consumed by an earlier run
            for member in range(low, low + W):
                available = counts.get(member)
                if available is None or available < need:
                    return False
                counts[member] = available - need
        return True
11,637 | 6847fff27c97f199992923f974046d9b3239c1bf | # Python imports.
import numpy as np
# Other imports.
from ..StateClass import State
class D4RLAntMazeState(State):
    """MDP state for the D4RL ant-maze domain.

    Keeps the agent's (x, y) position separate from the remaining
    observation dimensions; the flattened concatenation of both is handed
    to the ``State`` base class as the feature vector.
    """

    def __init__(self, position, others, done):
        """
        Args:
            position (np.ndarray): the agent's (x, y) position
            others (np.ndarray): remaining observation features
            done (bool): whether this state is terminal
        """
        self.position = position
        self.others = others
        feature_vector = self.position.tolist() + self.others.tolist()
        State.__init__(self, data=feature_vector, is_terminal=done)

    def __str__(self):
        template = "x: {}\ty: {}\tothers:{}\tterminal: {}\n"
        return template.format(self.position[0], self.position[1],
                               self.others, self.is_terminal())

    def __repr__(self):
        return self.__str__()
|
11,638 | 41cb9452b61f216079af28d462e4fa3af17af121 | #!/usr/bin/env python
import cv2
import cv_bridge
import rospy
import sys
import os
from sensor_msgs.msg import Image
class DoMagic:
    """ROS node that subscribes to a depth-image topic and caches frames."""

    def __init__(self):
        rospy.init_node('magical_node', anonymous=True)
        # Bug fix: the topic-name assignment was commented out while still
        # being referenced below, which raised AttributeError before the
        # subscriber could be created.
        self.image_input_name = rospy.resolve_name('/camera/depth/image_rect_raw')
        image_sub = rospy.Subscriber(self.image_input_name, Image, self.callback)
        self.bridge = cv_bridge.CvBridge()
        self.ctr = 0        # number of frames received so far
        self.image = None   # most recent frame as an OpenCV image

    def callback(self, msg):
        # Bug fix: the callback previously had no body at all (a syntax
        # error -- the ``if __name__`` block sat directly under the ``def``).
        # Convert the incoming ROS image and cache it for later use.
        self.image = self.bridge.imgmsg_to_cv2(msg)
        self.ctr += 1


if __name__ == '__main__':
    node = DoMagic()
    rospy.spin()
|
11,639 | ded10c64c1eb41ba2791f9a197e07e2f481d65fb | # -*- coding: utf-8 -*-
__author__ = 'Gennady Denisov <denisovgena@gmail.com>'
import webapp2
try:
import conf
loader = conf.JINJA_ENVIRONMENT
except ImportError:
# Stub configuration
from jinja2 import Environment, PackageLoader
loader = Environment(loader=PackageLoader('core', 'tests'))
def get_template(loader, template_name):
    """Look up *template_name* in the given Jinja environment.

    :param loader: jinja environment instance
    :param template_name: name of the template to fetch
    :returns: the template object, ready to render
    """
    template = loader.get_template(template_name)
    return template
class BaseHandler(webapp2.RequestHandler):
    """
    Base request handler for application
    """
    template_name = None  # subclasses set this to their template file name

    def get_context(self, *args, **kwargs):
        """
        Build the template context dictionary: keyword arguments are copied
        verbatim and positional arguments become 'arg_0', 'arg_1', ...
        """
        context = dict(kwargs)
        for idx, arg in enumerate(args):
            context['arg_%d' % idx] = arg
        return context

    def render_response(self, *args, **kwargs):
        """
        Render the context into the handler's template and write it to the
        response.  Raises ValueError when no template_name is configured.
        """
        if self.template_name is None:
            raise ValueError('No template provided.')
        template = get_template(loader, self.template_name)
        self.response.write(template.render(**self.get_context(*args, **kwargs)))

    def get(self, *args, **kwargs):
        """Handle GET by rendering the template with the given context."""
        return self.render_response(*args, **kwargs)
|
11,640 | 9a02c504bac6d669da7ffdb7e7a0505a2be25986 | import socket
def send(ip, msg):
    """Send *msg* (UTF-8 encoded) as one UDP datagram to ``ip`` port 8988.

    Delivery is deliberately best-effort: network errors are ignored.
    """
    my_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        my_socket.sendto(msg.encode(), (ip, 8988))
    except OSError:
        # Best-effort: unreachable hosts / bad addresses are ignored, but
        # unrelated exceptions (e.g. KeyboardInterrupt) are no longer
        # swallowed by the previous bare ``except``.
        pass
    finally:
        # Bug fix: close in ``finally`` so the socket cannot leak if an
        # unexpected exception propagates.
        my_socket.close()
def send_message(ip, message):
    """Deliver a chat *message*, prefixed with the '%TEXT%' marker."""
    send(ip, "%TEXT% : " + message)
def send_control(ip, control):
    """Deliver a control keyword wrapped in '$' delimiters."""
    send(ip, '$' + control + '$')
|
11,641 | 45d4ded0fa2679d19e323a88306856a543c7ba0a | #! usr/bin/env python3
# -*- coding:utf-8 -*-
import pymongo
from ๆๅๆ่ไฟกๆฏ.craw_jobs.setting import *
client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]
def save_to_mongo(result):
    """Insert one scraped job record into the 'zhilianzhaopin' collection.

    Prints a confirmation on success (the message text is mojibake of a
    Chinese "inserted into database" notice) and always returns None.
    """
    if db['zhilianzhaopin'].insert(result):
        # Bug fix: this success-message string literal was broken across two
        # physical source lines (a SyntaxError); rejoined into one literal.
        print('ๆๅฅๆฐๆฎๅบๆๅ', result)
    return None
11,642 | 4b4af27357871e737b850d87931976ab92b73767 | from django.contrib import admin
from .models import *
# Expose every model of this app in the Django admin, in the original order.
for model in (
    Registration,
    Attendance,
    Messages,
    Feedback,
    Requests,
    Course,
    Blogs,
    Exam,
    Exam_results,
):
    admin.site.register(model)
|
11,643 | a05fc82167cd6e2085a3a3fcf05e1e30e0e91f25 | import re
# Extract the value of the 4th comma-separated field from each line of
# STEP_OP.json and write it (one value per line) to text_ip.txt.
f = open('STEP_OP.json')
wf = open('text_ip.txt', 'w')
while True:
    original_data = f.readline()
    # Bug fix: readline() returns '' at EOF, which the old ``continue``
    # turned into an infinite loop.  Stop on EOF; skip (near-)blank lines.
    if not original_data:
        break
    if len(original_data) <= 1:
        continue
    fields = original_data.split(',')
    try:
        field = fields[3]
    except IndexError:
        # Malformed line: report it and move on.  (The old code printed it
        # and then crashed on the never-assigned name ``aa``.)
        print(fields)
        continue
    colon = field.index(':')
    value = field[colon + 2:-2]
    print('\n' + value)
    wf.write(value + '\n')
f.close()
wf.close()
11,644 | beb987188ebdbe61d3a974f2add797515c0e0d79 | # coding=utf-8
"""
__author__ = 'th'
"""
import urllib2
# Four local httpdns endpoints (same host, ports 8001-8004) resolving the
# same domain; one matching header set per endpoint.
url1 = 'http://192.168.3.177:8001/httpdns?host=live-ch.cloutropy.com'
url2 = 'http://192.168.3.177:8002/httpdns?host=live-ch.cloutropy.com'
url3 = 'http://192.168.3.177:8003/httpdns?host=live-ch.cloutropy.com'
url4 = 'http://192.168.3.177:8004/httpdns?host=live-ch.cloutropy.com'
# X-Real-IP spoofs the client address the httpdns service sees; Host is
# pinned to the target backend's port.
headers1 = {'X-Real-IP': '1.0.2.0',
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'Host': '192.168.3.177:8001'
            }
headers2 = {'X-Real-IP': '1.0.2.0',
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'Host': '192.168.3.177:8002'
            }
headers3 = {'X-Real-IP': '1.0.2.0',
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'Host': '192.168.3.177:8003'
            }
headers4 = {'X-Real-IP': '1.0.2.0',
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'Host': '192.168.3.177:8004'
            }
data = None
while True:
req1 = urllib2.Request(url1, data, headers1)
req2 = urllib2.Request(url2, data, headers2)
req3 = urllib2.Request(url3, data, headers3)
req4 = urllib2.Request(url4, data, headers4)
response1 = urllib2.urlopen(req1)
response2 = urllib2.urlopen(req2)
html1 = response1.read()
html2 = response2.read()
print html1, html2
|
11,645 | 1f31c12bfe806c681bd0e29fe04419c99a65009f |
@app.route('/3dclustering')
@cross_origin(origin='*',headers=['Content-Type','Authorization'])
def _3dclustering():
    """Run the '_3dclustering' job via Kafka and return its result.

    Publishes a UUID-tagged request on the 'requests4' topic, then blocks
    on the shared consumer until the response carrying the same id arrives.

    Returns:
        str: the 'data' field of the matching response message.
    """
    producer = KafkaProducer(bootstrap_servers=['kafka-server:9092'], compression_type='gzip')
    request_id = str.encode(str(uuid.uuid4()))
    payload = str.encode(str(json.dumps({'request_id': request_id,
                                         'action': '_3dclustering'})))
    producer.send('requests4', payload)
    time.sleep(1)  # give the broker a moment before polling for the reply
    global consumer  # bug fix: was misspelled 'global cunsumer'
    ret = ''
    for message in consumer:
        try:
            json_object = json.loads(message.value)
        except ValueError:
            continue  # not JSON; keep polling
        if json_object['response_id'] != request_id:
            continue  # response belongs to a different request
        ret = json_object
        break
    return str(ret['data'])
@app.route('/no_transaction_vs_amount')
@cross_origin(origin='*',headers=['Content-Type','Authorization'])
def no_transaction_vs_amount():
    """Run the 'no_transaction_vs_amount' job via Kafka and return its result.

    Publishes a UUID-tagged request on the 'requests4' topic, then blocks
    on the shared consumer until the response carrying the same id arrives.
    (Debug prints of every polled message were removed.)

    Returns:
        str: the 'data' field of the matching response message.
    """
    producer = KafkaProducer(bootstrap_servers=['kafka-server:9092'], compression_type='gzip')
    request_id = str.encode(str(uuid.uuid4()))
    payload = str.encode(str(json.dumps({'request_id': request_id,
                                         'action': 'no_transaction_vs_amount'})))
    producer.send('requests4', payload)
    time.sleep(1)  # give the broker a moment before polling for the reply
    global consumer  # bug fix: was misspelled 'global cunsumer'
    ret = ''
    for message in consumer:
        try:
            json_object = json.loads(message.value)
        except ValueError:
            continue  # not JSON; keep polling
        if json_object['response_id'] != request_id:
            continue  # response belongs to a different request
        ret = json_object
        break
    return str(ret['data'])
@app.route('/no_transaction_vs_harmonic')
@cross_origin(origin='*',headers=['Content-Type','Authorization'])
def no_transaction_vs_harmonic():
    """Run the 'no_transaction_vs_harmonic' job via Kafka and return its result.

    Publishes a UUID-tagged request on the 'requests4' topic, then blocks
    on the shared consumer until the response carrying the same id arrives.

    Returns:
        str: the 'data' field of the matching response message.
    """
    producer = KafkaProducer(bootstrap_servers=['kafka-server:9092'], compression_type='gzip')
    request_id = str.encode(str(uuid.uuid4()))
    payload = str.encode(str(json.dumps({'request_id': request_id,
                                         'action': 'no_transaction_vs_harmonic'})))
    producer.send('requests4', payload)
    time.sleep(1)  # give the broker a moment before polling for the reply
    global consumer  # bug fix: was misspelled 'global cunsumer'
    ret = ''
    for message in consumer:
        try:
            json_object = json.loads(message.value)
        except ValueError:
            continue  # not JSON; keep polling
        if json_object['response_id'] != request_id:
            continue  # response belongs to a different request
        ret = json_object
        break
    return str(ret['data'])
@app.route('/amount_vs_harmonic')
@cross_origin(origin='*',headers=['Content-Type','Authorization'])
def amount_vs_harmonic():
    """Run the 'amount_vs_harmonic' job via Kafka and return its result.

    Publishes a UUID-tagged request on the 'requests4' topic, then blocks
    on the shared consumer until the response carrying the same id arrives.

    Returns:
        str: the 'data' field of the matching response message.
    """
    producer = KafkaProducer(bootstrap_servers=['kafka-server:9092'], compression_type='gzip')
    request_id = str.encode(str(uuid.uuid4()))
    payload = str.encode(str(json.dumps({'request_id': request_id,
                                         'action': 'amount_vs_harmonic'})))
    producer.send('requests4', payload)
    time.sleep(1)  # give the broker a moment before polling for the reply
    global consumer  # bug fix: was misspelled 'global cunsumer'
    ret = ''
    for message in consumer:
        try:
            json_object = json.loads(message.value)
        except ValueError:
            continue  # not JSON; keep polling
        if json_object['response_id'] != request_id:
            continue  # response belongs to a different request
        ret = json_object
        break
    return str(ret['data'])
@app.route('/label_counts')
@cross_origin(origin='*',headers=['Content-Type','Authorization'])
def label_counts():
    """Run the 'label_counts' job via Kafka and return its result.

    Publishes a UUID-tagged request on the 'requests4' topic, then blocks
    on the shared consumer until the response carrying the same id arrives.

    Returns:
        str: the 'data' field of the matching response message.
    """
    producer = KafkaProducer(bootstrap_servers=['kafka-server:9092'], compression_type='gzip')
    request_id = str.encode(str(uuid.uuid4()))
    payload = str.encode(str(json.dumps({'request_id': request_id,
                                         'action': 'label_counts'})))
    producer.send('requests4', payload)
    time.sleep(1)  # give the broker a moment before polling for the reply
    global consumer  # bug fix: was misspelled 'global cunsumer'
    ret = ''
    for message in consumer:
        try:
            json_object = json.loads(message.value)
        except ValueError:
            continue  # not JSON; keep polling
        if json_object['response_id'] != request_id:
            continue  # response belongs to a different request
        ret = json_object
        break
    return str(ret['data'])
@app.route('/no_transactions_statics')
@cross_origin(origin='*',headers=['Content-Type','Authorization'])
def no_transactions_statics():
    """Run the 'no_transactions_statics' job via Kafka and return its result.

    Publishes a UUID-tagged request on the 'requests4' topic, then blocks
    on the shared consumer until the response carrying the same id arrives.

    Returns:
        str: the 'data' field of the matching response message.
    """
    producer = KafkaProducer(bootstrap_servers=['kafka-server:9092'], compression_type='gzip')
    request_id = str.encode(str(uuid.uuid4()))
    payload = str.encode(str(json.dumps({'request_id': request_id,
                                         'action': 'no_transactions_statics'})))
    producer.send('requests4', payload)
    time.sleep(1)  # give the broker a moment before polling for the reply
    global consumer  # bug fix: was misspelled 'global cunsumer'
    ret = ''
    for message in consumer:
        try:
            json_object = json.loads(message.value)
        except ValueError:
            continue  # not JSON; keep polling
        if json_object['response_id'] != request_id:
            continue  # response belongs to a different request
        ret = json_object
        break
    return str(ret['data'])
@app.route('/amount_statics')
@cross_origin(origin='*',headers=['Content-Type','Authorization'])
def amount_statics():
    """Run the 'amount_statics' job via Kafka and return its result.

    Publishes a UUID-tagged request on the 'requests4' topic, then blocks
    on the shared consumer until the response carrying the same id arrives.

    Returns:
        str: the 'data' field of the matching response message.
    """
    producer = KafkaProducer(bootstrap_servers=['kafka-server:9092'], compression_type='gzip')
    request_id = str.encode(str(uuid.uuid4()))
    payload = str.encode(str(json.dumps({'request_id': request_id,
                                         'action': 'amount_statics'})))
    producer.send('requests4', payload)
    time.sleep(1)  # give the broker a moment before polling for the reply
    global consumer  # bug fix: was misspelled 'global cunsumer'
    ret = ''
    for message in consumer:
        try:
            json_object = json.loads(message.value)
        except ValueError:
            continue  # not JSON; keep polling
        if json_object['response_id'] != request_id:
            continue  # response belongs to a different request
        ret = json_object
        break
    return str(ret['data'])
@app.route('/harmonic_statics')
@cross_origin(origin='*',headers=['Content-Type','Authorization'])
def harmonic_statics():
    """Run the 'harmonic_statics' job via Kafka and return its result.

    Publishes a UUID-tagged request on the 'requests4' topic, then blocks
    on the shared consumer until the response carrying the same id arrives.

    Returns:
        str: the 'data' field of the matching response message.
    """
    producer = KafkaProducer(bootstrap_servers=['kafka-server:9092'], compression_type='gzip')
    request_id = str.encode(str(uuid.uuid4()))
    payload = str.encode(str(json.dumps({'request_id': request_id,
                                         'action': 'harmonic_statics'})))
    producer.send('requests4', payload)
    time.sleep(1)  # give the broker a moment before polling for the reply
    global consumer  # bug fix: was misspelled 'global cunsumer'
    ret = ''
    for message in consumer:
        try:
            json_object = json.loads(message.value)
        except ValueError:
            continue  # not JSON; keep polling
        if json_object['response_id'] != request_id:
            continue  # response belongs to a different request
        ret = json_object
        break
    return str(ret['data'])
|
11,646 | defe86a35743cfffde2041368ecff8f8ac3e6f01 | import os
basedir = os.path.abspath(os.path.dirname(__file__))  # directory containing this config file
start_app = True
database_name = 'basement'
WTF_CSRF_ENABLED = True  # enable CSRF protection (Flask-WTF setting)
# NOTE(review): secret key hard-coded in source control -- move it to an
# environment variable for production use.
SECRET_KEY = 'L3tmein!'
# Local PostgreSQL: user 'postgres', empty password, database 'basement'.
SQLALCHEMY_DATABASE_URI = 'postgresql://' + 'postgres' + ':@localhost/' + database_name
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
|
11,647 | 20ddf799a9d03cb6f02cb063a7e2aeeefe2a04e7 | number = "428122498997587283996116951397957933569136949848379417125362532269869461185743113733992331379856446362482129646556286611543756564275715359874924898113424472782974789464348626278532936228881786273586278886575828239366794429223317476722337424399239986153675275924113322561873814364451339186918813451685263192891627186769818128715595715444565444581514677521874935942913547121751851631373316122491471564697731298951989511917272684335463436218283261962158671266625299188764589814518793576375629163896349665312991285776595142146261792244475721782941364787968924537841698538288459355159783985638187254653851864874544584878999193242641611859756728634623853475638478923744471563845635468173824196684361934269459459124269196811512927442662761563824323621758785866391424778683599179447845595931928589255935953295111937431266815352781399967295389339626178664148415561175386725992469782888757942558362117938629369129439717427474416851628121191639355646394276451847131182652486561415942815818785884559193483878139351841633366398788657844396925423217662517356486193821341454889283266691224778723833397914224396722559593959125317175899594685524852419495793389481831354787287452367145661829287518771631939314683137722493531318181315216342994141683484111969476952946378314883421677952397588613562958741328987734565492378977396431481215983656814486518865642645612413945129485464979535991675776338786758997128124651311153182816188924935186361813797251997643992686294724699281969473142721116432968216434977684138184481963845141486793996476793954226225885432422654394439882842163295458549755137247614338991879966665925466545111899714943716571113326479432925939227996799951279485722836754457737668191845914566732285928453781818792236447816127492445993945894435692799839217467253986218213131249786833333936332257795191937942688668182629489191693154184177398186462481316834678733713614889439352976144726162214648922159719979143735815478633912633185334529484779322
818611438194522292278787653763328944421516569181178517915745625295158611636365253948455727653672922299582352766484"
print(sum([int(num) for i, num in enumerate(number) if num == number[i-1]]))
|
11,648 | e8136eacb0865b19acd883f832da6006495d909b | class Node:
    def __init__(self, data):
        """Create a singly-linked-list node holding *data*."""
        self.data = data
        self.next = None  # successor node; None marks the tail
class LinkedList:
    """Singly linked list with a handful of classic operations.

    Nodes are ``Node`` instances; ``head`` is None for an empty list.
    """

    def __init__(self):
        self.head = None

    def print_list(self):
        """Print every element, one per line, head first."""
        cur_node = self.head
        while cur_node:
            print(cur_node.data)
            cur_node = cur_node.next

    def append(self, data):
        """Add *data* at the tail (O(n): walks to the end)."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
            return
        last_node = self.head
        while last_node.next:
            last_node = last_node.next
        last_node.next = new_node

    def prepend(self, data):
        """Add *data* at the head (O(1))."""
        new_node = Node(data)
        new_node.next = self.head
        self.head = new_node

    def insert(self, data, prev_node):
        """Insert *data* immediately after *prev_node* (no-op if it is falsy)."""
        if not prev_node:
            print("SOL")  # original diagnostic for a missing anchor node
            return
        new_node = Node(data)
        new_node.next = prev_node.next
        prev_node.next = new_node

    def delete_node(self, key):
        """Remove the first node whose data equals *key* (no-op if absent)."""
        cur_node = self.head
        if cur_node and cur_node.data == key:
            self.head = cur_node.next
            return
        prev = None
        while cur_node and cur_node.data != key:
            prev = cur_node
            cur_node = cur_node.next
        if cur_node is None:
            return
        prev.next = cur_node.next

    def delete_node_at_position(self, position):
        """Remove the node at 0-based *position* (no-op if out of range)."""
        cur_node = self.head
        # Robustness fix: deleting from an empty list used to crash with
        # AttributeError when position == 0.
        if cur_node is None:
            return
        if position == 0:
            self.head = cur_node.next
            return
        prev = None
        # Bug fix: the counter previously started at 1, so position 1
        # crashed (prev was still None) and every position >= 2 removed the
        # node *before* the requested one.  Starting at 0 gives consistent
        # 0-based indexing.
        count = 0
        while cur_node and count != position:
            prev = cur_node
            cur_node = cur_node.next
            count += 1
        if cur_node is None:
            return
        prev.next = cur_node.next

    def len_iterative(self):
        """Return the number of nodes (iterative walk)."""
        count = 0
        cur_node = self.head
        while cur_node:
            count += 1
            cur_node = cur_node.next
        return count

    def len_recursive(self, node):
        """Return the number of nodes from *node* to the tail (recursive)."""
        if node is None:
            return 0
        return 1 + self.len_recursive(node.next)

    def sum_2_lists(self, llist):
        """Print the digit-wise sum of self and *llist*.

        Both lists store one decimal digit per node, least-significant
        digit first; the printed result uses the same layout.
        """
        p = self.head
        q = llist.head
        sum_list = LinkedList()
        carry = 0
        while p or q:
            i = p.data if p else 0
            j = q.data if q else 0
            s = i + j + carry
            if s >= 10:
                carry = 1
                sum_list.append(s % 10)
            else:
                carry = 0
                sum_list.append(s)
            if p:
                p = p.next
            if q:
                q = q.next
        # Bug fix: a carry out of the most significant digit was dropped
        # (e.g. 5 + 5 printed just 0); emit it as a final digit.
        if carry:
            sum_list.append(carry)
        sum_list.print_list()
# Demo: build two 3-digit lists (least-significant digit first) and print
# their digit-wise sum.
llist1 = LinkedList()
for digit in (5, 6, 3):
    llist1.append(digit)
llist1.print_list()

llist2 = LinkedList()
for digit in (8, 4, 2):
    llist2.append(digit)
llist2.print_list()

print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`")
llist1.sum_2_lists(llist2)
11,649 | 614c0d12feb4b333e86ada93d1bb7c467f3e2010 | import h5py
import os
import glob
import argparse
from plantsegtools.utils import H5_FORMATS
def parse_crop(crop_str):
    """Turn a crop spec string such as "[:, 0:620, 420:1750]" into a tuple
    with one entry per comma-separated part: a ``slice`` for parts that
    contain ':', a plain ``int`` otherwise."""
    spec = crop_str.replace('[', '').replace(']', '')
    parts = []
    for part in spec.split(','):
        cleaned = part.strip()
        if ':' in part:
            bounds = [int(i) if i else None for i in cleaned.split(':')]
            parts.append(slice(*bounds))
        else:
            parts.append(int(cleaned))
    return tuple(parts)
def parse():
    """Build and evaluate the command-line interface.

    Returns the parsed argparse namespace with:
      --path  (required) path to an h5 file, or a directory for batch mode
      --crop  (optional) ZYX crop spec; the default keeps everything
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", type=str, required=True,
                        help='path to h5 file or to a directory for batch processing of multiple files')
    parser.add_argument('--crop', type=str, required=False, default='[:,:,:]',
                        help='[ZYX] cropping to apply (e.g. "[:, 0:620, 420:1750]")')
    return parser.parse_args()
class get3dds():
    """h5py ``visititems`` callback that partitions dataset names by rank.

    After traversal, ``sets3d`` holds the names of all datasets with more
    than two dimensions and ``sets_n3d`` the names of the remaining ones.
    """

    def __init__(self):
        self.sets3d = []    # names of >2-D datasets
        self.sets_n3d = []  # names of every other dataset

    def __call__(self, name, node):
        if not isinstance(node, h5py.Dataset):
            return None  # groups etc. are ignored
        target = self.sets3d if len(node.shape) > 2 else self.sets_n3d
        target.append(name)
        return None
if __name__ == '__main__':
    '''Crop all >=3D datasets of an h5 file, leaves other untouched, saves a copy.'''
    args = parse()
    # Setup input path
    if os.path.isfile(args.path):
        all_files = [args.path]
    elif os.path.isdir(args.path):
        # Batch mode: every file matching the h5 extensions in the directory.
        all_files = glob.glob(os.path.join(args.path, f'*{H5_FORMATS}'))
    else:
        raise NotImplementedError
    for i, file_path in enumerate(all_files, 1):
        # Prep IO: the output lives next to the input, named "<stem>_crop.h5".
        path = os.path.dirname(file_path)
        fname = os.path.splitext(os.path.basename(file_path))[0]
        outfname = f"{fname}_crop.h5"
        outpath = os.path.join(path, outfname)
        print(f"Processing {fname}.h5 ({i}/{len(all_files)})")
        with h5py.File(file_path, 'r') as f:
            with h5py.File(outpath, 'w') as fc:
                # Traverse the file hierarchy and save names of 3D datasets and the others
                d3ds = get3dds()
                f.visititems(d3ds)
                for ds in d3ds.sets3d:
                    # Process the 3D datasets --> crop
                    if len(f[ds].shape) == 3:
                        crop = parse_crop(args.crop)
                    else:
                        # >3-D: prepend a full slice so the ZYX crop applies
                        # to the trailing three axes.
                        crop = parse_crop(args.crop)
                        crop = (slice(None, None, None), *crop)
                    crop_ds = f[ds][crop]
                    # store the attributes
                    _temp = {}
                    for k, v in f[ds].attrs.items():
                        _temp[k] = v
                    # Write cropped dataset to outfile
                    fc.create_dataset(ds, data=crop_ds, compression='gzip')
                    # reassign the attributes
                    for k, v in _temp.items():
                        fc[ds].attrs[k] = v
                for ds in d3ds.sets_n3d:
                    # For the not 3D datasets, copy them to the outfile
                    # store the attributes
                    _temp = {}
                    for k, v in f[ds].attrs.items():
                        _temp[k] = v
                    # Copy dataset to outfile
                    fc.create_dataset(ds, data=f[ds], compression='gzip')
                    # reassign the attributes
                    for k, v in _temp.items():
                        fc[ds].attrs[k] = v
        # NOTE(review): this increment is dead code -- 'i' is reassigned by
        # enumerate() at the top of every iteration.
        i += 1
|
11,650 | 365df7277fbd0bfa0063cfe6dcdd01c4c841a897 | import torch
from torch.utils.data import Dataset as dataset
from utility import get_train_data
from utility import get_test_data
class Dataset(dataset):
    """Torch dataset over the arrays produced by the ``utility`` helpers.

    Args:
        training: pass ``True`` for labeled training data, anything else
            for the unlabeled test split (the original uses an identity
            check against ``True``, which is preserved here).
    """

    def __init__(self, training):
        self.training = training
        if training is True:
            raw, labels = get_train_data()
            # Training pixels are scaled into [0, 1].
            self.data = torch.FloatTensor(raw) / 255
            self.label = torch.LongTensor(labels)
        else:
            # NOTE(review): unlike the training branch, test data is NOT
            # divided by 255 -- confirm get_test_data() already normalizes.
            self.data = torch.FloatTensor(get_test_data())

    def __getitem__(self, index):
        sample = self.data[index, :, :, :]
        if self.training is False:
            return sample
        return sample, self.label[index]

    def __len__(self):
        return self.data.shape[0]
# Materialized at import time: both splits are loaded as soon as this
# module is imported.
train_ds = Dataset(True)
test_ds = Dataset(False)
|
11,651 | 8db9b96bd3fb522a2eb5cd47cb451a4a3f5e3e31 | import os
import threading
import webview
from time import time
from API.API import Api
def get_entrypoint():
    """Locate the bundled web GUI's index.html.

    Probes the known locations relative to this file (development tree,
    frozen py2app bundle, local gui/ directory) and returns the first hit.

    Raises:
        Exception: when no candidate file exists.
    """
    def exists(path):
        return os.path.exists(os.path.join(os.path.dirname(__file__), path))

    candidates = (
        '../gui/index.html',            # unfrozen development
        '../Resources/gui/index.html',  # frozen py2app
        './gui/index.html',
    )
    for candidate in candidates:
        if exists(candidate):
            return candidate
    raise Exception('No index.html found')
def set_interval(interval):
    """Decorator factory: run the wrapped function every *interval* seconds
    in a background daemon thread.

    Calling the decorated function starts the loop and returns a
    ``threading.Event``; setting that event stops the loop.
    """
    def decorator(function):
        def wrapper(*args, **kwargs):
            stop_signal = threading.Event()

            def loop():
                # Runs on the helper thread until the event is set; wait()
                # doubles as the sleep between invocations.
                while not stop_signal.wait(interval):
                    function(*args, **kwargs)

            worker = threading.Thread(target=loop)
            worker.daemon = True  # do not block interpreter exit
            worker.start()
            return stop_signal

        return wrapper

    return decorator
# Resolve the GUI entry page once at import time (raises if not found).
entry = get_entrypoint()


# Currently a no-op placeholder; once invoked it repeats every second in a
# daemon thread (see set_interval).
@set_interval(1)
def update_ticker():
    pass
if __name__ == '__main__':
    # Starts the application
    # Fullscreen window backed by Api() for JS<->Python calls; webview.start
    # enters the GUI loop and invokes update_ticker as its callback.
    window = webview.create_window('Argo Editor', entry, js_api=Api(),fullscreen=True)
    webview.start(update_ticker, debug=True)
|
11,652 | 500340660b5f235856bf89fe37cda909fab9d15f | #!/usr/bin/env python3
import os
import shutil
import subprocess
# Staging directory for the generated documentation.
TARGET = ".docs"
# Feature flag selecting the libclang version the docs are built against.
VERSION = "clang_9_0"
# Start from a clean staging directory.
if os.path.isdir(TARGET):
    shutil.rmtree(TARGET)
os.mkdir(TARGET)
# Build the docs twice -- default features and with the `runtime` feature --
# staging each build under its own subdirectory.
for (name, features) in [("default", VERSION), ("runtime", f"runtime,{VERSION}")]:
    subprocess.call(["cargo", "clean"])
    subprocess.call(["cargo", "doc", f"--features={features}", "--no-deps"])
    print(f"Copying docs to {TARGET}/{name}...")
    # Fixed: this literal carried a pointless f-string prefix (no placeholders).
    shutil.copytree("target/doc", f"{TARGET}/{name}")
# Publish the staged docs to the gh-pages branch with a fresh (orphan) history.
os.chdir(TARGET)
subprocess.call(["git", "init"])
subprocess.call(["git", "remote", "add", "origin", "git@github.com:KyleMayes/clang-sys.git"])
subprocess.call(["git", "checkout", "--orphan", "gh-pages"])
subprocess.call(["git", "add", "-A"])
# NOTE(review): the escaped quotes end up inside the commit message itself
# ("\"Update documentation\"") -- confirm that is intended before changing.
subprocess.call(["git", "commit", "-m", "\"Update documentation\""])
subprocess.call(["git", "push", "origin", "gh-pages", "--force"])
os.chdir("..")
shutil.rmtree(TARGET)
|
11,653 | afe2f40a3a6f6554f074550393902c515288bab9 | """PIMAP Sense component that listens for TCP packets.
PIMAP Sense TCP is a PIMAP Sense component that starts a multi-process server on a given
host and port. PIMAP Sense TCP supports both IPv4 and IPv6.
License:
Author: Sam Mansfield
"""
import ast
import ctypes
import multiprocessing
import numpy as np
import socket
import time
from pimap import pimaputilities as pu
class PimapSenseTcp:
  def __init__(self, host="localhost", port=31416, sample_type="tcp", ipv6=False,
               sense_workers=3, pimap_workers=1, system_samples=False, app=""):
    """Constructor for PIMAP Sense TCP
    Arguments:
      host (optional): The host of the TCP server. Defaults to "localhost".
      port (optional): The port of the TCP server. Defaults to 31416.
      sample_type (optional): The sample type given to non-pimap sensed data.
        Defaults to "tcp".
      ipv6 (optional): Whether the address is IPv6. Defaults to False.
      sense_workers (optional): The number of simultaneous TCP connections.
        Defaults to 3.
      pimap_workers (optional): The number of processes to create pimap data from
        sensed data. Defaults to 1.
      system_samples (optional): A boolean value that indicates whether system_samples
        are produced that report the throughput of this component. Defaults to False.
      app (optional): A name of the application running, which is used to append
        to the sample_type of system_samples,
        e.g. sample_type:"system_samples_app". Defaults to "".
    Exceptions:
      socket.error:
        If attempting to bind to an invalid address.
      ValueError:
        If a non-integer port is given or a port not in the range of 1024-65535.
    """
    self.host = host
    self.port = int(port)
    if self.port < 1024 or self.port > 65535:
      raise(ValueError("Port must be an integer in the range 1024-65535."))
    self.sample_type = str(sample_type)
    self.ipv6 = bool(ipv6)
    self.sense_workers = int(sense_workers)
    self.pimap_workers = int(pimap_workers)
    self.system_samples = bool(system_samples)
    self.app = str(app)
    # System Samples Setup
    self.sensed_data = 0
    self.system_samples_updated = time.time()
    self.system_samples_period = 1.0
    self.latencies = []
    # Socket Setup: resolve the address for the requested family and create
    # a matching stream socket.
    if not self.ipv6:
      addrinfo = socket.getaddrinfo(self.host, self.port, socket.AF_INET,
                                    socket.SOCK_STREAM)
      self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      address = addrinfo[0][4]
    else:
      addrinfo = socket.getaddrinfo(self.host, self.port, socket.AF_INET6,
                                    socket.SOCK_STREAM)
      self.socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
      address = addrinfo[0][4]
    # The 1s timeout lets accept() in the workers wake up periodically so
    # they can observe the shared `running` flag during shutdown.
    self.socket.settimeout(1.0)
    try: self.socket.bind(address)
    except socket.error as e:
      self.socket.close()
      raise e
    self.max_buffer_size = 4096
    self.host = self.socket.getsockname()[0]
    self.socket.listen(self.sense_workers)
    # Address Lookup Setup
    # Address lookup is by the tuple (patient_id, device_id) -> IP address.
    # TODO: Under development! May be used in the future for PIMAP commands.
    # NOTE(review): this plain dict is also written from the worker
    # processes, where updates stay local to each child process -- confirm
    # that is the intent before relying on it in the parent.
    self.addresses_by_id = {}
    # Multiprocess Setup: a shared boolean gates all worker loops; two
    # queues carry raw (processed, address) tuples and finished PIMAP data.
    self.running = multiprocessing.Value(ctypes.c_bool, True)
    self.running.value = True
    self.pimap_data_queue = multiprocessing.Queue()
    self.received_address_queue = multiprocessing.Queue()
    self.sense_worker_processes = []
    for i in range(self.sense_workers):
      worker_process = multiprocessing.Process(target=self._sense_worker, daemon=True)
      self.sense_worker_processes.append(worker_process)
      worker_process.start()
    self.pimap_worker_processes = []
    for i in range(self.pimap_workers):
      worker_process = multiprocessing.Process(
        target=self._create_pimap_data_and_add_to_queue, daemon=True)
      self.pimap_worker_processes.append(worker_process)
      worker_process.start()
    # Give the worker processes a moment to start before returning.
    time.sleep(0.1)
  def _create_pimap_data_and_add_to_queue(self):
    # Worker loop: drain (processed, address) tuples from the sense workers
    # and turn each entry into a PIMAP datum on pimap_data_queue.
    while self.running.value:
      if not self.received_address_queue.empty():
        (processed, address) = self.received_address_queue.get()
        # If valid PIMAP sample/metric is received add it to the queue.
        # Assume that if there is one valid PIMAP datum than all the data is PIMAP data.
        if pu.validate_datum(processed[0] + ";;"):
          # Re-append the ";;" terminator stripped during splitting.
          pimap_data = list(map(lambda x: x + ";;", processed))
          for pimap_datum in pimap_data:
            patient_id = pu.get_patient_id(pimap_datum)
            device_id = pu.get_device_id(pimap_datum)
            self.pimap_data_queue.put(pimap_datum)
            # TODO: Under development! May be used in the future for PIMAP commands.
            self.addresses_by_id[(patient_id, device_id)] = address
        else:
          # Non-PIMAP payload: wrap each chunk in a new PIMAP sample, using
          # the peer's (host, port) as patient/device identifiers.
          for datum in processed:
            patient_id = address[0]
            device_id = address[1]
            sample = datum
            pimap_datum = pu.create_pimap_sample(self.sample_type, patient_id,
                                                 device_id, sample)
            self.pimap_data_queue.put(pimap_datum)
            # TODO: Under development! May be used in the future for PIMAP commands.
            self.addresses_by_id[(patient_id, device_id)] = address
  def _sense_worker(self):
    """Worker process
    Used internally to create TCP server processes. Accepts one connection
    at a time, splits the byte stream on the ";;" terminator, and forwards
    complete chunks (with the peer address) to the pimap workers.
    """
    terminator = ";;"
    while self.running.value:
      try:
        (conn, address) = self.socket.accept()
        with conn:
          received_coded = conn.recv(self.max_buffer_size)
          received = ""
          while received_coded:
            received += received_coded.decode()
            if terminator in received:
              received_split = received.split(terminator)
              # Keep the trailing partial chunk in the buffer for the
              # next recv(); everything before it is complete.
              processed = received_split[:-1]
              received = received_split[-1]
              self.received_address_queue.put((processed, address))
            received_coded = conn.recv(self.max_buffer_size)
      # accept() times out every second so the loop can re-check `running`.
      except socket.timeout: continue
  def sense(self):
    """Core interaction of PIMAP Sense TCP.
    Returns:
      A list of PIMAP samples/metrics sensed since last call to sense(),
      plus (optionally) one system_sample reporting throughput/latency.
    """
    # Get all PIMAP data from the queue.
    pimap_data = []
    while not self.pimap_data_queue.empty():
      pimap_data.append(self.pimap_data_queue.get())
    # Sort the PIMAP data by timestamp. The PIMAP data can be out of order because we are
    # using multiple processes to sense it.
    pimap_data.sort(key=lambda x: float(pu.get_timestamp(x)))
    timestamps = (list(map(lambda x: float(pu.get_timestamp(x)), pimap_data)))
    self.latencies.extend(time.time() - np.array(timestamps))
    # Track the amount of sensed PIMAP data.
    self.sensed_data += len(pimap_data)
    # If system_samples is True and a system_sample was not created within the last
    # system_samples period, create a system_sample.
    pimap_system_samples = []
    if (self.system_samples and
        (time.time() - self.system_samples_updated > self.system_samples_period)):
      sample_type = "system_samples"
      if self.app != "":
        sample_type += "_" + self.app
      # Identify PIMAP Sense using the host and port.
      patient_id = "sense"
      device_id = (self.host, self.port)
      sensed_data_per_s = self.sensed_data/(time.time() - self.system_samples_updated)
      sample = {"throughput":sensed_data_per_s}
      if len(self.latencies) > 0:
        sample["latency"] = np.mean(self.latencies)
      system_sample = pu.create_pimap_sample(sample_type, patient_id, device_id, sample)
      pimap_system_samples.append(system_sample)
      # Reset system_samples variables.
      self.system_samples_updated = time.time()
      self.sensed_data = 0
      self.latencies = []
    return pimap_data + pimap_system_samples
  def close(self):
    """Safely stop the TCP server.
    Terminates server processes and closes the socket.
    """
    self.running.value = False
    # Empty queues or processes won't join.
    while not self.pimap_data_queue.empty(): self.pimap_data_queue.get()
    while not self.received_address_queue.empty(): self.received_address_queue.get()
    for worker_process in self.sense_worker_processes:
      worker_process.join()
    for worker_process in self.pimap_worker_processes:
      worker_process.join()
    self.socket.close()
# Deprecated Methods: May be used in the future.
#
# # get_max_buffer_size: Get the max buffer size.
# def get_max_buffer_size(self):
# return self.max_buffer_size
#
# # send_command: Sends the given command to the address based on the given patient_id
# # and device_id.
# # Under Development!
# def send_command(self, patient_id, device_id, command):
# """Under Development!
#
# """
# if (str(patient_id), str(device_id)) in self.addresses_by_id:
# address = self.addresses_by_id[(str(patient_id), str(device_id))]
# self.socket.sendto(command.encode(), address)
|
11,654 | 09f5f6f69bca0d08c9bce1c6632f9f167232db6c | n = int(input("Enter number : "))
print((n-1), n, (n+1)) |
11,655 | d7aa24f1bda0a37a62212b822ddc644e1ccaa97c | """rgd URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from main.views import *
# URL routes for the journal site; views are wildcard-imported from main.views.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Category listing has no URL name, so templates must hard-code its path.
    url(r'^category/(?P<id>\d+)$', category),
    url(r'^top/(?P<id>\d+).html$', top_detail, name="top_detail"),
    # NOTE(review): the view is journal_detail but the reverse name is
    # "issue_detail" -- confirm whether this mismatch is intentional.
    url(r'^journal/(?P<id>\d+).html$', journal_detail, name="issue_detail"),
    url(r'^article/(?P<id>\d+).html$', article_detail, name="article_detail"),
    url(r'^$', home, name='home')
]
|
11,656 | 627856994dd23f26f8450235f8e548a1b9e2feed | #!/usr/bin/env python3
# Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import os
import numpy as np
import tensorrt as trt
import test_util as tu
def np_to_model_dtype(np_dtype):
    """Map a numpy dtype to the Triton model-config datatype string.

    Arguments:
      np_dtype: a numpy scalar type (or `bool` / `np.dtype(object)`).
    Returns:
      The "TYPE_*" config string, or None when the dtype has no equivalent.
    """
    # Sentinel dtype used by the Triton test utilities for string tensors.
    # The original referenced `np_dtype_string` without defining it anywhere
    # in this file, so any dtype falling through to the string check raised
    # NameError; define the sentinel locally instead.
    np_dtype_string = np.dtype(object)
    if np_dtype == bool:
        return "TYPE_BOOL"
    elif np_dtype == np.int8:
        return "TYPE_INT8"
    elif np_dtype == np.int16:
        return "TYPE_INT16"
    elif np_dtype == np.int32:
        return "TYPE_INT32"
    elif np_dtype == np.int64:
        return "TYPE_INT64"
    elif np_dtype == np.uint8:
        return "TYPE_UINT8"
    elif np_dtype == np.uint16:
        return "TYPE_UINT16"
    elif np_dtype == np.float16:
        return "TYPE_FP16"
    elif np_dtype == np.float32:
        return "TYPE_FP32"
    elif np_dtype == np.float64:
        return "TYPE_FP64"
    elif np_dtype == np_dtype_string:
        return "TYPE_STRING"
    return None
def np_to_trt_dtype(np_dtype):
    """Map a numpy dtype to the TensorRT DataType (None when unsupported)."""
    conversions = (
        (bool, trt.bool),
        (np.int8, trt.int8),
        (np.int32, trt.int32),
        (np.float16, trt.float16),
        (np.float32, trt.float32),
    )
    # Compare with == (not identity) to match both type objects and dtypes,
    # preserving the original chain's semantics and ordering.
    for source_type, trt_type in conversions:
        if np_dtype == source_type:
            return trt_type
    return None
# The 'nonzero' model that we use for data dependent shape is naturally
# not support batching, because the layer output is not trivially separable
# based on the request batch size.
# input_shape is config shape
def create_data_dependent_modelfile(
    models_dir, model_name, input_shape, input_dtype=np.int32, min_dim=1, max_dim=32
):
    """Build and serialize a TensorRT 'nonzero' engine with a data-dependent
    output shape, writing it to <models_dir>/<model_name>/1/model.plan.

    Arguments:
      models_dir: top-level model repository directory.
      model_name: model directory name to create.
      input_shape: config-style input shape; -1 marks a dynamic dimension.
      input_dtype: numpy dtype of the input tensor.
      min_dim / max_dim: bounds used for dynamic dimensions in the
        optimization profile (opt point is the midpoint).
    """
    trt_input_dtype = np_to_trt_dtype(input_dtype)
    # Create the model
    TRT_LOGGER = trt.Logger(trt.Logger.INFO)
    builder = trt.Builder(TRT_LOGGER)
    network = builder.create_network(
        1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    )
    # input
    in0 = network.add_input("INPUT", trt_input_dtype, input_shape)
    # layers: NonZero produces a data-dependent output shape by design.
    non_zero = network.add_non_zero(in0)
    # configure output
    out0 = non_zero.get_output(0)
    out0.name = "OUTPUT"
    network.mark_output(out0)
    # Optimization profile: dynamic (-1) dims range over [min_dim, max_dim];
    # fixed dims are pinned to their configured value.
    min_shape = []
    opt_shape = []
    max_shape = []
    for i in input_shape:
        if i == -1:
            min_shape = min_shape + [min_dim]
            opt_shape = opt_shape + [int((max_dim + min_dim) / 2)]
            max_shape = max_shape + [max_dim]
        else:
            min_shape = min_shape + [i]
            opt_shape = opt_shape + [i]
            max_shape = max_shape + [i]
    profile = builder.create_optimization_profile()
    profile.set_shape("INPUT", min_shape, opt_shape, max_shape)
    config = builder.create_builder_config()
    config.add_optimization_profile(profile)
    config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 20)
    # serialized model
    engine_bytes = builder.build_serialized_network(network, config)
    model_version_dir = models_dir + "/" + model_name + "/1"
    # Idiomatic replacement for try/except OSError -> pass: only an existing
    # directory should be tolerated, not arbitrary OS errors.
    os.makedirs(model_version_dir, exist_ok=True)
    with open(model_version_dir + "/model.plan", "wb") as f:
        f.write(engine_bytes)
def create_data_dependent_modelconfig(
    models_dir, model_name, input_shape, input_dtype=np.int32
):
    """Write the config.pbtxt for a data-dependent-shape (nonzero) model.

    The OUTPUT dims are (rank(input), -1): one coordinate row per input
    dimension, with a data-dependent number of columns.
    """
    config_dir = models_dir + "/" + model_name
    config = """
name: "{}"
platform: "tensorrt_plan"
max_batch_size: 0
input [
  {{
    name: "INPUT"
    data_type: {}
    dims: [ {} ]
  }}
]
output [
  {{
    name: "OUTPUT"
    data_type: {}
    dims: [ {} ]
  }}
]
""".format(
        model_name,
        np_to_model_dtype(input_dtype),
        tu.shape_to_dims_str(input_shape),
        np_to_model_dtype(np.int32),
        tu.shape_to_dims_str((len(input_shape), -1)),
    )
    # Idiomatic replacement for try/except OSError -> pass: only an existing
    # directory should be tolerated, not arbitrary OS errors.
    os.makedirs(config_dir, exist_ok=True)
    with open(config_dir + "/config.pbtxt", "w") as cfile:
        cfile.write(config)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--models_dir", type=str, required=True, help="Top-level model directory"
    )
    FLAGS, unparsed = parser.parse_known_args()
    # Fixed input shape: plan and config for a 4x4 input.
    create_data_dependent_modelfile(
        FLAGS.models_dir, "plan_nobatch_nonzero_fixed", (4, 4)
    )
    create_data_dependent_modelconfig(
        FLAGS.models_dir, "plan_nobatch_nonzero_fixed", (4, 4)
    )
    # Dynamic input shape: both dims dynamic, ranged by the default
    # min_dim/max_dim of the model-file builder.
    create_data_dependent_modelfile(
        FLAGS.models_dir, "plan_nobatch_nonzero_dynamic", (-1, -1)
    )
    create_data_dependent_modelconfig(
        FLAGS.models_dir, "plan_nobatch_nonzero_dynamic", (-1, -1)
    )
|
11,657 | 052870a5ce530de4c4de3f3bcfd7c2f13cdec525 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import random
import math
class Poisson:
    """Poisson(lmbda) sampler based on inverse-transform sampling."""

    def __init__(self, lmbda, noMuestras):
        self.lmbda = lmbda
        self.noMuestras = noMuestras
        # Placeholder sample buffer; refreshed by presentaMuestras().
        self.muestras = [0] * noMuestras

    def simula(self):
        """Draw `noMuestras` independent Poisson variates."""
        return [self.poisson() for _ in range(self.noMuestras)]

    def poisson(self):
        """Return one Poisson(lmbda) variate by inverting the CDF.

        Draws a single uniform and walks the CDF term by term until it
        exceeds the draw; the number of steps taken is the variate.
        """
        u = random.uniform(0, 1)
        prob = math.exp(-1 * self.lmbda)  # P(X = 0)
        cdf = prob
        k = 0
        while u >= cdf:
            k += 1
            # Recurrence: P(X = k) = lmbda * P(X = k-1) / k
            prob = (self.lmbda * prob) / k
            cdf = cdf + prob
        return k

    def presentaMuestras(self):
        """Regenerate the samples and print them as a bracketed list,
        wrapping after 16 values per line."""
        self.muestras = self.simula()
        total = len(self.muestras)
        text = "[ "
        if total == 0:
            # Quirk preserved from the original: the empty case returns the
            # string instead of printing it.
            return text + "]"
        per_line = 0
        for idx in range(total):
            formatted = "%.2f" % self.muestras[idx]
            if idx == total - 1:
                text = text + formatted + " ]"
            elif per_line == 15:
                text = text + formatted + ", \n"
                per_line = 0
            else:
                text = text + formatted + ", "
                per_line += 1
        print(text)

    def grafica(self):
        """Plot a histogram/KDE of the current samples with seaborn."""
        if len(self.muestras) > 1:
            ax = sns.distplot(self.muestras,
                              kde=True,
                              bins=100,
                              color='skyblue',
                              hist_kws={"linewidth": 15, 'alpha': 1})
            ax.set(xlabel='Poisson Distribution', ylabel='Frequency')
            plt.show()
|
11,658 | 4dcdf59df010e00c6c50d2899cbe7dd2fadf1912 | # noqa pylint: disable=too-many-lines, line-too-long, invalid-name, unused-argument, redefined-builtin, broad-except, fixme
"""
Creates the Area charts
All steps required to generate area charts.
"""
# Built-in Modules
import itertools
import json
import sys
import traceback
# Third-party Modules
from matplotlib import pyplot as plt
from matplotlib import patches
# My Modules
import chart_tools
# Shorthand aliases into the payload handed over by chart_tools.
LOG = chart_tools.LOG
PAYLOAD = chart_tools.payload
P_DICT = PAYLOAD['p_dict']    # per-chart plot settings
K_DICT = PAYLOAD['k_dict']    # kwarg bundles for matplotlib calls
PLUG_DICT = PAYLOAD['prefs']  # plugin-level preferences
PROPS = PAYLOAD['props']      # device properties
CHART_NAME = PROPS['name']
X_OBS = ''            # X values (dates) of the last plotted area
Y_OBS_TUPLE = ()      # Y values
Y_OBS_TUPLE_REL = {}  # Y values relative to chart (cumulative value)
Y_COLORS_TUPLE = ()   # Y area colors
LOG['Threaddebug'].append("chart_area.py called.")
plt.style.use(f"Stylesheets/{PROPS['id']}_stylesheet")
if PLUG_DICT['verboseLogging']:
    LOG['Threaddebug'].append(f"{PAYLOAD}")
# NOTE(review): a module-level function named __init__ that does nothing --
# appears to be a template placeholder; confirm before removing.
def __init__():
    ...
try:
    # Build the figure/axes and apply the device-configured axis formatting.
    ax = chart_tools.make_chart_figure(
        width=P_DICT['chart_width'], height=P_DICT['chart_height'], p_dict=P_DICT
    )
    chart_tools.format_axis_x_ticks(ax=ax, p_dict=P_DICT, k_dict=K_DICT, logger=LOG)
    chart_tools.format_axis_y(ax=ax, p_dict=P_DICT, k_dict=K_DICT, logger=LOG)
    # Up to eight configurable area series (area1..area8).
    for area in range(1, 9, 1):
        suppress_area = P_DICT.get(f"suppressArea{area}", False)
        # If the area is suppressed, remind the user they suppressed it.
        if suppress_area:
            LOG['Info'].append(
                f"[{CHART_NAME}] Area {area} is suppressed by user setting. You can re-enable it "
                f"in the device configuration menu."
            )
        # ============================== Plot the Areas ===============================
        # Plot the areas. If suppress_area is True, we skip it.
        if P_DICT[f'area{area}Source'] not in ("", "None") and not suppress_area:
            # If area color is the same as the background color, alert the user.
            if P_DICT[f'area{area}Color'] == P_DICT['backgroundColor'] and not suppress_area:
                LOG['Warning'].append(
                    f"[{CHART_NAME}] Area {area} color is the same as the background color (so "
                    f"you may not be able to see it)."
                )
            data_path = PLUG_DICT['dataPath']
            area_source = P_DICT[f'area{area}Source']
            data_column = chart_tools.get_data(data_source=f'{data_path}{area_source}', logger=LOG)
            if PLUG_DICT['verboseLogging']:
                LOG['Threaddebug'].append(f"[{CHART_NAME}] Data for Area {area}: {data_column}")
            # Pull the headers (first row), then drop it from the data.
            P_DICT['headers'].append(data_column[0][1])
            del data_column[0]
            # Pull the observations into distinct lists for charting.
            for element in data_column:
                P_DICT[f'x_obs{area}'].append(element[0])
                P_DICT[f'y_obs{area}'].append(float(element[1]))
            # ============================= Adjustment Factor =============================
            # Allows user to shift data on the Y axis (for example, to display multiple binary
            # sources on the same chart.)
            if PROPS[f'area{area}adjuster'] != "":
                temp_list = []
                for obs in P_DICT[f'y_obs{area}']:
                    expr = f"{obs}{PROPS[f'area{area}adjuster']}"
                    temp_list.append(chart_tools.eval_expr(expr=expr))
                P_DICT[f'y_obs{area}'] = temp_list
            # ================================ Prune Data =================================
            # Prune the data if warranted
            dates_to_plot = P_DICT[f'x_obs{area}']
            try:
                limit = float(PROPS['limitDataRangeLength'])
            except ValueError:
                limit = 0
            if limit > 0:
                y_obs = P_DICT[f'y_obs{area}']
                new_old = PROPS['limitDataRange']
                x_index = f'x_obs{area}'
                y_index = f'y_obs{area}'
                P_DICT[x_index], P_DICT[y_index] = chart_tools.prune_data(
                    x_data=dates_to_plot,
                    y_data=y_obs,
                    limit=limit,
                    new_old=new_old,
                    logger=LOG
                )
            # ======================== Convert Dates for Charting =========================
            P_DICT[f'x_obs{area}'] = \
                chart_tools.format_dates(list_of_dates=P_DICT[f'x_obs{area}'], logger=LOG)
            _ = [P_DICT['data_array'].append(node) for node in P_DICT[f'y_obs{area}']]
            # We need to plot all the stacks at once, so we create some tuples to hold the data we
            # need later.
            Y_OBS_TUPLE += (P_DICT[f'y_obs{area}'],)
            Y_COLORS_TUPLE += (P_DICT[f'area{area}Color'],)
            X_OBS = P_DICT[f'x_obs{area}']
            # ================================ Annotations ================================
            # New annotations code begins here - DaveL17 2019-06-05
            for _ in range(1, area + 1, 1):
                tup = ()
                # We start with the ordinal list and create a tuple to hold all the lists that
                # come before it.
                for k in range(_, 0, -1):
                    # NOTE(review): this always appends area's OWN series
                    # (`area`, not `k`), so the "cumulative" value is this
                    # series summed with itself k times. Confirm whether
                    # `k` was intended here.
                    tup += (P_DICT[f'y_obs{area}'],)
                # The relative value is the sum of each list element plus the ones that come before
                # it (i.e., tup[n][0] + tup[n-1][0] + tup[n-2][0]
                Y_OBS_TUPLE_REL[f'y_obs{area}'] = [sum(t) for t in zip(*tup)]
            annotate = P_DICT[f'area{area}Annotate']
            precision = int(PROPS.get(f'area{area}AnnotationPrecision', "0"))
            if annotate:
                for xy in zip(P_DICT[f'x_obs{area}'], Y_OBS_TUPLE_REL[f'y_obs{area}']):
                    ax.annotate(
                        f"{float(xy[1]):.{precision}f}",
                        xy=xy,
                        xytext=(0, 0),
                        zorder=10,
                        **K_DICT['k_annotation']
                    )
    # NOTE(review): `y_data` is computed but never used below -- the
    # stackplot plots the raw Y_OBS_TUPLE. Confirm whether hide_anomalies
    # was meant to feed the plot.
    y_data = chart_tools.hide_anomalies(data=Y_OBS_TUPLE[0], props=PROPS, logger=LOG)
    ax.stackplot(
        X_OBS,
        Y_OBS_TUPLE,
        edgecolor=None,
        colors=Y_COLORS_TUPLE,
        zorder=10,
        lw=0,
        **K_DICT['k_line']
    )
    # ============================== Y1 Axis Min/Max ==============================
    # Min and Max are not 'None'. The p_dict['data_array'] contains individual data points and
    # doesn't take into account the additive nature of the plot. Therefore, we get the axis scaling
    # values from the plot and then use those for min/max.
    _ = [P_DICT['data_array'].append(node) for node in ax.get_ylim()]
    chart_tools.format_axis_y1_min_max(p_dict=P_DICT, logger=LOG)
    # Transparent Chart Fill
    if P_DICT['transparent_charts'] and P_DICT['transparent_filled']:
        ax.add_patch(
            patches.Rectangle(
                (0, 0), 1, 1,
                transform=ax.transAxes,
                facecolor=P_DICT['faceColor'],
                zorder=1
            )
        )
    # ================================== Legend ===================================
    if P_DICT['showLegend']:
        # Amend the headers if there are any custom legend entries defined.
        counter = 1
        final_headers = []
        headers = P_DICT['headers']
        # headers = [_ for _ in P_DICT['headers']]
        # headers = [_.decode('utf-8') for _ in P_DICT['headers']]
        for header in headers:
            if P_DICT[f'area{counter}Legend'] == "":
                final_headers.append(header)
            else:
                final_headers.append(P_DICT[f'area{counter}Legend'])
            counter += 1
        # Set the legend
        # Reorder the headers and colors so that they fill by row instead of by column
        num_col = int(P_DICT['legendColumns'])
        iter_headers = itertools.chain(*[final_headers[i::num_col] for i in range(num_col)])
        final_headers = list(iter_headers)
        iter_colors = itertools.chain(*[Y_COLORS_TUPLE[i::num_col] for i in range(num_col)])
        final_colors = list(iter_colors)
        # Note that the legend does not support the PolyCollection created by the stackplot.
        # Therefore, we have to use a proxy artist. https://stackoverflow.com/a/14534830/2827397
        # NOTE(review): only two proxy handles are supplied even though up
        # to eight headers may exist -- confirm behavior with >2 series.
        p1 = patches.Rectangle((0, 0), 1, 1)
        p2 = patches.Rectangle((0, 0), 1, 1)
        legend = ax.legend(
            [p1, p2],
            final_headers,
            loc='upper center',
            bbox_to_anchor=(0.5, -0.15),
            ncol=num_col,
            prop={'size': float(P_DICT['legendFontSize'])}
        )
        # Set legend font color
        _ = [text.set_color(P_DICT['fontColor']) for text in legend.get_texts()]
        # Set legend area color
        num_handles = len(legend.legendHandles)
        _ = [legend.legendHandles[_].set_color(final_colors[_]) for _ in range(0, num_handles)]
        frame = legend.get_frame()
        frame.set_alpha(0)
    for area in range(1, 9, 1):
        suppress_area = P_DICT.get(f'suppressArea{area}', False)
        if P_DICT[f'area{area}Source'] not in ("", "None") and not suppress_area:
            # Note that we do these after the legend is drawn so that these areas don't affect the
            # legend.
            # We need to reload the dates to ensure that they match the area being plotted
            # dates_to_plot = self.format_dates(p_dict[f'x_obs{area}'])
            # =============================== Best Fit Line ===============================
            if PROPS.get(f'line{area}BestFit', False):
                chart_tools.format_best_fit_line_segments(
                    ax=ax,
                    dates_to_plot=P_DICT[f'x_obs{area}'],
                    line=area,
                    p_dict=P_DICT,
                    logger=LOG
                )
            _ = [P_DICT['data_array'].append(node) for node in P_DICT[f'y_obs{area}']]
            # =============================== Min/Max Lines ===============================
            if P_DICT[f'plotArea{area}Min']:
                ax.axhline(
                    y=min(Y_OBS_TUPLE_REL[f'y_obs{area}']),
                    color=P_DICT[f'area{area}Color'],
                    **K_DICT['k_min']
                )
            if P_DICT[f'plotArea{area}Max']:
                ax.axhline(
                    y=max(Y_OBS_TUPLE_REL[f'y_obs{area}']),
                    color=P_DICT[f'area{area}Color'],
                    **K_DICT['k_max']
                )
            if PLUG_DICT.get('forceOriginLines', True):
                ax.axhline(
                    y=0,
                    color=P_DICT['spineColor']
                )
            # ================================== Markers ==================================
            # Note that stackplots don't support markers, so we need to plot a line (with no width)
            # on the plot to receive the markers.
            if P_DICT[f'area{area}Marker'] != 'None':
                ax.plot_date(
                    P_DICT[f'x_obs{area}'],
                    Y_OBS_TUPLE_REL[f'y_obs{area}'],
                    marker=P_DICT[f'area{area}Marker'],
                    markeredgecolor=P_DICT[f'area{area}MarkerColor'],
                    markerfacecolor=P_DICT[f'area{area}MarkerColor'],
                    zorder=11,
                    lw=0
                )
            if P_DICT[f'line{area}Style'] != 'None':
                ax.plot_date(
                    P_DICT[f'x_obs{area}'], Y_OBS_TUPLE_REL[f'y_obs{area}'],
                    zorder=10,
                    lw=1,
                    ls='-',
                    marker=None,
                    color=P_DICT[f'line{area}Color']
                )
    chart_tools.format_custom_line_segments(
        ax=ax,
        plug_dict=PLUG_DICT,
        p_dict=P_DICT,
        k_dict=K_DICT,
        logger=LOG,
        orient="horiz"
    )
    chart_tools.format_grids(p_dict=P_DICT, k_dict=K_DICT, logger=LOG)
    chart_tools.format_title(p_dict=P_DICT, k_dict=K_DICT, loc=(0.5, 0.98))
    chart_tools.format_axis_x_label(dev=PROPS, p_dict=P_DICT, k_dict=K_DICT, logger=LOG)
    chart_tools.format_axis_y1_label(p_dict=P_DICT, k_dict=K_DICT, logger=LOG)
    chart_tools.format_axis_y_ticks(p_dict=P_DICT, k_dict=K_DICT, logger=LOG)
    # Note that subplots_adjust affects the space surrounding the subplots and not the fig.
    # plt.subplots_adjust(
    #     top=0.90,
    #     bottom=0.20,
    #     left=0.10,
    #     right=0.90,
    #     hspace=None,
    #     wspace=None
    # )
    chart_tools.save(logger=LOG)
except Exception as sub_error:
    tb = traceback.format_exc()
    # NOTE(review): sys.exc_info()[1] is the exception INSTANCE, not the
    # type, despite the variable name -- the logged text still reads fine.
    tb_type = sys.exc_info()[1]
    LOG['Debug'].append(f"[{CHART_NAME}] {tb}")
    LOG['Critical'].append(f"[{CHART_NAME}] Error type: {tb_type} in {__file__.split('/')[-1]}")
# Hand all accumulated log entries back to the parent process via stdout.
json.dump(LOG, sys.stdout, indent=4)
|
11,659 | 26cd2eef5380815ed39af0374710588e8e0ce8cd | import pytest
import time
from .pages.login_page import LoginPage
from .pages.basket_page import BasketPage
from .pages.product_page import ProductPage
@pytest.mark.need_review
@pytest.mark.user_adding_to_basket
class TestUserAddToBasketFromProductPage:
    # Registers and logs in a fresh throwaway user before each test.
    @pytest.fixture(scope="function", autouse=True)
    def setup(self, browser):
        self.page = LoginPage(browser, 'http://selenium1py.pythonanywhere.com/')
        self.page.open()
        self.page.go_to_login_page()
        self.page.register_new_user(str(time.time()) + "@fakemail.org", str(time.time()) + "pa33w0rd")
        self.page.should_be_authorized_user()
    # NOTE(review): the xfail reason strings below appear mojibake-encoded
    # (Russian text read with the wrong codec); gist: the final step waits
    # for an element to disappear, so the test is expected to fail, but the
    # test logic itself is sound. Consider re-encoding at the source.
    @pytest.mark.xfail(reason='ะขะตัั ัะฟะฐะดะตั, ะฟะพัะพะผั ััะพ ะฒ ะบะพะฝัะต ะธัะฟะพะปัะทัะตััั ะผะตัะพะด'
                              'ั ะพะถะธะดะฐะฝะธะตะผ ะบะพะณะดะฐ ัะปะตะผะตะฝั ะธััะตะทะฝะตั, ะฟะพััะพะผั ะฟะพะผะตัะตะฝ,'
                              'ะฝะพ ะฟะพ ัััะธ ัะตัั ะพััะฐะฑะฐััะฒะฐะตั ะฟะพ ัะฒะพะตะน ะปะพะณะธะบะต')
    def test_user_can_add_product_to_basket(self, browser):
        # Add a promo-priced book to the basket and confirm the success flow.
        self.link = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer0'
        self.page = ProductPage(browser, self.link)
        self.page.open()
        self.page.should_be_visible_button()
        self.page.adding_item_to_basket()
        self.page.solve_quiz_and_get_code()
        self.page.item_added_to_basket()
        self.page.should_be_disappear()
    def test_user_cant_see_success_message(self, browser):
        # A freshly opened product page must not show a success banner.
        self.link = 'http://selenium1py.pythonanywhere.com/ru/catalogue/coders-at-work_207/'
        self.page = ProductPage(browser, self.link)
        self.page.open()
        self.page.should_not_be_success_message()
@pytest.mark.need_review
# NOTE(review): the xfail reason string appears mojibake-encoded (Russian);
# gist: expected to fail because the final step waits for the element to
# disappear, though the test logic itself is sound.
@pytest.mark.xfail(reason='ะขะตัั ัะฟะฐะดะตั, ะฟะพัะพะผั ััะพ ะฒ ะบะพะฝัะต ะธัะฟะพะปัะทัะตััั ะผะตัะพะด'
                          'ั ะพะถะธะดะฐะฝะธะตะผ ะบะพะณะดะฐ ัะปะตะผะตะฝั ะธััะตะทะฝะตั, ะฟะพััะพะผั ะฟะพะผะตัะตะฝ,'
                          'ะฝะพ ะฟะพ ัััะธ ัะตัั ะพััะฐะฑะฐััะฒะฐะตั ะฟะพ ัะฒะพะตะน ะปะพะณะธะบะต')
def test_guest_can_add_product_to_basket(browser):
    # Same add-to-basket flow as the logged-in variant, but as a guest.
    link = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer0'
    page = ProductPage(browser, link)
    page.open()
    page.should_be_visible_button()
    page.adding_item_to_basket()
    page.solve_quiz_and_get_code()
    page.item_added_to_basket()
    page.should_be_disappear()
@pytest.mark.need_review
def test_guest_should_see_login_link_on_product_page(browser):
    # A guest on a product page must see the login link in the header.
    link = "http://selenium1py.pythonanywhere.com/en-gb/catalogue/the-city-and-the-stars_95/"
    page = ProductPage(browser, link)
    page.open()
    page.should_be_login_link()
@pytest.mark.need_review
def test_guest_can_go_to_login_page_from_product_page(browser):
    # The login link on a product page must navigate to the login page.
    link = "http://selenium1py.pythonanywhere.com/en-gb/catalogue/the-city-and-the-stars_95/"
    page = ProductPage(browser, link)
    page.open()
    page.go_to_login_page()
@pytest.mark.need_review
def test_guest_cant_see_product_in_basket_opened_from_product_page(browser):
    # A guest who never added anything must find the basket empty.
    link = "http://selenium1py.pythonanywhere.com/"
    page = ProductPage(browser, link)
    page.open()
    page.go_to_basket_page()
    basket_page = BasketPage(browser, browser.current_url)
    basket_page.should_not_be_items_in_basket()
    basket_page.should_be_text_basket_is_empty()
|
11,660 | f92b63b8700d532a4724ae21fa1afe6f2b66a1e1 | """
Keyword Recommenders.
"""
import math
import config
import random
from utils import timeit
from collections import defaultdict
class NonStatisticalMixin(object):
    """No-op statistics hooks for recommenders that keep no statistics,
    so callers can drive every recommender through the same interface."""
    def reset(self):
        # Nothing to reset.
        pass
    def round_statistics(self):
        pass
    def experiment_statistics(self):
        pass
class RandomRecommender(NonStatisticalMixin):
    """Baseline recommender that suggests products uniformly at random."""

    def __init__(self, dbm, *ignored):
        """
        @param dbm a DatabaseManager
        @param ignored extra positional arguments, accepted only for
               interface compatibility with the other recommenders
        """
        self.limit = 0
        self.dbm = dbm
        self.all_products = []

    def __str__(self):
        return 'Random Recommender[N=%d]' % self.limit

    def set_limit(self, limit):
        # Number of products returned by each recommend() call.
        self.limit = limit

    @timeit
    def preprocess(self, query_train_table):
        """Load the candidate pool: every product viewed by a training user."""
        # retrieve all products at once as there aren't many (< 4000)
        sql = '''SELECT DISTINCT pageinfo FROM visit
      WHERE pagetype = 'product' AND pageinfo != '' AND pageinfo != 'null' AND userid IN (
        SELECT user_id FROM %s
      )''' % query_train_table
        self.all_products = [(row['pageinfo'], 1.0) for row in self.dbm.get_rows(sql)]

    def recommend(self, query):
        """Return `limit` products drawn uniformly without replacement;
        the query itself is ignored."""
        return random.sample(self.all_products, self.limit)
class HottestRecommender(NonStatisticalMixin):
    """Popularity baseline: always recommends the most-viewed products."""

    def __init__(self, dbm, *ignored):
        """
        @param dbm a DatabaseManager
        @param ignored extra positional arguments, accepted only for
               interface compatibility with the other recommenders
        """
        self.limit = 0
        self.dbm = dbm
        self.recommend_list = []

    def __str__(self):
        return 'Hottest Recommender[N=%d]' % self.limit

    def set_limit(self, limit):
        # Number of products cached by preprocess() and returned thereafter.
        self.limit = limit

    @timeit
    def preprocess(self, query_train_table):
        """Cache the top-`limit` products by view count among training users."""
        top_rows = self.dbm.get_rows('''SELECT pageinfo, COUNT(id) count FROM visit
      WHERE pagetype = 'product' AND pageinfo != '' AND userid IN (
        SELECT user_id FROM %s
      ) GROUP BY pageinfo ORDER BY count DESC LIMIT %d''' % (query_train_table, self.limit))
        self.recommend_list = [(row['pageinfo'], row['count']) for row in top_rows]

    def recommend(self, query):
        """Return the cached popularity list; the query itself is ignored."""
        return self.recommend_list
class KeywordRecommender(object):
    """Recommends products whose keyword->product relevance (computed by a
    pluggable RelevanceMeasure) best matches the segmented query.

    Note: written for Python 2 (print statements, dict.iteritems/has_key).
    """

    def __init__(self, dbm, ws, rm):
        """
        Make sure to source rec_tables.sql before using this class.
        @param dbm a DatabaseManager
        @param ws a WordSegmenter
        @param rm a RelevanceMeasure
        """
        self.limit = 0
        self.dbm = dbm
        self.ws = ws
        self.rm = rm
        self.reset()

    def reset(self):
        """Clear all per-experiment state (cache, counters, round results)."""
        self._related_product_cache = {}
        self._not_enough_recs = 0
        self._round_results = []

    def set_limit(self, limit):
        # Maximal number of recommendations returned per query.
        self.limit = limit

    def __str__(self):
        return 'Keyword Recommender with %s[N=%d]' % (self.rm, self.limit)

    def preprocess(self, query_train_table):
        """Rebuild the keyword/product weight tables from the training queries.

        Runs inside a single DB transaction; the three mapping/relevance
        steps are overrideable hooks for sub-classes.
        """
        self.query_train_table = query_train_table
        # empty cache so that cache from last round does not interfere with next round
        self._related_product_cache = {}
        self._not_enough_recs = 0
        self.dbm.begin()
        self.dbm.query('TRUNCATE TABLE keyword');
        self.dbm.query('TRUNCATE TABLE keyword_query');
        self.dbm.query('TRUNCATE TABLE keyword_product_weight');
        # these methods can be overridden by sub-classes
        self._build_keyword_product_mapping()
        self._build_product_keyword_mapping()
        self._measure_relevance()
        self.dbm.commit()

    @timeit
    def _build_keyword_product_mapping(self):
        """Count keyword occurrences and keyword->product browse counts."""
        self.keyword_count = defaultdict(int)
        self.keyword_product_count = defaultdict(lambda: defaultdict(int))
        for qrow in self.dbm.get_rows('SELECT id, query FROM %s' % self.query_train_table):
            # GROUP_CONCAT returns a comma-separated string of sequence numbers
            products = [(qprow['product_name'], qprow['sequences']) for qprow in self.dbm.get_rows('SELECT product_name, GROUP_CONCAT(sequence) AS sequences FROM query_product WHERE query_id = %s GROUP BY product_name', (qrow['id'],))]
            # remove duplicate keywords within one query
            keywords = set(self.ws.segment(qrow['query']))
            for kw in keywords:
                self.keyword_count[kw] += 1
                # store keyword-query relations in db
                self.dbm.insert('INSERT INTO keyword_query (keyword, query_id) VALUES (%s, %s)', (kw, qrow['id']))
            for p, sequences in products:
                # get product sequence in this session
                count = self.get_browse_count(sequences)
                # update keyword_product_count
                for kw in keywords:
                    self.keyword_product_count[kw][p] += count

    def get_browse_count(self, sequences):
        """Overrideable by sub-class.
        Multiple browses in a session always count 1."""
        return 1

    @timeit
    def _build_product_keyword_mapping(self):
        # construct product_keyword_count:
        # it's actually equivalent to keyword_product_count, but lets us
        # compute related_keyword counts faster (inverted index)
        self.product_keyword_count = defaultdict(dict)
        for kw, dt in self.keyword_product_count.iteritems():
            for p, c in dt.iteritems():
                self.product_keyword_count[p][kw] = c

    @timeit
    def _measure_relevance(self):
        """Compute and persist a relevance weight for every keyword-product pair."""
        all_product_number = self.dbm.get_value('SELECT COUNT(DISTINCT product_name) FROM query_product')
        for keyword, count in self.keyword_count.iteritems():
            # will be used for statistics
            self.dbm.insert('INSERT INTO keyword (keyword, count) VALUES (%s, %s)', (keyword, count))
            related_product_number = len(self.keyword_product_count[keyword].keys())
            related_product_count = sum(self.keyword_product_count[keyword].values())
            # NOTE: the loop variable `count` below shadows the keyword count above
            for product, count in self.keyword_product_count[keyword].iteritems():
                related_keyword_number = len(self.product_keyword_count[product].keys())
                related_keyword_count = sum(self.product_keyword_count[product].values())
                # delegate to sub-classes
                relevance = self.rm.get_relevance(count, (related_product_number, related_product_count), (related_keyword_number, related_keyword_count), all_product_number)
                self.dbm.insert('INSERT INTO keyword_product_weight (keyword, product, weight) VALUES (%s, %s, %s)', (keyword, product, relevance))

    def round_statistics(self):
        """Get number of query, keywords, products, keyword-product relations of current round."""
        n_query = self.dbm.get_value("SELECT COUNT(*) FROM %s" % self.query_train_table)
        n_keyword = self.dbm.get_value("SELECT COUNT(*) FROM keyword")
        n_product = self.dbm.get_value("SELECT COUNT(DISTINCT product) FROM keyword_product_weight")
        n_relation = self.dbm.get_value("SELECT COUNT(*) FROM keyword_product_weight")
        self._round_results.append((n_query, self._not_enough_recs, n_keyword, n_product, n_relation))
        if config.verbose:
            print 'Round statistics: query: %d (not enough %d), keyword: %d, product: %d, relation: %d, A/M: %.2f%%' % (n_query, self._not_enough_recs, n_keyword, n_product, n_relation, 100.0*n_relation / (n_keyword*n_product))

    def experiment_statistics(self):
        """Print averages of the per-round statistics over all rounds."""
        # stands for: query, not-enough, keyword, product, relation, a/m
        sums = [0, 0, 0, 0, 0, 0]
        for data in self._round_results:
            for i in range(5):
                sums[i] += data[i]
            # actual/maximal ratio of keyword-product relations, as a percentage
            sums[5] += 100.0*data[4]/(data[2]*data[3])
        n = float(len(self._round_results))
        n_query, not_enough_recs, n_keyword, n_product, n_relation, am = [s/n for s in sums]
        print 'Experiment statistics:\nquery: %.2f (not enough %.2f), keyword: %.2f, product: %.2f, relation: %.2f, A/M: %.2f%%' % (n_query, not_enough_recs, n_keyword, n_product, n_relation, am)

    def recommend(self, query):
        """Return up to `limit` (product, weight) pairs, highest weight first."""
        keywords = self.ws.segment(query)
        product_weight = defaultdict(float)
        # gather product weights (sum over all query keywords)
        for kw in keywords:
            for product, weight in self.__fetch_related_products(kw):
                product_weight[product] += weight
        # convert dict to list for sorting
        product_weight_list = [item for item in product_weight.iteritems()]
        product_weight_list.sort(key=lambda t: t[1], reverse=True)
        if len(product_weight_list) < self.limit:
            self._not_enough_recs += 1
        return product_weight_list[:self.limit]

    def __fetch_related_products(self, keyword):
        # Memoized DB lookup of a keyword's (product, weight) rows.
        if not self._related_product_cache.has_key(keyword):
            self._related_product_cache[keyword] = [(row['product'], row['weight']) for row in self.dbm.get_rows('SELECT product, weight FROM keyword_product_weight WHERE keyword = %s', (keyword,))]
        return self._related_product_cache[keyword]
class KeywordRecommenderHottestFallback(KeywordRecommender):
    """A recommender which uses KeywordRecommender's recommendations first,
    but turns to HottestRecommender if its recommendations are not enough."""

    def __init__(self, *args):
        """Identical to that of KeywordRecommender"""
        super(KeywordRecommenderHottestFallback, self).__init__(*args)
        self.hottest_recommender = HottestRecommender(*args)

    def __str__(self):
        return 'Keyword Recommender with Hottest Recommender fallback with %s[N=%d]' % (self.rm, self.limit)

    def set_limit(self, limit):
        # Keep both recommenders' limits in sync.
        self.hottest_recommender.set_limit(limit)
        super(KeywordRecommenderHottestFallback, self).set_limit(limit)

    def preprocess(self, query_train_table):
        # Preprocess both recommenders on the same training data.
        super(KeywordRecommenderHottestFallback, self).preprocess(query_train_table)
        self.hottest_recommender.preprocess(query_train_table)

    def recommend(self, query):
        """Return keyword-based recommendations, padded with rescaled hottest
        products when fewer than `limit` were found."""
        recommendations = super(KeywordRecommenderHottestFallback, self).recommend(query)
        num_rec = len(recommendations)
        if num_rec == self.limit:
            return recommendations
        # ask HottestRecommender for more
        # note that create list in order not to break HottestRecommender.recommend_list
        hot_recommendations = self.hottest_recommender.recommend(query)[:self.limit-num_rec]
        # ensure hot_recommendations's weight is no greater than any from keyword recommendations
        # NOTE(review): raises IndexError if hot_recommendations is empty here
        # (i.e. HottestRecommender found no products) -- confirm preprocess()
        # guarantees a non-empty hottest list.
        max_hot_rec_weight = hot_recommendations[0][1]
        min_key_rec_weight = recommendations[-1][1] if num_rec > 0 else max_hot_rec_weight
        recommendations.extend((t[0], 1.0*min_key_rec_weight*t[1]/max_hot_rec_weight) for t in hot_recommendations)
        return recommendations
from operator import mul
def product(numbers):
    """Multiply all values of *numbers* together.

    Mirrors reduce semantics: a single value is returned unchanged and an
    empty iterable raises TypeError (no initializer).
    """
    return reduce(lambda accumulated, value: accumulated * value, numbers)
class LinearSequenceKeywordRecommender(KeywordRecommender):
    """A tentative method using heuristic information of sequence distribution."""

    def _heuristic_weight(self, sequence):
        # Peak weight (1.125) at sequence == 3, decaying logarithmically
        # with distance from that pivot.
        #return -math.log(abs(sequence-26)+1, 2)/8.0 + 1.125
        return -math.log(abs(sequence-3)+1, 2)/8.0 + 1.125

    def get_browse_count(self, sequences):
        """Weight a session's browses by their sequence positions.

        @param sequences comma-separated sequence numbers (GROUP_CONCAT output)
        """
        seqs = sequences.split(',')
        # Alternative aggregation schemes kept for reference:
        #return sum(self._heuristic_weight(int(seq)) for seq in seqs)
        #return sum(self._heuristic_weight(int(seq)) for seq in seqs) * math.log(len(seqs))
        return product(self._heuristic_weight(int(seq)) for seq in seqs) * len(seqs)
        #return sum(self._heuristic_weight(int(seq)) for seq in seqs) * len(seqs)

    def __str__(self):
        return 'Linear Sequenced Keyword Recommender with %s[N=%d]' % (self.rm, self.limit)
class WeightedSequenceRelevanceMixin(object):
    """Mixin replacing _measure_relevance with a variant that also passes an
    average browse-sequence value to the RelevanceMeasure."""

    @timeit
    def _measure_relevance(self):
        # calculate keyword-product relevance
        all_product_number = self.dbm.get_value('SELECT COUNT(DISTINCT product_name) FROM query_product')
        for keyword, count in self.keyword_count.iteritems():
            self.dbm.insert('INSERT INTO keyword (keyword, count) VALUES (%s, %s)', (keyword, count))
            related_product_number = len(self.keyword_product_count[keyword].keys())
            related_product_count = sum(self.keyword_product_count[keyword].values())
            for product, count in self.keyword_product_count[keyword].iteritems():
                related_keyword_number = len(self.product_keyword_count[product].keys())
                related_keyword_count = sum(self.product_keyword_count[product].values())
                # get average sequence from database
                # TODO: very inefficient, get a group all average sequences for a keyword at once
                #avg_sequence = self.dbm.get_value('select avg(sequence) from query_product where product_name = %s AND query_id in (select query_id from keyword_query where keyword = %s)', (product, keyword))
                # placeholder until the query above is made efficient
                avg_sequence = 1
                relevance = self.rm.get_relevance(count, (related_product_number, related_product_count), (related_keyword_number, related_keyword_count), all_product_number, avg_sequence)
                # sub-class can override sequence_weight
                # relevance *= self.sequence_weight(avg_sequence)
                self.dbm.insert('INSERT INTO keyword_product_weight (keyword, product, weight) VALUES (%s, %s, %s)', (keyword, product, relevance))

    def sequence_weight(self, avg_sequence):
        # Default: sequence position does not change the relevance.
        return 1
# Put WeightedSequenceRelevanceMixin first in the base list so that the MRO
# picks its _measure_relevance over KeywordRecommender's.
# ref: http://python-history.blogspot.com/2010/06/method-resolution-order.html
class SequenceKeywordRecommender(WeightedSequenceRelevanceMixin, LinearSequenceKeywordRecommender):
    """This recommender weights browse count by distribution of sequence."""

    @timeit
    def preprocess(self, query_train_table):
        """Compute the empirical sequence distribution, then run the normal
        keyword preprocessing on top of it."""
        # first, get sequence distribution (normalized by the most frequent
        # sequence's occurrence count)
        max_occurrence = self.dbm.get_value('SELECT MAX(c) FROM (SELECT sequence, COUNT(sequence) c FROM query_product WHERE query_id IN (SELECT id FROM %s) GROUP BY sequence) T' % query_train_table)
        self.sequence_dist = {row['sequence']: float(row['ratio']) for row in self.dbm.get_rows('SELECT sequence, COUNT(sequence)/%d ratio FROM query_product WHERE query_id IN (SELECT id FROM %s) GROUP BY sequence' % (max_occurrence,query_train_table))}
        # pivot = the most common sequence position
        self.pivot_seq = max(self.sequence_dist.iteritems(), key=lambda t:t[1])[0]
        # then, call KeywordRecommender's preprocess
        super(SequenceKeywordRecommender, self).preprocess(query_train_table)

    def _heuristic_weight(self, sequence):
        # Sequences at/after the pivot get only their distribution ratio;
        # earlier ones get a +1 boost.
        weight = self.sequence_dist[sequence]
        if self.pivot_seq-sequence < 0:
            return weight
        return 1 + weight

    def __str__(self):
        return 'Sequenced Keyword Recommender with %s[N=%d]' % (self.rm, self.limit)
class RelevanceMeasure(object):
    """Defines the RelevanceMeasure interface.

    Sub-classes turn raw keyword/product co-occurrence statistics into a
    single relevance weight (see BCMeasure, BCIPFMeasure, BFIPFMeasure).
    """
    def get_relevance(self, count, related_product_info, related_keyword_info, all_product_number, *args):
        """
        @param count number of times the keyword visit the product
        @param related_product_info the tuple (related_product_number, related_product_count)
        @param related_keyword_info the tuple (related_keyword_number, related_keyword_count)
        @param all_product_number number of all products
        """
        # BUG FIX: `raise NotImplemented` is wrong -- NotImplemented is a
        # sentinel value, not an exception, so raising it produces a
        # TypeError.  NotImplementedError is the proper abstract-method marker.
        raise NotImplementedError
class BCMeasure(RelevanceMeasure):
    """Relevance = raw browse count (BC)."""

    def get_relevance(self, count, *ignored):
        # All corpus-level statistics are ignored; the count itself is the weight.
        return count

    def __str__(self):
        return 'BC'
class BCIPFMeasure(RelevanceMeasure):
    """Relevance = browse count x inverse product frequency (BC-IPF)."""

    def get_relevance(self, count, related_product_info, related_keyword_info, all_product_number, *args):
        related_product_number = related_product_info[0]
        # IPF is analogous to IDF: rarer keywords (touching fewer products)
        # get a larger multiplier.
        inverse_product_frequency = math.log(1.0 * all_product_number / related_product_number)
        return count * inverse_product_frequency

    def __str__(self):
        return 'BC-IPF'
class BFIPFMeasure(RelevanceMeasure):
    """Relevance = browse frequency x inverse product frequency (BF-IPF)."""

    def get_relevance(self, count, related_product_info, related_keyword_info, all_product_number, *args):
        # Browse frequency: this pair's count relative to all browses of the product.
        browse_frequency = 1.0 * count / related_keyword_info[1]
        inverse_product_frequency = math.log(1.0 * all_product_number / related_product_info[0])
        return browse_frequency * inverse_product_frequency

    def __str__(self):
        return 'BF-IPF'
if __name__ == '__main__':
    # Smoke test: build the keyword tables from the 'query_train' table.
    import config
    from database import DatabaseManager
    from word_segment import SpaceWordSegmenter
    dbm = DatabaseManager(config.DB_HOST, config.DB_USER, config.DB_PASSWORD, config.DB_NAME)
    try:
        word_segmenter = SpaceWordSegmenter()
        rmeasure = BCIPFMeasure()
        # BUG FIX: KeywordRecommender.__init__ takes (dbm, ws, rm); the old
        # call KeywordRecommender(10, dbm, ...) passed the limit as an extra
        # positional argument and raised a TypeError.  The limit is configured
        # through set_limit() instead.
        recommender = KeywordRecommender(dbm, word_segmenter, rmeasure)
        recommender.set_limit(10)
        recommender.preprocess('query_train')
    finally:
        # Always release the DB connection, even if preprocess fails.
        dbm.close()
|
11,661 | 70e0b0808c817e0609f9c37ef7abd90699a01bc3 | from tkinter import *
from tkinter.messagebox import showinfo
pale_blue = '#c4d9ed'
mid_pale_blue = '#c0d6ea'
darker_pale_blue = '#aaceef'
class ResizingCanvas(Canvas):
    """A tkinter Canvas that rescales its contents to match its own size.

    Binds <Configure> so that every resize scales all drawn items by the
    ratio of the new size to the previously recorded one.
    """

    def __init__(self, container, **settings):
        # Default to a flat white canvas with no border or focus highlight.
        settings.setdefault('bg', 'white')
        settings.setdefault('bd', 0)
        settings.setdefault('highlightthickness', 0)
        super().__init__(container, **settings)
        self.bind('<Configure>', self.on_resize)
        # Remember the current size so on_resize can compute scale ratios.
        self.width = self.winfo_reqwidth()
        self.height = self.winfo_reqheight()

    def on_resize(self, event):
        # Determine the ratio of old width/height to new width/height
        wscale = float(event.width) / self.width
        hscale = float(event.height) / self.height
        self.width = event.width
        self.height = event.height
        self.config(width=self.width, height=self.height)
        # Scale all the items in the canvas
        self.scale('all', 0, 0, wscale, hscale)
class TKDesigner(Tk):
    """Tk root window with helpers for building labels, buttons, grids and
    popups in a consistent style (shared fonts, background colour, padding).
    """

    @staticmethod
    def add_to_grid(widget, row, column, **settings):
        # Strip/apply Grid-level custom settings (e.g. 'padding') first.
        settings = TKDesigner.get_custom_settings(Grid, **settings)
        settings.setdefault('sticky', 'ew')
        settings.setdefault('ipadx', 10)
        settings.setdefault('ipady', 5)
        widget.grid(row=row, column=column, **settings)

    @staticmethod
    def clear_widget(widget):
        # Destroy every direct child of the widget.
        for child in widget.winfo_children():
            child.destroy()

    @staticmethod
    def settings(**settings):
        # Convenience: capture keyword arguments as a plain dict.
        return settings

    @staticmethod
    def find_centre_of(a, b):
        return (a + b) / 2

    @staticmethod
    def get_bold_font(font):
        # Font tuples are (family, size[, style]); force the style to bold.
        bold_font = font[0:2] + ('bold',)
        return bold_font

    @staticmethod
    def set_font_size(font, size):
        # Rebuild the font tuple with a new size, keeping any trailing style.
        new_font = (font[0],) + (size,)
        if len(font) > 2: new_font += font[2:]
        return new_font

    @staticmethod
    def create_popup(message, window_name='Notice'):
        showinfo(window_name, message)

    @staticmethod
    def centre_window(window, centre_on=None):
        """Centre `window` over `centre_on`, or over the screen when None."""
        window.update()
        offsetx = 0
        offsety = 0
        parent_width = window.winfo_screenwidth()
        parent_height = window.winfo_screenheight()
        if centre_on is not None:
            centre_on.update()
            offsetx = centre_on.winfo_x()
            offsety = centre_on.winfo_y()
            parent_width = centre_on.winfo_width()
            parent_height = centre_on.winfo_height()
        width = window.winfo_width()
        height = window.winfo_height()
        x = (parent_width // 2) - (width // 2) + offsetx
        y = (parent_height // 2) - (height // 2) + offsety
        window.geometry(f'+{x}+{y}')

    @staticmethod
    def get_custom_settings(widget_type, **settings):
        """Split non-tkinter settings ('padding', 'cooldown', 'bind'/'mouse')
        out of `settings`.

        Returns (custom_settings, settings) where custom_settings is a list
        of callables to run on the created widget -- except for Grid, where
        only the cleaned settings dict is returned.
        """
        custom_settings = []
        def padding():
            # Expand a single 'padding' value into tkinter's padx/pady.
            padding = settings['padding']
            settings['padx'] = padding
            settings['pady'] = padding
            del settings['padding']
        def cooldown():
            # Wrap the widget's command/binding so the widget is disabled for
            # `cooldown` seconds after each activation.
            def cooldown_function(widget):
                def cooldown_delay(event=None):
                    if event is not None:
                        command(event)
                    else:
                        command()
                    try:
                        widget.config(state='disabled')
                        widget.after(delay * 1000, lambda: widget.config(state='active'))
                    except TclError:
                        # If the button's been destroyed then this will raise an error
                        pass
                if iscommand:
                    widget.config(command=cooldown_delay)
                else:
                    widget.bind(button, cooldown_delay)
            delay = settings['cooldown']
            iscommand = True
            if 'command' in settings:
                command = settings['command']
                del settings['command']
            elif 'bind' in settings:
                iscommand = False
                command = settings['bind']
                button = settings['button']
                del settings['bind']
                del settings['button']
            del settings['cooldown']
            return cooldown_function
        def bind():
            # Bind the 'mouse' event to the 'bind' callback on the widget.
            def bind_function(widget):
                widget.bind(button, command)
            command = settings['bind']
            button = settings['mouse']
            del settings['bind']
            del settings['mouse']
            return bind_function
        # Which custom settings each widget type supports.
        custom_setting_methods = {
            Button: {
                'padding': padding,
                'cooldown': cooldown,
                'bind': bind
            },
            Label: {
                'padding': padding
            },
            Grid: {
                'padding': padding
            }
        }
        #Determine what custom settings are in the settings
        for custom_setting in custom_setting_methods[widget_type].keys():
            if custom_setting in settings:
                # Note: this calls the handler; padding() mutates settings and
                # returns None, cooldown()/bind() return deferred callables.
                method = custom_setting_methods[widget_type][custom_setting]()
                if method is not None:
                    custom_settings.append(method)
        if widget_type is Grid:
            return settings
        return custom_settings, settings

    @staticmethod
    def set_custom_settings(widget, custom_settings):
        # Run each deferred custom-setting callable on the created widget.
        for setting in custom_settings:
            setting(widget)

    def __init__(self):
        super().__init__()
        # Shared style defaults used by all create_* helpers.
        self._header_font = ('Helvetica', 12, 'bold')
        self._label_font = ('Helvetica', 10)
        self._background = 'white'
        self.set_background(self._background)

    def set_background(self, colour):
        self._background = colour
        self.configure(background=colour)

    def get_background_colour(self):
        return self._background

    def get_container(self, container):
        # Fall back to the root window when no container is given.
        if container is None:
            return self
        else: return container

    def clear_ui(self):
        for child in self.winfo_children():
            # NOTE(review): `child is Toplevel` compares an instance against
            # the class object and is always False, so the print never fires;
            # isinstance() was probably intended.  Every child is destroyed
            # regardless.
            if child is Toplevel:
                print("Toplevel child found")
            child.destroy()

    def set_header_font(self, font):
        # Silently ignore non-tuple fonts.
        if not isinstance(font, tuple):
            return
        self._header_font = font

    def set_label_font(self, font):
        # Silently ignore non-tuple fonts.
        if not isinstance(font, tuple):
            return
        self._label_font = font

    def create_label(self, text, container=None, **settings):
        """Create a Label with the shared defaults and custom settings applied."""
        container = self.get_container(container)
        custom_settings, settings = self.get_custom_settings(Label, **settings)
        settings = self.set_default_widget_settings(**settings)
        label = Label(container, text=str(text), **settings)
        self.set_custom_settings(label, custom_settings)
        return label

    def create_header(self, text, container=None, **settings):
        # A header is just a label rendered in the header font.
        header = self.create_label(text, container, **settings)
        header.config(font=self._header_font)
        return header

    def create_button(self, text, container=None, **settings):
        """Create a Button with the shared defaults and custom settings applied."""
        container = self.get_container(container)
        custom_settings, settings = self.get_custom_settings(Button, **settings)
        settings = self.set_default_widget_settings(**settings)
        button = Button(container, text=text, **settings)
        self.set_custom_settings(button, custom_settings)
        return button

    def create_entry(self, container=None, **settings):
        container = self.get_container(container)
        settings = self.set_default_widget_settings(**settings)
        # Settings are passed positionally as tkinter's `cnf` dict.
        return Entry(container, settings)

    def set_default_widget_settings(self, **settings):
        # Apply the window-wide background and label font unless overridden.
        settings.setdefault('bg', self._background)
        settings.setdefault('font', self._label_font)
        return settings

    def set_current_size_as_min(self, minwidth=200, minheight=20):
        # Lock the window's minimum size to its current (or given) size.
        self.update()
        self.minsize(max(self.winfo_width(), minwidth), max(self.winfo_height(), minheight))

    def create_row_from_dict(self, dictionary, container, row, bg, label_method=None,
                             column_formatting=None, row_formatting=None, whitelisted_headers=None):
        """
        Creates a row from a dictionary
        :param dictionary: The dictionary from which to create the row
        :param container: The container of the row
        :param row: The current row in the grid
        :param bg: The background colour of the row
        :param label_method: The method used to create the label
        :param column_formatting: A dictionary containing methods which return a formatted value
        :param row_formatting: A list containing methods which will output label formatting
        :param whitelisted_headers: If entered, only use headers in this list
        """
        label_method = self.create_label if label_method is None else label_method
        if column_formatting is None: column_formatting = {}
        if row_formatting is None: row_formatting = []
        label_settings = {}
        label_settings.setdefault('bg', bg)
        # Row formatters may override/extend the label settings for this row.
        for row_formator in row_formatting:
            label_settings.update(row_formator(dictionary))
        headers = list(dictionary.keys())
        for i in range(len(headers)):
            header = headers[i]
            if whitelisted_headers is not None and header not in whitelisted_headers:
                continue
            value = dictionary[header]
            # Per-column formatter wins over the 'default' formatter.
            if header in column_formatting:
                value = column_formatting[header](value)
            elif 'default' in column_formatting:
                value = column_formatting['default'](value)
            TKDesigner.add_to_grid(label_method(value, container=container, **label_settings), row, i)

    def create_row_from_list(self, row_list, container, row, bg, label_method=None,
                             whitelisted_values=None, **column_formatting):
        """
        Creates a row from a list
        :param row_list: The list from which to create the row
        :param container: The container of the row
        :param row: The current row of the grid
        :param bg: The background colour of the row
        :param label_method: The method used to create the label
        :param whitelisted_values: If entered, only use values in this list
        :param column_formatting: A dictionary containing methods which return formatted values
            which are matched by a key, which corresponds to the list index.
        """
        label_method = self.create_label if label_method is None else label_method
        for i in range(len(row_list)):
            value = row_list[i]
            if whitelisted_values is not None and value not in whitelisted_values:
                continue
            # Per-index formatter (string key) wins over the 'default' formatter.
            if str(i) in column_formatting:
                value = column_formatting[str(i)](value)
            elif 'default' in column_formatting:
                value = column_formatting['default'](value)
            TKDesigner.add_to_grid(label_method(value, container=container, bg=bg), row, i)

    def grid_from_list_of_dict(self, dictionary_list, container=None, bg1=None, bg2=pale_blue,
                               column_formatting=None, row_formatting=None, whitelisted_headers=None):
        """
        Makes a grid from a list of dictionaries
        :param dictionary_list: The list of dictionaries from which to create the grid
        :param container: The container of the frame for which the grid will be contained
        :param bg1: Alternate row colour 1
        :param bg2: Alternate row colour 2
        :param column_formatting: A dictionary containing methods which return formatted values
        :param row_formatting: A list containing methods which will output label formatting
        :param whitelisted_headers: If entered, only use headers in this list
        :return: Frame containing the grid
        """
        container = self.get_container(container)
        bg1 = self._background if bg1 is None else bg1
        frame = Frame(container)
        # Header row is taken from the first dictionary's keys.
        headers = list(dictionary_list[0].keys())
        row = 0
        self.create_row_from_list(headers, container=frame, row=row, bg=bg1, label_method=self.create_header,
                                  whitelisted_values=whitelisted_headers, default=lambda x: x.capitalize())
        self.set_column_weights(len([h for h in headers if whitelisted_headers is None or h in whitelisted_headers]),
                                container=frame)
        for dictionary in dictionary_list:
            row += 1
            # Alternate row background colours for readability.
            bg = bg1 if row % 2 == 0 else bg2
            self.create_row_from_dict(dictionary, container=frame, row=row, bg=bg, column_formatting=column_formatting,
                                      row_formatting=row_formatting, whitelisted_headers=whitelisted_headers)
        return frame

    def grid_from_list(self, grid_list, item_to_list_converter, headers=None, container=None,
                       bg1=None, bg2=pale_blue, **column_formatting):
        """
        Makes a grid from a list of items
        :param grid_list: The list of items from which to create the grid
        :param item_to_list_converter: A method which converts the items in the list to an array of columns
        :param headers: The grid headers
        :param container: The container of the frame for which the grid will be contained
        :param bg1: Alternate row colour 1
        :param bg2: Alternate row colour 2
        :param column_formatting: A dictionary containing methods which return formatted values
        :return: Frame containing the grid
        """
        container = self.get_container(container)
        bg1 = self._background if bg1 is None else bg1
        frame = Frame(container)
        row = -1
        if headers is not None:
            row += 1
            self.create_row_from_list(headers, container=frame, row=row, bg=bg1,
                                      label_method=self.create_header)
        for item in grid_list:
            row += 1
            bg = bg1 if row % 2 == 0 else bg2
            row_list = item_to_list_converter(item)
            self.create_row_from_list(row_list, container=frame, row=row, bg=bg, **column_formatting)
        if len(grid_list) != 0:
            #Set the column weights in this grid using the first item in the gridlist
            self.set_column_weights(len(item_to_list_converter(grid_list[0])), container=frame)
        return frame

    def set_column_weights(self, num_columns, container=None, weights=1, **settings):
        # `weights` may be a single int (applied to all) or a tuple per column.
        container = self.get_container(container)
        for i in range(num_columns):
            weight = weights[i] if isinstance(weights, tuple) else weights
            container.columnconfigure(i, weight=weight, **settings)

    def create_button_entry_pair(self, button_settings, entry_settings, container=None):
        """Create a Button plus an Entry sharing a StringVar (button.content)."""
        #TODO: Make a more flexible cooldown/custom parameters system.
        container = self.get_container(container)
        button_settings.setdefault('button', '<Button-1>')
        button = self.create_button(container=container, **button_settings)
        content = StringVar()
        # Expose the entry's variable on the button for callback access.
        button.content = content
        if 'text' in entry_settings:
            content.set(entry_settings['text'])
            del entry_settings['text']
        entry = self.create_entry(container, textvariable=content, **entry_settings)
        return button, entry

    def create_text(self, canvas, x, y, text, **settings):
        # Draw text on a canvas using the shared label font by default.
        settings.setdefault('font', self._label_font)
        canvas.create_text(x, y, text=text, **settings)

    def create_single_row(self, *widgets_info, container=None, weights=1, **settings):
        """
        Creates a single row of widgets
        :param container: The container for this row
        :param widgets_info: A tuple containing a widget creation function and a dictionary of its parameters
        :param weights: The weighting for the columns
        :param settings: The grid settings
        :return: Frame containing the row
        """
        container = self.get_container(container)
        frame = Frame(container)
        for i in range(len(widgets_info)):
            widget_info = widgets_info[i]
            widget_creator = widget_info[0]
            widget_settings = widget_info[1]
            widget = widget_creator(container=frame, **widget_settings)
            self.add_to_grid(widget, row=0, column=i, **settings)
        self.set_column_weights(len(widgets_info), container=frame, weights=weights)
        return frame

    def create_vertical_space(self, height, container=None, **settings):
        # Insert an empty frame as vertical whitespace.
        container = self.get_container(container)
        frame = Frame(container, height=height, bg=self._background)
        settings.setdefault('fill', X)
        # Settings are passed positionally as tkinter's `cnf` dict.
        frame.pack(settings)

    def create_temporary_popup(self, message, window_name='Notice', time_to_close=1):
        """Show a popup centred on this window, auto-closing after
        `time_to_close` seconds."""
        popup = Toplevel(bg=self._background)
        popup.title(window_name)
        self.create_label(text=message, container=popup).pack(fill=X, padx=30, pady=5)
        self.centre_window(popup, centre_on=self)
        self.after(time_to_close * 1000, popup.destroy)

    def create_centred_popup(self, message, window_name='Notice', centre_on=None):
        """Show a popup centred on `centre_on` (this window by default)."""
        popup = Toplevel(bg=self._background)
        popup.title(window_name)
        self.create_label(text=message, container=popup).pack(fill=X, padx=30, pady=5)
        self.centre_window(popup, centre_on=self if centre_on is None else centre_on)
|
11,662 | 91dad0a38540f9e384c98e653cd90ce47dc1fa13 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 9 18:56:26 2019
@author: Randy Jr
"""
# Demonstrate object identity for small integers: print the three values,
# then the memory addresses of the objects they refer to.
x = 5
y = 10
z = 15

for value in (x, y, z):
    print(value)

for value in (x, y, z):
    print(hex(id(value)))
11,663 | fa39cfd8bb51a9fa77ff25774d1bbf527d1536d8 | def extra_end(str):
return 3*str[len(str)-2:len(str)]
|
11,664 | dcbbe04de85846c7f0896bba9063630511f2cb27 | from util_function import read_txt
import os
import sys
import pandas as pd
from argparse import ArgumentParser
from scipy.spatial.distance import euclidean, pdist, squareform, cdist
import numpy as np
from tqdm import tqdm
import json
if __name__ == '__main__':
    # Extract the HA2 hydrogen lines of GLY residues from ZDOCK decoy PDBs.
    # Hydrogens are added with the external `reduce` program, the temporary
    # hydrogenated PDB is filtered, and only the GLY/HA2 ATOM records are kept.
    file_dir = "/home/fei/Research/Dataset/zdock_decoy/2_decoys_bm4_zd3.0.2_irad/"
    pdb_folder = "/run/media/fei/easystore/5_new_pdb_6deg/3_pdb"
    target_name_path = os.path.join(file_dir, "caseID_part2.lst")
    decoy_name_folder = os.path.join(file_dir, "5_decoy_name")
    GLY_sidechain_folder = os.path.join(file_dir, "9_GLY_sidechain")
    # Path to the compiled `reduce` binary (adds hydrogens to a PDB).
    exec_path = os.path.join(file_dir, "9_GLY_sidechain","reduce-master", "reduce_src", "reduce")
    target_name_list = read_txt(target_name_path)
    for target_name in target_name_list:
        ###1.decoy pdb
        # Positive decoys plus clustered decoys for this target.
        decoy_name_list = read_txt(os.path.join(decoy_name_folder, target_name + "_positive_decoy_name.txt"))\
                          + read_txt(os.path.join(decoy_name_folder, target_name + "_cluster_decoy_name.txt"))
        for decoy_name in tqdm(decoy_name_list):
            decoy_path = os.path.join(pdb_folder, target_name, decoy_name)
            # Temporary hydrogenated PDB ("H.<decoy>") and final output ("GLY.<decoy>").
            decoy_H_path = os.path.join(GLY_sidechain_folder, target_name, "H." + decoy_name)
            GLY_pdb = os.path.join(GLY_sidechain_folder, target_name, "GLY." + decoy_name)
            if not os.path.exists(os.path.join(GLY_sidechain_folder, target_name)):
                os.mkdir(os.path.join(GLY_sidechain_folder, target_name))
            # Skip decoys that were already processed in an earlier run.
            if not os.path.exists(GLY_pdb):
                # "./reduce -NOFLIP /home/fei/Desktop/complex.4.pdb > /home/fei/Desktop/complexH.4.pdb"
                cmd = exec_path + ' -NOFLIP ' + decoy_path + " > " + decoy_H_path
                os.system(cmd)
                pdb_lines = []
                with open(decoy_H_path, 'r') as file:
                    line = file.readline().rstrip('\n')  # call readline()
                    while line:
                        # Keep only GLY residues' HA2 atom records
                        # (PDB fixed columns: record name, residue name, atom name).
                        if (line[0:6].strip(" ") == 'ATOM' and line[17:20].strip(" ") == 'GLY' and line[12:16].strip(" ") == 'HA2'):
                            pdb_lines.append(line)
                        line = file.readline().rstrip('\n')
                # The hydrogenated file is only an intermediate; remove it.
                os.remove(decoy_H_path)
                f = open(GLY_pdb, 'w')
                f.write("\n".join(pdb_lines))
                f.write("\n")
                f.close()
        ##2.native DSSP -- same extraction for the native structure, kept for reference:
        # decoy_path = os.path.join(pdb_folder, target_name+".pdb")
        # decoy_H_path = os.path.join(GLY_sidechain_folder, target_name, "H." + target_name+".pdb")
        # GLY_pdb = os.path.join(GLY_sidechain_folder, target_name, "GLY."+ target_name+".pdb")
        # if not os.path.exists(os.path.join(GLY_sidechain_folder, target_name)):
        #     os.mkdir(os.path.join(GLY_sidechain_folder, target_name))
        # # "./reduce -NOFLIP /home/fei/Desktop/complex.4.pdb > /home/fei/Desktop/complexH.4.pdb"
        # cmd = exec_path + ' -NOFLIP ' + decoy_path + " > " + decoy_H_path
        # os.system(cmd)
        # pdb_lines = []
        # with open(decoy_H_path, 'r') as file:
        #     line = file.readline().rstrip('\n')  # call readline()
        #     while line:
        #         if (line[0:6].strip(" ") == 'ATOM' and line[17:20].strip(" ") == 'GLY' and line[12:16].strip(
        #                 " ") == 'HA2'):
        #             pdb_lines.append(line)
        #         line = file.readline().rstrip('\n')
        # os.remove(decoy_H_path)
        # f = open(GLY_pdb, 'w')
        # f.write("\n".join(pdb_lines))
        # f.write("\n")
        # f.close()
11,665 | b9cec09d46f620b8ed4c77638940d769fa8944ba | import math
import random
import gym
import numpy as np
from torch.optim import Adam
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
import matplotlib.pyplot as plt
from utils import compute_return,compute_advantage
from models import ActorCritic
from multiprocessing_env import SubprocVecEnv
# Number of parallel worker environments used for rollout collection.
num_envs = 16
env_name = "Pendulum-v0"
# TODO: add env reset  (translated from French: "rajouter env reset")
def make_env():
    """Return a zero-argument factory that builds a fresh `env_name` gym env.

    SubprocVecEnv expects callables so each worker process can construct
    its own environment instance.
    """
    def _create():
        return gym.make(env_name)
    return _create
# Vectorized training environments (one subprocess each) plus a single
# separate environment used only for evaluation in test_env().
envs = [make_env() for i in range(num_envs)]
envs = SubprocVecEnv(envs)
env = gym.make(env_name)
def plot(frame_idx, rewards):
    """Plot the evaluation-reward curve collected so far.

    @param frame_idx current environment step, shown in the title
    @param rewards list of test rewards, most recent last (must be non-empty)
    """
    # BUG FIX: clear_output was called without ever being imported, so this
    # function always raised NameError.  It only exists inside IPython/Jupyter,
    # so import it lazily and fall back to a no-op elsewhere.
    try:
        from IPython.display import clear_output
        clear_output(True)
    except ImportError:
        pass
    plt.figure(figsize=(20, 5))
    plt.subplot(131)
    plt.title('frame %s. reward: %s' % (frame_idx, rewards[-1]))
    plt.plot(rewards)
    plt.show()
def test_env(vis=False):
    """Roll out one full episode on the single evaluation env.

    @param vis render each frame when True
    @return total (undiscounted) episode reward
    """
    state = env.reset()
    if vis: env.render()
    done = False
    total_reward = 0
    while not done:
        # Add a batch dimension of 1 for the model.
        state = torch.FloatTensor(state).unsqueeze(0)
        # NOTE(review): assumes model.get_action returns a batch of actions;
        # [0] selects the single env's action -- confirm against ActorCritic.
        act = model.get_action(state)[0]
        next_state, reward, done, _ = env.step(act)
        state = next_state
        if vis: env.render()
        total_reward += reward
    return total_reward
def generate_batch(mini_batch_size, states, actions, log_probs, returns, advantage):
    """Yield random mini-batches sampled (with replacement) from a rollout.

    Produces batch_size // mini_batch_size batches; each yield is a
    (states, actions, log_probs, returns, advantage) tuple of
    mini_batch_size rows.
    """
    batch_size = states.size(0)
    num_batches = batch_size // mini_batch_size
    tensors = (states, actions, log_probs, returns, advantage)
    for _ in range(num_batches):
        indices = np.random.randint(0, batch_size, mini_batch_size)
        yield tuple(t[indices, :] for t in tensors)
def ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, returns, advantages, clip_param=0.2):
    """Run the clipped-surrogate PPO update over one rollout.

    Args:
        ppo_epochs: number of passes over the collected rollout.
        mini_batch_size: number of samples per gradient step.
        states, actions, log_probs, returns, advantages: stacked rollout
            tensors (first dimension = collected transitions).
        clip_param: PPO clipping epsilon. Uses the module-level `model`.
    """
    for _ in range(ppo_epochs):
        for state, action, old_log_probs, return_, advantage in generate_batch(mini_batch_size, states, actions, log_probs, returns, advantages):
            value = model.predict_value(state)
            entropy = model.get_dist(state).entropy().mean()
            new_log_probs = model.get_log_prob(state, action)
            # Importance ratio pi_new/pi_old, computed in log space.
            ratio = (new_log_probs - old_log_probs).exp()
            surr1 = ratio * advantage
            surr2 = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param) * advantage
            # Clipped surrogate objective (maximized, hence the minus sign).
            actor_loss = - torch.min(surr1, surr2).mean()
            critic_loss = (return_ - value).pow(2).mean()
            # Combined loss with a small entropy bonus for exploration.
            loss = 1 * critic_loss + actor_loss - 0.001 * entropy
            model.actor_optimizer.zero_grad()
            model.critic_optimizer.zero_grad()
            loss.backward()
            model.actor_optimizer.step()
            model.critic_optimizer.step()
num_inputs = envs.observation_space
num_outputs = envs.action_space

# Hyper-parameters
NB_STEP = 128           # rollout length before each PPO update
UPDATE_EPOCH = 10       # PPO epochs per rollout
MINI_BATCH_SIZE = 512
SIZES = [64]            # hidden-layer sizes for the actor-critic networks
GAMMA = 0.99
LAMBDA = 0.95
EPSILON = 0.2
REWARD_THRESHOLD = 190  # stop once mean evaluation reward exceeds this

model = ActorCritic(num_inputs, num_outputs, SIZES)

frame_idx = 0
test_rewards = []
#env_render = False
state = envs.reset()
early_stop = False
PATH = "saved_models/model_ppo_pendulum.pt"
while not early_stop :
    # Per-rollout buffers, one entry per collected step.
    log_probs = []
    values = []
    states = []
    actions = []
    rewards = []
    masks = []
    for _ in range(NB_STEP):
        state = torch.FloatTensor(state)
        value = model.predict_value(state)
        action = model.get_action(state)
        action = action.squeeze(0)
        next_state, reward, done, _ = envs.step(action)
        log_prob = model.get_log_prob(state, action)
        log_probs.append(log_prob)
        values.append(value)
        rewards.append(torch.FloatTensor(reward).unsqueeze(1))
        masks.append(torch.FloatTensor(1 - done).unsqueeze(1))
        states.append(state)
        actions.append(action)
        state = next_state
        frame_idx += 1
        # Periodic evaluation on the single test env; save and stop when
        # the reward threshold is reached.
        if frame_idx % 1000 == 0:
            test_reward = np.mean([test_env() for _ in range(10)])
            test_rewards.append(test_reward)
            #plot(frame_idx, test_rewards)
            print("test reward: ")
            print(test_reward)
            if test_reward > REWARD_THRESHOLD :
                early_stop = True
                torch.save(model.state_dict(), PATH)
    # Bootstrap value for the state after the rollout, then compute targets.
    # NOTE(review): `masks` is collected but never passed to
    # compute_advantage — confirm against its signature in utils.
    next_state = torch.FloatTensor(next_state)
    next_value = model.predict_value(next_state)
    returns = compute_advantage(next_value, rewards, values)
    returns = torch.cat(returns).detach()
    log_probs = torch.cat(log_probs).detach()
    values = torch.cat(values).detach()
    states = torch.cat(states)
    actions = torch.cat(actions)
    advantage = returns - values
    ppo_update(UPDATE_EPOCH, MINI_BATCH_SIZE, states, actions, log_probs, returns, advantage)
|
11,666 | 740a1cbf68cd26d5ff5a2f97aee670391e6d17bb | from multiprocessing import Process
from proxy_api import app
from getter import Getter
from tester import Tester
import time
# Scheduling intervals (seconds) and component enable switches.
TESTER_CYCLE = 20
GETTER_CYCLE = 300
TESTER_ENABLE = True
GETTER_ENABLE = True
API_ENABLE = True
# Bind address for the Flask proxy API.
API_HOST = 'localhost'
API_PORT = '5000'
class Scheduler():
    """Run the proxy-pool components (tester, getter, API) as child processes."""

    def schedule_tester(self, cycle=TESTER_CYCLE):
        """Re-validate stored proxies forever, sleeping `cycle` seconds between runs."""
        checker = Tester()
        while True:
            print('ๆต่ฏๅจๅผๅง่ฟ่ก')
            checker.run()
            time.sleep(cycle)

    def schedule_getter(self, cycle=GETTER_CYCLE):
        """Crawl for new proxies forever, sleeping `cycle` seconds between runs."""
        crawler = Getter()
        while True:
            print('ๅผๅงๆๅไปฃ็')
            crawler.run()
            time.sleep(cycle)

    def schedule_api(self):
        """Serve the HTTP API (blocking call)."""
        app.run(API_HOST, API_PORT)

    def run(self):
        """Spawn one child Process per enabled component and start it."""
        print('ไปฃ็ๆฑ ๅผๅง่ฟ่ก')
        if TESTER_ENABLE:
            Process(target=self.schedule_tester).start()
        if GETTER_ENABLE:
            Process(target=self.schedule_getter).start()
        if API_ENABLE:
            Process(target=self.schedule_api).start()
# Module entry point: build the scheduler and start all enabled components.
scheduler = Scheduler()
scheduler.run()
|
11,667 | ce7ccd81c1384ad5f38f2949474dfe625e92f4fb | #include <stdio.h>
/* Count the decimal digits of an integer read from stdin. */
int main()
{
    long long n;
    int count = 0;
    printf("Enter an integer:");
    scanf("%lld", &n);
    /* Bug fix: the original while loop never ran for n == 0, reporting
     * "0 digits"; a do-while counts 0 as one digit. Negative numbers
     * work as before, since n /= 10 drives any n toward 0. */
    do
    {
        n /= 10;
        ++count;
    } while (n != 0);
    printf("Number of digits: %d", count);
}
|
11,668 | a098359b0ff05ec6c79db703ac4aa793da9b83db | # Copyright 2015 Jared Rodriguez (jared.rodriguez@rackspace.com)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
import zmq
from mercury.rpc.ping2 import ping as ping2
RETRIES = 5  # max ping attempts before declaring the agent down; TODO: YAML
PING_TIMEOUT = 2500  # base per-attempt timeout, milliseconds; TODO: YAML
BACK_OFF = .42  # exponent that grows the timeout on each retry

log = logging.getLogger(__name__)
def ping(ctx, host):
    """Ping `host` up to RETRIES times, growing the timeout each attempt.

    The first attempt uses PING_TIMEOUT; attempt k (k >= 1) uses
    PING_TIMEOUT + PING_TIMEOUT * k**BACK_OFF milliseconds.

    Returns the last ping2() result (truthy on success).
    """
    attempt = 0
    result = False
    while attempt < RETRIES:
        if attempt:
            timeout = int(PING_TIMEOUT + PING_TIMEOUT * (attempt ** BACK_OFF))
        else:
            timeout = int(PING_TIMEOUT)
        result = ping2(ctx, host, timeout=timeout)
        if result:
            break
        attempt += 1
    return result
def pinger(server, mercury_id, db_controller):
    """Ping `server` every few seconds until it stops responding.

    Runs in its own thread (see spawn()). When the agent fails all ping
    retries, its inventory record is removed from the database.
    """
    ctx = zmq.Context()
    while True:
        log.debug('Pinging %s : %s' % (mercury_id, server))
        result = ping(ctx, server)
        if not result:
            break
        time.sleep(5)  # TODO: YAML
    # Scan jobs for any tasks targeting this node
    # 1. Fail the task
    # 2. Signal to any active worker threads to stop processing the task
    log.info('%s : %s ping timeout' % (mercury_id, server))
    # log.debug('Attempting to destroy thread 0mq context')
    # ctx.destroy() ## HANGS!!!!
    # log.debug('Context destroyed')
    db_controller.delete(mercury_id)  # Threads persist
def spawn(server, mercury_id, db_controller):
    """Start a background pinger thread monitoring one agent."""
    log.info('Spawning pinger thread: %s : %s' % (mercury_id, server))
    pinger_thread = threading.Thread(
        target=pinger, args=[server, mercury_id, db_controller])
    pinger_thread.start()
|
def get_sets(problem):
    """Extract the two chosen rows of a magic-trick case as string sets.

    `problem` is a 11-line case: line 0 is the first chosen row number
    (1-based), lines 1-4 the first 4x4 grid, line 5 the second chosen
    row number, lines 6-10 the second grid.
    """
    row_a = int(problem[0]) - 1
    row_b = int(problem[5]) - 1
    line_a = problem[1:5][row_a]
    line_b = problem[6:11][row_b]
    return (set(line_a.strip().split(' ')),
            set(line_b.strip().split(' ')))
def chunks(l, n):
    """Split sequence `l` into consecutive pieces of length `n` (min 1)."""
    step = max(n, 1)
    pieces = []
    for start in range(0, len(l), step):
        pieces.append(l[start:start + step])
    return pieces
def solve(set_one, set_two):
    """Answer one magic-trick case from the two chosen rows.

    Returns the unique common card, 'Volunteer cheated!' when the rows
    share nothing, or 'Bad magician!' when more than one card matches.
    """
    common = set_one & set_two
    if not common:
        return 'Volunteer cheated!'
    if len(common) > 1:
        return 'Bad magician!'
    return common.pop()
def main():
    """Solve every case from input.txt and write answers to output.txt."""
    content = None
    with open("input.txt", "r") as f:
        content = f.read().strip()
    lines = [x.strip() for x in content.split('\n')]
    # Declared case count; the cases themselves are recovered by chunking.
    test_cases = int(lines[0])
    # Each case occupies exactly 10 lines after the header.
    probs = chunks(lines[1:], 10)
    with open("output.txt", "w") as f:
        for i, el in enumerate([solve(*get_sets(l)) for l in probs]):
            f.write("Case #{}: {}\n".format(i+1, el))

if __name__ == '__main__':
    main()
|
11,670 | cccc045e640785d03d493d4b2a9cd7ae3af3d846 | import Tkinter as t
import time , socketio
from PIL import ImageTk,Image
class App:
    """Two-player networked pong client built on Tkinter + python-socketio.

    Python 2 code (module name `Tkinter`, list-returning map()). The
    server assigns each client a side; paddle movements and scores are
    exchanged over socket.io events.

    NOTE(review): the original file's indentation was lost in extraction;
    the block structure below is reconstructed from the control flow and
    should be confirmed against the upstream source.
    """
    def __init__(self,ip):
        # Build the main window and show the intro screen; mainloop blocks.
        self.top = t.Tk()
        self.top.title('Sockets ping pong')
        #self.pos = [None,'Left','Right'][p]
        self.top.tk_bisque()
        self.ip = ip
        self.intro()
        self.top.protocol("WM_DELETE_WINDOW",self.disconnect)
        self.top.geometry('400x400+500+500')
        self.top.mainloop()
    def disconnect(self):
        # Window-close handler: drop the socket (if any) and destroy the UI.
        try:
            self.socket.disconnect()
        except:
            pass
        self.top.destroy()
    def connect(self,e):
        # Bound to <Return> on the intro screen: connect to the server and
        # register all socket.io event handlers.
        self.l2['text'] = 'Connecting ... to'
        self.socket = socketio.Client()
        self.socket.on('connect',self.connected)
        self.socket.on('my_name',self.my_name)
        self.socket.on('getmoved',self.getmoved)
        self.socket.on('startBall',self.startBall)
        self.socket.on('getupdated',self.getupdated)
        self.socket.connect(self.ip)
        #self.connected()
    def my_name(self,data):
        # Server reply with our side assignment; switch to the play screen.
        d = data
        #print 'got',data,type(data)
        self.l2['text']= 'Connection Established !'
        #self.l3['label'] = 'Press Enter to Proceed'
        self.top.after(500,self.playshow,0,d)
        #self.playshow(side = d)
    def connected(self):
        # Connection established: ask the server which side we play.
        #print ('connected',self)
        self.socket.emit('get_my_name')
    def getmoved(self,data):
        # A paddle moved remotely: data = (paddle_index, new_y).
        own,other = data
        w = [self.p1,self.p2][own]
        #print ('own',own,'other',other)
        w.place(y = other)
    def moveup(self,e):
        # Up-arrow handler: move own paddle up and notify the server.
        w = self.own
        try:
            self.y
        except:
            self.y = w.winfo_y()
        self.y += -5
        self.socket.emit('move',[self.owncode,self.y])
    def movedo(self,e):
        # Down-arrow handler: move own paddle down and notify the server.
        w = self.own
        try:
            self.y
        except:
            self.y = w.winfo_y()
        self.y += 5
        self.socket.emit('move',[self.owncode,self.y])
        #w.place(y = self.y)
    def playshow(self, hide=0 , side = 0):
        # Build the play field on first call, then show or hide it.
        # `side` selects which paddle is ours (0=Left, 1=Right).
        try:
            self.p1 #paddle 1 / bar1
        except:
            self.intro(hide = 1)
            self.p1 = t.Button(self.top,text='Left',height = 15,bg='green', width= 8)
            self.p2 = t.Button(self.top,text='Right',height = 15,bg='green', width = 8)
            self.p1.svar = t.StringVar()
            self.p2.svar = t.StringVar()
            self.p1.ivar = t.IntVar(0)
            self.p2.ivar = t.IntVar(0)
            self.score1 = t.Label(self.top,textvariable = self.p1.ivar,font = 'system 20')
            self.score2 = t.Label(self.top,textvariable = self.p2.ivar,font = 'system 20')
            self.s1 = t.Label(self.top,textvariable = self.p1.svar,font = 'system 20')
            self.s2 = t.Label(self.top,textvariable = self.p2.svar,font = 'system 20')
            self.own = [self.p1,self.p2] [side]
            self.owncode = side
            self.top.title('{}'.format(self.own['text']))
            self.other = [self.p1,self.p2] [not side]
            #self.socket.emit('update_score',self.owncode)
            self.top.bind('<KeyPress-Up>',self.moveup)
            self.top.bind('<KeyPress-Down>',self.movedo)
            #self.scores()
            #self.top.bind('KeyRelease-Return',startball)
        if hide:
            for i in (self.p1,self.p2):
                i.place(relheight=0,relwidth=0)
        else:
            self.p1.place(relx = 0.1, rely=0.2,anchor = 'nw')
            self.p2.place(relx = 0.9, rely=0.2,anchor = 'ne')
            self.s1.place(relx = 0.2,rely = 0.01, anchor = 'n')
            self.s2.place(relx = 0.8,rely = 0.01, anchor = 'n')
            self.score1.place(relx = 0.2,rely = 0.1, anchor = 'n')
            self.score2.place(relx = 0.8,rely = 0.1, anchor = 'n')
            #self.socket.emit('startBall')
            self.y = self.own.winfo_y()
            self.startBall()
    def intro(self,hide = 0):
        # Build the intro screen on first call, then show or hide it.
        try:
            self.f1
        except:
            self.f1 = t.Frame(self.top)
            self.l1 = t.Label(self.f1,padx=10,pady=10,relief='groove',bd=10,text='Sockets ping pong',font = 'system 30 bold')
            self.l2 = t.Label(self.f1,text='Press Enter to connect to server @',font = 'system 10 bold')
            self.l3 = t.Label(self.f1,text='{}'.format(self.ip),font = 'system 10 bold')
            self.name = t.StringVar()
            self.t1 = t.Label(self.f1,text = 'github.com/new-AF',font = 'system 10 bold',relief = 'groove')
            self.top.bind('<KeyRelease-Return>',self.connect)
            for i in ('l1 l2 l3 t1'.split() ):
                w = getattr(self,i)
                w.pack(fill='x',expand=1)
        if hide:
            #self.f1.place(x=0,y=0,relheight=0,relwidth=0)
            self.f1.destroy()
        else:
            self.f1.place(x=0,y=0,relwidth=1,relheight=1)
            self.t1.focus()
    def createBall(self, w = 20, h= 20):
        # Create the ball canvas exactly once.
        try:
            self.ballW
        except:
            self.ballW = t.Canvas(self.top, width = w, height = h, relief= 'flat',borderwidth = 0)
            self.ballW.create_oval( 0,0,w,h, fill = 'black')
    def startBall(self, hide=0,reset = 0):
        # Center the ball and reset its velocity; with reset=1 only the
        # state is reset (no widget creation, no animation restart).
        self.ball = [ self.top.winfo_width()/2 , self.top.winfo_height()/2 ]
        self.ball = map(int,self.ball)  # Python 2: map() returns a list
        self.Inc = [3, 3]
        if reset:
            return
        try:
            self.ballW
        except:
            self.createBall()
        self.moveBall()
    def moveBall(self):
        # Animation step: advance, bounce off paddles/walls, handle scoring,
        # then reschedule itself via after().
        try:
            self.moving
        except:
            self.moving = 1
        if self.moving:
            x,y = self.ball
            self.ballW.place( x = self.ball[0] , y = self.ball[1] )
            if self.collide(self.own) or self.collide(self.other):
                self.Inc[0] *= -1
            # Ball left the field horizontally: a point was scored.
            if x >= self.top.winfo_width() or x<=0:
                self.sendScore(x)
                self.startBall(0,1)
            if y >= self.top.winfo_height() or y<=0:
                self.Inc[1] *= -1
            self.ball[0] += self.Inc[0]
            self.ball[1] += self.Inc[1]
        self.top.after(30, self.moveBall )
    def collide(self,tar):
        # Rough overlap test between the ball and paddle widget `tar`.
        x,y = self.ball
        w = self.ballW.winfo_width()
        h = self.ballW.winfo_height()
        tx,ty = tar.winfo_x(), tar.winfo_y()
        th, tw = tar.winfo_height(), tar.winfo_width()
        # c1: horizontal centers within 5 px; c2: ball bottom inside paddle.
        c1 = abs((x+w/2) - (tx + tw/2)) <= 5
        c2 = y + h >= ty and y+h <= ty + th
        return c1 and c2
    def getupdated(self,data):
        # Score update from the server: increment the scorer's counter.
        d = data
        w = [self.own,self.other][not d]
        old = w.ivar.get()
        w.ivar.set(old+1)
    def sendScore(self,x):
        # Report which side scored, based on which edge the ball left.
        #print 'sending score'
        self.socket.emit('update_score', (x < self.top.winfo_width()/2) and self.owncode )
# Script entry point: connect to the hosted game server.
if __name__ == '__main__':
    App(ip = 'http://damp-basin-29915.herokuapp.com')
|
# Demo: str.split(" ") returns a list of the space-separated tokens.
s="oevdnuiav leiruhfod AIDHUVUCIhao kdshfopcjnd odicuhs ndfzb hocauhwi adfijanslv"
r=s.split(" ")
print(type(r))
print(r)
# Third token of the split result.
print(r[2])
11,672 | 8e4c026eba08f044cb29bd848bb85950e70d7fc9 | '''
Given a list, reverse the order without using extra arrays.
'''
def reverse(arr):
    """Reverse `arr` in place without using an auxiliary array.

    Args:
        arr: a mutable sequence; modified in place, nothing returned.
    """
    left = 0
    right = len(arr) - 1
    while left < right:
        # Tuple swap: the original add/subtract trick only worked for
        # numeric elements (and left a stray debug print in place);
        # this works for elements of any type.
        arr[left], arr[right] = arr[right], arr[left]
        left += 1
        right -= 1
def main():
    """Demo driver: print a list before and after in-place reversal."""
    arr = [1, 2, 3, 4, 5]
    print(arr)
    reverse(arr)
    print(arr)

if __name__=="__main__":
    main()
11,673 | 73897ecc30f2a30a1a7af472b2fe1749ad723f07 | """Auto-deploy credentials when enabling special remotes
This is the companion of the ``annexRepo__enable_remote`` patch, and simply
removes the webdav-specific credential handling in ``siblings()``.
It is no longer needed, because credential deployment moved to a lower
layer, covering more special remote types.
Manual credential entry on ``enableremote`` is not implemented here, but easily
possible following the patterns from `datalad-annex::` and
``create_sibling_webdav()``
"""
import logging
from datalad_next.datasets import LegacyAnnexRepo as AnnexRepo
from datalad.support.exceptions import (
AccessDeniedError,
AccessFailedError,
CapturedException,
)
from . import apply_patch
# use same logger as -core
lgr = logging.getLogger('datalad.distribution.siblings')
# This function is taken from datalad-core@2ed709613ecde8218a215dcb7d74b4a352825685
# datalad/distribution/siblings.py
# Changes
# - removed credential lookup for webdav-remotes
# - exception logging via CapturedException
def _enable_remote(ds, repo, name, res_kwargs, **unused_kwargs):
    """Yield result records for enabling the special remote `name` on `ds`.

    Patched variant: credential handling is intentionally absent here,
    since credential deployment happens at a lower layer (see the module
    docstring). Yields 'impossible'/'error'/'ok' result records in the
    datalad result-record format.
    """
    result_props = dict(
        action='enable-sibling',
        path=ds.path,
        type='sibling',
        name=name,
        **res_kwargs)
    # Special remotes only exist for annex repos.
    if not isinstance(repo, AnnexRepo):
        yield dict(
            result_props,
            status='impossible',
            message='cannot enable sibling of non-annex dataset')
        return
    if name is None:
        yield dict(
            result_props,
            status='error',
            message='require `name` of sibling to enable')
        return
    # get info on special remote
    sp_remotes = {
        v['name']: dict(v, uuid=k)
        for k, v in repo.get_special_remotes().items()
    }
    remote_info = sp_remotes.get(name, None)
    if remote_info is None:
        yield dict(
            result_props,
            status='impossible',
            message=("cannot enable sibling '%s', not known", name))
        return
    try:
        repo.enable_remote(name)
        result_props['status'] = 'ok'
    except (AccessDeniedError, AccessFailedError) as e:
        # Log the exception without interrupting result reporting.
        CapturedException(e)
        result_props['status'] = 'error'
        # TODO should use proper way of injecting exceptions in result records
        result_props['message'] = str(e)
    yield result_props
# Install the patched implementation over datalad-core's _enable_remote.
apply_patch(
    'datalad.distribution.siblings', None, '_enable_remote', _enable_remote)
|
11,674 | 31b435811c847b4f6ef3701ed7b134ce26df3bc2 | from marshmallow import fields, Schema
from pystac.models.geojson_type import GeojsonType
from pystac.models.item import ItemSchema
from pystac.models.base import STACObject
class Collection(STACObject):
    """STAC catalog item collection, serialized as a GeoJSON FeatureCollection."""

    def __init__(self, features, collection_id):
        """Create an item collection.

        Args:
            features (List[Item]): items contained in the collection.
            collection_id: identifier of this collection.
        """
        self.features = features
        self.id = collection_id

    @property
    def type(self):
        """GeoJSON type string for this object."""
        return GeojsonType.FeatureCollection.value

    @property
    def dict(self):
        """Plain-dict representation of the collection."""
        feature_dicts = [feature.dict for feature in self.features]
        return {
            'type': self.type,
            'id': self.id,
            'features': feature_dicts,
        }

    @property
    def json(self):
        """JSON string produced via the marshmallow schema."""
        return CollectionSchema().dumps(self)
class CollectionSchema(Schema):
    """Marshmallow schema serializing a Collection to GeoJSON fields."""
    type = fields.Str()
    id = fields.Str()
    features = fields.Nested(ItemSchema, many=True)
|
11,675 | 447415c6fde2df40cba19beae95c7bb5027a382d | from django.urls import include
from django.conf.urls import url
from api.admin import views_feed
# Admin feed endpoints: index view plus a list of all feed sources.
urlpatterns = [
    url(r'^$', views_feed.main),
    url(r'^getAllSources$', views_feed.get_all_sources)
]
11,676 | bda8db4f724bec74f9a0af89b6b73e0e52554a3a | #coding=utf-8
# Python 2 script: explicit unicode literal and statement-form print.
s = u'aaaaaaaaa'
print s
11,677 | 076e3b44cf793d6a9bac7054f52423f8e68476ed | '''
(c) 2018, charlesg@unixrealm.com - Fork from QSTK
https://charlesg.github.io/pftk/
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license.
@author: Sourabh Bajaj
@maintainer: Charles Gagnon
@contact: charlesg@unixrealm.com
@summary: Demonstrates the use of the CVXOPT portfolio optimization call.
'''
# QSTK Imports
import pftk.pftkutil.qsdateutil as du
import pftk.pftkutil.tsutil as tsu
import pftk.pftkutil.data_access as da
# Third Party Imports
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def getFrontier(na_data):
    '''Compute a 101-point efficient frontier for the given returns.

    Args:
        na_data: 2D array of daily returns (rows = days, cols = symbols).

    Returns:
        (lf_returns, lf_std, lna_portfolios, na_avgrets, na_std):
        target returns, portfolio std-devs, portfolio weight vectors,
        per-symbol average returns, and per-symbol std-devs.
    '''
    # Special Case with fTarget=None, just returns average rets.
    (na_avgrets, na_std, b_error) = tsu.OptPort(na_data, None)
    # Declaring bounds on the optimized portfolio (long-only, 0..1 weights).
    na_lower = np.zeros(na_data.shape[1])
    na_upper = np.ones(na_data.shape[1])
    # Getting the range of possible returns with these bounds
    (f_min, f_max) = tsu.getRetRange(na_data, na_lower, na_upper,
                                     na_avgrets, s_type="long")
    # Getting the step size and list of returns to optimize for.
    f_step = (f_max - f_min) / 100.0
    lf_returns = [f_min + x * f_step for x in range(101)]
    # Declaring empty lists
    lf_std = []
    lna_portfolios = []
    # Calling the optimization for all returns
    for f_target in lf_returns:
        (na_weights, f_std, b_error) = tsu.OptPort(na_data, f_target,
                                                   na_lower, na_upper, s_type="long")
        lf_std.append(f_std)
        lna_portfolios.append(na_weights)
    return (lf_returns, lf_std, lna_portfolios, na_avgrets, na_std)
def main():
    '''Plot the S&P 100 efficient frontier for 2009 vs 2010.

    Saves the comparison chart (including how the 2009 frontier's
    portfolios performed in 2010) to tutorial8.pdf.
    '''
    # S&P 100
    ls_symbols = ['AAPL', 'ABT', 'ACN', 'AEP', 'ALL', 'AMGN', 'AMZN', 'APC', 'AXP', 'BA', 'BAC', 'BAX', 'BHI', 'BK', 'BMY', 'CAT', 'C', 'CL', 'CMCSA', 'COF', 'COP', 'COST', 'CPB', 'CSCO', 'CVS', 'CVX', 'DD', 'DELL', 'DIS', 'DOW', 'DVN', 'EBAY', 'EMC', 'EXC', 'F', 'FCX', 'FDX', 'GD', 'GE', 'GILD', 'GOOG', 'GS', 'HAL', 'HD', 'HNZ', 'HON', 'HPQ', 'IBM', 'INTC', 'JNJ', 'JPM', 'KO', 'LLY', 'LMT', 'LOW', 'MA', 'MCD', 'MDT', 'MET', 'MMM', 'MO', 'MON', 'MRK', 'MS', 'MSFT', 'NKE', 'NOV', 'NSC', 'NWSA', 'NYX', 'ORCL', 'OXY', 'PEP', 'PFE', 'PG', 'PM', 'QCOM', 'RF', 'RTN', 'SBUX', 'SLB', 'SO', 'SPG', 'T', 'TGT', 'TWX', 'TXN', 'UNH', 'UPS', 'USB', 'UTX', 'VZ', 'WFC', 'WMB', 'WMT', 'XOM']
    # Creating an object of the dataaccess class with Yahoo as the source.
    c_dataobj = da.DataAccess('EODHistoricalData')
    ls_all_syms = c_dataobj.get_all_symbols()
    # Bad symbols are symbols present in portfolio but not in all syms
    ls_bad_syms = list(set(ls_symbols) - set(ls_all_syms))
    for s_sym in ls_bad_syms:
        i_index = ls_symbols.index(s_sym)
        ls_symbols.pop(i_index)
    # Start and End date of the charts
    dt_end = dt.datetime(2010, 1, 1)
    dt_start = dt_end - dt.timedelta(days=365)
    dt_test = dt_end + dt.timedelta(days=365)
    # We need closing prices so the timestamp should be hours=16.
    dt_timeofday = dt.timedelta(hours=16)
    # Get a list of trading days between the start and the end.
    ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
    ldt_timestamps_test = du.getNYSEdays(dt_end, dt_test, dt_timeofday)
    # Reading just the close prices
    df_close = c_dataobj.get_data(ldt_timestamps, ls_symbols, "close")
    df_close_test = c_dataobj.get_data(ldt_timestamps_test, ls_symbols, "close")
    # Filling the data for missing NAN values
    df_close = df_close.fillna(method='ffill')
    df_close = df_close.fillna(method='bfill')
    df_close_test = df_close_test.fillna(method='ffill')
    df_close_test = df_close_test.fillna(method='bfill')
    # Copying the data values to a numpy array to get returns
    na_data = df_close.values.copy()
    na_data_test = df_close_test.values.copy()
    # Getting the daily returns
    tsu.returnize0(na_data)
    tsu.returnize0(na_data_test)
    # Calculating the frontier.
    (lf_returns, lf_std, lna_portfolios, na_avgrets, na_std) = getFrontier(na_data)
    (lf_returns_test, lf_std_test, unused, unused, unused) = getFrontier(na_data_test)
    # Plotting the efficient frontier
    plt.clf()
    plt.plot(lf_std, lf_returns, 'b')
    plt.plot(lf_std_test, lf_returns_test, 'r')
    # Plot where the efficient frontier would be the following year
    lf_ret_port_test = []
    lf_std_port_test = []
    for na_portfolio in lna_portfolios:
        na_port_rets = np.dot(na_data_test, na_portfolio)
        lf_std_port_test.append(np.std(na_port_rets))
        lf_ret_port_test.append(np.average(na_port_rets))
    plt.plot(lf_std_port_test, lf_ret_port_test, 'k')
    # Plot indivisual stock risk/return as green +
    for i, f_ret in enumerate(na_avgrets):
        plt.plot(na_std[i], f_ret, 'g+')
    # # Plot some arrows showing transistion of efficient frontier
    # for i in range(0, 101, 10):
    #     plt.arrow(lf_std[i], lf_returns[i], lf_std_port_test[i] - lf_std[i],
    #               lf_ret_port_test[i] - lf_returns[i], color='k')
    # Labels and Axis
    plt.legend(['2009 Frontier', '2010 Frontier',
                'Performance of \'09 Frontier in 2010'], loc='lower right')
    plt.title('Efficient Frontier For S&P 100 ')
    plt.ylabel('Expected Return')
    plt.xlabel('StDev')
    plt.savefig('tutorial8.pdf', format='pdf')

if __name__ == '__main__':
    main()
|
11,678 | 10e7f23e5ef0cb74bec458ba824a97a69f9a3b16 | import socket
import time
import pickle
#create a socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#bind ip_address and port_number to socket
#s.bind((socket.gethostname(), 5688))
s.bind(("0.0.0.0", 5688))
#max connections that socket can listen at a time
s.listen(5)
HEADER_LENGTH = 10
#accept client connection
while True:
client, address = s.accept()
print(f"Connection established from {address} client!")
print(f"Client info: {client}")
msg = 'Welcome to the Server!'
msg = pickle.dumps(msg)
msg = bytes(f'{len(msg):<{HEADER_LENGTH}}', "utf-8") + msg
#client.send(bytes(msg, "utf-8"))
client.send(msg)
time.sleep(3)
"""#Receiving data from client
recv_msg = client.recv(1024)
print(recv_msg.decode("utf-8"))
client.send(recv_msg)"""
msg = {"name":"yashas", "message": "hello world"}
msg = pickle.dumps(msg)
msg = bytes(f'{len(msg):<{HEADER_LENGTH}}', "utf-8") + msg
client.send(msg)
'''while True:
time.sleep(3)
msg = f'the current timr is {time.time()}'
msg = f'{len(msg):<{HEADER_LENGTH}}' + msg
client.send(bytes(msg, "utf-8"))'''
"""while True:
recv_msg = client.recv(1024)
recv_msg = recv_msg.decode("utf-8")
if not recv_msg:
s.close()
print(recv_msg)
client.send(bytes(recv_msg, "utf-8"))"""
s.close()
|
11,679 | 1f0370c7525160486e0f2629bb14f4a8bd594b7b | #Write a Program for sqaure of sum of n numbers
num=int(input("Enter the numbers"))
sm=0
for i in range(1,num+1):
sm = sm+i
sm=pow(sm,2)
print(sm)
|
11,680 | e5e38de7b4850ec5d8058d73b90157b3561cb39f | import os
import time
import datetime
import hashlib
import urllib
import sqlite3
import tweepy as tp
import numpy as np
import cv2
import oauth # oauthใฎ่ช่จผใญใผ
try:
import dlib
use_dlib = True
except ImportError:
use_dlib = False
class StreamListener(tp.StreamListener):
    """Twitter UserStream listener that harvests anime-face images.

    For each photo tweet: downloads the image, skips duplicates via MD5,
    runs an anime-face cascade (and, when dlib is available, an eye
    detector on each face), then stores accepted images and their
    metadata in a per-day directory with a SQLite index.

    Bug fix: the original file had multi-line Japanese comments split by
    a bad extraction, which left bare non-comment identifier lines (e.g.
    in __init__ and on_status) that raised NameError at runtime; those
    fragments are restored below as comments.
    """

    def __init__(self, api):
        """Prepare today's save directory and load the detectors."""
        self.api = api
        # Save destination: one directory per calendar day.
        self.old_date = datetime.date.today()
        self.mkdir()
        # Files required for detection.
        self.cascade = cv2.CascadeClassifier("lbpcascade_animeface.xml")
        if use_dlib:
            self.eye_detector = dlib.simple_object_detector("detector_eye.svm")

    def on_status(self, status):
        """Process one Status delivered by the UserStream."""
        # Whether the tweet carries at least one image.
        is_media = False
        # Roll over to a fresh directory/DB when the date changes.
        now = datetime.date.today()
        if now != self.old_date:
            self.old_date = now
            self.dbfile.commit()
            self.dbfile.close()
            self.mkdir()
        # Unwrap retweets and quote tweets to the original status.
        if hasattr(status, "retweeted_status"):
            status = status.retweeted_status
        if hasattr(status, "quoted_status"):
            status = status.quoted_status
        # Tweets with several images.
        if hasattr(status, "extended_entities"):
            if 'media' in status.extended_entities:
                status_media = status.extended_entities
                is_media = True
        # Tweets with a single image.
        elif hasattr(status, "entities"):
            if 'media' in status.entities:
                status_media = status.entities
                is_media = True
        if is_media:
            # Skip our own bot accounts.
            if status.user.screen_name != "marron_general" and status.user.screen_name != "marron_recbot":
                for image in status_media['media']:
                    if image['type'] != 'photo':
                        break
                    # URL and sequential local file name.
                    media_url = image['media_url']
                    root, ext = os.path.splitext(media_url)
                    filename = str(self.fileno).zfill(5)
                    # Download the full-size image.
                    try:
                        temp_file = urllib.request.urlopen(media_url + ":large").read()
                    except Exception:
                        # Narrowed from a bare except so Ctrl-C still works.
                        print("Download Error")
                        continue
                    # Skip images we already fetched today (MD5 match).
                    current_md5 = hashlib.md5(temp_file).hexdigest()
                    if current_md5 in self.file_md5:
                        print("geted : " + status.user.screen_name +"-" + filename + ext)
                        continue
                    image = cv2.imdecode(np.asarray(bytearray(temp_file), dtype=np.uint8), 1)
                    faces = self.cascade.detectMultiScale(
                        image,
                        scaleFactor=1.11,
                        minNeighbors=2,
                        minSize=(64, 64))
                    # No 2-D (anime) face detected: skip the image.
                    if len(faces) <= 0:
                        print("skiped : " + status.user.screen_name + "-" + filename + ext)
                    else:
                        eye = False  # whether any eye was detected
                        facex = []
                        facey = []
                        facew = []
                        faceh = []
                        if use_dlib:
                            # Crop each detected face and search it for eyes.
                            for i, area in enumerate(faces):
                                x, y, width, height = tuple(area[0:4])
                                face = image[y:y+height, x:x+width]
                                eyes = self.eye_detector(face)
                                if len(eyes) > 0:
                                    facex.append(x)
                                    facey.append(y)
                                    facew.append(width)
                                    faceh.append(height)
                                    eye = True
                        # Save when an eye was found (or dlib is unavailable).
                        if use_dlib == False or eye:
                            out = open(self.base_path + filename + ext, "wb")
                            out.write(temp_file)
                            out.close()
                            # Remember the MD5 as fetched.
                            self.file_md5.append(current_md5)
                            # Collect hashtags, if any.
                            tags = []
                            if hasattr(status, "entities"):
                                if "hashtags" in status.entities:
                                    for hashtag in status.entities['hashtags']:
                                        tags.append(hashtag['text'])
                            # Record metadata in the database.
                            # WARNING: this INSERT is assembled by string
                            # concatenation from tweet-controlled data (SQL
                            # injection / quoting hazard) — it should use a
                            # parameterized query.
                            url = "https://twitter.com/" + status.user.screen_name + "/status/" + status.id_str
                            self.dbfile.execute("insert into list values('" + filename + ext + "','" + \
                                status.user.screen_name + "','" + url + "'," + str(status.favorite_count) + "," + \
                                str(status.retweet_count) + ",'" + str(tags).replace("'","") + "','" + str(datetime.datetime.now()) + \
                                "','" + str(facex) + "','" + str(facey) + "','" + str(facew) + "','" + str(faceh) +"')")
                            self.dbfile.commit()
                            print("saved : " + status.user.screen_name + "-" + filename + ext)
                            if tags != []:
                                print(" tags : " + str(tags))
                            self.fileno += 1
                        else:
                            print("noEye : " + status.user.screen_name + "-" + filename + ext)

    def mkdir(self):
        """Create today's save directory and reset the per-day state."""
        self.base_path = "./" + self.old_date.isoformat() + "/"
        if os.path.exists(self.base_path) == False:
            os.mkdir(self.base_path)
        self.fileno = 0
        self.file_md5 = []
        self.dbfile = sqlite3.connect(self.base_path + "list.db")
        # The table already exists on every run after the first of a day.
        try:
            self.dbfile.execute("create table list (filename, username, url, fav, retweet, tags, time, facex, facey, facew, faceh)")
        except sqlite3.OperationalError:
            pass
def main():
    """Connect the listener to the Twitter UserStream, retrying on errors."""
    auth = oauth.get_oauth()
    stream = tp.Stream(auth, StreamListener(tp.API(auth)), secure=True)
    print('Start Streaming!')
    while True:
        try:
            stream.userstream()
        except KeyboardInterrupt:
            exit()
        except:
            # Back off a minute before reconnecting after any stream error.
            print('UserStream Error')
            time.sleep(60)

if __name__ == '__main__':
    main()
|
11,681 | bbff3a28d13d51414794b54d7436099e97bf75df | from django.contrib import admin
from .models import ShortURL
@admin.register(ShortURL)
class ShortURLAdmin(admin.ModelAdmin):
    """Django admin configuration for ShortURL records."""
    list_display = ('short_id', 'redirect_url')
    # Generated/derived fields must not be hand-edited in the admin.
    readonly_fields = ('short_id', 'creator_ip', 'transitions_number')
    list_filter = ('redirect_url',)
    search_fields = ('redirect_url',)
|
11,682 | ba2846910f242997a41c1ffe4c76499566c2d115 | # -*- coding: utf-8 -*-
"""
Binary Counter learning project using generator function
For a given integer n > 0, count up and print from 1 to n in binary
Example:
n = 5:
>> 1
>> 10
>> 11
>> 100
>> 101
"""
def binary_count(n):
    """Print the binary representations of 1..n, one per line."""
    # Strict type(...) check (not isinstance) so bool input is rejected too.
    if type(n) != int or n <= 0:
        print('Please input a positive integer.')
        return
    for value in print_to_bin(n):
        print(value)
def print_to_bin(n):
    """Yield binary strings for the integers 1 through n, in order."""
    current = '1'  # binary representation of 1
    for _ in range(n):
        yield current
        current = bin_add(current)
def bin_add(b):
    '''
    Return the binary string representing int(b, 2) + 1.

    Simplified from the original digit-by-digit carry loop, which
    duplicated the carry handling across two nested branches: adding 1
    flips the rightmost '0' to '1' and zeroes every digit after it;
    an all-ones input carries out into a longer number.
    '''
    # All 1s: the result is a 1 followed by len(b) zeros (e.g. 111 -> 1000).
    if '0' not in b:
        return '1' + '0' * len(b)
    # Flip the rightmost 0, zero out the trailing 1s after it.
    last_zero = b.rindex('0')
    return b[:last_zero] + '1' + '0' * (len(b) - last_zero - 1)
|
11,683 | 12fe0ebc65aed324182bafc07aef3a02328b52fb | from flask import Flask, request, render_template, redirect, flash, jsonify, session
from surveys import Question, Survey, satisfaction_survey
from flask_debugtoolbar import DebugToolbarExtension
# Flask app serving the satisfaction survey; session keeps per-user state.
app = Flask(__name__)
app.config['SECRET_KEY'] = 'this-is-secret'
debug = DebugToolbarExtension(app)

@app.route('/')
def survey_welcome_page():
    '''Welcomes the user to the survey. Contains a start survey button.'''
    return render_template('welcome.html', survey = satisfaction_survey)

@app.route('/questions/<int:question_no>', methods=["POST"])
def create_responseList(question_no):
    '''Creates the response list in the session for each user, then
    redirects to the first question.'''
    session['response'] = []
    return redirect('/questions/0')

@app.route('/questions/<int:question_no>')
def display_question(question_no):
    '''Prompts the user to answer each question contained in the survey
    instance. Out-of-order question numbers are redirected back to the
    user's current question.'''
    ##question_number = question_no
    print(f"i am inside questions/0")
    if question_no == 0:
        session['question_number'] = 0
    elif question_no != session['question_number'] :
        flash("Youโre trying to access an invalid question")
        # Snap back to the question the user should actually be on.
        question_no = session['question_number']
    session['question_number'] = question_no
    return render_template('question.html', question = satisfaction_survey.questions[question_no], question_number = session['question_number'])

@app.route('/answers', methods=['POST'])
def collect_answers():
    '''Collects the user's response to the survey question by appending
    their answer to the session response list, then redirects the user to
    the next question (or the thank-you page when done).'''
    session['response'].append(request.form['choice'])
    session['question_number'] += 1
    next_question = session['question_number']
    if next_question < len(satisfaction_survey.questions):
        return redirect(f'/questions/{next_question}')
    else :
        return render_template('/thank-you.html')
11,684 | ab711f13b95e7f3479596ad52d0c79fb436b36b3 | #!/usr/bin/env python
import os, sys, requests, json
from pprint import pprint
from utils.UrlUtils import UrlUtils
def check_int(es_url, es_index, hash_id):
    """Query for interferograms with specified input hash ID.

    Returns:
        (total, id): number of matching documents and the first hit's
        _id, or (0, 'NONE') when nothing matches.
    """
    query = {
        "query":{
            "bool":{
                "must":[
                    {"term":{"metadata.input_hash_id":hash_id}},
                ]
            }
        }
    }
    if es_url.endswith('/'):
        search_url = '%s%s/_search' % (es_url, es_index)
    else:
        search_url = '%s/%s/_search' % (es_url, es_index)
    r = requests.post(search_url, data=json.dumps(query))
    if r.status_code != 200:
        # Python 2 print-to-stderr syntax: this module is Python 2 only.
        print >>sys.stderr, "Failed to query %s:\n%s" % (es_url, r.text)
        print >>sys.stderr, "query: %s" % json.dumps(query, indent=2)
        print >>sys.stderr, "returned: %s" % r.text
        r.raise_for_status()
    result = r.json()
    pprint(result)
    total = result['hits']['total']
    if total == 0: id = 'NONE'
    else: id = result['hits']['hits'][0]['_id']
    return total, id

if __name__ == "__main__":
    # CLI usage: check_int.py <hash_id>; writes the hit count and first
    # matching id to interferograms_found.txt.
    uu = UrlUtils()
    es_url = uu.rest_url
    es_index = '%s_interferogram' % uu.grq_index_prefix
    total, id = check_int(es_url, es_index, sys.argv[1])
    with open('interferograms_found.txt', 'w') as f:
        f.write("%d\n%s\n" % (total, id))
|
11,685 | 6a28c66c34c4692976145097e8865ad16122b063 | import os
from os.path import join
import PIL.Image as pimg
import numpy as np
from tqdm import trange
import data_utils
# Root of the original Cityscapes fine ground-truth annotations.
data_dir = '/home/kivan/datasets/Cityscapes/orig/gtFine'
# Destination root for the converted label images.
save_dir = '/home/kivan/datasets/Cityscapes/2048x1024/labels'
def prepare_data(name):
    """Convert the Cityscapes labelId images of one split and save them.

    Walks every city directory of the given split ('train' or 'val'),
    remaps the raw label ids via data_utils.convert_ids and writes the
    result as PNGs under save_dir, keeping the city sub-structure.
    """
    split_dir = join(data_dir, name)
    for city in next(os.walk(split_dir))[1]:
        print(city)
        city_dir = join(split_dir, city)
        # Only the '*labelIds*' ground-truth images are converted.
        label_files = [f for f in next(os.walk(city_dir))[2] if f.find('labelIds') >= 0]
        out_dir = join(save_dir, name, city)
        os.makedirs(out_dir, exist_ok=True)
        for idx in trange(len(label_files)):
            fname = label_files[idx]
            labels = np.array(pimg.open(join(city_dir, fname)))
            labels, _ = data_utils.convert_ids(labels, ignore_id=19)
            # Drop the '_gtFine_labelIds.png' suffix (20 chars) from the name.
            pimg.fromarray(labels).save(join(out_dir, fname[:-20] + '.png'))
# Convert both the training and the validation split.
prepare_data('train')
prepare_data('val')
|
11,686 | 8b011e89ce886f13558ab292073393f5329edff0 | from RFEM.initModel import *
from RFEM.enums import *
class MemberSetLoad():
def __init__(self,
no: int = 1,
load_case_no: int = 1,
member_sets: str = '1',
load_direction = LoadDirectionType.LOAD_DIRECTION_LOCAL_Z,
magnitude: float = 0,
comment: str = '',
params: dict = {}):
"""
Args:
no (int): Load Tag
load_case_no (int): Assigned Load Case
member_sets (str): Assigned Member Sets
load_direction (enum): Load Case Enumeration
magnitude (float): Load Magnitude
comment (str, optional): Comments
params (dict, optional): Parameters
"""
# Client model | Member Load
clientObject = clientModel.factory.create('ns0:member_set_load')
# Clears object atributes | Sets all atributes to None
clearAtributes(clientObject)
# Member Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Member Sets No. (e.g. '5 6 7 12')
clientObject.member_sets = ConvertToDlString(member_sets)
# Member Load Type
load_type = MemberSetLoadType.LOAD_TYPE_FORCE
clientObject.load_type = load_type.name
# Member Load Distribution
load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM
clientObject.load_distribution = load_distribution.name
# Member Load Direction
clientObject.load_direction = load_direction.name
#Load Magnitude
clientObject.magnitude = magnitude
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Load Member Load to client model
clientModel.service.set_member_set_load(load_case_no, clientObject)
def Force(self,
no: int = 1,
load_case_no: int = 1,
member_sets: str = '1',
load_distribution= MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
load_direction= MemberSetLoadDirection.LOAD_DIRECTION_LOCAL_Z,
load_parameter = [],
force_eccentricity: bool= False,
comment: str = '',
params: dict = {}):
"""
Args:
no (int): Load Tag
load_case_no (int): Assigned Load Case
member_sets (str): Assigned Member Sets
load_distribution (enum): Load Distribution Enumeration
load_direction (enum): Load Direction Enumeration
load_parameter (list): Load Parameter
force_eccentricity (bool): Force Eccentricity Option
comment (str, optional): Comments
params (dict, optional): Parameters
for LOAD_DISTRIBUTION_UNIFORM:
load_parameter = [magnitude]
for LOAD_DISTRIBUTION_UNIFORM_TOTAL:
load_parameter = [magnitude]
for LOAD_DISTRIBUTION_CONCENTRATED_1:
load_parameter = [relative_distance = False, magnitude, distance_a]
for LOAD_DISTRIBUTION_CONCENTRATED_N:
load_parameter = [relative_distance_a = False, relative_distance_b = False, magnitude, count_n, distance_a, distance_b]
for LOAD_DISTRIBUTION_CONCENTRATED_2x2:
load_parameter = [relative_distance_a = False, relative_distance_b = False, relative_distance_c = False, magnitude, distance_a, distance_b, distance_c]
for LOAD_DISTRIBUTION_CONCENTRATED_2:
load_parameter = [relative_distance_a = False, relative_distance_b = False, magnitude_1, magnitude_2, distance_a, distance_b]
for LOAD_DISTRIBUTION_CONCENTRATED_VARYING:
load_parameter = [[distance, delta_distance, magnitude], ...]
for LOAD_DISTRIBUTION_TRAPEZOIDAL:
load_parameter = [relative_distance_a = False, relative_distance_b = False,magnitude_1, magnitude_2, distance_a, distance_b]
for LOAD_DISTRIBUTION_TAPERED:
load_parameter = [relative_distance_a = False, relative_distance_b = False,magnitude_1, magnitude_2, distance_a, distance_b]
for LOAD_DISTRIBUTION_PARABOLIC:
load_parameter = [magnitude_1, magnitude_2, magnitude_3]
for LOAD_DISTRIBUTION_VARYING:
load_parameter = [[distance, delta_distance, magnitude], ...]
for LOAD_DISTRIBUTION_VARYING_IN_Z:
load_parameter = [[distance, delta_distance, magnitude], ...]
params:
{'eccentricity_horizontal_alignment': MemberSetLoadEccentricityHorizontalAlignment.ALIGN_NONE,
'eccentricity_vertical_alignment': MemberSetLoadEccentricityVerticalAlignment.ALIGN_NONE,
'eccentricity_section_middle': MemberSetLoadEccentricitySectionMiddle.LOAD_ECCENTRICITY_SECTION_MIDDLE_CENTER_OF_GRAVITY,
'is_eccentricity_at_end_different_from_start': False,
'eccentricity_y_at_end': 0.0,
'eccentricity_y_at_start': 0.0,
'eccentricity_z_at_end': 0.0,
'eccentricity_z_at_start': 0.0}
"""
# Client model | Member Load
clientObject = clientModel.factory.create('ns0:member_set_load')
# Clears object atributes | Sets all atributes to None
clearAtributes(clientObject)
# Member Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Members No. (e.g. '5 6 7 12')
clientObject.member_sets = ConvertToDlString(member_sets)
# Member Load Type
load_type = MemberSetLoadType.LOAD_TYPE_FORCE
clientObject.load_type = load_type.name
# Member Load Distribution
clientObject.load_distribution= load_distribution.name
#Load Magnitude and Parameters
if load_parameter == []:
raise Exception("WARNING: Load parameter cannot be empty. Kindly check list inputs completeness and correctness.")
else:
if load_distribution.name == "LOAD_DISTRIBUTION_UNIFORM" or load_distribution.name == "LOAD_DISTRIBUTION_UNIFORM_TOTAL":
if len(load_parameter) == 1:
clientObject.magnitude = load_parameter[0]
else:
raise Exception("WARNING: Load parameter array length should be 1 for LOAD_DISTRIBUTION_UNIFORM. Kindly check list inputs completeness and correctness.")
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_1":
if len(load_parameter) == 3:
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
if load_parameter[0] == False:
clientObject.magnitude = load_parameter[1]
clientObject.distance_a_absolute = load_parameter[2]
else:
clientObject.magnitude = load_parameter[1]
clientObject.distance_a_relative = load_parameter[2]
else:
raise Exception("WARNING: Load parameter array length should be 3 for LOAD_DISTRIBUTION_CONCENTRATED_1. Kindly check list inputs completeness and correctness.")
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_N":
if len(load_parameter) == 6:
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude = load_parameter[2]
clientObject.count_n = load_parameter[3]
if load_parameter[0] == False:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if load_parameter[1] == False:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
else:
raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_CONCENTRATED_N. Kindly check list inputs completeness and correctness.")
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_2x2":
if len(load_parameter) == 7:
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.distance_c_is_defined_as_relative = load_parameter[2]
clientObject.magnitude = load_parameter[3]
if load_parameter[0] == False:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if load_parameter[1] == False:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
if load_parameter[2] == False:
clientObject.distance_c_absolute = load_parameter[6]
else:
clientObject.distance_c_relative = load_parameter[6]
else:
raise Exception("WARNING: Load parameter array length should be 7 for LOAD_DISTRIBUTION_CONCENTRATED_N. Kindly check list inputs completeness and correctness.")
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_2":
if len(load_parameter) == 6:
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude_1 = load_parameter[2]
clientObject.magnitude_2 = load_parameter[3]
if load_parameter[0] == False:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if load_parameter[1] == False:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
else:
raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_CONCENTRATED_2. Kindly check list inputs completeness and correctness.")
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_VARYING":
try:
len(load_parameter[0])==3
except:
print("WARNING: MemberSetLoad no: %x, load case: %x - Wrong data input." % (no, load_case_no))
clientObject.varying_load_parameters = clientModel.factory.create('ns0:member_set_load.varying_load_parameters')
for i in range(len(load_parameter)):
mlvlp = clientModel.factory.create('ns0:member_set_load_varying_load_parameters')
mlvlp.no = i+1
mlvlp.distance = load_parameter[i][0]
mlvlp.delta_distance = load_parameter[i][1]
mlvlp.magnitude = load_parameter[i][2]
mlvlp.note = None
mlvlp.magnitude_t_c = 0.0
mlvlp.magnitude_delta_t = 0.0
mlvlp.magnitude_t_t = 0.0
mlvlp.magnitude_t_b = 0.0
clientObject.varying_load_parameters.member_set_load_varying_load_parameters.append(mlvlp)
elif load_distribution.name == "LOAD_DISTRIBUTION_TRAPEZOIDAL":
if len(load_parameter) == 6:
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude_1 = load_parameter[2]
clientObject.magnitude_2 = load_parameter[3]
if load_parameter[0] == False:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if load_parameter[1] == False:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
else:
raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_TRAPEZOIDAL. Kindly check list inputs completeness and correctness.")
elif load_distribution.name == "LOAD_DISTRIBUTION_TAPERED":
if len(load_parameter)==6:
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude_1 = load_parameter[2]
clientObject.magnitude_2 = load_parameter[3]
if load_parameter[0] == False:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if load_parameter[1] == False:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
else:
raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_TAPERED. Kindly check list inputs completeness and correctness.")
elif load_distribution.name == "LOAD_DISTRIBUTION_PARABOLIC":
if len(load_parameter)==3:
clientObject.magnitude_1 = load_parameter[0]
clientObject.magnitude_2 = load_parameter[1]
clientObject.magnitude_3 = load_parameter[2]
else:
raise Exception("WARNING: Load parameter array length should be 3 for LOAD_DISTRIBUTION_PARABOLIC. Kindly check list inputs completeness and correctness.")
elif load_distribution.name == "LOAD_DISTRIBUTION_VARYING":
try:
len(load_parameter[0])==3
except:
print("WARNING: MemberSetLoad no: %x, load case: %x - Wrong data input." % (no, load_case_no))
clientObject.varying_load_parameters = clientModel.factory.create('ns0:member_set_load.varying_load_parameters')
for i in range(len(load_parameter)):
mlvlp = clientModel.factory.create('ns0:member_set_load_varying_load_parameters')
mlvlp.no = i+1
mlvlp.distance = load_parameter[i][0]
mlvlp.delta_distance = load_parameter[i][1]
mlvlp.magnitude = load_parameter[i][2]
mlvlp.note = None
mlvlp.magnitude_t_c = 0.0
mlvlp.magnitude_delta_t = 0.0
mlvlp.magnitude_t_t = 0.0
mlvlp.magnitude_t_b = 0.0
clientObject.varying_load_parameters.member_set_load_varying_load_parameters.append(mlvlp)
elif load_distribution.name == "LOAD_DISTRIBUTION_VARYING_IN_Z":
try:
len(load_parameter[0])==3
except:
print("WARNING: MemberSetLoad no: %x, load case: %x - Wrong data input." % (no, load_case_no))
clientObject.varying_load_parameters = clientModel.factory.create('ns0:member_set_load.varying_load_parameters')
for i in range(len(load_parameter)):
mlvlp = clientModel.factory.create('ns0:member_set_load_varying_load_parameters')
mlvlp.no = i+1
mlvlp.distance = load_parameter[i][0]
mlvlp.delta_distance = load_parameter[i][1]
mlvlp.magnitude = load_parameter[i][2]
mlvlp.note = None
mlvlp.magnitude_t_c = 0.0
mlvlp.magnitude_delta_t = 0.0
mlvlp.magnitude_t_t = 0.0
mlvlp.magnitude_t_b = 0.0
clientObject.varying_load_parameters.member_set_load_varying_load_parameters.append(mlvlp)
# Member Load Direction
clientObject.load_direction = load_direction.name
#Force Eccentiricity
clientObject.has_force_eccentricity = force_eccentricity
if force_eccentricity == True:
if 'eccentricity_horizontal_alignment' and 'eccentricity_vertical_alignment' and 'eccentricity_section_middle' \
'is_eccentricity_at_end_different_from_start' and 'eccentricity_y_at_end' and 'eccentricity_y_at_start' \
'eccentricity_z_at_end' and 'eccentricity_z_at_start' in params:
pass
else:
raise Exception("WARNING: Params does not contain all the necessary parameters. Kindly check dictionary")
params_ecc = {'eccentricity_horizontal_alignment': MemberSetLoadEccentricityHorizontalAlignment.ALIGN_NONE,
'eccentricity_vertical_alignment': MemberSetLoadEccentricityVerticalAlignment.ALIGN_NONE,
'eccentricity_section_middle': MemberSetLoadEccentricitySectionMiddle.LOAD_ECCENTRICITY_SECTION_MIDDLE_CENTER_OF_GRAVITY,
'is_eccentricity_at_end_different_from_start': False,
'eccentricity_y_at_end': 0.0,
'eccentricity_y_at_start': 0.0,
'eccentricity_z_at_end': 0.0,
'eccentricity_z_at_start': 0.0}
params_ecc.update(params)
if params_ecc['is_eccentricity_at_end_different_from_start'] == False:
clientObject.eccentricity_horizontal_alignment= params_ecc['eccentricity_horizontal_alignment'].name
clientObject.eccentricity_vertical_alignment= params_ecc['eccentricity_vertical_alignment'].name
clientObject.eccentricity_section_middle = params_ecc['eccentricity_section_middle'].name
clientObject.eccentricity_y_at_end= params_ecc['eccentricity_y_at_start']
clientObject.eccentricity_y_at_start= params_ecc['eccentricity_y_at_start']
clientObject.eccentricity_z_at_end= params_ecc['eccentricity_z_at_start']
clientObject.eccentricity_z_at_start= params_ecc['eccentricity_z_at_start']
elif params_ecc['is_eccentricity_at_end_different_from_start'] == True:
clientObject.eccentricity_horizontal_alignment= params_ecc['eccentricity_horizontal_alignment']
clientObject.eccentricity_vertical_alignment= params_ecc['eccentricity_vertical_alignment']
clientObject.eccentricity_section_middle = params_ecc['eccentricity_section_middle']
clientObject.eccentricity_y_at_end= params_ecc['eccentricity_y_at_end']
clientObject.eccentricity_y_at_start= params_ecc['eccentricity_y_at_start']
clientObject.eccentricity_z_at_end= params_ecc['eccentricity_z_at_end']
clientObject.eccentricity_z_at_start= params_ecc['eccentricity_z_at_start']
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
if 'eccentricity_horizontal_alignment' or 'eccentricity_vertical_alignment' or 'eccentricity_section_middle' or 'is_eccentricity_at_end_different_from_start' or 'eccentricity_y_at_end' or 'eccentricity_y_at_start' or 'eccentricity_z_at_end' or 'eccentricity_z_at_start':
pass
else:
for key in params:
clientObject[key] = params[key]
# Add Load Member Load to client model
clientModel.service.set_member_set_load(load_case_no, clientObject)
def Moment(self,
no: int = 1,
load_case_no: int = 1,
member_sets: str = '1',
load_distribution= MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
load_direction= MemberSetLoadDirection.LOAD_DIRECTION_LOCAL_Z,
load_parameter = [],
comment: str = '',
params: dict = {}):
"""
Args:
no (int): Load Tag
load_case_no (int): Assigned Load Case
member_sets (str): Assigned Member Sets
load_distribution (enum): Load Distribution Enumeration
load_direction (enum): Load Direction Enumeration
load_parameter (list): Load Parameters
comment (str, optional): Comments
params (dict, optional): Parameters
for LOAD_DISTRIBUTION_UNIFORM:
load_parameter = magnitude
for LOAD_DISTRIBUTION_CONCENTRATED_1:
load_parameter = [relative_distance = False, magnitude, distance_a]
for LOAD_DISTRIBUTION_CONCENTRATED_N:
load_parameter = [relative_distance_a = False, relative_distance_b = False, magnitude, count_n, distance_a, distance_b]
for LOAD_DISTRIBUTION_CONCENTRATED_2x2:
load_parameter = [relative_distance_a = False, relative_distance_b = False, relative_distance_c = False, magnitude, distance_a, distance_b, distance_c]
for LOAD_DISTRIBUTION_CONCENTRATED_2:
load_parameter = [relative_distance_a = False, relative_distance_b = False, magnitude_1, magnitude_2, distance_a, distance_b]
for LOAD_DISTRIBUTION_CONCENTRATED_VARYING:
load_parameter = [[distance, delta_distance, magnitude], ...]
for LOAD_DISTRIBUTION_TRAPEZOIDAL:
load_parameter = [relative_distance_a = False, relative_distance_b = False,magnitude_1, magnitude_2, distance_a, distance_b]
for LOAD_DISTRIBUTION_TAPERED:
load_parameter = [relative_distance_a = False, relative_distance_b = False,magnitude_1, magnitude_2, distance_a, distance_b]
for LOAD_DISTRIBUTION_PARABOLIC:
load_parameter = [magnitude_1, magnitude_2, magnitude_3]
for LOAD_DISTRIBUTION_VARYING:
load_parameter = [[distance, delta_distance, magnitude], ...]
"""
# Client model | Member Load
clientObject = clientModel.factory.create('ns0:member_set_load')
# Clears object atributes | Sets all atributes to None
clearAtributes(clientObject)
# Member Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Members No. (e.g. '5 6 7 12')
clientObject.member_sets = ConvertToDlString(member_sets)
# Member Load Type
load_type = MemberSetLoadType.LOAD_TYPE_MOMENT
clientObject.load_type = load_type.name
# Member Load Distribution
clientObject.load_distribution= load_distribution.name
#Load Magnitude and Parameters
if load_distribution.name == "LOAD_DISTRIBUTION_UNIFORM":
try:
len(load_parameter)==1
except:
raise Exception("WARNING: Load parameter array length should be 1 for LOAD_DISTRIBUTION_UNIFORM. Kindly check list inputs completeness and correctness.")
clientObject.magnitude = load_parameter[0]
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_1":
try:
len(load_parameter)==3
except:
raise Exception("WARNING: Load parameter array length should be 3 for LOAD_DISTRIBUTION_CONCENTRATED_1. Kindly check list inputs completeness and correctness.")
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
if load_parameter[0] == False:
clientObject.magnitude = load_parameter[1]
clientObject.distance_a_absolute = load_parameter[2]
else:
clientObject.magnitude = load_parameter[1]
clientObject.distance_a_relative = load_parameter[2]
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_N":
try:
len(load_parameter)==6
except:
raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_CONCENTRATED_N. Kindly check list inputs completeness and correctness.")
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude = load_parameter[2]
clientObject.count_n = load_parameter[3]
if load_parameter[0] == False:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if load_parameter[1] == False:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_2x2":
try:
len(load_parameter)==7
except:
raise Exception("WARNING: Load parameter array length should be 7 for LOAD_DISTRIBUTION_CONCENTRATED_2x2. Kindly check list inputs completeness and correctness.")
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.distance_c_is_defined_as_relative = load_parameter[2]
clientObject.magnitude = load_parameter[3]
if load_parameter[0] == False:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if load_parameter[1] == False:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
if load_parameter[2] == False:
clientObject.distance_c_absolute = load_parameter[6]
else:
clientObject.distance_c_relative = load_parameter[6]
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_2":
try:
len(load_parameter)==6
except:
raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_CONCENTRATED_2. Kindly check list inputs completeness and correctness.")
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude_1 = load_parameter[2]
clientObject.magnitude_2 = load_parameter[3]
if load_parameter[0] == False:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if load_parameter[1] == False:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_VARYING":
try:
len(load_parameter[0])==3
except:
print("WARNING: MemberSetLoad no: %x, load case: %x - Wrong data input." % (no, load_case_no))
clientObject.varying_load_parameters = clientModel.factory.create('ns0:member_set_load.varying_load_parameters')
for i in range(len(load_parameter)):
mlvlp = clientModel.factory.create('ns0:member_set_load_varying_load_parameters')
mlvlp.no = i+1
mlvlp.distance = load_parameter[i][0]
mlvlp.delta_distance = load_parameter[i][1]
mlvlp.magnitude = load_parameter[i][2]
mlvlp.note = None
mlvlp.magnitude_t_c = 0.0
mlvlp.magnitude_delta_t = 0.0
mlvlp.magnitude_t_t = 0.0
mlvlp.magnitude_t_b = 0.0
clientObject.varying_load_parameters.member_set_load_varying_load_parameters.append(mlvlp)
elif load_distribution.name == "LOAD_DISTRIBUTION_TRAPEZOIDAL":
try:
len(load_parameter)==6
except:
raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_TRAPEZOIDAL. Kindly check list inputs completeness and correctness.")
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude_1 = load_parameter[2]
clientObject.magnitude_2 = load_parameter[3]
if load_parameter[0] == False:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if load_parameter[1] == False:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
elif load_distribution.name == "LOAD_DISTRIBUTION_TAPERED":
try:
len(load_parameter)==4
except:
raise Exception("WARNING: Load parameter array length should be 4 for LOAD_DISTRIBUTION_TAPERED. Kindly check list inputs completeness and correctness.")
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude_1 = load_parameter[2]
clientObject.magnitude_2 = load_parameter[3]
if load_parameter[0] == False:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if load_parameter[1] == False:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
elif load_distribution.name == "LOAD_DISTRIBUTION_PARABOLIC":
try:
len(load_parameter)==3
except:
raise Exception("WARNING: Load parameter array length should be 3 for LOAD_DISTRIBUTION_PARABOLIC. Kindly check list inputs completeness and correctness.")
clientObject.magnitude_1 = load_parameter[0]
clientObject.magnitude_2 = load_parameter[1]
clientObject.magnitude_3 = load_parameter[2]
elif load_distribution.name == "LOAD_DISTRIBUTION_VARYING":
try:
len(load_parameter[0])==3
except:
print("WARNING: MemberSetLoad no: %x, load case: %x - Wrong data input." % (no, load_case_no))
clientObject.varying_load_parameters = clientModel.factory.create('ns0:member_set_load.varying_load_parameters')
for i in range(len(load_parameter)):
mlvlp = clientModel.factory.create('ns0:member_set_load_varying_load_parameters')
mlvlp.no = i+1
mlvlp.distance = load_parameter[i][0]
mlvlp.delta_distance = load_parameter[i][1]
mlvlp.magnitude = load_parameter[i][2]
mlvlp.note = None
mlvlp.magnitude_t_c = 0.0
mlvlp.magnitude_delta_t = 0.0
mlvlp.magnitude_t_t = 0.0
mlvlp.magnitude_t_b = 0.0
clientObject.varying_load_parameters.member_set_load_varying_load_parameters.append(mlvlp)
# Member Load Direction
clientObject.load_direction = load_direction.name
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Load Member Load to client model
clientModel.service.set_member_set_load(load_case_no, clientObject)
def Mass(self,
no: int = 1,
load_case_no: int = 1,
member_sets: str = '1',
individual_mass_components: bool=False,
mass_components = [],
comment: str = '',
params: dict = {}):
"""
Args:
no (int): Load Tag
load_case_no (int): Assigned Load Case
member_sets (str): Assigned Member Sets
individual_mass_components (bool): Individiual Mass Components Option
mass_components (list): Mass Components
comment (str, optional): Comment
params (dict, optional): Parameters
"""
# Client model | Member Load
clientObject = clientModel.factory.create('ns0:member_set_load')
# Clears object atributes | Sets all atributes to None
clearAtributes(clientObject)
# Member Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Members No. (e.g. '5 6 7 12')
clientObject.member_sets = ConvertToDlString(member_sets)
# Member Load Type
clientObject.load_type = MemberSetLoadType.E_TYPE_MASS.name
# Member Load Distribution
clientObject.load_distribution= MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM.name
# Individual Mass Components
if type(individual_mass_components) == bool:
pass
else:
raise Exception("WARNING: Type of individual mass components should be bool. Kindly check inputs correctness.")
clientObject.individual_mass_components = individual_mass_components
# Mass magnitude
if individual_mass_components == False:
clientObject.mass_global = mass_components[0]
else:
clientObject.mass_x = mass_components[0]
clientObject.mass_y = mass_components[1]
clientObject.mass_z = mass_components[2]
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Load Member Load to client model
clientModel.service.set_member_set_load(load_case_no, clientObject)
def Temperature(self,
no: int = 1,
load_case_no: int = 1,
member_sets: str = '1',
load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
load_direction = MemberSetLoadDirection.LOAD_DIRECTION_LOCAL_Z,
load_parameter = [],
load_over_total_length: bool= False,
comment: str = '',
params: dict = {}):
"""
Args:
no (int): Load Tag
load_case_no (int): Assigned Load Case
member_sets (str): Assigned Member Sets
load_distribution (enum): Load Distribution Enumeration
load_direction (enum): Load Direction Enumeration
load_parameter (list): Load Parameters
load_over_total_length (bool): Load Over Total Length Option
comment (str, optional): Comment
params (dict, optional): Parameters
for load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM:
load_parameter = [tt, tb]
for load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_TRAPEZIODAL:
for load_over_total_length: bool= False:
load_parameter = [tt1, tt2, tb1, tb2, distance_a_relative = False, distance_a_relative = False, a_distance, b_distance]
for load_over_total_length: bool= True:
load_parameter = [tt1, tt2, tb1, tb2]
for load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_TAPERED:
load_parameter = [tt1, tt2, tb1, tb2, distance_a_relative = False, distance_a_relative = False, a_distance, b_distance]
for load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_PARABOLIC:
load_parameter = [tb1, tb2, tb3, tt1, tt2, tt3]
for load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_VARYING:
load_parameter = [[distance, delta_distance, magnitude], ...]
"""
# Client model | Member Load
clientObject = clientModel.factory.create('ns0:member_set_load')
# Clears object atributes | Sets all atributes to None
clearAtributes(clientObject)
# Member Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Members No. (e.g. '5 6 7 12')
clientObject.member_sets = ConvertToDlString(member_sets)
# Member Load Type
load_type = MemberSetLoadType.LOAD_TYPE_TEMPERATURE
clientObject.load_type = load_type.name
# Member Load Distribution
clientObject.load_distribution = load_distribution.name
# Member Load Direction
clientObject.load_direction = load_direction.name
#Load Magnitude
if load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM:
try:
len(load_parameter)==2
except:
raise Exception("WARNING: Load parameter array length should be 2 for LOAD_DISTRIBUTION_UNIFORM. Kindly check list inputs completeness and correctness.")
clientObject.magnitude_t_b = load_parameter[0]
clientObject.magnitude_t_t = load_parameter[1]
elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_TRAPEZOIDAL:
try:
len(load_parameter)==8
except:
raise Exception("WARNING: Load parameter array length should be 8 for LOAD_DISTRIBUTION_TRAPEZOIDAL. Kindly check list inputs completeness and correctness.")
clientObject.magnitude_t_b_1 = load_parameter[0]
clientObject.magnitude_t_b_2 = load_parameter[1]
clientObject.magnitude_t_t_1 = load_parameter[2]
clientObject.magnitude_t_t_2 = load_parameter[3]
if type(load_over_total_length) == bool:
pass
else:
raise Exception("WARNING: Type of load over total length should be bool. Kindly check inputs correctness.")
if load_over_total_length == False:
if load_parameter[4] == True:
clientObject.distance_a_is_defined_as_relative = True
clientObject.distance_a_relative = load_parameter[6]
else:
clientObject.distance_a_is_defined_as_relative = False
clientObject.distance_a_absolute = load_parameter[6]
if load_parameter[5] == True:
clientObject.distance_b_is_defined_as_relative = True
clientObject.distance_b_relative = load_parameter[7]
else:
clientObject.distance_b_is_defined_as_relative = False
clientObject.distance_b_absolute = load_parameter[7]
else:
clientObject.load_is_over_total_length = True
elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_TAPERED:
try:
len(load_parameter)==8
except:
raise Exception("WARNING: Load parameter array length should be 8 for LOAD_DISTRIBUTION_TAPERED. Kindly check list inputs completeness and correctness.")
clientObject.magnitude_t_b_1 = load_parameter[0]
clientObject.magnitude_t_b_2 = load_parameter[1]
clientObject.magnitude_t_t_1 = load_parameter[2]
clientObject.magnitude_t_t_2 = load_parameter[3]
if type(load_parameter[4]) == bool:
pass
else:
raise Exception("WARNING: Type of the fourth load parameter should be bool. Kindly check inputs correctness.")
if load_parameter[4] == True:
clientObject.distance_a_is_defined_as_relative = True
clientObject.distance_a_relative = load_parameter[6]
else:
clientObject.distance_a_is_defined_as_relative = False
clientObject.distance_a_absolute = load_parameter[6]
if type(load_parameter[5]) == bool:
pass
else:
raise Exception("WARNING: Type of the fifth load parameter should be bool. Kindly check inputs correctness.")
if load_parameter[5] == True:
clientObject.distance_b_is_defined_as_relative = True
clientObject.distance_b_relative = load_parameter[7]
else:
clientObject.distance_b_is_defined_as_relative = False
clientObject.distance_b_absolute = load_parameter[7]
elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_PARABOLIC:
try:
len(load_parameter)==6
except:
raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_PARABOLIC. Kindly check list inputs completeness and correctness.")
clientObject.magnitude_t_b_1 = load_parameter[0]
clientObject.magnitude_t_b_2 = load_parameter[1]
clientObject.magnitude_t_b_3 = load_parameter[2]
clientObject.magnitude_t_t_1 = load_parameter[3]
clientObject.magnitude_t_t_2 = load_parameter[4]
clientObject.magnitude_t_t_3 = load_parameter[5]
elif load_distribution.name == "LOAD_DISTRIBUTION_VARYING":
try:
len(load_parameter[0])==4
except:
print("WARNING: MemberSetLoad no: %x, load case: %x - Wrong data input." % (no, load_case_no))
clientObject.varying_load_parameters = clientModel.factory.create('ns0:member_set_load.varying_load_parameters')
for i in range(len(load_parameter)):
mlvlp = clientModel.factory.create('ns0:member_set_load_varying_load_parameters')
mlvlp.no = i+1
mlvlp.distance = load_parameter[i][0]
mlvlp.delta_distance = load_parameter[i][1]
mlvlp.magnitude = load_parameter[i][2]
mlvlp.note = None
mlvlp.magnitude_t_c = load_parameter[i][2]
mlvlp.magnitude_delta_t = load_parameter[i][3]
mlvlp.magnitude_t_t = load_parameter[i][2]
mlvlp.magnitude_t_b = load_parameter[i][3]
clientObject.varying_load_parameters.member_set_load_varying_load_parameters.append(mlvlp)
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Load Member Load to client model
clientModel.service.set_member_set_load(load_case_no, clientObject)
def TemperatureChange(self,
                      no: int = 1,
                      load_case_no: int = 1,
                      member_sets: str = '1',
                      load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
                      load_direction = MemberSetLoadDirection.LOAD_DIRECTION_LOCAL_Z,
                      load_parameter = [],
                      load_over_total_length: bool = False,
                      comment: str = '',
                      params: dict = {}):
    """
    Adds a temperature-change load to the given member sets.

    Args:
        no (int): Load Tag
        load_case_no (int): Assigned Load Case
        member_sets (str): Assigned Member Sets (e.g. '5 6 7 12')
        load_distribution (enum): Load Distribution Enumeration
        load_direction (enum): Load Direction Enumeration
        load_parameter (list): Load Parameters; contents depend on load_distribution:
            for LOAD_DISTRIBUTION_UNIFORM:
                load_parameter = [delta_t, t_c]
            for LOAD_DISTRIBUTION_TRAPEZOIDAL with load_over_total_length == False:
                load_parameter = [delta_t_1, delta_t_2, t_c_1, t_c_2,
                                  distance_a_is_relative, distance_b_is_relative,
                                  a_distance, b_distance]
            for LOAD_DISTRIBUTION_TRAPEZOIDAL with load_over_total_length == True:
                load_parameter = [delta_t_1, delta_t_2, t_c_1, t_c_2]
            for LOAD_DISTRIBUTION_TAPERED:
                load_parameter = [delta_t_1, delta_t_2, t_c_1, t_c_2,
                                  distance_a_is_relative, distance_b_is_relative,
                                  a_distance, b_distance]
            for LOAD_DISTRIBUTION_PARABOLIC:
                load_parameter = [delta_t_1, delta_t_2, delta_t_3, t_c_1, t_c_2, t_c_3]
            for LOAD_DISTRIBUTION_VARYING:
                load_parameter = [[distance, delta_distance, magnitude_t_c, magnitude_delta_t], ...]
        load_over_total_length (bool): Load Over Total Length Option
        comment (str, optional): Comment
        params (dict, optional): Extra attributes assigned verbatim onto the client object

    Raises:
        Exception: If load_parameter is too short for the chosen distribution,
            or load_over_total_length is not a bool.
    """
    # Client model | Member Set Load
    clientObject = clientModel.factory.create('ns0:member_set_load')

    # Clears object attributes | Sets all attributes to None
    clearAtributes(clientObject)

    # Member Load No.
    clientObject.no = no

    # Load Case No.
    clientObject.load_case = load_case_no

    # Member Sets No. (e.g. '5 6 7 12')
    clientObject.member_sets = ConvertToDlString(member_sets)

    # Member Load Type
    clientObject.load_type = MemberSetLoadType.LOAD_TYPE_TEMPERATURE_CHANGE.name

    # Member Load Distribution
    clientObject.load_distribution = load_distribution.name

    # Member Load Direction
    clientObject.load_direction = load_direction.name

    # Load Magnitude.
    # BUGFIX: the original "validation" was `try: len(load_parameter)==N except: raise`.
    # The comparison result was discarded and len() on a list never raises, so the
    # checks were dead code. They are replaced with explicit length tests (minimum
    # lengths, so callers passing extra trailing values keep working).
    if load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM:
        if len(load_parameter) < 2:
            raise Exception("WARNING: Load parameter array length should be 2 for LOAD_DISTRIBUTION_UNIFORM. Kindly check list inputs completeness and correctness.")
        clientObject.magnitude_delta_t = load_parameter[0]
        clientObject.magnitude_t_c = load_parameter[1]

    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_TRAPEZOIDAL:
        if not isinstance(load_over_total_length, bool):
            raise Exception("WARNING: Type of the load over total length should be bool. Kindly check inputs correctness.")
        # Only 4 parameters are needed when the load spans the total length;
        # the distance entries [4..7] are consumed in the partial-length case only.
        if load_over_total_length:
            if len(load_parameter) < 4:
                raise Exception("WARNING: Load parameter array length should be 4 for LOAD_DISTRIBUTION_TRAPEZOIDAL over total length. Kindly check list inputs completeness and correctness.")
        elif len(load_parameter) < 8:
            raise Exception("WARNING: Load parameter array length should be 8 for LOAD_DISTRIBUTION_TRAPEZOIDAL. Kindly check list inputs completeness and correctness.")
        clientObject.magnitude_delta_t_1 = load_parameter[0]
        clientObject.magnitude_delta_t_2 = load_parameter[1]
        clientObject.magnitude_t_c_1 = load_parameter[2]
        clientObject.magnitude_t_c_2 = load_parameter[3]
        if load_over_total_length:
            clientObject.load_is_over_total_length = True
        else:
            # Flags [4]/[5] choose relative vs. absolute for distances [6]/[7].
            if load_parameter[4]:
                clientObject.distance_a_is_defined_as_relative = True
                clientObject.distance_a_relative = load_parameter[6]
            else:
                clientObject.distance_a_is_defined_as_relative = False
                clientObject.distance_a_absolute = load_parameter[6]
            if load_parameter[5]:
                clientObject.distance_b_is_defined_as_relative = True
                clientObject.distance_b_relative = load_parameter[7]
            else:
                clientObject.distance_b_is_defined_as_relative = False
                clientObject.distance_b_absolute = load_parameter[7]

    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_TAPERED:
        if len(load_parameter) < 8:
            raise Exception("WARNING: Load parameter array length should be 8 for LOAD_DISTRIBUTION_TAPERED. Kindly check list inputs completeness and correctness.")
        clientObject.magnitude_delta_t_1 = load_parameter[0]
        clientObject.magnitude_delta_t_2 = load_parameter[1]
        clientObject.magnitude_t_c_1 = load_parameter[2]
        clientObject.magnitude_t_c_2 = load_parameter[3]
        if load_parameter[4]:
            clientObject.distance_a_is_defined_as_relative = True
            clientObject.distance_a_relative = load_parameter[6]
        else:
            clientObject.distance_a_is_defined_as_relative = False
            clientObject.distance_a_absolute = load_parameter[6]
        if load_parameter[5]:
            clientObject.distance_b_is_defined_as_relative = True
            clientObject.distance_b_relative = load_parameter[7]
        else:
            clientObject.distance_b_is_defined_as_relative = False
            clientObject.distance_b_absolute = load_parameter[7]

    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_PARABOLIC:
        if len(load_parameter) < 6:
            raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_PARABOLIC. Kindly check list inputs completeness and correctness.")
        clientObject.magnitude_delta_t_1 = load_parameter[0]
        clientObject.magnitude_delta_t_2 = load_parameter[1]
        clientObject.magnitude_delta_t_3 = load_parameter[2]
        clientObject.magnitude_t_c_1 = load_parameter[3]
        clientObject.magnitude_t_c_2 = load_parameter[4]
        clientObject.magnitude_t_c_3 = load_parameter[5]

    elif load_distribution.name == "LOAD_DISTRIBUTION_VARYING":
        # Keep the original best-effort behaviour: warn (don't raise) on bad rows.
        # %d replaces the original %x, which printed the ids in hex.
        if not load_parameter or len(load_parameter[0]) < 4:
            print("WARNING: MemberSetLoad no: %d, load case: %d - Wrong data input." % (no, load_case_no))
        clientObject.varying_load_parameters = clientModel.factory.create('ns0:member_set_load.varying_load_parameters')
        for i in range(len(load_parameter)):
            mlvlp = clientModel.factory.create('ns0:member_set_load_varying_load_parameters')
            mlvlp.no = i + 1
            mlvlp.distance = load_parameter[i][0]
            mlvlp.delta_distance = load_parameter[i][1]
            mlvlp.magnitude = load_parameter[i][2]
            mlvlp.note = None
            mlvlp.magnitude_t_c = load_parameter[i][2]
            mlvlp.magnitude_delta_t = load_parameter[i][3]
            mlvlp.magnitude_t_t = load_parameter[i][2]
            mlvlp.magnitude_t_b = load_parameter[i][3]
            clientObject.varying_load_parameters.member_set_load_varying_load_parameters.append(mlvlp)

    # Comment
    clientObject.comment = comment

    # Adding optional parameters via dictionary
    for key in params:
        clientObject[key] = params[key]

    # Add Member Set Load to client model
    clientModel.service.set_member_set_load(load_case_no, clientObject)
def AxialStrain(self,
                no: int = 1,
                load_case_no: int = 1,
                member_sets: str = '1',
                load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
                load_direction = MemberSetLoadDirection.LOAD_DIRECTION_LOCAL_X,
                load_parameter = [],
                load_over_total_length: bool = False,
                comment: str = '',
                params: dict = {}):
    """
    Adds an axial-strain load to the given member sets.

    Args:
        no (int): Load Tag
        load_case_no (int): Assigned Load Case
        member_sets (str): Assigned Member Sets (e.g. '5 6 7 12')
        load_distribution (enum): Load Distribution Enumeration
        load_direction (enum): Load Direction Enumeration
        load_parameter (list): Load Parameters; contents depend on load_distribution:
            for LOAD_DISTRIBUTION_UNIFORM:
                load_parameter = [epsilon]
            for LOAD_DISTRIBUTION_TRAPEZOIDAL with load_over_total_length == False:
                load_parameter = [epsilon1, epsilon2,
                                  distance_a_is_relative, distance_b_is_relative,
                                  a_distance, b_distance]
            for LOAD_DISTRIBUTION_TRAPEZOIDAL with load_over_total_length == True:
                load_parameter = [epsilon1, epsilon2]
            for LOAD_DISTRIBUTION_TAPERED:
                load_parameter = [epsilon1, epsilon2,
                                  distance_a_is_relative, distance_b_is_relative,
                                  a_distance, b_distance]
            for LOAD_DISTRIBUTION_PARABOLIC:
                load_parameter = [epsilon1, epsilon2, epsilon3]
            for LOAD_DISTRIBUTION_VARYING:
                load_parameter = [[distance, delta_distance, magnitude], ...]
        load_over_total_length (bool): Load Over Total Length Option
        comment (str, optional): Comment
        params (dict, optional): Extra attributes assigned verbatim onto the client object

    Raises:
        Exception: If load_parameter is too short for the chosen distribution,
            or load_over_total_length is not a bool.
    """
    # Client model | Member Set Load
    clientObject = clientModel.factory.create('ns0:member_set_load')

    # Clears object attributes | Sets all attributes to None
    clearAtributes(clientObject)

    # Member Load No.
    clientObject.no = no

    # Load Case No.
    clientObject.load_case = load_case_no

    # Member Sets No. (e.g. '5 6 7 12')
    clientObject.member_sets = ConvertToDlString(member_sets)

    # Member Load Type
    clientObject.load_type = MemberSetLoadType.LOAD_TYPE_AXIAL_STRAIN.name

    # Member Load Distribution
    clientObject.load_distribution = load_distribution.name

    # Member Load Direction
    clientObject.load_direction = load_direction.name

    # Load Magnitude.
    # BUGFIX: the original "validation" was `try: len(load_parameter)==N except: raise`.
    # The comparison result was discarded and len() on a list never raises, so the
    # checks were dead code. They are replaced with explicit length tests (minimum
    # lengths, so callers passing extra trailing values keep working).
    if load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM:
        if len(load_parameter) < 1:
            raise Exception("WARNING: Load parameter array length should be 1 for LOAD_DISTRIBUTION_UNIFORM. Kindly check list inputs completeness and correctness.")
        clientObject.magnitude = load_parameter[0]

    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_TRAPEZOIDAL:
        if not isinstance(load_over_total_length, bool):
            raise Exception("WARNING: Type of the load over total length should be bool. Kindly check inputs correctness.")
        # Entries [2..5] are only consumed in the partial-length case.
        if load_over_total_length:
            if len(load_parameter) < 2:
                raise Exception("WARNING: Load parameter array length should be 2 for LOAD_DISTRIBUTION_TRAPEZOIDAL over total length. Kindly check list inputs completeness and correctness.")
        elif len(load_parameter) < 6:
            raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_TRAPEZOIDAL. Kindly check list inputs completeness and correctness.")
        clientObject.magnitude_1 = load_parameter[0]
        clientObject.magnitude_2 = load_parameter[1]
        if load_over_total_length:
            clientObject.load_is_over_total_length = True
        else:
            # Flags [2]/[3] choose relative vs. absolute for distances [4]/[5].
            if load_parameter[2]:
                clientObject.distance_a_is_defined_as_relative = True
                clientObject.distance_a_relative = load_parameter[4]
            else:
                clientObject.distance_a_is_defined_as_relative = False
                clientObject.distance_a_absolute = load_parameter[4]
            if load_parameter[3]:
                clientObject.distance_b_is_defined_as_relative = True
                clientObject.distance_b_relative = load_parameter[5]
            else:
                clientObject.distance_b_is_defined_as_relative = False
                clientObject.distance_b_absolute = load_parameter[5]

    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_TAPERED:
        if len(load_parameter) < 6:
            raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_TAPERED. Kindly check list inputs completeness and correctness.")
        clientObject.magnitude_1 = load_parameter[0]
        clientObject.magnitude_2 = load_parameter[1]
        if load_parameter[2]:
            clientObject.distance_a_is_defined_as_relative = True
            clientObject.distance_a_relative = load_parameter[4]
        else:
            clientObject.distance_a_is_defined_as_relative = False
            clientObject.distance_a_absolute = load_parameter[4]
        if load_parameter[3]:
            clientObject.distance_b_is_defined_as_relative = True
            clientObject.distance_b_relative = load_parameter[5]
        else:
            clientObject.distance_b_is_defined_as_relative = False
            clientObject.distance_b_absolute = load_parameter[5]

    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_PARABOLIC:
        if len(load_parameter) < 3:
            raise Exception("WARNING: Load parameter array length should be 3 for LOAD_DISTRIBUTION_PARABOLIC. Kindly check list inputs completeness and correctness.")
        clientObject.magnitude_1 = load_parameter[0]
        clientObject.magnitude_2 = load_parameter[1]
        clientObject.magnitude_3 = load_parameter[2]

    elif load_distribution.name == "LOAD_DISTRIBUTION_VARYING":
        # Keep the original best-effort behaviour: warn (don't raise) on bad rows.
        # %d replaces the original %x, which printed the ids in hex.
        if not load_parameter or len(load_parameter[0]) < 3:
            print("WARNING: MemberSetLoad no: %d, load case: %d - Wrong data input." % (no, load_case_no))
        clientObject.varying_load_parameters = clientModel.factory.create('ns0:member_set_load.varying_load_parameters')
        for i in range(len(load_parameter)):
            mlvlp = clientModel.factory.create('ns0:member_set_load_varying_load_parameters')
            mlvlp.no = i + 1
            mlvlp.distance = load_parameter[i][0]
            mlvlp.delta_distance = load_parameter[i][1]
            mlvlp.magnitude = load_parameter[i][2]
            mlvlp.note = None
            mlvlp.magnitude_t_c = 0.0
            mlvlp.magnitude_delta_t = 0.0
            mlvlp.magnitude_t_t = 0.0
            mlvlp.magnitude_t_b = 0.0
            clientObject.varying_load_parameters.member_set_load_varying_load_parameters.append(mlvlp)

    # Comment
    clientObject.comment = comment

    # Adding optional parameters via dictionary
    for key in params:
        clientObject[key] = params[key]

    # Add Member Set Load to client model
    clientModel.service.set_member_set_load(load_case_no, clientObject)
def AxialDisplacement(self,
                      no: int = 1,
                      load_case_no: int = 1,
                      member_sets: str = '1',
                      load_direction = MemberSetLoadDirection.LOAD_DIRECTION_LOCAL_X,
                      magnitude : float = 0.0,
                      comment: str = '',
                      params: dict = {}):
    """
    Assigns a uniform axial-displacement load to the given member sets.

    Args:
        no (int): Load Tag
        load_case_no (int): Assigned Load Case
        member_sets (str): Assigned Member Set (e.g. '5 6 7 12')
        load_direction (enum): Load Direction Enumeration
        magnitude (float): Load Magnitude
        comment (str, optional): Comments
        params (dict, optional): Extra attributes assigned verbatim onto the client object
    """
    # Build a fresh member-set load object and blank out every attribute.
    clientObject = clientModel.factory.create('ns0:member_set_load')
    clearAtributes(clientObject)

    # Identification and assignment.
    clientObject.no = no
    clientObject.load_case = load_case_no
    clientObject.member_sets = ConvertToDlString(member_sets)

    # Axial displacement is always applied as a uniform distribution.
    clientObject.load_type = MemberSetLoadType.LOAD_TYPE_AXIAL_DISPLACEMENT.name
    clientObject.load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM.name
    clientObject.load_direction = load_direction.name
    clientObject.magnitude = magnitude

    clientObject.comment = comment

    # Forward any extra attributes verbatim.
    for key, value in params.items():
        clientObject[key] = value

    # Hand the finished load over to the model.
    clientModel.service.set_member_set_load(load_case_no, clientObject)
def Precamber(self,
              no: int = 1,
              load_case_no: int = 1,
              member_sets: str = '1',
              load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
              load_direction = MemberSetLoadDirection.LOAD_DIRECTION_LOCAL_Z,
              load_parameter = [],
              load_over_total_length: bool = False,
              comment: str = '',
              params: dict = {}):
    """
    Adds a precamber load to the given member sets.

    Args:
        no (int): Load Tag
        load_case_no (int): Assigned Load Case
        member_sets (str): Assigned Member Sets (e.g. '5 6 7 12')
        load_distribution (enum): Load Distribution Enumeration
        load_direction (enum): Load Direction Enumeration
        load_parameter (list): Load Parameters; contents depend on load_distribution:
            for LOAD_DISTRIBUTION_UNIFORM:
                load_parameter = [magnitude]
            for LOAD_DISTRIBUTION_TRAPEZOIDAL with load_over_total_length == False:
                load_parameter = [magnitude_1, magnitude_2,
                                  distance_a_is_relative, distance_b_is_relative,
                                  a_distance, b_distance]
            for LOAD_DISTRIBUTION_TRAPEZOIDAL with load_over_total_length == True:
                load_parameter = [magnitude_1, magnitude_2]
            for LOAD_DISTRIBUTION_TAPERED:
                load_parameter = [magnitude_1, magnitude_2,
                                  distance_a_is_relative, distance_b_is_relative,
                                  a_distance, b_distance]
            for LOAD_DISTRIBUTION_PARABOLIC:
                load_parameter = [magnitude_1, magnitude_2, magnitude_3]
            for LOAD_DISTRIBUTION_VARYING:
                load_parameter = [[distance, delta_distance, magnitude], ...]
        load_over_total_length (bool): Load Over Total Length Option
        comment (str, optional): Comment
        params (dict, optional): Extra attributes assigned verbatim onto the client object

    Raises:
        Exception: If load_parameter is too short for the chosen distribution,
            or load_over_total_length is not a bool.
    """
    # Client model | Member Set Load
    clientObject = clientModel.factory.create('ns0:member_set_load')

    # Clears object attributes | Sets all attributes to None
    clearAtributes(clientObject)

    # Member Load No.
    clientObject.no = no

    # Load Case No.
    clientObject.load_case = load_case_no

    # Member Sets No. (e.g. '5 6 7 12')
    clientObject.member_sets = ConvertToDlString(member_sets)

    # Member Load Type
    clientObject.load_type = MemberSetLoadType.LOAD_TYPE_PRECAMBER.name

    # Member Load Distribution
    clientObject.load_distribution = load_distribution.name

    # Member Load Direction
    clientObject.load_direction = load_direction.name

    # Load Magnitude.
    # BUGFIX: the original "validation" was `try: len(load_parameter)==N except: raise`.
    # The comparison result was discarded and len() on a list never raises, so the
    # checks were dead code. They are replaced with explicit length tests (minimum
    # lengths, so callers passing extra trailing values keep working).
    if load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM:
        if len(load_parameter) < 1:
            raise Exception("WARNING: Load parameter array length should be 1 for LOAD_DISTRIBUTION_UNIFORM. Kindly check list inputs completeness and correctness.")
        clientObject.magnitude = load_parameter[0]

    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_TRAPEZOIDAL:
        if not isinstance(load_over_total_length, bool):
            raise Exception("WARNING: Type of the load over total length should be bool. Kindly check inputs correctness.")
        # Entries [2..5] are only consumed in the partial-length case.
        if load_over_total_length:
            if len(load_parameter) < 2:
                raise Exception("WARNING: Load parameter array length should be 2 for LOAD_DISTRIBUTION_TRAPEZOIDAL over total length. Kindly check list inputs completeness and correctness.")
        elif len(load_parameter) < 6:
            raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_TRAPEZOIDAL. Kindly check list inputs completeness and correctness.")
        clientObject.magnitude_1 = load_parameter[0]
        clientObject.magnitude_2 = load_parameter[1]
        if load_over_total_length:
            clientObject.load_is_over_total_length = True
        else:
            # Flags [2]/[3] choose relative vs. absolute for distances [4]/[5].
            if load_parameter[2]:
                clientObject.distance_a_is_defined_as_relative = True
                clientObject.distance_a_relative = load_parameter[4]
            else:
                clientObject.distance_a_is_defined_as_relative = False
                clientObject.distance_a_absolute = load_parameter[4]
            if load_parameter[3]:
                clientObject.distance_b_is_defined_as_relative = True
                clientObject.distance_b_relative = load_parameter[5]
            else:
                clientObject.distance_b_is_defined_as_relative = False
                clientObject.distance_b_absolute = load_parameter[5]

    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_TAPERED:
        if len(load_parameter) < 6:
            raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_TAPERED. Kindly check list inputs completeness and correctness.")
        clientObject.magnitude_1 = load_parameter[0]
        clientObject.magnitude_2 = load_parameter[1]
        if load_parameter[2]:
            clientObject.distance_a_is_defined_as_relative = True
            clientObject.distance_a_relative = load_parameter[4]
        else:
            clientObject.distance_a_is_defined_as_relative = False
            clientObject.distance_a_absolute = load_parameter[4]
        if load_parameter[3]:
            clientObject.distance_b_is_defined_as_relative = True
            clientObject.distance_b_relative = load_parameter[5]
        else:
            clientObject.distance_b_is_defined_as_relative = False
            clientObject.distance_b_absolute = load_parameter[5]

    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_PARABOLIC:
        if len(load_parameter) < 3:
            raise Exception("WARNING: Load parameter array length should be 3 for LOAD_DISTRIBUTION_PARABOLIC. Kindly check list inputs completeness and correctness.")
        clientObject.magnitude_1 = load_parameter[0]
        clientObject.magnitude_2 = load_parameter[1]
        clientObject.magnitude_3 = load_parameter[2]

    elif load_distribution.name == "LOAD_DISTRIBUTION_VARYING":
        # Keep the original best-effort behaviour: warn (don't raise) on bad rows.
        # %d replaces the original %x, which printed the ids in hex.
        if not load_parameter or len(load_parameter[0]) < 3:
            print("WARNING: MemberSetLoad no: %d, load case: %d - Wrong data input." % (no, load_case_no))
        clientObject.varying_load_parameters = clientModel.factory.create('ns0:member_set_load.varying_load_parameters')
        for i in range(len(load_parameter)):
            mlvlp = clientModel.factory.create('ns0:member_set_load_varying_load_parameters')
            mlvlp.no = i + 1
            mlvlp.distance = load_parameter[i][0]
            mlvlp.delta_distance = load_parameter[i][1]
            mlvlp.magnitude = load_parameter[i][2]
            mlvlp.note = None
            mlvlp.magnitude_t_c = 0.0
            mlvlp.magnitude_delta_t = 0.0
            mlvlp.magnitude_t_t = 0.0
            mlvlp.magnitude_t_b = 0.0
            clientObject.varying_load_parameters.member_set_load_varying_load_parameters.append(mlvlp)

    # Comment
    clientObject.comment = comment

    # Adding optional parameters via dictionary
    for key in params:
        clientObject[key] = params[key]

    # Add Member Set Load to client model
    clientModel.service.set_member_set_load(load_case_no, clientObject)
def InitialPrestress(self,
                     no: int = 1,
                     load_case_no: int = 1,
                     member_sets: str = '1',
                     load_direction = MemberSetLoadDirection.LOAD_DIRECTION_LOCAL_X,
                     magnitude : float = 0.0,
                     comment: str = '',
                     params: dict = {}):
    """
    Assigns a uniform initial-prestress load to the given member sets.

    Args:
        no (int): Load Tag
        load_case_no (int): Assigned Load Case
        member_sets (str): Assigned Member Sets (e.g. '5 6 7 12')
        load_direction (enum): Load Direction Enumeration
        magnitude (float): Load Magnitude
        comment (str, optional): Comment
        params (dict, optional): Extra attributes assigned verbatim onto the client object
    """
    # Build a fresh member-set load object and blank out every attribute.
    clientObject = clientModel.factory.create('ns0:member_set_load')
    clearAtributes(clientObject)

    # Identification and assignment.
    clientObject.no = no
    clientObject.load_case = load_case_no
    clientObject.member_sets = ConvertToDlString(member_sets)

    # Initial prestress is always applied as a uniform distribution.
    clientObject.load_type = MemberSetLoadType.LOAD_TYPE_INITIAL_PRESTRESS.name
    clientObject.load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM.name
    clientObject.load_direction = load_direction.name
    clientObject.magnitude = magnitude

    clientObject.comment = comment

    # Forward any extra attributes verbatim.
    for key, value in params.items():
        clientObject[key] = value

    # Hand the finished load over to the model.
    clientModel.service.set_member_set_load(load_case_no, clientObject)
def Displacement(self,
no: int = 1,
load_case_no: int = 1,
member_sets: str = '1',
load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
load_direction = MemberSetLoadDirection.LOAD_DIRECTION_LOCAL_Z,
load_parameter = [],
load_over_total_length: bool= False,
comment: str = '',
params: dict = {}):
"""
Args:
no (int): Load Tag
load_case_no (int): Assigned Load Case
member_sets (str): Assigned Member Sets
load_distribution (enum): Load Distribution Enumeration
load_direction (enum): Load Direction Enumeration
load_parameter (list): Load Parameters
load_over_total_length (bool): Load Over Total Length Option
comment (str, optional): Comment
params (dict, optional): Parameters
for load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM:
load_parameter = [magnitude]
for load_distrubition = MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_1:
load_parameter = [magnitude, distance_a_is_defined_as_relative = False, distance_a]
for load_distrubition = MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_N:
load_parameter = [magnitude, distance_a_is_defined_as_relative = False, distance_b_is_defined_as_relative = False, distance_a, distance_b]
for load_distrubition = MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_2x2:
load_parameter = [magnitude, distance_a_is_defined_as_relative = False, distance_b_is_defined_as_relative = False, distance_c_is_defined_as_relative = False, distance_a, distance_b, distance_c]
for load_distrubition = MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_2:
load_parameter = [magnitude_1, magnitude_2, distance_a_is_defined_as_relative = False, distance_b_is_defined_as_relative = False, distance_a, distance_b]
for load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_VARYING:
load_parameter = [[distance, delta_distance, magnitude], ...]
for load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_TRAPEZIODAL:
load_parameter = [magnitude_1, magnitude_2, distance_a_relative = False, distance_a_relative = False, a_distance, b_distance]
for load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_TAPERED:
load_parameter = [magnitude_1, magnitude_2, distance_a_relative = False, distance_a_relative = False, a_distance, b_distance]
for load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_PARABOLIC:
load_parameter = [magnitude_1, magnitude_2, magnitude_3]
for load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_VARYING:
load_parameter = [[distance, delta_distance, magnitude], ...]
"""
# Client model | Member Load
clientObject = clientModel.factory.create('ns0:member_set_load')
# Clears object atributes | Sets all atributes to None
clearAtributes(clientObject)
# Member Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Members No. (e.g. '5 6 7 12')
clientObject.member_sets = ConvertToDlString(member_sets)
# Member Load Type
load_type = MemberSetLoadType.LOAD_TYPE_DISPLACEMENT
clientObject.load_type = load_type.name
# Member Load Distribution
clientObject.load_distribution = load_distribution.name
# Member Load Direction
clientObject.load_direction = load_direction.name
#Load Magnitude
if load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM:
try:
len(load_parameter)==1
except:
raise Exception("WARNING: Load parameter array length should be 1 for LOAD_DISTRIBUTION_UNIFORM. Kindly check list inputs completeness and correctness.")
clientObject.magnitude = load_parameter[0]
elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_1:
try:
len(load_parameter)==3
except:
raise Exception("WARNING: Load parameter array length should be 3 for LOAD_DISTRIBUTION_CONCENTRATED_1. Kindly check list inputs completeness and correctness.")
clientObject.magnitude = load_parameter[0]
clientObject.distance_a_is_defined_as_relative = load_parameter[1]
if load_parameter[1]:
clientObject.distance_a_relative = load_parameter[2]
else:
clientObject.distance_a_absolute = load_parameter[2]
elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_N:
try:
len(load_parameter)==5
except:
raise Exception("WARNING: Load parameter array length should be 5 for LOAD_DISTRIBUTION_CONCENTRATED_N. Kindly check list inputs completeness and correctness.")
clientObject.magnitude = load_parameter[0]
clientObject.distance_a_is_defined_as_relative = load_parameter[1]
clientObject.distance_b_is_defined_as_relative = load_parameter[2]
if load_parameter[1]:
clientObject.distance_a_relative = load_parameter[3]
else:
clientObject.distance_a_absolute = load_parameter[3]
if load_parameter[2]:
clientObject.distance_b_relative = load_parameter[4]
else:
clientObject.distance_b_absolute = load_parameter[4]
elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_2x2:
try:
len(load_parameter)==7
except:
raise Exception("WARNING: Load parameter array length should be 7 for LOAD_DISTRIBUTION_CONCENTRATED_2x2. Kindly check list inputs completeness and correctness.")
clientObject.magnitude = load_parameter[0]
clientObject.distance_a_is_defined_as_relative = load_parameter[1]
clientObject.distance_b_is_defined_as_relative = load_parameter[2]
clientObject.distance_c_is_defined_as_relative = load_parameter[3]
if load_parameter[1]:
clientObject.distance_a_relative = load_parameter[4]
else:
clientObject.distance_a_absolute = load_parameter[4]
if load_parameter[2]:
clientObject.distance_b_relative = load_parameter[5]
else:
clientObject.distance_b_absolute = load_parameter[5]
if load_parameter[3]:
clientObject.distance_c_relative = load_parameter[6]
else:
clientObject.distance_c_absolute = load_parameter[6]
elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_2:
try:
len(load_parameter)==6
except:
raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_CONCENTRATED_2. Kindly check list inputs completeness and correctness.")
clientObject.magnitude_1 = load_parameter[0]
clientObject.magnitude_2 = load_parameter[1]
clientObject.distance_a_is_defined_as_relative = load_parameter[2]
clientObject.distance_b_is_defined_as_relative = load_parameter[3]
if load_parameter[2]:
clientObject.distance_a_relative = load_parameter[4]
else:
clientObject.distance_a_absolute = load_parameter[4]
if load_parameter[3]:
clientObject.distance_b_relative = load_parameter[5]
else:
clientObject.distance_b_absolute = load_parameter[5]
elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_VARYING:
try:
len(load_parameter[0])==3
except:
print("WARNING: MemberSetLoad no: %x, load case: %x - Wrong data input." % (no, load_case_no))
clientObject.varying_load_parameters = clientModel.factory.create('ns0:member_set_load.varying_load_parameters')
for i in range(len(load_parameter)):
mlvlp = clientModel.factory.create('ns0:member_set_load_varying_load_parameters')
mlvlp.no = i+1
mlvlp.distance = load_parameter[i][0]
mlvlp.delta_distance = load_parameter[i][1]
mlvlp.magnitude = load_parameter[i][2]
mlvlp.note = None
mlvlp.magnitude_t_c = 0.0
mlvlp.magnitude_delta_t = 0.0
mlvlp.magnitude_t_t = 0.0
mlvlp.magnitude_t_b = 0.0
clientObject.varying_load_parameters.member_set_load_varying_load_parameters.append(mlvlp)
elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_TRAPEZOIDAL:
try:
len(load_parameter)==6
except:
raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_TRAPEZOIDAL. Kindly check list inputs completeness and correctness.")
clientObject.magnitude_1 = load_parameter[0]
clientObject.magnitude_2 = load_parameter[1]
if type(load_over_total_length) == bool:
pass
else:
raise Exception("WARNING: Type of the load over total length should be bool. Kindly check inputs correctness.")
if load_over_total_length == False:
clientObject.distance_a_is_defined_as_relative = load_parameter[2]
clientObject.distance_b_is_defined_as_relative = load_parameter[3]
if load_parameter[2]:
clientObject.distance_a_relative = load_parameter[4]
else:
clientObject.distance_a_absolute = load_parameter[4]
if load_parameter[3]:
clientObject.distance_b_relative = load_parameter[5]
else:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.load_is_over_total_length = True
elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_TAPERED:
try:
len(load_parameter)==6
except:
raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_TAPERED. Kindly check list inputs completeness and correctness.")
clientObject.magnitude_1 = load_parameter[0]
clientObject.magnitude_2 = load_parameter[1]
clientObject.distance_a_is_defined_as_relative = load_parameter[2]
clientObject.distance_b_is_defined_as_relative = load_parameter[3]
if load_parameter[2]:
clientObject.distance_a_relative = load_parameter[4]
else:
clientObject.distance_a_absolute = load_parameter[4]
if load_parameter[3]:
clientObject.distance_b_relative = load_parameter[5]
else:
clientObject.distance_b_absolute = load_parameter[5]
elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_PARABOLIC:
try:
len(load_parameter)==3
except:
raise Exception("WARNING: Load parameter array length should be 6 for LOAD_DISTRIBUTION_PARABOLIC. Kindly check list inputs completeness and correctness.")
clientObject.magnitude_1 = load_parameter[0]
clientObject.magnitude_2 = load_parameter[1]
clientObject.magnitude_3 = load_parameter[2]
elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_VARYING:
try:
len(load_parameter[0])==3
except:
print("WARNING: MemberSetLoad no: %x, load case: %x - Wrong data input." % (no, load_case_no))
clientObject.varying_load_parameters = clientModel.factory.create('ns0:member_set_load.varying_load_parameters')
for i in range(len(load_parameter)):
mlvlp = clientModel.factory.create('ns0:member_set_load_varying_load_parameters')
mlvlp.no = i+1
mlvlp.distance = load_parameter[i][0]
mlvlp.delta_distance = load_parameter[i][1]
mlvlp.magnitude = load_parameter[i][2]
mlvlp.note = None
mlvlp.magnitude_t_c = 0.0
mlvlp.magnitude_delta_t = 0.0
mlvlp.magnitude_t_t = 0.0
mlvlp.magnitude_t_b = 0.0
clientObject.varying_load_parameters.member_set_load_varying_load_parameters.append(mlvlp)
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Load Member Load to client model
clientModel.service.set_member_set_load(load_case_no, clientObject)
def Rotation(self,
             no: int = 1,
             load_case_no: int = 1,
             member_sets: str = '1',
             load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
             load_direction = MemberSetLoadDirection.LOAD_DIRECTION_LOCAL_Z,
             load_parameter = [],
             load_over_total_length: bool= False,
             comment: str = '',
             params: dict = {}):
    """
    Creates a rotation (moment) load on one or more member sets.

    Args:
        no (int): Load Tag
        load_case_no (int): Assigned Load Case
        member_sets (str): Assigned Member Sets (e.g. '5 6 7 12')
        load_distribution (enum): Load Distribution Enumeration
        load_direction (enum): Load Direction Enumeration
        load_parameter (list): Load Parameters; layout depends on load_distribution:
            LOAD_DISTRIBUTION_UNIFORM:
                [magnitude]
            LOAD_DISTRIBUTION_CONCENTRATED_1:
                [magnitude, distance_a_is_defined_as_relative, distance_a]
            LOAD_DISTRIBUTION_CONCENTRATED_N:
                [magnitude, distance_a_is_defined_as_relative, distance_b_is_defined_as_relative, distance_a, distance_b]
            LOAD_DISTRIBUTION_CONCENTRATED_2x2:
                [magnitude, distance_a_is_defined_as_relative, distance_b_is_defined_as_relative, distance_c_is_defined_as_relative, distance_a, distance_b, distance_c]
            LOAD_DISTRIBUTION_CONCENTRATED_2:
                [magnitude_1, magnitude_2, distance_a_is_defined_as_relative, distance_b_is_defined_as_relative, distance_a, distance_b]
            LOAD_DISTRIBUTION_CONCENTRATED_VARYING / LOAD_DISTRIBUTION_VARYING:
                [[distance, delta_distance, magnitude], ...]
            LOAD_DISTRIBUTION_TRAPEZOIDAL / LOAD_DISTRIBUTION_TAPERED:
                [magnitude_1, magnitude_2, distance_a_is_defined_as_relative, distance_b_is_defined_as_relative, distance_a, distance_b]
            LOAD_DISTRIBUTION_PARABOLIC:
                [magnitude_1, magnitude_2, magnitude_3]
        load_over_total_length (bool): Load Over Total Length (trapezoidal distribution only)
        comment (str, optional): Comment
        params (dict, optional): Extra attributes assigned directly onto the SOAP object

    Raises:
        Exception: if load_parameter has the wrong length for the chosen
            distribution, or if load_over_total_length is not a bool.
    """
    def _require_len(expected, distribution_name):
        # BUG FIX: the original "try: len(load_parameter)==n except:" pattern
        # discarded the comparison result and never raised; validate explicitly.
        if len(load_parameter) != expected:
            raise Exception("WARNING: Load parameter array length should be %d for %s. Kindly check list inputs completeness and correctness." % (expected, distribution_name))

    def _set_varying_parameters():
        # Shared builder for the two VARYING distributions (the original
        # duplicated this loop verbatim).
        if not load_parameter or len(load_parameter[0]) != 3:
            # Match original behaviour: warn and continue rather than raise.
            print("WARNING: MemberSetLoad no: %x, load case: %x - Wrong data input." % (no, load_case_no))
        clientObject.varying_load_parameters = clientModel.factory.create('ns0:member_set_load.varying_load_parameters')
        for i in range(len(load_parameter)):
            mlvlp = clientModel.factory.create('ns0:member_set_load_varying_load_parameters')
            mlvlp.no = i + 1
            mlvlp.distance = load_parameter[i][0]
            mlvlp.delta_distance = load_parameter[i][1]
            mlvlp.magnitude = load_parameter[i][2]
            mlvlp.note = None
            # Temperature magnitudes do not apply to rotation loads.
            mlvlp.magnitude_t_c = 0.0
            mlvlp.magnitude_delta_t = 0.0
            mlvlp.magnitude_t_t = 0.0
            mlvlp.magnitude_t_b = 0.0
            clientObject.varying_load_parameters.member_set_load_varying_load_parameters.append(mlvlp)

    # Client model | Member Set Load
    clientObject = clientModel.factory.create('ns0:member_set_load')
    # Clears object attributes | Sets all attributes to None
    clearAtributes(clientObject)
    # Member Load No.
    clientObject.no = no
    # Load Case No.
    clientObject.load_case = load_case_no
    # Member Sets No. (e.g. '5 6 7 12')
    clientObject.member_sets = ConvertToDlString(member_sets)
    # Member Load Type
    load_type = MemberSetLoadType.LOAD_TYPE_ROTATION
    clientObject.load_type = load_type.name
    # Member Load Distribution
    clientObject.load_distribution = load_distribution.name
    # Member Load Direction
    clientObject.load_direction = load_direction.name

    # Load Magnitude(s), depending on the distribution
    if load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM:
        _require_len(1, 'LOAD_DISTRIBUTION_UNIFORM')
        clientObject.magnitude = load_parameter[0]
    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_1:
        _require_len(3, 'LOAD_DISTRIBUTION_CONCENTRATED_1')
        clientObject.magnitude = load_parameter[0]
        clientObject.distance_a_is_defined_as_relative = load_parameter[1]
        if load_parameter[1]:
            clientObject.distance_a_relative = load_parameter[2]
        else:
            clientObject.distance_a_absolute = load_parameter[2]
    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_N:
        _require_len(5, 'LOAD_DISTRIBUTION_CONCENTRATED_N')
        clientObject.magnitude = load_parameter[0]
        clientObject.distance_a_is_defined_as_relative = load_parameter[1]
        clientObject.distance_b_is_defined_as_relative = load_parameter[2]
        if load_parameter[1]:
            clientObject.distance_a_relative = load_parameter[3]
        else:
            clientObject.distance_a_absolute = load_parameter[3]
        if load_parameter[2]:
            clientObject.distance_b_relative = load_parameter[4]
        else:
            clientObject.distance_b_absolute = load_parameter[4]
    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_2x2:
        _require_len(7, 'LOAD_DISTRIBUTION_CONCENTRATED_2x2')
        clientObject.magnitude = load_parameter[0]
        clientObject.distance_a_is_defined_as_relative = load_parameter[1]
        clientObject.distance_b_is_defined_as_relative = load_parameter[2]
        clientObject.distance_c_is_defined_as_relative = load_parameter[3]
        if load_parameter[1]:
            clientObject.distance_a_relative = load_parameter[4]
        else:
            clientObject.distance_a_absolute = load_parameter[4]
        if load_parameter[2]:
            clientObject.distance_b_relative = load_parameter[5]
        else:
            clientObject.distance_b_absolute = load_parameter[5]
        if load_parameter[3]:
            clientObject.distance_c_relative = load_parameter[6]
        else:
            clientObject.distance_c_absolute = load_parameter[6]
    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_2:
        _require_len(6, 'LOAD_DISTRIBUTION_CONCENTRATED_2')
        clientObject.magnitude_1 = load_parameter[0]
        clientObject.magnitude_2 = load_parameter[1]
        clientObject.distance_a_is_defined_as_relative = load_parameter[2]
        clientObject.distance_b_is_defined_as_relative = load_parameter[3]
        if load_parameter[2]:
            clientObject.distance_a_relative = load_parameter[4]
        else:
            clientObject.distance_a_absolute = load_parameter[4]
        if load_parameter[3]:
            clientObject.distance_b_relative = load_parameter[5]
        else:
            clientObject.distance_b_absolute = load_parameter[5]
    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_CONCENTRATED_VARYING:
        _set_varying_parameters()
    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_TRAPEZOIDAL:
        _require_len(6, 'LOAD_DISTRIBUTION_TRAPEZOIDAL')
        clientObject.magnitude_1 = load_parameter[0]
        clientObject.magnitude_2 = load_parameter[1]
        # idiomatic type check instead of "if type(x) == bool: pass else: raise"
        if not isinstance(load_over_total_length, bool):
            raise Exception("WARNING: Type of the load over total length should be bool. Kindly check inputs correctness.")
        if load_over_total_length == False:
            clientObject.distance_a_is_defined_as_relative = load_parameter[2]
            clientObject.distance_b_is_defined_as_relative = load_parameter[3]
            if load_parameter[2]:
                clientObject.distance_a_relative = load_parameter[4]
            else:
                clientObject.distance_a_absolute = load_parameter[4]
            if load_parameter[3]:
                clientObject.distance_b_relative = load_parameter[5]
            else:
                clientObject.distance_b_absolute = load_parameter[5]
        else:
            clientObject.load_is_over_total_length = True
    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_TAPERED:
        _require_len(6, 'LOAD_DISTRIBUTION_TAPERED')
        clientObject.magnitude_1 = load_parameter[0]
        clientObject.magnitude_2 = load_parameter[1]
        clientObject.distance_a_is_defined_as_relative = load_parameter[2]
        clientObject.distance_b_is_defined_as_relative = load_parameter[3]
        if load_parameter[2]:
            clientObject.distance_a_relative = load_parameter[4]
        else:
            clientObject.distance_a_absolute = load_parameter[4]
        if load_parameter[3]:
            clientObject.distance_b_relative = load_parameter[5]
        else:
            clientObject.distance_b_absolute = load_parameter[5]
    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_PARABOLIC:
        _require_len(3, 'LOAD_DISTRIBUTION_PARABOLIC')
        clientObject.magnitude_1 = load_parameter[0]
        clientObject.magnitude_2 = load_parameter[1]
        clientObject.magnitude_3 = load_parameter[2]
    elif load_distribution == MemberSetLoadDistribution.LOAD_DISTRIBUTION_VARYING:
        _set_varying_parameters()

    # Comment
    clientObject.comment = comment
    # Adding optional parameters via dictionary
    for key in params:
        clientObject[key] = params[key]
    # Add Member Set Load to client model
    clientModel.service.set_member_set_load(load_case_no, clientObject)
def PipeContentFull(self,
                    no: int = 1,
                    load_case_no: int = 1,
                    member_sets: str = '1',
                    load_direction_orientation = MemberSetLoadDirectionOrientation.LOAD_DIRECTION_FORWARD,
                    specific_weight : float = 0.0,
                    comment: str = '',
                    params: dict = {}):
    """
    Creates a load representing a completely filled pipe on member sets.

    Args:
        no (int): Load Tag
        load_case_no (int): Assigned Load Case
        member_sets (str): Assigned Member Sets
        load_direction_orientation (enum): Load Direction Orientation Enumeration
        specific_weight (float): Specific Weight of the pipe content
        comment (str, optional): Comment
        params (dict, optional): Extra attributes assigned directly onto the SOAP object
    """
    # Build a fresh member-set load record; wiping every attribute to None
    # ensures only the fields set below are transmitted.
    clientObject = clientModel.factory.create('ns0:member_set_load')
    clearAtributes(clientObject)

    # Identification: load number, owning load case, target member sets.
    clientObject.no = no
    clientObject.load_case = load_case_no
    clientObject.member_sets = ConvertToDlString(member_sets)

    # A fully filled pipe is modelled as a uniform load acting in the
    # global Z (gravity) direction with the requested orientation.
    clientObject.load_type = MemberSetLoadType.LOAD_TYPE_PIPE_CONTENT_FULL.name
    clientObject.load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM.name
    clientObject.load_direction = MemberSetLoadDirection.LOAD_DIRECTION_GLOBAL_Z_OR_USER_DEFINED_W_TRUE.name
    clientObject.load_direction_orientation = load_direction_orientation.name

    # The magnitude is the specific weight of the content.
    clientObject.magnitude = specific_weight
    clientObject.comment = comment

    # Forward any caller-supplied extra attributes.
    for key, value in params.items():
        clientObject[key] = value

    # Register the load with the client model.
    clientModel.service.set_member_set_load(load_case_no, clientObject)
def PipeContentPartial(self,
                       no: int = 1,
                       load_case_no: int = 1,
                       member_sets: str = '1',
                       load_direction_orientation = MemberSetLoadDirectionOrientation.LOAD_DIRECTION_FORWARD,
                       specific_weight : float = 0.0,
                       filling_height : float = 0.0,
                       comment: str = '',
                       params: dict = {}):
    """
    Creates a load representing a partially filled pipe on member sets.

    Args:
        no (int): Load Tag
        load_case_no (int): Assigned Load Case
        member_sets (str): Assigned Member Sets
        load_direction_orientation (enum): Load Direction Orientation Enumeration
        specific_weight (float): Specific Weight of the pipe content
        filling_height (float): Filling Height of the content inside the pipe
        comment (str, optional): Comment
        params (dict, optional): Extra attributes assigned directly onto the SOAP object
    """
    # Build a fresh member-set load record with all attributes reset.
    clientObject = clientModel.factory.create('ns0:member_set_load')
    clearAtributes(clientObject)

    # Identification: load number, owning load case, target member sets.
    clientObject.no = no
    clientObject.load_case = load_case_no
    clientObject.member_sets = ConvertToDlString(member_sets)

    # A partially filled pipe is a uniform gravity-direction load whose
    # magnitude is the content's specific weight plus a filling height.
    clientObject.load_type = MemberSetLoadType.LOAD_TYPE_PIPE_CONTENT_PARTIAL.name
    clientObject.load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM.name
    clientObject.load_direction = MemberSetLoadDirection.LOAD_DIRECTION_GLOBAL_Z_OR_USER_DEFINED_W_TRUE.name
    clientObject.load_direction_orientation = load_direction_orientation.name

    clientObject.magnitude = specific_weight
    clientObject.filling_height = filling_height
    clientObject.comment = comment

    # Forward any caller-supplied extra attributes.
    for key, value in params.items():
        clientObject[key] = value

    # Register the load with the client model.
    clientModel.service.set_member_set_load(load_case_no, clientObject)
def PipeInternalPressure(self,
                         no: int = 1,
                         load_case_no: int = 1,
                         member_sets: str = '1',
                         pressure : float = 0.0,
                         comment: str = '',
                         params: dict = {}):
    """
    Creates an internal pressure load for pipe member sets.

    Args:
        no (int): Load Tag
        load_case_no (int): Assigned Load Case
        member_sets (str): Assigned Member Sets
        pressure (float): Internal Pressure magnitude
        comment (str, optional): Comment
        params (dict, optional): Extra attributes assigned directly onto the SOAP object
    """
    # Build a fresh member-set load record with all attributes reset.
    clientObject = clientModel.factory.create('ns0:member_set_load')
    clearAtributes(clientObject)

    # Identification: load number, owning load case, target member sets.
    clientObject.no = no
    clientObject.load_case = load_case_no
    clientObject.member_sets = ConvertToDlString(member_sets)

    # Internal pressure acts uniformly along the member's local X axis.
    clientObject.load_type = MemberSetLoadType.LOAD_TYPE_PIPE_INTERNAL_PRESSURE.name
    clientObject.load_distribution = MemberSetLoadDistribution.LOAD_DISTRIBUTION_UNIFORM.name
    clientObject.load_direction = MemberSetLoadDirection.LOAD_DIRECTION_LOCAL_X.name

    clientObject.magnitude = pressure
    clientObject.comment = comment

    # Forward any caller-supplied extra attributes.
    for key, value in params.items():
        clientObject[key] = value

    # Register the load with the client model.
    clientModel.service.set_member_set_load(load_case_no, clientObject)
def RotaryMotion(self,
                 no: int = 1,
                 load_case_no: int = 1,
                 member_sets: str = '1',
                 angular_acceleration : float = 0.0,
                 angular_velocity : float = 0.0,
                 axis_definition_type = MemberSetLoadAxisDefinitionType.AXIS_DEFINITION_TWO_POINTS,
                 axis_orientation = MemberSetLoadAxisDefinitionAxisOrientation.AXIS_POSITIVE,
                 axis_definition = MemberSetLoadAxisDefinition.AXIS_X,
                 axis_definition_p1 = [],
                 axis_definition_p2 = [],
                 comment: str = '',
                 params: dict = {}):
    """
    Creates a rotary motion load on member sets.

    Args:
        no (int): Load Tag
        load_case_no (int): Assigned Load Case
        member_sets (str): Assigned Member Sets
        angular_acceleration (float): Angular Acceleration
        angular_velocity (float): Angular Velocity
        axis_definition_type (enum): Axis Definition Type Enumeration
        axis_orientation (enum): Axis Orientation Enumeration (used with POINT_AND_AXIS)
        axis_definition (enum): Axis Definition Enumeration (used with POINT_AND_AXIS)
        axis_definition_p1 (list): Axis Definition First Point [x, y, z]
        axis_definition_p2 (list): Axis Definition Second Point [x, y, z] (used with TWO_POINTS)
        comment (str, optional): Comment
        params (dict, optional): Extra attributes assigned directly onto the SOAP object
    """
    # Client model | Member Set Load
    clientObject = clientModel.factory.create('ns0:member_set_load')
    # Clears object attributes | Sets all attributes to None
    clearAtributes(clientObject)
    # Member Load No.
    clientObject.no = no
    # Load Case No.
    clientObject.load_case = load_case_no
    # Member Sets No. (e.g. '5 6 7 12')
    clientObject.member_sets = ConvertToDlString(member_sets)
    # Member Load Type
    load_type = MemberSetLoadType.LOAD_TYPE_ROTARY_MOTION
    clientObject.load_type = load_type.name
    # Angular Acceleration
    clientObject.angular_acceleration = angular_acceleration
    # Angular Velocity
    clientObject.angular_velocity = angular_velocity
    # Axis Definition Type
    clientObject.axis_definition_type = axis_definition_type.name
    # Axis definition
    # BUG FIX: the original compared the enum member against its .name string
    # (enum != str is always False), so neither branch ever executed and the
    # axis points were silently dropped. Compare enum members directly.
    if axis_definition_type == MemberSetLoadAxisDefinitionType.AXIS_DEFINITION_TWO_POINTS:
        clientObject.axis_definition_p1_x = axis_definition_p1[0]
        clientObject.axis_definition_p1_y = axis_definition_p1[1]
        clientObject.axis_definition_p1_z = axis_definition_p1[2]
        clientObject.axis_definition_p2_x = axis_definition_p2[0]
        clientObject.axis_definition_p2_y = axis_definition_p2[1]
        clientObject.axis_definition_p2_z = axis_definition_p2[2]
    elif axis_definition_type == MemberSetLoadAxisDefinitionType.AXIS_DEFINITION_POINT_AND_AXIS:
        clientObject.axis_definition_p1_x = axis_definition_p1[0]
        clientObject.axis_definition_p1_y = axis_definition_p1[1]
        clientObject.axis_definition_p1_z = axis_definition_p1[2]
        clientObject.axis_definition_axis = axis_definition.name
        clientObject.axis_definition_axis_orientation = axis_orientation.name
    # Comment
    clientObject.comment = comment
    # Adding optional parameters via dictionary
    for key in params:
        clientObject[key] = params[key]
    # Add Member Set Load to client model
    clientModel.service.set_member_set_load(load_case_no, clientObject)
11,687 | a847c9444c8db32e4c8375df084416476b177028 |
import networkx as nx
import matplotlib.pyplot as plt
import os

# Load the graph from a comma-separated edge list; node labels are parsed as ints.
G_fb = nx.read_edgelist(os.path.join('DataSets','Graph.csv'), delimiter=',', create_using = nx.Graph(), nodetype = int)
# NOTE(review): nx.info() was removed in networkx 3.0 — confirm the pinned version supports it.
print(nx.info(G_fb))
#Create network layout for visualizations
spring_pos = nx.spring_layout(G_fb)
plt.axis("off")
print('Plotting')
# Draw the raw graph with small unlabeled nodes using the spring layout.
nx.draw_networkx(G_fb, pos = spring_pos, with_labels = False, node_size = 15)
plt.plot()
from multiprocessing import Pool
import itertools
def partitions(nodes, n):
    """Yield successive chunks of at most n items from *nodes* as tuples."""
    source = iter(nodes)
    chunk = tuple(itertools.islice(source, n))
    while chunk:
        yield chunk
        chunk = tuple(itertools.islice(source, n))
def btwn_pool(G_tuple):
    """Unpack a (graph, normalized, weight, sources) tuple and delegate to
    networkx's source-based betweenness computation (worker for a Pool.map)."""
    call_args = G_tuple
    return nx.betweenness_centrality_source(*call_args)
# def between_parallel(G, processes = 1):
# p = Pool(processes=processes)
# part_generator = 4*len(p._pool)
# node_partitions = list(partitions(G.nodes(), int(len(G)/part_generator)))
# num_partitions = len(node_partitions)
#
# bet_map = p.map(btwn_pool,
# zip([G]*num_partitions,
# [True]*num_partitions,
# [None]*num_partitions,
# node_partitions))
#
# bt_c = bet_map[0]
# for bt in bet_map[1:]:
# for n in bt:
# bt_c[n] += bt[n]
# return bt_c
# Single-process betweenness centrality (the parallel variant above is
# commented out); bt maps node -> centrality score.
bt = nx.betweenness_centrality_source(G_fb)
top = 10
# BUG FIX: dict.iteritems() is Python 2 only and raises AttributeError on
# Python 3; .items() behaves the same here. Sort descending by score.
max_nodes = sorted(bt.items(), key = lambda v: -v[1])[:top]
bt_values = [5]*len(G_fb.nodes())
bt_colors = [0]*len(G_fb.nodes())
# NOTE(review): indexing bt_values[max_key] assumes node labels are ints in
# the range 0..N-1 — confirm this holds for the loaded edge list.
for max_key, max_val in max_nodes:
    bt_values[max_key] = 150      # enlarge the top-centrality nodes
    bt_colors[max_key] = 2        # and give them a distinct color
plt.axis("off")
print('Plotting')
nx.draw_networkx(G_fb, pos = spring_pos, cmap = plt.get_cmap("rainbow"), node_color = bt_colors, node_size = bt_values, with_labels = False)
plt.plot()
import community

# Louvain community detection; parts maps node -> community id.
parts = community.best_partition(G_fb)
# One color value per node, in the same order nodes are drawn.
values = [parts.get(node) for node in G_fb.nodes()]
plt.axis("off")
print('Plotting')
# Color nodes by community membership on the same spring layout.
nx.draw_networkx(G_fb, pos = spring_pos, cmap = plt.get_cmap("jet"), node_color = values, node_size = 35, with_labels = False)
plt.plot()
11,688 | f57ec3bfbfd96f119617ee724829e48e9ed91208 | from projective_normgraph.normgraph import *
# Graph parameters
# NOTE: Python 2 script (print statements, xrange) that cross-checks the
# projective_normgraph package's graph invariants against Sage's computations.
for variant in NormGraphVariant:
    for t in xrange(2, 7):
        # NOTE(review): MNG variant is skipped for even t — presumably it is
        # only defined for odd t; confirm against the package documentation.
        if variant == NormGraphVariant.MNG and t % 2 == 0:
            continue
        for A in NormGraph.it(t, variant):
            # Keep the run cheap: stop once the graphs get large.
            if A.num_vertices() > 200:
                break
            print "Testing {}".format(A)
            g = A.sage_graph()
            # Every invariant computed by the package must match the value
            # Sage derives from the explicit graph.
            assert g.num_verts() == A.num_vertices()
            assert g.num_edges() == A.num_edges()
            assert g.average_degree() == A.average_degree()
            assert g.density() == A.density()
            assert g.automorphism_group().order() == A.automorphism_group_order()
11,689 | cc84b981b0c9e87520e7619b3754370b39064782 | import math
import cv2
import numpy
def multiply_matrices(array1, array2):
    """Return the matrix product of two 2-D numpy arrays as float64.

    Parameters are numpy arrays; the inner dimensions must agree
    (array1 is m x k, array2 is k x n). On a dimension mismatch the string
    "Invalid matrix dimensions" is returned, preserving the original
    error-reporting contract of this module.
    """
    # Guard clause: inner dimensions must agree.
    if array1.shape[1] != array2.shape[0]:
        return "Invalid matrix dimensions"
    # Vectorized product replaces the original O(n^3) Python triple loop;
    # the float64 cast matches the original float64 accumulator array.
    return numpy.matmul(array1, array2).astype(numpy.float64)
def initialize(filename):  # pass the filename
    """Load the image stored at *filename* via OpenCV and return it."""
    return cv2.imread(filename)
def border(image):  # pass the image
    """Center *image* on a larger black canvas sized by a user-entered scale.

    Prompts for a scale factor, allocates a scale-times-larger float32
    canvas, copies the original image into its center, and returns the
    canvas.

    NOTE(review): a scale factor < 1 makes the copy offsets negative, so
    the loop below would write with wrapped (negative) indices — confirm
    the intended input range is scale >= 1.
    """
    columns = image.shape[1]  # gets width from image
    rows = image.shape[0]  # gets height from image
    print("Original size: " + str(columns) + " x " + str(rows) + "\n")  # prints original width and height
    scale = float(input("Enter the scale factor: "))  # gets scale number
    new_columns = columns * scale  # calculates new width
    new_rows = rows * scale  # calculates new height
    new_columns = int(new_columns)  # converts width to integer
    new_rows = int(new_rows)  # converts height to integer
    print("\nScaled size: " + str(new_columns) + " x " + str(new_rows))  # print new dimensions
    new_image = numpy.zeros((new_rows, new_columns, 3), numpy.float32)  # makes blank image (array)
    # calculate new image copy start point (offsets that center the original)
    column_difference = (new_columns // 2) - (columns // 2)  # middle column minus half width of image
    row_difference = (new_rows // 2) - (rows // 2)  # middle row minus half height of image
    # places original image onto new image, pixel by pixel
    for y in range(rows):  # iterates through each row
        for x in range(columns):  # iterates through each column
            new_image[y + row_difference][x + column_difference] = image[y][
                x]  # transfers pixel information using coordinates
    return new_image  # returns image with border
def rotate(new_image):  # pass border image
    """Interactively rotate *new_image* about its center.

    Repeatedly prompts for an angle in degrees and rotates the ORIGINAL
    image by the running total of all angles entered so far; entering -1
    stops the loop and the last rotated image is returned.

    Each source pixel is mapped by translating the origin to the image
    center, applying the 2-D rotation matrix, and translating back. The
    destination coordinates are truncated with int(), so small rounding
    gaps (unfilled pixels) can appear in the output.
    """
    new_columns = new_image.shape[1]  # gets width from image
    new_rows = new_image.shape[0]  # gets height from image
    theta_degrees = float()  # most recent user input
    total_degrees = float()  # running sum of all inputs
    # BUG FIX: if the user enters -1 immediately, the original raised
    # NameError because rotated_image was never assigned; return the
    # unrotated input in that case instead.
    rotated_image = new_image
    while theta_degrees != -1:  # continues until input is -1
        theta_degrees = float(input("\nEnter number of degrees to rotate (-1 to stop): "))  # gets input
        if theta_degrees != -1:  # only continues if input isn't -1
            total_degrees = total_degrees + theta_degrees  # increases sum of degrees
            theta_radians = total_degrees * (math.pi / 180)  # converts degrees to radians
            rotated_image = numpy.zeros((new_rows, new_columns, 3), numpy.float32)  # makes blank image (array)
            # rotates image
            for y in range(new_rows):  # iterates through each row
                for x in range(new_columns):  # iterates through each column
                    x_t = (x - (new_columns / 2))  # x relative to image center
                    y_t = (y - (new_rows / 2))  # y relative to image center
                    x_r = ((x_t * math.cos(theta_radians)) - (
                            y_t * math.sin(theta_radians)))  # rotated x
                    y_r = ((x_t * math.sin(theta_radians)) + (
                            y_t * math.cos(theta_radians)))  # rotated y
                    x_f = int(x_r + (new_columns / 2))  # back to pixel coordinates
                    y_f = int(y_r + (new_rows / 2))
                    # BUG FIX: also reject negative coordinates; previously a
                    # negative index silently wrapped to the opposite edge of
                    # the image, smearing rotated pixels across the canvas.
                    if (0 <= x_f < new_columns) and (0 <= y_f < new_rows):
                        rotated_image[y_f][x_f] = new_image[y][x]  # transfers pixel information
    return rotated_image  # returns rotated image
def color_error(border_image, rotated_image, theta_degrees):  # pass border image and rotated image
    """Mean per-pixel Euclidean colour distance between *border_image* and
    its rotation by *theta_degrees* held in *rotated_image*.

    Each source pixel (y, x) is forward-mapped with the same transform used
    by rotate(); pixels that map outside the frame are skipped, but the
    divisor stays the full pixel count (matching the original behaviour).
    """
    new_columns = border_image.shape[1]  # image width
    new_rows = border_image.shape[0]     # image height
    distance_sum = 0.0
    theta_radians = theta_degrees * (math.pi / 180)
    cos_t = math.cos(theta_radians)  # hoisted loop invariants
    sin_t = math.sin(theta_radians)
    for y in range(new_rows):
        for x in range(new_columns):
            x_t = x - (new_columns / 2)  # centre-origin coordinates
            y_t = y - (new_rows / 2)
            x_f = int((x_t * cos_t) - (y_t * sin_t) + (new_columns / 2))
            y_f = int((x_t * sin_t) + (y_t * cos_t) + (new_rows / 2))
            # bug fix: lower bound added — negative indices used to wrap
            if 0 <= x_f < new_columns and 0 <= y_f < new_rows:
                border_pixel = border_image[y, x]
                rotated_pixel = rotated_image[y_f, x_f]
                # cast to float first so uint8 images cannot wrap on subtraction
                diff_b = float(border_pixel[0]) - float(rotated_pixel[0])
                diff_g = float(border_pixel[1]) - float(rotated_pixel[1])
                diff_r = float(border_pixel[2]) - float(rotated_pixel[2])
                distance_sum += math.sqrt(diff_b * diff_b + diff_g * diff_g + diff_r * diff_r)
    total_pixels = float(new_columns * new_rows)
    color_error_value = float(distance_sum / total_pixels)
    return color_error_value
def rounding_error(border_image, rotated_image, theta_degrees):
    """Average coordinate truncation error of the rotation mapping.

    For every source pixel, computes the fractional part discarded by the
    int() truncation of the rotated (x, y) target and averages the summed
    x+y error over all pixels. *rotated_image* is unused but kept for
    signature compatibility with color_error().
    """
    new_columns = border_image.shape[1]  # image width
    new_rows = border_image.shape[0]     # image height
    error_sum = 0.0
    theta_radians = theta_degrees * (math.pi / 180)
    cos_t = math.cos(theta_radians)  # hoisted loop invariants
    sin_t = math.sin(theta_radians)
    for y in range(new_rows):
        for x in range(new_columns):
            x_t = x - (new_columns / 2)  # centre-origin coordinates
            y_t = y - (new_rows / 2)
            x_float = (x_t * cos_t) - (y_t * sin_t) + (new_columns / 2)
            y_float = (x_t * sin_t) + (y_t * cos_t) + (new_rows / 2)
            x_f = int(x_float)  # pixel coordinate (truncated)
            y_f = int(y_float)
            # bug fix: lower bound added — negative indices used to wrap
            if 0 <= x_f < new_columns and 0 <= y_f < new_rows:
                error_sum += (x_float - x_f) + (y_float - y_f)
    total_pixels = float(new_columns * new_rows)
    rounding_error_value = float(error_sum / total_pixels)
    return rounding_error_value
def main():
    # Demo driver: first exercises multiply_matrices (defined elsewhere in
    # this file) on two small float64 arrays, then runs the image
    # border/rotate pipeline and reports the error metrics.
    array1 = numpy.zeros((3, 4), numpy.float64)
    array1[0] = [1, 2, 3, 4]
    array1[1] = [5, 6, 7, 8]
    array1[2] = [-1, -2, -3, -4]
    array2 = numpy.zeros((4, 3), numpy.float64)
    array2[0] = [1, 2, 3]
    array2[1] = [-7, 8, -9]
    array2[2] = [10, 20, 30]
    array2[3] = [3, 5, 7]
    print("array1")
    print(array1)
    print("array2")
    print(array2)
    print("array1 * array2")
    print(multiply_matrices(array1, array2))
    print("")
    filename = "Ramen.jpg"  # makes variable for image
    # NOTE(review): theta_degrees is fixed at 360 here, but rotate() prompts
    # the user interactively — the error metrics below assume the user
    # actually performed a full 360-degree rotation. Confirm intent.
    theta_degrees = 360  # makes variable for degrees
    image = initialize(filename)  # initialize returns image (defined elsewhere)
    border_image = border(image)  # add_border returns image with border (defined elsewhere)
    rotated_image = rotate(border_image)  # rotate returns rotated image
    color_error_value = color_error(border_image, rotated_image, theta_degrees)
    print("\nColor error = " + str(color_error_value))
    rounding_error_value = rounding_error(border_image, rotated_image, theta_degrees)
    print("\nRounding error = " + str(rounding_error_value))
    cv2.imwrite("rotated" + filename, rotated_image)
    # NOTE(review): no cv2.waitKey() follows imshow — the window may close
    # immediately when the script exits. Confirm.
    cv2.imshow("new" + filename, rotated_image)
main()
|
11,690 | ba905b5438ee8e04a8bff20c92f763779b35377b | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 17 19:21:45 2021
@author: DL
"""
'''
20/2/4 ํจ์ ์ถ๊ฐ (isRange)
20/1/26 ํ
์ด๋ธ ๋ก๋ฉ ๋ฑ ์์
'''
import os
import pandas as pd
import urllib
import re
import pdb
import time
import requests
import numpy as np
import pandas as pd
import time
import datetime
# ํ์ EDSM API ํธ์ถ
import json
from pdb import set_trace as s
from constants import Constants
import matplotlib.pyplot as plt
# matplotlib ์ค์
# Global matplotlib font sizing. SMALL_SIZE and MEDIUM_SIZE are defined for
# easy tweaking but every rc call below currently uses BIGGER_SIZE.
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=BIGGER_SIZE)
plt.rc('axes', titlesize=BIGGER_SIZE)    # fontsize of the axes title
plt.rc('axes', labelsize=BIGGER_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=BIGGER_SIZE)   # fontsize of the tick labels
plt.rc('ytick', labelsize=BIGGER_SIZE)   # fontsize of the tick labels
plt.rc('legend', fontsize=BIGGER_SIZE)   # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
def doesFileExist(fileName):
    """Return True when *fileName* exists relative to the current working
    directory; otherwise print a reminder and return False."""
    full_path = os.path.join(os.getcwd(), fileName)
    if not os.path.exists(full_path):
        print(f'Check if "{fileName}" file exists')
        return False
    return True
def geoConvert(df, addr_col, geo_cols, rstFileName):
    '''
    Look up road address / coordinates etc. for every value of df[addr_col]
    via the Naver address API (searchMap, defined above), attach the results
    as new columns named *geo_cols*, write the combined frame to
    *rstFileName* (excel) and return it.
    '''
    # convert each address through the API (one HTTP call per row)
    geo = df[addr_col].map(searchMap)
    # expand the per-row result lists into a DataFrame with geo_cols columns
    geo_df = pd.DataFrame(geo.tolist(), columns = geo_cols)
    # merge side-by-side with the original data
    result = pd.concat([df.reset_index(drop=True),
                        geo_df.reset_index(drop=True)], axis=1)
    result.to_excel(rstFileName, index=False)
    return result
def searchMap(search_text):
    '''
    Geocode *search_text* with the Naver Maps geocoding API.

    See https://apidocs.ncloud.com/ko/ai-naver/maps_geocoding/geocode/
    (free up to 3,000,000 calls/month). Credentials are read from
    config.ini, section [naver_id], keys client_id / client_secret.

    Returns
    -------
    list
        When exactly one match is found:
        [roadAddress, jibunAddress, englishAddress,
         longName of address elements 0..8, x (longitude), y (latitude)].
    dict
        The raw decoded response when zero or multiple matches come back.
    None
        On a non-200 HTTP status (an error message is printed).
    '''
    from configparser import ConfigParser
    parser = ConfigParser()
    parser.read('config.ini')
    # credentials required by the Naver geocoding gateway
    client_id = parser.get('naver_id', 'client_id')
    client_secret = parser.get('naver_id', 'client_secret')
    encText = urllib.parse.quote(search_text)
    url = 'https://naveropenapi.apigw.ntruss.com/map-geocode/v2/geocode?query='+encText
    request = urllib.request.Request(url)
    request.add_header("X-NCP-APIGW-API-KEY-ID", client_id)
    request.add_header("X-NCP-APIGW-API-KEY", client_secret)
    response = urllib.request.urlopen(request)
    rescode = response.getcode()
    if rescode == 200:
        response_body = response.read()
        # security fix: the original eval()'d the network payload, which is
        # unsafe and also breaks on JSON literals (true/false/null)
        result = json.loads(response_body.decode('utf-8'))
        # exactly one hit: flatten the interesting fields into a list
        if result['meta']['totalCount'] == 1:
            addr = result['addresses'][0]
            fields = [addr['roadAddress'], addr['jibunAddress'], addr['englishAddress']]
            for i in range(9):
                fields.append(addr['addressElements'][i]['longName'])
            fields.append(addr['x'])
            fields.append(addr['y'])
            return fields
        # zero or multiple hits: hand back the raw response for inspection
        return result
    else:
        # bug fix: getcode() returns an int; concatenating it to a str raised
        # TypeError in the original
        print("Error Code:" + str(rescode))
def geoCheck(fileName, addr_ori_col, addr_converted_col, jibun_addr_col,
             comp_addr_col, chk_col):
    '''
    Post-process an address-conversion result file.

    Rows whose road-address lookup failed get the original (or jibun /
    lot-number) address copied into *comp_addr_col* and are marked 'fail'
    in *chk_col*; successful rows get the converted road address. The
    result is written next to *fileName* with a '_chk' suffix and returned.
    '''
    if doesFileExist(fileName):
        data = pd.read_excel(fileName)
        # a lookup failed when the converted column holds the literal
        # 'status' or is missing entirely
        mask1 = data[addr_converted_col] == 'status'
        mask2 = data[addr_converted_col].isnull()
        mask = mask1 | mask2
        mask_inv = ~mask
        idx1 = data[mask1].index
        idx2 = data[mask2].index
        idx = data[mask].index
        idx_inv = data[mask_inv].index
        # record that the address conversion failed
        data.loc[idx, chk_col] = 'fail'
        # idx1 (lookup result == 'status'): fall back to the original address
        oldAddr = data.loc[idx1, addr_ori_col]
        data.loc[idx1, comp_addr_col] = oldAddr
        # idx2 (no road-name address at all): use the jibun address instead
        jibunAddr = data.loc[idx2, jibun_addr_col]
        data.loc[idx2, comp_addr_col] = jibunAddr
        # successful rows: copy the converted road-name address
        roadAddr = data.loc[idx_inv, addr_converted_col]
        data.loc[idx_inv, comp_addr_col] = roadAddr
        # save under a new name: <stem>_chk.<ext>
        newFileName = ''.join(fileName.split('.')[:-1]) + '_chk.' + \
                      fileName.split('.')[-1]
        data.to_excel(newFileName, index=False)
        return data
# The full metro/province alternation is long, so build it from shorter
# chunks and join them afterwards. (The chunks were line-wrapped in transit;
# rejoined here so the literals parse again.)
patList = ['์์ธํน๋ณ์|์์ธ|๋ถ์ฐ๊ด์ญ์|๋ถ์ฐ|๋๊ตฌ๊ด์ญ์|๋๊ตฌ',
           '์ธ์ฒ๊ด์ญ์|์ธ์ฒ|๊ด์ฃผ๊ด์ญ์|๊ด์ฃผ|๋์ ๊ด์ญ์|๋์ ',
           '์ธ์ฐ๊ด์ญ์|์ธ์ฐ|์ธ์ขํน๋ณ์์น์|์ธ์ข|๊ฒฝ๊ธฐ๋|๊ฒฝ๊ธฐ',
           '๊ฐ์๋|๊ฐ์|์ถฉ์ฒญ๋ถ๋|์ถฉ๋ถ|์ถฉ์ฒญ๋จ๋|์ถฉ๋จ|์ ๋ผ๋ถ๋|์ ๋ถ',
           '์ ๋ผ๋จ๋|์ ๋จ|๊ฒฝ์๋ถ๋|๊ฒฝ๋ถ|๊ฒฝ์๋จ๋|๊ฒฝ๋จ',
           '์ ์ฃผํน๋ณ์์น๋|์ ์ฃผ']
pat = '|'.join(patList)
# matches a leading metro/province name (full or abbreviated form)
reSIDO = re.compile(pat)

# company-name cleanup regexes: anything that is not Hangul, whitespace,
# digits or ASCII letters; runs of whitespace; corporate-suffix markers
irrelevant_regex = re.compile(r'[^๊ฐ-ํฃ\s0-9a-zA-Z]')
multispace_regex = re.compile(r'\s\s+')
stock_word_regex = re.compile(r'\(์ฃผ\)|\(์\)|\(์ \)|\(์ทจ\)|\(ํ\)|\(์ฌ\)|์ฃผ\)|์ฃผ์ํ์ฌ')

# mapping from full metro/province names to their short forms
sido_dict = {'์์ธํน๋ณ์':'์์ธ',
             '๋ถ์ฐ๊ด์ญ์':'๋ถ์ฐ',
             '๋๊ตฌ๊ด์ญ์':'๋๊ตฌ',
             '์ธ์ฒ๊ด์ญ์':'์ธ์ฒ',
             '๊ด์ฃผ๊ด์ญ์':'๊ด์ฃผ',
             '๋์ ๊ด์ญ์':'๋์ ',
             '์ธ์ฐ๊ด์ญ์':'์ธ์ฐ',
             '์ธ์ขํน๋ณ์์น์':'์ธ์ข',
             '๊ฒฝ๊ธฐ๋':'๊ฒฝ๊ธฐ',
             '๊ฐ์๋':'๊ฐ์',
             '์ถฉ์ฒญ๋ถ๋':'์ถฉ๋ถ',
             '์ถฉ์ฒญ๋จ๋':'์ถฉ๋จ',
             '์ ๋ผ๋ถ๋':'์ ๋ถ',
             '์ ๋ผ๋จ๋':'์ ๋จ',
             '๊ฒฝ์๋ถ๋':'๊ฒฝ๋ถ',
             '๊ฒฝ์๋จ๋':'๊ฒฝ๋จ',
             '์ ์ฃผํน๋ณ์์น๋':'์ ์ฃผ'}
def extract_sido(string):
    '''
    Extract the leading metro/province token from *string* using the
    module-level ``reSIDO`` pattern; return None when the string does not
    start with one.
    '''
    global reSIDO
    matched = reSIDO.match(str(string).strip())
    return matched.group() if matched is not None else None
def assign_no_symbols_name(df, col):
    '''
    Return *df* with an added ``newName`` column: df[col] with corporate
    suffixes, special characters and repeated whitespace replaced by single
    spaces (uses the module-level cleanup regexes), then stripped.
    '''
    cleaned = (df[col]
               .str.replace(stock_word_regex, ' ')
               .str.replace(irrelevant_regex, ' ')
               .str.replace(multispace_regex, ' ')
               .str.strip())
    return df.assign(newName=cleaned)
def modifyIndex(data, idx_col):
    '''
    De-duplicate the values of *idx_col* in a copy of *data* by appending a
    running position suffix ('value_0', 'value_1', ...) to every occurrence
    of a duplicated value. *idx_col* is assumed to be a regular column, not
    the current index. Returns the modified copy.
    '''
    result = data.copy()
    # count occurrences of each idx_col value
    counts = result.groupby(idx_col)[idx_col].count()
    dup_values = counts[counts > 1].index.tolist()
    # rename only the rows whose value occurs more than once
    dup_mask = result[idx_col].isin(dup_values)
    renamed = [f'{value}_{pos}' for pos, value in enumerate(result.loc[dup_mask, idx_col])]
    result.loc[dup_mask, idx_col] = renamed
    return result
def getCustNoInfoFromEDSM():
    '''
    Fetch the list of customer numbers whose owners consented to data
    sharing from the KEPCO EDSM open API and save it to
    EDSM_CustNo_API_<timestamp>.xlsx. Returns nothing.
    '''
    api_key = Constants.EDSM_API_KEY
    url = f'https://opm.kepco.co.kr:11080/OpenAPI/getCustNoList.do?serviceKey={api_key}&returnType=02'
    req = requests.get(url)
    html = req.text
    now = time.localtime()
    now_str = f'{now.tm_year:04d}{now.tm_mon:02d}{now.tm_mday:02d}_{now.tm_hour:02d}{now.tm_min:02d}{now.tm_sec:02d}'
    # the payload occasionally contains NUL escapes that break json.loads
    try:
        html = html.replace('\\u0000', '')
        json_data = json.loads(html)
        df_tmp = pd.DataFrame(json_data['custNoInfoList'])
    except (ValueError, KeyError):
        # bug fix: the original printed here and then fell through to
        # df_tmp.to_excel(...), raising NameError; bail out instead
        print('์๋ฌ ๋ฐ์')
        return
    df_tmp.to_excel(f'EDSM_CustNo_API_{now_str}.xlsx')
    print('getting API done')
class ElecData:
    '''
    Loader/matcher for electric load-profile data.

    History:
      21-02-15  data category renamed 'postGres' -> 'post'
      21-02-02  comments updated

    hive data: rows from the tb_opm_001 table are matched to reporting
    companies, then region ('์๋'), sector ('๋ถ๋ฌธ') and industry ('์์ข')
    columns are attached from tb_a01_018.
    postGres data: same pipeline, keyed on the preprocessed 'ente' column.

    NOTE: the Korean comments/strings of the original were line-wrapped in
    transit (leaving bare syntax-error lines); they are rejoined here. The
    rejoined column key '์์ข' is used consistently everywhere — confirm it
    matches the real column name in the source tables.
    '''

    def __init__(self, fileName, period, dataCat = 'post',
                 match = True,                   # attach company info
                 select = True, busiDiv = None,  # row filtering
                 category = None, enteCode = None,
                 diff = False                    # compare against EDSM
                 ):
        self.fileName = fileName
        self.period = period
        self.busiDiv = busiDiv
        self.category = category
        self.enteCode = enteCode
        self.srcPath = os.getcwd()
        self.dataPath = f'{self.srcPath}\\..\\data'
        self.jijung = self.load_jijung()
        self.tb_opm_001 = self.load_tb_opm_001()
        self.tb_a01_018 = self.load_tb_a01_018()
        # NOTE(review): compare_edsm() below reads self.edsm_custNo_excel /
        # self.edsm_custNo_api, which are only set by these commented loads —
        # diff=True currently fails. Confirm before enabling.
        # self.edsm_custNo_api = self.load_edsm_custNo_api()
        # self.edsm_custNo_excel= self.load_edsm_custNo_excel()
        self.diff = diff
        # compare the postGres matching result against the EDSM files,
        # only when requested
        if diff:
            self.compEdsm = self.compare_edsm()
        # infer hive/post from the file name when possible; file names are
        # expected to follow the naming convention
        if 'hive' in fileName:
            dataCat = 'hive'
        elif 'post' in fileName:
            dataCat = 'post'
        self.dataCat = dataCat
        # load the metering data itself
        if dataCat == 'post':
            self.data = self.load_postGres()
        elif dataCat == 'hive':
            self.data = self.load_hive()
        # attach ente_code, then sector / industry columns
        if match == True:
            self.match_data_tb_opm_001()
            self.match_jijung()
            self.match_tb_a01_018()
        if select == True:
            # restrict to the requested period
            if dataCat == 'post':
                self.data = self.select_period(date_col='meter_dd',
                                               date_format='%Y%m%d')
            elif dataCat == 'hive':
                self.data = self.select_period(date_col='part_key_mr_ymd',
                                               date_format='%Y%m%d')
            # narrow by company / industry / sector — first match wins
            if enteCode:
                self.data = self.select_enteCode()
            elif category:
                self.data = self.select_category()
            elif busiDiv:
                self.data = self.select_busiDiv()

    def load_tb_a01_018(self, fileName = Constants.TB_A01_018):
        '''Load the reporting-company master table and derive region,
        industry and sector columns from its codes.'''
        dtype = {'ENTE_CODE':str, 'KEMC_OLDX_CODE':str,
                 'CITY_PROV_CODE':str}
        usecols = Constants.TB_A01_018_USE_COLS
        print('TB_A01_018(์ ๊ณ ์์ฒด ์ ๋ณด) ๋ฐ์ดํฐ ๋ก๋')
        data = load_pickle_first(self.dataPath, fileName,
                                 reload = Constants.RELOAD,
                                 pickleWrite=True, dtype = dtype, usecols = usecols)
        data['์๋'] = data.CITY_PROV_CODE.map(Constants.SIDODict)
        data['์์ข'] = data.KEMC_OLDX_CODE.map(Constants.busiCatDict)
        data['๋ถ๋ฌธ'] = data.KEMC_OLDX_CODE.map(Constants.divCatDict)
        return data

    def select_enteCode(self):
        '''Keep only the rows of the requested company codes.'''
        # postGres rows carry the preprocessed 'ente' code; hive rows use
        # the ENTE_CODE attached from tb_opm_001
        if self.dataCat == 'post':
            mask = self.data['ente'].isin(self.enteCode)
        elif self.dataCat == 'hive':
            mask = self.data['ENTE_CODE'].isin(self.enteCode)
        return self.data.loc[mask]

    def select_category(self):
        '''Keep only the rows of the requested industry categories.'''
        mask = self.data['์์ข'].isin(self.category)
        return self.data.loc[mask]

    def select_busiDiv(self):
        '''Keep only the rows of the requested sectors.
        NOTE(review): filters on '๊ตฌ๋ถ', but the column attached by
        load_tb_a01_018 is '๋ถ๋ฌธ' — confirm the intended column name.'''
        mask = self.data['๊ตฌ๋ถ'].isin(self.busiDiv)
        return self.data.loc[mask]

    def select_period(self, date_col, date_format):
        '''Keep rows whose *date_col* falls inside self.period
        ('start:end', inclusive); *date_col* is parsed in place.'''
        self.data[date_col] = pd.to_datetime(self.data[date_col], format=date_format)
        period_split = self.period.split(':')
        mask1 = self.data[date_col] >= period_split[0]
        mask2 = self.data[date_col] <= period_split[1]
        return self.data.loc[mask1 & mask2]

    def match_jijung(self):
        '''Left-join the jijung (designation) table onto the data.'''
        # only the needed columns
        jijung = self.jijung[['ENTE', 'YEAR']]
        if self.dataCat == 'post':
            self.data = pd.merge(self.data, jijung, left_on = 'ente',
                                 right_on='ENTE', how='left')
        # hive rows only have the ENTE_CODE attached by
        # match_data_tb_opm_001; results can differ from postGres
        elif self.dataCat == 'hive':
            self.data = pd.merge(self.data, jijung, left_on = 'ENTE_CODE',
                                 right_on='ENTE', how='left')

    def match_tb_a01_018(self):
        '''Left-join region / sector / industry columns onto the data.'''
        tb_a01_018 = self.tb_a01_018[['ENTE_CODE', '์๋', '๋ถ๋ฌธ', '์์ข']]
        if self.dataCat == 'post':
            self.data = pd.merge(self.data, tb_a01_018, left_on = 'ente',
                                 right_on='ENTE_CODE', how='left')
        elif self.dataCat == 'hive':
            self.data = pd.merge(self.data, tb_a01_018, left_on = 'ENTE_CODE',
                                 right_on='ENTE_CODE', how='left')

    def add_ente_info(self):
        # placeholder — not implemented yet
        pass

    def match_data_tb_opm_001(self):
        '''
        Attach ENTE_CODE to the rows via the CUST_NO -> ENTE_CODE mapping
        in tb_opm_001, and record matched/unmatched unique counts in
        self.status. tb_opm_001 is used unpreprocessed here, so some
        companies stay unmatched (preprocessing TODO).
        '''
        ente = self.tb_opm_001.loc[:, ['CUST_NO', 'ENTE_CODE']]
        self.data = pd.merge(self.data, ente, left_on='cust_no',
                             right_on='CUST_NO', how='left')
        mask_matched = self.data.ENTE_CODE.notnull()
        matched = self.data.loc[mask_matched, ['cust_no', 'meter_no', 'ENTE_CODE']].nunique()
        matched.rename('matched', inplace=True)
        mask_unmatched = self.data.ENTE_CODE.isnull()
        unmatched = self.data.loc[mask_unmatched, ['cust_no', 'meter_no']].nunique()
        unmatched.rename('unmatched', inplace=True)
        total = pd.concat([matched, unmatched], axis=1)
        # postGres sanity check: its own 'ente' should agree with ENTE_CODE
        if (self.dataCat == 'post') and (self.diff == True):
            mask_diff = self.data['ente'] != self.data['ENTE_CODE']
            self.diff = self.data.loc[mask_diff]
            self.diff.to_excel('diff.xlsx')
        self.status = total

    def compare_edsm(self):
        '''
        Compare customer-number counts between the two EDSM sources (excel
        vs API); writes grp_total.xlsx and returns the combined frame.
        '''
        grp_excel = self.edsm_custNo_excel.groupby('๊ณ ๊ฐ๋ฒํธ')['๊ณ ๊ฐ๋ช'].count()
        grp_excel.rename('excel', inplace=True)
        grp_api = self.edsm_custNo_api.groupby('custNo')['custNm'].count()
        grp_api.rename('api', inplace=True)
        grp_total = pd.concat([grp_excel, grp_api], axis=1)
        grp_total.to_excel('grp_total.xlsx')
        if grp_total.isnull().sum().sum():
            print('Check EDSM data!')
        return grp_total

    def load_edsm_custNo_api(self, fileName = Constants.EDSM_CUSTNO_API):
        '''Load the EDSM customer-number API pull (customer numbers as str).'''
        dtype = {'custNo':str}
        print('EDSM_CustNo_API(ํ์ EDSM ๊ณ ๊ฐ๋ฒํธ API) ๋ก๋')
        return load_pickle_first(self.dataPath, fileName,
                                 reload = Constants.RELOAD,
                                 pickleWrite=True, dtype = dtype)

    def load_tb_opm_001(self, fileName = Constants.TB_OPM_001):
        '''Load the CUST_NO <-> ENTE_CODE mapping table.'''
        dtype = {'ENTE_CODE':str, 'CUST_NO':str}
        usecols = Constants.TB_OPM_001_USE_COLS
        print('tb_opm_001(์ ๊ณ ํ์ ๊ณ ๊ฐ๋ฒํธ) ๋ฐ์ดํฐ ๋ก๋')
        return load_pickle_first(self.dataPath, fileName,
                                 reload = Constants.RELOAD,
                                 pickleWrite=True, dtype = dtype, usecols = usecols)

    def load_postGres(self):
        '''Load the postGres metering export named in self.fileName.'''
        dtype = {'ente':str, 'cust_no':str, 'meter_no':str,
                 'kemc_oldx_code':str}
        print('postGres ๋ฐ์ดํฐ ๋ก๋')
        return load_pickle_first(self.dataPath,
                                 self.fileName,
                                 reload = Constants.RELOAD,
                                 pickleWrite=True, dtype = dtype)

    def load_hive(self):
        '''Load the hive metering export and normalise its column names.'''
        dtype = {'tb_day_lp_30day_bfor_data.cust_no':str,
                 'tb_day_lp_30day_bfor_data.meter_no':str}
        print('hive ๋ฐ์ดํฐ ๋ก๋')
        data = load_pickle_first(self.dataPath,
                                 self.fileName,
                                 reload = Constants.RELOAD,
                                 pickleWrite=True, dtype = dtype)
        # strip the hive table prefix from every column name
        data.columns = [col.replace(Constants.HIVE_ELEC_TABLE, '') for col in data.columns]
        # drop the export artefact column if present
        if 'Unnamed: 0' in data.columns:
            data.drop('Unnamed: 0', axis=1, inplace=True)
        return data

    def load_edsm_custNo_excel(self, fileName = Constants.EDSM_AGREEMENT):
        '''Load the EDSM consent-status excel (customer numbers as str).'''
        print('custNo(ํ์ EDSM ๊ณ ๊ฐ๋์ํํฉ excel) ๋ก๋')
        dtype = {'๊ณ ๊ฐ๋ฒํธ':str}
        return load_pickle_first(self.dataPath, fileName,
                                 reload = Constants.RELOAD,
                                 pickleWrite=True, skiprows=1, dtype = dtype)

    def load_jijung(self, fileName = Constants.JIJUNG):
        '''
        Load the jijung table (company name, sector, industry, company
        size). Tables written since July 2019 — the start of metering-data
        collection — should be merged together to avoid missing companies.
        '''
        dtype = {'ENTE':str}
        usecols = Constants.JIJUNG_USE_COLS
        print('jijung ๋ก๋')
        return load_pickle_first(self.dataPath, fileName,
                                 reload = Constants.RELOAD,
                                 pickleWrite=True, usecols = usecols, dtype=dtype)
def timer(func):
    """Decorator: print the wall-clock runtime of each call, in seconds,
    and pass the wrapped function's return value through unchanged."""
    def wrapper(*args, **kwargs):
        started = time.time()
        outcome = func(*args, **kwargs)
        print('์์์๊ฐ {:.1f}s\n'.format(time.time() - started))
        return outcome
    return wrapper
@timer
def load_pickle_first(myDir, fileName, reload = False, pickleWrite=True, **kwargs):
    '''
    Load *fileName* (searched recursively under *myDir*), preferring a
    cached .pkl written next to it; (re)write the pickle cache when
    *pickleWrite* is True. With reload=True the csv/excel source is always
    re-read. Extra kwargs are forwarded to the pandas reader; a 'usecols'
    kwarg additionally restricts the returned columns.

    Raises FileNotFoundError when the file cannot be located.
    '''
    exists, path = chkFileExists(myDir, fileName)
    if not exists:
        # bug fix: the original message lacked the f-prefix, so the literal
        # '{fileName}' was printed instead of the name
        raise FileNotFoundError(f'check if {fileName} exists')
    fileName_pkl = ''.join(fileName.split('.')[:-1]) + '.pkl'
    if reload:
        # existence already confirmed above; no need to re-check
        data = load_csv_excel_file(path, fileName, **kwargs)
        # save the pickle for the next load
        if pickleWrite:
            data.to_pickle(os.path.join(path, fileName_pkl))
    else:
        # try the pickle cache first, fall back to csv/excel
        try:
            data = pd.read_pickle(os.path.join(path, fileName_pkl))
        except Exception:  # narrowed from a bare except
            data = load_csv_excel_file(path, fileName, **kwargs)
            if pickleWrite:
                data.to_pickle(os.path.join(path, fileName_pkl))
    if 'usecols' in kwargs:
        data = data[kwargs['usecols']]
    return data
def load_csv_excel_file(path, fileName, **kwargs):
    '''
    Load an excel (.xlsx/.xls) or csv file from *path*; kwargs are
    forwarded to the pandas reader. Returns None for unknown extensions
    (matching the original behaviour); propagates FileNotFoundError.
    '''
    # decide the reader from the file extension
    extension = fileName.split('.')[-1]
    print('(csv ๋๋ excel ํ์ผ ๋ก๋)')
    try:
        if (extension == 'xlsx') or (extension == 'xls'):
            data = pd.read_excel(os.path.join(path, fileName), **kwargs)
        elif (extension == 'csv'):
            data = pd.read_csv(os.path.join(path, fileName), low_memory=False, **kwargs)
        return data
    except FileNotFoundError:
        # bug fix: 'raise FileNotFoundError' discarded the original message
        # and path; a bare raise keeps them
        raise
def chkFileExists(myDir, fileName):
    '''
    Search *myDir* recursively (deepest directories first) for *fileName*.
    Returns (True, containing_directory) on the first hit, (False, '')
    when the file is not found anywhere under *myDir*.
    '''
    for dirpath, _dirnames, filenames in os.walk(myDir, topdown=False):
        if fileName in filenames:
            return True, dirpath
    return False, ''
class Error(Exception):
    """Base class for exceptions in this module; specific errors should
    subclass this so callers can catch the whole family at once."""
    pass
def melt_elec(df, id_vars, value_vars):
    '''
    Reshape wide-format metering data to long format.

    Each *value_vars* column name is expected to end in an HHMM suffix;
    that time of day is added to the parsed 'meter_dd' date (format
    %Y-%m-%d), and the resulting timestamp becomes the index. The helper
    'variable' column is dropped.
    '''
    long_df = df.melt(id_vars=id_vars, value_vars=value_vars)
    base_date = pd.to_datetime(long_df['meter_dd'], format='%Y-%m-%d')
    hours = long_df['variable'].str[-4:-2].astype(float) * pd.Timedelta('1h')
    minutes = long_df['variable'].str[-2:].astype(float) * pd.Timedelta('1m')
    long_df[id_vars] = base_date + hours + minutes
    return long_df.set_index('meter_dd').drop('variable', axis=1)
def isRange(strDate):
    '''
    Report whether *strDate* is a colon-separated date range.

    Returns (True, start, end) for 'start:end', and (False, strDate) for a
    single date. (More than one colon falls through and returns None, as in
    the original.)
    '''
    parts = strDate.split(':')
    if len(parts) == 2:
        return True, parts[0], parts[1]
    if len(parts) == 1:
        return False, strDate
def viewHeader(myDir, fileName, reload = False, pickleWrite=False, **kwargs):
    '''
    Peek at a (potentially large) file before committing to a full load.
    Same cache logic as load_pickle_first (NOTE: near-duplicate of it —
    candidate for consolidation); pass nrows=... to limit the rows
    returned. Extra kwargs are forwarded to the pandas reader; 'usecols'
    also restricts the returned columns.

    Raises FileNotFoundError when the file cannot be located.
    '''
    exists, path = chkFileExists(myDir, fileName)
    if not exists:
        # bug fix: message lacked the f-prefix in the original
        raise FileNotFoundError(f'check if {fileName} exists')
    fileName_pkl = ''.join(fileName.split('.')[:-1]) + '.pkl'
    if reload:
        # existence already confirmed above
        data = load_csv_excel_file(path, fileName, **kwargs)
        if pickleWrite:
            data.to_pickle(os.path.join(path, fileName_pkl))
    else:
        # try the pickle cache first, fall back to csv/excel
        try:
            data = pd.read_pickle(os.path.join(path, fileName_pkl))
            # the pandas readers honour nrows themselves, but the pickle is
            # the full table — slice it here to match
            if 'nrows' in kwargs:
                data = data.iloc[:kwargs['nrows']]
        except Exception:  # narrowed from a bare except
            data = load_csv_excel_file(path, fileName, **kwargs)
            if pickleWrite:
                data.to_pickle(os.path.join(path, fileName_pkl))
    if 'usecols' in kwargs:
        data = data[kwargs['usecols']]
    return data
def grpEnteLoadRate(df, data_type = 'post', ente_col = 'ente',
                    cust_no_col = 'cust_no', date_col='meter_dd',
                    elec_kwd = 'elcp'):
    '''
    Summarise postGres load-profile rows per (company, customer, day).

    Columns whose name contains *elec_kwd* are treated as interval
    readings. Returns a 3-tuple:
      1) cust_no_elecCons          — daily consumption totals,
      2) cust_no_load_by_dailyMax  — daily load rate (avg / daily max),
      3) cust_no_load_by_yearlyMax — daily load rate (avg / period max),
    all indexed by (company, customer) with one column per day.
    Returns None when data_type is not 'post' (unchanged behaviour).
    '''
    if data_type == 'post':
        # columns holding the interval readings
        elec_col = [col for col in df.columns if elec_kwd in col]
        # aggregate per company/customer/day FIRST: a customer can have
        # several meters, and the unstack below errors on duplicates
        grp1 = df.groupby([ente_col, cust_no_col, date_col])\
                 [elec_col].sum()
        # per-day total / mean / maximum across the interval columns
        grp1['elecSum'] = grp1[elec_col].sum(axis=1)
        grp1['elecAvg'] = grp1[elec_col].mean(axis=1)
        grp1['elecMax'] = grp1[elec_col].max(axis=1)
        grp2 = grp1[['elecSum', 'elecAvg', 'elecMax']].unstack()
        elecAvg = grp2.loc[:, ('elecAvg', slice(None))].values
        elecMax = grp2.loc[:, ('elecMax', slice(None))].values
        # period maximum per (company, customer), NaN-safe
        elecMaxYear = np.nanmax(elecMax, axis=1).reshape(-1, 1)
        # consumption table to return
        cust_no_elecCons = grp2.loc[:, ('elecSum', slice(None))]
        ## load-rate computation
        # the dates form the second level of the unstacked columns
        dates = grp2.loc[:, ('elecAvg', slice(None))].columns.get_level_values(1)
        idx_load = pd.MultiIndex.from_product([['loadRate'], dates],
                                              names = ['l1', 'l2'])
        # avg / max, skipping zero or NaN maxima. Bug fix: the original used
        # where= without out=, leaving the skipped cells as UNINITIALISED
        # memory (nondeterministic); supply out= so they are a defined 0.0.
        elecDayLoadRate = np.divide(elecAvg, elecMax,
                                    out=np.zeros_like(elecAvg),
                                    where=(~np.isnan(elecMax)) & (elecMax != 0))
        elecDayLoadRate_byYearMax = np.divide(elecAvg, elecMaxYear,
                                              out=np.zeros_like(elecAvg),
                                              where=(~np.isnan(elecMaxYear)) & (elecMaxYear != 0))
        # per-customer daily load rate against daily / period maxima
        cust_no_load_by_dailyMax = pd.DataFrame(elecDayLoadRate, index=grp2.index,
                                                columns = idx_load)
        cust_no_load_by_yearlyMax = pd.DataFrame(elecDayLoadRate_byYearMax, index=grp2.index,
                                                 columns = idx_load)
        # consumption, load rate (daily max), load rate (period max)
        return cust_no_elecCons, cust_no_load_by_dailyMax, cust_no_load_by_yearlyMax
def load_json(path, fileName):
    '''
    Read a JSON file (searched recursively under *path*) whose top level
    holds a 'custNoInfoList' array and return that array as a DataFrame.
    Prints 'No files' and returns None when the file cannot be found.
    '''
    found, root = chkFileExists(path, fileName)
    if not found:
        print('No files')
        return None
    with open(os.path.join(root, fileName), 'r', encoding='utf-8') as fh:
        raw = fh.read()
    import json
    payload = json.loads(raw)
    return pd.DataFrame(payload['custNoInfoList'])
def load_cust_no_list_hive(path, fileName, date):
    '''
    Load the hive customer-number export (csv, searched recursively under
    *path*), normalise its columns, and return only the rows whose
    writ_dtm falls on *date* (a 'YYYYMMDD' string). Prints 'No files' and
    returns None when the file is missing.
    '''
    exists, path = chkFileExists(path, fileName)
    # only proceed when the file was found
    if exists:
        df = pd.read_csv(os.path.join(path, fileName), dtype={'tb_cust_no_list.cust_no':str})
        hive_table = 'tb_cust_no_list.'
        # s()
        # NOTE(review): str.replace here treats the pattern as a regex in
        # older pandas, so the '.' matches any character — confirm the
        # installed pandas version / intended behaviour.
        df.columns = df.columns.str.replace(hive_table, '')
        # customer numbers are fixed-width 10 digits; restore leading zeros
        df.cust_no = df.cust_no.str.zfill(10)
        df['writ_dtm'] = pd.to_datetime(df['writ_dtm'], format='%Y-%m-%d %H:%M:%S')
        # re-format the timestamp to 'YYYYMMDD' so it compares to *date*
        df['writ_dtm'] = df['writ_dtm'].apply(lambda x:datetime.datetime.strftime(x, format='%Y%m%d'))
        mask = df['writ_dtm'] == date
        return df[mask]
    else:
        print('No files')
if __name__ == '__main__':
    # When run as a script, pull the consented customer-number list from the
    # EDSM API and write it to an Excel file.
    getCustNoInfoFromEDSM()
    # pd.set_option('display.max_columns', 500)
    # pd.set_option('display.max_rows', 500)
    # small change
|
def palindrome_tester(number):
    """Return True when the string form of *number* reads the same forwards
    and backwards, else False."""
    text = str(number)
    # idiom: return the comparison directly instead of if/else True/False
    return text == text[::-1]
def check_palindrome():
    """
    Runs through all 6-digit numbers and checks the mentioned conditions.
    The function prints out the numbers that satisfy this condition.
    Note: It should print out the first number (with a palindrome in its last 4 digits),
    not all 4 "versions" of it.
    """
    # NOTE(review): range(100000, 999999) excludes 999999 itself — confirm
    # whether the last 6-digit number should be checked too.
    for current_number in range (100000,999999):
        # '== 1' relies on True == 1; a direct truth test would be clearer.
        # Reassigning current_number below does NOT affect the for-loop's
        # iteration — it only advances the local value so the next
        # "odometer readings" (+1, +2, +3) can be inspected.
        if palindrome_tester(str(current_number)[2:6]) == 1:
            possible_pali = current_number
            current_number = current_number+1
            if palindrome_tester(str(current_number)[1:6]) == 1:
                current_number = current_number+1
                if palindrome_tester(str(current_number)[1:5]) == 1:
                    current_number = current_number+1
                    # all four conditions held: report the starting reading
                    if palindrome_tester(str(current_number)) == 1:
                        print (possible_pali)
|
11,692 | e20095b806641e8b457a996831a6e11df74f14a7 | from confluent_kafka import Consumer, KafkaError
from confluent_kafka import Producer
# Copy up to maxmsgs messages from one Kafka topic to another using the
# confluent_kafka Consumer/Producer pair configured below.
settings = {
    #'bootstrap.servers': '192.41.108.22:9092',
    'bootstrap.servers': '192.168.0.25:29092',
    'group.id': 'copy-topic',
    'client.id': 'client-1',
    'enable.auto.commit': True,
    'session.timeout.ms': 6000,
    'default.topic.config': {'auto.offset.reset': 'smallest'}
}
maxmsgs = 10000                     # stop after this many polled messages
source = 'ztf_20200404_programid1'  # topic to read from
dest = 'ztf_test'                   # topic to write to
c = Consumer(settings)
p = Producer(settings)
c.subscribe([source])
n = 0   # messages processed (see NOTE below)
o = -1  # offset of the last successfully copied message
try:
    while n < maxmsgs:
        msg = c.poll(0.1)
        if msg is None:
            # poll timeout: nothing to copy yet; does not count toward n
            continue
        elif not msg.error():
            #print ("Got message with offset " + str(msg.offset()))
            p.produce(dest, value=msg.value())
            o = msg.offset()
        else:
            print ("Error")
        # NOTE(review): n is incremented for error results too, so the final
        # "Copied" count includes errored polls — confirm intent.
        n += 1
finally:
    # close the consumer first, then drain any buffered produce requests
    c.close()
    p.flush()
print ("Copied {:d} messages up to offset {:d}".format(n,o))
|
11,693 | a42bf5255e190783676f8c3d2d222d042a1f4657 | # Generated by Django 2.2.3 on 2019-08-17 11:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a required `notifications`
    # foreign key to the Profile model. Existing rows are backfilled with
    # Notification pk=1 (default=1 with preserve_default=False, i.e. the
    # default is used only for this migration, not kept on the field).

    dependencies = [
        ('feedpage', '0054_auto_20190817_1858'),
    ]

    operations = [
        migrations.AddField(
            model_name='profile',
            name='notifications',
            # CASCADE: deleting the Notification deletes the Profile rows
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='feedpage.Notification'),
            preserve_default=False,
        ),
    ]
|
11,694 | d7c9d40c7d5290f5f0ea3f8b4039861b46a39eb9 | #!/usr/bin/env python
import roslib
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft
import mlpy.wavelet as wave
import time
class image_converter:
    def __init__(self):
        # Subscribes to the thermal camera topic and prepares the state used
        # by the mouse-driven region selection in callback()/onmouse().
        self.flag = True                 # True until a region is selected
        self.bridge = CvBridge()         # ROS <-> OpenCV image conversion
        self.image_sub = rospy.Subscriber("thermal/image_raw",Image,self.callback)
        #self.image_sub = rospy.Subscriber("camera/image",Image, self.callback)
        # NOTE(review): this file handle is never closed in the visible code
        self.f = open('Pulso2w', 'w')
        self.cuenta=0                    # frame counter for the capture phase
        self.selection = None            # (x0, y0, x1, y1) selected rectangle
        self.drag_start = None           # mouse-drag anchor point
        self.tracking_state = 0
        self.show_backproj = False
        self.t0=time.time()              # capture start timestamp
        self.cv_image=None               # last received frame
        # self.thrs1 = 500
        # self.thrs2 = 1200
        self.thrs1 = 70                  # Canny thresholds (see commented edge code)
        self.thrs2 = 80
        self.trackers = []               # captured ROI frames
        self.datos=[]
        self.proceso=False               # True while the capture loop runs
    def onmouse(self, event, x, y, flags, param):
        # OpenCV mouse callback: drag with the left button to define the
        # rectangular selection (clamped to the image bounds).
        x, y = np.int16([x, y]) # BUG
        if event == cv2.EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
            self.tracking_state = 0
        if self.drag_start:
            if flags & cv2.EVENT_FLAG_LBUTTON:
                h, w = self.cv_image.shape[:2]
                xo, yo = self.drag_start
                # normalise the drag corners and clamp to [0, w] x [0, h]
                x0, y0 = np.maximum(0, np.minimum([xo, yo], [x, y]))
                x1, y1 = np.minimum([w, h], np.maximum([xo, yo], [x, y]))
                self.selection = None
                # only accept a rectangle with positive area
                if x1-x0 > 0 and y1-y0 > 0:
                    self.selection = (x0, y0, x1, y1)
            else:
                # button released: finish the drag and start tracking if a
                # valid selection exists
                self.drag_start = None
                if self.selection is not None:
                    self.tracking_state = 1
def show_hist(self):
bin_count = self.hist.shape[0]
bin_w = 24
img = np.zeros((256, bin_count*bin_w, 1), np.uint8)
for i in xrange(bin_count):
h = int(self.hist[i])
cv2.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1)
img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
cv2.imshow('hist', img)
def callback(self,data):
try:
#self.cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
self.cv_image = self.bridge.imgmsg_to_cv2(data, "mono8")
except CvBridgeError as e:
print(e)
self.cv_image = cv2.blur(self.cv_image, (5,5))
#gray = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2GRAY)
gray= self.cv_image.copy()
#vise = self.cv_image.copy()
#self.t0=time.time()-self.t0
#print self.t0
#print self.flag
#print self.proceso
#print '___'
if self.selection and self.flag:
x0, y0, x1, y1 = self.selection
x1=x0+30
y1=y0+30
self.selection=x0,y0,x1,y1
print x0
print x1
print y0
print y1
#self.track_window = (x0-30, y0-30, x1-x0+30, y1-y0+30)
self.track_window = (x0, y0, x1, y1)
self.flag=False
vis_roi = gray[y0:y1, x0:x1]
cv2.bitwise_not(vis_roi, vis_roi)
cv2.rectangle(gray,(x0,y0),(x1,y1),75,1)
self.trackers.append(vis_roi)
cv2.imshow("Image window", gray)
cv2.setMouseCallback("Image window", self.onmouse)
# edge = cv2.Canny(self.cv_image[y0:y1, x0:x1], self.thrs1, self.thrs2, apertureSize=3)
# vise = vise[y0:y1, x0:x1]
# vise[edge != 0] = (255self.trackers.append(tracker) # cv2.imshow('edge', vise)
# x=np.mean(gray[y0:y1, x0:x1])
# self.datos.append(x)
# self.cv_image = cv2.blur(self.cv_image, (5,5))
#if not self.flag:
if self.flag:
cv2.imshow("Image window", gray)
cv2.setMouseCallback("Image window", self.onmouse)
# cv2.waitKey(1)
if not self.flag and self.proceso:
x0, y0, x1, y1 = self.selection
#if self.cuenta<2148 : #256:
if self.cuenta<1124 : #256:
self.trackers.append(gray[y0:y1, x0:x1])
self.cuenta+=1
if self.cuenta==1:
self.t0=time.time()
#print self.cuenta
else:
print x0
print x1
print y0
print y1
self.t0=time.time()-self.t0
print self.t0
self.proceso=False
self.flag=True
self.cuenta=0
i=0.0
for tracker in self.trackers:
i+=1
cv2.imwrite('Pulso2_%i.bmp'%i,tracker)
x=np.mean(tracker)
self.datos.append(x)
self.f.write(str(x))
self.f.write(' ')
#print x
ndatos=np.array(self.datos)
N=float(len(ndatos))
ndatos=ndatos[51:N-50]
#ndatos=ndatos[:N-1]
#[1:]
print len(ndatos)
print '___'
#print ndatos
scales = wave.autoscales(N=ndatos.shape[0], dt=1, dj=0.25, wf='dog', p=2)
X = wave.cwt(x=ndatos, dt=1, scales=scales, wf='dog', p=2)
#Xf=X[15:,:] #quita las bajas frecuencias
#scales=scales[15:]
#Xf=X[:15,:] #quita las altas frecuencias
#scales=scales[:15]
#Xf=X[15:18,:] #quita las altas frecuencias
#scales=scales[15:18]
#Xf=X[15:25,:] #quita las altas frecuencias
#scales=scales[15:25]
Xf=X[18:25,:] #quita las altas frecuencias
scales=scales[18:25]
X2=wave.icwt(Xf, dt=1, scales=scales, wf='dog', p=2)
fig = plt.figure(1)
ax1 = plt.subplot(2,2,1)
p1 = ax1.plot(ndatos)
#ax1.autoscale_view(tight=True)
ax2 = plt.subplot(2,2,2)
p2 = ax2.imshow(np.abs(X), origin='lower', extent=[-4,4,-1,1], aspect=4)
#p2 = ax2.imshow(np.abs(X), interpolation='nearest')
#ax2.autoscale_view(tight=True)
ax3 = plt.subplot(2,2,3)
p3 = ax3.imshow(np.abs(Xf),origin='lower', extent=[-4,4,-1,1], aspect=4)
#p3 = ax3.imshow(np.abs(Xf), interpolation='nearest')
#ax3.autoscale_view(tight=True)
ax4 = plt.subplot(2,2,4)
p4 = ax4.plot(np.abs(X2))
#ax4.autoscale_view(tight=True)
#plt.show()
#self.datos=[]
#ndatos=[]
self.trackers = []
self.datos=[]
#print len(ndatos)
# Number of sample points
nN = len(X2)
# sample spacing
#T = 1.0 / 60.0
T=.01862
x = np.linspace(0.0, nN*T, nN)
yf = fft(X2)
xf = np.linspace(0.0, 1.0/(2.0*T), nN/2)
plt.figure(2)
plt.subplot(211)
plt.plot(X2)
plt.subplot(212)
plt.plot(xf*60, 2.0/nN * np.abs(yf[0:nN/2]))
plt.show()
#print self.trackers.shape
ch =cv2.waitKey(1)
if ch== 27:
# Number of sample points
#Nd = len(self.datos)
#print type(self.datos)
print 'N'
#print self.datos.shape
if ch == ord ('c'):
self.selection = None
self.flag= True
self.trackers = []
self.datos=[]
if ch == ord ('i'):
self.proceso=True
self.flag= False
print 'i'
print self.selection
'''
ndatos=np.array(self.datos)
x=ndatos[5:]
#[1:]
N=float(len(ndatos))
print N
scales = wave.autoscales(N=x.shape[0], dt=1, dj=0.25, wf='dog', p=2)
X = wave.cwt(x=x, dt=1, scales=scales, wf='dog', p=2)
scales=scales[15:]
X2=wave.icwt(Xf, dt=1, scales=scales, wf='dog', p=2)
fig = plt.figure(1)
ax1 = plt.subplot(2,2,1)
p1 = ax1.plot(x)
ax1.autoscale_view(tight=True)
ax2 = plt.subplot(2,2,2)
p2 = ax2.imshow(np.abs(X), interpolation='nearest')
ax3 = plt.subplot(2,2,4)
p3 = ax3.imshow(np.abs(Xf), interpolation='nearest')
ax4 = plt.subplot(2,2,3)
p4 = ax4.plot(np.abs(X2))
plt.show()
self.datos=[]
# Number of sample points
N = len(X2)
# sample spacing
T = 1.0 / 60.0
x = np.linspace(0.0, N*T, N)
yf = fft(X2)
xf = np.linspace(0.0, 1.0/(2.0*T), N/2)
plt.figure(2)
plt.subplot(211)
plt.plot(X2)
plt.subplot(212)
plt.plot(xf, 2.0/N * np.abs(yf[0:N/2]))
plt.show()
'''
'''
N = len(self.datos)
#print type(self.datos)
#print N
#print self.datos.shape
ndatos=np.array(self.datos)
ndatos=ndatos[5:]
#[1:]
N=float(len(ndatos))
# sample spacing
T = 1.0 / 60.0
x = np.linspace(0.0, N*T, N)
yf = fft(self.datos)
xf = np.linspace(0.0, 1.0/(2.0*T), N/2)
plt.figure()
plt.subplot(211)
plt.plot(self.datos)
plt.subplot(212)
plt.plot(xf, 2.0/N * np.abs(yf[0:N/2]))
plt.show()
self.datos=[]
# plt.grid()
'''
def main(args):
    """Start the image_converter node and spin until interrupted."""
    converter = image_converter()
    rospy.init_node('image_converter', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()
# Script entry point: argv is forwarded, though main() ignores its value.
if __name__ == '__main__':
    main(sys.argv)
|
11,695 | baeae6eab4ad6f60ae2a3993f6fe4cbf21cdbeec | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 18 18:37:33 2020
@author: Varun
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.holtwinters import SimpleExpSmoothing # SES
from statsmodels.tsa.holtwinters import Holt # Holts Exponential Smoothing
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from datetime import datetime,time
# Load quarterly Coca-Cola sales data and do exploratory plots.
cola=pd.read_excel("F:\\EXCEL R\\ASSIGNMENTS\\forecasting\\CocaCola_Sales_Rawdata.xlsx")
cola.Sales.plot()
###cola['date']=pd.to_datetime(cola.Quarter,format="%b=%y")
heatmap_y_month = pd.pivot_table(data=cola,values="Sales",index="Quarter",aggfunc="mean",fill_value=0)
sns.heatmap(heatmap_y_month,annot=True,fmt="g")
sns.boxplot(x="Quarter",y="Sales",data=cola)
sns.lineplot(x="Quarter",y="Sales",data=cola)
# Overlay moving averages of increasing window sizes on the raw series.
cola.Sales.plot(label="org")
for i in range(2,24,6):
    cola["Sales"].rolling(i).mean().plot(label=str(i))
plt.legend(loc=3)
# Chronological train/test split (last 12 quarters held out).
Train = cola.head(30)
Test = cola.tail(12)
# Creating a function to calculate the MAPE value for test data
def MAPE(pred,org):
    """Return the mean absolute percentage error of ``pred`` vs ``org``."""
    pct_error = np.abs((pred - org) / org) * 100
    return np.mean(pct_error)
# --- Simple exponential smoothing ---
ses_model = SimpleExpSmoothing(Train["Sales"]).fit()
pred_ses = ses_model.predict(start = Test.index[0],end = Test.index[-1])
MAPE(pred_ses,Test.Sales) ##16.64
# --- Holt's linear trend method ---
hw_model = Holt(Train["Sales"]).fit()
pred_hw = hw_model.predict(start = Test.index[0],end = Test.index[-1])
MAPE(pred_hw,Test.Sales)
##8.997
# --- Holt-Winters: additive seasonality + additive trend (quarterly, period 4) ---
hwe_model_add_add = ExponentialSmoothing(Train["Sales"],seasonal="add",trend="add",seasonal_periods=4).fit()
pred_hwe_add_add = hwe_model_add_add.predict(start = Test.index[0],end = Test.index[-1])
MAPE(pred_hwe_add_add,Test.Sales)
###4.54
# --- Holt-Winters: multiplicative seasonality + additive trend ---
# NOTE(review): seasonal_periods=12 looks inconsistent with quarterly data
# (the additive model above uses 4) — confirm whether this was intentional.
hwe_model_mul_add = ExponentialSmoothing(Train["Sales"],seasonal="mul",trend="add",seasonal_periods=12).fit()
pred_hwe_mul_add = hwe_model_mul_add.predict(start = Test.index[0],end = Test.index[-1])
MAPE(pred_hwe_mul_add,Test.Sales)
####7.59
# Of the four models, HW additive/additive has the lowest test MAPE (4.54),
# so evaluate its in-sample predictions over the whole series.
pred_test = pd.Series(hwe_model_add_add.predict(start = cola.index[0],end = cola.index[-1]))
pred_test.index = cola.index
MAPE(pred_test,cola.Sales)
##5.131
# Visualization of the chosen model's predictions.
plt.plot(pred_hwe_add_add.index,pred_hwe_add_add,label="HoltsWinterExponential_1",color="brown")
11,696 | 54dc65219c82fe82197259f8791d02294b3539bf | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.platform import gfile
import config
from os import listdir
from os.path import join
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'
BOTTLENECK_TENSOR_SIZE = 2048
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
MODEL_DIR = 'inception'
BOUND_BOX_PATH = config.paths['IMG_DIR_SYNTHETIC']
BOTTLENECK_PATH = config.paths['BOTTLENECK_PATH_SYNTHETIC']
def create_bottleneck_file(sess, jpeg_data_tensor, bottleneck_tensor):
    """Compute Inception pool_3 bottleneck vectors for every .jpg under
    IMG_DIR_SYNTHETIC and cache each one as a comma-separated .txt file
    in BOTTLENECK_PATH.

    Args:
        sess: an open tf.Session with the Inception graph loaded.
        jpeg_data_tensor: input tensor fed with raw JPEG bytes.
        bottleneck_tensor: pool_3 output tensor to evaluate.

    Images that fail (unreadable / undecodable) are collected and printed at
    the end instead of aborting the whole run.
    """
    file_list = []
    file_glob = os.path.join(config.paths['IMG_DIR_SYNTHETIC'], '*.jpg')
    file_list.extend(gfile.Glob(file_glob))
    file_list = sorted(file_list)
    error_list = []
    for i, image in enumerate(file_list):
        if i % 100 == 0:
            print("{}/{} files processed.".format(i, len(file_list)))
        # Cache file is named after the image's base name.
        # (was: str(image).split('/')[-1], which breaks on Windows paths)
        image_name = os.path.basename(str(image))
        path = os.path.join(BOTTLENECK_PATH, image_name + '.txt')
        try:
            image_data = gfile.FastGFile(image, 'rb').read()
            bottleneck_values = run_bottleneck_on_image(sess, image_data, jpeg_data_tensor, bottleneck_tensor)
            bottleneck_string = ','.join(str(x) for x in bottleneck_values)
            with open(path, 'w') as bottleneck_file:
                bottleneck_file.write(bottleneck_string)
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; record the failure and continue.
            error_list.append(image)
    print('ERROR LIST')
    for err in error_list:
        print(err)
    print('end error list')
    print('All bottleneck were create.')
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
                            bottleneck_tensor):
    """Feed raw JPEG bytes through the graph and return the squeezed
    bottleneck vector as a 1-D numpy array."""
    raw_values = sess.run(bottleneck_tensor,
                          feed_dict={image_data_tensor: image_data})
    return np.squeeze(raw_values)
def maybe_download_and_extract():
    """Fetch the Inception-v3 archive into MODEL_DIR (if not already there)
    and unpack it."""
    dest_directory = MODEL_DIR
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # Simple carriage-return progress meter.
            pct = float(count * block_size) / float(total_size) * 100.0
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, pct))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def create_inception_graph():
    """Load the frozen Inception GraphDef from MODEL_DIR and return
    (graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor)."""
    with tf.Session() as sess:
        model_filename = os.path.join(MODEL_DIR, 'classify_image_graph_def.pb')
        with gfile.FastGFile(model_filename, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tensors = tf.import_graph_def(
                graph_def, name='',
                return_elements=[BOTTLENECK_TENSOR_NAME,
                                 JPEG_DATA_TENSOR_NAME,
                                 RESIZED_INPUT_TENSOR_NAME])
            bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = tensors
    return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
def main():
    """Entry point: fetch the model, build the graph, cache all bottlenecks."""
    maybe_download_and_extract()
    graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = create_inception_graph()
    session = tf.Session()
    create_bottleneck_file(session, jpeg_data_tensor, bottleneck_tensor)
# Script entry point.
if __name__ == '__main__':
    main()
11,697 | edef1315b2e2ddf75a9d5cd9b49a2ce50a13d1f4 | import datetime
from django.contrib.auth.models import User
from django.db import models
from django.db.models.deletion import CASCADE
class GetFirstOrCreateMixin():
    """Mixin adding a ``get_first_or_create`` classmethod to a Django model.

    Unlike ``get_or_create`` it never raises MultipleObjectsReturned: it just
    takes the first match.
    """
    @classmethod
    def get_first_or_create(cls, **kwargs):
        """Return ``(instance, created)``: the first row matching ``kwargs``,
        or a newly created one.

        The original used ``filter(...)[0]`` in a bare ``except:``, which
        treated *any* error (including FieldError from a typoed field name)
        as "not found" and fell through to ``create`` — masking real bugs.
        """
        first = cls.objects.filter(**kwargs).first()
        if first is not None:
            return first, False
        return cls.objects.create(**kwargs), True
class Crag(GetFirstOrCreateMixin, models.Model):
    """A climbing area, identified by name and country."""
    name = models.TextField()
    country = models.TextField()

    def __str__(self):
        return '{} - {}'.format(self.name, self.country)
class Sector(GetFirstOrCreateMixin, models.Model):
    """A named sector within a crag."""
    name = models.TextField()
    # on_delete added: mandatory from Django 2.0 and consistent with the
    # User FKs in this file; CASCADE matches the pre-2.0 implicit default,
    # so behavior is unchanged.
    crag = models.ForeignKey(Crag, on_delete=CASCADE)

    def __str__(self):
        return '{} - {}'.format(self.name, self.crag.name)
class Route(GetFirstOrCreateMixin, models.Model):
    """A single route in a sector, with its grade."""
    name = models.TextField()
    # on_delete added: mandatory from Django 2.0 and consistent with the
    # User FKs in this file; CASCADE matches the pre-2.0 implicit default.
    sector = models.ForeignKey(Sector, on_delete=CASCADE)
    grade = models.CharField(max_length=20)

    def __str__(self):
        return '{} - {} - {}'.format(self.name, self.grade, self.sector)
class ClimbRecord(models.Model):
    """One logged ascent of a route by a user on a given date."""
    # on_delete added: mandatory from Django 2.0 and consistent with the
    # ``user`` FK below; CASCADE matches the pre-2.0 implicit default.
    route = models.ForeignKey(Route, on_delete=CASCADE)
    date = models.DateField(default=datetime.date.today)
    user = models.ForeignKey(User, on_delete=CASCADE)
    # Climbing style label (e.g. onsight/flash) — TODO confirm vocabulary.
    style = models.CharField(max_length=20)

    def __str__(self):
        return ('{}: {} - {} {} / {}'
                .format(self.user.username, self.route.name, self.route.grade, self.style, self.date))
class GradeScore(models.Model):
    """Per-user numeric score assigned to a climbing grade."""
    grade = models.CharField(max_length=20)
    score = models.FloatField()
    user = models.ForeignKey(User, on_delete=CASCADE)

    def __str__(self):
        return '{} = {} / {}'.format(self.grade, self.score, self.user)

    class Meta:
        # One score per (user, grade) pair.
        unique_together = ('user', 'grade',)
class ClimbStyle(models.Model):
    """Per-user multiplier applied to scores for a given climbing style."""
    style = models.CharField(max_length=20)
    multiplier = models.FloatField()
    user = models.ForeignKey(User, on_delete=CASCADE)

    def __str__(self):
        return '{} = {} / {}'.format(self.style, self.multiplier, self.user)

    class Meta:
        # One multiplier per (user, style) pair.
        unique_together = ('user', 'style',)
|
11,698 | 27767f505c71503eb7b1a6023d7dbde85c4938b5 | #!/usr/bin/env python
# coding=windows-1252
"""
Genera las carpetas y archivos asociados a una nueva base de datos para
consultar a travรฉs de OpacMarc.
Uso:
python add_db.py <BASE>
Ejemplo:
python add_db.py libros
"""
# Creado: Fernando Gรณmez, 2008-09-20
# TO-DO: dividir en funciones bien simples.
import os
import sys
import shutil
from opac_util import error, APP_DIR, LOCAL_DATA_DIR, LOCAL_DATA, setup_logger
def print_usage():
# The name of this script
SCRIPT_NAME = os.path.basename(sys.argv[0])
print __doc__
# Ver: Python main() functions, by Guido van Rossum <http://www.artima.com/weblogs/viewpost.jsp?thread=4829>
def main(DB_NAME):
logger.info(begin_msg)
# Check mandatory argument
#if len(argv) < 2:
# print_usage()
# sys.exit(0)
#DB_NAME = argv[1]
DB_DIR = os.path.join(LOCAL_DATA_DIR, 'bases', DB_NAME)
if os.path.isdir(DB_DIR):
error("Ya existe un directorio con el nombre '%s'." % DB_NAME)
# Creamos directorios
db_tree = {
'cgi-bin' : ['html', 'pft', 'xis'],
'config' : [],
'db' : ['original', 'public', 'update'],
'htdocs' : ['css', 'docs', 'img', 'js'],
}
# TO-DO: definir una funciรณn recursiva en opac_util.py
os.mkdir(DB_DIR)
for dir_name in db_tree:
os.mkdir(os.path.join(DB_DIR, dir_name))
for subdir_name in db_tree[dir_name]:
os.mkdir(os.path.join(DB_DIR, dir_name, subdir_name))
# Creamos archivos a partir de templates.
# FIXME - los paths deben quedar con la barra correcta (os.sep)
# FIXME - corregir el nombre de archivo que se muestra en el mensaje "Generado el archivo"
for tpl in template_dest:
f1 = open(os.path.join(APP_DIR, 'bin', 'add_db', 'templates', tpl), 'r')
f2 = open(os.path.join(DB_DIR, template_dest[tpl], tpl), 'w')
f2.write(
f1.read().replace('__LOCAL_DATA__', LOCAL_DATA).replace('__DB__', DB_NAME)
)
f1.close()
f2.close()
logger.info('Generado el archivo %s.' % os.path.basename(template_dest[tpl]))
logger.info(end_msg1 % DB_NAME)
# Dummy logo image
logo_src = os.path.join(APP_DIR, 'bin', 'add_db', 'templates', 'db-logo.png')
logo_dst = os.path.join(DB_DIR, 'htdocs', 'img')
shutil.copy(logo_src, logo_dst)
# Templates for files, and their destination directories.
# NOTE: We could avoid specifying this here if bin/add_db/templates/
# organized the files inside folders.
template_dest = {
    'db-about.htm' : 'cgi-bin/html',
    'db-footer.htm' : 'cgi-bin/html',
    'db-header.htm' : 'cgi-bin/html',
    'db-extra.htm' : 'cgi-bin/html',
    'db-styles.css' : 'htdocs/css',
    'db-scripts.js' : 'htdocs/js',
    'db-settings.conf' : 'config',
    'db-cipar.par' : 'config',
}
# User-facing messages (Spanish; some characters are mojibake in this copy).
# These are runtime strings, kept verbatim.
begin_msg = '*** Generaciรณn de una nueva base ***'
end_msg1 = '*** Se han creado los directorios y archivos necesarios para trabajar con la base %s. ***\n'
end_msg2 = '''A continuacion, debe copiar la base bibliogrรกfica original en la carpeta
%s/bases/%s/db/original/
y luego ejecutar:
python bin/update_db.py %s
Ademรกs, si desea personalizar la presentacion del OPAC para esta base, puede
editar los siguientes archivos:
- en %s/bases/%s/cgi-bin/html:
- db-about.htm
- db-header.htm
- db-footer.htm
- db-extra.htm
- %s/bases/%s/htdocs/css/db-styles.css
Si necesita imรกgenes para esta base (p.ej. un logo) debe colocarlas en
la carpeta
%s/bases/%s/htdocs/img/
Si necesita modificar algunos parรกmetros de configuraciรณn especรญficamente
para esta base, edite el archivo
%s/bases/%s/config/db-settings.conf
'''
# Define a global logger object
log_file = os.path.join(LOCAL_DATA_DIR, 'logs', 'python.log')
logger = setup_logger(log_file)
if __name__ == "__main__":
    # FIXME - raises IndexError if called without arguments
    DB_NAME = sys.argv[1]
    main(DB_NAME)
    print end_msg2 % ((LOCAL_DATA, DB_NAME) + (DB_NAME,) + (LOCAL_DATA, DB_NAME)*4) # Requires the parentheses, otherwise TypeError
    sys.exit(0)
|
11,699 | ee6db616616fe8d5b69e8e045e872fbb2ed9ecaf | #!C:\Users\martin.hanyas\AppData\Local\Programs\Python\Python36\python.exe
import urllib.error
import csv
import os
# ISO 4217 codes the converter knows; rates come from the ECB reference file.
supported_currencies = ['AUD','BGN','BRL','CAD','CHF','CNY','CZK','DKK','EUR','GBP','HKD','HRK','HUF','IDR',
                        'ILS','INR','ISK','JPY','KRW','MXN','MYR','NOK','NZD','PHP','PLN','RON','RUB','SEK',
                        'SGD','THB','TRY','USD','ZAR']
# Currency symbols accepted as input. NOTE(review): several of these literals
# look mojibake-encoded in this copy of the file — kept verbatim, since
# translate() keys on exactly these byte sequences.
supported_signs = ['ะปะฒ','R$','โฃ','ยฅ','Kฤ','kr','โฌ','ยฃ','Kn','Ft','Rp','โช','โจ','Kr','ยฅ','โฉ','RM','โฑ','zล','L','ั.','เธฟ','โค','$','R']
def translate(currency):
    """Map a currency symbol to its ISO code; unknown inputs (including ISO
    codes themselves) are returned unchanged.

    The explicit ``if key in dict`` / ``else`` was replaced by the equivalent
    ``dict.get(key, default)`` idiom. The symbol keys are kept byte-for-byte
    as found in this file (some appear mojibake-encoded).
    """
    sign_to_currency = {'ะปะฒ':'BGN', 'R$':'BRL', 'โฃ':'CHF', 'ยฅ':'CNY', 'Kฤ':'CZK', 'kr':'DKK', 'โฌ':'EUR', 'ยฃ':'GBP',
                        'Kn':'HRK', 'Ft':'HUF', 'Rp':'IDR', 'โช':'ILS', 'โจ':'INR', 'Kr':'ISK', 'โฉ':'KRW', 'RM':'MYR',
                        'โฑ':'PHP', 'zล':'PLN', 'L':'RON', 'ั.':'RUB', 'เธฟ':'THB', 'โค':'TRY', '$':'USD', 'R':'ZAR'}
    return sign_to_currency.get(currency, currency)
def download_rates(url):
    """Download the ECB rate archive at ``url`` and extract eurofxref.csv
    into the current directory; return the extracted file's name."""
    from urllib.request import urlopen
    from io import BytesIO
    from zipfile import ZipFile

    csv_name = 'eurofxref.csv'
    archive = ZipFile(BytesIO(urlopen(url).read()))
    with open(csv_name, 'wb') as out:
        out.write(archive.open(csv_name).read())
    return csv_name
def getrates():
    """Return {ISO code: rate-vs-EUR (string)}, with 'EUR' pinned to 1.0.

    Tries the live ECB feed first; falls back to the bundled CSV snapshot
    when offline.
    """
    try:
        # Get current rates online.
        path = download_rates('http://www.ecb.europa.eu/stats/eurofxref/eurofxref.zip')
    except urllib.error.URLError:
        # Offline: use the old bundled revision next to this script.
        script_dir = os.path.dirname(os.path.realpath(__file__))
        path = script_dir + '/eurofxref-10-07-2018.csv'
    with open(path, newline='') as fh:
        reader = csv.reader(fh, skipinitialspace=True, delimiter=',', quotechar='|')
        header = next(reader)
        values = next(reader)
    rates = dict(zip(header, values))
    del rates['Date']
    rates['EUR'] = 1.0
    return rates
def convert(amount, input_c, output_c):
    """Convert ``amount`` of ``input_c`` and return {code: rounded value}.

    When ``output_c`` is falsy, every supported currency is returned.
    Symbols are accepted for both currency arguments (via translate()).
    """
    src = translate(input_c)
    dst = translate(output_c)
    rates = getrates()
    if dst:
        return {dst: round(amount / float(rates[src]) * float(rates[dst]), 3)}
    return {cur: round(amount / float(rates[src]) * float(rates[cur]), 3)
            for cur in supported_currencies}
# CLI: convert.py AMOUNT FROM [TO]; without TO, prints all currencies.
if __name__ == "__main__":
    import sys
    try:
        out_c = sys.argv[3]
    except IndexError:
        # No target currency given: convert into every supported one.
        out_c = None
    print(convert(float(sys.argv[1]),sys.argv[2],out_c))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.