text stringlengths 38 1.54M |
|---|
# downloading files from the web using python
import requests

res = requests.get('https://automatetheboringstuff.com/files/rj.txt')
# status code 200 corresponds to everything being ok - 404 corresponds to file not being found
if res.status_code == 200:
    print("success")
else:
    print("request unsuccessful")

print(len(res.text))
print()
print()
print(res.text[:500])
print()

#res.raise_for_status() # raises an exception if there was an error downloading the file
#badRes = requests.get('https://automatetheboringstuff.com/files/abcdefg')
#badRes.raise_for_status() # halts program status
# Python and Unicode: http://bit.ly/unipain

# FIX: use a context manager so the file handle is closed even if iter_content()
# raises mid-download; the original relied on a manual close() call.
# Download the body in 100000-byte chunks and write them to a file in the cwd.
with open('RomeoAndJuliet.txt', 'wb') as playFile:
    for chunk in res.iter_content(100000):
        playFile.write(chunk)
# -*- coding:utf-8 -*-
"""
@Author : Lan
@env : Python 3.7.2
@Time : 2019/9/20 1:56 PM
@Desc :
"""
from PyQt5.QtWidgets import *
from gui.stacked_adbkit import AdbKitPage
from gui.stacked_fastbot import Fastbot
from gui.stacked_tidevice import TiDevicePage
class RightStacked(object):
    """Right-hand stacked widget hosting the three tool pages."""

    def __init__(self):
        self.right_stacked = QStackedWidget()
        # Insertion order fixes the stack indices: 0=adb kit, 1=tidevice, 2=fastbot.
        for page in (AdbKitPage(), TiDevicePage(), Fastbot()):
            self.right_stacked.addWidget(page.widget)
        # Show the first page by default.
        self.right_stacked.setCurrentIndex(0)
|
from matplotlib import pyplot as plt

movies = ["Энни Холл", "Бен-Гур", "Касабланка", "Ганди", "Вестсайдская история"]
num_oscars = [5, 11, 3, 8, 10]

# Shift each bar right by 0.1 so the bars are centered under the tick labels.
xs = [i + 0.1 for i, _ in enumerate(movies)]
plt.bar(xs, num_oscars)
# Label the Y axis
plt.ylabel("Количество наград")
# Chart title
plt.title("Мои любимые фильмы")
# BUG FIX: the original did `plt.xticks = (positions, labels)`, which REBINDS the
# pyplot function to a tuple instead of calling it — no tick labels were set and
# any later call to plt.xticks would crash. Call the function instead.
plt.xticks([i + 0.5 for i, _ in enumerate(movies)], movies)
# Label the X axis (disabled)
#plt.xlabel("Годы")
# Render the chart
plt.show()
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : SymmetricTree.py
@Contact : 70904372cecilia@gmail.com
@License : (C)Copyright 2019-2020
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2020/3/10 21:12 cecilia 1.0 镜像对称树
问题描述:
给定一个二叉树,判断是对称镜像树
"""
class TreeNode:
    """A binary-tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x
        # Children start detached; callers wire them up explicitly.
        self.left = self.right = None
def isSymmetricTree(root: TreeNode):
    """
    Return True when the tree rooted at *root* is a mirror of itself.

    Approach: treat the two subtrees as separate trees and recursively check
    that each left subtree mirrors the opposing right subtree (an empty tree
    counts as symmetric).

    Time O(N), space O(N) for the recursion stack.
    """
    def _mirrors(a: TreeNode, b: TreeNode):
        # Two empty subtrees mirror each other.
        if a is None and b is None:
            return True
        # Exactly one empty subtree breaks the symmetry.
        if a is None or b is None:
            return False
        # Both present: values must match...
        if a.val != b.val:
            return False
        # ...and the children must mirror crosswise.
        return _mirrors(a.left, b.right) and _mirrors(a.right, b.left)

    return True if root is None else _mirrors(root.left, root.right)
|
import turtle

bob = turtle.Turtle()
# Draw a 100-unit square: three side-and-turn pairs, then the closing side.
for _ in range(3):
    bob.fd(100)
    bob.lt(90)
bob.fd(100)
print(bob)
# Keep the window open until the user closes it.
turtle.mainloop()
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from clients import urls
from django.views.generic import TemplateView
# from rest_framework.authtoken.views import obtain_auth_token
# from clients.urls import router

# URL routing table. NOTE(review): uses the legacy `patterns()` API and
# string view paths, both removed in Django 1.10 — this file targets old Django.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'client.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^firstview/', include('clients.urls')),
    # NOTE(review): /myadmin/ includes the same urlconf as /firstview/ —
    # presumably an intentional alias; confirm.
    url(r'^myadmin/', include('clients.urls')),
    # url(r'^api/token',obtain_auth_token, name = 'api-token'),
    # url(r'api/', include(router.urls)),
    # url(r'^$', TemplateView.as_view(template_name='home.html'))
    url(r'^$', 'clients.views.home'),
    url(r'^loginU/', 'clients.views.loginU')
)
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
import unittest
from transformers.integrations import is_deepspeed_available
from transformers.testing_utils import (
CaptureStd,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed
# Make the repository's tests/ directory importable so the regression-trainer
# helper can be reused by these integration tests.
bindir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(f"{bindir}/../../../tests")
from test_trainer import get_regression_trainer  # noqa

# Fixed seed so the regression-model runs are reproducible across tests.
set_seed(42)

# Tiny seq2seq checkpoint used to keep the script-based tests fast.
MBART_TINY = "sshleifer/tiny-mbart"
def load_json(path):
    """Parse and return the JSON document stored at *path*."""
    with open(path) as fh:
        return json.loads(fh.read())
# a candidate for testing_utils
def require_deepspeed(test_case):
    """
    Decorator marking a test that requires deepspeed
    """
    # Guard clause: pass the test through untouched when deepspeed is installed,
    # otherwise wrap it with unittest's skip decorator.
    if is_deepspeed_available():
        return test_case
    return unittest.skip("test requires deepspeed")(test_case)
@require_deepspeed
@require_torch_gpu
class TrainerIntegrationDeepSpeed(TestCasePlus):
    """ This class is for testing directly via get_regression_trainer """

    def setUp(self):
        super().setUp()
        # Env vars emulating a 1-GPU distributed setup so DeepSpeed initializes
        # without a real launcher.
        self.dist_env_1_gpu = dict(
            MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1"
        )
        # The DeepSpeed config shipped next to this test file.
        self.ds_config_file = f"{self.test_file_dir_str}/ds_config.json"

    def test_fake_notebook_no_launcher(self):
        # this setup emulates a notebook where a launcher needs to be emulated by hand
        with CaptureStd() as cs:
            with mockenv_context(**self.dist_env_1_gpu):
                trainer = get_regression_trainer(local_rank=0, deepspeed=self.ds_config_file)
                trainer.train()
        assert "DeepSpeed info" in cs.out, "expected DeepSpeed logger output but got none"

    def test_early_get_last_lr(self):
        # with deepspeed's fp16 and dynamic loss scale enabled the optimizer/scheduler steps may
        # not run for the first few dozen steps while loss scale is too large, and thus during
        # that time `get_last_lr` will fail if called during that warm up stage,
        #
        # setting `logging_steps=1` forces an early `trainer._maybe_log_save_evaluate()` which calls
        # `self.lr_scheduler.get_last_lr()` and originally it'd fail on the very first step.
        with mockenv_context(**self.dist_env_1_gpu):
            a = b = 0.0
            trainer = get_regression_trainer(
                a=a,
                b=b,
                local_rank=0,
                train_len=8,
                deepspeed=self.ds_config_file,
                per_device_train_batch_size=8,
                logging_steps=1,
            )
            trainer.train()
            no_grad_accum_a = trainer.model.a.item()
            # it's enough that train didn't fail for this test, but we must check that
            # optimizer/scheduler didn't run (since if it did this test isn't testing the right thing)
            self.assertEqual(no_grad_accum_a, a)

    def test_gradient_accumulation(self):
        # this test measures that we get identical weights and similar loss with:
        # 1. per_device_train_batch_size=8, gradient_accumulation_steps=1
        # 2. per_device_train_batch_size=4, gradient_accumulation_steps=2
        # since the 2nd should produce the effective batch of 1st, with the same results
        #
        # I can get an identical loss for a small train_len=32, plus the power of the initial
        # dynamic loss scale value set to:
        #   "fp16.initial_scale_power": 1
        # plus having the same WarmupLR's warmup_min_lr == warmup_max_lr in the config file
        # but for some reason going to train_len=64 the weights, weights start to mismatch with this setup.
        # the culprit seems to be `initial_scale_power` - putting it back to its default 32 keeps the weights identical
        train_len = 64
        a = b = 0.0

        with mockenv_context(**self.dist_env_1_gpu):
            no_grad_accum_trainer = get_regression_trainer(
                a=a,
                b=b,
                local_rank=0,
                train_len=train_len,
                deepspeed=self.ds_config_file,
                per_device_train_batch_size=8,
                gradient_accumulation_steps=1,
            )
            no_grad_accum_result = no_grad_accum_trainer.train()
            no_grad_accum_loss = no_grad_accum_result.training_loss
            no_grad_accum_a = no_grad_accum_trainer.model.a.item()
            no_grad_accum_b = no_grad_accum_trainer.model.b.item()
            # make sure the optimizer kicked in - if it hasn't changed from the original value of a then make train_len bigger
            self.assertNotEqual(no_grad_accum_a, a)

        with mockenv_context(**self.dist_env_1_gpu):
            yes_grad_accum_trainer = get_regression_trainer(
                a=a,
                b=b,
                local_rank=0,
                train_len=train_len,
                deepspeed=self.ds_config_file,
                per_device_train_batch_size=4,
                gradient_accumulation_steps=2,
            )
            yes_grad_accum_result = yes_grad_accum_trainer.train()
            yes_grad_accum_loss = yes_grad_accum_result.training_loss
            yes_grad_accum_a = yes_grad_accum_trainer.model.a.item()
            yes_grad_accum_b = yes_grad_accum_trainer.model.b.item()
            self.assertNotEqual(yes_grad_accum_a, a)

        # training with half the batch size but accumulation steps as 2 should give the same weights
        self.assertEqual(no_grad_accum_a, yes_grad_accum_a)
        self.assertEqual(no_grad_accum_b, yes_grad_accum_b)

        # see the note above how to get identical loss on a small bs
        self.assertAlmostEqual(no_grad_accum_loss, yes_grad_accum_loss, places=5)
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeed(TestCasePlus):
    """ This class is for testing via an external script """

    @require_torch_multi_gpu
    def test_basic_distributed(self):
        self.run_quick(distributed=True)

    def test_do_eval_no_train(self):
        # we should not fail if train is skipped
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=False,
            extra_args_str="--do_eval",
            remove_args_str="--do_train",
        )
        val_metrics = load_json(os.path.join(output_dir, "eval_results.json"))
        assert "eval_bleu" in val_metrics
        # XXX: need to do better validation beyond just that the run was successful

    def run_quick(self, distributed=True, extra_args_str=None, remove_args_str=None):
        # Launch a 1-epoch training run and verify train metrics were written.
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            remove_args_str=remove_args_str,
        )
        train_metrics = load_json(os.path.join(output_dir, "train_results.json"))
        assert "train_runtime" in train_metrics

    def run_trainer(
        self,
        eval_steps: int,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        distributed: bool = True,
        extra_args_str: str = None,
        remove_args_str: str = None,
    ):
        # Build the CLI for run_seq2seq.py, launch it under `deepspeed`, and
        # return the output directory for metric inspection.
        # NOTE(review): max_len is currently unused in the arg list below — confirm.
        data_dir = self.examples_dir / "test_data/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        # NOTE(review): the "67,976" length values look like corrupted numbers
        # (commas in integer CLI args) — confirm the intended values.
        args = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_val_samples 8
            --max_source_length 67,976
            --max_target_length 67,976
            --val_max_target_length 67,976
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate 3e-3
            --warmup_steps 8
            --predict_with_generate
            --logging_steps 0
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --adafactor
            --task translation
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        if extra_args_str is not None:
            args.extend(extra_args_str.split())

        if remove_args_str is not None:
            remove_args = remove_args_str.split()
            args = [x for x in args if x not in remove_args]

        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config.json".split()
        script = [f"{self.examples_dir_str}/seq2seq/run_seq2seq.py"]
        num_gpus = get_gpu_count() if distributed else 1
        launcher = f"deepspeed --num_gpus {num_gpus}".split()

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"PYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir
|
# -*- coding: utf-8 -*-
"""
Numpy DataTypes module.
All data types of numpy arrays are defined in this module.
"""
import numpy as np
from axon.datatypes.base import DataType
class NumpyArray(DataType):
    """Numpy array DataType.

    We create a class for multidimension numpy arrays. At initialization the
    DataType of the entries must be defined as well as the shape of the numpy
    array.

    Examples
    --------
    NumpyArray(Float(), (2,2))
    """

    def __init__(self, dtype, shape, **kwargs):
        """Create a Numpy Array DataType.

        Parameters
        ----------
        dtype : DataType
            The DataType of every array entry.
        shape : tuple or list of int
            The exact shape arrays of this type must have.

        Raises
        ------
        ValueError
            If dtype is not a DataType, shape is not a tuple/list, or
            shape contains non-integer entries.
        """
        # Verify dtype is a valid DataType
        if not isinstance(dtype, DataType):
            message = 'Given dtype is not a DataType. (type={})'
            message = message.format(type(dtype))
            raise ValueError(message)
        # Verify shape is specified as a tuple or list
        if not isinstance(shape, (tuple, list)):
            message = 'Shape should be given as a tuple. (type={})'
            # BUG FIX: the message previously reported type(dtype) here,
            # so a bad shape produced a misleading error.
            message = message.format(type(shape))
            raise ValueError(message)
        # Verify shape has integer values
        for item in shape:
            if not isinstance(item, int):
                message = 'All entries of shape should be of type int.'
                raise ValueError(message)
        super().__init__(**kwargs)
        self.nparray_item_type = dtype
        # BUG FIX: normalize to a tuple — np.ndarray.shape is always a tuple, so a
        # list-valued shape would never compare equal in validate()/__eq__.
        self.shape = tuple(shape)

    def validate(self, other):
        """Check if argument is a Numpy Array of this type."""
        # Verify instance is a np array
        if not isinstance(other, np.ndarray):
            return False
        # Verify shape is correct
        if not self.shape == other.shape:
            return False
        # Verify all entries correspond to the correct DataType
        # (.flat iterates every element without materializing a reshaped copy)
        for item in other.flat:
            if not self.nparray_item_type.validate(item):
                return False
        return True

    def __eq__(self, other):
        """Check if other is the same NumpyArray DataType."""
        if not isinstance(other, DataType):
            return False
        # Verify it is a NumpyArray-like type via the nparray_item_type attribute
        if not hasattr(other, 'nparray_item_type'):
            return False
        if not self.shape == other.shape:
            return False
        return self.nparray_item_type == other.nparray_item_type

    def __repr__(self):
        """Get full representation."""
        # NOTE(review): shape is omitted from the representation — confirm
        # whether it should be included.
        return 'NumpyArray({})'.format(repr(self.nparray_item_type))

    def __str__(self):
        """Get string representation."""
        return 'NumpyArray({})'.format(str(self.nparray_item_type))
|
import sys
sys.path.insert(0, '..')
import torch as T
import unittest
from common.transformer import *
from common.memory import ReplayBuffer
from common.env import Env
from agent import Agent
class TestCase(unittest.TestCase):
    # Test network on batch 3D Matrix
    def test_network_batch(self):
        # NOTE(review): requires a CUDA device; the network itself is not moved
        # to the device here — presumably Transformer handles placement. Confirm.
        device = T.device("cuda")
        # Create Network: Transformer(1024, 4, 17, 2) — 17 output classes.
        net = Transformer(1024, 4, 17, 2)
        # Create a state: batch of 32 sequences of 16 feature vectors of size 1024.
        states = T.randn((32, 16, 1024), device=device)
        out = net(states).sum(dim=0).mean(dim=0).argmax(dim=0)
        # argmax over 17 classes must land in [0, 16].
        self.assertTrue(0 <= out.item() <= 16)

    # Test network on a 2D Matrix
    def test_network_2d(self):
        device = T.device("cuda")
        # Create Network
        net = Transformer(1024, 4, 17, 2)
        # Create a state: a single sequence of 9 feature vectors.
        states = T.randn((9, 1024), device=device)
        out = net(states).sum(dim=0).mean(dim=0).argmax(dim=0).cpu()
        self.assertTrue(0 <= out.item() <= 16)

    def test_agent(self):
        # End-to-end smoke test: a freshly built agent must pick a valid action
        # for the initial environment state.
        env = Env()
        agent = Agent(env.observation_space.shape[0],
                      env.action_space.n,
                      env,
                      epsilon=0.01,
                      capacity=100,
                      nheads=4,
                      batch_size=4,
                      transformer_layers=2)
        state = env.reset()
        action = agent.pick_action(state)
        # NOTE(review): valid actions are usually 0..n-1; the inclusive upper
        # bound here may be off by one — confirm.
        self.assertTrue(0 <= action <= env.action_space.n)
# Standard unittest entry point when the file is run as a script.
if __name__ == '__main__':
    unittest.main()
|
from ase import build
import gpaw as gp
def main():
    """Build an Al supercell, dope ~20% Mg, and run a GPAW PBE calculation."""
    atoms = build.bulk( "Al" )
    # Repeat the primitive cell into an 8x4x4 supercell.
    atoms = atoms*(8,4,4)
    nMg = int( 0.2*len(atoms) )
    # Substitute the first nMg sites with Mg (~20% doping).
    for i in range(nMg):
        atoms[i].symbol = "Mg"
    # Plane-wave mode, 400 eV cutoff, PBE functional, 10 extra empty bands,
    # 4x4x4 k-point mesh.
    calc = gp.GPAW( mode=gp.PW(400), xc="PBE", nbands=-10, kpts=(4,4,4) )
    atoms.set_calculator( calc )
    # Energy per atom. NOTE(review): the result is unused — presumably the run
    # is executed for its side effects (GPAW output files); confirm.
    energy = atoms.get_potential_energy()/len(atoms)

if __name__ == "__main__":
    main()
|
import json
import os
from setuptools import setup, find_packages
# Read the package version from pythonkss/version.json next to this setup.py.
with open(os.path.join(os.path.dirname(__file__), 'pythonkss', 'version.json')) as f:
    version = json.loads(f.read())

setup(
    name='pythonkss',
    version=version,
    description='Python implementation of KSS',
    long_description='See https://github.com/appressoas/pythonkss',
    author='Espen Angell Kristiansen',
    author_email='espen@appresso.no',
    url='https://github.com/appressoas/pythonkss',
    license='BSD',
    # Ship every package except the test and example trees.
    packages=find_packages(exclude=['tests', 'examples']),
    include_package_data=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
    ],
    zip_safe=False,
    test_suite='runtests.runtests',
    install_requires=[
        'Markdown',
        'Pygments',
        'beautifulsoup4',
        'html5lib',
        'pyyaml'
    ],
    # Extra dependencies installed via `pip install pythonkss[tests]`.
    extras_require={
        'tests': [
            'flake8',
            'mock',
            'pytest',
        ],
    },
)
|
n = int(input().strip())

# Longest run of consecutive 1-bits found so far, and the length of the
# current run while scanning.
longest = 0
streak = 0

# Peel bits off the low end with divmod instead of converting to a binary
# string: the remainder is the current lowest bit.
while n > 0:
    n, bit = divmod(n, 2)
    if bit == 1:
        streak += 1
        if streak > longest:
            longest = streak
    else:
        streak = 0

print(longest)
|
import traceback, time, sys, json, numpy as np
from PIL import Image
import cv2
from threading import Thread
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten
from keras.optimizers import Adam
import tensorflow as tf
ACTION_SPACE_SIZE = 3
ENV_SHAPE = (32, 32, 3)
def createModel():
    """Build the DQN: two conv/pool/dropout blocks -> dense -> linear Q-values."""
    model = Sequential()
    model.add(Conv2D(256, (3, 3), input_shape=ENV_SHAPE))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    model.add(Conv2D(256, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
    model.add(Dense(64))

    # One linear Q-value per action (ACTION_SPACE_SIZE = 3 here).
    model.add(Dense(ACTION_SPACE_SIZE, activation='linear'))
    # NOTE(review): `lr=` is the legacy Keras spelling; newer Keras expects
    # `learning_rate=` — confirm against the pinned Keras version.
    model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=['accuracy'])
    return model
###
# TODO:
# Separate each sheep into an agent.
# Need to send the sheep ID to python with the
# environment data.
# Also need the sheep's doc in general, to act
# on wellness (hunger, hp) and such.
# Sheep need rewards and penalties.
# Maybe something like:
# hp_decreased = -100
# hp_increased = +100
# more_sheep = +50
# less_sheep = -50
##
# Reward/penalty constants applied per step in main().
HP_DECREASE = -100
HP_INCREASE = 100
MORE_SHEEP = 50
LESS_SHEEP = -50

# Bare namespace object for module-wide mutable state.
class globalVars():
    pass

G = globalVars()
# Rolling history of 32x32x3 observations.
# NOTE(review): appears unused — main() keeps per-agent histories instead; confirm.
G.history = np.empty((0,32,32,3))
# Tile types, indexed by the integer codes received from the game process.
G.legend = [
    'empty',
    'self',
    'herb',
    'sheep',
    'user',
    'wolf',
    'impassable'
];
# Color triple used to render each tile type.
# NOTE(review): cv2.imshow interprets these as BGR — confirm the intended colors.
G.colors = {
    "empty": (0, 0, 0),
    "self": (0, 200, 200),
    "herb": (0, 255, 0),
    "sheep": (255, 255, 255),
    "user": (255, 255, 0),
    "wolf": (0, 0, 255),
    "impassable": (25, 25, 25)
}
# Current environment frame as a 32x32 3-channel uint8 image.
G.env = np.zeros((32, 32, 3), dtype=np.uint8)
def setEnv(input):
    """Translate a 2-D grid of legend indices into a color image in G.env."""
    rows = []
    for grid_row in input:
        rows.append([G.colors[G.legend[cell]] for cell in grid_row])
    G.env = np.array(rows, dtype=np.uint8)
def paintEnv():
    """Display the current G.env frame in an OpenCV window, scaled up 10x."""
    img = Image.fromarray(G.env, 'RGB')
    # Upscale 32x32 -> 320x320 so the grid is visible on screen.
    img = img.resize((320,320))
    cv2.imshow("sheep", np.array(img))
    # Pump the OpenCV event loop briefly so the window refreshes.
    cv2.waitKey(10)
"""
ins = np.zeros((32,32), dtype=np.uint8)
ins[25][25] = G.legend.index("self")
ins[16][20] = G.legend.index("herb")
ins[15][21] = G.legend.index("herb")
ins[15][19] = G.legend.index("herb")
ins[17][19] = G.legend.index("herb")
ins[17][24] = G.legend.index("sheep")
ins[14][18] = G.legend.index("sheep")
ins[7][5] = G.legend.index("wolf")
setEnv(ins)
print(G.env.shape)
"""
model = createModel()
agents = {}
#display = Thread(target=paintEnv)
#display.start()
go = True
def out(msg):
    """Print *msg* and flush stdout so the parent process sees it immediately."""
    print(msg)
    sys.stdout.flush()
def read_in():
    """Read one line from stdin; "q" stops the main loop, anything else is JSON."""
    global go
    line = sys.stdin.readline()[0:-1]
    if line == "q":
        # Quit sentinel from the parent process.
        go = False
        return []
    return json.loads(line)
def main():
    """Main loop: read (env, sheep) pairs from stdin, learn, and emit an action.

    Each iteration reads one observation, updates the per-sheep agent state,
    runs a prediction through the shared model, fits on the accumulated
    history, and writes the Q-values back to the parent process as JSON.
    """
    while go:
        reward = -1
        [env, sheep] = read_in()
        setEnv(env)
        i = sheep["_id"]
        if i not in agents:
            # First sighting of this sheep: initialize its agent record.
            agents[i] = {
                "sheep": sheep,
                "history": np.array([G.env]),
                "predictions": None,
                "rewards": np.array([0])
            }
        else:
            agents[i]["history"] = np.append(agents[i]["history"], [G.env], axis=0)
            # Reward the agent for hp gained since the previous observation.
            if sheep["wellness"]["hp"] > agents[i]["sheep"]["wellness"]["hp"]:
                reward += HP_INCREASE
            elif sheep["wellness"]["hp"] < agents[i]["sheep"]["wellness"]["hp"]:
                reward += HP_DECREASE
            reward -= sheep["wellness"]["hunger"]
            agents[i]["rewards"] = np.append(agents[i]["rewards"], [reward], axis=0)
            # BUG FIX: refresh the stored sheep doc; the original kept comparing
            # against the very first observation forever.
            agents[i]["sheep"] = sheep
        hist = agents[i]["history"]
        rewards = agents[i]["rewards"]
        predictions = agents[i]["predictions"]
        paintEnv()
        decision = model.predict(hist[-1:])
        print(decision)
        answer = [float(x) for x in decision[-1]]
        # BUG FIX: `predictions == None` broadcasts element-wise once predictions
        # is an ndarray and then raises on truth-testing; use `is None`.
        if predictions is None:
            predictions = np.array(decision)
        else:
            predictions = np.append(predictions, decision, axis=0)
        # BUG FIX: persist the predictions; the original never wrote them back,
        # so the stored value stayed None and the append branch could never work.
        agents[i]["predictions"] = predictions
        print(predictions)
        print(rewards*0.01)
        model.fit(hist, predictions)
        #model.train_on_batch(hist[-1:], predictions[-1:], rewards[-1:])
        print(answer)
        out(json.dumps(answer))
#start process
if __name__ == '__main__':
    main()
else:
    # Being imported (not spawned) is unexpected for this worker script.
    out("__name__ isnt __main__")
|
import markdown
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.html import strip_tags
# Category
class Category(models.Model):
    """
    Django requires every model to inherit from models.Model.
    Category only needs a simple name field.
    CharField stores strings; max_length caps how long a stored name may be —
    longer names cannot be saved to the database.
    """
    name = models.CharField(verbose_name=r"类别", max_length=100)

    def __str__(self):
        return self.name

    class Meta:
        # The ORM's default table name is app_modelname; db_table overrides it.
        db_table = 'blog_category'
        verbose_name = r"分类"
        verbose_name_plural = verbose_name
# Tag
class Tag(models.Model):
    """
    Tag is as simple as Category: just a name field.
    """
    name = models.CharField(verbose_name=r"标签", max_length=100)

    def __str__(self):
        return self.name

    class Meta:
        db_table = 'blog_tag'
        verbose_name = r"标签"
        verbose_name_plural = verbose_name
# Post (article)
class Post(models.Model):
    # Article title; must be unique.
    title = models.CharField(verbose_name=r"标题", max_length=70, unique=True)
    # Article body. TextField suits large text; CharField suits short strings.
    body = models.TextField(verbose_name=r"正文")
    # Optional excerpt. NOTE(review): the original intent was blank=True (allow
    # empty form input) but the field uses null=True instead — confirm which
    # behavior is wanted.
    excerpt = models.CharField(verbose_name=r"摘要", max_length=200, null=True)
    # Author: FK to Django's built-in User model (one author -> many posts).
    author = models.ForeignKey(verbose_name=r"作者", to=User, on_delete=models.DO_NOTHING)
    # Creation time is set once; modification time updates on every save.
    created_time = models.DateTimeField(verbose_name=r"创建时间", auto_now_add=True, auto_now=False)
    modified_time = models.DateTimeField(verbose_name=r"修改时间", auto_now_add=False, auto_now=True, null=True)
    # View counter; editable=False hides it from forms and the admin.
    views = models.PositiveIntegerField(verbose_name=r"阅读量", default=0, editable=False)
    # One category per post (FK, one-to-many); many tags per post (M2M).
    # Tags are optional (blank=True); M2M fields do not take null.
    category = models.ForeignKey(verbose_name=r"分类", to=Category, null=True, on_delete=models.SET_NULL)
    tags = models.ManyToManyField(verbose_name=r"标签", to=Tag, blank=True)

    def __str__(self):
        return self.title

    # Model metadata
    class Meta:
        db_table = 'blog_post'
        verbose_name = r"文章"
        verbose_name_plural = verbose_name
        ordering = ['-created_time']  # newest first

    # Canonical URL for a post; reverse() requires the 'blog' URL namespace.
    def get_absolute_url(self):
        return reverse('blog:detail', kwargs={'pk': self.pk})

    def increase_views(self):
        # Bump the counter; update_fields keeps the UPDATE minimal.
        self.views += 1
        self.save(update_fields=['views'])

    # def save(self, *args, **kwargs):
    #     # If no excerpt was provided, render the body with Markdown,
    #     # strip all HTML tags, and keep the first ~54 characters.
    #     if not self.excerpt:
    #         md = markdown.Markdown(extensions=[
    #             'markdown.extensions.extra',
    #             'markdown.extensions.codehilite',
    #         ])
    #         self.excerpt = strip_tags(md.convert(self.body))[:54]
    #
    #     # Delegate to the parent save to persist the row.
    #     super(Post, self).save(*args, **kwargs)
|
# -*- coding:utf-8 -*-
import sys
import os
import re
import argparse
class CNStringChecker(object):
    """Scan .py files for Chinese characters inside string literals.

    NOTE(review): this is Python 2 code (print statements, old except syntax,
    str.decode) — it will not run under Python 3.
    Results are written to output_path as "<relative file>\nline:N" entries.
    """

    def __init__(self, input_path, output_path):
        # input_path: file or directory to check
        # output_path: where the report file is written
        super(CNStringChecker, self).__init__()
        self.files_list = []
        self.check_path = None
        self.output_path = os.path.abspath(output_path)
        # Matches any line containing at least one quoted string.
        self._re_expression = re.compile(r""".*["'].*["'].*""")
        # True while inside a triple-quoted block comment.
        self._on_long_commit = False
        self._long_commit_marks = ('"""', "'''")
        # NOTE(review): ('#') is a plain string, not a 1-tuple; the membership
        # tests below still work because `ch in '#'` is a substring check.
        self._short_commit_marks = ('#')
        self._string_marks = ('"', "'")
        self._convert_path(os.path.abspath(input_path))
        # Hard-coded for now: path prefixes to skip while walking.
        self.ingoreList = ()
        return

    def collectFiles(self, path, extend_type='.py'):
        # Collect matching files under *path*, skipping ignored prefixes.
        for dirpath, dirnames, filenames in os.walk(path):
            if dirpath.startswith(self.ingoreList):
                print dirpath
                continue
            for item in filenames:
                if item.lower().endswith(extend_type):
                    self.files_list.append(os.path.join(dirpath,item))
        return

    def checkCNString(self):
        # Check every collected file and write the findings to the report.
        if self.check_path:
            self.collectFiles(self.check_path)
        with open(self.output_path,'w') as output_file:
            for item in self.files_list:
                output = self._extractOneFile(item)
                if output:
                    # Trim the common prefix so the report shows relative paths.
                    pos = len(self.check_path) if self.check_path else 0
                    #print '\n{0}\n'.format(item[pos:])
                    output_file.write('\n{0}\n'.format(item[pos:]))
                    for info in output:
                        #print info
                        output_file.write(info)
        return

    def _convert_path(self, path):
        # A directory becomes the walk root; a single file goes straight
        # into the work list.
        if os.path.isdir(path):
            self.check_path = path
        if os.path.isfile(path):
            self.files_list.append(path)
        return

    def _extractOneFile(self, cur_file):
        # Return "line:N" records for every line with Chinese text in a string.
        output = []
        self._on_long_commit = False
        with open(cur_file,'r') as f:
            context = f.readlines()
            for num in range(len(context)):
                line = self._clean_string(context[num])
                if self._re_expression.match(line) and self._check_contain_chinese(line):
                    output.append('line:{0}\n'.format(num+1))
        return output

    def _clean_string(self, input_string):
        # Strip comment content so only real code/strings are inspected.
        input_string = input_string.strip()
        # A line that is exactly a triple-quote toggles block-comment state.
        if input_string in self._long_commit_marks:
            self._on_long_commit = not self._on_long_commit
            return ''
        # A line starting with a triple-quote opens (or opens+closes) a block.
        if input_string[0:3] in self._long_commit_marks:
            if input_string[-3:] not in self._long_commit_marks:
                self._on_long_commit = True
            else:
                self._on_long_commit = False
            return ''
        if self._on_long_commit:
            return ''
        # Drop a trailing '#' comment, but only when the '#' is not inside
        # a quoted string.
        on_string = False
        mark_pos = 0
        for pos in range(len(input_string)):
            if input_string[pos] in self._string_marks:
                if not on_string:
                    on_string = True
                    mark_pos = pos
                elif input_string[mark_pos] == input_string[pos]:
                    on_string = False
            if input_string[pos] in self._short_commit_marks and not on_string:
                input_string = input_string[0:pos]
                return input_string
        return input_string

    def _check_contain_chinese(self, line):
        # True when the line holds any CJK character (U+4E00..U+9FFF).
        try:
            for ch in line.decode('utf-8'):
                if u'\u4e00' <= ch <= u'\u9fff':
                    return True
        except UnicodeDecodeError, e:
            # Some files contain undecodable bytes; none observed with Chinese
            # text so far, so simply report False for them.
            return False
        return False
if __name__ == '__main__':
    # Usage: python checker.py <input_path> <output_report_path>
    tool = CNStringChecker(sys.argv[1], sys.argv[2])
    tool.checkCNString()
|
import os
from flask import Flask, render_template
from routes import facebook_api, twitter_api, api
from consts import SECRET_KEY
###from clock import job_trigger
###job_trigger()
# Flask app; templates are served from the 'static' folder.
app = Flask(__name__, template_folder='static')
# Mount the API blueprints defined in routes.py.
app.register_blueprint(facebook_api)
app.register_blueprint(twitter_api)
app.register_blueprint(api)
app.secret_key = SECRET_KEY

@app.route("/")
def home():
    # Landing page: serve index.html on load of the application.
    return render_template("index.html")

if __name__ == '__main__':
    app.run()
|
"""
负责资源导入、模块检查
"""
import os
import cv2
from stagesep2.logger import logger
from stagesep2.utils import *
def path_to_name(file_path: str) -> str:
    """ full path -> file name """
    base_name = os.path.basename(file_path)
    stem, _extension = os.path.splitext(base_name)
    return stem
def is_path_existed(file_path: str):
    """ check if file is existed """
    file_exists = os.path.isfile(file_path)
    return file_exists
def frame_prepare(frame):
    """Grayscale the frame and smooth it with a 3x3 median blur."""
    return cv2.medianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), 3)
class TemplatePicture(object):
    """A template image loaded from disk and pre-processed for matching."""

    def __init__(self, pic_path: str):
        if not is_path_existed(pic_path):
            raise FileNotFoundError('file not existed: {}'.format(pic_path))
        self.pic_path: str = pic_path
        self.pic_name: str = path_to_name(pic_path)
        # Pre-processed (grayscale + blur) OpenCV image of the template.
        self.cv_object = frame_prepare(cv2.imread(self.pic_path))
class TemplateManager(object):
    """Registry of the template pictures belonging to one video.

    Match-template analysis needs template pictures; the pictures required
    by a given video are collected here as { pic_name: TemplatePicture }.
    """

    TAG = 'TemplateManager'

    def __init__(self, video_name: str):
        self.video_name = video_name
        self._match_template_pic_dict = dict()

    def add(self, pic_path: str):
        """Load a picture from disk and register it under its file name."""
        picture = TemplatePicture(pic_path)
        self._match_template_pic_dict[picture.pic_name] = picture
        logger.info(self.TAG, msg='LOAD PICTURE', path=pic_path, name=picture.pic_name, video=self.video_name)

    def remove(self, pic_name: str):
        """Unregister a picture; True on success, False when unknown."""
        if pic_name not in self._match_template_pic_dict:
            logger.warn(self.TAG, msg='no pic named {}'.format(pic_name))
            return False
        del self._match_template_pic_dict[pic_name]
        return True

    def get_dict(self):
        """Expose the {name: TemplatePicture} mapping."""
        return self._match_template_pic_dict
class SSVideo(object):
    """ video object """

    def __init__(self, video_path: str):
        if not is_path_existed(video_path):
            raise FileNotFoundError('file not existed: {}'.format(video_path))
        self.video_name = path_to_name(video_path)
        self.video_path = video_path
        # add template example:
        # ssv = SSVideo('some_path/123.mp4')
        # ssv.template_manager.add('some_path/123.png')
        self.template_manager = TemplateManager(self.video_name)
        # degree = rotate * 90, counter-clockwise
        self._rotate = 0
        # video info
        # total frame count
        self.total_frame = None
        # first and last frame
        self.first_frame = None
        self.last_frame = None
        # load video base info
        self.load_video_info()

    @property
    def rotate(self):
        return self._rotate

    @rotate.setter
    def rotate(self, value: int):
        # Changing the rotation invalidates the cached frames, so reload them.
        if not isinstance(value, int):
            raise TypeError('rotate should be int')
        self._rotate = value
        self.load_video_info()

    def load_video_info(self):
        # TODO need more info?
        # get info from video
        with video_capture(self) as video_src:
            total_frame = video_src.get(cv2.CAP_PROP_FRAME_COUNT)
            _, first_frame = video_src.read()
            # Seek to the last frame (property id 1 == CAP_PROP_POS_FRAMES).
            video_src.set(1, total_frame - 1)
            _, last_frame = video_src.read()
        # prepare, and rotate
        first_frame, last_frame = [
            frame_prepare(rotate_pic(each, self._rotate))
            for each in (first_frame, last_frame)]
        # init
        self.first_frame = first_frame
        self.last_frame = last_frame
        self.total_frame = total_frame
class VideoManager(object):
    """Class-level registry of everything the analyser needs.

    For example: the videos under analysis, and (through each SSVideo's
    template manager) the template pictures used for match-template.
    """

    TAG = 'VideoManager'
    # Videos awaiting analysis; this dict is iterated when analysis starts.
    # eg: { video_name: SSVideo(video_path), }
    video_dict = dict()

    def __init__(self):
        # Purely a class-level registry — instantiating it is a usage error.
        raise NotImplementedError('should not init')

    @classmethod
    def add(cls, video_path: str):
        """Load a video and register it under its file name."""
        video = SSVideo(video_path)
        cls.video_dict[video.video_name] = video
        logger.info(cls.TAG, msg='LOAD VIDEO', path=video_path, name=video.video_name)
        return video

    @classmethod
    def remove(cls, video_name: str):
        """Unregister a video; True on success, False when unknown."""
        if video_name not in cls.video_dict:
            logger.warn(cls.TAG, msg='no video named {}'.format(video_name))
            return False
        del cls.video_dict[video_name]
        return True
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 24 02:28:39 2017
@author: liebe
"""
import sys
import numpy as np
from PIL import Image
# Load both input images as numpy arrays.
img1 = np.array( Image.open(sys.argv[1]) )
img2 = np.array( Image.open(sys.argv[2]) )

# Black out every pixel of img2 that is identical in img1,
# leaving only the differing pixels visible.
for row in range(len(img1)):
    for col in range(len(img1[row])):
        if np.array_equal(img1[row][col], img2[row][col]):
            img2[row][col] = 0

Image.fromarray(img2).save('ans_two.png')
|
# Sum of two values read from the user.
n1 = int(input('Digite um valor: '))
n2 = int(input('Digite um valor: '))
soma = n1 + n2
print(f'A Soma entre {n1} e {n2} é igual {soma}!')
|
# -*- coding:utf-8 -*-
# Date: 04 April 2021
# Author:Yan Zhou a1807782
# Description:The service of user and organization
import controller.Util as Util
from model.ProjectDB import ProjectDB
from model.MemberDB import MemberDB
from model.UserDB import UserDB
import hashlib
import sqlite3
import json
import uuid
import datetime
import random
import sys
sys.path.append('..')
# Module-level data-access singletons, all backed by the same SQLite file.
database = UserDB("../EZlabel.db")    # users and organizations
memberDB = MemberDB("../EZlabel.db")  # project membership rows
projectDB = ProjectDB("../EZlabel.db")  # projects per organization
# adapt MD5 to encrypt the password
def toMd5(data):
    """Return the hex MD5 digest of *data* (a str); used to hash passwords."""
    digest = hashlib.md5(data.encode(encoding='UTF-8'))
    return digest.hexdigest()
# randomly get the code of 6 chars -> generate org code
def randomCode():
    """Return a random 6-character organization code.

    Each position is independently either a random digit ('0'-'9') or a
    random uppercase letter ('A'-'Z').
    """
    ret = ""
    for _ in range(6):
        # BUG FIX: the original first drew random.randint(0, 9) into `num`
        # and immediately overwrote it — dead code, removed.
        digit = chr(random.randint(48, 57))    # '0'-'9'
        letter = chr(random.randint(65, 90))   # 'A'-'Z'
        ret += random.choice([digit, letter])
    return ret
# get_all_user, login usage
def get_all_user():
    """Return every user row as a list of dicts.

    Each dict carries id, username, org_id and password (the stored
    MD5 hash), in that key order.
    """
    columns = ('id', 'username', 'org_id', 'password')
    return [dict(zip(columns, row)) for row in database.search_all_user()]
# show the users in the org
def get_user_list(json_search, org_id):
    """Return a JSON array describing the organization's users.

    *json_search*, when truthy, is a JSON string whose "name" key filters
    the search.  Each entry carries id, uuid, name, the number of projects
    the user belongs to ('project') plus a comma-separated list of them
    ('detail'), org_role, and a creation date formatted via Util.last_modify.
    """
    if json_search:
        criteria = json.loads(json_search)
        rows = database.search_user(org_id, criteria["name"])
    else:
        rows = database.search_user(org_id)

    # row columns: id, uuid, name, org_role, create_date
    users = []
    for row in rows:
        memberships = memberDB.get_project_quantity(row[1])
        users.append({
            'id': row[0],
            'uuid': row[1],
            'name': row[2],
            'project': len(memberships),
            'detail': ",".join(m[0] for m in memberships),
            'org_role': row[3],
            'create_date': Util.last_modify(row[4]),
        })
    return json.dumps(users)
# signup
def signup(json_user):
    """Register a new user, creating or joining an organization.

    *json_user* is a JSON string with keys "name", "password", "org_code"
    and "org_name".  An empty "org_code" means the user founds a new org
    (becoming its admin, org_role "0"); otherwise they join the org owning
    that code as a regular member (org_role "1") and are enrolled into all
    of that org's existing projects with role "4" and empty permission.

    Returns a dict with the new user's id/username/org_id/password (the
    password value is the stored MD5 hash).

    Raises:
        NameError: if a non-empty org_code matches no organization.
    """
    # get the uuid
    uid = uuid.uuid1().hex
    # transfer json to dict
    user = json.loads(json_user)
    name = user["name"]
    password = toMd5(user["password"])
    org_code = user["org_code"]
    org_name = user["org_name"]
    org_id = ""
    # add an orgnization if the user choose to create a new org and return the id of org
    # org_role, 0 admin,1 member
    org_role = "1"
    if org_code == "":
        org_role = "0"
        # insert_org (self, name, creator, create_date, code, edit_date):
        not_unique = True
        # get the unique code for the org: keep drawing random codes until
        # one is not already taken
        while not_unique:
            code = randomCode()
            rows = database.search_org_by_code(code)
            if(len(rows)==0):
                not_unique = False
        org_id = database.insert_org(org_name, name, datetime.datetime.now(), code, datetime.datetime.now())
        # rows = database.search_org(org_name)
        # if len(rows) > 0:
        #     org_id = rows[0][0]
        #     # set org_role = 0(admin) if this user create the org
        #     org_role = '0'
    else:
        # joining an existing org: look it up by the supplied code
        rows = database.search_org_by_code(org_code)
        if len(rows)>0:
            org_id = rows[0][0]
        else:
            raise NameError('Cannot find org code')
    # insert user
    # (self, uuid, name, password, flag, org_id, org_role, creator, create_date, editor, edit_date)
    user_id = database.insert_user(uid, name, password, "1", str(
        org_id), org_role, name, datetime.datetime.now(), name, datetime.datetime.now())
    # insert member for projects
    # default: no role, no permission
    # if this user is the creator of org (org_role="0"), then there is no project now, so we don't need to add this member to the project
    # if this user is the member of org (org_role="1"), then he/she should be added to all the valid projects in this organization, with role:"4" and permission:""
    role = "4"
    permission = ""
    if org_role == "1":
        # add member to all the projects in this orgnization
        projects = projectDB.search_project(str(org_id))
        for project in projects:
            project_id = project[0]
            # user_uuid, user_name, role, permission, project_id, creator, create_date, editor, edit_date
            memberDB.insert_member(uid, name, role, permission, project_id, "System",
                                   datetime.datetime.now(), "System", datetime.datetime.now())
    # get the new added user for adding into users in app.py
    rows = database.view_user(user_id)
    newUser = {}
    if len(rows) > 0:
        result = rows[0]
        newUser['id'] = result[0]
        newUser['username'] = result[1]
        newUser['org_id'] = result[2]
        newUser['password'] = result[3]
    return newUser
# login
def login(json_user):
    """Check the credentials in *json_user* and return a JSON status payload.

    code 0 means the username/password pair matched; code -1 means the
    credentials were wrong or an exception occurred (message carries repr).
    """
    try:
        credentials = json.loads(json_user)
        matches = database.search_user_login(credentials["name"],
                                             toMd5(credentials["password"]))
        if len(matches) > 0:
            payload = {
                'code': 0,
                'message': 'Login sucessfully',
            }
        else:
            payload = {
                'code': -1,
                'message': 'Username or password is incorrect',
            }
    except Exception as e:
        payload = {
            'code': -1,
            'message': repr(e)
        }
    return json.dumps(payload)
# delete a user (set flag = 0)
def delete_user(json_user):
    """Soft-delete a user: flag them inactive and drop their project memberships.

    *json_user* is a JSON string carrying the user's table "id" and "uuid".
    Returns a JSON payload: code 0 on success, code -1 plus a message on error.
    """
    try:
        user = json.loads(json_user)
        # BUG FIX: renamed locals — `id` shadowed the builtin and `uuid`
        # shadowed the imported uuid module within this function.
        user_id = user["id"]
        user_uuid = user["uuid"]
        database.update_user_flag(user_id)
        # delete the member from project view
        memberDB.delete_member(user_uuid)
        re = {
            'code': 0,
            'message': 'Deleted Successfully',
        }
        return json.dumps(re)
    except Exception as e:
        re = {
            'code': -1,
            'message': repr(e)
        }
        return json.dumps(re)
# change password (set password to a new one)
def change_password(json_user):
    """Change or reset a user's password.

    If the payload carries "current_password", verify it and then set
    "new_password"; otherwise reset the password to the default "666666".
    Returns a dict: code 0 plus the new MD5 hash on success, code -1 plus
    a message on failure.
    """
    try:
        user = json.loads(json_user)
        username = user["name"]
        # BUG FIX: membership must be tested on the parsed dict, not on the
        # raw JSON string — any value merely *containing* the text
        # "current_password" would have matched the old check.
        if "current_password" in user:
            cur_password = toMd5(user["current_password"])
            rows = database.search_user_login(username, cur_password)
            # the current password is wrong
            if len(rows) == 0:
                return {
                    'code': -1,
                    'message': "The current password is incorrect!",
                }
            passwordMd5 = toMd5(user["new_password"])
        else:
            # no current password supplied -> admin-style reset to default
            passwordMd5 = toMd5("666666")
        database.update_user_password(username, passwordMd5)
        return {
            'code': 0,
            'name': username,
            'password': passwordMd5,
            'message': 'Reseted successfully, please login',
        }
    except Exception as e:
        return {
            'code': -1,
            'message': repr(e)
        }
# get the current user's role and permission, in member table
def get_cur_user(current_user_name):
    """Return a JSON object for the named user.

    Keys: name, password (stored hash), role, org_name, org_code.
    An unknown user yields an empty JSON object.
    """
    rows = database.get_current_user(current_user_name)
    if not rows:
        return json.dumps({})
    first = rows[0]
    return json.dumps({
        'name': first[0],
        'password': first[1],
        'role': first[2],
        'org_name': first[3],
        'org_code': first[4],
    })
|
import aiohttp
import json
class RestClient:
    """Thin async HTTP client around a shared aiohttp session / connection pool."""

    def __init__(self, app, loop, limit=100, timeout=60):
        self.app = app
        self.connection_pool = aiohttp.ClientSession(
            loop=loop,
            json_serialize=json.dumps,
            connector=aiohttp.TCPConnector(loop=loop, limit=limit),
            timeout=aiohttp.ClientTimeout(total=timeout),
        )

    async def close(self):
        """Dispose of the underlying session if it is still open."""
        pool = self.connection_pool
        if pool and not pool.closed:
            await pool.close()

    async def request(self, method, url, **kwargs):
        """Issue a request and return (body, status, content_type).

        The body is the parsed JSON payload when the response declares a
        JSON content type, otherwise the raw response text.
        """
        async with self.connection_pool.request(method, url=url, **kwargs) as resp:
            try:
                body = await resp.json()
            except aiohttp.client.ContentTypeError:
                body = await resp.text()
            return body, resp.status, resp.content_type
|
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    """Rename the bhp_form_* visit tables to their new bhp_visit_* names.

    Irreversible: `backwards` is a no-op, so migrating back does not
    restore the old table names.
    """

    def forwards(self, orm):
        # Move the four tables from the legacy "bhp_form" prefix to "bhp_visit".
        db.rename_table('bhp_form_membershipform', 'bhp_visit_membershipform')
        db.rename_table('bhp_form_schedulegroup', 'bhp_visit_schedulegroup')
        db.rename_table('bhp_form_visitdefinition', 'bhp_visit_visitdefinition')
        db.rename_table('bhp_form_visitdefinition_schedule_group', 'bhp_visit_visitdefinition_schedule_group')

    def backwards(self, orm):
        # Intentionally empty: the renames are not rolled back.
        pass

    # South "frozen" model snapshot at the time of this migration.
    models = {
        'bhp_content_type_map.contenttypemap': {
            'Meta': {'ordering': "['name']", 'unique_together': "(['app_label', 'model'],)", 'object_name': 'ContentTypeMap', 'db_table': "'bhp_common_contenttypemap'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
            'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
            'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
        },
        'bhp_visit.membershipform': {
            'Meta': {'object_name': 'MembershipForm', 'db_table': "'bhp_visit_membershipform'"},
            'category': ('django.db.models.fields.CharField', [], {'default': "'subject'", 'max_length': '25', 'null': 'True'}),
            'content_type_map': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['bhp_content_type_map.ContentTypeMap']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
            'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
            'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'bhp_visit.schedulegroup': {
            'Meta': {'ordering': "['group_name']", 'object_name': 'ScheduleGroup', 'db_table': "'bhp_visit_schedulegroup'"},
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'group_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
            'grouping_key': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
            'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
            'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
            'membership_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bhp_visit.MembershipForm']"}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
            'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
        },
        'bhp_visit.visitdefinition': {
            'Meta': {'ordering': "['code', 'time_point']", 'object_name': 'VisitDefinition', 'db_table': "'bhp_visit_visitdefinition'"},
            'base_interval': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'base_interval_unit': ('django.db.models.fields.CharField', [], {'default': "'D'", 'max_length': '10'}),
            'code': ('django.db.models.fields.CharField', [], {'max_length': '4', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'grouping': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
            'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
            'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
            'instruction': ('django.db.models.fields.TextField', [], {'max_length': '255', 'blank': 'True'}),
            'lower_window': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'lower_window_unit': ('django.db.models.fields.CharField', [], {'default': "'D'", 'max_length': '10'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'schedule_group': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['bhp_visit.ScheduleGroup']", 'symmetrical': 'False'}),
            'time_point': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '35', 'db_index': 'True'}),
            'upper_window': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'upper_window_unit': ('django.db.models.fields.CharField', [], {'default': "'D'", 'max_length': '10'}),
            'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
            'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['bhp_visit']
import socket
import time
import sys
from threading import Thread
# inputs
DEST_IP = '130.56.253.43'
UDP_DEST_PORT = 5203  # Change later for security
# BUG FIX: was "2,00", which builds the tuple (2, 0) and breaks the
# range()/arithmetic that consume it downstream; the intended size is 200.
PACKET_SIZE = 200  # Datagram size in bytes
NO_OF_PACKETS = 1000  # Number of packets to send
PACKETS_PER_SEC = 800  # Packets to be sent per second
RECIEVE_IP = ''  # bind on all interfaces
RECIEVE_PRT = 54321  # Port for incoming packets
BUFFER = 4096  # recvfrom buffer size

# Single UDP socket shared by the sender and receiver threads.
global sock
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def udp_recieve(RECIEVE_IP, RECIEVE_PRT):
    # Listen for echoed datagrams on (RECIEVE_IP, RECIEVE_PRT), append each
    # packet's round-trip delay to udp_testresults.csv, and keep running
    # totals in the module globals packets_rcvd / cum_delay.
    ADDR = (RECIEVE_IP, RECIEVE_PRT)
    #rcv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    global packets_rcvd
    global cum_delay
    packets_rcvd = 0
    cum_delay = 0.0
    try:
        sock.bind(ADDR)
        print 'Server listening on', ADDR
    except Exception:
        # NOTE(review): the loop below still runs after a failed bind.
        print 'ERROR - binding failed'
    while True:
        data, addr = sock.recvfrom(BUFFER)
        # Each datagram is the str() of a (timestamp, seq_no, padding) tuple;
        # split on commas and strip tuple punctuation to recover the fields.
        splitdata = data.split(',')
        timecount = splitdata[0].strip("('")
        rt_delay = (time.time() - float(timecount))
        packet_number = str(splitdata[1].strip("' '"))
        packet_number = packet_number.lstrip('0')  # drop the zero-padding
        # Write to file
        # NOTE(review): open(...).write(...) relies on GC to close the
        # handle; a with-block would be safer.
        outfile = open('udp_testresults.csv',"a").write(str(time.ctime()+','+'received , '+ packet_number+' , '+str(rt_delay)+'\n'))
        print (time.ctime()+','+'received , '+ packet_number+' , '+str(rt_delay))
        packets_rcvd = packets_rcvd + 1
        cum_delay = cum_delay + rt_delay
def udp_send(DEST_IP, UDP_DEST_PORT, PACKET_SIZE, NO_OF_PACKETS,PACKETS_PER_SEC):
    # Send NO_OF_PACKETS datagrams to DEST_IP:UDP_DEST_PORT at roughly
    # PACKETS_PER_SEC, each carrying (timestamp, zero-padded seq no, padding).
    IDT = 1./PACKETS_PER_SEC #Inter departure time between sends
    packet_count_snd = 0
    print "Client Started!"
    print "target IP:",DEST_IP
    print "target port",UDP_DEST_PORT
    print "Packets to send", NO_OF_PACKETS
    print "MegaBytes to send/second", PACKETS_PER_SEC*PACKET_SIZE/1000000.0
    padding = ''
    # 78 is treated here as the size of the stringified timestamp/sequence
    # header, so the padded payload totals roughly PACKET_SIZE bytes —
    # requires PACKET_SIZE to be an int.
    for j in range(78,PACKET_SIZE):
        padding = padding + str(1)
    for i in range(1,NO_OF_PACKETS+1):
        time.sleep(IDT)  # pace the sends
        #send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.sendto(str(("%.5f" % time.time(),str('%08d' % i), padding)), (DEST_IP, UDP_DEST_PORT) )
        packet_count_snd = packet_count_snd+1
    time.sleep(5) # wait for packets to be recieved
    print packet_count_snd
    print packets_rcvd
if __name__ == '__main__':
    # Receiver runs as a daemon thread so the process can exit after sending.
    receiver_thread = Thread(target=udp_recieve, args=(RECIEVE_IP, RECIEVE_PRT))
    receiver_thread.daemon=True
    receiver_thread.start()
    time.sleep(1)  # give the receiver time to bind before sending starts
    # NOTE(review): Thread(...).start() returns None, so sender_thread does
    # not hold a Thread handle; keep a reference before start() if join()
    # is ever needed.
    sender_thread = Thread(target=udp_send, args=(DEST_IP, UDP_DEST_PORT, PACKET_SIZE, NO_OF_PACKETS, PACKETS_PER_SEC)).start()
|
# -*- coding: utf-8 -*-
"""
@author: yiping
"""
import numpy as np
from scipy.linalg import pinv,inv
class ELM:
    """Extreme Learning Machine: a random fixed hidden layer whose output
    weights are fit by ridge-regularized least squares."""

    def __init__(self, NumofHiddenNeurons, _lambda):
        # Number of hidden units and the ridge regularization strength.
        self.NumHiddenNeurons = NumofHiddenNeurons
        self._lambda = _lambda

    def ActivationFun(self, x):
        """Logistic sigmoid, applied elementwise."""
        return 1.0 / (1 + np.exp(-x))

    def Train(self, x, y):
        """Draw random input weights and solve for the output weights.

        Solves (H'H + lambda*I) W = H'y for W, where H is the hidden-layer
        activation matrix of the training inputs.
        """
        _, n_features = x.shape
        self.inputweight = np.random.rand(self.NumHiddenNeurons, n_features)
        hidden = self.ActivationFun(np.dot(x, self.inputweight.T))
        regularizer = self._lambda * np.identity(hidden.shape[1])
        self.outputweight = np.linalg.lstsq(
            np.dot(hidden.T, hidden) + regularizer, np.dot(hidden.T, y))[0]

    def Test(self, x):
        """Forward pass: sigmoid of the linear readout of the hidden layer."""
        hidden = self.ActivationFun(np.dot(x, self.inputweight.T))
        return self.ActivationFun(np.dot(hidden, self.outputweight))
class Solution(object):
    """Four solutions to LeetCode 494 (Target Sum): count the ways to assign
    +/- signs to nums so the expression evaluates to S."""

    def findTargetSumWays(self, nums, S):
        """Brute-force recursion over both signs of every element, O(2^n).

        :type nums: List[int]
        :type S: int
        :rtype: int
        """
        if not nums:
            return 0
        n = len(nums)
        self.res = 0

        def helper(idx, remaining):
            # Base case: all numbers consumed; count if target exactly met.
            if idx == n:
                if remaining == 0:
                    self.res += 1
                return
            helper(idx + 1, remaining + nums[idx])
            helper(idx + 1, remaining - nums[idx])

        helper(0, S)
        return self.res

    def findTargetSumWays1(self, nums, S):
        """Memoized top-down recursion; O(n * sum) distinct states.

        Fixes over the original version: the cache key no longer carries
        the constant `j` dimension, the debug prints are gone, and helper
        is invoked only once.
        """
        lookup = {}
        n = len(nums)

        def helper(i, remaining):
            if i == n:
                return 1 if remaining == 0 else 0
            if (i, remaining) in lookup:
                return lookup[(i, remaining)]
            res = (helper(i + 1, remaining - nums[i])
                   + helper(i + 1, remaining + nums[i]))
            lookup[(i, remaining)] = res
            return res

        return helper(0, S)

    def findTargetSumWays2(self, nums, S):
        """Iterative DP over the multiset of reachable sums."""
        from collections import defaultdict
        count1 = defaultdict(int)
        count1[0] = 1
        for num in nums:
            count2 = defaultdict(int)
            for tmp_sum, ways in count1.items():
                count2[tmp_sum - num] += ways
                count2[tmp_sum + num] += ways
            count1 = count2
        return count1[S]

    def findTargetSumWays3(self, nums, S):
        """Subset-sum reduction: with P the positives and N the negatives,
        P - N = S and P + N = total, so P = (S + total) / 2."""
        total = sum(nums)
        # BUG FIX: also reject S < -total; previously a large negative S
        # produced a negative target and an IndexError on the empty table.
        if S > total or S < -total or (S + total) % 2 != 0:
            return 0
        target = (S + total) // 2

        # Classic 0/1 subset-count DP, iterating sums downwards so each
        # number is used at most once.
        dp = [0] * (target + 1)
        dp[0] = 1
        for num in nums:
            for i in range(target, num - 1, -1):
                dp[i] += dp[i - num]
        return dp[target]
# Quick manual smoke test of the subset-sum DP variant.
a = Solution()
print(a.findTargetSumWays3([1, 1, 1, 1, 1], 3))
print(a.findTargetSumWays3([2, 20, 24, 38, 44, 21, 45, 48, 30, 48, 14, 9, 21, 10, 46, 46, 12, 48, 12, 38], 48))
|
from cloud_controller.middleware import AGENT_HOST, AGENT_PORT
from cloud_controller.middleware.helpers import connect_to_grpc_server
from cloud_controller.middleware.middleware_pb2 import InstanceConfig, CODE, RunParameters
from cloud_controller.middleware.middleware_pb2_grpc import MiddlewareAgentStub
# Connect to the middleware agent over gRPC at the configured host/port.
mwa = connect_to_grpc_server(MiddlewareAgentStub, AGENT_HOST, AGENT_PORT)

# Minimal instance configuration for a local, non-production agent.
cfg = InstanceConfig(
    instance_id="id",
    api_endpoint_ip="0.0.0.0",
    api_endpoint_port=8282,
    production=False
)
# Register a single probe named "recognize" on the instance config.
probe = cfg.probes.add()
probe.name = "recognize"

mwa.InitializeInstance(cfg)
# Trigger one run of the probe on the freshly initialized instance.
mwa.RunProbe(RunParameters(instance_id="id", run_id="0", probe_id="recognize"))
|
# Sweet Traditional Damage Skin
def init():
    """Script entry point; `sm` (script manager) is injected by the game engine."""
    # Register damage skin item 2432154 with the account's collection.
    success = sm.addDamageSkin(2432154)
    if success:
        sm.chat("The Sweet Traditional Damage Skin has been added to your account's damage skin collection.")
    # sm.consumeItem(2432154)
from rest_framework import serializers
from django.contrib.contenttypes.models import ContentType
from .models import *
class CommentSerializer(serializers.Serializer):
    """Serializer for comments that attach to an arbitrary model instance.

    The target object is addressed by "object_type" ("app_label.model")
    plus "object_id"; validation resolves them into `content_object`.
    """

    # Separator between app label and model name inside `object_type`.
    CONTENT_TYPE_SEP = '.'

    title = serializers.CharField(max_length=255)
    text = serializers.CharField()
    author = serializers.CharField(required=False)
    object_type = serializers.CharField()
    object_id = serializers.IntegerField()
    # will be dynamicly changed after validation
    # NOTE(review): `serializers.Field(required=False)` is pre-DRF-3 API;
    # confirm the installed rest_framework version before touching this.
    content_object = serializers.Field(required=False)

    def validate_object_type(self, value):
        """Resolve an "app_label.model" string into a ContentType.

        Returns None (rather than raising) when no matching ContentType
        exists; `validate` then simply skips attaching `content_object`.

        :rtype: ContentType|None
        """
        app_label, _sep, model_name = value.partition(self.CONTENT_TYPE_SEP)
        if not all([app_label, model_name]):
            raise serializers.ValidationError('Please specify field in proper format')
        ct_obj_qs = ContentType.objects.filter(app_label=app_label, model=model_name)
        ct_obj = ct_obj_qs.first()
        return ct_obj

    def validate(self, data):
        """Attach the referenced model instance (or None) as `content_object`."""
        data = super().validate(data)
        content_type = data['object_type']
        if content_type:
            data['content_object'] = content_type.get_all_objects_for_this_type(pk=data['object_id']).first()
        return data
|
import numpy as np
import tensorflow as tf
import cv2
import os
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import label_map_util

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF info/warning logs
cv2.setUseOptimized(True)  # enable OpenCV's optimized code paths

###############################################
# User configuration
PATH_TO_CKPT = '/home/zhu/catkin_ws/src/tensorflow_object_detection/object_detection/outputing/ssd_mobilenet_v1_coco/frozen_inference_graph.pb'  # frozen detection graph
PATH_TO_LABELS = '/home/zhu/catkin_ws/src/tensorflow_object_detection/object_detection/data/mscoco_label_map.pbtxt'  # COCO label map
NUM_CLASSES = 90  # number of classes in the label map
camera_num = 0  # index of the capture device
width, height = 640,480  # capture/processing resolution
###############################################

# Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

# Loading label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

mv = cv2.VideoCapture(camera_num)  # open the camera
mv.set(3, width)   # property 3 = frame width
mv.set(4, height)  # property 4 = frame height

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand

with detection_graph.as_default():
    with tf.Session(graph=detection_graph, config=config) as sess:
        # Input and output tensors of the detection graph.
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        while True:
            ret, image_source = mv.read()  # grab a frame
            image_np = cv2.resize(image_source , (width, height), interpolation=cv2.INTER_CUBIC)
            image_np_expanded = np.expand_dims(image_np, axis=0)  # add batch dim
            # Actual detection.
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=4)
            cv2.imshow("video", image_np)
            if cv2.waitKey(1) & 0xFF == ord('q'):  # quit on 'q'
                break
|
from typing import List, Dict, Any, Union, Optional
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
from virtool.data.transforms import AbstractTransform
from virtool.tasks.models import SQLTask
from virtool.types import Document
from virtool.utils import get_safely
class AttachTaskTransform(AbstractTransform):
    """
    Attaches more complete task data to a document with a `task.id` field.
    """

    def __init__(self, pg: AsyncEngine):
        self._pg = pg

    async def attach_one(self, document, prepared):
        """Return *document* with the prepared task data in its "task" field."""
        return {**document, "task": prepared}

    async def attach_many(
        self, documents: List[Document], prepared: Dict[int, Any]
    ) -> List[Document]:
        """Attach prepared task data to each document.

        Documents without a task id — or whose task row is absent from
        *prepared* — get ``"task": None``.
        """
        attached = []

        for document in documents:
            task_id = get_safely(document, "task", "id")
            # BUG FIX: compare against None (0 is a falsy but valid id) and
            # use .get() so a missing key cannot raise KeyError.
            task = prepared.get(task_id) if task_id is not None else None
            attached.append({**document, "task": task})

        return attached

    async def prepare_one(self, document) -> Optional[Document]:
        """Fetch the full task row for *document*'s task id, if any."""
        task_id = get_safely(document, "task", "id")

        if task_id is None:
            return None

        async with AsyncSession(self._pg) as session:
            result = (
                await session.execute(select(SQLTask).filter_by(id=task_id))
            ).scalar()

        # The task row may no longer exist; avoid to_dict() on None.
        return result.to_dict() if result is not None else None

    async def prepare_many(
        self, documents: List[Document]
    ) -> Dict[Union[int, str], Any]:
        """Fetch task rows for every distinct task id across *documents*."""
        task_ids = {get_safely(document, "task", "id") for document in documents}
        task_ids.discard(None)

        async with AsyncSession(self._pg) as session:
            results = await session.execute(
                select(SQLTask).filter(SQLTask.id.in_(list(task_ids)))
            )

        return {task.id: task.to_dict() for task in results.scalars()}
|
import os
import subprocess
from os import listdir
from os.path import isfile, join
import psutil
p = psutil.Process(os.getpid())
p.nice(psutil.REALTIME_PRIORITY_CLASS)
# get all files in gn folder
jarpath = "impl_GANET/out/artifacts/gaNet_jar/gaNet.jar"
cmd = "java -jar "
data_path = "benchmark_gen/gml_files/benchmarks/lfr2/n1000/"
files = [f for f in listdir(data_path) if f.endswith(".gml")]
print files
for f in files:
# subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/default.properties -R=10", shell=True)
# subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/large.properties -R=10", shell=True)
subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/higher_r.properties -R=10", shell=True)
subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/large_high_r.properties -R=10", shell=True)
data_path = "benchmark_gen/gml_files/benchmarks/lfr2/n1000b/"
files = [f for f in listdir(data_path) if f.endswith(".gml")]
print files
for f in files:
# subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/default.properties -R=10", shell=True)
# subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/large.properties -R=10", shell=True)
subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/higher_r.properties -R=10", shell=True)
subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/large_high_r.properties -R=10", shell=True)
data_path = "benchmark_gen/gml_files/benchmarks/lfr2/n5000/"
files = [f for f in listdir(data_path) if f.endswith(".gml")]
print files
for f in files:
# subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/default.properties -R=10", shell=True)
# subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/large.properties -R=10", shell=True)
subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/higher_r.properties -R=10", shell=True)
subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/large_high_r.properties -R=10", shell=True)
data_path = "benchmark_gen/gml_files/benchmarks/lfr2/n5000b/"
files = [f for f in listdir(data_path) if f.endswith(".gml")]
print files
for f in files:
# subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/default.properties -R=10", shell=True)
# subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/large.properties -R=10", shell=True)
subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/higher_r.properties -R=10", shell=True)
subprocess.call(cmd + jarpath + " -G=D:/alien-pineapple/"+ data_path + f + " -P=impl_GANET/large_high_r.properties -R=10", shell=True) |
# coding: utf-8
# In[1]:
""" Join delta_ids, supply and demand tables.
-------------------------------------------------------------------------------
the result is a table with the normal supply and demand and the delta id
appended to the table.
Author: Rutger Hofste
Date: 20180725
Kernel: python35
Docker: rutgerhofste/gisdocker:ubuntu16.04
Args:
TESTING (Boolean) : Toggle testing case.
SCRIPT_NAME (string) : Script name.
OUTPUT_VERSION (integer) : output version.
DATABASE_ENDPOINT (string) : RDS or postGreSQL endpoint.
DATABASE_NAME (string) : Database name.
TABLE_NAME_AREA_30SPFAF06 (string) : Table name used for areas. Must exist
on same database as used in rest of script.
S3_INPUT_PATH_RIVERDISCHARGE (string) : AWS S3 input path for
riverdischarge.
S3_INPUT_PATH_DEMAND (string) : AWS S3 input path for
demand.
"""
# Run toggles and naming.
TESTING = 0
OVERWRITE_OUTPUT = 1
SCRIPT_NAME = "Y2018M07D25_RH_Join_Deltas_Values_V01"
OUTPUT_VERSION = 1
# Source database and the left/right tables of the join.
DATABASE_ENDPOINT = "aqueduct30v05.cgpnumwmfcqc.eu-central-1.rds.amazonaws.com"
DATABASE_NAME = "database01"
INPUT_TABLE_NAME_LEFT = "global_historical_all_multiple_m_30spfaf06_v02"
INPUT_TABLE_NAME_RIGHT = "y2018m07d25_rh_delta_lookup_table_postgis_v01_v01"
# Output table name: lowercased script name + zero-padded version suffix.
OUTPUT_TABLE_NAME = SCRIPT_NAME.lower() + "_v{:02.0f}".format(OUTPUT_VERSION)
print("INPUT_TABLE_NAME_LEFT: " , INPUT_TABLE_NAME_LEFT,
      "\nINPUT_TABLE_NAME_RIGHT: ",INPUT_TABLE_NAME_RIGHT,
      "\nOutput Table: " , OUTPUT_TABLE_NAME)

# In[2]:

# Record wall-clock start time for the elapsed-time report at the end.
import time, datetime, sys
dateString = time.strftime("Y%YM%mD%d")
timeString = time.strftime("UTC %H:%M")
start = datetime.datetime.now()
print(dateString,timeString)
sys.version  # notebook display expression (no effect when run as a script)

# In[3]:

# imports
import re
import os
import numpy as np
import pandas as pd
import aqueduct3
from datetime import timedelta
from sqlalchemy import *
pd.set_option('display.max_columns', 500)

# In[4]:

# Database password is read from a dot-file; never hard-code credentials.
F = open("/.password","r")
password = F.read().splitlines()[0]
F.close()
engine = create_engine("postgresql://rutgerhofste:{}@{}:5432/{}".format(password,DATABASE_ENDPOINT,DATABASE_NAME))
connection = engine.connect()
if OVERWRITE_OUTPUT:
    sql = text("DROP TABLE IF EXISTS {};".format(OUTPUT_TABLE_NAME))
    result = engine.execute(sql)

# In[5]:

# Base columns taken from the left (supply/demand) table.
columns_to_keep_left = ["pfafid_30spfaf06",
                        "temporal_resolution",
                        "year",
                        "month",
                        "area_m2_30spfaf06",
                        "area_count_30spfaf06"]

# In[6]:

sectors = ["pdom",
           "pind",
           "pirr",
           "pliv"]
use_types = ["ww","wn"]

# In[7]:

# Add the per-sector columns for each use type ("ww"/"wn").
for sector in sectors:
    for use_type in use_types:
        columns_to_keep_left.append("{}{}_count_30spfaf06".format(sector,use_type))
        columns_to_keep_left.append("{}{}_m_30spfaf06".format(sector,use_type))

# In[8]:

columns_to_keep_left.append("riverdischarge_m_30spfaf06")
columns_to_keep_left.append("riverdischarge_count_30spfaf06")

# In[9]:

columns_to_keep_left  # notebook display expression

# In[10]:

#columns_to_keep_right = ["pfaf_id","delta_id"]
columns_to_keep_right = ["delta_id"]

# In[11]:

# Build CREATE TABLE ... AS SELECT joining the left table to the delta
# lookup on pfaf id, keeping only rows that belong to a delta.
sql = "CREATE TABLE {} AS".format(OUTPUT_TABLE_NAME)
sql += " SELECT "
for column_to_keep_left in columns_to_keep_left:
    sql += " l.{},".format(column_to_keep_left)
for column_to_keep_right in columns_to_keep_right:
    sql += " r.{},".format(column_to_keep_right)
sql = sql[:-1]  # drop the trailing comma after the last selected column
sql += " FROM {} l".format(INPUT_TABLE_NAME_LEFT)
sql += " INNER JOIN {} r ON".format(INPUT_TABLE_NAME_RIGHT)
sql += " l.pfafid_30spfaf06 = r.pfaf_id"
sql += " WHERE r.delta_id >= 0"
if TESTING:
    sql += " LIMIT 100"

# In[12]:

print(sql)

# In[13]:

result = engine.execute(sql)

# In[14]:

# Index the output table on the join/lookup key.
sql_index = "CREATE INDEX {}pfafid_30spfaf06 ON {} ({})".format(OUTPUT_TABLE_NAME,OUTPUT_TABLE_NAME,"pfafid_30spfaf06")

# In[15]:

result = engine.execute(sql_index)

# In[16]:

engine.dispose()

# In[17]:

# Report total wall-clock runtime.
end = datetime.datetime.now()
elapsed = end - start
print(elapsed)
# In[ ]:

# BUG FIX: these notebook notes were bare text in the .py export and made
# the module a SyntaxError; kept as comments.
# Previous runs:
# 0:00:02.641575
|
class GroupHelp:
    """Page-object style helper for the group pages of the app under test."""

    def __init__(self, app):
        self.app = app

    def open_group_page(self):
        """Navigate to the groups page via its top-level link."""
        wd = self.app.wd
        wd.find_element_by_link_text("groups").click()

    def create(self, group):
        """Fill in and submit the new-group form for *group*."""
        wd = self.app.wd
        self.open_group_page()
        wd.find_element_by_name("new").click()
        # For each text field: focus it, clear leftovers, type the value.
        for field, value in (("group_name", group.name),
                             ("group_header", group.header),
                             ("group_footer", group.footer)):
            wd.find_element_by_name(field).click()
            wd.find_element_by_name(field).clear()
            wd.find_element_by_name(field).send_keys(value)
        wd.find_element_by_name("submit").click()
        # Post-submit navigation link (xpath anchored to the page layout).
        wd.find_element_by_xpath("/html/body/div/div[4]/div/i/a").click()
#MenuTitle: Color Layers
# -*- coding: utf-8 -*-
__doc__="""
Colors glyph layers to differentiate if they are Base Glyphs, Composites or Mixed Composites.
"""
font = Glyphs.font

# Bucket every layer by how it is constructed:
#   outlines only (no components) -> base glyph
#   components only (no outlines) -> pure composite
#   both                          -> mixed composite
base_layers = []
composite_layers = []
mixed_layers = []
for glyph in font.glyphs:
    for layer in glyph.layers:
        has_components = len(layer.components) > 0
        has_paths = len(layer.paths) > 0
        if not has_components:
            base_layers.append(layer)
        elif not has_paths:
            composite_layers.append(layer)
        else:
            mixed_layers.append(layer)

# Apply one Glyphs label color index per bucket.
for color_index, layers in ((6, base_layers),
                            (10, composite_layers),
                            (3, mixed_layers)):
    for layer in layers:
        layer.setColorIndex_(color_index)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18/3/5 下午1:21
# @Author : cicada@hole
# @File : trees.py
# @Desc : 第二章 决策树
# @Link :
from math import log
import random
import json
import operator
'''
计算给定数据集的香农熵
'''
def calcShannonEnt(dataSet):
    """Return the Shannon entropy of the class labels in *dataSet*.

    The class label is taken to be the last element of each record.
    H = -sum(p * log2(p)) over the label probabilities.
    """
    total = len(dataSet)
    # Tally how often each label occurs.
    labelCounts = {}
    for record in dataSet:
        label = record[-1]
        labelCounts[label] = labelCounts.get(label, 0) + 1
    entropy = 0.0
    for count in labelCounts.values():
        p = count / float(total)
        entropy -= p * log(p, 2)
    return entropy
def createDataSet():
    """Return the toy fish dataset and its feature labels.

    Features: "no surfacing" (survives without surfacing) and
    "flippers" (has flippers); the last column is the class label.
    """
    samples = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    featureLabels = ['no surfacing', 'flippers']
    return samples, featureLabels
'''
划分数据集
1.如果第i个特征值为value
2.剩余的vec从start-i-1,i+1~end,组装成新的vec
3.返回挑选后的数据集合
'''
def splitDataSet(dataSet, axis, value):
    """Return the records whose feature *axis* equals *value*,
    with that feature column removed from each returned record.
    """
    return [record[:axis] + record[axis + 1:]
            for record in dataSet
            if record[axis] == value]
'''
选择最好的数据集划分方式:
1.获取数据集的特征数,计算原始香农熵
2.遍历特征,根据单个特征值,划分数据集,求得原始熵-划分集熵,即信息增益
3.更新信息增益,增益最大的特征即为最好的特征
备注:熵越大,信息量越大,信息中如果全为同样的值,则熵为0,信息量最小
'''
def chooseBestFeatureToSplit(dataSet):
    """Return the index of the feature with the largest information gain.

    Gain = base entropy of the whole set minus the weighted entropy of
    the partition induced by that feature; -1 if no feature helps.
    """
    numFeatures = len(dataSet[0]) - 1
    baseEntropy = calcShannonEnt(dataSet)
    total = float(len(dataSet))
    bestInfoGain = 0.0
    bestFeature = -1
    for feat in range(numFeatures):
        # Distinct values this feature takes across the dataset.
        values = {record[feat] for record in dataSet}
        # Weighted entropy of the partition by this feature.
        splitEntropy = 0.0
        for value in values:
            subset = splitDataSet(dataSet, feat, value)
            splitEntropy += len(subset) / total * calcShannonEnt(subset)
        gain = baseEntropy - splitEntropy
        if gain > bestInfoGain:
            bestInfoGain = gain
            bestFeature = feat
    return bestFeature
'''
递归构建决策树:
1.递归结束条件:程序遍历完数据集,或者每个分支下所有实例都具有相同分类
2.多数表决法
3.返回出现次数最多的分类
'''
def majorityCnt(classList):
    """Return the most frequent class label in *classList* (majority vote).

    Used when all features are exhausted but labels still disagree.
    """
    classCount = {}
    for vote in classList:
        if vote not in classCount:
            classCount[vote] = 0
        classCount[vote] += 1
    # BUG FIX: the sort key must extract the count -- operator.itemgetter(1).
    # The original passed the bare `operator.itemgetter` class, which made
    # sorted() raise TypeError comparing itemgetter instances.
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1),
                              reverse=True)
    return sortedClassCount[0][0]
def createTree(dataSet, labels):
    """Recursively build a decision tree as nested dicts.

    Returns either a class label (leaf) or {featureLabel: {value: subtree}}.
    NOTE: *labels* is mutated -- the chosen feature's label is deleted --
    matching the original behaviour of this module.
    """
    classList = [record[-1] for record in dataSet]
    # Leaf: every record carries the same label.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # Leaf: no features left to split on -> majority vote.
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    del labels[bestFeat]
    tree = {bestFeatLabel: {}}
    for value in {record[bestFeat] for record in dataSet}:
        # Each child recursion gets its own copy of the remaining labels.
        tree[bestFeatLabel][value] = createTree(
            splitDataSet(dataSet, bestFeat, value), labels[:])
    return tree
def test():
    # Smoke test of the helpers on the toy dataset; prints intermediates.
    myDat, labels = createDataSet()
    print(myDat)
    # myDat[0][-1] = 'maybe'
    # shannonEnt = calcShannonEnt(myDat) # higher entropy = more mixed labels
    # print(shannonEnt)
    retDataSet = splitDataSet(myDat, 1 ,0) # split myDat where feature 1 == 0
    print(retDataSet)
    bestFeature = chooseBestFeatureToSplit(myDat) # index of the best feature
    print(bestFeature)
def test1():
    # Placeholder for future experiments.
    pass
if __name__ == '__main__':
    test()  # run the smoke test when executed as a script
# Exercise: integer arithmetic, operator precedence and comparisons.
print("I will now count my chickens:")
# 30 / 6 is true division, so this prints a float (30.0 on Python 3).
print("Hens", 25 + 30 / 6)
# Precedence: 25 * 3 % 4 == 75 % 4 == 3, so the result is 97.
print("Roosters", 100 - 25 * 3 % 4)
# A longer expression mixing %, / and +/-.
print("Now I will count the eggs:")
# 3+2+1-5 == 1; 4 % 2 == 0; 1 / 4 == 0.25; 1 + 0 - 0.25 + 6 == 6.75.
print(3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6)
# Boolean comparisons evaluate to True/False.
print("Is it true that 3 + 2 < 5 - 7?")
# 5 < -2 is False.
print(3 + 2 < 5 - 7)
# Show the two sides of the comparison above.
print("What is 3 + 2?", 3 + 2)
print("What is 5 - 7?", 5 - 7)
# The conclusion from the calculation.
print("Oh, that's why it's False.")
# More comparison operators.
print("How about some more.")
print("Is it greater?", 5 > -2)
print("Is it greater or equal?", 5 >= -2)
print("Is it less or equal?", 5 <= -2)
|
#
# example2.py [version 1.0]
# CoSniWa: COde SNIppet stopWAtch [Python port] - example 2
#
# Example2: Two code snippets.
#
# read more on: www.speedupcode.com
#
# (c) Jacek Pierzchlewski, 2017 jacek@pierzchlewski.com
# license: BSD-2-Clause.
#
try:
import cCosniwa as csw
except ImportError:
print("\nERROR: cCosniwa was not found! \n")
def add(iA, iB):
    """
    Add iA + iB in a deliberately slow way (one increment per step),
    so the timed snippet does measurable work.

    Returns the sum. (BUG FIX: the original computed the sum but never
    returned it, so the function named `add` always yielded None; the
    stopwatch example ignores the return value, so this is
    backward-compatible.)
    """
    for _ in range(iB):
        iA = iA + 1
    return iA
def main():
    """Time 100 slow additions: snippet 1 is one add, snippet 2 the loop."""
    csw.call_start(2)  # Start code snippet (loop)
    for inxAdd in range(100):
        csw.call_start(1)  # Start code snippet (add)
        add(1, 100000)
        csw.call_stop(1)  # Stop code snippet (add)
    csw.call_stop(2)  # Stop code snippet (loop)
    # Print the results
    csw.resultc()
if __name__ == '__main__':
    main()  # script entry point
|
# a tool for mass addition of new books: emits one <div class="swiper-slide">
# snippet per image found in `path`, appended to generated.txt for
# copy/paste into the site markup.
import os
import natsort

path = os.getcwd() + "/images/math_uchebnik_new"  # folder with the new images
# Natural sort so file10.jpg follows file9.jpg (book page order).
files = natsort.natsorted(os.listdir(path))

# NOTE(review): the generated <img> paths point at images/math_pomagalo/
# while `path` lists images/math_uchebnik_new -- confirm which folder the
# site actually serves these images from.
template = (
    '<div class="swiper-slide">\n'
    '    <img data-src="images/math_pomagalo/{}" class="swiper-lazy">\n'
    '    <div class="swiper-lazy-preloader swiper-lazy-preloader-white"></div>\n'
    '</div>\n'
)
# `with` guarantees the file is closed even on error; append mode so
# reruns do not clobber earlier output.
with open("generated.txt", "a") as out:
    for f in files:
        # BUG FIX: the original emitted a stray space before the closing
        # quote of data-src, producing image URLs with a trailing space.
        out.write(template.format(f))
|
# Copyright (c) 2020. Yul HR Kang. hk2699 at caa dot columbia dot edu.
import matplotlib as mpl
import numpy as np
import torch
from matplotlib import pyplot as plt
from collections import OrderedDict as odict
from pprint import pprint
import torch
from torch import optim
from torch.nn import functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.distributions import MultivariateNormal, Uniform, Normal
from a03_variable_dur import coef_by_dur_vs_odif as calc
from lib.pylabyk import np2
from lib.pylabyk import numpytorch as npt, yktorch as ykt
from lib.pylabyk.numpytorch import npy, npys
from lib.pylabyk import plt2, np2, localfile
from data_2d import consts, load_data
locfile = localfile.LocalFile(
pth_root='../../Data_2D/Data_2D_Py/a01_RT/aa03_ch_ixn'
)
class Dtb1D(ykt.BoundedModule):
    """1D drift-to-bound (diffusion) decision model.

    Propagates a probability distribution over decision-variable bins
    frame by frame with a per-condition Gaussian drift kernel
    (implemented as a grouped conv1d), accumulating the mass absorbed
    past each bound as the per-choice probability.
    """
    def __init__(
            self,
            kappa0=10.,
            bound0=1.,
            diffusion=1.,
            y0=0.,
            ssq0=1e-6,
            dt=1/75,
            n_ev = 2 ** 7 + 1,
            max_ev = 3
    ):
        """
        :param kappa0: initial evidence gain (drift = (ev + bias) * kappa * dt).
        :param bound0: initial absorbing-bound height.
        :param diffusion: diffusion variance per unit time (pinned to
            ~1 by its bounds below, i.e. effectively fixed).
        :param y0: initial mean of the starting-point distribution.
        :param ssq0: initial variance of the starting-point distribution.
        :param dt: frame duration in seconds (1/75 -> 75 Hz frames).
        :param n_ev: number of decision-variable bins; must be odd so the
            symmetric conv1d padding keeps the grid centered.
        :param max_ev: half-range of the decision-variable grid.
        """
        super().__init__()
        # Fitted parameters, each clamped to a plausible range.
        self.kappa = ykt.BoundedParameter(kappa0, 0.01, 50.)
        self.bias = ykt.BoundedParameter(0., -0.5, 0.5)
        self.bound = ykt.BoundedParameter(bound0, 0.1, 2.5)
        # Bounds [0.99, 1.01] keep diffusion essentially constant.
        self.diffusion = ykt.BoundedParameter(diffusion, 0.99, 1.01)
        self.y0 = ykt.BoundedParameter(y0, -0.5, 0.5)
        self.ssq0 = ykt.BoundedParameter(ssq0, 1e-6, 1e-1)
        self.dt = dt
        assert n_ev % 2 == 1 # for padding in conv1d to work
        self.n_ev = n_ev
        self.max_ev = max_ev
        # Decision-variable grid and its bin width.
        self.ev_bin = torch.linspace(-self.max_ev, self.max_ev, self.n_ev)
        self.dev = self.ev_bin[1] - self.ev_bin[0]
        # Kernel support: 3.5 SD of one diffusion step plus a drift term
        # (0.5 * 50 * dt; presumably max |bias| x max kappa -- TODO confirm).
        self.max_ev_kernel = np.sqrt(diffusion * dt) * 3.5 + 0.5 * 50 * dt
        self.ev_bin_kernel = self.ev_bin[
            torch.abs(self.ev_bin) < self.max_ev_kernel
        ]
    def forward(self, ev):
        """
        @param ev: [condition, frame] momentary evidence per condition.
        @return: p_absorbed[condition, frame, ch] -- probability of being
            absorbed at each bound (ch 0 = lower, ch 1 = upper) per frame.
        @type ev: torch.Tensor
        @rtype: torch.Tensor
        """
        nt = ev.shape[-1]
        n_cond = ev.shape[-2]
        batch_shape = ev.shape[:-2]
        # Starting-point distribution over ev bins: N(y0, ssq0),
        # normalized and expanded to one row per condition.
        pev = npt.sumto1(
            torch.exp(
                Normal(loc=self.y0.v,
                       scale=torch.sqrt(self.ssq0.v)).log_prob(
                    self.ev_bin
                )
            ).expand(
                batch_shape + torch.Size([n_cond] + [-1])
            ).unsqueeze(0), -1)
        # Bring the frame axis first so the loop below walks frames.
        ev = npt.p2st(ev)
        # One diffusion step is Gaussian with SD sqrt(diffusion * dt).
        norm_kernel = Normal(loc=0.,
                             scale=torch.sqrt(self.diffusion.v * self.dt))
        ev_bin_kernel = self.ev_bin_kernel.expand(
            torch.Size([1] * (1 + len(batch_shape)) + [1, -1])
        )
        pad = ev_bin_kernel.shape[-1] // 2
        p_absorbed = torch.empty(
            torch.Size([nt])
            + batch_shape
            + torch.Size([n_cond, 2])
        )
        # Absorption masks: linear ramps (one bin wide) past each bound.
        mask_abs = torch.stack([
            torch.clamp(
                (-self.bound.v - self.ev_bin) / self.dev,
                0., 1.
            ), # mask_down (lower bound)
            torch.clamp(
                (self.ev_bin - self.bound.v) / self.dev,
                0., 1.
            ) # mask_up (upper bound)
        ], -1) # [ev, ch] (stacked along the last dim)
        # Fraction of mass that stays unabsorbed in each bin.
        mask_in = (
            (1. - npt.p2st(mask_abs)[0])
            * (1. - npt.p2st(mask_abs)[1])
        )
        for t, ev1 in enumerate(ev):
            # Per-condition step kernel: Gaussian displacement centered
            # on the drift (ev1 + bias) * kappa * dt, normalized to 1.
            kernel = npt.sumto1(
                torch.exp(norm_kernel.log_prob(
                    (ev1[:, None, None] + self.bias.v)
                    * self.kappa.v * self.dt
                    + ev_bin_kernel
                )), -1)
            # Grouped conv = one independent convolution per condition.
            pev = F.conv1d(
                pev, kernel,
                groups=n_cond,
                padding=pad
            )
            # Mass newly absorbed at each bound this frame.
            a = torch.sum(
                pev.unsqueeze(-1) * mask_abs[None, None, :], -2
            ).squeeze(-3) # [cond, ch]
            p_absorbed[t] = a
            # Remove absorbed mass before the next frame.
            pev = pev * mask_in[None, None, :]
            # print(p_absorbed[t].shape)
            # print('--')
        return npt.p2en(p_absorbed).transpose(-2, -1) # [cond, fr, ch]
class Dtb2D(ykt.BoundedModule):
pass
class Dtb2DSer(Dtb2D):
    """Serial 2D drift-to-bound variant (placeholder)."""
    pass
class Dtb2DPar(Dtb2D):
    """Parallel 2D drift-to-bound variant (placeholder)."""
    pass
class Dtb2DInh(Dtb2D):
    """2D drift-to-bound variant with inhibition (placeholder)."""
    pass
class Dtb2DTarg(Dtb2D):
    """2D drift-to-bound variant (targets; placeholder)."""
    pass
if __name__ == '__main__':
    # Smoke test: constant evidence levels -5..5 over 10 frames, one
    # backward pass through the model, then plot absorption over time.
    model = Dtb1D()
    ev = torch.arange(-5, 6)[:, None] + torch.zeros(1, 10)
    p_abs = model(ev)
    cost = torch.log(torch.sum(npt.p2st(p_abs)[0]))
    cost.backward()
    # Show each bounded parameter's value and gradient.
    pprint({
        k: (v.v.data, v._param.grad) for k, v in model.named_modules() if
        k != ''
    })
    print(p_abs.shape)
    n_cond = ev.shape[0]
    colors = plt.get_cmap('cool', n_cond)
    nt = ev.shape[1]
    t = np.arange(nt) * model.dt
    n_row = 2
    # P(absorbed) per frame; lower-bound choice drawn negative, upper positive.
    for ch in range(2):
        for i, p_abs1 in enumerate(p_abs):
            plt.plot(
                t,
                npy(p_abs1[:, ch]) * np.sign(ch - 0.5),
                color=colors(i)
            )
    plt.show()
#
# [798] Transform to Chessboard
#
# https://leetcode.com/problems/transform-to-chessboard/description/
#
# algorithms
# Hard (30.52%)
# Total Accepted: 338
# Total Submissions: 1.1K
# Testcase Example: '[[0,1,1,0],[0,1,1,0],[1,0,0,1],[1,0,0,1]]'
#
# An N x N board contains only 0s and 1s. In each move, you can swap any 2 rows
# with each other, or any 2 columns with each other.
#
# What is the minimum number of moves to transform the board into a
# "chessboard" - a board where no 0s and no 1s are 4-directionally adjacent? If
# the task is impossible, return -1.
#
#
# Examples:
# Input: board = [[0,1,1,0],[0,1,1,0],[1,0,0,1],[1,0,0,1]]
# Output: 2
# Explanation:
# One potential sequence of moves is shown below, from left to right:
#
# 0110 1010 1010
# 0110 --> 1010 --> 0101
# 1001 0101 1010
# 1001 0101 0101
#
# The first move swaps the first and second column.
# The second move swaps the second and third row.
#
#
# Input: board = [[0, 1], [1, 0]]
# Output: 0
# Explanation:
# Also note that the board with 0 in the top left corner,
# 01
# 10
#
# is also a valid chessboard.
#
# Input: board = [[1, 0], [1, 0]]
# Output: -1
# Explanation:
# No matter what sequence of moves you make, you cannot end with a valid
# chessboard.
#
#
# Note:
#
#
# board will have the same number of rows and columns, a number in the range
# [2, 30].
# board[i][j] will be only 0s or 1s.
#
#
#
import collections
class Solution(object):
    def movesToChessboard(self, board):
        """Return the minimum number of row/column swaps to turn *board*
        into a chessboard pattern, or -1 if impossible.

        Swapping rows/columns never changes the multiset of row (or
        column) patterns, so a solvable board has exactly two distinct
        row patterns -- bitwise complements of each other -- occurring
        N//2 and (N+1)//2 times; same for columns.

        (Cleanup: removed the leftover debug print statements.)
        """
        N = len(board)
        ans = 0
        # Examine rows, then columns, with identical logic.
        for count in (collections.Counter(map(tuple, board)),
                      collections.Counter(zip(*board))):
            # Exactly two kinds of lines, split N//2 / (N+1)//2.
            if len(count) != 2 or sorted(count.values()) != [N//2, (N+1)//2]:
                return -1
            # The two kinds must be exact bitwise complements.
            line1, line2 = count
            if not all(x ^ y for x, y in zip(line1, line2)):
                return -1
            # Possible starting bits for line1 in the target pattern:
            # odd N forces the majority bit to start; even N allows both.
            starts = [+(line1.count(1) * 2 > N)] if N % 2 else [0, 1]
            # Mismatches against the ideal alternating line, halved,
            # is the number of swaps for this dimension.
            ans += min(sum((i - x) % 2 for i, x in enumerate(line1, start))
                       for start in starts) // 2
        return ans
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torchvision.utils import make_grid
import numpy as np
import pandas as pd
import seaborn as sn # for heatmaps
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
# Human-readable CIFAR-10 class names, index-aligned with the labels
# (some entries carry a leading space for column alignment when printed).
class_names = ['plane', ' car', ' bird', ' cat', ' deer', ' dog', ' frog', 'horse', ' ship', 'truck']
# Only convert PIL images to tensors; no augmentation.
transform = transforms.ToTensor()
# Downloads CIFAR-10 into ../Data on first run.
train_data = datasets.CIFAR10(root="../Data",download=True,train=True,transform=transform)
test_data = datasets.CIFAR10(root="../Data",download=True,train=False,transform=transform)
torch.manual_seed(101)  # reproducible shuffling
train_loader = DataLoader(train_data,batch_size=10,shuffle=True)
test_loader = DataLoader(test_data,batch_size=10,shuffle=False)
# Grab one batch just to visualize it.
for images, labels in train_loader:
    break
im = make_grid(images,nrow=5)
plt.figure(figsize=(10,12))
# make_grid returns (C, H, W); imshow expects (H, W, C).
plt.imshow(np.transpose(im.numpy(),(1,2,0)))
plt.show()
class cnn(nn.Module):
    """Small CIFAR-10 CNN: two conv+pool stages, three FC layers.

    Spatial sizes for a (N, 3, 32, 32) input: conv1 5x5 -> 28,
    pool -> 14, conv2 3x3 -> 12, pool -> 6; flattened to 16*6*6.
    Output is (N, 10) log-probabilities.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5, 1)
        self.conv2 = nn.Conv2d(6, 16, 3, 1)
        self.fc1 = nn.Linear(16 * 6 * 6, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Two conv -> relu -> 2x2 max-pool stages.
        for conv in (self.conv1, self.conv2):
            x = F.max_pool2d(F.relu(conv(x)), 2, 2)
        # Flatten and run the classifier head.
        x = x.view(-1, 16 * 6 * 6)
        x = F.relu(self.fc2(F.relu(self.fc1(x))))
        return F.log_softmax(self.fc3(x), dim=1)
# Model, loss and optimizer.
model = cnn()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

import time
start_time = time.time()

epochs = 10
# Per-epoch histories, stored as plain Python numbers.
# BUG FIX: the original appended live loss tensors and tensor counts,
# which keeps the autograd graph alive across epochs and breaks
# matplotlib when plotting tensors that require grad; use .item().
train_losses = []
test_losses = []
train_correct = []
test_correct = []

for i in range(epochs):
    trn_corr = 0
    tst_corr = 0

    # Run the training batches
    for b, (X_train, y_train) in enumerate(train_loader):
        b += 1

        # Apply the model
        y_pred = model(X_train)
        loss = criterion(y_pred, y_train)

        # Tally the number of correct predictions
        predicted = torch.max(y_pred.data, 1)[1]
        trn_corr += (predicted == y_train).sum().item()

        # Update parameters
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Print interim results
        if b % 1000 == 0:
            print(f'epoch: {i:2} batch: {b:4} [{10*b:6}/50000] loss: {loss.item():10.8f} \
accuracy: {trn_corr*100/(10*b):7.3f}%')

    train_losses.append(loss.item())
    train_correct.append(trn_corr)

    # Run the testing batches
    with torch.no_grad():
        for b, (X_test, y_test) in enumerate(test_loader):
            # Apply the model
            y_val = model(X_test)

            # Tally the number of correct predictions
            predicted = torch.max(y_val.data, 1)[1]
            tst_corr += (predicted == y_test).sum().item()
            loss = criterion(y_val, y_test)

    test_losses.append(loss.item())
    test_correct.append(tst_corr)

print(f'\nDuration: {time.time() - start_time:.0f} seconds') # print the time elapsed

# Loss curves (one point per epoch).
plt.plot(train_losses, label='training loss')
plt.plot(test_losses, label='validation loss')
plt.title('Loss at the end of each epoch')
plt.legend();

# Accuracy in percent: 50000/100 and 10000/100 samples per epoch.
plt.plot([t/500 for t in train_correct], label='training accuracy')
plt.plot([t/100 for t in test_correct], label='validation accuracy')
plt.title('Accuracy at the end of each epoch')
plt.legend();
# -*- coding: UTF-8 -*-
import requests
import xlsxwriter
import json
import sys
import time
import math
import os
import argparse
# CLI options for the scraper (defaults target used MacBook Pros).
parser = argparse.ArgumentParser(description='manual to this script')
parser.add_argument('--keyword', type=str, default = 'macbook pro')
parser.add_argument('--search_limit', type=int, default=100)
parser.add_argument('--conditions', type=str, default=None)
parser.add_argument('--price_min', type=int, default=32000)
parser.add_argument('--price_max', type=int, default=45000)
parser.add_argument('--start_year', type=int, default=2015)
parser.add_argument('--min_RAM', type=int, default=16)
args = parser.parse_args()
# Python 2 only: force UTF-8 default encoding for the Chinese titles.
reload(sys)
sys.setdefaultencoding('utf8')
# The output workbook is written to the user's Desktop.
os.chdir(os.path.expanduser("~/Desktop"))
def excel_title(worksheet):
    """Write the header row (row 0) of the results spreadsheet."""
    headers = ['名稱', '價格', '螢幕尺寸', '年份', 'RAM', 'ROM', 'CPU', '商品連結']
    for col, title in enumerate(headers):
        worksheet.write(0, col, title)
def excel_content(worksheet, data, headers, row, start_year, min_RAM):
    """Write one search-result listing into *row* of the worksheet.

    Fetches the item-detail API for the price, parses specs out of the
    listing title, filters by minimum year/RAM and sold-out markers.
    Returns True when a row was written, False when filtered out.
    NOTE: written for Python 2 -- `filter(str.isdigit, ...)` returning a
    str, and byte-string .find() on the UTF-8 encoded title.
    """
    url2 = 'https://shopee.tw/api/v2/item/get?itemid=' + str(data['itemid']) + '&shopid=' + str(data['shopid'])
    r = requests.get(url2, headers=headers)
    api2_data = json.loads(r.text)
    name = data['name'].encode('utf-8')
    # Parse screen size / year / RAM / ROM / CPU tokens out of the title.
    # Candidate tokens are ordered most-specific first; the last match wins.
    screenSizeArr = ['12"', '13"', '13.3"', '15"', '16"',
                     '12″', '13″', '13.3″', '15″', '16″',
                     '12吋', '13吋', '13.3吋', '15吋', '16吋',
                     '12寸', '13寸', '13.3寸', '15寸', '16寸',
                     '12', '13', '13.3', '15', '16']
    yearArr = ['2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020', '2021']
    RAMArr = ['8g', '16g', '32g', '8G', '16G', '32G', '8', '16', '32']
    ROMArr = ['128G', '256G', '512G', '1T', '128g', '256g', '512g', '1t', '128', '256', '512']
    CPUArr = ['i5', 'i7', 'i9']
    soldOutArr = ['已售出', '售出']
    macScreenSize = macYear = macRAM = macROM = macCPU = ''
    jumpItem = False
    for screenSize in screenSizeArr:
        if name.find(screenSize) != -1:
            macScreenSize = screenSize
    for year in yearArr:
        if name.find(year) != -1:
            macYear = year
    for RAM in RAMArr:
        if name.find(RAM) != -1:
            macRAM = RAM
    for ROM in ROMArr:
        if name.find(ROM) != -1:
            macROM = ROM
    for CPU in CPUArr:
        if name.find(CPU) != -1:
            macCPU = CPU
    # Skip listings already marked as sold.
    for soldOut in soldOutArr:
        if name.find(soldOut) != -1:
            jumpItem = True
    # Only keep listings that meet the minimum RAM / year requirements;
    # listings whose title lacks the spec are skipped too.
    if min_RAM:
        if macRAM == '':
            jumpItem = True
        elif int(min_RAM) > int(filter(str.isdigit, macRAM)):
            jumpItem = True
    if start_year:
        if macYear == '':
            jumpItem = True
        elif int(start_year) > int(macYear):
            jumpItem = True
    if jumpItem == False:
        # The API reports price in 1/100000 TWD units.
        price = str(api2_data['item']['price'] / 100000)
        shopUrl = 'https://shopee.tw/' + name + '-i.' + str(data['shopid']) + '.' + str(data['itemid'])
        itemInfo = [name, price, macScreenSize, macYear, macRAM, macROM, macCPU, shopUrl]
        col = 0
        for info in (itemInfo):
            worksheet.write(row, col, info)
            col += 1
        return True
    else:
        return False
def shopee_scraper(keyword, search_limit=50, conditions=None, price_min=None, price_max=None, start_year=None, RAM=None):
    """Scrape Shopee search results for *keyword* into shopee.xlsx.

    search_limit: max number of listings to fetch (the API pages by 50).
    conditions / price_min / price_max: optional API-side filters.
    start_year / RAM: per-listing filters applied in excel_content().
    """
    row = 0
    # BUG FIX: `search_limit / 50` used integer division under Python 2,
    # so e.g. 60 collapsed to 1 page before ceil() could round up.
    # Dividing by 50.0 makes the ceil actually cover partial pages.
    runTimes = int(math.ceil(search_limit / 50.0))

    # Create a workbook and add a worksheet.
    workbook = xlsxwriter.Workbook('shopee.xlsx')
    worksheet = workbook.add_worksheet()
    worksheet.set_column("A:A", 70)  # widen column A for long titles

    # Header row first; listing rows follow.
    excel_title(worksheet)
    row += 1
    for runTime in range(0, runTimes):
        # Paged search API: 50 results per call, offset via `newest`.
        url = 'https://shopee.tw/api/v2/search_items/?by=price&keyword=' + keyword + '&limit=50&newest=' + str(
            50 * runTime)
        if conditions:
            url += '&conditions=' + conditions
        if price_min:
            url += '&price_min=' + str(price_min)
        if price_max:
            url += '&price_max=' + str(price_max)
        headers = {
            'User-Agent': 'Googlebot',
        }
        r = requests.get(url, headers=headers)
        api1_data = json.loads(r.text)
        # Stop early when the API runs out of results.
        if len(api1_data['items']) == 0:
            break
        for data in api1_data['items']:
            # Only advance the row when the listing passed the filters.
            is_insert = excel_content(worksheet, data, headers, row, str(start_year), str(RAM))
            if is_insert:
                row += 1
    workbook.close()
# shopee_scraper(keyword,search_limit,conditions,price_min,price_max,start_year,min_RAM)
shopee_scraper(args.keyword, args.search_limit, args.conditions, args.price_min, args.price_max, args.start_year, args.min_RAM)
|
import random
class Player:
    """Base player in the guessing game: has a name and a trivial guess."""

    def __init__(self, name):
        # Display name used in turn announcements.
        self.name = name

    def getName(self):
        """Return the player's display name."""
        return self.name

    def getGuess(self):
        """Return this player's guess; the base player always guesses 0."""
        return 0
class HumanPlayer(Player):
    """A player whose guesses are typed in at the console."""

    def __init__(self, name):
        super().__init__(name)

    def getGuess(self):
        """Prompt on stdin and return the entered guess as an int."""
        return int(input("Enter your guess: "))
class ComputerPlayer(Player):
    """A player that guesses uniformly at random in [0, 100]."""

    def __init__(self, name):
        super().__init__(name)

    def getGuess(self):
        """Return a random guess between 0 and 100 inclusive."""
        return random.randint(0, 100)
def guessingGame(player1, player2):
    """Alternate turns until someone guesses the secret number in [0, 100]."""
    answer = random.randint(0, 100)
    while True:
        # Each player takes one turn per round, player1 first.
        for player in (player1, player2):
            print(player.getName() + "'s turn to guess: ", end="")
            if checkForWin(player, player.getGuess(), answer):
                return
def checkForWin(player, guess, answer):
    """Report on *guess* versus *answer*; return True exactly on a win.

    Prints the player's guess and a too-high/too-low hint on a miss.
    (Fix: the original fell through to a bare `return`, yielding None on
    a miss -- same truthiness, but an explicit False is a clearer
    contract for callers testing the result.)
    """
    print(player.getName(), "guesses", guess)
    if answer == guess:
        print("You're right! You win!")
        return True
    elif answer < guess:
        print("Your guess is too high.")
    else:
        print("Your guess is too low.")
    return False
def main():
    # Exercise all pairings: human vs human, human vs computer,
    # computer vs computer.
    guessingGame(HumanPlayer("Kim"),HumanPlayer("Sun"))
    guessingGame(HumanPlayer("Kim"),ComputerPlayer("Apple"))
    guessingGame(ComputerPlayer("Apple"),ComputerPlayer("Dell"))
if __name__ == "__main__":
    main()  # script entry point
|
import cv2
import numpy as np
# Load the test image and keep an untouched copy.
input_image = cv2.imread("C:/Users/niraj/Anaconda3/Projects/Computer Vision/OpenCV/images/hand.jpg")
input_image_copy = input_image.copy()
cv2.imshow("Original", input_image)
cv2.waitKey(0)
# Grayscale conversion, needed for thresholding.
grayscaled = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
# Canny edge detection (left disabled).
# edged = cv2.Canny(grayscaled, 20, 180)
# cv2.imshow("Canny Edge", edged)
# cv2.waitKey(0)
# Inverse binary threshold: pixels darker than 170 become foreground (255).
ret, threshold = cv2.threshold(grayscaled, 170, 255, cv2.THRESH_BINARY_INV)
cv2.imshow("Threshold", threshold)
cv2.waitKey(0)
# NOTE(review): the 3-value unpack is the OpenCV 3.x findContours API;
# OpenCV 4.x returns only (contours, hierarchy) -- confirm the cv2 version.
image,contours, hierarchy = cv2.findContours(image= threshold, mode= cv2.RETR_LIST, method= cv2.CHAIN_APPROX_NONE)
# cv2.imshow("Contours", image)
# cv2.waitKey(0)
# Sort contours smallest-to-largest by area.
sorted_contours = sorted(contours, key= cv2.contourArea, reverse= False)
#print(sorted_contours)
# Draw the convex hull of every contour except the largest
# (presumably the image border/background -- confirm on other images).
for i in sorted_contours[:len(sorted_contours)-1]:
    convex_hull = cv2.convexHull(i)
    cv2.drawContours(input_image, [convex_hull], 0, (0,255,0), 3)
cv2.imshow("Convex Hull", input_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
class IAve:
    """Interface-like base for birds (interface-segregation demo)."""
    comer = ""  # placeholder for the "eat" capability
class AveVoladora:
    """Capability mixin for birds that fly."""
    volar = ""  # placeholder for the "fly" capability
class Avenovoladora:
    """Capability mixin for flightless birds."""
    nadar = ""  # placeholder for the "swim" capability
class Loro:
    """Parrot: meant to combine IAve with AveVoladora.

    NOTE(review): these are bare constructor calls evaluated once at
    class-definition time, not inheritance -- the instances are
    discarded. Probably intended: class Loro(AveVoladora, IAve).
    """
    AveVoladora()
    IAve()
class pinguino:
    """Penguin: meant to combine IAve with Avenovoladora.

    NOTE(review): same issue as Loro -- bare constructor calls rather
    than inheritance; probably intended: class pinguino(Avenovoladora, IAve).
    """
    Avenovoladora()
    IAve()
|
from functools import reduce
def main():
    """Read a string of 'L'/'R' moves and print the final node index.

    Starting from node 1 (the root of an implicit binary tree), an 'L'
    moves to child 2*v and any other character to child 2*v + 1.
    """
    moves = input()
    node = 1
    for move in moves:
        node = 2 * node if move == "L" else 2 * node + 1
    print(node)
if __name__ == '__main__':
    main()  # script entry point
|
import time
import sys
import tree.avlQuick
import tree.avlSlow
import tree.simple
import tree.avlGeneric
import hash.simpleHash
# CLI: benchmark.py <method> <n> -- pick an implementation and time it.
method = sys.argv[1]
n = int(sys.argv[2])
# Map the CLI name to the module implementing that data structure.
arbre = {
    'avlQuick': tree.avlQuick,
    'simple': tree.simple,
    'avlSlow': tree.avlSlow,
    'avlGeneric': tree.avlGeneric,
    'simpleHash': hash.simpleHash,
}.get(method)
empty = arbre.empty
if method != 'avlGeneric':
    insert = arbre.insert
    search = arbre.search
else:
    # The generic AVL takes an explicit key extractor; adapt it to the
    # common (tree, value) signature used below.
    def key(value):
        return value
    def insert(tree, value):
        return arbre.insert(tree, value, key)
    def search(tree, value):
        return arbre.search(tree, value, key)
# NOTE(review): this rebinds the name `tree`, shadowing the imported
# `tree` package -- harmless here since the modules are already bound.
tree = empty()
t = time.time()
# Insert 1..n-1 in order and report the elapsed time.
for i in range(1, n):
    insert(tree, i)
print("Insertion Time:", time.time() - t)
if 1:  # leftover debug toggle for the search benchmark
    t = time.time()
    for i in range(100000):
        search(tree, i)
    print("Search time:", time.time() - t)
|
def factorial(n):
    """Return n! for a non-negative integer n.

    Raises ValueError for negative input (the original recursed without
    a guard, so a negative n died with RecursionError).
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    if n == 0:
        return 1
    return n * factorial(n - 1)
if __name__ == "__main__":
    # Simple CLI: read an integer and print its factorial.
    n=int(input("Enter the number"))
    print(factorial(n))
|
import requests
import time
import json
# Form payload for the daily report; update() stamps today's date into it.
info = {}
# Campus-account credentials -- intentionally blank; fill in before running.
USERNAME = ''
PASSWORD = ''
# Portal login and daily-report submission endpoints.
LOGIN_URL = 'https://app.bupt.edu.cn/uc/wap/login/check'
UPDATE_URL = 'https://app.bupt.edu.cn/ncov/wap/default/save'
# One shared session so the login cookie is reused by update().
session: requests.Session = requests.Session()
def login(username, password):
    """POST credentials to the portal; the session keeps the auth cookie."""
    session.post(LOGIN_URL, data={
        'username': username,
        'password': password
    })
def update():
    """Submit today's report: the info payload plus the current date."""
    info["date"] = time.strftime("%Y%m%d", time.localtime())
    session.post(UPDATE_URL, data=info)
if __name__ == "__main__":
    # Log in first so the report POST carries an authenticated session.
    login(USERNAME, PASSWORD)
    update()
import datetime
import jinja2
import logging
import os
import string
import webapp2
from utils import *
from dbmodels import *
from google.appengine.ext import db
from google.appengine.api import images
from google.appengine.api import memcache
from google.appengine.api import mail
# initializing jinja2
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
class Handler(webapp2.RequestHandler):
    """Base handler: response writing plus jinja2 template rendering."""

    def write(self, *a, **kw):
        """Write the given args straight to the response body."""
        self.response.write(*a, **kw)

    def render_str(self, template, **params):
        """Render *template* with *params* and return the result string."""
        return jinja_env.get_template(template).render(params)

    def render_template(self, template, **params):
        """Render *template* and write the result to the response."""
        self.write(self.render_str(template, **params))
class ParentHandler(Handler):
    """Adds cookie-based auth and the shared page-rendering helpers."""

    def set_secure_cookie(self, cookie_name, val):
        '''
        Takes the name and val of the cookie.
        Makes the secure value of the cookie by using the val in the input.
        Sets the Cookie with the name provided and the secure cookie value.
        cookie_name: String
        val: String
        '''
        cookie_val = make_secure_val(val)
        self.response.headers.add_header('Set-Cookie',
            "%s=%s; Path=/" % (cookie_name, cookie_val))

    def read_secure_cookie(self, cookie_name):
        '''
        Returns the value of the cookie (without the hash) if the cookie
        value is valid, else a falsy value.
        cookie_name: String
        '''
        browser_cookie = self.request.cookies.get(cookie_name)
        #logging.info('browser cookie is %s' % browser_cookie)
        return browser_cookie and check_secure_val(browser_cookie)

    def login(self, user):
        '''
        Uses the function set_secure_cookie() to set the secure cookie
        value in order to log in the user.
        user: User entity
        '''
        self.set_secure_cookie('user_id', str(user.key().id()))

    def logout(self):
        '''Sets the cookie to blank'''
        self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')

    def initialize(self, *a, **kw):
        '''
        Overrides webapp2's initialize function. This function is run with
        every request.
        This function calls webapp2's initialize function to maintain
        important functionality.
        It reads the secure val of the cookie; if it exists, it sets the
        corresponding user to the variable self.logged_in_user.
        '''
        webapp2.RequestHandler.initialize(self, *a, **kw)
        uid = self.read_secure_cookie('user_id')
        #logging.info('uid is %s' % uid)
        self.logged_in_user = uid and User.get_user(uid)

    def write_dashboard(self,
                        function = "main",
                        error = "",
                        edit_no = None):
        """
        Writes the dashboard page, either for viewing ("main"), adding,
        or editing; edit_no selects the task being edited.
        edit_no: Int or None
        """
        group_users = User.get_group_users()
        edit_task_content = ""
        done_list = DoneList.todays_done_list(self.logged_in_user.username)
        if type(edit_no) is int: # since 0 is equivalent to None
            edit_task_content = done_list.tasks[edit_no]
            # logging.error('edit_task_content = ' + edit_task_content)
        self.render_template("dashboard-" + function + ".html",
                             title = "Spacecom Workday",
                             now = date_string(timezone_now()),
                             user = self.logged_in_user,
                             group_users = group_users,
                             done_list = done_list,
                             edit_no = edit_no,
                             edit_task_content = edit_task_content,
                             error = error)

    def write_login_form(self, email = "",
                         username = "",
                         fullname = "",
                         all_errors = {"username_error": "",
                                       "password_error": "",
                                       "signup_error": "",
                                       "email_error": "",
                                       "fullname_error": "",
                                       "profile_picture_error": ""}):
        # Renders the combined login/signup page.
        # NOTE(review): mutable default dict -- shared across calls if a
        # caller ever mutates it; confirm callers always pass fresh dicts.
        self.render_template('login.html',
                             title = "Spacecom Workday",
                             email = email,
                             username = username,
                             fullname = fullname,
                             all_errors = all_errors)

    def write_verify_page(self):
        """Renders the please-verify-your-email page."""
        self.render_template('verify.html',
                             title = "Spacecom Workday",
                             user = self.logged_in_user)
class MainPage(ParentHandler):
    """Front page: dashboard for verified users, verify page for
    unverified users, combined login/signup form for anonymous visitors."""

    def get(self):
        if self.logged_in_user:
            if self.logged_in_user.verified:
                self.write_dashboard()
            else:
                self.write_verify_page()
        else:
            self.write_login_form()

    def post(self):
        """Handle the add-task form posted from the dashboard."""
        if self.logged_in_user:
            add_task = self.request.get('add_task')
            if add_task == "Add":
                done_task = self.request.get('done_task')
                if done_task:
                    done_list = DoneList.todays_done_list(
                        self.logged_in_user.username)
                    # Append to today's list if it exists, else start one.
                    if done_list:
                        done_list = done_list.update(done_task)
                        done_list.put()
                    else:
                        done_list = DoneList.construct(self.logged_in_user,
                                                       done_task)
                        done_list.put()
                    done_list.set_done_list_cache()
                    self.redirect('/')
                else:
                    error = "Task Required!"
                    self.write_dashboard(error = error)
        else:
            # BUG FIX: was `sel.redirect('/')` -- a NameError typo.
            # Handles the case of cookie deletion between load and post.
            self.redirect('/')
class LoginHandler(ParentHandler):
    """Standalone /login page: sign-in and forgot-password flows."""

    def write_login_form(self,
                         username_or_email = "",
                         login_error = "",
                         reset_msg = ""):
        # Renders the login-only form (overrides the parent's combined
        # login/signup form).
        self.render_template('login-only.html',
                             username_or_email = username_or_email,
                             login_error = login_error,
                             reset_msg = reset_msg)

    def get(self):
        # self.redirect('/')
        # Logged-in users don't need the login page.
        if not self.logged_in_user:
            self.write_login_form()
        else:
            # NOTE(review): redirects to '' (i.e. the current URL);
            # presumably '/' was intended -- confirm.
            self.redirect('')

    def post(self):
        # Two submit buttons share this form: "Sign In" and
        # "Forgot Password"; dispatch on which one was pressed.
        signin = self.request.get('signin')
        forgot = self.request.get('forgot')
        username_or_email = self.request.get('username_or_email')
        if signin == "Sign In":
            password = self.request.get('password')
            # Accepts either a username or an email plus password.
            user = User.valid_login(username_or_email, password)
            if user:
                self.login(user)
                self.redirect('/')
            else:
                login_error = "Email/Username or Password is incorrect."
                self.write_login_form(username_or_email = username_or_email,
                                      login_error = login_error)
        elif forgot == "Forgot Password":
            if not username_or_email:
                login_error = "Enter Username or Email to reset password."
                self.write_login_form(username_or_email = username_or_email,
                                      login_error = login_error)
            else:
                user = User.get_user(username_or_email)
                if user:
                    # Email a reset link to the registered address.
                    user.send_pw_reset_mail()
                    reset_msg = ("Password reset link sent to registered "
                                 "email address.")
                    self.write_login_form(
                        username_or_email = username_or_email,
                        reset_msg = reset_msg)
                else:
                    login_error = "User does not exist."
                    self.write_login_form(
                        username_or_email = username_or_email,
                        login_error = login_error)
class SignupHandler(ParentHandler):
    """Handles new-user registration posted from the combined form."""

    def get(self):
        # Signup is POST-only; direct GETs go back to the front page.
        self.redirect('/')

    def post(self):
        signup = self.request.get('signup')
        if signup == "Sign Up":
            username = self.request.get('username')
            email = self.request.get('email')
            fullname = self.request.get('fullname')
            password = self.request.get('password')
            profile_picture = self.request.get('profile_picture')
            # Field-level validation (returns ok flag + per-field errors).
            valid_entries, all_errors = validate_signup(username,
                                                        email,
                                                        fullname,
                                                        password,
                                                        profile_picture)
            if not valid_entries:
                self.write_login_form(email = email,
                                      username = username,
                                      fullname = fullname,
                                      all_errors = all_errors)
            else:
                # Uniqueness checks: one account per email and username.
                existing_user = User.get_user(email)
                taken_username = User.get_user(username)
                if existing_user or taken_username:
                    if existing_user:
                        all_errors[
                            'email_error'
                        ] = "This email has already been registered."
                    if taken_username:
                        all_errors[
                            'username_error'
                        ] = "This username is already taken."
                    self.write_login_form(email = email,
                                          username = username,
                                          fullname = fullname,
                                          all_errors = all_errors)
                else:
                    # Create the account, warm its caches, send the
                    # confirmation mail and log the new user in.
                    new_user = User.register(username,
                                             email,
                                             fullname,
                                             password,
                                             profile_picture)
                    new_user.put()
                    new_user.set_user_caches()
                    new_user.send_confirmation_mail()
                    memcache.delete('Spacecom') # del obsolete group cache
                    self.login(new_user)
                    self.redirect('/')
class SignoutHandler(ParentHandler):
    """Logs the user out on an explicit sign-out POST."""

    def get(self):
        # Sign-out is POST-only; direct GETs go back to the front page.
        self.redirect('/')

    def post(self):
        if self.request.get('signout') == 'Sign Out':
            self.logout()
            self.redirect('/')
class EditHandler(ParentHandler):
    """Edit or delete a single task on today's done-list."""

    def get(self):
        # Show the edit form for the task index given in ?task=N.
        if self.logged_in_user:
            if self.logged_in_user.verified:
                task_index = self.request.get('task')
                self.write_dashboard("edit",
                                     "",
                                     int(task_index))
            else:
                self.write_verify_page()
        else:
            self.redirect('/')

    def post(self):
        if self.logged_in_user:
            if self.logged_in_user.verified:
                # edit_task / delete_task carry the task index as a string;
                # whichever is non-empty decides the action.
                edit_task = self.request.get('edit_task')
                delete_task = self.request.get('delete_task')
                # logging.error('edit_task = ' + edit_task)
                # logging.error('delete_task = ' + delete_task)
                done_list = DoneList.todays_done_list(
                    self.logged_in_user.username)
                if edit_task:
                    done_task = self.request.get('done_task')
                    if done_task:
                        done_list = done_list.edit(int(edit_task), done_task)
                        done_list.put()
                        done_list.set_done_list_cache()
                        self.redirect('/')
                    else:
                        error = "Task Required!"
                        self.write_dashboard("edit",
                                             error,
                                             int(edit_task))
                elif delete_task:
                    done_list = done_list.del_task(int(delete_task))
                    done_list.put()
                    done_list.set_done_list_cache()
                    self.redirect('/')
            else:
                self.write_verify_page()
        else:
            self.redirect('/')
class ImageHandler(ParentHandler):
    """Serves a user's profile picture, cropped square and resized, as PNG."""
    def get(self):
        # 'img_id' is a datastore key string for a User entity.
        img_id = self.request.get('img_id')
        user = db.get(img_id)
        # 'dimensions' is expected as "<width>x<height>", e.g. "80x80".
        # NOTE(review): when 'dimensions' is empty this unpack raises
        # ValueError (the right-hand side evaluates to '') -- confirm
        # callers always supply the parameter.
        dimensions = self.request.get('dimensions')
        width, height = dimensions and [int(x) for x in dimensions.split('x')]
        if user:
            # logging.error(images.Image(user.profile_picture).width)
            img = user.profile_picture
            if not is_img_square(img):
                img_square = memcache.get(img_id) # because crop slows loading
                if not img_square:
                    ratios = img_square_ratios(img)
                    img = images.crop(img,
                                      ratios[0],
                                      ratios[1],
                                      ratios[2],
                                      ratios[3])
                    try:
                        set_cache(img_id, img) # because crop slows loading
                    except:
                        # Best effort: a failed cache write must not break
                        # image serving.
                        pass
                else:
                    img = img_square
            avatar = images.resize(img, width, height)
            self.response.headers['Content-Type'] = 'image/png'
            self.write(avatar)
        else:
            self.write('No image')
from abc import ABCMeta, abstractmethod
class Specification(object):
    """
    Base class for composable boolean specifications.

    Concrete subclasses implement ``satisfied_by`` and can then be
    combined into condition trees with ``and_``, ``or_`` and ``not_``.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def satisfied_by(self, candidate):
        """
        Report whether *candidate* meets this specification.

        :param candidate: The candidate object to consider.
        :type candidate: object
        :returns: True if candidate satisfies specification, else False.
        :rtype: bool
        """

    def and_(self, other):
        """
        Combine with *other* so that both must be satisfied.

        :param other: The other Specification to consider in addition.
        :type other: Specification
        :return: AndSpecification
        """
        return AndSpecification(self, other)

    def or_(self, other):
        """
        Combine with *other* so that either may be satisfied.

        :param other: The other specification to consider alternately.
        :type other: Specification
        :return: OrSpecification
        """
        return OrSpecification(self, other)

    def not_(self):
        """
        Negate this specification.

        :return: NotSpecification
        """
        return NotSpecification(self)
class AndSpecification(Specification):
    """
    Conjunction node: satisfied only when both operands are satisfied.
    """
    def __init__(self, left, right):
        """
        :param left: The left specification.
        :type left: Specification
        :param right: The right specification.
        :type right: Specification
        """
        self.left = left
        self.right = right

    def satisfied_by(self, candidate):
        """:rtype: bool"""
        verdict = self.left.satisfied_by(candidate)
        # Short-circuits: the right side is only consulted when needed.
        return verdict and self.right.satisfied_by(candidate)
class OrSpecification(Specification):
    """
    Disjunction node: satisfied when either operand is satisfied.
    """
    def __init__(self, left, right):
        """
        :param left: The left specification.
        :type left: Specification
        :param right: The right specification.
        :type right: Specification
        """
        self.left = left
        self.right = right

    def satisfied_by(self, candidate):
        """:rtype: bool"""
        verdict = self.left.satisfied_by(candidate)
        # Short-circuits: the right side is only consulted when needed.
        return verdict or self.right.satisfied_by(candidate)
class NotSpecification(Specification):
    """
    Negation node: satisfied exactly when the wrapped specification
    is not satisfied.
    """
    def __init__(self, spec):
        """
        :param spec: The spec that must not be satisfied.
        :type spec: Specification
        """
        self.spec = spec

    def satisfied_by(self, candidate):
        """:rtype: bool"""
        return not self.spec.satisfied_by(candidate)
|
print "=====Perulangan menggunakan for====="
print ""
for i in range (1,15):
print "ini adalah perulangan ke - ",i
print ""
print "====================================" |
# Generated by Django 3.0.4 on 2020-03-08 08:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the app: creates the PDF and Comment models."""
    initial = True
    dependencies = [
        # Comment points at the (swappable) user model below.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='PDF',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): 'tim_created' looks like a typo for
                # 'time_created' -- fix it in the model plus a follow-up
                # migration rather than editing this generated file.
                ('tim_created', models.DateTimeField(default=django.utils.timezone.now)),
                ('time_public', models.TimeField()),
                ('due_date', models.TimeField()),
                ('name', models.CharField(max_length=50)),
                ('type', models.CharField(max_length=50)),
                ('categories', models.CharField(max_length=10)),
                ('concepts', models.CharField(max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('anonymous', models.BooleanField()),
                ('time_created', models.DateTimeField(default=django.utils.timezone.now)),
                ('text', models.TextField()),
                ('author_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment_author', to=settings.AUTH_USER_MODEL)),
                # Self-referential FK enabling threaded comments.
                ('parent_thread_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment_thread_id', to='main.Comment')),
                # NOTE(review): this FK targets 'main.Comment', not the PDF
                # model -- verify the target is intentional.
                ('pdf', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Comment')),
                ('responding_to', models.ManyToManyField(related_name='comment_responding_to', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
{
    # Odoo addon manifest: demo/test module exercising the CRND web widgets.
    "name": "Test CRND Web Models",
    "version": "13.0.0.12.0",
    "author": "Center of Research and Development",
    "website": "https://crnd.pro",
    'summary': 'Module for testing web addons.',
    "license": "LGPL-3",
    'category': 'Technical Settings',
    # Widget and helper addons this test module depends on.
    'depends': [
        'crnd_web_diagram_plus',
        'crnd_web_list_popover_widget',
        'crnd_web_float_full_time_widget',
        'crnd_web_m2o_info_widget',
        'crnd_web_tree_colored_field',
        'crnd_web_on_create_action',
        'crnd_web_actions',
        'generic_mixin',
        'crnd_web_field_domain',
    ],
    # Demo records, loaded only when demo data is enabled.
    'demo': [
        'demo/popover_widget.xml',
        'demo/float_full_time_widget.xml',
        'demo/m2o_info_widget.xml',
        'demo/tree_colored_field.xml',
        'demo/web_diagram_plus.xml',
        'demo/crnd_web_field_domain.xml',
    ],
    # Views, security rules and assets loaded on installation.
    'data': [
        'security/ir.model.access.csv',
        'views/popover_widget_text_model.xml',
        'views/popover_widget_html_model.xml',
        'views/popover_widget_char_model.xml',
        'views/popover_widget.xml',
        'views/m2o_info_widget.xml',
        'views/float_full_time_widget.xml',
        'views/tree_colored_field.xml',
        'views/web_diagram_plus.xml',
        'views/web_diagram_plus_arrow.xml',
        'views/web_diagram_plus_node.xml',
        'views/test_crnd_web_model_book.xml',
        'views/test_crnd_web_actions.xml',
        'views/test_crnd_web_field_domain.xml',
        'wizard/book_wizard_create.xml',
        'views/assets.xml',
    ],
    'images': [],
    'installable': True,
    'auto_install': False,
}
|
import sys
import os
import json
import json_ascii
from bitfloor import RAPI
def get_rapi():
    """Build a Bitfloor REST API client from command-line arguments.

    Reads sys.argv: argv[1] is the product id (defaults to 1, i.e.
    BTCUSD) and argv[2] the path of a JSON key file (defaults to
    /etc/security/bfl.json) holding 'key' and 'secret' entries.

    :return: a configured RAPI instance
    """
    if len(sys.argv) < 3:
        # Informational only -- the defaults below cover missing args.
        # (Parenthesized print works under both Python 2 and 3.)
        print("Usage: {0} product_id keyfile".format(sys.argv[0]))
        #sys.exit(1)
    if len(sys.argv) > 2:
        path = sys.argv[2]
    else:
        # The original wrapped this in a redundant second os.path.join;
        # a single call builds the same path.
        path = os.path.join('/etc', 'security', 'bfl.json')
    if len(sys.argv) > 1:
        product_id = sys.argv[1]
    else:
        product_id = 1  # BTCUSD
    with open(path) as f:
        config = json.load(f, object_hook=json_ascii.decode_dict)
    return RAPI(product_id=product_id, key=config['key'], secret=config['secret'])
|
from spritetools import splitImage
def getFramePosHero():
    """Return a 4x4 grid of sprite-sheet frame rectangles.

    Each entry is (x, y, width, height) for a 32x48 hero frame; the row
    index selects the sheet row (animation), the column index the step.
    """
    width, height = 32, 48
    # range() instead of the Python-2-only xrange(): identical iteration
    # here, and the function now also runs under Python 3.
    return [[(x * width, y * height, width, height) for x in range(4)]
            for y in range(4)]
def getAnimationsHero(fullImage):
    """Slice the hero sprite sheet into per-direction frame lists.

    Rows 0-3 of the sheet map to the walk directions down, left, right
    and up respectively.
    """
    colorkey = (255,255,255)
    rects = getFramePosHero()
    directions = ('down', 'left', 'right', 'up')
    return {name: splitImage(fullImage, rects[row], colorkey)
            for row, name in enumerate(directions)}
|
import re
import setuptools
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """setup.py 'test' command that delegates to pytest."""
    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Run the whole suite; pytest discovers tests itself.
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # Imported lazily so setup.py works without pytest installed.
        import pytest
        pytest.main(self.test_args)
# Optional dependency groups; the 'tests' extra pins the test toolchain.
extras_require = {
    'tests': [
        'pytest >=2.5.2,<3',
        'pytest-cov >=1.7,<2',
        'pytimeparse >=1.1.5,<2',
        'mock >=1.0,<2.0',
        'unittest2 >=0.5.1,<0.6',
        'iso8601 >=0.1,<0.2',
    ],
}
# Every importable package except the test tree.
packages = setuptools.find_packages('.', exclude=('tests', 'tests.*'))
setuptools.setup(
    name='pilo',
    # Version is scraped from pilo/__init__.py so it is defined once.
    version=(
        re
        .compile(r".*__version__ = '(.*?)'", re.S)
        .match(open('pilo/__init__.py').read())
        .group(1)
    ),
    url='https://github.com/bninja/pilo/',
    license=open('LICENSE').read(),
    author='egon',
    author_email='egon@gb.com',
    description='Yet another form parser.',
    long_description=open('README.rst').read(),
    packages=packages,
    package_data={'': ['LICENSE']},
    include_package_data=True,
    extras_require=extras_require,
    tests_require=extras_require['tests'],
    install_requires=[],
    # 'python setup.py test' runs pytest via the PyTest command above.
    cmdclass={'test': PyTest},
    classifiers=[
        'Intended Audience :: Developers',
        'Development Status :: 4 - Beta',
        'Natural Language :: English',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: ISC License (ISCL)',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
)
|
# Generated by Django 2.2.5 on 2019-09-16 02:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alters AboutText.position to the five main/column placement choices."""
    dependencies = [
        ('homepage', '0002_auto_20190915_2012'),
    ]
    operations = [
        migrations.AlterField(
            model_name='abouttext',
            name='position',
            field=models.CharField(choices=[('main', 'Main'), ('main1', 'Main 1'), ('main2', 'Main 2'), ('col1', 'Column 1'), ('col2', 'Column 2')], default='main', max_length=5),
        ),
    ]
|
"""
url: https://stepik.org/lesson/324755/step/9?unit=307931
На вход программе подается строка текста, содержащая натуральные числа. Из данной строки формируется список чисел. Напишите программу, которая подсчитывает, сколько в полученном списке пар элементов, равных друг другу. Считается, что любые два элемента, равные друг другу образуют одну пару, которую необходимо посчитать.
"""
# variant 1:
a = input().split()
print(sum(a.count(x) - 1 for x in a) // 2)
# variant 2:
a = input().split()
s = 0
for i in range(len(a) - 1):
s += a[i + 1:].count(a[i])
print(s) |
# Basics
import numpy as np
import pandas as pd
import os
# Visualization
import matplotlib.pyplot as plt
import seaborn as sns
# Deep Learning
import keras
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, SeparableConv2D, Conv2D , MaxPool2D , Flatten , Dropout , BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix
from keras.callbacks import ReduceLROnPlateau
import cv2
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten,BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.optimizers import RMSprop
from keras.applications import *
from keras.models import Model
import keras
#Reading the images with larger size might take longer
labels = ['PNEUMONIA', 'NORMAL']  # class subfolder names; index = class id
img_size = 200
#Setting to 1200 might help. For memory concern, I set it to 200.
def get_data(data_dir):
    """Load every image under data_dir/<label> as a grayscale
    img_size x img_size array paired with its class index.

    Returns an ndarray of [image, class_num] pairs; unreadable files are
    skipped (the exception is only printed).
    """
    data = []
    for label in labels:
        path = os.path.join(data_dir, label)
        class_num = labels.index(label)
        for img in os.listdir(path):
            try:
                # I really tried to keep the RGB. But Kaggle keeps throwing memory error.
                # I also tried to keep image size larger. Kaggle keeps throwing memory error in data augmentation.
                # So I set img size to 200 and read in the images with grayscale.
                img_arr = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                resized_arr = cv2.resize(img_arr, (img_size, img_size))
                data.append([resized_arr, class_num])
            except Exception as e:
                print(e)
    return np.array(data)
# Load the three pre-split image sets from disk.
train = get_data('/content/files/chest_xray/chest_xray/train')
test = get_data('/content/files/chest_xray/chest_xray/test')
val = get_data('/content/files/chest_xray/chest_xray/val')
#Routines for feature label separation.
#Put the features into a list of arrays.
#Put the label into y variable.
x_train = []
y_train = []
x_val = []
y_val = []
x_test = []
y_test = []
# Each entry is an [image, class_num] pair produced by get_data.
for feature, label in train:
    x_train.append(feature)
    y_train.append(label)
for feature, label in test:
    x_test.append(feature)
    y_test.append(label)
for feature, label in val:
    x_val.append(feature)
    y_val.append(label)
#del train
#del test
#del val
#Normalize Images
def normalize_image(img_set):
    """Scale pixel values from [0, 255] into [0, 1] as a float ndarray."""
    return np.array(img_set) / 255
# Apply the [0, 1] pixel scaling to every split.
x_train = normalize_image(x_train)
x_val = normalize_image(x_val)
x_test = normalize_image(x_test)
#Resize for transfer learning
def train_reshape(img_set):
    """Reshape a flat image batch to NHWC: (N, img_size, img_size, 1)."""
    return np.reshape(img_set, (-1, img_size, img_size, 1))
# Reshape the image splits to NHWC with one grayscale channel and turn
# the label lists into ndarrays.
x_train = train_reshape(x_train)
x_val = train_reshape(x_val)
x_test = train_reshape(x_test)
y_train = np.array(y_train)
y_val = np.array(y_val)
y_test = np.array(y_test)
# We try to increase the train set with augmentation as much as possible.
# However, rotation might influence the prediction.
datagen = ImageDataGenerator(
        zoom_range = 0.2, # Randomly zoom image
        width_shift_range=0.05,  # randomly shift images horizontally (fraction of total width)
        horizontal_flip = False)  # I set this to true at first. It is horrible for the model to detect.
datagen.fit(x_train)
# Stop when val_loss has not improved for 10 epochs, restoring the best
# weights seen so far.
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=10, verbose=1, mode='auto',
                        restore_best_weights=True)
# Shrink the learning rate when val_accuracy plateaus for 5 epochs.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', patience = 5, verbose=1,factor=0.3, mode="max")
callback_list = [monitor, learning_rate_reduction]
# Functional CNN: four conv/pool stages, then a dense head with a sigmoid
# output for binary (pneumonia vs normal) classification.
input_layer = tf.keras.layers.Input([200, 200, 1])
conv1 = tf.keras.layers.Conv2D(filters=32, kernel_size=(5, 5), padding='Same',
                               activation='relu')(input_layer)
pool1 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), padding='Same',
                               activation='relu')(pool1)
pool2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv2)
conv3 = tf.keras.layers.Conv2D(filters=96, kernel_size=(3, 3), padding='Same',
                               activation='relu')(pool2)
pool3 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv3)
conv4 = tf.keras.layers.Conv2D(filters=96, kernel_size=(3, 3), padding='Same',
                               activation='relu')(pool3)
pool4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv4)
flatten = tf.keras.layers.Flatten()(pool4)
dense = tf.keras.layers.Dense(512, activation='relu')(flatten)
out = tf.keras.layers.Dense(1, activation='sigmoid')(dense)
model = tf.keras.Model(input_layer, out)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(datagen.flow(x_train, y_train, batch_size=8), epochs=30,
                    validation_data=datagen.flow(x_val, y_val),
                    callbacks=callback_list)
# BUG FIX: the original had 'epochs = [...]' fused onto the end of the
# model.fit line (a notebook-export artifact) -- a SyntaxError. It is now
# its own statement, and sized from the actual history so the plots also
# work when EarlyStopping ends training before 30 epochs.
epochs = [i for i in range(len(history.history['loss']))]
fig, ax = plt.subplots(1, 2)
train_acc = history.history['accuracy']
train_loss = history.history['loss']
val_acc = history.history['val_accuracy']
val_loss = history.history['val_loss']
fig.set_size_inches(15, 5)
ax[0].plot(epochs, train_acc, label='Training Accuracy')
ax[0].plot(epochs, val_acc, label='Validation Accuracy')
ax[0].set_title('Training & Validation Accuracy')
ax[0].legend()
ax[0].set_xlabel("Epochs")
ax[0].set_ylabel("Accuracy")
ax[1].plot(epochs, train_loss, label='Training Loss')
ax[1].plot(epochs, val_loss, label='Validation Loss')
ax[1].set_title('Testing Accuracy & Loss')
ax[1].legend()
ax[1].set_xlabel("Epochs")
ax[1].set_ylabel("Training & Validation Loss")
plt.show()
# BUG FIX: evaluate once (the original evaluated twice) and print the
# scalar accuracy -- the original printed the whole [loss, accuracy]
# list under the "Accuracy" label.
test_loss, test_acc = model.evaluate(x_test, y_test)
print("Loss ", test_loss)
print("Accuracy", test_acc)
model.save('Pneumonia.h5')
|
import io
import os
from zipfile import ZipFile
import pandas as pd
import requests
from AbstractRemoteData import AbstractRemoteData
class SpamData(AbstractRemoteData):
    """Remote SMS spam-collection dataset (UCI repository)."""

    def __init__(self, url: str = None, file: str = None):
        super().__init__()
        default_url = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
                       '00228/smsspamcollection.zip')
        self._URL_ = url or default_url
        self._FILE_ = file or 'SpamCollection.txt'

    def is_downloaded(self):
        """True when the local dataset file already exists."""
        return os.path.isfile(self._FILE_)

    def download(self) -> str:
        """Fetch the zip archive and return the decoded collection text."""
        response = requests.get(self._URL_)
        archive = ZipFile(io.BytesIO(response.content))
        return archive.read('SMSSpamCollection').decode()

    def write_file(self, content: str):
        """Persist the downloaded text to the local dataset file."""
        with open(self._FILE_, 'w') as handle:
            handle.write(content)

    def read_data(self):
        """Load the dataset into a DataFrame with normalized content, a
        0/1 spam label and a word-count column."""
        frame = pd.read_table(self._FILE_, header=None, names=['Class', 'Content'])
        frame['Content'] = frame['Content'].map(self.normalize_text)
        frame['Label'] = frame['Class'].map(lambda c: 0 if c == 'ham' else 1)
        frame['Text Length'] = frame['Content'].map(lambda txt: len(txt.split()))
        return frame
|
'''
For now the wallet lives in its own class; if that turns out to be
unnecessary later, it can be merged into node/transaction.
'''
class Wallet():
    """Holds a key pair plus the transaction fee this wallet pays."""

    def __init__(self, pub, priv, fee):
        self.pub_key, self.priv_key, self.fee = pub, priv, fee
'''
Define all sums (as a total sum) of each root node based on its depth.
1 # sum at root node 16
/ \
2 3 # sum at 2: 6; sum at 3: 2
/ \ / \
4 5 6 7 # sum at 4: 2
/ \
8 9 # total sum = 26
'''
# Version 2. Shorter style
# O(n) T / O(d) S
# Each node at depth d contributes 1 + 2 + ... + d to the total: the sum
# of its own depth plus the depths it has inside every ancestor's subtree.
def allKindsOfNodeDepths(root, depth=0):
    """Return the sum, over every subtree, of all node depths.

    :param root: tree node exposing .left/.right (or None)
    :param depth: depth of ``root`` within the full tree
    :return: int total of node depths across all subtrees
    """
    if root is None:
        return 0
    # BUG FIX: use floor division -- '/' is true division in Python 3
    # and silently turned this integer total into a float.
    depth_sum = depth * (1 + depth) // 2
    return depth_sum + allKindsOfNodeDepths(root.left, depth + 1) + allKindsOfNodeDepths(root.right, depth + 1)
# This is the class of the input binary tree.
class BinaryTree:
    """Plain binary-tree node: a value plus optional left/right children."""

    def __init__(self, value):
        self.left = None
        self.right = None
        self.value = value
'''
# Version 2. First try
# O(n) T / O(d) S
# 2 steps:
# 1: to find out how many node are on each level (depth)
# and write them down into its array, where each index
# represents its depth.
# 2: to calculate the total sum as the following:
# total_sum += numbersPerDepth[d]*(d + prev_d) # (d + prev_d) -> is
# depth_sum = depth * (1 + depth) / 2 from above as total_sum += numbersPerDepth[d]*(d * (1 + d)/2)
def allKindsOfNodeDepths(root):
if root is None or (root.left is None and root.right is None):
return 0
numbersPerDepth = []
countNumbersPerDepth(root, numbersPerDepth, -1)
return getTotalSum(numbersPerDepth)
def getTotalSum(numbersPerDepth):
total_sum = 0
prev_d = 0
for d in range(len(numbersPerDepth)):
total_sum += numbersPerDepth[d]*(d + prev_d)
prev_d = d + prev_d
return total_sum
def countNumbersPerDepth(node, numbersPerDepth, depth):
depth += 1
if not len(numbersPerDepth) or depth >= len(numbersPerDepth):
numbersPerDepth.append(0)
numbersPerDepth[depth] += 1
if node.left is not None:
countNumbersPerDepth(node.left, numbersPerDepth, depth)
if node.right is not None:
countNumbersPerDepth(node.right, numbersPerDepth, depth)
'''
# Build the example tree from the module docstring and print the grand
# total of subtree depth sums (26 for this tree, per the docstring).
root = BinaryTree(1)
root.left = BinaryTree(2)
root.left.left = BinaryTree(4)
root.left.left.left = BinaryTree(8)
root.left.left.right = BinaryTree(9)
root.left.right = BinaryTree(5)
root.right = BinaryTree(3)
root.right.left = BinaryTree(6)
root.right.right = BinaryTree(7)
print(allKindsOfNodeDepths(root))
|
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import TruncatedSVD
def plot(samples, labels):
    """Reduce *samples* to 3 components with truncated SVD and show a 3-D
    scatter plot: green for label 1, red for everything else.

    :param samples: 2-D sample matrix accepted by TruncatedSVD
    :param labels: per-sample class labels aligned with samples
    """
    fig = plt.figure()
    ax = Axes3D(fig)
    data3d = TruncatedSVD(n_components=3).fit_transform(samples)
    # print(data3d.shape)
    # One scatter call per class instead of one call per point: the same
    # picture with O(1) plot artists instead of O(n).
    positive = [i for i, lab in enumerate(labels) if lab == 1]
    negative = [i for i, lab in enumerate(labels) if lab != 1]
    if positive:
        ax.scatter(data3d[positive, 0], data3d[positive, 1], data3d[positive, 2], c='green')
    if negative:
        ax.scatter(data3d[negative, 0], data3d[negative, 1], data3d[negative, 2], c='red')
    plt.show()
#!/usr/bin/python
# -*- coding=utf-8 -*-
'''
Export sysstat log info to excel info.
@author: yanyang.xie@thistech.com
@version: 0.1
@since: 12/12/2013
'''
import datetime
import os
import re
import string
import xlwt
current_date = datetime.datetime.now().strftime("%Y-%m-%d")
# sysstat log is in /var/log/sa/
# The day-of-month picks today's file, e.g. /var/log/sa/sar12.
monitor_file = '/var/log/sa/sar' + current_date.split('-')[-1]
report_file_dir = '/tmp/system_monitor/'
report_file_name = 'sysstat-monitor-info-%s.xls' % (current_date)
# if you just want special monitor info, just set its title into filter list
indicator_list = ['%usr', '%nice', '%sys', '%iowait', '%steal', '%idle', 'tcp-tw', 'totsck', 'cswch/s', 'rxpck/s', 'txpck/s']
total_content_sheet_name = 'all_monitor_info'
partial_content_sheet_name = 'partial_monitor_info'
# Points at whichever *_content_list read_file is currently filling.
current_content = None
# following list is all the monitoring indicator info list in sysstat log file, each monitoring indicator info will be set into one list
cpu_content_list = []
proc_content_list = []
pswpin_content_list = []
pgpgin_content_list = []
tps_content_list = []
frmpg_content_list = []
kbmemfree_content_list = []
kbswpfree_content_list = []
dentunusd_content_list = []
runq_sz_content_list = []
dev_content_list = []
totsck_content_list = []
rxpck_content_list = []
# Read the sysstat log info and then group those data into different monitoring indicator list
def read_file(monitor_file):
    """Parse a sar log, routing each data line into the module-level
    *_content_list that matches its indicator section.

    Uses the global current_content pointer to remember which section is
    being read between lines.
    """
    global current_content
    if not os.path.exists(monitor_file):
        return
    # NOTE(review): the file handle is never closed.
    pf = open(monitor_file, 'r')
    for line in pf:
        # Skip blank lines and reboot markers.
        if string.strip(line) == '' or string.strip(line) == '\n':
            continue
        if line.find('LINUX RESTART') > 0:
            continue
        # if the line has monitoring indicator title, it should be the start line of the monitoring statistic info
        # so change the current content list to the monitoring indicator content list
        if line.find('%iowait') > 0:
            current_content = cpu_content_list
        elif line.find('proc') > 0:
            current_content = proc_content_list
        elif line.find('pswpin') > 0:
            current_content = pswpin_content_list
        elif line.find('pgpgin') > 0:
            current_content = pgpgin_content_list
        elif line.find('rtps') > 0:
            current_content = tps_content_list
        elif line.find('frmpg') > 0:
            current_content = frmpg_content_list
        elif line.find('kbmemfree') > 0:
            current_content = kbmemfree_content_list
        elif line.find('kbswpfree') > 0:
            current_content = kbswpfree_content_list
        elif line.find('dentunusd') > 0:
            current_content = dentunusd_content_list
        elif line.find('runq-sz') > 0:
            current_content = runq_sz_content_list
        elif line.find('DEV') > 0:
            current_content = dev_content_list
        elif line.find('totsck') > 0:
            current_content = totsck_content_list
        elif line.find('rxpck') > 0:
            current_content = rxpck_content_list
        # 'Average' rows close a section; ignore lines until the next title.
        if line.find('Average') == 0:
            current_content = None
        if current_content is not None:
            # each monitoring indicator info will be split every 4 hours and then monitoring indicator title occurs again
            # should ignore the monitoring indicator tile line if it is not appear in the first time
            if len(current_content) > 0 and is_title_line(line):
                continue
            current_content.append(generate_line_content_list(line))
#check whether the line is monitoring indicator title line or not
def is_title_line(line):
    """True when *line* contains any known indicator column header at a
    position past the start of the line."""
    titles = ['%iowait', 'proc', 'pswpin', 'pgpgin', 'rtps', 'frmpg', 'kbmemfree', 'kbswpfree', 'dentunusd', 'runq-sz', 'DEV', 'totsck', 'rxpck']
    # find() > 0 (not >= 0) deliberately ignores a match at column 0,
    # exactly as the section-detection logic in read_file does.
    return any(line.find(title) > 0 for title in titles)
def generate_line_content_list(line):
    """Split a sar output line into [timestamp, field1, field2, ...]."""
    # The first space separates the timestamp from the data columns.
    timestamp, rest = line.split(' ', 1)
    fields = re.split(r'\s+', rest.strip().replace('\n', ''))
    return [timestamp.strip()] + fields
# To the CPU monitoring indicator, only the 'all' statistics data is useful, so need to filter other data
def convert_cpu_content():
    """Strip the 'CPU'/'all' columns from the collected CPU rows.

    Mutates the rows of the global cpu_content_list in place and returns
    a new list of those rows.
    """
    if len(cpu_content_list) == 0:
        return []
    temp_cpu_content = []
    # NOTE(review): the header row (index 0) is appended here and then
    # appended again by the loop below, so it appears twice in the
    # result -- confirm whether the loop was meant to skip row 0.
    temp_cpu_content.append(cpu_content_list[0])
    for c_content in cpu_content_list:
        if 'CPU' in c_content:
            c_content.remove('CPU')
        if 'all' in c_content:
            c_content.remove('all')
        temp_cpu_content.append(c_content)
    return temp_cpu_content
# To the rxpck monitoring indicator, if there are more network interface, the monitoring data will be more.
# If we want to show the data using chart in excel for each network interface, need convert its format from two-dimension to one-dimension
def convert_rxpck_content_list():
    """Flatten the per-interface network rows into one wide row per
    timestamp, with '<iface>-<metric>' column headers.

    Mutates the global rxpck_content_list (its header row is removed)
    and returns the flattened table.
    """
    if len(rxpck_content_list) == 0:
        return []
    # Distinct interface names appear in column 1 of each data row.
    iface_set = set([])
    for content in rxpck_content_list[1:]:
        iface_set.add(content[1])
    # New header: timestamp plus one '<iface>-<metric>' column per pair.
    new_titles = [rxpck_content_list[0][0]]
    for iface in iface_set:
        for title in rxpck_content_list[0][2:]:
            new_titles.append(iface + '-' + title)
    total_content = []
    total_content.append(new_titles)
    for i in range(len(rxpck_content_list)):
        total_content.append([])
    content_index = 1
    rxpck_content_list.remove(rxpck_content_list[0])
    # Every len(iface_set) consecutive source rows share a timestamp and
    # collapse into one output row.
    for i in range(0, len(rxpck_content_list)):
        if i % len(iface_set) == 0:
            content_index += 1
        for j in range(len(rxpck_content_list[i])):
            value = rxpck_content_list[i][j]
            # Keep the timestamp only once per merged row; drop the
            # interface-name column entirely.
            # NOTE(review): the duplicate check consults total_content[i],
            # not total_content[content_index] -- verify this index is
            # intended.
            if re.match('[0-9]{2}:[0-9]{2}:[0-9]{2}', value) and value in total_content[i]:
                continue
            if value in iface_set:
                continue
            total_content[content_index].append(rxpck_content_list[i][j])
    t_content = []
    # Drop the empty placeholder rows allocated above.
    for content in total_content:
        if len(content) > 0:
            t_content.append(content)
    return t_content
# merge all the monitoring indicator data
def generate_total_content_list():
    """Merge every indicator table into one row-aligned table, keeping
    the shared timestamp column only once per row.

    Reads the module-level *_content_list globals; returns the merged
    list of rows.
    """
    # NOTE(review): pswpin_content_list and runq_sz_content_list appear
    # twice in this tuple, so their columns are duplicated in the merged
    # output -- confirm whether that is intentional.
    whole_contents = (totsck_content_list, cpu_content_list, rxpck_content_list, proc_content_list, pswpin_content_list, pswpin_content_list,
                      pgpgin_content_list, tps_content_list, frmpg_content_list, kbmemfree_content_list,
                      kbswpfree_content_list, dentunusd_content_list, runq_sz_content_list, runq_sz_content_list, dev_content_list)
    max_content_length = 0
    for content_list in whole_contents:
        if len(content_list) > max_content_length:
            max_content_length = len(content_list)
    total_content_list = [[] for i in range(max_content_length + 1)]
    for content_list in whole_contents:
        if content_list is None or len(content_list) == 0:
            continue
        for i in range(len(content_list)):
            for value in content_list[i]:
                # Timestamp values are only added once per merged row.
                if re.match('[0-9]{2}:[0-9]{2}:[0-9]{2}', value):
                    if value in total_content_list[i]:
                        continue
                    else:
                        total_content_list[i].append(value)
                else:
                    total_content_list[i].append(value)
    return total_content_list
# Write the monitoring indicator info into local file
def write_to_excel(file_path, file_name, total_content_sheet_name, total_content_list, partial_content_sheet_name, filtered_content_list):
    """Write the filtered and the full indicator tables into one .xls
    file (one sheet each) under file_path, which is created on demand;
    an existing report of the same name is removed first.
    """
    if not os.path.exists(file_path):
        os.makedirs(file_path)
    # os.path.join handles paths with or without a trailing separator
    # (the original concatenated strings by hand).
    t_file = os.path.join(file_path, file_name)
    if os.path.exists(t_file):
        os.remove(t_file)
    x_file = xlwt.Workbook()
    # The two sheets were written with duplicated loops; factored into
    # a single helper. Filtered sheet first, as before.
    _write_sheet(x_file.add_sheet(partial_content_sheet_name), filtered_content_list)
    _write_sheet(x_file.add_sheet(total_content_sheet_name), total_content_list)
    # save excel into local (reusing the path computed above).
    x_file.save(t_file)
def _write_sheet(sheet, content_list):
    """Write a 2-D list of cells into *sheet*, converting numeric-looking
    values to floats so Excel can chart them."""
    for row_idx in range(len(content_list)):
        for col_idx in range(len(content_list[row_idx])):
            value = content_list[row_idx][col_idx]
            if is_number(value):
                sheet.write(row_idx, col_idx, float(value))
            else:
                sheet.write(row_idx, col_idx, value)
def is_number(a):
    """True when *a* can be converted to float (e.g. '1.5', 3, '2e3')."""
    try:
        float(a)
        return True
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures mean
        # "not a number"; anything else (e.g. KeyboardInterrupt) should
        # propagate.
        return False
# just want to do statistics information in content_list
def filter_total_contents(total_content_list, content_title_list):
    """Project the merged table onto the columns whose headers contain
    any of the wanted title substrings; the timestamp column (index 0)
    is always kept. Returns [] when no wanted title matches.
    """
    headers = total_content_list[0]
    wanted = [0]  # timestamp column must be there
    for wanted_title in content_title_list:
        for header in headers:
            if header.find(wanted_title) >= 0:
                wanted.append(headers.index(header))
    if wanted == [0]:
        return []
    result = []
    for row in total_content_list:
        if not row:
            continue
        # Short rows simply omit the columns they do not have.
        result.append([row[col] for col in wanted if len(row) > col])
    return result
if __name__ == '__main__':
    #monitor_file = 'sar12'
    #report_file_dir = 'D:\\Work\\source\\test\\load\\vexbj\\'
    # Parse today's sar log into the per-indicator lists.
    read_file(monitor_file)
    # The format of following monitor is special compared to other monitor info, need convert its format first
    cpu_content_list = convert_cpu_content()
    rxpck_content_list = convert_rxpck_content_list()
    # Merge, filter by indicator_list, then emit both sheets.
    total_content_list = generate_total_content_list()
    filtered_content_list = filter_total_contents(total_content_list, indicator_list)
    write_to_excel(report_file_dir, report_file_name, total_content_sheet_name, total_content_list, partial_content_sheet_name, filtered_content_list)
|
# Generated by Django 3.1.11 on 2021-05-19 21:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds Request.archivo (optional upload) and alters Request.fecha to
    a nullable DateField."""
    dependencies = [
        ('request', '0002_auto_20210519_2128'),
    ]
    operations = [
        migrations.AddField(
            model_name='request',
            name='archivo',
            field=models.FileField(null=True, upload_to='uploads/'),
        ),
        migrations.AlterField(
            model_name='request',
            name='fecha',
            field=models.DateField(null=True),
        ),
    ]
|
#!/usr/bin/env python
"""
Created on 9/06/14
@author: Sam Pfeiffer
File to show POIs and it's names in Rviz
"""
# system stuff
import sys
import copy
# ROS stuff
import rospy
import rosparam
from geometry_msgs.msg import PoseArray, Pose, Quaternion
from std_msgs.msg import Header
from tf import transformations
from visualization_msgs.msg import Marker, MarkerArray
from tf.transformations import quaternion_from_euler
# Module-wide counter used to give each RViz marker a unique id, starting
# at 99. ('global' at module scope is a no-op, kept from the original.)
global id_marker
id_marker = 99
# Topics the POI poses and their text labels are published on.
POI_POSES_TOPIC = '/POI_poses'
POI_TEXTMARKERS_TOPIC = '/POI_names'
def create_marker_from_poi(namespace_poi, poi):
    """Returns a text Marker from the POI"""
    # POI looks like:
    # ['submap_0', 'point_room_three', -0.809, 6.411, -1.441]
    m = Marker()
    m.action = m.ADD
    m.header.frame_id = "map"
    m.header.stamp = rospy.Time.now()
    # Module-level counter gives every marker a unique, stable id.
    global id_marker
    m.id = id_marker
    id_marker += 1
    m.pose.position.x = poi[2]
    m.pose.position.y = poi[3]
    # Lift the label half a metre so it floats above the POI position.
    m.pose.position.z = 0.5
    m.pose.orientation.w = 1.0
    # Label shown in RViz: "<namespace>/<poi name>".
    m.text = namespace_poi + "/" + poi[1]
    m.type = m.TEXT_VIEW_FACING
    m.scale.z = 0.2 # This is the size of the text
    m.color.r = 1.0
    m.color.a = 1.0
    return m
def create_pose_from_poi(poi):
    """Build a Pose from a POI entry.

    POIs are specified as ['submap', 'name', X, Y, yaw]; only the X, Y
    coordinates and the yaw rotation are used.
    """
    pose = Pose()
    pose.position.x = poi[2]
    pose.position.y = poi[3]
    pose.orientation = Quaternion(*quaternion_from_euler(0.0, 0.0, poi[4]))
    return pose
def create_marker_array(markers):
    """Wrap a list of Marker messages in a MarkerArray."""
    marker_array = MarkerArray()
    marker_array.markers.extend(markers)
    return marker_array
def create_pose_array(poses):
    """Wrap a list of Pose messages in a map-frame, now-stamped PoseArray."""
    pose_array = PoseArray()
    pose_array.header.frame_id = "map"
    pose_array.header.stamp = rospy.Time.now()
    pose_array.poses.extend(poses)
    return pose_array
def get_params(ns):
    """Fetch the parameter tree under *ns* and keep only its 'poi'
    subtree, dropping siblings such as 'numberOfSubMaps' and 'vo'.

    The returned structure looks like:
    {'poi': {'submap_0': {'avoid_that': ['submap_0', 'avoid_that',
                                         -7.298, 5.911, -2.252], ...}}}
    """
    pois_dict = rosparam.get_param(ns)
    # Snapshot the keys first so the dict can be pruned in place.
    for key in [name for name in pois_dict if name != "poi"]:
        pois_dict.pop(key)
    return pois_dict
def usage():
    """Print command-line usage help."""
    print "Usage:"
    print sys.argv[0] + " namespace_pois\n"
    print "For example: " + sys.argv[0] + " mmap"
if __name__ == '__main__':
    rospy.init_node("show_pois")
    rospy.sleep(0.3)
    if len(sys.argv) != 2:
        usage()
        exit(0)
    # Parameter namespace holding the POI tree (e.g. 'mmap').
    param_namespace = sys.argv[1]
    pois_dicts = get_params(param_namespace)
    tmp_list_poses = []
    tmp_list_markers = []
    # print "pois_dicts: ",
    # print pois_dicts
    # Walk the nested dict (poi -> submap -> entry), building one Pose
    # and one text Marker per POI.
    for poi_dict_name in pois_dicts:
        # print "poi_dict: ",
        # print pois_dicts.get(poi_dict_name)
        curr_dict = pois_dicts.get(poi_dict_name)
        for poi_dict in curr_dict:
            # print "poi_dict: ",
            # print curr_dict.get(poi_dict)
            curr_poi_dict = curr_dict.get(poi_dict)
            for poi in curr_poi_dict:
                # print "poi:"
                # print curr_poi_dict.get(poi)
                curr_poi = curr_poi_dict.get(poi)
                print "Adding poi: " + str(curr_poi)
                tmp_list_poses.append(create_pose_from_poi(curr_poi))
                tmp_list_markers.append(create_marker_from_poi(poi_dict_name, curr_poi))
    pois_pa = create_pose_array(tmp_list_poses)
    pois_ma = create_marker_array(tmp_list_markers)
    # print "POIs pose array: ",
    # print pois_pa
    # print "POIs marker array: ",
    # print pois_ma
    pa_pub = rospy.Publisher(POI_POSES_TOPIC, PoseArray)
    ma_pub = rospy.Publisher(POI_TEXTMARKERS_TOPIC, MarkerArray)
    rospy.loginfo("Publishing " + str(len(pois_ma.markers)) + " POIs")
    rospy.loginfo("At topics: " + POI_POSES_TOPIC + " " + POI_TEXTMARKERS_TOPIC)
    # Re-publish periodically so late subscribers (e.g. RViz) still
    # receive the markers.
    while not rospy.is_shutdown():
        pa_pub.publish(pois_pa)
        ma_pub.publish(pois_ma)
        rospy.sleep(0.3)
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserChangeForm
from .forms import SignUpForm
from .models import User
class MyUserChangeForm(UserChangeForm):
    # Reuse the stock admin change form, but bind it to the project's
    # custom User model instead of django.contrib.auth's default.
    class Meta(UserChangeForm.Meta):
        model = User
class MyUserAdmin(UserAdmin):
    """Admin configuration for the custom User model.

    BUG FIX: ``UserAdmin.form`` is the *change* form, but it was set to
    ``SignUpForm`` (a creation form) while ``MyUserChangeForm`` was defined
    and never used.  Wire the change form to ``form`` and the signup form
    to ``add_form`` (the attribute UserAdmin uses when adding users).
    """
    form = MyUserChangeForm
    add_form = SignUpForm


admin.site.register(User, MyUserAdmin)
|
#!/usr/bin/env python
import sys
from problem_3 import get_prime_factors
def main(upper_bound):
    """Return the smallest number evenly divisible by every integer in 2..upper_bound.

    Computed as an LCM: for each prime, keep the highest exponent observed
    in the factorization of any integer in the range, then multiply the
    prime powers together.
    """
    max_exponents = {}
    for n in range(2, upper_bound + 1):
        for prime, exponent in get_prime_factors(n).items():
            if exponent > max_exponents.get(prime, 0):
                max_exponents[prime] = exponent
    product = 1
    for prime, exponent in max_exponents.items():
        product *= prime ** exponent
    return product
if __name__ == '__main__':
    # CLI entry point: the single argument is the inclusive upper bound.
    print(main(int(sys.argv[1])))
|
# -*- coding: utf-8 -*-
# @Time : 2020/5/30 4:22 下午
# @Author : Qingduo-Feng
# @File : FeatureCalculation.py
# @Function:
import numpy as np
import pandas as pd
import math
def eduDis(a, b):
    """Return the Euclidean distance between two 2-D points ``a`` and ``b``."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return np.sqrt(dx ** 2 + dy ** 2)
def straightline(point2, point1):
    """Return (slope, intercept) of the line through two points.

    Raises ZeroDivisionError when both points share the same x coordinate
    (vertical line).
    """
    slope = (point2[1] - point1[1]) / (point2[0] - point1[0])
    intercept = point2[1] - slope * point2[0]
    return slope, intercept
def pointToLineDis(point, line):
    """Distance (rounded to 2 decimals) from ``point`` to a line encoded by kb().

    ``line`` is (0, x) for vertical, (1, y) for horizontal, or (2, k, b)
    for a sloped line y = k*x + b.
    """
    if line[0] == 0:
        # BUG FIX: vertical line is x = line[1]; the original computed
        # abs(point[0]), i.e. the distance to x = 0.
        offset = round(abs(point[0] - line[1]), 2)
    elif line[0] == 1:
        # BUG FIX: horizontal line is y = line[1]; the original computed
        # abs(point[1]), i.e. the distance to y = 0.
        offset = round(abs(point[1] - line[1]), 2)
    else:
        k, b = line[1], line[2]
        offset = round(abs(k * point[0] - point[1] + b) / np.sqrt(k ** 2 + 1), 2)
    return offset
def isEdge(img, point):
    # Return True when `point` is a boundary pixel of its region: any of
    # its 4-connected neighbours is background (0) or it lies near the
    # image rim.  `img` must support tuple indexing (a numpy array).
    x_max = len(img)
    y_max = len(img[0])
    x = point[0]
    y = point[1]
    count = 0
    # NOTE(review): this rim test fires one pixel early (>= x_max - 1
    # instead of >= x_max), so the last valid row/column is always treated
    # as edge -- confirm intentional.
    if x + 1 >= x_max - 1 or y + 1 >= y_max - 1:
        return True
    if img[x + 1, y] > 0:
        count += 1
    if img[x, y+1] > 0:
        count += 1
    # NOTE(review): for x == 0 or y == 0, x-1 / y-1 wrap to the opposite
    # border via Python's negative indexing -- confirm acceptable.
    if img[x-1, y] > 0:
        count += 1
    if img[x, y-1] > 0:
        count += 1
    # Interior pixel iff all four neighbours are foreground.
    if count < 4:
        return True
    else:
        return False
def calRadius(edge_points):
    """Mean centre-to-boundary distance for every labelled region.

    For each non-background label the region centroid is used as the
    centre; the mean distance from it to the region's boundary pixels
    (as decided by isEdge) is reported, one value per label.
    """
    points = np.array(edge_points)
    mean_radii = []
    for label in np.unique(points):
        if label == 0:
            continue  # skip background
        coords = np.argwhere(points == label)
        centre = (int(np.mean(coords[:, 0])), int(np.mean(coords[:, 1])))
        boundary_dists = [eduDis(centre, p) for p in coords
                          if isEdge(edge_points, p)]
        mean_radii.append(np.mean(boundary_dists))
    return mean_radii
def calPerimeter(edge_points):
    """Boundary pixel count (perimeter) of every labelled region.

    CONSISTENCY FIX: the original iterated ``range(1, len(points_num))``,
    i.e. positions into the unique-label array, which only matches the
    labels when they are consecutive integers starting at 0.  Iterate the
    label values themselves (skipping background 0), exactly like
    calRadius/calArea do.
    """
    perimeters = []
    labels = np.unique(edge_points)
    points = np.array(edge_points)
    for label in labels:
        if label == 0:
            continue
        coords = np.argwhere(points == label)
        boundary_len = 0
        for point in coords:
            # Count only boundary pixels.
            if not isEdge(edge_points, point):
                continue
            boundary_len += 1
        perimeters.append(boundary_len)
    return perimeters
def calArea(edge_points):
    """Return the pixel count (area) of every labelled region.

    Background label 0 is excluded; results follow ascending label order.
    """
    points = np.array(edge_points)
    labels = np.unique(points)
    return [len(np.argwhere(points == label)) for label in labels if label != 0]
def calCompactness(perimeter, area):
    """Return elementwise compactness P**2 / A as a numpy array."""
    p = np.asarray(perimeter)
    a = np.asarray(area)
    return p * p / a
def calSmoothness(edge_points):
    # Smoothness per region: the mean absolute difference between each
    # boundary radius and the average of its two neighbouring radii,
    # treating the radius list as circular.
    smoothness = []
    points_num = np.unique(edge_points)
    points = np.array(edge_points)
    for index in points_num:
        if index == 0:
            continue
        points_cood = np.argwhere(points == index)
        # Region centroid, used as the radius origin (same as calRadius).
        center_x = np.mean(points_cood[:, 0])
        center_y = np.mean(points_cood[:, 1])
        center = (int(center_x), int(center_y))
        tmp_radius = []
        for point in points_cood:
            # judge if the point is the edge
            if not isEdge(edge_points, point):
                continue
            radius = eduDis(center, point)
            tmp_radius.append(radius)
        # calculate smoothness
        radius_count = len(tmp_radius)
        differences = []
        for i in range(0, radius_count):
            if radius_count < 3:
                # Too few boundary points for a neighbour average.
                differences.append(0)
                break
            # NOTE(review): the i == 1 branch computes the same value the
            # generic branch would (tmp_radius[0] *is* index i-1 there);
            # i == 0 falls into the generic branch and relies on
            # tmp_radius[-1] wrapping to the last radius (circular list).
            if i == 1:
                differences.append(abs(tmp_radius[i] - np.mean([tmp_radius[i + 1], tmp_radius[0]])))
            elif i == radius_count - 1:
                differences.append(abs(tmp_radius[i] - np.mean([tmp_radius[0], tmp_radius[i - 1]])))
            else:
                differences.append(abs(tmp_radius[i] - np.mean([tmp_radius[i+1], tmp_radius[i - 1]])))
        smoothness.append(np.mean(differences))
    return smoothness
def kb(vertex1, vertex2):
    """Encode the line through two vertices as a tagged tuple.

    Returns (0, x) for a vertical line, (1, y) for a horizontal line, and
    (2, slope, intercept) for a sloped line.  Two identical vertices are
    reported as vertical.
    """
    x1, y1 = vertex1[0], vertex1[1]
    x2, y2 = vertex2[0], vertex2[1]
    if x1 == x2:
        return (0, x1)  # vertical line
    if y1 == y2:
        return (1, y1)  # horizontal line
    slope = (y1 - y2) / (x1 - x2)
    return (2, slope, y1 - slope * x1)  # sloped line
def calConcavity(edge_points):
    # For every pair of adjacent boundary points, build the line through
    # them and check whether all boundary points lie on one side of it to
    # classify the point as convex or concave; for each concave point, the
    # distance to the line through its two neighbours is its concavity.
    # Returns (per-region concave-point counts, per-region concavity sums).
    concavity = []
    concavity_count = []
    points_num = np.unique(edge_points)
    points = np.array(edge_points)
    for index in points_num:
        edges = []
        if index == 0:
            continue
        points_cood = np.argwhere(points == index)
        for point in points_cood:
            # judge if the point is the edge
            if isEdge(edge_points, point):
                edges.append(point)
        concav_points = []
        tmp_count = 0
        # begin to calculate the concavity
        for i in range(len(edges)):
            pre = i
            nex = (i+1)%len(edges)
            line = kb(edges[pre], edges[nex])
            # Signed offsets of every boundary point from the i..i+1 line.
            if line[0] == 0:
                offset = [vertex[0] - edges[pre][0] for vertex in edges]
            elif line[0] == 1:
                offset = [vertex[1] - edges[pre][1] for vertex in edges]
            else:
                k, b = line[1], line[2]
                offset = [k * vertex[0] + b - vertex[1] for vertex in edges]
            offset = np.array(offset)
            large_count = len(np.argwhere(offset >= 0))
            small_count = len(np.argwhere(offset <= 0))
            # NOTE(review): points with offset exactly 0 are counted in
            # *both* totals, so this `or` condition is true unless every
            # offset is 0; an `and` (points on both sides => concave) may
            # have been intended -- confirm before changing.
            if large_count != len(edges) or small_count != len(edges):
                # the point is a concav point
                concav_points.append(i)
                tmp_count += 1
        # begin to calculate the value of concavity
        tmp_concav = 0
        for i in concav_points:
            pre = (i - 1) % len(edges)
            nex = (i + 1) % len(edges)
            point = edges[i]
            line = kb(edges[pre], edges[nex])
            if line[0] == 0:
                offset = point[0] - edges[pre][0]
            elif line[0] == 1:
                offset = point[1] - edges[pre][1]
            else:
                k, b = line[1], line[2]
                offset = k * point[0] + b - point[1]
            offset = abs(offset)
            tmp_concav += offset
        concavity_count.append(tmp_count)
        concavity.append(tmp_concav)
    return concavity_count, concavity
def calSymmetry(edge_points):
    # Symmetry per region: find the longest chord (the two most distant
    # pixels), then compare the summed point-to-chord distances on either
    # side of it; a perfectly symmetric region gives 0.
    symmetry = []
    points_num = np.unique(edge_points)  # get the number of nucleus
    points = np.array(edge_points)
    for index in points_num:
        if index == 0:
            continue
        points_cood = np.argwhere(points == index)
        longest_distance = 0
        distpoint1 = []
        distpoint2 = []
        # calculate the longest distance and the relevant points
        # NOTE(review): this is an O(n^2) scan over all pixel pairs --
        # potentially slow for large regions.
        # print(points_cood)
        for i in points_cood:
            for j in points_cood:
                if (longest_distance <= eduDis(i, j)):
                    longest_distance = eduDis(i, j)
                    distpoint1 = i
                    distpoint2 = j
        # get the straight line equation of distpoint1 and dispoint2
        line = kb(distpoint2, distpoint1)
        leftside = 0
        rightside = 0
        for i in points_cood:
            # `result` is the chord's y (or x for vertical lines) at this point.
            if line[0] == 0:
                result = i[0]
            elif line[0] == 1:
                result = i[1]
            else:
                k, b = line[1], line[2]
                result = k * i[0] + b
            # NOTE(review): for the vertical case `result` is an
            # x-coordinate yet is compared against i[1] (a y-coordinate)
            # below -- confirm the side test is correct for vertical chords.
            if (result < i[1]):
                leftside = leftside + pointToLineDis(i, line)
            else:
                rightside = rightside + pointToLineDis(i, line)
        symmetry.append(abs(leftside - rightside))
    return symmetry
def calFractalDim(edge_points):
    # Per-region "fractal dimension" proxy: the mean of the negative slopes
    # between pairs of nearby boundary pixels (distance in (1, 10)).
    # NOTE(review): this is not a box-counting fractal dimension; confirm
    # the intended definition.
    fractalDim = []
    points_num = np.unique(edge_points)  # get the number of nucleus
    points = np.array(edge_points)
    for index in points_num:
        if index == 0:
            continue
        points_cood = np.argwhere(points == index)
        slopes = []
        i = 0
        while (i < len(points_cood) - 1):
            if not isEdge(edge_points, points_cood[i]):
                i = i + 1
                continue
            j = i + 1
            while (j < len(points_cood) - 1):
                if not isEdge(edge_points, points_cood[j]):
                    j = j + 1
                    continue
                if (1 < eduDis(points_cood[i], points_cood[j]) < 10):
                    # Division by zero on equal x yields inf/nan (numpy
                    # warning, no exception); filtered by the checks below.
                    slope = (points_cood[i][1] - points_cood[j][1]) / (points_cood[i][0] - points_cood[j][0])
                    if (slope < 0):
                        if (math.isinf(slope) != True):
                            if (math.isnan(slope) != True):
                                slopes.append(slope)
                j = j + 1
            # NOTE(review): advancing with `i = j + 1` skips every pixel up
            # to the inner loop's end rather than moving to i + 1 --
            # confirm this sub-sampling is intentional.
            i = j + 1
        # print(slopes)
        # np.mean of an empty list yields nan; feature_extract zeroes nans.
        fractalDim.append(np.mean(slopes))
    return fractalDim
def calTexture(edge_points, image):
    """Intensity variance of each labelled region (background 0 skipped).

    ``image`` must support tuple indexing (a numpy array); one variance is
    returned per label, in ascending label order.
    """
    points = np.array(edge_points)
    textures = []
    for label in np.unique(points):
        if label == 0:
            continue
        coords = np.argwhere(points == label)
        values = np.array([image[p[0], p[1]] for p in coords])
        textures.append(np.var(values))
    return textures
def feature_extract(center_points, edge_points, image):
    """Build a 30-element feature vector from a labelled region image.

    For each of the 10 base features (radius, perimeter, area, compactness,
    smoothness, concavity, concavity points, symmetry, fractal dimension,
    texture) the mean, the standard deviation, and the largest value are
    appended, rounded to 4 decimals.

    ``center_points`` is currently unused but kept for interface
    compatibility with existing callers.
    """
    radius = np.array(calRadius(edge_points))
    perimeter = np.array(calPerimeter(edge_points))
    area = np.array(calArea(edge_points))
    compactness = np.array(calCompactness(perimeter, area))
    smoothness = np.array(calSmoothness(edge_points))
    concavity_points, concavity = calConcavity(edge_points)
    concavity_points = np.array(concavity_points)
    concavity = np.array(concavity)
    symmetry = np.array(calSymmetry(edge_points))
    texture = np.array(calTexture(edge_points, image))
    fractal_dimension = np.array(calFractalDim(edge_points))
    # Empty/degenerate regions produce nan fractal dimensions; zero them.
    fractal_dimension[np.isnan(fractal_dimension)] = 0

    base_features = (radius, perimeter, area, compactness, smoothness,
                     concavity, concavity_points, symmetry,
                     fractal_dimension, texture)
    feature_arr = []
    # Mean of each base feature.
    for values in base_features:
        feature_arr.append(np.mean(values))
    # Standard deviation of each base feature.
    for values in base_features:
        feature_arr.append(np.std(values))
    # Largest value of each base feature.
    # BUG FIX: fractal_dimension previously used np.min here while every
    # other feature in this section used np.max; use np.max consistently.
    for values in base_features:
        feature_arr.append(np.max(values))
    feature_arr = np.around(np.array(feature_arr), decimals=4)
    return feature_arr.tolist()
|
# -*- coding: utf-8 -*-
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_admin import Admin
from flask_bcrypt import Bcrypt
# Module-level Flask application instance (no app factory).
app = Flask(__name__)
# APP_SETTINGS names the config object to load (e.g. "config.ProductionConfig").
app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
bcrypt = Bcrypt(app)
db = SQLAlchemy(app)
admin = Admin(app)
# Blueprints are imported *after* app/db exist: they import this module
# themselves, so importing them earlier would create a circular import.
from .admin import admin_blueprint
from .auth import auth_blueprint
from .api import api_blueprint
from .dashboard import dashboard_blueprint
app.register_blueprint(admin_blueprint)
app.register_blueprint(auth_blueprint)
app.register_blueprint(api_blueprint)
app.register_blueprint(dashboard_blueprint)
# Import models so their tables are registered with SQLAlchemy.
from app import models
|
import numpy as np
# print(np.sqrt(38912))
# Search for factor pairs (a, b) of 38912 with a < 252 and b < 152,
# printing the first factor of every match.
x = np.arange(252)
y = np.arange(152)
for first_factor in x:
    for second_factor in y:
        if first_factor * second_factor == 38912:
            print(first_factor)
print('fda;') |
# Configuration constants for the limlam_mocker-based MCMC pipeline.
limlam_dir = '/home/havard/Documents/covariance_calculator/limlam_mocker/'
output_dir = 'output_mcmc/'
halos_dir = 'catalogues_for_mcmc/' #'full_cita_catalogues/'#'catalogues/'
# Input power spectrum and bin-count files.
pspec_fp = 'comap_test_pspec_10muK.txt'
B_i_fp = 'bin_counts_test_10muK.txt'
mode = 'ps' #'ps'
n_patches = 1
# MCMC ensemble size.
n_walkers = 10
n_noise = 2 # Number of noise realizations per signal realization for the vid
n_realizations = 2 # Number of realizations of CO signal mapinst used to compute average power spectrum and vid
# for each mcmc-step
n_threads = 4
nsteps = 20
|
# Copyright (c) 2017, John Skinner
import unittest
import numpy as np
import transforms3d as tf3d
import util.transform as tf
import dataset.tum.tum_loader as tum_loader
class TestTUMLoader(unittest.TestCase):
    """Tests for tum_loader: TUM-to-internal coordinate conversion
    (make_camera_pose) and timestamp-based stream association (associate_data)."""
    def test_make_camera_pose_returns_transform_object(self):
        pose = tum_loader.make_camera_pose(10, -22.4, 13.2, 0, 0, 0, 1)
        self.assertIsInstance(pose, tf.Transform)
    def test_make_camera_pose_location_coordinates(self):
        # Order here is right, down, forward
        pose = tum_loader.make_camera_pose(10, -22.4, 13.2, 0, 0, 0, 1)
        # Change coordinate order to forward, left, up
        self.assertNPEqual((13.2, -10, 22.4), pose.location)
    def test_make_camera_pose_changes_rotation_each_axis(self):
        # Roll, rotation around z-axis
        quat = tf3d.quaternions.axangle2quat((0, 0, 1), np.pi / 6)
        pose = tum_loader.make_camera_pose(10, -22.4, 13.2, quat[1], quat[2], quat[3], quat[0])
        self.assertNPClose((np.pi / 6, 0, 0), pose.euler)
        # Pitch, rotation around x-axis
        quat = tf3d.quaternions.axangle2quat((1, 0, 0), np.pi / 6)
        pose = tum_loader.make_camera_pose(10, -22.4, 13.2, quat[1], quat[2], quat[3], quat[0])
        self.assertNPClose((0, -np.pi / 6, 0), pose.euler)
        # Yaw, rotation around y-axis
        quat = tf3d.quaternions.axangle2quat((0, 1, 0), np.pi / 6)
        pose = tum_loader.make_camera_pose(10, -22.4, 13.2, quat[1], quat[2], quat[3], quat[0])
        self.assertNPClose((0, 0, -np.pi / 6), pose.euler)
    def test_make_camera_pose_combined(self):
        # Random axis-angle rotations, with the axis permuted into the TUM
        # frame on input and expected back in the internal frame.
        for _ in range(10):
            loc = np.random.uniform(-1000, 1000, 3)
            rot_axis = np.random.uniform(-1, 1, 3)
            rot_angle = np.random.uniform(-np.pi, np.pi)
            quat = tf3d.quaternions.axangle2quat((-rot_axis[1], -rot_axis[2], rot_axis[0]), rot_angle)
            pose = tum_loader.make_camera_pose(-loc[1], -loc[2], loc[0], quat[1], quat[2], quat[3], quat[0])
            self.assertNPEqual(loc, pose.location)
            self.assertNPClose(tf3d.quaternions.axangle2quat(rot_axis, rot_angle, False), pose.rotation_quat(True))
    def test_associate_data_same_keys(self):
        # Identical timestamps across all maps must associate exactly.
        desired_result = sorted(
            [np.random.uniform(0, 100),
             np.random.randint(0, 1000),
             np.random.uniform(-100, 100),
             "test-{0}".format(np.random.randint(0, 1000))]
            for _ in range(20))
        int_map = {stamp: int_val for stamp, int_val, _, _ in desired_result}
        float_map = {stamp: float_val for stamp, _, float_val, _ in desired_result}
        str_map = {stamp: str_val for stamp, _, _, str_val in desired_result}
        self.assertEqual(desired_result, tum_loader.associate_data(int_map, float_map, str_map))
    def test_associate_data_noisy_keys(self):
        # Timestamps jittered by up to +/-0.02 must still associate.
        random = np.random.RandomState()
        desired_result = sorted(
            [random.uniform(0, 100),
             random.randint(0, 1000),
             random.uniform(-100, 100),
             "test-{0}".format(random.randint(0, 1000))]
            for _ in range(20))
        int_map = {stamp: int_val for stamp, int_val, _, _ in desired_result}
        float_map = {stamp + random.uniform(-0.02, 0.02): float_val for stamp, _, float_val, _ in desired_result}
        str_map = {stamp + random.uniform(-0.02, 0.02): str_val for stamp, _, _, str_val in desired_result}
        self.assertEqual(desired_result, tum_loader.associate_data(int_map, float_map, str_map))
    def test_associate_data_missing_keys(self):
        # Entries missing from some maps must be dropped from the result.
        random = np.random.RandomState()
        original_data = sorted(
            [idx / 2 + random.uniform(0, 0.01),
             random.randint(0, 1000),
             random.uniform(-100, 100),
             "test-{0}".format(random.randint(0, 1000))]
            for idx in range(20))
        int_map = {stamp: int_val for stamp, int_val, _, _ in original_data}
        float_map = {stamp + random.uniform(-0.02, 0.02): float_val for stamp, _, float_val, _ in original_data
                     if stamp > 2}
        str_map = {stamp + random.uniform(-0.02, 0.02): str_val for stamp, _, _, str_val in original_data
                   if stamp < 8}
        self.assertEqual([inner for inner in original_data if inner[0] > 2 and inner[0] < 8],
                         tum_loader.associate_data(int_map, float_map, str_map))
    # Helper: exact elementwise array equality with a readable message.
    def assertNPEqual(self, arr1, arr2):
        self.assertTrue(np.array_equal(arr1, arr2), "Arrays {0} and {1} are not equal".format(str(arr1), str(arr2)))
    # Helper: approximate elementwise equality (float tolerance).
    def assertNPClose(self, arr1, arr2):
        self.assertTrue(np.all(np.isclose(arr1, arr2)), "Arrays {0} and {1} are not close".format(str(arr1), str(arr2)))
|
import utils, os
from werkzeug.utils import secure_filename
def upload_song(mp3, album, file_name, mp3_path, album_path):
    """Validate extensions, sanitise the base name, and save both uploads.

    Raises NameError (carrying the offending filename) when the song is
    not an .mp3 or the album art is not a .png.
    """
    if not utils.verify_extension(mp3.filename, ".mp3"):
        raise NameError(mp3.filename)
    if not utils.verify_extension(album.filename, ".png"):
        raise NameError(album.filename)
    # Sanitised, lower-cased base name shared by both saved files.
    base_name = secure_filename(file_name).lower()
    mp3.save(os.path.join(mp3_path, base_name + '.mp3'))
    album.save(os.path.join(album_path, base_name + '.png'))
|
def is_number(s):
    """Return True when ``s`` can be parsed as a float, else False."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def vote1(searchstring):
    """Predict a functional class for *searchstring* by unweighted majority vote.

    Collects every interaction line in confidence.ppi mentioning the
    protein, extracts the interacting partner names, gathers those
    partners' annotation lines from MIPSFirstLevel.anno3, and returns the
    class index (0..49) receiving the most votes.
    """
    # `open` replaces the Python-2-only `file` builtin; identical behaviour.
    confidence = open('confidence.ppi').readlines()
    relations = open('MIPSFirstLevel.anno3').readlines()
    con_list = []
    con_filtered = []
    rel_list = []
    votes = [0]*50
    # Interaction lines that mention the query protein.
    for line in confidence:
        for word in line.split():
            if(word == searchstring):
                con_list.append(line)
    # Partner protein names: non-numeric tokens other than the query.
    for i in con_list:
        for word in i.split():
            if(not is_number(word) and word !=searchstring):
                con_filtered.append(word)
    # Annotation lines for every partner.
    for i in con_filtered:
        for line in relations:
            for word in line.split():
                if (word == i):
                    rel_list.append(line)
    # Tally every numeric class label found in the partner annotations.
    # (Unused locals con_percents/index/val from the original were removed.)
    for i in rel_list:
        for word in i.split():
            if(is_number(word)):
                votes[int(word)]+=1
    max_votes = max(votes)
    max_index = votes.index(max_votes)
    return max_index
def vote2(searchstring):
    """Predict a functional class for *searchstring* by confidence-weighted vote.

    Like vote1, but each partner's class votes are weighted by the
    confidence value that follows the partner's name in confidence.ppi.
    """
    # `open` replaces the Python-2-only `file` builtin; identical behaviour.
    confidence = open('confidence.ppi').readlines()
    relations = open('MIPSFirstLevel.anno3').readlines()
    con_list = []
    con_filtered = []
    con_percents = []
    rel_list = []
    votes_new = [0]*50
    # Interaction lines that mention the query protein.
    for line in confidence:
        for word in line.split():
            if(word == searchstring):
                con_list.append(line)
    for i in con_list:
        for word in i.split():
            # con_percents interleaves partner names and confidence values.
            if(word !=searchstring):
                con_percents.append(word)
            if(not is_number(word) and word !=searchstring):
                con_filtered.append(word)
    # Annotation lines for every partner.
    for i in con_filtered:
        for line in relations:
            for word in line.split():
                if (word == i):
                    rel_list.append(line)
    val = 0
    for i in rel_list:
        for word in i.split():
            if(not is_number(word)):
                # The confidence follows the partner name in con_percents.
                val = con_percents[con_percents.index(word)+1]
            if(is_number(word)):
                votes_new[int(word)]+=float(val)
    max_new_votes = max(votes_new)
    max_new_index = votes_new.index(max_new_votes)
    return max_new_index
def leave_one_out1():
    # Leave-one-out evaluation of the unweighted voter (vote1): for every
    # protein name in the annotation file, re-predict its class and compare
    # it with the numeric label(s) following the name on the same line.
    relations = file('MIPSFirstLevel.anno3').readlines()
    total = 0
    correct = 0
    for line in relations:
        for word in line.split():
            if(not is_number(word)):
                tag = vote1(word)
                total+=1
            if(is_number(word)):
                # NOTE(review): `tag` is unbound if a numeric token appears
                # before any protein name -- confirm the input format
                # guarantees name-first lines.
                if(int(word) == int(tag)):
                    correct+=1
    print "total: ", total
    print "correct: ", correct
    print "frac: ", float(correct)/total*100
def leave_one_out2():
    # Leave-one-out evaluation of the confidence-weighted voter (vote2);
    # same procedure as leave_one_out1.
    relations = file('MIPSFirstLevel.anno3').readlines()
    total = 0
    correct = 0
    for line in relations:
        for word in line.split():
            if(not is_number(word)):
                tag = vote2(word)
                total+=1
            if(is_number(word)):
                # NOTE(review): `tag` is unbound if a numeric token appears
                # before any protein name -- confirm the input format
                # guarantees name-first lines.
                if(int(word) == int(tag)):
                    correct+=1
    print "total: ", total
    print "correct: ", correct
    print "frac: ", float(correct)/total*100
# Script entry: run both leave-one-out evaluations (each prints its stats).
leave_one_out1()
leave_one_out2()
|
from PyQt5.QtWidgets import QDialog, QPushButton, QLineEdit, QApplication, QLabel, qApp
class UserNameDialog(QDialog):
    '''
    Start-up dialog that asks the user for a login name and password.
    '''
    def __init__(self):
        super().__init__()
        # Set to True by click() once the user confirms with non-empty fields.
        self.ok_pressed = False
        self.setWindowTitle('Привет!')
        self.setFixedSize(175, 135)
        self.label = QLabel('Введите имя пользователя:', self)
        self.label.move(10, 10)
        self.label.setFixedSize(150, 10)
        self.client_name = QLineEdit(self)
        self.client_name.setFixedSize(154, 20)
        self.client_name.move(10, 30)
        self.btn_ok = QPushButton('Начать', self)
        self.btn_ok.move(10, 105)
        self.btn_ok.clicked.connect(self.click)
        # Cancel quits the whole application.
        self.btn_cancel = QPushButton('Выход', self)
        self.btn_cancel.move(90, 105)
        self.btn_cancel.clicked.connect(qApp.exit)
        self.label_passwd = QLabel('Введите пароль:', self)
        self.label_passwd.move(10, 55)
        self.label_passwd.setFixedSize(150, 15)
        self.client_passwd = QLineEdit(self)
        self.client_passwd.setFixedSize(154, 20)
        self.client_passwd.move(10, 75)
        # Mask typed characters in the password field.
        self.client_passwd.setEchoMode(QLineEdit.Password)
        self.show()
    # OK button handler: if both inputs are non-empty, set the flag and
    # leave the event loop.
    def click(self):
        '''OK button handler.'''
        if self.client_name.text() and self.client_passwd.text():
            self.ok_pressed = True
            qApp.exit()
if __name__ == '__main__':
    # Manual smoke test: show the dialog standalone.
    app = QApplication([])
    dial = UserNameDialog()
    app.exec_()
|
from __future__ import unicode_literals
from django.db import models
class Users(models.Model):
    # Basic profile fields; timestamps are maintained automatically.
    name = models.TextField()
    email = models.TextField()
    created_at = models.DateField(auto_now_add=True)
    updated_at = models.DateField(auto_now=True)
    # NOTE(review): if re-enabled, __str__ should return self.name (an
    # attribute) -- self.name() would try to call a string.
    # def __str__(self):
    #     return self.name()
class Friendships(models.Model):
    """A directed friendship edge between two Users rows."""
    # on_delete made explicit: CASCADE was the implicit default in old
    # Django, and the argument is mandatory from Django 2.0 onwards.
    user = models.ForeignKey('Users', related_name='usersfriend',
                             on_delete=models.CASCADE)
    friend = models.ForeignKey('Users', related_name='friendsfriend',
                               on_delete=models.CASCADE)
    created_at = models.DateField(auto_now_add=True)
    updated_at = models.DateField(auto_now=True)
|
from django.urls import path
from .views import (
PostListView, PostDetailView, PostCreateView, PostCommentView, PostUpdateView, PostDeleteView, LikeView
)
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    # NOTE(review): the list view has no `name=`, so it cannot be reversed.
    path('', PostListView.as_view()),
    path('<int:pk>/', PostDetailView.as_view(), name='post-view'),
    # LikeView is a function-based view, hence no .as_view().
    path('like/<int:pk>', LikeView, name='like-post'),
    path('newpost/', PostCreateView.as_view(), name='post-create'),
    path('<int:pk>/comment', PostCommentView.as_view(), name='add-comment'),
    path('<int:pk>/update', PostUpdateView.as_view(), name='post-update'),
    path('<int:pk>/delete', PostDeleteView.as_view(), name='post-delete'),
]
# Serve user-uploaded media through Django itself in development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
|
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import json
import time
import sentiment_mod as s
# SECURITY(review): hard-coded Twitter API credentials committed to source.
# These should be revoked and loaded from environment variables or a config
# file excluded from version control.
ckey="bwUA9QzIsU6PEszgF81zVFABm"
csecret="H2lQ3G6ot3fYpqmwSRrInBwjCW3aWzbu34F5CVzQ5wmrMXdI23"
atoken="982109105174147073-cUWfMQ70HABuqW21DlMjm0pEBnZ035y"
asecret="IMBMP0w7ImyLmLXppf0WKDwYXRdUqIoPktGMNvfRzfDiH"
class listener(StreamListener):
    """Tweepy stream listener that scores tweets and logs confident labels."""

    def on_data(self, data):
        """Classify one raw tweet; append high-confidence labels to twitter-out.txt."""
        all_data = json.loads(data)
        tweet = ascii(all_data["text"])
        sentiment_value, confidence = s.sentiment(tweet)
        print(tweet, sentiment_value, confidence)
        if confidence * 100 >= 80:
            # `with` guarantees the handle is closed even if the write fails
            # (the original open/close pair leaked on exceptions).
            with open('twitter-out.txt', 'a') as output:
                output.write(sentiment_value)
        return True

    def on_error(self, status):
        """Print the stream error status code."""
        print(status)
# Authenticate with Twitter and stream tweets matching the keyword "car".
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream = Stream(auth, listener())
twitterStream.filter(track=["car"])
|
import math
import matplotlib
import numpy as np
from numpy.core.numerictypes import ScalarType
import pandas as pd
import seaborn as sns
import time
from datetime import date, datetime, time, timedelta
from matplotlib import pyplot as plt
from pylab import rcParams
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from tqdm import tqdm_notebook
# %matplotlib inline
#### Input params ##################
stk_path = "data_final.csv"
test_size = 0.2 # proportion of dataset to be used as test set
cv_size = 0.2 # proportion of dataset to be used as cross-validation set
Nmax = 30 # for feature at day t, we use lags from t-1, t-2, ..., t-N as features
# Nmax is the maximum N we are going to test
fontsize = 14
ticklabelsize = 14
####################################
def get_preds_lin_reg(df, target_col, N, pred_min, offset):
    """
    Given a dataframe, get prediction at timestep t using values from t-1, t-2, ..., t-N.
    Inputs
        df         : dataframe with the values you want to predict. Can be of any length.
        target_col : name of the column you want to predict e.g. 'adj_close'
        N          : get prediction at timestep t using values from t-1, t-2, ..., t-N
        pred_min   : all predictions should be >= pred_min
        offset     : for df we only do predictions for df[offset:]. e.g. offset can be size of training set
    Outputs
        pred_list  : the predictions for target_col. np.array of length len(df)-offset.
    """
    # Create linear regression object
    regr = LinearRegression(fit_intercept=True)
    # BUG FIX: the column was hard-coded to 'daily', silently ignoring the
    # target_col parameter; use the requested column instead.
    series = df[target_col]
    pred_list = []
    for i in range(offset, len(series)):
        # Fit y = a*t + b on the N previous values (t = 0 .. N-1) ...
        X_train = np.array(range(len(series[i-N:i]))).reshape(-1, 1)
        y_train = np.array(series[i-N:i]).reshape(-1, 1)
        regr.fit(X_train, y_train)
        # ... and extrapolate one step ahead (t = N).
        pred = regr.predict(np.array(N).reshape(1, -1))
        pred_list.append(pred[0][0])
    # Clip predictions from below at pred_min.
    pred_list = np.array(pred_list)
    pred_list[pred_list < pred_min] = pred_min
    return pred_list
def get_mape(y_true, y_pred):
    """
    Compute mean absolute percentage error (MAPE), in percent.
    """
    actual = np.asarray(y_true)
    predicted = np.asarray(y_pred)
    return np.mean(np.abs((actual - predicted) / actual)) * 100
# ---- Load data ------------------------------------------------------------
df = pd.read_csv(stk_path, sep = ",")
# Convert Date column to datetime
# Change all column headings to be lower case, and remove spacing
df.columns = [str(x).lower().replace(' ', '_') for x in df.columns]
df.head()
# Plot adjusted close over time
rcParams['figure.figsize'] = 10, 8 # width 10, height 8
ax = df.plot(x='day', y='daily', style='b-', grid=True)
ax.set_xlabel("Day")
ax.set_ylabel("Price")
# Get sizes of each of the datasets
num_cv = int(cv_size*len(df))
num_test = int(test_size*len(df))
num_train = len(df) - num_cv - num_test
print("num_train = " + str(num_train))
print("num_cv = " + str(num_cv))
print("num_test = " + str(num_test))
# Split into train, cv, and test
train = df[:num_train].copy()
cv = df[num_train:num_train+num_cv].copy()
train_cv = df[:num_train+num_cv].copy()
test = df[num_train+num_cv:].copy()
print("train.shape = " + str(train.shape))
print("cv.shape = " + str(cv.shape))
print("train_cv.shape = " + str(train_cv.shape))
print("test.shape = " + str(test.shape))
# Plot adjusted close over time
rcParams['figure.figsize'] = 10, 8 # width 10, height 8
ax = train.plot(x='day', y='daily', style='b-', grid=True)
ax = cv.plot(x='day', y='daily', style='y-', grid=True, ax=ax)
ax = test.plot(x='day', y='daily', style='g-', grid=True, ax=ax)
ax.legend(['train', 'dev', 'test'])
ax.set_xlabel("day")
ax.set_ylabel("price")
# ---- Tune the window size N on the dev set --------------------------------
RMSE = []
R2 = []
mape = []
for N in range(1, Nmax+1): # N is no. of samples to use to predict the next value
    est_list = get_preds_lin_reg(train_cv, 'daily', N, 0, num_train)
    cv.loc[:, 'est' + '_N' + str(N)] = est_list
    RMSE.append(math.sqrt(mean_squared_error(est_list, cv['daily'])))
    R2.append(r2_score(cv['daily'], est_list))
    mape.append(get_mape(cv['daily'], est_list))
print('RMSE = ' + str(RMSE))
print('R2 = ' + str(R2))
print('MAPE = ' + str(mape))
cv.head()
# Plot RMSE versus N
matplotlib.rcParams.update({'font.size': 14})
plt.figure(figsize=(12, 8), dpi=80)
plt.plot(range(1, Nmax+1), RMSE, 'x-')
plt.grid()
plt.xlabel('N')
plt.ylabel('RMSE')
plt.xlim([2, 30])
# Plot R2 versus N. Note for R2 larger better.
matplotlib.rcParams.update({'font.size': 14})
plt.figure(figsize=(12, 8), dpi=80)
plt.plot(range(1, Nmax+1), R2, 'x-')
plt.grid()
plt.xlabel('N')
plt.ylabel('R2')
# Plot MAPE versus N. Note for MAPE smaller better.
plt.figure(figsize=(12, 8), dpi=80)
plt.plot(range(1, Nmax+1), mape, 'x-')
plt.grid()
plt.xlabel('N')
plt.ylabel('MAPE')
# Set optimum N
N_opt = 5
# Specify the day you are interested in
day = 200
# Specify the maximum N you want to plot (If Nmax2 is too large it gets very cluttered)
Nmax2 = 5
df_temp = cv[cv['day'] <= day]
plt.figure(figsize=(12, 8), dpi=80)
plt.plot(range(1,Nmax2+2), df_temp[-Nmax2-1:]['daily'], 'bx-')
plt.plot(Nmax2+1, df_temp[-1:]['daily'], 'ys-')
legend_list = ['daily', 'actual_value']
# Plot the linear regression lines and the predictions
color_list = ['r', 'g', 'k', 'y', 'm', 'c', '0.75']
marker_list = ['x', 'x', 'x', 'x', 'x', 'x', 'x']
regr = LinearRegression(fit_intercept=True) # Create linear regression object
# As configured (Nmax2 = 5), range(5, Nmax2+1) contains only N = 5, so a
# single regression line is drawn.
for N in range(5, Nmax2+1):
    # Plot the linear regression lines
    X_train = np.array(range(len(df_temp['daily'][-N-1:-1]))) # e.g. [0 1 2 3 4]
    y_train = np.array(df_temp['daily'][-N-1:-1]) # e.g. [2944 3088 3226 3335 3436]
    X_train = X_train.reshape(-1, 1)
    y_train = y_train.reshape(-1, 1)
    regr.fit(X_train, y_train) # Train the model
    y_est = regr.predict(X_train) # Get linear regression line
    plt.plot(range(Nmax2+1-N,Nmax2+2),
             np.concatenate((y_est, np.array(df_temp['est_N'+str(N)][-1:]).reshape(-1,1))),
             color=color_list[N%len(color_list)],
             marker=marker_list[N%len(marker_list)])
    legend_list.append('est_N'+str(N)+'_lr')
    # Plot the predictions
    plt.plot(Nmax2+1,
             df_temp['est_N'+str(N)][-1:],
             color=color_list[N%len(color_list)],
             marker='o')
    legend_list.append('est_N'+str(N))
plt.grid()
plt.xlabel('timestep')
plt.ylabel('USD')
plt.legend(legend_list, bbox_to_anchor=(1.05, 1))
matplotlib.rcParams.update({'font.size': fontsize})
# Plot adjusted close over time
rcParams['figure.figsize'] = 10, 8 # width 10, height 8
ax = train.plot(x='day', y='daily', style='b-', grid=True)
ax = cv.plot(x='day', y='daily', style='y-', grid=True, ax=ax)
ax = test.plot(x='day', y='daily', style='g-', grid=True, ax=ax)
ax = cv.plot(x='day', y='est_N1', style='r-', grid=True, ax=ax)
ax = cv.plot(x='day', y='est_N5', style='m-', grid=True, ax=ax)
ax.legend(['train', 'dev', 'test', 'predictions with N=1', 'predictions with N=5'])
ax.set_xlabel("day")
ax.set_ylabel("price")
# Plot adjusted close over time
rcParams['figure.figsize'] = 10, 8 # width 10, height 8
ax = train.plot(x='day', y='daily', style='bx-', grid=True)
ax = cv.plot(x='day', y='daily', style='yx-', grid=True, ax=ax)
ax = test.plot(x='day', y='daily', style='gx-', grid=True, ax=ax)
ax = cv.plot(x='day', y='est_N1', style='rx-', grid=True, ax=ax)
ax = cv.plot(x='day', y='est_N5', style='mx-', grid=True, ax=ax)
ax.legend(['train', 'dev', 'test', 'predictions with N=1', 'predictions with N=5'])
ax.set_xlabel("day")
ax.set_ylabel("Price")
# ax.set_xlim([date(2017, 11, 1), date(2017, 12, 30)])
# ax.set_ylim([127, 137])
ax.set_title('Zoom in to dev set')
# ---- Final evaluation on the held-out test set ----------------------------
est_list = get_preds_lin_reg(df, 'daily', N_opt, 0, num_train+num_cv)
test.loc[:, 'est' + '_N' + str(N_opt)] = est_list
print("RMSE = %0.3f" % math.sqrt(mean_squared_error(est_list, test['daily'])))
print("R2 = %0.3f" % r2_score(test['daily'], est_list))
print("MAPE = %0.3f%%" % get_mape(test['daily'], est_list))
test.head()
# Plot adjusted close over time
rcParams['figure.figsize'] = 10, 8 # width 10, height 8
ax = train.plot(x='day', y='daily', style='b-', grid=True)
ax = cv.plot(x='day', y='daily', style='y-', grid=True, ax=ax)
ax = test.plot(x='day', y='daily', style='g-', grid=True, ax=ax)
ax = test.plot(x='day', y='est_N5', style='r-', grid=True, ax=ax)
ax.legend(['train', 'dev', 'test', 'predictions with N_opt=5'])
ax.set_xlabel("day")
ax.set_ylabel("price")
# Plot adjusted close over time
rcParams['figure.figsize'] = 10, 8 # width 10, height 8
ax = train.plot(x='day', y='daily', style='bx-', grid=True)
ax = cv.plot(x='day', y='daily', style='yx-', grid=True, ax=ax)
ax = test.plot(x='day', y='daily', style='gx-', grid=True, ax=ax)
ax = test.plot(x='day', y='est_N5', style='rx-', grid=True, ax=ax)
ax.legend(['train', 'dev', 'test', 'predictions with N_opt=5'])
ax.set_xlabel("day")
ax.set_ylabel("price")
# ax.set_xlim([date(2018, 6, 1), date(2018, 7, 31)])
# ax.set_ylim([135, 150])
ax.set_title('Zoom in to test set')
# Plot adjusted close over time, only for test set
rcParams['figure.figsize'] = 10, 8 # width 10, height 8
matplotlib.rcParams.update({'font.size': 14})
ax = test.plot(x='day', y='daily', style='gx-', grid=True)
ax = test.plot(x='day', y='est_N5', style='rx-', grid=True, ax=ax)
ax.legend(['test', 'predictions using linear regression'], loc='upper left')
ax.set_xlabel("day")
ax.set_ylabel("Price")
#plt.show()
# from sklearn.preprocessing import StandardScaler
# data = pd.read_csv('data_final.csv')
# data.head()
# # Let’s select some features to explore more :
# df_new = data[['daily']]
# last_30_days = df_new[-30:].values
# scaler = StandardScaler()
# scaler.fit(last_30_days)
# last_30_days_scaled = scaler.transform(last_30_days)
# X_test = []
# X_test.append(last_30_days_scaled)
# X_test = np.array(X_test)
# X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1],1))
# pred_price = regr.predict(X_test)
# pred_price = scaler.inverse_transform(pred_price)
# print('Est Price: ' + str(pred_price))
# Save as
# csv
test_lin_reg = test
test_lin_reg.to_csv("test_lin_reg.csv") |
from pyspark import SparkContext,Row,SQLContext,SparkConf
from elasticsearch import Elasticsearch
from pyspark.mllib.feature import HashingTF, IDF
import imp
import json
import ast
import string
import re
from pyspark.ml.feature import CountVectorizer
from pyspark.ml.feature import StopWordsRemover
from pyspark.ml.clustering import DistributedLDAModel,LDA
RedisConfig = imp.load_source('RedisConfig', '/redis_conf/RedisConfig.py')
from datetime import datetime
import redis
from RedisConfig import RedisConfig
import types
# Load Spark/Redis/AWS connection settings from the shared RedisConfig module.
cfg = RedisConfig()
SPARK_IP = cfg.SPARK_IP
SPARK_PORT = cfg.SPARK_PORT
SPARK_APP_NAME = cfg.SPARK_APP_NAME
AWS_ACCESS_KEY_ID = cfg.AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY = cfg.AWS_SECRET_ACCESS_KEY
# Build the Spark context once at import time; sc_sql wraps it for DataFrame work.
conf = SparkConf() \
    .setMaster("spark://%s:%s" %
               (SPARK_IP, SPARK_PORT)) \
    .setAppName(SPARK_APP_NAME)
sc = SparkContext(conf=conf)
sc_sql = SQLContext(sc)
def getValues(keys):
    """Fetch (key, raw_value) pairs for every key in *keys* from Redis db 1.

    Runs once per RDD partition (see mapPartitions in __main__); opens one
    connection per partition using the broadcast Redis credentials.
    """
    conn = redis.Redis(host=REDIS_IP.value, port=REDIS_PORT.value,
                       password=REDIS_PASS.value, db=1)
    return [(key, conn.get(key)) for key in keys]
def process(docs):
    # Parse each (key, raw_json) pair into a Row(idd=repo_name, words=tokens)
    # suitable for createDataFrame; runs per partition.
    result =[]
    for doc in docs:
        # doc[1] is a string-encoded dict; 'data' is itself string-encoded.
        json_data = ast.literal_eval(doc[1])
        try:
            meta_data = ast.literal_eval(json_data['data'])
            desc = meta_data['desc']
            name = meta_data['repo_name']
            result.append(Row(idd=name,words=desc.split(' ')))
        except:
            # NOTE(review): bare except silently drops any malformed record
            # (missing keys, bad literal); consider narrowing to
            # (KeyError, ValueError, SyntaxError) and logging the key.
            print doc
            pass
    return result
def stem_words(wordList) :
    # Strip punctuation, drop tokens of length <= 2 and Porter-stem the rest,
    # preserving the (idd, words) Row shape for the downstream CountVectorizer.
    # NOTE(review): PorterStemmer is not imported anywhere in this file as
    # shown -- presumably `from nltk.stem.porter import PorterStemmer` exists
    # elsewhere; confirm, otherwise this raises NameError at runtime.
    rows =[]
    for word in wordList:
        strs =[]
        for w in word[1] :
            # Force ASCII bytes before punctuation stripping (Python 2 str).
            s = w.encode('ascii')
            for c in string.punctuation:
                s=s.replace(c,"")
            if (len(s)>2):
                # A fresh stemmer per token is wasteful; could be hoisted.
                ss=PorterStemmer().stem(s)
                strs.append(ss)
        row = Row(idd = word[0], words =strs)
        rows.append(row)
    return rows
def get_names(repos):
    """Return [(ascii_key, repo_name), ...] for each (key, raw_json) in *repos*.

    Each raw value is a string-encoded dict whose 'meta' entry is itself a
    string-encoded dict carrying 'repo_name'.
    """
    names =[]
    for repo in repos:
        # BUG FIX: the original parsed the module-level `data` variable instead
        # of the element being iterated (`repo`), so it either crashed or
        # produced the same wrong name for every row.
        json_data = ast.literal_eval(repo[1])
        name = ast.literal_eval(json_data['meta'])['repo_name']
        names.append((repo[0].encode('ascii'),name))
    return names
def doc_name(data):
    """Map a (key, raw_json) pair to (repo_name, (key, stars, url, actor))."""
    key, raw = data[0], data[1]
    record = ast.literal_eval(raw)          # outer dict: 'data' (string) + 'stars'
    meta = ast.literal_eval(record['data']) # inner dict with repo metadata
    return (meta['repo_name'],
            (key, record['stars'], meta['url'], meta['actor']))
def extraxt(x):
    """Expand one describeTopics row (topic, termIndices, termWeights) into
    (topic, term_bytes, weight) triples via the module-level vocabArray.

    (Name kept as-is, typo included, because it is referenced by flatMap
    in __main__.)
    """
    topic, indices, weights = x[0], x[1], x[2]
    return [(topic, vocabArray[idx].encode('ascii'), wt)
            for idx, wt in zip(indices, weights)]
def write_stars(record):
    """Persist one (topic_id, [repo_json, ...]) pair to Redis db 10.

    record[0] is the topic id (used as the list key); record[1] is the
    star-sorted sequence of repo payloads, appended in order with RPUSH.
    """
    redis_db = redis.Redis(host=REDIS_IP.value, port=REDIS_PORT.value,
                           password=REDIS_PASS.value, db=10)
    for repo in record[1]:
        redis_db.rpush(record[0], json.dumps(repo))
    # BUG FIX: the original debug line `print 'insert', redis_db.rpop(...)`
    # popped -- i.e. DELETED -- the last element it had just pushed, silently
    # truncating every stored topic list. The destructive read is removed.
    return True
def write_terms(word):
    """Persist each (term, payload_dict) pair of a partition to Redis db 11.

    Despite its historical name, *word* is the partition iterator handed in
    by words_topic.foreachPartition(write_terms); the name is kept so the
    signature is unchanged.

    BUG FIX: the original body iterated an undefined name `records`, so any
    non-empty partition raised NameError and nothing was ever written; it now
    iterates the actual argument. The Python-2 debug print (which re-read
    every key from Redis) was dropped.
    """
    redis_db = redis.Redis(host=REDIS_IP.value, port=REDIS_PORT.value,
                           password=REDIS_PASS.value, db=11)
    for record in word:
        redis_db.set(record[0], json.dumps(record[1]))
def repo_topic(trans):
    """For each transformed LDA Row, store repo_name -> dominant topic index
    in Redis db 5 and return the (repo_name, topic_index) pairs.

    The dominant topic is the argmax of the row's topicDistribution vector.
    """
    redis_db = redis.Redis(host=REDIS_IP.value, port=REDIS_PORT.value,
                           password=REDIS_PASS.value, db=5)
    pairs = []
    for row in trans:
        repo = row.idd.encode('ascii')
        topic = row.topicDistribution.argmax()
        redis_db.set(repo, topic)
        pairs.append((repo, topic))
    return pairs
def write_topics(records):
    # Persist each (topic_id, payload) pair of a partition to Redis db 13.
    # Called via topics_json.foreachPartition in __main__.
    redis_db = redis.Redis(host=REDIS_IP.value, port=REDIS_PORT.value,password=REDIS_PASS.value, db=13)
    for record in records:
        redis_db.set(record[0], json.dumps(record[1]))
        # Debug echo of what was just stored (Python 2 print; re-reads the key).
        print 'insert', redis_db.get(record[0])
def findMax(record):
    """Return (word, topic, weight) for the strictly heaviest topic of *record*.

    *record* is (word, [(topic, weight), ...]). Ties keep the earlier topic,
    and an empty or all-nonpositive list yields (word, 0, 0) -- matching the
    original behaviour exactly.
    """
    word, topic_weights = record
    best_topic, best_weight = 0, 0
    for topic, weight in topic_weights:
        if weight > best_weight:
            best_topic, best_weight = topic, weight
    return (word, best_topic, best_weight)
if __name__ == "__main__":
    # Broadcast Redis credentials so executor-side functions can connect.
    REDIS_IP=sc.broadcast(cfg.REDIS_IP)
    REDIS_PORT=sc.broadcast(cfg.REDIS_PORT)
    REDIS_PASS =sc.broadcast(cfg.REDIS_PASS)
    redis_db = redis.Redis(host=cfg.REDIS_IP, port=cfg.REDIS_PORT,password=cfg.REDIS_PASS, db=1)
    # Retrieve all repository records from Redis db 1.
    keys = redis_db.keys()
    rawData = sc.parallelize(keys)
    data = rawData.mapPartitions(getValues)
    rawData.unpersist()
    # Prepare descriptions for LDA: parse, remove stop words, stem.
    docs = data.mapPartitions(process)
    docDF = sc_sql.createDataFrame(docs)
    docs.unpersist()
    res = StopWordsRemover(inputCol="words", outputCol="filtered").transform(docDF)
    df = res.drop("words")
    docDF.unpersist()
    stem = df.rdd.mapPartitions(lambda x :stem_words(x))
    df.unpersist()
    # Build a DataFrame of repos with their term-count feature vectors.
    df = sc_sql.createDataFrame(stem)
    Vector = CountVectorizer(inputCol="words", outputCol="features")
    model = Vector.fit(df)
    result = model.transform(df)
    # LDA topic modelling: 60 topics, EM optimizer.
    lda = LDA(k=60, maxIter=10,optimizer='em')
    ldaModel = lda.fit(result)
    transformed = ldaModel.transform(result)
    # Write (repo_name, topic_id) assignments to Redis (db 5) per partition.
    trans = transformed.rdd.mapPartitions(repo_topic)
    # Group results as (topic_id, [repo payloads]).
    ful_data = trans.join(data)
    topic_repo = ful_data.map(lambda x : (x[1][0],x[1][1]))
    topic_repos = topic_repo.groupByKey().map(lambda x : (x[0], list(x[1])))
    # Sort each topic's repos by star count (descending) and persist (db 10).
    sort_stars = topic_repos.map(lambda x : (x[0],sorted(x[1], key=lambda i:ast.literal_eval(i)['stars'],reverse=True)))
    sort_stars.foreach(write_stars)
    '''vocabulary'''
    # vocabArray maps CountVectorizer term indices back to terms; extraxt
    # (flatMapped below) closes over it as a module-level name.
    vocabArray = model.vocabulary
    topicIndices = ldaModel.describeTopics(200)
    print("The topics described by their top-weighted terms:")
    topicIndices.show(truncate=False)
    topics = ldaModel.topicsMatrix()
    # Expand topics into (topic, word, weight) triples.
    topic_ind = topicIndices.rdd.flatMap(extraxt)
    # For each word, keep only its heaviest topic, then persist (db 11).
    word_topic=topic_ind.map(lambda x : (x[1],(x[0],x[2]))).groupByKey().map(lambda x : (x[0], list(x[1]))).map(lambda x : findMax(x))
    words_topic=word_topic.map(lambda x : (x[0], {'topic':x[1],'weight':x[2]}))
    words_topic.foreachPartition(write_terms)
    # Group words per topic (with their max weight) and persist (db 13).
    topics_json = word_topic.map(lambda x: (x[1],{'word':x[0],'weight':x[2]})).groupByKey().map(lambda x : (x[0], list(x[1])))
    topics_json.foreachPartition(write_topics)
    sc.stop()
|
"""
Copyright 2021 Andrey Plugin (9keepa@gmail.com)
Licensed under the Apache License v2.0
http://www.apache.org/licenses/LICENSE-2.0
"""
import logging, hashlib, os
from config import BASE_DIR
def hash_(string):
    """Return the hex SHA-1 digest of *string* (encoded with the default UTF-8)."""
    digest = hashlib.sha1()
    digest.update(string.encode())
    return digest.hexdigest()
def listdir_fullpath(d):
    """Return the entries of directory *d* as paths joined with *d* (non-recursive)."""
    paths = []
    for entry in os.listdir(d):
        paths.append(os.path.join(d, entry))
    return paths
def log(name, filename=None):
    """Create (or fetch) the logger *name*, configured at DEBUG level.

    If *filename* is given, records go to ``Log/<filename>`` under BASE_DIR;
    otherwise they go to the console. The format includes the source line
    number to ease debugging.

    BUG FIX: the original attached a fresh handler on every call, so asking
    for the same logger twice duplicated every emitted record; a handler is
    now added only when the logger does not already have one.
    """
    logger = logging.getLogger(name)
    logger.setLevel( logging.DEBUG )
    if logger.handlers:
        # Already configured by a previous call -- reuse as-is.
        return logger
    if filename:
        ch = logging.FileHandler(os.path.join( BASE_DIR, "Log" , filename ))
    else:
        ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s : %(lineno)d : %(name)s : %(levelname)s : %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    return logger
"""
This module is used to add directories listed in various packages'
``test/path.txt`` file to the system path.
If imported from a directory with a file called ``path.txt``,
it will assume that it is inside of a packages ``test`` directory
and modify the system path to add those directories too.
This is done by calling ``add_path('.')`` at the end of this module.
In general, this package should not be used for loading modules.
Its purpose is to temporarily put python scripts into the system's path
so the scripts can be imported for documentation and unit tests.
If your package consists of modules that are regularly imported,
lay out your repository appropriately to live in the ``OpsSpace`` directory.
:author: Daniel Abercrombie <dabercro@mit.edu>
"""
import os
import sys
def add_path(package):
    """
    Add the test directories of *package* to Python's import path.

    Looks for ``../<package>/test/path.txt``; for every directory named in
    that file, ``../<package>/<dir>`` is prepended to ``sys.path`` (so lines
    at the bottom of the file end up loaded first). Does nothing when the
    listing file does not exist. Paths are resolved relative to the current
    working directory, which is expected to be ``OpsSpace/docs`` or a
    package's ``test`` directory.

    :param str package: Is the name of the package to load the test path for.
    """
    listing = '../{0}/test/path.txt'.format(package)
    if not os.path.exists(listing):
        return
    with open(listing, 'r') as handle:
        for line in handle:
            relative = '../{0}/{1}'.format(package, line.strip('\n'))
            sys.path.insert(0, os.path.abspath(relative))

add_path('.')
|
# -*- coding: utf-8 -*-
"""
1455. Check If a Word Occurs As a Prefix of Any Word in a Sentence
@link https://leetcode.com/problems/check-if-a-word-occurs-as-a-prefix-of-any-word-in-a-sentence/
"""
class Solution:
    def isPrefixOfWord(self, sentence: str, searchWord: str) -> int:
        """Return the 1-based index of the first word in *sentence* that
        starts with *searchWord*, or -1 when no word does.

        Words are the space-separated chunks of *sentence*.
        """
        for position, word in enumerate(sentence.split(' '), start=1):
            if word.startswith(searchWord):
                return position
        return -1
if __name__ == '__main__':
    # Smoke-test against the five LeetCode examples (expected: 4, 2, -1, 4, -1).
    print(Solution().isPrefixOfWord(sentence='i love eating burger', searchWord='burg'))
    print(Solution().isPrefixOfWord(sentence='this problem is an easy problem', searchWord='pro'))
    print(Solution().isPrefixOfWord(sentence='i am tired', searchWord='you'))
    print(Solution().isPrefixOfWord(sentence='i use triple pillow', searchWord='pill'))
    print(Solution().isPrefixOfWord(sentence='hello from the other side', searchWord='they'))
|
import random
import numpy as np
import pandas as pd
# NOTE(review): `global` at module level is a no-op; these lines only document
# the names the Servers methods share via module-level globals.
global s_state
# global vm_state
global num_servers
global s_info
global price_cal
# Print full numpy arrays (no truncation) when debugging state dumps.
np.set_printoptions(threshold=np.inf)
class Servers(object):
    """RL environment modelling task placement on a pool of servers.

    State is kept in module-level globals (s_state, s_info, ...) shared by
    all methods. s_state has shape (num_servers, 10, 10): per server, 10
    queue slots, each a 10-element task record; -1.0 marks an empty field.
    Task-record indices used below: [0]=start marker, [1]/[0] give duration,
    [2]=CPU demand, [3]=RAM demand, [4]=deadline, [5]=scheduled start,
    [6]=scheduled end, [8]=time-calculated checkpoint -- inferred from usage,
    TODO confirm against the task generator.
    """
    def __init__(self):
        super(Servers, self).__init__()
        # Feature count per task record and per-server queue capacity.
        self.n_features = 10
        self.num_task_limit = 10
    def price_model(self, time_start, time_end, cpu_usage):
        """Price a task running from time_start to time_end at cpu_usage.

        Uses a 24-slot hourly tariff (price[h]); below 0.7 utilisation cost
        is linear in cpu_usage, above it a quadratic penalty term is added.
        Fractional first/last hours are pro-rated.
        """
        a = 0.5
        b = 10
        price = [0.12, 0.156, 0.165, 0.117, 0.194, 0.192,
                 0.318, 0.266, 0.326, 0.293, 0.388, 0.359,
                 0.447, 0.478, 0.513, 0.491, 0.457, 0.506,
                 0.640, 0.544, 0.592, 0.486, 0.499, 0.292]
        total_price = 0
        time_start_24 = int(time_start) % 24
        time_end_24 = int(time_end) % 24
        # Calculate price according to the utilisation regime.
        if cpu_usage < 0.7:
            total_price += ((time_start_24 + 1 - time_start) / 1.0) * (cpu_usage * a) * price[time_start_24]
            total_price += ((time_end - time_end_24) / 1.0) * (cpu_usage * a) * price[time_end_24]
            for i in range(int(time_start)+1, int(time_end)):
                total_price += (cpu_usage * a) * price[i%24]
            return total_price
        else:
            total_price += ((time_start_24 + 1 - time_start) / 1.0) * (0.7 * a + b * (cpu_usage - 0.7) * (cpu_usage - 0.7)) * price[time_start_24]
            total_price += ((time_end - time_end_24) / 1.0) * (0.7 * a + b * (cpu_usage - 0.7) * (cpu_usage - 0.7)) * price[time_end_24]
            for i in range(int(time_start)+1, int(time_end)):
                total_price += (0.7 * a + b * (cpu_usage - 0.7) * (cpu_usage - 0.7)) * price[i%24]
            return total_price
    def server_state(self, server_info):
        """(Re)initialise the global environment state.

        Builds an all-empty (len(server_info), 10, 10) array of -1.0 and
        records server_info (per-server capacities) for later use.
        """
        global s_state
        global vm_state
        global num_servers
        global s_info
        num_servers = len(server_info)
        s_state = [[[-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0] for j in range(10)] for i in range(len(server_info))]
        s_state = np.array(s_state)
        s_info = server_info
        return s_state
    def server_step(self, task_info, action1): #action (server index, queue index)
        """Tentatively place task_info on server action1 and score the placement.

        Returns (current per-server CPU/RAM usage, reward, usage after
        placement). Reward favours per-resource utilisation in (0.2, 0.8)
        and penalises overload (> 1).
        """
        # global price_cal
        global CPU_used
        global RAM_used
        global s_state
        # NOTE(review): the (19, 1) shape hard-codes 19 servers; server_state
        # sizes s_state from server_info -- confirm these stay consistent.
        CPU_used = np.zeros((19,1))
        RAM_used = np.zeros((19,1))
        # Current CPU utilisation per server, summed over occupied slots.
        for i in range(len(s_state)):
            for j in range(len(s_state[i])):
                # print(s_state[i][j][2])
                if s_state[i][j][2] != -1:
                    CPU_used[i] += s_state[i][j][2]
                    RAM_used[i] += s_state[i][j][3]
        Resource_used = np.hstack((CPU_used, RAM_used))
        CPU_used[action1] += task_info[2]
        RAM_used[action1] += task_info[3]
        Resource_used_ = np.hstack((CPU_used, RAM_used))
        if CPU_used[action1] > 0.2 and CPU_used[action1] < 0.8:
            reward_CPU1 = 1
        elif CPU_used[action1] > 1:
            reward_CPU1 = -2
        else:
            reward_CPU1 = -1
        if RAM_used[action1] > 0.2 and RAM_used[action1] < 0.8:
            reward_RAM1 = 1
        elif RAM_used[action1] > 1:
            reward_RAM1 = -2
        else:
            reward_RAM1 = -1
        reward1 = reward_CPU1 + reward_RAM1
        return Resource_used, reward1, Resource_used_
    def vm_step(self, task_info, action1, action2):#task should be global
        """Place task_info into queue slot action2 of server action1 and
        price the resulting schedule of that server's queue.

        Returns (flattened CPU/RAM view before, reward2 = total price of the
        simulated queue drain, flattened CPU/RAM view after).
        NOTE(review): vm_state and vm_state_ alias the same s_state slice
        (no copy), so "before" and "after" views are the same data -- confirm
        this is intended.
        """
        global s_state
        vm_state = s_state[action1]
        vm_state_ = s_state[action1]
        for i in range(10): # number of task-record fields
            vm_state_[action2][i] = task_info[i] #
        vm_reward = vm_state_
        # Pop tasks whose end time is earlier than the incoming task's
        # time-calculated checkpoint; otherwise stamp the checkpoint.
        for i in range(len(vm_reward)):
            if vm_reward[i][6] < task_info[5]:
                for j in range(10):
                    vm_reward[i][j] = -1
            else:
                vm_reward[i][8] = task_info[5]
        # Sort the queue by finish time (lexsort on leading columns).
        vm_reward = vm_reward[np.lexsort(vm_reward[:,:6:].T)]
        # Accumulate the resource currently in use across occupied slots.
        CPU_used = 0
        RAM_used = 0
        for i in range(len(vm_reward)):
            if vm_reward[i][2] != -1:
                CPU_used += vm_reward[i][2]
                RAM_used += vm_reward[i][3]
        # Drain the queue in finish-time order, pricing each segment.
        reward2 = 0
        for i in range(len(vm_reward)):
            if vm_reward[i][0] == -1:
                continue
            else:
                reward2 += self.price_model(vm_reward[i][8], vm_reward[i][6], (CPU_used / s_info[i][1]))
                CPU_used -= vm_reward[i][2]
                for j in range(i+1, len(vm_reward)):
                    vm_reward[j][8] = vm_reward[i][6]
                for j in range(10):
                    vm_reward[i][j] = -1
        trans_vm_state = vm_state[:, 2:4]
        trans_vm_state_ = vm_state_[:, 2:4]
        trans_vm_state = trans_vm_state.reshape(-1)
        trans_vm_state_ = trans_vm_state_.reshape(-1)
        # for i in range(1, 9):
        #     trans_vm_state = np.hstack((trans_vm_state, vm_state[i, 2:4]))
        #     trans_vm_state_ = np.hstack((trans_vm_state, vm_state[i, 2:4]))
        return trans_vm_state, reward2, trans_vm_state_
    def time_step(self, task_info, action1, action2, action3):
        """Commit task_info to slot (action1, action2) with delay action3.

        Shifts the task's scheduled start/end by action3 (serialising after
        the slot's current occupant when needed), updates the global state,
        simulates draining all queues to price the schedule ("right of the
        red line", reward3) and the already-elapsed portion ("left of the
        red line", price_cal). Returns
        (state, flattened before-view, reward3, price_cal, flattened
        after-view); reward3 is -1 on capacity/deadline violation, otherwise
        1/total_price.
        NOTE(review): temp_s_state/reward_state/money_state all alias s_state
        (no copies), so the "before"/"after" returns reflect the same mutated
        array -- confirm intended.
        """
        global s_state
        if s_state[action1][action2][0] == -1:
            task_info[5] += action3
            task_info[6] += action3
        else:
            if task_info[5] < s_state[action1][action2][6]:
                task_info[5] = action3 + s_state[action1][action2][6]
                task_info[6] = task_info[5] + task_info[1] - task_info[0]
            else:
                task_info[5] += action3
                task_info[6] += action3
        # Final updating of s_state.
        temp_s_state = s_state
        for i in range(len(temp_s_state)):
            for j in range(len(temp_s_state[i])):
                # If finish time is earlier than start time of incoming task.
                if temp_s_state[i][j][6] != -1:
                    if temp_s_state[i][j][6] < task_info[5]:
                        # Pop the old tasks.
                        for k in range(10):
                            temp_s_state[i][j][k] = -1
                    else:
                        temp_s_state[i][j][8] = task_info[5]
        # Put the incoming task into server/vm slot.
        for i in range(10):
            temp_s_state[action1][action2][i] = task_info[i]
        # Next state, needs to be returned.
        s_state_ = temp_s_state
        # This is the state used to calculate total price.
        reward_state = temp_s_state
        # Start of reward state: cut the time_calculated of other running tasks.
        for i in range(len(reward_state)):
            for j in range(len(reward_state[i])):
                if reward_state[i][j][0] != -1:
                    reward_state[i][j][8] = task_info[5]
        # Pop tasks whose end is earlier than the incoming task's checkpoint.
        for i in range(len(reward_state)):
            for j in range(len(reward_state[i])):
                if reward_state[i][j][6] < task_info[5]:
                    for k in range(10):
                        reward_state[i][j][k] = -1
                else:
                    reward_state[i][j][8] = task_info[5]
        # Calculate the money on the right hand side of the red line.
        reward3 = 0
        CPU_used = 0
        RAM_used = 0
        for i in range(len(reward_state[action1])):  # original note: "add a condition" (jia ge tiao jian)
            if reward_state[action1][i][2] != -1:
                CPU_used += reward_state[action1][i][2]
                RAM_used += reward_state[action1][i][3]
        if CPU_used > s_info[action1][1] or RAM_used > s_info[action1][2] or task_info[6]>task_info[4]:
            reward3 = -1
        else:
            # Price each server's queue drain.
            for i in range(len(reward_state)):
                # Sort by finish time, ascending (shengxu).
                reward_state[i] = reward_state[i][np.lexsort(reward_state[i][:,:6:].T)]
                # Resource currently used on the i-th server.
                CPU_used = 0
                RAM_used = 0
                for j in range(len(reward_state[i])):
                    if reward_state[i][j][2] != -1:
                        CPU_used += reward_state[i][j][2]
                        RAM_used += reward_state[i][j][3]
                # Calculate reward.
                for j in range(len(reward_state[i])):
                    if reward_state[i][j][0] == -1:
                        continue
                    else:#
                        reward3 += self.price_model(reward_state[i][j][8], reward_state[i][j][6], (CPU_used / s_info[i][1]))
                        # print(reward3)
                        CPU_used -= reward_state[i][j][2]
                        for k in range(j+1, len(reward_state[i][j])):
                            reward_state[i][k][8] = reward_state[i][j][6]
                        for k in range(10):
                            reward_state[i][j][k] = -1
        # Total price on the left hand side of the red line.
        # server_index = action1
        # queue_index = action2
        money_state = s_state
        # Calculate resource used.
        price_cal = 0
        # if task_info[4] < task_info[6]:
        #     reward = -2
        # for i in range(len(money_state)):
        #     CPU_used = 0
        #     RAM_used = 0
        #     for j in range(len(money_state[i])):
        #         if money_state[i][j][2] != -1:
        #             CPU_used += money_state[i][j][2]
        #             RAM_used += money_state[i][j][3]
        # for i in range(len(money_state)):
        #     temp_server = np.array(money_state[i])
        #     temp_server = temp_server[np.lexsort(temp_server[:,:6:].T)]
        #     money_state[i] = temp_server
        # NOTE(review): the next line uses the stale loop index `i` left over
        # from the loops above, so it sorts only ONE server's queue; it looks
        # like a leftover from the commented-out loop just above -- presumably
        # it should live inside the following `for i ...` loop. Confirm.
        money_state[i] = money_state[i][np.lexsort(money_state[i][:,:6:].T)]
        for i in range(len(money_state)):
            CPU_used = 0
            RAM_used = 0
            for j in range(len(money_state[i])):
                if money_state[i][j][2] != -1:
                    CPU_used += money_state[i][j][2]
                    RAM_used += money_state[i][j][3]
            for j in range(len(money_state[i])):
                if money_state[i][j][6] !=-1:
                    if money_state[i][j][6] <= task_info[5]:#
                        latest_endtime = money_state[i][j][6]
                        price_cal += self.price_model(money_state[i][j][8], money_state[i][j][6], (CPU_used / s_info[i][1]))
                        CPU_used -= money_state[i][j][2]
                        RAM_used -= money_state[i][j][3]
                        for k in range(j+1, 10):# 10 is size of queue
                            money_state[i][k][8] = money_state[i][j][6]
                        for k in range(10):
                            money_state[i][j][k] = -1
                    else:
                        price_cal += self.price_model(money_state[i][j][8], task_info[5], (CPU_used / s_info[i][1]))
                        for k in range(j,10):# 10 is size of queue
                            money_state[i][k][8] = task_info[5]
                        break
        trans_s_state = s_state[action1][:, 2:4]
        trans_s_state_ = s_state_[action1][:, 2:4]
        trans_s_state = trans_s_state.reshape(-1)
        trans_s_state_ = trans_s_state_.reshape(-1)
        if reward3 < 0:
            return s_state, trans_s_state, reward3, price_cal, trans_s_state_
        else:
            # Invert so cheaper schedules yield larger rewards.
            reward3 = 1 / reward3
            return s_state_, trans_s_state, reward3, price_cal, trans_s_state_
|
#!/usr/bin/python
from sys import argv
import subprocess
# Create Windows<k> folders and copy the target file into each.
replica = argv
# NOTE(review): argv[0] is this script's own path, so the script copies
# ITSELF into the folder; if the intent was to replicate a file passed on
# the command line, this should be replica[1]. Confirm intent.
name = str(replica[0])
print(name)
# NOTE(review): range(0,1) runs exactly once (only Windows0 is created);
# presumably the upper bound was meant to be a CLI argument.
for x in range(0,1):
    folderName = 'Windows' +str(x)
    subprocess.call(['mkdir',folderName])
    subprocess.call(['cp', name,folderName])
|
import pandas as pd
import numpy as np
import sys
# transactions=pd.read_csv("transactions_data.csv",header=None)
# space=pd.read_csv('fixture_data.csv',header=0,dtype={'Store': object},skiprows=[1])
# futureSpace=pd.read_csv('futureSpace_data.csv',header=0,dtype={'Store': object},skiprows=[1])
# brandExit=pd.read_csv('exit_data.csv',header=0,skiprows=[1])
def ksMerge(optimizationType,transactions,space,brandExit=None,futureSpace=None):
    """Merge space, transaction, brand-exit and future-space inputs into the
    long-format master table used downstream.

    Parameters
    ----------
    optimizationType : str -- only 'tiered' defines the brandExitMung helper.
    transactions : wide DataFrame; metric columns repeat every 9 columns per
        category (row 0 = category names, row 1 = metric names) -- inferred
        from the slicing below, TODO confirm against the raw CSV layout.
    space : per-store space by category (with 'Store', 'Climate', 'VSG').
    brandExit : optional wide table of store ids per exited category.
    futureSpace : optional per-store future/entry space table.

    Returns
    -------
    (masterData, mergeTrad) : the merged long table and a copy with
    brand-exited rows zeroed from column 5 onward.
    """
    # Normalise a known column-name typo ('VSG ' with trailing space).
    space.rename(columns={'VSG ': 'VSG'}, inplace=True)
    if optimizationType == 'tiered':
        def brandExitMung(df, Stores, Categories):
            # Build a 0/1 store x category exit-flag matrix: 1 when the store
            # id appears in that category's exit list.
            brand_exit = pd.DataFrame(index=Stores, columns=Categories)
            for (i, Store) in enumerate(Stores):
                for (j, Category) in enumerate(Categories):
                    if int(Store) in df[Category].unique():
                        brand_exit[Category].iloc[i] = 1
                    else:
                        brand_exit[Category].iloc[i] = 0
            return brand_exit
    Stores = space['Store']
    # Metric names live in row 1; categories in row 0 of every 9th column.
    Metrics = transactions.loc[1, 1:9].reset_index(drop=True)
    Categories = transactions[[*np.arange(len(transactions.columns))[1::9]]].loc[0].reset_index(drop=True).values.astype(
        str)
    # Long format: one row per (Store, Category) with the historical space.
    spaceData = pd.melt(space, id_vars=['Store', 'Climate', 'VSG'], var_name='Category', value_name='Historical Space')
    spaceData['Current Space'] = spaceData['Historical Space']
    print(spaceData.columns)
    def longTransaction(df, storeList, categories):
        # Melt one metric's block of the wide transactions table into long
        # (Store, Category, <metric>) form; the metric name is read from row 1.
        df.loc[0, :] = categories
        df = pd.concat([storeList, df], axis=1)
        df.columns = df.loc[0,]
        lPiece = pd.melt(df[2::], id_vars=['Store'], var_name='Category',
                         value_name=pd.unique(df.loc[1].dropna().values)[0])
        return lPiece
    # Outer-join every metric onto the space table.
    masterData = spaceData
    for (m, Metric) in enumerate(Metrics):
        masterData = pd.merge(left=masterData,
                              right=longTransaction(transactions.loc[:, int(m + 1)::9], pd.DataFrame(transactions[0]),
                                                    Categories), on=['Store', 'Category'], how='outer')
    # Per-store total current space.
    storeTotal = pd.DataFrame(masterData.groupby('Store')['Current Space'].sum()).reset_index()
    storeTotal.columns = ['Store', 'Store Space']
    storeTotal = storeTotal.sort_values(by='Store').reset_index(drop=True)
    if futureSpace is None:
        print("we don't have future space")
        # No future-space input: future == current, no entry space.
        storeTotal['Future Space']=storeTotal['Store Space']
        storeTotal['Entry Space']=0
        storeTotal['New Space'] = storeTotal['Store Space'] - storeTotal['Entry Space']
        masterData=pd.merge(masterData,storeTotal,on=['Store'])
    else:
        print('we have future space')
        futureSpace = futureSpace.sort_values(by='Store').reset_index(drop=True)
        futureSpace=pd.merge(storeTotal,futureSpace,on=['Store'],how='inner')
        print('in future space loop')
        futureSpace['Entry Space'].fillna(0,inplace=True)
        # Backfill missing/zero future space with the store's current total.
        # NOTE(review): chained `.loc[i] =` assignment may not write through on
        # modern pandas (SettingWithCopy) -- verify this loop actually mutates.
        for (i,Store) in enumerate(Stores):
            futureSpace['Future Space'].loc[i] = storeTotal['Store Space'].loc[i] if pd.to_numeric(futureSpace['Future Space']).loc[i] == 0 or pd.isnull(pd.to_numeric(futureSpace['Future Space'])).loc[i] else futureSpace['Future Space'].loc[i]
        futureSpace['New Space'] = futureSpace['Future Space'] - futureSpace['Entry Space']
        masterData=pd.merge(masterData,futureSpace,on=['Store','VSG','Climate'])
    masterData = masterData.sort_values(by=['Store', 'Category']).reset_index(drop=True)
    if brandExit is None:
        masterData['Exit Flag'] = 0
        mergeTrad = masterData.copy()
    else:
        mergeTrad = masterData.copy()
        # Long-format exit flags; zero out exited rows from column 5 onward
        # in the traditional copy only.
        # NOTE(review): brandExitMung is only defined when
        # optimizationType == 'tiered'; any other type with brandExit set
        # raises NameError here -- confirm callers never do that.
        brandExit=pd.melt(brandExitMung(brandExit,Stores,Categories).reset_index(),id_vars=['Store'],var_name='Category',value_name='Exit Flag')
        brandExit=brandExit.sort_values(by=['Store','Category']).reset_index(drop=True)
        for i in range(0,len(mergeTrad)):
            if brandExit['Exit Flag'].loc[i] == 1:
                mergeTrad.loc[i,5::]=0
        masterData=pd.merge(masterData,brandExit,on=['Store','Category'],how='inner')
        mergeTrad=pd.merge(mergeTrad,brandExit,on=['Store','Category'],how='inner')
        print('There are ' + str(len(masterData[masterData['Exit Flag'] == 1])) + ' brand exits')
    # masterData.to_csv('mergedData.csv',sep=',',index=False)
    # Coerce everything numeric where possible before returning.
    masterData=masterData.apply(lambda x: pd.to_numeric(x, errors='ignore'))
    mergeTrad = mergeTrad.apply(lambda x: pd.to_numeric(x, errors='ignore'))
    print('Finished Data Merging')
    # masterData.to_csv('MacroMerge.csv',sep=',')
    # input('Stop')
    return (masterData,mergeTrad)
# Generated by Django 2.0.4 on 2018-04-10 23:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the GesHotel app (auto-generated by Django 2.0.4).

    NOTE(review): generated migrations should not be hand-edited once
    applied; see the inline note on Table.num_table for a suspected typo to
    fix in the model (with a follow-up migration) rather than here.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Chambre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('num_chambre', models.IntegerField(null=True, verbose_name='Numero Chambre')),
                ('prix_nuit', models.FloatField(verbose_name='Prix Nuit')),
                ('nbr_places', models.IntegerField(null=True, verbose_name='Nombre de Places')),
                ('status_chambre', models.BooleanField(default=True, verbose_name='Status Chambre')),
            ],
        ),
        migrations.CreateModel(
            name='ChefReception',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('num_piece', models.CharField(max_length=200, verbose_name='Numero de Piece')),
                ('nom', models.CharField(max_length=100, verbose_name='Nom Receptionist')),
                ('prenom', models.CharField(max_length=200, verbose_name='Prenom')),
                ('tel', models.CharField(max_length=8, unique=True, verbose_name='Téléphone')),
                ('email', models.EmailField(max_length=100, unique=True, verbose_name='E-mail')),
                ('statut_recep', models.BooleanField(default=True, verbose_name='Statut du chef Receptionist')),
            ],
        ),
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('num_piece', models.CharField(max_length=200, unique=True, verbose_name='Numero de Piece')),
                ('nom_cli', models.CharField(max_length=100, verbose_name='Nom Receptionist')),
                ('prenom_cli', models.CharField(max_length=250, verbose_name='Prenom')),
                ('tel_cli', models.CharField(max_length=8, verbose_name='Téléphone')),
                ('email', models.EmailField(max_length=100, verbose_name='E-mail')),
                ('statut', models.BooleanField(default=False, verbose_name='Statut Client')),
                ('date_inscrip', models.DateTimeField(auto_now_add=True, verbose_name='Date INscription')),
                ('nationalite_cli', models.CharField(max_length=200, verbose_name='Nationalité')),
            ],
        ),
        migrations.CreateModel(
            name='EtatChambre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('etat_chambre', models.CharField(max_length=150, unique=True, verbose_name='Etat Chambre')),
                ('statut', models.BooleanField(default=True, verbose_name='Status Chambre')),
            ],
        ),
        migrations.CreateModel(
            name='EtatSalle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('etat', models.CharField(max_length=100, verbose_name='Etat Salle')),
                ('statut_salle', models.BooleanField(default=True, verbose_name='Statut Salle')),
            ],
        ),
        migrations.CreateModel(
            name='EtatTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('etat', models.CharField(max_length=100, verbose_name='Etat Table')),
                ('statut_table', models.BooleanField(default=False, verbose_name='Statut Table')),
            ],
        ),
        migrations.CreateModel(
            name='Facture',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_facture', models.DateTimeField(auto_now_add=True, verbose_name='Date INscription')),
                ('montant', models.IntegerField(null=True, verbose_name='Montant facture')),
                ('statut', models.BooleanField(default=False, verbose_name='Statut facture')),
                ('clients', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Payer', to='GesHotel.Client')),
            ],
        ),
        migrations.CreateModel(
            name='Hotel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nom_hotel', models.CharField(max_length=200, verbose_name='Nom Hotel')),
            ],
        ),
        migrations.CreateModel(
            name='MaitreHotel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('num_piece', models.CharField(max_length=200, verbose_name='Numero de Piece')),
                ('nom', models.CharField(max_length=100, verbose_name='Nom Maitre Hotel')),
                ('prenom', models.CharField(max_length=250, verbose_name='Prenom Maitre Hotel')),
                ('tel', models.CharField(max_length=8, verbose_name='Téléphone')),
                ('email', models.EmailField(max_length=100, verbose_name='E-mail')),
                ('statut_maitr', models.BooleanField(default=True, verbose_name="Statut du Maitre d'Hotel")),
            ],
        ),
        migrations.CreateModel(
            name='PrixChambreRepo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('prix_repos_heure', models.FloatField(verbose_name='Prix Repos /Heur')),
                ('heure', models.IntegerField(null=True, verbose_name='Heure')),
                ('chambres', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='Couter', to='GesHotel.Chambre')),
            ],
        ),
        migrations.CreateModel(
            name='Receptionnist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('num_piece', models.CharField(max_length=200, verbose_name='Numero de Piece')),
                ('nom', models.CharField(max_length=100, verbose_name='Nom Receptionist')),
                ('prenom', models.CharField(max_length=250, verbose_name='Prenom')),
                ('tel', models.CharField(max_length=8, verbose_name='Téléphone')),
                ('email', models.EmailField(max_length=100, unique=True, verbose_name='E-mail')),
                ('statut_recep', models.BooleanField(default=True, verbose_name='Statut du Receptionist')),
            ],
        ),
        migrations.CreateModel(
            name='RelaisDomotique',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('conso_electricite', models.IntegerField(verbose_name='Consomation Electricité')),
                ('statut_relait', models.BooleanField(default=False, verbose_name='Statut Relais')),
                ('chambre', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='COnso_1', to='GesHotel.Chambre')),
                ('clients', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='COnso_2', to='GesHotel.Client')),
            ],
        ),
        migrations.CreateModel(
            name='ReserverChambre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_arriver', models.DateTimeField(auto_now_add=True, verbose_name='Date Arriver')),
                ('date_sorti', models.DateTimeField(auto_now_add=True, verbose_name='Date Sortie')),
                ('date_reservation', models.DateTimeField(auto_now_add=True, verbose_name='Date Reservation')),
                ('statut_reservation', models.BooleanField(default=False, verbose_name='Statut Reservation')),
                ('chambres', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Reserver_chambre', to='GesHotel.Chambre')),
                ('maitrehotels', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Gerer_chambre', to='GesHotel.MaitreHotel')),
                ('receptionnists', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Reserver_chambre', to='GesHotel.Receptionnist')),
            ],
        ),
        migrations.CreateModel(
            name='ReserverSalle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nbr_personnes', models.IntegerField(null=True, verbose_name='Nombe Personnes')),
                ('date_reservation', models.DateTimeField(auto_now_add=True, verbose_name='Date Reservation')),
                ('date_commande_creer', models.DateTimeField(auto_now_add=True, verbose_name='Date Creation Commande')),
                ('statut_reservation', models.BooleanField(default=False, verbose_name='Statut Reservation')),
                ('clients', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Reserver_salle', to='GesHotel.Client')),
                ('receptionnists', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Reserver_salle', to='GesHotel.Receptionnist')),
            ],
        ),
        migrations.CreateModel(
            name='ReserverTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nbr_personnes', models.IntegerField(null=True, verbose_name='Nombe Personnes')),
                ('date_reservation', models.DateTimeField(auto_now_add=True, verbose_name='Date Reservation')),
                ('date_commande_creer', models.DateTimeField(auto_now_add=True, verbose_name='Date Creation Commande')),
                ('statut_reservation', models.BooleanField(default=False, verbose_name='Statut Reservation')),
                ('receptionnists', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Reserver_table', to='GesHotel.Receptionnist')),
            ],
        ),
        migrations.CreateModel(
            name='Salle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tarif', models.FloatField()),
                ('statut_table', models.BooleanField(default=False, verbose_name='Statut Table')),
                ('etatsalles', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Etre_dans', to='GesHotel.EtatSalle')),
            ],
        ),
        migrations.CreateModel(
            name='Table',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): null=0 is falsy, i.e. equivalent to null=False --
                # almost certainly a typo for null=True (sibling count fields use
                # null=True); correct it in the model with a follow-up migration.
                ('num_table', models.IntegerField(null=0, unique=True, verbose_name='Numero Table')),
                ('nbr_palces', models.IntegerField(null=True, verbose_name='Nombre de Places')),
                ('statut_table', models.BooleanField(default=False, verbose_name='Statut Table')),
                ('tarif', models.FloatField(verbose_name='Tarif')),
                ('etattable', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Etre_dans', to='GesHotel.EtatTable')),
                ('maitrehotel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Gerer_Table', to='GesHotel.MaitreHotel')),
            ],
        ),
        migrations.CreateModel(
            name='TypeChambre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type_chambre', models.CharField(max_length=150, unique=True, verbose_name='Type Chambre')),
                ('statut', models.BooleanField(default=True, verbose_name='Status Chambre')),
            ],
        ),
        migrations.CreateModel(
            name='TypePieceClient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type_piece', models.CharField(max_length=200, unique=True, verbose_name='Type Piece')),
                ('statut', models.BooleanField(default=True, verbose_name='Statut Pièce')),
            ],
        ),
        migrations.CreateModel(
            name='TypeSalle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(max_length=100, unique=True, verbose_name='Type Salle')),
                ('statut_type', models.BooleanField(default=False, verbose_name='Statut Type')),
            ],
        ),
        migrations.AddField(
            model_name='salle',
            name='typesalles',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Constituer', to='GesHotel.TypeSalle'),
        ),
        migrations.AddField(
            model_name='reservertable',
            name='salles',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Reserver_table', to='GesHotel.Salle'),
        ),
        migrations.AddField(
            model_name='reservertable',
            name='tables',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Reserver_table', to='GesHotel.Table'),
        ),
        migrations.AddField(
            model_name='reserversalle',
            name='salles',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Reserver_salle', to='GesHotel.Salle'),
        ),
        migrations.AddField(
            model_name='reserverchambre',
            name='salles',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Reserver_chambre', to='GesHotel.Salle'),
        ),
        migrations.AddField(
            model_name='client',
            name='typepiececlient',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Appartenir', to='GesHotel.TypePieceClient'),
        ),
        migrations.AddField(
            model_name='chambre',
            name='chefreception',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='Gerer', to='GesHotel.ChefReception'),
        ),
        migrations.AddField(
            model_name='chambre',
            name='etatchambres',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Voir', to='GesHotel.EtatChambre'),
        ),
        migrations.AddField(
            model_name='chambre',
            name='hotel',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Gerer', to='GesHotel.Hotel'),
        ),
        migrations.AddField(
            model_name='chambre',
            name='typechambre',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Avoir', to='GesHotel.TypeChambre'),
        ),
    ]
|
import copy
import warnings
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Polygon
from ..utils import geometry, import_optional_dependency
from ..utils.geospatial_utils import GeoSpatialUtil
from . import plotutil
warnings.simplefilter("always", PendingDeprecationWarning)
class PlotCrossSection:
"""
Class to create a cross sectional plot of a model.
Parameters
----------
ax : matplotlib.pyplot axis
The plot axis. If not provided it, plt.gca() will be used.
model : flopy.modflow object
flopy model object. (Default is None)
modelgrid : flopy.discretization.Grid object
can be a StructuredGrid, VertexGrid, or UnstructuredGrid object
line : dict
Dictionary with either "row", "column", or "line" key. If key
is "row" or "column" key value should be the zero-based row or
column index for cross-section. If key is "line" value should
be an array of (x, y) tuples with vertices of cross-section.
Vertices should be in map coordinates consistent with xul,
yul, and rotation.
extent : tuple of floats
(xmin, xmax, ymin, ymax) will be used to specify axes limits. If None
then these will be calculated based on grid, coordinates, and rotation.
geographic_coords : bool
boolean flag to allow the user to plot cross section lines in
geographic coordinates. If False (default), cross section is plotted
as the distance along the cross section line.
"""
def __init__(
    self,
    model=None,
    modelgrid=None,
    ax=None,
    line=None,
    extent=None,
    geographic_coords=False,
):
    """Set up the cross section: resolve a grid, project the requested
    line onto it, and precompute the per-cell vertices/elevations that
    every plot method reuses.  See the class docstring for parameters.
    """
    self.ax = ax
    self.geographic_coords = geographic_coords
    self.model = model
    # A grid must come from either the explicit modelgrid or the model.
    if modelgrid is not None:
        self.mg = modelgrid
    elif model is not None:
        self.mg = model.modelgrid
    else:
        raise Exception("Cannot find model grid")
    # elevations are required to build the cross-section polygons
    if self.mg.top is None or self.mg.botm is None:
        raise AssertionError("modelgrid top and botm must be defined")
    if not isinstance(line, dict):
        raise AssertionError("A line dictionary must be provided")
    # normalize keys so "Row"/"ROW" etc. all work
    line = {k.lower(): v for k, v in line.items()}
    # exactly one of "row", "column", "line" may be given
    if len(line) != 1:
        s = (
            "only row, column, or line can be specified in line "
            "dictionary keys specified: "
        )
        for k in line.keys():
            s += f"{k} "
        raise AssertionError(s)
    if ax is None:
        self.ax = plt.gca()
    else:
        self.ax = ax
    onkey = list(line.keys())[0]
    self.__geographic_xpts = None
    # un-translate model grid into model coordinates
    xcellcenters, ycellcenters = geometry.transform(
        self.mg.xcellcenters,
        self.mg.ycellcenters,
        self.mg.xoffset,
        self.mg.yoffset,
        self.mg.angrot_radians,
        inverse=True,
    )
    # pad irregular cell patches so every cell has a uniform vertex count
    xverts, yverts = self.mg.cross_section_vertices
    (
        xverts,
        yverts,
    ) = plotutil.UnstructuredPlotUtilities.irregular_shape_patch(
        xverts, yverts
    )
    # cell vertices in local (model) coordinates
    self.xvertices, self.yvertices = geometry.transform(
        xverts,
        yverts,
        self.mg.xoffset,
        self.mg.yoffset,
        self.mg.angrot_radians,
        inverse=True,
    )
    if onkey in ("row", "column"):
        # build a 2-point line through the requested row/column center;
        # eps extends the line slightly beyond the grid edges so the
        # intersection test catches the boundary cells
        eps = 1.0e-4
        xedge, yedge = self.mg.xyedges
        if onkey == "row":
            self.direction = "x"
            ycenter = ycellcenters.T[0]
            pts = [
                (xedge[0] - eps, ycenter[int(line[onkey])]),
                (xedge[-1] + eps, ycenter[int(line[onkey])]),
            ]
        else:
            self.direction = "y"
            xcenter = xcellcenters[0, :]
            pts = [
                (xcenter[int(line[onkey])], yedge[0] + eps),
                (xcenter[int(line[onkey])], yedge[-1] - eps),
            ]
    else:
        # arbitrary line: validate, convert to local coords, and pick
        # the dominant axis as the projection direction
        ln = line[onkey]
        if not PlotCrossSection._is_valid(ln):
            # NOTE(review): f-string has no placeholders; plain string
            # literal would do
            raise ValueError(f"Invalid line representation")
        gu = GeoSpatialUtil(ln, shapetype="linestring")
        verts = gu.points
        xp = []
        yp = []
        for [v1, v2] in verts:
            xp.append(v1)
            yp.append(v2)
        xp, yp = self.mg.get_local_coords(xp, yp)
        if np.max(xp) - np.min(xp) > np.max(yp) - np.min(yp):
            # this is x-projection and we should buffer x by small amount
            idx0 = np.argmax(xp)
            idx1 = np.argmin(xp)
            idx2 = np.argmax(yp)
            xp[idx0] += 1e-04
            xp[idx1] -= 1e-04
            yp[idx2] += 1e-03
            self.direction = "x"
        else:
            # this is y-projection and we should buffer y by small amount
            idx0 = np.argmax(yp)
            idx1 = np.argmin(yp)
            idx2 = np.argmax(xp)
            yp[idx0] += 1e-04
            yp[idx1] -= 1e-04
            xp[idx2] += 1e-03
            self.direction = "y"
        pts = [(xt, yt) for xt, yt in zip(xp, yp)]
    self.pts = np.array(pts)
    # dict keyed by node number -> intersection points with that cell
    self.xypts = plotutil.UnstructuredPlotUtilities.line_intersect_grid(
        self.pts, self.xvertices, self.yvertices
    )
    if len(self.xypts) < 2:
        if len(list(self.xypts.values())[0]) < 2:
            # NOTE(review): self.xypts.values()[0] below raises
            # TypeError on py3 (dict_values is not subscriptable);
            # should be list(self.xypts.values())[0]
            s = (
                "cross-section cannot be created\n."
                " less than 2 points intersect the model grid\n"
                f" {len(self.xypts.values()[0])} points"
                " intersect the grid."
            )
            raise Exception(s)
    if self.geographic_coords:
        # transform back to geographic coordinates
        xypts = {}
        for nn, pt in self.xypts.items():
            xp = [t[0] for t in pt]
            yp = [t[1] for t in pt]
            xp, yp = geometry.transform(
                xp,
                yp,
                self.mg.xoffset,
                self.mg.yoffset,
                self.mg.angrot_radians,
            )
            xypts[nn] = [(xt, yt) for xt, yt in zip(xp, yp)]
        self.xypts = xypts
    # confining beds (laycbd) add extra pseudo-layers; self.active
    # marks real layers (1) vs confining beds (0)
    laycbd = []
    self.ncb = 0
    if self.model is not None:
        if self.model.laycbd is not None:
            laycbd = list(self.model.laycbd)
            self.ncb = np.count_nonzero(laycbd)
    if laycbd:
        self.active = []
        for k in range(self.mg.nlay):
            self.active.append(1)
            if laycbd[k] > 0:
                self.active.append(0)
        self.active = np.array(self.active, dtype=int)
    else:
        self.active = np.ones(self.mg.nlay, dtype=int)
    self._nlay, self._ncpl, self.ncb = self.mg.cross_section_lay_ncpl_ncb(
        self.ncb
    )
    # stack top over botm so elev[k] / elev[k+1] bound layer k
    top = self.mg.top.reshape(1, self._ncpl)
    botm = self.mg.botm.reshape(self._nlay + self.ncb, self._ncpl)
    self.elev = np.concatenate((top, botm), axis=0)
    self.idomain = self.mg.idomain
    if self.mg.idomain is None:
        # no idomain supplied: treat every cell as active
        self.idomain = np.ones(botm.shape, dtype=int)
    # projected cell polygons (node -> list of (x, z) vertices)
    self.projpts = self.set_zpts(None)
    # Create cross-section extent
    if extent is None:
        self.extent = self.get_extent()
    else:
        self.extent = extent
    # this is actually x or y based on projection
    self.xcenters = [
        np.mean(np.array(v).T[0]) for i, v in sorted(self.projpts.items())
    ]
    # mean cell sizes; used by plot_vector to reject arbitrary lines
    self.mean_dx = np.mean(
        np.max(self.xvertices, axis=1) - np.min(self.xvertices, axis=1)
    )
    self.mean_dy = np.mean(
        np.max(self.yvertices, axis=1) - np.min(self.yvertices, axis=1)
    )
    self._polygons = {}
    # default masked values: hnoflo/hdry from the model when available
    if model is None:
        self._masked_values = [1e30, -1e30]
    else:
        self._masked_values = [model.hnoflo, model.hdry]
    # Set axis limits
    self.ax.set_xlim(self.extent[0], self.extent[1])
    self.ax.set_ylim(self.extent[2], self.extent[3])
@staticmethod
def _is_valid(line):
    """Return True when *line* is a usable cross-section line: either a
    sequence of at least two (x, y) pairs, or a flopy/shapely
    LineString instance.
    """
    shapely_geo = import_optional_dependency("shapely.geometry")
    if isinstance(line, (list, tuple, np.ndarray)):
        arr = np.array(line)
        # need a 2-D array of at least two vertices...
        if arr.ndim < 2 or arr.shape[0] < 2:
            return False
        # ...where each vertex is an (x, y) pair
        return arr.shape[1] == 2
    # non-sequence input must be a LineString of either flavor
    return isinstance(
        line,
        (
            geometry.LineString,
            shapely_geo.LineString,
        ),
    )
@property
def polygons(self):
    """
    Method to return cached matplotlib polygons for a cross
    section

    Returns
    -------
    dict : [matplotlib.patches.Polygon]
    """
    # lazily build and cache; cleared cache ({}) triggers a rebuild
    if not self._polygons:
        for cell, poly in self.projpts.items():
            if len(poly) > 4:
                # this is the rare multipolygon instance...
                # vertices arrive as consecutive groups of 4 (two top
                # points + two bottom points per sub-polygon); split on
                # every 4th vertex (vn == 3, 7, 11, ...)
                n = 0
                p = []
                polys = []
                for vn, v in enumerate(poly):
                    if vn == 3 + 4 * n:
                        n += 1
                        p.append(v)
                        polys.append(p)
                        p = []
                    else:
                        p.append(v)
            else:
                polys = [poly]
            for polygon in polys:
                # arctan2 sorts the 4 vertices counter-clockwise so the
                # Polygon patch is non-self-intersecting
                verts = plotutil.UnstructuredPlotUtilities.arctan2(
                    np.array(polygon)
                )
                if cell not in self._polygons:
                    self._polygons[cell] = [Polygon(verts, closed=True)]
                else:
                    self._polygons[cell].append(
                        Polygon(verts, closed=True)
                    )
    # shallow copy so callers cannot replace the cached lists wholesale
    return copy.copy(self._polygons)
def get_extent(self):
"""
Get the extent of the rotated and offset grid
Returns
-------
tuple : (xmin, xmax, ymin, ymax)
"""
xpts = []
for _, verts in self.projpts.items():
for v in verts:
xpts.append(v[0])
xmin = np.min(xpts)
xmax = np.max(xpts)
ymin = np.min(self.elev)
ymax = np.max(self.elev)
return xmin, xmax, ymin, ymax
def plot_array(self, a, masked_values=None, head=None, **kwargs):
    """
    Plot a three-dimensional array as a patch collection.

    Parameters
    ----------
    a : numpy.ndarray
        Three-dimensional array to plot.
    masked_values : iterable of floats, ints
        Values to mask.
    head : numpy.ndarray
        Three-dimensional array to set top of patches to the minimum
        of the top of a layer or the head value. Used to create
        patches that conform to water-level elevations.
    **kwargs : dictionary
        keyword arguments passed to matplotlib.collections.PatchCollection

    Returns
    -------
    patches : matplotlib.collections.PatchCollection
    """
    ax = kwargs.pop("ax", self.ax)
    if not isinstance(a, np.ndarray):
        a = np.array(a)
    if a.ndim > 1:
        a = np.ravel(a)
    a = a.astype(float)
    # Combine the instance-level masked values (hnoflo/hdry or the
    # +/-1e30 defaults) with this call's masked_values.  BUGFIX: the
    # original extended self._masked_values in place, so per-call masks
    # leaked into every subsequent plot call on this object.
    mask_vals = list(self._masked_values)
    if masked_values is not None:
        mask_vals.extend(masked_values)
    for mval in mask_vals:
        a = np.ma.masked_values(a, mval)
    # re-project cell tops to the head surface when heads are supplied
    if isinstance(head, np.ndarray):
        projpts = self.set_zpts(np.ravel(head))
    else:
        projpts = None
    pc = self.get_grid_patch_collection(a, projpts, **kwargs)
    if pc is not None:
        ax.add_collection(pc)
        ax.set_xlim(self.extent[0], self.extent[1])
        ax.set_ylim(self.extent[2], self.extent[3])
    return pc
def plot_surface(self, a, masked_values=None, **kwargs):
    """
    Plot a two- or three-dimensional array as line(s).

    Parameters
    ----------
    a : numpy.ndarray
        Two- or three-dimensional array to plot.
    masked_values : iterable of floats, ints
        Values to mask.
    **kwargs : dictionary
        keyword arguments passed to matplotlib.pyplot.plot

    Returns
    -------
    plot : list containing matplotlib.plot objects
    """
    ax = kwargs.pop("ax", self.ax)
    # accept either "color" or "c"; "c" wins if both are given
    color = kwargs.pop("color", "b")
    color = kwargs.pop("c", color)
    if not isinstance(a, np.ndarray):
        a = np.array(a)
    if a.ndim > 1:
        a = np.ravel(a)
    a = a.astype(float)
    if a.size % self._ncpl != 0:
        raise AssertionError("Array size must be a multiple of ncpl")
    # Combine instance-level and per-call masked values.  BUGFIX: the
    # original extended self._masked_values in place, so per-call masks
    # leaked into every subsequent plot call on this object.
    mask_vals = list(self._masked_values)
    if masked_values is not None:
        mask_vals.extend(masked_values)
    for mval in mask_vals:
        a = np.ma.masked_values(a, mval)
    # node -> (xmin, xmax) span of the projected cell
    d = {
        i: (np.min(np.array(v).T[0]), np.max(np.array(v).T[0]))
        for i, v in sorted(self.projpts.items())
    }
    surface = []
    for cell, span in d.items():
        # skip cells beyond the array, NaNs, and masked entries
        if cell >= a.size:
            continue
        elif np.isnan(a[cell]):
            continue
        elif a[cell] is np.ma.masked:
            continue
        else:
            # horizontal segment at the array value across the cell
            line = ax.plot(
                span, [a[cell], a[cell]], color=color, **kwargs
            )
            surface.append(line)
    ax.set_xlim(self.extent[0], self.extent[1])
    ax.set_ylim(self.extent[2], self.extent[3])
    return surface
def plot_fill_between(
    self,
    a,
    colors=("blue", "red"),
    masked_values=None,
    head=None,
    **kwargs,
):
    """
    Plot a three-dimensional array as lines.

    Parameters
    ----------
    a : numpy.ndarray
        Three-dimensional array to plot.
    colors : list
        matplotlib fill colors, two required
    masked_values : iterable of floats, ints
        Values to mask.
    head : numpy.ndarray
        Three-dimensional array to set top of patches to the minimum
        of the top of a layer or the head value. Used to create
        patches that conform to water-level elevations.
    **kwargs : dictionary
        keyword arguments passed to matplotlib.pyplot.plot

    Returns
    -------
    plot : list containing matplotlib.fillbetween objects
    """
    ax = kwargs.pop("ax", self.ax)
    # colors are consumed by get_grid_patch_collection(fill_between=True)
    kwargs["colors"] = colors
    if not isinstance(a, np.ndarray):
        a = np.array(a)
    a = np.ravel(a).astype(float)
    # Combine instance-level and per-call masked values.  BUGFIX: the
    # original extended self._masked_values in place, so per-call masks
    # leaked into every subsequent plot call on this object.
    mask_vals = list(self._masked_values)
    if masked_values is not None:
        mask_vals.extend(masked_values)
    for mval in mask_vals:
        a = np.ma.masked_values(a, mval)
    # re-project cell tops to the head surface when heads are supplied
    if isinstance(head, np.ndarray):
        projpts = self.set_zpts(head)
    else:
        projpts = self.projpts
    pc = self.get_grid_patch_collection(
        a, projpts, fill_between=True, **kwargs
    )
    if pc is not None:
        ax.add_collection(pc)
        ax.set_xlim(self.extent[0], self.extent[1])
        ax.set_ylim(self.extent[2], self.extent[3])
    return pc
def contour_array(self, a, masked_values=None, head=None, **kwargs):
    """
    Contour a two-dimensional array.

    Parameters
    ----------
    a : numpy.ndarray
        Three-dimensional array to plot.
    masked_values : iterable of floats, ints
        Values to mask.
    head : numpy.ndarray
        Three-dimensional array to set top of patches to the minimum
        of the top of a layer or the head value. Used to create
        patches that conform to water-level elevations.
    **kwargs : dictionary
        keyword arguments passed to matplotlib.pyplot.contour

    Returns
    -------
    contour_set : matplotlib.pyplot.contour
    """
    import matplotlib.tri as tri

    if not isinstance(a, np.ndarray):
        a = np.array(a)
    if a.ndim > 1:
        a = np.ravel(a)
    ax = kwargs.pop("ax", self.ax)
    xcenters = self.xcenters
    # one value per projected cell, in node order
    plotarray = np.array([a[cell] for cell in sorted(self.projpts)])
    # grid decides whether plain contour (structured) or tricontour
    # (unstructured) should be used
    (
        plotarray,
        xcenters,
        zcenters,
        mplcontour,
    ) = self.mg.cross_section_set_contour_arrays(
        plotarray, xcenters, head, self.elev, self.projpts
    )
    if not mplcontour:
        # z locations: water-table-corrected centers when heads are
        # given, otherwise geometric cell centers
        if isinstance(head, np.ndarray):
            zcenters = self.set_zcentergrid(np.ravel(head))
        else:
            zcenters = np.array(
                [
                    np.mean(np.array(v).T[1])
                    for i, v in sorted(self.projpts.items())
                ]
            )
    # work around for tri-contour ignore vmin & vmax
    # necessary for the tri-contour NaN issue fix
    if "levels" not in kwargs:
        vmin = kwargs.pop("vmin", np.nanmin(plotarray))
        vmax = kwargs.pop("vmax", np.nanmax(plotarray))
        levels = np.linspace(vmin, vmax, 7)
        kwargs["levels"] = levels
    # workaround for tri-contour nan issue: replace NaNs with a
    # sentinel and make sure the sentinel is masked below
    plotarray[np.isnan(plotarray)] = -(2**31)
    if masked_values is None:
        masked_values = [-(2**31)]
    else:
        masked_values = list(masked_values)
        if -(2**31) not in masked_values:
            masked_values.append(-(2**31))
    ismasked = None
    # NOTE(review): this extends the shared instance list, so masked
    # values accumulate on self._masked_values across calls
    if masked_values is not None:
        self._masked_values.extend(list(masked_values))
    # build a boolean mask that is True wherever any masked value occurs
    for mval in self._masked_values:
        if ismasked is None:
            ismasked = np.isclose(plotarray, mval)
        else:
            t = np.isclose(plotarray, mval)
            ismasked += t
    filled = kwargs.pop("filled", False)
    plot_triplot = kwargs.pop("plot_triplot", False)
    if "extent" in kwargs:
        # clip the contoured points to the requested window
        extent = kwargs.pop("extent")
        idx = (
            (xcenters >= extent[0])
            & (xcenters <= extent[1])
            & (zcenters >= extent[2])
            & (zcenters <= extent[3])
        )
        plotarray = plotarray[idx].flatten()
        xcenters = xcenters[idx].flatten()
        zcenters = zcenters[idx].flatten()
    if mplcontour:
        plotarray = np.ma.masked_array(plotarray, ismasked)
        if filled:
            contour_set = ax.contourf(
                xcenters, zcenters, plotarray, **kwargs
            )
        else:
            contour_set = ax.contour(
                xcenters, zcenters, plotarray, **kwargs
            )
    else:
        # triangulate the scattered cell centers, then mask flat
        # triangles and any triangle touching a masked point
        triang = tri.Triangulation(xcenters, zcenters)
        analyze = tri.TriAnalyzer(triang)
        mask = analyze.get_flat_tri_mask(rescale=False)
        if ismasked is not None:
            ismasked = ismasked.flatten()
            mask2 = np.any(
                np.where(ismasked[triang.triangles], True, False), axis=1
            )
            mask[mask2] = True
        triang.set_mask(mask)
        if filled:
            contour_set = ax.tricontourf(triang, plotarray, **kwargs)
        else:
            contour_set = ax.tricontour(triang, plotarray, **kwargs)
        if plot_triplot:
            ax.triplot(triang, color="black", marker="o", lw=0.75)
    ax.set_xlim(self.extent[0], self.extent[1])
    ax.set_ylim(self.extent[2], self.extent[3])
    return contour_set
def plot_inactive(self, ibound=None, color_noflow="black", **kwargs):
    """
    Make a plot of inactive cells. If not specified, then pull ibound
    from the self.ml

    Parameters
    ----------
    ibound : numpy.ndarray
        ibound array to plot. (Default is ibound in 'BAS6' package.)
    color_noflow : string
        (Default is 'black')

    Returns
    -------
    quadmesh : matplotlib.collections.QuadMesh
    """
    # fall back to the grid's idomain when no array is supplied
    if ibound is None:
        if self.mg.idomain is None:
            raise AssertionError("An idomain array must be provided")
        ibound = self.mg.idomain
    # 1 where inactive (ibound == 0), everything else masked out
    flagged = np.where(ibound == 0, 1, 0)
    flagged = np.ma.masked_equal(flagged, 0)
    cmap = matplotlib.colors.ListedColormap(["0", color_noflow])
    norm = matplotlib.colors.BoundaryNorm([0, 1, 2], cmap.N)
    return self.plot_array(flagged, cmap=cmap, norm=norm, **kwargs)
def plot_ibound(
    self,
    ibound=None,
    color_noflow="black",
    color_ch="blue",
    color_vpt="red",
    head=None,
    **kwargs,
):
    """
    Make a plot of ibound. If not specified, then pull ibound from the
    self.model

    Parameters
    ----------
    ibound : numpy.ndarray
        ibound array to plot. (Default is ibound in 'BAS6' package.)
    color_noflow : string
        (Default is 'black')
    color_ch : string
        Color for constant heads (Default is 'blue'.)
    head : numpy.ndarray
        Three-dimensional array to set top of patches to the minimum
        of the top of a layer or the head value. Used to create
        patches that conform to water-level elevations.
    **kwargs : dictionary
        keyword arguments passed to matplotlib.collections.PatchCollection

    Returns
    -------
    patches : matplotlib.collections.PatchCollection
    """
    if ibound is None:
        # mf6 uses idomain semantics: negative cells are vertical
        # pass-through, so swap in the vpt color
        if self.model is not None:
            if self.model.version == "mf6":
                color_ch = color_vpt
        if self.mg.idomain is None:
            raise AssertionError("Ibound/Idomain array must be provided")
        ibound = self.mg.idomain
    # classify: 1 -> inactive, 2 -> constant head / pass-through,
    # 0 (active) stays masked
    classified = np.zeros(ibound.shape, dtype=int)
    classified[ibound == 0] = 1
    classified[ibound < 0] = 2
    classified = np.ma.masked_equal(classified, 0)
    cmap = matplotlib.colors.ListedColormap(
        ["none", color_noflow, color_ch]
    )
    norm = matplotlib.colors.BoundaryNorm([0, 1, 2, 3], cmap.N)
    # mask active cells
    return self.plot_array(
        classified,
        masked_values=[0],
        head=head,
        cmap=cmap,
        norm=norm,
        **kwargs,
    )
def plot_grid(self, **kwargs):
    """
    Plot the grid lines.

    Parameters
    ----------
    kwargs : ax, colors. The remaining kwargs are passed into the
        the LineCollection constructor.

    Returns
    -------
    lc : matplotlib.collections.LineCollection
    """
    axes = kwargs.pop("ax", self.ax)
    grid_patches = self.get_grid_line_collection(**kwargs)
    if grid_patches is not None:
        axes.add_collection(grid_patches)
    return grid_patches
def plot_bc(
    self, name=None, package=None, kper=0, color=None, head=None, **kwargs
):
    """
    Plot boundary conditions locations for a specific boundary
    type from a flopy model

    Parameters
    ----------
    name : string
        Package name string ('WEL', 'GHB', etc.). (Default is None)
    package : flopy.modflow.Modflow package class instance
        flopy package class instance. (Default is None)
    kper : int
        Stress period to plot
    color : string
        matplotlib color string. (Default is None)
    head : numpy.ndarray
        Three-dimensional array (structured grid) or
        Two-dimensional array (vertex grid)
        to set top of patches to the minimum of the top of a\
        layer or the head value. Used to create
        patches that conform to water-level elevations.
    **kwargs : dictionary
        keyword arguments passed to matplotlib.collections.PatchCollection

    Returns
    -------
    patches : matplotlib.collections.PatchCollection
    """
    # legacy keyword: "ftype" is an alias for "name"
    if "ftype" in kwargs and name is None:
        name = kwargs.pop("ftype")
    # Find package to plot
    if package is not None:
        p = package
    elif self.model is not None:
        if name is None:
            raise Exception("ftype not specified")
        name = name.upper()
        p = self.model.get_package(name)
    else:
        raise Exception("Cannot find package to plot")
    # trap for mf6 'cellid' vs mf2005 'k', 'i', 'j' convention
    if isinstance(p, list) or p.parent.version == "mf6":
        if not isinstance(p, list):
            p = [p]
        # collect cell indices from every sub-package into one array
        idx = np.array([])
        for pp in p:
            if pp.package_type in ("lak", "sfr", "maw", "uzf"):
                # advanced packages store locations differently
                t = plotutil.advanced_package_bc_helper(pp, self.mg, kper)
            else:
                try:
                    mflist = pp.stress_period_data.array[kper]
                except Exception as e:
                    raise Exception(
                        f"Not a list-style boundary package: {e!s}"
                    )
                if mflist is None:
                    return
                # cellid tuples -> integer index array, one column per cell
                t = np.array(
                    [list(i) for i in mflist["cellid"]], dtype=int
                ).T
            if len(idx) == 0:
                idx = np.copy(t)
            else:
                idx = np.append(idx, t, axis=1)
    else:
        # modflow-2005 structured and unstructured grid
        if p.package_type in ("uzf", "lak"):
            idx = plotutil.advanced_package_bc_helper(p, self.mg, kper)
        else:
            try:
                mflist = p.stress_period_data[kper]
            except Exception as e:
                raise Exception(
                    f"Not a list-style boundary package: {e!s}"
                )
            if mflist is None:
                return
            if len(self.mg.shape) == 3:
                idx = [mflist["k"], mflist["i"], mflist["j"]]
            else:
                idx = mflist["node"]
    # flag boundary-condition cells with 1; everything else stays 0
    if len(self.mg.shape) != 3:
        plotarray = np.zeros((self._nlay, self._ncpl), dtype=int)
        plotarray[tuple(idx)] = 1
    else:
        plotarray = np.zeros(
            (self.mg.nlay, self.mg.nrow, self.mg.ncol), dtype=int
        )
        plotarray[idx[0], idx[1], idx[2]] = 1
    plotarray = np.ma.masked_equal(plotarray, 0)
    # pick color from the standard BC color table unless overridden
    if color is None:
        key = name[:3].upper()
        if key in plotutil.bc_color_dict:
            c = plotutil.bc_color_dict[key]
        else:
            c = plotutil.bc_color_dict["default"]
    else:
        c = color
    cmap = matplotlib.colors.ListedColormap(["none", c])
    bounds = [0, 1, 2]
    norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
    patches = self.plot_array(
        plotarray,
        masked_values=[0],
        head=head,
        cmap=cmap,
        norm=norm,
        **kwargs,
    )
    return patches
def plot_vector(
    self,
    vx,
    vy,
    vz,
    head=None,
    kstep=1,
    hstep=1,
    normalize=False,
    masked_values=None,
    **kwargs,
):
    """
    Plot a vector.

    Parameters
    ----------
    vx : np.ndarray
        x component of the vector to be plotted (non-rotated)
        array shape must be (nlay, nrow, ncol) for a structured grid
        array shape must be (nlay, ncpl) for a unstructured grid
    vy : np.ndarray
        y component of the vector to be plotted (non-rotated)
        array shape must be (nlay, nrow, ncol) for a structured grid
        array shape must be (nlay, ncpl) for a unstructured grid
    vz : np.ndarray
        y component of the vector to be plotted (non-rotated)
        array shape must be (nlay, nrow, ncol) for a structured grid
        array shape must be (nlay, ncpl) for a unstructured grid
    head : numpy.ndarray
        MODFLOW's head array. If not provided, then the quivers will be
        plotted in the cell center.
    kstep : int
        layer frequency to plot (default is 1)
    hstep : int
        horizontal frequency to plot (default is 1)
    normalize : bool
        boolean flag used to determine if vectors should be normalized
        using the vector magnitude in each cell (default is False)
    masked_values : iterable of floats
        values to mask
    kwargs : matplotlib.pyplot keyword arguments for the
        plt.quiver method

    Returns
    -------
    quiver : matplotlib.pyplot.quiver
        result of the quiver function
    """
    ax = kwargs.pop("ax", self.ax)
    pivot = kwargs.pop("pivot", "middle")
    # Check that the cross section is not arbitrary with a tolerance
    # of the mean cell size in each direction
    arbitrary = False
    pts = self.pts
    # a line is "uniform" along an axis when every vertex is within one
    # mean cell size of the first vertex on that axis
    xuniform = [
        True if abs(pts.T[0, 0] - i) < self.mean_dy else False
        for i in pts.T[0]
    ]
    yuniform = [
        True if abs(pts.T[1, 0] - i) < self.mean_dx else False
        for i in pts.T[1]
    ]
    if not np.all(xuniform) and not np.all(yuniform):
        arbitrary = True
    if arbitrary:
        err_msg = (
            "plot_specific_discharge() does not "
            "support arbitrary cross-sections"
        )
        raise AssertionError(err_msg)
    # get ibound array to mask inactive cells
    ib = np.ones((self.mg.nnodes,), dtype=int)
    if self.mg.idomain is not None:
        ib = self.mg.idomain.ravel()
    # get the actual values to plot and set xcenters
    # (y-direction sections flip the sign so "right" on the plot
    # matches the projection direction)
    if self.direction == "x":
        u_tmp = vx
    else:
        u_tmp = vy * -1.0
    # kstep implementation for vertex grid
    projpts = {
        key: value
        for key, value in self.projpts.items()
        if (key // self._ncpl) % kstep == 0
    }
    # set x and z centers
    if isinstance(head, np.ndarray):
        # pipe kstep to set_zcentergrid to assure consistent array size
        zcenters = self.set_zcentergrid(np.ravel(head), kstep=kstep)
    else:
        zcenters = [
            np.mean(np.array(v).T[1]) for i, v in sorted(projpts.items())
        ]
    xcenters = np.array(
        [np.mean(np.array(v).T[0]) for i, v in sorted(projpts.items())]
    )
    x = np.ravel(xcenters)
    z = np.ravel(zcenters)
    # sample the vector components and ibound at the kept nodes
    u = np.array([u_tmp.ravel()[cell] for cell in sorted(projpts)])
    v = np.array([vz.ravel()[cell] for cell in sorted(projpts)])
    ib = np.array([ib[cell] for cell in sorted(projpts)])
    # horizontal thinning
    x = x[::hstep]
    z = z[::hstep]
    u = u[::hstep]
    v = v[::hstep]
    ib = ib[::hstep]
    # mask values
    if masked_values is not None:
        for mval in masked_values:
            to_mask = np.logical_or(u == mval, v == mval)
            u[to_mask] = np.nan
            v[to_mask] = np.nan
    # normalize
    if normalize:
        vmag = np.sqrt(u**2.0 + v**2.0)
        idx = vmag > 0.0
        u[idx] /= vmag[idx]
        v[idx] /= vmag[idx]
    # mask with an ibound array
    u[ib == 0] = np.nan
    v[ib == 0] = np.nan
    # plot with quiver
    quiver = ax.quiver(x, z, u, v, pivot=pivot, **kwargs)
    return quiver
def plot_pathline(
    self, pl, travel_time=None, method="cell", head=None, **kwargs
):
    """
    Plot the MODPATH pathlines

    Parameters
    ----------
    pl : list of rec arrays or a single rec array
        rec array or list of rec arrays is data returned from
        modpathfile PathlineFile get_data() or get_alldata()
        methods. Data in rec array is 'x', 'y', 'z', 'time',
        'k', and 'particleid'.
    travel_time : float or str
        travel_time is a travel time selection for the displayed
        pathlines. If a float is passed then pathlines with times
        less than or equal to the passed time are plotted. If a
        string is passed a variety logical constraints can be added
        in front of a time value to select pathlines for a select
        period of time. Valid logical constraints are <=, <, ==, >=, and
        >. For example, to select all pathlines less than 10000 days
        travel_time='< 10000' would be passed to plot_pathline.
        (default is None)
    method : str
        "cell" shows only pathlines that intersect with a cell
        "all" projects all pathlines onto the cross section regardless
        of whether they intersect with a given cell
    head : np.ndarray
        optional adjustment to only show pathlines that are <= to
        the top of the water table given a user supplied head array
    kwargs : layer, ax, colors. The remaining kwargs are passed
        into the LineCollection constructor.

    Returns
    -------
    lc : matplotlib.collections.LineCollection
    """
    from matplotlib.collections import LineCollection

    # make sure pathlines is a list
    if not isinstance(pl, list):
        # split a combined recarray into one recarray per particle
        pids = np.unique(pl["particleid"])
        if len(pids) > 1:
            pl = [pl[pl["particleid"] == pid] for pid in pids]
        else:
            pl = [pl]
    # marker styling options ("ms" is an alias for "markersize")
    marker = kwargs.pop("marker", None)
    markersize = kwargs.pop("markersize", None)
    markersize = kwargs.pop("ms", markersize)
    markercolor = kwargs.pop("markercolor", None)
    markerevery = kwargs.pop("markerevery", 1)
    ax = kwargs.pop("ax", self.ax)
    if "colors" not in kwargs:
        kwargs["colors"] = "0.5"
    # optionally clip cell tops to the water table before intersecting
    projpts = self.projpts
    if head is not None:
        projpts = self.set_zpts(head)
    # apply the travel-time filter to each pathline
    pl2 = []
    for p in pl:
        tp = plotutil.filter_modpath_by_travel_time(p, travel_time)
        pl2.append(tp)
    # keep only segments that intersect the cross-section cells
    tp = plotutil.intersect_modpath_with_crosssection(
        pl2,
        projpts,
        self.xvertices,
        self.yvertices,
        self.direction,
        self._ncpl,
        method=method,
    )
    # project the retained points onto cross-section coordinates
    plines = plotutil.reproject_modpath_to_crosssection(
        tp,
        projpts,
        self.xypts,
        self.direction,
        self.mg,
        self._ncpl,
        self.geographic_coords,
    )
    # build linecollection and markers arrays
    linecol = []
    markers = []
    for _, arr in plines.items():
        arr = np.array(arr)
        # sort by travel time
        arr = arr[arr[:, -1].argsort()]
        # drop the time column for plotting
        linecol.append(arr[:, :-1])
        if marker is not None:
            for xy in arr[::markerevery]:
                markers.append(xy)
    lc = None
    if len(linecol) > 0:
        lc = LineCollection(linecol, **kwargs)
        ax.add_collection(lc)
        if marker is not None:
            markers = np.array(markers)
            ax.plot(
                markers[:, 0],
                markers[:, 1],
                lw=0,
                marker=marker,
                color=markercolor,
                ms=markersize,
            )
    return lc
def plot_timeseries(
    self, ts, travel_time=None, method="cell", head=None, **kwargs
):
    """
    Plot the MODPATH timeseries.  Timeseries records share the pathline
    record layout, so this simply delegates to :meth:`plot_pathline`.

    Parameters
    ----------
    ts : list of rec arrays or a single rec array
        rec array or list of rec arrays is data returned from
        modpathfile TimeseriesFile get_data() or get_alldata()
        methods. Data in rec array is 'x', 'y', 'z', 'time',
        'k', and 'particleid'.
    travel_time : float or str
        travel-time selection; same semantics as plot_pathline
        (default is None)
    kwargs : layer, ax, colors. The remaining kwargs are passed
        into the LineCollection constructor.

    Returns
    -------
    lo : list of Line2D objects
    """
    # mirror any user-supplied line color onto the marker color
    if "color" in kwargs:
        kwargs["markercolor"] = kwargs["color"]
    return self.plot_pathline(
        ts,
        travel_time=travel_time,
        method=method,
        head=head,
        **kwargs,
    )
def plot_endpoint(
    self,
    ep,
    direction="ending",
    selection=None,
    selection_direction=None,
    method="cell",
    head=None,
    **kwargs,
):
    """
    Plot MODPATH endpoint locations on the cross section.

    Parameters
    ----------
    ep : recarray
        endpoint data as returned by modpathfile EndpointFile
        get_data()/get_alldata(); records include 'time', 'time0',
        and 'particleid' fields
    direction : str
        "ending" or "starting"; which end of each particle track to plot
    selection : optional
        endpoint selection passed to parse_modpath_selection_options
    selection_direction : optional
        selection direction passed to parse_modpath_selection_options
    method : str
        "cell" shows only endpoints that intersect with a cell;
        "all" projects all endpoints onto the cross section
    head : np.ndarray
        optional head array used to clip cell tops to the water table
    **kwargs : dictionary
        additional keyword arguments: "ax", "colorbar",
        "colorbar_label", "shrink", "s"/"size", "c", and any
        matplotlib.pyplot.scatter options

    Returns
    -------
    sp : matplotlib.pyplot.scatter result
    """
    ax = kwargs.pop("ax", self.ax)
    # colorbar kwargs
    createcb = kwargs.pop("colorbar", False)
    colorbar_label = kwargs.pop("colorbar_label", "Endpoint Time")
    shrink = float(kwargs.pop("shrink", 1.0))
    # marker kwargs ("size" is a linear alias; scatter wants area)
    s = kwargs.pop("s", np.sqrt(50))
    s = float(kwargs.pop("size", s)) ** 2.0
    # cd maps particleid -> color value for that particle's endpoint
    cd = {}
    if "c" not in kwargs:
        # color by travel time and track the range for the colormap
        vmin, vmax = 1e10, -1e10
        for rec in ep:
            tt = float(rec["time"] - rec["time0"])
            if tt < vmin:
                vmin = tt
            if tt > vmax:
                vmax = tt
            cd[int(rec["particleid"])] = tt
        kwargs["vmin"] = vmin
        kwargs["vmax"] = vmax
    else:
        # a user-supplied color applies uniformly to every particle
        tc = kwargs.pop("c")
        for rec in ep:
            # BUGFIX: field name was misspelled "praticleid", which
            # raised a KeyError whenever the caller passed "c"
            cd[int(rec["particleid"])] = tc
    tep, istart = plotutil.parse_modpath_selection_options(
        ep, direction, selection, selection_direction
    )[0:2]
    # optionally clip cell tops to the water table before intersecting
    projpts = self.projpts
    if head is not None:
        projpts = self.set_zpts(head)
    tep = plotutil.intersect_modpath_with_crosssection(
        tep,
        projpts,
        self.xvertices,
        self.yvertices,
        self.direction,
        method=method,
        starting=istart,
    )
    if not tep:
        return
    epdict = plotutil.reproject_modpath_to_crosssection(
        tep,
        projpts,
        self.xypts,
        self.direction,
        self.mg,
        self.geographic_coords,
        starting=istart,
    )
    # flatten the projected endpoints and gather matching colors
    arr = []
    c = []
    for node, epl in sorted(epdict.items()):
        c.append(cd[node])
        for xy in epl:
            arr.append(xy)
    arr = np.array(arr)
    sp = ax.scatter(arr[:, 0], arr[:, 1], c=c, s=s, **kwargs)
    # add a colorbar for travel times
    if createcb:
        cb = plt.colorbar(sp, ax=ax, shrink=shrink)
        cb.set_label(colorbar_label)
    return sp
def get_grid_line_collection(self, **kwargs):
    """
    Get a PatchCollection of the grid

    Parameters
    ----------
    **kwargs : dictionary
        keyword arguments passed to matplotlib.collections.LineCollection

    Returns
    -------
    PatchCollection : matplotlib.collections.LineCollection
    """
    from matplotlib.collections import PatchCollection

    # accept several aliases for the edge color; the later aliases win
    edgecolor = kwargs.pop("colors", "grey")
    for alias in ("color", "ec", "edgecolor"):
        edgecolor = kwargs.pop(alias, edgecolor)
    # face color defaults to transparent; "fc" overrides "facecolor"
    facecolor = kwargs.pop("facecolor", "none")
    facecolor = kwargs.pop("fc", facecolor)
    # flatten the cached per-cell polygon lists in node order
    flattened = []
    for _, polys in sorted(self.polygons.items()):
        flattened.extend(polys)
    if not flattened:
        return None
    return PatchCollection(
        flattened, edgecolor=edgecolor, facecolor=facecolor, **kwargs
    )
def set_zpts(self, vs):
    """
    Get an array of projected vertices corrected with corrected
    elevations based on minimum of cell elevation (self.elev) or
    passed vs numpy.ndarray

    Parameters
    ----------
    vs : numpy.ndarray
        Two-dimensional array to plot.

    Returns
    -------
    zpts : dict
    """
    # make vertex array based on projection direction
    if vs is not None:
        if not isinstance(vs, np.ndarray):
            vs = np.array(vs)
    # coordinate index used for sorting: x for x-projections,
    # y (last element) otherwise
    if self.direction == "x":
        xyix = 0
    else:
        xyix = -1
    projpts = {}
    # include confining-bed pseudo-layers in the layer count
    nlay = self.mg.nlay + self.ncb
    nodeskip = self.mg.cross_section_nodeskip(nlay, self.xypts)
    cbcnt = 0
    for k in range(1, nlay + 1):
        # skip confining beds but count them for index adjustment
        if not self.active[k - 1]:
            cbcnt += 1
            continue
        k, ns, ncbnn = self.mg.cross_section_adjust_indicies(k - 1, cbcnt)
        # elev rows k-1 / k bound this layer from above / below
        top = self.elev[k - 1, :]
        botm = self.elev[k, :]
        # running distance along the cross-section line
        d0 = 0
        # trap to split multipolygons
        xypts = []
        for nn, verts in self.xypts.items():
            if nn in nodeskip[ns - 1]:
                continue
            if len(verts) > 2:
                # more than 2 intersection points: split into
                # consecutive 2-point segments
                i0 = 2
                for ix in range(len(verts)):
                    if ix == i0 - 1:
                        xypts.append((nn, verts[i0 - 2 : i0]))
                        i0 += 2
            else:
                xypts.append((nn, verts))
        # order segments along the projection axis
        # (q[-1] is the vertex list; [xyix][xyix] picks the sort coord)
        xypts = sorted(xypts, key=lambda q: q[-1][xyix][xyix])
        if self.direction == "y":
            xypts = xypts[::-1]
        for nn, verts in xypts:
            # cell top: layer top, or the supplied surface (e.g. heads)
            # clamped between cell bottom and layer top
            if vs is None:
                t = top[nn]
            else:
                t = vs[nn + ncbnn]
                # -1e30 is a no-data sentinel; collapse the cell
                if np.isclose(t, -1e30):
                    t = botm[nn]
            if t < botm[nn]:
                t = botm[nn]
            if top[nn] < t:
                t = top[nn]
            b = botm[nn]
            if self.geographic_coords:
                # keep real-world x (or y) coordinates
                if self.direction == "x":
                    projt = [(v[0], t) for v in verts]
                    projb = [(v[0], b) for v in verts]
                else:
                    projt = [(v[1], t) for v in verts]
                    projb = [(v[1], b) for v in verts]
            else:
                # distance-along-line coordinates: accumulate the
                # segment length into d0
                verts = np.array(verts).T
                a2 = (np.max(verts[0]) - np.min(verts[0])) ** 2
                b2 = (np.max(verts[1]) - np.min(verts[1])) ** 2
                c = np.sqrt(a2 + b2)
                d1 = d0 + c
                projt = [(d0, t), (d1, t)]
                projb = [(d0, b), (d1, b)]
                d0 += c
            # four (or more) vertices per cell: top pair + bottom pair
            projpt = projt + projb
            node = nn + ncbnn
            if node not in projpts:
                projpts[node] = projpt
            else:
                projpts[node] += projpt
    return projpts
def set_zcentergrid(self, vs, kstep=1):
    """
    Get an array of z elevations at the center of a cell that is based
    on minimum of cell top elevation (self.elev) or passed vs numpy.ndarray

    Parameters
    ----------
    vs : numpy.ndarray
        Three-dimensional array to plot.
    kstep : int
        plotting layer interval

    Returns
    -------
    zcentergrid : numpy.ndarray
    """
    projected = self.set_zpts(vs)
    centers = []
    # node // ncpl is the layer index; honor the kstep thinning
    for node, verts in sorted(projected.items()):
        if (node // self._ncpl) % kstep != 0:
            continue
        # mean of the vertex z values is the cell-center elevation
        centers.append(np.mean(np.array(verts).T[1]))
    return centers
    def get_grid_patch_collection(
        self, plotarray, projpts=None, fill_between=False, **kwargs
    ):
        """
        Get a PatchCollection of plotarray in unmasked cells
        Parameters
        ----------
        plotarray : numpy.ndarray
            One-dimensional array to attach to the Patch Collection.
        projpts : dict
            dictionary defined by node number which contains model
            patch vertices.
        fill_between : bool
            flag to create polygons that mimick the matplotlib fill between
            method. Only used by the plot_fill_between method.
        **kwargs : dictionary
            keyword arguments passed to matplotlib.collections.PatchCollection
        Returns
        -------
        patches : matplotlib.collections.PatchCollection
        """
        from matplotlib.collections import PatchCollection
        from matplotlib.patches import Polygon
        # with no projpts supplied, fall back to the cached (already ordered)
        # polygons and skip the per-vertex reordering below
        use_cache = False
        if projpts is None:
            use_cache = True
            projpts = self.polygons
        vmin = kwargs.pop("vmin", None)
        vmax = kwargs.pop("vmax", None)
        match_original = False
        if fill_between:
            # fill_between patches carry explicit colors, so the collection
            # must preserve them instead of mapping values through a colormap
            match_original = True
            colors = kwargs.pop("colors")
        rectcol = []
        data = []
        for cell, poly in sorted(projpts.items()):
            if not use_cache:
                if len(poly) > 4:
                    # multipolygon instance...
                    # split the flat vertex list into 4-vertex sub-polygons
                    n = 0
                    p = []
                    polys = []
                    for vn, v in enumerate(poly):
                        if vn == 3 + 4 * n:
                            n += 1
                            p.append(v)
                            polys.append(p)
                            p = []
                        else:
                            p.append(v)
                else:
                    polys = [poly]
            else:
                polys = poly
            for polygon in polys:
                if not use_cache:
                    # order vertices by angle so matplotlib renders a valid,
                    # non self-intersecting polygon
                    polygon = plotutil.UnstructuredPlotUtilities.arctan2(
                        np.array(polygon)
                    )
                # skip NaN and masked cells entirely
                if np.isnan(plotarray[cell]):
                    continue
                elif plotarray[cell] is np.ma.masked:
                    continue
                if use_cache:
                    rectcol.append(polygon)
                elif fill_between:
                    # build two stacked rectangles split at the data value v:
                    # upper part (y1 -> v) gets colors[0], lower (v -> y)
                    # gets colors[1]
                    x = list(set(np.array(polygon).T[0]))
                    y1 = np.max(np.array(polygon).T[1])
                    y = np.min(np.array(polygon).T[1])
                    v = plotarray[cell]
                    # clamp the split elevation to the cell's vertical extent
                    if v > y1:
                        v = y
                    if v < y:
                        v = y
                    p1 = [(x[0], y1), (x[1], y1), (x[1], v), (x[0], v)]
                    p2 = [(x[0], v), (x[1], v), (x[1], y), (x[0], y)]
                    rectcol.append(Polygon(p1, closed=True, color=colors[0]))
                    rectcol.append(Polygon(p2, closed=True, color=colors[1]))
                else:
                    rectcol.append(Polygon(polygon, closed=True))
                data.append(plotarray[cell])
        if len(rectcol) > 0:
            patches = PatchCollection(
                rectcol, match_original=match_original, **kwargs
            )
            if not fill_between:
                patches.set_array(np.array(data))
                patches.set_clim(vmin, vmax)
        else:
            patches = None
        return patches
|
####################################################################
######### Copyright 2016-2017 BigSQL ###########
####################################################################
from flask import Flask, render_template, url_for, request, session, redirect
import os
from flask_triangle import Triangle
from flask_restful import reqparse, abort, Api, Resource
from flask_login import user_logged_in
from flask_security import auth_token_required, auth_required
import json
from Components import Components as pgc
from flask_security import login_required, roles_required, current_user, roles_accepted
# from flask_login import current_user
from flask_mail import Mail
from flask_babel import Babel, gettext
from pgadmin.utils.session import create_session_interface
from pgadmin.model import db, Role, User, Server, ServerGroup, Process, roles_users
from pgadmin.utils.crypto import encrypt, decrypt, pqencryptpassword
from flask_security import Security, SQLAlchemyUserDatastore
from pgadmin.utils.sqliteSessions import SqliteSessionInterface
from pgadmin.utils.driver import get_driver
import config
from config import PG_DEFAULT_DRIVER
from flask_restful import reqparse
from datetime import datetime, timedelta
import dateutil
import hashlib
import time
import pytz
import psutil
from pickle import dumps, loads
import csv
import sqlite3
from werkzeug.contrib.fixers import ProxyFix
# RequestParser for endpoints that pull arguments out of the request body.
parser = reqparse.RequestParser()
#parser.add_argument('data')
import platform
# Cached OS name ("Windows", "Linux", "Darwin") used for per-platform quoting.
this_uname = str(platform.system())
# pgc install/log locations, provided by the launcher environment.
PGC_HOME = os.getenv("PGC_HOME", "")
PGC_LOGS = os.getenv("PGC_LOGS", "")
config.APP_NAME = "pgDevOps"
config.LOGIN_NAME = "pgDevOps"
application = Flask(__name__)
# Honor X-Forwarded-* headers when running behind a reverse proxy.
application.wsgi_app = ProxyFix(application.wsgi_app)
babel = Babel(application)
Triangle(application)
api = Api(application)
application.config.from_object(config)
# Generated HTML reports are served from <app dir>/reports.
current_path = os.path.dirname(os.path.realpath(__file__))
reports_path = os.path.join(current_path, "reports")
##########################################################################
# Setup session management
##########################################################################
application.session_interface = SqliteSessionInterface(config.SESSION_DB_PATH)
application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///{0}?timeout={1}'.format(
    config.SQLITE_PATH.replace('\\', '/'),
    getattr(config, 'SQLITE_TIMEOUT', 500)
)
application.config['WTF_CSRF_ENABLED'] = False
# Flask-Security feature flags: password recovery/change and open
# registration (no e-mail confirmation, no registration mails).
application.config['SECURITY_RECOVERABLE'] = True
application.config['SECURITY_CHANGEABLE'] = True
application.config['SECURITY_REGISTERABLE'] = True
application.config['SECURITY_REGISTER_URL'] = '/register'
application.config['SECURITY_CONFIRMABLE'] = False
application.config['SECURITY_SEND_REGISTER_EMAIL'] = False
# Auth tokens and browser sessions both expire after 10 minutes.
application.config['SECURITY_TOKEN_MAX_AGE'] = 600
application.permanent_session_lifetime = timedelta(minutes=10)
db.init_app(application)
Mail(application)
import pgadmin.utils.paths as paths
paths.init_app(application)
#Enviornment variable for restrict api action commands
os.environ['IS_DEVOPS'] = "True"
def before_request():
    """Steer anonymous users between the login and register pages.

    While no active Administrator exists, the login page redirects to
    registration; once an Administrator exists, the register page redirects
    back to login.
    """
    if current_user.is_authenticated:
        return None
    if request.endpoint == 'security.login' and no_admin_users():
        return redirect(url_for('security.register'))
    if request.endpoint == 'security.register' and not no_admin_users():
        return redirect(url_for('security.login'))
application.before_request(before_request)
from forms import RegisterForm, check_ami
# Setup Flask-Security
# SQLAlchemy-backed user store with a custom registration form.
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(application, user_datastore, register_form=RegisterForm)
from flask_security.signals import user_registered
# Cached AMI (cloud image) detection result; consulted by request handlers.
is_ami = check_ami()
def no_admin_users():
    """Return True when no active user holds the Administrator role."""
    admins = User.query.filter(
        User.roles.any(name='Administrator'), User.active == True
    ).all()
    return len(admins) == 0
@user_registered.connect_via(application)
def on_user_registerd(app, user, confirm_token):
    """Post-registration hook: create a default server group and assign a role.

    The first registered user (when no active Administrator exists) becomes
    Administrator, replacing the inactive bootstrap 'bigsql@bigsql.org'
    account; everyone else gets the plain 'User' role.
    """
    sg = ServerGroup(
        user_id=user.id,
        name="Servers")
    db.session.add(sg)
    # Outside of an AMI deployment, flag the session so the UI can show a
    # one-time "first login" experience (see checkInitLogin).
    if is_ami.get('rc') != 2:
        session['initial-logged-in'] = True
    db.session.commit()
    default_user = user_datastore.get_user('bigsql@bigsql.org')
    if not len(User.query.filter(User.roles.any(name='Administrator'),User.active==True).all()) > 0 :
        # No active Administrator yet: drop the inactive bootstrap account
        # and promote this user to Administrator.
        if default_user is not None and default_user.has_role('Administrator') and not default_user.active:
            db.session.delete(default_user)
            db.session.commit()
        user_datastore.add_role_to_user(user.email, 'Administrator')
        return
    user_datastore.add_role_to_user(user.email, 'User')
@user_logged_in.connect_via(application)
def on_user_logged_in(sender, user):
    """Login hook: seed the user's pg_bin_dir preference on first login.

    Picks the newest locally installed PostgreSQL bin directory under
    PGC_HOME, if any. Best-effort: any failure is swallowed so login is
    never blocked.
    """
    try:
        from pgadmin.model import UserPreference, Preferences
        bin_pref = Preferences.query.filter_by(
            name="pg_bin_dir"
        ).order_by("id").first()
        check_pref = UserPreference.query.filter_by(
            pid=bin_pref.id,
            uid=user.id
        ).order_by("pid")
        if check_pref.count() > 0:
            # Preference already set for this user; leave it untouched.
            pass
        else:
            path = None
            # Search newest-to-oldest so the most recent version wins.
            for p in ["pg10", "pg96", "pg95", "pg94"]:
                bin_path = os.path.join(PGC_HOME, p, "bin")
                if os.path.exists(bin_path):
                    path = bin_path
                    break
            if path:
                pref = UserPreference(
                    pid=bin_pref.id,
                    uid=user.id,
                    value=path
                )
                db.session.add(pref)
                db.session.commit()
    except Exception as e:
        # Deliberate best-effort: preference seeding must not block login.
        pass
# Register the feature blueprints under their API prefixes.
from pgstats import pgstats
application.register_blueprint(pgstats, url_prefix='/pgstats')
from credentials import credentials
application.register_blueprint(credentials, url_prefix='/api/pgc/credentials')
from CloudHandler import cloud
application.register_blueprint(cloud, url_prefix='/api/pgc/instances')
from CloudCreateHandler import _cloud_create
application.register_blueprint(_cloud_create, url_prefix='/api/pgc/create')
from ProvisionHandler import _pgc_provision
application.register_blueprint(_pgc_provision, url_prefix='/api/pgc/provision')
from BackupRestore import _backrest
application.register_blueprint(_backrest, url_prefix='/api/pgc')
from login_controller import _user
application.register_blueprint(_user, url_prefix='/api/login')
from UserHandler import _user_management
application.register_blueprint(_user_management, url_prefix='/api/user')
# Convenience alias used by the resource classes below.
db_session = db.session
class pgcRestApi(Resource):
    """REST endpoint that forwards a pgc command string to the pgc backend."""

    @auth_required('token', 'session')
    def get(self, arg):
        """Run *arg* through pgc; Developers may only run read-only commands."""
        if not current_user.has_role("Developer"):
            return pgc.get_data(arg)
        # Developers are restricted to this read-only command whitelist.
        allowed = ('list', 'lablist', 'info', 'status', 'register', 'metalist')
        if arg.split(" ")[0] in allowed:
            return pgc.get_data(arg)
        return unauth_handler()
api.add_resource(pgcRestApi,
                 '/api/pgc/<string:arg>')
class checkInitLogin(Resource):
    """Report, exactly once, whether this is the first login after registering."""

    @auth_required('token', 'session')
    def get(self):
        """Return True once after the initial login, then clear the flag."""
        first_login = session.get('initial-logged-in')
        if first_login and is_ami.get('rc') != 2:
            session['initial-logged-in'] = False
            return True
        return False
api.add_resource(checkInitLogin,
                 '/check_init_login')
class pgcUtilRelnotes(Resource):
    """Serve a component's release notes rendered from markdown to HTML."""

    @auth_required('token', 'session')
    def get(self, comp, version=None):
        """Return JSON with the markdown-rendered release notes for *comp*.

        When *version* is omitted, the notes for the current version are used.
        """
        json_dict = {}
        v=version
        import mistune, util, sys
        # Python 2: force the raw notes into unicode, dropping undecodable
        # bytes, before handing them to the markdown renderer.
        if version == None:
            rel_notes = unicode(str(util.get_relnotes (comp)),sys.getdefaultencoding(),errors='ignore').strip()
        else:
            rel_notes=unicode(str(util.get_relnotes (comp, version)),sys.getdefaultencoding(),errors='ignore').strip()
        json_dict['component'] = comp
        json_dict['relnotes'] = mistune.markdown(rel_notes)
        # # json_dict['plainText'] = rel_notes
        data = json.dumps([json_dict])
        return data
api.add_resource(pgcUtilRelnotes, '/api/utilRelnotes/<string:comp>','/api/utilRelnotes/<string:comp>/<string:version>')
class pgcApiHostCmd(Resource):
    """Run a pgc command against a named host, caching its password in session."""

    @auth_required('token', 'session')
    @roles_accepted('Administrator', 'User')
    def get(self, pgc_cmd, host_name,pwd=None):
        """Execute *pgc_cmd* on *host_name*.

        The host's password is cached in the session under "<host>_pwd"
        (or "localhost_pwd") and reused when *pwd* is not supplied; on a
        reported password failure the cached entry is dropped.
        """
        password=pwd
        pwd_session_name = "{0}_pwd".format(host_name)
        if session.get("hostname", "") == host_name:
            # Same host as last request: prefer the cached password unless a
            # fresh one was supplied.
            if not pwd and session.get(pwd_session_name):
                password = session.get(pwd_session_name)
            else:
                session[pwd_session_name] = pwd
        elif host_name is None or host_name in ("", "localhost"):
            pwd_session_name="localhost_pwd"
            if not pwd and session.get(pwd_session_name):
                password = session.get(pwd_session_name)
            else:
                session[pwd_session_name] = pwd
        session['hostname'] = host_name
        data = pgc.get_data(pgc_cmd, pgc_host=host_name, pwd=password)
        # Invalidate the cached password if the backend reports it failed.
        if len(data)>0 and data[0].get("pwd_failed"):
            if session.get(pwd_session_name):
                session.pop(pwd_session_name)
        return data
api.add_resource(pgcApiHostCmd,
                 '/api/hostcmd/<string:pgc_cmd>/<string:host_name>',
                 '/api/hostcmd/<string:pgc_cmd>/<string:host_name>/<string:pwd>/')
class pgdgCommand(Resource):
    """Run a pgdg repository command, reusing the session-cached host password."""

    @auth_required('token', 'session')
    @roles_accepted('Administrator','User')
    def get(self, repo_id, pgc_cmd, host=None, pwd=None):
        """Execute *pgc_cmd* for repository *repo_id* on *host*.

        Password caching mirrors pgcApiHostCmd: passwords live in the session
        under "<host>_pwd" / "localhost_pwd" and are dropped on failure.
        """
        password = pwd
        pwd_session_name = "{0}_pwd".format(host)
        if session.get("hostname", "") == host:
            if not pwd and session.get(pwd_session_name):
                password = session.get(pwd_session_name)
            else:
                session[pwd_session_name] = pwd
        elif host is None or host in ("", "localhost"):
            pwd_session_name = "localhost_pwd"
            if not pwd and session.get("localhost_pwd"):
                password = session.get("localhost_pwd")
            else:
                session['localhost_pwd'] = pwd
        data = pgc.get_pgdg_data(repo_id, pgc_cmd, pgc_host=host, pwd=password)
        # Invalidate the cached password if the backend reports it failed.
        if len(data)>0 and data[0].get("pwd_failed"):
            if session.get(pwd_session_name):
                session.pop(pwd_session_name)
        return data
api.add_resource(pgdgCommand,
                 '/api/pgdg/<string:repo_id>/<string:pgc_cmd>',
                 '/api/pgdg/<string:repo_id>/<string:pgc_cmd>/<string:host>',
                 '/api/pgdg/<string:repo_id>/<string:pgc_cmd>/<string:host>/<string:pwd>')
class pgdgHostCommand(Resource):
    """Run a pgdg repo command for one component on an optional remote host."""

    @auth_required('token', 'session')
    @roles_accepted('Administrator','User')
    def get(self, repo_id, pgc_cmd, comp, host=None):
        """Forward the repo command for *comp* to the pgc backend."""
        return pgc.get_pgdg_data(repo_id, pgc_cmd, component=comp, pgc_host=host)
api.add_resource(pgdgHostCommand, '/api/pgdghost/<string:repo_id>/<string:pgc_cmd>/<string:comp>',
                 '/api/pgdghost/<string:repo_id>/<string:pgc_cmd>/<string:comp>/<string:host>')
class TestConn(Resource):
    """Verify that an SSH connection can be made with the supplied credentials."""

    @auth_required('token', 'session')
    @roles_accepted('Administrator', 'User')
    def get(self):
        """Attempt an SSH connection; return a one-element JSON list of outcome."""
        query = request.args
        username = query.get('user')
        password = query.get('password')
        ssh_key = query.get('ssh_key')
        sudo_pwd = query.get('ssh_sudo_pwd')
        host = query.get('host')
        from PgcRemote import PgcRemote
        json_dict = {}
        try:
            remote = PgcRemote(host, username, password=password, ssh_key=ssh_key, sudo_pwd=sudo_pwd)
            # connect() is only invoked explicitly when no sudo password is
            # given -- presumably PgcRemote connects itself in the sudo case;
            # confirm against PgcRemote.
            if not sudo_pwd:
                remote.connect()
            json_dict['state'] = "success"
            json_dict['msg'] = "Testing Connection Successful."
            data = json.dumps([json_dict])
            remote.disconnect()
        except Exception as e:
            errmsg = "ERROR: Cannot connect to " + username + "@" + host + " - " + str(e)
            json_dict['state'] = "error"
            json_dict['msg'] = errmsg
            data = json.dumps([json_dict])
        return data
api.add_resource(TestConn, '/api/testConn')
from responses import Result, InvalidParameterResult, ServerErrorResult
class TestCloudConnection(Resource):
    """Validate cloud provider credentials via the `pgc test-cred` command."""

    @auth_required('token', 'session')
    def post(self):
        """Check the posted credentials for aws/azure/vmware.

        Expects JSON {'params': {'cloud_type': ..., 'credentials': {...}}}.
        Returns a Result/ServerErrorResult/InvalidParameterResult response.
        """
        payload = request.get_json()['params']
        if not set(("cloud_type", "credentials")).issubset(payload):
            return InvalidParameterResult(errors=["Both cloud_type and credentials are required"]).http_response()
        if payload["cloud_type"] not in ('aws', 'azure', 'vmware'):
            return InvalidParameterResult(errors=["Possible values for cloud_type are aws/azure/vmware"]).http_response()
        pgcCmd = "test-cred --cloud=" + payload["cloud_type"]
        pgcCmd = pgcCmd + " --credentials \'" + json.dumps(payload["credentials"]) + "\'"
        data = pgc.get_cmd_data(pgcCmd)
        if len(data) == 0:
            return ServerErrorResult().http_response()
        # BUG FIX: the original condition `state != 'info' or state ==
        # 'completed'` made the second clause unreachable and rejected
        # successful 'completed' states; treat both 'info' and 'completed'
        # as success.
        if data[0]['state'] not in ('info', 'completed'):
            return ServerErrorResult(state=data[0]['state'], message=data[0].get('msg')).http_response()
        return Result(200, data[0]['state'], data[0]['msg']).http_response()
api.add_resource(TestCloudConnection, '/api/testCloudConn')
class checkUser(Resource):
    """Test SSH access for a stored, named credential and probe for pgc paths."""

    @auth_required('token', 'session')
    @roles_accepted('Administrator', 'User')
    def get(self):
        """Connect to ?hostname= using the stored ?cred_name= credential.

        Returns a one-element JSON list: {'state': 'success', ...existing
        pgc install paths...} or {'state': 'error', 'msg': ...}.
        """
        host = request.args.get('hostname')
        cred_name = request.args.get('cred_name')
        import util
        cred_info = util.get_credentials_by_name(cred_name)
        enc_secret = util.get_value("GLOBAL", "SECRET", "")
        # Credentials are encrypted with the global secret + per-credential uuid.
        enc_key = "{0}{1}".format(enc_secret, cred_info.get("cred_uuid"))
        username = cred_info.get("ssh_user")
        password= ""
        if cred_info.get("ssh_passwd"):
            password = decrypt(cred_info.get("ssh_passwd"),enc_key)
        ssh_key = ""
        if cred_info.get("ssh_key"):
            ssh_key = decrypt(cred_info.get("ssh_key"), enc_key)
        sudo_pwd = ""
        if cred_info.get("ssh_sudo_pwd"):
            sudo_pwd = decrypt(cred_info.get("ssh_sudo_pwd"), enc_key)
        # username = request.args.get('username')
        # password = request.args.get('password')
        # ssh_key = request.args.get('ssh_key')
        # sudo_pwd = request.args.get('sudo_pwd', None)
        from PgcRemote import PgcRemote
        json_dict = {}
        try:
            remote = PgcRemote(host, username, password=password, ssh_key=ssh_key, sudo_pwd=sudo_pwd)
            if not sudo_pwd:
                remote.connect()
            json_dict['state'] = "success"
            # Best-effort probe for an existing pgc installation on the host.
            try:
                remote_pgc_path = remote.get_exixting_pgc_path()
                for key in remote_pgc_path.keys():
                    json_dict[key] = remote_pgc_path[key]
            except Exception as e:
                print (str(e))
                pass
            data = json.dumps([json_dict])
            remote.disconnect()
        except Exception as e:
            errmsg = "ERROR: Cannot connect to " + username + "@" + host + " - " + str(e)
            json_dict['state'] = "error"
            json_dict['msg'] = errmsg
            data = json.dumps([json_dict])
        return data
api.add_resource(checkUser, '/api/checkUser')
class checkHostAccess(Resource):
    """Check SSH access to a registered pgc host and whether the user can sudo."""

    @auth_required('token', 'session')
    @roles_accepted('Administrator', 'User')
    def get(self):
        """Connect with the host's stored credentials; report state and isSudo.

        Query args: hostname (registered pgc host name), pwd (optional sudo
        password to test with). Returns a one-element JSON list.
        """
        # BUG FIX: `util` was referenced below without ever being imported in
        # this scope (other resources import it locally), raising NameError.
        import util
        host = request.args.get('hostname')
        check_sudo_password = request.args.get('pwd')
        pgc_host_info = util.get_pgc_host(host)
        pgc_host = pgc_host_info.get('host')
        ssh_cred_id = pgc_host_info.get('ssh_cred_id')
        cred_info = util.get_credentials_by_uuid(ssh_cred_id)
        enc_secret = util.get_value("GLOBAL", "SECRET", "")
        # Credentials are encrypted with the global secret + per-credential uuid.
        enc_key = "{0}{1}".format(enc_secret, cred_info.get("cred_uuid"))
        pgc_user = cred_info.get("ssh_user")
        pgc_passwd = ""
        if cred_info.get("ssh_passwd"):
            pgc_passwd = decrypt(cred_info.get("ssh_passwd"), enc_key)
        pgc_ssh_key = ""
        if cred_info.get("ssh_key"):
            pgc_ssh_key = decrypt(cred_info.get("ssh_key"), enc_key)
        util.update_cred_used(cred_info.get("cred_uuid"))
        from PgcRemote import PgcRemote
        json_dict = {}
        try:
            remote = PgcRemote(pgc_host, pgc_user, password=pgc_passwd, ssh_key=pgc_ssh_key, sudo_pwd=check_sudo_password)
            remote.connect()
            is_sudo = remote.has_root_access()
            json_dict['state'] = "success"
            json_dict['isSudo'] = is_sudo
            data = json.dumps([json_dict])
            remote.disconnect()
        except Exception as e:
            # BUG FIX: the original referenced an undefined name `username`
            # here, so the handler raised NameError and masked the real
            # connection error; use the credential's ssh user instead.
            errmsg = "ERROR: Cannot connect to " + "{0}@{1}".format(pgc_user, host) + " - " + str(e)
            json_dict['state'] = "error"
            json_dict['msg'] = errmsg
            data = json.dumps([json_dict])
        return data
api.add_resource(checkHostAccess, '/api/checkUserAccess')
class initPGComp(Resource):
    """Initialize a PostgreSQL component on a remote pgc host."""

    @auth_required('token', 'session')
    @roles_accepted('Administrator', 'User')
    def get(self, host, comp, pgpasswd, username=None, password=None):
        """Stage /tmp/.pgpass on the host and run `pgc init` for *comp*.

        When username/password are not supplied, SSH credentials are loaded
        from the stored pgc host metadata and decrypted.
        """
        from PgcRemote import PgcRemote
        json_dict = {}
        if password == None or username == None:
            import util
            pgc_host_info = util.get_pgc_host(host)
            ssh_host = pgc_host_info.get('host')
            ssh_host_name = pgc_host_info.get('host_name')
            ssh_cred_id = pgc_host_info.get('ssh_cred_id')
            cred_info = util.get_credentials_by_uuid(ssh_cred_id)
            enc_secret = util.get_value("GLOBAL", "SECRET", "")
            enc_key = "{0}{1}".format(enc_secret, cred_info.get("cred_uuid"))
            ssh_username = cred_info.get("ssh_user")
            # BUG FIX: the original initialized `password` here instead of
            # `ssh_password`, leaving ssh_password undefined (NameError) for
            # credentials without a stored ssh password.
            ssh_password = ""
            if cred_info.get("ssh_passwd"):
                ssh_password = decrypt(cred_info.get("ssh_passwd"), enc_key)
            ssh_key = ""
            if cred_info.get("ssh_key"):
                ssh_key = decrypt(cred_info.get("ssh_key"), enc_key)
            sudo_pwd = ""
            if cred_info.get("ssh_sudo_pwd"):
                sudo_pwd = decrypt(cred_info.get("ssh_sudo_pwd"), enc_key)
            is_sudo = pgc_host_info.get('is_sudo')
            util.update_cred_used(cred_info.get("cred_uuid"))
        # NOTE(review): when username/password ARE supplied, ssh_host /
        # ssh_username / ssh_password are never assigned and the code below
        # fails; that URL variant appears unused -- confirm before relying
        # on it.
        try:
            remote = PgcRemote(ssh_host, ssh_username, password=ssh_password, ssh_key=ssh_key)
            remote.connect()
            # Stage the pg password file consumed by `pgc init`.
            remote.add_file('/tmp/.pgpass', pgpasswd)
            remote.disconnect()
            data = pgc.get_data("init", comp, ssh_host_name, '/tmp/.pgpass')
        except Exception as e:
            errmsg = "ERROR: Cannot connect to " + ssh_username + "@" + ssh_host + " - " + str(e.args[0])
            json_dict['state'] = "error"
            json_dict['msg'] = errmsg
            data = json.dumps([json_dict])
        return data
api.add_resource(initPGComp, '/api/initpg/<string:host>/<string:comp>/<string:pgpasswd>','/api/initpg/<string:host>/<string:comp>/<string:pgpasswd>/<string:username>/<string:password>')
class bamUserInfo(Resource):
    """Basic profile info (email, admin flag, gravatar) for the logged-in user."""

    @auth_required('token', 'session')
    def get(self):
        """Return a dict describing the current user; empty if not authenticated."""
        info = {}
        if not current_user.is_authenticated:
            return info
        email = current_user.email
        info['email'] = email
        info['isAdmin'] = current_user.has_role("Administrator")
        # Gravatar keys avatars by the md5 hash of the lower-cased email.
        digest = hashlib.md5(email.lower()).hexdigest()
        info['gravatarImage'] = "https://www.gravatar.com/avatar/" + digest + "?d=retro"
        return info
api.add_resource(bamUserInfo, '/api/userinfo')
class checkUserRole(Resource):
    """Report whether the current user holds the restricted Developer role."""

    @auth_required('token', 'session')
    def get(self):
        """Return {'code': 1, 'role': 'Developer'} for developers, else {'code': 0}."""
        if current_user.has_role("Developer"):
            return {'code': 1, 'role': "Developer"}
        return {'code': 0}
api.add_resource(checkUserRole, '/api/checkUserRole')
class getRecentReports(Resource):
    """List previously generated HTML reports of a given type, newest first."""

    @auth_required('token', 'session')
    def get(self, report_type):
        """Return {'data': [...]} describing each .html file in reports/<type>."""
        report_dir = os.path.join(reports_path, report_type)
        entries = []
        if os.path.isdir(report_dir):
            def mtime_of(fname):
                return os.stat(os.path.join(report_dir, fname)).st_mtime
            # Most recently modified reports first.
            for fname in sorted(os.listdir(report_dir), key=mtime_of, reverse=True):
                if not fname.endswith(".html"):
                    continue
                modified = datetime.fromtimestamp(mtime_of(fname)).strftime('%Y-%m-%d %H:%M:%S')
                entries.append({
                    'file': fname,
                    'file_link': "reports/" + report_type + "/" + fname,
                    'mtime': modified,
                })
        return {'data': entries}
api.add_resource(getRecentReports, '/api/getrecentreports/<string:report_type>')
class CheckConn(Resource):
    """Test a direct PostgreSQL connection with the posted parameters."""

    @auth_required('token', 'session')
    @roles_accepted('Administrator', 'User')
    def post(self):
        """Try to connect to host:port/dbname as username.

        Returns {'error': 0, 'msg': ...} on success or {'error': 1,
        'msg': <reason>} on failure.
        """
        result = {}
        args = request.json.get('params')
        host = args.get('host')
        user = args.get('username')
        password = args.get('password')
        dbname = args.get('dbname')
        port = args.get('port')
        try:
            from PgInstance import PgInstance
            remote_conn = PgInstance(str(host), str(user), str(dbname), int(port), str(password))
            remote_conn.connect()
            remote_conn.close()
            result['error'] = 0
            # BUG FIX: corrected the misspelled user-facing message
            # ("Sucessfully" -> "Successfully").
            result['msg'] = 'Successfully Connected.'
        except Exception as e:
            result['error'] = 1
            result['msg'] = str(e)
        return result
api.add_resource(CheckConn, '/check_pg_conn')
class GenerateReports(Resource):
    """Generate a SQL profiler report from the posted query parameters."""

    @auth_required('token', 'session')
    def post(self):
        """Build the report; return {'report_file', 'error': 0} or an error payload."""
        args = request.json['data']
        from ProfilerReport import ProfilerReport
        try:
            report = ProfilerReport(args)
            generated = report.generateSQLReports(
                args.get('pgQuery'),
                args.get('pgTitle'),
                args.get('pgDesc'))
            result = {'report_file': generated, 'error': 0}
        except Exception as e:
            result = {'error': 1, 'msg': str(e)}
        return result
api.add_resource(GenerateReports, '/api/generate_profiler_reports')
class RemoveReports(Resource):
    """Delete generated report files of a given type."""

    @auth_required('token', 'session')
    def post(self, report_type):
        """Remove each file named in the request body from reports/<report_type>.

        Returns {'error': 0, 'msg': 'success'}; on failure {'error': 1,
        'msg': <reason>} (files removed before the failure stay deleted).
        """
        # NOTE: removed an unused local `from ProfilerReport import
        # ProfilerReport` present in the original.
        try:
            recent_reports_path = os.path.join(reports_path, report_type)
            for fileName in request.json:
                os.remove(os.path.join(recent_reports_path, fileName))
            result = {}
            result['msg'] = 'success'
            result['error'] = 0
        except Exception as e:
            result = {}
            result['error'] = 1
            result['msg'] = str(e)
            # BUG FIX: `print e` is Python-2-only syntax; the function form
            # matches the rest of the file and works on Python 2 and 3.
            print(e)
        return result
api.add_resource(RemoveReports, '/api/remove_reports/<string:report_type>')
class GetEnvFile(Resource):
    """Expose PG* connection settings from a component's env file."""

    @auth_required('token', 'session')
    def get(self, comp):
        """Load the env file for *comp* and return PGUSER/PGDATABASE/PGPORT."""
        import util
        try:
            # util.read_env_file populates os.environ as a side effect.
            util.read_env_file(comp)
            result = {
                'PGUSER': os.environ['PGUSER'],
                'PGDATABASE': os.environ['PGDATABASE'],
                'PGPORT': os.environ['PGPORT'],
            }
        except Exception as e:
            result = {'error': 1, 'msg': str(e)}
        return result
api.add_resource(GetEnvFile, '/api/read/env/<string:comp>')
class AddtoMetadata(Resource):
    """Register discovered PostgreSQL instances as pgAdmin Server records."""

    @auth_required('token', 'session')
    @roles_accepted('Administrator','User')
    def post(self):
        """Add or update Server metadata for one or many PG instances.

        Payload {'params': {...}} may describe a single instance, carry
        'multiple' (a list of instances), or 'remotehost' (register every
        initialized PG component found on that pgc host).
        """
        def add_to_pginstances(pg_arg):
            # Create or update one Server row; returns the server id, or
            # None if anything failed (errors are printed, not raised).
            server_id = None
            try:
                component_name = pg_arg.get("component")
                component_port = pg_arg.get("port", 5432)
                component_host = pg_arg.get("host", "localhost")
                component_proj = pg_arg.get("project")
                component_db = pg_arg.get("db", "postgres")
                component_user = pg_arg.get("user", "postgres")
                gid = pg_arg.get("gid")
                sid = pg_arg.get("sid")
                servergroup_id=1
                is_rds = pg_arg.get("rds")
                is_new =True
                discovery_id = "BigSQL PostgreSQL"
                if is_rds:
                    # RDS instances are grouped under a server group named
                    # after their AWS region (created on demand).
                    discovery_id = "RDS"
                    servername = component_name
                    server_group_name = pg_arg.get("region", "AWS RDS")
                    rds_serverGroup = ServerGroup.query.filter_by(
                        user_id=current_user.id,
                        name=server_group_name
                    ).order_by("id")
                    if rds_serverGroup.count() > 0:
                        servergroup = rds_serverGroup.first()
                        servergroup_id = servergroup.id
                    else:
                        try:
                            sg = ServerGroup(
                                user_id=current_user.id,
                                name=server_group_name)
                            db.session.add(sg)
                            db.session.commit()
                            servergroup_id = sg.id
                        except sqlite3.IntegrityError as e:
                            # Concurrent creation race: re-query and reuse
                            # the group another request just inserted.
                            err_msg = str(e)
                            if err_msg.find("UNIQUE constraint failed") >= 0:
                                rds_serverGroup = ServerGroup.query.filter_by(
                                    user_id=current_user.id,
                                    name=server_group_name
                                ).order_by("id")
                                if rds_serverGroup.count() > 0:
                                    servergroup = rds_serverGroup.first()
                                    servergroup_id = servergroup.id
                            else:
                                print (err_msg)
                                result = {}
                                result['error'] = 1
                                result['msg'] = err_msg
                                return result
                else:
                    if gid:
                        # Caller pinned a server group; sid means update an
                        # existing Server row instead of inserting.
                        servername=component_name
                        servergroup_id=gid
                        if sid:
                            component_server = Server.query.filter_by(
                                id=sid,
                                user_id=current_user.id,
                            ).first()
                            is_new=False
                    else:
                        servername = "{0}({1})".format(component_name, component_host)
                        if component_host in ("localhost", ""):
                            component_host = "localhost"
                            servername = "{0}({1})".format(component_name, component_host)
                        else:
                            # Resolve a registered pgc host name to its
                            # actual address, falling back to the raw value.
                            import util
                            host_info = util.get_pgc_host(component_host)
                            component_host = host_info.get('host')
                            if component_host == '':
                                component_host = pg_arg.get("host", "localhost")
                        user_id = current_user.id
                        servergroups = ServerGroup.query.filter_by(
                            user_id=user_id
                        ).order_by("id")
                        if servergroups.count() > 0:
                            servergroup = servergroups.first()
                            servergroup_id = servergroup.id
                        else:
                            sg = ServerGroup(
                                user_id=current_user.id,
                                name="Servers")
                            db.session.add(sg)
                            db.session.commit()
                            servergroup_id = sg.id
                        # Reuse an existing Server row with the same identity
                        # (name/host/group/port) rather than duplicating it.
                        component_server = Server.query.filter_by(
                            name=servername,
                            host=component_host,
                            servergroup_id=servergroup_id,
                            port=component_port
                        ).first()
                        if component_server:
                            is_new=False
                        else:
                            is_new=True
                if is_new:
                    svr = Server(user_id=current_user.id,
                                 servergroup_id=servergroup_id,
                                 name=servername,
                                 host=component_host,
                                 port=component_port,
                                 maintenance_db=component_db,
                                 username=component_user,
                                 ssl_mode='prefer',
                                 comment=component_proj,
                                 discovery_id=discovery_id)
                    db_session.add(svr)
                    db_session.commit()
                    server_id = svr.id
                else:
                    component_server.servergroup_id=servergroup_id
                    component_server.name=servername
                    component_server.host=component_host
                    component_server.port=component_port
                    component_server.maintenance_db=component_db
                    component_server.username=component_user
                    db_session.commit()
            except Exception as e:
                # Best-effort: report and continue so one bad instance does
                # not abort a multi-instance registration.
                print ("Failed while adding/updating pg instance in metadata :")
                print (str(e))
                pass
            return server_id
        result = {}
        result['error'] = 0
        args = request.json.get("params")
        is_multiple = args.get("multiple")
        remote_host = args.get("remotehost")
        if is_multiple:
            for pg_data in args.get("multiple"):
                server_id = add_to_pginstances(pg_data)
        else:
            if remote_host:
                # Register every initialized PG component on the remote host.
                components_list = pgc.get_data("status", pgc_host=remote_host)
                for c in components_list:
                    if c.get("category") == 1 and c.get("state") != "Not Initialized":
                        comp_args = {}
                        comp_args['component'] = c.get("component")
                        comp_args['port'] = c.get("port")
                        comp_args['host'] = remote_host
                        server_id = add_to_pginstances(comp_args)
            else:
                server_id = add_to_pginstances(args)
        # NOTE(review): 'sid' reflects only the last processed instance in
        # the multiple/remotehost paths.
        result['sid'] = server_id
        return result
api.add_resource(AddtoMetadata, '/api/add_to_metadata')
class DeleteFromMetadata(Resource):
    """Remove a registered Server entry (and its driver manager) for this user."""

    @auth_required('token', 'session')
    @roles_accepted('Administrator', 'User')
    def post(self):
        """Delete the server identified by 'sid' in the posted JSON.

        Returns {'error': 0, 'msg': 'Server deleted'} or an error payload.
        """
        args = request.json
        gid = args.get('gid')  # currently unused; kept for request compatibility
        sid = args.get('sid')
        result = {}
        servers = Server.query.filter_by(user_id=current_user.id, id=sid)
        # BUG FIX: a Query object is never None, so the original
        # "if servers is None" branch could never run; test for an empty
        # result set instead, and return early so the error message is not
        # overwritten by the success assignments below.
        if servers.first() is None:
            result['error'] = 1
            result['msg'] = 'The specified server could not be found. Does the user have permission to access the server?'
            return result
        try:
            for s in servers:
                get_driver(PG_DEFAULT_DRIVER).delete_manager(s.id)
                db.session.delete(s)
                db.session.commit()
        except Exception as e:
            result['error'] = 1
            # BUG FIX: e.message is not available on all exception types
            # (and is gone in Python 3); use str(e).
            result['msg'] = str(e)
            return result
        result['error'] = 0
        result['msg'] = "Server deleted"
        return result
api.add_resource(DeleteFromMetadata, '/api/delete_from_metadata')
def get_process_status(process_log_dir,line_count=None):
    """Read a detached process's status/out/err files into one dict.

    Parameters
    ----------
    process_log_dir : str
        Directory containing the 'status' (JSON), 'out' and 'err' files.
    line_count : int or None
        When given, only the last line_count lines are collected overall;
        stdout is consumed first and stderr fills the remainder.

    Returns
    -------
    dict
        The parsed status JSON (if present) plus 'out_data', the combined
        stdout/stderr text with CRs normalized to newlines.
    """
    process_dict = {}
    status_file = os.path.join(process_log_dir, "status")
    if os.path.exists(status_file):
        with open(status_file) as data_file:
            data = json.load(data_file)
            process_dict = data
    err_file = os.path.join(process_log_dir, "err")
    out_file = os.path.join(process_log_dir, "out")
    exit_code = process_dict.get("exit_code", None)
    err_data_content = None
    out_data_content = None
    process_dict['out_data'] = ""
    with open(out_file) as out_data:
        if line_count is None:
            out_data_content = out_data.readlines()
        else:
            out_data_content = out_data.readlines()[-line_count:]
            # stdout consumed part of the budget; stderr gets what is left
            line_count = line_count - len(out_data_content)
        out_data_content = "".join(out_data_content).replace("\r", "\n").strip()
    with open(err_file) as err_data:
        if line_count is None:
            err_data_content = err_data.readlines()
        else:
            err_data_content = err_data.readlines()[-line_count:]
        err_data_content = "".join(err_data_content).replace("\r", "\n").strip()
    # stdout first, then stderr, joined when both are non-empty
    if err_data_content and out_data_content:
        process_dict['out_data'] = '\n'.join([out_data_content, err_data_content])
    elif err_data_content:
        process_dict['out_data'] = err_data_content
    elif out_data_content:
        process_dict['out_data'] = out_data_content
    return process_dict
def get_current_time(format='%Y-%m-%d %H:%M:%S.%f %z'):
    """
    Generate the current time string in the given format.
    """
    aware_now = datetime.utcnow().replace(tzinfo=pytz.utc)
    return aware_now.strftime(format)
class pgdgAction(Resource):
    """Launch a detached `pgc` repo action (register/unregister/install...)."""

    @auth_required('token', 'session')
    @roles_accepted('Administrator','User')
    def post(self):
        """Build the pgc command line and start it as a detached process.

        Expects JSON with 'component', 'repo', 'action' and optional
        'host'/'pwd'. Returns the detached process's status, log dir and id.
        """
        result = {}
        args = request.json
        component_name = args.get("component")
        component_host = args.get("host","localhost")
        pwd=args.get("pwd")
        # Reuse the session-cached password for this host (see pgcApiHostCmd).
        pwd_session_name = "{0}_pwd".format(component_host)
        if session.get("hostname", "") == component_host:
            if not pwd and session.get(pwd_session_name):
                pwd = session.get(pwd_session_name)
        session['hostname'] = component_host
        if pwd:
            session[pwd_session_name] = pwd
        repo = args.get("repo")
        action = args.get("action")
        from detached_process import detached_process
        ctime = get_current_time(format='%y%m%d%H%M%S%f')
        # register/unregister operate on the repo itself; everything else is
        # a per-component package action.
        if action=="register" or action=="unregister":
            report_cmd = PGC_HOME + os.sep + "pgc " + action + " REPO " + repo + " -y"
        else:
            report_cmd = PGC_HOME + os.sep + "pgc repo-pkgs " + repo + " " + action + " " + component_name
        if not pwd:
            report_cmd = report_cmd + " --no-tty"
        isLocal = True
        if component_host and component_host != "localhost":
            isLocal = False
            report_cmd = report_cmd + " --host \"" + component_host + "\""
        # Windows paths need their backslashes escaped for the shell.
        if this_uname == "Windows":
            report_cmd = report_cmd.replace("\\", "\\\\")
        process_status = detached_process(report_cmd, ctime, stdin_str=pwd, is_local=isLocal)
        result['error']=None
        result['status'] =process_status['status']
        result['log_dir'] = process_status['log_dir']
        result['process_log_id'] = process_status['process_log_id']
        result['cmd'] = report_cmd
        return result
api.add_resource(pgdgAction, '/api/pgdgAction')
class GenerateBadgerReports(Resource):
    """Kick off a detached pgBadger report generation process."""

    @auth_required('token', 'session')
    def post(self):
        """Start pgBadger over the posted log files; return pid/progress info.

        Expects JSON with 'log_files', 'db', 'jobs', 'log_prefix', 'title'.
        """
        result = {}
        args = request.json
        log_files = args.get("log_files")
        # Renamed local to avoid shadowing the module-level SQLAlchemy `db`.
        db_name = args.get("db")
        jobs = args.get("jobs")
        log_prefix = args.get("log_prefix")
        title = args.get("title")
        try:
            from BadgerReport import BadgerReport
            ctime = get_current_time(format='%y%m%d%H%M%S%f')
            badgerRpts = BadgerReport()
            pid_file_path = os.path.join(config.SESSION_DB_PATH, "process_logs", ctime)
            report_file = badgerRpts.generateReports(log_files, db_name, jobs, log_prefix, title, ctime, pid_file_path)
            process_log_dir = report_file['log_dir']
            report_status = get_process_status(process_log_dir)
            result['pid'] = report_status.get('pid')
            result['exit_code'] = report_status.get('exit_code')
            result['process_log_id'] = report_file["process_log_id"]
            if report_status.get('exit_code') is None:
                result['in_progress'] = True
            # Record the detached job so it appears in the background process
            # list; failure to record must not fail the request.
            try:
                j = Process(
                    pid=int(report_file["process_log_id"]), command=report_file['cmd'],
                    logdir=process_log_dir, desc=dumps("pgBadger Report"), user_id=current_user.id,
                    acknowledge='pgDevOps'
                )
                db_session.add(j)
                db_session.commit()
            except Exception as e:
                # BUG FIX: `print str(e)` is Python-2-only syntax; the
                # function form matches the rest of the file.
                print(str(e))
            if report_file['error']:
                result['error'] = 1
                result['msg'] = report_file['error']
            else:
                result['error'] = 0
                result['report_file'] = report_file['file']
                report_file_path = os.path.join(reports_path, report_file['file'])
                if not os.path.exists(report_file_path):
                    result['error'] = 1
                    result['msg'] = "Check the parameters provided."
        except Exception as e:
            result = {}
            result['error'] = 1
            result['msg'] = str(e)
            time.sleep(2)
        return result
api.add_resource(GenerateBadgerReports, '/api/generate_badger_reports')
class ComparePGVersions(Resource):
    """Compare the local PG component version against a remote host's."""

    @auth_required('token', 'session')
    @roles_accepted('Administrator', 'User')
    def get(self, host):
        """Return both version strings and a result_code.

        result_code: 0 = equal, 1 = local newer, 2 = remote newer.
        NOTE(review): the comparison is lexicographic on the raw version
        strings, which misorders e.g. '10.x' vs '9.x' -- confirm the version
        string format before relying on codes 1/2.
        """
        result={}
        local_pg_ver = pgc.get_data('info')[0]['version']
        result['local_pg_ver'] = local_pg_ver
        remote_pg_ver = pgc.get_data('info', pgc_host=host)[0]['version']
        result['remote_pg_ver'] = remote_pg_ver
        if local_pg_ver == remote_pg_ver:
            result_code = 0
        elif local_pg_ver > remote_pg_ver:
            result_code = 1
        elif local_pg_ver < remote_pg_ver:
            result_code = 2
        result['result_code'] = result_code
        return result
api.add_resource(ComparePGVersions, '/api/compare_pg_versions/<string:host>')
class GetBgProcessList(Resource):
    """List up to 100 of the current user's background processes."""
    @auth_required('token', 'session')
    @roles_accepted('Administrator', 'User')
    def get(self, process_type=None):
        """Return status dicts for recent processes, newest first.

        process_type optionally filters on the JSON-dumped `desc` stored
        when the process was created (e.g. "badger").
        """
        from sqlalchemy import desc
        result={}
        if process_type:
            #processes = Process.query.filter_by(user_id=current_user.id, desc=dumps(str(process_type))).order_by(db.func.COALESCE(Process.end_time,datetime.now()).desc(),Process.start_time.desc()).limit(100)
            processes = Process.query.filter_by(user_id=current_user.id, desc=dumps(str(process_type))).order_by(
                Process.start_time.desc()).limit(100)
        else:
            processes = Process.query.filter_by(user_id=current_user.id).order_by(Process.start_time.desc()).limit(100)
        # Old cleanup path kept disabled below; this stays False today.
        clean_up_old_process=False
        result['process'] = []
        for p in processes:
            proc_log_dir = os.path.join(config.SESSION_DB_PATH,
                                        "process_logs",
                                        p.pid)
            # Skip processes whose on-disk status directory is gone.
            if os.path.exists(proc_log_dir):
                proc_status = get_process_status(proc_log_dir)
                if p.acknowledge or proc_status.get("end_time") or p.end_time:
                    '''clean_up_old_process=True
                    db_session.delete(p)
                    try:
                        import shutil
                        shutil.rmtree(proc_log_dir, True)
                    except Exception as e:
                        pass
                    continue'''
                    pass
                try:
                    # Human-readable runtime; falls back to "now" while the
                    # process has no end_time yet.
                    stime = dateutil.parser.parse(proc_status.get("start_time"))
                    etime = dateutil.parser.parse(proc_status.get("end_time",get_current_time()))
                    from utils import get_readable_time_diff
                    execution_time = get_readable_time_diff((etime - stime).total_seconds())
                    proc_status['execution_time'] = execution_time
                except Exception as e:
                    print e
                    pass
                proc_status['process_failed'] = False
                proc_status['process_completed'] = True
                if proc_status.get("exit_code") is None:
                    # No exit code: still running, unless its OS pid vanished.
                    proc_status['process_completed'] = False
                    if not psutil.pid_exists(proc_status.get('pid')):
                        proc_status['process_completed'] = True
                        proc_status['process_failed'] = True
                        proc_status['error_msg'] = "Background process terminated unexpectedly."
                elif proc_status.get("exit_code") != 0:
                    proc_status['process_failed'] = True
                proc_status['process_log_id'] = p.pid
                #proc_status['process_type'] = "badger"
                if proc_status.get('report_file'):
                    # Badger reports are served from the "badger/" subfolder.
                    proc_status['file'] = "badger/" + proc_status.get('report_file')
                    proc_status['report_file'] = proc_status.get('report_file')
                result['process'].append(proc_status)
        if clean_up_old_process:
            db_session.commit()
        return result
api.add_resource(GetBgProcessList, '/api/bgprocess_list', '/api/bgprocess_list/<string:process_type>')
class GetBgProcessStatus(Resource):
    """Poll the status of a single background process by its log id."""
    @auth_required('token', 'session')
    def get(self,process_log_id):
        """Return the process status dict, augmented with timing and flags.

        Optional ?line_count=N bounds how much captured output is read.
        Sets process_completed / process_failed / error_msg, and for badger
        runs the served report path.
        """
        result = {}
        args = request.args
        line_count = None
        if 'line_count' in args:
            try:
                line_count = int(args['line_count'])
            except Exception as ex:
                # Bad query parameter: report it as a synthetic failure.
                result['exit_code'] = 3
                result['process_failed'] = True
                result['error_msg'] = str(ex)
                return result
        proc_log_dir = os.path.join(config.SESSION_DB_PATH,
                                    "process_logs",
                                    process_log_id)
        proc_status = get_process_status(proc_log_dir,line_count=line_count)
        proc_status['log_dir'] = proc_log_dir
        p = Process.query.filter_by(
            pid=process_log_id, user_id=current_user.id
        ).first()
        try:
            # Backfill the DB row from the on-disk status the first time we
            # observe start/end times.  (Raises if p is None — swallowed.)
            if p.start_time is None or p.end_time is None:
                p.start_time = proc_status['start_time']
                if 'exit_code' in proc_status and \
                        proc_status['exit_code'] is not None:
                    p.exit_code = proc_status['exit_code']
                    # We can't have 'end_time' without the 'exit_code'.
                    if 'end_time' in proc_status and proc_status['end_time']:
                        p.end_time = proc_status['end_time']
                db_session.commit()
        except Exception as e:
            pass
        stime = dateutil.parser.parse(proc_status.get("start_time"))
        etime = dateutil.parser.parse(proc_status.get("end_time") or get_current_time())
        from utils import get_readable_time_diff
        execution_time = get_readable_time_diff((etime - stime).total_seconds())
        proc_status['execution_time'] = execution_time
        proc_status['error_msg']=""
        proc_status['process_log_id'] = process_log_id
        proc_status['process_failed'] = False
        proc_status['process_completed'] = True
        # Exit code 2 from backup/restore signals a missing component; the
        # process prints "ERROR:component_required:<name>" on stdout.
        if proc_status.get("exit_code")==2 and (proc_status.get("process_type")=="backup" or proc_status.get("process_type")=="restore") :
            try:
                out_data = proc_status.get("out_data").strip().split(":")
                if out_data[0].strip() == "ERROR" and out_data[1].strip() == "component_required":
                    proc_status['component_required'] = out_data[2].strip()
            except Exception as e:
                pass
        #proc_status['process_type'] = "badger"
        if proc_status.get("exit_code") is None:
            # Still running — unless the OS pid has disappeared, in which
            # case the process died without writing an exit code.
            proc_status['process_completed'] = False
            if proc_status.get('pid'):
                if not psutil.pid_exists(proc_status.get('pid')):
                    proc_status['process_completed'] = True
                    proc_status['process_failed'] = True
                    proc_status['error_msg'] = "Background process terminated unexpectedly."
        elif proc_status.get("exit_code") != 0:
            proc_status['process_failed'] = True
            proc_status['error_msg'] = "Background process terminated unexpectedly."
        if proc_status.get('report_file'):
            # Badger reports are served from the "badger/" subfolder.
            proc_status['file'] = "badger/" + proc_status.get('report_file')
            proc_status['report_file'] = proc_status.get('report_file')
        result=proc_status
        return result
api.add_resource(GetBgProcessStatus, '/api/bgprocess_status/<string:process_log_id>')
@application.route('/api/dirlist', methods = ['POST'])
@auth_required('token', 'session')
@roles_accepted('Administrator', 'User')
def dirlist():
    """List directories under a base directory, locally or on a remote pgc host.

    JSON body: pgcHost (empty/"localhost" for local) and baseDir
    (defaults to the current user's home directory).
    """
    payload = request.json
    target_host = payload.get("pgcHost")
    start_dir = payload.get("baseDir", os.path.expanduser('~'))
    command = 'dirlist "' + start_dir + '"'
    if target_host not in ("", "localhost"):
        command += ' --host "' + target_host + '"'
    return json.dumps(pgc.get_data(command))
@application.route('/list')
@login_required
def list():
    """
    Method to get the list of components available.
    :return: It yields json string for the list of components.
    """
    # NOTE(review): this view name shadows the builtin `list`, and it calls
    # json.loads here while the sibling `details` view calls json.dumps —
    # confirm whether pgc.get_data returns a JSON string (loads is right)
    # or a Python object (dumps would be right).
    data = pgc.get_data("list")
    if request.is_xhr:
        return json.loads(data)
    return render_template('status.html', data=data)
@application.route('/details/<component>')
@login_required
def details(component):
    """Detailed info for one component: JSON for XHR calls, HTML otherwise."""
    info = pgc.get_data("info", component)
    if request.is_xhr:
        return json.dumps(info)
    return render_template('status.html', data=info)
@application.route('/status')
@login_required
def status():
    """Render the component status page."""
    component_status = pgc.get_data("status")
    return render_template('status.html', data=component_status)
@application.route('/')
@login_required
# @roles_accepted('Administrator','User')
def home():
    """Landing page for the logged-in user."""
    admin = current_user.has_role("Administrator")
    return render_template('index.html', user=current_user, is_admin=admin)
from responses import InvalidSessionResult
def unauth_handler():
    """Flask-Security hook: answer unauthenticated requests with our JSON error."""
    invalid = InvalidSessionResult()
    return invalid.http_response()
security.unauthorized_handler(unauth_handler)
|
# from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.template.context_processors import csrf
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth import login, logout
from django.db import connection
import os, requests, io
from regex import subf
from pybase64 import urlsafe_b64decode
from PIL import Image
from .forms import LoginForm
from .backend import LoginBackend
from core_empresa.models import LoginEmpresa
@csrf_protect
def login_view(request):
    """Handle the company login form.

    On a valid POST, authenticates via LoginBackend; on success the company
    is flagged authenticated and redirected to /vagas/.  Every failure path
    re-renders the form with a (Portuguese) error message.
    """
    if request.method == 'POST':
        try:
            form = LoginForm(request.POST)
            if form.is_valid():
                data = form.clean_form()
                # authenticate() returns the company on success, False for a
                # wrong password, None when no such company exists.
                login_company = LoginBackend.authenticate(request, data['email'], data['senha_hash'])
                if login_company is not None and login_company is not False:
                    login_company.is_authenticated = True
                    login_company.save()
                    form = LoginForm()
                    login(request, login_company, backend='empresa.backend.LoginBackend')
                    return redirect('/vagas/')
                else:
                    login_form = request.POST
                    if login_company is False:
                        error = 'Senha não confere'
                    else:
                        error = 'Não existe empresa com esse e-mail'
            else:
                login_form = request.POST
                error = 'Preencher campos de login corretamente'
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; any other failure shows a generic error.
            login_form = request.POST
            error = 'Não foi possível realizar o login. Tente novamente'
    else:
        form = LoginForm()
        error = None
        login_form = {
            'email': '',
            'senha': '',
        }
    context = {
        'login': login_form,
        'error': error,
    }
    context.update(csrf(request))
    return render(request, 'login/index.html', context)
def camera_view(request):
    """Face-recognition login: POSTed data-URL snapshot -> recognizer API -> login.

    Any recognition/DB failure redirects back to the login page; a GET just
    renders the camera capture page.
    """
    os.environ['NO_PROXY'] = '127.0.0.1'
    if request.method == 'POST':
        url_img = request.POST['url']
        # Strip the data-URL prefix and decode the base64 PNG payload.
        img_encode = subf('^data:image/png;base64,', '', url_img)
        img_decode = urlsafe_b64decode(img_encode)
        img = Image.open(io.BytesIO(img_decode))
        photo = io.BytesIO()
        img.save(photo, 'png')
        photo.seek(0)
        try:
            response = requests.post('http://127.0.0.1:5000/api/recognize', data={'group': 'empresa'}, files={ 'file': ('photo.png', photo, 'image/png') })
            if response.status_code != 200:
                return redirect('login')
            company_codigo = response.json()['reconhecimento']
            with connection.cursor() as cursor:
                # Parameterized query: the code comes from the recognizer.
                cursor.execute("SELECT id, email, senha_hash FROM empresa WHERE cod_treino=%s", [company_codigo])
                result = cursor.fetchone()
            if result is None:
                return redirect('login')
            data = { 'id': result[0], 'email': result[1], 'senha_hash': result[2] }
            login_company = LoginBackend.authenticate(request, data['email'], data['senha_hash'])
            if login_company is not None and login_company is not False:
                login_company.is_authenticated = True
                login_company.save()
                login(request, login_company, backend='empresa.backend.LoginBackend')
                return redirect('/vagas/')
            return redirect('login')
        except Exception:
            # Narrowed from a bare `except:`; any failure falls back to login.
            return redirect('login')
    return render(request, 'login/camera.html', {})
def readmore_view(request):
    """Static 'read more' page."""
    return render(request, 'login/readmore.html', {})


def contact_view(request):
    """Static contact page."""
    return render(request, 'login/contact.html', {})


def forgot_view(request):
    """Static 'forgot password' page."""
    return render(request, 'login/forgot.html', {})
def logout_view (request):
    """Log the company out, delete its session row, and redirect to login.

    Fix: the original used `try/finally` with a `return` inside `finally`,
    which silently swallows *every* exception, including SystemExit and
    KeyboardInterrupt (flake8 B012).  Cleanup failures are now caught
    explicitly as `Exception`, keeping the best-effort behaviour.
    """
    try:
        logout_email = request.user.email
        logout(request)
        company_session = LoginEmpresa.objects.get(email=logout_email)
        company_session.delete()
    except Exception:
        # Best-effort cleanup: even if it fails, still send the user
        # back to the login page.
        pass
    return redirect('login')
|
from api.models import employee, tasks, meetings, todo, scribling_data, category, projects
import datetime
from django.db.models import Q
import json
import time
from dateutil.parser import parse
from api.utils import *
# Per-day counts of tasks and todos for a 5-day window centred on today.
def taskOverview(request):
    """Return tasks/todo counts for today +/- 2 days for the logged-in user."""
    webresponse = {}
    response = {}
    try:
        webresponse['errorMessages'] = []
        response['task_details'] = []
        day_summaries = []
        current_day = datetime.datetime.now() + datetime.timedelta(-2)
        emp = employee.objects.get(email_id = str( request.session.get('loginID')))
        for _ in range(5):
            day_tasks = tasks.objects.filter(Q(scheduled_date=current_day) & Q(task_type=True) & Q(emp_id=emp))
            day_todos = todo.objects.filter(Q(scheduled_date=current_day) & Q(task_type=False) & Q(emp_id=emp))
            day_label = str(current_day.strftime("%d %B %Y")).split(" ")
            day_summaries.append({
                "date": str(day_label[0]),
                "tasks": len(day_tasks),
                "todo": len(day_todos),
                "week": week_day(current_day.weekday()) + ", " + day_label[1] + " " + day_label[2]
            })
            current_day = current_day + datetime.timedelta(1)
        response['task_details'] = day_summaries
        webresponse['data'] = response
    except Exception as name:
        webresponse['errorMessages'] = generate_error_response(name.__unicode__() )
    return json_response(webresponse)
# saves new tasks
def save_tasks (request):
    """Create a new todo item from the posted JSON.

    When no explicit date is sent (date == "null"), natural-language date
    words in the task text — "today", "tomorrow", weekday names (optionally
    prefixed "next "), "next week", or ":<date>" tokens — are consumed from
    the text to derive the scheduled date.  "+word" fills an empty project
    and "@word" an empty category.  Responds with the stored task plus
    refreshed project/category lists.
    """
    webresponse = {}
    req_data = json_request(request)
    response = {}
    try:
        weekMode =False
        webresponse['errorMessages'] = []
        webresponse['Status'] = False
        req_date = req_data['date']
        task = req_data['task']
        my_time = req_data['time']
        req_project = req_data['project'].strip()
        req_category = req_data['category'].strip()
        task = task.replace("\"", "")
        if req_project == "":
            req_project = "N/A"
        if req_category == "":
            req_category = "N/A"
        # Client sends a 12-hour clock; store as 24-hour HH:MM:SS.
        my_new_time = time.strftime("%H:%M:%S", time.strptime(my_time, "%I:%M:%S %p"))
        id = employee.objects.get(email_id = str( request.session.get('loginID')))
        if(req_date == "null"):
            # No explicit date: mine the task text for date keywords.
            current_date = datetime.datetime.now();
            if "today" in task.lower():
                task = task.replace("today", "")
                req_date = current_date
            elif "tomorrow" in task.lower():
                task = task.replace("tomorrow", "")
                req_date = current_date + datetime.timedelta(1)
            elif "monday" in task.lower():
                if "next monday" in task.lower():
                    task = task.replace("next ", "")
                    weekMode = True
                task = task.replace("monday", "")
                req_date = calculate_date(0, weekMode)
            elif "tuesday" in task.lower():
                if "next tuesday" in task.lower():
                    task = task.replace("next ", "")
                    weekMode = True
                task = task.replace("tuesday", "")
                req_date = calculate_date(1, weekMode)
            elif "wednesday" in task.lower():
                if "next wednesday" in task.lower():
                    task = task.replace("next ", "")
                    weekMode = True
                task = task.replace("wednesday", "")
                req_date = calculate_date(2, weekMode)
            elif "thursday" in task.lower():
                if "next thursday" in task.lower():
                    task = task.replace("next ", "")
                    weekMode = True
                task = task.replace("thursday", "")
                req_date = calculate_date(3,weekMode)
            elif "friday" in task.lower():
                if "next friday" in task.lower():
                    task = task.replace("next ", "")
                    weekMode = True
                task = task.replace("friday", "")
                req_date = calculate_date(4,weekMode)
            elif "saturday" in task.lower():
                if "next saturday" in task.lower():
                    task = task.replace("next ", "")
                    weekMode = True
                task = task.replace("saturday", "")
                req_date = calculate_date(5,weekMode)
            elif "sunday" in task.lower():
                if "next sunday" in task.lower():
                    task = task.replace("next ", "")
                    weekMode = True
                task = task.replace("sunday", "")
                req_date = calculate_date(6,weekMode)
            elif "next week" in task.lower():
                task = task.replace("next week", "")
                req_date = calculate_date(0,weekMode)
            else:
                # ":<date>" tokens are parsed with dateutil; any word that
                # is not such a token resets the date to today.
                for word in task.split():
                    if word.startswith(':'):
                        try:
                            req_date = parse(word.replace(":", ""))
                            task = task.replace(word, "")
                        except Exception as name:
                            req_date = current_date
                    else :
                        req_date = current_date
        if(req_project == "N/A"):
            # First "+project" token in the text fills the empty field.
            for word in task.split():
                if word.startswith('+'):
                    task = task.replace(word, "")
                    req_project = word.replace("+", "")
                    break
        if(req_category == "N/A"):
            # First "@category" token in the text fills the empty field.
            for word in task.split():
                if word.startswith('@'):
                    task = task.replace(word, "")
                    req_category = word.replace("@", "")
                    break
        todo_data = todo(emp_id=id, scheduled_date=req_date, tasks=task, task_type=False, priority=int(req_data['priority']), task_status=True, category=req_category, project=req_project, scheduled_time=my_new_time)
        todo_data.save()
        # Re-query all matching rows and take the last to recover the
        # freshly generated primary key for the response.
        todoTask = todo.objects.filter(Q(tasks=task) & Q(scheduled_date=req_date) & Q(emp_id=id) & Q(task_type=False) &Q(priority=int(req_data['priority'])) & Q(task_status=True) & Q(category=req_category) & Q( project=req_project) & Q( scheduled_time=my_new_time))
        count =todoTask.count()
        last_item = todoTask[ count-1 ]
        response_date = str(req_date).split(" ")
        response = generate_response(last_item)
        response ['projects'] = update_project(req_project, id)
        response ['categories'] =update_category(req_category, id)
        webresponse['Status'] = True
    except Exception as name:
        webresponse['errorMessages'] = generate_error_response(name.__unicode__() )
    webresponse['data'] = response
    return json_response(webresponse)
def profile_data(request):
    """Basic profile fields for the logged-in employee."""
    webresponse = {}
    webresponse['errorMessages'] = []
    try:
        emp = employee.objects.get(email_id = str( request.session.get('loginID')))
        webresponse['data'] = {
            "first_name" : emp.first_name,
            "Last_name" : emp.last_name,
            "image_url" : str(emp.image),
            "desig" : emp.designation,
            "email_id" : emp.email_id,
        }
    except Exception as name:
        webresponse['errorMessages'] = generate_error_response(name.__unicode__() )
    return json_response(webresponse)
# the below function returns the upcoming task summary
def upcomingTasks (request):
    """Dashboard summary: pending, today's, tomorrow's and the week's todos.

    Also returns the built-in category list plus the user's custom
    categories and projects.  Late in the week the "week" bucket switches
    to next week.
    """
    webresponse = {}
    response = {}
    try:
        webresponse ['errorMessages'] = []
        response ['pending_task'] = []
        response ['todays_task'] = []
        response ['tomarrows_task'] = []
        response ['week_task'] = []
        response ['week_heading'] = "This Week"
        response ['categories'] = []
        # Built-in categories; user-defined ones are appended below.
        categories = [ "feature", "support", "task", "codereview", "bug", "wireframe", "testcase"]
        id = employee.objects.get(email_id = str( request.session.get('loginID')))
        date = datetime.datetime.now()
        # pending tasks
        task_details = todo.objects.filter(Q(scheduled_date__lt=date) & Q(task_status=True) & Q(emp_id=id))
        response ['pending_task'] = fetch_taskDetails(task_details)
        # todays task
        task_details = todo.objects.filter(Q(scheduled_date=date) & Q(emp_id=id))
        response ['todays_task'] = fetch_taskDetails(task_details)
        # tomorrows task
        date += datetime.timedelta(1)
        task_details = todo.objects.filter(Q(scheduled_date=date) & Q(emp_id=id))
        response ['tomarrows_task'] = fetch_taskDetails(task_details)
        # task of next week or this week
        # NOTE(review): `date` was advanced above, so this is *tomorrow's*
        # weekday — confirm that is the intended pivot for the week split.
        week = date.weekday()
        if(week > 3):
            nextMonday = date + datetime.timedelta(7 - date.weekday())
            nextsatday = nextMonday + datetime.timedelta(5)
            task_details = todo.objects.filter(Q (scheduled_date__gte=nextMonday) & Q (scheduled_date__lte=nextsatday) & Q(emp_id=id))
            response ['week_task'] = fetch_taskDetails(task_details)
            response ['week_heading'] = "Next Week"
        else:
            nextday = date + datetime.timedelta(1)
            nextsatday = date + datetime.timedelta(4 - date.weekday()+1 )
            task_details = todo.objects.filter(Q (scheduled_date__gte=nextday) & Q (scheduled_date__lte=nextsatday) & Q(emp_id=id))
            response ['week_task'] = fetch_taskDetails(task_details)
        category_data = category.objects.filter(emp_id=id)
        for data in category_data:
            categories.append(data.category)
        response ['categories'] = categories
        project_data = projects.objects.filter(emp_id=id)
        project=[]
        for data in project_data:
            project.append(data.projects)
        response ['projects'] = project
        webresponse['data'] = response
    except Exception as name:
        webresponse['errorMessages'] = generate_error_response(name.__unicode__() )
    return json_response(webresponse)
# The below function returns the pending leave requests holidays and birthdays of this month
def otherdetails (request):
    # NOTE(review): placeholder implementation — the request parameters are
    # read but ignored, and hard-coded zero counts are returned.
    req_date = request.GET.get('date')
    id = request.GET.get('id')
    return json_response({"pending_leave_request" : "0", "holidays" :"0", "birthdays" :"0"})
def projectsummary (request):
    # NOTE(review): placeholder — always returns an empty project list.
    return json_response({"projects" : [ ] })
# Up to five of today's meetings for the dashboard.
def meetingsummary(request):
    """Return topic, time and id for at most five of today's meetings."""
    webresponse = {}
    response = {}
    try:
        emp = employee.objects.get(email_id = str( request.session.get('loginID')))
        webresponse['errorMessages'] = []
        response['meetings'] = []
        today = datetime.datetime.now()
        summaries = []
        for meeting in meetings.objects.filter(Q(scheduled_date=today) & Q(emp_id=emp))[:5]:
            summaries.append({
                "meeting_topic" : meeting.meeting_topic,
                "meeting_time" : str(meeting.meeting_time),
                "meeting_id" : meeting.id
            })
        response['meetings'] = summaries
        webresponse['data'] = response
    except Exception as name:
        webresponse['errorMessages'] = generate_error_response(name.__unicode__() )
    return json_response(webresponse)
# Names of up to five of today's tasks for the dashboard.
def tasksummary(request):
    """Return at most five task names scheduled for today."""
    webresponse = {}
    response = {}
    try:
        emp = employee.objects.get(email_id = str( request.session.get('loginID')))
        webresponse['errorMessages'] = []
        response["tasks"] = []
        today = datetime.datetime.now()
        todays_tasks = tasks.objects.filter(Q(scheduled_date=today) & Q(emp_id=emp))[:5]
        response["tasks"] = [entry.tasks for entry in todays_tasks]
        webresponse['data'] = response
    except Exception as name:
        webresponse['errorMessages'] = generate_error_response(name.__unicode__() )
    return json_response(webresponse)
def completeTask(request):
    """Mark a todo item as completed (task_status=False)."""
    webresponse = {}
    response = {}
    try:
        webresponse['errorMessages'] = []
        response['Success'] = False
        if request.session.get('loginID') is not None:
            entry = todo.objects.get(id=request.GET.get('id'))
            entry.task_status = False
            entry.save()
            response['Success'] = True
        else:
            webresponse['errorMessages'] = 'your session expired refresh page to login again'
    except Exception as name:
        webresponse['errorMessages'] = generate_error_response(name.__unicode__() )
    webresponse['data'] = response
    return json_response(webresponse)
def deleteTask(request):
    """Delete a todo item by id.

    Fix: 'errorMessages' was initialised on the inner `response` dict while
    error text was written to `webresponse`, so the key was absent from
    `webresponse` on success — unlike every sibling view.  It is now
    initialised on `webresponse` too; the legacy key on `response` is kept
    for backward compatibility.
    """
    webresponse = {}
    response = {}
    webresponse['errorMessages'] = []
    response['errorMessages'] = []
    response['Success'] = False
    try:
        if request.session.get('loginID') is not None:
            task_id = request.GET.get('id')
            todo.objects.filter(id=task_id).delete()
            response['Success'] = True
        else:
            webresponse['errorMessages'] = 'your session expired refresh page to login again'
    except Exception as name:
        webresponse['errorMessages'] = generate_error_response(name.__unicode__() )
    webresponse['data'] = response
    return json_response(webresponse)
def update_task_date(request):
    """Reschedule a todo item to a new date and time."""
    content = json_request(request)
    webresponse = {}
    try:
        webresponse['errorMessages'] = []
        webresponse['status'] = False
        if request.session.get('loginID') is not None:
            task_id = content['id']
            new_date = content['date']
            entry = todo.objects.get(id=task_id)
            entry.scheduled_date = new_date
            entry.scheduled_time = content['time']
            entry.save()
            webresponse['status'] = True
            webresponse['data'] = generate_response(entry)
        else:
            webresponse['errorMessages'] = 'your session expired refresh page to login again'
    except Exception as name:
        webresponse['errorMessages'] = generate_error_response(name.__unicode__() )
    return json_response(webresponse)
def scriblingData(request):
    """Persist a quick note ("scribble") for the logged-in employee."""
    req_data = json_request(request)
    webresponse = {}
    response = {}
    webresponse['Success'] = False
    try:
        webresponse['errorMessages'] = []
        note_text = req_data['task']
        emp = employee.objects.get(email_id = str( request.session.get('loginID')))
        note = scribling_data(emp_id=emp, date=datetime.datetime.now(), data=note_text)
        note.save()
        response['data'] = note.data
        response['id'] = note.id
        response['date'] = str(note.date)
        webresponse['Success'] = True
    except Exception as name:
        webresponse['errorMessages'] = generate_error_response(name.__unicode__() )
    webresponse['data'] = response
    return json_response(webresponse)
def updateTask(request):
    """Update every editable field of a todo item from the posted JSON."""
    req_data = json_request(request)
    webresponse = {}
    try:
        webresponse['errorMessages'] = []
        webresponse['status'] = False
        if request.session.get('loginID') is not None:
            # 12-hour clock from the client, stored as 24-hour HH:MM:SS.
            clock = time.strftime("%H:%M:%S", time.strptime(req_data['time'], "%I:%M:%S %p"))
            entry = todo.objects.get(id=req_data['id'])
            entry.scheduled_date = req_data['date']
            entry.tasks = req_data['tasks']
            entry.priority = req_data['priority']
            # The client sends task_status as the string "false"/"true".
            entry.task_status = (req_data['task_status'] != "false")
            entry.category = req_data['category']
            entry.project = req_data['project']
            entry.scheduled_time = clock
            entry.save()
            webresponse['status'] = True
            webresponse['data'] = generate_response(entry)
        else:
            webresponse['errorMessages'] = 'your session expired refresh page to login again'
    except Exception as name:
        webresponse['errorMessages'] = generate_error_response(name.__unicode__() )
    return json_response(webresponse)
def search(request):
    """Case-insensitive substring search over the user's todo items (?q=...)."""
    webresponse = {}
    response = {}
    try:
        webresponse['status'] = False
        query = request.GET.get('q')
        if query is not None:
            emp = employee.objects.get(email_id = str( request.session.get('loginID')))
            matches = todo.objects.filter(Q(tasks__icontains=query) & Q(emp_id=emp))
            response = fetch_taskDetails(matches)
            webresponse['status'] = True
            webresponse['data'] = response
        else:
            webresponse['errorMessages'] = 'Search parameter not entered'
    except Exception as name:
        webresponse['errorMessages'] = generate_error_response(name.__unicode__() )
        webresponse['status'] = False
    return json_response(webresponse)
def myscriblings(request):
    """All saved quick notes for the logged-in employee."""
    webresponse = {}
    try:
        webresponse['errorMessages'] = []
        webresponse['status'] = False
        notes = []
        emp = employee.objects.get(email_id = str( request.session.get('loginID')))
        for entry in scribling_data.objects.filter(emp_id=emp):
            notes.append({
                "data": entry.data,
                "date": str(entry.date),
                "id" : entry.id
            })
        webresponse['status'] = True
        webresponse["data"] = notes
    except Exception as name:
        webresponse['errorMessages'] = generate_error_response(name.__unicode__() )
        webresponse['status'] = False
    return json_response(webresponse)
def deleteScriblingData(request):
    """Delete one quick note (?id=...) owned by the logged-in employee."""
    webresponse = {}
    try:
        webresponse['errorMessages'] = []
        webresponse['status'] = False
        note_id = request.GET.get('id')
        emp = employee.objects.get(email_id = str( request.session.get('loginID')))
        scribling_data.objects.filter(Q(emp_id=emp) & Q(id=note_id)).delete()
        webresponse['status'] = True
    except Exception as name:
        webresponse['errorMessages'] = generate_error_response(name.__unicode__() )
        webresponse['status'] = False
    return json_response(webresponse)
|
from typing import List
from collections import defaultdict
class Solution:
    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
        """Group strings that are anagrams of each other.

        Each string is keyed by its 26-letter frequency signature, so the
        whole pass is O(total characters).

        Fix: return an actual list, as the annotated List[List[str]]
        promises, instead of a dict_values view.
        """
        groups = defaultdict(list)
        for string in strs:
            # counter for each English letter
            count = [0] * 26
            for char in string:
                count[ord(char) - ord('a')] += 1
            groups[tuple(count)].append(string)
        return list(groups.values())
|
from saml2test.check.ec_compare import Result
from saml2test.check.ec_compare import EntityCategoryTestResult
from saml2test.check.ec_compare import verify_rs_compliance
from saml2test.check.ec_compare import verify_coco_compliance
from saml2.entity_category import refeds
from saml2.entity_category import edugain
__author__ = 'roland'
def list_eq(l1, l2):
    """Order-insensitive comparison of two iterables (duplicates ignored)."""
    return not set(l1).symmetric_difference(l2)
def test_result():
    """Result stringifies its missing/extra lists and len() counts entries."""
    res = Result('R&S')
    res.missing.append('mail')
    assert len(res) == 1
    assert '{}'.format(res) == "R&S: missing=['mail']"
    res.missing.append("cn")
    assert len(res) == 2
    assert '{}'.format(res) == "R&S: missing=['mail', 'cn']"
    res.extra.append('ou')
    assert len(res) == 3
    assert '{}'.format(res) == "R&S: missing=['mail', 'cn'], extra=['ou']"
def test_entity_category_test_result():
    """EntityCategoryTestResult renders its status, message and specifics."""
    specifics = Result('R&S')
    specifics.missing.append('mail')
    specifics.extra.append('ou')
    tr = EntityCategoryTestResult('test_id', 2, 'name', specifics=[specifics])
    tr.message = "Non conformant"
    assert tr.status == 2
    expected = ("test_id: status=WARNING, message=Non conformant\n"
                "R&S: missing=['mail'], extra=['ou']")
    assert '{}'.format(tr) == expected
def test_entity_category_test_result_comb():
    """R&S and CoCo compliance results can be merged with union().

    Fix: the original extended refeds.RELEASE[...] and edugain.RELEASE[...]
    in place, permanently mutating module-level state shared by every other
    test in the session; the attribute lists are now copied before being
    extended.
    """
    ec_attr_rs = list(refeds.RELEASE[refeds.RESEARCH_AND_SCHOLARSHIP])
    ec_attr_rs.extend(refeds.RELEASE[''])
    ec_attr_coco = list(edugain.RELEASE[edugain.COCO])
    ec_attr_coco.extend(edugain.RELEASE[''])
    ava = {
        'eduPersonPrincipalName': 'foo@example.com',
        'eduPersonTargetedID': 'foovar',
        'location': 'earth'
    }
    requested_attributes = ['eduPersonPrincipalName',
                            'eduPersonScopedAffiliation',
                            'mail']
    res_rs = verify_rs_compliance('R&S', ava, requested_attributes, ec_attr_rs)
    assert list_eq(res_rs.missing, ['mail', 'displayName', 'givenName', 'sn'])
    assert list_eq(res_rs.expected,
                   ['eduPersonPrincipalName', 'eduPersonTargetedID'])
    assert res_rs.extra == ['location']
    res_coco = verify_coco_compliance('CoCo', ava, requested_attributes,
                                      ec_attr_coco)
    assert list_eq(res_coco.missing, ['eduPersonScopedAffiliation', 'mail'])
    assert list_eq(res_coco.expected, ['eduPersonPrincipalName',
                                       'eduPersonTargetedID'])
    assert res_coco.extra == ['location']
    res = res_rs.union(res_coco)
    assert list_eq(res.missing, ['displayName', 'givenName',
                                 'eduPersonScopedAffiliation', 'sn', 'mail'])
    assert list_eq(res.expected,
                   ['eduPersonPrincipalName', 'eduPersonTargetedID'])
    assert res.extra == ['location']
|
import telnetlib
import sys
import getpass
import time
import os
import rrdtool
# Command-line arguments: RRD file path and the two output PNG paths.
try:
    rrdpath = sys.argv[1]
    syncimg = sys.argv[2]
    marginimg = sys.argv[3]
except IndexError:
    # Fix: the "%s" placeholder was never interpolated; fill in argv[0].
    print >> sys.stderr, "Usage: %s <rrdpath> <syncimg> <marginimg>" % sys.argv[0]
    sys.exit(2)
# Log in to the modem's administrative CLI over Telnet, echoing each
# prompt as it is consumed so the session is visible on stdout.
password = getpass.getpass("Password: ")
tn = telnetlib.Telnet('192.168.1.254')
print tn.read_until('Username : ')
tn.write('Administrator\r\n')
print tn.read_until('Password : ')
tn.write(password + '\r\n')
print tn.read_until('{Administrator}=>')
# Create the round-robin database on first run: four gauges (sync and
# margin, each TX/RX), a 10-second step with a two-step heartbeat, and
# two averaged archives (6-step and 60-step resolution, 1000 rows each).
if not os.path.exists(rrdpath):
    step = 10
    heartbeat = step * 2
    data_sources = [
        'DS:synctx:GAUGE:%s:U:U' % heartbeat,
        'DS:syncrx:GAUGE:%s:U:U' % heartbeat,
        'DS:margintx:GAUGE:%s:U:U' % heartbeat,
        'DS:marginrx:GAUGE:%s:U:U' % heartbeat,
    ]
    rrdtool.create(
        rrdpath,
        '--start', str(int(time.time())),
        '--step', str(step),
        data_sources,
        'RRA:AVERAGE:0.5:6:1000',
        'RRA:AVERAGE:0.5:60:1000')
# Poll loop: every 10 seconds ask the modem for xDSL stats, feed the
# parsed sync rates and noise margins into the RRD, and regenerate the
# two 8-hour PNG graphs.
while True:
    tn.write('xdsl info expand=enabled\r\n')
    data = tn.read_until('{Administrator}=>')
    # -1 marks values that were missing from the CLI output.
    synctx = -1
    syncrx = -1
    margintx = -1
    marginrx = -1
    for line in data.split('\n'):
        line = line.strip()
        # The last two whitespace-separated fields are RX then TX.
        if line.startswith('Payload rate [Kbps]:'):
            syncrx, synctx = map(float, line.split()[-2:])
        if line.startswith('Margins [dB]:'):
            marginrx, margintx = map(float, line.split()[-2:])
    # "N:" = sample timestamped "now".
    data = "N:%f:%f:%f:%f" % (synctx, syncrx, margintx, marginrx)
    print "update: %s" % (data,)
    rrdtool.update(rrdpath, data)
    # make pretty graphs
    rrdtool.graph(marginimg,
                  '-M', '-l', '0', '--start', '-8h',
                  '--width', '800', '--height', '200',
                  'DEF:marginrx=%s:marginrx:AVERAGE' % (rrdpath,),
                  'LINE1:marginrx#800000:Margin RX',
                  'DEF:margintx=%s:margintx:AVERAGE' % (rrdpath,),
                  'LINE1:margintx#000080:Margin TX',
                  'GPRINT:marginrx:LAST:Current Margin RX\: %1.3lf',
                  'GPRINT:margintx:LAST:Current Margin TX\: %1.3lf')
    rrdtool.graph(syncimg,
                  '-M', '-l', '0', '--start', '-8h',
                  '--width', '800', '--height', '200',
                  'DEF:syncrx=%s:syncrx:AVERAGE' % (rrdpath,),
                  'LINE1:syncrx#800000:Sync RX',
                  'DEF:synctx=%s:synctx:AVERAGE' % (rrdpath,),
                  'LINE1:synctx#000080:Sync TX',
                  'GPRINT:syncrx:LAST:Current Sync RX\: %1.3lf',
                  'GPRINT:synctx:LAST:Current Sync TX\: %1.3lf')
    time.sleep(10)
|
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import sklearn
# EDA for the competition training data: correlate a column slice with the
# target Y1, then scatter/box-plot the most correlated features.
data_path = "D:/VScode workshop/big_data_competition/data/"

# The training CSV is Big5-encoded.
train_df = pd.read_csv(data_path + "train.csv" , encoding = "big5" , low_memory = False)

# Map binary Y/N columns to 1/0 so they participate in the correlation matrix.
for col in train_df.columns.to_list():
    if train_df[col].nunique() == 2:
        train_df[col].replace({'Y' : 1 , 'N' : 0} , inplace = True)

# Columns 72..108 plus the target Y1.
sub_df = train_df.iloc[ : , 72 : 109]
sub_df = pd.concat([sub_df , train_df[['Y1']]] , axis = 1)
print(sub_df.corr()['Y1'])

# |r| >= 0.05 (ten largest correlation coefficients):
# X_B_IND, X_C_IND, X_E_IND, X_H_IND, TOOL_VISIT_1YEAR_CNT, DIEBENEFIT_AMT,
# DIEACCIDENT_AMT, MONTHLY_CARE_AMT, LIFE_INSD_CNT, IF_ISSUE_INSD_I_IND
# NOTE(review): sort_values()[:10] actually selects the ten MOST NEGATIVE
# correlations, not the ten largest by |r| — confirm which was intended.
r_col = sub_df.corr()['Y1'].sort_values()[:10].index.to_list()

for col in r_col:
    # Fix: scatter against the real row count instead of a hard-coded 100000,
    # which broke whenever the data had a different number of rows.
    plt.scatter(range(len(train_df)) , train_df[col])
    plt.xlabel("count")
    plt.ylabel(col)
    plt.title("EDA scatter")
    plt.show()

for col in r_col:
    if train_df[col].nunique() > 2:
        sns.boxplot(x = 'Y1' , y = col , data = train_df , palette = "hls")
        plt.show()
|
def func1(ulist):
    """Return True if ulist contains the subsequence 0, 0, 7 in order.

    Fix: the original appended an 'x' sentinel to the pattern and tested
    `len(compare) == 1`, so a literal 'x' element appearing in the input
    after the pattern was matched consumed the sentinel and flipped the
    result to False (e.g. [0, 0, 7, 'x']).  This version scans the pattern
    directly and needs no sentinel.
    """
    pattern = [0, 0, 7]
    matched = 0
    for item in ulist:
        if item == pattern[matched]:
            matched += 1
            if matched == len(pattern):
                return True
    return False
# Quick manual check: this list contains 0, 0, 7 in order, so func1 -> True.
ulist=[1,2,0,0,7,8,9]
print(func1(ulist))
#Leetcode 692. Top K Frequent Words
class Solution:
    def topKFrequent(self, words: List[str], k: int) -> List[str]:
        """Return the k most frequent words, most frequent first.

        Ties are broken alphabetically.  Words are bucketed by frequency
        and the buckets walked from highest to lowest.

        Fixes: removed the dead `if len(bucket) == 0: continue` check that
        sat *inside* the iteration over the bucket (an empty bucket never
        enters the loop), and simplified the check-before-append return so
        the result is built then returned as soon as it holds k words.
        """
        if not words:
            return []
        freq = {}
        for word in words:
            freq[word] = freq.get(word, 0) + 1
        # buckets[i] holds every word occurring i+1 times; a word can occur
        # at most len(words) times, so the list is always large enough.
        buckets = [[] for _ in range(len(words))]
        for word, count in freq.items():
            buckets[count - 1].append(word)
        res = []
        for bucket in reversed(buckets):
            for word in sorted(bucket):
                res.append(word)
                if len(res) == k:
                    return res
        return res
class Solution:
    def topKFrequent(self, words: List[str], k: int) -> List[str]:
        """k most frequent words via a min-heap keyed on (-count, word)."""
        counts = {}
        for word in words:
            counts[word] = counts.get(word, 0) + 1
        # Negated counts make the min-heap pop the highest frequency first;
        # equal counts fall back to alphabetical order via the word itself.
        heap = [(-count, word) for word, count in counts.items()]
        heapq.heapify(heap)
        return [heapq.heappop(heap)[1] for _ in range(k)]
import pandas as pd
import numpy as np
import intervaltree as it
from collections import defaultdict
from collections import Counter
import sys, os, re
from os import listdir
from os.path import isfile, join
from tqdm import tqdm_notebook, tnrange
import distance
from operator import itemgetter
import numpy as np
from datetime import datetime
from joblib import Parallel, delayed
def intersection_rsite(filename, inputdir, outputdir, restrict_dict, restrictase, inswindow):
    """Flag reads whose insertion window overlaps a restriction site.

    Reads a tab-separated read table from ``inputdir + filename``, adds a 0/1
    column named after ``restrictase``, sets it to 1 for every read whose
    strand-aware window (widened by ``inswindow``) intersects an interval in
    ``restrict_dict`` (a dict of chromosome -> intervaltree), and writes the
    annotated table to ``outputdir`` under the same filename.  Returns None.
    """
    readsname = os.path.splitext(filename)[0]
    # BUG FIX: the separator was passed positionally; modern pandas requires
    # the keyword form sep='\t'.
    df = pd.read_table(inputdir + filename, sep='\t')
    # New indicator column, initialised to 0 for every read.
    df[restrictase] = pd.Series(np.zeros(df.shape[0]), index=df.index)
    df_group = df.groupby(['CHR', 'INS_STRAND'])
    for name, group in tqdm_notebook(df_group, desc=readsname):
        # name is a (chromosome, strand) pair; skip chromosomes with no sites.
        if name[0] in restrict_dict:
            # Window arithmetic below mirrors the original; the +3 / -2
            # offsets presumably compensate for alignment end coordinates —
            # TODO(review): confirm against the upstream read format.
            if name[1] == '+':
                insrange = zip(np.array(group['POS']) - np.array(group['TLEN']) + 3,
                               np.array(group['POS']) + inswindow + 1,
                               list(group.index))
            else:
                insrange = zip(np.array(group['POS']) - inswindow,
                               np.array(group['POS']) + np.array(group['TLEN'] - 2),
                               list(group.index))
            for start, end, idx in insrange:
                # Any overlapping interval marks this read.
                if len(restrict_dict[name[0]][start:end]) > 0:
                    # BUG FIX: DataFrame.set_value was removed in pandas 1.0;
                    # .at is its scalar-assignment replacement.
                    df.at[idx, restrictase] = 1
    df.to_csv(outputdir + filename, sep='\t', index=None)
def main(inputdir, outputdir, restrictway, restrictase, inswindow, n_core):
    """Annotate every read table in *inputdir* with restriction-site overlaps.

    Builds one interval tree per chromosome from the bz2-compressed site table
    at *restrictway*, then runs intersection_rsite over each ``.txt`` file in
    *inputdir* — sequentially for a single file, otherwise in parallel with
    *n_core* joblib workers.  Results are written into *outputdir*.
    """
    before = datetime.now()  # start timestamp (end-of-run use not visible in this chunk)
    # Normalise both directories to absolute paths with a trailing slash,
    # since paths are later built by plain string concatenation.
    inputdir = os.path.abspath(inputdir) + '/'
    outputdir = os.path.abspath(outputdir) + '/'
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    # Only plain .txt files directly inside inputdir are processed.
    onlyfiles = [f for f in listdir(inputdir) if (isfile(join(inputdir, f))
                                                 and os.path.splitext(f)[1] == '.txt')]
    restrict_dict = {}
    # Site table: two columns (chromosome, position), bz2-compressed.
    restrict = pd.read_table(restrictway, compression='bz2')
    restrict.columns = ['CHR', 'POS']
    restrict_group = restrict.groupby(['CHR'])
    for name, group in tqdm_notebook(restrict_group, desc='restrict'):
        # Each site becomes a unit-length interval [POS, POS+1) in the
        # per-chromosome interval tree, enabling fast overlap queries.
        start_group = np.array(group['POS'])
        end_group = start_group+1
        restrict_dict[name] = it.IntervalTree(it.Interval(start, end)
                                              for start, end in zip(start_group, end_group))
    if len(onlyfiles) == 1:
        # Single file: run directly, avoiding joblib overhead.
        filename = onlyfiles[0]
        stat_series = intersection_rsite(filename,
                    inputdir, outputdir, restrict_dict, restrictase, inswindow)
        #stat_df = stat_series.to_frame().transpose()
    else:
        # Fan the files out over n_core worker processes.
        stat_series = Parallel(n_jobs=n_core)(delayed(intersection_rsite)(filename,
                    inputdir, outputdir, restrict_dict, restrictase, inswindow)
                                              for filename in onlyfiles)
        #stat_df = pd.concat(stat_series, axis=1).transpose()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.