text stringlengths 38 1.54M |
|---|
import demistomock as demisto
import json
def executeCommand(name, args=None):
    """Stub for demisto.executeCommand that serves canned JSON fixtures.

    The two API calls the script under test issues are mapped onto files
    in TestData/; any other command raises ValueError.
    """
    uri = (args or {}).get('uri')
    if name == 'demisto-api-get' and uri == "/settings/integration-commands":
        fixture = 'TestData/integration_commands.json'
    elif name == 'demisto-api-post' and uri == "/settings/integration/search":
        fixture = 'TestData/integration_search.json'
    else:
        raise ValueError('Unimplemented command called: {}'.format(name))
    with open(fixture, 'r') as handle:
        return json.load(handle)
def test_main(mocker):
    """Run ProvidesCommand.main three times and check the integration list
    it outputs for each value of the `enabled` argument.

    demisto.executeCommand is stubbed (side_effect=executeCommand above),
    so the server responses come from the TestData/*.json fixtures.
    """
    from ProvidesCommand import main
    mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)

    # No `enabled` filter: every integration implementing send-mail.
    mocker.patch.object(demisto, 'args', return_value={
        'command': 'send-mail'
    })
    mocker.patch.object(demisto, 'results')
    main()
    assert demisto.results.call_count == 1
    results = demisto.results.call_args
    assert results[0][0] == 'EWS Mail Sender,Gmail,Mail Sender (Deprecated),Mail Sender (New)'

    # enabled=true: only integrations with an enabled instance.
    mocker.patch.object(demisto, 'args', return_value={
        'command': 'send-mail',
        'enabled': 'true'
    })
    # Re-patching `results` installs a fresh mock, so call_count restarts at 0.
    mocker.patch.object(demisto, 'results')
    main()
    assert demisto.results.call_count == 1
    results = demisto.results.call_args
    assert results[0][0] == 'Mail Sender (New)'

    # enabled=false: only integrations without an enabled instance.
    mocker.patch.object(demisto, 'args', return_value={
        'command': 'send-mail',
        'enabled': 'false'
    })
    mocker.patch.object(demisto, 'results')
    main()
    assert demisto.results.call_count == 1
    results = demisto.results.call_args
    assert results[0][0] == 'EWS Mail Sender,Gmail,Mail Sender (Deprecated)'
|
# I want to be able to call nested_sum from main w/ various nested lists
# and I greatly desire that the function returns the sum.
# Ex. [1, 2, [3]]
# Verify you've tested w/ various nestings.
# In your final submission:
# - Do not print anything extraneous!
# - Do not put anything but pass in main()
#########################################################################
def nested_sum(l):
    """Return the sum of all integers in an arbitrarily nested list.

    Each sub-list is first reduced to its own sum (recursively), then
    every value is added to the running total.
    """
    total = 0
    for item in l:
        if type(item) is list:  # nested list: reduce it recursively
            total += nested_sum(item)
        else:
            total += item
    return total
# Is it possible to eliminate the total counter when using a for loop
# since total is not necessary when using list comprehension?
# Or is that a benefit of list comprehension?
def nested_sum_alternate(l, total=0):
    """Return the sum of all integers in a nested list using a for loop.

    `total` is the running accumulator threaded through the recursive
    calls; callers normally omit it (answering the question in the
    comment above: the accumulator parameter is what a plain loop needs
    in place of the implicit accumulation a comprehension + sum gives).

    Bug fix: the nested-list branch previously called
    `nested_sum(item, total)`, but nested_sum() accepts a single
    argument, so any nested input raised TypeError. It now recurses
    into this function instead.
    """
    for item in l:
        if type(item) is list:
            total = nested_sum_alternate(item, total)
        else:
            total = total + item
    return total
#########################################################################
def main():
    # Intentionally a no-op: the exercise rules above require main() to
    # contain nothing but `pass`.
    pass
if __name__ == "__main__":
main() |
# waifu2xのupconv7と同様のモデル構造のモデルです
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from PIL import Image
from MMD2illust_util import tensor_to_pil
MODEL_PATH = "latest_960_1920.pth"
class waifu2x_processor(object):
    """2x image upscaler with the same architecture as waifu2x's upconv7.

    Loads pretrained weights from MODEL_PATH at construction, runs on the
    GPU when available, and falls back to CPU inference if the GPU pass
    fails (e.g. CUDA out of memory on a large image).
    """

    def __init__(self):
        super().__init__()
        # upconv7: six 3x3 conv + LeakyReLU stages preceded by a 5px
        # zero-pad, then a stride-2 transposed conv that doubles the
        # spatial resolution and maps back to 3 (RGB) channels.
        modules = [nn.ZeroPad2d(5),
                   nn.Conv2d(3, 16, 3, 1, 0),
                   nn.LeakyReLU(0.1, inplace=False),
                   nn.Conv2d(16, 32, 3, 1, 0),
                   nn.LeakyReLU(0.1, inplace=False),
                   nn.Conv2d(32, 64, 3, 1, 0),
                   nn.LeakyReLU(0.1, inplace=False),
                   nn.Conv2d(64, 128, 3, 1, 0),
                   nn.LeakyReLU(0.1, inplace=False),
                   nn.Conv2d(128, 128, 3, 1, 0),
                   nn.LeakyReLU(0.1, inplace=False),
                   nn.Conv2d(128, 256, 3, 1, 0),
                   nn.LeakyReLU(0.1, inplace=False),
                   nn.ConvTranspose2d(256, 3, kernel_size=6, stride=2, padding=0, bias=False)
                   ]
        self.model = nn.Sequential(*modules)
        self.model.load_state_dict(torch.load(MODEL_PATH))
        self.use_cuda = torch.cuda.is_available()
        if self.use_cuda:
            self.model = self.model.cuda()
        self.model.eval()
        self.to_tensor = transforms.ToTensor()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager cleanup: drop the model and release cached GPU
        # memory so other code can use the device.
        if hasattr(self, "model"):
            del self.model
        torch.cuda.empty_cache()

    def waifu2x_path(self, path):
        """Upscale the image file at `path`; returns a PIL image."""
        img = Image.open(path)
        return self.waifu2x_pil(img)

    def waifu2x_pil(self, img):
        """Upscale a PIL image; returns the upscaled PIL image."""
        with torch.no_grad():
            img = img.convert("RGB")
            tensor_img = self.to_tensor(img).unsqueeze(0)
            if self.use_cuda:
                # Placeholder so the logging/cleanup in the except branch
                # is safe even if .cuda() itself fails.
                gpu_img = torch.tensor(0)
                try:
                    gpu_img = tensor_img.cuda()
                    result_tensor_img = self.model(gpu_img)
                    result_pil_img = tensor_to_pil(result_tensor_img)
                    del gpu_img
                    del result_tensor_img
                    del tensor_img
                    return result_pil_img
                except Exception:
                    # Bug fix: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit. Log and fall through
                    # to the CPU path below.
                    print(gpu_img.shape)
                    del gpu_img
                    print("gpu error")
            result_tensor_img = self.model(tensor_img)
            result_pil_img = tensor_to_pil(result_tensor_img)
            del result_tensor_img
            del tensor_img
            return result_pil_img
from allure_commons.types import AttachmentType
from selenium import webdriver
from selenium.webdriver.common.by import By
from Solarwinds_login.utilities.handy_wrappers import HandyWrappers
from Solarwinds_login.utilities.explicit_wait import ExplicitWaitType
import time
class solarwin_login():
    """Scheduled smoke test that logs in to the Solarwinds portal.

    NOTE(review): every statement below up to test() executes at
    class-definition time, i.e. merely importing this module launches a
    Chrome browser and navigates to baseUrl as a side effect.
    """
    baseUrl = "https://solarwinds.vmware.com/"
    global driver
    driver = webdriver.Chrome()
    driver.maximize_window()
    driver.implicitly_wait(5)  # seconds, applied to every element lookup
    global wait
    wait = ExplicitWaitType(driver)
    global hw
    hw = HandyWrappers(driver)
    driver.get(baseUrl)

    def test(self):
        # isElementPresent appears to return a (state, element) pair where
        # element is the string 'None found' when the locator did not
        # match — TODO confirm against HandyWrappers.
        login_element_state,login_username = hw.isElementPresent("//div[@id='dialog']/input[@name='ctl00$BodyContent$Username']", By.XPATH)
        if login_username == 'None found':
            print("login_element_state" + ' ' + str(login_element_state))
        else:
            # NOTE(review): '\y' is not a valid escape, so this keeps a
            # literal backslash, but it should be a raw string
            # (r'vmwarem\yc'). Hard-coded credentials belong in a
            # secret store, not in source control.
            login_username.send_keys('vmwarem\yc')
        password_element_state,login_password = hw.isElementPresent("//div[@id='dialog']/div[@class='sw-password-box']/input[@name='ctl00$BodyContent$Password']", By.XPATH)
        if login_password == 'None found':
            print("password_element_state" + ' ' + str(password_element_state))
        else:
            login_password.send_keys('Yasodha3@mithra')
        try:
            # find_element_by_link_text was removed in Selenium 4; this
            # only works against Selenium 3.
            login_btn = driver.find_element_by_link_text("LOGIN")
            login_btn.click()
            time.sleep(2)
            driver.save_screenshot("C:\\Users\\yc\\workspace_python\\Scheduled_tests\\Solarwinds_login\\allure_reports\\login.png")
        except:
            # Screenshot is taken on failure too, for the allure report.
            driver.save_screenshot("C:\\Users\\yc\\workspace_python\\Scheduled_tests\\Solarwinds_login\\allure_reports\\login.png")
        return driver;
|
# Copyright (c) 2015
#
# All rights reserved.
#
# This file is distributed under the Clear BSD license.
# The full text can be found in LICENSE in the root directory.
from boardfarm.lib import common
from . import qcom_akronite_nand
class QcomDakotaRouterNAND(qcom_akronite_nand.QcomAkroniteRouterNAND):
    """QcomDakotaRouter board configuration class inherits from QcomAkroniteRouterNAND
    """

    # Board model identifiers this configuration class handles.
    model = ("dk07-nand", "dk04-nand")
    # DDR address u-boot loads the boot image to.
    uboot_ddr_addr = "0x88000000"
    # Machine id per board variant, passed to u-boot.
    # NOTE(review): "dk07-nand" and "ea8300" share machid 8010006 —
    # confirm that is intentional.
    machid_table = {
        "dk03": "8010100",
        "dk04-nand": "8010001",
        "dk06-nand": "8010005",
        "dk07-nand": "8010006",
        "ea8300": "8010006"
    }
    # Seconds to wait for u-boot networking to come up.
    uboot_network_delay = 5

    def boot_linux_ramboot(self):
        """This method flashes the linux from initramfs.
        initramfs is a complete set of directories that you would find on a normal root filesystem.
        """
        common.print_bold("\n===== Booting linux (ramboot) for %s =====" %
                          self.model)
        # Kernel command line; mtdparts comes from the base class helper.
        bootargs = 'console=ttyMSM0,115200 clk_ignore_unused norootfssplit mem=256M %s' % self.get_safe_mtdparts(
        )
        if self.boot_dbg:
            # Enable dynamic debug for the requested module.
            bootargs += " dyndbg=\"module %s +p\"" % self.boot_dbg
        self.sendline("setenv bootargs '%s'" % bootargs)
        self.expect(self.uprompt)
        # Keep the flattened device tree below this address.
        self.sendline('set fdt_high 0x85000000')
        self.expect(self.uprompt)
        self.sendline("bootm %s" % self.uboot_ddr_addr)
        self.expect("Loading Device Tree to")
        self.rambooted = True
|
#!/usr/bin/env python3
# Read a count from the command line. sys.argv entries arrive as strings,
# so the value must be converted with int() (or float()) before comparing.
import sys

count = int(sys.argv[1])
print('True' if count > 100 else 'False')
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""
Generating Topography plots
===========================
Example demonstrating a topography plot."""
from mvpa.suite import *

# Sanity check if we have griddata available; raises if it is missing,
# since the topography plots below depend on it.
externals.exists("griddata", raise_=True)

# EEG example subplot
pl.subplot(1, 2, 1)

# load the sensor information from their definition file.
# This file has sensor names, as well as their 3D coordinates
sensors=XAVRSensorLocations(os.path.join(pymvpa_dataroot, 'xavr1010.dat'))

# make up some artificial topography:
# 'enable' two channels, all others set to off ;-)
topo = np.zeros(len(sensors.names))
topo[sensors.names.index('O1')] = 1
topo[sensors.names.index('F4')] = 1

# plot with sensor locations shown
plot_head_topography(topo, sensors.locations(), plotsensors=True)

# MEG example plot
pl.subplot(1, 2, 2)

# load MEG sensor locations
sensors=TuebingenMEGSensorLocations(
            os.path.join(pymvpa_dataroot, 'tueb_meg_coord.xyz'))

# random values this time
topo = np.random.randn(len(sensors.names))

# plot without additional interpolation
plot_head_topography(topo, sensors.locations(),
                     interpolation='nearest')

if cfg.getboolean('examples', 'interactive', True):
    # show all the cool figures
    pl.show()

"""
The output of the provided example should look like

.. image:: ../pics/ex_topo_plot.*
   :align: center
   :alt: Topography plot of MEG data
"""
|
#638. 大礼包
'''
在LeetCode商店中, 有许多在售的物品。
然而,也有一些大礼包,每个大礼包以优惠的价格捆绑销售一组物品。
现给定每个物品的价格,每个大礼包包含物品的清单,以及待购物品清单。请输出确切完成待购清单的最低花费。
每个大礼包的由一个数组中的一组数据描述,最后一个数字代表大礼包的价格,其他数字分别表示内含的其他种类物品的数量。
任意大礼包可无限次购买。
示例 1:
输入: [2,5], [[3,0,5],[1,2,10]], [3,2]
输出: 14
解释:
有A和B两种物品,价格分别为¥2和¥5。
大礼包1,你可以以¥5的价格购买3A和0B。
大礼包2, 你可以以¥10的价格购买1A和2B。
你需要购买3个A和2个B, 所以你付了¥10购买了1A和2B(大礼包2),以及¥4购买2A。
示例 2:
输入: [2,3,4], [[1,1,0,4],[2,2,1,9]], [1,2,1]
输出: 11
解释:
A,B,C的价格分别为¥2,¥3,¥4.
你可以用¥4购买1A和2B,也可以用¥9购买2A,2B和1C。
你需要买1A,2B和1C,所以你付了¥4买了1A和1B(大礼包1),以及¥3购买1B, ¥4购买1C。
你不可以购买超出待购清单的物品,尽管购买大礼包2更加便宜。
说明:
最多6种物品, 100种大礼包。
每种物品,你最多只需要购买6个。
你不可以购买超出待购清单的物品,即使更便宜。
'''
#深度优先搜索所有可能的购买方案以求得最优解
'''
先尽可能多的选择能够购买的礼包,然后在一个个单买满足最后的需求
通过递归实现回溯,即回溯到选择另外一种礼包
具体算法思路在代码中给出。
'''
import sys
class Solution(object):
    """LeetCode 638 "Shopping Offers".

    Given unit prices, bundle offers (each usable any number of times,
    last entry of an offer is its price) and an exact shopping list,
    return the minimum cost to buy exactly `needs` — never more of any
    item than needed.
    """

    def __init__(self):
        # Best (minimum) total cost found so far by the DFS.
        self.Min = sys.maxsize

    def CanBeChosen(self, offer, needs):
        """Return True if `offer` can be bought without exceeding `needs`.

        Only the item counts are compared; the last entry of `offer` is
        its price.
        """
        for i in range(len(offer) - 1):
            if offer[i] > needs[i]:
                return False
        return True

    def buyThisOffer(self, offer, needs):
        """Return the remaining needs after buying `offer` once."""
        return [needs[i] - offer[i] for i in range(len(needs))]

    def chooseOffers(self, price, special, needs, money, start=0):
        """DFS over offer combinations, updating self.Min.

        `money` is the cost already committed on this path. `start`
        restricts the search to offers at index >= start, so each
        *combination* of offers is explored once instead of every
        permutation (the duplicate-work issue noted in the comment after
        this class); results are unchanged, only the search is smaller.
        """
        for i in range(start, len(special)):
            offer = special[i]
            if self.CanBeChosen(offer, needs):
                # Buy one copy of this offer and recurse on what remains;
                # staying at index i allows buying the same offer again.
                newNeeds = self.buyThisOffer(offer, needs)
                self.chooseOffers(price, special, newNeeds, money + offer[-1], i)
        # Buy whatever is still needed at unit price.
        for i in range(len(needs)):
            money += needs[i] * price[i]
        if money < self.Min:
            self.Min = money

    def shoppingOffers(self, price, special, needs):
        """
        :type price: List[int]
        :type special: List[List[int]]
        :type needs: List[int]
        :rtype: int
        """
        # Bug fix: reset the running minimum so a reused Solution
        # instance does not return a stale result from a previous call.
        self.Min = sys.maxsize
        self.chooseOffers(price, special, needs, 0)
        return self.Min
'''
注意,以上算法不是最优的,可以理解成全排列的问题,假设有ABC三种礼包,那么就会多次计算同一种情形:
例如1A1B1C,比如购买顺序是ABC,ACB,BCA,CBA,但是实际上并没有什么购买顺序,因此是组合问题
''' |
# @Time : 2021/1/27 16:31
# @Author : DengZh
# @File : SVMDemo.py
# @Software: PyCharm
import numpy as np
from sklearn.svm import SVR

# Toy regression data generated from y = 1 * x_0 + 2 * x_1 + 3
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
# y = 1 * x_0 + 2 * x_1 + 3
y = np.dot(X, np.array([1, 2])) + 3
# model = SVR()
# Tuned SVR: Gaussian (RBF) kernel with kernel parameter gamma=1 and
# penalty (regularization) coefficient C=100.
model = SVR(kernel='rbf',gamma=1,C=100)
model.fit(X, y)
# Predict back on the training points to eyeball the fit.
print(model.predict(X))
|
# Generated by Django 2.1.2 on 2019-01-05 12:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds `Assessment.slug` and updates the `Rubric.release` field."""

    dependencies = [
        ('rounds', '0011_auto_20190103_1743'),
    ]

    operations = [
        migrations.AddField(
            model_name='assessment',
            name='slug',
            # 'dffdds' is the one-off default used only to populate
            # existing rows; preserve_default=False keeps it out of the
            # model state afterwards.
            field=models.SlugField(default='dffdds', max_length=8),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='rubric',
            name='release',
            field=models.BooleanField(default=False, help_text='If this is True, then the round is visible to judge.', verbose_name='release rubric'),
        ),
    ]
|
"""WebReporter: OpenMM Reporter for live plotting of summary statistics
in the browser, using tornado, websockets, and google charts.
Authors: Robert McGibbon
License: GPLv3
"""
##############################################################################
# Imports
##############################################################################
# stdlib
import sys
import Queue as queue
import uuid
import threading
import json
import inspect
import webbrowser
# openmm
from simtk.unit import (dalton, gram, item, kilojoules_per_mole,
                        milliliter, nanometer, picosecond)
import simtk.openmm as mm
# external
try:
import tornado.ioloop
import tornado.web
import tornado.websocket
except ImportError:
print '#'*70
print 'WebReporter requires the python "tornado" package.'
print 'It can be installed with:'
print ' sudo easy_install tornado'
print ''
print 'For details, see http://www.tornadoweb.org/en/stable/#installation'
print '#'*70
sys.exit(1)
__all__ = ['WebReporter']
##############################################################################
# Classes
##############################################################################
class WebReporter(object):
    """OpenMM reporter that live-plots summary statistics in the browser.

    A tornado webserver (page + websocket endpoint) is started on a
    daemon thread; every report() pushes one data point to all connected
    clients, which render the series with Google Charts.
    """

    def __init__(self, report_interval, observables=None, port=5000, open_browser=True):
        """Create a WebReporter

        Parameters
        ----------
        report_interval : int
            The interval (in time steps) at which to plot frames
        observables : list of strings
            A list of the observables you wish to plot. You may select from:
            'kineticEnergy', 'potentialEnergy', 'totalEnergy', 'temperature',
            'volume', or 'density'. You may also use a custom observable,
            as long as you register its functional form with the
            register_observable method.
        port : int
            The port to run this webservice on.
        open_browser : bool
            Boot up your browser and navigate to the page.
        """
        self.port = int(port)
        if not (1000 < self.port < 65535):
            raise ValueError('Port must be between 1000 and 65535')
        self.report_interval = int(report_interval)
        self._has_initialized = False

        if observables is None:
            self.observables = []
        elif isinstance(observables, basestring):
            # a single observable name was passed; wrap it in a list
            self.observables = [observables]
        else:
            self.observables = list(observables)

        if open_browser:
            webbrowser.open('http://localhost:' + str(self.port))

        # dispatch table mapping every accepted alias to an
        # (axis label, state -> float) pair
        self.dispatch = {
            'KE': ('Kinetic Energy [kJ/mol]', self._kinetic_energy),
            'kinetic': ('Kinetic Energy [kJ/mol]', self._kinetic_energy),
            'kinetic_energy': ('Kinetic Energy [kJ/mol]', self._kinetic_energy),
            'kinetic energy': ('Kinetic Energy [kJ/mol]', self._kinetic_energy),
            'kineticEnergy': ('Kinetic Energy [kJ/mol]', self._kinetic_energy),

            'V': ('Potential Energy [kJ/mol]', self._potential_energy),
            'potential': ('Potential Energy [kJ/mol]', self._potential_energy),
            'potential_energy': ('Potential Energy [kJ/mol]', self._potential_energy),
            'potential energy': ('Potential Energy [kJ/mol]', self._potential_energy),
            'potentialEnergy': ('Potential Energy [kJ/mol]', self._potential_energy),

            'total': ('Total Energy [kJ/mol]', self._total_energy),
            'total_energy': ('Total Energy [kJ/mol]', self._total_energy),
            'totalEnergy': ('Total Energy [kJ/mol]', self._total_energy),
            'total energy': ('Total Energy [kJ/mol]', self._total_energy),

            'T': ('Temperature [K]', self._temperature),
            'temp': ('Temperature [K]', self._temperature),
            'temperature': ('Temperature [K]', self._temperature),

            'vol': ('Volume [nm^3]', self._volume),
            'volume': ('Volume [nm^3]', self._volume),

            'rho': ('Density [g/mL]', self._density),
            'density': ('Density [g/mL]', self._density),
        }

        # start the webserver in another thread; daemonized so it dies
        # together with the main interpreter
        t = threading.Thread(target=self._run)
        t.daemon = True
        t.start()

    def register_observable(self, key, function=None, label=None):
        """Register a new observable

        Parameters
        ----------
        key : string
            The name of this obervable.
        function : callable, optional
            If you're registering a NEW observable that WebReporter doesn't
            know about by default, supply the function used to compute it.
            The function should be a callable that accepts a single argument,
            the State, and returns a float.
        label : string, optional
            If you're registering a NEW observable, this is the string that
            will be used as the axis label for the graph.
        """
        if function is not None:
            n_args = len(inspect.getargspec(function)[0])
            if n_args != 1:
                # fix: corrected 'argumente' typo in this error message
                raise ValueError('function must be a callable taking 1 argument')
            if label is None:
                label = key
            self.dispatch[key] = (label, function)

        if key not in self.dispatch:
            raise ValueError('"%s" is not a valid observable. You may '
                'choose from %s' % (key, ', '.join('"' + e + '"' for e in self.dispatch.keys())))

        self.observables.append(key)

    def describeNextReport(self, simulation):
        # (steps until next report, positions, velocities, forces, energies)
        steps = self.report_interval - simulation.currentStep % self.report_interval
        return (steps, True, False, False, True)

    def report(self, simulation, state):
        if not self._has_initialized:
            self._initialize_constants(simulation)
            self._has_initialized = True

        message = dict(self.build_message(simulation, state))
        # hop onto the tornado IO loop thread before touching websockets
        tornado.ioloop.IOLoop.instance().add_callback(lambda: _WSHandler.broadcast(message))

    def build_message(self, simulation, state):
        """Yield (axis label, value) pairs for the current state."""
        yield ('Time [ps]', state.getTime().value_in_unit(picosecond))
        for k in self.observables:
            try:
                name, func = self.dispatch[k]
            except KeyError:
                raise ValueError('"%s" is not a valid observable. You may '
                    'choose from %s' % (k, ', '.join('"' + e + '"' for e in self.dispatch.keys())))
            yield (name, func(state))

    def _kinetic_energy(self, state):
        return state.getKineticEnergy().value_in_unit(kilojoules_per_mole)

    def _potential_energy(self, state):
        return state.getPotentialEnergy().value_in_unit(kilojoules_per_mole)

    def _total_energy(self, state):
        return (state.getKineticEnergy() + state.getPotentialEnergy()).value_in_unit(kilojoules_per_mole)

    def _temperature(self, state):
        # 0.00831451 is the molar gas constant R in kJ/(mol K) as a bare
        # float, so the numeric value of 2*KE/(dof*R) is the temperature
        # in kelvin even though the carried unit is kJ/mol.
        return (2 * state.getKineticEnergy() / (self._dof * 0.00831451)).value_in_unit(kilojoules_per_mole)

    def _volume(self, state):
        box = state.getPeriodicBoxVectors()
        volume = box[0][0] * box[1][1] * box[2][2]
        return volume.value_in_unit(nanometer ** 3)

    def _density(self, state):
        box = state.getPeriodicBoxVectors()
        volume = box[0][0] * box[1][1] * box[2][2]
        # fix: `milliliter` was referenced here without being imported at
        # the top of the file, so selecting the 'density'/'rho' observable
        # raised NameError (import added to the simtk.unit import line)
        return (self._totalMass / volume).value_in_unit(gram / item / milliliter)

    def _initialize_constants(self, simulation):
        """Initialize a set of constants required for the reports

        Parameters
        - simulation (Simulation) The simulation to generate a report for
        """
        system = simulation.system

        # Compute the number of degrees of freedom: 3 per particle with
        # mass, minus constraints, minus 3 if COM motion is removed.
        dof = 0
        for i in range(system.getNumParticles()):
            if system.getParticleMass(i) > 0 * dalton:
                dof += 3
        dof -= system.getNumConstraints()
        if any(type(system.getForce(i)) == mm.CMMotionRemover for i in range(system.getNumForces())):
            dof -= 3
        self._dof = dof

        # Compute the total system mass.
        self._totalMass = 0 * dalton
        for i in range(system.getNumParticles()):
            self._totalMass += system.getParticleMass(i)

    def _run(self):
        """Run the tornado webserver. This should be run in a separate thread,
        as it'll block"""
        tornado.web.Application([
            (r'/', _MainHandler),
            (r'/ws', _WSHandler),
        ]).listen(self.port)
        tornado.ioloop.IOLoop.instance().start()
class _WSHandler(tornado.websocket.WebSocketHandler):
    # Registry of connected websocket clients keyed by a per-connection
    # uuid. Class-level, so it is shared by every handler instance.
    clients = {}

    def open(self):
        # A new client connected: remember it so broadcast() reaches it.
        self.id = uuid.uuid4()
        self.clients[self.id] = self

    def on_close(self):
        del self.clients[self.id]

    @classmethod
    def broadcast(cls, message):
        # Push `message` to every connected client; non-string messages
        # are JSON-encoded first. Callers schedule this on the IO loop
        # thread via add_callback (see WebReporter.report).
        # (Python 2 idioms: basestring, dict.itervalues.)
        if not isinstance(message, basestring):
            message = json.dumps(message)
        for client in cls.clients.itervalues():
            client.write_message(message)
class _MainHandler(tornado.web.RequestHandler):
    # Serves the single-page chart UI (the _HTML blob below) on GET /.
    def get(self):
        self.write(_HTML)
_HTML = """
<!DOCTYPE html>
<html>
<head>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js"></script>
<script src="https://raw.github.com/bgrins/TinyColor/master/tinycolor.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/underscore.js/1.4.4/underscore-min.js"></script>
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<title>OpenMM Web Reporter</title>
<script type="text/javascript">
main = function() {
var host = 'ws://' + window.location.origin.split('//')[1] + '/ws';
var socket = new WebSocket(host);
var data;
var x_label = 'Time [ps]';
var setup_tables = function(msg) {
data = {};
var N = _.size(msg);
var i = 0.0;
for (key in msg) {
if (key == x_label) continue;
i += 1.0;
// assume that it's a y-axis
var table = new google.visualization.DataTable();
table.addColumn('number', x_label);
table.addColumn('number', key);
var $chart_div = $("<div class='chart'></div>");
$chart_div.appendTo('body');
var chart = new google.visualization.LineChart($chart_div[0]);
console.log(i);
console.log(N);
data[key] = {
table: table,
chart: chart,
options: {
title: key + ' vs. ' + x_label,
vAxis: {title: key},
hAxis: {title: x_label},
legend: {position: 'none'},
colors: [tinycolor({h:(360.0*(i+1)/N), s:100, v:90}).toHex()]
}
};
}
};
socket.onopen = function() {
console.log('opened');
};
socket.onmessage = function(packet) {
console.log
msg = JSON.parse(packet.data);
if (data == undefined) setup_tables(msg);
console.log(msg);
for (key in msg) {
if (key == x_label) continue;
data[key].table.addRow([msg[x_label], msg[key]]);
data[key].chart.draw(data[key].table, data[key].options);
}
};
socket.onclose = function() {
console.log('socket closed');
};
}
google.load("visualization", "1", {packages:["corechart"]});
google.setOnLoadCallback(main);
</script>
</head>
<body>
<h2 style="text-align: center;">OpenMM Web Reporter</h2>
</body>
</html>
"""
|
class Pokemon:
    """Model of a Pokemon with attacks, level, battle power and shininess."""

    def __init__(self, nome, tipo, descricao, ataques, nivel=0, poder_luta=0, brilhante=True):
        self.nome = nome
        self.tipo = tipo
        self.descricao = descricao
        # List of attack names; mutated in place by adicionar_ataque.
        self.ataques = ataques
        self.nivel = nivel
        self.poder_luta = poder_luta
        self.brilhante = brilhante

    def mostrar_ataques(self):
        """Print the list of known attacks."""
        print(self.ataques)

    def subir_nivel(self):
        """Level up: raises both the level and the battle power by one."""
        self.nivel += 1
        self.poder_luta += 1

    def mostrar_poder_luta(self):
        """Print the current battle power."""
        print(self.poder_luta)

    def e_brilhante(self):
        """Print whether this Pokemon is shiny."""
        # Idiom fix: direct truthiness instead of the redundant `== True`.
        if self.brilhante:
            print("brilhante")
        else:
            print("não é brilhante")

    def adicionar_ataque(self, novo_ataque):
        """Append a new attack to the attack list."""
        self.ataques.append(novo_ataque)

    def imprimir(self):
        """Print a full, human-readable description of this Pokemon."""
        print(f"nome: {self.nome}\ntipo: {self.tipo}\ndescricao: {self.descricao}\nataques: {self.ataques}\nnivel: {self.nivel}\npoder luta: {self.poder_luta}\n")
# Quick manual demo of the Pokemon class (prints to stdout).
p1= Pokemon("digimon","aço","resistente",["asa de aço"],3,6)
p1.mostrar_ataques()
p1.subir_nivel()
p1.mostrar_poder_luta()
p1.e_brilhante()  # brilhante defaults to True, so this prints "brilhante"
p1.adicionar_ataque("contra-ataque")
p1.imprimir()
# Guide Schema
# id (text, required): unique id
# required_targets (list): An empty list will cause the guide to be shown regardless
# of page/targets presence.
# steps (list): List of steps
# Step Schema
# title (text, required): Title text. Tone should be active.
# message (text, optional): Message text. Should help illustrate how to do a task, not
# just literally what the button does.
# target (text, optional): step is tied to an anchor target. If the anchor doesn't exist,
# the step will not be shown. if the anchor exists but is of type
# "invisible", it will not be pinged but will be scrolled to.
# otherwise the anchor will be pinged and scrolled to. If you'd like
# your step to show always or have a step is not tied to a specific
# element but you'd still like it to be shown, set this as None.
# Registry of active guides, keyed by guide name. Each value carries the
# guide's unique numeric `id` and the `required_targets` anchors that must
# be present on the page for the guide to be shown (see schema notes above).
GUIDES = {
    "issue": {"id": 1, "required_targets": ["issue_title", "exception"]},
    "issue_stream": {"id": 3, "required_targets": ["issue_stream"]},
    "inbox_guide": {"id": 8, "required_targets": ["inbox_guide"]},
    "for_review_guide": {"id": 9, "required_targets": ["for_review_guide_tab"]},
    "alerts_write_member": {"id": 10, "required_targets": ["alerts_write_member"]},
    "alerts_write_owner": {"id": 11, "required_targets": ["alerts_write_owner"]},
}
|
import numpy as np
import matplotlib.pyplot as plt

"""Example (modified) from http://matplotlib.org/users/recipes.html

Plots a histogram of normal samples with a stats text box in axes coords.
"""

np.random.seed(1234)  # reproducible sample

fig, ax = plt.subplots(1)
x = 30*np.random.randn(10000)
mu = x.mean()
median = np.median(x)
sigma = x.std()
# Bug fix: '\m' and '\s' were invalid escape sequences (DeprecationWarning
# since Python 3.6, slated to become errors); the backslashes are now
# doubled. '\n' stays a real newline; the string value — and therefore the
# rendered mathtext — is byte-identical.
textstr = '$\\mu=%.2f$\n$\\mathrm{median}=%.2f$\n$\\sigma=%.2f$'%(mu, median, sigma)

ax.hist(x, 50)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)  # bbox properties

# place a text box in upper left in axes coords
ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14,
        verticalalignment='top', bbox=props)

plt.show()
|
class colors:
    """ANSI escape sequences for styling terminal output."""
    G = '\033[92m'  # Green
    Y = '\033[93m'  # Yellow
    R = '\033[91m'  # Red
    B = '\033[1m'   # Bold
    U = '\033[4m'   # Underline
def color(msg, color):
    """Return `msg` wrapped in the given ANSI `color` code, followed by
    the reset sequence so subsequent output is unstyled."""
    reset = '\033[0m'
    return '{}{}{}'.format(color, str(msg), reset)
|
#!/usr/bin/python3
# Minimal flask-restful service exposing a single "whoami" resource at /.
from flask import Flask
from flask_restful import Api
from transpec.resources.whoami import Whoami

app = Flask(__name__)
api = Api(app)

# Imported after app creation; the mock provider is wired onto `app` below.
from security.providers.mock import MockAuthProvider

# NOTE(review): hard-coded secret key — acceptable only for this mock/dev
# setup; load it from the environment before any real deployment.
app.config['SECRET_KEY'] = 'super-secret'
auth_provider = MockAuthProvider(app)
auth_provider()  # presumably registers the auth hooks on the app — TODO confirm

api.add_resource(Whoami, '/')

if __name__ == '__main__':
    app.run(debug=True)
|
# pylint:disable=line-too-long
"""
The tool to check the availability or syntax of domains, IPv4, IPv6 or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Tests of PyFunceble.database.whois.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io///en/master/
Project homepage:
https://pyfunceble.github.io/
License:
::
MIT License
Copyright (c) 2017, 2018, 2019, 2020 Nissar Chababy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# pylint: enable=line-too-long
from datetime import datetime, timedelta
from unittest import TestCase
from unittest import main as launch_tests
import PyFunceble
from PyFunceble.database.whois import WhoisDB
class TestWhoisDB(TestCase):
    """
    Tests of PyFunceble.database.whois.
    """

    def setUp(self):
        """
        Setups everything needed for the tests.
        """

        PyFunceble.load_config(
            generate_directory_structure=False, custom={"db_type": "json"}
        )

        self.storage_file = (
            PyFunceble.CONFIG_DIRECTORY + PyFunceble.OUTPUTS.default_files.whois_db
        )

        self.whois_db = WhoisDB(parent_process=True)

        # Canned database content shared by the tests below.
        self.our_dataset = {
            "google.com": {
                "epoch": "1600034400",
                "expiration_date": "14-sep-2020",
                "state": "future",
            },
            "github.com": {
                "epoch": "1602194400",
                "expiration_date": "09-oct-2020",
                "state": "future",
            },
        }

        # Start from a clean slate: no whois DB file on disk.
        PyFunceble.helpers.File(self.storage_file).delete()

    def tearDown(self):
        """
        Cleans up everything after a test.
        """

        PyFunceble.helpers.File(self.storage_file).delete()

        del self.whois_db

    def test_load_file_does_not_exists(self):
        """
        Tests the case that we load the file but it does not exist.
        """

        self.whois_db.load()

        expected = {}

        self.assertEqual(expected, self.whois_db.database)

    def test_load_file_exists(self):
        """
        Tests the case that we load the file.
        """

        expected = self.our_dataset.copy()
        PyFunceble.helpers.Dict(self.our_dataset.copy()).to_json_file(self.storage_file)

        self.whois_db.load()

        self.assertEqual(expected, self.whois_db.database)

    def test_authorization(self):
        """
        Tests of the authorization method.
        """

        # Not authorized unless whois lookups AND the database are enabled.
        PyFunceble.CONFIGURATION.no_whois = True
        PyFunceble.CONFIGURATION.whois_database = False

        expected = False
        self.assertEqual(expected, self.whois_db.authorization())

        PyFunceble.CONFIGURATION.no_whois = False
        PyFunceble.CONFIGURATION.whois_database = False

        self.assertEqual(expected, self.whois_db.authorization())

        PyFunceble.CONFIGURATION.no_whois = True
        PyFunceble.CONFIGURATION.whois_database = True

        self.assertEqual(expected, self.whois_db.authorization())

        # Lookups enabled + database enabled: authorized.
        PyFunceble.CONFIGURATION.no_whois = False
        PyFunceble.CONFIGURATION.whois_database = True

        expected = True
        self.assertEqual(expected, self.whois_db.authorization())

    def test_save(self):
        """
        Tests the saving method.
        """

        self.whois_db.database = self.our_dataset.copy()
        self.whois_db.save()

        expected = True
        actual = PyFunceble.helpers.File(self.storage_file).exists()

        self.assertEqual(expected, actual)

        expected = self.our_dataset.copy()
        actual = PyFunceble.helpers.Dict().from_json_file(self.storage_file)

        self.assertEqual(expected, actual)

    def test_is_present(self):
        """
        Tests the presence of a subject into the database.
        """

        self.whois_db.database = self.our_dataset.copy()

        expected = True
        actual = "google.com" in self.whois_db

        self.assertEqual(expected, actual)

        # Subdomains of a known domain are not implicitly present.
        expected = False
        actual = "hello.google.com" in self.whois_db

        self.assertEqual(expected, actual)

    def test_is_time_older(self):
        """
        Tests of the method which checks if a given epoch/time
        is older.
        """

        self.whois_db.database = self.our_dataset.copy()

        # An epoch 15 days in the past is older.
        self.whois_db.database["google.com"]["epoch"] = (
            datetime.now() - timedelta(days=15)
        ).timestamp()

        expected = True
        actual = self.whois_db.is_time_older("google.com")

        self.assertEqual(expected, actual)

        # An epoch 15 days in the future is not.
        self.whois_db.database["google.com"]["epoch"] = (
            datetime.now() + timedelta(days=15)
        ).timestamp()

        expected = False
        actual = self.whois_db.is_time_older("google.com")

        self.assertEqual(expected, actual)

    def test_get_expiration_date(self):
        """
        Tests the method which is used to get the expiration date
        of a subject in the database.
        """

        self.whois_db.database = self.our_dataset.copy()

        expected = "14-sep-2020"
        actual = self.whois_db.get_expiration_date("google.com")

        self.assertEqual(expected, actual)

        # Unknown subjects have no expiration date.
        expected = None
        actual = self.whois_db.get_expiration_date("hello.google.com")

        self.assertEqual(expected, actual)

    def test_add(self):
        """
        Tests of the addition method.
        """

        self.whois_db.database = {}

        epoch = datetime.strptime("25-dec-2022", "%d-%b-%Y").timestamp()

        expected = {
            "microsoft.google.com": {
                "epoch": epoch,
                "expiration_date": "25-dec-2022",
                "state": "future",
            }
        }

        self.whois_db.add("microsoft.google.com", "25-dec-2022")
        self.assertEqual(expected, self.whois_db.database)

        # Re-adding an already-known subject recomputes its state.
        self.whois_db.database["microsoft.google.com"]["state"] = "hello"
        self.whois_db.add("microsoft.google.com", "25-dec-2022")
        self.assertEqual(expected, self.whois_db.database)

        # A date in the past yields the "past" state.
        epoch = datetime.strptime("25-dec-2007", "%d-%b-%Y").timestamp()

        expected = {
            "microsoft.google.com": {
                "epoch": epoch,
                "expiration_date": "25-dec-2007",
                "state": "past",
            }
        }

        self.whois_db.add("microsoft.google.com", "25-dec-2007")
        self.assertEqual(expected, self.whois_db.database)
if __name__ == "__main__":
launch_tests()
|
#! /usr/bin/env python3
#-*- coding: utf-8 -*-
"""
Game Saving MacGyver : this game is a simple labyrinth game with conditions of victory.
The movements will be made with the keyboard : right, left, up and down arrow keys.
The condition of victory is simple: collect the 3 items on the map
before heading to the exit guarded by Murdoc. If you don't have all the items, you lose.
Others files used : class_map, class_player, class_object, variables, level and images.
"""
import pygame
import sys
from pygame.locals import *
# import my class
from class_map import *
from class_player import *
from class_object import *
from variables import *
pygame.init()
# initialize my screen
SCREEN = pygame.display.set_mode((screen_dimension, screen_dimension))
pygame.display.set_caption(title)
continue_program = 1
continue_win = 0
continue_loose = 0
# Program's loop
while continue_program == 1:
continue_home_page = 1
# home_page's loop
while continue_home_page == 1:
home_page = pygame.image.load(HOME)
SCREEN.blit(home_page, (0, 0))
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == K_ESCAPE:
sys.exit()
# creating a multi-level game :
# F1 = level1, F2 = level2....etc
if event.key == K_F1:
choice = "level/level1.txt"
continue_home_page = 0
continue_game = 1
if event.key == K_F2:
choice = "level/level2.txt"
continue_home_page = 0
continue_game = 1
if event.key == K_F3:
choice = "level/level3.txt"
continue_home_page = 0
continue_game = 1
if event.key == K_F4:
choice = "level/level4.txt"
continue_home_page = 0
continue_game = 1
# generation of my map
if choice != 0:
my_level_map = Level(choice)
my_level_map.level_generator()
# generation of my objects
ether = Object("images/ether.png", "ether")
needle = Object("images/needle.png", "needle")
tube = Object("images/tube.png", "tube")
# get random position for my objects
ether.generate_random_position(my_level_map.my_map)
needle.generate_random_position(my_level_map.my_map)
tube.generate_random_position(my_level_map.my_map)
# display my map
my_level_map.afficher(SCREEN)
pygame.display.flip()
# generation of my characters
mcgyver = Player("images/macgyver.png", "s")
murdoc = Player("images/murdoc.png", "o")
# attibute position to my characters according to the level design.
mcgyver.generate(my_level_map.my_map)
murdoc.generate(my_level_map.my_map)
# update my display
SCREEN.blit(mcgyver.image, (mcgyver.x_pix, mcgyver.y_pix))
pygame.display.flip()
# Game's loop
while continue_game == 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == K_ESCAPE:
sys.exit()
# condition's loop for movement of the character according to pressed key
elif event.key == K_UP:
mcgyver.moove("up", my_level_map.my_map)
elif event.key == K_DOWN:
mcgyver.moove("down", my_level_map.my_map)
elif event.key == K_LEFT:
mcgyver.moove("left", my_level_map.my_map)
elif event.key == K_RIGHT:
mcgyver.moove("right", my_level_map.my_map)
# events according to character's position in order to collect the items.
# if character is on the same square as an object
# he collect the object in his inventory and
# the object is removed from the map
ether.collect(mcgyver, my_level_map.my_map)
needle.collect(mcgyver, my_level_map.my_map)
tube.collect(mcgyver, my_level_map.my_map)
# clean the screen before updating
my_level_map.afficher(SCREEN)
# update character's position on the map
SCREEN.blit(mcgyver.image, (mcgyver.x_pix, mcgyver.y_pix))
pygame.display.update()
# conditions of victory
if mcgyver.position == murdoc.position:
if ether in mcgyver.inventory and needle in mcgyver.inventory \
and tube in mcgyver.inventory:
continue_win = 1
continue_game = 0
else:
continue_loose = 1
continue_game = 0
# victory loop. provide the choice to continue or quit
while continue_win == 1:
page_win = pygame.image.load(WIN).convert()
SCREEN.blit(page_win, (0, 0))
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == K_ESCAPE:
sys.exit()
if event.key == K_RETURN or event.key == K_KP_ENTER:
continue_home_page = 1
continue_win = 0
# defeat loop. provide the choice to continue or quit
while continue_loose == 1:
page_loose = pygame.image.load(LOOSE).convert()
SCREEN.blit(page_loose, (0, 0))
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == K_ESCAPE:
sys.exit()
if event.key == K_RETURN or event.key == K_KP_ENTER:
continue_home_page = 1
continue_loose = 0
|
# -*- coding: utf-8 -*-
# @File : 76_MinimumWindowSubstring.py
# @Author: ZRN
# @Date : 2019/5/14
"""
给定一个字符串 S 和一个字符串 T,请在 S 中找出包含 T 所有字母的最小子串。
示例:
输入: S = "ADOBECODEBANC", T = "ABC"
输出: "BANC"
"""
class Solution:
    """Two solutions to "Minimum Window Substring": find the smallest
    substring of s containing every character of t (with multiplicity)."""
    def minWindow(self, s: str, t: str) -> str:
        """Sliding window tracked with explicit lists of matched characters.

        Returns the minimum covering window of s, or '' if none exists.
        """
        # letter_count: required count per character of t.
        # cur_letter: how many of each character the current window holds.
        letter_count = {}
        cur_letter = {}
        for c in t:
            cur_letter[c] = 0
            if c in letter_count:
                letter_count[c] += 1
            else:
                letter_count[c] = 1
        # Characters of s that belong to t, and their indices, in window order.
        letter_list = []
        index_list = []
        # Sentinel window wider than any real one; survives only when no
        # valid window is ever found.
        min_sub = [0, len(s) * 2]
        all_letter = False
        for i, c in enumerate(s):
            if c in letter_count:
                letter_list.append(c)
                index_list.append(i)
                cur_letter[c] += 1
                # Shrink from the left while the leftmost character is
                # over-represented in the window.
                while cur_letter[letter_list[0]] > letter_count[letter_list[0]]:
                    cur_letter[letter_list[0]] -= 1
                    del letter_list[0]
                    del index_list[0]
                if all_letter:
                    # Window already covers t: keep the tighter of the two.
                    if (min_sub[1] - min_sub[0]) > (index_list[-1] - index_list[0]):
                        min_sub[0] = index_list[0]
                        min_sub[1] = index_list[-1]
                else:
                    # Check whether every required character is now covered;
                    # the for/else runs only when no count is still short.
                    for j in letter_count:
                        if letter_count[j] > cur_letter[j]:
                            break
                    else:
                        all_letter = True
                        min_sub[0] = index_list[0]
                        min_sub[1] = index_list[-1]
        if min_sub[1] - min_sub[0] + 1 <= len(s):
            return s[min_sub[0]: min_sub[1] + 1]
        return ''
    def minWindow2(self, s, t):
        """Classic two-pointer sliding window over a single counter dict."""
        from collections import defaultdict
        i = j = 0
        count = len(t)
        step = float("inf")
        res = ""
        dic = defaultdict(int)
        for e in t:
            dic[e] += 1
        while j < len(s):
            if dic[s[j]] > 0:
                count -= 1
            dic[s[j]] -= 1  # chars outside t go negative; every char of s seen is recorded
            j += 1
            while count == 0:
                if step > j - i:
                    step = j - i
                    res = s[i:j]
                if dic[s[i]] == 0:  # only a character required by t can sit at exactly 0 here
                    count += 1
                dic[s[i]] += 1
                i += 1
        return res
# Quick manual check of the two-pointer variant.
if __name__ == '__main__':
    solver = Solution()
    print(solver.minWindow2("acadb", "ab"))
|
class Person:
    """A simple person record holding name, age and address."""

    def set(self, name, age, adrs):
        """Store the given details on the instance and echo them to stdout."""
        self.name, self.age, self.adrs = name, age, adrs
        print(self.name, self.age, self.adrs)
class Employee(Person):
    """A Person specialised with employment details."""

    def setval(self, id, salary, dprtmnt):
        """Store the employment details on the instance and echo them to stdout."""
        self.id, self.salary, self.dprtmnt = id, salary, dprtmnt
        print(self.id, self.salary, self.dprtmnt)
|
def read_varlen_quantity(string):
    """Decode a variable-length quantity from *string*.

    TODO: not implemented — currently returns None. Presumably meant to
    parse a MIDI-style variable-length quantity (confirm intended format).
    """
    pass
def scale_num(num, from_bits, to_bits):
    """Rescale an unsigned integer from a from_bits-wide range to a to_bits-wide range.

    E.g. scale_num(2819, 14, 16) maps 2819 out of 0..16383 onto 0..65535.
    Raises AssertionError for non-int or out-of-range input.
    """
    # Validate the type first so the range comparison below is meaningful.
    assert isinstance(num, int)
    # BUG FIX: also reject negatives; the original only checked the upper bound,
    # so e.g. -1 would silently produce a negative "scaled" value.
    assert 0 <= num <= (2**from_bits - 1)
    scale_factor = num / (2.0**from_bits - 1)
    fixed_number = scale_factor * (2.0**to_bits - 1)
    return int(fixed_number)
# BUG FIX: 'print x' is Python-2-only syntax; the parenthesised single-argument
# form prints identically under both Python 2 and Python 3.
if __name__ == "__main__":
    print(scale_num(2819, 14, 16))
|
import os
import subprocess
import sys
from .hdllogger import HDLLogger
class HDL:
    """Driver around the hdl_dump command-line tool: lists the games installed
    on a PS2 hard disk and bulk-converts/injects ISO images from a directory
    tree."""

    # Class-level defaults; the first three are overwritten per instance.
    hdd = ''
    isoDirectoryPath = ''
    hdlPath = ''
    hdlCommand = ''
    sliceIndex = '*u4'
    # NOTE: created once at class-definition time, so all instances share it.
    logger = HDLLogger('LOG.log')

    def __init__(self, hdd, isoDirectoryPath, hdlPath):
        self.hdd = hdd
        self.hdlPath = hdlPath
        self.isoDirectoryPath = isoDirectoryPath

    def formatSerialNumberName(self, serial):
        """Normalise a game serial such as 'SLUS_21205' into 'SLUS_212.05'.

        Drops the character at index 4, re-inserts the underscore, places a
        dot before the final two digits and truncates to 11 characters.
        """
        result = serial[:4] + "_" + serial[4 + 1:]
        result = result[:8] + "." + result[8:]
        return result[0:11]

    def saveGamesListToFile(self):
        """Write the 'hdl_toc' listing of self.hdd to '<hdd>_games.txt'."""
        hdlCommand = 'hdl_toc'
        file = self.hdd[:-1] + '_games.txt'
        command = self.hdlPath + ' ' + hdlCommand + ' ' + self.hdd + ' > ' + file
        # BUG FIX: the command previously ran twice (os.system immediately
        # followed by Popen); run it once through Popen so the 60 s timeout
        # actually guards the only execution. shell=True is required for the
        # '>' redirection.
        p = subprocess.Popen(command, shell=True)
        try:
            outs, errs = p.communicate(timeout=60)
            self.logger.log('Saved games list for {0}'.format(self.hdd))
        # BUG FIX: TimeoutExpired lives in the subprocess module; the bare
        # name raised NameError instead of handling the timeout.
        except subprocess.TimeoutExpired:
            p.kill()
            outs, errs = p.communicate()
            self.logger.log('Saved games list for {0} failed with error {1}'.format(self.hdd, errs))

    def bulkConvertBinToIso(self):
        """Walk isoDirectoryPath and convert every .bin image to .iso via
        AnyToISO. .7z archives are only reported (the delete command is built
        but never executed)."""
        # BUG FIX: the log messages in this method referred to bulkInjectDvd()
        # (copy-paste); corrected to name this method.
        self.logger.log('HDL.bulkConvertBinToIso() Called')
        gameName = 'undefined'
        try:
            for root, dirs, files in os.walk(self.isoDirectoryPath):
                for file in files:
                    fileName = os.path.splitext(file)[0]
                    extension = os.path.splitext(file)[1]
                    if extension == '.7z':
                        # NOTE(review): the delete command is constructed but
                        # never run, yet the log claims the file was deleted —
                        # confirm whether deletion is intended here.
                        command = 'del "' + root + '\\' + file + '"'
                        print('Archive clean up: ' + file + ' deleted')
                        self.logger.log('Archive clean up: ' + file + ' deleted')
                    else:
                        # Opening the file also verifies it is readable before converting.
                        with open(os.path.join(root, file), 'r') as auto:
                            gameName = root.split('\\')[-1]
                            if extension == '.bin':
                                command = 'D:\\ApplicationFiles\\AnyToISO\\anytoiso /convert ' \
                                    + '"' + root + '\\' + fileName + '.bin' + '" ' \
                                    + '"' + root + '\\' + fileName + '.iso' + '"'
                                procOutput = subprocess.getoutput(command)
                                print('Convert: {0} {1} to .iso'.format(gameName, file))
                                self.logger.log('Convert: {0} {1} to .iso'.format(gameName, file))
        except OSError as e:
            print('OS error: {0}'.format(e))
            self.logger.log('OS error: {0} for game {1}'.format(e, gameName))
        # BUG FIX: narrowed the bare 'except:' (which also swallowed
        # SystemExit/KeyboardInterrupt) to Exception.
        except Exception:
            e = sys.exc_info()[0]
            print('Exception {0}'.format(e))
            self.logger.log('Exception {0} for game {1}'.format(e, gameName))
        finally:
            self.logger.log('HDL.bulkConvertBinToIso() Terminated')

    # example command -> hdl_dump.exe inject_dvd hdd2: "Game Name (USA)" "C:\path\game.iso" SLUS_212.05 *u4
    def bulkInjectDvd(self):
        """Walk isoDirectoryPath and build an hdl_dump inject_dvd command for
        every .iso found. The actual call is commented out (dry-run mode)."""
        self.logger.log('HDL.bulkInjectDvd() Called')
        hdlCommand = 'inject_dvd'
        gameName = 'undefined'
        try:
            for root, dirs, files in os.walk(self.isoDirectoryPath):
                for file in files:
                    fileName = os.path.splitext(file)[0]
                    extension = os.path.splitext(file)[1]
                    if extension == '.7z':
                        command = 'del "' + root + '\\' + file + '"'
                        # subprocess.call(command)
                        print('Archive clean up: ' + file + ' deleted')
                        self.logger.log('Archive clean up: ' + file + ' deleted')
                    else:
                        with open(os.path.join(root, file), 'r') as auto:
                            gameName = root.split('\\')[-1]
                            if extension == '.iso':
                                # Dead store 'slu = XXXX_XXX.XX' removed: the
                                # value was immediately overwritten.
                                slu = self.formatSerialNumberName(fileName)
                                command = self.hdlPath + ' ' + hdlCommand + ' ' \
                                    + self.hdd + ' "' + gameName + '" ' + '"' + root + '\\' \
                                    + file + '" ' + slu + ' ' + self.sliceIndex
                                procOutput = '<' + hdlCommand + ' not called>'
                                # uncomment when ready for loading
                                # subprocess.call(command)
                                self.logger.log('Inject: ' + gameName + ' complete')
                                print('Inject: ' + gameName + ' complete')
        except OSError as e:
            print('OS error: {0}'.format(e))
            self.logger.log('OS error: {0} for game {1}'.format(e, gameName))
        # BUG FIX: narrowed the bare 'except:' to Exception (see above).
        except Exception:
            e = sys.exc_info()[0]
            print('Exception {0}'.format(e))
            self.logger.log('Exception {0} for game {1}'.format(e, gameName))
        finally:
            self.logger.log('HDL.bulkInjectDvd() Terminated')

    def __str__(self):
        return '{ hdd: "' + self.hdd + '", isoDirectoryPath: "' + self.isoDirectoryPath + '", hdlPath: "' + self.hdlPath + '" }'
|
'''
Developer: Ersin ÖZTÜRK
Date: 01.02.2020
Purpose of Software: practice exercises to reinforce previously learned Python concepts
'''
# Print the fruit names on one line, comma-separated, ending with a newline.
fruits = ['apple', 'banana', 'cherry']
print(','.join(fruits))
|
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, UserManager
)
#
# class UserManager(BaseUserManager):
# def create_user(self, email, password=None):
# """
# Creates and saves a User with the given email and password.
# """
# if not email:
# raise ValueError('Users must have an email address')
#
# if not password:
# raise ValueError("Users must have a password!!! ")
# user = self.model(
# email=self.normalize_email(email),
# )
#
# user.set_password(password)
# user.staff = is_staff
# user.admin = is_admin
# user.active = is_active
# # user.save(using=self._db)
# return user
#
# def create_staffuser(self, email, password):
# """
# Creates and saves a staff user with the given email and password.
# """
# user = self.create_user(
# email,
# password=password,
# )
# user.staff = True
# # user.save(using=self._db)
# return user
#
# def create_superuser(self, email, password):
# """
# Creates and saves a superuser with the given email and password.
# """
# user = self.create_user(
# email,
# password=password,
# )
# user.staff = True
# user.admin = True
# # user.save(using=self._db)
# return user
#
# class User(AbstractBaseUser):
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=True,
# )
# # full_name = models.CharField(max_length=255, blank=True, null=True)
# is_active = models.BooleanField(default=True)
# staff = models.BooleanField(default=False) # a admin user; non super-user
# admin = models.BooleanField(default=False) # a superuser
# timestamp = models.DateTimeField(auto_now_add=True)
# # notice the absence of a "Password field", that is built in.
#
# USERNAME_FIELD = 'email'
# REQUIRED_FIELDS = [] # Email & Password are required by default.
# objects = UserManager()
#
# def get_full_name(self):
# # The user is identified by their email address
# return self.email
#
# def get_short_name(self):
# # The user is identified by their email address
# return self.email
#
# def __str__(self):
# return self.email
#
# def has_perm(self, perm, obj=None):
# "Does the user have a specific permission?"
# # Simplest possible answer: Yes, always
# return True
#
# def has_module_perms(self, app_label):
# "Does the user have permissions to view the app `app_label`?"
# # Simplest possible answer: Yes, always
# return True
#
# @property
# def is_staff(self):
# "Is the user a member of staff?"
# return self.staff
#
# @property
# def is_active(self):
# "Is the user a admin member?"
# return self.active
#
# @property
# def is_admin(self):
# "Is the user a admin member?"
# return self.admin
#
#
#
#
#
# class GuestEmail(models.Model):
# email = models.EmailField()
# active = models.BooleanField(default=True)
# update = models.DateTimeField(auto_now=True)
# timestamp = models.DateTimeField(auto_now_add=True)
#
# def __str__(self):
# return self.email
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext_lazy as _
class UserManager(BaseUserManager):
    """Manager for the email-as-username CustomUser model."""

    def create_user(self, email, password=None, **extra_fields):
        """Create and save a regular user with the given email and password.

        Raises ValueError when either email or password is missing.
        """
        if not email:
            raise ValueError('Users must have an email address')
        if not password:
            raise ValueError("Users must have a password!!! ")
        # Regular users default to non-staff, non-superuser, active.
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        extra_fields.setdefault('is_active', True)
        email = self.normalize_email(email)
        user = self.model(email=email, **extra_fields)
        user.set_password(password)
        # BUG FIX: honour multi-database routing as Django recommends for
        # custom managers; equivalent to save() on a single-database setup.
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password, **extra_fields):
        """Create and save a superuser; is_staff/is_superuser must be True."""
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        extra_fields.setdefault('is_active', True)
        if extra_fields.get('is_staff') is not True:
            raise ValueError(_('Superuser must have is_staff=True.'))
        if extra_fields.get('is_superuser') is not True:
            raise ValueError(_('Superuser must have is_superuser=True.'))
        return self.create_user(email, password, **extra_fields)
class CustomUser(AbstractUser):
    """Django user model that authenticates with an email address instead of
    a username."""
    # Remove the inherited username column entirely.
    username = None
    email = models.EmailField(_('email address'), max_length=255, unique=True)
    # Log in with the email field; no extra fields required by createsuperuser.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []
    objects = UserManager()
    def __str__(self):
        return self.email
|
import moviepy.editor

# BUG FIX: the path contained "\A", "\V", "\H", "\k", relying on Python leaving
# unrecognised escape sequences untouched (a DeprecationWarning slated to become
# an error). A raw string makes the backslashes explicit and future-proof; the
# resulting bytes are identical.
video = moviepy.editor.VideoFileClip(r"F:\AAFAQ RASHID\VIDEOS\Hindi and Urdu Songs\kabir.mp4")
# Extract the audio track from the clip and write it out as an MP3.
audio = video.audio
audio.write_audiofile("result.mp3")
|
import numpy as np
from numpy import isnan
from yahooFinance import getQuote
def normcorrcoef(a, b):
    """Normalized cross-correlation of a and b at the single 'valid' lag.

    Returns a length-1 array whose value is 1.0 for perfectly proportional
    inputs and 0.0 for orthogonal ones.
    """
    cross = np.correlate(a, b)
    norm = np.sqrt(np.correlate(a, a) * np.correlate(b, b))[0]
    return cross / norm
def interpolate(self, method='linear'):
    """
    Interpolate missing values (after the first valid value)

    Parameters
    ----------
    method : {'linear'}
        Interpolation method (only 'linear' is supported).

    Returns
    -------
    interpolated : numpy array with NaNs after the first valid sample
        replaced by linearly interpolated values.

    Adapted from pandas.core.series (kept only the 'linear' method).
    Usage: infill NaN values with linear interpolated values.
    """
    inds = np.arange(len(self))
    values = np.array(self.copy())
    invalid = isnan(values)
    # BUG FIX: '~' replaces unary '-', which raises TypeError on boolean
    # arrays in modern numpy (the boolean negative operator was removed).
    valid = ~invalid
    # argmax on the boolean mask yields the first valid index.
    firstIndex = valid.argmax()
    valid = valid[firstIndex:]
    invalid = invalid[firstIndex:]
    inds = inds[firstIndex:]
    result = values.copy()
    # Interpolate only the invalid positions, anchored on the valid samples.
    result[firstIndex:][invalid] = np.interp(inds[invalid], inds[valid], values[firstIndex:][valid])
    return result
#----------------------------------------------
def cleantobeginning(self):
    """
    Copy missing values (to all dates prior the first valid value)

    Usage: infill NaN values at beginning with copy of first valid value
    """
    values = self.copy()
    invalid = isnan(values)
    # BUG FIX: '~' replaces unary '-', which raises TypeError on boolean
    # arrays in modern numpy. (Also dropped an unused 'inds' local.)
    valid = ~invalid
    # argmax on the boolean mask yields the first valid index.
    firstIndex = valid.argmax()
    # Back-fill everything before the first valid sample with that sample.
    for i in range(firstIndex):
        values[i] = values[firstIndex]
    return values
#----------------------------------------------
def cleanspikes(x,periods=20,stddevThreshold=5.0):
    """Replace one-sample price spikes in x with NaN.

    A sample is flagged when its day-over-day ratio — measured both forward
    and in reverse, normalised by the std-dev of the non-flat ratios — sums
    (after median-centering) to more than stddevThreshold.

    assumes x is a numpy array (x.shape is used below) — TODO confirm callers.
    'periods' is unused by the active code path; only the disabled variant in
    the string literal at the bottom references it.
    """
    # remove outliers from gradient of x (in 2 directions)
    x_clean = np.array(x).copy()
    test = np.zeros(x.shape[0],'float')
    #gainloss_f = np.ones((x.shape[0]),dtype=float)
    #gainloss_r = np.ones((x.shape[0]),dtype=float)
    #print gainloss_f[1:],x[1:].shape,x[:-1].shape
    #print " ...inside cleanspikes... ", x[1:].shape, x[:-1].shape
    #gainloss_f[1:] = x[1:] / x[:-1]
    #gainloss_r[:-1] = x[:-1] / x[1:]
    # Forward and reverse day-over-day ratios (length n-1 each).
    gainloss_f = x[1:] / x[:-1]
    gainloss_r = x[:-1] / x[1:]
    # Std-dev of the informative (non-flat, non-NaN) ratios; epsilon avoids
    # division by zero on constant series.
    valid_f = gainloss_f[gainloss_f != 1.]
    valid_f = valid_f[~np.isnan(valid_f)]
    Stddev_f = np.std(valid_f) + 1.e-5
    valid_r = gainloss_r[gainloss_r != 1.]
    valid_r = valid_r[~np.isnan(valid_r)]
    Stddev_r = np.std(valid_r) + 1.e-5
    # Median-centered, normalised deviation in each direction.
    forward_test = gainloss_f/Stddev_f - np.median(gainloss_f/Stddev_f)
    reverse_test = gainloss_r/Stddev_r - np.median(gainloss_r/Stddev_r)
    # Each sample accumulates its reverse score and its forward score.
    test[:-1] += reverse_test
    test[1:] += forward_test
    x_clean[ test > stddevThreshold ] = np.nan
    """
    for i in range( 1,x.shape[0]-2 ):
        minx = max(0,i-periods/2)
        maxx = min(x.shape[0],i+periods/2)
        #Stddev_f = np.std(gainloss_f[minx:maxx]) + 1.e-5
        #Stddev_r = np.std(gainloss_r[minx:maxx]) + 1.e-5
        if gainloss_f[i-1]/Stddev_f > stddevThreshold and gainloss_r[i]/Stddev_r > stddevThreshold:
            x_clean[i] = np.nan
    """
    return x_clean
#----------------------------------------------
def cleantoend(self):
    """
    Copy missing values (to all dates after the last valid value)

    Usage: infill NaN values at end with copy of last valid value.
    Implemented by reversing the series, delegating the back-fill to
    cleantobeginning, and reversing the result again.
    """
    return cleantobeginning(self[::-1])[::-1]
#----------------------------------------------
def percentileChannel(x,minperiod,maxperiod,incperiod,lowPct,hiPct):
    """Low/high percentile channels of x averaged over several lookbacks.

    For each index i, averages the lowPct and hiPct percentiles of the
    windows ending at i whose lengths sweep arange(minperiod, maxperiod,
    incperiod). Returns (minchannel, maxchannel) float arrays of len(x).
    """
    lookbacks = np.arange(minperiod, maxperiod, incperiod)
    lower = np.zeros(len(x), dtype=float)
    upper = np.zeros(len(x), dtype=float)
    for i in range(len(x)):
        contributions = 0
        for period in lookbacks:
            start = max(1, i - period)
            if len(x[start:i]) < 1:
                # Not enough history yet: the sample stands in for the channel.
                lower[i] += x[i]
                upper[i] += x[i]
            else:
                lower[i] += np.percentile(x[start:i + 1], lowPct)
                upper[i] += np.percentile(x[start:i + 1], hiPct)
            contributions += 1
        lower[i] /= contributions
        upper[i] /= contributions
    return lower, upper
#----------------------------------------------
def percentileChannel_2D(x,minperiod,maxperiod,incperiod,lowPct,hiPct):
    """2-D version of percentileChannel: per-column percentile channels for
    x[stock, date], averaged over the swept lookback periods.

    NOTE(review): the trailing 'print' statements are Python 2 syntax; this
    function (and module) cannot run under Python 3 as written.
    """
    periods = np.arange(minperiod,maxperiod,incperiod)
    minchannel = np.zeros( (x.shape[0],x.shape[1]), dtype=float)
    maxchannel = np.zeros( (x.shape[0],x.shape[1]), dtype=float)
    for i in range( x.shape[1] ):
        divisor = 0
        for j in range(len(periods)):
            minx = max(1,i-periods[j])
            if len(x[0,minx:i]) < 1:
                # Not enough history: the column itself stands in for the channel.
                minchannel[:,i] = minchannel[:,i] + x[:,i]
                maxchannel[:,i] = maxchannel[:,i] + x[:,i]
                divisor += 1
            else:
                minchannel[:,i] = minchannel[:,i] + np.percentile(x[:,minx:i+1],lowPct,axis=-1)
                maxchannel[:,i] = maxchannel[:,i] + np.percentile(x[:,minx:i+1],hiPct,axis=-1)
                divisor += 1
        minchannel[:,i] /= divisor
        maxchannel[:,i] /= divisor
    print " minperiod,maxperiod,incperiod = ", minperiod,maxperiod,incperiod
    print " lowPct,hiPct = ", lowPct,hiPct
    print " x min,mean,max = ", x.min(),x.mean(),x.max()
    print " divisor = ", divisor
    return minchannel,maxchannel
#----------------------------------------------
def dpgchannel(x,minperiod,maxperiod,incperiod):
    """Min/max price channel of x averaged over several lookback periods.

    For each index i, averages min and max of the windows ending at i whose
    lengths sweep arange(minperiod, maxperiod, incperiod).
    Returns (minchannel, maxchannel) float arrays the same length as x.
    """
    lookbacks = np.arange(minperiod, maxperiod, incperiod)
    lower = np.zeros(len(x), dtype=float)
    upper = np.zeros(len(x), dtype=float)
    for i in range(len(x)):
        contributions = 0
        for period in lookbacks:
            start = max(1, i - period)
            if len(x[start:i]) < 1:
                # Not enough history yet: the sample stands in for the channel.
                lower[i] += x[i]
                upper[i] += x[i]
            else:
                lower[i] += min(x[start:i + 1])
                upper[i] += max(x[start:i + 1])
            contributions += 1
        lower[i] /= contributions
        upper[i] /= contributions
    return lower, upper
#----------------------------------------------
def dpgchannel_2D(x,minperiod,maxperiod,incperiod):
    """Column-wise min/max channel for 2-D x[stock, date], averaged over the
    swept lookback periods.

    Returns (minchannel, maxchannel) float arrays shaped like x.
    """
    lookbacks = np.arange(minperiod, maxperiod, incperiod)
    lower = np.zeros((x.shape[0], x.shape[1]), dtype=float)
    upper = np.zeros((x.shape[0], x.shape[1]), dtype=float)
    for col in range(x.shape[1]):
        contributions = 0
        for period in lookbacks:
            start = max(1, col - period)
            if len(x[0, start:col]) < 1:
                # Not enough history yet: the column stands in for the channel.
                lower[:, col] += x[:, col]
                upper[:, col] += x[:, col]
            else:
                lower[:, col] += np.min(x[:, start:col + 1], axis=-1)
                upper[:, col] += np.max(x[:, start:col + 1], axis=-1)
            contributions += 1
        lower[:, col] /= contributions
        upper[:, col] /= contributions
    return lower, upper
#----------------------------------------------
def selfsimilarity(hi,lo):
    """Rolling percentile rank of a smoothed (high-low)/range measure.

    hi and lo are presumably aligned high/low price arrays — TODO confirm.
    Relies on MoveMax, MoveMin and SMA, which are defined elsewhere in this
    module (not visible here).
    """
    from scipy.stats import percentileofscore
    HminusL = hi-lo
    periods = 10
    # Rolling 10-day sum of the daily high-low spread.
    SMS = np.zeros( (hi.shape[0]), dtype=float)
    for i in range( hi.shape[0] ):
        minx = max(0,i-periods)
        SMS[i] = np.sum(HminusL[minx:i+1],axis=-1)
    # find the 10-day range (incl highest high and lowest low)
    range10day = MoveMax(hi,10) - MoveMin(lo,10)
    # normalize
    SMS /= range10day
    # compute quarterly (60-day) SMA
    SMS = SMA(SMS,60)
    # find percentile rank
    movepctrank = np.zeros( (hi.shape[0]), dtype=float)
    for i in range( hi.shape[0] ):
        # NOTE(review): the ranking window reuses 'periods' (10 days), not the
        # 60-day smoothing span — confirm this is intended.
        minx = max(0,i-periods)
        movepctrank[i] = percentileofscore(SMS[minx:i+1],SMS[i])
    return movepctrank
#----------------------------------------------
def jumpTheChannelTest(x,minperiod=4,maxperiod=12,incperiod=3,numdaysinfit=28, offset=3):
    ###
    ### compute linear trend in upper and lower channels and compare
    ### actual stock price to forecast range
    ### return pctChannel for each stock
    ### calling function will use pctChannel as signal.
    ### - e.g. negative pctChannel is signal that down-trend begins
    ### - e.g. more than 100% pctChannel is signal of new up-trend beginning
    # calculate dpgchannel for all stocks in x
    # - x is a 1-D price series for one stock
    # - 'numdaysinfit' describes number of days over which to calculate a linear trend
    # - 'offset' describes number days to forecast channel trends forward
    import warnings
    # NOTE(review): np.RankWarning moved to np.exceptions.RankWarning in
    # numpy >= 1.25 and the top-level alias was removed in numpy 2.0.
    warnings.simplefilter('ignore', np.RankWarning)
    pctChannel = np.zeros( (x.shape[0]), 'float' )
    # calculate linear trend over 'numdaysinfit' with 'offset'
    minchannel,maxchannel = dpgchannel(x,minperiod,maxperiod,incperiod)
    minchannel_trenddata = minchannel[-(numdaysinfit+offset):-offset]
    regression = np.polyfit(range(-(numdaysinfit+offset),-offset), minchannel_trenddata, 1)
    # polyfit intercept = channel value forecast at day 0 (the current day).
    minchannel_trend = regression[-1]
    maxchannel_trenddata = maxchannel[-(numdaysinfit+offset):-offset]
    regression = np.polyfit(range(-(numdaysinfit+offset),-offset), maxchannel_trenddata, 1)
    maxchannel_trend = regression[-1]
    # Latest quote expressed as a fraction of the forecast channel width.
    pctChannel = (x[-1]-minchannel_trend) / (maxchannel_trend-minchannel_trend)
    # calculate the stdev over the period (daily gain/loss ratios; NaNs -> flat)
    gainloss_period = x[-(numdaysinfit+offset)+1:-offset+1] / x[-(numdaysinfit+offset):-offset]
    gainloss_period[np.isnan(gainloss_period)] = 1.
    gainloss_cumu = np.cumprod( gainloss_period )[-1] -1.
    gainloss_std = np.std( gainloss_period )
    # calculate the current quote as number of stdevs above or below trend
    # NOTE(review): this mixes the max-channel data ARRAY with the min-channel
    # scalar forecast; the sibling recentTrendAndStdDevs averages two scalars.
    # Confirm whether minchannel_trenddata was intended here.
    currentMidChannel = (maxchannel_trenddata+minchannel_trend)/2.
    numStdDevs = (x[-1]/currentMidChannel[-1]-1.) / gainloss_std
    '''
    print "pctChannel = ", pctChannel
    print "gainloss_period = ", gainloss_period
    print "gainloss_cumu = ", gainloss_cumu
    print "gainloss_std = ", gainloss_std
    print "currentMidChannel = ", currentMidChannel[-1]
    print "numStdDevs = ", numStdDevs
    '''
    return pctChannel, gainloss_cumu, gainloss_std, numStdDevs
#----------------------------------------------
def recentChannelFit(x,minperiod=4,maxperiod=12,incperiod=3,numdaysinfit=28, offset=3):
    """Fit straight lines to the lower and upper dpg-channels of x.

    Fits over 'numdaysinfit' days ending 'offset' days before the last
    sample, so the fit can later be compared against the latest quotes.
    x is a 1-D price series; minperiod/maxperiod/incperiod parameterise
    dpgchannel.

    Returns (lower_fit, upper_fit): np.polyfit [slope, intercept] arrays;
    the intercept is the channel forecast at the current day (day 0).
    """
    import warnings
    # np.RankWarning moved to np.exceptions in numpy >= 1.25 and the top-level
    # alias was removed in numpy 2.0; support both generations.
    try:
        rank_warning = np.exceptions.RankWarning
    except AttributeError:
        rank_warning = np.RankWarning
    warnings.simplefilter('ignore', rank_warning)
    # Channel over the full history, then restrict to the fitting window.
    minchannel, maxchannel = dpgchannel(x, minperiod, maxperiod, incperiod)
    fit_days = range(-(numdaysinfit + offset), -offset)
    regression1 = np.polyfit(fit_days, minchannel[-(numdaysinfit + offset):-offset], 1)
    regression2 = np.polyfit(fit_days, maxchannel[-(numdaysinfit + offset):-offset], 1)
    # NOTE: the original also computed an unused 'pctChannel' here; that dead
    # code was removed (callers needing it use jumpTheChannelTest instead).
    return regression1, regression2
#----------------------------------------------
def recentTrendAndStdDevs(x,datearray,minperiod=4,maxperiod=12,incperiod=3,numdaysinfit=28, offset=3):
    ###
    ### compute linear trend in upper and lower channels and compare
    ### actual stock price to forecast range
    ### return pctChannel for each stock
    ### calling function will use pctChannel as signal.
    ### - e.g. numStdDevs < -1. is signal that down-trend begins
    ### - e.g. whereas > 1.0 is signal of new up-trend beginning
    # calculate dpgchannel for all stocks in x
    # - x is a 1-D price series; datearray holds the matching dates
    # - 'numdaysinfit' describes number of days over which to calculate a linear trend
    # - 'offset' describes number days to forecast channel trends forward
    # fit short-term recent trend channel for plotting
    lowerFit, upperFit = recentChannelFit( x,
                                minperiod=minperiod,
                                maxperiod=maxperiod,
                                incperiod=incperiod,
                                numdaysinfit=numdaysinfit,
                                offset=offset)
    recentFitDates = datearray[-numdaysinfit-offset:-offset+1]
    relativedates = range(-numdaysinfit-offset,-offset+1)
    # Evaluate both fitted lines over the window; p(0) is the forecast "now".
    p = np.poly1d(upperFit)
    upperTrend = p(relativedates)
    currentUpper = p(0) * 1.
    p = np.poly1d(lowerFit)
    lowerTrend = p(relativedates)
    currentLower = p(0) * 1.
    midTrend = (upperTrend+lowerTrend)/2.
    #residuals = x[-numdaysinfit-offset:-offset+1] - midTrend
    #fitStdDev = np.std(residuals)
    # Half the mean channel width serves as the deviation unit.
    fitStdDev = np.mean( upperTrend - lowerTrend )/2.
    #print ".....lowerFit, upperFit = ", lowerFit, upperFit
    #print ".....fitStdDev,currentUpper,currentLower,x[-1] = ", fitStdDev, currentUpper,currentLower,x[-1]
    currentResidual = x[-1] - (currentUpper + currentLower)/2.
    numStdDevs = currentResidual / fitStdDev
    # calculate gain or loss over the period (daily ratios; NaNs -> flat)
    gainloss_period = x[-(numdaysinfit+offset)+1:-offset+1] / x[-(numdaysinfit+offset):-offset]
    gainloss_period[np.isnan(gainloss_period)] = 1.
    gainloss_cumu = np.cumprod( gainloss_period )[-1] -1.
    # Fraction of the forecast channel by which the latest quote exceeds the top.
    pctChannel = (x[-1]-currentUpper) / (currentUpper-currentLower)
    return gainloss_cumu, numStdDevs, pctChannel
#----------------------------------------------
def recentTrendAndMidTrendWithGap(x,datearray,minperiod=4,maxperiod=12,incperiod=3,numdaysinfit=28,numdaysinfit2=20, offset=3):
    ###
    ### - Compute linear trend in upper and lower channels and compare
    ###   actual stock price to forecast range
    ### - Compute 2nd linear trend in upper and lower channels only for
    ###   small number of recent prices without gap
    ### - return pctChannel for each stock
    ### - calling function will use pctChannel as signal.
    ###   * e.g. numStdDevs < -1. is signal that down-trend begins
    ###   * e.g. whereas > 1.0 is signal of new up-trend beginning
    # calculate dpgchannel for all stocks in x
    # - x is a 1-D price series; datearray holds the matching dates
    # - 'numdaysinfit' describes number of days over which to calculate a linear trend
    # - 'offset' describes number days to forecast channel trends forward
    # NOTE(review): this function imports matplotlib and calls plt.show(),
    # a blocking GUI side effect — unusual for an analysis helper; confirm.
    # fit short-term recent trend channel with offset from current date for plotting
    gappedLowerFit, gappedUpperFit = recentChannelFit( x,
                                minperiod=minperiod,
                                maxperiod=maxperiod,
                                incperiod=incperiod,
                                numdaysinfit=numdaysinfit,
                                offset=offset)
    recentFitDates = datearray[-numdaysinfit-offset:-offset+1]
    relativedates = range(-numdaysinfit-offset,-offset+1)
    # Evaluate the gapped fits over the window; p(0) is the forecast "now".
    p = np.poly1d(gappedUpperFit)
    upperTrend = p(relativedates)
    currentUpper = p(0) * 1.
    p = np.poly1d(gappedLowerFit)
    lowerTrend = p(relativedates)
    currentLower = p(0) * 1.
    midTrend = (upperTrend+lowerTrend)/2.
    #residuals = x[-numdaysinfit-offset:-offset+1] - midTrend
    #fitStdDev = np.std(residuals)
    # Half the mean channel width serves as the deviation unit.
    fitStdDev = np.mean( upperTrend - lowerTrend )/2.
    #print ".....gappedLowerFit, gappedUpperFit = ", gappedLowerFit, gappedUpperFit
    #print ".....fitStdDev,currentUpper,currentLower,x[-1] = ", fitStdDev, currentUpper,currentLower,x[-1]
    currentResidual = x[-1] - (currentUpper + currentLower)/2.
    numStdDevs = currentResidual / fitStdDev
    # calculate gain or loss over the period (with offset)
    gainloss_period = x[-(numdaysinfit+offset)+1:-offset+1] / x[-(numdaysinfit+offset):-offset]
    gainloss_period[np.isnan(gainloss_period)] = 1.
    gainloss_cumu = np.cumprod( gainloss_period )[-1] -1.
    pctChannel = (x[-1]-currentUpper) / (currentUpper-currentLower)
    # fit shorter trend without offset
    NoGapLowerFit, NoGapUpperFit = recentChannelFit( x,
                                minperiod=minperiod,
                                maxperiod=maxperiod,
                                incperiod=incperiod,
                                numdaysinfit=numdaysinfit2,
                                offset=1)
    recentFitDates = datearray[-numdaysinfit2:]
    relativedates = range(-numdaysinfit2,0)
    p = np.poly1d(NoGapUpperFit)
    NoGapUpperTrend = p(relativedates)
    NoGapCurrentUpper = p(0) * 1.
    p = np.poly1d(NoGapLowerFit)
    NoGapLowerTrend = p(relativedates)
    NoGapCurrentLower = p(0) * 1.
    NoGapMidTrend = (NoGapUpperTrend+NoGapLowerTrend)/2.
    '''
    # calculate gain or loss over the shorter period (with no offset)
    gainloss_period = x[-(numdaysinfit2+1):1] / x[-(numdaysinfit2):]
    gainloss_period[np.isnan(gainloss_period)] = 1.
    gainloss_cumu = np.cumprod( gainloss_period )[-1] -1.
    '''
    # calculate relative gain or loss over entire period
    gainloss_cumu2 = NoGapMidTrend[-1]/midTrend[0] -1.
    relative_GainLossRatio = (NoGapCurrentUpper + NoGapCurrentLower)/(currentUpper + currentLower)
    # Diagnostic plot: gapped channel in yellow, no-gap channel in cyan.
    import matplotlib.pylab as plt
    plt.figure(1)
    plt.clf()
    plt.grid(True)
    plt.plot(datearray[-(numdaysinfit+offset+20):],x[-(numdaysinfit+offset+20):],'k-')
    relativedates = range(-numdaysinfit-offset,-offset+1)
    plt.plot(datearray[np.array(relativedates)],upperTrend,'y-')
    plt.plot(datearray[np.array(relativedates)],lowerTrend,'y-')
    plt.plot([datearray[-1]],[(upperTrend[-1]+lowerTrend[-1])/2.],'y.',ms=30)
    relativedates = range(-numdaysinfit2,0)
    plt.plot(datearray[np.array(relativedates)],NoGapUpperTrend,'c-')
    plt.plot(datearray[np.array(relativedates)],NoGapLowerTrend,'c-')
    plt.plot([datearray[-1]],[(NoGapUpperTrend[-1]+NoGapLowerTrend[-1])/2.],'c.',ms=30)
    plt.show()
    return gainloss_cumu, gainloss_cumu2, numStdDevs, relative_GainLossRatio
#----------------------------------------------
def textmessageOutsideTrendChannel( symbols, adjClose ):
    """
    Email an alert summarizing trend-channel status for currently held stocks.

    symbols  -- list of ticker symbols for the whole universe
    adjClose -- 2D array of adjusted closes (stock, date), rows aligned with symbols

    Runs jumpTheChannelTest on every symbol's price history; for symbols that
    are in the current holdings it collects the formatted channel statistics
    and emails them (SendEmail) when the market-status string indicates the
    market is near close.  Returns None; side effects are printed log lines
    and the outgoing email.

    NOTE(review): despite the name, every held symbol is reported -- the
    pctChannel < 1. downtrend filter below is commented out.  SendTextMessage
    is imported but the SMS call is commented out; confirm both are intended.
    """
    # temporarily skip this!!!!!!
    #return
    import datetime
    from functions.GetParams import *
    from functions.CheckMarketOpen import *
    from functions.SendEmail import SendTextMessage
    from functions.SendEmail import SendEmail
    # send text message for held stocks if the latest quote is outside
    # (to downside) the established channel
    # Get Credentials for sending email
    params = GetParams()
    print ""
    #print "params = ", params
    print ""
    # sender credentials come from the params file ('fromaddr' / 'PW')
    username = str(params['fromaddr']).split("@")[0]
    emailpassword = str(params['PW'])
    subjecttext = "PyTAAA update - Pct Trend Channel"
    boldtext = "time is "+datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p")
    headlinetext = "market status: " + get_MarketOpenOrClosed()
    # Get Holdings from file
    holdings = GetHoldings()
    holdings_symbols = holdings['stocks']
    edition = GetEdition()
    # process symbols in current holdings
    downtrendSymbols = []
    channelPercent = []
    channelGainsLossesHoldings = []
    channelStdsHoldings = []
    channelGainsLosses = []    # stats collected for ALL symbols, not just holdings
    channelStds = []
    currentNumStdDevs = []
    for i, symbol in enumerate(symbols):
        # channel-fit parameters come from params; the commented-out kwargs
        # preserve the old hard-coded values for reference
        pctChannel,channelGainLoss,channelStd,numStdDevs = jumpTheChannelTest(adjClose[i,:],\
                              #minperiod=4,\
                              #maxperiod=12,\
                              #incperiod=3,\
                              #numdaysinfit=28,\
                              #offset=3)
                              minperiod=params['minperiod'],
                              maxperiod=params['maxperiod'],
                              incperiod=params['incperiod'],
                              numdaysinfit=params['numdaysinfit'],
                              offset=params['offset'])
        channelGainsLosses.append(channelGainLoss)
        channelStds.append(channelStd)
        if symbol in holdings_symbols:
            #pctChannel = jumpTheChannelTest(adjClose[i,:],minperiod=4,maxperiod=12,incperiod=3,numdaysinfit=28, offset=3)
            print " ... performing PctChannelTest: symbol = ",format(symbol,'5s'), " pctChannel = ", format(pctChannel-1.,'6.1%')
            '''
            if pctChannel < 1.:
                # send textmessage alert of possible new down-trend
                downtrendSymbols.append(symbol)
                channelPercent.append(format(pctChannel-1.,'6.1%'))
            '''
            # send textmessage alert of current trend
            downtrendSymbols.append(symbol)
            channelPercent.append(format(pctChannel-1.,'6.1%'))
            channelGainsLossesHoldings.append(format(channelGainLoss,'6.1%'))
            channelStdsHoldings.append(format(channelStd,'6.1%'))
            currentNumStdDevs.append(format(numStdDevs,'6.1f'))
    print "\n ... downtrending symbols are ", downtrendSymbols, "\n"
    if len(downtrendSymbols) > 0:
        #--------------------------------------------------
        # send text message
        #--------------------------------------------------
        #text_message = "PyTAAA/"+edition+" shows "+str(downtrendSymbols)+" in possible downtrend... \n"+str(channelPercent)+" % of trend channel."
        text_message = "PyTAAA/"+edition+" shows "+str(downtrendSymbols)+" current trend... "+\
                       "\nPct of trend channel = "+str(channelPercent)+\
                       "\nperiod gainloss = "+str(channelGainsLossesHoldings)+\
                       "\nperiod gainloss std = "+str(channelStdsHoldings)+\
                       "\ncurrent # std devs = "+str(currentNumStdDevs)
        print text_message +"\n\n"
        # send text message if market is open
        # NOTE(review): presumably 'close in' appears in the status string
        # shortly before the market close -- confirm against CheckMarketOpen
        if 'close in' in get_MarketOpenOrClosed():
            #SendTextMessage( username,emailpassword,params['toSMS'],params['fromaddr'],text_message )
            SendEmail(username,emailpassword,params['toSMS'],params['fromaddr'],subjecttext,text_message,boldtext,headlinetext)
    return
#----------------------------------------------
def SMA_2D(x, periods):
    """
    Trailing simple moving average along the date axis of a 2D array.

    x       -- 2D array (stock, date)
    periods -- lookback length; each column averages up to periods+1 samples
               (window is truncated at the start of the series)

    Returns a float array with the same shape as x.
    """
    n_rows, n_cols = x.shape
    averaged = np.empty((n_rows, n_cols), dtype=float)
    for col in range(n_cols):
        start = col - periods if col > periods else 0
        averaged[:, col] = x[:, start:col + 1].mean(axis=-1)
    return averaged
#----------------------------------------------
def despike_2D(x, periods, stddevThreshold=5.0):
    """
    Clip upside spikes in the day-to-day gain ratios of each series and
    rebuild prices from the clipped ratios.

    x                -- 2D array of prices (stock, date)
    periods          -- trailing window (samples) used to estimate the
                        std-dev of recent gain ratios
    stddevThreshold  -- a day's ratio is capped at
                        1 + stddevThreshold * std(recent ratios)

    Returns the reconstructed price array (cumulative product of the
    clipped ratios, seeded with the first column of x).  Only upward
    spikes are clipped; later windows see already-clipped ratios because
    the clipping is done in place, column by column.
    """
    ratios = np.ones((x.shape[0], x.shape[1]), dtype=float)
    ratios[:, 1:] = x[:, 1:] / x[:, :-1]
    for col in range(1, x.shape[1]):
        lo = max(0, col - periods)
        # cap = 1 + threshold * std of the trailing (already-clipped) ratios
        cap = stddevThreshold * np.std(ratios[:, lo:col], axis=-1) + 1.
        ratios[:, col] = np.minimum(cap, ratios[:, col])
    # seed the cumulative product with the original first price
    ratios[:, 0] = x[:, 0]
    return np.cumprod(ratios, axis=1)
#----------------------------------------------
def SMA(x, periods):
    """
    Trailing simple moving average of a 1D series.

    x       -- 1D array
    periods -- lookback length; each entry averages up to periods+1 samples
               (window truncated at the start of the series)

    Returns a float array the same length as x.
    """
    averaged = np.empty(x.shape[0], dtype=float)
    for idx in range(x.shape[0]):
        lo = max(0, idx - periods)
        averaged[idx] = x[lo:idx + 1].mean()
    return averaged
#----------------------------------------------
def MoveMax_2D(x, periods):
    """
    Trailing moving maximum along the date axis of a 2D array.

    x       -- 2D array (stock, date)
    periods -- lookback length; each column takes the max of up to
               periods+1 samples (window truncated at the series start)

    Returns a float array with the same shape as x.
    """
    n_rows, n_cols = x.shape
    running_max = np.empty((n_rows, n_cols), dtype=float)
    for col in range(n_cols):
        lo = 0 if col < periods else col - periods
        running_max[:, col] = x[:, lo:col + 1].max(axis=-1)
    return running_max
#----------------------------------------------
def MoveMax(x, periods):
    """
    Trailing moving maximum of a 1D series.

    x       -- 1D array
    periods -- lookback length; each entry is the max of up to periods+1
               samples (window truncated at the series start)

    Returns a float array the same length as x.
    """
    running_max = np.empty(x.shape[0], dtype=float)
    for idx in range(x.shape[0]):
        lo = max(idx - periods, 0)
        running_max[idx] = x[lo:idx + 1].max()
    return running_max
#----------------------------------------------
def MoveMin(x, periods):
    """
    Trailing moving minimum of a 1D series.

    x       -- 1D array
    periods -- lookback length; each entry is the min of up to periods+1
               samples (window truncated at the series start)

    Returns a float array the same length as x.
    """
    running_min = np.empty(x.shape[0], dtype=float)
    for idx in range(x.shape[0]):
        lo = max(idx - periods, 0)
        running_min[idx] = x[lo:idx + 1].min()
    return running_min
#----------------------------------------------
def move_sharpe_2D(adjClose, dailygainloss, period):
    """
    Compute the moving sharpe ratio for each stock.

    adjClose      -- 2D price array (stock, date); used only for the output shape
    dailygainloss -- 2D array of daily gain factors (stock, date), e.g. 1.01
    period        -- trailing window length (samples)

    sharpe_ratio = ( gmean(dailyGains[-lag:])**252 - 1. )
                   / ( np.std(dailyGains[-lag:]) * sqrt(252) )
    The formula assumes 252 trading days per year.  The geometric mean is
    the average growth rate: where initial and final values are known it
    reduces to ( a(n)/a(0) )**(1/n) for n steps.

    Entries that are exactly 0 or NaN (e.g. zero-variance windows) are
    replaced with the floor value 0.05 before returning.
    """
    from scipy.stats import gmean
    from math import sqrt
    #
    sharpe = np.zeros((adjClose.shape[0], adjClose.shape[1]), dtype=float)
    for i in range(dailygainloss.shape[1]):
        minindex = max(i - period, 0)
        if i > minindex:
            window = dailygainloss[:, minindex:i + 1]
            # annualized geometric return over annualized volatility
            sharpe[:, i] = (gmean(window, axis=-1) ** 252 - 1.) \
                           / (np.std(window, axis=-1) * sqrt(252))
        else:
            sharpe[:, i] = 0.
    # floor zero and NaN entries (fix: use np.isnan instead of a bare isnan
    # that depended on a module-level star import)
    sharpe[sharpe == 0] = .05
    sharpe[np.isnan(sharpe)] = .05
    return sharpe
#----------------------------------------------
def computeSignal2D( adjClose, gainloss, params ):
    """
    Build a per-stock, per-day uptrend indicator from price history.

    adjClose -- 2D array of adjusted closes (stock, date)
    gainloss -- 2D array of daily gain factors (stock, date); used only to
                locate each stock's first real trading date
    params   -- dict of model parameters; params['uptrendSignalMethod']
                selects the signal definition: 'SMAs', 'minmaxChannels',
                or 'percentileChannels'

    Returns signal2D (float array of 0./1., same shape as adjClose) for the
    first two methods; 'percentileChannels' returns the tuple
    (signal2D, lowChannel, hiChannel).
    NOTE(review): an unrecognized method string falls through every branch
    and returns None implicitly -- callers must pass a valid method.
    """
    print " ... inside computeSignal2D ... "
    print " params = ",params
    # unpack moving-average / channel parameters
    MA1 = int(params['MA1'])
    MA2 = int(params['MA2'])
    MA2offset = int(params['MA2offset'])
    narrowDays = params['narrowDays']
    mediumDays = params['mediumDays']
    wideDays = params['wideDays']
    lowPct = float(params['lowPct'])
    hiPct = float(params['hiPct'])
    sma2factor = float(params['MA2factor'])
    uptrendSignalMethod = params['uptrendSignalMethod']
    if uptrendSignalMethod == 'SMAs' :
        print " ...using 3 SMA's for signal2D"
        print "\n\n ...calculating signal2D using '"+uptrendSignalMethod+"' method..."
        ########################################################################
        ## Calculate signal for all stocks based on 3 simple moving averages (SMA's)
        ########################################################################
        sma0 = SMA_2D( adjClose, MA2 )                  # MA2 is shortest
        sma1 = SMA_2D( adjClose, MA2 + MA2offset )
        sma2 = sma2factor * SMA_2D( adjClose, MA1 )     # MA1 is longest
        signal2D = np.zeros((adjClose.shape[0],adjClose.shape[1]),dtype=float)
        for ii in range(adjClose.shape[0]):
            for jj in range(adjClose.shape[1]):
                # uptrend if price is above the scaled long SMA, or above the
                # lower of the two short SMAs while the shortest SMA is rising.
                # NOTE(review): at jj==0 the sma0[ii,jj-1] term wraps to the
                # last column (Python negative indexing) -- confirm intended.
                if adjClose[ii,jj] > sma2[ii,jj] or ((adjClose[ii,jj] > min(sma0[ii,jj],sma1[ii,jj]) and sma0[ii,jj] > sma0[ii,jj-1])):
                    signal2D[ii,jj] = 1
                    if jj== adjClose.shape[1]-1 and isnan(adjClose[ii,-1]):
                        signal2D[ii,jj] = 0 #### added to avoid choosing stocks no longer in index
            # take care of special case where constant share price is inserted at beginning of series
            index = np.argmax(np.clip(np.abs(gainloss[ii,:]-1),0,1e-8)) - 1
            signal2D[ii,0:index] = 0
        # NOTE(review): computed but neither used nor returned
        dailyNumberUptrendingStocks = np.sum(signal2D,axis = 0)
        return signal2D
    elif uptrendSignalMethod == 'minmaxChannels' :
        print " ...using 3 minmax channels for signal2D"
        print "\n\n ...calculating signal2D using '"+uptrendSignalMethod+"' method..."
        ########################################################################
        ## Calculate signal for all stocks based on 3 minmax channels (dpgchannels)
        ########################################################################
        # narrow channel is designed to remove day-to-day variability
        print "narrow days min,max,inc = ", narrowDays[0], narrowDays[-1], (narrowDays[-1]-narrowDays[0])/7.
        narrow_minChannel, narrow_maxChannel = dpgchannel_2D( adjClose, narrowDays[0], narrowDays[-1], (narrowDays[-1]-narrowDays[0])/7. )
        narrow_midChannel = (narrow_minChannel+narrow_maxChannel)/2.
        medium_minChannel, medium_maxChannel = dpgchannel_2D( adjClose, mediumDays[0], mediumDays[-1], (mediumDays[-1]-mediumDays[0])/7. )
        medium_midChannel = (medium_minChannel+medium_maxChannel)/2.
        # position of the smoothed price inside the medium channel, scaled to [-1,1]
        mediumSignal = ((narrow_midChannel-medium_minChannel)/(medium_maxChannel-medium_minChannel)-0.5)*2.0
        wide_minChannel, wide_maxChannel = dpgchannel_2D( adjClose, wideDays[0], wideDays[-1], (wideDays[-1]-wideDays[0])/7. )
        wide_midChannel = (wide_minChannel+wide_maxChannel)/2.
        # position of the smoothed price inside the wide channel, scaled to [-1,1]
        wideSignal = ((narrow_midChannel-wide_minChannel)/(wide_maxChannel-wide_minChannel)-0.5)*2.0
        signal2D = np.zeros((adjClose.shape[0],adjClose.shape[1]),dtype=float)
        for ii in range(adjClose.shape[0]):
            for jj in range(adjClose.shape[1]):
                # uptrend when the combined channel positions are net positive
                if mediumSignal[ii,jj] + wideSignal[ii,jj] > 0:
                    signal2D[ii,jj] = 1
                    if jj== adjClose.shape[1]-1 and isnan(adjClose[ii,-1]):
                        signal2D[ii,jj] = 0 #### added to avoid choosing stocks no longer in index
            # take care of special case where constant share price is inserted at beginning of series
            index = np.argmax(np.clip(np.abs(gainloss[ii,:]-1),0,1e-8)) - 1
            signal2D[ii,0:index] = 0
            '''
            # take care of special case where mp quote exists at end of series
            if firstTrailingEmptyPriceIndex[ii] != 0:
                signal2D[ii,firstTrailingEmptyPriceIndex[ii]:] = 0
            '''
        return signal2D
    elif uptrendSignalMethod == 'percentileChannels' :
        print "\n\n ...calculating signal2D using '"+uptrendSignalMethod+"' method..."
        signal2D = np.zeros((adjClose.shape[0],adjClose.shape[1]),dtype=float)
        lowChannel,hiChannel = percentileChannel_2D(adjClose,MA1,MA2+.01,MA2offset,lowPct,hiPct)
        for ii in range(adjClose.shape[0]):
            for jj in range(1,adjClose.shape[1]):
                # enter uptrend on an upward cross of the low channel or a
                # break above the high channel; exit on the mirror-image
                # conditions; otherwise hold yesterday's state
                if (adjClose[ii,jj] > lowChannel[ii,jj] and adjClose[ii,jj-1] <= lowChannel[ii,jj-1]) or adjClose[ii,jj] > hiChannel[ii,jj]:
                    signal2D[ii,jj] = 1
                elif (adjClose[ii,jj] < hiChannel[ii,jj] and adjClose[ii,jj-1] >= hiChannel[ii,jj-1]) or adjClose[ii,jj] < lowChannel[ii,jj]:
                    signal2D[ii,jj] = 0
                else:
                    signal2D[ii,jj] = signal2D[ii,jj-1]
                if jj== adjClose.shape[1]-1 and isnan(adjClose[ii,-1]):
                    signal2D[ii,jj] = 0 #### added to avoid choosing stocks no longer in index
            # take care of special case where constant share price is inserted at beginning of series
            index = np.argmax(np.clip(np.abs(gainloss[ii,:]-1),0,1e-8)) - 1
            signal2D[ii,0:index] = 0
        return signal2D, lowChannel, hiChannel
#----------------------------------------------
def nanrms(x, axis=None):
    """
    Root-mean-square of x along the given axis, ignoring NaNs.

    x    -- array-like of floats
    axis -- axis for the reduction (None reduces over all elements)

    Returns sqrt(nanmean(x**2)).  Fix: the original relied on the
    third-party bottleneck package and on a bare `sqrt` name that was
    never imported in this function; np.nanmean / np.sqrt are exact,
    dependency-free equivalents.
    """
    return np.sqrt(np.nanmean(x**2, axis=axis))
#----------------------------------------------
def move_informationRatio(dailygainloss_portfolio,dailygainloss_index,period):
    """
    Compute the moving information ratio of each portfolio row vs an index.

    dailygainloss_portfolio -- 2D array of daily gain factors (portfolio, date)
    dailygainloss_index     -- 1D array of the benchmark's daily gain factors
    period                  -- trailing window length (samples)

    returns for stock (annualized) = Rs
      -- assuming 252 days per year this is gmean(dailyGains)**252 -1
      -- Rs denotes stock's return
    excess return compared to benchmark = Expectation(Rp - Ri)
      -- assuming 252 days per year this is sum(Rp - Ri)/252, or just mean(Rp - Ri)
      -- Rp denotes active portfolio return
      -- Ri denotes index return
    tracking error compared to benchmark = sqrt(Expectation((Rp - Ri)**2))
      -- assuming 252 days per year this is sqrt(Sum((Rp - Ri)**2)/252), or just sqrt(mean(((Rp - Ri)**2)))
      -- Rp denotes active portfolio return
      -- Ri denotes index return
    information_ratio = ExcessReturn / TrackingError
    formula assumes 252 trading days per year
    Geometric mean is simplified as follows:
      where the geometric mean is being used to determine the average
      growth rate of some quantity, and the initial and final values
      of that quantity are known, the product of the measured growth
      rate at every step need not be taken. Instead, the geometric mean
      is simply ( a(n)/a(0) )**(1/n), where n is the number of steps

    Zero and NaN entries of the result are reset to 0.
    """
    from scipy.stats import gmean
    from math import sqrt
    from numpy import std
    from bottleneck import nanmean
    #
    infoRatio = np.zeros( (dailygainloss_portfolio.shape[0],dailygainloss_portfolio.shape[1]), dtype=float)
    for i in range( dailygainloss_portfolio.shape[1] ):
        # trailing window [minindex, i] of at most period+1 samples
        minindex = max( i-period, 0 )
        if i > minindex :
            returns_portfolio = dailygainloss_portfolio[:,minindex:i+1] -1.
            returns_index = dailygainloss_index[minindex:i+1] -1.
            excessReturn = nanmean( returns_portfolio - returns_index, axis = -1 )
            # difference of raw gain factors equals difference of returns
            # (the 1's cancel), so this is rms(Rp - Ri) over the window
            trackingError = nanrms( dailygainloss_portfolio[:,minindex:i+1] - dailygainloss_index[minindex:i+1], axis = -1 )
            infoRatio[:,i] = excessReturn / trackingError
            # diagnostic dump for the most recent date only
            if i == dailygainloss_portfolio.shape[1]-1:
                print " returns_portfolio = ", returns_portfolio
                print " returns_index = ", returns_index
                print " excessReturn = ", excessReturn
                print " infoRatio[:,i] = ", infoRatio[:,i]
        else :
            infoRatio[:,i] *= 0.
    # normalize zero and NaN entries to 0.
    # NOTE(review): bare `isnan` presumably comes from a module-level star
    # import (e.g. numpy) -- confirm at file top
    infoRatio[infoRatio==0]=.0
    infoRatio[isnan(infoRatio)] =.0
    return infoRatio
#----------------------------------------------
def multiSharpe( datearray, adjClose, periods ):
    """
    Average sharpe ratio of the stock universe over several lookback periods.

    datearray -- 1D array of dates aligned with adjClose columns
    adjClose  -- 2D array of adjusted closes (stock, date)
    periods   -- sequence of lookback lengths (samples)

    Returns (dates, medianSharpe, signal):
      dates        -- datearray trimmed so every period has enough history
      medianSharpe -- per-date median across periods, clipped to [-.1, 1.1]
      signal       -- median + 1.5*(mean - median) across periods,
                      clipped to [-.05, 1.05]
    """
    from functions.allstats import *
    maxPeriod = np.max( periods )
    # keep only dates for which the longest lookback is fully populated
    dates = datearray[maxPeriod:]
    sharpesPeriod = np.zeros( (len(periods),len(dates)), 'float' )
    # NOTE(review): adjCloseSubset is computed but never used below
    adjCloseSubset = adjClose[:,-len(dates):]
    for iperiod,period in enumerate(periods) :
        lenSharpe = period
        for idate in range( maxPeriod,adjClose.shape[1] ):
            # per-stock sharpe over the trailing lenSharpe days
            sharpes = []
            for ii in range(adjClose.shape[0]):
                sharpes.append( allstats( adjClose[ii,idate-lenSharpe:idate] ).sharpe() )
            sharpes = np.array( sharpes )
            # drop NaN/inf before averaging across stocks
            sharpes = sharpes[np.isfinite( sharpes )]
            if len(sharpes) > 0:
                sharpesAvg = np.mean(sharpes)
                # occasional progress printout
                if idate%1000 == 0:
                    print period, datearray[idate],len(sharpes), sharpesAvg
            else:
                sharpesAvg = 0.
            sharpesPeriod[iperiod,idate-maxPeriod] = sharpesAvg
    # shift and rescale the per-period averages
    plotSharpe = sharpesPeriod[:,-len(dates):].copy()
    plotSharpe += .3
    plotSharpe /= 1.25
    signal = np.median(plotSharpe,axis=0)
    # NOTE(review): each iteration overwrites signal with the clipped row
    # (x += y - x  ==  x = y), and both signal and medianSharpe are
    # recomputed below, so this loop's result is discarded -- confirm intent
    for i in range( plotSharpe.shape[0] ):
        signal += (np.clip( plotSharpe[i,:], -1., 2.) - signal)
    medianSharpe = np.median(plotSharpe,axis=0)
    # skew-adjusted signal: median pushed toward the mean
    signal = np.median(plotSharpe,axis=0) + 1.5 * (np.mean(plotSharpe,axis=0) - np.median(plotSharpe,axis=0))
    medianSharpe = np.clip( medianSharpe, -.1, 1.1 )
    signal = np.clip( signal, -.05, 1.05 )
    return dates, medianSharpe, signal
#----------------------------------------------
def move_martin_2D(adjClose, period):
    """
    Compute the moving ulcer index (rms percent drawdown) for each stock.

    adjClose -- 2D array of adjusted closes (stock, date)
    period   -- trailing window length (samples) used both for the running
                maximum and for the rms averaging

    Returns a 2D array (same shape as adjClose) where each entry is the
    root-mean-square fractional drawdown from the trailing running maximum.
    This is the ulcer index used as the denominator of the martin ratio
    (ulcer performance index).
    Reference: http://www.tangotools.com/ui/ui.htm

    Fixes vs original: removed unused imports (gmean, sqrt, std); renamed
    the local variable that shadowed the module-level MoveMax function;
    corrected the docstring, which had copied the sharpe-ratio formula.
    """
    # trailing running maximum of price over the window
    trailing_max = MoveMax_2D( adjClose, period )
    # squared fractional drawdown below the running max (always >= 0)
    sqDrawDown = ( adjClose / trailing_max - 1. ) ** 2
    # rms drawdown = sqrt of the moving average of squared drawdowns
    martin = np.sqrt( SMA_2D( sqDrawDown, period ) )
    # reset NaN's to zero
    martin[ np.isnan(martin) ] = 0.
    return martin
#----------------------------------------------
def sharpeWeightedRank_2D(datearray,symbols,adjClose,signal2D,signal2D_daily,LongPeriod,rankthreshold,riskDownside_min,riskDownside_max,rankThresholdPct,stddevThreshold=4.,makeQCPlots=False):
# adjClose -- # 2D array with adjusted closing prices (axes are stock number, date)
# rankthreshold -- # select this many funds with best recent performance
import numpy as np
import nose
import os
import sys
from matplotlib.pylab import *
import matplotlib.gridspec as gridspec
try:
import bottleneck as bn
from bn import rankdata as rd
except:
import scipy.stats.mstats as bn
from quotes_for_list_adjClose import get_Naz100List
from functions.GetParams import *
# Get params for sending textmessage and email
params = GetParams()
adjClose_despike = despike_2D( adjClose, LongPeriod, stddevThreshold=stddevThreshold )
gainloss = np.ones((adjClose.shape[0],adjClose.shape[1]),dtype=float)
#gainloss[:,1:] = adjClose[:,1:] / adjClose[:,:-1]
gainloss[:,1:] = adjClose_despike[:,1:] / adjClose_despike[:,:-1] ## experimental
gainloss[isnan(gainloss)]=1.
# convert signal2D to contain either 1 or 0 for weights
signal2D -= signal2D.min()
signal2D *= signal2D.max()
# apply signal to daily gainloss
print "\n\n\n######################\n...gainloss min,median,max = ",gainloss.min(),gainloss.mean(),np.median(gainloss),gainloss.max()
print "...signal2D min,median,max = ",signal2D.min(),signal2D.mean(),np.median(signal2D),signal2D.max(),"\n\n\n"
gainloss = gainloss * signal2D
gainloss[gainloss == 0] = 1.0
# update file with daily count of uptrending symbols in index universe
filepath = os.path.join( os.getcwd(), "pyTAAA_web", "pyTAAAweb_dailyNumberUptrendingSymbolsList.txt" )
print "\n\nfile for daily number of uptrending symbols = ", filepath
if os.path.exists( os.path.abspath(filepath) ):
numberUptrendingSymbols = 0
for i in range(len(symbols)):
if signal2D_daily[i,-1] == 1.:
numberUptrendingSymbols += 1
#print "numberUptrendingSymbols,i,symbol,signal2D = ",numberUptrendingSymbols,i,symbols[i],signal2D_daily[i,-1]
dailyUptrendingCount_text = "\n"+str(datearray[-1])+", "+str(numberUptrendingSymbols)
with open( filepath, "a" ) as f:
f.write(dailyUptrendingCount_text)
else:
dailyUptrendingCount_text = "date, daily count of uptrending symbols"
with open( filepath, "w" ) as f:
f.write(dailyUptrendingCount_text)
numberUptrendingSymbols = np.zeros( signal2D_daily.shape[1], 'int' )
for k in range(signal2D_daily.shape[1]):
for i in range(len(symbols)):
if signal2D_daily[i,k] == 1.:
numberUptrendingSymbols[k] += 1
#print "numberUptrendingSymbols,i,symbol,signal2D = ",numberUptrendingSymbols[k],datearray[k],symbols[i],signal2D_daily[i,k]
dailyUptrendingCount_text = "\n"+str(datearray[k])+", "+str(numberUptrendingSymbols[k])
with open( filepath, "a" ) as f:
f.write(dailyUptrendingCount_text)
value = 10000. * np.cumprod(gainloss,axis=1)
# calculate gainloss over period of "LongPeriod" days
monthgainloss = np.ones((adjClose.shape[0],adjClose.shape[1]),dtype=float)
#monthgainloss[:,LongPeriod:] = adjClose[:,LongPeriod:] / adjClose[:,:-LongPeriod]
monthgainloss[:,LongPeriod:] = adjClose_despike[:,LongPeriod:] / adjClose_despike[:,:-LongPeriod] ## experimental
monthgainloss[isnan(monthgainloss)]=1.
# apply signal to daily monthgainloss
monthgainloss = monthgainloss * signal2D
monthgainloss[monthgainloss == 0] = 1.0
monthgainlossweight = np.zeros((adjClose.shape[0],adjClose.shape[1]),dtype=float)
rankweight = 1./rankthreshold
'''
# plot closing prices and signal-adjusted price
#
# REMOVE THIS plot loop
#
symbol_root = "Naz100_"
print "datearray[-1] = ", datearray[-1]
print "datearray[-1] = ", str(datearray[-1])
strdate = str(datearray[-1].year) +"-" + str(datearray[-1].month) + "-" +str(datearray[-1].day)
for i in range(len(symbols)):
clf()
grid()
plot(datearray[-825:],signal2D[i,-825:]*np.max(adjClose[i,-825:]))
plot(datearray[-825:],adjClose[i,-825:])
plot(datearray[-825:],adjClose[i,-825]/value[i,-825]*value[i,-825:])
aaa = signal2D[i,:]
NaNcount = aaa[np.isnan(aaa)].shape[0]
title("signal2D before figure3 ... "+symbols[i]+" "+str(NaNcount))
draw()
savefig(os.path.join("pngs",symbol_root+"_"+symbols[i]+"_"+strdate+"__"+".png"), format='png', edgecolor='gray' )
'''
########################################################################
## Calculate change in rank of active stocks each day (without duplicates as ties)
########################################################################
monthgainlossRank = np.zeros((adjClose.shape[0],adjClose.shape[1]),dtype=int)
monthgainlossPrevious = np.zeros((adjClose.shape[0],adjClose.shape[1]),dtype=float)
monthgainlossPreviousRank = np.zeros((adjClose.shape[0],adjClose.shape[1]),dtype=int)
monthgainlossRank = bn.rankdata(monthgainloss,axis=0)
# reverse the ranks (low ranks are biggest gainers)
maxrank = np.max(monthgainlossRank)
monthgainlossRank -= maxrank-1
monthgainlossRank *= -1
monthgainlossRank += 2
monthgainlossPrevious[:,LongPeriod:] = monthgainloss[:,:-LongPeriod]
monthgainlossPreviousRank = bn.rankdata(monthgainlossPrevious,axis=0)
# reverse the ranks (low ranks are biggest gainers)
maxrank = np.max(monthgainlossPreviousRank)
monthgainlossPreviousRank -= maxrank-1
monthgainlossPreviousRank *= -1
monthgainlossPreviousRank += 2
# weight deltaRank for best and worst performers differently
rankoffsetchoice = rankthreshold
delta = -( monthgainlossRank.astype('float') - monthgainlossPreviousRank.astype('float') ) / ( monthgainlossRank.astype('float') + float(rankoffsetchoice) )
# if rank is outside acceptable threshold, set deltarank to zero so stock will not be chosen
# - remember that low ranks are biggest gainers
rankThreshold = (1. - rankThresholdPct) * ( monthgainlossRank.max() - monthgainlossRank.min() )
for ii in range(monthgainloss.shape[0]):
for jj in range(monthgainloss.shape[1]):
if monthgainloss[ii,jj] > rankThreshold :
delta[ii,jj] = -monthgainloss.shape[0]/2
if jj == monthgainloss.shape[1]:
print "*******setting delta (Rank) low... Stock has rank outside acceptable range... ",ii, symbols[ii], monthgainloss[ii,jj]
"""
# if adjClose is nan, set deltarank to zero so stock will not be chosen
# - remember that low ranks are biggest gainers
rankThreshold = (1. - rankThresholdPct) * ( monthgainlossRank.max() - monthgainlossRank.min() )
for ii in range(monthgainloss.shape[0]):
if isnan( adjClose[ii,-1] ) :
delta[ii,:] = -monthgainloss.shape[0]/2
numisnans = adjClose[ii,:]
# NaN in last value usually means the stock is removed from the index so is not updated, but history is still in HDF file
print "*******setting delta (Rank) low... Stock has NaN for last value... ",ii, symbols[ii], numisnans[np.isnan(numisnans)].shape
"""
# if symbol is not in current Naz100 universe, set deltarank to zero so stock will not be chosen
# - remember that low ranks are biggest gainers
Naz100SymbolList,_,_ = get_Naz100List()
rankThreshold = (1. - rankThresholdPct) * ( monthgainlossRank.max() - monthgainlossRank.min() )
for ii in range(monthgainloss.shape[0]):
if symbols[ii] not in Naz100SymbolList and symbols[ii] != 'CASH' :
delta[ii,:] = -monthgainloss.shape[0]/2
numisnans = adjClose[ii,:]
# NaN in last value usually means the stock is removed from the index so is not updated, but history is still in HDF file
print "*******setting delta (Rank) low... Stock is no longer in Naz100 universe... ",ii, symbols[ii]
deltaRank = bn.rankdata( delta, axis=0 )
# reverse the ranks (low deltaRank have the fastest improving rank)
maxrank = np.max(deltaRank)
deltaRank -= maxrank-1
deltaRank *= -1
deltaRank += 2
for ii in range(monthgainloss.shape[1]):
if deltaRank[:,ii].min() == deltaRank[:,ii].max():
deltaRank[:,ii] = 0.
########################################################################
## Copy current day rankings deltaRankToday
########################################################################
deltaRankToday = deltaRank[:,-1].copy()
########################################################################
## Hold values constant for calendar month (gains, ranks, deltaRanks)
########################################################################
for ii in range(1,monthgainloss.shape[1]):
if datearray[ii].month == datearray[ii-1].month:
monthgainloss[:,ii] = monthgainloss[:,ii-1]
delta[:,ii] = delta[:,ii-1]
deltaRank[:,ii] = deltaRank[:,ii-1]
########################################################################
## Calculate number of active stocks each day
########################################################################
# TODO: activeCount can be computed before loop to save CPU cycles
# count number of unique values
activeCount = np.zeros(adjClose.shape[1],dtype=float)
for ii in np.arange(0,monthgainloss.shape[0]):
firsttradedate = np.argmax( np.clip( np.abs( gainloss[ii,:]-1. ), 0., .00001 ) )
activeCount[firsttradedate:] += 1
minrank = np.min(deltaRank,axis=0)
maxrank = np.max(deltaRank,axis=0)
# convert rank threshold to equivalent percent of rank range
rankthresholdpercentequiv = np.round(float(rankthreshold)*(activeCount-minrank+1)/adjClose.shape[0])
ranktest = deltaRank <= rankthresholdpercentequiv
########################################################################
### Calculate downside risk measure for weighting stocks.
### Use 1./ movingwindow_sharpe_ratio for risk measure.
### Modify weights with 1./riskDownside and scale so they sum to 1.0
########################################################################
riskDownside = 1. / move_sharpe_2D(adjClose,gainloss,LongPeriod)
riskDownside = np.clip( riskDownside, riskDownside_min, riskDownside_max)
riskDownside[isnan(riskDownside)] = np.max(riskDownside[~isnan(riskDownside)])
for ii in range(riskDownside.shape[0]) :
riskDownside[ii] = riskDownside[ii] / np.sum(riskDownside,axis=0)
########################################################################
### calculate equal weights for ranks below threshold
########################################################################
elsecount = 0
elsedate = 0
for ii in np.arange(1,monthgainloss.shape[1]) :
if activeCount[ii] > minrank[ii] and rankthresholdpercentequiv[ii] > 0:
for jj in range(value.shape[0]):
test = deltaRank[jj,ii] <= rankthresholdpercentequiv[ii]
if test == True :
monthgainlossweight[jj,ii] = 1./rankthresholdpercentequiv[ii]
monthgainlossweight[jj,ii] = monthgainlossweight[jj,ii] / riskDownside[jj,ii]
else:
monthgainlossweight[jj,ii] = 0.
elif activeCount[ii] == 0 :
monthgainlossweight[:,ii] *= 0.
monthgainlossweight[:,ii] += 1./adjClose.shape[0]
else :
elsedate = datearray[ii]
elsecount += 1
monthgainlossweight[:,ii] = 1./activeCount[ii]
aaa = np.sum(monthgainlossweight,axis=0)
allzerotest = np.sum(monthgainlossweight,axis=0)
sumallzerotest = allzerotest[allzerotest == 0].shape
if sumallzerotest > 0:
print ""
print " invoking correction to monthgainlossweight....."
print ""
for ii in np.arange(1,monthgainloss.shape[1]) :
if np.sum(monthgainlossweight[:,ii]) == 0:
monthgainlossweight[:,ii] = 1./activeCount[ii]
print " weights calculation else clause encountered :",elsecount," times. last date encountered is ",elsedate
rankweightsum = np.sum(monthgainlossweight,axis=0)
monthgainlossweight[isnan(monthgainlossweight)] = 0. # changed result from 1 to 0
monthgainlossweight = monthgainlossweight / np.sum(monthgainlossweight,axis=0)
monthgainlossweight[isnan(monthgainlossweight)] = 0. # changed result from 1 to 0
if makeQCPlots==True:
# input symbols and company names from text file
companyName_file = os.path.join( os.getcwd(), "symbols", "companyNames.txt" )
with open( companyName_file, "r" ) as f:
companyNames = f.read()
print "\n\n\n"
companyNames = companyNames.split("\n")
ii = companyNames.index("")
del companyNames[ii]
companySymbolList = []
companyNameList = []
for iname,name in enumerate(companyNames):
name = name.replace("amp;", "")
testsymbol, testcompanyName = name.split(";")
companySymbolList.append(testsymbol)
companyNameList.append(testcompanyName)
# print list showing current rankings and weights
# - symbol
# - rank (at begining of month)
# - rank (most recent trading day)
# - weight from sharpe ratio
# - price
import os
'''
rank_text = "<div id='rank_table_container'><h3>"+"<p>Current stocks, with ranks, weights, and prices are :</p></h3><font face='courier new' size=3><table border='1'> \
<tr><td>Rank (start of month) \
</td><td>Rank (today) \
</td><td>Symbol \
</td><td>Company \
</td><td>Weight \
</td><td>Price \
</td><td>Trend \
</td><td>ChannelPct \
</td></tr>\n"
'''
rank_text = "<div id='rank_table_container'><h3>"+"<p>Current stocks, with ranks, weights, and prices are :</p></h3><font face='courier new' size=3><table border='1'> \
<tr><td>Rank (start of month) \
</td><td>Rank (today) \
</td><td>Symbol \
</td><td>Company \
</td><td>Weight \
</td><td>Price \
</td><td>Trend \
</td><td>recent Gain or Loss (excludes a few days) \
</td><td>stdDevs above or below trend \
</td><td>P/E ratio \
</td></tr>\n"
ChannelPct_text = "channelPercent:"
channelPercent = []
channelGainsLosses = []
stdevsAboveChannel = []
floatChannelGainsLosses = []
floatStdevsAboveChannel = []
for i, isymbol in enumerate(symbols):
### save current projected position in price channel calculated without recent prices
"""
pctChannel,channelGainLoss,channelStd,numStdDevs = jumpTheChannelTest(adjClose[i,:],\
#minperiod=4,\
#maxperiod=12,\
#incperiod=3,\
#numdaysinfit=28,\
#offset=3)
minperiod=params['minperiod'],
maxperiod=params['maxperiod'],
incperiod=params['incperiod'],
numdaysinfit=params['numdaysinfit'],
offset=params['offset'])
print " ... performing PctChannelTest: symbol = ",format(isymbol,'5s'), " pctChannel = ", format(pctChannel-1.,'6.1%')
channelPercent.append(format(pctChannel-1.,'6.1%'))
channelGainsLosses.append(format(channelGainLoss,'6.1%'))
stdevsAboveChannel.append(format(numStdDevs,'6.1f'))
floatChannelGainsLosses.append(channelGainLoss)
floatStdevsAboveChannel.append(numStdDevs)
ChannelPct_text = ChannelPct_text + format(pctChannel-1.,'6.1%')
"""
#print "\nsymbol = ", isymbol
channelGainLoss, numStdDevs, pctChannel = recentTrendAndStdDevs(adjClose[i,:],
datearray,
minperiod=params['minperiod'],
maxperiod=params['maxperiod'],
incperiod=params['incperiod'],
numdaysinfit=params['numdaysinfit'],
offset=params['offset'])
print " ... performing PctChannelTest: symbol = ",format(isymbol,'5s'), " numStdDevs = ", format(numStdDevs,'6.1f')
channelGainsLosses.append(format(channelGainLoss,'6.1%'))
stdevsAboveChannel.append(format(numStdDevs,'6.1f'))
floatChannelGainsLosses.append(channelGainLoss)
floatStdevsAboveChannel.append(numStdDevs)
ChannelPct_text = ChannelPct_text + format(pctChannel-1.,'6.1%')
path_symbolChartsSort_byRankBeginMonth = os.path.join( os.getcwd(), "pyTAAA_web", "pyTAAAweb_symbolCharts_MonthStartRank.html" )
path_symbolChartsSort_byRankToday = os.path.join( os.getcwd(), "pyTAAA_web", "pyTAAAweb_symbolCharts_TodayRank.html" )
path_symbolChartsSort_byRecentGainRank = os.path.join( os.getcwd(), "pyTAAA_web", "pyTAAAweb_symbolCharts_recentGainRank.html" )
pagetext_byRankBeginMonth = "<!DOCTYPE html>+\n" +\
"<html>+\n" +\
"<head>+\n" +\
"<title>pyTAAA web</title>+\n" +\
"</head>+\n" +\
"<br><h1>Symbol Charts Ordered by Ranking at Start of Month</h1>+\n"
pagetext_byRankToday = "<!DOCTYPE html>+\n" +\
"<html>+\n" +\
"<head>+\n" +\
"<title>pyTAAA web</title>+\n" +\
"</head>+\n" +\
"<br><h1>Symbol Charts Ordered by Ranking Today</h1>+\n"
pagetext_byRecentGainRank = "<!DOCTYPE html>+\n" +\
"<html>+\n" +\
"<head>+\n" +\
"<title>pyTAAA web</title>+\n" +\
"</head>+\n" +\
"<br><h1>Symbol Charts Ordered by Recent Gain Ranking</h1>+\n"
floatChannelGainsLosses = np.array(floatChannelGainsLosses)
floatChannelGainsLosses[np.isinf(floatChannelGainsLosses)] = -999.
floatChannelGainsLosses[np.isneginf(floatChannelGainsLosses)] = -999.
floatChannelGainsLosses[np.isnan(floatChannelGainsLosses)] = -999.
floatStdevsAboveChannel = np.array(floatStdevsAboveChannel)
floatStdevsAboveChannel[np.isinf(floatStdevsAboveChannel)] = -999.
floatStdevsAboveChannel[np.isneginf(floatStdevsAboveChannel)] = -999.
floatStdevsAboveChannel[np.isnan(floatStdevsAboveChannel)] = -999.
RecentGainRank = len(floatChannelGainsLosses) - bn.rankdata( floatChannelGainsLosses )
RecentGainStdDevRank = len(floatStdevsAboveChannel)- bn.rankdata( floatStdevsAboveChannel )
RecentOrder = np.argsort( RecentGainRank + RecentGainStdDevRank )
RecentRank = np.argsort( RecentOrder )
peList = []
floatPE_list = []
for i, isymbol in enumerate(symbols):
pe = getQuote(isymbol)['PE'][0]
floatPE_list.append(pe)
peList.append(str(pe))
for i, isymbol in enumerate(symbols):
for j in range(len(symbols)):
if int( deltaRank[j,-1] ) == i :
if signal2D_daily[j,-1] == 1.:
trend = 'up'
else:
trend = 'down'
# search for company name
try:
symbolIndex = companySymbolList.index(symbols[j])
companyName = companyNameList[symbolIndex]
except:
companyName = ""
'''
rank_text = rank_text + \
"<tr><td>" + format(deltaRank[j,-1],'6.0f') + \
"<td>" + format(deltaRankToday[j],'6.0f') + \
"<td>" + format(symbols[j],'5s') + \
"<td>" + format(companyName,'15s') + \
"<td>" + format(monthgainlossweight[j,-1],'5.03f') + \
"<td>" + format(adjClose[j,-1],'6.2f') + \
"<td>" + trend + \
"<td>" + channelPercent[j] + \
"</td></tr> \n"
'''
#pe = format(getQuote(symbols[j])['PE'][0],'f7.2')
#pe = str(getQuote(symbols[j])['PE'][0])
pe = peList[j]
rank_text = rank_text + \
"<tr><td>" + format(deltaRank[j,-1],'6.0f') + \
"<td>" + format(deltaRankToday[j],'6.0f') + \
"<td>" + format(symbols[j],'5s') + \
"<td>" + format(companyName,'15s') + \
"<td>" + format(monthgainlossweight[j,-1],'5.03f') + \
"<td>" + format(adjClose[j,-1],'6.2f') + \
"<td>" + trend + \
"<td>" + channelGainsLosses[j] + \
"<td>" + stdevsAboveChannel[j] + \
"<td>" + pe + \
"</td></tr> \n"
###print " i,j,companyName = ", i,j,"__"+companyName+"__"
if companyName != "":
if i==1:
avgChannelGainsLosses = floatChannelGainsLosses[j]
avgStdevsAboveChannel = floatStdevsAboveChannel[j]
else:
avgChannelGainsLosses = (avgChannelGainsLosses*(i-1)+floatChannelGainsLosses[j])/(i)
avgStdevsAboveChannel = (avgStdevsAboveChannel*(i-1)+floatStdevsAboveChannel[j])/(i)
if i == deltaRank[j,-1]:
if signal2D_daily[j,-1] == 1.:
trend = 'up'
else:
trend = 'down'
# search for company name
try:
symbolIndex = companySymbolList.index(symbols[j])
companyName = companyNameList[symbolIndex]
except:
companyName = ""
#pe = str(getQuote(symbols[j])['PE'][0])
pe = peList[j]
pagetext_byRankBeginMonth = pagetext_byRankBeginMonth +"<br><p> </p><p> </p><p> </p>"+\
"<font face='courier new' size=3><table border='1'>" +\
"<tr><td>Rank (start of month)" +\
"</td><td>Rank (today)" +\
"</td><td>Symbol" +\
"</td><td>Company" +\
"</td><td>Weight" +\
"</td><td>Price" +\
"</td><td>Trend" +\
"</td><td>recent Gain or Loss (excludes a few days)" +\
"</td><td>stdDevs above or below trend" +\
"</td><td>P/E ratio" +\
"</td></tr>\n"+\
"<tr><td>" + format(deltaRank[j,-1],'6.0f') + \
"<td>" + format(deltaRankToday[j],'6.0f') + \
"<td>" + format(symbols[j],'5s') + \
"<td>" + format(companyName,'15s') + \
"<td>" + format(monthgainlossweight[j,-1],'5.03f') + \
"<td>" + format(adjClose[j,-1],'6.2f') + \
"<td>" + trend + \
"<td>" + channelGainsLosses[j] + \
"<td>" + stdevsAboveChannel[j] + \
"<td>" + pe + \
"</td></tr> \n"+\
u"<br><img src='0_recent_" +symbols[j]+ u".png' alt='PyTAAA by DonaldPG' width='850' height='500'>"
if i == deltaRankToday[j]:
if signal2D_daily[j,-1] == 1.:
trend = 'up'
else:
trend = 'down'
# search for company name
try:
symbolIndex = companySymbolList.index(symbols[j])
companyName = companyNameList[symbolIndex]
except:
companyName = ""
#pe = str(getQuote(symbols[j])['PE'][0])
pe = peList[j]
pagetext_byRankToday = pagetext_byRankToday +"<br><p> </p><p> </p><p> </p><br>"+\
"<font face='courier new' size=3><table border='1'>" +\
"<tr><td>Rank (start of month)" +\
"</td><td>Rank (today)" +\
"</td><td>Symbol" +\
"</td><td>Company" +\
"</td><td>Weight" +\
"</td><td>Price" +\
"</td><td>Trend" +\
"</td><td>recent Gain or Loss (excludes a few days)" +\
"</td><td>stdDevs above or below trend" +\
"</td><td>P/E ratio" +\
"</td></tr>\n"+\
"<tr><td>" + format(deltaRank[j,-1],'6.0f') + \
"<td>" + format(deltaRankToday[j],'6.0f') + \
"<td>" + format(symbols[j],'5s') + \
"<td>" + format(companyName,'15s') + \
"<td>" + format(monthgainlossweight[j,-1],'5.03f') + \
"<td>" + format(adjClose[j,-1],'6.2f') + \
"<td>" + trend + \
"<td>" + channelGainsLosses[j] + \
"<td>" + stdevsAboveChannel[j] + \
"<td>" + pe + \
"</td></tr> \n"+\
u"<br><img src='0_recent_" +symbols[j]+ u".png' alt='PyTAAA by DonaldPG' width='850' height='500'>"
if i == RecentRank[j]:
if signal2D_daily[j,-1] == 1.:
trend = 'up'
else:
trend = 'down'
# search for company name
try:
symbolIndex = companySymbolList.index(symbols[j])
companyName = companyNameList[symbolIndex]
except:
companyName = ""
#pe = str(getQuote(symbols[j])['PE'][0])
pe = peList[j]
pagetext_byRecentGainRank = pagetext_byRecentGainRank +"<br><p> </p><p> </p><p> </p><br>"+\
"<font face='courier new' size=3><table border='1'>" +\
"<tr><td>Rank (start of month)" +\
"</td><td>Rank (today)" +\
"</td><td>Symbol" +\
"</td><td>Company" +\
"</td><td>Weight" +\
"</td><td>Price" +\
"</td><td>Trend" +\
"</td><td>recent Gain or Loss (excludes a few days)" +\
"</td><td>stdDevs above or below trend" +\
"</td><td>P/E ratio" +\
"</td></tr>\n"+\
"<tr><td>" + format(deltaRank[j,-1],'6.0f') + \
"<td>" + format(deltaRankToday[j],'6.0f') + \
"<td>" + format(symbols[j],'5s') + \
"<td>" + format(companyName,'15s') + \
"<td>" + format(monthgainlossweight[j,-1],'5.03f') + \
"<td>" + format(adjClose[j,-1],'6.2f') + \
"<td>" + trend + \
"<td>" + channelGainsLosses[j] + \
"<td>" + stdevsAboveChannel[j] + \
"<td>" + pe + \
"</td></tr> \n"+\
u"<br><img src='0_recent_" +symbols[j]+ u".png' alt='PyTAAA by DonaldPG' width='850' height='500'>"
medianChannelGainsLosses = np.median(floatChannelGainsLosses)
medianStdevsAboveChannel = np.median(floatStdevsAboveChannel)
print "peList = ", floatPE_list
floatPE_list = np.array(floatPE_list)
floatPE_list = floatPE_list[~np.isinf(floatPE_list)]
floatPE_list = floatPE_list[~np.isneginf(floatPE_list)]
floatPE_list = floatPE_list[~np.isnan(floatPE_list)]
averagePE = np.mean(floatPE_list)
medianPE = np.median(floatPE_list)
avg_performance_text = "\n\n\n<font face='courier new' size=5><p>Average recent performance:</p></h3><font face='courier new' size=4>"+\
"<p>average trend excluding several days = "+format(avgChannelGainsLosses,'6.1%')+"<br>"+\
"median trend excluding several days = "+format(medianChannelGainsLosses,'6.1%')+"</p></h3><font face='courier new' size=4>"+\
"<p>average number stds above/below trend = "+format(avgStdevsAboveChannel,'5.1f')+"<br>"+\
"median number stds above/below trend = "+format(medianStdevsAboveChannel,'5.1f')+"</p></h3><font face='courier new' size=4>"+\
"<p>average P/E = "+format(averagePE,'5.1f')+"<br>"+\
"median P/E = "+format(medianPE,'5.1f')+"</p></h3><font face='courier new' size=4>\n\n"
rank_text = avg_performance_text + rank_text + "</table></div>\n"
filepath = os.path.join( os.getcwd(), "pyTAAA_web", "pyTAAAweb_RankList.txt" )
with open( filepath, "w" ) as f:
f.write(rank_text)
filepath = path_symbolChartsSort_byRankBeginMonth
with open( filepath, "w" ) as f:
f.write(pagetext_byRankBeginMonth)
filepath = path_symbolChartsSort_byRankToday
with open( filepath, "w" ) as f:
f.write(pagetext_byRankToday)
filepath = path_symbolChartsSort_byRecentGainRank
with open( filepath, "w" ) as f:
f.write(pagetext_byRecentGainRank)
########################################################################
### save current ranks to params file
########################################################################
lastdate_text = "lastdate: " + str(datearray[-1])
symbol_text = "symbols: "
rank_text = "ranks:"
#####ChannelPct_text = "channelPercent:"
"""
for i, isymbol in enumerate(symbols):
symbol_text = symbol_text + format(symbols[i],'6s')
rank_text = rank_text + format(deltaRankToday[i],'6.0f')
"""
for i, isymbol in enumerate(symbols):
for j in range(len(symbols)):
if int( deltaRank[j,-1] ) == i :
symbol_text = symbol_text + format(symbols[j],'6s')
rank_text = rank_text + format(deltaRankToday[j],'6.0f')
#####pctChannel = jumpTheChannelTest(adjClose[i,:],minperiod=4,maxperiod=12,incperiod=3,numdaysinfit=28, offset=3)
#####print " ... performing PctChannelTest: symbol = ",format(symbol,'5s'), " pctChannel = ", format(pctChannel-1.,'6.1%')
#####channelPercent.append(format(pctChannel-1.,'6.1%'))
#####ChannelPct_text = ChannelPct_text + format(pctChannel-1.,'6.1%')
filepath = os.path.join( os.getcwd(), "PyTAAA_ranks.params" )
with open( filepath, "a" ) as f:
f.write(lastdate_text)
f.write("\n")
f.write(symbol_text)
f.write("\n")
f.write(rank_text)
f.write("\n")
f.write(ChannelPct_text)
f.write("\n")
print "leaving function sharpeWeightedRank_2D..."
return monthgainlossweight
#----------------------------------------------
def MAA_WeightedRank_2D(datearray,symbols,adjClose,signal2D,signal2D_daily,LongPeriod,numberStocksTraded,
wR, wC, wV, wS, stddevThreshold=4. ):
# adjClose -- # 2D array with adjusted closing prices (axes are stock number, date)
# rankthreshold -- # select this many funds with best recent performance
import numpy as np
import nose
import os
import sys
from matplotlib.pylab import *
import matplotlib.gridspec as gridspec
try:
import bottleneck as bn
from bn import rankdata as rd
except:
import scipy.stats.mstats as bn
adjClose_despike = despike_2D( adjClose, LongPeriod, stddevThreshold=stddevThreshold )
gainloss = np.ones((adjClose.shape[0],adjClose.shape[1]),dtype=float)
#gainloss[:,1:] = adjClose[:,1:] / adjClose[:,:-1]
gainloss[:,1:] = adjClose_despike[:,1:] / adjClose_despike[:,:-1] ## experimental
gainloss[isnan(gainloss)]=1.
# convert signal2D to contain either 1 or 0 for weights
signal2D -= signal2D.min()
signal2D *= signal2D.max()
############################
###
### filter universe of stocks to exclude all that have return < 0
### - needed for correlation to "equal weight index" (EWI)
### - EWI is daily gain/loss percentage
###
############################
EWI = np.zeros( adjClose.shape[1], 'float' )
EWI_count = np.zeros( adjClose.shape[1], 'int' )
for jj in np.arange(LongPeriod,adjClose.shape[1]) :
for ii in range(adjClose.shape[0]):
if signal2D_daily[ii,jj] == 1:
EWI[jj] += gainloss[ii,jj]
EWI_count[jj] += 1
EWI = EWI/EWI_count
EWI[np.isnan(EWI)] = 1.0
############################
###
### compute correlation to EWI
### - each day, for each stock
### - not needed for stocks on days with return < 0
###
############################
corrEWI = np.zeros( adjClose.shape, 'float' )
for jj in np.arange(LongPeriod,adjClose.shape[1]) :
for ii in range(adjClose.shape[0]):
start_date = max( jj - LongPeriod, 0 )
if adjClose_despike[ii,jj] > adjClose_despike[ii,start_date]:
corrEWI[ii,jj] = normcorrcoef(gainloss[ii,start_date:jj]-1.,EWI[start_date:jj]-1.)
if corrEWI[ii,jj] <0:
corrEWI[ii,jj] = 0.
############################
###
### compute weights
### - each day, for each stock
### - set to 0. for stocks on days with return < 0
###
############################
weights = np.zeros( adjClose.shape, 'float' )
for jj in np.arange(LongPeriod,adjClose.shape[1]) :
for ii in range(adjClose.shape[0]):
start_date = max( jj - LongPeriod, 0 )
returnForPeriod = (adjClose_despike[ii,jj]/adjClose_despike[ii,start_date])-1.
if returnForPeriod < 0.:
returnForPeriod = 0.
volatility = np.std(adjClose_despike[ii,start_date:jj])
weights[ii,jj] = ( returnForPeriod**wR * (1.-corrEWI[ii,jj])**wC / volatility**wV ) **wS
weights[np.isnan(weights)] = 0.0
# make duplicate of weights for adjusting using crashProtection
CPweights = weights.copy()
CP_cashWeight = np.zeros(adjClose.shape[1], 'float' )
for jj in np.arange(adjClose.shape[1]) :
weightsToday = weights[:,jj]
CP_cashWeight[jj] = float(len(weightsToday[weightsToday==0.])) / len(weightsToday)
############################
###
### compute weights ranking and keep best
### 'best' are numberStocksTraded*%risingStocks
### - weights need to sum to 100%
###
############################
weightRank = np.zeros((adjClose.shape[0],adjClose.shape[1]),dtype=int)
weightRank = bn.rankdata(weights,axis=0)
# reverse the ranks (low ranks are biggest gainers)
maxrank = np.max(weightRank)
weightRank -= maxrank-1
weightRank *= -1
weightRank += 2
# set top 'numberStocksTraded' to have weights sum to 1.0
for jj in np.arange(adjClose.shape[1]) :
ranksToday = weightRank[:,jj].copy()
weightsToday = weights[:,jj].copy()
weightsToday[ranksToday > numberStocksTraded] = 0.
if np.sum(weightsToday) > 0.:
weights[:,jj] = weightsToday / np.sum(weightsToday)
else:
weights[:,jj] = 1./len(weightsToday)
# set CASH to have weight based on CrashProtection
cash_index = symbols.index("CASH")
for jj in np.arange(adjClose.shape[1]) :
CPweights[ii,jj] = CP_cashWeight[jj]
weightRank[ii,jj] = 0
ranksToday = weightRank[:,jj].copy()
weightsToday = CPweights[:,jj].copy()
weightsToday[ranksToday > numberStocksTraded] = 0.
if np.sum(weightsToday) > 0.:
CPweights[:,jj] = weightsToday / np.sum(weightsToday)
else:
CPweights[:,jj] = 1./len(weightsToday)
# hold weights constant for month
for jj in np.arange(LongPeriod,adjClose.shape[1]) :
start_date = max( jj - LongPeriod, 0 )
yesterdayMonth = datearray[jj-1].month
todayMonth = datearray[jj].month
if todayMonth == yesterdayMonth:
weights[:,jj] = weights[:,jj-1]
CPweights[:,jj] = CPweights[:,jj-1]
# input symbols and company names from text file
companyName_file = os.path.join( os.getcwd(), "symbols", "companyNames.txt" )
with open( companyName_file, "r" ) as f:
companyNames = f.read()
print "\n\n\n"
companyNames = companyNames.split("\n")
ii = companyNames.index("")
del companyNames[ii]
companySymbolList = []
companyNameList = []
for iname,name in enumerate(companyNames):
name = name.replace("amp;", "")
testsymbol, testcompanyName = name.split(";")
companySymbolList.append(testsymbol)
companyNameList.append(testcompanyName)
# print list showing current rankings and weights
# - symbol
# - rank (at begining of month)
# - rank (most recent trading day)
# - weight from sharpe ratio
# - price
import os
rank_text = "<div id='rank_table_container'><h3>"+"<p>Current stocks, with ranks, weights, and prices are :</p></h3><font face='courier new' size=3><table border='1'> \
</td><td>Rank (today) \
</td><td>Symbol \
</td><td>Company \
</td><td>Weight \
</td><td>CP Weight \
</td><td>Price \
</td><td>Trend \
</td></tr>\n"
for i, isymbol in enumerate(symbols):
for j in range(len(symbols)):
if int( weightRank[j,-1] ) == i :
if signal2D_daily[j,-1] == 1.:
trend = 'up'
else:
trend = 'down'
# search for company name
try:
symbolIndex = companySymbolList.index(symbols[j])
companyName = companyNameList[symbolIndex]
except:
companyName = ""
rank_text = rank_text + \
"<tr><td>" + format(weightRank[j,-1],'6.0f') + \
"<td>" + format(symbols[j],'5s') + \
"<td>" + format(companyName,'15s') + \
"<td>" + format(weights[j,-1],'5.03f') + \
"<td>" + format(CPweights[j,-1],'5.03f') + \
"<td>" + format(adjClose[j,-1],'6.2f') + \
"<td>" + trend + \
"</td></tr> \n"
rank_text = rank_text + "</table></div>\n"
print "leaving function MAA_WeightedRank_2D..."
"""
print " symbols = ", symbols
print " weights = ", weights[:,-1]
print " CPweights = ", CPweights[:,-1]
print " number NaNs in weights = ", weights[np.isnan(weights)].shape
print " number NaNs in CPweights = ", CPweights[np.isnan(CPweights)].shape
print " NaNs in monthgainlossweight = ", weights[np.isnan(weights)].shape
testsum = np.sum(weights,axis=0)
print " testsum shape, min, and max = ", testsum.shape, testsum.min(), testsum.max()
"""
return weights, CPweights
#----------------------------------------------
def UnWeightedRank_2D(datearray,adjClose,signal2D,LongPeriod,rankthreshold,riskDownside_min,riskDownside_max,rankThresholdPct):
# adjClose -- # 2D array with adjusted closing prices (axes are stock number, date)
# rankthreshold -- # select this many funds with best recent performance
import numpy as np
import nose
try:
import bottleneck as bn
from bn import rankdata as rd
except:
import scipy.stats.mstats as bn
gainloss = np.ones((adjClose.shape[0],adjClose.shape[1]),dtype=float)
gainloss[:,1:] = adjClose[:,1:] / adjClose[:,:-1]
gainloss[isnan(gainloss)]=1.
# convert signal2D to contain either 1 or 0 for weights
signal2D -= signal2D.min()
signal2D *= signal2D.max()
# apply signal to daily gainloss
gainloss = gainloss * signal2D
gainloss[gainloss == 0] = 1.0
value = 10000. * np.cumprod(gainloss,axis=1)
# calculate gainloss over period of "LongPeriod" days
monthgainloss = np.ones((adjClose.shape[0],adjClose.shape[1]),dtype=float)
monthgainloss[:,LongPeriod:] = adjClose[:,LongPeriod:] / adjClose[:,:-LongPeriod]
monthgainloss[isnan(monthgainloss)]=1.
monthgainlossweight = np.zeros((adjClose.shape[0],adjClose.shape[1]),dtype=float)
rankweight = 1./rankthreshold
########################################################################
## Calculate change in rank of active stocks each day (without duplicates as ties)
########################################################################
monthgainlossRank = np.zeros((adjClose.shape[0],adjClose.shape[1]),dtype=int)
monthgainlossPrevious = np.zeros((adjClose.shape[0],adjClose.shape[1]),dtype=float)
monthgainlossPreviousRank = np.zeros((adjClose.shape[0],adjClose.shape[1]),dtype=int)
monthgainlossRank = bn.rankdata(monthgainloss,axis=0)
# reverse the ranks (low ranks are biggest gainers)
maxrank = np.max(monthgainlossRank)
monthgainlossRank -= maxrank-1
monthgainlossRank *= -1
monthgainlossRank += 2
monthgainlossPrevious[:,LongPeriod:] = monthgainloss[:,:-LongPeriod]
monthgainlossPreviousRank = bn.rankdata(monthgainlossPrevious,axis=0)
# reverse the ranks (low ranks are biggest gainers)
maxrank = np.max(monthgainlossPreviousRank)
monthgainlossPreviousRank -= maxrank-1
monthgainlossPreviousRank *= -1
monthgainlossPreviousRank += 2
# weight deltaRank for best and worst performers differently
rankoffsetchoice = rankthreshold
delta = -(monthgainlossRank - monthgainlossPreviousRank ) / (monthgainlossRank + rankoffsetchoice)
# if rank is outside acceptable threshold, set deltarank to zero so stock will not be chosen
# - remember that low ranks are biggest gainers
rankThreshold = (1. - rankThresholdPct) * ( monthgainlossRank.max() - monthgainlossRank.min() )
for ii in range(monthgainloss.shape[0]):
for jj in range(monthgainloss.shape[1]):
if monthgainloss[ii,jj] > rankThreshold :
delta[ii,jj] = -monthgainloss.shape[0]/2
deltaRank = bn.rankdata(delta,axis=0)
# reverse the ranks (low deltaRank have the fastest improving rank)
maxrank = np.max(deltaRank)
deltaRank -= maxrank-1
deltaRank *= -1
deltaRank += 2
for ii in range(monthgainloss.shape[1]):
if deltaRank[:,ii].min() == deltaRank[:,ii].max():
deltaRank[:,ii] = 0.
########################################################################
## Hold values constant for calendar month (gains, ranks, deltaRanks)
########################################################################
for ii in np.arange(1,monthgainloss.shape[1]):
if datearray[ii].month == datearray[ii-1].month:
monthgainloss[:,ii] = monthgainloss[:,ii-1]
deltaRank[:,ii] = deltaRank[:,ii-1]
########################################################################
## Calculate number of active stocks each day
########################################################################
# TODO: activeCount can be computed before loop to save CPU cycles
# count number of unique values
activeCount = np.zeros(adjClose.shape[1],dtype=float)
for ii in np.arange(0,monthgainloss.shape[0]):
firsttradedate = np.argmax( np.clip( np.abs( gainloss[ii,:]-1. ), 0., .00001 ) )
activeCount[firsttradedate:] += 1
minrank = np.min(deltaRank,axis=0)
maxrank = np.max(deltaRank,axis=0)
# convert rank threshold to equivalent percent of rank range
rankthresholdpercentequiv = np.round(float(rankthreshold)*(activeCount-minrank+1)/adjClose.shape[0])
ranktest = deltaRank <= rankthresholdpercentequiv
########################################################################
### calculate equal weights for ranks below threshold
########################################################################
elsecount = 0
elsedate = 0
for ii in np.arange(1,monthgainloss.shape[1]) :
if activeCount[ii] > minrank[ii] and rankthresholdpercentequiv[ii] > 0:
for jj in range(value.shape[0]):
test = deltaRank[jj,ii] <= rankthresholdpercentequiv[ii]
if test == True :
monthgainlossweight[jj,ii] = 1./rankthresholdpercentequiv[ii]
else:
monthgainlossweight[jj,ii] = 0.
elif activeCount[ii] == 0 :
monthgainlossweight[:,ii] *= 0.
monthgainlossweight[:,ii] += 1./adjClose.shape[0]
else :
elsedate = datearray[ii]
elsecount += 1
monthgainlossweight[:,ii] = 1./activeCount[ii]
aaa = np.sum(monthgainlossweight,axis=0)
print ""
print " invoking correction to monthgainlossweight....."
print ""
# find first date with number of stocks trading (rankthreshold) + 2
activeCountAboveMinimum = activeCount
activeCountAboveMinimum += -rankthreshold + 2
firstTradeDate = np.argmax( np.clip( activeCountAboveMinimum, 0 , 1 ) )
for ii in np.arange(firstTradeDate,monthgainloss.shape[1]) :
if np.sum(monthgainlossweight[:,ii]) == 0:
for kk in range(rankthreshold):
indexHighDeltaRank = np.argmin(deltaRank[:,ii]) # remember that best performance is lowest deltaRank
monthgainlossweight[indexHighDeltaRank,ii] = 1./rankthreshold
deltaRank[indexHighDeltaRank,ii] = 1000.
print " weights calculation else clause encountered :",elsecount," times. last date encountered is ",elsedate
rankweightsum = np.sum(monthgainlossweight,axis=0)
monthgainlossweight[isnan(monthgainlossweight)] = 0. # changed result from 1 to 0
monthgainlossweight = monthgainlossweight / np.sum(monthgainlossweight,axis=0)
monthgainlossweight[isnan(monthgainlossweight)] = 0. # changed result from 1 to 0
return monthgainlossweight
def hurst(X):
    """Estimate the Hurst exponent of time series *X* via rescaled-range (R/S)
    analysis.

    H ~ 0.5 indicates random-walk behaviour; H < 0.5 indicates the series
    covers less "distance" than a random walk (mean-reverting), and vice
    versa.  Adapted from PyEEG (contributed by Xin Liu; Copyleft 2010 Forrest
    Sheng Bao, http://pyeeg.org).

    Parameters
    ----------
    X : list
        a time series
    Returns
    -------
    H : float
        Hurst exponent
    """
    from numpy import zeros, log, array, cumsum, std
    from numpy.linalg import lstsq

    num_points = len(X)
    time_axis = array([float(k) for k in xrange(1, num_points + 1)])
    running_sum = cumsum(X)
    running_mean = running_sum / time_axis
    spread = zeros((num_points))     # S_T: std-dev of the prefix X[:k+1]
    deviation = zeros((num_points))  # R_T: range of the detrended prefix
    for k in xrange(num_points):
        spread[k] = std(X[:k + 1])
        detrended = running_sum - time_axis * running_mean[k]
        deviation[k] = max(detrended[:k + 1]) - min(detrended[:k + 1])
    rescaled = log(deviation / spread)
    log_time = log(time_axis).reshape(num_points, 1)
    # slope of log(R/S) vs log(T) is the Hurst estimate; the first point is
    # skipped because spread[0] == 0 makes rescaled[0] undefined
    fit = lstsq(log_time[1:], rescaled[1:])[0]
    return fit[0]
|
from keras.models import Sequential
# BUGFIX: 'Activation' was imported twice on this line
from keras.layers import Dense, Dropout, Activation, Conv2D, MaxPool2D, Flatten, BatchNormalization
from keras import activations
from math import floor

# Small CNN for single-channel 5x144 inputs: three conv blocks, each
# Conv2D -> BatchNorm -> ReLU, two of them followed by 2x2 max-pool + dropout.
# NOTE(review): each Conv2D already applies ReLU via activation='relu', and a
# second ReLU is applied after BatchNormalization; relu is idempotent, but BN
# then normalises post-activation statistics -- confirm this ordering is
# intentional.
model = Sequential()
model.add(Conv2D(filters=8, kernel_size=(3, 3), kernel_initializer='normal', activation='relu', padding='same', input_shape=(5, 144, 1)))
model.add(BatchNormalization())
model.add(Activation(activations.relu))
model.add(Conv2D(filters=8, kernel_size=(3, 3), kernel_initializer='normal', activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Activation(activations.relu))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(.25))
model.add(Conv2D(filters=8, kernel_size=(3, 3), kernel_initializer='normal', activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Activation(activations.relu))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(.25))
model.add(Flatten())
# NOTE(review): compile() is called without an optimizer/loss, so the model
# cannot be trained as-is (tf.keras defaults to loss=None); supply e.g.
# optimizer and loss before calling fit().
model.compile()
model.summary()
|
import pandas as pd
import numpy as np
class QlearningTable:
    """Tabular Q-learning agent backed by a pandas DataFrame.

    Rows are states (added lazily), columns are actions.  Uses epsilon-greedy
    action selection and the standard one-step Q-learning update.
    """

    def __init__(self, action=None, learning_rate=0.01, reward_decay=0.9, e_greddy=0.9):
        # BUGFIX: the default action list was a shared mutable default
        # argument; use a None sentinel instead (same default value [0,1,2,3]).
        self.action = [0, 1, 2, 3] if action is None else action
        self.lr = learning_rate        # learning rate (step size)
        self.gamma = reward_decay      # discount factor
        self.epsilon = e_greddy        # probability of exploiting (greedy)
        self.q_table = pd.DataFrame(columns=self.action, dtype=np.float64)

    def choose_action(self, position):
        """Epsilon-greedy action selection for state *position*."""
        self.check_state_exist(position)
        if np.random.uniform() < self.epsilon:
            # exploit: pick the best-known action; shuffle the row first so
            # ties are broken randomly instead of always favouring the first
            state_action = self.q_table.loc[position, :]
            state_action = state_action.reindex(np.random.permutation(state_action.index))
            action = state_action.idxmax()
        else:
            # explore: uniformly random action
            action = np.random.choice(self.action)
        return action

    def learn(self, position, action, reward, next_position, final_positon):
        """One Q-learning update: Q(s,a) += lr * (target - Q(s,a))."""
        self.check_state_exist(next_position)
        q_predict = self.q_table.loc[position, action]
        if next_position != final_positon:
            # bootstrap from the best action value of the next state
            q_target = reward + self.gamma * self.q_table.loc[next_position, :].max()
        else:
            # terminal state: no future reward to bootstrap from
            q_target = reward
        self.q_table.loc[position, action] += self.lr * (q_target - q_predict)

    def check_state_exist(self, position):
        """Add an all-zero row for a state the first time it is seen."""
        if position not in self.q_table.index:
            # BUGFIX: DataFrame.append was deprecated in pandas 1.4 and
            # removed in 2.0; .loc enlargement works on every pandas version.
            self.q_table.loc[position] = [0.0] * len(self.action)
|
from setuptools import setup

# BUGFIX: the original followed `setup` with a brace block
# (`setup { name=..., ... }`), which is a SyntaxError in Python --
# the keyword arguments must be passed in a call.
setup(
    name='twidder',
    packages=['twidder'],
    include_package_data=True,
    install_requires=[
        'flask', 'validate_email', 'app', 'gevent', 'flask_sockets'
    ],
)
|
#!/usr/bin/env python
# encoding: utf-8
from selenium import webdriver
from Util.oper_Browser import *
from Util.find_el import *
import time
class Tyzf:
    """Small Selenium smoke test: open a blog page and locate an element.

    NOTE(review): relies on project helpers from Util.oper_Browser /
    Util.find_el (driver, get_url, get_el) -- presumably thin wrappers
    around a selenium WebDriver; confirm their contracts there.
    """
    def __init__(self):
        # driver() presumably constructs and returns a selenium WebDriver -- confirm
        self.driver=driver()
    def rever(self,method,el):
        """Open the test page, then locate element *el* using locator
        strategy *method* (e.g. 'link_text')."""
        #self.driver.set_window_siz(200,400)
        get_url(self.driver,'https://www.cnblogs.com/fanqian0330/p/10723170.html')
        #time.sleep(10)
        get_el(self.driver,method,el)
if __name__ == "__main__":
    # Smoke test: open the page and find the link with text "新闻" (news).
    # BUGFIX: a stray '|' token fused onto the last line (paste artifact)
    # made the script a SyntaxError; removed.
    bot = Tyzf()
    bot.rever('link_text', "新闻")
'''
Created on Mar 8, 2015
@author: saur
'''
from pypower.api import ppoption, runpf
from case_modul import casemodul
from numpy import *
def adapt_case(node,power, time):
    """Run a power-flow simulation with *power* (kW) injected at bus *node*.

    node  -- index of the grid bus the agent accesses
    power -- power in kW added at that bus
    time  -- timestep index into the quarter-hour load profile, [0, 96)
    Returns the voltage magnitude at *node* (PYPOWER bus matrix column 7 = VM)
    after the power-flow solve.
    """
    # wrap around if the time interval exceeds the number of elements in the load profile
    time = time % 96
    # standard household load profile: 96 quarter-hour values; the 0.001
    # factor rescales the raw values to the unit expected by casemodul
    # (presumably kW -> MW -- confirm against case_modul)
    loadprofile = 0.001 * array([2.1632,1.9456,1.7568,1.5968,1.4784 ,1.3952,1.3408,1.3056,1.2832,1.2672,
                                 1.2608,1.2512,1.2416,1.2352,1.2256,
                                 1.2256,1.2288,1.2416,1.2576,1.28,
                                 1.3088,1.3792,1.5264,1.7856,2.176,
                                 2.6496,3.136,3.568,3.8912,4.112,
                                 4.2464,4.3136,4.3328,4.3136,4.2592,
                                 4.1824,4.0864,3.9872,3.888,3.808,
                                 3.7536,3.7184,3.7024,3.7024,3.7152,
                                 3.744,3.7984,3.888,4.0128,4.1472,
                                 4.256,4.3136,4.2944,4.2144,4.096,
                                 3.968,3.8464,3.7376,3.6384,3.5424,
                                 3.4528,3.376,3.312,3.2768,3.2704,
                                 3.3024,3.3792,3.5168,3.712,3.9584,
                                 4.2432,4.5536,4.8768,5.1904,5.4784,
                                 5.7248,5.9104,6.0224,6.0448,5.9648,
                                 5.7824,5.5264,5.2448,4.9792,4.7648,
                                 4.5888,4.4288,4.2624,4.0704,3.856,
                                 3.6256,3.3824,3.136,2.8864,2.64,
                                 2.3968])
    q = zeros(25) #set the reactive power to zero at each point
    p = loadprofile[time] * ones(25) # set the active power at each grid point to the value in the load profile given the time
    p[0] = 0 # set the load at the transformer to 0
    p[node] = p[node] + power * 0.001 # add the power to the node that the agent controlls
    # do the actual power flow simulation
    ppc = casemodul(p,q)
    ppopt = ppoption(PF_ALG=2, VERBOSE= False,OUT_ALL=0)  # fast-decoupled solver, silent output
    ppc_result,y = runpf(ppc, ppopt) #run the powerflow simulation gibven the case and the options
    return ppc_result["bus"][node,7]
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import os
import json
from pathlib import Path
def get_project_root() -> Path:
    """Return the project root folder (three directory levels above this file)."""
    return Path(__file__).parents[2]
def keep_indices_in_seg_img(seg_img, indices_list):
    """Return a uint8 copy of *seg_img* keeping only the labels in
    *indices_list*; every other pixel is zero."""
    filtered = np.zeros(shape=seg_img.shape, dtype=np.uint8)
    for label in indices_list:
        rows, cols = np.where(seg_img[:, :] == label)[:2]
        filtered[rows, cols] = label
    return filtered
def seg_img2clr_img(seg_img):
    """Render a label image as an RGB visualisation, colouring the solid,
    dashed and vehicle classes; everything else stays black."""
    clr_image = np.zeros(shape=(seg_img.shape[0], seg_img.shape[1], 3), dtype=np.uint8)
    for type_name in ('solid', 'dashed', 'vehicle'):
        rows, cols = np.where(seg_img[:, :] == type_name2type_idx(type_name))[:2]
        clr_image[rows, cols] = type_name2color(type_name)
    return clr_image
def swap(x1, x2):
    """Return the two arguments in reversed order."""
    return (x2, x1)
def draw_line2(img, r1, c1, r2, c2, clr, width=5):
    """Draw a roughly *width*-pixel-thick line from (r1, c1) to (r2, c2) on
    *img*, stepping along the longer axis and solving the other coordinate
    from the slope.  Pixels that fall outside the image are silently skipped.
    Returns *img* (modified in place).
    """
    if abs(r1 - r2) > abs(c1 - c2):
        # steeper than 45 degrees: iterate over rows
        if r1 > r2:
            # NOTE(review): only the row endpoints are exchanged here (c1/c2
            # keep their original pairing), which distorts general diagonal
            # lines; axis-aligned callers (draw_rect) are unaffected --
            # confirm before using this for arbitrary segments.
            r1, r2 = r2, r1
        for r in range(r1, r2):
            c = int((c1 - c2) / (float(r1 - r2)) * r + c1 - ((c1 - c2) / (float(r1 - r2))) * r1)
            if (r > 0) and (c > 0) and (r < img.shape[0]) and (c < img.shape[1]):
                img[r, c - width // 2: c + width // 2] = clr
    else:
        # shallower: iterate over columns
        if c1 > c2:
            c1, c2 = c2, c1
        for c in range(c1, c2):
            r = int((r1 - r2) / (float(c1 - c2)) * c + r1 - ((r1 - r2) / (float(c1 - c2))) * c1)
            # (removed unused local 'left' from the original)
            if (r > 0) and (c >= 0) and (r < img.shape[0]) and (c < img.shape[1]):
                img[r - width // 2: r + width // 2, c] = clr
    return img
def draw_rect(img, r1, c1, r2, c2, clr, fill_clr=None, width=15):
    """Draw an axis-aligned rectangle outline between corners (r1, c1) and
    (r2, c2); when *fill_clr* is given, also fill the interior (inset by half
    the line width).  Returns *img* (modified in place)."""
    img = draw_line2(img, r1, c1, r1, c2, clr, width)  # top edge
    img = draw_line2(img, r1, c1, r2, c1, clr, width)  # left edge
    img = draw_line2(img, r1, c2, r2, c2, clr, width)  # right edge
    img = draw_line2(img, r2, c1, r2, c2, clr, width)  # bottom edge
    if fill_clr is not None:
        # normalise corner ordering so the interior slice is non-empty
        if r1 > r2:
            r1, r2 = r2, r1
        if c1 > c2:
            c1, c2 = c2, c1
        img[r1 + width // 2:r2 - width // 2, c1 + width // 2:c2 - width // 2] = fill_clr
    return img
def type_name2color(type_input):
    """Map a semantic type name to its display colour ([0, 0, 0] if unknown).

    # pay attention - the first channel ('R', indexed 0) should be unique per type!!!
    """
    # dict is rebuilt per call so every caller gets a fresh list
    palette = {
        'solid': [1, 255, 255],
        'dashed': [253, 255, 0],
        'vehicle': [252, 0, 255],
    }
    return palette.get(type_input, [0, 0, 0])
def type_name2type_idx(type_input):
    """Map a semantic type name to its integer segmentation label; returns
    None for an unknown type.

    # pay attention - the first channel ('R', indexed 0) should be unique per type!!!
    """
    label_by_name = {'solid': 3, 'dashed': 4, 'vehicle': 8}
    return label_by_name.get(type_input)
def read_objects_csv(filename):
    """Parse an objects CSV into a list of bounding-box dicts.

    Each returned dict has keys 'type', 'bottom', 'top', 'left', 'right'
    (pixel coordinates).  Column layout (0-based): 1 = type, 2 = primary-box
    flag, 3-6 = left/top/width/height, 7 = rear-box flag, 8-11 =
    left/top/width/height of the rear-of-vehicle box.
    """
    # BUGFIX: the file handle was never closed; use a context manager.
    with open(filename, "r") as f:
        lines = f.read().split('\n')
    objects = list()
    # lines[0] is the header; the split leaves a trailing '' after the final
    # newline, hence lines[1:-1]
    for line in lines[1:-1]:
        fields = [x.strip() for x in line.split(',')]
        if fields[2] == '1':
            single_object = dict()
            single_object['type'] = fields[1]
            single_object['bottom'] = int(float(fields[4])) + int(float(fields[6]))
            single_object['top'] = int(float(fields[4]))
            single_object['left'] = int(float(fields[3]))
            single_object['right'] = int(float(fields[3])) + int(float(fields[5]))
            objects.append(single_object)
        if fields[7] == '1':
            single_object_rear = dict()
            single_object_rear['type'] = 'rear_vehicle'
            single_object_rear['bottom'] = int(float(fields[9])) + int(float(fields[11]))
            single_object_rear['top'] = int(float(fields[9]))
            single_object_rear['left'] = int(float(fields[8]))
            single_object_rear['right'] = int(float(fields[8])) + int(float(fields[10]))
            objects.append(single_object_rear)
    return objects
def get_collected_data_full_seg_non_cropped_paths_list(parent_dir):
    """Collect full paths of every 'seg_front_center' file found under
    parent_dir/<session>/<clip>/<clip>/ (non-directories are skipped)."""
    seg_paths = list()
    for session_name in os.listdir(parent_dir):
        session_path = os.path.join(parent_dir, session_name)
        if not os.path.isdir(session_path):
            continue
        for clip_name in os.listdir(session_path):
            # the clip directory name is repeated one level down - for some reason...
            clip_path = os.path.join(session_path, clip_name, clip_name)
            if not os.path.isdir(clip_path):
                continue
            for fname in os.listdir(clip_path):
                if 'seg_front_center' in fname:
                    seg_paths.append(os.path.join(clip_path, fname))
    return seg_paths
def get_img2show_of_collected_data(filename):
    """Build a display image for one collected-data frame: colourised
    segmentation with rear-vehicle boxes from the matching CSV, resized to
    1920x1280."""
    seg_image = cv2.imread(filename)
    csv_path = filename.replace('seg', 'out').replace('png', 'csv')
    objects = read_objects_csv(csv_path)
    color_image = seg_img2clr_img(seg_image)
    for single_object in objects:
        # only rear-vehicle boxes have a display colour so far
        if single_object['type'] != 'rear_vehicle':
            continue
        box_clr = type_name2color('vehicle')
        # args: r1, c1, r2, c2, clr, width
        color_image = draw_rect(color_image,
                                single_object['top'], single_object['left'],
                                single_object['bottom'], single_object['right'],
                                box_clr, width=2)
    color_image = cv2.resize(color_image, (1920, 1280))
    return color_image
def get_img2pred_of_collected_data(filename):
    """Build a model-input tensor for a collected-data seg file.

    Reads the raw segmentation image, stamps vehicle-class rectangles
    from the objects CSV into it, keeps only class indices 3, 4 and 8
    (presumably lane and vehicle classes -- TODO confirm) and reshapes
    the result to a 4-D array.

    :param filename: full path to a 'seg_*' PNG file
    :return: numpy array of shape (1, H, W, 1)
    """
    seg_image = cv2.imread(filename, -1)  # -1: load the image unchanged
    csv_file_full_path = filename.replace('seg', 'out').replace('png', 'csv')
    objects = read_objects_csv(csv_file_full_path)
    for single_object in objects:
        # both 'rear_vehicle' and 'Vehicle' objects map to the vehicle index
        if (single_object['type'] == 'rear_vehicle') or (single_object['type'] == 'Vehicle'):
            idx = type_name2type_idx('vehicle')
        else:
            # print('no color for type', single_object['type'])
            continue
        # r1, c1, r2, c2, clr, width
        seg_image = draw_rect(seg_image, single_object['top'], single_object['left'], single_object['bottom'],
                              single_object['right'], idx, width=2)
    relevant_indices_img = keep_indices_in_seg_img(seg_image, [3, 4, 8])
    return np.expand_dims(np.expand_dims(relevant_indices_img, axis=0), axis=3)
def read_ground_truth(images_dir, filename, image_source):
    """Read the ground-truth horizon value for a frame.

    :param images_dir: directory of the front-view images; the matching
        meta-data directory is derived by replacing 'front_view_image'
        with 'meta_data'
    :param filename: seg image file name; the meta-data JSON name is
        derived from it
    :param image_source: data origin; collected data has no ground truth
    :return: the ground-truth horizon value, or None for collected data
    """
    # '==' instead of 'is': identity comparison with a string literal is a
    # CPython interning accident and raises SyntaxWarning on Python 3.8+
    if image_source == 'collected':
        return None
    meta_data_dir = images_dir.replace('front_view_image', 'meta_data')
    meta_data_file_name = (filename.replace('seg_front_view_image', 'meta_data')).replace('.png', '.json')
    json_fp = os.path.join(meta_data_dir, meta_data_file_name)
    with open(json_fp) as json_file:
        data = json.load(json_file)
    gt_horizon = data['seg_resized_y_center_host_in_100m']
    return gt_horizon
def show_in_plt(display_image, seg_filename, seg_dir, horizon_on_raw):
    """Show the processed image and the raw image (with horizon) stacked.

    :param display_image: processed image shown in the top subplot
    :param seg_filename: seg file name; 'seg'->'img' yields the raw name
    :param seg_dir: directory that contains the raw image
    :param horizon_on_raw: horizon row to draw on the raw image
    """
    fig = plt.figure(figsize=(2, 1))
    fig.add_subplot(2, 1, 1)
    plt.imshow(display_image)
    raw_filename = seg_filename.replace('seg', 'img')
    full_path_raw_image = os.path.join(seg_dir, raw_filename)
    raw_img = cv2.imread(full_path_raw_image)
    # img, horizon, line_width, clr=[0, 0, 255]
    draw_horizon_line(raw_img, horizon_on_raw, line_width=2, clr=[255, 0, 0])
    fig.add_subplot(2, 1, 2)
    plt.imshow(raw_img)
    # maximize the window before blocking on show()
    mng = plt.get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    plt.show()
def save_as_jpgs(display_image, seg_filename, seg_dir, horizon_on_raw, trgt_fn):
    """Save a two-panel figure (raw image on top, processed below) to disk.

    :param display_image: processed image shown in the bottom subplot
    :param seg_filename: seg file name; 'seg'->'img' yields the raw name
    :param seg_dir: directory that contains the raw image
    :param horizon_on_raw: horizon row to draw on the raw image
    :param trgt_fn: target file name passed to plt.savefig
    """
    fig = plt.figure(figsize=(12.0, 7.5))
    fig.add_subplot(2, 1, 2)
    plt.imshow(display_image)
    raw_filename = seg_filename.replace('seg', 'img')
    # raw_filename = raw_filename.replace('.png', '.jpg')
    full_path_raw_image = os.path.join(seg_dir, raw_filename)
    raw_img = cv2.imread(full_path_raw_image)
    # img, horizon, line_width, clr=[0, 0, 255]
    draw_horizon_line(raw_img, horizon_on_raw, line_width=2, clr=[255, 0, 0])
    fig.add_subplot(2, 1, 1)
    plt.imshow(raw_img)
    mng = plt.get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    plt.savefig(trgt_fn)
    # close the figure to free memory when saving many frames
    plt.close()
def img2_hybrid_points(img, num_columns=70, num_channels=3):
    """Compress a segmentation image into a fixed-width 'hybrid points' tensor.

    For each image row, the non-zero lane pixels (class values 3 and 4)
    and vehicle pixels (class value 8) are center-packed into
    `num_columns` slots, keeping both the class value and the normalized
    column index of every pixel.  A third plane carries the normalized
    row index.

    :param img: 2-D segmentation image of class indices
    :param num_columns: number of slots per row in the packed output
    :param num_channels: unused -- NOTE(review): confirm whether this
        parameter can be dropped from callers
    :return: array of shape (img.shape[0], num_columns, 5): lane
        values+indices, vehicle values+indices, row indices
    """
    # isolate lane pixels (classes 3 and 4)
    lanes_img = np.zeros_like(img)
    lanes_img[img == 4] = 4
    lanes_img[img == 3] = 3
    lanes_non_z_img = np.nonzero(lanes_img)
    # isolate vehicle pixels (class 8)
    vcls_img = np.zeros_like(img)
    vcls_img[img == 8] = 8
    vcls_non_z_img = np.nonzero(vcls_img)
    reduced_img_lanes = np.zeros((img.shape[0], num_columns))
    indices_img_lanes = np.zeros((img.shape[0], num_columns))
    reduced_img_vcls = np.zeros((img.shape[0], num_columns))
    indices_img_vcls = np.zeros((img.shape[0], num_columns))
    lined_indices = np.zeros((img.shape[0], num_columns))
    half_columns = num_columns // 2
    for i in range(img.shape[0]):
        # positions (within the nonzero lists) that belong to row i
        non_z_lanes_xx = np.nonzero(lanes_non_z_img[0] == i)
        non_z_vcls_xx = np.nonzero(vcls_non_z_img[0] == i)
        # clip to at most num_columns pixels per row
        num_col_lanes = min(num_columns, len(non_z_lanes_xx[0]))
        num_col_vcls = min(num_columns, len(non_z_vcls_xx[0]))
        # center the kept pixels inside the slot window
        beg_idx_lanes = half_columns - (num_col_lanes // 2)
        end_idx_lanes = beg_idx_lanes + len(lanes_non_z_img[1][non_z_lanes_xx[0][:num_col_lanes]])
        beg_idx_vcls = half_columns - (num_col_vcls // 2)
        end_idx_vcls = beg_idx_vcls + len(vcls_non_z_img[1][non_z_vcls_xx[0][:num_col_vcls]])
        reduced_img_lanes[i, beg_idx_lanes:end_idx_lanes] = img[i, lanes_non_z_img[1][non_z_lanes_xx[0][:num_col_lanes]]]
        reduced_img_vcls[i, beg_idx_vcls:end_idx_vcls] = img[i, vcls_non_z_img[1][non_z_vcls_xx[0][:num_col_vcls]]]
        # column indices normalized by 512 -- presumably the image width;
        # TODO confirm against the callers
        indices_img_lanes[i, beg_idx_lanes:end_idx_lanes] = lanes_non_z_img[1][non_z_lanes_xx[0][:num_col_lanes]] / 512.
        indices_img_vcls[i, beg_idx_vcls:end_idx_vcls] = vcls_non_z_img[1][non_z_vcls_xx[0][:num_col_vcls]] / 512.
        # row index normalized by 288 -- presumably the image height
        lined_indices[i, :] = np.full(num_columns, i / 288.)
    res_lanes = np.concatenate((np.expand_dims(reduced_img_lanes, axis=2), np.expand_dims(indices_img_lanes, axis=2)),
                               axis=2)
    res_vcls = np.concatenate((np.expand_dims(reduced_img_vcls, axis=2), np.expand_dims(indices_img_vcls, axis=2)),
                              axis=2)
    res = np.concatenate((res_lanes, res_vcls, np.expand_dims(lined_indices, axis=2)), axis=2)
    return res
def draw_horizon_line(img, horizon, line_width, clr=None):
    """Draw a full-width horizontal line at row `horizon` on `img`.

    :param img: the image to draw on (modified in place and returned)
    :param horizon: the row at which to draw the horizon line
    :param line_width: line thickness passed to draw_line2
    :param clr: line color; defaults to [0, 0, 255] -- presumably BGR
        (OpenCV convention), i.e. red; confirm against draw_line2
    :return: the image with the line drawn
    """
    # use a None sentinel instead of a mutable list default: a shared
    # default list could be mutated by a callee and leak across calls
    if clr is None:
        clr = [0, 0, 255]
    width = img.shape[1]
    draw_line2(img, horizon, 0,
               horizon, width, clr=clr, width=line_width)
    return img
|
import os
import asyncio
from pathlib import Path
from contextlib import contextmanager
import pytest
from lonelyconnect import game, startup, shutdown, auth
def test_auth(requests, admin_token):
    """Pairing codes require a valid admin token and are listed per side."""
    # a wrong bearer token must be rejected (plain string: the original
    # used an f-string with no placeholder)
    r = requests.get("/codes", headers={"Authorization": "Bearer wrong"})
    assert not r.ok
    r = requests.get("/codes", headers={"Authorization": f"Bearer {admin_token}"})
    assert r.ok
    assert r.json() == {}
    # pairing each side returns a fresh code that then shows up in /codes
    r = requests.post("/pair/left", headers={"Authorization": f"Bearer {admin_token}"})
    left_code = r.json()
    r = requests.post("/pair/right", headers={"Authorization": f"Bearer {admin_token}"})
    right_code = r.json()
    r = requests.get("/codes", headers={"Authorization": f"Bearer {admin_token}"})
    assert r.json() == {left_code: "left", right_code: "right"}
def test_roles(requests, admin_token, player_token):
    """Admin-only and player-only endpoints each reject the other role."""
    # /codes is admin-only
    assert requests.get("/codes", headers={"Authorization": f"Bearer {admin_token}"}).ok
    assert not requests.get(
        "/codes", headers={"Authorization": f"Bearer {player_token}"}
    ).ok
    # /buzz is player-only; enable buzzing first so a player buzz can succeed
    game.GAME.buzz_state = "active"
    assert not requests.post(
        "/buzz", headers={"Authorization": f"Bearer {admin_token}"}
    ).ok
    assert requests.post(
        "/buzz", headers={"Authorization": f"Bearer {player_token}"}
    ).ok
@contextmanager
def hide_true_swap_file():
    """Temporarily move a real ``swap.bin`` out of the way.

    Renames an existing swap file to ``.pytest-running.swap.bin`` for the
    duration of the ``with`` block and restores it afterwards, even when
    the block raises.
    """
    swap = Path("swap.bin")
    stashed = swap.rename(".pytest-running.swap.bin") if swap.exists() else None
    try:
        yield
    finally:
        if stashed:
            stashed.rename(swap)
def test_swap_file(sample_game):
    """Game state must survive a shutdown/startup cycle via the swap file."""
    # pop with a default so the test does not die with KeyError when the
    # variable is not set in the environment
    original = os.environ.pop("lonelyconnect_no_swap", None)
    os.environ["lonelyconnect_admin_code"] = "123456"
    try:
        with hide_true_swap_file():
            asyncio.run(startup())
            game.GAME = sample_game
            game.GAME.arbitrary = "foobar"
            asyncio.run(shutdown())
            # mutate after shutdown; the next startup must restore the
            # value that was persisted in the swap file
            game.GAME.arbitrary = "barfoo"
            asyncio.run(startup())
            assert game.GAME.arbitrary == "foobar"
            del game.GAME.arbitrary
    finally:
        # restore the environment even when an assertion fails; compare
        # against None so an empty-string value is restored too
        if original is not None:
            os.environ["lonelyconnect_no_swap"] = original
def test_various_admin_functions(requests, admin_token, sample_game):
    """Exercise buzz-state, scoring and team-naming admin endpoints."""
    game.GAME = sample_game
    assert game.GAME.buzz_state == "inactive"
    # admin can flip the buzz state
    requests.put("/buzz/active", headers={"Authorization": f"Bearer {admin_token}"})
    assert game.GAME.buzz_state == "active"
    # admin can award points to a side
    requests.post(
        "/score/left",
        data={"points": 23},
        headers={"Authorization": f"Bearer {admin_token}"},
    )
    assert game.GAME.points["left"] == 23
    assert not auth.USERS["right"].descriptive_name
    # team names are stored upper-cased by the server
    requests.post(
        "/name/right",
        data={"teamname": "foobar"},
        headers={"Authorization": f"Bearer {admin_token}"},
    )
    assert auth.USERS["right"].descriptive_name == "FOOBAR"
|
# SECURITY: hard-coded API credentials committed to source control.
# NOTE(review): these tokens should be rotated and loaded from environment
# variables or a secrets manager instead of living in the repository.
APIAI_CLIENT_ACCESS_TOKEN = 'e4bbf6d0e27547d281620e50d5d63d00'
OWM_CLIENT_ACCESS_TOKEN = 'f9070aef8910b5e6551d1816a02843a3'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2016 by Gaik Tamazian
# gaik (dot) tamazian (at) gmail (dot) com
import logging
import pyfaidx
import random
import string
from bioformats.blast import BlastTab
from chromosomer.exception import MapError
from chromosomer.exception import AlignmentToMapError
from bioformats.fasta import RandomSequence
from bioformats.fasta import Writer
from collections import defaultdict
from itertools import izip
from collections import namedtuple
from operator import attrgetter
logging.basicConfig()
logger = logging.getLogger(__name__)
class Map(object):
    """
    The class implements routines related to creation, reading and
    writing a fragment map that describes how genome fragments are
    situated on reference genome chromosomes.
    """
    # indices of the tab-separated columns that hold integer values
    numeric_values = (1, 2, 3, 6, 7)
    record_names = ('fr_name', 'fr_length', 'fr_start', 'fr_end',
                    'fr_strand', 'ref_chr', 'ref_start', 'ref_end')
    Record = namedtuple('Record', record_names)

    def __init__(self):
        """
        Initializes a Map object.
        """
        # records grouped by reference chromosome name
        self.__fragments = defaultdict(list)
        # NOTE(review): __block_adding is never read in this class --
        # confirm whether it is dead state
        self.__block_adding = False

    def add_record(self, new_record):
        """
        Given a new fragment record, add it to the fragment map.

        :param new_record: a record to be added to the map
        :type new_record: Map.Record
        """
        self.__fragments[new_record.ref_chr].append(new_record)

    def read(self, filename):
        """
        Read a fragment map from the specified file. The file records
        are added to the records in the map.

        :param filename: a name of a file to read a fragment map from
        :type: str
        """
        lineno = 0
        # count records with a dedicated counter: the previous code reused
        # the inner loop variable i, so the logged total was always one
        # past the last numeric column index instead of the record count
        num_records = 0
        with open(filename) as input_map_file:
            for line in input_map_file:
                lineno += 1
                line_parts = line.split('\t', 8)
                if len(line_parts) < 8:
                    logger.error('line %d: the incorrect number of '
                                 'columns', lineno)
                    raise MapError
                for i in self.numeric_values:
                    try:
                        line_parts[i] = int(line_parts[i])
                    except ValueError:
                        logger.error('line %d: the incorrect numeric '
                                     'value %s', lineno, line_parts[i])
                        raise MapError
                new_record = Map.Record(*line_parts)
                self.add_record(new_record)
                num_records += 1
        logger.debug('map of %d fragments was successfully read from '
                     '%s', num_records, filename)

    @property
    def records(self):
        # the raw defaultdict of records keyed by chromosome name
        return self.__fragments

    def chromosomes(self):
        """
        Return an iterator to the chromosomes the fragment map
        describes.

        :return: an iterator to iterate through the fragment map
            chromosomes in sorted order
        """
        sorted_chromosomes = sorted(self.__fragments.keys())
        for i in sorted_chromosomes:
            yield i

    def fragments(self, chromosome):
        """
        Return an iterator to fragments of the specified chromosome
        describes by the map. If the chromosome is absent, MapError is
        raised.

        :param chromosome: a chromosome which fragments are to be
            iterated
        :type chromosome: str
        :return: an iterator to iterate through the chromosome's
            fragments sorted by their start position on the reference
        """
        if chromosome not in self.__fragments:
            # use the module logger for consistency with the rest of the
            # class (this previously called logging.error directly)
            logger.error('%s missing in the fragment map', chromosome)
            raise MapError
        sorted_fragments = sorted(self.__fragments[chromosome],
                                  key=attrgetter('ref_start'))
        for i in sorted_fragments:
            yield i

    def write(self, filename):
        """
        Write the fragment map to the specified file.

        :param filename: a name of a file to write the fragment map to
        :type filename: str
        """
        template = '\t'.join(['{}'] * len(self.record_names)) + '\n'
        i = 0
        with open(filename, 'w') as output_map_file:
            for chromosome in self.chromosomes():
                for fragment in self.fragments(chromosome):
                    new_line = template.format(*fragment)
                    output_map_file.write(new_line)
                    i += 1
        logger.debug('the map of %d fragments successfully written '
                     'to %s', i, filename)

    def assemble(self, fragment_filename, output_filename,
                 save_soft_mask=False):
        """
        Assemble chromosome sequences from fragments.

        :param fragment_filename: a name of a FASTA file of fragment
            sequences
        :param output_filename: a name of the output FASTA file of
            the assembled chromosomes
        :param save_soft_mask: save soft-masking in sequences being
            assembled or not
        :type fragment_filename: str
        :type output_filename: str
        :type save_soft_mask: bool
        """
        logger.debug('assembling chromosomes...')
        logger.debug('FASTA of fragments: %s', fragment_filename)
        logger.debug('FASTA of chromosomes: %s', output_filename)
        logger.debug('saving soft mask: %r', save_soft_mask)
        num_fragments = 0
        num_chromosomes = 0
        fragment_fasta = pyfaidx.Fasta(fragment_filename)
        # NOTE: string.maketrans is Python 2 only; on Python 3 this would
        # be str.maketrans
        complement = string.maketrans('ATCGatcgNnXx', 'TAGCtagcNnXx')
        with Writer(output_filename) as chromosome_writer:
            for chromosome in self.chromosomes():
                seq = []
                for record in self.fragments(chromosome):
                    if record.fr_name == 'GAP':
                        record_seq = 'N' * (record.fr_end -
                                            record.fr_start)
                    else:
                        if record.fr_name not in fragment_fasta:
                            logger.error('the fragment %s sequence '
                                         'missing', record.fr_name)
                            raise MapError
                        record_seq = fragment_fasta[record.fr_name][
                            record.fr_start:record.fr_end].seq
                        # convert the sequence to non-unicode
                        record_seq = str(record_seq)
                        if not save_soft_mask:
                            record_seq = record_seq.upper()
                        # if the fragment orientation is reverse, then
                        # the reverse complement of the fragment
                        # sequence is written
                        if record.fr_strand == '-':
                            record_seq = record_seq[::-1].translate(
                                complement)
                    seq.append(record_seq)
                    num_fragments += 1
                chromosome_writer.write(chromosome, ''.join(seq))
                num_chromosomes += 1
        logger.debug('%d fragments assembled to %d chromosomes',
                     num_fragments, num_chromosomes)

    def shrink_gaps(self, gap_size):
        """
        Shrink gaps inserted into the map to the specified size.

        :param gap_size: a required gap size
        :type gap_size: int
        """
        logger.debug('shrinking gaps to %d bp', gap_size)
        total_shift = 0
        # process each chromosome separately
        for chrom in self.__fragments.keys():
            if len(self.__fragments[chrom]) > 1:
                shifts = []
                # iterate through gaps
                for i in self.__fragments[chrom]:
                    if i.fr_name == 'GAP':
                        len_diff = i.fr_length - gap_size
                        shifts.append(len_diff)
                        total_shift += len_diff
                    else:
                        shifts.append(0)
                # calculate absolute shifts for chromosome fragments
                accumulated_shift = 0
                for i in xrange(len(shifts)):
                    accumulated_shift += shifts[i]
                    shifts[i] = accumulated_shift
                for i in xrange(len(self.__fragments[chrom])):
                    fragment = list(self.__fragments[chrom][i])
                    if fragment[0] == 'GAP':
                        # change gap length and end position; a gap keeps
                        # the shift accumulated before it (i-1)
                        # NOTE(review): if a chromosome ever started with
                        # a GAP, shifts[i-1] would wrap to shifts[-1] --
                        # confirm maps never begin with a gap
                        fragment[1] = gap_size
                        fragment[3] = gap_size
                        fragment[6] -= shifts[i-1]
                        fragment[7] = fragment[6] + gap_size
                    else:
                        fragment[6] -= shifts[i]
                        fragment[7] -= shifts[i]
                    self.__fragments[chrom][i] = Map.Record(*fragment)
        logger.debug('in total, gaps shrinked by %d bp', total_shift)

    def summary(self):
        """
        Return a summary on the fragment map.

        :return: a dictionary of tuples each describing one assembled
            chromosome as (fragment count, total gap length, length)
        :rtype: dict
        """
        summary = {}
        for chromosome in self.chromosomes():
            gaps = 0
            chr_fragments = list(self.fragments(chromosome))
            for fragment in chr_fragments:
                if fragment.fr_name == 'GAP':
                    gaps += fragment.fr_length
            # the chromosome length is the end of its last fragment
            chr_length = chr_fragments[-1].ref_end
            fr_num = len(chr_fragments)
            summary[chromosome] = (fr_num, gaps, chr_length)
        return summary

    def convert2bed(self, bed_filename):
        """
        Given a name of the output BED3 file, write the fragment map
        to it in the BED format.

        :param bed_filename: a name of the output BED file of the
            fragment map
        :type bed_filename: str
        """
        template = '\t'.join(['{}'] * (len(Map.record_names) + 1)) + \
            '\n'
        with open(bed_filename, 'w') as bed_file:
            for chromosome in sorted(self.chromosomes()):
                for fragment in self.fragments(chromosome):
                    bed_file.write(template.format(
                        fragment.ref_chr,
                        fragment.ref_start,
                        fragment.ref_end,
                        fragment.fr_name,
                        1000,
                        fragment.fr_strand,
                        fragment.fr_start,
                        fragment.fr_end,
                        fragment.fr_length
                    ))
class AlignmentToMap(object):
    """
    The class implements routines to create a fragment map from a set
    of alignments between fragments to be assembled and reference
    chromosomes.
    """
    # an anchor ties a fragment interval to a reference interval
    Anchor = namedtuple('Anchor', ('fragment', 'fr_start', 'fr_end',
                                   'fr_strand', 'ref_chr',
                                   'ref_start', 'ref_end'))

    def __init__(self, gap_size, fragment_lengths,
                 min_fragment_length=None, centromeres=None):
        """
        Create a converter object to create fragment maps from
        alignments between reference chromosomes and fragments to be
        assembled.

        :param gap_size: a size of a gap between fragments
        :param fragment_lengths: a dictionary of fragment lengths
            which keys are their names and values are their lengths
        :param min_fragment_length: the minimal length of a fragment
            to be included in the map
        :param centromeres: a dictionary of reference chromosome
            centromere locations
        :type gap_size: int
        :type fragment_lengths: dict
        :type min_fragment_length: int
        :type centromeres: dict
        """
        self.__gap_size = gap_size
        self.__fragment_lengths = fragment_lengths
        self.__min_fragment_length = min_fragment_length
        self.__centromeres = centromeres
        self.__anchors = {}
        self.__unlocalized = []
        self.__unplaced = []
        self.__fragment_map = Map()

    def blast(self, blast_alignments, bitscore_ratio_threshold):
        """
        Create a fragment map from BLAST blast_alignments between
        fragments and reference chromosomes.

        :param blast_alignments: BLAST blast_alignments
        :param bitscore_ratio_threshold: the minimal ratio of two
            greatest fragment alignment bit scores to consider the
            fragment placed to a reference
        :type blast_alignments: BlastTab
        :type bitscore_ratio_threshold: float
        :return: a tuple containing the fragment map constructed from
            the provided BLAST alignments, the list of unlocalized
            fragments and a list of unplaced fragments
        :rtype: tuple
        """
        self.__anchors = {}
        self.__unlocalized = []
        self.__unplaced = []
        # at most the two best-scoring alignments are kept per fragment
        temp_anchors = defaultdict(list)
        for alignment in blast_alignments.alignments():
            if self.__min_fragment_length is not None:
                # check if the fragment length is equal or greater
                # than the threshold value
                try:
                    if self.__fragment_lengths[alignment.query] < \
                            self.__min_fragment_length:
                        # skip the alignment
                        continue
                except KeyError:
                    logger.error('the fragment %s length is missing',
                                 alignment.query)
                    raise AlignmentToMapError
            # consider the centromeres if required
            if self.__centromeres is not None and alignment.subject \
                    in self.__centromeres:
                # the chromosome a fragment was aligned to has a
                # centromere, so we determine which arm the alignment
                # refers to and modify the chromosome name by adding
                # '_1' or '_2' to it
                if min(alignment.s_start, alignment.s_end) < \
                        self.__centromeres[alignment.subject].start:
                    arm_prefix = '_1'
                else:
                    arm_prefix = '_2'
                new_alignment = list(alignment)
                new_alignment[1] += arm_prefix
                alignment = BlastTab.Alignment(*new_alignment)
            temp_anchors[alignment.query].append(alignment)
            # check if there is more than 2 alignments for the
            # fragment; if there is, then leave two fragments with
            # the greatest bit-score values
            if len(temp_anchors[alignment.query]) > 2:
                temp_anchors[alignment.query] = sorted(
                    temp_anchors[alignment.query],
                    key=attrgetter('bit_score'),
                    reverse=True
                )
                temp_anchors[alignment.query] = temp_anchors[
                    alignment.query][0:2]
        for fragment, alignments in temp_anchors.iteritems():
            if len(alignments) > 1:
                # check if the ratio of the alignment bit scores is
                # greater than the required threshold to consider a
                # fragment places
                if alignments[0].bit_score/alignments[1].bit_score > \
                        bitscore_ratio_threshold:
                    # BLAST coordinates are 1-based; starts are shifted
                    # to 0-based here
                    self.__anchors[fragment] = \
                        AlignmentToMap.Anchor(
                            fragment=alignments[0].query,
                            fr_start=alignments[0].q_start - 1,
                            fr_end=alignments[0].q_end,
                            fr_strand='+' if alignments[0].s_start <
                            alignments[0].s_end else '-',
                            ref_chr=alignments[0].subject,
                            ref_start=min(alignments[0].s_start,
                                          alignments[0].s_end) - 1,
                            ref_end=max(alignments[0].s_start,
                                        alignments[0].s_end)
                        )
                elif alignments[0].subject == alignments[1].subject:
                    # the fragment is considered unlocalized
                    self.__unlocalized.append(
                        (fragment, alignments[0].subject))
                else:
                    # the fragment is considered unplaced
                    self.__unplaced.append(fragment)
            else:
                # there is a single alignment, use it as an anchor
                self.__anchors[fragment] = \
                    AlignmentToMap.Anchor(
                        fragment=alignments[0].query,
                        fr_start=alignments[0].q_start - 1,
                        fr_end=alignments[0].q_end,
                        fr_strand='+' if alignments[0].s_start <
                        alignments[0].s_end else '-',
                        ref_chr=alignments[0].subject,
                        ref_start=min(alignments[0].s_start,
                                      alignments[0].s_end) - 1,
                        ref_end=max(alignments[0].s_start,
                                    alignments[0].s_end)
                    )
        # get total lengths of mapped, unlocalized and unplaced
        # fragments
        total_mapped = total_unlocalized = total_unplaced = 0
        for i in self.__anchors.itervalues():
            total_mapped += self.__fragment_lengths[i.fragment]
        for i in self.__unlocalized:
            total_unlocalized += self.__fragment_lengths[i[0]]
        for i in self.__unplaced:
            total_unplaced += self.__fragment_lengths[i]
        logger.info('%d mapped fragments of total length %d bp',
                    len(self.__anchors), total_mapped)
        logger.info('%d unlocalized fragments of total length %d bp',
                    len(self.__unlocalized), total_unlocalized)
        logger.info('%d unplaced fragments of total length %d bp',
                    len(self.__unplaced), total_unplaced)
        self.__anchor_fragments()
        return (self.__fragment_map, self.__unlocalized,
                self.__unplaced)

    def __anchor_fragments(self):
        """
        Build a fragment map from anchors.
        """
        # first, we split anchors by reference genome chromosomes
        chr_anchors = defaultdict(list)
        for anchor in self.__anchors.itervalues():
            chr_anchors[anchor.ref_chr].append(anchor)
        # second, we sort the anchors by their position on the
        # chromosomes
        for chr_name in chr_anchors.iterkeys():
            chr_anchors[chr_name] = sorted(
                chr_anchors[chr_name], key=attrgetter('ref_start')
            )
        # now we form a fragment map from the anchors
        total_inserted_gaps = 0
        self.__fragment_map = Map()
        for chr_name in chr_anchors.iterkeys():
            previous_end = 0
            for anchor in chr_anchors[chr_name]:
                try:
                    fragment_length = self.__fragment_lengths[
                        anchor.fragment]
                except KeyError:
                    logger.error('the fragment %s length is missing',
                                 anchor.fragment)
                    raise AlignmentToMapError
                # determine the fragment's start and end positions
                if anchor.fr_strand == '+':
                    ref_start = anchor.ref_start - anchor.fr_start
                    ref_end = ref_start + fragment_length
                else:
                    ref_end = anchor.ref_end + anchor.fr_start
                    ref_start = ref_end - fragment_length
                new_record = Map.Record(
                    fr_name=anchor.fragment,
                    fr_length=fragment_length,
                    fr_start=0,
                    fr_end=fragment_length,
                    fr_strand=anchor.fr_strand,
                    ref_chr=anchor.ref_chr,
                    ref_start=previous_end,
                    ref_end=previous_end + ref_end - ref_start
                )
                self.__fragment_map.add_record(new_record)
                previous_end += ref_end - ref_start
                # add a gap
                new_gap = Map.Record(
                    fr_name='GAP',
                    fr_length=self.__gap_size,
                    fr_start=0,
                    fr_end=self.__gap_size,
                    fr_strand='+',
                    ref_chr=anchor.ref_chr,
                    ref_start=previous_end,
                    ref_end=previous_end + self.__gap_size
                )
                previous_end += self.__gap_size
                self.__fragment_map.add_record(new_gap)
                total_inserted_gaps += self.__gap_size
        logger.info('%d chromosomes formed', len(chr_anchors))
        logger.info('%d bp of gaps inserted', total_inserted_gaps)
class Simulator(object):
    """
    The class describes routines to simulate genome fragments and
    chromosomes that are composed from them.

    NOTE: this class uses Python 2 APIs (xrange, string.maketrans,
    dict.iteritems).
    """
    def __init__(self, fragment_length, fragment_number,
                 chromosome_number, unplaced_number, gap_size):
        """
        Create a fragment simulator object.

        :param fragment_length: the length of a fragment
        :param fragment_number: the number of fragments constituting
            the chromosomes
        :param chromosome_number: the number of chromosomes
        :param unplaced_number: the number of fragments not included
            in the chromosomes
        :param gap_size: the length of gaps between fragments in
            chromosomes
        :type fragment_length: int
        :type fragment_number: int
        :type chromosome_number: int
        :type unplaced_number: int
        :type gap_size: int
        """
        self.__fragment_length = fragment_length
        self.__fragment_number = fragment_number
        self.__chromosome_number = chromosome_number
        self.__gap_size = gap_size
        # create fragment sequences
        self.__fragments = {}
        seq_generator = RandomSequence(self.__fragment_length)
        for i in xrange(self.__fragment_number):
            fr_name = 'fragment{}'.format(i+1)
            self.__fragments[fr_name] = seq_generator.get()
        self.__map = Map()
        self.__create_map()
        self.__assemble_chromosomes()
        # add unplaced fragments (they get sequences but no map records)
        for i in xrange(self.__fragment_number,
                        self.__fragment_number + unplaced_number):
            fr_name = 'fragment{}'.format(i+1)
            self.__fragments[fr_name] = seq_generator.get()

    def __create_map(self):
        """
        Assign fragments to chromosomes randomly and create a
        fragment map.
        """
        fragment_positions = [0] * self.__chromosome_number
        for i in xrange(self.__fragment_number):
            # pick a random chromosome and orientation for the fragment
            chr_num = random.randrange(self.__chromosome_number)
            fr_strand = random.choice(('+', '-'))
            self.__map.add_record(Map.Record(
                fr_name='fragment{}'.format(i+1),
                fr_length=self.__fragment_length,
                fr_start=0,
                fr_end=self.__fragment_length,
                fr_strand=fr_strand,
                ref_chr='chr{}'.format(chr_num+1),
                ref_start=fragment_positions[chr_num],
                ref_end=fragment_positions[chr_num] +
                self.__fragment_length
            ))
            fragment_positions[chr_num] += self.__fragment_length
            # every fragment is followed by a gap record
            self.__map.add_record(Map.Record(
                fr_name='GAP',
                fr_length=self.__gap_size,
                fr_start=0,
                fr_end=self.__gap_size,
                fr_strand='+',
                ref_chr='chr{}'.format(chr_num+1),
                ref_start=fragment_positions[chr_num],
                ref_end=fragment_positions[chr_num] + self.__gap_size
            ))
            fragment_positions[chr_num] += self.__gap_size

    def __assemble_chromosomes(self):
        """
        Get chromosome sequences from fragments using the constructed
        fragment map.
        """
        complement = string.maketrans('ATCGatcgNnXx', 'TAGCtagcNnXx')
        chromosomes = defaultdict(list)
        for i in self.__map.chromosomes():
            for fr in self.__map.fragments(i):
                if fr.fr_name == 'GAP':
                    temp_fragment = 'N' * fr.fr_length
                else:
                    temp_fragment = self.__fragments[fr.fr_name]
                    # reverse-oriented fragments contribute their
                    # reverse complement
                    if fr.fr_strand == '-':
                        temp_fragment = temp_fragment[::-1].translate(
                            complement)
                chromosomes[i].append(temp_fragment)
            chromosomes[i] = ''.join(chromosomes[i])
        self.__chromosomes = chromosomes

    def write(self, map_file, fragment_file, chromosome_file):
        """
        Write the produced data - a fragment map, a FASTA file of
        fragments and a FASTA file of chromosomes - to the specified
        files.

        :param map_file: a name of a file to write the fragment map to
        :param fragment_file: a name of a file to write fragment
            sequences to
        :param chromosome_file: a name of a file to write chromosome
            sequences to
        :type map_file: str
        :type fragment_file: str
        :type chromosome_file: str
        """
        self.__map.write(map_file)
        with Writer(fragment_file) as fragment_fasta:
            for name, seq in self.__fragments.iteritems():
                fragment_fasta.write(name, seq)
        with Writer(chromosome_file) as chromosome_fasta:
            for name, seq in self.__chromosomes.iteritems():
                chromosome_fasta.write(name, seq)
        logger.debug('a simulated map of %d fragments written to %s',
                     len(self.__map.records), map_file)
        logger.debug('%d simulated fragments written to %s',
                     len(self.__fragments), fragment_file)
        logger.debug('%d simulated chromosomes written to %s',
                     len(self.__chromosomes), chromosome_file)
class SeqLengths(object):
    """
    The class implements routines to handle fragment sequence lengths.
    """

    def __init__(self, filename):
        """
        Create a SeqLengths object to handle sequence lengths of the
        specified FASTA file.

        :param filename: a name of a FASTA file with sequences which
            lengths are to be derived
        :type filename: str
        """
        self.__filename = filename
        self.__lengths = {}

    def lengths(self):
        """
        Return a dictionary of sequence lengths.

        :return: a dictionary which keys are sequence names and
            values are their lengths
        :rtype: dict
        """
        total_length = 0
        # lazily populate the cache on the first call
        if not self.__lengths:
            fasta_reader = pyfaidx.Fasta(self.__filename)
            for seq_name in fasta_reader.keys():
                seq_length = len(fasta_reader[seq_name])
                self.__lengths[seq_name] = seq_length
                total_length += seq_length
        # total_length stays 0 when the cached lengths are reused
        logger.debug('%d sequences analyzed with the total length of '
                     '%d bp', len(self.__lengths), total_length)
        return self.__lengths
def agp2map(agp_filename, map_filename):
    """
    Given a name of an AGP file, convert it to the fragment map format.

    :param agp_filename: a name of an AGP file
    :param map_filename: a name of an output fragment map file
    :type agp_filename: str
    :type map_filename: str
    """
    with open(agp_filename) as agp_file, open(map_filename, 'w') as map_file:
        for raw_line in agp_file:
            # skip AGP comment lines
            if raw_line.startswith('#'):
                continue
            fields = raw_line.rstrip().split(None, 8)
            if fields[4] == 'N':
                # a gap record: column 6 holds the gap length
                gap_len = int(fields[5])
                record = ('GAP', gap_len, 0, gap_len, '+',
                          fields[0], int(fields[1]) - 1, int(fields[2]))
            else:
                # a sequence fragment: AGP coordinates are 1-based and
                # inclusive, hence the +1 length and the -1 start shift
                frag_len = int(fields[7]) - int(fields[6]) + 1
                record = (fields[5], frag_len, 0, frag_len, fields[8],
                          fields[0], int(fields[1]) - 1, int(fields[2]))
            map_file.write('\t'.join(map(str, record)) + '\n')
|
import matplotlib.pyplot as plt
import numpy as np
import math
g=9.8  # gravitational acceleration (m/s^2)
l=9.8  # pendulum length (m); g/l = 1, so the natural frequency is 1 rad/s
wd=2.0/3  # driving angular frequency Omega_D (rad/s)
q=0.5  # damping coefficient
class Simple_pendulum:
    """Euler-Cromer simulation of a linearized simple pendulum.

    Keeps parallel lists of angle (theta), angular velocity (w) and
    time (t) samples, appended one step at a time.
    """
    def __init__(self,_theta0=0.2,_w=0,_t0=0,_dt=math.pi/1000,_tfinal=500):
        """Initialize the state lists with the initial conditions.

        :param _theta0: initial angle (rad)
        :param _w: initial angular velocity (rad/s)
        :param _t0: initial time (s)
        :param _dt: integration time step (s)
        :param _tfinal: time at which integration stops (s)
        """
        self.theta=[]
        self.theta.append(_theta0)
        self.t=[]
        self.t.append(_t0)
        self.w=[]
        self.w.append(_w)
        self.dt=_dt
        self.tfinal=_tfinal
        # NOTE: fft_cal reads the module-level dt, so the step is also
        # published as a global here -- a fragile coupling kept for
        # compatibility with the existing plotting code
        global dt
        dt=_dt
        #print self.theta[-1],self.t[-1],self.w[-1]
        return

    def calculate(self):
        """Integrate the linear pendulum (Euler-Cromer) until tfinal."""
        global g,l
        while (self.t[-1]<self.tfinal):
            self.w.append(self.w[-1]-g/l*self.theta[-1]*self.dt)
            self.theta.append(self.theta[-1]+self.w[-1]*self.dt)
            self.t.append(self.t[-1]+self.dt)
        return

    def plot_2d(self,fd):
        """Plot theta versus time, labeled with the driving force fd."""
        plt.plot(self.t,self.theta,label=r'$F_D=%.3f$'%fd)
        return

    def fft_cal(self,_xmax,fd):
        """Plot the FFT amplitude spectrum of theta up to _xmax Hz.

        :param _xmax: upper limit of the frequency axis (Hz)
        :param fd: driving force amplitude used in the legend label
        """
        x = np.array(self.theta)
        N = len(x)
        # keep the positive-frequency half of the spectrum; the index must
        # be an integer (the original N/2.0-1 was a float, which breaks
        # slicing, and its arange(n-1) frequency axis was one sample
        # shorter than the spectrum, which breaks plt.plot)
        n = N // 2 - 1
        f = 1 / (N * dt) * np.arange(n)
        y = np.fft.fft(x)
        y = np.array(abs(y[0:n]))
        plt.plot(f, y, label='$F_D=%.3f$' % fd)
        plt.xlim(0.0, _xmax)
class Dri_pendulum(Simple_pendulum):
    """Driven, damped nonlinear pendulum with theta wrapped to [-pi, pi]."""
    def calculate(self,fd):
        """Integrate with Euler-Cromer until tfinal, wrapping the angle.

        :param fd: amplitude of the sinusoidal driving force
        """
        global g,l,wd,q
        while self.t[-1] < self.tfinal:
            # restoring + damping - driving terms
            torque = g / l * math.sin(self.theta[-1]) + q * self.w[-1] - fd * math.sin(wd * self.t[-1])
            self.w.append(self.w[-1] - torque * self.dt)
            candidate = self.theta[-1] + self.w[-1] * self.dt
            # keep the angle inside one 2*pi window for plotting
            if candidate > math.pi:
                candidate -= 2 * math.pi
            elif candidate < -math.pi:
                candidate += 2 * math.pi
            self.theta.append(candidate)
            self.t.append(self.t[-1] + self.dt)
        return
class Dri_pendulum_add(Simple_pendulum):
    """Driven, damped nonlinear pendulum; theta accumulates without wrapping."""
    def calculate(self,fd):
        """Integrate with Euler-Cromer until tfinal, letting theta grow.

        :param fd: amplitude of the sinusoidal driving force
        """
        global g,l,wd,q
        while self.t[-1] < self.tfinal:
            accel = -(g / l * math.sin(self.theta[-1]) + q * self.w[-1] - fd * math.sin(wd * self.t[-1]))
            self.w.append(self.w[-1] + accel * self.dt)
            self.theta.append(self.theta[-1] + self.w[-1] * self.dt)
            self.t.append(self.t[-1] + self.dt)
# Build a 4x2 grid: each row shows the theta(t) time series (left) and its
# FFT spectrum (right) for one driving amplitude F_D.
fig=plt.figure(figsize=(12,10))
# F_D = 1.35
ax1=fig.add_subplot(421)
ax1.set_ylabel(r'$\theta$(rad)')
a=Dri_pendulum(_dt=0.01,_tfinal=100)
a.calculate(1.35)
a.plot_2d(1.35)
plt.legend(loc='best',prop={'size':10},frameon=False)
ax2=fig.add_subplot(422)
ax2.set_ylabel('intensity')
a.fft_cal(3,1.35)
plt.legend(loc='best',prop={'size':10},frameon=False)
# F_D = 1.44: period doubling (the Omega_D/2 line appears in the spectrum)
ax3=fig.add_subplot(423)
ax3.set_ylabel(r'$\theta$(rad)')
a=Dri_pendulum(_dt=0.01,_tfinal=100)
a.calculate(1.44)
a.plot_2d(1.44)
plt.legend(loc='best',prop={'size':10},frameon=False)
ax4=fig.add_subplot(424)
ax4.set_ylabel('intensity')
a.fft_cal(2,1.44)
plt.legend(loc='best',prop={'size':10},frameon=False)
plt.text(0.620,5000,r'The frequency $\frac{\Omega_D}{2}$ appears',fontsize=11)
# F_D = 1.465
ax5=fig.add_subplot(425)
ax5.set_ylabel(r'$\theta$(rad)')
a=Dri_pendulum(_dt=0.01,_tfinal=100)
a.calculate(1.465)
a.plot_2d(1.465)
plt.legend(loc='best',prop={'size':10},frameon=False)
ax6=fig.add_subplot(426)
ax6.set_ylabel('intensity')
a.fft_cal(1.5,1.465)
plt.legend(loc='best',prop={'size':10},frameon=False)
# F_D = 1.501: chaotic regime
ax7=fig.add_subplot(427)
ax7.set_xlabel(r'$t(s)$')
ax7.set_ylabel(r'$\theta$(rad)')
a=Dri_pendulum(_dt=0.01,_tfinal=100)
a.calculate(1.501)
a.plot_2d(1.501)
plt.legend(loc='best',prop={'size':10},frameon=False)
ax8=fig.add_subplot(428)
ax8.set_xlabel(r'$frequency(Hz)$')
ax8.set_ylabel('intensity')
a.fft_cal(1.5,1.501)
plt.text(0.543,3200,r'The frequency $Chaos$ appears',fontsize=11)
plt.legend(loc='best',prop={'size':10},frameon=False)
plt.show()
|
"""Función recibe la palabra(parámetro) que imprime 1000 veces"""
def milPal(pal):
# Devuelve la palabra en pantalla 1000 veces
if str.isnumeric(pal):
raise ValueError
print((pal + " ")*1000)
return((pal + " ")*1000)
# Prompt the user for the word (original comment: "Se pide la palabra")
if __name__ == "__main__":
    # infinite prompt loop; exit with Ctrl-C
    while True:
        pal = input("Ingrese la palabra deseada: ")
        try:
            milPal(pal)
        except ValueError:
            # milPal raises ValueError for purely numeric input
            print("NO, no. Ingrese una palabra")
|
class Recipe:
    """A cooking recipe with validated attributes.

    :param name: non-empty recipe name
    :param cooking_lvl: difficulty level, an int from 1 to 5
    :param cooking_time: cooking time in minutes, a non-negative int
    :param ingredients: list of ingredients
    :param description: free-text description
    :param recipe_type: one of "starter", "lunch" or "dessert"
    :raises ValueError: if any argument fails validation
    """

    def __init__(self, name, cooking_lvl, cooking_time, ingredients, description, recipe_type):
        if not isinstance(name, str) or name == '':
            raise ValueError
        self.name = name
        # bool is a subclass of int; exclude it explicitly to keep the
        # original type(x) != int behavior of rejecting booleans
        if not isinstance(cooking_lvl, int) or isinstance(cooking_lvl, bool) \
                or not 1 <= cooking_lvl <= 5:
            raise ValueError
        self.cooking_lvl = cooking_lvl
        if not isinstance(cooking_time, int) or isinstance(cooking_time, bool) \
                or cooking_time < 0:
            raise ValueError
        self.cooking_time = cooking_time
        if not isinstance(ingredients, list):
            raise ValueError
        self.ingredients = ingredients
        if not isinstance(description, str):
            raise ValueError
        self.description = description
        # membership test instead of a chain of != comparisons
        if recipe_type not in ("starter", "lunch", "dessert"):
            raise ValueError
        self.recipe_type = recipe_type

    def __str__(self):
        """Return a human-readable summary of the recipe."""
        text = "The {n} recipe is of level {l}, needs {t} min, uses the following ingredients {i},{d}it is taken as {r}".format(
            n=self.name, l=self.cooking_lvl, t=self.cooking_time,
            i=self.ingredients, d=self.description, r=self.recipe_type)
        return text
|
# NOTE: the bare word `python` below was a stray shell/REPL artifact that
# raised NameError the moment this module was imported; keep it commented.
# python
###########
import numpy as np
import pandas as pd
# from pandas.plotting import scatter_matrix
from regression_tools.dftransformers import (
    ColumnSelector,
    Identity,
    Intercept,
    FeatureUnion,
    MapFeature,
    StandardScaler)
from scipy import stats
from plot_univariate import plot_one_univariate
# pandas.tools was removed in pandas 0.20; scatter_matrix now lives in
# pandas.plotting
from pandas.plotting import scatter_matrix
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from basis_expansions.basis_expansions import (
    Polynomial, LinearSpline)
from regression_tools.plotting_tools import (
    plot_univariate_smooth,
    bootstrap_train,
    display_coef,
    plot_bootstrap_coefs,
    plot_partial_depenence,
    plot_partial_dependences,
    predicteds_vs_actuals)
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import r2_score as r2
from r_squared_funcs import (
    r2_for_last_n_cycles,
    r2_generator_last_n_cycles)
from sklearn.pipeline import Pipeline
from sklearn.utils import resample
from basis_expansions.basis_expansions import NaturalCubicSpline
import random
import os
os.getcwd()
# fix the global numpy seed so the analysis is reproducible
np.random.seed(137)
#########################
###### Self made functions######
# NOTE: duplicate of the r_squared_funcs import above, kept intentionally
from r_squared_funcs import (
    r2_for_last_n_cycles,
    r2_generator_last_n_cycles)
from enginedatatransformer import transform_dataframes_add_ys
from plot_pred_vs_act import plot_many_predicteds_vs_actuals
# from export_linear_model import export_linear_model_to_txt
from engine_pipeline import fit_engine_pipeline
##################################
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Activation, Flatten
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from matplotlib import pyplot as plt
import seaborn as sb
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore', category=DeprecationWarning)
from xgboost import XGBRegressor
from tensorflow.python.keras import backend  # Fix problem with TF 2.0 no 'get default graph'
#################################################################################
### import data
##### training #############
df1 = pd.read_csv('/home/superstinky/Seattle_g89/final_project_data/flight_engineer/enginedata/train_01_fd.csv', sep= " " )
df2 = pd.read_csv('/home/superstinky/Seattle_g89/final_project_data/flight_engineer/enginedata/train_02_fd.csv', sep= ' ')
df3 = pd.read_csv('/home/superstinky/Seattle_g89/final_project_data/flight_engineer/enginedata/train_03_fd.csv', sep= ' ')
df4 = pd.read_csv('/home/superstinky/Seattle_g89/final_project_data/flight_engineer/enginedata/train_04_fd.csv', sep= ' ')
################ This will add a column for the y value which will be the number of cycles until the engine fails.
# It will be a countdown of the total cycles for training set ######
## set dataf to dataframe name ####
#### add column to the end for logistic predictive model ######
####
data_frames_to_transform = [df1, df2, df3 , df4]
def transform_dataframes_add_ys(data_list=None, *args):
    """Add 'cycles_to_fail' and 'y_failure' columns to each dataframe, in place.

    For every engine (grouped by the 'unit' column) the maximum observed
    'time_cycles' is taken as the failure point.  Each row then gets:
      * cycles_to_fail — cycles remaining until that engine fails (counts down),
      * y_failure      — fraction of life remaining (cycle / max cycles).

    Assumes rows are ordered by unit ascending, then time_cycles ascending,
    and that units are numbered 1..max(unit) — TODO confirm with callers.

    Args:
        data_list: Dataframes with 'unit' and 'time_cycles' columns; each is
            mutated in place.  Defaults to None (treated as an empty list) to
            avoid the original mutable-default-argument pitfall.
        *args: Unused; kept for backward compatibility with the original
            signature.
    """
    if data_list is None:
        data_list = []
    for df in data_list:
        # Total number of cycles each engine survived, ordered by unit id.
        max_cycles = [max(df['time_cycles'][df['unit'] == num])
                      for num in range(1, max(df['unit']) + 1)]
        cycles_to_fail = []
        y_failure = []
        for total in max_cycles:
            for cycle in range(total, 0, -1):
                y_failure.append(cycle / total)
                cycles_to_fail.append(cycle)
        df['cycles_to_fail'] = cycles_to_fail
        df['y_failure'] = y_failure
# df1.cycles_to_fail
### Transform all four dataframes #######
transform_dataframes_add_ys(data_frames_to_transform)
############################
# use column discribe out how remove the columns that do not change ####
col = df1.columns
col = ['unit', 'time_cycles', 'op_set_1', 'op_set_2', 'op_set_3', 't2_Inlet',
't24_lpc', 't30_hpc', 't50_lpt', 'p2_fip', 'p15_pby', 'p30_hpc',
'nf_fan_speed', 'nc_core_speed', 'epr_p50_p2', 'ps_30_sta_press',
'phi_fp_ps30', 'nrf_cor_fan_sp', 'nrc_core_sp', 'bpr_bypass_rat',
'far_b_air_rat', 'htbleed_enthalpy', 'nf_dmd_dem_fan_sp', 'pcn_fr_dmd',
'w31_hpt_cool_bl', 'w32_lpt_cool_bl', 'cycles_to_fail']
##### End of data import file #######
############ Start of data analysis #############
## this will plot all columns to check for variation within the feature data
# for name in col:
# df1.plot.scatter( 'cycles_to_fail', name, alpha = .3)
# plt.show()
# #
###### Several features appear to not be predictive ######
# limit the features that are in the model scatter plot #####
small_features_list = ['time_cycles', 't24_lpc', 't30_hpc', 't50_lpt',
'p30_hpc', 'nf_fan_speed', 'nc_core_speed', 'ps_30_sta_press',
'phi_fp_ps30', 'nrf_cor_fan_sp', 'nrc_core_sp', 'bpr_bypass_rat',
'htbleed_enthalpy', 'w31_hpt_cool_bl', 'w32_lpt_cool_bl' ]
##### Scatter matrix using time cycles #####
# scatter_matrix = pd.scatter_matrix(df1[small_features_list], alpha=0.2, figsize=(20, 20), diagonal='kde')
# for ax in scatter_matrix.ravel():
# ax.set_xlabel(ax.get_xlabel(), fontsize = 6, rotation = 90)
# ax.set_ylabel(ax.get_ylabel(), fontsize = 6, rotation = 0)
# plt.show()
##### Scatter matrix using cycles to fail #####
small_features_list = ['cycles_to_fail' , 't24_lpc', 't30_hpc', 't50_lpt',
'p30_hpc', 'nf_fan_speed', 'nc_core_speed', 'ps_30_sta_press',
'phi_fp_ps30', 'nrf_cor_fan_sp', 'nrc_core_sp', 'bpr_bypass_rat',
'htbleed_enthalpy', 'w31_hpt_cool_bl', 'w32_lpt_cool_bl' ]
# scatter_matrix = pd.scatter_matrix(df1[small_features_list], alpha=0.2, figsize=(20, 20), diagonal='kde')
# for ax in scatter_matrix.ravel():
# ax.set_xlabel(ax.get_xlabel(), fontsize = 6, rotation = 90)
# ax.set_ylabel(ax.get_ylabel(), fontsize = 6, rotation = 0)
# plt.show()
##### #####
# view the description of each column
col = df1.columns
# col = train_features
for c in col:
print (df1[c].describe() )
### This will print only the standard deviation for each column
col = df1.columns
for c in col:
print (df1[c].describe()[2] )
### This will remove features based the standard deviation for each column
train_features = []
limit = .01
col = df1.columns
for c in col:
if (df1[c].describe()[2] ) >= .01:
train_features.append(c)
train_features
#### Created the short list of features to train to ######
#### List of features to train the model to ####### ### remove 'unit'
train_features = ['time_cycles', 't24_lpc', 't30_hpc', 't50_lpt',
'p30_hpc', 'nf_fan_speed', 'nc_core_speed', 'ps_30_sta_press',
'phi_fp_ps30', 'nrf_cor_fan_sp', 'nrc_core_sp', 'bpr_bypass_rat',
'htbleed_enthalpy', 'w31_hpt_cool_bl', 'w32_lpt_cool_bl']
###### the training features has the columns to train to ###
####### the columns time_cycles and time_to_fail have been removed ##
#### The time cycles column may be used as an alternate y value to train to
# y_cycles_to_fail = df1.cycles_to_fail
# y_time_cycles = df1.time_cycles
#### ####
# ## view plots for the features that are to be used in df1 ######
# for name in train_features:
# df1.plot.scatter( 'cycles_to_fail', name, alpha = .3)
# plt.show()
#### remove features that do not change at all for this dataset
for c in col:
df1[c].describe()
##### adjust the data frame to choose 20 % of the engines by unmber and
##### train to a sample of 80% by number and 20% saved for test data.
# engines = list(np.random.choice(range(1,101), 20, replace= False))
test_engines = [4, 18, 19, 21, 28, 33, 42, 45, 46, 50, 61, 73, 74, 78, 82, 83, 84, 86, 92, 94]
train_engines = []
for num in range(1,101):
if num not in test_engines:
train_engines.append(num)
# #
train_engines = [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 20, 22, 23, 24, 25, 26,
27, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 43, 44, 47, 48, 49, 51, 52, 53, 54, 55,
56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 75, 76, 77, 79, 80, 81, 85,
87, 88, 89, 90, 91, 93, 95, 96, 97, 98, 99, 100]
train_engines
test_engines
# for eng in train_engines:
# if eng in test_engines:
# print(True)
# else:
# print(False)
test_idx = df1['unit'].apply(lambda x: x in test_engines)
train_idx = df1['unit'].apply(lambda x: x in train_engines)
test_idx
train_idx
type(test_idx)
type(train_idx)
test_list = list(test_idx)
train_list = list(train_idx)
df_new_test = df1.iloc[test_list].copy()
df_new_train = df1.iloc[train_list].copy()
df_new_test.shape
df_new_train.shape
#############################################################
test = df1.iloc[test_list].copy()
train = df1.iloc[train_list].copy()
# def get_data():
# #get train data
# train_data_path ='train.csv'
# train = pd.read_csv(train_data_path)
# #get test data
# test_data_path ='test.csv'
# test = pd.read_csv(test_data_path)
# return train , test
def get_combined_data():
    """Combine the module-level ``train`` and ``test`` frames into one feature frame.

    Extracts the training target (``y_failure``) first, then stacks train over
    test and strips the identifier/target columns so only features remain.

    Returns:
        tuple: ``(combined, target)`` — the stacked feature frame and train's
        ``y_failure`` series.

    NOTE(review): mutates the module-level ``train`` frame in place (drops its
    ``y_failure`` column), exactly as the original did.
    """
    target = train.y_failure
    train.drop(['y_failure'], axis=1, inplace=True)
    # BUG FIX: DataFrame.append was removed in pandas 2.0; pd.concat is the
    # supported equivalent of the original train.append(test).
    combined = pd.concat([train, test])
    combined.reset_index(drop=False, inplace=True)
    combined.drop(['index', 'unit', 'y_failure', 'cycles_to_fail'],
                  inplace=True, axis=1)
    return combined, target
#Load train and test data into pandas DataFrames
# train_data, test_data = get_data()
#Combine train and test data to process them together
combined, target = get_combined_data()
###############################################################
combined.describe()
###### this will make a list of the max number of cycles for the training set of engines
##
train_eng_max_cycles = []
for e in train_engines:
train_eng_max_cycles.append(max(df1['time_cycles'][df1['unit']==e]))
train_eng_max_cycles
stats.describe(train_eng_max_cycles)
# DescribeResult(nobs=80, minmax=(128, 362),
# mean=203.4375, variance=2055.6922468354433,
# skewness=1.063155863408599, kurtosis=1.5047506637832253)
####### the max number of cycles for the test set of engines ########
test_eng_max_cycles = []
for e in test_engines:
test_eng_max_cycles.append(max(df1['time_cycles'][df1['unit']==e]))
test_eng_max_cycles
#### Fit pipeline from engine pipeline script ##################
##################################################################
def get_cols_with_no_nans(df, col_type):
    '''Return the names of df's columns (of the requested kind) with no NaNs.

    Arguments :
    df : The dataframe to process
    col_type :
          num : numerical columns only
          no_num : non-numerical columns only
          all : any column
    Prints an error and returns 0 for an unknown col_type.
    '''
    selectors = {
        'num': lambda frame: frame.select_dtypes(exclude=['object']),
        'no_num': lambda frame: frame.select_dtypes(include=['object']),
        'all': lambda frame: frame,
    }
    selector = selectors.get(col_type)
    if selector is None:
        print('Error : choose a type (num, no_num, all)')
        return 0
    candidates = selector(df)
    # Keep only columns whose values contain no missing entries.
    return [name for name in candidates.columns if not df[name].isnull().any()]
num_cols = get_cols_with_no_nans(combined , 'num')
cat_cols = get_cols_with_no_nans(combined , 'no_num')
combined = combined[num_cols + cat_cols]
combined.hist(figsize = (12,10))
plt.show()
####### Get heatmap of coorilations ##################
train_data = train_data[num_cols + cat_cols]
train_data['y_failure'] = target
C_mat = train_data.corr()
fig = plt.figure(figsize = (15,15))
sb.heatmap(C_mat, vmax = .8, square = True)
plt.show()
########### Train to life remaining ######################
###########@@@@@@@@ Target is % of life remaining @@@@@########################
NN_model = Sequential()
# The Input Layer :
NN_model.add(Dense(128, kernel_initializer='normal',input_dim = train.shape[1], activation='relu'))
# The Hidden Layers :
NN_model.add(Dense(256, kernel_initializer='normal',activation='relu'))
NN_model.add(Dense(256, kernel_initializer='normal',activation='relu'))
NN_model.add(Dense(256, kernel_initializer='normal',activation='relu'))
# The Output Layer :
NN_model.add(Dense(1, kernel_initializer='normal',activation='linear'))
# Compile the network :
NN_model.compile(loss='mean_absolute_error', optimizer='adam', metrics=['mean_absolute_error'])
NN_model.summary()
checkpoint_name = 'Weights-{epoch:03d}--{val_loss:.5f}.hdf5'
checkpoint = ModelCheckpoint(checkpoint_name, monitor='val_loss', verbose = 1, save_best_only = True, mode ='auto')
callbacks_list = [checkpoint]
NN_model.fit(train, target, epochs=15, batch_size=32, validation_split = 0.2, callbacks=callbacks_list)
# Load wights file of the best model :
wights_file = 'Weights-002--0.01048.hdf5' # choose the best checkpoint
NN_model.load_weights(wights_file) # load it
NN_model.compile(loss='mean_absolute_error', optimizer='adam', metrics=['mean_absolute_error'])
# ########### Train to y_failure (0-1) ######################
# ## This will make the train test split for this model ####
# ytrain = df_new_train['y_failure']
# X_features = df_new_train[train_features]
# ytest = df_new_test['y_failure']
# X_test_features = df_new_test[train_features]
test = df1.iloc[test_list].copy()
train = df1.iloc[train_list].copy()
train_y = train['y_failure']
train_X = train.copy()
test_y = test['y_failure']
test_X = test.copy()
# train_X, val_X, train_y, val_y = train_test_split(train, target, test_size = 0.25, random_state = 14)
model = RandomForestRegressor(n_estimators = 100)
model.fit(train_X,train_y)
# Get the mean absolute error on the validation data
predicted_life = model.predict(test_X)
MAE = mean_absolute_error(test_y , predicted_life)
print('Random forest validation MAE = ', MAE)
################### XG Boost Model #########################
XGBModel = XGBRegressor()
XGBModel.fit(train_X,train_y , verbose=False)
# Get the mean absolute error on the validation data :
XGBpredictions = XGBModel.predict(test_X)
MAE = mean_absolute_error(test_y , XGBpredictions)
print('XGBoost validation MAE = ',MAE)
#
# ##### Plot the data from the first model and evaluate the residuals
plt.scatter(XGBpredictions, val_y, alpha = 0.1)
plt.xlabel('y hat from training set')
plt.ylabel( 'y values from training set')
plt.show()
###
plt.scatter(predicted_life, val_y, alpha = 0.1)
plt.xlabel('y hat from training set')
plt.ylabel( 'y values from training set')
plt.show()
### Second plot that will show the difference from actuals vs pred
fig = plt.figure()
fig, ax = plt.subplots(figsize=(15,15) )
ax.plot(list(range(1, len(L_y_predicted) + 1)) , L_y_predicted, '.r', label='predicted')
ax.plot(list(range(1, len(ytrain) + 1 )) , ytrain, '.b' , label='actual')
plt.xlabel('Index of Value')
plt.ylabel( 'Cycles to Fail')
ax.legend()
plt.show()
## First score from basic linear regression model ####
base_score = r2(ytrain, L_y_predicted)
base_score
linear_model_80_engine = base_score
linear_model_80_engine
##### score of model no tuning trained to time cycles to go
## 0.5302416225409862
#### score of model with no tuning trained to cycles remaining
## 0.5302416225409862
##
### There is no difference between the two which makes sense.
#### Linear model 80 engine split
# linear_model_80_engine
# 0.6004573742141459
# Begin spline analysis of each significant feature
###### plot the full range of each engine against the cycles to fail
fig, axs = plt.subplots(3, 5, figsize=(14, 8))
univariate_plot_names = df1[train_features] #columns[:-1]
for name, ax in zip(univariate_plot_names, axs.flatten()):
plot_univariate_smooth(ax,
df1['cycles_to_fail'],
df1[name].values.reshape(-1, 1),
bootstrap=100)
ax.set_title(name, fontsize=7)
plt.show()
#### Plot each feature individually.
### (ax, df, y, var_name,
for col in train_features:
fig, ax = plt.subplots(figsize=(12, 3))
plot_one_univariate(ax, df1, 'cycles_to_fail', col )
ax.set_title("Evaluation of: " + str(col))
plt.xlabel(col)
plt.ylabel( 'Cycles to Fail')
plt.show()
### Begining of the linear spline transformation parameters #######
linear_spline_transformer = LinearSpline(knots=[10, 35, 50, 80, 130, 150, 200, 250, 300])
linear_spline_transformer.transform(df1['cycles_to_fail']).head()
cement_selector = ColumnSelector(name='cycles_to_fail')
cement_column = cement_selector.transform('cycles_to_fail')
linear_spline_transformer.transform(cement_column).head()
train_features
train_features = [
'time_cycles',
't24_lpc',
't30_hpc',
't50_lpt',
'p30_hpc',
'nf_fan_speed',
'nc_core_speed',
'ps_30_sta_press',
'phi_fp_ps30',
'nrf_cor_fan_sp',
'nrc_core_sp',
'bpr_bypass_rat',
'htbleed_enthalpy',
'w31_hpt_cool_bl',
'w32_lpt_cool_bl']
#### Build out the new dataframes with each knot
#### Must use the 80 engine traing set !!!!!!!
# feature_pipeline.fit(df_new_train)
# features = feature_pipeline.transform(df_new_test)
### Fit model to the pipeline #######
ytest
features
model = LinearRegression(fit_intercept=True)
model.fit(df_new_train[train_features], ytrain) #np.log(ytrain) # <---- note: the np.log transformation
len(ytest)
len(features)
len(y_hat)
#### View the coefficients
display_coef(model, features.columns)
plt.plot(range(0,len(model.coef_)), model.coef_)
plt.show()
ytest
y_hat
#### Make predictions against the training set
y_hat = model.predict(df_new_test[train_features])
y_hat = y_hat # np.exp(y_hat) ## <----- note: the exp to transform back
'''
polynomeal analysis
'''
import numpy as np
from scipy.optimize import curve_fit
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model
poly = PolynomialFeatures(degree=2)
X_ = poly.fit_transform(df_new_train[train_features])
predict_ = poly.fit_transform(df_new_train[train_features])
predict_ = poly.fit_transform(df_new_test[train_features])
clf = linear_model.LinearRegression()
clf.fit(X_, ytrain)
y_hat = clf.predict(predict_)
len(y_hat)
len(ytest)
'''
the above is the alternate quadratic test
'''
#### Plot predictions from data against the actual values ########
x = list(range(0,400))
y = x
plt.scatter(y_hat, ytest, alpha = 0.1, color='blue')
plt.plot(x, y, '-r', label='y=2x+1')
plt.title('Polynomial Regression: Cycles to Fail')
plt.xlabel('$\hat {y}$ from test set')
plt.ylabel( '${y}$ from test set')
plt.ylim(0, 360)
plt.xlim(370, -0.1)
plt.show()
###
clf.coef_
predict_.shape
#### Second plot that will show the difference from actuals vs pred for the pipeline model ######
fig, ax = plt.subplots(figsize=(15,15) )
ax.plot(list(range(1, len(y_hat) + 1)) , y_hat, '.r', label='predicted')
ax.plot(list(range(1, len(ytest) + 1 )) , ytest, '.b' , label='actual')
plt.xlabel('Index of Value')
plt.ylabel( 'Cycles to Fail')
ax.legend()
plt.show()
##########################################
# #print(num)
# max_cycles.append(max(df['time_cycles'][df['unit']==num] ) )
# ax.set_title('Plot number {}'.format(i))
##### this is the plot of all 80 engines on a single chart
fig, axs = plt.subplots(8,10, figsize=(10,4))
ax.set_title("Spline Model of 80 Training Engines")
start_idx = 0
for idx, ax in enumerate(axs.flatten()):
# for idx, e in enumerate(train_engines):
end_idx = start_idx + train_eng_max_cycles[idx]
print(start_idx, end_idx, train_eng_max_cycles[idx], end_idx-start_idx)
# fig, ax = plt.subplots(figsize=(15,15) )
# ax.plot(y_hat[start_idx : end_idx], list(range(train_eng_max_cycles[idx], 0, -1)), '.r', label='predicted')
# ax.plot(ytrain[start_idx : end_idx] , list(range(train_eng_max_cycles[idx], 0, -1)) , '-b' , label='actual')
ax.plot(list(range(train_eng_max_cycles[idx], 0, -1)) , y_hat[start_idx : end_idx], '.r', label='predicted')
ax.plot(list(range(train_eng_max_cycles[idx], 0, -1)) , ytrain[start_idx : end_idx] , '-b' , label='actual')
ax.set_title("Engine # " + str(train_engines[idx]), size=6)
# plt.tick_params(axis='both', which='major', labelsize=8)
# plt.tick_params(axis='both', which='minor', labelsize=6)
# plt.xticks(fontsize=8) #, rotation=90)
# plt.title('Engine #: ' + str(train_engines[idx]))
# plt.xlabel('Index')
# plt.ylabel( 'Cycles to Fail')
# ax.legend()
ax.set_ylim(0, 1.1)
ax.set_xlim(350 , 0)
ax.xaxis.set_tick_params(labelsize=5)
ax.yaxis.set_tick_params(labelsize=5)
start_idx = end_idx
# plt.show()
# plt.tight_layout()
plt.show()
##### Test Set of data ###############################
##### this is the plot of all 20 test engines on a single chart
fig, axs = plt.subplots(4, 5 , figsize=(10,4))
ax.set_title("Spline Model of 20 Test Engines")
start_idx = 0
for idx, ax in enumerate(axs.flatten()):
# for idx, e in enumerate(train_engines):
end_idx = start_idx + test_eng_max_cycles[idx]
print(start_idx, end_idx, test_eng_max_cycles[idx], end_idx-start_idx)
# fig, ax = plt.subplots(figsize=(15,15) )
# ax.plot(y_hat[start_idx : end_idx], list(range(train_eng_max_cycles[idx], 0, -1)), '.r', label='predicted')
# ax.plot(ytrain[start_idx : end_idx] , list(range(train_eng_max_cycles[idx], 0, -1)) , '-b' , label='actual')
ax.plot(list(range(test_eng_max_cycles[idx], 0, -1)) , y_hat[start_idx : end_idx], '.r', label='predicted')
ax.plot(list(range(test_eng_max_cycles[idx], 0, -1)) , ytest[start_idx : end_idx] , '-b' , label='actual')
ax.set_title("Engine # " + str(test_engines[idx]), size=6)
# plt.tick_params(axis='both', which='major', labelsize=8)
# plt.tick_params(axis='both', which='minor', labelsize=6)
# plt.xticks(fontsize=8) #, rotation=90)
# plt.title('Engine #: ' + str(train_engines[idx]))
# plt.xlabel('Index')
# plt.ylabel( 'Cycles to Fail')
# ax.legend()
ax.set_ylim(0, 1.1)
ax.set_xlim(350 , 0)
ax.xaxis.set_tick_params(labelsize=5)
ax.yaxis.set_tick_params(labelsize=5)
start_idx = end_idx
# plt.show()
# plt.tight_layout()
plt.show()
#### Third plot that will show the difference from actuals vs pred for
# # the pipeline model for each engine one by one ######
start_idx = 0
for idx, e in enumerate(train_engines):
end_idx = start_idx + train_eng_max_cycles[idx]
print(start_idx, end_idx, train_eng_max_cycles[idx], end_idx-start_idx)
fig, ax = plt.subplots(figsize=(15,15) )
ax.plot(list(range(train_eng_max_cycles[idx], 0, -1)) , y_hat[start_idx : end_idx], '.r', label='predicted')
ax.plot(list(range(train_eng_max_cycles[idx], 0, -1)) , ytrain[start_idx : end_idx] , '.b' , label='actual')
plt.title('Engine #: ' + str(e))
plt.xlabel('Index')
plt.ylabel( 'Cycles to Fail')
# plt.axvline(stats.describe(train_eng_max_cycles)[1][0], color='r', label='min' )
# plt.axvline(stats.describe(train_eng_max_cycles)[2], color='g' , label='avg' )
# plt.axvline(stats.describe(train_eng_max_cycles)[1][1], color='b' , label='max' )
plt.xlim(350,0)
plt.ylim(0 , 1.1)
ax.legend()
start_idx = end_idx
plt.show()
### Plot a predicteds-vs-actuals panel for each named feature.
def plot_many_predicteds_vs_actuals(var_names, y_hat, n_bins=50):
    # NOTE(review): reads the module-level df_new_train frame and the imported
    # predicteds_vs_actuals helper; assumes len(var_names) > 1 so axs is an
    # array of axes (a single name would make zip(axs, ...) iterate wrongly)
    # — confirm with callers.
    fig, axs = plt.subplots(len(var_names), figsize=(12, 3*len(var_names)))
    for ax, name in zip(axs, var_names):
        x = df_new_train[name]
        predicteds_vs_actuals(ax, x, df_new_train["cycles_to_fail"], y_hat, n_bins=n_bins)
        # ax.set_title("{} Predicteds vs. Actuals".format(name))
    return fig, axs
### This will plot the final estimations vs the actual data
train_features
# y_hat = model.predict(features.values)
fig, axs = plot_many_predicteds_vs_actuals(train_features, y_hat)
# fig.tight_layout()df1
plt.show()
########################## Scoreing Section ###############
#### Score of the first model against the training set.
## First score from basic linear regression model ####
log_knot_model = r2(ytrain, y_hat)
log_knot_model
# time_knot_model
# first_knot_model
# 0.64194677350961
# 0.7396060171044228
# log_knot_model
# 0.7272227017732488
#log_knot_model
# 0.7273228097635444
# R- Squared for polynomeal regression of test data is:
# 'The r-squared for the last 500 cycles is: 0.6554345266551393'
##### R-squared for the last n number of observations #####
#
ytest
y_hat
r2_for_last_n_cycles(y_hat , ytest, last_n=500)
r2_for_last_n_cycles(y_hat , ytrain, last_n=100)
r2_for_last_n_cycles(y_hat , ytrain, last_n=75)
r2_for_last_n_cycles(y_hat , ytrain, last_n=50)
r2_for_last_n_cycles(y_hat , ytrain, last_n=25)
r2_for_last_n_cycles(y_hat , ytrain, last_n=15)
r2_for_last_n_cycles(y_hat , ytrain, last_n=10)
r2_for_last_n_cycles(y_hat , ytrain, last_n=5)
################### Make a list of r squared values for plotting ##########
r2_values = r2_generator_last_n_cycles(y_hat , ytrain, 200)
######## Plot the r2 values as the number of cycles remaining approaches the end #######
##### plot the full against the cycles to fail
fig, ax = plt.subplots(1, 1, figsize=(13, 13))
ax.scatter(range(len(r2_values)+1, 1, -1) , r2_values)
plt.ylim(-2, 1)
plt.title('R Squared')
plt.xlabel('Cycles to Fail')
plt.ylabel( 'R Squared Value')
plt.legend()
plt.show()
### Plot of r-squared as the number of observations approaches 1 #########
|
"""Represents a request to perform a menu action."""
from typing import Mapping
from marshmallow import EXCLUDE, fields
from .....messaging.agent_message import AgentMessage, AgentMessageSchema
from ..message_types import PERFORM, PROTOCOL_PACKAGE
HANDLER_CLASS = f"{PROTOCOL_PACKAGE}.handlers.perform_handler.PerformHandler"
class Perform(AgentMessage):
    """Class representing a request to perform a menu action."""
    class Meta:
        """Perform metadata."""
        # Dotted names resolved lazily by the agent-message framework.
        handler_class = HANDLER_CLASS
        message_type = PERFORM
        schema_class = "PerformSchema"
    def __init__(self, *, name: str = None, params: Mapping[str, str] = None, **kwargs):
        """Initialize a Perform object.
        Args:
            name: The name of the menu option
            params: Input parameter values
            kwargs: Additional keyword arguments forwarded to AgentMessage
        """
        super().__init__(**kwargs)
        # Selected menu option and the user-supplied parameter values.
        self.name = name
        self.params = params
class PerformSchema(AgentMessageSchema):
    """Perform schema class."""
    class Meta:
        """Perform schema metadata."""
        model_class = Perform
        # Silently ignore unrecognized fields during deserialization.
        unknown = EXCLUDE
    # Required name of the menu option being performed.
    name = fields.Str(
        required=True, metadata={"description": "Menu option name", "example": "Query"}
    )
    # Optional mapping of input parameter names to chosen values.
    params = fields.Dict(
        required=False,
        keys=fields.Str(metadata={"example": "parameter"}),
        values=fields.Str(metadata={"example": "value"}),
    )
|
import math
import os
class Distancia:
    """Wraps a raw length value and converts it between units."""

    def __init__(self, numero):
        # Raw magnitude as entered by the user (centimeters).
        self.numero = numero

    def cm_to_m(self):
        """Return the stored centimeter value expressed in meters."""
        return self.numero / 100
class Tiempo:
    """Computes free-fall time for a drop height given in meters."""

    def __init__(self, distancia):
        # Drop height in meters.
        self.distancia = distancia

    def calcular_tiempo(self):
        """Return the free-fall time in seconds: sqrt(2*d / g), g = 9.81."""
        return math.sqrt(2 * self.distancia / 9.81)
# Ask whether the distance is given in meters or centimeters, then compute
# and print the free-fall time rounded to two decimals.
tipo_distancia = input("¿Su distancia esta en metros o cemtimetros? m/c: ")
if tipo_distancia == "c":
    dato = float(input("Ingrese dato: "))
    distancia = Distancia(dato)
    final = distancia.cm_to_m()
elif tipo_distancia == "m":
    final = float(input("Ingrese dato: "))
else:
    # BUG FIX: os.system("exit") only exited a throw-away subshell, after
    # which the script crashed with NameError on `final`; exit the script
    # itself instead.
    raise SystemExit("Opción inválida")
resultado = Tiempo(final)
tiempo_final = round(resultado.calcular_tiempo(), 2)
print(tiempo_final, "[s]")
|
# Generated by Django 2.0.1 on 2018-05-23 19:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: re-declares four FoodItem pricing fields,
    # adding verbose names and (for the two count fields) null/blank support.

    dependencies = [
        ('freshsheet', '0011_auto_20180523_1827'),
    ]
    operations = [
        migrations.AlterField(
            model_name='fooditem',
            name='case_count',
            field=models.PositiveIntegerField(blank=True, default=None, null=True, verbose_name='Minimum quantity for case price'),
        ),
        migrations.AlterField(
            model_name='fooditem',
            name='case_price',
            field=models.DecimalField(decimal_places=2, default=None, max_digits=5, verbose_name='Case Price'),
        ),
        migrations.AlterField(
            model_name='fooditem',
            name='wholesale_count',
            field=models.PositiveIntegerField(blank=True, default=None, null=True, verbose_name='Minimum quantity for wholesale price'),
        ),
        migrations.AlterField(
            model_name='fooditem',
            name='wholesale_price',
            field=models.DecimalField(decimal_places=2, default=None, max_digits=5, verbose_name='Wholesale Price'),
        ),
    ]
|
import collections
import datetime
import re
# Pre-compiled patterns for the Previews-style release-list format.
# Raw strings fix the invalid-escape-sequence warnings of the originals
# (pattern text is unchanged).
regexes = {
    'release': re.compile(r'(\w{3}\d{6})\s*(.*)\s*(\$(\d+\.\d+)|pi)', re.IGNORECASE),
    'publisher': re.compile(r'^(dc comics|dark horse comics|idw publishing|image comics|marvel comics)$', re.IGNORECASE),
    'series': re.compile(r'(.*?)#\d+(?!(.*poster)|.*(combo pack))', re.IGNORECASE),
    'issue_no': re.compile(r'#(\d+)(?!-)'),
    'mature': re.compile(r'\(MR\)'),
    'printing': re.compile(r'(\d+)\w{2}\s+(printing|ptg)', re.IGNORECASE),
    'date': re.compile(r'(\d+)/(\d+)/(\d+)', re.IGNORECASE),
}
# Canonical publisher names keyed by the upper-case heading text.
publisher_keys = {
    'DARK HORSE COMICS': 'Dark Horse Comics',
    'DC COMICS': 'DC Comics',
    'IDW PUBLISHING': 'IDW Publishing',
    'IMAGE COMICS': 'Image Comics',
    'MARVEL COMICS': 'Marvel Comics'
}
def parse(string):
    """Parse a comics release listing into a list of comic dicts.

    The listing groups release lines under publisher-name headings.  Each
    release line yields a dict with 'code', 'series', 'issue_number',
    'price' (cents), 'publisher', 'printing' and 'mature' keys.  Duplicate
    (series, issue) pairs and entries without a parseable dollar price are
    skipped.
    """
    lines = string.split("\n")
    lines = collections.deque([l.strip() for l in lines if l.strip()])
    output = []
    under_publisher = False
    current_publisher = None
    seen_issues = []
    while len(lines) > 0:
        if not under_publisher:
            line = lines.popleft()
            match = regexes['publisher'].match(line)
            if match:
                under_publisher = True
                current_publisher = publisher_keys[match.group(0)]
        else:
            line = lines.popleft()
            match = regexes['release'].match(line)
            if match:
                release_info = parse_release_info(match.group(2))
                if release_info != {}:
                    if (release_info['series'], release_info['issue_number']) in seen_issues:
                        continue
                    try:
                        # Price group is None for "pi" (price-on-request)
                        # entries, so float() raises TypeError.  BUG FIX:
                        # the original bare `except:` swallowed everything.
                        price = int(float(match.group(4)) * 100)
                    except (TypeError, ValueError):
                        continue
                    comic = {
                        'code': match.group(1),
                        'series': release_info['series'],
                        'issue_number': release_info['issue_number'],
                        'price': price,
                        'publisher': current_publisher,
                        'printing': release_info['printing'] or 1,
                        'mature': release_info['mature'] or False,
                    }
                    output.append(comic)
                    seen_issues.append((release_info['series'], release_info['issue_number']))
            else:
                # A non-release line ends the current publisher section;
                # push it back so it can be re-examined as a new heading.
                under_publisher = False
                lines.appendleft(line)
    return output
def parse_release_info(string):
    """Extract series/issue/mature/printing info from a release title.

    Returns {} when the series or issue number cannot be found; otherwise a
    defaultdict (missing keys -> None) with 'series', 'issue_number' and
    optional 'mature' / 'printing' entries.
    """
    output = collections.defaultdict(lambda: None)
    series_match = regexes['series'].search(string)
    if series_match:
        title = series_match.group(1).title().strip()
        output['series'] = title
        # BUG FIX: re.escape keeps titles containing regex metacharacters
        # (e.g. parentheses) from raising re.error or mis-substituting.
        string = re.sub(re.escape(title), '', string).strip()
    else:
        return {}
    issue_match = regexes['issue_no'].search(string)
    if issue_match:
        output['issue_number'] = int(issue_match.group(1))
    else:
        return {}
    mature_match = regexes['mature'].search(string)
    if mature_match:
        output['mature'] = True
    printing_match = regexes['printing'].search(string)
    if printing_match:
        output['printing'] = int(printing_match.group(1))
    return output
def parse_date(string):
    """Return the first M/D/Y date found in *string* as a datetime.date.

    Blank lines are skipped; returns None when no line contains a date.
    """
    for raw in string.split("\n"):
        stripped = raw.strip()
        if not stripped:
            continue
        match = regexes['date'].search(stripped)
        if match:
            month, day, year = (int(part) for part in match.groups())
            return datetime.date(year, month, day)
|
#! /usr/bin/env python3
"""
코틀린 코드를 실행하는 숏컷.
PATH 경로에 넣어두면 편하다.
"""
import sys
import os
import shlex
import subprocess
def usage():
    """Print a one-line usage message naming this script."""
    print("Usage: %s [InputFile.kt]" % sys.argv[0])
# Require exactly one argument: the Kotlin source file to build and run.
if len(sys.argv) != 2:
    usage()
    sys.exit(0)
filename = sys.argv[1]
# Only .kt files are accepted (idiom fix: `not ...endswith` instead of
# the original `== False` comparison).
if not filename.endswith('.kt'):
    usage()
    sys.exit(0)
filebasename = filename[:-3]
# Rebuild the jar from scratch, then run it; stop at the first failure.
cmd_list = [
    'rm -f %s.jar' % filebasename,
    'kotlinc %s -include-runtime -d %s.jar' % (filename, filebasename),
    'kotlin %s.jar' % filebasename
]
for cmd in cmd_list:
    retcode = subprocess.call(shlex.split(cmd))
    if retcode != 0:
        print("Failed to execute: %s" % cmd)
        sys.exit(retcode)
|
'''
general work in chapter four
also, exercise 4-10. Slices:
print the first three items from a list
print three items from the middle of the list
print the last three items from a list
also, exercise 4-11. More Loops:
use for loops to print two of the food lists
'''
my_foods = ['mice', 'voles', 'grasshoppers', 'sparrows']
print(my_foods)
# Slice-copy: your_foods is an independent list.
your_foods = my_foods[:]
print(your_foods)
my_foods.append('squid')
print(my_foods)
print(your_foods)  # unchanged — it was a copy
# Rebind: your_foods is now an alias of my_foods (same object).
your_foods = my_foods
print(my_foods)
print(your_foods)
my_foods.append('chihuahuas')
print(my_foods)
print(your_foods)  # shows 'chihuahuas' too — aliases share mutations
# Exercise 4-10: first three, three middle, and last three items.
print("The first three items from my_foods are:")
for food in my_foods[:3]:
    print(food)
print("Three middle items from my_foods are:")
for food in my_foods[1:4]:
    print(food)
print("The last three items from my_foods are:")
for food in my_foods[-3:]:
    print(food)
# Exercise 4-11: loop over both lists (identical output — they alias).
for food in my_foods:
    print(food)
for food in your_foods:
    print(food)
|
import pymysql
class Connect:
def __init__(self):
try:
self.conn = pymysql.connect(host = "localhost", user="root", password="zaq12wsx", db="Restauracja", port=3307,
charset='utf8')
print("polaczenie ustanowione")
self.wybor()
except:
print("bledne dane")
    def wybor(self):
        # Top-level choice: browse as a guest (1) or log in as admin (2).
        # Recurses on invalid input until a valid option is entered.
        dec = input("1.Gość 2.Logowanie")
        if dec =="1":
            self.menu()
        elif dec=="2":
            self.logowanie()
        else:
            print("Błędna wartość")
            self.wybor()
    def logowanie(self):
        # Prompt for credentials and check them against the `logowanie`
        # table (parameterized query — safe from SQL injection).
        login = input("podaj login")
        passw = input("podaj haslo")
        self.cursor = self.conn.cursor()
        self.cursor.execute("Select * from logowanie WHERE login=%s and passwd=%s", (login,passw))
        resultsLogs = self.cursor.fetchall()
        if(len(resultsLogs) == 1):
            print("zalogowano w systemie")
            self.admenu()
        else:
            # NOTE(review): recursing on each failed attempt grows the call
            # stack; an iterative retry loop would be safer.
            print("niepoprawny login lub haslo")
            self.logowanie()
def menu(self):
while(True):
dec = input("P-Pizza, D-Dania, K-Kontakt, Q-Wyjdź").upper()
if(dec=="P"):
self.Pizza()
elif(dec=="D"):
self.Dania()
elif(dec=="K"):
self.Kontakt()
elif(dec=="Q"):
print("Koniec")
break
def Pizza(self):
self.cursor = self.conn.cursor()
self.cursor.execute("select * from pizza")
pizza = self.cursor.fetchall()
for row in pizza:
nazwa = 1
skladniki = 2
Cena = 3
print(row[nazwa], row[skladniki], row[Cena])
def Dania(self):
self.cursor = self.conn.cursor()
self.cursor.execute("select * from Dania")
dania = self.cursor.fetchall()
for row in dania:
nazwa = 1
skladniki = 2
Cena = 3
print(row[nazwa], row[skladniki], row[Cena])
def Kontakt(self):
self.cursor = self.conn.cursor()
self.cursor.execute("select * from Kontakt")
kontakt = self.cursor.fetchall()
for row in kontakt:
cel = 0
numer = 1
print(row[cel], row[numer])
def admenu(self):
while(True):
dec = input("A-Aktulizacja, U-Usuwanie, D-Dodawanie, Q-Wyjdź").upper()
if(dec=="A"):
self.AktualizujMenu()
elif (dec=="U" ):
self.UsunMenu()
elif(dec=="D"):
self.DodajMenu()
elif(dec=="Q"):
print("Koniec")
break
def APizza(self):
self.cursor = self.conn.cursor()
self.cursor.execute("select * from pizza")
pizza = self.cursor.fetchall()
for row in pizza:
id = 0
nazwa = 1
skladniki = 2
Cena = 3
print(row[id] ,row[nazwa], row[skladniki], row[Cena])
def AktualizujMenu(self):
while(True):
dec = input("P-Pizza, D-Dania, K-Kontakt W-Wróć").upper()
if(dec=="P"):
self.AktualizujPizza()
elif(dec=="D"):
self.AktualizujDania()
elif(dec)=="K":
self.AktualizujKontakt()
elif(dec=="W"):
self.admenu()
def UsunMenu(self):
while(True):
dec = input("P-Pizza, D-Dania, K-Kontakt W-Wróć").upper()
if(dec=="P"):
self.UsunPizza()
elif(dec=="D"):
self.UsunDania()
elif (dec == "K"):
self.UsunKontakt()
elif(dec=="W"):
self.admenu()
def DodajMenu(self) :
while(True):
dec = input("P-Pizza, D-Dania, K-Kontakt W-Wróć").upper()
if(dec=="P"):
self.DodaPizza()
elif(dec=="D"):
self.DodajDania()
elif(dec=="K"):
self.DodajKontakt()
elif(dec=="W"):
self.admenu()
def AktualizujPizza(self):
self.APizza()
decp = input("Co chcesz uaktulnić N - Nazwe, S-Składniki, C-Cene").upper()
if(decp=="N"):
DP = input("podaj id")
NW = input("podaj nową nazwe")
self.cursor.execute("UPDATE pizza SET nazwa = %s WHERE id = %s", (NW,DP))
dec = input("Czy zatwierdzasz zmiany T/N").upper()
if(dec=="T"):
self.conn.commit()
print("wartość zaktualizowano")
else:
self.conn.rollback()
print("Come to menu")
elif (decp == "S"):
DP = input("podaj id")
NW = input("podaj nowe składniki")
self.cursor.execute("UPDATE pizza SET skladniki = %s WHERE id = %s", (NW, DP))
dec = input("Czy zatwierdzasz zmiany T/N").upper()
if (dec == "T"):
self.conn.commit()
print("wartość zaktualizowano")
else:
self.conn.rollback()
print("Come to menu")
elif (decp == "C"):
DP = input("podaj id")
NW = input("podaj nową cene")
self.cursor.execute("UPDATE pizza SET Cena = %s WHERE id = %s", (NW, DP))
dec = input("Czy zatwierdzasz zmiany T/N").upper()
if (dec == "T"):
self.conn.commit()
print("wartość zaktualizowano")
else:
self.conn.rollback()
print("Come to menu")
def UsunPizza(self):
self.APizza()
ID = input("Podaj ID")
self.cursor.execute("DELETE FROM pizza WHERE id = %s", ID)
dec = input("Czy na pewno chcesz usunąc rekord T/N").upper()
if(dec=="T"):
self.conn.commit()
print("usunięto rekord")
else:
self.conn.rollback()
print("Come to MENU")
def DodaPizza(self):
nazwa = input("Nazwa")
skladniki = input("Składniki")
cena = input("Cena")
self.cursor.execute("INSERT INTO pizza(nazwa,skladniki,Cena) values (%s,%s,%s)", (nazwa,skladniki,cena))
self.conn.commit()
def ADania(self):
self.cursor = self.conn.cursor()
self.cursor.execute("select * from Dania")
pizza = self.cursor.fetchall()
for row in pizza:
id = 0
nazwa = 1
skladniki = 2
Cena = 3
print(row[id] ,row[nazwa], row[skladniki], row[Cena])
def AktualizujDania(self):
self.ADania()
decp = input("Co chcesz uaktulnić N - Nazwe, S-Składniki, C-Cene").upper()
if(decp=="N"):
DP = input("podaj id")
NW = input("podaj nową nazwe")
self.cursor.execute("UPDATE Dania SET nazwa = %s WHERE id = %s", (NW,DP))
dec = input("Czy zatwierdzasz zmiany T/N").upper()
if(dec=="T"):
self.conn.commit()
print("wartość zaktualizowano")
else:
self.conn.rollback()
print("Come to menu")
elif (decp == "S"):
DP = input("podaj id")
NW = input("podaj nowe składniki")
self.cursor.execute("UPDATE Dania SET skladniki = %s WHERE id = %s", (NW, DP))
dec = input("Czy zatwierdzasz zmiany T/N").upper()
if (dec == "T"):
self.conn.commit()
print("wartość zaktualizowano")
else:
self.conn.rollback()
print("Come to menu")
elif (decp == "C"):
DP = input("podaj id")
NW = input("podaj nową cene")
self.cursor.execute("UPDATE Dania SET Cena = %s WHERE id = %s", (NW, DP))
dec = input("Czy zatwierdzasz zmiany T/N").upper()
if (dec == "T"):
self.conn.commit()
print("wartość zaktualizowano")
else:
self.conn.rollback()
print("Come to menu")
def UsunDania(self):
self.ADania()
self.cursor = self.conn.cursor()
ID = input("Podaj ID")
self.cursor.execute("DELETE FROM Dania WHERE id = %s", ID)
dec = input("Czy na pewno chcesz usunąc rekord T/N").upper()
if(dec=="T"):
self.conn.commit()
print("usunięto rekord")
else:
self.conn.rollback()
print("Come to MENU")
def DodajDania(self):
self.cursor = self.conn.cursor()
nazwa = input("Nazwa")
skladniki = input("Składniki")
cena = input("Cena")
self.cursor.execute("INSERT INTO Dania(nazwa,skladniki,Cena) values (%s,%s,%s)", (nazwa,skladniki,cena))
self.conn.commit()
def AktualizujKontakt(self):
self.Kontakt()
decp = input("Co chcesz uaktulnić C - Cel, N-Numer").upper()
if(decp=="C"):
NK = input("podaj numer")
CK = input("podaj nowy cel")
self.cursor.execute("UPDATE kontakt SET cel = %s WHERE numer = %s", (CK,NK))
dec = input("Czy zatwierdzasz zmiany T/N").upper()
if(dec=="T"):
self.conn.commit()
print("wartość zaktualizowano")
else:
self.conn.rollback()
print("Come to menu")
elif (decp == "S"):
CK = input("podaj Cel")
NK = input("podaj nowy numer")
self.cursor.execute("UPDATE kontakt SET numer = %s WHERE cel = %s", (NK, CK))
dec = input("Czy zatwierdzasz zmiany T/N").upper()
if (dec == "T"):
self.conn.commit()
print("wartość zaktualizowano")
else:
self.conn.rollback()
print("Come to menu")
def UsunKontakt(self):
self.Kontakt()
self.cursor = self.conn.cursor()
CK = input("Podaj cel")
self.cursor.execute("DELETE FROM kontakt WHERE cel = %s", CK)
dec = input("Czy na pewno chcesz usunąc rekord T/N").upper()
if(dec=="T"):
self.conn.commit()
print("usunięto rekord")
else:
self.conn.rollback()
print("Come to MENU")
def DodajKontakt(self):
self.Kontakt = self.conn.cursor()
cel = input("Cel")
numer = input("Numer")
self.cursor.execute("INSERT INTO kontakt(cel,numer) values (%s,%s)", (cel,numer))
self.conn.commit()
Connect = Connect() |
from django.shortcuts import render
from shop.models import Shop
from django_user_agents.utils import get_user_agent
from django.http import HttpResponse,Http404
def index(request):
    """Render the top page, selecting a template by device type.

    NOTE(review): both branches choose the same template
    ('sp/index.html'); the desktop branch presumably should point at a
    separate PC template — confirm the intended path.
    """
    if get_user_agent(request).is_mobile:
        templatePath = 'sp/index.html'
    else:
        templatePath = 'sp/index.html'
    return render(request, templatePath)
def map(request):
    """Render the shop map page."""
    template_name = 'sp/map.html'
    return render(request, template_name)
def Arealist(request, param):
    """Render a paginated shop list filtered by prefecture/city/category.

    *param* is a string such as "pref=13city=100_101page=2"; each
    key=value pair is extracted by regex, so the pair order is irrelevant.
    """
    from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
    from shop.models import Pref, City
    import re
    # Defaults: pref 13, no city/category filter, first page.
    pref_id, city_id, category_id, page = 13, None, None, 1
    r = re.compile("(pref|city|category|page)=([0-9_]+)")
    for ids in r.findall(param):
        if ids[0] == 'pref':
            pref_id = ids[1]
        elif ids[0] == 'city':
            city_id = ids[1]
        elif ids[0] == 'category':
            category_id = ids[1]
        elif ids[0] == 'page':
            page = ids[1]
    where = {}
    where['pref_id'] = pref_id
    if city_id is not None:
        # Multiple city ids are underscore-separated, e.g. "100_101".
        cities = city_id.split('_')
        where['city_id__in'] = cities
        if len(cities) == 1:
            AreaModel = City.objects.filter(city_id__in = cities).values('name').first()
    try:
        # When no single city was selected, AreaModel is unbound here and
        # this raises NameError, which the bare except below turns into
        # the prefecture-name fallback (it also covers City lookup misses).
        areaName = AreaModel['name']
    except:
        AreaModel = Pref.objects.filter(pref_id = pref_id).values('name').first()
        areaName = AreaModel['name']
    listData = Shop.objects.filter(**where).values('id', 'name', 'address', 'holiday', 'time', 'dartslive', 'phoenix', 'station')
    # 20 shops per page; bad page numbers fall back to first/last page.
    paginator = Paginator(listData, 20)
    try:
        shopList = paginator.page(page)
    except PageNotAnInteger:
        shopList = paginator.page(1)
    except EmptyPage:
        shopList = paginator.page(paginator.num_pages)
    # Eventually want to switch the rendering to react-python.
    return render(request, 'sp/list.html', {
        'shopList': shopList,
        'areaName': areaName,
        'pref': pref_id,
        'city': city_id,
        'category': category_id,
    })
def shop(request, shop_id):
    """Render the detail page for a single shop."""
    context = {'shopData' : Shop.objects.filter(id = shop_id).first()}
    return render(request, 'sp/detail.html', context)
def ajaxDetail(request):
    """Return every shop's (latitude, longitude, name, id) as a JSON array."""
    import json
    rows = list(Shop.objects.values_list('latitude', 'longitude', 'name', 'id'))
    payload = json.dumps(rows, default=decimal_default)
    return HttpResponse(payload)
def decimal_default(obj):
    """``json.dumps`` ``default`` hook: serialise Decimal values as floats.

    Raises:
        TypeError: for any other type, as the json protocol requires.
            (Improvement over the original bare ``raise TypeError``: the
            exception now carries an explanatory message.)
    """
    import decimal
    if isinstance(obj, decimal.Decimal):
        return float(obj)
    raise TypeError('Object of type %s is not JSON serializable' % type(obj).__name__)
|
# ComKom1b.py
# Bug fix: "print html" is Python 2 statement syntax and a SyntaxError
# under Python 3; use the print() function instead.
html = "<html>\n <body>\n TigerJython Web Site\n </body>\n</html>"
print(html)
|
##-----------------------------
# Pycrash Course
# Eric Matthes
# Cap. 10 - Arquivos (Files)
# write_message.py, p.264
##-----------------------------
# Write a one-line message to a file, then reopen it and echo it back.
filename = 'programming.txt'
with open(filename, 'w') as out_file:
    out_file.write('I love programming!')
with open(filename, 'r') as in_file:
    print(in_file.read())
|
import preprocess
import pandas as pd
import os
from datetime import datetime
# Builds train/test CSVs for a house-price task: merges per-builder txt
# files into one data.csv, expands the two date columns into calendar
# features, then splits rows by whether the target price is known.
# labels / features of the dataframe
labels = ["BUILDER", "House ID", "DATE_BUILT", "DATE_PRICED", "LOCATION",
          "DST_DOCK", "DST_CAPITAL", "DST_MARKET", "DST_TOWER",
          "DST_RIVER", "DST_KNIGHT_HS", "FRNT_FARM_SZ", "GARDEN",
          "RENOVATION", "TREE", "KING_VISIT", "CURSE", "N_BED",
          "N_BATH", "N_DINING", "N_BLESS"]
# creating an empty dataframe with headers
dataFRAME = pd.DataFrame(columns=labels)
dataFRAME.to_csv("../csv/data.csv", index=False, header=True, sep=',')
# ../data_files contain all the data in seperate txt
# files corresponding to each builder
# NOTE: os.chdir calls below make the script depend on its starting
# working directory; the relative paths assume the repo layout.
os.chdir('../data_files')
files = os.listdir()
# preprocess.PREPROCESS shall extract and append the data to data.csv
for f in files:
    preprocess.PREPROCESS(f, labels, "../csv/data.csv")
    pass
os.chdir('../csv')
# Importing house id's which are missing as missing
missing = pd.read_csv("missing.csv")
# House Id's whose Golden Grains are known as target
target = pd.read_csv("house_prices.csv")
# full data, training + testing (known + unknown)
data = pd.read_csv("data.csv")
# assumes dates look like "12/31/2020 11:59 PM" — TODO confirm with data
d_format = '%m/%d/%Y %I:%M %p'
data['DATE_BUILT'] = [datetime.strptime(
    x, d_format) for x in data['DATE_BUILT']]
data['DAY_BUILT'] = data['DATE_BUILT'].apply(lambda x: x.day)
data['MONTH_BUILT'] = data['DATE_BUILT'].apply(lambda x: x.month)
data['YEAR_BUILT'] = data['DATE_BUILT'].apply(lambda x: x.year)
data['TIME_BUILT'] = data['DATE_BUILT'].apply(lambda x: x.strftime("%H%M"))
data['DATE_BUILT'] = data['DATE_BUILT'].apply(lambda x: x.timestamp())
data['DATE_PRICED'] = [datetime.strptime(
    x, d_format) for x in data['DATE_PRICED']]
data['DAY_PRICED'] = data['DATE_PRICED'].apply(lambda x: x.day)
data['MONTH_PRICED'] = data['DATE_PRICED'].apply(lambda x: x.month)
data['YEAR_PRICED'] = data['DATE_PRICED'].apply(lambda x: x.year)
data['TIME_PRICED'] = data['DATE_PRICED'].apply(lambda x: x.strftime("%H%M"))
data['DATE_PRICED'] = data['DATE_PRICED'].apply(lambda x: x.timestamp())
# selecting rows whose Golden grains are known, a.k.a present in target data
train = data.loc[data['House ID'].isin(target['House ID'])]
train = pd.merge(train, target, on="House ID", how='inner')
# Selecting rows whose Golden Grains are not known
test = data.loc[data['House ID'].isin(missing['House ID'])]
# sorting the test DF, for submission to work
test = test.sort_values(['House ID'], ascending=True)
# saving the dataframe as csv
train.to_csv('train.csv', index=False, header=True)
test.to_csv('test.csv', index=False, header=True)
|
from rest_framework import serializers
from strategy.models import Strategy
class StrategySerializers(serializers.ModelSerializer):
    """DRF serializer exposing the listed Strategy fields verbatim."""
    class Meta:
        model = Strategy
        fields = ('id', 'strategy_title', 'strategy_content', 'publish_date', 'browse_count', 'pay_count', 'user_id', 'is_pay_money')
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from pymongo import MongoClient
import re
class JobparserPipeline:
    """Scrapy pipeline: normalise price/name fields per spider, then store
    each item in MongoDB (database "books", one collection per spider)."""

    def __init__(self):
        client = MongoClient('localhost', 27017)
        self.mongo_base = client.books

    def process_item(self, item, spider):
        """Clean spider-specific fields and insert the item into Mongo."""
        if spider.name == 'Labirint':
            try:
                item['main_price'] = int(item['main_price'])
            except (TypeError, ValueError):
                # Bug fix: bare except narrowed — only conversion failures
                # are expected; anything else should surface.
                print(f"Error to convert main_price {item['main_price'] }")
            # Strip everything up to the first ':' (no-op when absent:
            # find() returns -1, so the slice starts at 0).
            item['name'] = item['name'][item['name'].find(':')+1:]
        elif spider.name == 'Book24':
            try:
                # Keep only the digits, e.g. "1 234 руб." -> 1234.
                item['main_price'] = int(''.join(re.findall(r'\d',item['main_price'])))
            except (TypeError, ValueError):
                print(f"Error to convert main_price {item['main_price']}")
            item['author'] = ','.join(item['author'])
            try:
                item['discount_price'] = int(item['discount_price'])
            except (TypeError, ValueError):
                print(f"Error to convert discount_price {item['discount_price']}")
        collection = self.mongo_base[spider.name]
        collection.insert_one(item)
        return item
|
# Copyright (c) 2010 NORC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import codecs, hashlib
# Shortcuts to the plain UTF-8 codec functions; the Data* classes below
# wrap these and additionally feed the raw bytes through a SHA-1 digest.
data_encode = codecs.utf_8_encode
data_decode = codecs.utf_8_decode
class DataIncrementalEncoder(codecs.IncrementalEncoder):
    """UTF-8 incremental encoder that SHA-1 digests every byte it emits."""

    def __init__(self, errors = 'strict'):
        codecs.IncrementalEncoder.__init__(self, errors)
        self.__digest = hashlib.sha1()
        # Snapshot of the digest state, restored by reset().
        self.__digest_start = self.__digest.copy()

    @property
    def digest(self):
        """The running SHA-1 object over all encoded output so far."""
        return self.__digest

    @digest.setter
    def digest(self, d):
        # Installing a new digest also rebases the reset() snapshot.
        self.__digest, self.__digest_start = d, d.copy()

    def encode(self, data, final = False):
        encoded, consumed = data_encode(data, self.errors)
        self.__digest.update(encoded)
        return encoded, consumed

    def reset(self):
        codecs.IncrementalEncoder.reset(self)
        self.__digest = self.__digest_start.copy()
class DataIncrementalDecoder(codecs.IncrementalDecoder):
    """UTF-8 incremental decoder that SHA-1 digests every byte it receives."""

    def __init__(self, errors = 'strict'):
        codecs.IncrementalDecoder.__init__(self, errors)
        self.__digest = hashlib.sha1()
        # Snapshot of the digest state, restored by reset().
        self.__digest_start = self.__digest.copy()

    @property
    def digest(self):
        """The running SHA-1 object over all bytes passed to decode()."""
        return self.__digest

    @digest.setter
    def digest(self, d):
        self.__digest, self.__digest_start = d, d.copy()

    def decode(self, data, final = False):
        # Digest the raw input bytes, then decode them as UTF-8.
        self.__digest.update(data)
        return data_decode(data, self.errors)

    def reset(self):
        codecs.IncrementalDecoder.reset(self)
        self.__digest = self.__digest_start.copy()
class DataStreamWriter(codecs.StreamWriter):
    """UTF-8 StreamWriter that SHA-1 digests every byte it writes."""

    def __init__(self, stream, errors = 'strict'):
        codecs.StreamWriter.__init__(self, stream, errors)
        self.__digest = hashlib.sha1()
        # Snapshot of the digest state, restored by reset().
        self.__digest_start = self.__digest.copy()

    @property
    def digest(self):
        """The running SHA-1 object over all encoded output so far."""
        return self.__digest

    @digest.setter
    def digest(self, d):
        self.__digest, self.__digest_start = d, d.copy()

    def reset(self):
        codecs.StreamWriter.reset(self)
        self.__digest = self.__digest_start.copy()

    def encode(self, data, errors = 'strict'):
        # Note: self.errors is used, not the *errors* argument — behaviour
        # kept from the original implementation.
        encoded, consumed = data_encode(data, self.errors)
        self.__digest.update(encoded)
        return encoded, consumed
class DataStreamReader(codecs.StreamReader):
    """UTF-8 StreamReader that SHA-1 digests the raw bytes it consumes."""

    def __init__(self, stream, errors = 'strict'):
        # Bug fix: the original signature was (self, errors) and forwarded
        # the stream through the *errors* parameter; codecs.StreamReader
        # requires (stream, errors), so the errors mode could never be set.
        codecs.StreamReader.__init__(self, stream, errors)
        self.__digest = hashlib.sha1()
        # Snapshot of the digest state, restored by reset().
        self.__digest_start = self.__digest.copy()

    @property
    def digest(self):
        """The running SHA-1 object over all consumed input bytes."""
        return self.__digest

    @digest.setter
    def digest(self, d):
        # Installing a new digest also rebases the reset() snapshot.
        self.__digest = d
        self.__digest_start = d.copy()

    def decode(self, data, final = False):
        orig_data = data
        data, consumed = data_decode(data, self.errors)
        # Digest only the bytes actually consumed (a trailing partial
        # UTF-8 sequence is left for the next call).
        self.__digest.update(orig_data[:consumed])
        return data, consumed

    def reset(self):
        codecs.StreamReader.reset(self)
        self.__digest = self.__digest_start.copy()
def search(codec):
    """Codec-registry hook: resolve the custom 'csharp-data' codec name."""
    if codec != 'csharp-data':
        # Any other name: defer to the rest of the registry.
        return None
    return codecs.CodecInfo(
        name = 'csharp-data',
        encode = data_encode,
        decode = data_decode,
        incrementalencoder = DataIncrementalEncoder,
        incrementaldecoder = DataIncrementalDecoder,
        streamreader = DataStreamReader,
        streamwriter = DataStreamWriter,
    )
codecs.register(search)
|
#!/bin/env python3
"""
https://www.hackerrank.com/challenges/python-quest-1
INPUT:
int N, where 1 <= N <= 9
OUTPUT: N-1 lines
numerical triangle of height N-1 like
1
22
333
4444
55555
...
?
1
11 * 2
111 * 3
1111 * 4
11111 * 5
1 = 1
2 + 20 = 22
3 + 30 + 300 = 333
4 + 40 + 400 + 4000 = 4444
5 + 50 + 500 + 5000 + 50,000 = 55,555
...
1 * (10^0) = 1
2 + 2 * (10^1) = 22
3 + 3 * (10^1 + 10^2) = 333
4 + 4 * (10^1 + 10^2 + 10^3) = 4444
5 + 5 * (10^1 + 10^2 + 10^3 + 10^4) = 55555
...
1 * (10^0) = 1
2 * (10^0 + 10^1) = 22
3 * (10^0 + 10^1 + 10^2) = 333
4 * (10^0 + 10^1 + 10^2 + 10^3) = 4444
5 * (10^0 + 10^1 + 10^2 + 10^3 + 10^4) = 55555
...
Rules:
Use only arithmetic operations, a single for loop and a print statement.
Use no more than two lines.
Use nothing related to strings.
"""
# j accumulates the repunit 1, 11, 111, ... so i * j is row i of the
# triangle (e.g. 3 * 111 = 333).
# NOTE(review): the rules above forbid anything string-related, yet this
# prints via str.format and emits "i j i*j" per line rather than just the
# triangle row — looks like leftover debug output; confirm intent.
j = 0
for i in range(1, int(input())):
    j += 10**(i - 1)
    print("{} {} {}".format(i, j, i * j))
|
from flask import Blueprint
from flask_login import current_user
from .. import db
# Blueprint for account-related routes. The route handlers live in .views,
# imported after the blueprint exists (standard Flask pattern, presumably
# to avoid a circular import — hence the noqa).
account = Blueprint('account', __name__)
from . import views  # noqa
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version Copyright (c) 2010 kne / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from framework import *
from math import cos, sin
class CharacterCollision (Framework):
    name="Character Collision"
    description="""This tests various character collision shapes.
    Limitation: Square and hexagon can snag on aligned boxes.
    Feature: Loops have smooth collision, inside and out."""
    def __init__(self):
        """Build the static test terrain and the dynamic 'character' bodies."""
        super(CharacterCollision, self).__init__()
        # Flat ground segment.
        ground = self.world.CreateStaticBody(
                    position=(0,0),
                    shapes=b2EdgeShape(vertices=[(-20,0), (20,0)])
                )
        # Collinear edges
        self.world.CreateStaticBody(
                shapes=[b2EdgeShape(vertices=[(-8,1), (-6,1)]),
                        b2EdgeShape(vertices=[(-6,1), (-4,1)]),
                        b2EdgeShape(vertices=[(-4,1), (-2,1)]),
                    ]
            )
        # Square tiles
        self.world.CreateStaticBody(
                shapes=[b2PolygonShape(box=[1, 1, (4,3), 0]),
                        b2PolygonShape(box=[1, 1, (6,3), 0]),
                        b2PolygonShape(box=[1, 1, (8,3), 0]),
                    ]
            )
        # Square made from an edge loop. Collision should be smooth.
        body=self.world.CreateStaticBody()
        body.CreateLoopFixture(vertices=[(-1,3), (1,3), (1,5), (-1,5)])
        # Edge loop.
        body=self.world.CreateStaticBody(position=(-10,4))
        body.CreateLoopFixture(vertices=[
                (0.0, 0.0), (6.0, 0.0),
                (6.0, 2.0), (4.0, 1.0),
                (2.0, 2.0), (0.0, 2.0),
                (-2.0,2.0), (-4.0,3.0),
                (-6.0,2.0), (-6.0,0.0),]
            )
        # Square character 1
        self.world.CreateDynamicBody(
                position=(-3, 8),
                fixedRotation=True,
                allowSleep=False,
                fixtures=b2FixtureDef(shape=b2PolygonShape(box=(0.5, 0.5)), density=20.0),
            )
        # Square character 2
        body=self.world.CreateDynamicBody(
                position=(-5, 5),
                fixedRotation=True,
                allowSleep=False,
            )
        body.CreatePolygonFixture(box=(0.25, 0.25), density=20.0)
        # Hexagon character
        # a is the interior angle step (60 degrees) between hexagon vertices.
        a=b2_pi/3.0
        self.world.CreateDynamicBody(
                position=(-5, 8),
                fixedRotation=True,
                allowSleep=False,
                fixtures=b2FixtureDef(
                        shape=b2PolygonShape(vertices=[(0.5*cos(i*a), 0.5*sin(i*a)) for i in range(6)]),
                        density=20.0
                    ),
            )
        # Circle character
        self.world.CreateDynamicBody(
                position=(3, 5),
                fixedRotation=True,
                allowSleep=False,
                fixtures=b2FixtureDef(
                        shape=b2CircleShape(radius=0.5),
                        density=20.0
                    ),
            )
    def Step(self, settings):
        """Advance the simulation one frame; no per-step logic is added."""
        super(CharacterCollision, self).Step(settings)
        pass
if __name__=="__main__":
    main(CharacterCollision)
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# Open GitHub, type "python" into the site search box and print the names
# of the repositories on the first results page.
driver = webdriver.Chrome()
url ="http://github.com"
driver.get(url)
# NOTE(review): find_element_by_xpath / find_elements_by_css_selector were
# removed in Selenium 4 — as written this script requires Selenium 3.x.
# The absolute XPath below is also brittle against any page-layout change.
searchInput = driver.find_element_by_xpath("/html/body/div[1]/header/div/div[2]/div[2]/div/div/div/form/label/input[1]")
time.sleep(1)
searchInput.send_keys("python")
time.sleep(2)
searchInput.send_keys(Keys.ENTER)
time.sleep(2)
# result = driver.page_source
result = driver.find_elements_by_css_selector(".repo-list-item h3 a")
for element in result:
    print(element.text)
driver.close()
from django.db import models
# Create your models here.
class Product(models.Model):
    """Catalogue entry for a sellable product (book-like fields)."""
    id = models.AutoField(primary_key=True)  # explicit pk (same as Django's default)
    author = models.CharField(max_length=100)
    title = models.CharField(max_length=100)
    image = models.CharField(max_length=500)  # image URL/path stored as plain text
    quantity = models.IntegerField()
    price = models.FloatField()  # NOTE(review): DecimalField is safer for money — confirm
    description = models.CharField(max_length=10000)
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class WeiboItem(Item):
    """One scraped weibo post plus basic author metadata."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    table_name = 'weibo'  # presumably the storage table name used downstream — confirm
    weibo_id = Field()
    weibo_url = Field()
    weibo_content = Field()
    posted_at = Field()
    posted_from = Field()
    user_id = Field()
    user_name = Field()
    user_gender = Field()
    district = Field()
|
'''
Collections module - Counter
Collections - high-performance container datatypes.
Counter -> takes an iterable and builds a dict-like Counter object whose
keys are the elements of the collection and whose values are the number of
occurrences of each element.
'''
print('\n')
# Using Counter
from collections import Counter
# Any iterable works
lista = [1, 2, 2, 2, 3, 3, 1, 1, 4, 5, 4, 3, 5, 5, 4, 1, 2, 2, 3, 5]
res = Counter(lista)
print(type(res))
print(res)
# Note that for each element of the list, Counter created a key whose value
# is that element's number of occurrences
#==========================================================================================================
print('\n')
print(Counter('Felipe')) # Each letter becomes a key whose value is its occurrence count
print('\n')
texto = '''Isso vale até para a evolução das espécies, para a performance das empresas a longo
prazo e até para nós, dentro desse mundo que muda muito, o tempo todo. E isso acontece principalmente
na nossa área! O nosso computador pode se comparar à história do sistema solar, onde o tempo aqui dentro
passa tão rápido, que já completou ciclos suficientes para repetir a história do asteróide que cai e
mata grande parte dos animais, mais de um milhão de vezes. Mas sendo mais específico com a nossa área'''
palavras = texto.split()
res = Counter(palavras)
print(res)
print('\n')
# Finding the words with the most occurrences:
print(res.most_common(5)) # the 5 most common
print(res.most_common(10)) # the 10 most common
#==========================================================================================================
'''
Created on May 29, 2013
@author: dough
'''
from playback_recording_response import PlaybackRecordingResponse
from playlist_factory import PlaylistFactory
class PlaybackRecordingController(object):
    """Serves playback requests by matching a recording id against the
    known recordings and producing a playlist URL for the match."""

    def __init__(self, recordings_list):
        """Keep the recordings list and a playlist factory for later use."""
        self._recordings_list = recordings_list
        self._pf = PlaylistFactory()

    def playback(self, req):
        """Return a SUCCESS response with a playlist URL for the first
        recording whose id matches, else RECORDING_DOES_NOT_EXIST."""
        resp = PlaybackRecordingResponse(PlaybackRecordingResponse.RECORDING_DOES_NOT_EXIST)
        match = next((rec for rec in self._recordings_list
                      if rec.id == req.recording_id), None)
        if match is not None:
            playlist = self._pf.produce(match.source, match.start_date_time, match.end_date_time)
            resp = PlaybackRecordingResponse(PlaybackRecordingResponse.SUCCESS, playback_url=playlist.url)
        return resp
# Beginner string/list practice script.
# Bug fix: one comment below used C-style "//" syntax, which is a
# SyntaxError in Python — the whole file failed to run. It is now "#".
# (Original Chinese comments translated to English; all printed strings
# are kept byte-for-byte.)
message = 'we come to beijing'
print(message)
message2 = ' I come form china '
print(message2+message)
name ='lei kang is china '
# title() capitalises the first letter of every word
print(name.title())
name2 = 'Eric'
print('hello'+' '+name2 + ' ' +'would you like to learn some Python today')
name_3 ='eric losry'
# print the name in lower case
print('my name‘s :'+' '+name_3.lower())
# print the name in upper case
print('my name‘s :'+' '+name_3.upper())
# print the name in title case
print('my name‘s :'+' '+name_3.title())
# print famous quotes
print('Albert Einisterin once said,"Aperson who nerver made a mistake nerver tried anying new",')
print('mdding once said," women is have hight is to lao ," ')
famous_person = 'mading'
message_1 = 'IS women have dakjh dg'
print(famous_person+' '+message_1)
name_4 =' leikang '
print("\t leikang")
print("\n lei kang")
# rstrip()/lstrip()/strip(): trailing / leading / both-side whitespace removal
print(name_4.rstrip())
print(name_4.lstrip())
print(name_4.strip())
print(6+2)
print(12-4)
print(4 * 2)
print(2 ** 3)
print(8 / 2)
print(16 / 2)
number = 8
print(str(number)+' I like number.')
# store a list of names and print it
name_5 = ['李俊','王朗','丰子恺','天寒','小看','乐乐','张东镇','李济生','严峻','黄亮亮']
print(name_5)
print(name_5[2])
message_2 = '长得帅的'+name_5[0]+'.'
print(message_2)
message_3 = 'like huanghuang is '+name_5[1]
print(message_3)
# my ways of getting around, printed with a short endorsement each
travel_mode =['bucycle','bus','subway','taxi','walk']
declaration = '锻炼身体,又能环保'+travel_mode[0]
print(declaration)
declaration_2='省钱,人有多,没有污染'+travel_mode[1]
print(declaration_2)
name_6 =['xiaomi','wangxiang','liuhuan']
print(name_6)
# append(): add an element at the end of the list
name_6.append('lilei')
print(name_6)
print(name_6[1]+'wufa fuyue')
name_6[1]='lilei'
print(name_6)
# insert(i, x): insert element x before index i
name_6.insert(2,'lk')
print(name_6)
name_6.insert(3,'ou')
print(name_6)
# del list[x]: delete the element at index x
del name_6[0]
print(name_6)
del name_6[-1]
print(name_6)
# pop(x): remove and return the element at index x (default: the last one)
name_6.pop(1)
name_6_1 = name_6.pop(1)
print(name_6_1)
print(name_6)
# remove(value): delete the first element equal to value
name_6.remove('lilei')
print(name_6)
# store some city names
city = ['huanshan','xihu','tiananmen']
print(city)
# sort the cities alphabetically, in place
city.sort()
print(city)
# sort the cities in reverse alphabetical order (this comment was "//…")
city.sort(reverse= True)
print(city)
city.reverse()
print(city)
print(sorted(city))
# len() gives the number of elements stored in the list (counted from 1)
print(len(city))
|
# -*- coding: UTF-8 -*-
# For ubuntu env error: findfont: Font family ['Times New Roman'] not found. Falling back to DejaVu Sans.
# ```bash
# sudo apt-get install msttcorefonts
# rm -rf ~/.cache/matplotlib
# ```
# Plots per-round total time consumption (seconds) over 50 training rounds
# for four schemes; the series below are hard-coded measurements.
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
# x axis: training round index 1..50.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
     32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50]
local_train = [0.6, 0.6, 0.6, 0.62, 0.64, 0.64, 0.67, 0.66, 0.63, 0.64, 0.63, 0.61, 0.64, 0.65, 0.66, 0.66, 0.61, 0.64, 0.69, 0.65, 0.68, 0.71, 0.62, 0.65, 0.65, 0.65, 0.63, 0.71, 0.63, 0.63, 0.62, 0.67, 0.66, 0.64, 0.63, 0.61, 0.58, 0.6, 0.6, 0.59, 0.6, 0.61, 0.62, 0.62, 0.63, 0.63, 0.62, 0.61, 0.61, 0.61]
fed_avg = [4.81, 2.88, 2.89, 2.96, 2.91, 2.94, 3.08, 2.87, 2.89, 2.88, 2.93, 3.05, 2.88, 2.85, 2.86, 2.91, 2.87, 2.85, 2.86, 2.86, 2.85, 2.85, 2.85, 2.89, 2.84, 2.87, 2.88, 2.89, 2.88, 2.86, 3.36, 3.06, 2.86, 3.02, 2.98, 2.87, 2.88, 2.85, 2.86, 2.88, 2.83, 2.88, 2.85, 2.90, 2.94, 2.87, 2.87, 3.03, 2.90, 2.25]
apfl = [1.25, 21.23, 1.31, 1.31, 1.36, 1.28, 1.29, 1.29, 1.28, 1.30, 1.28, 35.53, 1.35, 1.33, 1.37, 1.30, 1.31, 1.32, 1.32, 1.32, 1.32, 36.22, 1.36, 1.36, 1.38, 1.30, 1.34, 1.33, 1.33, 1.34, 1.31, 36.84, 1.36, 1.35, 1.35, 1.31, 1.27, 1.26, 1.25, 1.29, 1.29, 33.22, 1.36, 1.36, 1.32, 1.32, 1.31, 1.30, 1.31, 1.31]
scei = [15.13, 14.40, 14.69, 14.33, 14.08, 15.14, 14.74, 15.12, 15.24, 16.06, 15.75, 15.56, 14.09, 14.82, 14.57, 15.09, 15.63, 15.37, 20.22, 18.65, 16.88, 16.21, 17.48, 16.12, 16.63, 21.50, 17.17, 16.20, 16.89, 17.33, 17.62, 21.35, 20.96, 22.05, 19.75, 20.74, 17.77, 17.40, 20.78, 20.97, 22.31, 21.68, 18.02, 20.06, 20.30, 19.86, 20.20, 18.65, 18.15, 18.78]
fig, axes = plt.subplots()
# Bold Times New Roman throughout (see apt-get note above for Ubuntu).
legendFont = font_manager.FontProperties(family='Times New Roman', weight='bold', style='normal', size=15)
xylabelFont = font_manager.FontProperties(family='Times New Roman', weight='bold', style='normal', size=17)
csXYLabelFont = {'fontproperties': xylabelFont}
# SCEI is the highlighted series; baselines are dashed and faded.
axes.plot(x, scei, label="SCEI with negotiated α", linewidth=3)
axes.plot(x, local_train, label="Local Training", linestyle='--', alpha=0.5)
axes.plot(x, apfl, label="APFL", linestyle='--', alpha=0.5)
axes.plot(x, fed_avg, label="FedAvg", linestyle='--', alpha=0.5)
axes.set_xlabel("Training Rounds", **csXYLabelFont)
axes.set_ylabel("Total Time Consumption (s)", **csXYLabelFont)
plt.xticks(family='Times New Roman', fontsize=15)
plt.yticks(family='Times New Roman', fontsize=15)
# plt.ylim(90, 100)
plt.legend(prop=legendFont)
plt.grid()
plt.show()
from math import sqrt
def PrimeArray(n):
    """Sieve of Eratosthenes flag array.

    Returns a list of length n+1 whose entry i is 1 if i is prime and 0
    otherwise. For n < 2 (including the original's n == 0 / n == 1
    special cases, and now also negative n, which used to crash) an empty
    list is returned.

    Cleanup: removed the duplicate local sqrt import, the unused lists,
    and the reverse/pop/reverse dance that trimmed 0 and 1.
    """
    if n < 2:
        return []
    flags = [1] * (n + 1)
    flags[0] = flags[1] = 0
    # Clearing multiples of every a <= sqrt(n) is sufficient: any
    # composite <= n has a factor in that range.
    for a in range(2, int(sqrt(n)) + 1):
        if not flags[a]:
            # a is composite, so its multiples were already cleared.
            continue
        for multiple in range(2 * a, n + 1, a):
            flags[multiple] = 0
    return flags
def Primes(n):
    """Return the list of primes <= n, derived from PrimeArray's flags."""
    return [value for value, flag in enumerate(PrimeArray(n)) if flag == 1]
# Driver: appears to search, among primes below 10000, for pairs whose
# average is also prime and where all three numbers are digit permutations
# of one another (a 3-term arithmetic progression, cf. Project Euler 49).
# NOTE(review): "Primes = Primes(10000)" rebinds the function name to its
# result, so the Primes() function cannot be called again afterwards.
Primes = Primes(10000)
Array = PrimeArray(10000)
for i in range(len(Primes)):
    for j in range(0,i):
        Prime1 = Primes[i]
        Prime2 = Primes[j]
        # Integer midpoint; only an exact AP middle when Prime1+Prime2 is even.
        Mid = int((Prime1 + Prime2)/2)
        if Array[int(Mid)] == 1: #Numbers form an AP
            # Compare sorted digit lists to test for permutations.
            Str1 = list(str(Prime1))
            Str2 = list(str(Prime2))
            StrM = list(str(Mid))
            Str1.sort()
            Str2.sort()
            StrM.sort()
            if Str1 == Str2:
                if Str1 == StrM:
                    print(Prime1)
                    print(Prime2)
                    print(Mid)
|
from typing import List
class Solution:
    def hIndex(self, citations: List[int]) -> int:
        """Return the h-index of a citation list sorted in ascending order.

        Binary search for the leftmost index whose citation count is at
        least the number of papers from that index onward; runs in
        O(log n). (Cleanup: removed the commented-out earlier attempt.)
        """
        left, right = 0, len(citations) - 1
        while left <= right:
            middle = (left + right) // 2
            if citations[middle] == len(citations) - middle:
                # Exactly h papers each have >= h citations.
                return len(citations) - middle
            elif citations[middle] < len(citations) - middle:
                left = middle + 1
            else:
                right = middle - 1
        # left is now the first index with citations[left] > n - left
        # (or n when no such index exists), so n - left is the h-index.
        return len(citations) - left
def main():
    """Exercise Solution.hIndex on a few sample inputs."""
    solver = Solution()
    for citations in ([0, 1, 3, 5, 6], [1, 2, 100], [0] * 12):
        print(solver.hIndex(citations))


if __name__ == '__main__':
    main()
import argparse
def build_map(input):
    """Convert a list of equal-length strings into a 2-D list of characters."""
    return [list(row) for row in input]
class Slope:
    """A fixed (rows, columns) step applied repeatedly while descending."""

    def __init__(self, x, y):
        self.x = x  # rows moved per step
        self.y = y  # columns moved per step

    def apply(self, x_0, y_0):
        """Return the position one step away from (x_0, y_0)."""
        return self.x + x_0, self.y + y_0

    def __repr__(self):
        return "[{0}, {1}]".format(self.x, self.y)
def check_slope(map, slope):
encounters = {}
loc = [0, 0]
loc[0], loc[1] = slope.apply(loc[0], loc[1])
while loc[0] < len(map):
encounter = map[loc[0]][loc[1]]
if encounter in encounters:
encounters[encounter] += 1
else:
encounters[encounter] = 1
loc[0], loc[1] = slope.apply(loc[0], loc[1])
# If we end up off the right side of the map subtract the width of the map to simulate the pattern
if loc[1] >= len(map[0]):
loc[1] = loc[1] - len(map[0])
return encounters["#"]
def main(input):
    """Run every required slope over the map and multiply the tree counts."""
    grid = build_map(input)
    slopes = [Slope(1, 1), Slope(1, 3), Slope(1, 5), Slope(1, 7), Slope(2, 1)]
    encounter_by_slope = {s: check_slope(grid, s) for s in slopes}
    print(encounter_by_slope)
    result = 1
    for count in encounter_by_slope.values():
        result *= count
    return result
if __name__ == "__main__":
    # CLI entry point: read the puzzle input, solve, and optionally check
    # the result against an expected-output file.
    parser = argparse.ArgumentParser()
    parser.add_argument("input", type=str, nargs=1, help="filename of our input")
    parser.add_argument("--output", type=str, required=False, help="filename of our expected output")
    arguments = parser.parse_args()
    with open(arguments.input[0], "r") as infile:
        lines = infile.read().splitlines()
    result = main(lines)
    print("Result: {0}".format(result))
    if arguments.output:
        with open(arguments.output, "r") as outfile:
            expected = outfile.read().splitlines()[0]
        assert(result == int(expected))
|
import numpy as np
import os, sys, json
import PIL
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
# Build a flat CSV of bounding-box annotations from a COCO-style instances.json.
# Earlier input/output locations are kept below as commented-out alternatives.
# img_dir = '../../rf-chess-data/roboflow/export/'
# img_fns = os.listdir(img_dir)
# output_dir = 'data/rf-annotate/'
annotate_dir = '../../../other-chess-data/regulation-pieces-3/originals/data/'
annotate_fn = 'instances.json'
# annotate_dir = '../../../other-chess-data/regulation-pieces-1/originals/data/'
# annotate_fn = 'instances.json'
output_dir = '../data/other-annotate-rp1/'
# output_dir = '../data/rf-annotate/'
output_fn = output_dir + 'data_reg3_v1.csv'
# Load the COCO file and split its three tables into DataFrames.
with open(Path(annotate_dir, annotate_fn), 'r') as f:
    d_annotate = json.load(f)
df_annotate = pd.DataFrame(d_annotate['annotations'])
df_images = pd.DataFrame(d_annotate['images'])
df_categories = pd.DataFrame(d_annotate['categories'])
# merges + drop unnec cols
df_join = pd.merge(df_annotate,
                   df_images,
                   how='left',
                   left_on='image_id',
                   right_on='id')
drop_cols = ["id_x", "id_y", "license",
             "segmentation", "iscrowd", "date_captured"]
# errors='ignore' skips labels absent from this particular export, replacing
# the old per-column try/except-pass loop (which also hid unrelated errors).
df_join = df_join.drop(columns=drop_cols, errors='ignore')
df_join = pd.merge(df_join,
                   df_categories,
                   how='left',
                   left_on='category_id',
                   right_on='id'
                   )
df_join = df_join.drop(["id", "supercategory"], axis=1)
df_join = df_join.rename(mapper={'name': 'category_full_name'}, axis=1)
# add extra cols
df_join['annotate_id'] = df_join.index
# bbox entries are [x, y, w, h]; derive top-left and bottom-right corners.
df_join['coord_tl'] = df_join['bbox'].map(lambda x: [x[0], x[1]])
df_join['coord_br'] = df_join['bbox'].map(lambda x: [x[0] + x[2], x[1] + x[3]])
# category_full_name looks like '<color>-<piece>'; split into its two parts.
df_join['category_color_name'] = df_join['category_full_name'].map(lambda x: x.split('-')[0])
df_join['category_piece_name'] = df_join['category_full_name'].map(lambda x: x.split('-')[1])
df_join.to_csv(output_fn, index=False) |
import numpy as np
from numpy import ndarray
from pprint import pformat
from reconstruct.quaternion import Quaternion
class Matrix4:
    """A 4x4 float32 transform matrix, ported from three.js's Matrix4.

    NOTE(review): the element layout follows the original port — rotation
    terms are written using three.js's column-major index order, so
    ``value[i]`` holds what three.js stores as column i, and the
    translation lives in ``value[3]``.
    """

    # shape (4, 4)
    value: ndarray

    def __init__(self):
        self.value = np.identity(4, dtype=np.float32)

    def __getitem__(self, index):
        return self.value[index]

    def __setitem__(self, index, value):
        self.value[index] = value

    def __repr__(self):
        return pformat(self.value)

    def compose(self, position: ndarray, quaternion: Quaternion, scale: ndarray):
        """Rebuild the matrix from rotation, then scale, then translation."""
        self.make_rotation_from_quaternion(quaternion)
        self.scale(scale)
        self.set_position(position)

    # taken from three.js/src/math/Matrix4.js
    def make_rotation_from_quaternion(self, quaternion: Quaternion):
        """Overwrite the matrix with the rotation described by `quaternion`."""
        qx, qy, qz, qw = quaternion.x, quaternion.y, quaternion.z, quaternion.w
        x2, y2, z2 = qx + qx, qy + qy, qz + qz
        xx, xy, xz = qx * x2, qx * y2, qx * z2
        yy, yz, zz = qy * y2, qy * z2, qz * z2
        wx, wy, wz = qw * x2, qw * y2, qw * z2
        # Rows assigned wholesale; element order matches the original port.
        self.value[0] = np.array([1 - (yy + zz), xy + wz, xz - wy, 0])
        self.value[1] = np.array([xy - wz, 1 - (xx + zz), yz + wx, 0])
        self.value[2] = np.array([xz + wy, yz - wx, 1 - (xx + yy), 0])
        self.value[3] = np.array([0, 0, 0, 1])

    def scale(self, vector: ndarray):
        """Scale the three basis rows by the per-axis factors in `vector`."""
        for axis in range(3):
            self.value[axis] *= vector[axis]

    def set_position(self, vector: ndarray):
        """Write the translation components into row 3 (w entry untouched)."""
        for axis in range(3):
            self.value[3][axis] = vector[axis]

    def make_perspective(self,
                         left: float,
                         right: float,
                         top: float,
                         bottom: float,
                         near: float,
                         far: float):
        """Overwrite the matrix with an off-axis perspective projection.

        Mirrors three.js's makePerspective, using the same transposed
        element layout as the rest of this class.
        """
        width = right - left
        height = top - bottom
        depth = far - near
        x = 2 * near / width
        y = 2 * near / height
        a = (right + left) / width
        b = (top + bottom) / height
        c = -(far + near) / depth
        d = -2 * far * near / depth
        self.value[0] = np.array([x, 0, 0, 0])
        self.value[1] = np.array([0, y, 0, 0])
        self.value[2] = np.array([a, b, c, -1])
        self.value[3] = np.array([0, 0, d, 0])
|
# -*- coding: utf-8 -*-
from EXOSIMS.util.vprint import vprint
from EXOSIMS.util.get_module import get_module
from EXOSIMS.util.get_dirs import get_cache_dir
import numpy as np
import astropy.units as u
class Completeness(object):
    """:ref:`Completeness` Prototype
    Args:
        minComp (float):
            Minimum completeness for target filtering. Defaults to 0.1.
        cachedir (str, optional):
            Full path to cachedir.
            If None (default) use default (see :ref:`EXOSIMSCACHE`)
        **specs:
            :ref:`sec:inputspec`
    Attributes:
        _outspec (dict):
            :ref:`sec:outspec`
        cachedir (str):
            Path to the EXOSIMS cache directory (see :ref:`EXOSIMSCACHE`)
        minComp (float):
            Minimum completeness value for inclusion in target list
        PlanetPhysicalModel (:ref:`PlanetPhysicalModel`):
            Planet physical model object
        PlanetPopulation (:ref:`PlanetPopulation`):
            Planet population object
        updates (numpy.ndarray):
            Dynamic completeness updates array for revisits.
    """
    _modtype = "Completeness"
    def __init__(self, minComp=0.1, cachedir=None, **specs):
        # start the outspec
        self._outspec = {}
        # load the vprint function (same line in all prototype module constructors)
        self.vprint = vprint(specs.get("verbose", True))
        # find the cache directory
        self.cachedir = get_cache_dir(cachedir)
        self._outspec["cachedir"] = self.cachedir
        specs["cachedir"] = self.cachedir
        # if specs contains a completeness_spec then we are going to generate separate
        # instances of planet population and planet physical model for completeness and
        # for the rest of the sim
        if "completeness_specs" in specs:
            # Normalize completeness_specs so it always has a "modules" dict,
            # filling any missing module names from the top-level spec.
            if specs["completeness_specs"] is None:
                specs["completeness_specs"] = {}
                specs["completeness_specs"]["modules"] = {}
            if "modules" not in specs["completeness_specs"]:
                specs["completeness_specs"]["modules"] = {}
            if "PlanetPhysicalModel" not in specs["completeness_specs"]["modules"]:
                specs["completeness_specs"]["modules"]["PlanetPhysicalModel"] = specs[
                    "modules"
                ]["PlanetPhysicalModel"]
            if "PlanetPopulation" not in specs["completeness_specs"]["modules"]:
                specs["completeness_specs"]["modules"]["PlanetPopulation"] = specs[
                    "modules"
                ]["PlanetPopulation"]
            # Instantiate a dedicated PlanetPopulation for completeness only.
            self.PlanetPopulation = get_module(
                specs["completeness_specs"]["modules"]["PlanetPopulation"],
                "PlanetPopulation",
            )(**specs["completeness_specs"])
            self._outspec["completeness_specs"] = specs.get("completeness_specs")
        else:
            # No separate completeness spec: share the sim-wide population.
            self.PlanetPopulation = get_module(
                specs["modules"]["PlanetPopulation"], "PlanetPopulation"
            )(**specs)
        # assign physical model object to attribute
        self.PlanetPhysicalModel = self.PlanetPopulation.PlanetPhysicalModel
        # set minimum completeness
        self.minComp = float(minComp)
        self._outspec["minComp"] = self.minComp
        # generate filenames for cached products (if any)
        self.generate_cache_names(**specs)
        # perform preliminary calculations (if any)
        self.completeness_setup()
    def generate_cache_names(self, **specs):
        """Generate unique filenames for cached products"""
        # The prototype uses fixed names; subclasses are expected to make
        # these unique per-configuration.
        self.filename = "Completeness"
        self.dfilename = "DynamicCompleteness"
    def completeness_setup(self):
        """Perform any preliminary calculations needed for this flavor of completeness
        For the Prototype, this is just a dummy function for later overloading
        """
        pass
    def __str__(self):
        """String representation of Completeness object
        When the command 'print' is used on the Completeness object, this
        method will return the values contained in the object
        """
        for att in self.__dict__:
            print("%s: %r" % (att, getattr(self, att)))
        return "Completeness class object attributes"
    def target_completeness(self, TL):
        """Generates completeness values for target stars
        This method is called from TargetList __init__ method.
        Args:
            TL (:ref:`TargetList`):
                TargetList object
        Returns:
            ~numpy.ndarray(float):
                Completeness values for each target star
        .. warning::
            The prototype implementation does not perform any real completeness
            calculations. To be used when you need a completeness object but do not
            care about the actual values.
        """
        # Fixed placeholder completeness for every star.
        int_comp = np.array([0.2] * TL.nStars)
        return int_comp
    def gen_update(self, TL):
        """Generates any information necessary for dynamic completeness
        calculations (completeness at successive observations of a star in the
        target list)
        Args:
            TL (:ref:`TargetList`):
                TargetList object
        Returns:
            None
        """
        # Prototype does not use precomputed updates, so set these to zeros
        self.updates = np.zeros((TL.nStars, 5))
    def completeness_update(self, TL, sInds, visits, dt):
        """Updates completeness value for stars previously observed
        Args:
            TL (:ref:`TargetList`):
                TargetList class object
            sInds (~numpy.ndarray(int)):
                Indices of stars to update
            visits (~numpy.ndarray(int)):
                Number of visits for each star
            dt (~astropy.units.Quantity(~numpy.ndarray(float))):
                Time since previous observation
        Returns:
            ~numpy.ndarray(float):
                Completeness values for each star
        """
        # prototype returns the single-visit completeness value
        int_comp = TL.int_comp[sInds]
        return int_comp
    def revise_updates(self, ind):
        """Keeps completeness update values only for targets remaining in
        target list during filtering (called from TargetList.filter_target_list)
        Args:
            ind (~numpy.ndarray(int)):
                array of indices to keep
        """
        self.updates = self.updates[ind, :]
    def comp_per_intTime(
        self, intTimes, TL, sInds, fZ, fEZ, WA, mode, C_b=None, C_sp=None, TK=None
    ):
        """Calculates completeness values per integration time
        Note: Prototype does no calculations and always returns the same value
        Args:
            intTimes (~astropy.units.Quantity(~numpy.ndarray(float))):
                Integration times
            TL (:ref:`TargetList`):
                TargetList object
            sInds (~numpy.ndarray(int)):
                Integer indices of the stars of interest
            fZ (~astropy.units.Quantity(~numpy.ndarray(float))):
                Surface brightness of local zodiacal light in units of 1/arcsec2
            fEZ (~astropy.units.Quantity(~numpy.ndarray(float))):
                Surface brightness of exo-zodiacal light in units of 1/arcsec2
            WA (~astropy.units.Quantity(~numpy.ndarray(float))):
                Working angle of the planet of interest in units of arcsec
            mode (dict):
                Selected observing mode
            C_b (~astropy.units.Quantity(~numpy.ndarray(float)), optional):
                Background noise electron count rate in units of 1/s
            C_sp (~astropy.units.Quantity(~numpy.ndarray(float)), optional):
                Residual speckle spatial structure (systematic error) in units of 1/s
        Returns:
            ~numpy.ndarray(float):
                Completeness values
        """
        # Coerce all inputs to at-least-1-D arrays while keeping their units.
        # NOTE(review): np.array(..., copy=False) raises in NumPy >= 2.0 when
        # a copy is required; np.asarray would be the forward-compatible form.
        sInds = np.array(sInds, ndmin=1, copy=False)
        intTimes = np.array(intTimes.value, ndmin=1) * intTimes.unit
        fZ = np.array(fZ.value, ndmin=1) * fZ.unit
        fEZ = np.array(fEZ.value, ndmin=1) * fEZ.unit
        WA = np.array(WA.value, ndmin=1) * WA.unit
        assert len(intTimes) in [
            1,
            len(sInds),
        ], "intTimes must be constant or have same length as sInds"
        assert len(fZ) in [
            1,
            len(sInds),
        ], "fZ must be constant or have same length as sInds"
        assert len(fEZ) in [
            1,
            len(sInds),
        ], "fEZ must be constant or have same length as sInds"
        assert len(WA) in [
            1,
            len(sInds),
        ], "WA must be constant or have same length as sInds"
        return np.array([0.2] * len(sInds))
    def comp_calc(self, smin, smax, dMag):
        """Calculates completeness for given minimum and maximum separations
        and dMag.
        Args:
            smin (~numpy.ndarray(float)):
                Minimum separation(s) in AU
            smax (~numpy.ndarray(float)):
                Maximum separation(s) in AU
            dMag (~numpy.ndarray(float)):
                Difference in brightness magnitude
        Returns:
            ~numpy.ndarray(float):
                Completeness values
        .. warning::
            The prototype implementation does not perform any real completeness
            calculations. To be used when you need a completeness object but do not
            care about the actual values.
        """
        return np.array([0.2] * len(dMag))
    def dcomp_dt(
        self, intTimes, TL, sInds, fZ, fEZ, WA, mode, C_b=None, C_sp=None, TK=None
    ):
        """Calculates derivative of completeness with respect to integration time
        Note: Prototype does no calculations and always returns the same value
        Args:
            intTimes (~astropy.units.Quantity(~numpy.ndarray(float))):
                Integration times
            TL (:ref:`TargetList`):
                TargetList class object
            sInds (~numpy.ndarray(int)):
                Integer indices of the stars of interest
            fZ (~astropy.units.Quantity(~numpy.ndarray(float))):
                Surface brightness of local zodiacal light in units of 1/arcsec2
            fEZ (~astropy.units.Quantity(~numpy.ndarray(float))):
                Surface brightness of exo-zodiacal light in units of 1/arcsec2
            WA (~astropy.units.Quantity(~numpy.ndarray(float))):
                Working angle of the planet of interest in units of arcsec
            mode (dict):
                Selected observing mode
        Returns:
            ~astropy.units.Quantity(~numpy.ndarray(float)):
                Derivative of completeness with respect to integration time
                (units 1/time)
        """
        # Same input coercion/validation as comp_per_intTime.
        intTimes = np.array(intTimes.value, ndmin=1) * intTimes.unit
        sInds = np.array(sInds, ndmin=1)
        fZ = np.array(fZ.value, ndmin=1) * fZ.unit
        fEZ = np.array(fEZ.value, ndmin=1) * fEZ.unit
        WA = np.array(WA.value, ndmin=1) * WA.unit
        assert len(intTimes) in [
            1,
            len(sInds),
        ], "intTimes must be constant or have same length as sInds"
        assert len(fZ) in [
            1,
            len(sInds),
        ], "fZ must be constant or have same length as sInds"
        assert len(fEZ) in [
            1,
            len(sInds),
        ], "fEZ must be constant or have same length as sInds"
        assert len(WA) in [
            1,
            len(sInds),
        ], "WA must be constant or have same length as sInds"
        return np.array([0.02] * len(sInds)) / u.d
|
# Search terms used to track Lamborghini-related posts; hashtag and
# plain-text variants are listed separately.
TRACK_TERMS = ["#Huracan",
               "#Lamborghini",
               "#Aventador",
               "#Lambo",
               "#urus",
               "#gallardo",
               "#V10",
               "#V12",
               "#murcielago",
               "#AventadorSVJ",
               "#huracantalk",
               "lamborghini",
               'lambo',
               'v10',
               'v12',
               'huracan',
               'aventador',
               'gallardo',
               'murcielago',
               ]
# Database connection string; empty here and expected to be overridden by the
# optional `private` module imported at the bottom of this file.
CONNECTION_STRING = ""
# Output CSV file and database table for collected data.
CSV_NAME = "huracan.csv"
TABLE_NAME = "huracan"
try:
from private import *
except Exception:
pass |
import cv2
import numpy as np
from matplotlib import pyplot as plt
import collections
from filters import Filters
from utilities import ImageUtilities
# Pixel label values used throughout the flood/track pipeline.
BG_COLOR = 0           # flooded background
UNKNOWN_COLOR = 1      # not yet classified
FG_COLOR = 2           # sure foreground
EDGE_COLOR = 3         # particle edge
TRACK_START_COLOR = 4  # seed label that starts a contour trace
TRACK_END_COLOR = 10   # fill label applied while tracing a particle
def flood_fill(seeds, img, background_val, fill_val):
    """
    Iterative (stack-based) flood fill.

    Every pixel reachable from the seed positions through 4-connected
    pixels whose value equals `background_val` is overwritten with
    `fill_val`. The image is modified in place.

    :param seeds: Starting positions as tuples (x, y)
    :param img: Image to fill (will be modified in place)
    :param background_val: Color that should be flooded
    :param fill_val: Value with which to fill
    """
    height, width = img.shape[0], img.shape[1]
    stack = collections.deque(seeds)
    while stack:
        x, y = stack.pop()
        if img[y, x] != background_val:
            continue
        img[y, x] = fill_val
        # Push the in-bounds 4-connected neighbours.
        if x + 1 < width:
            stack.append((x + 1, y))
        if x > 0:
            stack.append((x - 1, y))
        if y + 1 < height:
            stack.append((x, y + 1))
        if y > 0:
            stack.append((x, y - 1))
def pad_image(img, padding_size, zeros=True):
    """
    Return a copy of `img` surrounded by a constant border.

    :param img: Image to pad
    :param padding_size: Border width in pixels
    :param zeros: If true, padded with zeros, else with ones
    :return: New, padded image (float array, as np.zeros/np.ones produce)
    """
    p = padding_size
    shape = (img.shape[0] + 2 * p, img.shape[1] + 2 * p)
    padded = np.zeros(shape) if zeros else np.ones(shape)
    padded[p:-p, p:-p] = img
    return padded
def track_edge(sx, sy, img, fill_val, edge_val, k_size, k_edge_max=8):
    """
    Tracks the edge by using flooding algorithm, starting at
    position (sx, sy) and stopping at `edge_val` pixels.
    :param sx: Starting position x-coordinate (should be inside particle)
    :param sy: Starting position y-coordinate (should be inside particle)
    :param img: Image to fill (will be modified in place)
    :param fill_val: Value with which to fill
    :param edge_val: Value which is considered as edge
    :param k_size: Size of kernel, must be odd number
    :param k_edge_max: Maximum number of edge pixels that may be present
    in kernel so that pixel is still considered as particle
    :return: Tuple of edge pixels as list [(x1,y1),(x2,y2),..] and boolean if the
    particle is touching edge.
    """
    output = []
    q = collections.deque()
    q.append((sx, sy))
    half_k_size = k_size // 2
    # A pixel counts as boundary when its k_size x k_size neighbourhood has
    # at most (k_size**2 - k_edge_max) non-zero pixels, i.e. at least
    # k_edge_max zero-valued pixels.
    # NOTE(review): the docstring calls these "edge pixels" but the test
    # counts zero-valued pixels (BG_COLOR is 0) — confirm intent.
    inv_edge_condition = k_size**2 - k_edge_max
    # Pad the image
    # `padded` is a snapshot taken BEFORE any filling below, padded with
    # ones (zeros=False) so border neighbourhoods are not mistaken for
    # background; the neighbourhood test always sees the original values.
    padded = pad_image(img, half_k_size, False)
    min_x = 0
    min_y = 0
    max_x = img.shape[1] - 1
    max_y = img.shape[0] - 1
    touching_limits = False
    while len(q) > 0:
        x, y = q.pop()
        val = img[y, x]
        if val == edge_val:
            # Reached a labelled edge pixel: record it, do not flood past it.
            output.append((x, y))
            continue
        # Thanks to the half-kernel pad offset, this slice is the k_size
        # window centred on (x, y).
        non_zero = np.count_nonzero(padded[y:y+k_size, x:x+k_size])
        if non_zero <= inv_edge_condition:
            output.append((x,y))
            continue
        if val == fill_val:
            # Already visited.
            continue
        img[y, x] = fill_val
        # Expand to 4-connected neighbours; hitting the image border marks
        # the particle as touching the limits (caller discards it).
        if x < max_x:
            q.append((x + 1, y))
        else:
            touching_limits = True
        if x > min_x:
            q.append((x - 1, y))
        else:
            touching_limits = True
        if y < max_y:
            q.append((x, y + 1))
        else:
            touching_limits = True
        if y > min_y:
            q.append((x, y - 1))
        else:
            touching_limits = True
    return output, touching_limits
def track_all_particles(sure_fg, bg_flood):
    """
    Finds contours for all particles in image.
    :param sure_fg: Image with areas where we are sure foreground is
    :param bg_flood: Image with background flooded with BG_COLOR
        (modified in place: seeds are marked and traced areas relabelled)
    :return: List of contours, where each particle has one. Each
    contour is list of tuples (x,y)
    """
    contours = []
    # Mark every sure-foreground pixel as a potential tracking seed.
    bg_flood[sure_fg > BG_COLOR] = TRACK_START_COLOR
    # track_edge relabels each traced particle to TRACK_END_COLOR, so
    # ndenumerate (which reads current values) visits each particle once.
    for (y, x), value in np.ndenumerate(bg_flood):
        if value == TRACK_START_COLOR:
            contour, ignore = track_edge(x, y, bg_flood, TRACK_END_COLOR, BG_COLOR, 5)
            # Drop particles that touch the image border or yielded no edge.
            if ignore or len(contour) == 0:
                continue
            contours.append(contour)
    return contours
def find_seeds(background_before_fill):
    """
    Collect flood-fill seeds along the image border.

    Every border pixel whose value is UNKNOWN_COLOR becomes a seed; the
    top row, bottom row, left column and right column are scanned in that
    order.

    :param background_before_fill: Background to use.
    :return: List of seeds as (x, y) tuples.
    """
    height = background_before_fill.shape[0]
    width = background_before_fill.shape[1]
    top = [(x, 0) for x in range(width)
           if background_before_fill[0, x] == UNKNOWN_COLOR]
    bottom = [(x, height - 1) for x in range(width)
              if background_before_fill[height - 1, x] == UNKNOWN_COLOR]
    left = [(0, y) for y in range(height)
            if background_before_fill[y, 0] == UNKNOWN_COLOR]
    right = [(width - 1, y) for y in range(height)
             if background_before_fill[y, width - 1] == UNKNOWN_COLOR]
    return top + bottom + left + right
def flood_image(original_image_path, flooded_output_path):
    """
    Floods background of the image, starting at seeds that
    are obtained at the edges in unknown areas. After
    filling, unknown areas should only be inside particles.
    Several intermediate debug images are written to the working directory.
    :param original_image_path: Path to input image (read as grayscale)
    :param flooded_output_path: Path to flooded image
    :return: Image where background is BG_COLOR and rest is
    either UNKNOWN_COLOR or FG_COLOR.
    """
    img = cv2.imread(original_image_path, 0)
    # Bright pixels (> 130) are taken as sure foreground.
    sure_fg = Filters.threshold(img, 130, False)
    #equalize histogram
    # equalized_img = ImageUtilities.histeq(img)
    # plt.subplot(121), plt.imshow(img, cmap='gray')
    # plt.title('img'), plt.xticks([]), plt.yticks([])
    # plt.subplot(122), plt.imshow(equalized_img, cmap='gray')
    # plt.title('equalized_img'), plt.xticks([]), plt.yticks([])
    # plt.show()
    # Scharr filter
    grad = Filters.scharr(img)
    grad_8 = np.uint8(grad)
    # Low-gradient areas form the background candidate mask; very dark
    # flat pixels (img < 55) are forced into it as well.
    grad_bg = Filters.threshold(grad_8, 4, True)
    grad_bg[np.all([grad_bg == 0, img < 55], axis=0)] = 255
    sure_bg = grad_bg - sure_fg
    sure_bg_filtered = Filters.median_filter(sure_bg, 3)
    # NOTE(review): both files below are written from grad_bg — the first
    # was presumably meant to be `grad` (or grad_8); confirm intent.
    cv2.imwrite('Gradient.tif', grad_bg)
    cv2.imwrite('Gradient_Background.tif', grad_bg)
    cv2.imwrite('Sure_Background_Filtered.tif', sure_bg_filtered)
    sure_bg_inv = Filters.threshold(sure_bg_filtered, 2, True)
    # Map {0, 255} down to {UNKNOWN_COLOR, FG_COLOR} = {1, 2}.
    sure_bg_inv = (sure_bg_inv // 255) + 1
    sure_bg_flood = np.copy(sure_bg_inv)
    # Flood the background inward from border seeds in unknown areas.
    seeds = find_seeds(sure_bg_flood)
    flood_fill(seeds, sure_bg_flood, UNKNOWN_COLOR, BG_COLOR)
    cv2.imwrite(flooded_output_path, sure_bg_flood)
    return sure_bg_flood
def track_image(original_image_path, flooded_background):
    """Trace particle contours in an image and persist them to contours.npy.

    :param original_image_path: Path to the input image (read as grayscale)
    :param flooded_background: Image with the background already flooded
        (as produced by flood_image); a copy is traced, so it is not modified.
    :return: List of contours, one per particle, each a list of (x, y) tuples.
    """
    img = cv2.imread(original_image_path, 0)
    sure_fg = Filters.threshold(img, 180, False)
    tracked_edges = np.copy(flooded_background)
    contours = track_all_particles(sure_fg, tracked_edges)
    # Contours have different lengths, so the array is ragged: dtype=object
    # is required (NumPy >= 1.24 raises on implicit ragged-array creation).
    nparr = np.asarray(contours, dtype=object)
    np.save("contours.npy", nparr)
    return contours
class FloodEdgeDetector:
    """Facade tying together background flooding and contour tracking."""

    @staticmethod
    def find_contours(input_path):
        """Flood the image background, then trace and return all particle contours."""
        flooded = flood_image(input_path, 'flood_output.tif')
        return track_image(input_path, flooded)
|
import os
from client import Client
class SendJson(Client):
    """Client that answers an EHLO handshake by sending a JSON manifest."""

    def __init__(self, TCP, host, port, path):
        Client.__init__(self, TCP, host, port)
        # Path of the JSON document to serve.
        # NOTE(review): getJson() currently reads a hard-coded manifest file
        # and ignores this path — confirm which behaviour is intended.
        self.path = path

    def run(self):
        """Serve the socket protocol: b'EHLO' -> send JSON, b'HELO' -> stop."""
        while self.continuer:
            try:
                data = self.sock.recv(1024)
                self.logger.debug(data)
                if data == b'EHLO':
                    self.sendData(bytearray(self.getJson().encode()))
                if data == b'HELO':
                    self.continuer = False
            except Exception as e:
                # Best-effort: log and keep serving until HELO is received.
                self.logger.error(e)
        self.closeSocket()

    def setJson(self, path):
        """Change the manifest path associated with this client."""
        self.path = path

    def getJson(self):
        """Return the manifest contents as a string."""
        # Context manager closes the handle (the original leaked it).
        with open("json/manifest.JSON", "r") as f:
            return f.read()
|
# -*- coding: utf-8 -*-
"""
City Mania
version: You have to start somewhere right?
"""
import engine
import region
import sys
sys.path.append("..")
import common.protocol_pb2 as proto
import chat
import filesystem
from threading import Lock
import users
import simulator
from network import Network
# TODO: These values should be in separate config file
# Empty HOST string binds the server to all available interfaces.
HOST = ""
PORT = 52003
# TODO: Separate files dude!
class GameState(engine.Entity):
    """
    Defines the game state (running, loaded, saving?).
    May end up being a finite state machine of some sort
    """
    def __init__(self):
        """
        Server states:
            0: No region loaded.
            1: Region loaded, but full simulation pause
        """
        self.serverState = 0
        # Wire global-messenger events to the state accessors below.
        self.accept("requestServerState", self.getServerState)
        # NOTE(review): "requestGameState" being routed to fullPause looks
        # suspicious — confirm this mapping is intended.
        self.accept("requestGameState", self.fullPause)
        self.accept("setServerState", self.setServerState)
    def getServerState(self, peer):
        # Reply to `peer` with the current server state.
        container = proto.Container()
        container.serverState = self.serverState
        messenger.send("sendData", [peer, container])
    def setServerState(self, state):
        self.serverState = state
        print "Server set to state", state
    def fullPause(self, var1=None):
        # If serverstate is 0 then can't change it to 1!
        # NOTE(review): this assigns 1 whenever the state is already
        # non-zero, so it only ever re-asserts state 1 — confirm intent.
        if self.serverState:
            self.serverState = 1
class CommandProcessor(engine.Entity):
    """
    This converts incomming communication into the server message system
    Locking system provided so the server can halt the queue mid operation
    TODO: Add more refined locking for a per city basis (so one city update wont block the others)
    TODO: Add user account management/command authorization here
    """
    def __init__(self):
        # Queue-control and data events from the global messenger.
        self.accept("lockCommandQueue", self.lockQueue)
        self.accept("unlockCommandQueue", self.unlockQueue)
        self.accept("gotData", self.queue)
        self.accept("logout", self.logout)
        self.accept("tick", self.step)
        self.commandQueue = []
        self.lock = Lock()
        # Simple list to make sure people who are here should be here
        # Region password; empty means no password required (see login()).
        self.password = ""
    def lockQueue(self):
        # Queue locking is currently disabled (see commented-out acquire).
        #self.lock.aquire()
        pass
    def unlockQueue(self):
        #self.lock.release()
        pass
    def queue(self, peer, data):
        # Buffer raw network payloads until the next tick.
        self.commandQueue.append((peer, data))
    def step(self):
        #print "Step1"
        # Process one queued command per tick.
        # NOTE(review): pop() takes the most recent entry (LIFO) — confirm
        # that ordering is intended for command processing.
        if self.commandQueue:
            #self.lock.acquire()
            peer, data = self.commandQueue.pop()
            self.processData(peer, data)
            #self.lock.release()
    def processData(self, peer, data):
        """
        processes serialized network event object into internal message system
        """
        container = proto.Container()
        container.ParseFromString(data)
        logger.debug("Data from: %s\nData: %s" %(peer, container))
        # Parsing chain!
        # Great care will need to be taken on when to use if, else, and elif
        # If the profile for this process takes too long
        if container.HasField("login"):
            self.login(peer, container.login)
        # If the player is not logged in we will not process any other message
        if peer not in users.peers:
            print "Unauthorized message from", peer, ". Skipping."
            return
        if container.HasField("chat"):
            messenger.send("onChat", [peer, container.chat])
        if container.HasField("requestServerState"):
            messenger.send("requestServerState", [peer])
        elif container.HasField("requestMaps"):
            messenger.send("requestMaps", [peer])
        elif container.HasField("mapRequest"):
            # Make sure user is admin!
            name = users.getNameFromPeer(peer)
            if users.isAdmin(name):
                messenger.send("mapRequest", [container.mapRequest])
        elif container.HasField("newCityRequest"):
            messenger.send("newCityRequest", [peer, container.newCityRequest])
        elif container.HasField("requestGameState"):
            if not container.requestGameState:
                messenger.send("sendGameState", [peer])
        elif container.HasField("requestUnfoundCity"):
            messenger.send("requestUnfoundCity", [peer, container.requestUnfoundCity])
        elif container.HasField("requestEnterCity"):
            messenger.send("requestEnterCity", [peer, container.requestEnterCity])
        elif container.HasField('requestExitCity'):
            messenger.send('requestExitCity', [peer,])
    def login(self, peer, login):
        """
        Logs in player to the server
        """
        self.lock.acquire()
        container = proto.Container()
        # Assume success (type 1) and downgrade to failure (type 0) below.
        container.loginResponse.type = 1
        if login.regionPassword != self.password:
            container.loginResponse.type = 0
            container.loginResponse.message = "Region password incorrect"
        # Unknown usernames are registered on first login.
        if login.name not in users.userdb:
            users.addUser(login.name, login.password)
        loginType = users.login(login.name, login.password, peer)
        if not loginType:
            container.loginResponse.type = 0
            container.loginResponse.message = "Player password incorrect"
        if container.loginResponse.type:
            container.loginResponse.usertype = users.getType(login.name)
            container.loginResponse.username = login.name
            messenger.send("loggedIn", [peer, login.name])
            logger.info("Logged in: %s %s" %(login.name, peer) )
        messenger.send("sendData", [peer, container])
        self.lock.release()
    def logout(self, peer):
        self.lock.acquire()
        userName = users.getNameFromPeer(peer)
        # Temporary fix.
        if userName:
            users.logout(userName)
        logger.info("User %s %s exiting." %(userName, peer))
        self.lock.release()
# We initialize the CityMania engine
import __builtin__
#__builtin__.messenger = engine.EventManager()
# The command processor registers its messenger handlers on construction.
commandProcessor = CommandProcessor()
# Set up logging
import logging
logger = logging.getLogger('server')
logger.setLevel(logging.INFO)
#logger.setLevel(logging.DEBUG)
# Console handler; a file handler is added later in main().
stream = logging.StreamHandler()
formatter = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
stream.setFormatter(formatter)
logger.addHandler(stream)
logger.info('Logging stream handler added.')
def main():
    """Bootstrap the server: file logging, network, chat, game state, region."""
    vfs = filesystem.FileSystem()
    # Finish setting up logger
    logPath = vfs.logs + 'server.log'
    logFile = logging.FileHandler(logPath)
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    logFile.setFormatter(formatter)
    logger.addHandler(logFile)
    logger.info("Logging file handler added. Logging to %s" % logPath)
    # These constructors register themselves with the global messenger.
    network = Network(HOST, PORT)
    chatServer = chat.ChatServer()
    state = GameState()
    reg = region.Region()
    try:
        # Run the messenger event loop until shutdown or failure.
        messenger.start()
    except Exception, e:
        logger.exception(e)
if __name__ == "__main__":
    main()
|
# Generated by Django 3.1.5 on 2021-03-08 02:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: widen profile.address to 250 chars and store
    profile.phone as a 13-digit integer-valued decimal."""
    dependencies = [
        ('userauth', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='address',
            field=models.CharField(max_length=250),
        ),
        migrations.AlterField(
            model_name='profile',
            name='phone',
            # decimal_places=0 keeps the phone number as whole digits.
            field=models.DecimalField(decimal_places=0, max_digits=13),
        ),
    ]
|
from django.test import TestCase
from products.models import Location, Warehouse
class LocationTest(TestCase):
    """Tests for the Location model and its warehouse relationship."""

    def setUp(self):
        # Each test gets a fresh warehouse to attach locations to.
        self.warehouse = Warehouse.objects.create(name="Test")

    def test_add_location(self):
        """A created location keeps its name and parent warehouse."""
        location = Location.objects.create(
            name="Test",
            warehouse=self.warehouse,
        )
        self.assertEqual(location.name, "Test")
        self.assertEqual(location.warehouse, self.warehouse)

    def test_unicode(self):
        """str() renders the location as 'warehouse > location'."""
        location = Location.objects.create(
            name="Location",
            warehouse=self.warehouse,
        )
        # assertEquals was a deprecated alias (removed in Python 3.12).
        self.assertEqual(str(location), "Test > Location")
|
from django.contrib.auth.decorators import login_required
from django.contrib.admin import AdminSite
from django.contrib.auth.models import User
from django.shortcuts import redirect
from SputnikIDE.models import Project, Version
class MyAdminSite(AdminSite):
    """Admin site that injects custom bulk actions into every admin page."""

    def each_context(self, request):
        # Zero-argument super(): the file is Python 3 (print() used below).
        context = super().each_context(request)
        # (label, URL relative to the admin root) pairs for the template.
        context['special_actions'] = [
            ('Удалить все проекты', 'clear_all_projects/')
        ]
        return context
# Site-wide admin instance exposing projects, versions and users.
admin_site = MyAdminSite(name='MyAdmin')
admin_site.register(Project)
admin_site.register(Version)
admin_site.register(User)
def create_admin(username, password, email, first_name, last_name):
    """Create a superuser with the given details if the username is free.

    Prints a confirmation either way; never overwrites an existing user.
    (The redundant function-local `User` re-import that shadowed the
    module-level import was removed.)
    """
    if not User.objects.filter(username=username).exists():
        user = User.objects.create_superuser(username=username, email=email, password=password)
        user.first_name = first_name
        user.last_name = last_name
        user.save()
        print('Admin with username "{}" created.'.format(username))
    else:
        print('User with username "{}" is already created.'.format(username))
def create_user(username, password, email, first_name, last_name):
    """Create a regular user with the given details if the username is free.

    Prints a confirmation either way; never overwrites an existing user.
    (The redundant function-local `User` re-import that shadowed the
    module-level import was removed.)
    """
    if not User.objects.filter(username=username).exists():
        user = User.objects.create_user(username=username, email=email, password=password)
        user.first_name = first_name
        user.last_name = last_name
        user.save()
        print('User with username "{}" created.'.format(username))
    else:
        print('User with username "{}" is already created.'.format(username))
@login_required
def clear_all_projects_view(request):
    """Delete every project owned by a non-superuser, then return to the admin."""
    if request.user.is_superuser:
        # Per-object delete so model-level delete logic still runs.
        for owner in User.objects.filter(is_superuser=False):
            for project in Project.objects.filter(author=owner):
                project.delete()
    return redirect('/admin/')
def clear_all_users():
    """Delete all non-superuser accounts along with their projects."""
    for account in User.objects.filter(is_superuser=False):
        # Remove the user's projects first, then the user itself.
        for project in Project.objects.filter(author=account):
            project.delete()
        account.delete()
|
#################################################
# Bootstrapping to solve local vol by tenor
# Input: r_quotes, r_tenors, rf_quotes, rf_tenors, imp_vol_quotes, imp_vol_tenors, imp_vol_strikes
# Output: loc_vol_surface
#################################################
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from input_data_initialization import input_data_initialization
from strike_grid_discretization import StrikeGridsAllTenors
from tenor_market_data import TenorMarketData
from implied_vol_class import ImpliedVolatility
from compute_sum_sqr_vol_T import compute_sum_sqr_vol_T
from new_pillar_strike_extrapolation import NewPillarStrikeExtrapolation
from compute_maturity_grid import compute_maturity_grid
from compute_local_vol_init_guess import compute_local_vol_init_guess
from black_scholes_formulas import black_scholes_vanilla, black_scholes_vanilla_fwd_norm
from blk_sch_fwd_pde import blk_sch_fwd_pde
from local_vol_calibration import LocalVolCalibration
from initial_condition import InitialConditionFirstTenor, InitialConditionOtherTenors
from read_write_excel import print_to_excel
def loc_vol_tenor_bootstrapping():
    """Bootstrap a local-volatility surface tenor by tenor.

    For each implied-vol tenor: build the strike and maturity grids, price the
    quoted vanillas with a Black-Scholes forward PDE, then calibrate local
    vols so the LV forward PDE reproduces those prices; the solved price grid
    becomes the initial condition for the next tenor.

    Returns: (loc_vol, lv_pde_prices, bs_pde_prices, bs_closed_form_prices,
    imp_vol_tenors, log_moneyness) — per-tenor ndarrays.
    """
    ## Whole Set Data initialization
    S, r_para, rf_para, csc_para, imp_vol_tenors, imp_vol_strikes, imp_vol_quotes = input_data_initialization()
    nb_tenors = len(imp_vol_tenors)
    nb_strikes = imp_vol_strikes.shape[1]
    # NOTE(review): 'mid' is never used below, and '/' yields a float under
    # Python 3 — use '//' if this is ever needed as an index.
    mid = nb_strikes / 2
    strike_grid_info = StrikeGridsAllTenors(nb_tenors)
    loc_vol_all_tenors = np.zeros_like(imp_vol_quotes) # to store solved-out local vols
    price_lv_all_tenors = np.zeros_like(imp_vol_quotes) # to store LV fwd pde prices
    price_bs_all_tenors = np.zeros_like(imp_vol_quotes) # to store BS fwd pde prices
    price_cls_all_tenors = np.zeros_like(imp_vol_quotes) # to store BS analytical prices
    k_all_tenors = np.zeros_like(imp_vol_quotes) # to store log-moneyness of the input strikes
    print('################### Start Bootstrapping ###################\n')
    ## Bootstrapping on tenor to calibrate local vol
    for i in range(nb_tenors):
        print('Pillar: ', i)
        ## Pillar Data initialization
        T = imp_vol_tenors[i]
        if i == 0:
            T_prev = 0
        else:
            T_prev = imp_vol_tenors[i-1]
        tenor_mkt_data = TenorMarketData(S, r_para, rf_para, csc_para, T)
        K_inputs = imp_vol_strikes[i, :]
        # Log-moneyness relative to this tenor's forward.
        k_inputs = np.log(K_inputs / tenor_mkt_data.fwd)
        imp_vol_inputs = imp_vol_quotes[i, :]
        imp_vol_para = ImpliedVolatility(K_inputs, k_inputs, imp_vol_inputs)
        sum_sqr_vol_T = compute_sum_sqr_vol_T(imp_vol_quotes, imp_vol_tenors)
        ## Compute k_min, k_max, dk, Nk
        new_pillar_extrplt = NewPillarStrikeExtrapolation(tenor_mkt_data, imp_vol_para)
        k_min_extrplt, k_max_extrplt = new_pillar_extrplt.compute_extreme_strikes()
        k_min, k_max, dk, Nk = strike_grid_info.strike_grid_discretization(i, k_min_extrplt, k_max_extrplt,
                                                                           imp_vol_para, tenor_mkt_data, sum_sqr_vol_T[i])
        k_grids = np.linspace(k_min, k_max, Nk, endpoint=True)
        print('k_min: {}, k_max: {}, dk: {}, Nk: {}.'.format(k_min, k_max, dk, Nk))
        ## Compute t_min, t_max, dT, NT
        t_min, t_max, dt, NT = compute_maturity_grid(i, imp_vol_tenors)
        t_grids = np.linspace(t_min, t_max, NT, endpoint=True)
        if i == 0:
            # Two extra early time points for the first tenor (pairs with the
            # fully-implicit startup steps of the Rannacher scheme below).
            t_grids = np.insert(t_grids, (1, 2), (dt*0.5, dt*1.5))
            NT += 2
        print('t_min: {}, t_max: {}, dt: {}, NT: {}.'.format(t_min, t_max, dt, NT))
        ## Local vol initial guess
        loc_vol_guess = compute_local_vol_init_guess(k_inputs, imp_vol_inputs, T)
        print('LV Init Guess: {}'.format(loc_vol_guess))
        ## Define Rannacher scheme: Crank-Nicolson (theta = 0.5) everywhere,
        ## with four fully-implicit (theta = 1.0) startup steps at tenor 0.
        theta = np.repeat(0.5, NT+1)
        if i == 0:
            theta[1:5] = [1.0, 1.0, 1.0, 1.0]
        ## Compute BS fwd pde prices on 5 quotes
        prices_5quotes_bs = blk_sch_fwd_pde(i, k_min, k_max, k_grids, Nk, t_min, t_max, t_grids, theta, NT, tenor_mkt_data, imp_vol_para, T_prev)
        print('BS Fwd PDE Price: {}'.format(prices_5quotes_bs))
        ################
        # Closed-form BS prices used as a reference for the PDE outputs.
        prices_5quotes_bs_cls = black_scholes_vanilla_fwd_norm(1, imp_vol_para.x_inputs, tenor_mkt_data.T, tenor_mkt_data.r, imp_vol_para.value_inputs)
        print('BS Closing Price: {}'.format(prices_5quotes_bs_cls))
        ################
        ## Local vol pde initial condition
        if i == 0:
            init_cond_lv = InitialConditionFirstTenor()
        else:
            # Seed from the previous tenor's solved grid (k_prev / price_prev).
            init_cond_lv = InitialConditionOtherTenors(k_prev, price_prev, tenor_mkt_data.fwd)
        ############################## TEST ############################################
        if i == (nb_tenors - 1):
            # Diagnostic plots only: interpolated prices and the implied delta.
            price_interpolate = init_cond_lv.compute(k_grids)
            plt.plot(k_grids, price_interpolate)
            plt.title('Pillar {} Price from last pillar interpolate'.format(i))
            plt.show()
            delta_test = np.diff(price_interpolate) / np.diff(np.exp(k_grids))
            plt.plot(k_grids[1:], delta_test)
            plt.title('Delta from price interpolation at pillar {}'.format(i))
            plt.show()
        ############################## TEST END ########################################
        ## Calibrate local volatility
        # NOTE(review): the i == 3 branch is a leftover debug toggle — it
        # assigns the value 'debug' already has.
        debug = False
        if i == 3:
            debug = False
        lv_calibrator = LocalVolCalibration(k_min, k_max, k_grids, Nk, t_min, t_max, t_grids, theta, NT, tenor_mkt_data, imp_vol_para, init_cond_lv, prices_5quotes_bs, debug)
        loc_vol_solved, prices_5quotes_lv, price_grid_lv = lv_calibrator.calibration(loc_vol_guess)
        print('LV Fwd PDE Price: {}'.format(prices_5quotes_lv))
        print('Solved LV: {}\n'.format(loc_vol_solved))
        loc_vol_all_tenors[i,:] = loc_vol_solved
        price_lv_all_tenors[i,:] = prices_5quotes_lv
        price_bs_all_tenors[i,:] = prices_5quotes_bs
        price_cls_all_tenors[i,:] = prices_5quotes_bs_cls
        k_all_tenors[i,:] = k_inputs
        # Carry the solved grid forward as the next tenor's initial condition.
        k_prev = k_grids
        price_prev = price_grid_lv
        if i == 0:
            print_to_excel(k_prev, price_prev)
        ################################ TEST ###################################
        if i == (nb_tenors - 2):
            plt.plot(k_prev, price_prev, '.-')
            plt.title('Pillar {} Result'.format(i))
            plt.show()
            delta_test2 = np.diff(price_prev) / np.diff(np.exp(k_prev))
            plt.plot(k_prev[1:], delta_test2)
            plt.title('Delta from price interpolation at pillar {}'.format(i))
            plt.show()
        ############################### TEST END ################################
    return loc_vol_all_tenors, price_lv_all_tenors, price_bs_all_tenors, price_cls_all_tenors, imp_vol_tenors, k_all_tenors
if __name__ == '__main__':
    loc_vol_all_tenors, price_lv_all_tenors, price_bs_all_tenors, price_cls_all_tenors, imp_vol_tenors, k_all_tenors = loc_vol_tenor_bootstrapping()
    nb_tenors = len(imp_vol_tenors)
    nb_strikes = k_all_tenors.shape[1]
    # Side-by-side 3D views: local-vol surface (left) and price curves (right).
    fig = plt.figure(figsize=plt.figaspect(0.5))
    ax1 = fig.add_subplot(121, projection='3d')
    ax2 = fig.add_subplot(122, projection='3d')
    # NOTE(review): range(nb_tenors-1, 0, -1) never reaches i == 0, so the
    # first tenor is not plotted — confirm that is intended.
    for i in range(nb_tenors-1, 0, -1):
        xs = k_all_tenors[i,:]
        ys = np.repeat(imp_vol_tenors[i], nb_strikes)
        ax1.plot(xs, ys, loc_vol_all_tenors[i,:], '.-')
        ax1.set_xlabel('k')
        ax1.set_ylabel('T')
        ax1.set_zlabel('LV')
        ax1.title.set_text('Loc Vol')
        ax2.plot(xs, ys, price_bs_all_tenors[i,:], 'b.-')
        ax2.plot(xs, ys, price_lv_all_tenors[i,:], 'g.-')
        ax2.plot(xs, ys, price_cls_all_tenors[i,:], 'r.-')
        # BUG FIX: these labels belong to the price axes (ax2); previously they
        # were applied to ax1, overwriting its 'LV' z-label with 'Price'.
        ax2.set_xlabel('k')
        ax2.set_ylabel('T')
        ax2.set_zlabel('Price')
        ax2.title.set_text('Price: LV(green), BS(blue), Analytic(red)')
    plt.show()
|
#108303540 Feb/23/2021 18:59UTC+5.5 Shan_XD 546A - Soldier and Bananas PyPy 3 Accepted 93 ms 0 KB
def bananas_shortfall(k, n, w):
    """Money the soldier must borrow to buy w bananas priced k, 2k, ..., wk
    dollars when he has n dollars; never negative."""
    # Pure integer arithmetic: k*w*(w+1) is always even, and '//' avoids the
    # float round-trip of int(.../2), which loses precision for huge inputs.
    total_cost = k * w * (w + 1) // 2
    return max(total_cost - n, 0)


if __name__ == '__main__':
    # Input: a single line "k n w".
    k, n, w = map(int, input().split())
    print(bananas_shortfall(k, n, w))
|
import unittest
from GoMore import GoMore
#The following testclass is for the non firebase version of the program.
class TestGoMore(unittest.TestCase):
    """Shared fixture: every subclass starts from a GoMore pre-loaded with
    three rides (tokens appear to be origin, destination, date, seats —
    TODO confirm against GoMore.appendRide)."""
    def setUp(self):
        # A fresh GoMore instance per test keeps the ride list deterministic.
        self.myGoMore = GoMore()
        self.myGoMore.appendRide("Odense Copenhagen 2018-10-01 4".split())
        self.myGoMore.appendRide("Copenhagen Aarhus 2018-10-01 2".split())
        self.myGoMore.appendRide("Odense Copenhagen 2018-10-02 1".split())
        # Insert the three rides that make the base for our tests.
class TestCreateRide(TestGoMore):
    def test_create_ride(self):
        # Check if length of rides list is correct after appending a trip.
        # setUp already added 3 rides, so the counts continue at 4, 5, 6 —
        # appendRide evidently returns the new total.
        self.assertEqual(self.myGoMore.appendRide("Odense Copenhagen 2018-10-01 4".split()), 4)
        self.assertEqual(self.myGoMore.appendRide("Copenhagen Aarhus 2018-10-01 2".split()), 5)
        self.assertEqual(self.myGoMore.appendRide("Odense Copenhagen 2018-10-02 1".split()), 6)
#Testing the findRide function. Expected output should match the search string.
class TestFindRide(TestGoMore):
    def test_find_ride(self):
        # findRide takes origin/destination plus optional date-range and seat
        # arguments. NOTE: the expected strings below contain significant
        # trailing spaces — do not "clean them up" without changing findRide.
        self.assertEqual('Odense Copenhagen 2018-10-01 4',self.myGoMore.findRide(*"Odense Copenhagen 2018-10-01".split()))
        self.assertEqual('Odense Copenhagen 2018-10-01 4 Odense Copenhagen 2018-10-02 1 ',self.myGoMore.findRide(*"Odense Copenhagen 2018-10-01 2018-10-03".split()))
        self.assertEqual('Odense Copenhagen 2018-10-01 4 ',self.myGoMore.findRide(*"Odense Copenhagen 2018-10-01 2018-10-03 2".split()))
        # Create the return trip.
        self.myGoMore.createReturnTrip('2018-10-03')
        # Test if the return trip is working with the correct expected return date.
        self.assertEqual('Copenhagen Odense 2018-10-03 1',self.myGoMore.findRide(*"Copenhagen Odense".split()))
|
# Linear Discriminant Analysis (LDA)
# Importing libraries
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Importing dataset
# Wine dataset: 13 feature columns followed by the class-label column.
dataset = pd.read_csv("Wine.csv")
X = dataset.iloc[:, 0:13].values
Y = dataset.iloc[:, 13].values
# Split dataset into train and test set
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.20, random_state=0)
# Feature Scaling
# Fit the scaler on the training split only, then apply it to both splits.
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Apply LDA
# Supervised projection of the 13 features onto 2 discriminant components.
lda = LDA(n_components=2)
X_train = lda.fit_transform(X_train, Y_train)
X_test = lda.transform(X_test)
# NOTE(review): explained_variance is computed but never used or printed.
explained_variance = lda.explained_variance_ratio_
# Fitting Logistic Regression to Training set
classifier = LogisticRegression(
    random_state=0, solver='lbfgs', multi_class='multinomial')
classifier.fit(X_train, Y_train)
# Predicting Test set results
Y_pred = classifier.predict(X_test)
# Confusion Matrix
# NOTE(review): cm is built but never displayed; print or plot it to see results.
cm = confusion_matrix(Y_test, Y_pred)
|
#!/usr/bin/python3
#-*-coding:utf-8-*-
data_file_name_1 = 'data/1_Poisy-ParcDesGlaisins.txt'
data_file_name_2 = 'data/2_Piscine-Patinoire_Campus.txt'
# Both timetable file names kept together (currently unused below).
fichier = [data_file_name_1, data_file_name_2]
try:
    # Read both timetable files whole; they are parsed further below.
    with open(data_file_name_1, 'r') as f :
        content1 = f.read()
    with open(data_file_name_2, 'r') as g :
        content2 = g.read()
except OSError:
    # 'File not found' error message.
    # NOTE(review): execution continues after this message, so content1 /
    # content2 stay undefined and the parsing below raises NameError — the
    # message also never says which file was missing. Consider exiting here.
    print("File not found : ")
def dates2dic(dates):
    """Parse a timetable block into {stop_name: [time, ...]}.

    Each line of *dates* is "<stop> <t1> <t2> ...": the first token becomes
    the key and the remaining tokens the value list.
    """
    schedule = {}
    for stop_line in dates.split("\n"):
        tokens = stop_line.split(" ")
        schedule[tokens[0]] = tokens[1:]
    return schedule
# Each data file holds six blank-line-separated sections — judging by the
# variable names: weekday route, weekday outbound times, weekday return times,
# then the weekend/holiday equivalents. TODO confirm against the data files.
# NOTE(review): 'slited_content' looks like a typo for 'splitted_content'.
slited_content = content1.split("\n\n")
regular_path1 = slited_content[0]
regular_date_go1 = dates2dic(slited_content[1])
regular_date_back1 = dates2dic(slited_content[2])
we_holidays_path1 = slited_content[3]
we_holidays_date_go1 = dates2dic(slited_content[4])
we_holidays_date_back1 = dates2dic(slited_content[5])
# Same parsing repeated for the second line's file.
slited_content = content2.split("\n\n")
regular_path2 = slited_content[0]
regular_date_go2 = dates2dic(slited_content[1])
regular_date_back2 = dates2dic(slited_content[2])
we_holidays_path2 = slited_content[3]
we_holidays_date_go2 = dates2dic(slited_content[4])
we_holidays_date_back2 = dates2dic(slited_content[5])
# Aggregate the per-line dicts so downstream code can index by line number.
regular_date_go = [regular_date_go1, regular_date_go2]
regular_date_back=[regular_date_back1, regular_date_back2]
we_holidays_date_go = [we_holidays_date_go1, we_holidays_date_go2]
we_holidays_date_back = [we_holidays_date_back1, we_holidays_date_back2]
|
# Program to find the greatest number in a list
def find_greatest(values):
    """Return the largest element of *values* (must be non-empty)."""
    # Avoids shadowing the builtin max(), which the original did.
    greatest = values[0]
    # Compare every remaining element against the running maximum.
    for value in values[1:]:
        if value > greatest:
            greatest = value
    return greatest

arr = [1, 2, 3, 4, 5]
print(find_greatest(arr))
|
class Solution(object):
    def grayCode(self, n):
        """
        :type n: int
        :rtype: List[int]
        """
        # Zero bits: the sequence is just the single code word 0.
        if not n:
            return [0]
        # Binary-reflected construction: starting from the 1-bit sequence
        # [0, 1], mirror the current list and set the next-higher bit on
        # every mirrored element, so consecutive codes differ by one bit.
        codes = [0, 1]
        for bit in range(1, n):
            prefix = 1 << bit
            codes += [prefix | code for code in reversed(codes)]
        return codes
import math
import numpy
import random
import logging
import pprint
import time
from genepy import ga
from genepy import generate
from genepy import mutate
from genepy import crossover
_logger = logging.getLogger(__name__)
def _create_fitness(target):
def _fitness(genepool):
fitness = {}
for individual, genes in genepool.iteritems():
a_error = math.fabs(genes['a'] - target['a']) / target['a_range']
b_error = math.fabs(genes['b'] - target['b']) / target['b_range']
fitness[individual] = 1.0 - (a_error + b_error)
return fitness
return _fitness
def _fitness(target, genes):
    """Convenience wrapper: fitness of a single gene dict against *target*."""
    return _create_fitness(target)({'best': genes})['best']
def _run_search_with_kwargs(kwargs):
    """Run one GA search for a random 2-gene target; return (target, result).

    *kwargs* is forwarded to ga.search and is mutated in place ('batch' is
    forced on).
    """
    # just try to guess two numbers within some distance
    kwargs['batch'] = True
    population_size = 51
    # Per-gene GA configuration: how to generate, mutate and cross over.
    gene_parameters = {
        'a': {
            'generate': generate.sample(random.uniform, -10.0, 10.0),
            'mutate': mutate.gaussian(0.5),
            'crossover': crossover.sample_gaussian,
        },
        'b': {
            'generate': generate.sample(random.uniform, -100.0, 100.0),
            'mutate': mutate.gaussian(1.0),
            'crossover': crossover.sample_gaussian,
        }
    }
    # Random target inside the generation ranges; the *_range entries are the
    # full spans used to normalize errors in _create_fitness.
    target = {
        'a': numpy.random.uniform(-10.0, 10.0),
        'b': numpy.random.uniform(-100.0, 100.0),
        'a_range': 20.0,
        'b_range': 200.0
    }
    def _update_mutation(
            iterations,
            individuals,
            genepool,
            gene_properties,
            fitness,
            **kwargs):
        # Per-generation callback: re-derive each gene's mutation operator from
        # the current population spread (closes over gene_parameters).
        for gene in gene_properties.keys():
            gene_parameters[gene]['mutate'] = mutate.population_gaussian(
                individuals,
                genepool,
                gene)
    result = ga.search(
        population_size,
        gene_parameters,
        _create_fitness(target),
        ga.create_generation_callstack([
            ga.max_iteration_convergence,
            ga.best_ratio_convergence,
            _update_mutation,
        ]),
        **kwargs)
    return (target, result)
def test_search():
    """End-to-end GA smoke test: two kwarg profiles must both reach fitness >= 0.9."""
    full_kwargs = {
        'max_iterations': 50,
        'best_ratio_thresh': 0.001,
        'active_genes': ['a', 'b'],
        'mixing_ratio': 0.5,
        'num_replaces': 1,
    }
    target, result = _run_search_with_kwargs(full_kwargs)
    assert _fitness(target, result) >= 0.9
    # Second profile: no explicit active_genes, more iterations, heavier replacement.
    minimal_kwargs = {
        'max_iterations': 100,
        'best_ratio_thresh': 0.001,
        'mixing_ratio': 0.4,
        'num_replaces': 3,
    }
    target, result = _run_search_with_kwargs(minimal_kwargs)
    assert _fitness(target, result) >= 0.9
|
"""
大麦网的活动简单获取
"""
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver import ActionChains
import pandas as pd
URL = "https://search.damai.cn/search.html?keyword=&spm=a2oeg.home.searchtxt.dsearchbtn2"
# Accumulators for the scraped fields; collects() appends to and dumps these.
citys, titles, addresss, dates, details = [], [], [], [], []
options = webdriver.ChromeOptions()
# BUG FIX: Chrome command-line switches use "key=value" syntax; the previous
# "user-agent: ..." form is not a valid switch and was silently ignored.
options.add_argument(
    "--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
)
def collects():
    """Scrape every ".items" card on the current page into the module-level
    lists and export them to dmw.xlsx / dmw.csv.

    Control flow note: the while loop drains `es` with pop(); when the list is
    empty pop() raises IndexError, which the handler below treats as the
    normal exit — so the export effectively happens in the except branch.
    """
    try:
        es = driver.find_elements(by=By.CLASS_NAME, value="items")
        while True:
            e = es.pop()
            if e is not None:
                detail = e.find_element(by=By.TAG_NAME, value="a")
                link = detail.get_attribute(name="href")
                # Excel HYPERLINK formula keeps the export clickable.
                details.append(f'=HYPERLINK("{link}","{link}")')
                title = e.find_element(by=By.CLASS_NAME, value="items__txt")
                ss = str(title.text).split("\n")
                # First line is "<city> <title>"; lines 3 and 4 appear to be
                # venue and date — TODO confirm against the live page markup.
                a = ss[0].split(" ")
                citys.append(a[0])
                titles.append(a[1])
                addresss.append(ss[2])
                dates.append(ss[3])
                # print(len(ss), ss)
            else:
                break
        if len(citys) != 0:
            df = pd.DataFrame(
                data={"city": citys, "title": titles, "address": addresss, "dates": dates, "details": details}
            )
            df.to_excel("dmw.xlsx", index=False)
            df.to_csv("dmw.csv", index=False)
    except Exception as e:
        if str(e).find("pop from empty list") != -1:
            # nothing to do it's normal
            pass
        if len(citys) != 0:
            df = pd.DataFrame(
                data={"city": citys, "title": titles, "address": addresss, "dates": dates, "details": details}
            )
            df.to_excel("dmw.xlsx", index=False)
            df.to_csv("dmw.csv", index=False)
        print("出错了:", e)
# Launch Chrome and walk through every search-result page.
# NOTE(review): executable_path= is deprecated/removed in Selenium 4 — confirm
# the selenium version this script is pinned to.
driver = webdriver.Chrome(executable_path="chromedriver.exe", options=options)
driver.get(URL)
time.sleep(1)  # crude wait for the first page to render
collects()
while True:
    try:
        # The 5th ".number" element is assumed to be the next-page control —
        # TODO confirm against the current pagination markup.
        cur_page = driver.find_elements(by=By.CLASS_NAME, value="number")[4]
        print("抓取页码:", cur_page.text)
        driver.find_elements(by=By.CLASS_NAME, value="number")[4].click()
        time.sleep(1)
        collects()
        # 158 is the hard-coded last page of the search results.
        if int(cur_page.text) >= 158:
            print("抓取结束页码:", cur_page.text)
            break
    except Exception as e:
        # Any scraping error (stale element, missing control, ...) ends the run.
        print(e)
        break
driver.quit()
|
def zero(s):
    """Consume a leading '0' from *s* and return the rest; None otherwise."""
    if s[0] == '0':
        return s[1:]


def one(s):
    """Consume a leading '1' from *s* and return the rest; None otherwise."""
    if s[0] == '1':
        return s[1:]


def rule_sequence(s, rules):
    """Thread *s* through *rules* in order, short-circuiting on failure.

    Each rule maps a string to a string or None; a None result (or running
    out of rules) ends the chain and is returned as-is.
    """
    # 'is None' instead of '== None'; the commented-out iterative duplicate
    # of this function has been removed.
    if s is None or not rules:
        return s
    return rule_sequence(rules[0](s), rules[1:])


print(rule_sequence('010', [zero, one, zero]))
from math import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
from tkinter import *
import matplotlib.pyplot as plt
import numpy
root = Tk()
root.title('Полет тела')
g = 9.80665  # standard gravity, m/s^2
dx = 0.01    # x-axis sampling step for the trajectory curves
# Tk variables for the three bodies' inputs (speed, angle, x0, y0) and the
# computed results (range xmax*, flight time t*, max height hmax*).
# NOTE(review): analysis() reads the Entry widgets directly via .get() and
# writes results with Label.config, so these variables are effectively unused.
v = IntVar()
an = IntVar()
x0 = IntVar()
y0 = IntVar()
v2 = IntVar()
an2 = IntVar()
x02 = IntVar()
y02 = IntVar()
v3 = IntVar()
an3 = IntVar()
x03 = IntVar()
y03 = IntVar()
xmax = IntVar()
xmax2 = IntVar()
xmax3 = IntVar()
t = IntVar()
t2 = IntVar()
t3 = IntVar()
hmax = IntVar()
hmax2 = IntVar()
hmax3 = IntVar()
def func(x0, v, y0, an):
    """Trajectory height at x0 for body 1: launch speed v, launch angle an
    (radians), initial height y0; uses the global gravity constant g."""
    height = x0 * tan(an) - (1 / (2 * v ** 2)) * ((g * x0 ** 2) / (cos(an) ** 2)) + y0
    # Collapse an exact zero to a canonical float 0.0.
    return 0.0 if height == 0 else height
def func2(x02, v2, y02, an2):
    """Trajectory height at x02 for body 2 (same formula as func, body 1)."""
    height = x02 * tan(an2) - (1 / (2 * v2 ** 2)) * ((g * x02 ** 2) / (cos(an2) ** 2)) + y02
    # Collapse an exact zero to a canonical float 0.0.
    return 0.0 if height == 0 else height
def func3(x03, v3, y03, an3):
    """Trajectory height at x03 for body 3 (same formula as func, body 1)."""
    height = x03 * tan(an3) - (1 / (2 * v3 ** 2)) * ((g * x03 ** 2) / (cos(an3) ** 2)) + y03
    # Collapse an exact zero to a canonical float 0.0.
    return 0.0 if height == 0 else height
def analysis():
    """Read the twelve entry fields, compute range / max height / flight time
    for each of the three bodies, show them in the result labels, and redraw
    the three trajectories."""
    v = int(entry1.get())
    an = int(entry2.get())
    x0 = int(entry3.get())
    y0 = int(entry4.get())
    v2 = int(entry5.get())
    an2 = int(entry6.get())
    x02 = int(entry7.get())
    y02 = int(entry8.get())
    v3 = int(entry9.get())
    an3 = int(entry10.get())
    x03 = int(entry11.get())
    y03 = int(entry12.get())
    # Angles are entered in whole degrees; math functions need radians.
    an = radians(an)
    an2 = radians(an2)
    an3 = radians(an3)
    # Standard level-ground projectile formulas: t = 2 v sin(a) / g,
    # range = v^2 sin(2a) / g, h = v^2 sin^2(a) / (2g).
    # NOTE(review): these ignore the x0/y0 offsets entered above — confirm
    # whether starting height should affect range/time here.
    t = 2 * v * sin(an) / g
    t2 = 2 * v2 * sin(an2) / g
    t3 = 2 * v3 * sin(an3) / g
    xmax = (v**2/g) * 2 * sin(an) * cos(an)
    xmax2 = (v2**2/g) * 2 * sin(an2) * cos(an2)
    xmax3 = (v3**2/g) * 2 * sin(an3) * cos(an3)
    hmax = (v**2 / (2 * g)) * sin(an)**2
    hmax2 = (v2**2 / (2 * g)) * sin(an2)**2
    hmax3 = (v3**2 / (2 * g)) * sin(an3)**2
    # Publish rounded results into the output labels.
    dp1o.config(text=round(xmax,2))
    dp2o.config(text=round(xmax2,2))
    dp3o.config(text=round(xmax3,2))
    hm1o.config(text=round(hmax,2))
    hm2o.config(text=round(hmax2,2))
    hm3o.config(text=round(hmax3,2))
    timo.config(text=round(t,2))
    tim2o.config(text=round(t2,2))
    tim3o.config(text=round(t3,2))
    # Sample each trajectory from its start x to its computed range.
    xlist = numpy.arange(x0, xmax, dx)
    xlist2 = numpy.arange(x02, xmax2, dx)
    xlist3 = numpy.arange(x03, xmax3, dx)
    ylist = [func(x0, v, y0, an) for x0 in xlist]
    ylist2 = [func2(x02, v2, y02, an2) for x02 in xlist2]
    ylist3 = [func3(x03, v3, y03, an3) for x03 in xlist3]
    def plot():
        # Redraw all three curves on the shared axes.
        # NOTE(review): canvas.draw() is never called here; the figure may
        # not refresh until Tk repacks the widget — confirm on screen.
        a.cla()
        a.plot(xlist, ylist, color='red')
        a.plot(xlist2, ylist2, color='green')
        a.plot(xlist3, ylist3, color='blue')
        canvas.get_tk_widget().pack(side=TOP)
        canvas._tkcanvas.pack(side=TOP)
    plot()
# --- Input panels: one LabelFrame of four labelled entries per body ---------
topp = Label(root)
topp.pack(side='top')
first = LabelFrame(topp, text='Данные для 1 графика(красный)')
first.pack(side='left')
second = Label(first, text='Начальная скорость')
second.pack()
entry1 = Entry(first, width=10, textvariable=v)
entry1.pack()
third = Label(first, text='Угол выстрела')
third.pack()
entry2 = Entry(first, width=10, textvariable=an)
entry2.pack()
fourth = Label(first, text='Начальная координата x')
fourth.pack()
entry3 = Entry(first, width=10, textvariable=x0)
entry3.pack()
fifth = Label(first, text='Начальная координата y')
fifth.pack()
entry4 = Entry(first, width=10, textvariable=y0)
entry4.pack()
secondb = LabelFrame(topp, text='Данные для 2 графика(зеленый)')
secondb.pack(side='left')
second = Label(secondb, text='Начальная скорость')
second.pack()
entry5 = Entry(secondb, width=10, textvariable=v2)
entry5.pack()
third = Label(secondb, text='Угол выстрела')
third.pack()
entry6 = Entry(secondb, width=10, textvariable=an2)
entry6.pack()
fourth = Label(secondb, text='Начальная координата x')
fourth.pack()
entry7 = Entry(secondb, width=10, textvariable=x02)
entry7.pack()
fifth = Label(secondb, text='Начальная координата y')
fifth.pack()
entry8 = Entry(secondb, width=10, textvariable=y02)
entry8.pack()
thirdb = LabelFrame(topp, text='Данные для 3 графика(синий)')
thirdb.pack(side='left')
second = Label(thirdb, text='Начальная скорость')
second.pack()
entry9 = Entry(thirdb, width=10, textvariable=v3)
entry9.pack()
third = Label(thirdb, text='Угол выстрела')
third.pack()
entry10 = Entry(thirdb, width=10, textvariable=an3)
entry10.pack()
fourth = Label(thirdb, text='Начальная координата x')
fourth.pack()
entry11 = Entry(thirdb, width=10, textvariable=x03)
entry11.pack()
fifth = Label(thirdb, text='Начальная координата y')
fifth.pack()
entry12 = Entry(thirdb, width=10, textvariable=y03)
entry12.pack()
# --- Matplotlib canvas embedded in the Tk window ----------------------------
f = Figure()
canvas = FigureCanvasTkAgg(f)
# NOTE(review): this bare attribute access is a no-op — it was almost
# certainly meant to be canvas.draw().
FigureCanvasTkAgg.draw
toolbar = NavigationToolbar2Tk(canvas, root)
toolbar.update()
a = f.add_subplot(111)
# --- Results panel ----------------------------------------------------------
# NOTE(review): 'first' is rebound here, discarding the reference to the
# earlier input frame (the widget itself stays packed).
first = LabelFrame(topp, text='Расчет')
first.pack(side='bottom')
button = Button(first, text="Сгенерировать", command=analysis)
button.pack(side='top')
firstr = LabelFrame(first, text='Данные первого графика(красного)')
firstr.pack(side='left')
dp1 = Label(firstr, text='Дальность полета 1 тела')
dp1.pack()
dp1o = Label(firstr, text='')
dp1o.pack()
hm1 = Label(firstr, text='Максимальная высота полета 1 тела')
hm1.pack()
hm1o = Label(firstr, text='')
hm1o.pack()
tim = Label(firstr, text='Время полета 1 тела')
tim.pack()
timo = Label(firstr, text='')
timo.pack()
secondr = LabelFrame(first, text='Данные второго графика(зеленого)')
secondr.pack(side='left')
dp2 = Label(secondr, text='Дальность полета 2 тела')
dp2.pack()
dp2o = Label(secondr, text='')
dp2o.pack()
hm2 = Label(secondr, text='Максимальная высота полета 2 тела')
hm2.pack()
hm2o = Label(secondr, text='')
hm2o.pack()
tim2 = Label(secondr, text='Время полета 2 тела')
tim2.pack()
tim2o = Label(secondr, text='')
tim2o.pack()
thirdr = LabelFrame(first, text='Данные третьего графика(синего)')
thirdr.pack(side='left')
dp3 = Label(thirdr, text='Дальность полета 3 тела')
dp3.pack()
dp3o = Label(thirdr, text='')
dp3o.pack()
hm3 = Label(thirdr, text='Максимальная высота полета 3 тела')
hm3.pack()
hm3o = Label(thirdr, text='')
hm3o.pack()
tim3 = Label(thirdr, text='Время полета 3 тела')
tim3.pack()
tim3o = Label(thirdr, text='')
tim3o.pack()
root.mainloop()
|
from tkinter import *

# Minimal two-row credential form: labels in column 0, entries in column 1.
root = Tk()

name_label = Label(root, text="Name")
password_label = Label(root, text="Password")
name_entry = Entry(root)
password_entry = Entry(root)

# Right-align the labels against their entry fields.
name_label.grid(row=0, sticky=E)
password_label.grid(row=1, sticky=E)
name_entry.grid(row=0, column=1)
password_entry.grid(row=1, column=1)

root.mainloop()
#!/usr/bin/python
# Katacoda to Instruqt converter
# v. 0.0.3
# Support for notes in first challenge, fix NodePort
#
# v. 0.0.2
# Support for bulk migrate with git submodule
#
# v. 0.0.1
# First draft
#
import os
import json
import yaml
import re
import shutil
# Instruqt will order the YAML and sanitize the YAML content (e.g. assignments)
# so there's no need to order the dict nor optimize the escaped block for non-YAML
track_d={}  # accumulated Instruqt track definition; reused and dumped per course
visualEditor=False  # flipped on when any course uses the editor-terminal layout
#Reads the homepage pathway all in the intruqt-template dir
with open('instruqt-template/homepage-pathway-all.json', 'r') as hfile:
    hdata=hfile.read()
courses = json.loads(hdata)
for course in courses['courses']:
    print(course["external_link"])
    # The last URL segment doubles as the scenario/track slug.
    pathway = course["external_link"].rsplit('/', 1)[-1]
    print("Creating or updating Scenario : " + pathway)
    if not os.path.exists("instruqt"):
        os.mkdir("instruqt")
    instruqtDir = "instruqt/" + pathway
    if not os.path.exists(instruqtDir):
        os.mkdir(instruqtDir)
        print("Directory " , instruqtDir , " Created ")
    else:
        print("Directory " , instruqtDir , " already exists")
    # NOTE(review): pathway_id is computed but never used afterwards.
    pathway_id=''
    if "pathway_id" in course:
        pathway_id=course['pathway_id']
    else:
        pathway_id=pathway
    title=course['title']
    track_d["title"] = title
    track_d["slug"] = pathway
    track_d["type"] = "track"
    try:
        with open(pathway + '/index.json', 'r') as mycourse:
            course_data=mycourse.read()
    except FileNotFoundError:
        print("Path " + instruqtDir + '/index.json' + " not found, skipping")
        continue
    course_json = json.loads(course_data)
    track_d["icon"] = "https://storage.googleapis.com/instruqt-frontend/img/tracks/default.png"
    track_d["tags"] = ["rhel"]
    track_d["owner"] = "rhel"
    track_d["developers"] = [ "rhel-tmm@redhat.com"]
    track_d["private"] = False
    track_d["published"] = True
    track_d["skipping_enabled"] = False
    #handle time migrations
    # Katacoda stores e.g. "15 minutes" or "10-15 minutes"; keep the upper
    # bound and convert to seconds, defaulting to 300.
    # NOTE(review): 'time' here is a plain variable, not the time module.
    if "time" in course_json:
        duration=re.match('(.*?)(-(.*?))? minutes', course_json["time"] )
        if duration is not None:
            time=duration.group(1)
            if duration.group(3) is not None:
                time=duration.group(3)
            time = int(time) * 60
        else:
            print("Time not found " + course_json["time"])
            time=300
    else:
        time=300
    #handle level migrations
    # NOTE(review): 'level' is initialized to "beginner" and never read from
    # course_json, so the "advanced" branch below is dead and difficulty always
    # ends up "basic" — the condition probably meant to inspect the course's
    # own level field. Confirm intent before changing.
    difficulty="intermediate"
    level="beginner"
    if "difficulty" in course_json:
        difficulty = course_json["difficulty"].lower()
    else:
        difficulty = "basic"
    if level == "advanced":
        difficulty = "expert"
    elif level == "easy" or level == "beginner" or level == "basic":
        level = "beginner"
        difficulty = "basic"
    track_d["level"] = level
    l_challenges=[]
    d_challenges={}
    src=r'instruqt-template/config.yml'
    dst=instruqtDir + '/' + 'config.yml'
    shutil.copyfile(src, dst)
    #handle background and foreground scripts
    if os.path.exists(instruqtDir + '/track_scripts'):
        shutil.rmtree(instruqtDir + '/track_scripts')
    shutil.copytree('instruqt-template/track_scripts', instruqtDir + '/track_scripts')
    if os.path.isfile(pathway + "/background.sh"):
        os.system('cp -fr ' + pathway + '/background.sh ' + instruqtDir + '/track_scripts/setup-rhel' )
    if os.path.isfile(pathway + "/foreground.sh"):
        # Append the foreground script (minus its shebang line) to setup-rhel.
        os.system('tail --lines=+2 ' + pathway + '/foreground.sh >> ' + instruqtDir + '/track_scripts/setup-rhel' )
    if not os.path.exists(instruqtDir + '/assets'):
        os.mkdir(instruqtDir + '/assets')
    if not os.path.exists(instruqtDir + '/scripts'):
        os.mkdir(instruqtDir + '/scripts')
    introText=course_json["details"]["intro"]["text"]
    with open(pathway + '/' + introText, 'r') as myintro:
        intro_data=myintro.read()
    # Rewrite relative asset links to the public Katacoda asset host.
    intro_md=re.sub(r'\(.\/assets',r'(https://katacoda.com/rhel-labs/assets',intro_data)
    track_d["description"] = intro_md
    #copy all the assets to the scripts folder
    try:
        os.system('cp -fr ' + pathway + '/assets/* ' + instruqtDir + '/scripts/' )
        print('cp -fr ' + pathway + '/assets/* ' + instruqtDir + '/scripts/')
    except KeyError:
        # NOTE(review): nothing in this try block raises KeyError, so this
        # handler looks dead — confirm what it was meant to guard.
        pass
    #Handle terminal
    if course_json["environment"]["uilayout"] == "editor-terminal":
        visualEditor=True
    l_steps = course_json["details"]["steps"]
    l_size = len(l_steps)
    # Spread the course's time budget evenly across its steps.
    time = int(int(time) / l_size)
    isFirstStep=True
    for step in l_steps:
        slug = step["text"]
        slug = re.sub(r'\.md$', '', slug )
        if not os.path.exists(instruqtDir + '/' + slug):
            os.mkdir(instruqtDir + '/' + slug)
            print("Directory " , instruqtDir + '/' + slug , " Created ")
        else:
            print("Directory " , instruqtDir + '/' + slug , " already exists")
        d_challenges["slug"] = slug
        d_challenges["title"] = step["title"]
        d_challenges["type"] = "challenge"
        with open(pathway + '/' + step["text"], 'r') as myassign:
            assign_data=myassign.read()
        # Strip Katacoda execute/copy/open markup and rewrite asset links.
        md=re.sub(r'`{1,3}(.+?)`{1,3}\{\{execute\}\}', r'```\n\1\n```', assign_data )
        md=re.sub(r'\{\{copy\}\}',r'', md)
        md=re.sub(r'\{\{open\}\}',r'', md)
        md=re.sub(r'\(\.\.\/\.\.\/assets',r'(https://katacoda.com/rhel-labs/assets',md)
        md=re.sub(r'\(\/openshift\/assets',r'(https://katacoda.com/rhel-labs/assets',md)
        d_challenges["assignment"] = md
        # Only the first challenge carries the course intro as a note.
        if isFirstStep:
            l_notes = [{"type": "text", "contents": intro_md}]
            d_challenges["notes"] = l_notes
            isFirstStep=False
        else:
            if "notes" in d_challenges:
                del d_challenges["notes"]
        #Enable terminal and web console tab
        l_tabs = [{"title": "Terminal", "type": "terminal","hostname":"rhel"},
                  {"title": "RHEL Web Console", "type" : "service", "hostname" : "rhel", "path" : "/", "port" : 9090}]
        #Enable code editor
        if visualEditor:
            l_tabs.append({"title": "Visual Editor", "type": "code","hostname":"rhel", "path":"/root"})
        d_challenges["tabs"] = l_tabs
        d_challenges["difficulty"]= difficulty
        d_challenges["timelimit"]= time
        # d_challenges is reused every iteration, so append a snapshot copy.
        dictionary_copy = d_challenges. copy()
        l_challenges.append(dictionary_copy);
    track_d["challenges"] = l_challenges
    #write out yml
    with open(instruqtDir + '/track.yml', 'w') as yaml_file:
        yaml.dump(track_d, yaml_file, default_flow_style=False)
|
from threading import Thread
import time
import json
from django.conf import settings
from crawler.celery_tasks import downloader
from utils import Downloader
import requests
class CrawlerManager:
    """Celery-based crawl coordinator: keeps up to queue_size downloader tasks
    in flight, refilling from the remote outlinks API."""
    def __init__(self, seeds=None, qs=10):
        """
        Initiate the crawler with the seeds provided. If the seeds are not provided via kwargs,
        they are taken from the DB using the outlinks API.
        :param seeds: the seeds, if provided are used to crawl first.
        :param qs: The queue size or the number concurrent downloaders (or celery tasks).
        """
        self.seeds = seeds
        # (task, url) pairs currently in flight; pruned by the Scheduler thread.
        self.workers_queue = []
        self.queue_size = qs
    def get_outlinks_from_remote(self, qs=0):
        """Fetch up to *qs* fresh URLs from the outlinks API.

        The API is assumed to live on the configured DB host — TODO confirm.
        """
        outlinks_server_url = "http://{}/get-outlinks/?qs={}".format(settings.DATABASES.get('default').get('HOST'), qs)
        response = requests.get(outlinks_server_url)
        seeds = json.loads(response.content).get('links')
        return seeds
    def crawl(self):
        """Dispatch downloader tasks forever, topping the queue up whenever
        more than five worker slots are free. Never returns."""
        urls = self.seeds
        if not urls:
            urls = self.get_outlinks_from_remote(qs=self.queue_size)
        if urls:
            for url in urls:
                task = downloader.delay(url)
                self.workers_queue.append((task, url))
        # Background thread that removes finished tasks from workers_queue.
        thread = Scheduler(self.workers_queue)
        thread.start()
        while True:
            print('number of active workers {}'.format(len(self.workers_queue)))
            free_workers_count = self.queue_size - len(self.workers_queue)
            if free_workers_count > 5:
                urls = self.get_outlinks_from_remote(qs=free_workers_count)
                if len(urls) <= 0:
                    # No fresh links yet; back off before polling again.
                    time.sleep(5)
                    continue
                for url in urls:
                    try:
                        print('adding {}'.format(url))
                    except:
                        # Best-effort logging only (e.g. undecodable URLs);
                        # the task is dispatched regardless.
                        pass
                    task = downloader.delay(url)
                    self.workers_queue.append((task, url))
            else:
                time.sleep(.1)
class Scheduler(Thread):
    """Background reaper: continuously removes finished celery tasks from the
    (task, url) queue shared with CrawlerManager."""
    def __init__(self, workers_queue):
        super(Scheduler, self).__init__()
        # Shared with CrawlerManager — must always be mutated in place.
        self.workers_queue = workers_queue
    def run(self):
        while True:
            # BUG FIX: the original deleted entries while enumerating the same
            # list, which skips the element following every removal. Rebuild
            # the list instead, via slice assignment so the shared reference
            # held by CrawlerManager stays valid.
            self.workers_queue[:] = [
                item for item in self.workers_queue if not item[0].ready()
            ]
            # Small pause so the reaper does not spin at 100% CPU.
            time.sleep(0.1)
# celery -A crawler.celery_tasks worker --loglevel=info
class TCrawlerManager:
    """Thread-based crawl coordinator: keeps up to queue_size Downloader
    threads alive, refilling from the remote outlinks API."""
    def __init__(self, seeds=None, qs=10, using_tor=True):
        # Optional initial URLs; fetched from the API when absent.
        self.seeds = seeds
        self.workers_queue = []
        self.queue_size = qs
        # Whether Downloader threads should route through Tor.
        self.using_tor = using_tor
    def get_outlinks_from_remote(self, qs=0):
        """Fetch up to *qs* fresh URLs from the outlinks API (assumed to live
        on the configured DB host — TODO confirm)."""
        outlinks_server_url = "http://{}/get-outlinks/?qs={}".format(settings.DATABASES.get('default').get('HOST'), qs)
        response = requests.get(outlinks_server_url)
        seeds = json.loads(response.content).get('links')
        return seeds
    def crawl(self):
        """Start Downloader threads for the seeds, then loop forever replacing
        dead workers with fresh URLs. Never returns."""
        thread_pool = []
        urls = self.seeds
        if not urls:
            urls = self.get_outlinks_from_remote(qs=self.queue_size)
        if urls:
            for url in urls:
                d = Downloader(url, using_tor=self.using_tor)
                thread_pool.append(d)
                d.start()
        while True:
            # BUG FIX: the original deleted entries while enumerating the same
            # list, skipping the element after each removal; rebuild instead.
            thread_pool = [worker for worker in thread_pool if worker.is_alive()]
            free_workers_count = self.queue_size - len(thread_pool)
            if free_workers_count >= 3:
                urls = self.get_outlinks_from_remote(qs=free_workers_count)
                for url in urls:
                    # BUG FIX: propagate using_tor on refill; it was silently
                    # dropped before, so refilled workers ignored the setting.
                    d = Downloader(url, using_tor=self.using_tor)
                    d.start()
                    thread_pool.append(d)
|
# Points Django (< 3.2) at the app's AppConfig class.
# NOTE(review): default_app_config is deprecated since Django 3.2 and ignored
# on newer versions — confirm the project's Django version before removing.
default_app_config = 'tenant_report.apps.TenantReportConfig'
# The string below is an inert coverage-report snapshot kept as a TODO list
# for missing unit tests; it is never read at runtime.
'''
#TODO: UNIT TEST
tenant_report/__init__.py 2 0 100%
tenant_report/apps.py 3 0 100%
tenant_report/tests.py 1 0 100%
tenant_report/urls.py 15 0 100%
tenant_report/views/__init__.py 0 0 100%
tenant_report/views/csv/__init__.py 0 0 100%
tenant_report/views/csv/report_01_view.py 58 37 36% 42, 46-153
tenant_report/views/csv/report_02_view.py 75 54 28% 42, 46-178
tenant_report/views/csv/report_03_view.py 54 33 39% 42, 46-105
tenant_report/views/csv/report_04_view.py 54 33 39% 42, 46-121
tenant_report/views/csv/report_05_view.py 37 16 57% 42, 46-81
tenant_report/views/csv/report_06_view.py 37 16 57% 42, 46-78
tenant_report/views/csv/report_07_view.py 47 26 45% 42, 48-99
tenant_report/views/csv/report_08_view.py 42 21 50% 42, 47-81
tenant_report/views/csv/report_09_view.py 42 21 50% 42, 48-125
tenant_report/views/csv/report_10_view.py 48 27 44% 42, 46-106
tenant_report/views/csv/report_11_view.py 53 32 40% 42, 46-121
'''
|
import re
class CppParser:
    """Heuristic, regex-based analyzer that collects statistics about a C++ file.

    Counts classes, inherited classes, constructors, overloaded operators and
    object declarations.  All matching is line-oriented regex work, so results
    are approximate rather than a true parse.
    """

    def __init__(self):
        # Aggregate counters.
        self.num_of_class = 0
        self.num_of_inherited_class = 0
        self.num_of_constructors = 0
        self.num_of_operator_over = 0
        self.num_of_objects = 0
        # Collected names/details backing the counters above.
        self.class_names = list()
        self.inherited_class_names = list()
        self.constructor_types = list()
        self.operator_overload = list()
        # Maps class name -> comma-joined names of declared objects.
        self.object_names = {}

    def comment_removal(self, complete_text):
        """Return *complete_text* with //, /* */ comments and string literals blanked.

        String literals are blanked too, so their contents cannot produce
        false matches in the later regex passes.
        """
        def substituter(matching_text):
            string = matching_text.group(0)
            if string.startswith('/') or string.startswith('"'):
                return " "
            else:
                return string
        pattern = re.compile(r'"(\\.|[^\\"])*"|//.*?$|/\*.*?\*/', re.DOTALL | re.MULTILINE)
        no_comment_file = re.sub(pattern, substituter, complete_text)
        return no_comment_file

    def remove_alias(self, complete_text):
        """Expand simple ``#define NAME value`` macros and strip the directives."""
        def substituter(matching_text):
            string = matching_text.group(0)
            if string.startswith('#define'):
                return " "
            else:
                return string
        pattern = r'(#define .*?)[\n$]'
        defines = re.findall(pattern, complete_text)
        no_comment_file = re.sub(pattern, substituter, complete_text)
        new_file = no_comment_file
        # Substitute every macro occurrence with its replacement text.
        # (The stray debug print of each split directive was removed.)
        for define in defines:
            parts = define.split(' ')
            # parts[1] is the macro name, the remainder its replacement text.
            macro_pattern = r'\b' + parts[1] + r'\b'
            replacement = ' '.join(parts[2:])
            new_file = re.sub(macro_pattern, replacement, new_file)
        return new_file

    def find_classes(self, complete_text):
        """Count class definitions and record their names in ``class_names``."""
        text_lines = complete_text.split('\n')
        for line in text_lines:
            line = line + '\n'
            class_found_flag = False
            in_class_found_flag = False
            class_finder = r'\bclass\b\s+([A-Za-z_]\w*)\s*[\n\{]'
            class_names = re.findall(class_finder, line)
            inherited_class_finder = r'\bclass\b\s+([A-Za-z_]\w*)\s*\:\s*((?:public|private|protected)?\s+(?:[A-Za-z_]\w*)\s*\,?\s*)+[\n\{]'
            inherited_class_names = re.findall(inherited_class_finder, line)
            class_names = list(filter(None, class_names))
            if len(class_names) > 0:
                class_found_flag = True
            if len(inherited_class_names) > 0:
                in_class_found_flag = True
            # A line counts once whether it declares a plain or derived class.
            if class_found_flag or in_class_found_flag:
                self.num_of_class = self.num_of_class + 1
            for class_name in class_names:
                if class_name not in self.class_names:
                    self.class_names.append(class_name)

    # class subclass_name : access_mode base_class_name
    def find_inherited_classes(self, complete_text):
        """Count derived-class definitions and record (name, bases) tuples."""
        text_lines = complete_text.split('\n')
        for line in text_lines:
            line = line + '\n'
            inherited_class_finder = r'\bclass\b\s+([A-Za-z_]\w*)\s*\:\s*((?:public|private|protected)?\s+(?:[A-Za-z_]\w*)\s*\,?\s*)+[\n\{]'
            inherited_class_names = re.findall(inherited_class_finder, line)
            if len(inherited_class_names) > 0:
                self.num_of_inherited_class = self.num_of_inherited_class + 1
            for class_name in inherited_class_names:
                if class_name not in self.inherited_class_names:
                    self.inherited_class_names.append(class_name)
                    # The derived class itself is also a known class name.
                    self.class_names.append(class_name[0])

    # MyClass::MyClass() { }
    # MyClass(T x) { xxx = x; }
    # MyClass(double r = 1.0, string c = "red") : radius(r), color(c) { }
    def find_constructors(self, complete_text):
        """Count constructor definitions of already-discovered classes.

        Requires ``find_classes``/``find_inherited_classes`` to have run so
        that ``class_names`` is populated.
        """
        text_lines = complete_text.split('\n')
        for line in text_lines:
            line = line + '\n'
            constructors_finder = r'(?:[^~]|^)\b([A-Za-z_][\w\:\s]*)\s*\(([^)]*?)\)?\s*[\n\{\:]'
            constructors_list = re.findall(constructors_finder, line)
            constructor_found = False
            for definition in constructors_list:
                belonging_class = definition[0].split('::')
                length = len(belonging_class)
                belonging_class = [x.strip() for x in belonging_class]
                if belonging_class[-1] in self.class_names:
                    if length == 1:
                        # In-class definition: bare "MyClass(...)".
                        constructor_found = True
                        self.constructor_types.append(definition)
                    elif belonging_class[-1] == belonging_class[-2]:
                        # Out-of-class definition: "MyClass::MyClass(...)".
                        constructor_found = True
                        self.constructor_types.append(definition)
            if constructor_found:
                self.num_of_constructors = self.num_of_constructors + 1

    # returnType operator symbol (arguments){}
    def find_overloaded_operators(self, complete_text):
        """Count ``operator<sym>`` overload definitions and record them."""
        text_lines = complete_text.split('\n')
        for line in text_lines:
            line = line + '\n'
            operators = r'(\+=|-=|\*=|/=|%=|\^=|&=|\|=|<<|>>|>>=|<<=|==|!=|<=|>=|<=>|&&|\|\||\+\+|--|\,|->\*|\\->|\(\s*\)|\[\s*\]|\+|-|\*|/|%|\^|&|\||~|!|=|<|>)'
            overloaded_operators_finder = r'\boperator\b\s*' + operators + r'\s*([^\{\;]*)?[\n\{]'
            overloaded_operators = re.findall(overloaded_operators_finder, line)
            if len(overloaded_operators) > 0:
                self.num_of_operator_over = self.num_of_operator_over + 1
            for operator in overloaded_operators:
                self.operator_overload.append(operator)

    def find_objects_declaration(self, complete_text):
        """Count lines declaring objects whose type is a discovered class."""
        text_lines = complete_text.split('\n')
        for line in text_lines:
            line = line + '\n'
            objects_finder = r'([A-Za-z_]\w*)\b\s*([\s\*]*[A-Za-z_\,][A-Za-z0-9_\.\,\[\]\s\(\)]*[^\n<>]*?);'
            class_object_list = re.findall(objects_finder, line)
            object_found = False
            for objects in class_object_list:
                if objects[0] in self.class_names:
                    if objects[0] not in self.object_names:
                        self.object_names[objects[0]] = ''
                    self.object_names[objects[0]] += (objects[1]+',')
                    object_found = True
            if object_found:
                self.num_of_objects = self.num_of_objects + 1

    def load_file_and_parse(self, file_name):
        """Read *file_name*, run every analysis pass and print the summary.

        Raises SystemExit(1) after printing a message when the file is missing.
        """
        try:
            with open(file_name, 'r') as cpp_file:
                cleaned_file = self.comment_removal(cpp_file.read())
                cleaned_file = self.remove_alias(cleaned_file)
                self.find_classes(cleaned_file)
                self.find_inherited_classes(cleaned_file)
                self.find_constructors(cleaned_file)
                self.find_overloaded_operators(cleaned_file)
                self.find_objects_declaration(cleaned_file)
                self.print_statistics()
        except FileNotFoundError:
            print('File not found!')
            raise SystemExit(1)

    def print_statistics(self):
        """Print the aggregate counters."""
        print('Classes : ', self.num_of_class)
        print('Inherited classes : ', self.num_of_inherited_class)
        print('Constructors : ', self.num_of_constructors)
        print('Operator Overloading : ', self.num_of_operator_over)
        print('Objects Declaration : ', self.num_of_objects)

    def print_class_stats(self):
        """Print every discovered class name."""
        print('Classes : \n', self.class_names)

    def print_inherited_class_stats(self):
        """Print every discovered (derived class, base list) tuple."""
        print('Inherited Classes : \n', self.inherited_class_names)

    def print_constructor_stats(self):
        """Print every recorded constructor match."""
        print('Constructors : ', )
        for constructor in self.constructor_types:
            print(constructor)

    def print_operator_overload_stats(self):
        """Print every recorded operator-overload match."""
        print('Overloaded Operators : ')
        for operators in self.operator_overload:
            print(operators)

    def print_objects_stats(self):
        """Print object names grouped by class (trailing comma stripped)."""
        print('Objects : ')
        for objects in self.object_names:
            print(objects, ':', self.object_names[objects][:-1])
if __name__ == '__main__':
    # Interactive driver: parse one user-supplied C++ file and dump all stats.
    class_instance = CppParser()
    user_file_name = input('Enter the file name (should be present in input/) : ')
    class_instance.load_file_and_parse(user_file_name)
    class_instance.print_class_stats()
    class_instance.print_inherited_class_stats()
    class_instance.print_constructor_stats()
    class_instance.print_operator_overload_stats()
    class_instance.print_objects_stats()
|
from flask import Flask, request, url_for,render_template,flash,redirect
# NOTE(review): PyMongo is imported but never used below; MongoClient is.
from flask_pymongo import PyMongo,MongoClient
import bcrypt
app = Flask(__name__)
# Connect to the local MongoDB and use the "register" database's
# "users" collection for account storage.
client = MongoClient("mongodb://localhost:27017/")
db = client["register"]
users = db["users"]
def verifypw(user, password):
    """Return True if *password* matches the stored bcrypt hash for *user*.

    Returns False when the user does not exist or the password is wrong.
    """
    # find_one avoids the IndexError the old cursor[0] lookup raised for
    # unknown usernames, and fetches at most one document.
    record = users.find_one({'Username': user})
    if record is None:
        return False
    # checkpw re-hashes with the stored salt and compares in constant time,
    # equivalent to (but safer than) the previous hashpw(...) == pw check.
    return bcrypt.checkpw(password.encode('utf-8'), record['Password'])
def verifyuser(username):
    """Return True if *username* is not yet taken in the users collection."""
    # Cursor.count() was deprecated in PyMongo 3.7 and removed in 4.0;
    # count_documents is the supported server-side count.
    return users.count_documents({'Username': username}) == 0
@app.route('/',methods=['GET','POST'])
def login():
    """Render the login form; on POST, authenticate and redirect home."""
    if request.method != 'POST':
        # Plain GET: just show the empty form.
        return render_template('login.html')
    submitted_user = request.form['username']
    submitted_password = request.form['password']
    if verifypw(submitted_user, submitted_password):
        return redirect(url_for('home'))
    # Credentials rejected: re-render the form with the error banner.
    return render_template('login.html', error="Invalid Username or Password")
@app.route('/register',methods=['GET','POST'])
def reg():
    """Render the registration form; on POST, create the new account."""
    if request.method != 'POST':
        # Plain GET: just show the empty form.
        return render_template('register.html')
    new_user = request.form['username']
    new_mail = request.form['mail']
    new_password = request.form['password']
    if not verifyuser(new_user):
        # Name collision: show the form again with an error message.
        return render_template('register.html', error="Username already exist")
    # Store only the salted bcrypt digest, never the plaintext password.
    digest = bcrypt.hashpw(new_password.encode('utf-8'), bcrypt.gensalt())
    users.insert_one({'Username': new_user, 'Email': new_mail, 'Password': digest})
    return redirect(url_for('login'))
@app.route('/home',methods = ['GET','POST'])
def home():
    """Landing page shown after a successful login."""
    return render_template("home.html")
if __name__ == "__main__":
    # Development server only; debug mode must be disabled in production.
    app.run(debug = True)
#-*-coding:utf-8-*-
# Import the turtle module and initialize the drawing screen.
import turtle as t
screen = t.Screen()
screen.title("极客中心")
screen.bgcolor('white')
screen.setup(800,600)
screen.tracer()
# Class that draws the CPC party flag.
class PartyFlag(object):
    """Draws a party flag (red field with yellow hammer and sickle) with turtle.

    (x, y) is the starting corner of the flag; *extend* is a uniform scale
    factor applied to every drawn length.
    """
    # Initialize the flag attributes.
    def __init__(self,x,y,extend):
        self.x = x
        self.y = y
        self.extend = extend
        # One turtle per drawing pass: flag field, sickle, and axe.
        self.frame = t.Turtle()
        self.sickle = t.Turtle()
        self.ax = t.Turtle()
    # Method that draws the red flag field.
    def drawFrame(self):
        """Draw and fill the 300x200 (scaled) rectangular red field."""
        self.frame.up()
        self.frame.goto(self.x,self.y)
        self.frame.pencolor("red")
        self.frame.shapesize(2)
        # NOTE(review): turtle speed() clamps values outside 0-10; 100 is
        # treated as 0 (fastest) -- presumably intentional.
        self.frame.speed(100)
        self.frame.fillcolor("red")
        self.frame.begin_fill()
        self.frame.down()
        self.frame.forward(300*self.extend)
        self.frame.right(90)
        self.frame.forward(200*self.extend)
        self.frame.right(90)
        self.frame.forward(300*self.extend)
        self.frame.right(90)
        self.frame.forward(200*self.extend)
        self.frame.end_fill()
        self.frame.hideturtle()
    # Function that draws the axe.
    def drawAx(self):
        """Draw and fill the yellow axe, positioned relative to the field turtle."""
        # Start point is offset from where the frame turtle finished drawing.
        aX = self.frame.xcor()+25*self.extend
        aY = self.frame.ycor()-45*self.extend
        self.ax.up()
        self.ax.goto(aX,aY)
        self.ax.pencolor("yellow")
        self.ax.pensize(2)
        self.ax.speed(100)
        self.ax.down()
        self.ax.left(45)
        self.ax.fillcolor("yellow")
        self.ax.begin_fill()
        # Trace the axe outline; all lengths scale with self.extend.
        self.ax.forward(30*self.extend)
        self.ax.right(90)
        self.ax.circle(10*self.extend,90)
        self.ax.right(90)
        self.ax.forward(10*self.extend)
        self.ax.right(90)
        self.ax.forward(15*self.extend)
        self.ax.left(90)
        self.ax.forward(70*self.extend)
        self.ax.right(90)
        self.ax.forward(15*self.extend)
        self.ax.right(90)
        self.ax.forward(70*self.extend)
        self.ax.left(90)
        self.ax.forward(10*self.extend)
        self.ax.right(90)
        self.ax.forward(20*self.extend)
        self.ax.end_fill()
        self.ax.hideturtle()
    # Function that draws the sickle.
    def drawSickle(self):
        """Draw and fill the yellow sickle, positioned relative to the field turtle."""
        sX = self.frame.xcor()+30*self.extend
        sY = self.frame.ycor()-69*self.extend
        self.sickle.up()
        self.sickle.goto(sX,sY)
        self.sickle.pencolor("yellow")
        self.sickle.pensize(2)
        self.sickle.speed(100)
        self.sickle.fillcolor("yellow")
        self.sickle.begin_fill()
        self.sickle.right(45)
        self.sickle.down()
        # Trace the sickle outline as a sequence of arcs (negative radius
        # bends the arc to the opposite side).
        self.sickle.circle(40*self.extend,90)
        self.sickle.left(25)
        self.sickle.circle(45*self.extend,90)
        self.sickle.right(160)
        self.sickle.circle(-45*self.extend,130)
        self.sickle.right(10)
        self.sickle.circle(-48*self.extend,75)
        self.sickle.left(160)
        self.sickle.circle(-7*self.extend,340)
        self.sickle.left(180)
        self.sickle.circle(-48*self.extend,15)
        self.sickle.right(75)
        self.sickle.forward(11*self.extend)
        self.sickle.end_fill()
        self.sickle.hideturtle()
    # Entry point that draws the whole flag.
    def draw(self):
        """Draw the complete flag: field first, then axe and sickle on top."""
        self.drawFrame()
        self.drawAx()
        self.drawSickle()
# Program entry point.
if __name__ == '__main__':
    # Instantiate the flag object; arguments: x, y, scale factor.
    partyFlag = PartyFlag(20,140,1)
    # Invoke the flag object's main drawing method.
    partyFlag.draw()
    # Keep the window open until the user closes it.
    screen.mainloop()
from collections import Counter
def matching_strings(strings, queries):
    """For each query string, report how many times it occurs in *strings*."""
    occurrences = Counter(strings)
    # Counter returns 0 for missing keys, so no explicit default is needed.
    return [occurrences[query] for query in queries]
|
from activation_functions import sigmoid_function, tanh_function, linear_function,\
LReLU_function, ReLU_function, elliot_function, symmetric_elliot_function,softmax_function,dropout,add_bias
from neuralnet import NeuralNet
import numpy as np
import cPickle
# Load the pickled MNIST dataset (train / validation / test splits).
# NOTE(review): the path ends in .gz but is unpickled directly -- if the
# file is actually gzip-compressed this needs gzip.open; verify the file.
with open('/Users/yanshen/Downloads/mnist.gz','rb') as f:
    train_set,valid_set,test_set=cPickle.load(f)
train_set_x,train_set_y=train_set
valid_set_x,valid_set_y=valid_set
train_set_x=train_set_x[:]
train_set_y=train_set_y[:]
# Convert the integer labels to a one-hot ground-truth matrix (vectorized).
groundtruthlabel=np.zeros((train_set_y.shape[0],10))
groundtruthlabel[np.arange(train_set_y.shape[0]),train_set_y]=1
settings = {
    # Required settings
    "n_inputs" : 28*28,
    "n_outputs" : 10,
    "n_hidden_layers" : 1, # Number of hidden layers (only 0 or 1 supported)
    "n_hiddens" : 200, # Number of nodes in each hidden layer
    "activation_functions" : [tanh_function,softmax_function],
    # Optional settings
    # Glorot/Xavier uniform init range. 6.0 forces float division: the
    # previous integer 6/(28*28+200) evaluated to 0 under Python 2, which
    # initialized every weight to zero.
    "weights_low" : -np.sqrt(6.0/(28*28+200)),
    "weights_high" : +np.sqrt(6.0/(28*28+200)),
    "save_trained_network" : False,
    "input_layer_dropout" : 0.0,
    "hidden_layer_dropout" : 0.,
    "batch_size" : train_set_x.shape[0], # must greater than 0
}
# initialize the neural network
network = NeuralNet( settings )
# Train with backpropagation until the error limit is reached.
# NOTE(review): "brop" is presumably backprop -- method name defined in the
# project's neuralnet module.
network.brop(
    train_set_x,
    train_set_y,
    groundtruthlabel,
    ERROR_LIMIT = 1e-6,
    learning_rate = 0.01,
    momentum_factor = 0.,
)
network.save_to_file( "trained_configuration.pkl" )
|
# This function prints above list of lists in a sudoku format.
def print_sudoku(sudoku):
print("-------------------------------")
for i_row in range(len(sudoku)):
if i_row % 3 == 0 and i_row != 0:
print("|---------|---------|---------|")
for i_col in range(len(sudoku[i_row])):
if i_col % 3 == 0 and i_col != 0:
print("|", end='')
if i_col == 0:
print("|", end="")
if i_col == 8:
print(" " + str(sudoku[i_row][i_col]), end=" |\n")
else:
print(" " + str(sudoku[i_row][i_col]) + " ", end='')
print("-------------------------------")
# Returns the tuple of (row, column) of the first empty position it finds.
def find_empty(sudoku):
    """Return (row, col) of the first cell equal to 0, or None if the grid is full."""
    for row_idx, row in enumerate(sudoku):
        for col_idx, cell in enumerate(row):
            if cell == 0:
                return row_idx, col_idx
    return None
# Checks if the number to be validated is currently valid at the given position.
def check_validity(sudoku, num_to_be_validated, position):
    """Return True if placing the number at *position* violates no sudoku rule.

    The cell at *position* itself is ignored, so re-validating an already
    placed value succeeds.
    """
    row_pos, col_pos = position
    # Row: the value may not appear elsewhere in the same row.
    if any(sudoku[row_pos][c] == num_to_be_validated and c != col_pos
           for c in range(len(sudoku[0]))):
        return False
    # Column: the value may not appear elsewhere in the same column.
    if any(sudoku[r][col_pos] == num_to_be_validated and r != row_pos
           for r in range(len(sudoku))):
        return False
    # Box: the value may not appear elsewhere in the enclosing 3x3 box.
    box_top = (row_pos // 3) * 3
    box_left = (col_pos // 3) * 3
    for r in range(box_top, box_top + 3):
        for c in range(box_left, box_left + 3):
            if sudoku[r][c] == num_to_be_validated and (r, c) != position:
                return False
    return True
# Solves sudoku by using above methods
def solve_sudoku(sudoku):
    """Fill the grid in place via backtracking; return True once solved."""
    empty_cell = find_empty(sudoku)
    if empty_cell is None:
        # No empty cell left: the grid is complete.
        return True
    row, col = empty_cell
    for candidate in range(1, 10):
        if not check_validity(sudoku, candidate, (row, col)):
            continue
        sudoku[row][col] = candidate
        # Recurse on the remaining empty cells.
        if solve_sudoku(sudoku):
            return True
        # Dead end: undo the placement and try the next candidate.
        sudoku[row][col] = 0
    return False
|
from discrete_maze.maze import ExploreTask
from loggy import Log
from schedules import ExploreCreatorSchedule as ECS
from schedules import GridMazeSchedule as GMS
import random
import numpy as np
from tqdm import tqdm
# Run this from project root as:
# python -m utils.maze_terminate_test
class TotallyRandomAgent:
    """Ignores the observation and samples uniformly from the action space."""

    def __init__(self, env):
        self.env = env

    def action(self, obs):
        # The observation is irrelevant; delegate straight to the env.
        return self.env.action_space.sample()
class RandomValidAgent:
    """Picks uniformly among the currently-valid actions.

    Entry 0 of the observation vector is skipped; entry i (i >= 1) being
    truthy means action i-1 is available.
    """

    def __init__(self, env):
        # The environment is not needed; parameter kept for interface parity.
        pass

    def action(self, obs):
        valid = [slot - 1 for slot in range(1, obs.shape[0]) if obs[slot]]
        return random.choice(valid)
class RandomValidGridAgent:
    """Picks uniformly among the valid grid moves (actions 0-3)."""

    def __init__(self, env):
        # The environment is not needed; parameter kept for interface parity.
        pass

    def action(self, obs):
        available = [direction for direction in range(4) if obs[direction]]
        return random.choice(available)
class RandomNotVisitedAgent:
    # Just tries to avoid observations it has seen
    # Now broken
    #
    # NOTE(review): marked broken by the author -- it indexes obs as a 3-D
    # array (obs[-1, i, 0]) while the other agents treat obs as 1-D, so the
    # expected observation layout has presumably changed. Left as-is.
    def __init__(self, env):
        # History of node-feature vectors already visited.
        self.seen = []
        # Used when every reachable node has already been seen.
        self.fallback = RandomValidAgent(env)
    def action(self, obs):
        # Slots with a truthy first feature are reachable neighbors.
        nonempty = [i for i in range(1, obs.shape[1]) if obs[-1, i, 0]]
        # Keep only neighbors whose feature vector was never seen before.
        candidates = [i for i in nonempty if all([not np.array_equal(obs[-1, i, :], vec) for vec in self.seen])]
        if len(candidates) == 0:
            return self.fallback.action(obs)
        choice = random.choice(candidates) - 1
        self.seen.append(obs[-1, choice + 1, :])
        return choice
def test_random_actions(creation_schedule, Chooser, samples, log = None):
    """Run *samples* environment steps using agents built by *Chooser*.

    Repeatedly creates environments from *creation_schedule*, steps each one
    with a fresh agent until the episode ends, and feeds outcomes back into
    the schedule so it can grow the maze size. Optionally records
    per-episode data to *log*. Returns once *samples* total steps are taken.
    """
    print("Starting schedule at size", creation_schedule.current_size)
    # NOTE(review): n_truncated is never updated or read below -- dead variable?
    n_truncated = 0
    sample_i = 0
    progress = tqdm(total = samples)
    while True:
        env = creation_schedule.new_env()
        obs = env.reset()
        chooser = Chooser(env)
        done = False
        rewards = []
        while not done:
            sample_i += 1
            progress.update(1)
            # Stop mid-episode as soon as the global step budget is reached.
            if sample_i == samples:
                progress.close()
                return
            choice = chooser.action(obs)
            obs, reward, done, info = env.step(choice)
            rewards.append(reward)
        # Tell the schedule how the episode ended so it can adapt.
        creation_schedule.update(done, info)
        if log:
            logging_data = {
                'simulation steps': sample_i,
                'average reward': sum(rewards) # just the sum since we are only doing a batch of one
            }
            creation_schedule.add_logging_data(logging_data)
            log.step(logging_data)
        if creation_schedule.allow_change():
            print("Changing schedule to size %d at sample=%d" % (creation_schedule.current_size, sample_i))
# print("Testing for random valid agent on grid:")
# log = Log("valid-random-actions-grid")
# test_random_actions(GMS(), RandomValidGridAgent, 500000, log)
# log.close()
# print("\nTesting for totally random actions:")
# log = Log("totally-random")
# test_random_actions(ECS(is_tree = False, place_agent_far_from_dest = False), TotallyRandomAgent, 500000)
# log.close()
print("\nTesting for random valid actions:")
log = Log("valid-random")
# NOTE(review): unlike the commented-out runs above, `log` is created but
# not passed to test_random_actions, so no per-step logging happens here --
# confirm whether that is intentional.
test_random_actions(ECS(is_tree = False, place_agent_far_from_dest = False, id_size = 1), RandomValidAgent, 500000)
log.close()
# print("Testing for choosing random unseen nodes + placing far from destination:")
# log = Log("random-unseen-actions-tree")
# test_random_actions(ECS(is_tree = False, place_agent_far_from_dest = True), RandomNotVisitedAgent, 500000, log)
# log.close()
# print("Testing for choosing random unseen nodes + placing far from destination + tree:")
# log = Log("random-unseen-actions-tree")
# test_random_actions(ECS(is_tree = True, place_agent_far_from_dest = True), RandomNotVisitedAgent, 500000, log)
# log.close()
# print("Testing for random valid actions + placing far from destination + tree:")
# log = Log("valid-random-actions-tree")
# test_random_actions(ECS(is_tree = True, place_agent_far_from_dest = True), RandomValidAgent, 500000, log)
# log.close()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/emiliano/Qt/Test1/mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """UI scaffolding generated by pyuic5 from mainwindow.ui.

    Do not edit by hand: regenerate from the .ui file instead, since (per
    the header above) any manual change is overwritten on the next run.
    """
    def setupUi(self, MainWindow):
        """Create and lay out all widgets, menus and bars on *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(400, 300)
        # Central widget holding a form layout with the graphics view and buttons.
        self.centralWidget = QtWidgets.QWidget(MainWindow)
        self.centralWidget.setObjectName("centralWidget")
        self.formLayoutWidget = QtWidgets.QWidget(self.centralWidget)
        self.formLayoutWidget.setGeometry(QtCore.QRect(9, 9, 371, 211))
        self.formLayoutWidget.setObjectName("formLayoutWidget")
        self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget)
        self.formLayout.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
        self.formLayout.setContentsMargins(11, 11, 11, 11)
        self.formLayout.setSpacing(6)
        self.formLayout.setObjectName("formLayout")
        # "Reiniciar" (restart) and "Salir" (quit) push buttons.
        self.reiniciar = QtWidgets.QPushButton(self.formLayoutWidget)
        self.reiniciar.setObjectName("reiniciar")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.reiniciar)
        self.salir = QtWidgets.QPushButton(self.formLayoutWidget)
        self.salir.setObjectName("salir")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.salir)
        # Graphics view for the application's drawing area.
        self.grafico = QtWidgets.QGraphicsView(self.formLayoutWidget)
        self.grafico.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.grafico.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.grafico.setLineWidth(5)
        self.grafico.setMidLineWidth(5)
        self.grafico.setObjectName("grafico")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.grafico)
        MainWindow.setCentralWidget(self.centralWidget)
        # Menu bar with a single "Archivo" (File) menu.
        self.menuBar = QtWidgets.QMenuBar(MainWindow)
        self.menuBar.setGeometry(QtCore.QRect(0, 0, 400, 27))
        self.menuBar.setObjectName("menuBar")
        self.menuArchivo = QtWidgets.QMenu(self.menuBar)
        self.menuArchivo.setObjectName("menuArchivo")
        MainWindow.setMenuBar(self.menuBar)
        self.mainToolBar = QtWidgets.QToolBar(MainWindow)
        self.mainToolBar.setObjectName("mainToolBar")
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
        self.statusBar = QtWidgets.QStatusBar(MainWindow)
        self.statusBar.setObjectName("statusBar")
        MainWindow.setStatusBar(self.statusBar)
        self.actionSalir = QtWidgets.QAction(MainWindow)
        self.actionSalir.setObjectName("actionSalir")
        self.menuArchivo.addAction(self.actionSalir)
        self.menuBar.addAction(self.menuArchivo.menuAction())
        # Apply translatable texts, then auto-connect slots by object name.
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (hook for Qt's translation system)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.reiniciar.setText(_translate("MainWindow", "Reiniciar"))
        self.salir.setText(_translate("MainWindow", "Salir"))
        self.menuArchivo.setTitle(_translate("MainWindow", "Archivo"))
        self.actionSalir.setText(_translate("MainWindow", "Salir"))
# if __name__ == "__main__":
# import sys
# app = QtWidgets.QApplication(sys.argv)
# MainWindow = QtWidgets.QMainWindow()
# ui = Ui_MainWindow()
# ui.setupUi(MainWindow)
# MainWindow.show()
# sys.exit(app.exec_())
|
###############################################################################
# Copyright (c) 2015-2017, Lawrence Livermore National Security, LLC.
#
# Produced at the Lawrence Livermore National Laboratory
#
# LLNL-CODE-716457
#
# All rights reserved.
#
# This file is part of Ascent.
#
# For details, see: http://ascent.readthedocs.io/.
#
# Please also read ascent/LICENSE
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the disclaimer below.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer (as noted below) in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the LLNS/LLNL nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY,
# LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
###############################################################################
#
# file: ascent_tutorial_python_extract_histogram.py
#
###############################################################################
import numpy as np
from mpi4py import MPI
# Obtain an mpi4py communicator from the Fortran handle Ascent publishes.
comm = MPI.Comm.f2py(ascent_mpi_comm_id())
# get this MPI task's published blueprint data
mesh_data = ascent_data().child(0)
# fetch the numpy array for the energy field values
e_vals = mesh_data["fields/energy/values"]
# find the data extents of the energy field using mpi
# first get local extents
e_min, e_max = e_vals.min(), e_vals.max()
# declare vars for reduce results
e_min_all = np.zeros(1)
e_max_all = np.zeros(1)
# reduce to get global extents; the uppercase Allreduce requires
# buffer-like send objects, so wrap the local scalars in 1-element arrays
comm.Allreduce(np.array([e_min]), e_min_all, op=MPI.MIN)
comm.Allreduce(np.array([e_max]), e_max_all, op=MPI.MAX)
# compute bin edges (50 by default) on the global extents; pass scalar
# endpoints so linspace yields a 1-D edge array (array endpoints would
# produce a (50, 1) matrix that np.histogram cannot use as bins)
bins = np.linspace(e_min_all[0], e_max_all[0])
# get histogram counts for ALL local data (previously only e_vals[0],
# i.e. a single value, was histogrammed)
hist, bin_edges = np.histogram(e_vals, bins = bins)
# declare var for reduce results
hist_all = np.zeros_like(hist)
# sum histogram counts with MPI to get final histogram
comm.Allreduce(hist, hist_all, op=MPI.SUM)
# print result on mpi task 0
if comm.Get_rank() == 0:
    print("\nEnergy extents: {} {}\n".format(e_min_all[0], e_max_all[0]))
    print("Histogram of Energy:\n")
    print("Counts:")
    print(hist_all)
    print("\nBin Edges:")
    print(bin_edges)
    print("")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.