text stringlengths 38 1.54M |
|---|
import os

# Resolve project directories relative to this file's location.
CODE_DIR = os.path.dirname(__file__)
ROOT_DIR = os.path.dirname(CODE_DIR)
DATA_DIR = os.path.join(ROOT_DIR, 'data')

# Qdrant collection holding the startup vectors.
COLLECTION_NAME = "startups"

# Connection settings, overridable via environment variables.
QDRANT_HOST = os.environ.get("QDRANT_HOST", "localhost")
# BUG FIX: os.environ.get returns a *string* when the variable is set, but the
# default was an int, so the port's type depended on the environment.
# Normalise to int in both cases.
QDRANT_PORT = int(os.environ.get("QDRANT_PORT", 6333))
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from predictor_dl_model.pipeline import main_filter_si_region_bucket
from pyspark.sql.types import IntegerType
from test_base import TestBase
from data import test_set
# Baohua Cao optimized.
# Baohua Cao optimized.
class TestMainFilterSiRegionBucket(TestBase):
    """Unit tests for the DataFrame transforms in
    main_filter_si_region_bucket.py: drop_region(), modify_ipl() and
    assign_new_bucket_id()."""

    def _factdata_df(self, rows):
        # Helper: build a factdata DataFrame with the standard column set.
        return self.hive_context.createDataFrame(rows, test_set.factdata_columns)

    def _assert_frames_equal(self, df_actual, df_expected):
        # Helper: compare two DataFrames restricted to the factdata columns.
        columns = test_set.factdata_columns
        self.assertTrue(self.compare_dfs(
            df_actual.select(columns), df_expected.select(columns)))

    def test_drop_region(self):
        """tests drop_region() in main_filter_si_region_bucket.py"""
        df_tested = self._factdata_df(test_set.factdata_tested)
        df_expected = self._factdata_df(test_set.factdata_expected_drop_region)
        df = main_filter_si_region_bucket.drop_region(df_tested)
        self._assert_frames_equal(df, df_expected)

    def test_modify_ipl(self):
        """tests modify_ipl() in main_filter_si_region_bucket.py"""
        mapping_df = self.hive_context.createDataFrame(
            test_set.region_mapping_tested,
            test_set.region_mapping_columns_renamed
        )
        df_tested = self._factdata_df(test_set.factdata_expected_drop_region)
        df_expected = self._factdata_df(test_set.factdata_expected_region_mapped)
        df = main_filter_si_region_bucket.modify_ipl(df_tested, mapping_df)
        self._assert_frames_equal(df, df_expected)

    def test_assign_new_bucket_id(self):
        """tests assign_new_bucket_id() in main_filter_si_region_bucket.py"""
        df_tested = self._factdata_df(test_set.factdata_new_bucket_tested)
        df_expected = self._factdata_df(test_set.factdata_new_bucket_expected)
        df = main_filter_si_region_bucket.assign_new_bucket_id(
            df_tested, test_set.new_bucket_size)
        df = df.withColumn('bucket_id', df.bucket_id.cast(IntegerType()))
        # BUG FIX: removed a leftover debug loop that collect()ed the whole
        # frame to the driver and printed every row on each run.
        self._assert_frames_equal(df, df_expected)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
|
import os
import urllib
from mongomock import MongoClient
def get_database():
    """Open the ``lunni_test`` MongoDB database described by the file the
    ``LUNNICONFIG`` environment variable points at.

    The config file holds ``key=value`` lines; the ``uri`` entry is the
    connection URI.

    :raises RuntimeError: if ``LUNNICONFIG`` is not set.
    :return: the ``lunni_test`` database handle, or ``None`` implicitly when
        no ``uri`` entry is found.
    """
    from urllib.parse import quote_plus  # BUG FIX: py3 moved quote_plus here

    config_path = os.environ.get('LUNNICONFIG')
    if not config_path:
        # BUG FIX: raising a plain string is a TypeError in Python 3;
        # raise a real exception instead.
        raise RuntimeError("MISS CONFIGURATION")
    with open(config_path) as f:
        for line in f:
            # Strip the trailing newline; split on the FIRST '=' only so
            # values containing '=' (e.g. query strings) survive intact
            # (the original split('=') silently truncated them).
            key, _, value = line[:-1].partition('=')
            if key == 'uri':
                # NOTE(review): quoting the whole URI also escapes '://';
                # presumably the config stores a bare host/credential --
                # TODO confirm against an actual config file.
                uri_path = quote_plus(value)
                client = MongoClient(uri_path)
                return client.lunni_test
"""create post table
Revision ID: bdcf5521b1c1
Revises: b44300fa6522
Create Date: 2021-01-24 21:47:59.239258
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bdcf5521b1c1'        # this migration's Alembic revision id
down_revision = 'b44300fa6522'   # parent revision in the migration chain
branch_labels = None             # no named branches
depends_on = None                # no cross-branch dependencies
def upgrade():
    """Create the ``posts`` table."""
    columns = [
        sa.Column('id', sa.Integer, primary_key=True, unique=True),
        sa.Column('title', sa.String(50), unique=True),
        sa.Column('body', sa.String(1000)),
        sa.Column('owner_id', sa.Integer),
        sa.Column('is_active', sa.Boolean),
        sa.Column('created_date', sa.DateTime),
    ]
    op.create_table('posts', *columns)
def downgrade():
    # Intentionally a no-op: this migration is not reversible, so the
    # 'posts' table is left in place on downgrade.
    pass
|
import socket
import threading
import logging
import datetime
import sys
# Log format: timestamp, thread name, thread id, message.
FORMAT = "%(asctime)s %(threadName)s %(thread)d %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO)
class ChatServer:
    """A small multi-threaded TCP chat server.

    One thread accepts connections; one thread per client receives messages
    and broadcasts them to every other connected client.
    """

    def __init__(self, ip='0.0.0.0', port=8000):
        self.addr = (ip, port)
        self.sock = socket.socket()
        # remote address -> (client socket, user name)
        self.clients = {}

    def start(self):
        """Bind, listen, and spawn the accept loop thread."""
        self.sock.bind(self.addr)
        self.sock.listen()  # service is up
        threading.Thread(target=self.accept, name='迎接进程').start()

    def accept(self):
        # Runs on a single dedicated thread.
        while True:
            s, raddr = self.sock.accept()  # blocks
            logging.info('\n来了个新家伙\n')
            threading.Thread(target=self.recv, name='接收进程', args=(s, raddr)).start()

    def recv(self, sock, addr):
        '''Ask the client for an id, then receive and broadcast messages forever.'''
        sock.send('请输入你的id\n '.encode())
        name = sock.recv(1024).decode()
        self.clients[addr] = (sock, name)
        while True:  # one such loop per client thread
            try:
                data = sock.recv(1024)  # blocks; bytes
                # BUG FIX: an orderly shutdown yields b''.  Without this check
                # the loop spun forever broadcasting empty messages.
                if not data:
                    raise ConnectionAbortedError
                text = data.decode()
                msg = '\n==================================\n\\\\\\from:{0} {1}///\n||{2}\t||\n==================================\n'.format(
                    self.clients[addr][1],
                    datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S"),
                    text).encode()
                # Broadcast to everyone except the sender.
                sendlist = [entry[0] for entry in self.clients.values()]
                sendlist.remove(sock)
                for sr in sendlist:
                    sr.send(msg)
            except ConnectionAbortedError:
                sb = '有人消失了'
                logging.info(sb)
                # BUG FIX: sock.getpeername() raises OSError on a dead socket;
                # use the addr we were handed at connect time instead.
                self.clients.pop(addr, None)
                for s in self.clients.values():
                    s[0].send(sb.encode())
                break
            except ConnectionResetError:
                sb = 'someone was left'
                logging.info(sb)
                # BUG FIX: same getpeername() issue as above.
                self.clients.pop(addr, None)
                for s in self.clients.values():
                    s[0].send(sb.encode())
                break

    def stop(self):
        """Close every client socket, then the listening socket."""
        for s in self.clients.values():
            s[0].close()
        self.sock.close()
import os  # BUG FIX: os.system('pause') below needs os, which was never imported

# Interactive startup: optionally let the user pick ip/port, else defaults.
try:
    first = input('需要自定义ip及端口嘛?\n请回答Y/N(回答无法理解自动默认模式)\n')
    if first == 'Y':
        ip = input('请输入ip\n本机ip可cmd arp -a查看\n')
        port = int(input('请输入端口\n'))
        cs = ChatServer(ip, port)
    else:
        cs = ChatServer()
    cs.start()
    print('需要帮助可以输入help查看帮助')
except Exception as e:
    print(str(e))
    print('总之哪里出错了 debug去吧。\n')
    os.system('pause')

# Simple admin REPL: byebye quits, now lists threads, who lists clients.
while True:
    try:
        cmd = input('>>>')
        if cmd.strip() == 'byebye':
            cs.stop()
            break
        if cmd.strip() == 'now':
            logging.info(threading.enumerate())
        if cmd.strip() == 'who':
            for k, v in cs.clients.items():
                print(v[1], end=': ')
                print(k)
        if cmd.strip() == 'help':
            print('尝试输入now,who? 顺便一提byebye是退出')
    except Exception as e:
        print(str(e))
        print('总之哪里出错了 debug去吧。\n')
        os.system('pause')
|
def import_args(test, *args):
    """Print the first positional parameter, then each extra positional one."""
    print('param:', test)
    for extra in args:
        print('other params:', extra)


extra_values = ['hello', 2019]
import_args('123', *extra_values)
|
"""
kde
~~~
Implements some general "kernel density estimation" methods which, while not
drawn directly from the literature, can be thought of as generalisation of
the `prohotspot` and `retrohotspot` methods.
"""
from . import predictors as _predictors
import numpy as _np
from . import kernels as _kernels
class ConstantTimeKernel():
    """A "time kernel" which evaluates to 1 everywhere."""

    def __call__(self, x):
        arr = _np.asarray(x)
        return _np.ones_like(arr)
class ExponentialTimeKernel():
    r"""An exponentially decaying kernel,
    :math:`f(x) = \exp(-x/\beta) / \beta`, where :math:`\beta` is the "scale".
    """

    def __init__(self, scale):
        self._scale = scale

    @property
    def scale(self):
        """The decay scale of the kernel."""
        return self._scale

    @scale.setter
    def scale(self, value):
        self._scale = value

    def __call__(self, x):
        xs = _np.asarray(x)
        return _np.exp(-xs / self._scale) / self._scale

    def __repr__(self):
        return "ExponentialTimeKernel(Scale={})".format(self._scale)

    @property
    def args(self):
        return "E{}".format(self._scale)
class QuadDecayTimeKernel():
    r"""A quadratically decaying kernel,
    :math:`f(x) = (1 + (x/\beta)^2)^{-1}`, where :math:`\beta` is the "scale".
    """

    def __init__(self, scale):
        self.scale = scale

    @property
    def scale(self):
        return self._scale

    @scale.setter
    def scale(self, value):
        self._scale = value
        # Normalisation constant, recomputed whenever the scale changes.
        self._norm = 2 / (self._scale * _np.pi)

    def __call__(self, x):
        ratio = _np.asarray(x) / self._scale
        return self._norm / (1 + ratio * ratio)

    def __repr__(self):
        return "QuadDecayTimeKernel(Scale={})".format(self._scale)

    @property
    def args(self):
        return "Q{}".format(self._scale)
class KernelProvider():
    """Abstract base class for a "factory" producing kernels from data.

    :param data: Array of coordinates in shape `(n,N)` for `n` dimensions
      and `N` data points.  Typically `n==2`.
    """

    def __call__(self, data):
        # Subclasses must implement kernel construction.
        raise NotImplementedError()
class GaussianBaseProvider(KernelProvider):
    """Use the :class:`kernels.GaussianBase` to estimate a kernel.
    This emulates the `scipy.kde` Gaussian kernel."""

    def __call__(self, data):
        return _kernels.GaussianBase(data)

    def __repr__(self):
        return "GaussianBaseProvider"

    @property
    def args(self):
        """Short settings tag, matching the other providers' `args`."""
        # BUG FIX: was `"G".format(self._scale)` -- this class has no _scale
        # attribute, so reading `args` raised AttributeError (and .format on
        # a placeholder-free string ignores its argument anyway).
        return "G"
class GaussianFixedBandwidthProvider(KernelProvider):
    """Produce :class:`kernels.GaussianBase` kernels with a fixed bandwidth
    and an identity covariance matrix.
    """

    def __init__(self, bandwidth):
        self._h = bandwidth

    def __call__(self, data):
        kernel = _kernels.GaussianBase(data)
        kernel.bandwidth = self._h
        kernel.covariance_matrix = _np.eye(kernel.dimension)
        return kernel

    def __repr__(self):
        return "GaussianFixedBandwidthProvider(bandwidth={})".format(self._h)

    @property
    def args(self):
        return "GF{}".format(self._h)
class GaussianNearestNeighbourProvider(KernelProvider):
    """Produce :class:`kernels.GaussianNearestNeighbour` kernels with local
    bandwidth chosen by the k-th nearest neighbour."""

    def __init__(self, k):
        self._k = k

    @property
    def k(self):
        """The nearest neighbour to look at for local bandwidth estimation."""
        return self._k

    @k.setter
    def k(self, value):
        self._k = value

    def __call__(self, data):
        return _kernels.GaussianNearestNeighbour(data, self._k)

    def __repr__(self):
        return "GaussianNearestNeighbourProvider(k={})".format(self._k)

    @property
    def args(self):
        return "GNN{}".format(self._k)
class KDE(_predictors.DataTrainer):
    """Implements a kernel density estimation, grid based prediction.  We
    implement a hybrid approach which, while not exactly common in the
    statistics literature, seems to capture the essential features of all of
    the standard "out of the box" kernel estimators, and the "Prohotspot" type
    estimators.

    The predictor itself is simple.  We select an interval time (or all time)
    and use just the data from that time range.  The distance in time from each
    event to the end time is calculated, and optionally a "time kernel" is
    calculated: typically this kernel falls off in time, so that events in the
    past are weighted less.

    The space locations of events are then passed to a kernel density
    estimator.  Finally (in a slightly non-standard way) the space kernel is
    weighted by the time kernel to produce a "risk surface".

    :param region: The rectangular region to use to grid the data, or `None`
      to auto compute
    :param grid_size: The size of the grid to use
    :param grid: If not `None` then take the `region` and `grid_size` settings
      from this grid.
    """

    def __init__(self, region=None, grid_size=50, grid=None):
        if grid is None:
            self.grid = grid_size
            self.region = region
        else:
            # Take the geometry from the supplied grid instead.
            self.region = grid.region()
            self.grid = grid.xsize
            if grid.xsize != grid.ysize:
                raise ValueError("Only supports *square* grid cells.")
        # Defaults: no time decay, one-day time unit, scipy-style Gaussian
        # KDE for the space coordinates.
        self.time_kernel = ConstantTimeKernel()
        self.time_unit = _np.timedelta64(1, "D")
        self.space_kernel = GaussianBaseProvider()

    @property
    def time_unit(self):
        """The "unit" of time to divide the time differences by to obtain
        a scalar, prior to passing to the time kernel."""
        return self._time_unit

    @time_unit.setter
    def time_unit(self, v):
        self._time_unit = _np.timedelta64(v)

    @property
    def time_kernel(self):
        """The weighting to apply to timestamps.  Should be a callable object
        corresponding to a one-dimensional "kernel"."""
        return self._time_kernel

    @time_kernel.setter
    def time_kernel(self, v):
        self._time_kernel = v

    @property
    def space_kernel(self):
        """The kernel _estimator provider_ for the space coordinates.  Needs to
        have the interface of :class:`KernelProvider`."""
        return self._space_kernel

    @space_kernel.setter
    def space_kernel(self, v):
        self._space_kernel = v

    def _kernel(self, start_time=None, end_time=None):
        """Build the time-weighted space kernel from events in
        [start_time, end_time)."""
        data = self.data
        if start_time is not None:
            start_time = _np.datetime64(start_time)
            data = data[data.timestamps >= start_time]
        if end_time is not None:
            end_time = _np.datetime64(end_time)
            data = data[data.timestamps < end_time]
        if end_time is None:
            # Fall back to the final timestamp as the "end time"
            # (assumes timestamps are sorted ascending -- TODO confirm
            # upstream guarantee).
            end_time = data.timestamps[-1]
        kernel = self.space_kernel(data.coords)
        # Scalar time distances, then weight each event by the time kernel.
        time_deltas = (end_time - data.timestamps) / self.time_unit
        kernel.weights = self.time_kernel(time_deltas)
        return kernel

    def cts_predict(self, start_time=None, end_time=None):
        """Calculate a "continuous" prediction.

        :param start_time: Only use data after (and including) this time.  If
          `None` then use from the start of the data.
        :param end_time: Only use data before this time, and treat this as the
          time point to calculate the time kernel relative to.  If `None` then
          use to the end of the data, and use the final timestamp as the
          "end time".

        :return: An instance of :class:`KernelRiskPredictor`.
        """
        kernel = self._kernel(start_time, end_time)
        return _predictors.KernelRiskPredictor(kernel)

    def predict(self, start_time=None, end_time=None, samples=None):
        """Calculate a grid based prediction.

        :param start_time: Only use data after (and including) this time.  If
          `None` then use from the start of the data.
        :param end_time: Only use data before this time, and treat this as the
          time point to calculate the time kernel relative to.  If `None` then
          use to the end of the data, and use the final timestamp as the
          "end time".
        :param samples: As for :class:`ContinuousPrediction`.

        :return: An instance of :class:`GridPredictionArray`
        """
        kernel = self._kernel(start_time, end_time)
        return _predictors.grid_prediction_from_kernel(kernel, self.region,
                                                       self.grid, samples)
|
import re
import os
import argparse
import sys
import errno
import optparse
import sqlite3
import uuid
import email
import email.utils
from email.message import EmailMessage
from email.parser import BytesParser, Parser
from email.policy import default
from datetime import datetime
import hashlib
import notesdb
import common
import constants
#
# MIT License
#
# https://opensource.org/licenses/MIT
#
# Copyright 2020 Rene Sugar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Description:
#
# This program loads a mac_apt notes database into a SQLite database.
#
# Module metadata.
# BUG FIX: the original declared ``global __name__, __author__, ...`` at
# module scope -- a no-op (module scope *is* the global scope) that only
# misleads readers; removed.
__program_name__ = 'twitterarchivelikes2sql'
__author__ = 'Rene Sugar'
__email__ = 'rene.sugar@gmail.com'
__version__ = '1.00'
__license__ = 'MIT License (https://opensource.org/licenses/MIT)'
__website__ = 'https://github.com/renesugar'

# Schema versioning used by notesdb compatibility checks.
__db_schema_version__ = '1'
__db_schema_min_version__ = '1'
def _get_option_parser():
    """Build the command-line option parser for this tool."""
    parser = optparse.OptionParser('%prog [options]',
                                   version='%prog ' + __version__)
    # (flag, destination, help text) for each long option.
    option_specs = [
        ("--email", "email_address", "Email address"),
        ("--input", "input_path", "Path to input Twitter archive file"),
        ("--output", "output_path", "Path to output notes SQLite directory"),
        ("--cache", "url_dict", "JSON dictionary containing expanded URLs"),
        ("--error", "error_dict",
         "JSON dictionary containing unexpanded URLs and errors"),
    ]
    for flag, dest, help_text in option_specs:
        parser.add_option('', flag, action="store", dest=dest, default=None,
                          help=help_text)
    return parser
def process_twitter_archive_note(sqlconn, columns):
    """Normalise one Twitter-archive note's columns and insert it via notesdb.

    Fills in the note_* bookkeeping fields (title, hash, format), nulls every
    apple_* field that has no Twitter equivalent, then inserts and commits.
    """
    # Title: fall back to the standard "untitled"; otherwise strip line
    # breaks and surrounding whitespace.
    if columns["note_title"] is None:
        note_title = constants.NOTES_UNTITLED
    else:
        note_title = common.remove_line_breakers(columns["note_title"]).strip()
    print("processing '%s'" % (note_title,))

    note_data = columns['note_data']
    if note_data is None:
        note_data = ''

    # Hash the markdown body so duplicate notes can be detected.
    digest = hashlib.sha512()
    digest.update(note_data.encode('utf-8'))

    columns.update({
        "note_type": "note",
        "note_uuid": None,
        "note_parent_uuid": None,
        # Original format tag (one of: email, apple, icloud, joplin,
        # bookmark, twitterarchive, twitterapi).
        "note_original_format": "twitterarchive",
        "note_hash": digest.hexdigest(),
        "note_title": note_title,
        "note_data": note_data,
        "note_data_format": columns['note_data_format'],
    })

    # No Apple-notes metadata exists for a Twitter archive entry; null all of
    # it.  (apple_folder / apple_created / apple_last_modified are supplied
    # by the caller and left untouched, as before.)
    for apple_field in ("apple_id", "apple_title", "apple_snippet",
                        "apple_data", "apple_attachment_id",
                        "apple_attachment_path", "apple_account_description",
                        "apple_account_identifier", "apple_account_username",
                        "apple_version", "apple_user", "apple_source"):
        columns[apple_field] = None

    notesdb.add_apple_note(sqlconn, columns)
    sqlconn.commit()
def main(args):
    """Convert a Twitter-archive SQLite database of likes into a notes DB.

    Parses command-line options, validates the input/output paths, copies
    each liked tweet into the notes database, then persists the URL-expansion
    caches to JSON files.
    """
    parser = _get_option_parser()
    (options, args) = parser.parse_args(args)

    # --- email address (required; must validate) ---
    # Idiom fix throughout: "x == False" comparisons replaced with "not x".
    email_address = ''
    if hasattr(options, 'email_address') and options.email_address:
        email_address = options.email_address
        if not common.check_email_address(email_address):
            common.error("email address '%s' is not valid." % (email_address,))
    else:
        common.error("email address not specified.")

    # --- input Twitter archive file (required; must exist) ---
    inputPath = ''
    if hasattr(options, 'input_path') and options.input_path:
        inputPath = os.path.abspath(os.path.expanduser(options.input_path))
        if not os.path.isfile(inputPath):
            common.error("input file '%s' does not exist." % (inputPath,))
    else:
        common.error("input file not specified.")

    # --- optional URL-expansion cache dictionaries ---
    urlDictPath = ''
    url_dict = {}
    if hasattr(options, 'url_dict') and options.url_dict:
        urlDictPath = os.path.abspath(os.path.expanduser(options.url_dict))
        url_dict = common.load_dict(urlDictPath)

    errorDictPath = ''
    error_dict = {}
    if hasattr(options, 'error_dict') and options.error_dict:
        errorDictPath = os.path.abspath(os.path.expanduser(options.error_dict))
        error_dict = common.load_dict(errorDictPath)

    # --- output directory (required; must exist) ---
    outputPath = ''
    if hasattr(options, 'output_path') and options.output_path:
        outputPath = os.path.abspath(os.path.expanduser(options.output_path))
        if not os.path.isdir(outputPath):
            common.error("output path '%s' does not exist." % (outputPath,))
    else:
        common.error("output path not specified.")

    twitterdbfile = inputPath
    notesdbfile = os.path.join(outputPath, 'notesdb.sqlite')
    if not os.path.isfile(twitterdbfile):
        common.error("input file does not exist")
    new_database = not os.path.isfile(notesdbfile)

    twitter_sqlconn = sqlite3.connect(twitterdbfile,
                                      detect_types=sqlite3.PARSE_DECLTYPES)
    twitter_sqlconn.row_factory = sqlite3.Row
    twitter_sqlcur = twitter_sqlconn.cursor()

    sqlconn = sqlite3.connect(notesdbfile,
                              detect_types=sqlite3.PARSE_DECLTYPES)
    sqlcur = sqlconn.cursor()
    if new_database:
        notesdb.create_database(sqlconn=sqlconn,
                                db_schema_version=__db_schema_version__,
                                email_address=options.email_address)
    db_settings = notesdb.get_db_settings(sqlcur, __db_schema_version__)
    notesdb.check_db_settings(db_settings, '%prog', __version__,
                              __db_schema_min_version__, __db_schema_version__)

    twitter_sqlcur.execute('''SELECT tweetId,
    fullText,
    expandedUrl
    FROM archive_like''')
    notes_to_convert_results = twitter_sqlcur.fetchall()
    current = 0
    for row in notes_to_convert_results:
        note_folder = "Twitter"
        add_date = datetime.now()
        note_url = row['expandedUrl']
        # Expand shortened URLs in the tweet text, threading the caches through.
        note_text, url_dict, error_dict = common.expand_urls(
            row['fullText'], url_dict, error_dict)
        # ROBUSTNESS FIX: splitlines()[0] raised IndexError on an empty tweet.
        first_line = (note_text.splitlines() or [''])[0]
        note_title = common.defaultTitleFromBody(first_line)
        note_data = note_text + "\n\n" + note_url
        current += 1
        columns = {
            "note_type": "note",
            "note_uuid": None,
            "note_parent_uuid": None,
            "note_original_format": None,
            "note_internal_date": add_date,
            "note_hash": None,
            "note_title": note_title,
            "note_url": note_url,
            "note_data": note_data,
            "note_data_format": 'text/markdown',
            "apple_folder": note_folder,
            "apple_created": add_date.strftime("%Y-%m-%d %H:%M:%S.%f"),
        }
        columns["apple_last_modified"] = columns["apple_created"]
        process_twitter_archive_note(sqlconn, columns)

    sqlconn.commit()

    # Persist the (possibly updated) URL caches; default paths if none given.
    if urlDictPath == "":
        urlDictPath = "./url_dict.json"
    common.save_dict(urlDictPath, url_dict)
    if errorDictPath == "":
        errorDictPath = "./error_dict.json"
    common.save_dict(errorDictPath, error_dict)
# Script entry point: forward CLI arguments (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
|
'''
Auto login code to keep a c9 workspace up
@author Sercrash
@date 18/05/2016
'''
# Imports
import sys, time, threading, random, datetime, timeout, os.path
from random import randint
# Execution counter shared with KeepUp.load().  (NOTE(review): a ``global``
# statement at module scope is a no-op -- kept byte-identical here.)
global ejecuciones
ejecuciones = 0
# Class definition
# Class definition
class KeepUp:
    """Periodically logs in to a Cloud9 workspace with Selenium so the
    workspace is kept awake.  (Python 2 code -- uses print statements.)"""

    def __init__(self):  # Constructor
        self.version = "v1.0.1"
        self.author = "SerCrAsH"
        # Working directories for the HTML log and the screenshots.
        self.directory = "keepUp/"
        self.directory_img = self.directory + "img/"
        self.file_result_html = "login_result.html"
        self.file_log = "log.txt"
        # Create dir if not exist
        if not os.path.exists(self.directory):
            os.makedirs(self.directory)
        if not os.path.exists(self.directory_img):
            os.makedirs(self.directory_img)
        print "[KeepUp] Initialization - @" , self.author , " - Version KeepUp : " , self.version

    def __getattr__(self, var):  # Gets
        # Fallback lookup: returns an explanatory string instead of raising
        # AttributeError for unknown attributes.
        return self.__dict__[var] if var in self.__dict__ else "["+str(var)+"] var does not exist."

    def __str__(self):  # To String
        # NOTE(review): returning None from __str__ raises TypeError if the
        # object is ever str()-ed -- left as-is.
        pass

    ##### Methods #####

    # Main Method: store credentials/target, then start the login cycle.
    def loadTask(self, user="default", password="default", target_workspace="https://ide.c9.io/user-default/workspace-default"):
        self.user = user
        self.password = password
        self.target_workspace = target_workspace
        self.load()

    # Load method: performs one login pass, then re-schedules itself at a
    # random interval between one and roughly two hours.
    def load(self):
        min_time = 3600  # 1 hour in seconds
        max_time = 7179  # 2 hours in seconds (less 21)
        tasktime = randint(min_time, max_time)
        global ejecuciones
        # The very first run fires quickly (180s); the second run subtracts
        # those 180s so the average cadence is preserved.
        if ejecuciones < 1:
            tasktime = 180
        if ejecuciones == 1:
            tasktime = tasktime - 180
        ejecuciones = 1
        threading.Timer(tasktime, self.load).start()
        print "[KeepUp] Load execution at ", time.ctime(), " waiting ", tasktime , "seconds for the next time."
        # Imports deferred so the module can be imported without selenium /
        # pyvirtualdisplay installed.
        from selenium import webdriver
        from selenium.webdriver.common.by import By
        from selenium.webdriver.support.ui import WebDriverWait
        from selenium.webdriver.support import expected_conditions as ec
        from selenium.webdriver.common.keys import Keys
        from pyvirtualdisplay import Display
        # Initial: headless virtual display for the browser.
        display = Display(visible=0, size=(1600, 900))
        display.start()
        # NOTE(review): this cache-disabling profile is built but never passed
        # to webdriver.Firefox() below -- presumably a bug; confirm intent.
        profile = webdriver.FirefoxProfile()
        profile.set_preference("browser.cache.disk.enable", False)
        profile.set_preference("browser.cache.memory.enable", False)
        profile.set_preference("browser.cache.offline.enable", False)
        profile.set_preference("network.http.use-cache", False)
        driver = webdriver.Firefox()
        driver.get("https://c9.io/dashboard.html")
        driver.save_screenshot(self.directory_img + 'login.png')
        # Username field
        username = driver.find_element_by_id("id-username")
        username.click()
        username.clear()
        username.send_keys(self.user, Keys.ARROW_DOWN)
        # Password field
        password = driver.find_element_by_id("id-password")
        password.click()
        password.clear()
        password.send_keys(self.password, Keys.ARROW_DOWN)
        # Submit
        submit_button = driver.find_element_by_css_selector("button[type=submit]")
        # print submit_button.text
        # Click submition
        submit_button.click();
        time.sleep(5)
        driver.save_screenshot(self.directory_img + 'user_profile.png')
        # Target workspace: visiting it is what keeps it awake.
        driver.get(self.target_workspace)
        time.sleep(10)
        self.log(driver.page_source)  # make log
        driver.save_screenshot(self.directory_img + 'final_workspace.png')
        # End
        driver.quit()
        display.stop()

    # Log method: dump the page source to HTML and append a timestamp line
    # to the plain-text log.
    def log(self, content=""):
        log_html = self.directory + self.file_result_html
        log_file = self.directory + self.file_log
        f = open(log_html, 'w')
        f.write(content)
        f.close()
        f = open(log_file, 'a')
        f.write("[" + str(time.ctime()) + "] Log execution\n")
        f.close()
        print "[KeepUp] Log volcado"
|
# Generated by Django 2.1.1 on 2018-10-16 17:04
import discourz_app.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Account model with the
    # username itself as primary key.
    # NOTE(review): storing `password` as a plain CharField suggests passwords
    # are kept in clear text -- confirm whether hashing happens elsewhere.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Account',
            fields=[
                ('username', models.CharField(max_length=20, primary_key=True, serialize=False)),
                ('password', models.CharField(max_length=20)),
                ('email', models.EmailField(max_length=254)),
                ('img', models.ImageField(upload_to=discourz_app.models.user_directory_path)),
                ('bio', models.TextField(max_length=500)),
            ],
        ),
    ]
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight
# Exploring Data (scratch work kept for reference)
'''
df['Age'].mean() #Average age of 44
df['Survived'] #13% survival rate, yoinks!
df.describe()
a = sns.catplot(x='Sex', y='Survived', kind="point", data=df)
plt.show()
df.groupby('Category').mean()
'''

# Cleaning data
df = pd.read_csv("data/estonia-passenger-list.csv")  # read in csv
# Remove identifying data, and Country (no apparent statistical relevance).
df.drop(['Firstname', 'Lastname', 'PassengerId', 'Country'], axis=1, inplace=True)
df['Sex'].replace(['M', 'F'], [0, 1], inplace=True)  # enumerate sex column
df['Category'].replace(['P', 'C'], [0, 1], inplace=True)  # P=passengers (0), C=crew (1)

# Splitting data into training (60%), validation (20%) and testing (20%)
features = df.drop('Survived', axis=1)
labels = df['Survived']  # what we are looking to predict
f_train, f_test, l_train, l_test = train_test_split(
    features, labels, test_size=.4, random_state=42)  # train on 60% of the dataset
# BUG FIX: the second split must divide the held-out 40% (f_test/l_test), not
# re-split the full dataset.  The original leaked training rows into the
# validation/test sets and made val/test 50% of ALL data each.
f_val, f_test, l_val, l_test = train_test_split(
    f_test, l_test, test_size=.5, random_state=42)  # 20% val, 20% test

# Saving split data to files so the comparison can be recreated
f_train.to_csv('data/f_training.csv', index=False)
l_train.to_csv('data/l_training.csv', index=False, header=False)
f_val.to_csv('data/f_val.csv', index=False)
l_val.to_csv('data/l_val.csv', index=False, header=False)
f_test.to_csv('data/f_test.csv', index=False)
l_test.to_csv('data/l_test.csv', index=False, header=False)
|
import unittest
import pandas as pd
from Funcoes_Axiliares import Funcoes_Axiliares
import sys, os
sys.path.insert(-1,os.path.abspath('../../../Pyker'))
from cientista.Validar import Regra_descartar_linha_repetida
class Regra_descartar_linha_repetida_test(unittest.TestCase, Funcoes_Axiliares):
    """Tests the "discard repeated row" rule (Regra_descartar_linha_repetida):
    after validating every inserted row, the three shuffled screens ("telas")
    must end up holding identical rows."""

    def test_repara(self):
        # Build an empty DataFrame, register it with the rule, then feed in
        # shuffled (screen, file-index) tuples and validate each row added.
        print('------------ validar -----------------')
        regra_descartar_linha_repetida = Regra_descartar_linha_repetida.Regra_descartar_linha_repetida()
        dataframe = pd.DataFrame()
        regra_descartar_linha_repetida.iniciar(dataframe, ['A', 'B', 'C', 'D', 'E', 'F'])
        lista_telas_embaralhadas = self.retornar_lista_telas_embaralhadas(4, 3)
        for idx, tupla in enumerate(lista_telas_embaralhadas):
            dataframe.loc[idx] = False
            novo_ficheiro = self.gerador_de_ficheiros(tupla[1])
            novo_ficheiro.setdefault('tela', tupla[0])
            for chave in novo_ficheiro:
                dataframe.loc[idx, (chave)] = novo_ficheiro[chave]
            regra_descartar_linha_repetida.validar(idx)
        # After deduplication every screen must render identically
        # (compare via to_string so NaN/dtype noise does not matter).
        tela_1 = dataframe[dataframe['tela'] == 1].drop(['tela'], axis=1).to_string(index=False)
        tela_2 = dataframe[dataframe['tela'] == 2].drop(['tela'], axis=1).to_string(index=False)
        tela_3 = dataframe[dataframe['tela'] == 3].drop(['tela'], axis=1).to_string(index=False)
        self.assertEqual(tela_1, tela_2)
        self.assertEqual(tela_1, tela_3)
        self.assertEqual(tela_2, tela_3)

    def gerador_de_ficheiros(self, novo_ficheiro):
        """Return a canned poker-table snapshot dict for index 0..3.

        Snapshots 1 and 2 differ only in whose turn it is ('vez_*'); snapshot
        0 and 3 differ in pot, bets and hole cards.
        """
        if novo_ficheiro == 0:
            ficheiro = {'pote' :0.45 ,'pote_rodada' :0.00 ,'bord_etapa' :'RIVER' ,'mao' :1,
                        'bord_FLOP_1' :'C2' ,'bord_FLOP_2' :'C2' ,'bord_FLOP_3' :'C2' ,'bord_TURN' :'C2'
                        ,'bord_RIVER' :'C2',
                        'diler_A' :True ,'diler_B' :False ,'diler_C' :False ,'diler_D' :False ,'diler_E' :False
                        ,'diler_F' :False,
                        'aposta_A' :0 ,'aposta_B' :0.05 ,'aposta_C' :0.1 ,'aposta_D' :0.3 ,'aposta_E' :0 ,'aposta_F' :0,
                        'fichas_A' :0 ,'fichas_B' :0.05 ,'fichas_C' :0.1 ,'fichas_D' :0.3 ,'fichas_E' :0 ,'fichas_F' :0,
                        'vez_A' :False ,'vez_B' :False ,'vez_C' :False ,'vez_D' :False ,'vez_E' :True ,'vez_F' :False,
                        'hole_cards_A' :False ,'hole_cards_B' :False ,'hole_cards_C' :False ,'hole_cards_D' :False
                        ,'hole_cards_E' :False ,'hole_cards_F' :False}
        elif novo_ficheiro == 1:
            ficheiro = {'pote' :0.75 ,'pote_rodada' :0.00 ,'bord_etapa' :'RIVER' ,'mao' :1,
                        'bord_FLOP_1' :'C2' ,'bord_FLOP_2' :'C2' ,'bord_FLOP_3' :'C2' ,'bord_TURN' :'C2'
                        ,'bord_RIVER' :'C2',
                        'diler_A' :True ,'diler_B' :False ,'diler_C' :False ,'diler_D' :False ,'diler_E' :False
                        ,'diler_F' :False,
                        'aposta_A' :0 ,'aposta_B' :0.05 ,'aposta_C' :0.1 ,'aposta_D' :0.3 ,'aposta_E' :0.3
                        ,'aposta_F' :0,
                        'fichas_A' :0 ,'fichas_B' :0.05 ,'fichas_C' :0.1 ,'fichas_D' :0.3 ,'fichas_E' :0 ,'fichas_F' :0,
                        'vez_A' :False ,'vez_B' :False ,'vez_C' :False ,'vez_D' :False ,'vez_E' :False ,'vez_F' :False,
                        'hole_cards_A' :False ,'hole_cards_B' :False ,'hole_cards_C' :False ,'hole_cards_D' :False
                        ,'hole_cards_E' :False ,'hole_cards_F' :True}
        elif novo_ficheiro == 2:
            ficheiro = {'pote' :0.75 ,'pote_rodada' :0.00 ,'bord_etapa' :'RIVER' ,'mao' :1,
                        'bord_FLOP_1' :'C2' ,'bord_FLOP_2' :'C2' ,'bord_FLOP_3' :'C2' ,'bord_TURN' :'C2'
                        ,'bord_RIVER' :'C2',
                        'diler_A' :True ,'diler_B' :False ,'diler_C' :False ,'diler_D' :False ,'diler_E' :False
                        ,'diler_F' :False,
                        'aposta_A' :0 ,'aposta_B' :0.05 ,'aposta_C' :0.1 ,'aposta_D' :0.3 ,'aposta_E' :0.3
                        ,'aposta_F' :0,
                        'fichas_A' :0 ,'fichas_B' :0.05 ,'fichas_C' :0.1 ,'fichas_D' :0.3 ,'fichas_E' :0 ,'fichas_F' :0,
                        'vez_A' :True ,'vez_B' :False ,'vez_C' :False ,'vez_D' :False ,'vez_E' :False ,'vez_F' :False,
                        'hole_cards_A' :False ,'hole_cards_B' :False ,'hole_cards_C' :False ,'hole_cards_D' :False
                        ,'hole_cards_E' :False ,'hole_cards_F' :True}
        elif novo_ficheiro == 3:
            ficheiro = {'pote' :4.04 ,'pote_rodada' :0.00 ,'bord_etapa' :'RIVER' ,'mao' :1,
                        'bord_FLOP_1' :'C2' ,'bord_FLOP_2' :'C2' ,'bord_FLOP_3' :'C2' ,'bord_TURN' :'C2'
                        ,'bord_RIVER' :'C2',
                        'diler_A' :True ,'diler_B' :False ,'diler_C' :False ,'diler_D' :False ,'diler_E' :False
                        ,'diler_F' :False,
                        'aposta_A' :0 ,'aposta_B' :0.1 ,'aposta_C' :0.3 ,'aposta_D' :0.3 ,'aposta_E' :0.3
                        ,'aposta_F' :0,
                        'fichas_A' :0 ,'fichas_B' :0.05 ,'fichas_C' :0.1 ,'fichas_D' :0.3 ,'fichas_E' :0 ,'fichas_F' :0,
                        'vez_A' :False ,'vez_B' :False ,'vez_C' :False ,'vez_D' :True ,'vez_E' :False ,'vez_F' :False,
                        'hole_cards_A' :True ,'hole_cards_B' :False ,'hole_cards_C' :True ,'hole_cards_D' :False
                        ,'hole_cards_E' :False ,'hole_cards_F' :True}
        return ficheiro
# Direct execution entry point.  (The printed message contains typos in the
# original Portuguese; it is a runtime string so it is left unchanged.)
if __name__ == "__main__":
    print('Teste da classe lisnhas descartavel')
    unittest.main()
|
import torch
def iou(pr, gt, eps=1e-7, threshold=None, activation='sigmoid'):
    """Compute the IoU (Jaccard) score between predictions and targets.

    Source:
        https://github.com/catalyst-team/catalyst/
    Args:
        pr (torch.Tensor): predicted elements
        gt (torch.Tensor): elements that are to be predicted
        eps (float): epsilon to avoid zero division
        threshold: threshold for outputs binarization (skipped when None)
        activation (str): 'sigmoid', 'softmax2d', 'none' or None
    Returns:
        torch.Tensor: scalar IoU (Jaccard) score
    """
    if activation == "sigmoid":
        pr = torch.nn.Sigmoid()(pr)
    elif activation == "softmax2d":
        pr = torch.nn.Softmax2d()(pr)
    elif not (activation is None or activation == "none"):
        raise NotImplementedError(
            "Activation implemented for sigmoid and softmax2d"
        )
    if threshold is not None:
        pr = (pr > threshold).float()
    # IoU = |A ∩ B| / |A ∪ B|, smoothed by eps on both sides.
    overlap = torch.sum(gt * pr)
    union = torch.sum(gt) + torch.sum(pr) - overlap + eps
    return (overlap + eps) / union


jaccard = iou
def f_score(pr, gt, beta=1, eps=1e-7, threshold=None, activation='sigmoid'):
    """Compute the F-beta score between predictions and targets.

    Args:
        pr (torch.Tensor): predicted elements
        gt (torch.Tensor): elements that are to be predicted
        beta (float): positive constant weighting recall vs precision
        eps (float): epsilon to avoid zero division
        threshold: threshold for outputs binarization (skipped when None)
        activation (str): 'sigmoid', 'softmax2d', 'none' or None
    Returns:
        torch.Tensor: scalar F score
    """
    known_activations = {
        "sigmoid": torch.nn.Sigmoid,
        "softmax2d": torch.nn.Softmax2d,
    }
    if activation is not None and activation != "none":
        if activation not in known_activations:
            raise NotImplementedError(
                "Activation implemented for sigmoid and softmax2d"
            )
        pr = known_activations[activation]()(pr)
    if threshold is not None:
        pr = (pr > threshold).float()
    # Soft confusion counts.
    true_pos = torch.sum(gt * pr)
    false_pos = torch.sum(pr) - true_pos
    false_neg = torch.sum(gt) - true_pos
    beta_sq = beta ** 2
    return ((1 + beta_sq) * true_pos + eps) \
        / ((1 + beta_sq) * true_pos + beta_sq * false_neg + false_pos + eps)
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output (torch.Tensor): class scores/logits of shape (batch, num_classes).
        target (torch.Tensor): ground-truth class indices of shape (batch,).
        topk (tuple): the k values to report accuracy for.
    Returns:
        list[torch.Tensor]: one single-element tensor per k, in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    # Indices of the maxk highest scores per sample, shape (batch, maxk).
    _, pred = output.topk(maxk, 1, True, True)
    # Transpose to (maxk, batch) so row k-1 holds each sample's k-th guess.
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # Fix: use reshape(-1) instead of view(-1) — the sliced tensor is not
        # guaranteed to be contiguous, and .view() raises a RuntimeError on
        # non-contiguous tensors in current PyTorch; .reshape() copies only
        # when needed.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
|
# encoding: utf-8
'''
Created on 26 Sep 2017
@author: MetalInvest
'''
try:
from kuanke.user_space_api import *
except ImportError as ie:
print(str(ie))
from jqdata import *
import pandas as pd
import numpy as np
class HerdHead(object):
    """Finds leading ("head") stocks within an industry or concept index.

    Relies on trading-platform APIs being in scope (get_current_data,
    attribute_history, history, get_fundamentals, get_industry_stocks,
    get_concept_stocks, query/valuation) — presumably JoinQuant; see the
    kuanke/jqdata imports at the top of the file.
    """

    def __init__(self, params):
        # Minimum gain ratio for a stock to count as a leader.
        self.gainThre = params.get('gainThre', 0.05)
        # Number of history bars to look back.
        self.count = params.get('count', 20)
        # Bar unit for history queries.
        self.period = params.get('period', '1d')
        # Whether to append the latest intraday bar to daily history.
        self.useIntraday = params.get('useIntraday', True)
        self.intraday_period = params.get('intraday_period', '230m')
        # Analysis mode: when True, skip the ST/paused filtering.
        self.isAnal = params.get('isAnal', False)

    def filterStocks(self, stock_list):
        # Drop ST / delisting-risk names ('退' marks delisting) and paused stocks.
        current_data = get_current_data()
        stock_list = [stock for stock in stock_list
                      if not current_data[stock].is_st
                      and 'ST' not in current_data[stock].name
                      and '*' not in current_data[stock].name
                      and '退' not in current_data[stock].name]
        stock_list = [stock for stock in stock_list if not current_data[stock].paused]
        return stock_list

    # Find the leading ("head") stocks of an index/concept.
    def findLeadStock(self,index,method = 0,isConcept=False):
        # Rules:
        # 1. the stock's gain exceeds the threshold;
        # 2. the index gain is above a quarter of the threshold;
        # 3. last week's volume is greater than that of the past two weeks;
        # 4. the stock's circulating-market-cap share of the total reaches the threshold.
        # Fetch the constituent stocks of the index/concept:
        oriStocks = get_industry_stocks(index) if not isConcept else get_concept_stocks(index)
        if not self.isAnal:
            oriStocks = self.filterStocks(oriStocks)
        # Filter by individual stock gain.
        filtStocks = self.filtGain(oriStocks)
        # Compute the index (average) gain.
        gainIndex = self.calIndustryGain(oriStocks)
        # Apply the selected rule set.
        if float(gainIndex)/self.gainThre > 0.25:
            if method == 0:
                # Basic constraint only.
                return filtStocks
            elif method == 1:
                # Basic constraint + volume constraint.
                filtStocks = self.filtVol(filtStocks)
                return filtStocks
            elif method == 2:
                # Basic constraint + circulating-market-cap constraint.
                filtStocks = self.filtMarketCap(filtStocks, oriStocks)
                return filtStocks
            elif method == 3:
                # Basic + market-cap + volume constraints.
                filtStocks = self.filtVol(filtStocks)
                if len(filtStocks) != 0:
                    filtStocks = self.filtMarketCap(filtStocks,oriStocks)
                else:
                    pass
                return filtStocks
            else:
                # NOTE(review): returns an error *string* where other branches
                # return a list — callers must handle both shapes.
                return 'Error method order'
        else:
            return []

    # Filter stocks by their gain over the lookback window.
    def filtGain(self, stocks):
        # Gain per stock, aligned with `stocks`.
        rankValue = []
        # Compute each stock's relative change over the window.
        for security in stocks:
            # Closing prices over the lookback window.
            # stocksPrice = get_price(security, start_date = preDate, end_date = curDate, frequency = '1d', fields = 'close')
            # stocksPrice = attribute_history(security, self.count, unit=self.period, fields = ['close'], skip_paused=True, df=True)
            stocksPrice = self.getlatest_df(security, self.count, fields=['close'], skip_paused=True, df_flag=True)
            if len(stocksPrice)!=0:
                # Relative change from the first to the last close.
                errCloseOpen = [(float(stocksPrice.iloc[-1]) - float(stocksPrice.iloc[0])) / float(stocksPrice.iloc[0])]
                rankValue += errCloseOpen
            else:
                rankValue += [0]
        # Rank by gain and keep those above the threshold.
        filtStocks = {'code':stocks,'rankValue':rankValue}
        filtStocks = pd.DataFrame(filtStocks)
        # NOTE(review): DataFrame.sort was removed in pandas >= 0.20; on a
        # modern pandas this would need sort_values('rankValue', ascending=False).
        filtStocks = filtStocks.sort('rankValue',ascending = False)
        filtStocks = filtStocks[filtStocks['rankValue'] > self.gainThre]
        filtStocks = list(filtStocks['code'])
        return filtStocks

    # Filter stocks by trading volume.
    def filtVol(self, stocks):
        # Stocks that pass the volume test.
        returnStocks = []
        # Keep stocks whose recent-5-bar mean volume exceeds the recent-10-bar mean.
        stockVol = history(self.count, unit=self.period, field='volume', security_list=stocks, df=False, skip_paused=False, fq='pre')
        if self.useIntraday:
            # Append the current intraday volume bar to each series.
            stockVol_intraday = history(1, unit=self.intraday_period, field='volume', security_list=stocks, df=False, skip_paused=False, fq='pre')
            for security in stocks:
                stockVol[security] = np.append(stockVol[security], stockVol_intraday[security])
        for security in stocks:
            if float(stockVol[security][-5:].mean()) > float(stockVol[security][-10:].mean()):
                returnStocks += [security]
            else:
                continue
        return returnStocks

    # Filter stocks by circulating market cap.
    def filtMarketCap(self,stocks,oriStocks):
        returnStocks = []
        # Total circulating market cap of the whole industry/concept.
        indexMarketCap = get_fundamentals(query(valuation.circulating_market_cap).filter(valuation.code.in_(oriStocks)))
        totalMarketCap = float(sum(indexMarketCap['circulating_market_cap']))
        # Per-stock share threshold: the 25th percentile of all shares.
        indexMarketCap = indexMarketCap.div(totalMarketCap,axis=0)
        porThre = indexMarketCap.describe()
        porThre = float(porThre.loc['25%'])
        # Keep stocks whose own market cap exceeds the threshold share.
        for security in stocks:
            stockMarketCap = get_fundamentals(query(valuation.circulating_market_cap).filter(valuation.code.in_([security])))
            if float(stockMarketCap.iloc[0]) > totalMarketCap * porThre:
                returnStocks += [security]
            else:
                continue
        return returnStocks

    # Compute the average gain of all stocks in the industry/concept.
    def calIndustryGain(self, stocks):
        gainIndex = 0
        if not stocks:
            # Avoid division by zero on an empty constituent list.
            return 0
        for security in stocks:
            # stocksPrice = get_price(security, start_date = preDate, end_date = curDate, frequency = '1d', fields = 'close')
            # stocksPrice = attribute_history(security, self.count, unit=self.period, fields = ['close'], skip_paused=True, df=True)
            stocksPrice = self.getlatest_df(security, self.count, fields=['close'], skip_paused=True, df_flag=True)
            if len(stocksPrice) != 0:
                gainIndex += (float(stocksPrice.iloc[-1]) - float(stocksPrice.iloc[0])) / float(stocksPrice.iloc[0])
            else:
                continue
        return gainIndex/len(stocks)

    def getlatest_df(self, stock, count, fields, skip_paused=True, df_flag = True):
        """Fetch `count` history bars and, when useIntraday is set, append the
        latest intraday bar so today's partial data is included."""
        df = attribute_history(stock, count, unit=self.period, fields = fields, skip_paused=skip_paused, df=df_flag)
        if self.useIntraday:
            containPaused = 'paused' in fields
            if containPaused:
                # 'paused' is stripped from the intraday query and re-added below.
                fields.remove('paused')
            latest_stock_data = attribute_history(stock, 1, self.intraday_period, fields, skip_paused=skip_paused, df=df_flag)
            if containPaused:
                # NOTE(review): DataFrame.assign returns a new frame; this result
                # is discarded, so the line is a no-op. The .ix assignment below
                # creates the 'paused' column anyway.
                latest_stock_data.assign(paused=np.nan)
                cd = get_current_data()
                # NOTE(review): .ix was removed in modern pandas — relies on an
                # old pandas version being available on the platform.
                latest_stock_data.ix[-1,'paused'] = cd[stock].paused
            if df_flag:
                # Re-stamp the intraday bar with its calendar date so its index
                # lines up with the daily bars before appending.
                current_date = latest_stock_data.index[-1].date()
                latest_stock_data = latest_stock_data.reset_index(drop=False)
                latest_stock_data.ix[0, 'index'] = pd.DatetimeIndex([current_date])[0]
                latest_stock_data = latest_stock_data.set_index('index')
                df = df.reset_index().drop_duplicates(subset='index').set_index('index')
                df = df.append(latest_stock_data, verify_integrity=True) # True
            else:
                final_fields = []
                # NOTE(review): basestring exists only on Python 2; under
                # Python 3 a plain-string `fields` would raise NameError here.
                if isinstance(fields, basestring):
                    final_fields.append(fields)
                else:
                    final_fields = list(fields)
                # [np.append(df[field], latest_stock_data[field][-1]) for field in final_fields]
                for field in final_fields:
                    df[field] = np.append(df[field], latest_stock_data[field][-1])
        return df
from django.contrib import admin
from .models import *
@admin.register(Task)
class TaskAdmin(admin.ModelAdmin):
    """Admin configuration for Task."""

    # Keep the auto-managed timestamp fields out of the edit form.
    exclude = ("created_at", "updated_at")
|
from torch.utils.data import Dataset
from common.utils import get_img, get_img_recog
class AEDataSet(Dataset):
    """DataSet for Autoencoder.

    Each item is the four seasonal renderings of one image (inputs) paired
    with the 'normal' rendering of the same image (target).
    """

    def __init__(self, home_path, path_list, transform=None, crop=True):
        """
        Args:
            home_path (str): home dir path
            path_list (list): image file names
            transform (transforms): transforms
            crop (bool): crop images or not
        """
        self.home_path = home_path
        self.path_list = path_list
        self.transform = transform
        self.crop = crop
        self.seasons = ["spring", "summer", "autumn", "winter"]

    def __len__(self):
        return len(self.path_list)

    def __getitem__(self, idx):
        file_name = self.path_list[idx]
        inputs = []
        for season in self.seasons:
            season_path = "{}/{}/images/{}".format(self.home_path, season, file_name)
            inputs.append(get_img(season_path, self.crop))
        target = get_img(self.home_path + "/normal/images/" + file_name, self.crop)
        if self.transform:
            inputs = [self.transform(img.copy()) for img in inputs]
            target = self.transform(target.copy())
        return inputs, target
class ControlDataSet(Dataset):
    """Dataset over a DataFrame whose column 1 holds image paths and column 2
    labels (positional access via .iat).

    Args:
        img_df (pandas.DataFrame): image-path/label table.
        transform (transforms): optional transform applied to each image.
        crop (bool): crop images or not.
    """

    def __init__(self, img_df, transform=None, crop=True):
        self.img_df = img_df
        self.crop = crop
        # Bug fix: always assign the attribute. Previously it was only set
        # when a transform was given, so __getitem__ crashed with
        # AttributeError whenever transform=None.
        self.transform = transform

    def __len__(self):
        return len(self.img_df)

    def __getitem__(self, idx):
        label = self.img_df.iat[idx, 2]
        img_path = self.img_df.iat[idx, 1]
        img = get_img(img_path, self.crop)
        if self.transform:
            img = self.transform(img.copy())
        return img, label
class RecogDataSet(Dataset):
    """Season-recognition dataset: the label is derived from the season
    directory name embedded in each image path (.../<season>/images/<file>).
    """

    def __init__(self, path_list, transform=None):
        """
        Args:
            path_list (list): image file paths
            transform (transforms): transforms
        """
        self.path_list = path_list
        self.transform = transform
        # Season name -> integer class label.
        self.seasons = {
            name: index
            for index, name in enumerate(("spring", "summer", "autumn", "winter"))
        }

    def __len__(self):
        return len(self.path_list)

    def __getitem__(self, idx):
        path = self.path_list[idx]
        # The third-from-last path component is the season directory.
        label = self.seasons[path.split('/')[-3]]
        inputs = get_img_recog(path)
        if self.transform:
            inputs = self.transform(inputs.copy())
        return inputs, label
|
import argparse
import os
import sys
from PyQt5 import QtWidgets
from gui.GameWindow import GameWindow
def main():
    """Parse command-line options and launch the CHIP8 emulator window."""
    prog_name = os.path.basename(sys.argv[0])
    parser = argparse.ArgumentParser(
        usage='{} rom'.format(prog_name),
        description='CHIP8 emulator. Play a game which is 30 years old!',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('rom', type=str, help='way to rom file')
    parser.add_argument('--debug', action="store_true",
                        help="enable debug mode")
    parser.add_argument('-s', '--speed', type=int, default=12000,
                        help='try to adjust CPU speed. The bigger the value '
                             'is, the slower the game speed gets')
    parser.add_argument('-d', '--delay', type=int, default=10,
                        help='try to adjust delay timer. The bigger the '
                             'value is, the slower things move')
    args = parser.parse_args()

    app = QtWidgets.QApplication(sys.argv)
    GameWindow.DEBUG = args.debug
    # NOTE(review): speed/delay are offset by +1 — presumably to keep both
    # tunables non-zero; confirm against GameWindow's expectations.
    window = GameWindow(args.rom, args.speed + 1, args.delay + 1)
    window.show()
    app.exec_()
# Script entry point.
if __name__ == '__main__':
    main()
|
import subprocess
from tkinter import *
from PIL import Image, ImageTk
# --- Main window and static widgets -----------------------------------------
root = Tk()
root.iconbitmap("img/master.ico")
root.title("Azure VM Control")
mainframe = Frame(root)
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
mainframe.pack(pady=10, padx=100)
pic = Image.open("img/eagle.jpg")
picture = ImageTk.PhotoImage(pic)
# NOTE(review): .grid() returns None, so `image` (and the *_label names
# below) hold None rather than the widget.
image = Button(mainframe, width=226, height=175, image=picture).grid(column=1, row=0)
# Create a Tkinter variable
VM_name = StringVar()
Rad_opt = StringVar()
# Set of VM display names offered in the dropdown (a set literal, not a dict)
choices = {'OpenVPN Access Server', 'Windows Datacenter Server'}
VM_name.set('Select the VM')
popupMenu = OptionMenu(mainframe, VM_name, *choices)
Label(mainframe, text="Choose a Server").grid(row=2, column=1)
popupMenu.grid(row=3, column=1)
# Status labels driven by the StringVars that activate_ctrls() updates.
r3_ltxt = StringVar()
r4_ltxt = StringVar()
r3_label = Label(mainframe, textvariable=r3_ltxt).grid(row=4, column=1)
r4_label = Label(mainframe, textvariable=r4_ltxt).grid(row=5, column=1)
def vm_status():
    """Query Azure for the selected VM's power state and refresh the panel.

    Reads the selection from the VM_name StringVar, runs
    `az vm get-instance-view`, then calls activate_ctrls() with "Running"
    or "Stopped".
    """
    param = str(VM_name.get())
    if param == "OpenVPN Access Server":
        vm = "OVPN"
    elif param == "Windows Datacenter Server":
        vm = "Win10-VPN"
    else:
        vm = ""
    cmd = "az vm get-instance-view -n " + vm + " -g VPN --query instanceView.statuses[1] -o table"
    # NOTE(review): passing an argument list together with shell=True is only
    # reliable on Windows; on POSIX the shell would receive just "az".
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, shell=True, universal_newlines=True)
    # communicate() waits for exit while draining stdout — replaces the old
    # busy-poll loop and cannot miss lines buffered after process exit.
    output, _ = process.communicate()
    v_flag = any(
        line.strip().replace(" ", "") == "PowerState/runningInfoVMrunning"
        for line in output.splitlines()
    )
    if v_flag:
        activate_ctrls("Running")
    else:
        activate_ctrls("Stopped")
# Helpers that start/stop the selected VM via the Azure CLI.
def start_vm(param):
    """Start the selected Azure VM with `az vm start`, then refresh status.

    Args:
        param: display name of the VM as shown in the dropdown.
    """
    cmd = "az vm start -g VPN -n "
    if str(param) == "OpenVPN Access Server":
        cmd = cmd + "OVPN"
    elif str(param) == "Windows Datacenter Server":
        cmd = cmd + "Win10-VPN"
    # NOTE(review): argument list + shell=True is Windows-specific behaviour.
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, shell=True, universal_newlines=True)
    # communicate() blocks until the command finishes while draining stdout —
    # replaces the old CPU-spinning poll() loop and avoids a pipe-buffer
    # deadlock on verbose output.
    process.communicate()
    vm_status()
def stop_vm(param):
    """Deallocate the selected Azure VM with `az vm deallocate`, then refresh.

    Args:
        param: display name of the VM as shown in the dropdown.
    """
    cmd = "az vm deallocate -g VPN -n "
    if str(param) == "OpenVPN Access Server":
        cmd = cmd + "OVPN"
    elif str(param) == "Windows Datacenter Server":
        cmd = cmd + "Win10-VPN"
    # NOTE(review): argument list + shell=True is Windows-specific behaviour.
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, shell=True, universal_newlines=True)
    # communicate() blocks until the command finishes while draining stdout —
    # replaces the old CPU-spinning poll() loop and avoids a pipe-buffer
    # deadlock on verbose output.
    process.communicate()
    vm_status()
def activate_ctrls(param):
    """Update the panel labels and show the start/stop button matching *param*
    ("Running" or "Stopped")."""
    selection = str(VM_name.get())
    r3_ltxt.set(selection + " Control Panel")
    r4_ltxt.set(selection + " is " + param)
    if param == "Running":
        Button(mainframe, text="Stop VM", command=lambda: stop_vm(VM_name.get())).grid(row=6, column=1)
    elif param == "Stopped":
        Button(mainframe, text="Start VM", command=lambda: start_vm(VM_name.get())).grid(row=6, column=1)
# link function to change dropdown
def throw(*args):
    """Trace callback for the dropdown: refresh status once a real VM is chosen."""
    known_vms = ("OpenVPN Access Server", "Windows Datacenter Server")
    if str(VM_name.get()) in known_vms:
        vm_status()
# Refresh the panel whenever the dropdown selection changes, then run the GUI.
VM_name.trace('w', throw)
root.mainloop()
|
import time
import os
import datetime
# Pull greeting words from the environment (with fallbacks), wait 20 seconds,
# then print the combined message 19 times at 3-second intervals.
word = os.getenv("GREETINGS", "hello")
word_jp = os.getenv("GREETINGS_JP", "konnichiwa")
time.sleep(20)
message = f'{word} in Japanese is {word_jp}'
for step in range(19):
    print(step, message, flush=True)
    time.sleep(3)
|
import itertools
from collections import deque
import pygame
import random
import screen_utils
import config
def lines(screen, lines_lifespan=2000):
    """Animated bouncing-line effect: one line whose endpoints bounce around
    the screen, leaving a 100-line fading trail colored by a cycling palette.

    Args:
        screen: screen wrapper exposing sizeX/sizeY, window, clock and clear().
        lines_lifespan (int): number of animation frames to run.

    Returns:
        The `running` flag captured from config at entry.
    """
    verbose = config.verbose
    running = config.running
    palette, palette_name = screen_utils.get_palette()
    colors = itertools.cycle(palette)
    if verbose:
        print("lines, {}".format(palette_name))
    # Colors fade from base_color to next_color over number_of_steps frames.
    base_color = next(colors)
    next_color = next(colors)
    step = 0
    number_of_steps = 100
    screen_width = screen.sizeX
    screen_height = screen.sizeY
    line_size = 2
    number_of_lines = 100
    # Pre-fill the trail with invisible (black) placeholder lines so the
    # deque is already at full length when the animation starts.
    line = [0, 0, 0, 0, pygame.color.Color("black")]
    lines_list = deque([])
    for i in range(number_of_lines - 1):
        lines_list.append(line)
    # Random starting endpoints for the moving line.
    x1 = random.randint(1, screen_width)
    x2 = random.randint(1, screen_width)
    y1 = random.randint(1, screen_height)
    y2 = random.randint(1, screen_height)
    line = [x1, y1, x2, y2, pygame.color.Color("black")]
    lines_list.append(line)
    # Per-frame movement applied to each endpoint coordinate.
    min_move = 1
    max_move = 10
    dir_x1 = random.randint(min_move, max_move)
    dir_x2 = random.randint(min_move, max_move)
    dir_y1 = random.randint(min_move, max_move)
    dir_y2 = dir_x2 + 1
    screen.clear()
    for lifespan in range(lines_lifespan):
        screen_utils.check_event(screen)
        current_color = screen_utils.color_fade_from_palette(base_color, next_color, step, number_of_steps)
        if step >= number_of_steps:
            # Fade complete: start fading toward the next palette color.
            step = 1
            base_color = next_color
            next_color = next(colors)
        else:
            step += 1
            pass
        # Erase the oldest trail line by redrawing it in black.
        pygame.draw.line(screen.window, pygame.color.Color("black"), (lines_list[0][0], lines_list[0][1]),
                         (lines_list[0][2], lines_list[0][3]), line_size)
        # Bounce each endpoint off the screen edges.
        if x1 <= 1 or x1 >= screen_width:
            dir_x1 = dir_x1 * -1
        if x2 <= 1 or x2 >= screen_width:
            dir_x2 = dir_x2 * -1
        if y1 <= 1 or y1 >= screen_height:
            dir_y1 = dir_y1 * -1
        if y2 <= 1 or y2 >= screen_height:
            dir_y2 = dir_y2 * -1
        x1 += dir_x1
        x2 += dir_x2
        y1 += dir_y1
        y2 += dir_y2
        line = [x1, y1, x2, y2, current_color]
        lines_list.append(line)
        # Redraw the whole visible trail, newest line included.
        for i in range(1, len(lines_list)):
            pygame.draw.line(screen.window, lines_list[i][4], (lines_list[i][0], lines_list[i][1]),
                             (lines_list[i][2], lines_list[i][3]), line_size)
        lines_list.popleft()
        pygame.display.update()
        screen.clock.tick(100)
    screen.clear()
    return running
|
import argparse
import json
import time
from datetime import datetime, date
from enum import Enum
from http import HTTPStatus
import requests
import sqlalchemy
from attr import dataclass
from sqlalchemy import Column, INTEGER, DATE, TIMESTAMP, String, UniqueConstraint
from .common_tool_methods import remove_suffix, stat_get_request
from .dbconfig import Base, db_session, DbParams, RequestType
# Suffix packagecloud appends to human-readable package counts (e.g. "12 packages").
PC_PACKAGE_COUNT_SUFFIX = " packages"
# Date-key format of the download-series API response (e.g. "20230101Z").
PC_DOWNLOAD_DATE_FORMAT = "%Y%m%dZ"
# Timestamp format of "downloaded_at" in the download-detail API response.
PC_DOWNLOAD_DETAIL_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.000Z"
# Page size used when listing packages and download details.
DEFAULT_PAGE_RECORD_COUNT = 100
class PackageCloudRepo(Enum):
    """Package Cloud repositories that download statistics are collected for."""
    community = "community"
    enterprise = "enterprise"
    azure = "azure"
    community_nightlies = "community-nightlies"
    enterprise_nightlies = "enterprise-nightlies"
    test = "test"
class PackageCloudOrganization(Enum):
    """Package Cloud organizations whose repos can be queried."""
    citusdata = "citusdata"
    citus_bot = "citus-bot"
class PackageCloudDownloadStats(Base):
    """Daily aggregate download counts: one row per package build, distro
    version and day."""

    __tablename__ = "package_cloud_download_stats"
    # Bug fix: the UniqueConstraint used to be a bare class-level expression,
    # which SQLAlchemy's declarative machinery silently ignores — it must be
    # declared via __table_args__ to reach the table definition.
    __table_args__ = (
        UniqueConstraint(
            "package_full_name",
            "download_date",
            "distro_version",
            name="ux_package_cloud_download_stats",
        ),
    )
    id = Column(INTEGER, primary_key=True, autoincrement=True)
    # When this row was collected.
    fetch_date = Column(TIMESTAMP, nullable=False)
    repo = Column(sqlalchemy.Enum(PackageCloudRepo), nullable=False)
    package_name = Column(String, nullable=False)
    # Full file name of the package artifact.
    package_full_name = Column(String, nullable=False)
    package_version = Column(String, nullable=False)
    package_release = Column(String)
    distro_version = Column(String, nullable=False)
    epoch = Column(String, nullable=False)
    package_type = Column(String, nullable=False)
    # Day the downloads were counted for.
    download_date = Column(DATE, nullable=False)
    download_count = Column(INTEGER, nullable=False)
    detail_url = Column(String, nullable=False)
class PackageCloudDownloadDetails(Base):
    """One row per individual download event of a package (timestamp, IP,
    user agent, source, read token)."""
    __tablename__ = "package_cloud_download_details"
    id = Column(INTEGER, primary_key=True, autoincrement=True)
    # When this row was collected.
    fetch_date = Column(TIMESTAMP, nullable=False)
    repo = Column(sqlalchemy.Enum(PackageCloudRepo), nullable=False)
    package_name = Column(String, nullable=False)
    # Full file name of the package artifact.
    package_full_name = Column(String, nullable=False)
    package_version = Column(String, nullable=False)
    package_release = Column(String)
    distro_version = Column(String, nullable=False)
    epoch = Column(String, nullable=False)
    package_type = Column(String, nullable=False)
    # Exact download timestamp and its calendar date.
    downloaded_at = Column(TIMESTAMP, nullable=False)
    download_date = Column(DATE, nullable=False)
    ip_address = Column(String)
    user_agent = Column(String)
    source = Column(String)
    read_token = Column(String)
def package_count(
    organization: PackageCloudOrganization,
    repo_name: PackageCloudRepo,
    package_cloud_api_token: str,
) -> int:
    """Return the number of packages in the given Package Cloud repository.

    Lists all repos visible to the token and matches on fully-qualified name.

    Raises:
        ValueError: when no repo with the given name is found.
    """
    response = requests.get(
        f"https://{package_cloud_api_token}:@packagecloud.io/api/v1/repos.json?include_collaborations=true",
        timeout=60,
    )
    target_fqname = f"{organization.name}/{repo_name.value}"
    for repo in json.loads(response.content):
        if repo["fqname"] == target_fqname:
            # The API reports a human string like "12 packages"; strip the suffix.
            return int(
                remove_suffix(repo["package_count_human"], PC_PACKAGE_COUNT_SUFFIX)
            )
    raise ValueError(
        f"Repo name with the name {repo_name.value} could not be found on package cloud"
    )
@dataclass
class PackageCloudParams:
    """Access credentials and repo coordinates for the packagecloud API."""
    # admin api token is citusdata token to get package details.
    admin_api_token: str
    # citus bot api token to make api calls other than package details
    standard_api_token: str
    organization: PackageCloudOrganization
    repo_name: PackageCloudRepo
@dataclass
class ParallelExecutionParams:
    """Describes this worker's slice when several collectors run in parallel."""
    # Total number of parallel workers.
    parallel_count: int
    # Zero-based index of this worker.
    parallel_exec_index: int
    # Packages fetched per page.
    page_record_count: int
def fetch_and_save_package_cloud_stats(
    db_params: DbParams,
    package_cloud_params: PackageCloudParams,
    parallel_execution_params: ParallelExecutionParams,
    is_test: bool = False,
    save_records_with_download_count_zero: bool = False,
):
    """Pipeline entry point: pages through the repo's package list on
    packagecloud (striding pages by worker count for parallel runs) and
    persists each package's stats and download details via
    fetch_and_save_package_stats / fetch_and_save_package_download_details."""
    repo_package_count = package_count(
        organization=package_cloud_params.organization,
        repo_name=package_cloud_params.repo_name,
        package_cloud_api_token=package_cloud_params.standard_api_token,
    )
    session = db_session(db_params=db_params, is_test=is_test)
    # Workers stride through pages: worker i handles pages i+1, i+1+N, ...
    page_index = parallel_execution_params.parallel_exec_index + 1
    start = time.time()
    while is_page_in_range(
        page_index, repo_package_count, parallel_execution_params.page_record_count
    ):
        result = stat_get_request(
            package_list_with_pagination_request_address(
                package_cloud_params,
                page_index,
                parallel_execution_params.page_record_count,
            ),
            RequestType.package_cloud_list_package,
            session,
        )
        package_info_list = json.loads(result.content)
        if len(package_info_list) > 0:
            # Advance by the worker count so parallel workers don't overlap.
            page_index = page_index + parallel_execution_params.parallel_count
        else:
            # Empty page: nothing more to process for this worker.
            break
        for package_info in package_info_list:
            fetch_and_save_package_download_details(
                package_info,
                package_cloud_params.admin_api_token,
                session,
                package_cloud_params.repo_name,
            )
            fetch_and_save_package_stats(
                package_info,
                package_cloud_params.standard_api_token,
                session,
                save_records_with_download_count_zero,
                package_cloud_params.repo_name,
            )
            # Commit per package so a crash loses at most one package's rows.
            session.commit()
    end = time.time()
    print("Elapsed Time in seconds: " + str(end - start))
def fetch_and_save_package_stats(
    package_info,
    package_cloud_api_token: str,
    session,
    save_records_with_download_count_zero: bool,
    repo_name: PackageCloudRepo,
):
    """Gets and saves the daily download statistics of the given package:
    queries its download-series endpoint and adds one
    PackageCloudDownloadStats row per completed day to the session."""
    request_result = stat_get_request(
        package_statistics_request_address(
            package_cloud_api_token, package_info["downloads_series_url"]
        ),
        RequestType.package_cloud_download_series_query,
        session,
    )
    if request_result.status_code != HTTPStatus.OK:
        raise ValueError(
            f"Error while getting package stat for package {package_info['filename']}"
        )
    download_stats = json.loads(request_result.content)
    # "value" maps a PC_DOWNLOAD_DATE_FORMAT date string to a download count.
    for stat_date in download_stats["value"]:
        download_date = datetime.strptime(stat_date, PC_DOWNLOAD_DATE_FORMAT).date()
        download_count = int(download_stats["value"][stat_date])
        # Store only finished days (today is still accumulating), skip ignored
        # packages and already-stored rows, and honour the zero-count policy.
        if (
            download_date != date.today()
            and not is_ignored_package(package_info["name"])
            and not stat_records_exists(
                download_date,
                package_info["filename"],
                package_info["distro_version"],
                session,
            )
            and is_download_count_eligible_for_save(
                download_count, save_records_with_download_count_zero
            )
        ):
            pc_stats = PackageCloudDownloadStats(
                fetch_date=datetime.now(),
                repo=repo_name,
                package_full_name=package_info["filename"],
                package_name=package_info["name"],
                distro_version=package_info["distro_version"],
                package_version=package_info["version"],
                package_release=package_info["release"],
                package_type=package_info["type"],
                epoch=package_info["epoch"],
                download_date=download_date,
                download_count=download_count,
                detail_url=package_info["downloads_detail_url"],
            )
            session.add(pc_stats)
def fetch_and_save_package_download_details(
    package_info,
    package_cloud_admin_api_token: str,
    session,
    repo_name: PackageCloudRepo,
):
    """Page through a package's individual download records and add one
    PackageCloudDownloadDetails row per record to the session."""
    print(
        f"Download Detail Query for {package_info['filename']}: {package_info['downloads_detail_url']}"
    )
    page_number = 1
    record_count = DEFAULT_PAGE_RECORD_COUNT
    # A short page (fewer records than the page size) means the previous
    # request returned the last page.
    while record_count == DEFAULT_PAGE_RECORD_COUNT:
        request_result = stat_get_request(
            package_statistics_detail_request_address(
                package_cloud_admin_api_token,
                package_info["downloads_detail_url"],
                DEFAULT_PAGE_RECORD_COUNT,
                page_number,
            ),
            RequestType.package_cloud_detail_query,
            session,
        )
        page_number = page_number + 1
        if request_result.status_code != HTTPStatus.OK:
            raise ValueError(
                f"Error while calling detail query for package {package_info['filename']}. "
                f"Error Code: {request_result.status_code}"
            )
        download_details = json.loads(request_result.content)
        record_count = len(download_details)
        for download_detail in download_details:
            downloaded_at = datetime.strptime(
                download_detail["downloaded_at"], PC_DOWNLOAD_DETAIL_DATE_FORMAT
            )
            download_date = downloaded_at.date()
            # NOTE(review): this dedup check uses stat_records_exists (the
            # aggregate stats table) rather than detail_records_exists, which
            # is defined below but never called — confirm this is intentional.
            if (
                download_date != date.today()
                and not is_ignored_package(package_info["name"])
                and not stat_records_exists(
                    download_date,
                    package_info["filename"],
                    package_info["distro_version"],
                    session,
                )
            ):
                download_detail_record = PackageCloudDownloadDetails(
                    fetch_date=datetime.now(),
                    repo=repo_name,
                    package_full_name=package_info["filename"],
                    package_name=package_info["name"],
                    distro_version=package_info["distro_version"],
                    package_version=package_info["version"],
                    package_release=package_info["release"],
                    package_type=package_info["type"],
                    epoch=package_info["epoch"],
                    download_date=download_date,
                    downloaded_at=downloaded_at,
                    ip_address=download_detail["ip_address"],
                    user_agent=download_detail["user_agent"],
                    source=download_detail["source"],
                    read_token=download_detail["read_token"],
                )
                session.add(download_detail_record)
def package_statistics_request_address(
    package_cloud_api_token: str, series_query_uri: str
):
    """Build the authenticated URL for a package's daily download-series query."""
    return "https://" + package_cloud_api_token + ":@packagecloud.io/" + series_query_uri
def package_statistics_detail_request_address(
    package_cloud_api_token: str, detail_query_uri: str, per_page: int, page_number: int
):
    """Build the authenticated, paginated URL for a package's download-detail query."""
    base = f"https://{package_cloud_api_token}:@packagecloud.io/{detail_query_uri}"
    return f"{base}?per_page={per_page}&page={page_number}"
def package_list_with_pagination_request_address(
    package_cloud_params: PackageCloudParams, page_index: int, page_record_count: int
) -> str:
    """Build the authenticated URL that lists one page of the repo's packages."""
    token = package_cloud_params.standard_api_token
    org = package_cloud_params.organization.name
    repo = package_cloud_params.repo_name.value
    return (
        f"https://{token}:@packagecloud.io/api/v1/repos/"
        f"{org}/{repo}"
        f"/packages.json?per_page={page_record_count}&page={page_index}"
    )
def is_download_count_eligible_for_save(
    download_count: int, save_records_with_download_count_zero: bool
) -> bool:
    """A count is saved when positive, or when zero and zero-saving is enabled."""
    if download_count > 0:
        return True
    return download_count == 0 and save_records_with_download_count_zero
def is_page_in_range(page_index: int, total_package_count: int, page_record_count: int):
    """True while the 1-based page still maps onto the repo's package list."""
    consumed = page_index * page_record_count
    previous = (page_index - 1) * page_record_count
    # Either the page ends before the last package, or it is the final,
    # possibly partial, page.
    return consumed < total_package_count or (
        consumed >= total_package_count > previous
    )
def stat_records_exists(
    download_date: date, package_full_name: str, distro_version: str, session
) -> bool:
    """Return True when a daily-stats row for this package build and day is
    already present in the stats table."""
    return (
        session.query(PackageCloudDownloadStats)
        .filter_by(
            download_date=download_date,
            package_full_name=package_full_name,
            distro_version=distro_version,
        )
        .first()
        is not None
    )
def detail_records_exists(
    downloaded_at: datetime,
    ip_address: str,
    package_full_name: str,
    distro_version: str,
    session,
) -> bool:
    """Return True when a download-detail row for this exact event is already
    present in the details table."""
    return (
        session.query(PackageCloudDownloadDetails)
        .filter_by(
            downloaded_at=downloaded_at,
            ip_address=ip_address,
            package_full_name=package_full_name,
            distro_version=distro_version,
        )
        .first()
        is not None
    )
def is_ignored_package(package_name: str) -> bool:
    """Debug-symbol packages and a few legacy name prefixes are excluded
    from the statistics."""
    if package_name.startswith(("citus-ha-", "pg-auto-failover-cli")):
        return True
    return package_name.endswith(("debuginfo", "dbgsym"))
# CLI entry point: parse DB/token/parallelism options and run one worker's
# share of the stats collection.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--organization", choices=[r.value for r in PackageCloudOrganization]
    )
    parser.add_argument("--repo_name", choices=[r.value for r in PackageCloudRepo])
    parser.add_argument("--db_user_name", required=True)
    parser.add_argument("--db_password", required=True)
    parser.add_argument("--db_host_and_port", required=True)
    parser.add_argument("--db_name", required=True)
    parser.add_argument("--package_cloud_api_token", required=True)
    parser.add_argument("--package_cloud_admin_api_token", required=True)
    # NOTE(review): required=True makes these `default` values dead code.
    parser.add_argument(
        "--parallel_count", type=int, choices=range(1, 30), required=True, default=1
    )
    parser.add_argument(
        "--parallel_exec_index",
        type=int,
        choices=range(0, 30),
        required=True,
        default=0,
    )
    parser.add_argument(
        "--page_record_count", type=int, choices=range(5, 101), required=True, default=0
    )
    parser.add_argument("--is_test", action="store_true")
    arguments = parser.parse_args()
    db_parameters = DbParams(
        user_name=arguments.db_user_name,
        password=arguments.db_password,
        host_and_port=arguments.db_host_and_port,
        db_name=arguments.db_name,
    )
    package_cloud_parameters = PackageCloudParams(
        admin_api_token=arguments.package_cloud_admin_api_token,
        standard_api_token=arguments.package_cloud_api_token,
        organization=PackageCloudOrganization(arguments.organization),
        repo_name=PackageCloudRepo(arguments.repo_name),
    )
    parallel_execution_params = ParallelExecutionParams(
        parallel_count=arguments.parallel_count,
        parallel_exec_index=arguments.parallel_exec_index,
        # NOTE(review): the parsed --page_record_count is ignored here and the
        # constant DEFAULT_PAGE_RECORD_COUNT is used instead — confirm whether
        # that is intentional.
        page_record_count=DEFAULT_PAGE_RECORD_COUNT,
    )
    fetch_and_save_package_cloud_stats(
        db_parameters,
        package_cloud_params=package_cloud_parameters,
        parallel_execution_params=parallel_execution_params,
        is_test=arguments.is_test,
    )
|
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import numpy as np
import pandas as pd
from app import app, df_to_table, Graph, update_graph, indicator, marks
from scipy.stats import norm
# calculate the greeks
# ['delta', 'gamma', 'vega', 'theta', 'rho', 'lambdaa','epsilon','vanna','charm','volga','DvegaDtime','veta','color','ultima','speed','zomma']
def greeks(df):
    """Compute Black-Scholes Greeks for a single option row.

    Parameters
    ----------
    df : pandas Series (one row of the options DataFrame) with at least
        'Spot Price', 'strike', 'impliedVolatility', 'days to expiration',
        'Call_Put_Flag' ('Call' or 'Put') and 'Long_Short_Flag' (1 = long).

    Returns
    -------
    pd.Series of 16 values in the order
    [delta, gamma, vega, theta, rho, lambdaa, epsilon, vanna, charm, volga,
     DvegaDtime, veta, color, ultima, speed, zomma]; all signs are flipped
    for short positions (Long_Short_Flag != 1).

    NOTE(review): if Call_Put_Flag is neither 'Call' nor 'Put', delta/theta/
    rho/... are never assigned and the final return raises NameError.
    """
    rate = 0.015  # hard-coded risk-free rate
    q = 0         # hard-coded dividend yield
    spot = df['Spot Price']
    strike = df['strike']
    vol = df['impliedVolatility']
    maturity = df['days to expiration']
    Call_Put_Flag = df['Call_Put_Flag']
    """ask, bid, change, contractSymbol, expiration,vol, inTheMoney, lastPrice, lastTradeDate,
    openInterest, percentChange, strike, volume,maturity, BA_Spread, DaysSinceLastTrade,
    spot, Call_Put_Flag = list(column_list)"""
    # Black-Scholes d1/d2; maturity is converted from days to years (T/365).
    d1 = (np.log(spot / strike) + (maturity / 365) * (rate - q + (vol ** 2) / 2)) / (vol * np.sqrt(maturity / 365))
    d2 = d1 - vol * np.sqrt(maturity / 365)
    # Flag-independent (second-order) Greeks. NOTE(review): the code mixes
    # np.pi and the literal 2 * 3.14159 for the same constant -- the 3.14159
    # occurrences introduce a tiny numerical inconsistency.
    gamma = ((np.exp(-q * maturity / 365) / (spot * vol * np.sqrt(maturity / 365))) * 1 / np.sqrt(2 * np.pi)) * np.exp(
        (-d1 ** 2) / 2)
    vega = (((spot * np.exp(-q * maturity / 365)) * np.sqrt(maturity / 365)) / 100) * 1 / np.sqrt(2 * 3.14159) * np.exp(
        (-d1 ** 2) / 2)
    vanna = np.exp(-q * maturity / 365) * np.sqrt(maturity / 365) * (d2 / vol) * np.exp(-(d1 ** 2) / 2) / (2 * np.pi)
    volga = (spot * (np.exp(q * maturity / 365)) * np.sqrt(maturity / 365) * (np.exp(-(d1 ** 2) / 2) * d1 * d2) / (
        np.sqrt(2 * np.pi)) * vol)
    ultima = - vega / (vol * vol) * (d1 * d2 * (1 - d1 * d2) + d1 * d1 + d2 * d2)
    color = - np.exp(-q * maturity / 365) * 1 / np.sqrt(2 * 3.14159) * np.exp((-d1 ** 2) / 2) * 1 / (
        2 * spot * (maturity / 365) * vol * np.sqrt(maturity / 365)) * (2 * q * (maturity / 365) + 1 + (
        2 * (rate - q) * (maturity / 365) - d2 * vol * np.sqrt(maturity / 365) / (
        vol * np.sqrt(maturity / 365)) * d1))
    zomma = gamma * ((d1 * d2 - 1) / vol)
    speed = - gamma / spot * (d1 / (vol * np.sqrt(maturity / 365)) + 1)
    veta = - spot * np.exp(-q * maturity / 365) * 1 / np.sqrt(2 * 3.14159) * np.exp((-d1 ** 2) / 2) * np.sqrt(
        maturity / 365) * (
        q + ((rate - q) * d1) / (vol * np.sqrt(maturity / 365) - (1 + d1 * d2) / (2 * (maturity / 365))))
    # First-order Greeks depend on the option flavour. NOTE(review): the bare
    # except clauses below silently map *any* failure to NaN, not just the
    # intended numerical problems.
    if Call_Put_Flag == 'Call':
        try:
            delta = np.exp(-q * maturity / 365) * norm.cdf(d1)
        except:
            delta = np.nan
        try:
            theta = ((1 / (maturity / 365)) * -((spot * vol * np.exp(-q * maturity / 365))) / 2 * np.sqrt(
                maturity / 365)) * (np.exp((-d1 ** 2) / 2)) / np.sqrt(2 * 3.14159) - rate * strike * np.exp(
                -rate * maturity / 365) * norm.cdf(d2) + q * spot * np.exp(rate * maturity / 365) * norm.cdf(d1)
        except:
            theta = np.nan
        try:
            rho = strike * maturity / 365 * np.exp(-rate * maturity / 365) * norm.cdf(d2)
        except:
            rho = np.nan
        try:
            epsilon = -strike * (maturity / 365) * np.exp(-q * maturity / 365) * norm.cdf(d1)
        except:
            epsilon = np.nan
        try:
            part1 = -q * (np.exp(-q * maturity / 365)) * norm.cdf(d1)
            part2 = np.exp(-q * maturity / 365) * norm.cdf(d1) * (
                (2 * (rate - q) * maturity / 365) - (d2 * vol * np.sqrt(maturity / 365))) / (
                2 * (maturity / 365) * vol * np.sqrt(maturity / 365))
            charm = part1 + part2
        except:
            charm = np.nan
        try:
            # NOTE(review): DvegaDtime is a hard-coded placeholder, not a real
            # computation -- the try/except around a constant is dead code.
            DvegaDtime = 1
        except:
            DvegaDtime = np.nan
        try:
            fair_value = spot * (np.exp(-q * maturity / 365)) * norm.cdf(d1) - (
                np.exp(-rate * maturity / 365)) * strike * norm.cdf(d2)
            lambdaa = delta * spot / fair_value
        except:
            lambdaa = np.nan
    if Call_Put_Flag == 'Put':
        try:
            # NOTE(review): textbook put delta is exp(-q*T) * (N(d1) - 1);
            # this uses exp(-rate*T) -- confirm against the pricing source.
            delta = np.exp(-rate * maturity / 365) * (norm.cdf(d1) - 1)
        except:
            delta = np.nan
        try:
            # NOTE(review): `1 / maturity / 365` here differs from the call
            # branch's `1 / (maturity / 365)` -- likely a precedence bug.
            theta = ((1 / maturity / 365) * -(spot * vol * np.exp(-q * maturity / 365)) / 2 * np.sqrt(
                maturity / 365) * (
                np.exp((-d1 ** 2) / 2)) / np.sqrt(2 * 3.14159)) + rate * strike * np.exp(
                -rate * maturity / 365) * norm.cdf(
                -d2) - q * spot * np.exp(rate * maturity / 365) * norm.cdf(-d1)
        except:
            theta = np.nan
        try:
            rho = -strike * maturity / 365 * np.exp(-rate * maturity / 365) * norm.cdf(-d2)
        except:
            rho = np.nan
        try:
            epsilon = strike * (maturity / 365) * np.exp(-q * maturity / 365) * norm.cdf(-d1)
        except:
            epsilon = np.nan
        try:
            part1 = q * (np.exp(-q * maturity / 365)) * norm.cdf(-d1)
            # NOTE(review): part2 uses norm.cdf(d1), identical to the call
            # branch -- verify whether the put charm should use the pdf/N(-d1).
            part2 = np.exp(-q * maturity / 365) * norm.cdf(d1) * (
                (2 * (rate - q) * maturity / 365) - (d2 * vol * np.sqrt(maturity / 365))) / (
                2 * (maturity / 365) * vol * np.sqrt(maturity / 365))
            charm = part1 + part2
        except:
            charm = np.nan
        try:
            DvegaDtime = 1
        except:
            DvegaDtime = np.nan
        try:
            # NOTE(review): put fair value is missing the strike factor on the
            # first term (should be K*exp(-r*T)*N(-d2) - S*exp(-q*T)*N(-d1)).
            fair_value = (np.exp(-rate * maturity / 365)) * norm.cdf(-d2) - spot * (
                np.exp(-q * maturity / 365)) * norm.cdf(-d1)
            lambdaa = delta * spot / fair_value
        except:
            lambdaa = np.nan
    # Long positions keep the signs; short positions flip every Greek.
    if df['Long_Short_Flag'] == 1:
        return pd.Series([delta, gamma, vega, theta, rho, lambdaa, epsilon, vanna, charm, volga, DvegaDtime, veta, color, ultima, speed, zomma])
    else:
        return pd.Series([-delta, -gamma, -vega, -theta, -rho, -lambdaa, -epsilon, -vanna, -charm, -volga, -DvegaDtime, -veta, -color, -ultima, -speed, -zomma])
# (colour, label, element-id) triples for each indicator row of the page,
# in display order: one html.Div row of three indicators per sub-list.
_INDICATOR_ROWS = [
    [("#00cc96", "Delta", "delta_indicator"),      # first line: delta, gamma, vega
     ("#119DFF", "Gamma", "gamma_indicator"),
     ("#EF553B", "Vega", "vega_indicator")],
    [("#00cc96", "Theta", "theta_indicator"),      # second line: theta, rho, veta
     ("#119DFF", "Rho", "rho_indicator"),
     ("#EF553B", "Veta", "veta_indicator")],
    [("#00cc96", "Speed", "speed_indicator"),      # third line: speed, zomma, color
     ("#119DFF", "Zomma", "zomma_indicator"),
     ("#EF553B", "Color", "color_indicator")],
    [("#00cc96", "Ultima", "ultima_indicator"),    # fourth line: ultima, lambdaa, epsilon
     ("#00cc96", "Lambdaa", "lambdaa_indicator"),
     ("#00cc96", "Epsilon", "epsilon_indicator")],
    [("#00cc96", "Vanna", "vanna_indicator"),      # fifth line: vanna, charm, volga
     ("#00cc96", "Charm", "charm_indicator"),
     ("#00cc96", "Volga", "volga_indicator")],
]
# Build the page layout data-driven instead of repeating the Div boilerplate.
layout = [
    html.Div(
        [indicator(colour, label, element_id)
         for colour, label, element_id in row],
        className="row",
    )
    for row in _INDICATOR_ROWS
]
# updates delta indicator value based on df updates
@app.callback(Output("delta_indicator", "children"),
              [Input("options_df", "children")],)
def delta_indicator_callback(df):
    # df arrives as the JSON string stored in the hidden "options_df" div.
    df = pd.read_json(df, orient="split")
    print('portfolio', df)
    # NOTE(review): `result` is published as a module-level global so the
    # other indicator callbacks can reuse it without recomputing; this relies
    # on this callback firing before them, which Dash does not guarantee.
    global result
    result = df.apply(greeks, axis=1).fillna(0)
    print('greeks', result)
    # Column 0 of the Series returned by greeks() is delta.
    return round(result[0].sum(), 3)
# NOTE(review): every callback below ignores its `df` argument and reads the
# module-global `result` computed in delta_indicator_callback(); if Dash fires
# one of these before the delta callback, `result` is undefined (NameError).
# Column order (from greeks()): 0=delta 1=gamma 2=vega 3=theta 4=rho
# 5=lambdaa 6=epsilon 7=vanna 8=charm 9=volga 10=DvegaDtime 11=veta
# 12=color 13=ultima 14=speed 15=zomma.
# updates gamma indicator value based on df updates
@app.callback(Output("gamma_indicator", "children"),
              [Input("options_df", "children")],)
def gamma_indicator_callback(df):
    return round(result[1].sum(), 3)
# updates vega indicator value based on df updates
@app.callback(Output("vega_indicator", "children"),
              [Input("options_df", "children")],)
def vega_indicator_callback(df):
    return round(result[2].sum(), 3)
#second line indicators: theta, rho, veta
@app.callback(Output("theta_indicator", "children"),
              [Input("options_df", "children")],)
def theta_indicator_callback(df):
    return round(result[3].sum(), 3)
@app.callback(Output("rho_indicator", "children"),
              [Input("options_df", "children")],)
def rho_indicator_callback(df):
    return round(result[4].sum(), 3)
@app.callback(Output("veta_indicator", "children"),
              [Input("options_df", "children")],)
def veta_indicator_callback(df):
    return round(result[11].sum(), 3)
#third line indicators: speed, zomma, color
@app.callback(Output("speed_indicator", "children"),
              [Input("options_df", "children")],)
def speed_indicator_callback(df):
    return round(result[14].sum(), 3)
@app.callback(Output("zomma_indicator", "children"),
              [Input("options_df", "children")],)
def zomma_indicator_callback(df):
    return round(result[15].sum(), 3)
@app.callback(Output("color_indicator", "children"),
              [Input("options_df", "children")],)
def color_indicator_callback(df):
    return round(result[12].sum(), 3)
#fourth line indicators: ultima, lambdaa, epsilon
@app.callback(Output("ultima_indicator", "children"),
              [Input("options_df", "children")],)
def ultima_indicator_callback(df):
    return round(result[13].sum(), 3)
@app.callback(Output("lambdaa_indicator", "children"),
              [Input("options_df", "children")],)
def lambdaa_indicator_callback(df):
    return round(result[5].sum(), 3)
@app.callback(Output("epsilon_indicator", "children"),
              [Input("options_df", "children")],)
def epsilon_indicator_callback(df):
    return round(result[6].sum(), 3)
#fifth line indicators: vanna, charm, volga
@app.callback(Output("vanna_indicator", "children"),
              [Input("options_df", "children")],)
def vanna_indicator_callback(df):
    return round(result[7].sum(), 3)
@app.callback(Output("charm_indicator", "children"),
              [Input("options_df", "children")],)
def charm_indicator_callback(df):
    return round(result[8].sum(), 3)
@app.callback(Output("volga_indicator", "children"),
              [Input("options_df", "children")],)
def volga_indicator_callback(df):
    return round(result[9].sum(), 3)
|
import numpy as np

# Read a sequence of student IDs ("matriculas") and print only the odd ones.
# NOTE: eval(input(...)) executes arbitrary code -- acceptable only for a
# trusted classroom exercise, never for untrusted input.
mat = np.array(eval(input("Matricula dos alunos: ")))

# Collect the odd entries in one pass (the original counted them first, then
# filled a pre-sized zeros array in a second loop; the unused even-counter
# slot x[0] has been dropped).
v = np.array([elemento for elemento in mat if elemento % 2 != 0], dtype=int)
print(v)
|
# Name: Efi Pecani
# ID: 307765230
from tkinter import * #import all of this stuff in order to wor kwith tkinter methods
import tkinter as tk
from tkinter import filedialog
from tkinter import ttk, messagebox
from PIL import ImageTk
from PIL import Image
import os, sys
import tkinter.font as tkFont
from image_processing import * #my processing file and functions
#______________________________________________
#______________________________________________
def loadDirectory(path): # gets the list of imges and loads to a listbox
    """Fill imgList with the supported image files found in `path`.

    `path` is either a directory string, or -- because this function is also
    bound as a Tk event handler (<Return> on entries, the "Go" button) -- a
    Tk Event object; in that case the else-branch reads the directory from
    pathEntry and recurses once with the string.
    """
    try:
        imgList.delete(0,END)  # clear any previously listed files
        if (type(path))== str:
            picDir = os.listdir(path)
            countFile=0 #initial value
            for fileName in picDir:
                # Only these extensions are listed (case-sensitive except .JPG).
                if fileName[-4:] == ".gif" or fileName[-4:] == ".bmp" or fileName[-4:] == ".jpg" or fileName[-4:] == ".JPG" or fileName[-4:] == ".png":# or fileName[-5:] == ".tiff" :or
                    imgList.insert("end", fileName)
                    countFile += 1
            if countFile == 0: #in case the firectory is empty
                noPics() #notify the user
        else:
            # Invoked as an event callback: take the path from the entry box.
            global adress
            adress= pathEntry.get()
            loadDirectory(adress)
    except:
        # NOTE(review): bare except -- any failure (bad path, missing
        # globals, ...) is reported as an empty directory.
        emptyDir() #notify the user
    return ()
#______________________________________________
def createPhoto(): #creates a pic frame and prepers it for the users choise
    """Create the (global) preview frame and canvas inside imageZone.

    photoframe later hosts the preview Labels; photo_canvas just draws the
    300x400 placeholder rectangle.
    """
    global photoframe
    global photo_canvas
    photoframe = Frame(imageZone)
    photo_canvas = Canvas(imageZone,height= 300, width= 400)
    photo_canvas.create_rectangle(300, 400, 0, 0)
    photoframe.grid()
    photoframe.place(x=400, y=40)  # fixed pixel position inside imageZone
    return()
#______________________________________________
def clearBox(): # gets the list of imges and loads to a listbox
    """Reset the UI: clear both entries and the file list, and rebuild the
    preview area (and drop the threshold widgets if present)."""
    try:
        pathEntry.delete(0,END) #clearing entries
        saveEntry.delete(0,END)
        imgList.delete(0,END) #delets boxlist
        photoLable.grid_remove()  # NameError here if no preview was shown yet
        photoframe.destroy() #removes pic frame
        photo_canvas.destroy()
        createPhoto()
        thershZone.destroy()#threshold related widgets
        # NOTE(review): this starts a *nested* Tk event loop from inside a
        # callback (mainloop() is already running) -- probably unintended.
        root.mainloop()
    except:
        # Any missing widget above lands here and just informs the user.
        field_been_cleared()
#_______________________________________
def deletUpper(event):
    """Click handler: clear the directory-path entry before typing."""
    pathEntry.delete(0,END)
#_______________________________________
def deletLower(event):
    """Click handler: clear the save-path entry before typing."""
    saveEntry.delete(0,END)
#_______________________________________
def previewPic(name,adress):
    """Open `adress`/`name`, resize it for the preview pane and show it.

    NOTE(review): (372/h)*h == 372 and (234/w)*w == 234 algebraically, so the
    "scaling" below always forces ~372x234 regardless of the source aspect
    ratio (only float rounding varies). Also PIL's Image.size is
    (width, height), while the code names dim[0] "height" -- the two mix-ups
    happen to cancel into a fixed-size preview.
    """
    global picFile
    picFile = Image.open(adress+"/"+name) # gets the full path of the file
    dim = picFile.size # get the dimntions
    height = dim[0]
    width = dim[1] # Calculate the img size to 372x234
    new_height = (372/float(height))*float(height)
    new_width = (234/float(width))*float(width)
    size = (int(new_height), int(new_width))
    picFile= picFile.resize(size) # Change the size of the img
    photo = ImageTk.PhotoImage(picFile)
    global photoLable
    photoLable = Label(photoframe, image=photo)
    # Keep a reference on the widget so the PhotoImage isn't garbage-collected.
    photoLable.image = photo
    photoLable.grid(row=0, column=1)
#______________________________________________
def previewProcessed(photo_file):
    """Show a processed PIL image below the original preview.

    Same fixed ~372x234 sizing no-op as previewPic() (see note there).
    """
    dim = photo_file.size # get the dimentions
    width = dim[0]
    height = dim[1] # Calculate the img size to 372x234
    new_height =(372/float(height))*float(height) # (240/float(height))*float(height)
    new_width = (234/float(width))*float(width) #(180/float(width))*float(width)
    size = (int(new_height), int(new_width))
    photo_file= photo_file.resize(size) # Change the size of the img
    photo = ImageTk.PhotoImage(photo_file)
    photoLable2 = Label(photoframe, image=photo)
    # Keep a reference on the widget so the PhotoImage isn't garbage-collected.
    photoLable2.image = photo
    photoLable2.grid(row=5, column=1)
#______________________________________________
def getPic(event): # Show the img in a new window
    """"Choose" button handler: preview the image selected in imgList.

    Rebuilds the global fileList (filtered filenames in display order) so the
    listbox selection index maps to an actual file, then previews it.
    """
    try:
        try:
            dirc = os.listdir(adress)   # NOTE(review): result unused; this only
                                        # probes whether `adress` is defined/valid
        except:
            adress= pathEntry.get()
        tikia = os.listdir(adress)
        global fileList
        fileList = []
        items_num = imgList.curselection()
        first_item = imgList.get("active")  # NOTE(review): unused
        for file in tikia: # If the file is not from this list it will not add it to the list
            if file[-4:] == ".gif" or file[-4:] == ".bmp" or file[-4:] == ".jpg" or file[-4:] == ".JPG" or file[-4:] == ".png":
                fileList.append(file)
        global fileName
        # Only the first selected item is previewed (IndexError if none).
        fileName = fileList[items_num[0]]
        return previewPic(fileName,adress)
    except FileNotFoundError:
        noCanDo()
#______________________________________________
def openFile(): #opens file from directory
    """Let the user pick the source directory via a dialog, echo it into the
    path entry and list its images."""
    global adress
    adress = filedialog.askdirectory()
    pathEntry.insert(0,adress)
    loadDirectory(adress)
#______________________________________________
def closeWin():
    """Destroy the root window, ending the application."""
    root.destroy() #quits the program if user hits "yes"
#______________________________________________
def questionQuit():
    """Ask for confirmation, then close the main window on "yes"."""
    # askyesno already returns a bool, so test it directly.
    if messagebox.askyesno("Close Window", "Are you sure you want to exit?"):
        closeWin()
#______________________________________________
def browseSave():
    """Let the user pick the destination directory and show it in saveEntry.

    NOTE(review): savePath/sPath here are *locals* -- the module-global
    savePath (used by selctedProcess for previews) is NOT updated; only
    saveFile() re-reads the entry box, so previews still use the old value.
    """
    saveEntry.delete(0,END)
    savePath = filedialog.askdirectory()#mode='w', defaultextension=".jpg")
    sPath=savePath #in order not loosing save path after inserted
    saveEntry.insert(0,sPath)
#______________________________________________
def getProcess(event):
    """"Preview" button handler: record the selected process and preview it.

    Stores the selected listbox index in the global procNum. For process 3
    (edge detection) it first builds the threshold entry; selctedProcess()
    will then complain via noTH() until a valid threshold is typed.
    """
    try:
        proc_num = opList.curselection()
        proc = opList.get("active")  # NOTE(review): unused
        global procNum
        procNum=proc_num[0]
        if procNum==3 :
            createThreshold() #opens a thershelod textbox
        # Uses the module-global savePath ("" until saveFile runs) -- the
        # preview helpers are expected to tolerate that.
        selctedProcess(savePath)
    except ValueError: #to solve problem with some file formats
        PicformatError()
    except TypeError:
        PicformatError() #to solve problem with some file formats
    except IndexError:
        processError() #to solve problem when no process is selcted and prewviw beeing hit anyway
#______________________________________________
def saveFile(): #saves the modified image
    """Apply the selected process to every selected image and save the results.

    Reads the process index from opList, the files from imgList, the source
    directory from the global `adress` and the destination from saveEntry.
    Each output is named <name>_processed.<ext>. All outcomes (success,
    nothing saved, bad format, no selection, permission problems) are
    reported to the user via message boxes; nothing is raised to the caller.
    """
    # Process-index -> processing function (all from image_processing).
    # Index 3 (edge detection) is special-cased below because it needs the
    # user-supplied threshold from thEntry.
    ops = {0: rotatePicture,      # rotate 180
           1: mirrorPicture,     # mirror
           2: resizePicture,     # resize
           4: MyAlgorithm1,      # primary colors
           5: MyAlgorithm2,      # half-tone gray
           6: gaussBlur,         # blur
           7: minFilter,         # min filter
           8: sharpen,           # sharpen
           9: contour,           # contour
           10: detail,           # detail
           11: edgeEnhanceMore,  # edge enhance more
           12: emboss,           # emboss
           13: kernelSmooth}     # smooth more
    try:
        procNum = opList.curselection()[0]  # IndexError if no process selected
        savePath = saveEntry.get()
        filesWanted = [fileList[place] for place in imgList.curselection()]
        if not filesWanted:
            # Original code reached processError() here via a NameError on an
            # uninitialised counter; make the "no files selected" path explicit.
            processError()
            return
        savedPicCount = 0
        for pic in filesWanted:  # For each file from selected
            src = adress + "/" + pic
            dst = savePath + "/" + pic[:-4] + "_processed" + pic[-4:]
            if procNum == 3:
                thUser = thEntry.get()
                if thUser != "" and int(thUser) < 200 and int(thUser) > 0:
                    edge(src, dst, int(thUser))  # edge detection with threshold
                    savedPicCount += 1
                else:
                    noTH()  # threshold missing or out of range
            elif procNum in ops:
                ops[procNum](src, dst)
                savedPicCount += 1
        if savedPicCount != 0:
            PicSaved(savedPicCount)
        else:
            savingError()
    except PermissionError:
        emptyDir()
    except (ValueError, TypeError):
        PicformatError()  # some formats (e.g. gifs) fail inside PIL
    except (NameError, IndexError):
        processError()    # no process selected / files never listed
#______________________________________________
def loadProName():
    """Populate the operations listbox with the names of all processes.

    The list order is significant: each name's position must match the
    process index used by saveFile() and selctedProcess().
    """
    global procList
    procList = ["Rotate 180 ⤵ ",    #0
                "Mirror image ⇹",   #1
                "Resize ⇱ ⇲",       #2
                "Edge recognition", #3
                "Primary colors",   #4 my algorith
                "Half-Tone",        #5
                "Blur",             #6
                "Min Filter",       #7
                "Sharpen",          #8
                "Contour",          #9
                "Detail",           #10
                "Edge Enhance+",    #11
                "Emboss Filter",    #12
                "Kernel Smooth + ",]  #13
    for processName in procList:
        opList.insert("end", processName)  # append in index order
    return ()
#______________________________________________
def callAgainWithTH(): #calls the function again with the new parameter assigned by user
    """Re-run the edge-detection preview after the user entered a threshold.

    The threshold is read from thEntry inside selctedProcess(), and procNum
    is the module-global already set to 3 by getProcess() before this button
    exists, so the two local assignments the original made here (thUser,
    procNum) were dead code and have been removed.
    """
    selctedProcess(savePath) #goes back to function
#______________________________________________
def createThreshold(): #creates a text box for threshold value fron user
    """Build the threshold label + entry used by edge detection (process 3).

    Clicking the entry creates the "Process!" button (see createThButton).
    """
    global thLable
    global thEntry
    global thershZone
    thershZone = Frame(imageZone) #creates new frame for threshold procedure
    thershZone.configure(bg = lsb4)
    thershZone.grid(row = 12, column=0 )#, columnspan=3)
    thLable=Label(thershZone, text="Threshold:" ,bg=nw4,fg="khaki1" ,padx=10)
    thLable.grid(row=12, column = 0,sticky=W)
    thEntry=Entry(thershZone,bg = nw4,fg="khaki1",selectbackground ="turquoise4")
    thEntry.grid(row=12, column = 1,sticky=E)
    thEntry.bind("<Button-1>", createThButton) #click on the textbox creates the button
def createThButton(event): #creates a button for processing edges when user clicks on textbox
    """Create the "Process!" button that re-runs the preview with the
    threshold typed into thEntry (created lazily on first click)."""
    global thButton
    thButton=Button(thershZone, text="Process!" ,command=callAgainWithTH)
    thButton.grid(row=12, column = 1,sticky=E) #positiong of button
#______________________________________________
def selctedProcess(savePath): #calls selcted process from "img process" and displayes preview
    """Run the globally selected process on the chosen file and preview it.

    Uses the globals procNum (set by getProcess), adress and fileName (set
    by getPic); `savePath` is forwarded to the processing functions. The
    returned processed PIL image is shown via previewProcessed().
    """
    # Process-index -> processing function; index 3 (edge detection) is
    # special-cased because it needs the threshold value from thEntry.
    ops = {0: rotatePicture, 1: mirrorPicture, 2: resizePicture,
           4: MyAlgorithm1, 5: MyAlgorithm2, 6: gaussBlur, 7: minFilter,
           8: sharpen, 9: contour, 10: detail, 11: edgeEnhanceMore,
           12: emboss, 13: kernelSmooth}
    src = adress + "/" + fileName
    if procNum == 3:
        try:
            thUser = thEntry.get()
            if thUser != "" and int(thUser) < 200 and int(thUser) > 0:
                newPic = edge(src, savePath, int(thUser))  #3 edges
                previewProcessed(newPic)
            else:
                noTH()  # threshold missing or out of range
                thEntry.delete(0,END)
        except:
            # NOTE(review): bare except kept from the original -- it hides
            # real errors as well as a missing/invalid threshold.
            noTH() #in case no threshold was not inserted correctly
            thEntry.delete(0,END)
    elif procNum in ops:
        newPic = ops[procNum](src, savePath)
        previewProcessed(newPic)
#_____________________________MESSEEGES TO USER_________________________________
def emptyDir(): #incase a directory path is empty displays an error lable near it
    """Tell the user the chosen directory is empty or unusable.

    NOTE(review): the original file defined emptyDir twice; the first
    definition ("The Folder doesn't contain any photo files.") was silently
    shadowed by this one, so only this message was ever shown. The dead
    duplicate has been removed -- runtime behaviour is unchanged.
    """
    messagebox.showerror("Error", "Empty Directory \nPlease insert a correct directory adress.")
#______________________________________________
def field_been_cleared(): #incaes a button is pressed in vain
    """Confirm to the user that all input fields were cleared."""
    messagebox.showinfo("Clear", "Fields been cleared \nInsert a valid directory path if you wish to continue editing.")
#______________________________________________
def noCanDo(): #general error
    """Generic fallback error dialog."""
    messagebox.showerror("Error", "General Error \nTry pressing the 'Clear' button first and try again. ")
def theFilesSaved(): #general error
    """Success dialog after saving. NOTE(review): never called in this file."""
    messagebox.showinfo("Save Preformed", "Image Processing completed Sucessfully \n Pictures saved in designated folder.")
#______________________________________________
def PicformatError(): #format error that is common with gifs
    """Dialog for image formats PIL could not process."""
    messagebox.showinfo("Oops Something Went Wrong", "Image can not be processed \ntry changing pic format or using diffrent filter." )
def noTH(): #format error that is common with gifs
    """Dialog asking for a valid edge-detection threshold (0..200)."""
    messagebox.showinfo("Threshold wasn't inserted", "Please insert a value between 0 and 200 and try again!")
def processError():
    """Dialog shown when no process (or no file) was selected."""
    messagebox.showerror("Process Selection Required", "Please select a process and hit 'Preview ➩' try again!")
def PicSaved(piCounter):
    """Dialog reporting how many images were saved."""
    messagebox.showinfo("Process Completed", str(piCounter)+" Images were Saved! \nPlease check folder!")
def savingError():
    """Dialog shown when zero images were saved."""
    messagebox.showerror("Attention!", "NOT all Images were Saved! \nPlease check folder! and try again")
#_____________________ END OF SECONDERY FUNCTIONS______________________________________________________
''' %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% '''
#______________________BEGINING OF MAIN FUNCTION WITH WIDGETS _________________________________________
def mainWin():
    """Build the whole GUI (frames, entries, listboxes, buttons), wire up the
    event handlers and start the Tk event loop. All widgets that other
    functions need are published as module globals."""
    global intro
    global imageZone
    global savingZone
    global pathEntry
    global imgList
    global opList
    global savePath
    global saveEntry
    global thUser
    thUser="" #initail value for threshold
    savePath="" #initial value for a save
    #frames defined
    intro = Frame(root)
    imageZone = Frame(root)
    imageZone.configure(bg = lsb4)
    savingZone = Frame(root)
    savingZone.configure(bg = lsb4)
    #order of frames
    intro.grid(row = 0, columnspan = 4, )
    imageZone.grid(rowspan=9, row = 1, column = 0, ipadx=50)
    savingZone.grid(row = 10,columnspan = 3, ipadx=50)
    # choosing image area
    header = Label(intro,padx = 200, pady = 10, text=" Welcome to Efi's Editor ", font=root.customFont, bg="khaki1",fg="gray25")
    pathLabel = Label(imageZone, text=" Please enter image directory path:",bg=nw4,fg="khaki1")
    pathEntry = Entry(imageZone,bg = nw4,fg="khaki1",selectbackground ="turquoise4")
    # NOTE(review): loadDirectory is bound as an event handler here, so it
    # receives a Tk Event (not a path string) -- its else-branch handles that.
    pathEntry.bind_class("Entry","<Return>", loadDirectory)
    pathEntry.bind("<Button-1>",deletUpper)
    goButton = Button(imageZone, text="Go")
    goButton.bind("<Button-1>", loadDirectory)
    clearButton = Button(imageZone, text="Clear", command=clearBox) #clears all
    listLabel = Label(imageZone, text=" The folder containes the folowing images:",bg=nw4,fg="khaki1")
    opendir= Button(imageZone, text=" Open Directory... ", fg="navy", command=openFile)
    # pic list area
    scrollbar1 = Scrollbar(imageZone, orient="vertical")
    imgList = Listbox(imageZone, selectmode=MULTIPLE, yscrollcommand=scrollbar1.set, width=27,selectbackground =nw4,bg = "khaki1",fg="gray25",exportselection=0) #first list
    scrollbar1.config(command=imgList.yview)
    choosePics = Button(imageZone ,text=" Choose ➩ ")
    choosePics.bind("<Button-1>", getPic)
    # operation command
    processLabel = Label(imageZone, text="Process your image with:", bg=nw4,fg="khaki1")
    scrollbar2 = Scrollbar(imageZone, orient="vertical")
    opList = Listbox(imageZone, selectmode=SINGLE, yscrollcommand=scrollbar2.set, width=27,selectbackground =nw4,bg = "khaki1",fg="gray25",exportselection=0)
    scrollbar2.config(command=opList.yview)
    processPics = Button(imageZone ,text=" Preview ➩ ")
    processPics.bind("<Button-1>", getProcess)
    # save zone area
    savedPath = Label(savingZone,text=" Please enter processed image path: ", bg=nw4,fg="khaki1")
    saveEntry = Entry(savingZone ,bg = nw4,fg="khaki1",selectbackground ="turquoise4")
    saveEntry.bind("<Button-1>",deletLower)
    browseSaveButton = Button(savingZone, text="Browse Saving Directory...",command=browseSave)
    saveButton = Button(savingZone, text="Save",command=saveFile)
    quitbutton = Button(root, text="Quit", command=questionQuit)
    #+++++++++++++++++++++++ Grid Positioning: +++++++++++++++++++++++++++++
    # browse area grid
    header.grid(row=0, ipadx=100)#columnspan=20) # the welcome title
    pathLabel.grid(row=1, column=0, sticky=W) # image path lable "please enter.."
    pathEntry.grid(row=1, column = 1, ipadx=50 )
    goButton.grid(row=1, column=2, sticky=W)
    clearButton.grid(row=1, column=4, sticky=W)
    opendir.grid(row=1, column=3, sticky=W)
    listLabel.grid(row=3, sticky=N)
    choosePics.grid(row=6, column=0,sticky=E) #1st button
    # choosePics.place(x=290, y=120)
    createPhoto() # photo zone
    # listbox for img
    scrollbar1.grid(row=5, column=0,sticky=E, ipady=72)
    imgList.grid(row=5, column=0)
    imgList.columnconfigure(0, weight=1)
    #listbox for processing
    processLabel.grid(row=9 ,column=0) #process your image..."
    scrollbar2.grid(row=10, column=0,sticky=E, ipady=72)
    opList.grid(row=10 ,column=0 )
    loadProName()
    processPics.grid(row=11, column=0,sticky=E) #2nd button
    # processPics.place(x=290, y=320)
    #emptyLable = Label(imageZone, text=" ", bg=lsb4)
    #emptyLable.grid(row=12 ,column=0,pady=45) #for spacing it up
    # save area grid
    savedPath.grid(row=1, column=0, sticky= W) #please enter processed.."
    saveEntry.grid(row=1, column=1, ipadx=50, pady=20)
    browseSaveButton.grid(row=1, column=2, sticky= E,ipadx = 20) # "save as..."
    saveButton.grid(row=1, column=3, sticky= E) #when ready to save
    quitbutton.grid(row=10, column=0,sticky= E)#, sticky= S+E)#row=1, column=5, sticky= E)
    # createThreshold()
    root.mainloop()
'''$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ ACTUAL PROGRAM $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$'''
# Root/window container definition
root = tk.Tk()
lsb4="LightSteelBlue4"  # shared background colour for frames
nw4="NavajoWhite4"      # shared colour for labels/entries
root.geometry("950x700") #windos size
root.configure(bg=lsb4)
root.title("Efi's Editor")
root.customFont = tkFont.Font(family="Verdana", size=25, weight=tkFont.BOLD, underline = True)
# Build all widgets and enter the Tk event loop (blocks until quit).
mainWin()
#adress for self checking: /Users/EfiPaka/Pictures/
|
#!/usr/bin/env python
import subprocess
import optparse
import re
def get_argument():
    """Parse -i/--interface and -m/--mac from the command line.

    Both options are mandatory; a missing one aborts via parser.error().
    Returns the optparse options object.
    """
    parser = optparse.OptionParser()
    parser.add_option("-i", "--interface", dest="interface", help="Interface to change its MAC address")
    parser.add_option("-m", "--mac", dest="new_mac", help="MAC address the user want")
    options, _arguments = parser.parse_args()
    # Guard clauses: parser.error() exits, so plain ifs replace the if/elif.
    if not options.interface:
        parser.error("[-] Please specify an interface, use --help for info")
    if not options.new_mac:
        parser.error("[-] Please specify a new mac, use --help for info")
    return options
def get_arguments1():
    """Variant of get_argument() without the required-option validation.

    Returns the full (options, arguments) tuple from optparse.
    NOTE(review): appears to be an unused leftover -- the script body below
    only calls get_argument().
    """
    parser = optparse.OptionParser()
    parser.add_option("-i","--interface", dest="interface", help="Interface to change its MAC address")
    parser.add_option("-m","--mac", dest="new_mac", help="MAC address the user want")
    return parser.parse_args()
def change_mac(interface, new_mac):
    """Bring `interface` down, set its hardware address to `new_mac`, bring it up.

    Requires sudo/ifconfig. Uses argument-list subprocess calls
    (shell=False), which is the safe form given that interface/new_mac come
    straight from the command line; the old commented-out shell=True
    variants were dead code and have been removed.
    """
    print("[+] Changing mac address for " + interface + " to " + new_mac)
    subprocess.call(["sudo", "ifconfig", interface, "down"])
    subprocess.call(["sudo", "ifconfig", interface, "hw", "ether", new_mac])
    subprocess.call(["sudo", "ifconfig", interface, "up"])
def get_current_mac(interface):
    """Return the first MAC-address-like string in `ifconfig <interface>` output.

    Returns None (implicitly, after printing a warning) when none is found.
    NOTE(review): check_output returns bytes; str(ifconfig_result) produces
    "b'...'" rather than a decoded string -- the regex still matches, but
    ifconfig_result.decode() would be cleaner.
    """
    ifconfig_result = subprocess.check_output(["sudo","ifconfig",interface])
    mac_address_search_result = re.search(r"\w\w:\w\w:\w\w:\w\w:\w\w:\w\w", str(ifconfig_result))
    if mac_address_search_result:
        return mac_address_search_result.group(0)
    else:
        print("[-] Could not read MAC Address.")
# interface=options.interface
# new_mac=options.new_mac
# Script entry: parse args, show the current MAC, change it, then verify the
# change by re-reading the interface.
options=get_argument()
current_mac = get_current_mac(options.interface)
print("Current MAC : "+str(current_mac))
change_mac(options.interface,options.new_mac)
current_mac=get_current_mac(options.interface)
# Verification compares the re-read MAC with the requested one (case-sensitive).
if current_mac == options.new_mac:
    print("[+] MAC address was successfully changed to "+ current_mac)
else:
    print("[-]MAC address did not get changed")
from __future__ import division, print_function
import opticstools as ot
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
try:
import ipdb
except:
#The following ling is dodgy, but still enables set_trace()
import pdb as ipdb
import scipy.ndimage as nd
import scipy.linalg as la
import matplotlib.cm as cm
import time
TENTH_AIRY_RING = 10.25
COLORBAR_FRACTION = 0.046
COLORBAR_PAD = 0.04
plt.ion()
def strehl(psf, psf_dl):
    """Return the Strehl ratio: peak of the aberrated PSF divided by the
    peak of the diffraction-limited PSF."""
    peak = np.amax(psf)
    peak_dl = np.amax(psf_dl)
    return peak / peak_dl
def centre_crop(psf, psf_sz_cropped):
    """Crop a 2D image to psf_sz_cropped x psf_sz_cropped about its centre.

    Bug fix: the original computed the centre with true division
    (`psf.shape[0]/2` under `from __future__ import division`), producing
    float slice indices, which raise TypeError on modern NumPy. Integer
    division restores valid indices with identical crop placement.
    """
    r0 = psf.shape[0] // 2 - psf_sz_cropped // 2
    c0 = psf.shape[1] // 2 - psf_sz_cropped // 2
    return psf[r0:r0 + psf_sz_cropped, c0:c0 + psf_sz_cropped]
class SCFeedBackAO():
"""A single-congugate adaptive optics system. The initialization order should be:
1) Wavefront() for every wavelength.
2) A wavefront sensor.
3) A deformable mirror.
4) This AO system.
5) An Atmosphere().
By convention, if not set, all wavefronts not used for sensing are
used to create separate science images.
Parameters
----------
dm: DeformableMirror instance, which contains all wavefronts.
wfs: WFS or child class instance
atm: A single atmosphere for all wavefronts.
conjugate_location: float
location of wfs and DM conjugate
image_ixs: list
A list of indexes for which images should be calculated, i.e. the
science wavefront indices in the dm wavefronts list.
dm_poke_scale: float
A normalisation for poking the deformable mirror, used in response matrix
sensing and the loop. Should be well within the linear regime."""
    def __init__(self,
                 dm,
                 wfs,
                 atm,
                 conjugate_location=0.0,
                 image_ixs=None,
                 dm_poke_scale=1e-7):
        """Wire together the DM, WFS and atmosphere; see the class docstring
        for parameter semantics. Also attaches `atm` to every wavefront."""
        # A list of indexes for which images should be calculated, i.e. the science wavefront indices from the dm instance.
        # If no image indexes are specified, then it is assumed that all wavefronts in the DM
        # that are not sensed by the WFS are used for imaging.
        if not image_ixs:
            image_ixs = []
            # Membership test relies on wavefront object identity/equality
            # between dm.wavefronts and wfs.wavefronts.
            for j in range(len(dm.wavefronts)):
                if dm.wavefronts[j] not in wfs.wavefronts:
                    image_ixs.append(j)
        elif type(image_ixs) == int:
            # Allow a bare int as a convenience; normalise to a list.
            image_ixs = [image_ixs]
        elif type(image_ixs) != list:
            print("OOPS: invalid type for image_ixs - must be of type list or int!")
            raise UserWarning
        # Conjugate location
        if conjugate_location != 0:
            # Only ground-layer conjugation (0.0) is supported so far.
            print("OOPS: Not implemented yet - only ground layer conugation so far")
            raise UserWarning
        # location of WFS and DM conjugate
        self.conjugate_location=conjugate_location
        self.image_ixs = image_ixs
        self.wavefronts = dm.wavefronts
        self.dm = dm
        self.wfs = wfs
        self.atm = atm
        self.dm_poke_scale = dm_poke_scale
        # Filled in later by response-matrix sensing / reconstructor computation.
        self.response_matrix = None
        self.reconstructor = None
        #Add an atmosphere to all wavefronts.
        for wf in self.wavefronts:
            wf.add_atmosphere(atm)
#############################################################################################################
def psf_dl(self,
           plate_scale_as_px = None,
           N_OS = None,
           psf_ix = None,
           crop = True, # Whether or not to crop the PSF. If False, the below two arguments are irrelevant.
           psf_sz_cropped = None, # Size of the PSF. By default cropped at the 10th Airy ring
           psf_sigma_limit_N_os = TENTH_AIRY_RING, # Corresponds to 10 Airy rings
           plotit = False
           ):
    """
    Compute and return the diffraction-limited PSF at the specified
    wavelength (index psf_ix) and plate scale.

    By default, the returned PSF is cropped at the 10th Airy ring.

    AZ: the returned PSF only has the correct sampling if the pupil
    covers the whole wavefront grid (i.e.
    m_per_px = wavefrontPupil['dout'] / wave_height_px). E.g. if the
    pupil only covers 1/2 of the wavefront, then the returned PSF is
    sampled TWICE as finely as you would expect & the PSF is cropped at
    the 5th Airy ring instead of at the 10th! Possibly m_per_px should
    be a private variable constrained by D and sz, since the FFT is
    already padded in image().
    """
    print("Generating the diffraction-limited PSF at wavelength {:.2f} nm...".format(self.wavefronts[psf_ix].wave * 1e9))
    # Generate the PSF at the native size produced by the FFT method.
    psf = self.wavefronts[psf_ix].psf_dl(plate_scale_as_px = plate_scale_as_px, N_OS=N_OS, plotit = plotit, return_efield = False)
    psf_sz = psf.shape[0]
    # If crop == False, we simply return the PSF at the native size generated by the FFT method.
    if crop:
        # How many 'sigmas' the returned PSF grid should encompass. By
        # default, the PSF is cropped at the 10th Airy ring.
        if psf_sz_cropped is None:  # BUG FIX: was `== None`
            if N_OS is None:  # BUG FIX: was `== None`
                # Nyquist sampling (HWHM in pixels) derived from the plate scale.
                N_OS = self.wavefronts[psf_ix].wave / self.wavefronts[psf_ix].D / 2 / np.deg2rad(plate_scale_as_px / 3600)
            else:
                plate_scale_as_px = np.rad2deg(self.wavefronts[psf_ix].wave / self.wavefronts[psf_ix].D / 2 / N_OS) * 3600
            # Cast to int so the size is usable as an array dimension.
            psf_sz_cropped = int(np.ceil(min(psf_sz, 4 * N_OS * psf_sigma_limit_N_os)))
        psf = centre_crop(psf, psf_sz_cropped)
    if plotit:
        # BUG FIX: use the actual PSF size for the plot extent;
        # psf_sz_cropped is None (undefined extent) when crop == False.
        sz = psf.shape[0]
        plt.imshow(psf, extent = np.array([-sz/2, sz/2, -sz/2, sz/2]) * plate_scale_as_px)
        # BUG FIX: wave * 1e9 is nanometres, but the label claimed microns.
        plt.title(r'Diffraction-limited PSF at $\lambda = %.1f$ nm' % (self.wavefronts[psf_ix].wave * 1e9))
        plt.show()
    return psf
#############################################################################################################
def find_response_matrix(self,mode='onebyone',amplitude=1e-7):
    """Poke the actuators and record the WFS output.

    Fills self.response_matrix; after the final transpose each column
    corresponds to one actuator.

    Parameters
    ----------
    mode: string
        'onebyone' pokes each actuator individually, once +ve and once
        -ve, and stores the mean response.
    amplitude: float
        NOTE(review): currently unused -- pokes use self.dm_poke_scale.
    """
    self.response_matrix = np.empty( (self.dm.nactuators,self.wfs.nsense) )
    print("Computing AO system response matrix.")
    if mode=='onebyone':
        # We poke each actuator twice (once +ve, once -ve) and take the abs. mean WFS response
        print(".",end="")
        for i in range(self.dm.nactuators):
            # Flatten the WFS wavefronts
            for wf in self.wfs.wavefronts:
                # wf.field = wf.pupil
                wf.flatten_field()
            # Poking an actuator in the +ve direction.
            act = np.zeros(self.dm.nactuators)
            act[i] = self.dm_poke_scale
            self.dm.apply(act) # Apply the poke to the wavefront, perturb its phase
            wfs_plus = self.wfs.sense() # Get the corresonding WFS measurement
            # Flatten the WFS wavefronts
            for wf in self.wfs.wavefronts:
                # wf.field = wf.pupil
                wf.flatten_field()
            # Poking an actuator in the -ve direction.
            act = np.zeros(self.dm.nactuators)
            act[i] = -self.dm_poke_scale
            self.dm.apply(act)
            wfs_minus = self.wfs.sense()
            # Check that poking in both directions is equivalent: a
            # symmetric response has wfs_minus ~ -wfs_plus, i.e. a
            # normalised correlation close to -1.
            if (np.sum(wfs_plus*wfs_minus)/np.sum(wfs_plus*wfs_plus) > -0.9):
                print("WARNING: Poking the DM is assymetric!!!")
            # Taking the mean response value.
            self.response_matrix[i] = 0.5*(wfs_plus - wfs_minus).flatten()
    else:
        print("ERROR: invalid response matrix mode")
        raise UserWarning
    # Transpose so the matrix maps actuator pokes -> sensor signals.
    self.response_matrix = self.response_matrix.T
#############################################################################################################
def compute_reconstructor(self, mode='eigenclamp', threshold=0.2):
    """Compute the command matrix (reconstructor) from the response matrix.

    Parameters
    ----------
    mode: string
        Mode for this calculation. 'eigenclamp' computes a Moore-Penrose
        pseudo-inverse with SVD eigenvalue clamping.
    threshold: float
        Singular values below threshold * mean(singular values) are
        clamped (their contribution is discarded).
    """
    # !!! for speed, with larger problems, full_matrices should *not* be
    # true. But then we have to think more carefully about the math!
    u, s, v = la.svd(self.response_matrix, full_matrices=True)
    if mode != 'eigenclamp':
        print("ERROR: reconstructor mode")
        raise UserWarning
    # Setting a clamped singular value to infinity makes its reciprocal
    # zero, removing that mode from the pseudo-inverse.
    s[s < np.mean(s) * threshold] = np.inf
    pseudo_inverse = np.dot(np.dot(u, la.diagsvd(1.0 / s, len(u), len(v))), v)
    self.reconstructor = pseudo_inverse.T
#############################################################################################################
def correct_twice(self,plotit=False):
    """Find the pupil field, then correct it twice.

    TEST method, but ERROR checking still needed!

    Returns
    -------
    measurements: list
        A list of the sensor output for before correction, after 1 and after 2
        applications of the reconstructor
    ims: list
        A list of wavefront sensor images (before correction, after 1 and 2
        reconstructor applications)
    """
    # Reset the distorted wavefront to that caused by the atmosphere and nothing else
    for wf in self.wfs.wavefronts:
        wf.atm_field() # Reset the pupil field
        field0 = wf.field  # NOTE(review): only the last wavefront's field is kept
    # Sense the wavefront.
    # wfs.sense() uses the field attribute of the wf to make a measurement.
    measurements0 = self.wfs.sense() # WF measurements
    im0 = self.wfs.im.copy() # WFS image
    # Calculate the DM coefficients corresponding to the measurements.
    coefficients = -np.dot(self.reconstructor,measurements0.flatten())*self.dm_poke_scale
    # Reset the wavefront (since it gets modified by wfs.sense())
    for wf in self.wfs.wavefronts:
        wf.atm_field()
        field1 = wf.field
    # Apply a correction. This modifies (but does not reset) the
    # field attribute of wf:
    # wf.field = wf.field*np.exp(2j*np.pi*phasescreen/wf.wave)
    self.dm.apply(coefficients)
    # Sense after the first correction
    measurements1 = self.wfs.sense()
    im1 = self.wfs.im.copy()
    coefficients += -np.dot(self.reconstructor,measurements1.flatten())*self.dm_poke_scale
    for wf in self.wfs.wavefronts:
        wf.atm_field()
        field2 = wf.field
    # Apply a second correction
    # NOTE(review): the second dm.apply is commented out, so
    # measurements2/im2 actually sense the *uncorrected* atmospheric
    # field -- confirm whether this is intentional (debugging leftover?).
    # self.dm.apply(coefficients)
    # Sense after the second correction.
    measurements2 = self.wfs.sense()
    im2 = self.wfs.im.copy()
    if plotit==True:
        plt.figure()
        plt.suptitle('WFS detector images')
        plt.subplot(131)
        plt.imshow(im0)
        plt.title('Before sensing')
        plt.subplot(132)
        plt.imshow(im1)
        plt.title('After first correction')
        plt.subplot(133)
        plt.imshow(im2)
        plt.title('After second correction')
    return [measurements0,measurements1,measurements2],[im0,im1,im2]
#############################################################################################################
def run_loop(self, dt,
             mode = 'integrator',
             niter = 100,
             gains = {
                 'K_p' : 0.0,
                 'K_i' : 1.0,
                 'K_d' : 0.0,
                 'K_leak' : 0.9
             },
             psf_ix = 0, # Index in the DM's list of wavefronts of the PSF you want to be returned
             psf_sz_cropped = None, # Size of the PSF. By default cropped at the 10th Airy ring
             psf_sigma_limit_N_os = TENTH_AIRY_RING, # Corresponds to 10 Airy rings
             plate_scale_as_px = None, # Plate scale of the output images/PSFs
             plot_sz_px = 80, # For now this is only used in plotting
             nframesbetweenplots = 10,
             plotit = False
             ):
    """Run an AO servo loop.

    NOTE(review): `gains` is a mutable default argument. It is only read
    here, but replacing it with None + an in-function default would be safer.

    Parameters
    ----------
    dt: float
        Time between samples
    mode: string
        Servo loop mode.
        * 'integrator' is classic integral control with parameters K_i and K_leak.
        * 'PID' is simple proportional-integral-derivative control.
        * 'open loop' involves no wavefront correction (only sensing).
        For PD, PI, etc. control, simply set the unneeded gain terms to zero.
    niter: int
        Number of iterations of the loop.
    plotit: boolean
        Do we plot potentially useful outputs? NB, this is all pretty slow with
        matplotlib.
    gains: dict
        K_i: integral gain. K_p: proportional gain. K_d: derivative gain.
        K_leak: leak coefficient for integral control (allows transient
        errors in WFS measurements to leak out of the control input over
        time).
    psf_ix: int
        Index in the DM wavefront list corresponding to the wavelength of
        the PSF to be returned.
    plate_scale_as_px: float
        Plate scale for the output science images. If None, Nyquist
        sampling of the full wavefront grid is used instead.

    Returns
    -------
    psfs_cropped: array (niter, psf_sz_cropped, psf_sz_cropped)
        The normalised, cropped science PSF at each iteration.
    """
    # NOTE(review): the comment below promises zeroing K_leak when K_i is
    # zero, but no code implements it.
    # Zero the leak coefficient if no integral control is used.
    # DM coefficients (control commands)
    coefficients_current = np.zeros(self.dm.nactuators) # timestep k
    # WFS measurements
    y_integral = np.zeros(self.wfs.nsense)
    y_current = np.zeros(self.wfs.nsense) # timestep k
    y_old = np.zeros(self.wfs.nsense) # timestep k - 1
    # Figuring out the size of the PSF image to return.
    psf_sz = self.wavefronts[psf_ix].image(plate_scale_as_px=plate_scale_as_px).shape[0]
    if plate_scale_as_px and not psf_sz_cropped:
        # Oversampling factor of the PSF. Note that N_OS is equivalent to the 'sigma' of the pupil image in the DL.
        plate_scale_rad_px = np.deg2rad(plate_scale_as_px / 3600)
        N_OS = self.wavefronts[psf_ix].wave / self.wavefronts[psf_ix].D / 2 / plate_scale_rad_px
        psf_sz_cropped = np.ceil(min(psf_sz, 4 * N_OS * psf_sigma_limit_N_os))
    else:
        if psf_sz_cropped:
            psf_sz_cropped = np.ceil(min(psf_sz, psf_sz_cropped))
        else:
            psf_sz_cropped = psf_sz
    # Make plate_scale_as_px for Nyquist sampling of the full size.
    # See XXX below for a bug.
    if not plate_scale_as_px:
        plate_scale_rad_px = self.wavefronts[psf_ix].wave / (self.wavefronts[psf_ix].sz * self.wavefronts[psf_ix].m_per_px)
        plate_scale_as_px = np.degrees(plate_scale_rad_px) * 3600
        plate_scale_as_px_in = None
    else:
        plate_scale_as_px_in = plate_scale_as_px
    # Arrays to hold images
    # NOTE(review): psf_sz_cropped can be a float here (np.ceil) -- newer
    # numpy rejects float shapes; confirm it is integral on all paths.
    psfs_cropped = np.zeros((niter, psf_sz_cropped, psf_sz_cropped))# at the psf_ix wavelength
    """ AO Control loop """
    print("Starting the AO control loop with control logic mode '%s'..." % mode)
    for k in range(niter):
        #------------------ EVOLVING THE ATMOSPHERE ------------------#
        # NOTE(review): (k / niter) * 100 is a float in Python 3, so this
        # progress check only fires when it is an exact multiple of 10.
        if ((k / niter) * 100) % 10 == 0:
            print("{:d}% done...".format(int((k / niter) * 100)))
        # Evolve the atmosphere & update the wavefront fields to reflect the new atmosphere.
        self.atm.evolve(dt * k)
        for wf in self.wavefronts:
            wf.atm_field()
        #------------------ AO CONTROL ------------------#
        if mode == 'open loop':
            # We still measure the wavefront for plotting.
            self.wfs.sense()
        else:
            self.dm.apply(coefficients_current) # Apply the wavefront correction. Note that this does NOT reset the wavefront but simply applies the phase change to the field variable.
            measurements = self.wfs.sense() # Sense the corrected wavefront. Does not modify the field variable.
            y_old = y_current # y at timestep k - 1
            y_current = measurements.flatten() # y at timestep k
            y_integral += y_current * dt # Integrate over time.
            coefficients_next = np.zeros(self.dm.nactuators) # timestep k + 1
            # Apply control logic.
            if mode == 'PID':
                coefficients_next += - gains['K_i'] * np.dot(self.reconstructor,y_integral) * self.dm_poke_scale
                coefficients_next += gains['K_leak'] * coefficients_current - gains['K_p'] * np.dot(self.reconstructor,y_current) * self.dm_poke_scale
                coefficients_next += - gains['K_d'] * (1/dt) * (np.dot(self.reconstructor,y_current) - np.dot(self.reconstructor,y_old)) * self.dm_poke_scale
            elif mode == 'integrator':
                coefficients_next = gains['K_leak'] * coefficients_current - gains['K_i'] * np.dot(self.reconstructor,y_current) * self.dm_poke_scale
            else:
                print("ERROR: invalid control logic specified!")
                raise UserWarning
            coefficients_current = coefficients_next; # Update the DM coefficients.
        #-------------------- SAVING PSF ----------------------#
        # Create the PSF
        # NB if the following line has non-none, then we have a uint 8 error XXX
        psf = self.wavefronts[psf_ix].image(plate_scale_as_px = plate_scale_as_px_in)
        # Normalise the PSF to unit total flux before storing it.
        psf /= np.sum(psf.flatten())
        psfs_cropped[k] = centre_crop(psf, psf_sz_cropped)
        #------------------ PLOTTING ------------------#
        if plotit & ((k % nframesbetweenplots) == 0):
            if k == 0:
                # First frame: build all figures/axes once; later frames
                # only update their data.
                axes = []
                plots = []
                plt.rc('text', usetex=True)
                # WFS plot
                fig_wfs = plt.figure()
                ax_wfs = fig_wfs.add_subplot(111) # WFS detector image
                ax_wfs.title.set_text(r'WFS detector')
                ax_wfs.axis( [0,self.wavefronts[0].sz,0,self.wavefronts[0].sz] )
                plot_wfs = ax_wfs.imshow(self.wfs.im,interpolation='nearest',cmap=cm.gray)
                fig = plt.figure(figsize=(10,5))
                # Phase
                axes.append(fig.add_subplot(1,2,1))
                axes[-1].title.set_text(r'Corrected phase ($\lambda$ = %d nm)' % (self.wavefronts[psf_ix].wave*1e9))
                plots.append(axes[-1].imshow(np.angle(self.wavefronts[psf_ix].field)*self.wavefronts[psf_ix].pupil,interpolation='nearest', cmap=cm.gist_rainbow, vmin=-np.pi, vmax=np.pi))
                plt.colorbar(plots[-1],fraction=COLORBAR_FRACTION, pad=COLORBAR_PAD)
                # Science image
                axes.append(fig.add_subplot(1,2,2))
                axes[-1].title.set_text(r'Science Image ($\lambda$ = %d nm)' % (self.wavefronts[psf_ix].wave*1e9))
                plots.append(axes[-1].imshow(centre_crop(psf, plot_sz_px),interpolation='nearest', cmap=cm.gist_heat,extent = np.array([-plot_sz_px/2, plot_sz_px/2, -plot_sz_px/2, plot_sz_px/2]) * plate_scale_as_px))
                plt.gca().xaxis.set_major_formatter(FormatStrFormatter('%.2f\"'))
                plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%.2f\"'))
                plt.colorbar(plots[-1],fraction=COLORBAR_FRACTION, pad=COLORBAR_PAD)
            else:
                # Update the plots
                plot_wfs.set_data(self.wfs.im)
                fig.suptitle(r'AO-corrected phase and science images, $k = %d, K_i = %.2f, K_{leak} = %.2f$' % (k, gains['K_i'], gains['K_leak']))
                plots[0].set_data(np.angle(self.wavefronts[psf_ix].field)*self.wavefronts[psf_ix].pupil)
                plots[1].set_data(centre_crop(psf, plot_sz_px))
            plt.draw()
            plt.pause(0.00001) # Need this to plot on some machines.
    return psfs_cropped
|
"""Definition of additional qt helper objects."""
from collections import deque
import logging
from os import path as op
import pyqtgraph as pg
from PyQt5 import uic
from pyqtgraph.Qt import (QtCore, QtGui, QtWidgets)
from .utils import create_button
def warning(txt, title="Warning"):
    """Pop up a modal message box informing the user about a problem."""
    box = QtWidgets.QMessageBox()
    box.setIcon(QtWidgets.QMessageBox.Information)
    box.setWindowTitle(title)
    box.setText(txt)
    box.setStandardButtons(QtWidgets.QMessageBox.Ok)
    box.exec_()
class CircleShape(pg.EllipseROI):
    """An elliptic ROI with a fixed aspect ratio (i.e. a circle)."""

    def __init__(self, pos, size):
        """Create a circular region of interest.

        Parameters:
            pos (tuple) : centre of the circle (x, y)
            size (int) : diameter of the circle
        """
        pen = QtGui.QPen(QtCore.Qt.red, 0.002)
        pg.ROI.__init__(self,
                        pos=pos,
                        size=size,
                        removable=True,
                        movable=False,
                        invertible=False,
                        pen=pen)
        self.aspectLocked = True
        self.handleSize = 0
        # FIX: the original used a list comprehension purely for its side
        # effects. Iterate over a snapshot of the handles so removal cannot
        # disturb the sequence being looped over.
        for handle in list(self.getHandles()):
            self.removeHandle(handle)

    def __repr__(self):
        diameter = self.size()[0]
        return 'Circle(d:{})'.format(diameter)
class SquareShape(pg.RectROI):
    """A rectangular ROI with a fixed aspect ratio (i.e. a square)."""

    def __init__(self, pos, size):
        """Create a squared region of interest.

        Parameters:
            pos (tuple) : position of the square (x, y)
            size (int) : edge length of the square
        """
        pen = QtGui.QPen(QtCore.Qt.red, 0.002)
        pg.ROI.__init__(self,
                        pos=pos,
                        size=size,
                        removable=True,
                        movable=False,
                        invertible=False,
                        pen=pen)
        self.aspectLocked = True
        self.handleSize = 0
        # FIX: the original used a list comprehension purely for its side
        # effects. Iterate over a snapshot of the handles so removal cannot
        # disturb the sequence being looped over.
        for handle in list(self.getHandles()):
            self.removeHandle(handle)

    def __repr__(self):
        edge = self.size()[0]
        return 'Square(a:{})'.format(edge)
class DetectorHelper(QtGui.QDialog):
    """Dialog that shows detector quadrant positions in a table."""

    # Emitted once a geometry file name has been set.
    filename_set_signal = QtCore.pyqtSignal()

    def __init__(self, quad_pos, det_type='AGIPD', parent=None):
        """Create a table element for quad selection and file selection.

        Parameters:
            quad_pos : table of per-quadrant (x, y) positions; anything
                providing itertuples(index=False), e.g. a pandas DataFrame.
            det_type (str) : detector name shown in the window title,
                e.g. 'AGIPD' or 'LPD' (Default 'AGIPD').
            parent : optional parent widget.
        """
        super().__init__(parent)
        # The dialog layout lives in a Qt Designer .ui file next to this module.
        ui_file = op.join(op.dirname(__file__), 'editor/load_detector.ui')
        uic.loadUi(ui_file, self)
        self.setWindowTitle('{} Geometry'.format(det_type))
        self.quad_pos = list(quad_pos.itertuples(index=False))
        self.populate_table()
        self.bt_copy_csv.clicked.connect(self._copy_csv)

    def populate_table(self):
        """Fill the quadrant table: one row per quadrant, columns x and y."""
        for n, quad_pos in enumerate(self.quad_pos):
            self.tb_quadrants.setItem(
                n, 0, QtGui.QTableWidgetItem(str(quad_pos[0])))
            self.tb_quadrants.setItem(
                n, 1, QtGui.QTableWidgetItem(str(quad_pos[1])))
        self.tb_quadrants.move(0, 0)

    def _copy_csv(self):
        """Copy the quadrant positions to the clipboard as CSV lines."""
        clipboard = QtGui.QGuiApplication.clipboard()
        clipboard.setText("\n".join(f"{x},{y}" for (x, y) in self.quad_pos))
class LogCapturer(logging.Handler, QtCore.QObject):
    """Emit Qt signal for Python logging, and store recent messages.
    """

    # Qt signal carrying each newly formatted log line.
    new_msg = QtCore.Signal(str)

    def __init__(self, parent, level=logging.NOTSET):
        """Create the handler.

        Parameters:
            parent : Qt parent object.
            level : minimum logging level handled (default NOTSET = all).
        """
        QtCore.QObject.__init__(self, parent)
        logging.Handler.__init__(self, level)
        # Ring buffer: keep only the most recent 500 messages.
        self.msg_buffer = deque(maxlen=500)

    def emit(self, record):
        """Format the record, buffer it and broadcast it via new_msg."""
        msg = self.format(record)
        self.msg_buffer.append(msg)
        self.new_msg.emit(msg)
class LogDialog(QtGui.QDialog):
    """A Dialog that displays the log connected to logging.

    Parameters:
        main_window : parent creating this dialog; must expose a
            `log_capturer` (LogCapturer) attribute.
    """

    def __init__(self, main_window):
        super().__init__(parent=main_window)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.setWindowTitle('GeoAssembler Logs')
        self.log_capturer = main_window.log_capturer
        # Read-only text area that accumulates log lines.
        self.text_area = QtGui.QPlainTextEdit(self)
        self.text_area.setReadOnly(True)
        self.ok_btn = create_button('Ok', 'ok')
        self.ok_btn.clicked.connect(self.close)
        layout = QtWidgets.QGridLayout(self)
        layout.addWidget(self.text_area, 0, 0, 10, 10)
        layout.addWidget(self.ok_btn, 11, 0, 1, 1)
        self.setLayout(layout)
        # Show everything captured so far, then stream new messages live.
        for msg in self.log_capturer.msg_buffer:
            self.text_area.appendPlainText(msg)
        self.log_capturer.new_msg.connect(self.text_area.appendPlainText)
|
def lol(a):
    """Track minion-wave bookkeeping for a game clock of *a* seconds.

    The first wave is taken at 75 s, with a new wave every 30 s after that.
    Every third wave in a cycle contributes 7 (and resets the cycle counter
    to 0), other waves contribute 6, and every even-numbered wave subtracts
    one.

    Returns a tuple (seconds_into_current_wave, position_in_3_wave_cycle,
    count).
    """
    leftover = a - 75
    cycle_pos = 1
    count = 6
    wave_no = 1
    while leftover >= 30:
        wave_no += 1
        cycle_pos += 1
        leftover -= 30
        if cycle_pos < 3:
            count += 6
        elif cycle_pos == 3:
            cycle_pos = 0
            count += 7
        if wave_no % 2 == 0:
            count -= 1
    return (leftover, cycle_pos, count)
# Read the game clock (entered as "minutes:seconds") and report the count.
time=input("輸入遊戲時間").split(":")
# NOTE(review): assumes the first field is minutes -- verify the expected format.
second=int(time[0])*60+int(time[1])
result=lol(second)
print(result[2],"隻兵")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
    """Update Project permissions and constrain salesForceNumber to >= 0."""

    dependencies = [
        ('MyANSRSource', '0067_project_customercontact'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='project',
            options={'permissions': (('create_project', 'Create ANSR projects'), ('manage_project', 'Manage ANSR Project'), ('approve_timesheet', 'Approve timesheets'), ('manage_milestones', 'Manage Project Milestones'), ('view_all_projects', 'View all projects'))},
        ),
        migrations.AlterField(
            model_name='project',
            name='salesForceNumber',
            # NOTE(review): verbose_name keeps the historical (misspelled)
            # bytes literal b'SF Oppurtunity Number' -- do not edit applied
            # migrations.
            field=models.IntegerField(default=0, verbose_name=b'SF Oppurtunity Number', validators=[django.core.validators.MinValueValidator(0)]),
            preserve_default=True,
        ),
    ]
|
import logging
import os
import librosa
import numpy as np
from elephantcallscounter.data_transformations.filters import Filters
from elephantcallscounter.data_transformations.noise_reduction import \
NoiseReduction
from elephantcallscounter.data_visualizations.plots import Plots
from elephantcallscounter.utils.path_utils import create_necessary_directories
logger = logging.getLogger(__name__)
class AnalyseSoundData:
    """Load an audio file, band-pass filter it and save spectrogram plots."""

    def __init__(self, file_read_location, save_image_location, sr, hop_length=512):
        """Store I/O locations and analysis parameters.

        :param str file_read_location: path of the audio file to analyse.
        :param str save_image_location: directory for the generated images.
        :param int sr: target sample rate passed to librosa.load.
        :param int hop_length: STFT hop length in samples.
        """
        self.file_read_location = file_read_location
        self.save_image_location = save_image_location
        self.noise_reduce = NoiseReduction(save_image_location)
        self.plot = Plots()
        self.hop_length = hop_length
        self.sr = sr

    @classmethod
    def load_data(cls, file_name, sr):
        """This function loads the audio data from the file.

        :param string file_name:
        :param int sr:
        :return: tuple (signal, sample_rate), or (None, None) on failure.
        """
        # Keeping audio at original sample rate
        try:
            signal, sr = librosa.load(file_name, sr=sr)
            # NOTE(review): len(signal) / sr is in seconds, not ms.
            logger.info("Duration of sample: {} ms".format(len(signal) / sr))
            return signal, sr
        except Exception as ex:
            # Loading is best-effort: log and signal failure to the caller.
            logger.info("Failed to load data: " + repr(ex))
            return None, None

    def analyse_audio(self):
        """Run the full pipeline: load, filter, then plot/save the results."""
        # .wav is lossless
        create_necessary_directories(self.save_image_location)
        input_signal, sr = self.load_data(self.file_read_location, self.sr)
        if input_signal is None:
            return
        self.plot.plot_amp_time(input_signal, sr)
        duration = int(len(input_signal) / sr)
        # plots upto sampling rate/2(Nyquist theorem)
        # Filter requirements.
        fs = sr  # sample rate, Hz
        cutoff = 50  # desired cutoff frequency of the filter, Hz
        nyq = 0.5 * fs  # Nyquist Frequency
        order = 4  # sin wave can be approx represented as quadratic
        time = np.linspace(0, duration, len(input_signal), endpoint=False)
        lowpass_signal = Filters.butter_lowpass_filter(
            input_signal, cutoff, nyq, order, time
        )
        filename = self.file_read_location.split("/")[-1]
        cutoff_high = 10
        # Low-pass at 50 Hz then high-pass at 10 Hz: a 10-50 Hz band-pass.
        highpass_signal = Filters.butter_highpass_filter(
            lowpass_signal, cutoff_high, nyq, order, time
        )
        # NOTE(review): this f-string has no placeholder, so every run
        # writes the same "spec_image_(unknown).png" -- was `filename`
        # meant to be embedded here?
        self.plot.plot_and_save_spectrogram(
            highpass_signal,
            sr,
            file_location=os.path.join(
                self.save_image_location, f"spec_image_(unknown).png"
            ),
        )
        self.plot.plot_mel(highpass_signal, sr)
        self.plot.fft_plot(highpass_signal, sr, filename, plot=False)
        return
|
class AllBooksPageLocators:
    """CSS selectors used on the 'all books' page."""

    # Every book tile in the result grid.
    BOOKS = 'div.page_inner section li.col-xs-6'
    # The currently highlighted entry of the pager.
    PAGER = 'div.page_inner section ul.pager li.current'
#coding=utf-8
import math
def move(x, y, step, angle):
    """Translate the point (x, y) by *step* units along direction *angle*.

    *angle* is in radians; returns the new coordinates as a tuple.
    """
    dx = step * math.cos(angle)
    dy = step * math.sin(angle)
    return x + dx, y + dy
# Demo: move from the origin by 100 units at 30 degrees (pi/6 radians),
# unpacking the returned pair directly.
x1,y1=move(0,0,100,math.pi/6)
print(x1,y1)
# Without unpacking, the "multiple return values" are really one tuple.
result=move(0,0,200,math.pi/6)
print(result)
|
# import required packages
import pandas as pd
import numpy as np
import os, gc, time, warnings
import itertools
from scipy import sparse
import scipy.stats as ss
from scipy.sparse import csr_matrix, hstack, vstack
import matplotlib.pyplot as plt, matplotlib.gridspec as gridspec
import seaborn as sns
from wordcloud import WordCloud ,STOPWORDS
from PIL import Image
import matplotlib_venn as venn
import pydot, graphviz
from IPython.display import Image
import string, re, nltk, collections
from nltk.util import ngrams
from nltk.corpus import stopwords
import spacy
from nltk import pos_tag
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.tokenize import TweetTokenizer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, HashingVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_is_fitted
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.feature_selection import SelectFromModel
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
import tensorflow as tf
import keras.backend as K
from keras.models import Model, Sequential
from keras.utils import plot_model
from keras.layers import Input, Dense, Embedding, SpatialDropout1D, concatenate, BatchNormalization
from keras.layers import GRU, LSTM, Bidirectional, GlobalAveragePooling1D, GlobalMaxPooling1D, Conv1D
from keras.preprocessing import text, sequence
from keras.callbacks import Callback
from keras.models import model_from_json
def get_reverse_mapping(data):
    """Map label strings to their integer class indices.

    Labels are 'Normal' -> 0, 'Suggestion' -> 1, 'Task Assigned' -> 2.
    """
    class_names = ['Normal', 'Suggestion', 'Task Assigned']
    return np.array([class_names.index(label) for label in data])
def get_mapping(data):
    """Map integer class indices back to their label strings.

    Indices are 0 -> 'Normal', 1 -> 'Suggestion', 2 -> 'Task Assigned'.
    """
    class_names = ['Normal', 'Suggestion', 'Task Assigned']
    return np.array([class_names[ix] for ix in data])
def get_max(data):
    """Return the argmax of every row (predicted class index per sample)."""
    return np.array([np.argmax(row) for row in data])
def get_classification_results (sentences):
    """Classify each sentence as Normal / Suggestion / Task Assigned.

    :param sentences: list of raw sentence strings.
    :return: numpy array of label strings (via get_mapping).

    NOTE(review): relies on the module-level `loaded_model` created in the
    __main__ block; calling this before the model is loaded raises NameError.
    NOTE(review): the tokenizer is fitted on the input corpus itself rather
    than on the training vocabulary -- confirm this matches how the model
    was trained.
    """
    maxlen = 600
    max_features = 11760
    test_sentence = sentences
    test_corpus = pd.DataFrame(test_sentence)
    # Append each sentence's POS-tag sequence to the sentence text itself.
    pos_tags_test = test_corpus[0].apply(lambda x : " ".join(item[1] for item in pos_tag(word_tokenize(x)))).values
    test_corpus = test_corpus[0].values + " " + pos_tags_test
    tokenizer = text.Tokenizer(num_words = max_features)
    tokenizer.fit_on_texts(list(test_corpus))
    test_corpus = tokenizer.texts_to_sequences(test_corpus)
    test_corpus = sequence.pad_sequences(test_corpus, maxlen = maxlen)
    # Take the argmax class per sentence and map it back to its label string.
    return get_mapping(get_max(loaded_model.predict(test_corpus, batch_size = 128, verbose = 1)))
def get_all_sentences(sentences):
    """Extract the 'sentence' value from each record.

    Parameters
    ----------
    sentences : iterable of dict
        Records that each carry a 'sentence' key.

    Returns
    -------
    list of str
        The sentence strings, in input order.
    """
    # FIX: the original loop variable was named `text`, shadowing the
    # `keras.preprocessing.text` module imported at file level.
    return [record['sentence'] for record in sentences]
if __name__ == '__main__':
    # BUG FIX: json.load() is used below but `json` was never imported,
    # which raised NameError at runtime.
    import json

    # Load the serialised model architecture and rebuild the model.
    with open('model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("model.h5")
    print("Loaded model from disk")
    # evaluate loaded model on test data
    loaded_model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
    sentences = ['Hi']
    # Read the meeting transcript and classify every sentence in it.
    with open('/content/data_meeting_text1.txt') as sentences_file:
        sentences_json = json.load(sentences_file)
    sentences = get_all_sentences(sentences_json['structured_meeting_texts_without_introduction'])
    classification = get_classification_results(sentences)
    df = pd.DataFrame({'sentence': sentences, 'result': classification}, columns=['sentence', 'result'])
def get_unicode(text: str) -> bytes:
    """Encode *text* with the raw-unicode-escape codec.

    Parameters
    ----------
    text : str
        Text to encode.

    Returns
    -------
    bytes
        The raw-unicode-escape encoding of *text*.
    """
    encoded = text.encode("raw_unicode_escape")
    return encoded
def check_positive_integer(value: float, var_name: str):
    """Validate that *value* is a positive whole number.

    Parameters
    ----------
    value : float
        Input value to validate.
    var_name : str
        Variable name included in the error message.

    Raises
    ------
    ValueError
        If *value* is smaller than 1, or is not a whole number.
    """
    if value < 1:
        raise ValueError(f"'{var_name}' should be greater than 0")
    truncated = int(value)
    if truncated != value:
        raise ValueError(f"Cannot assign a float value to '{var_name}'")
|
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val      # payload stored at this node
        self.left = left    # left child (TreeNode or None)
        self.right = right  # right child (TreeNode or None)
class Solution:
    """
    110. Balanced Binary Tree
    https://leetcode-cn.com/problems/balanced-binary-tree/
    Given a binary tree, determine whether it is height-balanced:
    for every node, the heights of its two subtrees differ by at most 1.
    """

    def isBalanced(self, root: "TreeNode") -> bool:
        # PERF FIX: the original recomputed maxDepth for every node on top
        # of the isBalanced recursion, giving O(n^2) worst case. Compute
        # heights bottom-up once, propagating -1 as an "unbalanced"
        # sentinel, for O(n) total.
        return self._checked_height(root) != -1

    def _checked_height(self, node):
        """Return the height of *node*, or -1 if its subtree is unbalanced."""
        if not node:
            return 0
        left = self._checked_height(node.left)
        if left == -1:
            return -1
        right = self._checked_height(node.right)
        if right == -1:
            return -1
        if abs(left - right) > 1:
            return -1
        return max(left, right) + 1

    # Maximum depth (height) of a subtree; kept for callers that use it.
    def maxDepth(self, root):
        if not root:
            return 0
        return max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
print(__doc__)
# ---------
import sys
import os
COMMON_DIR = os.path.abspath(__file__ + '/../../common/')
sys.path.insert(0, COMMON_DIR)
from constants import *
sys.path.insert(0, UTILS_DIR)
from file_utils import *
# ---------
# Code source: Gael Varoqueux
# Modified for Documentation merge by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import pylab as pl
from sklearn import linear_model, datasets
# Parse the comma-separated result file into rows of floats.
lines = read_to_lines('/home/manish/Dropbox/Thesis/MGSeCor/result/cve_cve/cross_site_scripting/from85_result.data', False)
results = []
for line in lines:
    result = [float(x.strip()) for x in line.split(',')]
    results.append(result)
# import some data to play with
#iris = datasets.load_iris()
#X = iris.data[:, :2]  # we only take the first two features.
#Y = iris.target
# First two columns are features, third is the target.
# Rows 0-74 are the training split, the remainder the test split.
X = np.array([x[0:2] for x in results][0:75])
Y = np.array([x[2] for x in results][0:75])
X_test = np.array([x[0:2] for x in results][75:])
Y_test = np.array([x[2] for x in results][75:])
h = .02  # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of Neighbours Classifier and fit the data.
logreg.fit(X, Y)
print '**************************'
print 'Weight Vector'
print logreg.coef_
print '**************************'
# PERF FIX: the original called Y_test.tolist() and logreg.predict(X_test)
# inside every loop iteration; hoist both out of the loops (outputs are
# unchanged).
y_true = Y_test.tolist()
y_pred = logreg.predict(X_test).tolist()
print 'Actual:      ', ['%.2f' % z for z in y_true]
# Squared errors of the fitted model on the test split, and their RMS.
errors = [(y_true[i] - y_pred[i])**2 for i in range(len(y_true))]
error = sum(errors)
print list(sorted(errors))
print 'Combination:', ['%.2f' % z for z in y_pred], (error/len(y_true))**0.5
# RMS error of the "secure" baseline (column 0) on the test split.
secure = [x[0] for x in results][75:]
error = sum((y_true[i] - secure[i])**2 for i in range(len(y_true)))
print 'Secure:      ', ['%.2f' % z for z in secure], (error/len(y_true))**0.5
# RMS error of the "generic" baseline (column 1) on the test split.
generic = [x[1] for x in results][75:]
error = sum((y_true[i] - generic[i])**2 for i in range(len(y_true)))
print 'Generic:     ', ['%.2f' % z for z in generic], (error/len(y_true))**0.5
print '**************************'
"""
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(1, figsize=(4, 3))
pl.pcolormesh(xx, yy, Z, cmap=pl.cm.Paired)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=pl.cm.Paired)
pl.xlabel('Sepal length')
pl.ylabel('Sepal width')
pl.xlim(xx.min(), xx.max())
pl.ylim(yy.min(), yy.max())
pl.xticks(())
pl.yticks(())
pl.show()
"""
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-25 03:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow null created/modified timestamps on Student."""

    dependencies = [
        ('students', '0008_auto_20180121_1649'),
    ]

    operations = [
        migrations.AlterField(
            model_name='student',
            name='created',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        migrations.AlterField(
            model_name='student',
            name='modified',
            field=models.DateTimeField(auto_now=True, null=True, verbose_name='Last Updated'),
        ),
    ]
|
"""
要实现的功能:
从中国国家地理网上解析网页源码,循环获取图片的资源,并下载到本地
这是自己写的**************
"""
# 一.获取图片资源
# 二.保存在本地
# 原版程序出现的问题是:判断的第一步是是否有文件夹而不是有文件
# 可以组合
# r :只读
# r+ : 读写
# w : 新建(会对原有文件进行覆盖)
# a : 追加
# b : 二进制文件
"""
同时添加或者减少缩进的快捷方式:
tab+shift或者
shift+tab
"""
import os
import requests
from bs4 import BeautifulSoup
import re # 用正则表达式来解析网页的img标签下的src资源或者用美丽汤
# Test page: http://www.dili360.com/gallery/cate/1.htm
x = input("1.风光 2.人文 3.航拍 4.水下 5.建筑 6.动物 7.植物 8.星空 9.黑白 10.专题 \r\n請輸入需要爬取的頁數:") # \r\n is a line break
res = requests.get("http://www.dili360.com/gallery/cate/"+x+".htm") # fetch the page source
texts = res.text
# NOTE(review): no parser argument -- BeautifulSoup will warn and pick a default.
bf = BeautifulSoup(texts)
s = bf.find_all("img")
name = bf.find_all("a", href="/gallery/cate/"+x+".htm") # category link tags
count = len(name)
# Use the category link's text (if any) as the download folder name.
for Name in range(count):
    kk = name[Name].string
    if kk is not None:
        wenJianMing=""
        wenJianMing = kk
"""
<img alt="春天的密码" src="http://img0.dili360.com/ga/M02/02/18/wKgBzFQ2x9eAbZn8AATCSQolols997.jpg@!rw5"/>
"""
listCount = len(s)
root = "G://pycharm//爬虫专用文件夹//"+wenJianMing+"//"
num = 1
for i in range(listCount):
    if not os.path.exists(root):
        os.mkdir(root)
    path = root + '%d'%num + ".jpg" # int converted to str; the .jpg suffix makes the file type recognisable
    src = s[i].get("src") # read the img tag's src attribute (regex-equivalent extraction)
    l = len(src)
    sr = src[0:l-5] # trim the "@!rw5" suffix so the URL ends in .jpg
    if sr[-3:] == "jpg": # only handle .jpg resources
        num += 1
        # url = "http://img0.dili360.com/ga/M02/02/18/wKgBzFQ2x9eAbZn8AATCSQolols997.jpg"
        print('{}{}{}{}'.format(num, " ", ":", sr))
        # NOTE(review): this rebinds the imported module name `re` --
        # renaming the response variable is strongly recommended.
        re = requests.get(sr)
        with open(path, "wb") as f:
            f.write(re.content)
            f.close() # NOTE(review): redundant inside `with`
print(wenJianMing+"图片保存成功")
|
#!/usr/bin/env python3
#! -*- coding:utf8 -*-
import sys
# Refuse to run under Python 2 -- the rest of this script uses Python 3 syntax.
if sys.version_info.major != 3:
    print("Python2 not supported, Please use python3 run again.")
    sys.exit(1)
import argparse
import setproctitle
import os,time
import common.daemon as daemon
# init log handler
# Resolve the log file path next to this script and export it via the
# environment; presumably common.slogging reads LOG_PATH when imported
# below -- which is why this must happen before that import.
project_path = os.path.dirname(os.path.abspath(__file__))
log_path = os.path.join(project_path, "log/topargus-agent.log")
os.environ['LOG_PATH'] = log_path
import common.slogging as slogging
from common.slogging import slog
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.description='TOP-Argus Agent: get config from remote server and do log-uploading job'
parser.add_argument('-a', '--alarm', help='alarm proxy host, agent pull config and push alarm to this proxy host, eg: 127.0.0.1:9090', default='127.0.0.1:9090')
parser.add_argument('-f', '--file', help="log file for agent to watch, eg: ./xtop.log", default='/chain/log/xtop.log')
parser.add_argument('--nodaemon', action='store_true', help='start as no-daemon mode')
args = parser.parse_args()
# set process title
proc_title = 'topargus-agent: '
for i in range(len(sys.argv)):
proc_title = '{0} {1}'.format(proc_title, sys.argv[i])
setproctitle.setproctitle(proc_title)
if args.nodaemon:
print("start as no-daemon mode")
else:
# forbidden using slog befor daemon_init
print("start as daemon mode")
try:
daemon.daemon_init()
except RuntimeError as e:
print(e, file=sys.stderr)
raise SystemExit(1)
# attention: must behind daemon_init
slogging.start_log_monitor()
from agent import argus_agent
argus_agent.run(args)
while True:
time.sleep(1000)
|
"""Contains the primary entrypoint and exit handling code for the Twitter Ingress tooling."""
import logging
import sys
from os import environ
import tweepy
from ingress import ES_CONNECTION_STRING
from ingress.utils import get_singleton_instance, setup_mappings
from ingress.listeners import QueueListener
LOG = logging.getLogger(__name__)
def shutdown(exit_code=0) -> None:
    """Disconnect the shared Twitter stream and terminate the process.

    :param exit_code: raise a system exit with the provided exit code. Defaults to 0.
    """
    LOG.info('Shutting Down.')
    stream = get_singleton_instance(tweepy.Stream)
    stream.disconnect()
    sys.exit(exit_code)
def main() -> None:
    """
    Primary entrypoint to the Twitter Ingress tool.

    Sets up access to twitter from secrets held in environment variables
    (CONSUMER_KEY, CONSUMER_SECRET, OAUTH_KEY, OAUTH_SECRET — the original
    docstring incorrectly said they came from the filesystem) and connects
    to twitter to start consuming tweets.  The optional HASHTAGS variable
    supplies comma-separated filter terms.
    """
    LOG.debug('Loading twitter authentication config')  # BUG FIX: was "confifg"
    consumer_key: str = environ['CONSUMER_KEY']
    consumer_secret: str = environ['CONSUMER_SECRET']
    oauth_key: str = environ['OAUTH_KEY']
    oauth_secret: str = environ['OAUTH_SECRET']
    if 'HASHTAGS' in environ:
        tweet_filters = environ['HASHTAGS'].split(',')
    else:
        tweet_filters = ['#brexit', '#remain', '#leave']
    LOG.info('Streaming tweets matching these keywords: %s', tweet_filters)
    # Index name derives from the filter terms, e.g. "tweets-brexit-remain-leave".
    index_suffix = '-'.join(tweet_filters).lower().replace('#', '')
    twitter_index = 'tweets-{}'.format(index_suffix)
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(oauth_key, oauth_secret)
    LOG.debug('Creating Stream instance')
    # Singleton so shutdown() can locate and disconnect the same stream.
    api = get_singleton_instance(
        tweepy.Stream,
        auth=auth,
        listener=QueueListener(ignore_retweets=True, twitter_index=twitter_index)
    )
    try:
        setup_mappings(
            index_suffix,
            ES_CONNECTION_STRING,
        )
        LOG.info('Collecting Tweets.')
        api.filter(track=tweet_filters, is_async=False)  # blocks until disconnect
    except KeyboardInterrupt:
        LOG.info('Caught Ctrl+C')
        shutdown()
    except Exception:  # pylint: disable = broad-except
        # Last-resort boundary: log with traceback, then exit non-zero.
        LOG.error('Caught Exception!', exc_info=True)
        shutdown(1)


if __name__ == '__main__':
    main()
|
import re

# Print every "22822#...#" item-description token found in the local file `test`.
regex = re.compile(r'\b(?:22822)#[^#]+#')

# BUG FIX: the original open()/read() never closed the handle; the context
# manager guarantees the file is closed even if read() raises.
with open('test', 'r') as source_file:
    contents = source_file.read()

itemdesc = regex.findall(contents)
for word in itemdesc:
    print(word)
import os
import sys
import json
import pdb
from collections import Counter
def read_json(filename):
    """Parse *filename* as JSON and return the resulting object."""
    with open(filename) as handle:
        return json.load(handle)
def count_key(key_type, object):
    """Count the entries of *object* whose 'type' field equals *key_type*.

    :param key_type: value to match against each entry's 'type' key
    :param object: iterable of dicts (name shadows the builtin; kept for
        backward compatibility with keyword callers)
    :return: number of matching entries
    """
    # BUG FIX: the original collected object[0]['type'] for every match,
    # which raised KeyError whenever the FIRST entry lacked 'type' even
    # though only the length of the list was used.
    return sum(1 for entry in object if entry.get('type') == key_type)
def get_val_key(keyname, keyval, object):
    """Return every entry of *object* whose *keyname* field equals *keyval*."""
    keyname = str(keyname)  # callers may pass a non-string key name
    matches = [entry for entry in object if entry.get(keyname) == keyval]
    return matches
def list_flatten(inp_list):
    """Flatten one level of nesting.

    - a plain string is wrapped into a one-element list;
    - a list with several sub-lists is flattened one level;
    - a single-element list returns that element unchanged (which may be a
      bare string — preserved from the original behaviour).
    """
    # Leftover debug print statements removed; they wrote to stdout on
    # every call.
    if isinstance(inp_list, str):
        return [inp_list]
    if len(inp_list) > 1:
        return [item for subitem in inp_list for item in subitem]
    return inp_list[0]
def compute_acc_type(inp_list, output_list):
    """Score predicted ideal answers against gold exact answers.

    Soft accuracy counts a hit when ANY gold fragment occurs in the
    prediction; hard accuracy requires ALL fragments.

    :param inp_list: gold questions (each with 'id' and 'exact_answer')
    :param output_list: system answers (each with 'id' and 'ideal_answer')
    :return: (soft_accuracy, hard_accuracy)
    """
    soft_measure = []
    hard_measure = []
    for question in inp_list:
        gold_fragments = list_flatten(question['exact_answer'])
        answer_entry = get_val_key('id', question['id'], output_list)
        prediction = answer_entry[0]['ideal_answer']
        print(prediction)
        print(gold_fragments)
        hits = [fragment in prediction for fragment in gold_fragments]
        soft_measure.append(1 if any(hits) else 0)
        hard_measure.append(1 if all(hits) else 0)
    print(soft_measure)
    print(hard_measure)
    return soft_measure.count(1)/float(len(soft_measure)), hard_measure.count(1)/float(len(hard_measure))
def get_input_question_list(input_list_json, output_list):
    """For each answered question, look up the matching gold question by id."""
    matched = [
        get_val_key('id', entry['id'], input_list_json['questions'])[0]
        for entry in output_list
    ]
    # Every answer must have exactly one corresponding input question.
    assert(len(output_list) == len(matched))
    return matched
def compute_accuracy(input_json,output_json):
    """Report question-type counts and soft/hard accuracy per question type.

    Loads a gold question file and a system answer file, prints how many
    questions of each type ('list', 'factoid', 'yesno', 'summary') each
    contains, then computes and prints soft/hard accuracy for the 'list',
    'yesno' and 'factoid' types.

    :param input_json: path to the gold questions JSON file
    :param output_json: path to the system answers JSON file
    """
    input_question_json=read_json(input_json)
    output_answer_json=read_json(output_json)
    # Per-type counts in the gold input file.
    inp_lst_count=count_key('list',input_question_json['questions'])
    print('list type is',inp_lst_count)
    inp_factoid_count=count_key('factoid',input_question_json['questions'])
    print('factoid type is',inp_factoid_count)
    inp_yesno_count=count_key('yesno',input_question_json['questions'])
    print('yesno type is',inp_yesno_count)
    # Per-type counts in the system output file.
    op_lst_count=count_key('list',output_answer_json['questions'])
    print('list type is',op_lst_count)
    op_factoid_count=count_key('factoid',output_answer_json['questions'])
    print('factoid type is',op_factoid_count)
    op_yesno_count=count_key('yesno',output_answer_json['questions'])
    print('yesno type is',op_yesno_count)
    op_summary_count=count_key('summary',output_answer_json['questions'])
    print('summary type is',op_summary_count)
    #print(input_question_json['questions'][0])
    #print(input_question_json['questions'][0].keys())
    # Accuracy for 'list' questions.
    output_list_type=get_val_key('type','list',output_answer_json['questions'])
    print(len(output_list_type))
    input_question_list=get_input_question_list(input_question_json,output_list_type)
    print(len(input_question_list))
    soft_measure,hard_measure=compute_acc_type(input_question_list,output_list_type)
    print("List type soft and hard measure", soft_measure,hard_measure)
    # Accuracy for 'yesno' questions.
    output_yesno_type=get_val_key('type','yesno',output_answer_json['questions'])
    #print(len(output_yesno_type))
    input_question_list=get_input_question_list(input_question_json,output_yesno_type)
    soft_measure,hard_measure=compute_acc_type(input_question_list,output_yesno_type)
    print("Yesno type soft and hard measure", soft_measure,hard_measure)
    # Accuracy for 'factoid' questions.
    output_factoid_type=get_val_key('type','factoid',output_answer_json['questions'])
    input_question_list=get_input_question_list(input_question_json,output_factoid_type)
    soft_measure,hard_measure=compute_acc_type(input_question_list,output_factoid_type)
    print("Factoid type soft and hard measure", soft_measure,hard_measure)


if __name__=="__main__":
    # Usage: python script.py <gold.json> <answers.json>
    compute_accuracy(sys.argv[1],sys.argv[2])
|
'''
Created on Oct 23, 2009
@author: capitalmarkettools
'''
import unittest, datetime
from src.bo.Date import Date
class Test(unittest.TestCase):
    """Unit test for Date."""

    def setUp(self):
        # Shared fixture: May 14th, 2009.
        self.date = Date(day=14, month=5, year=2009)

    def testFromPythonDate(self):
        d = datetime.date(year=2011, month=10, day=15)
        self.date.fromPythonDate(d)
        self.assertEqual(self.date, Date(day=15, month=10, year=2011), 'From Python date fails')

    def testStr(self):
        self.assertEqual(str(self.date), "05/14/2009", "__str__ returns unexpected value")

    def testStr_yyyymmdd(self):
        self.assertEqual(self.date.str_yyyymmdd(), "20090514", "str_yyymmdd() returns unexpected value")

    def test__cmp__(self):
        # BUG FIX: the original used the `<>` operator, which was removed in
        # Python 3; `!=` is the exact equivalent on both Python 2 and 3.
        # The deprecated failIf/failUnless/failUnlessEqual aliases (removed in
        # Python 3.12) are replaced with their assert* equivalents, which
        # also exist on Python 2.7.
        self.assertFalse(Date(14, 5, 2009) != Date(14, 5, 2009), "Error in not equal")
        self.assertFalse(Date(14, 5, 2009) == Date(14, 6, 2009), "Error in equal")
        self.assertTrue(Date(14, 5, 2009) == Date(14, 5, 2009), "Error in not equal")
        self.assertTrue(Date(14, 5, 2009) != Date(14, 6, 2009), "Error in equal")

    def test_str_MySQLdb(self):
        #print self.date.str_MySQLdb()
        self.assertEqual(self.date.str_MySQLdb(), "2009-05-14", "str_MySQLdb returns unexpected results")
def suite():
    # Convenience aggregator so an external runner can collect this module's
    # tests as a single TestSuite.
    return unittest.makeSuite(Test)


if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
# Generated by Django 2.1.5 on 2019-05-29 15:19
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces the FleetCheckListItemResult
    # table, recording before/after check-list outcomes for machines/fleets.
    # Migration bodies should not be hand-edited once applied.

    dependencies = [
        ('api', '0074_auto_20190529_1436'),
    ]

    operations = [
        migrations.CreateModel(
            name='FleetCheckListItemResult',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                # Generic-style reference: an integer id plus a string
                # discriminator ('machine' or 'fleet') instead of a ForeignKey.
                ('request_object_id', models.IntegerField(blank=True, null=True)),
                ('request_object_type', models.CharField(blank=True, choices=[('machine', 'Machine'), ('fleet', 'fleet')], max_length=150, null=True)),
                ('status', models.CharField(blank=True, choices=[('before', 'Before'), ('after', 'after')], max_length=50, null=True)),
                # NOTE(review): the FK targets 'api.Fleet' although the field
                # is named fleet_check_list_item — confirm this is intentional.
                ('fleet_check_list_item', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.Fleet')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
from django.db import models
class User(models.Model):
    # Minimal user profile record.
    firstname = models.CharField(max_length=255)
    lastname = models.CharField(max_length=255)
    # NOTE(review): plain CharField with no uniqueness or format validation —
    # consider EmailField(unique=True) if this identifies users.
    email = models.CharField(max_length=255)
    age = models.IntegerField()
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save

    def __repr__(self):
        return f"<User object: {self.firstname} {self.lastname}({self.id})>"
class Post(models.Model):
content = models.TextField()
# uploader = models.CharField(user, related_name="posts_uploaded", on_delete=models.CASCADE)
# likes =
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __repr__(self):
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Re-export the package's public API (a strategy base class and the metric
# consumer) so callers can import both names from this module directly.
from agentzero.meta import BaseStrategy as Strategy
from agentzero.metrics.consumer import MetricConsumer

__all__ = ['Strategy', 'MetricConsumer']
|
import argparse
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from args.spiders.email_extract import EmailExtractSpider
def main(sites):
    """Run the e-mail extraction spider over *sites*, appending rows to CSV."""
    settings = get_project_settings()
    # Feed export: append (overwrite=False) extracted addresses to a CSV file.
    settings['FEEDS'] = {
        'extracted_emails.csv': {
            'format': 'csv',
            'overwrite': False,
        },
    }
    crawler = CrawlerProcess(settings)
    crawler.crawl(EmailExtractSpider, sites=sites)
    crawler.start()  # blocks until the crawl finishes
    print("\n\nDONE")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--sites", help="Enter comma separated website URLs without space ")
    args = parser.parse_args()
    if args.sites:
        main(args.sites)
    else:
        # BUG FIX: the implicit string concatenation ran "-s SITES" directly
        # into the example URLs ("...-s SITEShttps://example1.com..."); insert
        # the missing separator so the usage hint is readable.
        print("Missing Required Arguments:\n\t-s SITES "
              "https://example1.com,https://example2.com")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
App 类:
app 常用的方法:比如 启动应用,关闭应用,重启应用,进入首页
"""
from appium import webdriver
from test_appium.page.basepage import BasePage
from test_appium.page.mainpage import MainPage
class App(BasePage):
    """App-level actions: start, stop and restart the app, reach the main page."""

    def start(self):
        """
        Start the application.

        Reuse the driver when one is already attached; when self.driver is
        None, create a fresh Appium session for the WeCom (WeChat Work) app.
        """
        if self.driver is None:  # idiom fix: identity check, not `== None`
            caps = {}
            caps["platformName"] = "android"
            caps["deviceName"] = "hogwarts"
            caps["appPackage"] = "com.tencent.wework"
            caps["appActivity"] = ".launch.LaunchSplashActivity"
            caps["noReset"] = "true"  # keep login state between sessions
            self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
            # Implicit wait: element lookups retry for up to 5 seconds.
            self.driver.implicitly_wait(5)
        else:
            # Relaunch the appPackage/appActivity configured in the caps.
            self.driver.launch_app()
            # To launch an arbitrary package/activity instead:
            # self.driver.start_activity()
        return self

    def restart(self):
        """Close and relaunch the app while keeping the session alive."""
        self.driver.close_app()
        self.driver.launch_app()

    def stop(self):
        """End the Appium session entirely."""
        self.driver.quit()

    def goto_main(self) -> MainPage:
        """Return the page object for the app's main page."""
        return MainPage(self.driver)
|
# Build and pickle an ARMP_G estimator primed with a small slice of the
# isopentane/CN DFT training set, and write the matching sample indices.
import pickle
import numpy as np
import h5py
from qml.aglaia.aglaia import ARMP_G
from sklearn.base import clone

# NOTE(review): hard-coded local volume path — this script only runs on the
# machine where the external drive is mounted.
data = h5py.File("/Volumes/Transcend/data_sets/CN_isobutane_model/pruned_cn_isobutane/train_isopentane_cn_dft.hdf5", "r")
n_samples = 2
xyz = np.array(data.get("xyz")[:n_samples])
# presumably kcal/mol -> kJ/mol conversion (factor 4.184) — TODO confirm units
ene = np.array(data.get("ene")[:n_samples])*4.184
ene = ene - ene[0]  # energies shifted relative to the first sample
zs = np.array(data["zs"][:n_samples], dtype = int)
forces = np.array(data.get("forces")[:n_samples])*4.184
# ACSF descriptor hyper-parameters for the representation.
acsf_params = {"nRs2":10, "nRs3":10, "nTs": 5, "eta2":4.0, "eta3":4.0, "zeta":8.0}
estimator = ARMP_G(iterations=1, representation='acsf', representation_params=acsf_params, batch_size=2)
estimator.set_xyz(xyz)
estimator.set_classes(zs)
estimator.set_properties(ene)
estimator.set_gradients(forces)
estimator.generate_representation(method='fortran')
# Persist the primed estimator and the index list consumed by later steps.
pickle.dump(estimator, open('model.pickle', 'wb'))
with open('idx.csv', 'w') as f:
    for i in range(n_samples):
        f.write('%s\n' % i)
|
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from collections import defaultdict
import platform
import struct
import numpy as np
import numpy
import random
import tensorflow as tf
import os
import pickle as pickle
import matplotlib.pyplot as plt
import sys
from datetime import datetime
import time
from pathlib import Path
def format_time(t):
    """Render datetime *t* as 'YYYY-MM-DD HH:MM:SS'."""
    fmt = "%Y-%m-%d %H:%M:%S"
    return t.strftime(fmt)
#this function helps to visualize the dict
from itertools import islice
def take(n, iterable):
    """Return first n items of the iterable as a list."""
    first_n = islice(iterable, n)
    return list(first_n)
# take(1, prod_desc.values())
# load data after creating features
def load_data_hybrid(data_path, min_items=2, min_users=2):
    """Load a TSV review/feature file into id-mapped rating sets and features.

    The first line of the file names the columns; the first three columns are
    (user id, item id, <ignored>) and every later column is a per-item
    feature that gets type-coerced below.

    :param data_path: path to the tab-separated input file (header line first)
    :param min_items: keep only users with MORE than this many rated items
    :param min_users: keep only items rated by MORE than this many users
    :return: (max_u_id, max_i_id, users, items, user_ratings_filtered,
        item_ratings_filtered, features) where users/items map raw ids to
        dense 1-based ids and features maps column name -> {item id: value}
    :raises Exception: when a data line's column count differs from the header
    """
    user_ratings = defaultdict(set)
    item_ratings = defaultdict(set)
    max_u_id = -1
    max_i_id = -1
    user_count = 0
    item_count = 0
    reviews = 0
    users = {} # aid to id LUT
    items = {} # asid to id LUT
    records = {} # all records
    features = {}
    random.seed(0)
    columns = None
    offset_to_features = 3  # index of the first feature column (after user, item, _)
    with open(data_path, 'r') as f:
        bad_actor = 0
        for line in f.readlines():
            record = {}
            split_line = line.split('\t')
            if columns is None:
                # the first line is the header naming every column
                columns = [e.rstrip() for e in split_line]
                continue
            #if (sampling and random.random()>sample_size):
            #    continue
            reviews += 1
            if (len(split_line) != len(columns)):
                raise Exception ("Line %d isn't aligned. Found %d values on line. In %d column file"
                                 % (reviews, len(split_line), len(columns)))
                #bad_actor = bad_actor + 1
                #continue
            else:
                auid, asid, _ = split_line[0:offset_to_features]
                record = {columns[i]:split_line[i].rstrip() for i in range (offset_to_features, len(split_line))}
                # map raw user/item ids to dense 1-based integer ids
                u, i = None, None
                if auid in users:
                    u = users[auid]
                else:
                    user_count += 1 # new user so increment
                    users[auid] = user_count
                    u = user_count
                if asid in items:
                    i = items[asid]
                else:
                    item_count += 1 # new i so increment
                    items[asid] = item_count
                    i = item_count
                # coerce numeric feature columns from str to float
                if 'cluster' in record:
                    record['cluster'] = float(record['cluster'])
                for c in ['price_delta_calc1','price_delta_calc2','price_delta_l4avg']:
                    if c in record:
                        record[c] = float(record[c])
                if 'price' in record:
                    if record['price'] == '':
                        record['price'] = 0
                    else:
                        record['price'] = float(record['price'])
                if 'level4_average' in record:
                    if record['level4_average'] == '':
                        record['level4_average'] = 0
                    else:
                        record['level4_average'] = float(record['level4_average'])
                    # Price ratio feature added
                    # NOTE(review): divides by level4_average, which is 0 when
                    # the column parsed as '' — confirm inputs never hit this.
                    record['level4_ratio_price']= record['price']/record['level4_average']
                if 'polarity' in record:
                    record['polarity']= round((float(record['polarity'])),2)
                # feature_vector is serialized like "[0101...]"; strip the
                # surrounding brackets and parse each character as a bit
                if 'feature_vector' in record:
                    if len(record['feature_vector']) == 0:
                        record['feature_vector'] = list(np.zeros(4524))
                    else:
                        record['feature_vector'] = [int(el) for el in list(record['feature_vector'])[:-1][1:]]
                for c in ['top_categories','rating','percentile_hotcoded','season','level4','sentiment']:
                    if c in record:
                        record[c] = [int(el) for el in list(record[c])[:-2][1:]]
                records[i] = record
                user_ratings[u].add(i)
                item_ratings[i].add(u)
                max_u_id = max(u, max_u_id)
                max_i_id = max(i, max_i_id)
    print ("max_u_id: ", max_u_id)
    print ("max_i_id: ", max_i_id)
    print ("reviews : ", reviews)
    # filter out users w/ less than X reviews
    num_u_id = 0
    num_i_id = 0
    num_reviews = 0
    user_ratings_filtered = defaultdict(set)
    for u, ids in user_ratings.items():
        if len(ids) > min_items:
            user_ratings_filtered[u] = ids
            num_u_id += 1
            num_reviews += len(ids)
    item_ratings_filtered = defaultdict(set)
    for ids, u in item_ratings.items():
        if len(u) > min_users:
            # keep
            item_ratings_filtered[ids] = u
            num_i_id += 1
    # pivot records (item -> {column: value}) into features (column -> {item: value})
    feature_keys = records[1].keys() #should be same as columns[offset:]
    features = {k:{i:records[i][k] for i in range(1,len(records)+1)} for k in feature_keys}
    print ("u_id: ", num_u_id)
    print ("i_id: ", num_i_id)
    print ("reviews : ", num_reviews)
    #return max_u_id, max_i_id, users, items, user_ratings_filtered,\
    #    item_ratings_filtered, brands, prices, prod_desc, prod_cat,price_feature,season_feature
    return max_u_id, max_i_id, users, items, user_ratings_filtered,item_ratings_filtered, features
#load image features for the given asin collection into dictionary
def load_image_features(path, items):
    """Load 4096-dim CNN image features for the items of interest.

    The file is a sequence of fixed-size records: a 10-byte ASIN followed by
    4096 float32 values (16384 bytes).

    :param path: path to the binary feature file
    :param items: dict mapping ASIN (bytes, as stored in the file) -> item id
    :return: dict mapping item id -> np.ndarray of 4096 scaled floats
    """
    image_features = {}
    with open(path, 'rb') as f:  # `with` ensures the handle is closed
        while True:
            asin = f.read(10)
            # BUG FIX: read() returns b'' at EOF in binary mode; the original
            # compared against the str '' which never matches bytes, so the
            # loop never terminated.
            if not asin:
                break
            features_bytes = f.read(16384)  # 4 * 4096 = 16KB, fast read, don't unpack
            if asin in items:  # only unpack 4096 bytes if we need it -- big speed up
                # frombuffer replaces the long-deprecated np.fromstring.
                features = np.frombuffer(features_bytes, dtype=np.float32) / 58.388599
                iid = items[asin]
                if len(features) == 0:
                    image_features[iid] = np.zeros(4096)
                else:
                    image_features[iid] = features
    return image_features
def uniform_sample_batch(train_ratings, test_ratings, item_count, advanced_features):
    """Yield one BPR training batch per user: (u, i, j) triples plus features.

    For each item i the user rated (excluding the held-out test item), a
    negative item j the user has NOT rated is rejection-sampled uniformly,
    and the feature vectors of both items are collected.

    :param train_ratings: dict user -> set of rated item ids
    :param test_ratings: dict user -> held-out test item id
    :param item_count: number of items (ids assumed in 1..item_count)
    :param advanced_features: dict item id -> feature vector
    :yield: (t, iv, jv): Nx3 array of [u, i, j] rows plus the vertically
        stacked i- and j-feature matrices
    """
    neg_items = 2  # range(1, neg_items) -> exactly one negative per positive
    for u in train_ratings.keys():
        t = []
        iv = []
        jv = []
        for i in train_ratings[u]:
            if (u in test_ratings.keys()):
                if (i != test_ratings[u]): # make sure it's not in the test set
                    for k in range(1,neg_items):
                        # rejection-sample a negative item for this user
                        j = random.randint(1, item_count)
                        while j in train_ratings[u]:
                            j = random.randint(1, item_count)
                        # sometimes there will not be an image for given product
                        try:
                            advanced_features[i]
                            advanced_features[j]
                        except KeyError:
                            continue
                        iv.append(advanced_features[i])
                        jv.append(advanced_features[j])
                        t.append([u, i, j])
            else:
                # user has no held-out test item: every rated item is a positive
                for k in range(1,neg_items):
                    j = random.randint(1, item_count)
                    while j in train_ratings[u]:
                        j = random.randint(1, item_count)
                    # sometimes there will not be an image for given product
                    try:
                        advanced_features[i]
                        advanced_features[j]
                    except KeyError:
                        continue
                    iv.append(advanced_features[i])
                    jv.append(advanced_features[j])
                    t.append([u, i, j])
        # block if queue is full
        if len(iv)>1:
            yield numpy.asarray(t), numpy.vstack(tuple(iv)), numpy.vstack(tuple(jv))
        else:
            continue
def test_batch_generator_by_user(train_ratings, test_ratings, item_ratings, item_count, advanced_features, cold_start = False, cold_start_thresh=10):
    """Yield per-user evaluation batches for leave-one-out AUC testing.

    For up to 4000 randomly chosen test users, pair the held-out item i with
    up to 100 sampled unseen negatives j and emit feature matrices for both.

    :param train_ratings: dict user -> set of rated item ids
    :param test_ratings: dict user -> held-out test item id
    :param item_ratings: dict item -> set of users who rated it
    :param item_count: number of items; NOTE: must be >= 100 for the sample
    :param advanced_features: dict item id -> feature vector
    :param cold_start: when True, only evaluate items with fewer than
        cold_start_thresh observed ratings
    :yield: (t, ilist, jlist): Nx3 [u, i, j] rows plus stacked feature matrices
    """
    # using leave one cv
    # BUG FIX: random.sample() requires a sequence; passing the dict keys
    # view raises TypeError on Python 3.11+ — wrap it in list() (behaviour
    # is otherwise unchanged).
    for u in random.sample(list(test_ratings.keys()), min(len(test_ratings), 4000)):
        i = test_ratings[u]
        # in cold-start mode, skip items that already have enough ratings
        if (cold_start and len(item_ratings[i]) > cold_start_thresh-1):
            continue
        t = []
        ilist = []
        jlist = []
        count = 0
        for j in random.sample(range(item_count), 100):
            # find item not in test[u] and train[u]
            if j != test_ratings[u] and not (j in train_ratings[u]):
                try:
                    advanced_features[i]
                    advanced_features[j]
                except KeyError:
                    continue  # some items have no feature vector
                count += 1
                t.append([u, i, j])
                ilist.append(advanced_features[i])
                jlist.append(advanced_features[j])
        if (len(ilist) == 0):
            # could not find a negative item for this user
            continue
        yield numpy.asarray(t), numpy.vstack(tuple(ilist)), numpy.vstack(tuple(jlist))
def generate_test(user_ratings):
    '''
    For each user, randomly select one of their rated items as the held-out
    test item (leave-one-out split).

    :param user_ratings: dict user id -> set of rated item ids
    :return: dict user id -> single held-out item id
    '''
    user_test = dict()
    for u, i_list in user_ratings.items():
        # BUG FIX: random.sample() no longer accepts sets (support removed in
        # Python 3.11); materialize to a list first — behaviour unchanged.
        user_test[u] = random.sample(list(user_ratings[u]), 1)[0]
    return user_test
def price_transform(value, levels, price_max):
    """One-hot encode *value* into *levels* equal-width price buckets.

    Bucket 0 (values that round down to zero) is dropped, so the returned
    vector has exactly *levels* entries.
    """
    one_hot = np.zeros(levels + 1, dtype=int)
    bucket = int(np.ceil(float(value) / (price_max / levels)))
    one_hot[bucket] = 1
    return one_hot[1:]
"""
l4_avg_adjusted_min = df['l4_avg_adjusted'].min()
df['l4_avg_adjusted_log'] = df['l4_avg_adjusted'].apply(lambda x: \
np.log(x-l4_avg_adjusted_min+1))
l4_avg_adjusted_max = df['l4_avg_adjusted'].max()
df['l4_avg_adjusted_1hotencoded'] = df['l4_avg_adjusted'].apply(lambda x: \
price_transform(x,20, l4_avg_adjusted_max))
l4_avg_adjusted_log_max = df['l4_avg_adjusted_log'].max()
df['l4_avg_adjusted_log_1hotencoded'] = df['l4_avg_adjusted_log'].apply(lambda x: \
price_transform(x,6, l4_avg_adjusted_log_max))
"""
#user_count, item_count, users, items, user_ratings, item_ratings, brands, prices, prod_desc = load_data_hybrid(data_path, min_items=4, min_users=0, sampling= True, sample_size = 0.8)
def transform_features (features):
    """Derive one-hot / log-transformed encodings from raw feature columns.

    Mutates and returns *features* (dict: column name -> {item id: value}),
    adding *_1hotencoded and *_log_1hotencoded columns for the price-related
    fields, plus one-hot encodings for 'brand' and 'cluster' when present.
    (The large commented-out l4_avg_adjusted block was dead code and has
    been removed.)
    """
    # --- level 4 price ratio ---
    level4_ratio_price = features['level4_ratio_price']
    level4_ratio_price_max = max(level4_ratio_price.values())
    print("l4 ratio price max", level4_ratio_price_max)
    features['level4_ratio_price_1hotencoded'] = {k:price_transform(v,26,level4_ratio_price_max)
                                                  for k, v in level4_ratio_price.items()}
    # log1p-transformed variant compresses the long tail into fewer buckets
    level4_ratio_price_log = {k:np.log(1+v) for k, v in level4_ratio_price.items()}
    level4_ratio_price_log_max = max(level4_ratio_price_log.values())
    features['level4_ratio_price_log_1hotencoded'] = {k:price_transform(v,12,level4_ratio_price_log_max)
                                                      for k, v in level4_ratio_price_log.items()}
    # --- level 4 average price ---
    level4_average = features['level4_average']
    level4_average_max = max(level4_average.values())
    features['level4_average_1hotencoded'] = {k:price_transform(v,20,level4_average_max)
                                              for k, v in level4_average.items()}
    level4_average_log = {k:np.log(1+v) for k, v in level4_average.items()}
    level4_average_log_max = max(level4_average_log.values())
    features['level4_average_log_1hotencoded'] = {k:price_transform(v,10,level4_average_log_max)
                                                  for k, v in level4_average_log.items()}
    # --- raw price ---
    price = features['price']
    price_max = max(price.values())
    features['price_1hotencoded'] = {k:price_transform(v,20,price_max)
                                     for k, v in price.items()}
    price_log = {k:np.log(1+v) for k, v in price.items()}
    price_log_max = max(price_log.values())
    features['price_log_1hotencoded'] = {k:price_transform(v,10,price_log_max)
                                         for k, v in price_log.items()}
    # --- brand one-hot ---
    # NOTE: the vector length is the number of DISTINCT brands (not items).
    if 'brand' in features:
        brands_features = {}
        brands = features['brand']
        brands_all = list(set(brands.values()))
        for key, value in brands.items():
            brands_vec = numpy.zeros(len(brands_all))
            brands_vec[brands_all.index(value)] = 1
            brands_features[key] = brands_vec
        features['brand_1hotencoded'] = brands_features
    # --- cluster one-hot ---
    if 'cluster' in features:
        cluster_features = {}
        cluster = features['cluster']
        cluster_all = sorted( list(set(cluster.values())) )
        for key, value in cluster.items():
            cluster_vec = numpy.zeros(len(cluster_all))
            # BUG FIX: the original indexed with the undefined name `v`
            # (NameError at runtime); the loop variable is `value`.
            cluster_vec[int(value)] = 1
            cluster_features[key] = cluster_vec
        features['cluster_1hotencoded'] = cluster_features
    return features
# list of features defined as dicts can be passed and they are combined, if none array of zeros are created
def feature_set(feature_dicts=None,item_count=None):
    """Combine several per-item feature dicts into one, concatenating values.

    :param feature_dicts: iterable of dicts item id -> list of feature values;
        None yields a dummy single-zero feature per item (plain-BPR baseline)
    :param item_count: number of items (required when feature_dicts is None)
    :return: dict item id -> combined feature list
    """
    if feature_dicts is None:  # idiom fix: identity check, not `== None`
        # dummy zero features for the baseline model
        return {n: [0] for n in range(1,item_count+1)}
    combined_features = defaultdict(list)
    for d in feature_dicts:
        for k, v in d.items():
            combined_features[k].extend(v)
    # plain dict() conversion replaces the needless dict([(k,v) ...]) rebuild
    return dict(combined_features)
def abpr(user_count, item_count, advanced_features, hidden_dim=10, hidden_adv_dim=10,
         l2_regulization=0.1,
         bias_regulization=0.01,
         embed_regulization = 0,
         adv_feature_regulization =0.1,
         adv_feature_bias_regulization = 0.01):
    """
    Build the (TF1 graph-mode) advanced-feature BPR model.

    user_count: total number of users
    item_count: total number of items
    hidden_dim: hidden feature size of MF
    hidden_adv_dim: hidden visual/non-visual feature size of MF
    P.S. advanced_features can be one or many features combined. it can only be image features, non-image features, or both

    Returns (xuij, u, i, j, iv, jv, loss, auc, train_op): the score
    difference, the id/feature placeholders, the loss and AUC tensors,
    and the Adam training op.
    """
    advanced_feat_dim = len(advanced_features[1])
    # Placeholders: feature vectors for positive item i / negative item j,
    # and the integer ids of user u and items i, j.
    iv = tf.placeholder(tf.float32, [None, advanced_feat_dim])
    jv = tf.placeholder(tf.float32, [None, advanced_feat_dim])
    u = tf.placeholder(tf.int32, [None])
    i = tf.placeholder(tf.int32, [None])
    j = tf.placeholder(tf.int32, [None])
    # model parameters -- LEARN THESE
    # latent factors
    user_emb_w = tf.get_variable("user_emb_w", [user_count + 1, hidden_dim],
                                 initializer=tf.random_normal_initializer(0, 0.1))
    item_emb_w = tf.get_variable("item_emb_w", [item_count + 1, hidden_dim],
                                 initializer=tf.random_normal_initializer(0, 0.1))
    # biases
    item_b = tf.get_variable("item_b", [item_count + 1, 1], initializer=tf.constant_initializer(0.0))
    # pull out the respective latent factor vectors for a given user u and items i & j
    u_emb = tf.nn.embedding_lookup(user_emb_w, u)
    i_emb = tf.nn.embedding_lookup(item_emb_w, i)
    j_emb = tf.nn.embedding_lookup(item_emb_w, j)
    # get the respective biases for items i & j
    i_b = tf.nn.embedding_lookup(item_b, i)
    j_b = tf.nn.embedding_lookup(item_b, j)
    # MF predict: u_i > u_j
    # UxD Advanced feature latent factors for users
    user_adv_w = tf.get_variable("user_adv_w", [user_count + 1, hidden_adv_dim],
                                 initializer=tf.random_normal_initializer(0, 0.1))
    # this is E, the embedding matrix
    item_adv_w = tf.get_variable("item_adv_w", [hidden_adv_dim, advanced_feat_dim],
                                 initializer=tf.random_normal_initializer(0, 0.1))
    theta_i = tf.matmul(iv, item_adv_w, transpose_b=True) # (f_i * E), eq. 3
    theta_j = tf.matmul(jv, item_adv_w, transpose_b=True) # (f_j * E), eq. 3
    # per-dimension bias over the raw advanced features
    adv_feature_bias = tf.get_variable("adv_feature_bias", [1, advanced_feat_dim], initializer=tf.random_normal_initializer(0, 0.1))
    # pull out the visual factor, 1 X D for user u
    u_img = tf.nn.embedding_lookup(user_adv_w, u)
    # score of (u, i) / (u, j): item bias + MF dot-product + advanced-feature
    # interaction + feature bias term
    xui = i_b + tf.reduce_sum(tf.multiply(u_emb, i_emb), 1, keep_dims=True) + tf.reduce_sum(tf.multiply(u_img, theta_i), 1, keep_dims=True) \
          + tf.reduce_sum(tf.multiply(adv_feature_bias, iv), 1, keep_dims=True)
    xuj = j_b + tf.reduce_sum(tf.multiply(u_emb, j_emb), 1, keep_dims=True) + tf.reduce_sum(tf.multiply(u_img, theta_j), 1, keep_dims=True) \
          + tf.reduce_sum(tf.multiply(adv_feature_bias, jv), 1, keep_dims=True)
    # L2 regularization, each term weighted by its own coefficient
    l2_norm = tf.add_n([
        l2_regulization * tf.reduce_sum(tf.multiply(u_emb, u_emb)),
        adv_feature_regulization * tf.reduce_sum(tf.multiply(u_img, u_img)),
        l2_regulization * tf.reduce_sum(tf.multiply(i_emb, i_emb)),
        l2_regulization * tf.reduce_sum(tf.multiply(j_emb, j_emb)),
        embed_regulization * tf.reduce_sum(tf.multiply(item_adv_w, item_adv_w)),
        bias_regulization * tf.reduce_sum(tf.multiply(i_b, i_b)),
        bias_regulization * tf.reduce_sum(tf.multiply(j_b, j_b)),
        adv_feature_bias_regulization * tf.reduce_sum(tf.multiply(adv_feature_bias, adv_feature_bias))
    ])
    xuij = xui - xuj
    # AUC: fraction of (i, j) pairs ranked correctly (positive above negative)
    auc = tf.reduce_mean(tf.to_float(xuij > 0))
    # BPR loss: regularizer minus mean log-sigmoid of the score difference
    loss = l2_norm - tf.reduce_mean(tf.log(tf.sigmoid(xuij)))
    train_op = tf.train.AdamOptimizer().minimize(loss)
    return xuij,u, i, j, iv, jv, loss, auc, train_op
def session_run(num_iter, user_count, item_count, users, items,
                user_ratings, item_ratings, advanced_features,hidden_dim=10,hidden_adv_dim=10,cold_start_thresh=10):
    """Train the abpr model for num_iter epochs, tracking AUC metrics.

    Each epoch runs a training pass, a full test pass, and a cold-start
    test pass; per-epoch mean AUCs are collected for all three.

    :return: (user_items_test, auc_train, auc_test, auc_test_cs) where
        user_items_test holds the (triples, scores) pairs from the FINAL
        epoch's cold-start pass and the other three are per-epoch AUC lists.
    """
    ### Loading and parsing the review matrix for Women 5-core dataset
    auc_train = []
    auc_test = []
    auc_test_cs = []
    #data_path = os.path.join('/Users/nolanthomas/Public/amazon', 'out_topcategories_pricepercentile_seasonmeteorological.csv')
    #user_count, item_count, users, items, user_ratings, item_ratings, brands, features = load_data_hybrid(data_path, min_items=4, min_users=0, sampling= True, sample_size = 0.8)
    # hold out one rated item per user for evaluation
    user_ratings_test = generate_test(user_ratings)
    with tf.Graph().as_default(), tf.Session() as session:
        with tf.variable_scope('abpr'):
            xuij,u, i, j, iv, jv, loss, auc, train_op = abpr(user_count, item_count, advanced_features,hidden_dim,hidden_adv_dim)
        session.run(tf.global_variables_initializer())
        for epoch in range(1, num_iter+1):
            print ("epoch ", epoch)
            # --- training pass ---
            _loss_train = 0.0
            user_count = 0  # NOTE(review): shadows the parameter; from here on it counts batches
            auc_train_values = []
            for d, _iv, _jv in uniform_sample_batch(user_ratings, user_ratings_test, item_count, advanced_features):
                user_count += 1
                _loss, _auc, _ = session.run([loss, auc, train_op], feed_dict={u:d[:,0], i:d[:,1], j:d[:,2], iv:_iv, jv:_jv})
                _loss_train += _loss
                auc_train_values.append(_auc)
            print ("train_loss:", _loss_train/user_count, "train auc: ", numpy.mean(auc_train_values))
            auc_train.append(numpy.mean(auc_train_values))
            # --- evaluation pass (all test users) ---
            user_items_test=[]
            auc_values = []
            _loss_test = 0.0
            user_count = 0
            for d, _iv, _jv in test_batch_generator_by_user(user_ratings, user_ratings_test, item_ratings, item_count, advanced_features, cold_start = False):
                user_count += 1
                _loss, _auc = session.run([loss, auc], feed_dict={u: d[:, 0], i: d[:, 1], j: d[:, 2], iv: _iv, jv: _jv})
                _loss_test += _loss
                auc_values.append(_auc)
            print ("test_loss: ", _loss_test / user_count, "test auc: ", numpy.mean(auc_values))
            auc_test.append(numpy.mean(auc_values))
            # --- evaluation pass (cold-start items only) ---
            auc_values_cs = []
            _loss_test_cs = 0.0
            user_count = 0
            for d, _iv, _jv in test_batch_generator_by_user(user_ratings, user_ratings_test, item_ratings, item_count, advanced_features, cold_start=True,cold_start_thresh=cold_start_thresh):
                user_count += 1
                _xuij,_loss, _auc = session.run([xuij,loss, auc], feed_dict={u: d[:, 0], i: d[:, 1], j: d[:, 2], iv: _iv, jv: _jv})
                _loss_test_cs += _loss
                auc_values_cs.append(_auc)
                if epoch==num_iter:
                    # keep raw triples and scores from the final epoch only
                    user_items_test.append((d,_xuij))
            print ("cold start test_loss: ", _loss_test_cs / user_count, "cold start auc: ", numpy.mean(auc_values_cs))
            auc_test_cs.append(numpy.mean(auc_values_cs))
    return user_items_test,auc_train, auc_test, auc_test_cs
def run(num_sessions, user_count, item_count, users, items,
        user_ratings, item_ratings, advanced_features,hidden_dim=10,hidden_adv_dim=10,cold_start_thresh=10):
    """Train/evaluate once and return a result record with run metadata.

    Thin wrapper around session_run() that also captures wall-clock timing
    and environment details for later reporting.
    """
    t1 = datetime.now()
    # BUG FIX: these keyword arguments were hard-coded to 10, silently
    # ignoring the hidden_dim / hidden_adv_dim / cold_start_thresh values the
    # caller passed in; forward them instead.
    user_items_test, auc_train, auc_test, auc_test_cold = session_run(num_sessions, user_count, item_count,
                                                                      users, items, user_ratings, item_ratings,
                                                                      advanced_features, hidden_dim=hidden_dim,
                                                                      hidden_adv_dim=hidden_adv_dim,
                                                                      cold_start_thresh=cold_start_thresh)
    t2 = datetime.now()
    return {'num_sessions':num_sessions,
            'num_features':len(advanced_features),
            'hidden_dim':hidden_dim,
            'hidden_adv_dim':hidden_adv_dim,
            'cold_start_thresh':cold_start_thresh,
            'user_count':user_count,
            'item_count':item_count,
            'sys.platform':str(sys.platform),
            'platform.processor':str(platform.processor()),
            'sys.version':str(sys.version),
            'user_items_test': user_items_test,
            'auc_train': auc_train, 'auc_test': auc_test,
            'auc_cold_test': auc_test_cold,
            # BUG FIX: 'end' previously recorded t1 (the start time) again.
            'start':format_time(t1),'end':format_time(t2),
            'delta_sec':(t2-t1).total_seconds()}
# Plotting setup for the AUC curves below: matplotlib with the seaborn style.
import matplotlib as mpl
import seaborn as sns
mpl.style.use('seaborn')  # NOTE(review): the 'seaborn' style name is deprecated/removed in matplotlib >= 3.6 - confirm pinned version
def plot_auc_curve(results_to_graph, title, highlight):
    """Plot the per-iteration test AUC curve of every experiment in
    results_to_graph (a dict: description -> result dict from run()).

    The 'BPR' baseline is drawn dashed and the `highlight` entry dash-dotted
    so they stand out; every curve is annotated with its final AUC value.
    """
    # Timestamp for the (currently commented-out) savefig call below.
    dt_str = datetime.now().strftime("%Y%m%d.%H%M")
    sns.set_context("poster")
    sns.set_palette("cubehelix",8)
    plt.figure(figsize=(20,10))
    plt.tick_params(axis='both', which='major', labelsize=20)
    plt.title(title,fontsize=30)
    plt.xticks(range(0,20))
    for calc_desc, calc_results in results_to_graph.items():
        # Line style encodes which experiment this is.
        ls='solid'
        if calc_desc == 'BPR':
            ls='dashed'
        if calc_desc == highlight:
            ls='-.'
        plt.plot(calc_results['auc_test'],
                 label=calc_desc,
                 linestyle=ls,
                 marker='o')
        # Label the last point of the curve with its rounded AUC.
        # NOTE(review): annotate's 's=' keyword was renamed to 'text=' and
        # removed in matplotlib 3.4, and 'textcoords' is given without
        # 'xytext' - confirm this works with the pinned matplotlib version.
        plt.annotate(xy=[calc_results['num_sessions']-1,calc_results['auc_test'][-1]],
                     s=str(round(calc_results['auc_test'][-1],4)), #+ ' ' + calc_desc,
                     fontsize=15,
                     textcoords='offset points')
    plt.legend()
    plt.ylabel("Test AUC",fontsize=20)
    plt.xlabel("Number of Iterations",fontsize=20)
    #savefig('auc_curve.' + dt_str + '.png')
    #show()
def plot_auc_cold_start_curve(results_to_graph, title,highlight):
    """Plot the per-iteration cold-start test AUC ('auc_cold_test') of every
    experiment in results_to_graph (dict: description -> result dict).

    Same styling conventions as plot_auc_curve: 'BPR' dashed, the
    `highlight` entry dash-dotted, final AUC annotated on each curve.
    """
    # Timestamp for the (currently commented-out) savefig call below.
    dt_str = datetime.now().strftime("%Y%m%d.%H%M")
    sns.set_context("poster")
    sns.set_palette("cubehelix",8)
    plt.figure(figsize=(20,10))
    plt.tick_params(axis='both', which='major', labelsize=20)
    plt.title(title,fontsize=30)
    plt.xticks(range(0,20))
    for calc_desc, calc_results in results_to_graph.items():
        # Line style encodes which experiment this is.
        ls='solid'
        if calc_desc == 'BPR':
            ls='dashed'
        if calc_desc == highlight:
            ls='-.'
        plt.plot(calc_results['auc_cold_test'],
                 label=calc_desc,
                 linestyle=ls,
                 marker='o')
        # NOTE(review): annotate's 's=' keyword was removed in matplotlib 3.4
        # (renamed 'text='), and 'textcoords' is used without 'xytext' -
        # confirm against the pinned matplotlib version.
        plt.annotate(xy=[calc_results['num_sessions']-1,calc_results['auc_cold_test'][-1]],
                     s=str(round(calc_results['auc_cold_test'][-1],4)), #+ ' ' + calc_desc,
                     fontsize=15,
                     textcoords='offset points')
    plt.legend()
    plt.ylabel("Test AUC",fontsize=20)
    plt.xlabel("Number of Iterations",fontsize=20)
    #plt.savefig('auc_cold_start_curve.' + dt_str + '.png')
    #plt.show()
|
def findAllInitActivity(log):
    """Return the distinct first activities over all traces in the log."""
    return list({trace[0] for trace in log})


def findOrCreateInitActivity(log):
    """Return the shared initial activity of the log, or the artificial
    start activity 'a_i' when the traces do not agree on one."""
    starts = findAllInitActivity(log)
    return 'a_i' if len(starts) > 1 else starts[0]
def findAllEndActivity(log):
    """Return the distinct final activities over all traces in the log."""
    return list({trace[-1] for trace in log})


def findOrCreateEndActivity(log):
    """Return the shared final activity of the log, or the artificial
    end activity 'a_o' when the traces do not agree on one."""
    ends = findAllEndActivity(log)
    return 'a_o' if len(ends) > 1 else ends[0]
def preProcessLog(log, act):
    """Ensure every trace of the log starts and ends with a common activity.

    When the traces do not already share an initial (final) activity, the
    artificial 'a_i' ('a_o') activity is prepended (appended) to every
    trace - but only if that activity is not already in `act`, the known
    activity set. The traces are modified in place; the log is returned
    for convenience. A None log is returned unchanged.
    """
    if log is not None:
        initAct = findOrCreateInitActivity(log)
        endAct = findOrCreateEndActivity(log)
        if initAct not in act:
            for trace in log:
                trace.insert(0, initAct)
        if endAct not in act:
            for trace in log:
                # idiomatic equivalent of the former insert(len(l), endAct)
                trace.append(endAct)
    return log
|
from snakemake.utils import R
# def getContigNames():
# if isinstance(config['resources'][ORGANISM]['contigNames'], Error):
# return ["ERROR"]
# f = open(config['resources'][ORGANISM]['contigNames'],'r')
# result = []
# for line in f:
# result.append(line.strip())
# return result
#
# # This rule uses the custom samtools version of bicSeq2 to extract unique mappings for bicSeq2
# if not 'BICSEQ2IN' in globals():
# BICSEQ2IN = REMOVEPCRDUBLICATESOUT
# if not 'BICSEQ2OUT' in globals():
# BICSEQ2OUT = OUTDIR + 'bicseq2/'
# rule bicSeq_samtoolsUnique:
# input:
# bam = BICSEQ2IN + '{sample}.bam',
# contigNamnes =config['resources'][ORGANISM]['contigNames']
# output:
# seq = expand(BICSEQ2OUT + '{{sample}}' + '/{contigNames}.seq', contigNames=getContigNames())
# params:
# lsfoutfile = BICSEQ2OUT + '{sample}/unique.lsfout.log',
# lsferrfile = BICSEQ2OUT + '{sample}/unique.lsferr.log',
# scratch = config['tools']['bicseq2']['unique']['scratch'],
# mem = config['tools']['bicseq2']['unique']['mem'],
# time = config['tools']['bicseq2']['unique']['time'],
# mapper = config['tools']['bicseq2']['unique']['mapper'],
# directory = BICSEQ2OUT + '{sample}' + '/'
# threads:
# config['tools']['bicseq2']['unique']['threads']
# benchmark:
# BICSEQ2OUT + '{sample}/unique.benchmark'
# shell:
# ('mkdir -p {params.directory} && ' +
# '{config[tools][bicseq2][unique][call]} view -U {params.mapper},{params.directory},N,N {input.bam}')
#
# rule extractContigsFromFasta:
# input:
# fasta = config['resources'][ORGANISM]['reference']
# output:
# fasta = expand(config['resources'][ORGANISM]['reference'] + '_contigs/{contigs}.fasta', contigs = getContigNames()),
# suc = config['resources'][ORGANISM]['reference'] + '_contigs/complete.txt'
# params:
# lsfoutfile = config['resources'][ORGANISM]['reference'] + '_contigs/complete.lsfout.log',
# lsferrfile = config['resources'][ORGANISM]['reference'] + '_contigs/complete.lsferr.log',
# scratch = config['tools']['extractContigs']['scratch'],
# mem = config['tools']['extractContigs']['mem'],
# time = config['tools']['extractContigs']['time'],
# threads:
# config['tools']['extractContigs']['threads']
# benchmark:
# config['resources'][ORGANISM]['reference'] + '_contigs/complete.benchmark'
# shell:
# '{config[tools][extractContigs][call]} {input.fasta} && touch {output.suc}'
#
#
# rule craeteConfigBicSeqNorm:
# input:
# suc = config['resources'][ORGANISM]['reference'] + '_contigs/complete.txt'
# output:
# config = BICSEQ2OUT + '{sample}/configNorm.txt'
# params:
# lsfoutfile = BICSEQ2OUT + '{sample}/configNorm.txt.lsfout.log',
# lsferrfile = BICSEQ2OUT + '{sample}/configNorm.txt.lsferr.log',
# scratch = config['tools']['bicSeqConfigNorm']['scratch'],
# mem = config['tools']['bicSeqConfigNorm']['mem'],
# time = config['tools']['bicSeqConfigNorm']['time'],
# sample = '{sample}',
# reference = config['resources'][ORGANISM]['reference'],
# mappabilityFile = config['resources'][ORGANISM]['pathBicSeq2Mappability']
# threads:
# config['tools']['bicSeqConfigNorm']['threads']
# benchmark:
# BICSEQ2OUT + '{sample}/configNorm.txt.benchmark'
# run:
# outfile = open(output.config, 'w')
# outfile.write('chromName\tfaFile\tMapFile\treadPosFile\tbinFileNorm\n')
# #TODO: This needs to be adapted to work for different organisms and versions
# BICSEQMAPPINGCHROM = sorted([file.replace(config['resources'][ORGANISM]['pathBicSeq2Mappability'], '').strip().split('.')[-2] for file in glob.glob(config['resources'][ORGANISM]['pathBicSeq2Mappability'] + '/*.txt')])
# for chr in BICSEQMAPPINGCHROM:
# outfile.write(chr + '\t')
# outfile.write(params.reference + '_contigs/' + chr + '.fasta' + '\t')
# outfile.write(params.mappabilityFile + '/hg19.CRC.75mer.' + chr + '.txt' + '\t')
# outfile.write(BICSEQ2OUT + params.sample + '/' + chr + '.seq\t')
# outfile.write(BICSEQ2OUT + params.sample + '/' + chr + '.norm.bin\n')
#
# # This rule applies bicSeq2-norm
# rule bicSeq_norm:
# input:
# insertSizeFile = BICSEQ2IN + '{sample}.bam_stats/insert_size_metrics.txt',
# config = BICSEQ2OUT + '{sample}/configNorm.txt',
# seq = expand(BICSEQ2OUT + '{{sample}}' + '/{contigNames}.seq', contigNames=getContigNames())
# output:
# out = BICSEQ2OUT + '{sample}/paramsEstimate.txt',
# tmp = temp(BICSEQ2OUT + '{sample}/tmp/')
# params:
# lsfoutfile = BICSEQ2OUT + '{sample}/paramsEstimate.txt.lsfout.log',
# lsferrfile = BICSEQ2OUT + '{sample}/paramsEstimate.txt.lsferr.log',
# scratch = config['tools']['bicseq2']['norm']['scratch'],
# mem = config['tools']['bicseq2']['norm']['mem'],
# time = config['tools']['bicseq2']['norm']['time'],
# readLength = config['tools']['bicseq2']['norm']['readLength'],
# directory = BICSEQ2OUT + '{sample}'
# threads:
# config['tools']['bicseq2']['norm']['threads']
# benchmark:
# BICSEQ2OUT + '{sample}/paramsEstimate.txt.benchmark'
# shell:
# ("inSize=$(head -n 8 {input.insertSizeFile} | tail -n 1 | cut -f 1) && " +
# "{config[tools][bicseq2][norm][call]} " +
# "-l={params.readLength} " +
# "-s=${{inSize}} " +
# "--gc_bin {input.config} " +
# "--tmp={output.tmp} " +
# "{output.out}")
#
# rule craeteConfigBicSeqSeg:
# input:
# suc = config['resources'][ORGANISM]['reference'] + '_contigs/complete.txt'
# output:
# config = BICSEQ2OUT + '{tumor}_vs_{normal}/configSeg.txt'
# params:
# lsfoutfile = BICSEQ2OUT + '{tumor}_vs_{normal}/configSeg.txt.lsfout.log',
# lsferrfile = BICSEQ2OUT + '{tumor}_vs_{normal}/configSeg.txt.lsferr.log',
# scratch = config['tools']['bicSeqConfigSeq']['scratch'],
# mem = config['tools']['bicSeqConfigSeq']['mem'],
# time = config['tools']['bicSeqConfigSeq']['time'],
# normal = '{normal}',
# tumor = '{tumor}'
# threads:
# config['tools']['bicSeqConfigSeq']['threads']
# benchmark:
# BICSEQ2OUT + '{tumor}_vs_{normal}/configSeg.txt.benchmark'
# run:
# outfile = open(output.config, 'w')
# outfile.write('chromName\tbinFileNorm.Case\tbinFileNorm.Control\n')
# #TODO: This needs to be adapted to work for different organisms and versions
# BICSEQMAPPINGCHROM = sorted([file.replace(config['resources'][ORGANISM]['pathBicSeq2Mappability'], '').strip().split('.')[-2] for file in glob.glob(config['resources'][ORGANISM]['pathBicSeq2Mappability'] + '/*.txt')])
# for chr in BICSEQMAPPINGCHROM:
# outfile.write(chr + '\t')
# outfile.write(BICSEQ2OUT + params.tumor + '/' + chr + '.norm.bin\t')
# outfile.write(BICSEQ2OUT + params.normal + '/' + chr + '.norm.bin\n')
#
# # This rule applies bicSeq2-seg
# rule bicSeq_seg:
# input:
# config = BICSEQ2OUT + '{tumor}_vs_{normal}/configSeg.txt',
# paramsNormal = BICSEQ2OUT + '{normal}/paramsEstimate.txt',
# paramsTumor = BICSEQ2OUT + '{tumor}/paramsEstimate.txt'
# output:
# out = BICSEQ2OUT + '{tumor}_vs_{normal}.cnvsRaw.txt',
# fig = BICSEQ2OUT + '{tumor}_vs_{normal}.png',
# tmp = temp(BICSEQ2OUT + '{tumor}_vs_{normal}/tmp/')
# params:
# lsfoutfile = BICSEQ2OUT + '{tumor}_vs_{normal}.cnvsRaw.txt.lsfout.log',
# lsferrfile = BICSEQ2OUT + '{tumor}_vs_{normal}.cnvsRaw.txt.lsferr.log',
# scratch = config['tools']['bicseq2']['seg']['scratch'],
# mem = config['tools']['bicseq2']['seg']['mem'],
# time = config['tools']['bicseq2']['seg']['time'],
# title = '{tumor}_vs_{normal}_CNV'
# threads:
# config['tools']['bicseq2']['seg']['threads']
# benchmark:
# BICSEQ2OUT + '{tumor}_vs_{normal}.cnvsRaw.txt.benchmark'
# shell:
# '{config[tools][bicseq2][seg][call]} ' +
# '--fig={output.fig} ' +
# '--title={params.title} ' +
# '--nrm ' +
# '--control ' +
# '--tmp={output.tmp} ' +
# '{input.config} {output.out}'
# #'{config[tools][bicseq2][seg][call]} --fig={output.fig} --title={params.title} --control {input.config} {output.out}'
#
#
# # This rule applies bicSeq2 genotype.pl to assess event significance
# rule bicSeq_genotype:
# input:
# inCNV = BICSEQ2OUT + '{sample}.cnvsRaw.txt',
# config = BICSEQ2OUT + '{sample}/configSeg.txt'
# output:
# out = BICSEQ2OUT + '{sample}.cnvsGenotype.txt'
# params:
# lsfoutfile = BICSEQ2OUT + '{sample}.cnvsGenotype.txt.lsfout.log',
# lsferrfile = BICSEQ2OUT + '{sample}.cnvsGenotype.txt.lsferr.log',
# scratch = config['tools']['bicseq2']['genotype']['scratch'],
# mem = config['tools']['bicseq2']['genotype']['mem'],
# time = config['tools']['bicseq2']['genotype']['time']
# threads:
# config['tools']['bicseq2']['genotype']['threads']
# benchmark:
# BICSEQ2OUT + '{sample}.cnvsGenotype.txt.benchmark'
# shell:
# 'cut -f 1-3 {input.inCNV} > {input.inCNV}.forGenotype && ' +
# '{config[tools][bicseq2][genotype][call]} {input.config} {input.inCNV}.forGenotype {output.out}'
#
#
# # This rule applies a simple filter script to filter cnv events given a certain pvalue threshold and to determine the copy number
# rule bicSeq_filter:
# input:
# inCNV = BICSEQ2OUT + '{sample}.cnvsGenotype.txt'
# output:
# out = BICSEQ2OUT + '{sample}.filtered.txt'
# params:
# lsfoutfile = BICSEQ2OUT + '{sample}.filtered.txt.lsfout.log',
# lsferrfile = BICSEQ2OUT + '{sample}.filtered.txt.lsferr.log',
# scratch = config['tools']['bicseq2']['filter']['scratch'],
# mem = config['tools']['bicseq2']['filter']['mem'],
# time = config['tools']['bicseq2']['filter']['time'],
# pvalue = config['tools']['bicseq2']['filter']['pvalueThreshold']
# threads:
# config['tools']['bicseq2']['filter']['threads']
# benchmark:
# BICSEQ2OUT + '{sample}.filtered.txt.benchmark'
# shell:
# '{config[tools][bicseq2][filter][call]} {input.inCNV} {output.out} {params.pvalue}'
#
#
# # call VarScan copynumber
# if not 'VARSCANCNVIN' in globals():
# VARSCANCNVIN = MPILEUPOUT
# if not 'VARSCANCNVOUT' in globals():
# VARSCANCNVOUT = OUTDIR + 'copy_number/varscan_cnv/'
# rule varscan_copy_number:
# input:
# tumor = VARSCANCNVIN + '{tumor}.mpileup',
# normal = VARSCANCNVIN + '{normal}.mpileup'
# output:
# out = VARSCANCNVOUT + '{tumor}_vs_{normal}.copynumber'
# params:
# lsfoutfile = VARSCANCNVOUT + '{tumor}_vs_{normal}.copynumber.lsfout.log',
# lsferrfile = VARSCANCNVOUT + '{tumor}_vs_{normal}.copynumber.lsferr.log',
# scratch = config['tools']['varscan']['copyNumber']['scratch'],
# mem = config['tools']['varscan']['copyNumber']['mem'],
# time = config['tools']['varscan']['copyNumber']['time'],
# params = config['tools']['varscan']['copyNumber']['params'],
# outputTag = VARSCANCNVOUT + '{tumor}_vs_{normal}'
# threads:
# config['tools']['varscan']['copyNumber']['threads']
# benchmark:
# VARSCANCNVOUT + '{tumor}_vs_{normal}.copynumber.benchmark'
# log:
# VARSCANCNVOUT + '{tumor}_vs_{normal}.copynumber.log'
# shell:
# ('{config[tools][varscan][call]} copynumber ' +
# '{input.normal} ' +
# '{input.tumor} ' +
# '{params.outputTag} ' +
# '{params.params}')
#
# # call VarScan copyCaller
# rule varscan_copy_caller:
# input:
# rawCN = VARSCANCNVOUT + '{tumor}_vs_{normal}.copynumber'
# output:
# out = VARSCANCNVOUT + '{tumor}_vs_{normal}.cn'
# params:
# lsfoutfile = VARSCANCNVOUT + '{tumor}_vs_{normal}.cn.lsfout.log',
# lsferrfile = VARSCANCNVOUT + '{tumor}_vs_{normal}.cn.lsferr.log',
# scratch = config['tools']['varscan']['copyCaller']['scratch'],
# mem = config['tools']['varscan']['copyCaller']['mem'],
# time = config['tools']['varscan']['copyCaller']['time'],
# params = config['tools']['varscan']['copyCaller']['params']
# threads:
# config['tools']['varscan']['copyCaller']['threads']
# benchmark:
# VARSCANCNVOUT + '{tumor}_vs_{normal}.cy.benchmark'
# log:
# VARSCANCNVOUT + '{tumor}_vs_{normal}.cn.log'
# shell:
# ('{config[tools][varscan][call]} copyCaller ' +
# '{input.rawCN} ' +
# '--output-file {output.out} ' +
# '{params.params}')
#
# rule bicSeq2annovar:
# input:
# BICSEQ2OUT + '{sample}.filtered.txt'
# output:
# BICSEQ2OUT + '{sample}.filtered.forAnnovar.txt'
# params:
# lsfoutfile = BICSEQ2OUT + '{sample}.filtered.forAnnovar.txt.lsfout.log',
# lsferrfile = BICSEQ2OUT + '{sample}.filtered.forAnnovar.txt.lsferr.log',
# scratch = config['tools']['bicSeq2annovar']['scratch'],
# mem = config['tools']['bicSeq2annovar']['mem'],
# time = config['tools']['bicSeq2annovar']['time']
# threads:
# config['tools']['bicSeq2annovar']['threads']
# benchmark:
# BICSEQ2OUT + '{sample}.filtered.forAnnovar.txt.benchmark'
# shell:
# '{config[tools][bicSeq2annovar][call]} {input} {output}'
#
# #TODO: This needs to be adapted to work for different organisms and versions
# rule wgsAnnovar:
# input:
# txt = BICSEQ2OUT + '{sample}.filtered.forAnnovar.txt',
# db = config['resources'][ORGANISM]['annovarDB']
# output:
# out = BICSEQ2OUT + '{sample}.filtered.annotated.hg19_multianno.txt'
# params:
# lsfoutfile = BICSEQ2OUT + '{sample}.filtered.annotated.hg19_multianno.txt.lsfout.log',
# lsferrfile = BICSEQ2OUT + '{sample}.filtered.annotated.hg19_multianno.txt.lsferr.log',
# scratch = config['tools']['annovar']['scratch'],
# mem = config['tools']['annovar']['mem'],
# time = config['tools']['annovar']['time'],
# buildver = config['tools']['annovar']['buildver'],
# params = config['tools']['annovar']['params'],
# out = BICSEQ2OUT + '{sample}.filtered.annotated'
# threads:
# config['tools']['annovar']['threads']
# benchmark:
# BICSEQ2OUT + '{sample}.filtered.annotated.hg19_multianno.txt.benchmark'
# shell:
# ('{config[tools][annovar][call]} ' +
# '{input.txt} ' +
# '{input.db} ' +
# '-buildver {params.buildver} ' +
# '-out {params.out} ' +
# '{params.params}')
# Default FACETS input/output locations, unless the including Snakefile
# already defined them.
if not 'FACETSIN' in globals():
    FACETSIN = BASERECALIBRATIONOUT
if not 'FACETSOUT' in globals():
    FACETSOUT = OUTDIR + 'copy_number/facets/'
# Restrict the dbSNP VCF to the target regions; the header is copied first,
# then the intersecting records are appended. The result is the SNP set
# used by the FACETS pileup step below.
rule createBedForFacets:
    input:
        vcf = config['resources'][ORGANISM]['dbSNP'],
        regions = config['resources'][ORGANISM]['regions']
    output:
        vcf = FACETSOUT + 'snps.vcf'
    params:
        lsfoutfile = FACETSOUT + 'snps.vcf.lsfout.log',
        lsferrfile = FACETSOUT + 'snps.vcf.lsferr.log',
        scratch = config['tools']['facets']['region']['scratch'],
        mem = config['tools']['facets']['region']['mem'],
        time = config['tools']['facets']['region']['time'],
        params = config['tools']['facets']['region']['params']
    threads:
        config['tools']['facets']['region']['threads']
    benchmark:
        FACETSOUT + 'snps.vcf.benchmark'
    shell:
        ('zgrep "^#" {input.vcf} > {output.vcf}; ' +
         '{config[tools][facets][region][call]} ' +
         '{params.params} ' +
         '-a {input.vcf} ' +
         '-b {input.regions} ' +
         '>> {output.vcf}')
# Pile up read counts at each selected SNP position in the normal and tumor
# BAMs; the gzipped CSV feeds the FACETS segmentation rule below.
rule getSNPInfoForFacets:
    input:
        vcf = FACETSOUT + 'snps.vcf',
        normal = FACETSIN + '{normal}.bam',
        tumor = FACETSIN + '{tumor}.bam'
    output:
        csv = FACETSOUT + '{tumor}_vs_{normal}.csv.gz'
    params:
        lsfoutfile = FACETSOUT + '{tumor}_vs_{normal}.csv.gz.lsfout.log',
        lsferrfile = FACETSOUT + '{tumor}_vs_{normal}.csv.gz.lsferr.log',
        scratch = config['tools']['facets']['snpPileup']['scratch'],
        mem = config['tools']['facets']['snpPileup']['mem'],
        time = config['tools']['facets']['snpPileup']['time'],
        params = config['tools']['facets']['snpPileup']['params']
    threads:
        config['tools']['facets']['snpPileup']['threads']
    benchmark:
        FACETSOUT + '{tumor}_vs_{normal}.csv.gz.benchmark'
    shell:
        # Argument order matters for snp-pileup: vcf, output, then the BAMs
        # (normal first, tumor second).
        ('{config[tools][facets][snpPileup][call]} ' +
         '{params.params} ' +
         '{input.vcf} ' +
         '{output.csv} ' +
         '{input.normal} ' +
         '{input.tumor}')
# Run FACETS on the SNP pileup to call allele-specific copy number;
# produces the segment table (.cn) and a diagnostic plot (.pdf).
rule facets:
    input:
        csv = FACETSOUT + '{tumor}_vs_{normal}.csv.gz'
    output:
        pdf = FACETSOUT + '{tumor}_vs_{normal}.pdf',
        txt = FACETSOUT + '{tumor}_vs_{normal}.cn'
    params:
        lsfoutfile = FACETSOUT + '{tumor}_vs_{normal}.cn.lsfout.log',
        lsferrfile = FACETSOUT + '{tumor}_vs_{normal}.cn.lsferr.log',
        scratch = config['tools']['facets']['facets']['scratch'],
        mem = config['tools']['facets']['facets']['mem'],
        time = config['tools']['facets']['facets']['time'],
        params = config['tools']['facets']['facets']['params']
    threads:
        config['tools']['facets']['facets']['threads']
    benchmark:
        FACETSOUT + '{tumor}_vs_{normal}.cn.benchmark'
    shell:
        ('{config[tools][facets][facets][call]} ' +
         '{params.params} ' +
         '{input.csv} {output.txt} {output.pdf}')
# Reformat the raw FACETS segment table so downstream annotation tools
# (bedtools intersect, see annotateCNVsWithBedtools) can consume it.
rule facets_reformat:
    input:
        originalCN = FACETSOUT + '{tumor}_vs_{normal}.cn',
        refFile = config['resources'][ORGANISM]['reference']
    output:
        reformatTXT = FACETSOUT + '{tumor}_vs_{normal}.reformat.txt'
    params:
        lsfoutfile = FACETSOUT + '{tumor}_vs_{normal}.reformat.txt.lsfout.log',
        lsferrfile = FACETSOUT + '{tumor}_vs_{normal}.reformat.txt.lsferr.log',
        scratch = config['tools']['facets']['reformat']['scratch'],
        mem = config['tools']['facets']['reformat']['mem'],
        time = config['tools']['facets']['reformat']['time'],
        params = config['tools']['facets']['reformat']['params']
    threads:
        config['tools']['facets']['reformat']['threads']
    benchmark:
        FACETSOUT + '{tumor}_vs_{normal}.reformat.txt.benchmark'
    shell:
        ('{config[tools][facets][reformat][call]} ' +
         '{params.params} ' +
         '--inFile {input.originalCN} --outFile {output.reformatTXT} --refFile {input.refFile}')
# Filter the FACETS results (minimum SNPs per segment) and categorize the
# CNVs by total copy number.
# NOTE(review): the input pattern '{tumor}_vs_{normal}.txt' matches neither
# the '.cn' output of rule facets nor the '.reformat.txt' output of
# facets_reformat - confirm which rule is meant to produce this file.
rule facets_filter:
    input:
        txt = FACETSOUT + '{tumor}_vs_{normal}.txt'
    output:
        filteredCN = FACETSOUT + '{tumor}_vs_{normal}.filtered.txt'
    params:
        lsfoutfile = FACETSOUT + '{tumor}_vs_{normal}.filtered.txt.lsfout.log',
        lsferrfile = FACETSOUT + '{tumor}_vs_{normal}.filtered.txt.lsferr.log',
        scratch = config['tools']['facets']['filter']['scratch'],
        mem = config['tools']['facets']['filter']['mem'],
        time = config['tools']['facets']['filter']['time'],
        colName_totalCopy = config['tools']['facets']['filter']['colName_totalCopy'],
        colName_snpNum = config['tools']['facets']['filter']['colName_snpNum'],
        threshold_snpNum = config['tools']['facets']['filter']['threshold_snpNum']
    threads:
        config['tools']['facets']['filter']['threads']
    benchmark:
        FACETSOUT + '{tumor}_vs_{normal}.filtered.txt.benchmark'
    shell:
        ('{config[tools][facets][filter][call]} ' +
         '--infile {input.txt} --outfile {output.filteredCN} ' +
         '--colName_totalCopy {params.colName_totalCopy} --colName_snpNum {params.colName_snpNum} --threshold_snpNum {params.threshold_snpNum}')
# This rule annotates the CNV call results (for excavator reformatting is needed first)
# Two bedtools passes: events overlapping the gene DB get the gene columns
# appended (-wa -wb), events with no overlap are kept unannotated (-v -wa);
# both are concatenated and sorted by position into the final output.
rule annotateCNVsWithBedtools:
    input:
        inRes = '{sample}.txt',
        inDB = config['resources'][ORGANISM]['geneAnnotationDB']
    output:
        out = '{sample}.annotated.txt'
    params:
        lsfoutfile = '{sample}.annotated.lsfout.log',
        lsferrfile = '{sample}.annotated.lsferr.log',
        scratch = config['tools']['bedtools']['intersect']['scratch'],
        mem = config['tools']['bedtools']['intersect']['mem'],
        time = config['tools']['bedtools']['intersect']['time']
    threads:
        config['tools']['bedtools']['intersect']['threads']
    benchmark:
        '{sample}.annotated.benchmark'
    shell:
        # NOTE(review): the temp1/temp2 intermediates are not removed here -
        # confirm whether they should be cleaned up.
        '{config[tools][bedtools][call]} intersect -a {input.inRes} -b {input.inDB} -wa -wb > {output.out}.temp1.txt ; ' +
        '{config[tools][bedtools][call]} intersect -a {input.inRes} -b {input.inDB} -v -wa > {output.out}.temp2.txt ; ' +
        'cat {output.out}.temp1.txt {output.out}.temp2.txt | sort -k1,1 -k2,2n > {output.out}'
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 2 15:31:04 2018
@author: Tuong Lam & Simon Nilsson
"""
import imageio as imio
import numpy as np
import re
import os
import sys
def fingerprint_parser(index_file_dir, index_file_name):
    """ Parser for Precise Biometrics fingerprint database with alignment data.
    Input:
    index_file_dir - directory of the index file (ending with a forward slash "/")
    index_file_name - name of the index file
    Returns: lists with information used in the siamese network for fingerprint
    verification: (person_id, finger_id, fingerprints, translation, rotation),
    one entry per fingerprint image.
    """
    person_id = []     # subject id per image
    finger_id = []     # finger id per image; a negative sign is used as a marker (see below)
    fingerprints = []  # flattened image pixel arrays
    translation = []   # [dx, dy] alignment offset per image
    rotation = []      # alignment rotation per image
    new_origin_counter = 0  # images seen since the last sign-marker reset
    curr_finger = 0
    with open(index_file_dir + index_file_name,"r") as file:
        for line in file:
            words = re.split("\t",line)
            # Skip comment lines ("#") and rows with too few columns.
            if len(words) > 4 and words[0] != "#":
                if len(words[4]) > 40: # only consider data that contains alignment information
                    curr_finger = int(words[1])
                    # Last column is "<image path>:<alignment values>".
                    last_word = re.split(":",words[-1])
                    alignment_word = last_word[-1].split()
                    person_id.append(int(words[0]))
                    fingerprint_path = last_word[0].strip()
                    # if counter % 100 == 0:
                    # print(counter)
                    finger = imio.imread(index_file_dir + fingerprint_path)
                    fingerprints.append(np.ndarray.flatten(np.array(finger)))
                    translation.append([int(alignment_word[1]),int(alignment_word[2])])
                    rotation.append(int(alignment_word[3]))
                    # A negative finger id marks impressions sharing a new
                    # alignment origin. NOTE(review): 46 is presumably the
                    # number of impressions per origin in this database -
                    # confirm against the dataset documentation.
                    if new_origin_counter == 46:
                        finger_id.append(-curr_finger)
                        new_origin_counter = 0
                    elif len(finger_id) > 0 and curr_finger == -finger_id[-1]:
                        # Still inside a run whose marker was already emitted.
                        finger_id.append(-curr_finger)
                    elif len(finger_id) > 0 and curr_finger != finger_id[-1]:
                        # A different finger starts: reset the counter.
                        finger_id.append(curr_finger)
                        new_origin_counter = 0
                    else:
                        finger_id.append(curr_finger)
                    new_origin_counter += 1
    return person_id, finger_id, fingerprints, translation, rotation
def main(argv):
    """Parse the fingerprint index file named by argv (argv[0] = directory,
    argv[1] = file name) and save the finger-id array next to this script.

    The other parsed arrays (person ids, images, alignment data) are
    currently disabled via the commented-out lines below.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__)) # directory of file being executed
    person_id, finger_id, fingerprints, translation, rotation = fingerprint_parser(argv[0],argv[1])
    # convert to numpy arrays and corrects scaling
    # person_id = np.array(person_id,dtype='int32')
    finger_id = np.array(finger_id,dtype='int32')
    # fingerprints = np.array(fingerprints,dtype='float32')/255
    # translation = np.array(translation,dtype='float32')/256
    # rotation = np.array(rotation,dtype='float32')/65536*360
    # save paths
    # filename_1 = dir_path + "/person_id_new"
    filename_2 = dir_path + "/finger_id_mt_vt_112_new"
    # filename_3 = dir_path + "/fingerprints_new"
    # filename_4 = dir_path + "/translation_new"
    # filename_5 = dir_path + "/rotation_new"
    # saves numpy arrays
    # np.save(filename_1,person_id)
    np.save(filename_2,finger_id)
    # np.save(filename_3,fingerprints)
    # np.save(filename_4, translation)
    # np.save(filename_5, rotation)


if __name__ == "__main__":
    main(sys.argv[1:])
|
# CSE 101 - IP HW2
# K-Map Minimization
# Name: DUSHYANT PANCHAL
# Roll Number: 2018033
# Section: A
# Group: 1
# Date: 12-10-2018
import unittest
from HW2_2018033 import *
class testpoint(unittest.TestCase):
    """Regression tests for minFunc, the K-map boolean minimizer."""

    # Each case: (number of variables, "(minterms) d (dont-cares)" spec,
    #             expected minimized sum-of-products expression).
    cases = [
        (3, '(0,1,2,3,4,5,6,7) d -', "1"),
        (3, '() d (0,1,2)', "0"),
        (2, '(0,3) d (2)', "x' + w"),
        (2, '(0) d (1,2)', "0"),
        (4, '(0,1,2,4,5,6,8,9,12,13,14) d -', "x.z' + w'.z' + y'"),
        (4, '(1,3,7,11,15) d (0,2,5)', "y.z + w'.z OR w'.x' + y.z"),
        (3, '(1,3,4,6,7) d -', "w'.y + x.y + w.y'"),
        (3, '(3,4,6,7) d -', "x.y + w.y'"),
    ]

    def test_minFunc(self):
        for num_vars, spec, expected in self.cases:
            self.assertEqual(minFunc(num_vars, spec), expected)


if __name__ == '__main__':
    unittest.main()
|
# User-facing error/status messages shared across the API layer.
MISSING_TOKEN = 'Missing access token'
INVALID_TOKEN = 'Invalid access token'
INVALID_PASSWORD = 'Your password is invalid, please try again'
UNAUTHORIZED = 'The user is not authorized to perform this action'
LOGGED_IN = 'You already logged in'
INVALID_CREDENTIALS = 'Invalid username or password'
USERNAME_DUPLICATED = 'Username already exists'
CATEGORY_DUPLICATED = 'Category name already exists'
CATEGORY_NOT_FOUND = 'Category not found'
# Fixed copy-paste bug: this previously repeated the category message.
ITEM_DUPLICATED = 'Item name already exists'
ITEM_NOT_FOUND = 'Item not found'
|
#!/usr/bin/env python
# Author: Giulia Muzio
'''
Script for saving the permuted genes;
the permutation is done by following
the degree-permutation strategy
File in input:
- 'data/ppi.pkl': PPI network in form of pandas DataFrame. The
keys of the data frame are the names of the
genes included in the network. The values of
the dataframe is the non-weighted adjacency
matrix of the PPI network, e.g. 1 when there is
an edge between the 2 genes, and 0 when there is
no edge, e.g. no interaction
- 'data/gene_name.pkl': numpy vector containing the names of the genes
included in the network
- 'output/permutations/permuted_genes/': string where to save the permuted
genes.
Command-line arguments:
--nperm: integer; the number of permutations to perform
minimum should be 1000.
'''
import argparse
import os
import re

import numpy as np
import pandas as pd

from utils import *
def main(args):
    """Load the PPI network and its gene names, then generate and save the
    degree-preserving gene permutations."""
    nperm = args

    # Input data.
    network = load_file('data/ppi.pkl')          # adjacency-matrix DataFrame
    gene_name = load_file('data/gene_name.pkl')  # gene identifiers

    outdir = 'output/permutations/permuted_genes/'

    # Degree-preserving permutation of the gene labels.
    degree_permutations(network, gene_name, nperm, outdir)
    return 0
def degree_permutations(network_df, gene_name, nperm, outdir='output/permutations/permuted_genes/', minimum=5):
    '''
    Perform degree-preserving permutations of the gene labels: genes with
    the same (or similar) degree are shuffled among themselves, and each
    permutation is saved into the output folder.

    Input
    -------------------
    network_df: pandas dataframe containing the network
                in form of adjacency matrix
    gene_name:  array of gene names, aligned with the matrix rows/columns
    nperm:      number of permutations
    outdir:     directory where the permuted gene arrays are saved.
                (New parameter: the function previously referenced an
                undefined global `outdir`, and main() passed the output
                directory into the `minimum` slot.)
    minimum:    minimum cardinality of genes in the same swapping group

    Output
    -------------------
    0 on success; writes 'genes_<j>.pkl' files into outdir.
    '''
    # Degree of each gene (matrix is symmetric, so either axis works).
    network = network_df.values
    degree = network.sum(axis = 0)
    deg, count = np.unique(degree, return_counts = True)

    # Build "swapping groups":
    # - degrees with at least `minimum` genes form their own group;
    # - rarer degrees are merged with neighbouring degrees - starting from
    #   the high end, where cardinalities are usually small - until the
    #   merged group holds at least `minimum` genes.
    swapping_group = []
    temp, counts, flag = [], 0, False
    for d, c in zip(deg[::-1], count[::-1]):
        if((c < minimum) | (flag == True)):
            temp.append(d)
            flag = True
            counts += c
            if (counts >= minimum):
                swapping_group.append(temp)
                flag = False
                counts = 0
                temp = []
            elif(d == deg[0]):
                # Lowest degree reached but the group is still too small:
                # attach it to the previously completed group.
                # NOTE(review): this nests a list inside the previous group
                # and the scalar/list handling below does not flatten it -
                # confirm this case cannot occur with real data.
                swapping_group[-1] = [swapping_group[-1], d]
        else:
            swapping_group.append(d)
    swapping_group = swapping_group[::-1]

    # Actual degree-preserving permutation: within each swapping group the
    # gene labels are shuffled, and the full permuted vector is saved.
    for j in range(nperm):
        tot_perm = np.empty(len(gene_name)).astype(object)
        for d in swapping_group:
            # np.unique yields numpy scalars, so the old `type(d) is int`
            # test was always False and scalar groups crashed when iterated;
            # distinguish merged groups (lists) from single degrees instead.
            if(not isinstance(d, list)):
                idx = np.where(degree == d)[0]
            else:
                idx = np.array([]).astype(int)
                for d_ in d:
                    idx = np.concatenate((idx, np.where(degree == d_)[0]))
            genes = gene_name[idx]
            perm = np.random.permutation(genes)
            tot_perm[idx] = perm
        # Save the permutation.
        if(not os.path.exists(outdir)):
            os.makedirs(outdir)
        save_file(outdir + 'genes_' + str(j) + '.pkl', tot_perm)
    return 0
def parse_arguments():
    """Parse the command-line options.

    Returns the requested number of permutations (``--nperm``,
    default 1000).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--nperm', required=False, default=1000, type=int)
    return parser.parse_args().nperm
if __name__ == '__main__':
    # Script entry point: parse --nperm, then run the permutations.
    arguments = parse_arguments()
    main(arguments)
|
from collections import deque
from threading import Condition
from typing import BinaryIO, Deque, Optional
from pytils.mixins import DaemonHandler
from ._base import IOReceiver, IOSender
__all__ = [
    'QueuedReceiver',
    'QueuedSender',
]

# Default capacity of the internal message queues; deque(maxlen=...) silently
# drops the oldest entry once this bound is exceeded.
_DEFAULT_MAX_QUEUE_SIZE = 4096
class QueuedSender(DaemonHandler, IOSender):
    """Sender that buffers outgoing messages in a bounded queue and drains
    them to the destination stream from the DaemonHandler loop."""

    def __init__(self, dst: BinaryIO, max_queue_size: int = _DEFAULT_MAX_QUEUE_SIZE):
        super().__init__(dst)
        # Set by close(); checked by is_active() and the wait predicate below.
        self.is_closed = False
        self._cv = Condition()
        # Bounded buffer; when full, appending silently drops the oldest
        # pending message (deque maxlen semantics).
        self._queue = deque(maxlen=max_queue_size)  # type: Deque[bytes]

    def send(self, msg: bytes):
        """Enqueue *msg* for asynchronous sending and wake the drain loop."""
        with self._cv:
            self._queue.append(msg)
            self._cv.notify()

    def is_active(self) -> bool:
        """The handler keeps running until close() is called."""
        return not self.is_closed

    def handle_one(self):
        """Wait for a queued message (or close), then send one message."""
        with self._cv:
            self._cv.wait_for(lambda: self.is_closed or self._queue)
            # Pass the wakeup along in case another waiter is also blocked.
            self._cv.notify()
            if self._queue:
                super().send(self._queue.popleft())

    def close(self):
        """Stop the sender and wake any threads blocked in handle_one().
        NOTE(review): messages still queued at close time may not be
        flushed - confirm that is acceptable to callers."""
        self.is_closed = True
        with self._cv:
            self._cv.notify_all()
class QueuedReceiver(DaemonHandler, IOReceiver):
    """Receiver whose DaemonHandler loop reads messages from the source
    stream into a bounded queue; receive() pops them for consumers."""

    def __init__(self, src: BinaryIO, max_queue_size: int = _DEFAULT_MAX_QUEUE_SIZE):
        super().__init__(src)
        # Set when the underlying receiver yields None (end of stream).
        self.is_closed = False
        self._cv = Condition()
        # Bounded buffer; when full, appending silently drops the oldest
        # unread message (deque maxlen semantics).
        self._queue = deque(maxlen=max_queue_size)  # type: Deque[bytes]

    def receive(self) -> Optional[bytes]:
        """Block until a message is available or the stream is closed.
        Returns the next message, or None once closed and drained."""
        with self._cv:
            self._cv.wait_for(lambda: self.is_closed or self._queue)
            # Pass the wakeup along in case another consumer is also blocked.
            self._cv.notify()
            return self._queue.popleft() if self._queue else None

    def is_active(self) -> bool:
        """The handler keeps running until the source stream ends."""
        return not self.is_closed

    def handle_one(self):
        """Read one message from the stream; a None result marks closure."""
        msg = super().receive()
        with self._cv:
            if msg is not None:
                self._queue.append(msg)
            else:
                self.is_closed = True
            self._cv.notify()
|
from .models import Post, Comment, Category, SubCategory, Tag
from django.views.generic import ListView, DetailView, FormView
from django.views.generic.edit import ModelFormMixin
from django.shortcuts import redirect, get_object_or_404
from .forms import CommentCreateForm, PostSearchForm
from django.shortcuts import render
from django.db.models import Q
# Base class for post list views
class BaseListView(ListView):
    """Common list view: posts newest-first with the subcategory joined in."""

    # Number of posts per page.
    paginate_by = 5

    def get_queryset(self):
        """Posts ordered by creation time (newest first), with the related
        subcategory fetched in the same query."""
        return (
            Post.objects
            .select_related("target_subcategory")
            .order_by("-created_datetime")
        )
# TOP
class TopView(BaseListView):
    """Front page: the base queryset, optionally narrowed by a search
    keyword from the global search form."""

    def get_queryset(self):
        global_form = PostSearchForm(self.request.GET)
        global_form.is_valid()
        # .get() instead of [...]: "keyword" is absent from cleaned_data when
        # that field fails validation, which previously raised KeyError.
        keyword = global_form.cleaned_data.get("keyword")
        queryset = super().get_queryset()
        if keyword:
            # Every whitespace-separated word must match title or body.
            # NOTE(review): title uses icontains but main_sentence uses
            # case-sensitive contains - confirm this asymmetry is intended.
            for word in keyword.split():
                queryset = queryset.filter(
                    Q(title__icontains=word) | Q(main_sentence__contains=word)
                )
        return queryset
# Filter posts by subcategory
class SubCategoryView(BaseListView):
    """Posts filtered to a single subcategory, taken from the URL kwarg."""

    def get_queryset(self):
        subcategory_name = self.kwargs["subcategory"]
        # 404 on unknown subcategory names instead of an uncaught
        # DoesNotExist, consistent with get_object_or_404 usage elsewhere
        # in this module.
        self.target_subcategory = get_object_or_404(SubCategory, name=subcategory_name)
        return super().get_queryset().filter(target_subcategory=self.target_subcategory)

    def get_context_data(self, *args, **kwargs):
        """Expose the current subcategory to the template."""
        context = super().get_context_data(*args, **kwargs)
        context["subcategory"] = self.target_subcategory
        return context
# Filter posts by tag
class TagView(BaseListView):
    """Posts filtered to a single tag, taken from the URL kwarg."""

    def get_queryset(self):
        tag_name = self.kwargs["t"]
        # 404 on unknown tag names instead of an uncaught DoesNotExist,
        # consistent with get_object_or_404 usage elsewhere in this module.
        self.tag = get_object_or_404(Tag, name=tag_name)
        return super().get_queryset().filter(tag=self.tag)

    def get_context_data(self, *args, **kwargs):
        """Expose the current tag to the template."""
        context = super().get_context_data(*args, **kwargs)
        context["tag"] = self.tag
        return context
# Post detail page, with an embedded comment form.
class PostDetailView(ModelFormMixin, DetailView):
    """Shows one post and handles submission of its comment form."""
    model = Post
    form_class = CommentCreateForm
    template_name = "app/post_detail.html"
    def form_valid(self, form):
        """Attach the new comment to this post, save, and redirect back."""
        post_pk = self.kwargs["pk"]
        comment = form.save(commit=False)
        comment.post = get_object_or_404(Post, id = post_pk)
        comment.save()
        return redirect("app:post_detail", pk=post_pk)
    def post(self, request, *args, **kwargs):
        """Validate the comment form; redisplay the page on errors."""
        form = self.get_form()
        if form.is_valid():
            return self.form_valid(form)
        else:
            # form_invalid re-renders the detail template, which needs
            # self.object (the Post) to be populated first.
            self.object = self.get_object()
            return self.form_invalid(form)
    def get_context_data(self, **kwargs):
        """Add category/subcategory lists and the global search form."""
        context = super(PostDetailView, self).get_context_data(**kwargs)
        context["category_list"] = Category.objects.all()
        context["subcategory_list"] = SubCategory.objects.all()
        global_form = PostSearchForm()
        context["global_form"] = global_form
        return context
|
def helloWorld(string):
    """Print *string* to stdout."""
    print(string)


if __name__ == '__main__':
    greeting = "Hello World"
    helloWorld(greeting)
|
import os
import Game.config.internal as internal
import Game.program.misc.exceptions as exceptions
import Game.program.misc.sdl as sdl
class BaseOverlay:
    """Base class for overlays: a named sub-screen with its own surface,
    plus flags controlling whether it is drawn and whether it listens for
    input. Acts as the endpoint of cooperative super() chains.
    """
    def __init__(self, name, location, size, background_color, *args, **kwargs):
        super(BaseOverlay, self).__init__(*args, **kwargs)
        # Name of the overlay!
        self.name = name
        # Where it is visually on the screen.
        self.location = sdl.Rect(location, size)
        # What it is visually, on the screen.
        self.screen = sdl.Surface(size)
        self.screen.set_offset(location)
        # The background colour of its screen
        self.background_color = background_color
        # The keys (on the keyboard) that should give KEYDOWN events whilst being *held* down.
        self.listen_keys = set()
        # The mouse buttons that should give MOUSEBUTTONDOWN events whilst being *held* down.
        self.listen_mouse = set()
        # Whether the screen is visible
        self.screen_enabled = False
        # Whether the interface should listen for inputs
        self.listen_enabled = False
        # The game itself
        self._game_instance = None
        # An interface (technical term) to the overall interface (non-technical term) (!)
        self._interface_overlayer = None
        # Whether an overlay should be closed if it loses the selection
        self.must_be_top = False
        self.reset()
    def reset(self):
        """Restore the overlay to a blank state. Endpoint for super calls."""
        self.wipe()
    def register_interface(self, interface_overlayer):
        """Record the overlayer this overlay belongs to."""
        self._interface_overlayer = interface_overlayer
    def handle(self, event):
        """Process an input event. Subclasses must implement this."""
        raise NotImplementedError
    def output(self, *args, **kwargs):
        """Draw something onto this overlay. Subclasses must implement this."""
        raise NotImplementedError
    def wipe(self):
        """Fills the screen with its background color."""
        self.screen.fill(self.background_color)
    def enable(self, state=True):
        """Sets the enabled attributes to 'state', or True if no 'state' argument is passed."""
        self.screen_enabled = state
        self.listen_enabled = state
    def disable(self):
        """Sets the enabled attributes to False."""
        self.enable(False)
    def toggle(self):
        """Toggles whether the overlay is enabled."""
        # Both flags are derived from screen_enabled so they end up in sync
        # even if listen_enabled was toggled independently beforehand.
        self.listen_enabled = not self.screen_enabled  # Deliberately screen_enabled, not listen_enabled
        self.screen_enabled = not self.screen_enabled
    def enable_listener(self, state=True):
        """Sets the listener_enabled attribute to 'state', or True if no 'state' argument is passed."""
        self.listen_enabled = state
    def disable_listener(self):
        """Sets the listener_enabled attribute to False."""
        self.enable_listener(False)
    def toggle_listener(self):
        """Toggles whether just the listener of the overlay is enabled."""
        self.listen_enabled = not self.listen_enabled
class GraphicsOverlay(BaseOverlay):
    """Overlay that renders by blitting source surfaces onto its screen."""

    def output(self, source, dest=(0, 0), area=None, special_flags=0, offset=None, *args, **kwargs):
        """Blit *source* onto this overlay's screen, shifted by *offset*."""
        if offset is None:
            target = dest
        else:
            target = (dest[0] - offset.x, dest[1] - offset.y)
        self.screen.blit_offset(source, target, area, special_flags)
class AlignmentMixin:
    """Provides methods for placing objects on the instances's screen. The instance must already have a screen for this
    to work - this mixin does not provide one."""
    def _align(self, image_rect, horz_alignment=internal.Alignment.CENTER, vert_alignment=internal.Alignment.CENTER):
        """Takes a rectangle and some alignment options and returns the coordinates that the top left of the rectangle
        should be at on the instance's screen.
        :Rect image_rect: pygame.Rect instance of the object to be placed.
        :str horz_alignment: Optional string describing where the object should be placed.
        :str vert_alignment: Optional string describing where the object should be placed.
        """
        screen_rect = self.screen.get_rect()
        if horz_alignment == internal.Alignment.LEFT:
            horz_pos = 0
        elif horz_alignment == internal.Alignment.RIGHT:
            horz_pos = screen_rect.width - image_rect.width
        elif horz_alignment == internal.Alignment.CENTER:
            horz_pos = (screen_rect.width - image_rect.width) // 2
        else:
            # Not a recognised alignment constant: treated as an explicit
            # pixel coordinate.
            horz_pos = horz_alignment
        if vert_alignment == internal.Alignment.TOP:
            vert_pos = 0
        elif vert_alignment == internal.Alignment.BOTTOM:
            vert_pos = screen_rect.height - image_rect.height
        elif vert_alignment == internal.Alignment.CENTER:
            vert_pos = (screen_rect.height - image_rect.height) // 2
        else:
            # Explicit pixel coordinate, as above.
            vert_pos = vert_alignment
        return horz_pos, vert_pos
    def _view_rect(self, image_rect, horz_alignment=internal.Alignment.CENTER, vert_alignment=internal.Alignment.CENTER):
        """Takes a rectangle and some alignment options and returns the rectangle, translated according to the alignment
        options."""
        horz_pos, vert_pos = self._align(image_rect, horz_alignment, vert_alignment)
        moved_image_rect = sdl.Rect(horz_pos, vert_pos, image_rect.width, image_rect.height)
        return moved_image_rect
    def _view(self, image_rect, horz_alignment=internal.Alignment.CENTER, vert_alignment=internal.Alignment.CENTER):
        """As _align, but returns a subsurface of the instance's screen corresponding to where :image_rect: should be
        placed."""
        moved_image_rect = self._view_rect(image_rect, horz_alignment, vert_alignment)
        return self.screen.subsurface(moved_image_rect)
    def _view_cutout(self, target, horz_alignment=internal.Alignment.CENTER, vert_alignment=internal.Alignment.CENTER):
        """As _view, but instead wires up an already created screen using cutouts."""
        cutout_rect = self._view_rect(target.get_rect(), horz_alignment, vert_alignment)
        self.screen.cutout(cutout_rect, target)
def font(font_filepath, font_size, font_color):
    """Wrapper around pygame's fonts: load a font file from the data
    directory and preset its foreground colour and padding."""
    data_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'data')
    loaded_font = sdl.freetype.Font(os.path.join(data_dir, font_filepath), font_size)
    loaded_font.fgcolor = font_color
    loaded_font.pad = True
    return loaded_font
class FontMixin:
    """Allows for using fonts, for text."""

    def __init__(self, font, *args, **kwargs):
        self.font = font
        super(FontMixin, self).__init__(*args, **kwargs)

    def render_text(self, text):
        """Render a single line of text, returning just the surface."""
        surface, _rect = self.font.render(text)
        return surface

    def render_text_with_newlines(self, text_pieces, background=(255, 255, 255)):
        """Render each piece on its own line, stacked onto one surface."""
        if len(text_pieces) == 0:
            raise exceptions.ProgrammingException
        line_height = self.font.get_sized_height()
        rendered = [self.render_text(piece) for piece in text_pieces]
        widest = max(piece.get_rect().width for piece in rendered)
        combined = sdl.Surface((widest, line_height * len(rendered)))
        combined.fill(background)
        for row, piece in enumerate(rendered):
            combined.blit(piece, (0, row * line_height))
        return combined
|
# A school coding contest runs a comment giveaway: among the commenters,
# one random person wins a chicken coupon and three win coffee coupons.
# 20 people commented; for simplicity their IDs are assumed to be 1-20.
# Winners are drawn at random regardless of comment content, without
# duplicates.
# (Example output)
# -- 당첨자 발표 --
# 치킨 당첨자 : 5
# 커피 당첨자 :[2,3,4]
# -- 축하합니다 --
# Explicit import instead of the original wildcard `from random import *`,
# which pollutes the module namespace.
from random import sample

users = list(range(1, 21))
# sample() draws 4 distinct IDs: index 0 wins chicken, the rest win coffee.
winners = sample(users, 4)
print("-- 당첨자 발표 --")
print("치킨 당첨자 : {0}".format(winners[0]))
print("커피 당첨자 : {0}".format(winners[1:]))
print("-- 축하합니다 --")
from functools import wraps
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0, parentdir)
from common import music_graph_client as mgc
from common.user_base import UserBase
from server import music_graph_helper as mgh, user_library as ul, server
def apitest(f):
    """Decorator for ad-hoc API tests: prints the wrapped function, its
    non-None result, and any raised exception instead of propagating it.
    Always returns None from the wrapper."""
    @wraps(f)
    def _wrap(*args, **kwargs):
        try:
            print(f)
            result = f(*args, **kwargs)
            if result is not None:
                print(result)
        except Exception as exc:
            print(f"{type(exc)}:{exc}")
    return _wrap
def test_mgc_sweep(user):
    """Create a playlist for every candidate the service offers the user."""
    possible = mgc.get_possible_playlists(user)
    for playlist_type, descriptors in possible.items():
        for descriptor in descriptors:
            print('creating playlist %s in %s' % (descriptor['name'], playlist_type))
            mgc.create_playlist(user, playlist_type, 60*60*1000, descriptor['plid'])
def test_mgc(user, user_unk):
    """Exercise the music-graph client with a known and an unknown user.

    Every call is wrapped in apitest, so failures are printed rather than
    raised.
    """
    apitest(mgc.get_library)(user)
    apitest(mgc.get_library)(user_unk)
    apitest(mgc.get_possible_playlists)(user)
    apitest(mgc.get_possible_playlists)(user, 'fall_asleep')
    apitest(mgc.get_possible_playlists)(user, 'wake_up')
    apitest(mgc.create_playlist)(user, 'fall_asleep', 60*60*1000,828)
def test_get_best_playlist_id(user, playlist_type):
    """Exercise playlist selection repeatedly for one user/playlist type.

    Loads the user's library, scores its genres for the given playlist
    type, then asks for the best playlist id more times than there are
    candidates to exercise the keep_n_last rotation logic.
    """
    server.start()
    library = ul.load_library(user.spotify_id)
    # Clear cached per-user library properties so selection starts fresh.
    ul._delete_library(user.spotify_id, ul.UserLibraryProps)
    lib_song_features, _ = mgh.load_user_library_song_features(library)
    lib_song_features, _, _ = mgh.prepare_songs(lib_song_features, mgh.G.features_scaler)
    if playlist_type == 'fall_asleep':
        top_sleepys, _ = mgh.compute_sleep_genres(lib_song_features, library.artists)
    else:
        top_sleepys, _ = mgh.compute_wakeup_genres(lib_song_features, library.artists)
    for _ in range(10):
        playlist_id = ul.get_best_playlist_id(playlist_type, library, top_sleepys, keep_n_last=4)
        print('got plid %i' % playlist_id)
    ul._delete_library(user.spotify_id, ul.UserLibraryProps)
    # keep_n_last larger than the candidate count forces wrap-around.
    for _ in range(len(top_sleepys)+5):
        playlist_id = ul.get_best_playlist_id(playlist_type, library, top_sleepys, keep_n_last=len(top_sleepys)+1)
        print('got plid %i' % playlist_id)
# Load a real test account, plus a copy with an unknown spotify id to
# exercise the not-found paths.
user = UserBase.from_file('test_accounts/rudolfix-us.json')
user_unk = UserBase.from_file('test_accounts/rudolfix-us.json')
user_unk.spotify_id = 'unk'
apitest(test_mgc_sweep)(user)
# test_get_best_playlist_id(user, 'fall_asleep')
|
## ***************************************
# Authors: Jing Xie, Xin Li
# Updated Date: 2019-4-30
# Emails: jing.xie@ucdconnect.ie
# ***************************************
import re
import json
import os.path
import ntpath
class DssParser:
    """Parses OpenDSS circuit files, collecting transformers/lines,
    switches and loads into plain-dict lists for downstream use."""
    def __init__(self, master_file):
        self.master_file = ntpath.basename(master_file)
        self.file_path = ntpath.dirname(master_file)
        self.master_file_noext, _ = os.path.splitext(master_file)
        # Lines whose Length is at or below this are treated as switches.
        self.switch_line_threshold = 0.005
        # State for transformers defined across several '~ wdg=' lines.
        self.special_transformer_flag = 0
        self.special_transformer = self.Transformer(0,0,0)
        self.lines_list = []
        self.switches_list = []
        self.loads_list = []
    def get_master_file_noext(self):
        """Return the master file path without its extension."""
        return self.master_file_noext
    class Transformer:
        """A two-bus transformer record."""
        def __init__(self, name, bus1, bus2):
            self.name = name
            self.bus1 = bus1
            self.bus2 = bus2
        def __str__(self):
            return json.dumps({"type": "transformer", "name": self.name, "bus1": self.bus1, "bus2": self.bus2})
        def get_dict(self):
            return {"name": self.name, "bus1": self.bus1, "bus2": self.bus2}
    class Line:
        """A two-bus line record."""
        def __init__(self, name, bus1, bus2):
            self.name = name
            self.bus1 = bus1
            self.bus2 = bus2
        def __str__(self):
            return json.dumps({"type": "line", "name": self.name, "bus1": self.bus1, "bus2": self.bus2})
        def get_dict(self):
            return {"name": self.name, "bus1": self.bus1, "bus2": self.bus2}
    class Switch:
        """A two-bus switch record (a very short line)."""
        def __init__(self, name, bus1, bus2):
            self.name = name
            self.bus1 = bus1
            self.bus2 = bus2
        def __str__(self):
            return json.dumps({"type": "switch", "name": self.name, "bus1": self.bus1, "bus2": self.bus2})
        def get_dict(self):
            return {"name": self.name, "bus1": self.bus1, "bus2": self.bus2}
    class Load:
        """A load record: bus plus real (kW) and reactive (kvar) power."""
        def __init__(self, name, bus1, kW, kvar):
            self.name = name
            self.bus1 = bus1
            self.kW = kW
            self.kvar = kvar
        def __str__(self):
            return json.dumps({"type": "load", "name": self.name, "bus1": self.bus1, "kw": self.kW, "kvar": self.kvar})
        def get_dict(self):
            return {"name": self.name, "bus1": self.bus1, "kw": self.kW, "kvar": self.kvar}
    def parse_transformer(self, line):
        """Parse a one-line 'new transformer ... buses=[b1 b2]' statement
        and record it (transformers are stored in lines_list)."""
        if 'new transformer' in line:
            # print("transformer found")
            name = re.match(r'.*new transformer.(.*?) .*', line, re.M | re.I).group(1)
            bus1 = re.match(r'.* buses=\[(.*?) (.*?)\] .*', line, re.M | re.I).group(1)
            bus2 = re.match(r'.* buses=\[(.*?) (.*?)\] .*', line, re.M | re.I).group(2)
            #[This helps with removing the leading and trailing space. However, there may be other tricky formats/symbols need to be handled.]
            #[@TODO: In the DSS file, there cannot be space between the '=' and bus ID, e.g., "bus= 61s" will not work.]
            bus2 = bus2.strip()
            cur_trans = self.Transformer(name, bus1, bus2)
            print(cur_trans)
            self.lines_list.append(cur_trans.get_dict()) # [Note that transformers are counted as lines.]
        else:
            pass
    def parse_line_or_switch(self, line):
        """Parse a 'New Line' statement; lines at or below the length
        threshold are classified as switches."""
        if 'New Line' in line:
            length = re.match(r'.*Length=(.*?)[ |\n]', line, re.M | re.I).group(1)
            name = re.match(r'.*NEW Line.(.*?) .*', line, re.M | re.I).group(1)
            #[Solved: It is good to be able to handle this case, in which the line name has space and is quoted.
            # e.g., New Line."MG1 Circuit Breaker". Although it is uncertain whether this is allowed in OpenDSS or not.]
            if '"' in name:
                name = re.match(r'.*NEW Line."(.*?)" .*', line, re.M | re.I).group(1)
            bus1 = re.match(r'.*Bus1=(.*?) .*', line, re.M | re.I).group(1)
            bus2 = re.match(r'.*Bus2=(.*?) .*', line, re.M | re.I).group(1)
            if float(length) > self.switch_line_threshold:
                # print("Line found")
                cur_line = self.Line(name, bus1, bus2)
                print(cur_line)
                self.lines_list.append(cur_line.get_dict())
            else:
                # print("Switch found")
                cur_switch = self.Switch(name, bus1, bus2)
                print(cur_switch)
                self.switches_list.append(cur_switch.get_dict())
        else:
            pass
    def parse_load(self, line):
        """Parse a 'New Load' statement into a Load record."""
        if 'New Load' in line:
            name = re.match(r'.*New Load.(.*?) .*', line, re.M | re.I).group(1)
            bus1 = re.match(r'.*Bus1=(.*?) .*', line, re.M | re.I).group(1)
            kw = re.match(r'.*kw=(.*?) .*', line, re.M | re.I).group(1)
            kvar = re.match(r'.*kvar=(.*?)[ |\n]', line, re.M | re.I).group(1)
            cur_load = self.Load(name, bus1, kw, kvar)
            print(cur_load)
            self.loads_list.append(cur_load.get_dict())
    def read_content(self, file_name):
        """Read a DSS file line by line, following Redirect statements
        recursively and dispatching each statement to the parsers."""
        o_file = open(file_name, 'r')
        for line in o_file.readlines():
            # print(line)
            # '!' marks a comment line in OpenDSS.
            if line[0] == '!':
                continue
            if 'Redirect' in line:
                new_file_name = re.sub("\s+", " ", line).split(' ')[1]
                # NOTE(review): plain concatenation assumes file_path is
                # empty or already ends with a separator -- os.path.join
                # would be safer; confirm how master paths are passed in.
                if os.path.exists(self.file_path + new_file_name):
                    self.read_content(self.file_path + new_file_name)
                continue
            # Multi-line transformer: remember the name here, then pick up
            # bus1/bus2 from the following '~ wdg=1' / '~ wdg=2' lines.
            if 'New Transformer' in line:
                self.special_transformer.name = re.match(r'.*New Transformer.(.*?) .*', line, re.M | re.I).group(1)
                self.special_transformer_flag = 1
                continue
            if self.special_transformer_flag == 1 and '~ wdg=1' in line:
                self.special_transformer.bus1 = re.match(r'.*bus=(.*?) .*', line, re.M | re.I).group(1)
                self.special_transformer_flag = 1
            if self.special_transformer_flag == 1 and '~ wdg=2' in line:
                self.special_transformer.bus2 = re.match(r'.*bus=(.*?) .*', line, re.M | re.I).group(1)
                # print('special transformer found')
                print(self.special_transformer)
                self.lines_list.append(self.special_transformer.get_dict())
                self.special_transformer_flag = 0
                continue
            self.parse_transformer(line)
            self.parse_line_or_switch(line)
            self.parse_load(line)
    def sum_load_microgrid(self, mg_bus_list):
        """Sum kW and kvar over loads whose bus id is in mg_bus_list.

        mg_bus_list should be a collection of bus-id strings; phase
        suffixes (e.g. '48.1.2') are stripped before the membership test.
        """
        mg_kw_total = 0
        mg_kvar_total = 0
        for cur_item in self.loads_list:
            cur_bus_id_str = cur_item["bus1"].split('.')[0]
            if cur_bus_id_str in mg_bus_list:
                mg_kw_total += float(cur_item["kw"])
                mg_kvar_total += float(cur_item["kvar"])
        return mg_kw_total, mg_kvar_total
if __name__ == '__main__':
    dss_file_path_fn = 'IEEE123Master.dss' #Note that the filename cannot end with slash(es)
    p = DssParser(dss_file_path_fn)
    p.read_content(dss_file_path_fn)
    #==Test the function that sums up the total load of a microgrid
    #print(p.loads_list)
    #--MG2
    mg2_bus_list = [48,47,49,50,51,151,
                    44,45,46,
                    42,43,
                    40,41,
                    135,35,39,
                    37,36,38]
    mg2_bus_str_list = list(map(str,mg2_bus_list))
    mg2_kw_total, mg2_kvar_total = p.sum_load_microgrid(mg2_bus_str_list)
    print("Microgrid #2: {} kW, {} kVar".format(mg2_kw_total,mg2_kvar_total))
    #--MG3
    mg3_bus_list = [300,111,
                    108,109,110,112,113,
                    105,106,107,114,
                    101,102,103,104,
                    197]
    mg3_bus_str_list = list(map(str,mg3_bus_list))
    mg3_kw_total, mg3_kvar_total = p.sum_load_microgrid(mg3_bus_str_list)
    print("Microgrid #3: {} kW, {} kVar".format(mg3_kw_total,mg3_kvar_total))
    #--MG4
    mg4_bus_list = [97,98,99,100,450,451,
                    67,68,69,70,71,
                    72,73,74,75,
                    76,77,78,79,
                    96,94,80,85,
                    95,93,91,89,87,86,81,84,
                    195,92,90,88,82,83]
    mg4_bus_str_list = list(map(str,mg4_bus_list))
    mg4_kw_total, mg4_kvar_total = p.sum_load_microgrid(mg4_bus_str_list)
    print("Microgrid #4: {} kW, {} kVar".format(mg4_kw_total,mg4_kvar_total))
    #--MG_ALL
    # BUG FIX: the original str(list(range(1000))) built ONE big string, so
    # the membership test in sum_load_microgrid became substring matching
    # (bus "10" matched "100", "110", ...). Build a list of individual
    # bus-id strings instead, mirroring the other microgrids.
    mg_all_bus_list = list(map(str, range(1000)))
    mg_all_kw_total, mg_all_kvar_total = p.sum_load_microgrid(mg_all_bus_list)
    print("Microgrid #all: {} kW, {} kVar".format(mg_all_kw_total,mg_all_kvar_total))
|
# Databricks notebook source
# MAGIC %run "../helper_functions"
# COMMAND ----------
# Resolve the input table and output path from Databricks notebook widgets.
baseTable = spark.table(dbutils.widgets.get("baseTable_table_path"))
baseUsersCountPath = dbutils.widgets.get("baseUsersCount_table_path")
# baseTable = spark.table('insight.baseTable')
# baseUsersCountPath = 'insight.baseUsersCount'
# COMMAND ----------
def gen_baseUsersCount(baseTable, time_period):
    """Return a dataframe containing unique users in all time periods for the baseTable"""
    if time_period != 'day':
        # Truncate dates to the requested period, then normalise to the
        # first day of the month so every period shares one representation.
        baseTable = (baseTable
                     .withColumn('date', F.date_trunc(time_period, F.col('date')))
                     .withColumn('date', F.trunc(F.col('date'), 'month'))
                     )
    # Count distinct members per (truncated) date and tag the period.
    baseUsersCount_time_period = (baseTable
                                  .groupBy('date')
                                  .agg(F.countDistinct('unique_mem_id').alias('unique_users'))
                                  .withColumn('time_period', F.lit(time_period))
                                  )
    return baseUsersCount_time_period
# COMMAND ----------
time_periods = ['day', 'month', 'year', 'quarter']
inputs = [baseTable]
# Build one dataframe per period via the shared helpers (from the
# ../helper_functions notebook), then union them into one table.
time_period_dfs = gen_time_period_dfs(gen_baseUsersCount, inputs, time_periods)
baseUsersCount = combine_time_periods(time_period_dfs)
# COMMAND ----------
# Persist the combined counts as a Delta table.
baseUsersCount.write.mode('overwrite').format('delta').save(baseUsersCountPath)
# COMMAND ----------
|
# https://leetcode.com/problems/course-schedule-ii/
# 210. Course Schedule II
# There are a total of n courses you have to take, labeled from 0 to n-1.
# Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
# Given the total number of courses and a list of prerequisite pairs, return the ordering of courses you should take to finish all courses.
# There may be multiple correct orders, you just need to return one of them. If it is impossible to finish all courses, return an empty array.
# Example 1:
# Input: 2, [[1,0]]
# Output: [0,1]
# Explanation: There are a total of 2 courses to take. To take course 1 you should have finished
# course 0. So the correct course order is [0,1] .
# Example 2:
# Input: 4, [[1,0],[2,0],[3,1],[3,2]]
# Output: [0,1,2,3] or [0,2,1,3]
# Explanation: There are a total of 4 courses to take. To take course 3 you should have finished both
# courses 1 and 2. Both courses 1 and 2 should be taken after you finished course 0.
# So one correct course order is [0,1,2,3]. Another correct ordering is [0,2,1,3] .
# Note:
# The input prerequisites is a graph represented by a list of edges, not adjacency matrices. Read more about how a graph is represented.
# You may assume that there are no duplicate edges in the input prerequisites.
def findOrder(numCourses, prerequisites):
    """Return one valid course ordering (Kahn's topological sort).

    numCourses: total number of courses, labeled 0..numCourses-1.
    prerequisites: list of [course, prereq] pairs.
    Returns [] when the prerequisite graph contains a cycle.
    """
    from collections import deque

    graph = [[] for _ in range(numCourses)]
    indegree = [0] * numCourses
    for course, prereq in prerequisites:
        indegree[course] += 1
        graph[prereq].append(course)
    # Start from every course that has no prerequisites.
    queue = deque(i for i in range(numCourses) if indegree[i] == 0)
    result = []
    while queue:
        # deque.popleft() is O(1); the original list.pop(0) was O(n)
        # per dequeue, making the whole sort quadratic.
        curr = queue.popleft()
        result.append(curr)
        for nxt in graph[curr]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                queue.append(nxt)
    # Fewer than numCourses processed => a cycle => impossible.
    return result if len(result) == numCourses else []
|
import os
import smtplib
from email.message import EmailMessage
# email addresses and passwords are stored as environment variables to be more secure
# EMAIL_ADDRESS = os.getenv('WORK_EMAIL')
# EMAIL_PASSWORD = os.getenv('WORK_APP_PASS')
def send_text_message(to, message, teacher):
    """Send *message* as a text via a carrier's email-to-SMS gateway.

    to      -- gateway SMS email address (see freecarrierlookup.com); no
               subject is set since SMS messages have none.
    message -- the text message body to send.
    teacher -- identifier (last name recommended) matching the environment
               variables that hold the sender's email and app password.
    """
    # Credentials come from environment variables named '<teacher>_email'
    # and '<teacher>_app_pass' (more secure than hard-coding them).
    EMAIL_ADDRESS = os.getenv(teacher + '_email')
    EMAIL_PASSWORD = os.getenv(teacher + '_app_pass')
    msg = EmailMessage()
    msg['From'] = EMAIL_ADDRESS
    msg['To'] = to
    msg.set_content(message)
    # Connect to gmail's SSL SMTP server...
    with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
        smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
        # ...and send the message
        smtp.send_message(msg)
    # print(f'Sent message to {to}') # optional to see if this is working correctly
#!/usr/bin/env python
import rospy
from flexbe_core import EventState, Logger
class hsr_BooleanDummy(EventState):
    '''
    Example for a state to demonstrate which functionality is available for state implementation.
    This example lets the behavior wait until the given target_time has passed since the behavior has been started.
    -- text string text
    #> follow_me Bool name
    <= continue continue
    '''
    def __init__(self, text):
        # NOTE(review): the 'text' argument is accepted but never stored or
        # used -- confirm whether it is kept only for interface
        # compatibility with the behavior definition.
        super(hsr_BooleanDummy,self).__init__(outcomes=['continue'],output_keys=['follow_me'])
    def execute(self, userdata):
        # Publish True on the follow_me output key and finish immediately.
        userdata.follow_me = True
        return 'continue'
    def on_enter(self, userdata):
        pass
    def on_exit(self, userdata):
        pass
    def on_start(self):
        pass
    def on_stop(self):
        pass
|
"""
Each ListNode holds a reference to its previous node
as well as its next node in the List.
"""
class ListNode:
def __init__(self, value, prev=None, next=None):
self.prev = prev
self.value = value
self.next = next
"""
Our doubly-linked list class. It holds references to
the list's head and tail nodes.
"""
class DoublyLinkedList:
def __init__(self, node=None):
self.head = node
self.tail = node
self.length = 1 if node is not None else 0
def __len__(self):
return self.length
"""
Wraps the given value in a ListNode and inserts it
as the new head of the list. Don't forget to handle
the old head node's previous pointer accordingly.
"""
def add_to_head(self, value):
#create new node to add to list
newNode = ListNode(value)
# check if empty list
if self.length == 0:
#sets the prev and next to None
newNode.next = None
newNode.prev = None
#makes the new node both head and tail since it's the only node in the list
self.head = newNode
self.tail = newNode
#check if only 1 element in list
elif self.length == 1:
#make new node the head
self.head = newNode
self.head.next = self.tail
self.head.prev = None
#make the tail point to the new head
self.tail.prev = self.head
else:
#saves the old head node into a value
old_head = self.head
#changes the new node to be the head
self.head = newNode
self.head.next = old_head
self.head.prev = None
#old head node previous link to the new head node
old_head.prev = self.head
#increments the length attribute after adding node to list
self.length += 1
"""
Removes the List's current head node, making the
current head's next node the new head of the List.
Returns the value of the removed Node.
"""
def remove_from_head(self):
#check if list is empty
if self.length == 0:
return None
#check if one element in list
elif self.length == 1:
#save the value we are removing to return later
removed = self.head.value
#set the head and tail to none since we are removing the only element in the list
self.head = None
self.tail = None
self.length -= 1
#return removed value
return removed
else:
old_head = self.head
self.head = old_head.next
self.head.prev = None
self.length -= 1
return old_head.value
"""
Wraps the given value in a ListNode and inserts it
as the new tail of the list. Don't forget to handle
the old tail node's next pointer accordingly.
"""
def add_to_tail(self, value):
#creates new node with given value
new_node = ListNode(value)
#check if empty list
if self.length == 0:
#set new node as head and tail
self.head = new_node
self.tail = new_node
#check if one element in list
elif self.length == 1:
#set new node as tail
self.tail = new_node
#set tail prev as current head
self.tail.prev = self.head
#set current head next as current tail
self.head.next = self.tail
else:
#save old tail into a variable
old_tail = self.tail
#set new node as the current tail
self.tail = new_node
#set the old tail next to point to the current tail
old_tail.next = self.tail
#set the current tail previous to point to the old tail
self.tail.prev = old_tail
#increase length of list
self.length += 1
"""
Removes the List's current tail node, making the
current tail's previous node the new tail of the List.
Returns the value of the removed Node.
"""
def remove_from_tail(self):
#check if empty list
if self.length == 0:
return None
#check if list has only 1 element
elif self.length == 1:
#save removed node
removed = self.tail
self.head = None
self.tail = None
self.length -= 1
return removed.value
else:
#save old tail ndoe
removed = self.tail
#set new tail and old tail's prev pointer
self.tail = removed.prev
#set new tail next as None
self.tail.next = None
self.length -= 1
return removed.value
"""
Removes the input node from its current spot in the
List and inserts it as the new head node of the List.
"""
def move_to_front(self, node):
#check that length has more than 1 element
if self.length > 1:
#checks if the node is the current tail
if self.tail == node:
self.tail = node.prev
self.tail.next = None
old_head = self.head
old_head.prev = node
self.head = node
self.head.prev = None
self.head.next = old_head
else:
old_head = self.head
old_head.prev = node
self.head = node
self.head.prev = None
self.head.next = old_head
"""
Removes the input node from its current spot in the
List and inserts it as the new tail node of the List.
"""
def move_to_end(self, node):
#checks that list has more than 1 element
if self.length > 1:
#checks if node is head
if self.head == node:
#changes the head to the next node in the list
self.head = node.next
self.head.prev = None
#saves the old tail node as a variable
old_tail = self.tail
#set old tail next as new tail
old_tail.next = node
#set node as new tail
self.tail = node
#set current tail next as None and prev as old tail
self.tail.next = None
self.tail.prev = old_tail
else:
old_tail = self.tail
#set old tail next as new tail
old_tail.next = node
#set node as new tail
self.tail = node
#set current tail next as None and prev as old tail
self.tail.next = None
self.tail.prev = old_tail
"""
Deletes the input node from the List, preserving the
order of the other elements of the List.
"""
def delete(self, node):
#check if list is empty
if self.length == 0:
return None
elif self.length == 1:
self.head = None
self.tail = None
self.length -= 1
else:
removed = node
#checks if node is head
if self.head == node:
self.head = node.next
self.head.prev = None
#checks if node is tail
elif self.tail == node:
self.tail = node.prev
self.tail.next = None
else:
#changes previous node to point to next node
node.prev.next = node.next
#changes next node to point to previous node
node.next.prev = node.prev
self.length -= 1
return removed.value
"""
Finds and returns the maximum value of all the nodes
in the List.
"""
def get_max(self):
#sets max value to first node
max_value = self.head.value
current_node = self.head
#iterates over list
while current_node.next != None:
#increases current node to the next node
current_node = current_node.next
#checks if the value is larger than our max value so far
if max_value < current_node.value:
max_value = current_node.value
return max_value
"""
Prints the doubly linked list
"""
def print(self):
self = self.head
while self != None:
print(self.value)
self = self.next |
from spanserver import SpanAPI
from ._version import __version__
from ._name import SERVICE_NAME
# The service API is declared here: a single shared SpanAPI instance that
# the route modules register their handlers on.
api = SpanAPI(
    title=SERVICE_NAME, version=__version__, openapi="3.0.0", docs_route="/docs"
)
|
import os
import json
import functools
from abc import ABC
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.gen
import pymysql
from nsq import Writer, Error
from dequeue.redis_dequeue import PriorityQueue, get_redis_from_settings
class BaseHandler(tornado.web.RequestHandler, ABC):
    """Shared plumbing for the spider-publishing request handlers."""
    def __init__(self, application, request, **kwargs):
        super().__init__(application, request, **kwargs)
    @property
    def nsq(self):
        # The NSQ writer is created once on the Application instance.
        return self.application.nsq
    @property
    def queue(self):
        # Builds a fresh redis-backed priority queue on each access.
        # NOTE(review): self.schedule is injected externally as a class
        # attribute (see Publisher.run_web_spider) -- handlers without it
        # will raise AttributeError here; confirm that is intended.
        _queue = self.application.Queue(get_redis_from_settings(self.schedule.settings), 'spider')
        return _queue
    def finish_pub(self, conn, data, topic, msg):
        """NSQ publish callback: log the result and retry once on error."""
        print(data)
        if isinstance(data, Error):
            # try to re-pub message again if pub failed
            self.nsq.pub(topic, msg)
class KeywordHandler(BaseHandler):
    # Placeholder endpoint -- message publishing not implemented yet.
    def get(self):
        """
        msg = '{"spider": "keyword_us", "topic": "keyword", "url": "", "meta": {"keyword": ""}}'.encode('utf-8')
        :return:
        """
        pass
class ReviewHandler(BaseHandler):
    # Placeholder endpoint -- message publishing not implemented yet.
    def get(self):
        """
        msg = '{"spider": "review_us", "topic": "review", "url": "", "meta": {"asin": ""}}'.encode('utf-8')
        :return:
        """
        pass
class BestsellerHandler(BaseHandler):
    # Placeholder endpoint -- message publishing not implemented yet.
    def get(self):
        """
        msg = '{"spider": "bestseller_us", "topic": "bestseller", "url": "", "meta": {"node": ""}}'.encode('utf-8')
        :return:
        """
        pass
class QuestionHandler(BaseHandler):
    # Placeholder endpoint -- message publishing not implemented yet.
    def get(self):
        """
        msg = '{"spider": "question_us", "topic": "question", "url": "", "meta": {"asin": ""}}'.encode('utf-8')
        :return:
        """
        pass
class AnswerHandler(BaseHandler):
    # Placeholder endpoint -- message publishing not implemented yet.
    def get(self):
        """
        msg = '{"spider": "answer_us", "topic": "answer", "url": "", "meta": {"question_id": ""}}'.encode('utf-8')
        :return:
        """
        pass
class ListingHandler(BaseHandler):
    def get(self):
        """Publish listing-spider messages to NSQ (or a redis queue).

        Query params:
          country -- marketplace key ('us' or 'uk'); empty unless supplied
          asin    -- publish a single message for this ASIN
          type    -- bulk mode: 'file' reads asins.txt, 'sql' queries MySQL

        Example messages:
        msg = '{"spider": "local_spider", "topic": "qiushispider", "url": "http://192.168.5.222:9501/", "meta": {}}'.encode('utf-8')
        msg = '{"spider": "qiushi", "topic": "qiushispider", "url": "https://www.qiushibaike.com/text/", "meta": {}}'.encode('utf-8')
        msg = '{"spider": "listing_us", "topic": "listing", "url": "https://www.amazon.com/dp/B019U00D7K", "meta": {"asin": "B019U00D7K"}}'.encode('utf-8')
        self.nsq.pub(topic, msg) # pub
        self.nsq.mpub(topic, [msg, msg_cn]) # mpub
        self.nsq.dpub(topic, 60, msg) # dpub
        """
        urls = {
            'us': 'https://www.amazon.com',
            'uk': 'https://www.amazon.co.uk',
        }
        topic = 'spider'
        country = ''
        if "country" in self.request.arguments:
            country = self.get_argument('country', 'us')

        def struct_msg(asin):
            # Fill the project-wide message template for one ASIN.
            return (self.schedule.settings['MESSAGE_MODEL'] % {
                'spider': 'listing_' + country,
                'target_topic': 'listing',
                'url': urls[country] + '/dp/' + asin,
                'meta': json.dumps({'asin': asin})
            }).encode('utf-8')

        if "asin" in self.request.arguments:
            # Single-ASIN mode: publish one message immediately.
            asin = self.get_argument('asin')
            msg = struct_msg(asin)
            callback = functools.partial(self.finish_pub, topic=topic, msg=msg)
            self.nsq.pub(topic, msg, callback=callback)
            self.write(msg)
        else:
            if self.get_argument('type') == "file":
                filename = "asins.txt"
                if not os.path.exists(filename):
                    raise FileNotFoundError('[define] %s file not found' % filename)
                with open(filename, 'r') as f:
                    # Drop blank lines (e.g. a trailing newline) so no
                    # message is built for an empty ASIN.
                    asins = [line for line in f.read().split('\n') if line]
                msgs = [struct_msg(asin) for asin in asins]
                # BUG FIX: the original wrote msgs.insert(0, ...), whose
                # return value is None, producing {"messages": null}; the
                # raw bytes were also not JSON-serializable. Decode like
                # the sql branch below.
                self.write({"length": len(msgs), "messages": [json.loads(m.decode('utf-8')) for m in msgs]})
            elif self.get_argument('type') == "sql":
                conn = pymysql.connect(
                    host='192.168.5.215',
                    user='cc',
                    password="mysql123",
                    database='spider_db_xc',
                    port=3306
                )
                cursor = conn.cursor()
                # ASINs added within the last full day, deduplicated.
                query_sql = """
                    SELECT
                        asin
                    FROM
                        xc_amazon_listing_{country}
                    WHERE
                        add_date_time > DATE_FORMAT( DATE_SUB( now(), INTERVAL 1 DAY ), "%Y-%m-%d" )
                        AND add_date_time < DATE_FORMAT( now(), "%Y-%m-%d" )
                    GROUP BY
                        asin
                """.format(country=country)
                effect_row = cursor.execute(query_sql)
                result = [item[0] for item in cursor.fetchall()]
                msgs = [struct_msg(asin) for asin in result]
                self.write({"length": effect_row, "messages": [json.loads(ms.decode('utf-8')) for ms in msgs if isinstance(ms, bytes)]})
            else:
                raise KeyError('[define] not found key type')
            # Bulk publish: either push to redis or multi-publish to NSQ.
            callback = functools.partial(self.finish_pub, topic=topic, msg=msgs)
            if self.schedule.settings['USE_REDIS']:
                for msg in msgs:
                    self.queue.push(msg.decode('utf-8'), 0)
            else:
                self.nsq.mpub(topic, msgs, callback=callback)
class Application(tornado.web.Application):
    """Tornado web application wired to a shared NSQ producer.

    Handlers reach these attributes through ``self.application``.
    """

    def __init__(self, handlers, **settings):
        # Queue *class* (not an instance) — handlers instantiate it themselves.
        self.Queue = PriorityQueue
        # One NSQ producer shared by every handler of this application.
        self.nsq = Writer(['192.168.5.134:4150'])
        super(Application, self).__init__(handlers, **settings)
class Publisher(object):
    """Publishes crawl messages to NSQ and boots the scheduling web API."""

    def __init__(self):
        # Dedicated NSQ producer used by start()/do_pub() for ad-hoc publishes.
        self.writer = Writer(['192.168.5.134:4150'])
        self.topic = ''
        self.message = ''

    @classmethod
    def run_web_spider(cls, schedule=None):
        """Start the HTTP API on port 8888 that drives the spiders.

        ``schedule`` (if given) is attached to ``ListingHandler`` so
        handlers can build messages from its settings.  Note the caller
        must still start the IO loop.
        """
        if schedule:
            ListingHandler.schedule = schedule
        application = Application([
            (r"/listing", ListingHandler),
            (r"/keyword", KeywordHandler),
            (r"/review", ReviewHandler),
            (r"/question", QuestionHandler),
            (r"/answer", AnswerHandler),
            (r"/bestseller", BestsellerHandler)
        ])
        application.listen(8888)

    @tornado.gen.coroutine
    def do_pub(self):
        """Coroutine: publish ``self.message`` to ``self.topic`` once."""
        yield tornado.gen.sleep(0.05)
        topic = self.topic
        if isinstance(self.message, bytes):
            msg = self.message.decode('utf-8')
        else:
            # assumes a message object exposing ``.body`` — TODO confirm
            msg = self.message.body
        self.writer.pub(topic, msg, self.finish_pub)
        yield tornado.gen.sleep(0.05)

    def start(self, topic, message):
        """Record the payload and schedule do_pub() on the current IO loop."""
        self.topic = topic
        self.message = message
        tornado.ioloop.IOLoop.current().spawn_callback(self.do_pub)
        # tornado.ioloop.IOLoop.current().run_sync(self.do_pub)
        # tornado.ioloop.IOLoop.instance().run()

    def finish_pub(self, conn, data):
        """NSQ publish callback; currently just logs the broker response."""
        print(data)
        # if isinstance(data, Error):
        #     self.writer.pub(topic, msg)
if __name__ == "__main__":
    from scheduler import Scheduler
    # Wire a fresh scheduler into the handlers, bind the HTTP API on :8888,
    # then block forever in the Tornado IO loop.
    s = Scheduler()
    Publisher.run_web_spider(s)
    tornado.ioloop.IOLoop.current().start()
|
#!../../python3.9/bin/python
import sys
import argparse
import time
import numpy as np
import matplotlib.pyplot as plt
from generadorFiveBitProblem import fivebit
from reca import ProblemClassification
def visualizer_5bit(automat, i, I, R, C):
    """Render one elementary-cellular-automaton run and save it as a PNG.

    Parameters
    ----------
    automat : 2-D array — automaton history (one row per iteration).
    i : int — index of the 5-bit input being visualised (used in the filename).
    I, R, C : int — run parameters (iterations, random mappings, size factor).
    """
    # BUG FIX: was ``plt.figure`` (bare attribute access, no call), so no new
    # figure was ever created and every image drew over the previous one.
    plt.figure()
    plt.imshow(automat, cmap='Greys', interpolation='nearest')
    plt.title('Automata cellular elemental del 5 bit')
    plt.savefig('automats_fivebit/automata%d_I%d_R%d_C%d.png' % (i, I, R, C))
    # Free the figure so repeated calls don't accumulate memory.
    plt.close()
if __name__ == '__main__':
    """
    Arguments
    ----------
    argument -I: required
    number of iterations of elemental cellular automata
    argument -R: required
    number of random mapping
    argument -C: not required
    number to multiply the size of input
    argument -b: NOT required
    number of iterations of whole program
    argument -t: NOT required
    number of distractors of 5-bit
    example:
    ./main.py -I 10 -R 4 -C 4 -b 100 -t 20
    """
    # NOTE(review): the usage text above marks -C/-b/-t as optional, but every
    # flag below is declared required=True — reconcile text and parser.
    parser = argparse.ArgumentParser(prog = 'main.py')
    parser.add_argument('-I', required=True, type=int)
    parser.add_argument('-R', required=True, type=int)
    parser.add_argument('-C', required=True, type=int)
    parser.add_argument('-b', required=True, type=int)
    parser.add_argument('-t', required=True, type=int)
    args = parser.parse_args()
    I = args.I
    R = args.R
    C = args.C
    bucle = args.b
    distractor = args.t
    print(I,R,C)
    start_time = time.time()
    # Build the 5-bit memory problem set (32 input/output sequence pairs).
    input, output = fivebit(distractor).generateProblem()
    r = 0
    # Per-input success / failure counters accumulated over all repetitions.
    pred = np.zeros(32, dtype=int)
    fail = np.zeros(32, dtype=int)
    f = open('dades_fivebit_update/fivebit_I%d_R%d_C%d_bucle%d_distractor%d'
             % (I, R, C, bucle, distractor), 'w+')
    while r < bucle:
        print('bucle:', r)
        r += 1
        for i in range(32):
            # Train/evaluate the reservoir classifier on problem i.
            pc = ProblemClassification(I, R, C, input[i], output[i])
            predictor, automat = pc.generatingProblem()
            """ Predictor es la prediccio generada pel classificador
            i automat és una matiu amb tots els automats generats
            per poder veure'l amb el matplotlib"""
            # visualizer_5bit(automata, i, I, R, C)
            # A run succeeds only if every predicted step matches the target.
            success = True
            for a,b in zip(predictor,output[i]):
                if not np.array_equal(a, b):
                    fail[i] += 1
                    success = False
                    break
            if success:
                pred[i] += 1
    for i in range(32):
        print('nombre dencerts:', pred[i], "nombre d'errors:", fail[i], "per l'input:", i+1)
        f.write("nombre encerts: %d, nombre d'errors: %d per l'input: %d\n" % (pred[i], fail[i], i+1))
    """ Calculem el % final """
    # Per-input success percentage (integer truncation), then the mean.
    res = np.ndarray(32, dtype=int)
    for i in range(32):
        p = pred[i]
        res[i] = (p/bucle)*100
    a = res.mean()
    a = float(a)
    print("El percentatge final d'encerts es: %f\n" % a)
    print('I: %d R: %d distractor: %d\n' % (I, R, distractor))
    f.write("\nEl percentatge final d'encerts es: %f\n" % a)
    f.close()
    print('--- %s seconds ---', '%.3f' % (time.time() - start_time))
|
# Form implementation generated from reading ui file 'resources/ui/tools_dialog.ui'
#
# Created by: PyQt6 UI code generator 6.4.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic6 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt6 import QtCore, QtGui, QtWidgets
class Ui_UtilityFunctionsDialog(object):
    """Auto-generated (pyuic6) widget layout for the utility-functions dialog.

    Do not hand-edit geometry/values here: regenerating from
    ``resources/ui/tools_dialog.ui`` discards manual changes.
    """
    def setupUi(self, UtilityFunctionsDialog):
        """Create, position and wire all child widgets on the dialog."""
        UtilityFunctionsDialog.setObjectName("UtilityFunctionsDialog")
        UtilityFunctionsDialog.resize(422, 345)
        font = QtGui.QFont()
        font.setPointSize(10)
        UtilityFunctionsDialog.setFont(font)
        # OK / Cancel button row.
        self.buttonBox = QtWidgets.QDialogButtonBox(parent=UtilityFunctionsDialog)
        self.buttonBox.setGeometry(QtCore.QRect(250, 310, 171, 32))
        self.buttonBox.setStyleSheet("color: rgb(0, 0, 0);")
        self.buttonBox.setOrientation(QtCore.Qt.Orientation.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.StandardButton.Cancel|QtWidgets.QDialogButtonBox.StandardButton.Ok)
        self.buttonBox.setObjectName("buttonBox")
        # Italic label that echoes the user's selected text.
        self.lbl_selected_text = QtWidgets.QLabel(parent=UtilityFunctionsDialog)
        self.lbl_selected_text.setGeometry(QtCore.QRect(20, 10, 391, 16))
        font = QtGui.QFont()
        font.setPointSize(11)
        font.setItalic(True)
        self.lbl_selected_text.setFont(font)
        self.lbl_selected_text.setObjectName("lbl_selected_text")
        # Drop-down of available utility/transform functions.
        self.function_selector = QtWidgets.QComboBox(parent=UtilityFunctionsDialog)
        self.function_selector.setGeometry(QtCore.QRect(10, 80, 401, 26))
        self.function_selector.setObjectName("function_selector")
        self.btn_copy_transformed = QtWidgets.QToolButton(parent=UtilityFunctionsDialog)
        self.btn_copy_transformed.setGeometry(QtCore.QRect(370, 110, 41, 22))
        self.btn_copy_transformed.setObjectName("btn_copy_transformed")
        # Read-only-ish preview of the transformed text.
        self.txt_transformed_text = QtWidgets.QPlainTextEdit(parent=UtilityFunctionsDialog)
        self.txt_transformed_text.setGeometry(QtCore.QRect(10, 110, 350, 181))
        self.txt_transformed_text.setObjectName("txt_transformed_text")
        self.retranslateUi(UtilityFunctionsDialog)
        self.buttonBox.accepted.connect(UtilityFunctionsDialog.accept) # type: ignore
        self.buttonBox.rejected.connect(UtilityFunctionsDialog.reject) # type: ignore
        QtCore.QMetaObject.connectSlotsByName(UtilityFunctionsDialog)
    def retranslateUi(self, UtilityFunctionsDialog):
        """Install the translatable display strings."""
        _translate = QtCore.QCoreApplication.translate
        UtilityFunctionsDialog.setWindowTitle(_translate("UtilityFunctionsDialog", "Dialog"))
        self.lbl_selected_text.setText(_translate("UtilityFunctionsDialog", "<selected text>"))
        self.btn_copy_transformed.setText(_translate("UtilityFunctionsDialog", "Copy"))
|
from django.conf.urls import url
from django.contrib.auth.views import LogoutView
from apps.users import views
# NOTE(review): ``django.conf.urls.url`` is deprecated since Django 2.0 and
# removed in 4.0 — migrate to ``django.urls.re_path`` (or ``path``) when the
# project's Django version allows it.
urlpatterns = [
    url(r'^login/$', views.LoginView.as_view(), name='login'),
    # Stock Django logout view; login is project-specific.
    url(r'^logout/$', LogoutView.as_view(), name='logout'),
]
|
import os
import pickle
import sys
# Make the repository root importable when this test file is run directly
# (climbs four directory levels up from this file's real location).
torchfuel_path = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
)
sys.path.append(torchfuel_path)
from torchfuel.utils.state import Namespace, State
def test_ns():
    """Smoke-test Namespace attribute storage/pickling and State namespaces."""
    namespace = Namespace()
    namespace.test = 10
    print(namespace.stored_objects)
    print(namespace)
    payload = namespace.pickle_safe()
    assert isinstance(payload, bytes)
    container = State()
    container.add_namespace("new namespace")
    assert hasattr(container, "new_namespace")
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    test_ns()
|
from keras.layers import GRU, Activation, Input, Reshape, Concatenate, Lambda, Dropout
from keras import Model
import keras
import functools
import operator
import numpy as np
from processing import DataGenerator
from data import Data
# Dropout applied both to GRU inputs/recurrent state and between layers.
DROPOUT_RATE = 0.3
# NODES_LAYERS_DMS = (100, 100)
# EDGES_LAYERS_DMS = (100, 100)
# Hidden sizes for the stacked GRU over the combined node+edge rows.
COMBINED_LAYERS_DMS = (100, 50)
# Input is expected to be flattened concatenated Node Label ohv and edge labels' adjacency list ohv
# In the vocabulary of node labels, the first token is SOS, second is EOS.
# In the vocabulary of edge labels, the first token is NO_EDGE.
# d is datagen for graphs
def combined_gru(d, layers_dms=COMBINED_LAYERS_DMS, dropout_rate=DROPOUT_RATE):
    """Build a stacked-GRU sequence model over concatenated node/edge rows.

    Each time step of the input carries the node-label one-hot vector
    followed by ``d.M`` edge-label one-hot vectors; the output applies a
    softmax over the node vocabulary and a per-edge softmax over the edge
    vocabulary, re-concatenated into the same layout.
    """
    feature_dim = d.node_one_hot_vector_size + d.edge_one_hot_vector_size * d.M
    inp = Input((d.max_nodes, feature_dim))
    hidden = inp
    for units in layers_dms:
        hidden = GRU(units, activation='tanh', dropout=dropout_rate,
                     recurrent_dropout=dropout_rate, return_sequences=True)(hidden)
        hidden = Dropout(dropout_rate)(hidden)
    hidden = GRU(feature_dim, dropout=dropout_rate,
                 recurrent_dropout=dropout_rate, return_sequences=True)(hidden)
    # Split the combined output back into its node part and edge part.
    node_out = Lambda(lambda t: t[:, :, :d.node_one_hot_vector_size])(hidden)
    edge_out = Lambda(lambda t: t[:, :, d.node_one_hot_vector_size:])(hidden)
    edge_out = Reshape(
        (d.max_nodes, d.M, d.edge_one_hot_vector_size))(edge_out)
    # Softmax over node labels; per-edge softmax over the last axis.
    node_out = Activation('softmax')(node_out)
    edge_out = Activation(
        lambda t: keras.activations.softmax(t, -1))(edge_out)
    edge_out = Reshape(
        (d.max_nodes, d.M * d.edge_one_hot_vector_size,))(edge_out)
    merged = Concatenate()([node_out, edge_out])
    model = Model(inputs=inp, outputs=merged)
    model.compile(optimizer='Adam', loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    return model
# def node_gru(input_shape, layers_dms=NODES_LAYERS_DMS, dropout_rate=DROPOUT_RATE):
# x = Input(input_shape)
# y = x
# for dm in layers_dms:
# y = GRU(dm, activation='tanh', dropout=dropout_rate,
# recurrent_dropout=dropout_rate, return_sequences=True)(y)
# y = GRU(input_shape[1], activation=lambda x: keras.activations.softmax(x, 1), dropout=dropout_rate,
# recurrent_dropout=dropout_rate, return_sequences=True)(y)
# model = Model(inputs=x, outputs=y)
# model.compile(optimizer='Adam', loss='binary_crossentropy',
# metrics=['accuracy'])
# model.summary()
# return model
#
#
# def edge_gru(input_shape, layers_dms=EDGES_LAYERS_DMS, dropout_rate=DROPOUT_RATE):
# x = Input(input_shape)
# # size = functools.reduce(operator.mul, input_shape, 1)
# input_shape_list = list(input_shape)
# y = Reshape(tuple(
# input_shape_list[:2]+[input_shape_list[-2]*input_shape_list[-1]]), name='predictions')(x)
# for dm in layers_dms:
# y = GRU(dm, activation='tanh', dropout=dropout_rate,
# recurrent_dropout=dropout_rate, return_sequences=True)(y)
# # size = flattened version of the output shape
# # size = functools.reduce(operator.mul, input_shape, 1)
# # print(size)
# # print(input_shape)
# y = GRU(input_shape_list[-1]*input_shape_list[-2], activation='tanh', dropout=dropout_rate,
# recurrent_dropout=dropout_rate, return_sequences=True)(y)
# model = Model(inputs=x, outputs=y)
# model.summary()
# y = Reshape(input_shape, name='predictions')(y)
# y = keras.activations.softmax(y, axis=1)
#
# model = Model(inputs=x, outputs=y)
# model.compile(optimizer='Adam', loss='binary_crossentropy',
# metrics=['accuracy'])
# model.summary()
# return model
def sample(adj_mat_row, d):
    """Sample a one-hot node label and one-hot edge labels from a softmax row.

    ``adj_mat_row`` is one output time step of the model: the node-label
    probability vector followed by ``d.M`` edge-label probability vectors.
    Returns a vector of the same layout with exactly one 1 per group.

    BUG FIX: probabilities must be passed to ``np.random.choice`` via the
    keyword ``p``.  The old calls passed them positionally into the
    ``replace`` parameter, so sampling was *uniform* and the model's
    predicted distribution was silently ignored.
    """
    node_part = adj_mat_row[:d.node_one_hot_vector_size]
    edge_part = adj_mat_row[d.node_one_hot_vector_size:]
    edge_part = np.reshape(edge_part, (d.M, d.edge_one_hot_vector_size))

    out_node_part = np.zeros((d.node_one_hot_vector_size,))
    node_p = np.asarray(node_part, dtype=float)
    node_p = node_p / node_p.sum()  # renormalise against float round-off
    out_node_part[np.random.choice(d.node_one_hot_vector_size, p=node_p)] = 1

    out_edge_part = np.zeros((d.M, d.edge_one_hot_vector_size))
    for i in range(d.M):
        edge_p = np.asarray(edge_part[i], dtype=float)
        edge_p = edge_p / edge_p.sum()
        out_edge_part[i, int(np.random.choice(d.edge_one_hot_vector_size, p=edge_p))] = 1
    out_edge_part = np.reshape(
        out_edge_part, (d.M * d.edge_one_hot_vector_size,))
    return np.concatenate((out_node_part, out_edge_part))
def generate(model, d, batch_size):
    """Autoregressively generate ``batch_size`` graph sequences from the model.

    Seeds every sequence with the SOS node token and NO_EDGE edge tokens,
    then repeatedly predicts the next row and feeds a sampled one-hot row
    back in.  Returns the filled input tensor X.
    """
    # init starting graph input
    nodes_input = np.zeros(d.node_one_hot_vector_size)
    # SOS
    nodes_input[0] = 1
    edges_input = np.zeros((d.M, d.edge_one_hot_vector_size))
    # NO_EDGE
    edges_input[:, 0] = 1
    edges_input = np.reshape(edges_input, (d.M * d.edge_one_hot_vector_size,))
    combined_input = np.concatenate((nodes_input, edges_input))
    X = np.zeros((batch_size, d.max_nodes, d.node_one_hot_vector_size +
                  d.edge_one_hot_vector_size*d.M))
    for i in range(batch_size):
        X[i, 0, :] = combined_input
    for i in range(d.max_nodes-1):
        # Predict the next step for the whole batch, then sample a concrete
        # one-hot row for each graph and write it into the next time slot.
        ys = model.predict(X)
        for idx, y in enumerate(ys):
            y = sample(y[i, :], d)
            X[idx, i + 1, :] = y
            # if y[1] == 1:
            #     break
    return X
|
"""Geometry calculations.
Copyright 2011 University of Auckland.
This file is part of PyTOUGH.
PyTOUGH is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
PyTOUGH is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with PyTOUGH. If not, see <http://www.gnu.org/licenses/>."""
from __future__ import print_function
try:
import numpy as np
from numpy import float64
from numpy import int8 as int8
from numpy.linalg import norm, solve, LinAlgError
except ImportError: # try importing Numeric on old installs
import Numeric as np
from Numeric import Float64 as float64
from Numeric import Int8 as int8
def norm(a): # Numeric doesn't have a norm function
from math import sqrt
return sqrt(np.dot(a, a))
from LinearAlgebra import solve_linear_equations as solve
from LinearAlgebra import LinAlgError
# check Python version:
import sys
vinfo = sys.version_info
if (vinfo[0] <= 2) and (vinfo[1] < 5): # < 2.5:
    # Python < 2.5 has no builtin any()/all(); provide equivalents.
    def any(a):
        for x in a:
            if x: return True
        return False
    def all(a):
        for x in a:
            if not x: return False
        return True
def in_polygon(pos, polygon):
    """Ray-casting test: is the 2-D point ``pos`` inside ``polygon``?

    Returns 1 (odd number of edge crossings) when inside, 0 otherwise.
    Coordinates are shifted by the first vertex to reduce round-off.
    """
    tolerance = 1.e-6
    origin = polygon[0]
    rel = pos - origin
    crossings = 0
    n = len(polygon)
    for k in range(n):
        a = polygon[k] - origin
        b = polygon[(k + 1) % n] - origin
        # Does the horizontal ray through rel cross this edge's y-span?
        if a[1] <= rel[1] < b[1] or b[1] <= rel[1] < a[1]:
            edge = b - a
            if abs(edge[1]) > tolerance:
                x_cross = a[0] + (rel[1] - a[1]) * edge[0] / edge[1]
                if rel[0] < x_cross:
                    crossings += 1
    return (crossings % 2)
def in_rectangle(pos, rect):
    """Return True when the 2-D point ``pos`` lies inside the axis-aligned
    rectangle ``rect`` = [bottom-left, top-right] (boundaries inclusive)."""
    return all(rect[0][axis] <= pos[axis] <= rect[1][axis] for axis in range(2))
def rectangles_intersect(rect1, rect2):
    """Return True when two axis-aligned rectangles overlap (touching counts)."""
    for axis in range(2):
        # Separated along this axis => no intersection.
        if rect1[1][axis] < rect2[0][axis] or rect2[1][axis] < rect1[0][axis]:
            return False
    return True
def sub_rectangles(rect):
    """Split an axis-aligned rectangle into its four equal quadrants.

    Returns [bottom-left, bottom-right, top-left, top-right], each as a
    [bottom-left corner, top-right corner] pair.
    """
    mid = 0.5 * (rect[0] + rect[1])
    bottom_left = [rect[0], mid]
    bottom_right = [np.array([mid[0], rect[0][1]]), np.array([rect[1][0], mid[1]])]
    top_left = [np.array([rect[0][0], mid[1]]), np.array([mid[0], rect[1][1]])]
    top_right = [mid, rect[1]]
    return [bottom_left, bottom_right, top_left, top_right]
def bounds_of_points(points):
    """Axis-aligned bounding box of 2-D points: [bottom-left, top-right]."""
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    return [np.array([min(xs), min(ys)]), np.array([max(xs), max(ys)])]
def rect_to_poly(rect):
    """Convert a rectangle [bottom-left, top-right] to a 4-vertex polygon
    (counter-clockwise: BL, BR, TR, TL).

    BUG FIX: the derived corners were built as ``np.array(x, y)`` — two
    positional arguments, where the second is interpreted as ``dtype`` and
    raises a TypeError.  The coordinates must be wrapped in a list.
    """
    return [rect[0], np.array([rect[1][0], rect[0][1]]),
            rect[1], np.array([rect[0][0], rect[1][1]])]
def polygon_area(polygon):
    """Return the signed area of an arbitrary polygon (positive when the
    vertices run counter-clockwise), via the shoelace formula.

    BUG FIX: the original shifted ``polygon`` in place (``polygon -=
    polygon[0]``), silently mutating the caller's array.  The shift — which
    only reduces floating-point round-off for large coordinates — is now
    performed on a copy.
    """
    area = 0.0
    n = len(polygon)
    if n > 0:
        shifted = np.asarray(polygon) - polygon[0]  # copy; caller untouched
        for j, p1 in enumerate(shifted):
            p2 = shifted[(j+1) % n]
            area += p1[0] * p2[1] - p2[0] * p1[1]
    return 0.5 * area
def polygon_centroid(polygon):
    """Return the centroid of an arbitrary polygon.

    Polygons with fewer than 3 vertices fall back to the vertex mean.

    BUG FIX: the original did ``shift = polygon[0]; polygon -= shift``.
    ``shift`` was a *view* of row 0, which becomes zero after the in-place
    subtraction — so the final ``+ shift`` added nothing and the returned
    centroid was relative to the first vertex, not in the original
    coordinates.  The caller's array was also mutated.  The shift is now
    copied and applied to a working copy.
    """
    c, area = np.zeros(2), 0.
    n = len(polygon)
    shift = np.array(polygon[0], copy=True)   # materialise before shifting
    work = np.asarray(polygon) - shift        # copy; caller untouched
    if n < 3: return sum(work) / n + shift
    else:
        for j, p1 in enumerate(work):
            p2 = work[(j+1) % n]
            t = p1[0] * p2[1] - p2[0] * p1[1]
            area += t
            c += (p1 + p2) * t
        area *= 0.5
        return c / (6. * area) + shift
def line_polygon_intersections(polygon, line, bound_line = (True,True),
                               indices = False):
    """Returns a list of the intersection points at which a line crosses a
    polygon. The list is sorted by distance from the start of the
    line. The parameter bound_line controls whether to limit
    intersections between the line's start and end points. If indices
    is True, also return polygon side indices of intersections.
    """
    crossings = []
    ref = polygon[0]
    # Work relative to the first vertex to reduce round-off.
    l1, l2 = line[0] - ref, line[1] - ref
    tol = 1.e-12
    # Maps intersection point (as a tuple, which also de-duplicates
    # vertex hits shared by two sides) -> polygon side index.
    ind = {}
    def in_unit(x): return -tol <= x <= 1.0 + tol
    for i, p in enumerate(polygon):
        p1 = p - ref
        p2 = polygon[(i+1)%len(polygon)] - ref
        dp = p2 - p1
        # Solve p1 + xi[0]*dp == l1 + xi[1]*(l2 - l1) for (xi[0], xi[1]).
        A, b = np.column_stack((dp, l1 - l2)), l1 - p1
        try:
            xi = solve(A,b)
            inline = True
            # Optionally clip to the line's start and/or end.
            if bound_line[0]: inline = inline and (-tol <= xi[1])
            if bound_line[1]: inline = inline and (xi[1] <= 1.0 + tol)
            if in_unit(xi[0]) and inline :
                c = tuple(ref + p1 + xi[0] * dp)
                ind[c] = i
        except LinAlgError: continue  # side parallel to the line: no crossing
    crossings = [np.array(c) for c, i in ind.items()]
    # Sort by distance from start of line:
    sortindex = np.argsort([norm(c - line[0]) for c in crossings])
    if indices: return [crossings[i] for i in sortindex], \
                       [ind[tuple(crossings[i])] for i in sortindex]
    else: return [crossings[i] for i in sortindex]
def polyline_polygon_intersections(polygon, polyline):
    """Return every point where a polyline's segments cross the polygon.

    Note: segments are taken modulo ``len(polyline)``, so the polyline is
    treated as closed (the last point connects back to the first).
    """
    from itertools import chain  # flatten the per-segment result lists
    n = len(polyline)
    segments = ([polyline[k], polyline[(k + 1) % n]] for k in range(n))
    per_segment = (line_polygon_intersections(polygon, seg) for seg in segments)
    return list(chain.from_iterable(per_segment))
def simplify_polygon(polygon, tolerance = 1.e-6):
    """Drop vertices that are (within ``tolerance``) colinear with their
    neighbours and return the remaining vertices as a list."""
    kept = []
    n = len(polygon)
    for idx in range(n):
        prev_pt = polygon[idx - 1]
        pt = polygon[idx]
        next_pt = polygon[(idx + 1) % n]
        incoming, outgoing = pt - prev_pt, next_pt - pt
        # A vertex is colinear when the dot product approaches
        # |incoming| * |outgoing| (turn angle ~0); keep it otherwise.
        if np.dot(incoming, outgoing) < (1. - tolerance) * norm(incoming) * norm(outgoing):
            kept.append(pt)
    return kept
def polygon_boundary(this, other, polygon):
    """Returns point on a line between vector this and other and also on
    the boundary of the polygon"""
    big = 1.e10
    ref = polygon[0]
    # Work relative to the first vertex to reduce round-off.
    a = this - ref
    b = other - ref
    v = None            # best boundary point found so far (absolute coords)
    dmin = big          # distance from ``this`` to that point
    for i in range(len(polygon)):
        c = polygon[i] - ref
        i2 = (i + 1) % len(polygon)
        d = polygon[i2] - ref
        # Solve a + r[0]*(b - a) == c + r[1]*(d - c) for (r[0], r[1]).
        M = np.transpose(np.array([b - a, c - d]))
        try:
            r = solve(M, c - a)
            # Ray from ``this`` towards ``other`` (r[0] >= 0) must hit
            # within this polygon side (0 <= r[1] <= 1).
            if r[0] >= 0.0 and 0.0 <= r[1] <= 1.0:
                bdy = c * (1.0 - r[1]) + d * r[1]
                dist = norm(bdy - a)
                if dist < dmin:
                    dmin = dist
                    v = bdy + ref
        except LinAlgError: continue  # side parallel to the ray: skip
    return v
def line_projection(a, line, return_xi = False):
    """Project point ``a`` onto the (infinite) line through ``line[0]`` and
    ``line[1]``.  Optionally also return the non-dimensional position ``xi``
    (0 at ``line[0]``, 1 at ``line[1]``).  Returns ``None`` (and ``xi =
    None``) for a degenerate zero-length line.

    BUG FIX: with NumPy inputs ``0./0.`` yields ``nan`` (plus a runtime
    warning) instead of raising ``ZeroDivisionError``, so the original
    degenerate-line handler never fired and nan values leaked to callers.
    The zero denominator is now tested explicitly.
    """
    d = line[1] - line[0]
    dd = np.dot(d, d)
    if dd == 0.0:  # line ill-defined
        p, xi = None, None
    else:
        xi = np.dot(a - line[0], d) / dd
        p = line[0] + d * xi
    if return_xi: return p, xi
    else: return p
def point_line_distance(a, line):
    """Return the perpendicular distance from point ``a`` to a line."""
    projected = line_projection(a, line)
    return np.linalg.norm(a - projected)
def polyline_line_distance(polyline, line):
    """Returns minimum distance between a polyline and a line."""
    dists = []
    for i, pt in enumerate(polyline):
        # Segments wrap around: the polyline is treated as closed.
        pline = [pt, polyline[(i + 1) % len(polyline)]]
        # NOTE(review): arguments here are (line, pline) although the callee's
        # signature is (polygon, line).  A 2-point "polygon" degenerates to
        # the segment plus its reverse, so this acts as a segment/segment
        # intersection test — confirm this is intended.
        if line_polygon_intersections(line, pline): return 0.0
        else: dists.append(min([point_line_distance(p, line) for p in pline]))
    return min(dists)
def vector_heading(p):
    """Heading of a 2-D vector ``p`` in radians, measured clockwise from
    the positive y-axis ('north'), normalised into [0, 2*pi)."""
    from math import asin
    heading = asin(p[0] / norm(p))
    # asin only covers the northern half-plane; reflect for the south.
    if p[1] < 0: heading = np.pi - heading
    # Wrap into [0, 2*pi).
    if heading < 0: heading += 2. * np.pi
    elif heading > 2. * np.pi: heading -= 2. * np.pi
    return heading
class linear_trans2(object):
    """2-D affine transformation x -> A.x + b.

    Instances are callable: applying one to a 2-vector transforms the
    point; applying it to another ``linear_trans2`` composes the two.
    """
    def __init__(self, A = None, b = None):
        # FIX: the old signature used mutable NumPy defaults
        # (A=np.identity(2), b=np.zeros(2)).  Those arrays are created once
        # and shared by every instance built with defaults, so any in-place
        # modification (e.g. ``t.A += ...``) would corrupt all later
        # defaults.  Fresh arrays are now created per instance.
        self.A = np.identity(2) if A is None else A
        self.b = np.zeros(2) if b is None else b
    def __repr__(self): return repr(self.A) + ' ' + repr(self.b)
    def __call__(self, x):
        """Transforms a 2-D point x, or another linear transformation."""
        if isinstance(x, linear_trans2):
            # Composition: (self o x)(p) = A.(x.A.p + x.b) + b
            result = linear_trans2()
            result.A = np.dot(self.A, x.A)
            result.b = self.b + np.dot(self.A, x.b)
            return result
        else: return np.dot(self.A, x) + self.b
    def get_inverse(self):
        """Returns the inverse of a transformation, if it exists (else None)."""
        result = linear_trans2()
        try:
            result.A = np.linalg.inv(self.A)
            result.b = -np.dot(result.A, self.b)
            return result
        except np.linalg.LinAlgError: return None
    inverse = property(get_inverse)
    def between_rects(self, rect1, rect2):
        """Set this transformation to map rectangle ``rect1`` onto ``rect2``
        and return self.  Each rectangle is a pair of 2-D arrays
        (bottom-left, top-right).  A degenerate source rectangle leaves
        ``self.A`` set to None.

        FIX: removed a dead ``self.b = rect2[0] - rect1[0]`` assignment
        that was immediately overwritten by the correct formula below.
        """
        tolerance = 1.e-6
        d1 = rect1[1] - rect1[0]
        if abs(d1[0]) > tolerance and abs(d1[1]) > tolerance:
            d2 = rect2[1] - rect2[0]
            s = [l2 / l1 for l1, l2 in zip(d1, d2)]
            self.A = np.array([[s[0], 0.0], [0.0, s[1]]])
            # b maps the scaled source origin onto the target origin.
            self.b = np.array([r2 - s * r1 for r1, r2, s in zip(rect1[0], rect2[0], s)])
        else:
            self.A = None
        return self
    def between_points(self, points1, points2):
        """Returns a least-squares best fit linear transformation between two
        sets of points (e.g. the same set of points in two different
        coordinate systems). The parameters points1 and points2
        should contain lists of the corresponding points in the first
        and second coordinate systems respectively. At least three
        points are needed to find a best-fit linear transformation.
        """
        npts1, npts2 = len(points1), len(points2)
        if npts1 == npts2:
            npts = npts1
            if npts >= 3:
                # first normalise the coordinates (maps each bounding box
                # onto the unit square, improving conditioning):
                r1, r2 = bounds_of_points(points1), bounds_of_points(points2)
                dx1 = np.array([r1[1][0] - r1[0][0], r1[1][1] - r1[0][1]])
                dx2 = np.array([r2[1][0] - r2[0][0], r2[1][1] - r2[0][1]])
                N1 = linear_trans2(np.array([[1. / dx1[0], 0.], [0., 1. / dx1[1]]]),
                                   -np.array([r1[0][0] / dx1[0], r1[0][1] / dx1[1]]))
                N2 = linear_trans2(np.array([[1. / dx2[0], 0.], [0., 1. / dx2[1]]]),
                                   -np.array([r2[0][0] / dx2[0], r2[0][1] / dx2[1]]))
                n1, n2 = [N1(p) for p in points1], [N2(p) for p in points2]
                # assemble the least squares fitting system for the
                # normalised points (6 unknowns: A's entries plus b):
                M, r = np.zeros((6,6)), np.zeros(6)
                for p1, p2 in zip(n1, n2):
                    p00, p01, p11 = p1[0] * p1[0], p1[0] * p1[1], p1[1] * p1[1]
                    M[0,0] += p00; M[0,1] += p01; M[0,4] += p1[0]
                    M[1,0] += p01; M[1,1] += p11; M[1,4] += p1[1]
                    M[2,2] += p00; M[2,3] += p01; M[2,5] += p1[0]
                    M[3,2] += p01; M[3,3] += p11; M[3,5] += p1[1]
                    M[4,0] += p1[0]; M[4,1] += p1[1]; M[4,4] += 1.0
                    M[5,2] += p1[0]; M[5,3] += p1[1]; M[5,5] += 1.0
                    r += np.array([p1[0] * p2[0], p1[1] * p2[0], p1[0] * p2[1],
                                   p1[1] * p2[1], p2[0], p2[1]])
                try:
                    a = np.linalg.solve(M, r)
                    L = linear_trans2(np.array([[a[0], a[1]], [a[2], a[3]]]),
                                      np.array([a[4], a[5]]))
                    # Undo the normalisation: N2^-1 o L o N1.
                    return N2.inverse(L(N1))
                except np.linalg.LinAlgError:
                    print('Could not solve least squares fitting system.')
                    return None
            else:
                print('At least three points are needed to find the best fit linear transformation.')
                return None
        else:
            print('The two points lists must contain the same number of points.')
            return None
    def rotation(self, angle, centre = None):
        """Returns a linear transformation representing a rotation by the
        specified angle (degrees clockwise), about an optionally
        specified centre (default: the origin).
        """
        if centre is None: centre = np.zeros(2)  # avoid shared mutable default
        T = linear_trans2(b = -centre)
        from math import radians, sin, cos
        angleradians = radians(angle)
        cosangle, sinangle = cos(angleradians), sin(angleradians)
        R = linear_trans2(np.array([[cosangle, sinangle], [-sinangle, cosangle]]))
        result = R(T)
        result.b += centre # i.e. return T.inverse(R(T))
        return result
|
# kPicture.py
import pyqtgraph as pg
from pyqtgraph import QtCore, QtGui
# dat = data.NewData() 指标数据
# ddat = dat.ddata['000001.SZ'].iloc[-100:][['open','high','low','close','vol','ma10','ma20','ma30']]
# ddat['t'] = range(1, ddat.shape[0] + 1)
# wdat = dat.wdata['000001.SZ'].iloc[-100:][['open','high','low','close','vol','ma10','ma20','ma30']]
# wdat['t'] = range(1, wdat.shape[0] + 1)
# mdat = dat.mdata['000001.SZ'].iloc[-100:][['open','high','low','close','vol','ma10','ma20','ma30']]
# mdat['t'] = range(1, mdat.shape[0] + 1)
## Create a subclass of GraphicsObject.
## The only required methods are paint() and boundingRect()
## (see QGraphicsItem documentation)
class kDialog(QtGui.QDialog):
    """Dialog window that renders one instrument's candlestick chart with
    ma10/ma20/ma30 moving-average overlays."""

    def __init__(self, data, parent = None):
        super(kDialog, self).__init__(parent)
        # Window-title suffix by bar period (daily / weekly / monthly).
        typedict ={ 'd':'日线', 'w':'周线', 'm':'月线'}
        # NOTE(review): ``data`` is *called* here, so callers apparently pass
        # a factory/callable returning the data dict — confirm at call sites.
        self.data = data()
        self.setWindowTitle(f'{self.data["name"]}{typedict[self.data["type"]]}')
        self.resize(800, 600)
        # BUG FIX: was ``self.createPlot`` — a bare attribute reference that
        # does nothing — so the plot was never actually built.
        self.createPlot()

    def createPlot(self):
        """Build the candlestick item plus moving-average overlays."""
        self.item = CandlestickItem(self.data['data'])  # candlestick series
        self.vlayout = QtGui.QVBoxLayout(self)
        self.plt = pg.plot(self.vlayout)
        self.plt.addItem(self.item)
        self.plt.plot(self.data['data']['ma30'], pen = 'r')
        self.plt.plot(self.data['data']['ma20'], pen = 'b')
        self.plt.plot(self.data['data']['ma10'], pen = 'y')
        self.vlayout.addWidget(self.item)
class CandlestickItem(pg.GraphicsObject):
    """Candlestick chart item that pre-renders everything into a QPicture.

    ``data`` rows must provide: open, high, low, close, vol, ma10, ma20,
    ma30, t (x position).
    """

    def __init__(self, data):
        pg.GraphicsObject.__init__(self)
        self.data = data
        self.generatePicture()

    def generatePicture(self):
        # Pre-computing a QPicture lets paint() be a cheap blit instead of
        # re-drawing every shape on each repaint.
        self.picture = QtGui.QPicture()
        painter = QtGui.QPainter(self.picture)
        painter.setPen(pg.mkPen('w'))
        half_width = 1 / 3.
        # Names chosen to avoid shadowing the builtins open/min/max.
        for (o, hi, lo, c, vol, ma10, ma20, ma30, t) in self.data.values:
            painter.drawLine(QtCore.QPointF(t, lo), QtCore.QPointF(t, hi))
            # Green for a falling bar, red for a rising one.
            painter.setBrush(pg.mkBrush('g') if o > c else pg.mkBrush('r'))
            painter.drawRect(QtCore.QRectF(t - half_width, o, half_width * 2, c - o))
        painter.end()

    def paint(self, p, *args):
        p.drawPicture(0, 0, self.picture)

    def boundingRect(self):
        # Must cover the entire drawn area, or Qt produces artifacts and may
        # crash; QPicture tracks its own bounding rect for us.
        return QtCore.QRectF(self.picture.boundingRect())
def showKline(data):
    """Open a standalone pyqtgraph window showing the candlestick chart and
    its ma30/ma20/ma10 overlays for one instrument."""
    candles = CandlestickItem(data['data'])
    typedict ={ 'd':'日线', 'w':'周线', 'm':'月线'}
    win = pg.plot()
    win.addItem(candles)
    # Moving averages, drawn longest-window first.
    for column, colour in (('ma30', 'r'), ('ma20', 'b'), ('ma10', 'y')):
        win.plot(data['data'][column], pen = colour)
    win.setWindowTitle(f'{data["name"]}{typedict[data["type"]]}')
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    import sys
    # Only start the Qt event loop for non-interactive PyQt sessions.
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
from urllib.parse import urlencode
from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def page_replace(context, new_page, page_name="page"):
    """Re-encode the current request's query string with ``page_name`` set
    to ``new_page`` — used to build pagination links that preserve every
    other GET parameter."""
    params = dict(context['request'].GET)
    params[page_name] = new_page
    return urlencode(params, doseq=True)
|
# -*- coding: utf-8 -*-
# @Time : 2020/12/25 下午 02:35
# @Author : Yuanxuan
# @FileName: run_start.py.py
# @Software: PyCharm
import os
import time
"""
笔记
os.getcwd() 获取当前文件的路径
os.path.dirname() 获取当前文件的文件夹的路径,即上一级的路径
os.path.join() 拼接路径
"""
# 项目文件夹的路径
BASE_DIR_PATH = os.path.dirname(os.getcwd())
# 元素控件
ELEMENT_PATH = os.path.join(BASE_DIR_PATH, "yaml", "element")
# 测试数据
TEST_DATA_PATH = os.path.join(BASE_DIR_PATH, "yaml", "test_data")
# 测试报告
now_time = time.strftime("%Y-%m-%d_%H_%M_%S")
RAW_REPORT_PATH = os.path.join(BASE_DIR_PATH, "report", "raw_file") # 未解码
TEST_REPORT_PATH = os.path.join(BASE_DIR_PATH, "report", "report_file", now_time)
# 测试用例
TEST_CASE_PATH = os.path.join(BASE_DIR_PATH, "test_case")
# WebURL地址
URL_chouti = 'https://dig.chouti.com'
URL_baidu = "https://www.baidu.com"
URL_tencent = 'https://www.qq.com/'
URL_126 = 'https://www.126.com/'
# url选择器
URL = URL_tencent
def sleeping(t=1):
    """Deprecated global wait helper: sleep for ``t`` seconds."""
    import warnings
    warnings.warn("该方法不再维护", DeprecationWarning, stacklevel=2)
    time.sleep(t)
|
import numpy as np
import pickle
import matplotlib
import pyfftw
from math import pi as PI
from decimal import Decimal
from matplotlib import cm, rc
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.mplot3d import Axes3D
#from matplotlib import animation
#from JSAnimation import IPython_display
matplotlib.rc('xtick', labelsize=30)
matplotlib.rc('ytick', labelsize=30)
# Plotting is optional: degrade gracefully when matplotlib/seaborn fail.
# (This module is Python 2 — note the print *statement* below.)
try:
    import matplotlib.pyplot as plt
    ifplot = True
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    from matplotlib import ticker
    from pylab import axes
    try:
        import seaborn as sns
        sns.set(style='white')
        colors = [sns.xkcd_rgb['denim blue'], sns.xkcd_rgb['pale red'],
                  sns.xkcd_rgb['olive green'], sns.xkcd_rgb['golden yellow']]
    except:
        # seaborn unavailable: fall back to plain matplotlib colours.
        colors = ['b', 'r', 'g', 'y']
except:
    print 'Problem with matplotlib.'
matplotlib.rc('axes', labelsize=25)
matplotlib.rc('xtick', labelsize=25)
matplotlib.rc('ytick', labelsize=25)
rc('text', usetex=True)
def initc(x): # Initial condition
    """Initial condition u0(x) = cos(x/L) * (1 + sin(x/L)).

    ``L`` is the module-level domain length scale.  (Removed a dead
    ``np.zeros_like`` pre-allocation that was immediately rebound.)
    """
    #u0 = np.sin(x/L)
    return np.cos(x/L)*(1.+np.sin(x/L))
def wavenum(Mx): # Wavenumber evaluation in Fourier space
    """Return the real-FFT wavenumbers for ``Mx`` grid points, scaled by
    the module-level domain length ``L``.

    (Removed a dead ``np.zeros_like`` pre-allocation that was immediately
    overwritten.)
    """
    kxx = np.fft.rfftfreq(Mx, 1./Mx).astype(float)
    return kxx/L
def fftrtf(uspec): # Replicate half of the variable for symmetry in Fourier space
    """Rebuild the full Hermitian-symmetric spectrum from an rfft
    half-spectrum: append the reversed conjugate of the interior modes
    (the DC and Nyquist bins are not duplicated).

    ``Mx`` is the module-level grid size.  (Removed a dead
    ``pyfftw.empty_aligned`` allocation that was immediately overwritten
    by ``np.concatenate``.)
    """
    mirrored = np.conjugate(uspec[::-1])
    interior = np.delete(mirrored, [0, Mx//2], None)
    return np.concatenate((uspec[:], interior[:]), axis=0)
def weights(x): # Spatial integration weights
    """Trapezoidal quadrature weights for the (possibly non-uniform) grid
    ``x``, returned as a long-double array of the same length."""
    npts = len(x)
    spacing = np.empty_like(x, dtype='longdouble')
    for k in range(npts - 1):
        spacing[k] = x[k + 1] - x[k]
    spacing = np.delete(spacing, [npts - 1], None)
    w = np.empty_like(x, dtype='longdouble')
    # Endpoints get half of their single neighbouring interval; interior
    # points get half of each adjacent interval.
    w[0] = spacing[0] / 2
    w[npts - 1] = spacing[-1] / 2
    for k in range(1, npts - 1):
        w[k] = spacing[k - 1] / 2 + spacing[k] / 2
    return w
def antialias(uhat,vhat): # Anti-aliasing using padding technique
    """De-aliased spectral product of ``uhat`` and ``vhat`` via zero
    padding: pad both spectra to double length, multiply in physical
    space, transform back and keep the original modes (scaled by 2).

    FIX: index arithmetic now uses floor division (``N//2``) so the
    function also works under Python 3, where ``/`` on ints yields a
    float (an invalid index).  Behaviour under Python 2 is unchanged.
    """
    N = len(uhat)
    M = 2*N
    uhat_pad = np.concatenate((uhat[0:N//2], np.zeros((M-N,)), uhat[N//2:]), axis=0)
    vhat_pad = np.concatenate((vhat[0:N//2], np.zeros((M-N,)), vhat[N//2:]), axis=0)
    u_pad = pyfftw.interfaces.numpy_fft.ifft(uhat_pad)
    v_pad = pyfftw.interfaces.numpy_fft.ifft(vhat_pad)
    w_pad = u_pad*v_pad
    what_pad = pyfftw.interfaces.numpy_fft.fft(w_pad)
    what = 2.*np.concatenate((what_pad[0:N//2], what_pad[M-N//2:M]), axis=0)
    return what
def aalias(uhat): # To calculate (uhat)^2 in real space and transform to Fourier space
    """Square ``uhat`` pointwise in physical space, then return the rfft
    of the result."""
    physical = pyfftw.interfaces.numpy_fft.irfft(uhat)
    squared = physical.real*physical.real
    return pyfftw.interfaces.numpy_fft.rfft(squared)
def alias(uht): # To calculate (uht)^2 in real space
    """Return the pointwise square (real part) of the inverse FFT of ``uht``."""
    physical = pyfftw.interfaces.numpy_fft.ifft(uht)
    return physical.real*physical.real
def fwnum(Mx):
    """Integer FFT wavenumbers for ``Mx`` points, with the entry at index
    ``Mx//2`` sign-flipped (so for even ``Mx`` the Nyquist mode appears
    as +Mx/2 instead of -Mx/2)."""
    modes = np.fft.fftfreq(Mx, 1./Mx).astype(int)
    modes[Mx//2] *= -1
    return modes
def kssol1(u0): # Solver for start at time t = 0
    """Spectral semi-implicit time-stepper started from initial state u0.

    Uses backward Euler + Crank-Nicolson for the first step, a 2nd-order
    BDF-like scheme for the second, and a 3rd-order BDF-like scheme
    afterwards; the linear term (k2) is implicit, the nonlinear term
    explicit.  Relies on the module-level globals ``Mx`` (grid size) and
    ``x`` (grid) — assumed defined before this is called.

    Returns (u, ur, en, ent): spectral history, physical history, energy
    history and energy-production history.  (Python 2 code: note the
    ``print t`` statement in the loop.)
    """
    Tf = Decimal("150.0") # Final time
    t = Decimal("0.0") # Current time
    h = Decimal("0.25")
    #Tf = Decimal("1000.0") # Final time
    #t = Decimal("0.0") # Current time
    #h = Decimal("0.01")
    dt = float(h) # Size of the time step
    nt = int(Tf/h)
    kx = wavenum(Mx)
    A = np.ones((Mx//2+1,))
    # Linear operator of the KS equation in Fourier space: -k^2 + k^4.
    k2 = -(kx**2)+(kx**4)
    # Spectral state history; column i is the solution at step i.
    u = pyfftw.empty_aligned((Mx//2+1,nt+1), dtype='clongdouble')
    us0 = pyfftw.empty_aligned((Mx//2+1,), dtype='clongdouble')
    u[:,0] = pyfftw.interfaces.numpy_fft.rfft(u0)
    u[0,0] -= u[0,0]  # zero the mean (DC) mode
    # Nonlinear term (u^2) and its spectral derivative history.
    nlin = pyfftw.empty_aligned((Mx//2+1,nt+1), dtype='clongdouble')
    nlin[:,0] = aalias(u[:,0])
    nlinspec = pyfftw.empty_aligned((Mx//2+1,nt+1), dtype='clongdouble')
    nlinspec[:,0] = -0.5*1j*kx*nlin[:,0]
    nls = pyfftw.empty_aligned((Mx//2+1,), dtype='clongdouble')
    nlspec = pyfftw.empty_aligned((Mx//2+1,), dtype='clongdouble')
    # (u_x)^2 and (u_xx)^2 histories for the energy-production diagnostic.
    nondx = pyfftw.empty_aligned((Mx,nt+1), dtype='longdouble')
    nondx2 = pyfftw.empty_aligned((Mx,nt+1), dtype='longdouble')
    nondx[:,0] = alias(1j*fwnum(Mx)*fftrtf(u[:,0]))
    nondx2[:,0] = alias(-1.*((fwnum(Mx))**2)*fftrtf(u[:,0]))
    # Physical-space state history.
    ur = pyfftw.empty_aligned((Mx,nt+1), dtype='longdouble')
    ur[:,0] = pyfftw.interfaces.numpy_fft.irfft(u[:,0]).real
    wt = weights(x)
    # Energy (integral of u^2) and its production rate per step.
    en = np.empty((nt+1,), dtype='longdouble')
    en[0] = np.dot(wt, ur[:,0]*ur[:,0])
    ent = np.empty((nt+1,), dtype='longdouble')
    ent[0] = (2.*np.dot(wt, nondx[:,0]))-(2.*np.dot(wt, nondx2[:,0]))
    for i in range(nt):
        t += h
        print t
        if i==0:
            # First step: backward-Euler predictor us0, then a
            # Crank-Nicolson corrector using the midpoint nonlinearity.
            us0 = (u[:,i] + (dt*nlinspec[:,i]))/(A + (dt*k2))
            us0[0] -= us0[0]
            us0[-1] -= us0[-1]
            nls[:] = aalias(0.5*(u[:,0]+us0))
            nlspec[:] = -0.5*1j*kx*nls[:]
            u[:,i+1] = (u[:,i] - (0.5*dt*k2*u[:,i]) + (dt*nlspec[:]))/(A + (0.5*dt*k2))
            u[0,i+1] -= u[0,i+1]
            u[-1,i+1] -= u[-1,i+1]
            ur[:,i+1] = pyfftw.interfaces.numpy_fft.irfft(u[:,i+1]).real
            en[i+1] = np.dot(wt, ur[:,i+1]*ur[:,i+1])
            nondx[:,i+1] = alias(1j*fwnum(Mx)*fftrtf(u[:,i+1]))
            nondx2[:,i+1] = alias(-1.*((fwnum(Mx))**2)*fftrtf(u[:,i+1]))
            #nondx[:,i] = pyfftw.interfaces.numpy_fft.ifft(antialias(1j*fwnum(Mx)*fftrtf(u[:,i+1]),1j*fwnum(Mx)*fftrtf(u[:,i+1])))
            #nondx2[:,i] = pyfftw.interfaces.numpy_fft.ifft(antialias(-1.*((fwnum(Mx))**2)*fftrtf(u[:,i+1]),-1.*((fwnum(Mx))**2)*fftrtf(u[:,i+1])))
            ent[i+1] = (2.*np.dot(wt, nondx[:,i+1]))-(2.*np.dot(wt, nondx2[:,i+1]))
        elif i==1:
            # Second step: two-step (2nd-order) implicit multistep formula.
            nlin[:,i] = aalias(u[:,i])
            nlinspec[:,i] = -0.5*1j*kx*nlin[:,i]
            u[:,i+1] = ((4*u[:,i]) - u[:,i-1] + (4*dt*nlinspec[:,i]) - (2*dt*nlinspec[:,i-1]))/((3*A) + (2*dt*k2))
            u[0,i+1] -= u[0,i+1]
            u[-1,i+1] -= u[-1,i+1]
            ur[:,i+1] = pyfftw.interfaces.numpy_fft.irfft(u[:,i+1]).real
            en[i+1] = np.dot(wt, ur[:,i+1]*ur[:,i+1])
            nondx[:,i+1] = alias(1j*fwnum(Mx)*fftrtf(u[:,i+1]))
            nondx2[:,i+1] = alias(-1.*((fwnum(Mx))**2)*fftrtf(u[:,i+1]))
            #nondx[:,i] = pyfftw.interfaces.numpy_fft.ifft(antialias(1j*fwnum(Mx)*fftrtf(u[:,i+1]),1j*fwnum(Mx)*fftrtf(u[:,i+1])))
            #nondx2[:,i] = pyfftw.interfaces.numpy_fft.ifft(antialias(-1.*((fwnum(Mx))**2)*fftrtf(u[:,i+1]),-1.*((fwnum(Mx))**2)*fftrtf(u[:,i+1])))
            ent[i+1] = (2.*np.dot(wt, nondx[:,i+1]))-(2.*np.dot(wt, nondx2[:,i+1]))
        else:
            # Steady state: three-step (3rd-order) implicit multistep formula.
            nlin[:,i] = aalias(u[:,i])
            nlinspec[:,i] = -0.5*1j*kx*nlin[:,i]
            u[:,i+1] = ((18*u[:,i]) - (9*u[:,i-1]) + (2*u[:,i-2]) + (18*dt*nlinspec[:,i]) - (18*dt*nlinspec[:,i-1]) + (6*dt*nlinspec[:,i-2]))/((11*A) + (6*dt*k2))
            u[0,i+1] -= u[0,i+1]
            u[-1,i+1] -= u[-1,i+1]
            ur[:,i+1] = pyfftw.interfaces.numpy_fft.irfft(u[:,i+1]).real
            en[i+1] = np.dot(wt, ur[:,i+1]*ur[:,i+1])
            nondx[:,i+1] = alias(1j*fwnum(Mx)*fftrtf(u[:,i+1]))
            nondx2[:,i+1] = alias(-1.*((fwnum(Mx))**2)*fftrtf(u[:,i+1]))
            #nondx[:,i] = pyfftw.interfaces.numpy_fft.ifft(antialias(1j*fwnum(Mx)*fftrtf(u[:,i+1]),1j*fwnum(Mx)*fftrtf(u[:,i+1])))
            #nondx2[:,i] = pyfftw.interfaces.numpy_fft.ifft(antialias(-1.*((fwnum(Mx))**2)*fftrtf(u[:,i+1]),-1.*((fwnum(Mx))**2)*fftrtf(u[:,i+1])))
            ent[i+1] = (2.*np.dot(wt, nondx[:,i+1]))-(2.*np.dot(wt, nondx2[:,i+1]))
    return u, ur, en, ent
def kssol2(u1, u2, u3): # Solver for restart at any time step
    """Restart the solver from three consecutive spectral snapshots using BDF3.

    u1, u2, u3 : rfft coefficient vectors at three consecutive time levels
                 (u3 the most recent).
    Returns (u, ur, en, ent) as in kssol1.
    Relies on module-level Mx, x and on globals c (damping) and nu
    (viscosity used in the diagnostic) defined elsewhere in the file.
    """
    Tf = Decimal("150.0")
    h = Decimal("0.25")
    dt = float(h)
    nt = int(Tf/h)
    kx = wavenum(Mx)
    u = pyfftw.empty_aligned((Mx//2+1, nt+3), dtype='clongdouble')
    u[:,0] = u1
    u[:,1] = u2
    u[:,2] = u3
    ur = pyfftw.empty_aligned((Mx, nt+1), dtype='longdouble')
    ur[:,0] = pyfftw.interfaces.numpy_fft.irfft(u[:,2]).real
    nlin = pyfftw.empty_aligned((Mx//2+1, nt+3), dtype='clongdouble')
    nlin[:,0] = aalias(u[:,0])
    nlin[:,1] = aalias(u[:,1])
    nlin[:,2] = aalias(u[:,2])
    nlinspec = pyfftw.empty_aligned((Mx//2+1, nt+3), dtype='clongdouble')
    nlinspec[:,0] = -0.5*1j*kx*nlin[:,0]
    nlinspec[:,1] = -0.5*1j*kx*nlin[:,1]
    nlinspec[:,2] = -0.5*1j*kx*nlin[:,2]
    nondx = pyfftw.empty_aligned((Mx, nt+1), dtype='longdouble')
    nondx2 = pyfftw.empty_aligned((Mx, nt+1), dtype='longdouble')
    nondx[:,0] = alias(1j*fwnum(Mx)*fftrtf(u[:,2]))
    nondx2[:,0] = alias(-1.*((fwnum(Mx))**2)*fftrtf(u[:,2]))
    A = np.ones((Mx//2+1,))
    k2 = -(kx**2)+(kx**4)
    k2 += (c*A)
    wt = weights(x)
    en = np.empty((nt+1,), dtype='longdouble')
    en[0] = np.dot(wt, ur[:,0]*ur[:,0])
    ent = np.empty((nt+1,), dtype='longdouble')
    ent[0] = (2.*np.dot(wt, nondx[:,0]))-(2.*nu*np.dot(wt, nondx2[:,0]))
    for i in range(nt):
        print(i)
        # BDF3 step.  The newest known levels sit in columns i+2, i+1, i and
        # the new value belongs in column i+3.
        # BUGFIX: the original computed u[:,i+1] from u[:,i-1] and u[:,i-2]
        # (wrapping to uninitialized columns for i < 2) but then zeroed,
        # transformed, and differentiated u[:,i+3], which was never written
        # -- the restart solver advanced garbage data.
        u[:,i+3] = ((18*u[:,i+2]) - (9*u[:,i+1]) + (2*u[:,i]) + (18*dt*nlinspec[:,i+2]) - (18*dt*nlinspec[:,i+1]) + (6*dt*nlinspec[:,i]))/((11*A) + (6*dt*k2))
        # Zero the mean and Nyquist modes after every step.
        u[0,i+3] -= u[0,i+3]
        u[-1,i+3] -= u[-1,i+3]
        ur[:,i+1] = pyfftw.interfaces.numpy_fft.irfft(u[:,i+3]).real
        en[i+1] = np.dot(wt, ur[:,i+1]*ur[:,i+1])
        nlin[:,i+3] = aalias(u[:,i+3])
        nlinspec[:,i+3] = -0.5*1j*kx*nlin[:,i+3]
        nondx[:,i+1] = alias(1j*fwnum(Mx)*fftrtf(u[:,i+3]))
        nondx2[:,i+1] = alias(-1.*((fwnum(Mx))**2)*fftrtf(u[:,i+3]))
        ent[i+1] = (2.*np.dot(wt, nondx[:,i+1]))-(2.*nu*np.dot(wt, nondx2[:,i+1]))
    return u, ur, en, ent
Mx = 2**7 # Number of modes
L = 16 # Length of domain
# BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; it was
# only ever an alias for the builtin float.
dx = (2.*L*PI)/float(Mx)
x = np.arange(0., Mx)*dx
u0 = initc(x) # Initial condition
u, ur, en, ent = kssol1(u0)
# Time axis for plotting: 601 levels spanning [0, 150] (matches nt+1 = 601
# for h = 0.25).
t = np.linspace(0., 150., 601)
X, T = np.meshgrid(x, t, indexing='ij')
# Plot contour map of the solution
fig = plt.figure(figsize=(10, 5))
ax = fig.gca()
levels = np.linspace(ur.min(), ur.max(), 50)
cp = plt.contourf(X, T, ur, levels=levels, cmap=plt.cm.jet, extend='both')
cb = plt.colorbar(cp)
cb.cmap.set_under('black')
tick_locator = ticker.MaxNLocator(nbins=5)
cb.locator = tick_locator
cb.update_ticks()
ax.set_xlim([0., 100.])
ax.set_xticks([0., 20., 40., 60., 80., 100.])
ax.set_ylim([0., 150.])
ax.set_yticks([0., 50., 100., 150.])
ax.margins(0, 0)
ax.set_xlabel(r'$x$', fontweight='bold')
ax.set_ylabel(r'$t$', rotation=90., fontweight='bold')
plt.show()
|
import os
import argparse
# Build one sbatch script per data-split start offset.
parser = argparse.ArgumentParser()
parser.add_argument('-hours', type=int, help='number of gpu hours to request for job', default=24)
parser.add_argument('-allocation', type=str, help='Allocation name for billing', default='deepmpc')
parser.add_argument('-part', type=str, help='datasubset', default='d2')
parser.add_argument('-env', type=str, help='Name of conda environment for running code.', default='fiery')
args = parser.parse_args()

# Shared SLURM preamble; %j placeholders are expanded by SLURM itself, so
# they stay literal here.
template = (
    '#!/bin/bash\n'
    f'#SBATCH -A {args.allocation}\n'
    f'#SBATCH -t {args.hours}:00:00\n'
    '#SBATCH --gres=gpu:1\n'
    '#SBATCH -p shared_dlt\n'
    '#SBATCH -N 1\n'
    '#SBATCH -n 8\n'
    '#SBATCH -o %j.out\n'
    '#SBATCH -e %j.err\n'
    'source /etc/profile.d/modules.sh\n'
    'module purge\n'
    'module load cuda/9.2.148\n'
    'module load python/anaconda2\n'
    'ulimit\n'
    f'source activate {args.env}\n\n'
)

for start in range(0, 200, 10):
    cmd = f'python split_rnn_exp.py -partition {args.part} -start {start} '
    # One uniquely named sbatch script per start offset.
    with open(f'{start}.slurm', 'w') as cmdfile:
        cmdfile.write(template + cmd)
|
# String type examples
for word in ('풍선', '나비', 'ㅋㅋㅋㅋㅋㅋㅋ', 'ㅋ' * 7):
    print(word)
# Boolean examples: comparisons, arithmetic on True (True == 1), negation
for expression in (5 > 10, 5 < 10, True + 1, not True, not False, not (5 > 10)):
    print(expression)
from collections import defaultdict
import heapq
def ceil(x, y):
    """Integer ceiling of x / y (callers always pass y > 0)."""
    quotient, remainder = divmod(x, y)
    return quotient + (1 if remainder else 0)
# Read the graph: n nodes, m undirected edges, s = budget threshold for the
# accumulated 'a' quantity before penalties start applying.
n, m, s = map(int, input().split())
E = defaultdict(dict)
for i in range(m):
    # Edge u-v with parameters a and b; stored as (b, a) in both directions.
    u, v, a, b = map(int, input().split())
    E[u - 1][v - 1] = (b, a)
    E[v - 1][u - 1] = (b, a)
# penalty[v] = (p0, p1) pair per node; the ratio p1/p0 orders which node's
# penalty rate is used along a path.
penalty = [list(map(int, input().split())) for _ in range(n)]
INF = 2 * 10 ** 9
# Dijkstra-style search from node 0.  Heap entries carry extra state:
# (distance, node, accumulated a, index of the penalty node in effect).
D = [INF for v in range(n)]
D[0] = 0
Q = [(d, v, 0, 0) for v, d in enumerate(D)]
heapq.heapify(Q)
prev = [None for v in range(n)]
while len(Q) > 0:
    du, u, au, tu = heapq.heappop(Q)
    # NOTE(review): stale heap entries (du > D[u]) are not skipped here;
    # the D[v] > dv guard below keeps distances monotone, but confirm the
    # (au, tu) state carried by a stale entry cannot produce a wrong answer.
    p0, p1 = penalty[tu]
    ru = p1 / p0
    for v, (duv, auv) in E[u].items():
        pv0, pv1 = penalty[v]
        rv = pv1 / pv0
        av = au + auv
        dv = du + duv
        if ru > rv:
            # The neighbour's penalty rate is cheaper: charge at v's rate
            # once the accumulated a exceeds the budget s.
            if av > s:
                dv += ceil(av, pv0) * pv1
            tv = v
        else:
            # Keep charging at the currently effective node's rate.
            if av > s:
                dv += ceil(av, p0) * p1
            tv = tu
        if D[v] > dv:
            D[v] = dv
            prev[v] = u
            heapq.heappush(Q, (dv, v, av, tv))
# Print the best distance to every node except the source.
for i in range(1, n):
    print(D[i])
|
import numpy as np
import cv2
# Remove overlaid text from a grayscale image by thresholding + inpainting,
# then detect and draw contours.  Each stage pops an interactive window.
img = cv2.imread('WeChat Image_20200221164050.jpg', 0)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', img)
cv2.waitKey(0)
# Bright pixels (>=190) are assumed to be the overlaid characters.
ret, mask = cv2.threshold(img, 190, 255, cv2.THRESH_BINARY)
cv2.namedWindow('mask of characters', cv2.WINDOW_NORMAL)
cv2.imshow('mask of characters', mask)
cv2.waitKey(0)
dst = cv2.inpaint(img, mask, 20, cv2.INPAINT_NS)
cv2.namedWindow('Characters removed', cv2.WINDOW_NORMAL)
cv2.imshow('Characters removed', dst)
cv2.waitKey(0)
thresh = 100
max_thresh = 255
blur = cv2.blur(dst, (3, 3))
cv2.namedWindow('after blur', cv2.WINDOW_NORMAL)
cv2.imshow('after blur', blur)
cv2.waitKey(0)
# Detect edges using canny
canny_output = cv2.Canny(blur, thresh, thresh * 2, 3)
# Find contours.
# BUGFIX: cv2.findContours returns (image, contours, hierarchy) in OpenCV 3
# but only (contours, hierarchy) in OpenCV 4, so the old three-value unpack
# raised ValueError on OpenCV >= 4.  Taking the last two items works with
# either version.
found = cv2.findContours(canny_output, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours, hierarchy = found[-2:]
# OpenCV 4 no longer returns the (unmodified) image; fall back to the edge
# map, which is what the OpenCV 3 call displayed here anyway.
img2 = found[0] if len(found) == 3 else canny_output
cv2.namedWindow('findcontoursinblack', cv2.WINDOW_NORMAL)
cv2.imshow('findcontoursinblack', img2)
cv2.waitKey(0)
# NOTE(review): img is single-channel, so the (0,255,0) colour collapses to
# its first channel value -- confirm whether drawing on a BGR copy was meant.
cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
cv2.namedWindow('findcontours', cv2.WINDOW_NORMAL)
cv2.imshow('findcontours', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import re
import numpy as np
import joblib
# Disk cache for the expensive whole-corpus processing below.
memory = joblib.Memory('joblib')
# no apostrophe to remove e.g. "president's" office, no bracket to remove e.g. (V.O.)
# BUGFIX/cleanup: an earlier pattern was assigned and immediately
# overwritten (dead code); kept here for reference only:
#   r"\b[A-Z]{2}[A-Z\.]+(?: [A-Z\.]{3,})*\b"
# The active pattern captures runs of ALL-CAPS words at the start of a line
# or sentence -- screenplay character cues.
character_pattern = r"(?:^|(?:[a-z]+[.!?]\s+))([A-Z-]{2,}[\.\(\)\':;-]*(?:\s+[A-Z-]{2,}[\.\(\)\':;-]*)*)"
# Screenplay boilerplate that matches the cue pattern but is not a character.
stopwords = ['INT', 'EXT', 'CUT', 'BACK', 'END', 'ANGLE', 'CONTINUOUS', "CONT'D", 'RINGS', 'LAUGH', 'DAY', 'NIGHT', 'CONTINUED', 'CAMERA', 'VOICE', 'ROOM', 'OFFICE', 'APARTMENT', 'FALL', 'SPRING', 'WINTER', 'SUMMER', 'LATER', 'MOMENTS LATER', 'HOUSE', 'SAME', 'TIME', 'JUMP', 'CONTINUING']
patterns = [r'\b{}\b'.format(word) for word in stopwords]
def return_character_list(script_string, remove_stopwords=True):
    '''Return a dict mapping likely character names found in script_string
    to their occurrence counts (not a list, despite the function name).'''
    if remove_stopwords:
        # Blank out screenplay boilerplate so it cannot match the cue pattern.
        for stop_pattern in patterns:
            script_string = re.sub(stop_pattern, '_stopword_', script_string)
    # Normalize all whitespace to single spaces before matching.
    script_string = re.sub(r'[\t\n]', ' ', script_string)
    script_string = re.sub(r' +', ' ', script_string)
    names = [m.group(1).strip('()') for m in re.finditer(character_pattern, script_string)]
    uniq_names, counts = np.unique(names, return_counts=True)
    return dict(zip(uniq_names, counts))
def debug_cheap_tokenization(script_string, words_to_keep):
    '''Debug variant of cheap_tokenization: replaces every word in
    words_to_keep with a readable '_TOKEN_<word>' marker and returns the
    resulting list of whitespace-split tokens.

    BUGFIX: the original docstring claimed an ndarray + dict return.
    '''
    # Longest names first so "JOHN SMITH" is consumed before plain "JOHN".
    words_to_keep = list(reversed(sorted(words_to_keep, key=len)))
    for word in words_to_keep:
        # BUGFIX: re.escape -- character names frequently contain regex
        # metacharacters such as '.' or '(' (e.g. "MR. X", "(V.O.)"), which
        # previously matched unintended text or raised re.error.
        script_string = re.sub(re.escape(word), '_TOKEN_{} '.format(word), script_string)
    # TODO: remove a.b patterns
    script_string = re.sub('\t', ' ', script_string)
    script_string = re.sub(r' +', ' ', script_string)
    return script_string.split(' ')
def cheap_tokenization(script_string, words_to_keep):
    '''Very simple tokenization for all words in words_to_keep.

    Returns (activation, word_dict, inverted_dict): activation is an
    N-vector (N = number of whitespace tokens) holding the 1-based index of
    the recognized word at each position and 0 elsewhere; word_dict maps
    word -> index, inverted_dict index -> word.
    '''
    # Longest names first so "JOHN SMITH" is consumed before plain "JOHN".
    words_to_keep = list(reversed(sorted(words_to_keep, key=len)))
    for word_i, word in enumerate(words_to_keep):
        # BUGFIX: re.escape -- character names frequently contain regex
        # metacharacters such as '.' or '(' (e.g. "MR. X", "(V.O.)"), which
        # previously matched unintended text or raised re.error.
        script_string = re.sub(re.escape(word), '_TOKEN_{} '.format(word_i + 1), script_string)
    # TODO: remove a.b patterns
    script_string = re.sub('\t', ' ', script_string)
    script_string = re.sub(r' +', ' ', script_string)
    tokens = script_string.split(' ')
    activation = np.zeros(len(tokens))
    for i, token in enumerate(tokens):
        if token.startswith('_TOKEN_'):
            activation[i] = int(token.split('_')[-1])
    word_dict = {word: word_i + 1 for word_i, word in enumerate(words_to_keep)}
    inverted_dict = {val: word for word, val in word_dict.items()}
    return activation, word_dict, inverted_dict
def single_activation_to_multi_activation(single_act):
    '''Helper: expand a 1-D token-index activation into an (N, K) indicator
    array with one column per distinct nonzero index (index 0 = background).'''
    n_tokens = single_act.shape[0]
    n_characters = len(np.unique(single_act)) - 1
    multi_act = np.zeros((n_tokens, n_characters))
    for index in range(1, n_characters + 1):
        multi_act[single_act == index, index - 1] = 1
    return multi_act
def get_often_occurring_characters(char_occ_dict, frequency_threshold=10):
    """Return the character names whose count meets frequency_threshold."""
    frequent = []
    for character, count in char_occ_dict.items():
        if count >= frequency_threshold:
            frequent.append(character)
    return frequent
def is_duplicate(character, characters):
    """True if `character` equals some known name plus exactly one extra
    trailing character (e.g. "JOHNS" vs known "JOHN")."""
    for known in characters:
        if character.startswith(known) and len(character) == len(known) + 1:
            return True
    return False
def has_part_duplicate(character1, character2):
    """True when both names are multi-word, differ, and share the first word."""
    if character1 == character2:
        return False
    if ' ' not in character1 or ' ' not in character2:
        return False
    return character1.split(' ')[0] == character2.split(' ')[0]
def load_script_dict():
    """Read every data/*.txt screenplay; the key is the part of the filename
    after the last '_', with the '.txt' extension removed."""
    import glob
    script_dict = {}
    for file_path in glob.glob('data/*.txt'):
        key = file_path.split('_')[-1][:-4]
        with open(file_path, 'r') as handle:
            script_dict[key] = handle.read()
    return script_dict
def moving_average_act(multi_act, N=150):
    """Smooth each activation column with a length-N boxcar filter, then
    saturate to 1 every position that saw at least one active sample inside
    the window (i.e. value above 1/N)."""
    from scipy.ndimage import uniform_filter1d
    smoothed = uniform_filter1d(multi_act, N, axis=0, origin=N // 2 - 1)
    smoothed[smoothed > 1. / N] = 1.
    return smoothed
def lp_filter_act(multi_act, N=6, Wn=0.05):
    """Low-pass each activation column with an order-N Butterworth filter
    (normalized cutoff Wn), clamping negative ringing to 0.

    Causal filtering (sosfilt) is used on purpose -- linear phase is not
    needed here.
    """
    from scipy.signal import butter, sosfilt
    sos = butter(N, Wn, output='sos')
    columns = [sosfilt(sos, column)[:, None] for column in multi_act.T]
    filtered = np.hstack(columns)
    filtered[filtered < 0] = 0
    return filtered
def prune_characters(char_occ_dict, threshold=0.1):
    """Cluster similar character names (jaro-winkler string similarity plus
    average-linkage hierarchical clustering) and merge clusters closer than
    `threshold`.  Returns (replace_dict, merged_occurrence_dict) via
    get_merged_characters.  Note: names must stay sorted -- the linkage
    indices refer to the sorted order."""
    from dirty_cat import SimilarityEncoder
    from sklearn.preprocessing import minmax_scale
    from scipy.cluster.hierarchy import linkage
    from scipy.spatial.distance import squareform
    names = np.array(sorted(char_occ_dict.keys())).reshape(-1, 1)
    similarities = SimilarityEncoder(similarity='jaro-winkler').fit_transform(names)
    # Higher similarity -> smaller distance, rescaled into [0, 1].
    distances = minmax_scale(-similarities)
    condensed = squareform(distances, checks=False)
    Z = linkage(condensed, 'average', optimal_ordering=True)
    return get_merged_characters(Z, char_occ_dict, threshold=threshold)
# watch out that everything is sorted
def get_merged_characters(Z, char_occ_dict, threshold=0.1):
    '''Returns a dictionary that specifies which character names are to be replaced'''
    # Z is a scipy linkage matrix over the *sorted* names; each row
    # [a, b, dist, size] merges clusters a and b at distance dist, and the
    # singleton clusters 0..n-1 correspond to sorted(char_occ_dict).
    replace_dict = dict()
    char_occ_copy = {}
    # identify which clusters
    # Start from one singleton cluster per sorted name; appending each
    # sub-threshold merge keeps list indices aligned with scipy's cluster ids.
    bolstered_categories = [[cat] for cat in sorted(char_occ_dict.keys())]
    for z_i in Z:
        if z_i[2] < threshold:
            bolstered_categories.append(bolstered_categories[int(z_i[0])]+bolstered_categories[int(z_i[1])])
    # Walk clusters from the largest (latest merge) down so every name is
    # first claimed by the biggest cluster containing it.
    for cluster in bolstered_categories[::-1]:
        # get the most common character in cluster
        most_common = sorted([items for items in char_occ_dict.items() if items[0] in cluster], key=lambda x: x[1])[-1][0]
        # sum occurrences for thresholding based on frequency
        cluster_occ_sum = sum([occ for char, occ in char_occ_dict.items() if char in cluster])
        for character in cluster:
            if character not in replace_dict and character != most_common:
                replace_dict[character] = most_common
        # Record the merged count only for names that survive as canonical.
        if most_common not in char_occ_copy and most_common not in replace_dict:
            char_occ_copy[most_common] = cluster_occ_sum
    return replace_dict, char_occ_copy
def clean_script_string(script_string, replace_dict):
    """Replace every occurrence of each replace_dict key by its value.

    BUGFIX: keys are now treated as literal text (re.escape).  Character
    names often contain regex metacharacters such as '.' or '(', which
    previously made the substitution match unintended text (e.g. the
    pattern "MR. X" also matched "MRS X") or raise re.error.
    """
    for to_replace, replace_with in replace_dict.items():
        script_string = re.sub(re.escape(to_replace), replace_with, script_string)
    return script_string
def process_script(script_string, frequency_threshold=10, char_name_dist_threshold=0.1, **filter_kwargs):
    '''Full per-script pipeline.

    1. Count character-name occurrences.
    2. Optionally merge near-duplicate names (string-similarity clustering).
    3. Keep names occurring at least frequency_threshold times.
    4. Tokenize, expand to a per-character indicator matrix, and smooth it.

    Returns (smoothed_activation, char_dict, inv_char_dict).
    Raises AssertionError when fewer than two frequent characters remain
    (callers rely on this to skip unusable scripts).
    filter_kwargs are forwarded to moving_average_act (e.g. N=...).
    '''
    char_occ_dict = return_character_list(script_string)
    if char_name_dist_threshold:
        replace_dict, char_occ_dict = prune_characters(char_occ_dict, threshold=char_name_dist_threshold)
        script_string = clean_script_string(script_string, replace_dict)
    chars_to_keep = get_often_occurring_characters(char_occ_dict, frequency_threshold=frequency_threshold)
    activation, char_dict, inv_char_dict = cheap_tokenization(script_string, chars_to_keep)
    multi_act = single_activation_to_multi_activation(activation)
    # Sanity checks: one column per kept character, and at least two of them.
    assert multi_act.shape[1] == len(chars_to_keep)
    assert multi_act.shape[1] > 1
    return moving_average_act(multi_act, **filter_kwargs), char_dict, inv_char_dict
@memory.cache
def process_all_scripts():
    """Run process_script over every screenplay on disk, skipping scripts
    that fail the two-character sanity assertions.  Results are cached on
    disk by joblib."""
    script_dict = load_script_dict()
    act_dict = {}
    for script_name, script in script_dict.items():
        try:
            act_dict[script_name] = process_script(script)
        except AssertionError:
            # Fewer than two frequent characters -> script is unusable.
            continue
    n_failed = len(script_dict) - len(act_dict)
    print('Completed. Errors in {} of {} scripts.'.format(n_failed, len(script_dict)))
    return act_dict
def process_scripts_debug():
    """Debug variant of process_all_scripts: uncached, limited to the first
    100 scripts, and letting AssertionErrors propagate."""
    script_dict = load_script_dict()
    act_dict = {}
    for count, (script_name, script) in enumerate(script_dict.items(), start=1):
        if count > 100:
            break
        act_dict[script_name] = process_script(script)
    return act_dict
def plot_connectivity(multi_act, characters, func=None):
    """Plot the character-connectivity matrix over the activation columns.

    func : callable computing the connectivity from the (K, N) transposed
           activations; defaults to np.corrcoef.

    BUGFIX: the original accepted `func` but always called np.corrcoef,
    silently ignoring any user-supplied connectivity function.
    """
    from nilearn.plotting import plot_matrix
    if func is None:
        func = np.corrcoef
    corrs = func(multi_act.T)
    return plot_matrix(corrs, labels=characters, reorder=True)
def plot_from_dict(movie, act_dict):
    """Convenience wrapper: plot the connectivity matrix for one movie out
    of the dict produced by process_all_scripts."""
    multi_act, char_dict, _ = act_dict[movie]
    return plot_connectivity(multi_act, list(char_dict.keys()))
if __name__=='__main__':
    # Build (or load from the joblib cache) activations for every script.
    act_dict = process_all_scripts()
|
import os
import concurrent.futures
import pandas as pd
from collections import Counter
from time import time
from sys import platform
from tqdm import tqdm
def main():
    """Collect rows for satellites that reached revolution 10000.

    Reads every .csv.gz under $GP_HIST_PATH, keeps rows whose NORAD id is
    listed in ./ignore/rev100k.pkl with REV_AT_EPOCH == 10000, and pickles
    the combined frame to ./ignore/rev100k_2.pkl.
    """
    csv_store_path = os.environ['GP_HIST_PATH']
    start_time = time()
    idf = pd.read_pickle("./ignore/rev100k.pkl").sort_values(by=["EPOCH"])
    ids = idf[idf.REV_AT_EPOCH == 10000].NORAD_CAT_ID.unique()
    frames = []
    csv_names = sorted(name for name in os.listdir(f'{csv_store_path}/') if name.endswith(".csv.gz"))
    for name in tqdm(csv_names):
        frame = pd.read_csv(f"{csv_store_path}/{name}", compression='gzip', low_memory=False)
        frames.append(frame[frame.NORAD_CAT_ID.isin(ids)])
    final_df = pd.concat(frames)
    print(f'Took {time()-start_time}')
    final_df.to_pickle("./ignore/rev100k_2.pkl")
if __name__ == '__main__':
    # Script entry point: run the collection pipeline.
    main()
import urllib.request
import json
import csv
import re
import datetime
from time import sleep
import requests
import time
class CollectCommitUser():
    """Probe the GitHub API and print rate-limit reset information.

    NOTE(review): credentials are hard-coded below -- move them to
    environment variables or a config file and rotate them; secrets must
    never be committed to source control.
    """
    def Request(self,url):
        """GET `url` with HTTP basic auth; returns the requests Response."""
        id = 'rlrlaa123'
        pw = 'ehehdd009'
        # auth = base64.encodestring(id + ':' + pw)
        auth = id+':'+pw
        return requests.get(url,auth=(id,pw))
    def collectCommitUser(self):
        """Fetch one jquery commit and print when the API rate limit resets."""
        url_name = 'https://api.github.com/repos/jquery/jquery/commits/866ba43d0a8cd9807a39483df48d9366558cb6dd'
        print (url_name)
        # X-RateLimit-Reset is a Unix timestamp (as a string) of the moment
        # the rate limit window resets.
        content = self.Request(url_name).headers['X-RateLimit-Reset']
        print (time.time())
        print (content)
        sleeptime = float(content)-float(time.time())
        current_time = datetime.datetime.fromtimestamp(time.time())
        print(current_time.strftime('%Y-%m-%d %H:%M:%S'))
        rate_time = datetime.datetime.fromtimestamp(float(content))
        print (rate_time.strftime('%Y-%m-%d %H:%M:%S'))
        remaining_time = float(content)-float(time.time())
        print (remaining_time)
        # NOTE(review): fromtimestamp() of a *duration* yields a date near
        # the Unix epoch, not a wait time -- confirm this is intentional.
        wait_time = datetime.datetime.fromtimestamp(remaining_time)
        print (wait_time.strftime('%Y-%m-%d %H:%M:%S'))
        # print (sleeptime)
        # time.sleep(sleeptime)
        # user_name = content['commit']['committer']['name']
        # print (user_name)
        # url_id = 'https://api.github.com/users/'+str(user_name)
        # print (url_id)
        # content = self.Request(url_id).json()
        # print (content['id'])
# Script entry: run a single rate-limit probe against the GitHub API.
collector = CollectCommitUser()
collector.collectCommitUser()
import os
import flask
from flask import Flask, request
import telebot
import json
import urllib.request
import datetime
import pytz
import requests
def json_getweather():
    """Fetch the current WeatherLink observation and return a tuple of
    (wind_direction, human-readable weather summary in Ukrainian).

    NOTE(review): API credentials are embedded in the URL.
    """
    url = "https://api.weatherlink.com/v1/NoaaExt.json?user=001D0A0124D3&pass=zaharberkut2019&apiToken=ED47A9235AF1472A8B5BC594D830B39D"
    payload = urllib.request.urlopen(url).read().decode()
    obj = json.loads(payload)
    station = obj['davis_current_observation']
    # Imperial -> metric conversions.
    pressure = round(float(obj['pressure_mb']) * 0.75006375541921, 1)      # mb -> mmHg
    temperature = float(obj['temp_c'])
    wind_speed = round(float(obj['wind_mph']) * 0.44704, 1)                # mph -> m/s
    humidity = obj['relative_humidity']
    rain_ratehr = round(float(station['rain_rate_in_per_hr']) * 25.4, 2)   # in/h -> mm/h
    rain_dayin = round(float(station['rain_day_in']) * 25.4, 2)            # in -> mm
    wind_direction = obj['wind_dir']
    collecting_data = (f'Температура: {temperature} C\n'
                       f'{pressure} мм рт. ст.\n'
                       f'Вологість: {humidity}%\n'
                       f'Швидкість вітру: {wind_speed} м/с\n'
                       f'Напрямок вітру: {wind_direction}\n'
                       f'Опади: {rain_dayin} мм/день, {rain_ratehr} мм/год.')
    return wind_direction, collecting_data
# BUGFIX: the original called json_getweather() twice (two HTTP requests to
# the weather API) just to unpack the two return values, and could pair a
# wind direction with a summary from a *different* observation.  Fetch once
# and unpack both values together.
wind_direction, collecting_data = json_getweather()
|
import pysat
import datetime as dt
import matplotlib.pyplot as plt
import os  # needed for os.path.join below; the original never imported os

print('making inst')
ivm = pysat.Instrument(platform='cnofs', name='ivm')
start = dt.datetime(2014, 3, 1)
stop = dt.datetime(2014, 3, 2)
date_array = pysat.utils.time.create_date_range(start, stop)
output_resolution = (320, 240)
my_dpi = 192
# Matplotlib figsize is in inches: pixels / dpi.
figsize = tuple([pixels / my_dpi for pixels in output_resolution])
save_dir = '/Users/jonathonsmith/apex_animation/frames'
print('starting iteration')
for date in date_array:
    ivm.load(date=date)
    # BUGFIX: 'epmty' was a typo (AttributeError); the pandas attribute is
    # .empty -- skip days with no data.
    if ivm.data.empty:
        continue
    print('creating figure')
    fig, ax = plt.subplots(1, 1, figsize=figsize)
    # BUGFIX: removed fig.figsize(figsize) -- Figure has no figsize()
    # method (it raised AttributeError), and figsize= above already set it.
    ax.plot(ivm.data.index, ivm.data.apex_altitude)
    # BUGFIX: Axes has no set_lim(); the altitude bounds belong on the
    # y axis.
    ax.set_ylim(0, 900)
    # NOTE(review): the x data are timestamps, so an x-limit of (0, 24)
    # looks like hours-of-day units -- confirm the intended axis scale.
    ax.set_xlim(0, 24)
    ax.grid(True)
    fig.tight_layout()
    # BUGFIX: os.join does not exist; the function is os.path.join.
    filename = os.path.join(save_dir, str(date))
    plt.savefig(filename, dpi=my_dpi)
    plt.close()
|
from Logic.TextFunctions import *
from Logic.GetInput import *
class cd:
    """Context manager for changing the current working directory"""
    # NOTE(review): `os` is expected to arrive via the wildcard imports at
    # the top of the file -- confirm it is actually exported from there.
    def __init__(self, new_path):
        # Expand '~' so callers may pass home-relative paths.
        self.newPath = os.path.expanduser(new_path)
    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)
    def __exit__(self, etype, value, traceback):
        # Always restore the original directory, even when the body raised.
        os.chdir(self.savedPath)
def make_directory():
    """Ensure the current Windows user's 'Documents\\Card Tournament\\Replays'
    folder exists and return its path."""
    user = os.getlogin()
    base = 'C:\\Users\\{}\\Documents\\Card Tournament'.format(user)
    replays = base + '\\Replays'
    if not os.path.exists(base):
        os.makedirs(base)
    if not os.path.exists(replays):
        os.makedirs(replays)
    return replays
def make_file(patch, lis=None):
    """Save a replay as 'Game <n>.txt' (first unused n) in the Replays folder.

    patch : patch identifier written as the first line.
    lis   : list of moves; each move (an iterable of strings) becomes one
            comma-joined line.  Defaults to None rather than [] to avoid
            the shared-mutable-default pitfall.
    """
    if lis is None:
        lis = []
    path = make_directory()
    with cd(path):
        # Find the first unused replay number.
        number = 1
        while os.access(os.path.join(path, 'Game {}.txt'.format(number)), os.F_OK):
            number += 1
        filename = 'Game {}.txt'.format(number)
        lines = ['{}\n'.format(patch)]
        for move in lis:
            lines.append(','.join(move) + '\n')
        # BUGFIX: use a with-statement so the handle is closed even if the
        # write raises (the original leaked the descriptor on error).
        with open(os.path.join(path, filename), 'w') as file:
            file.write(''.join(lines))
def read_file(name):
    """Return all lines of replay file `name`, stripped of whitespace."""
    path = make_directory()
    with cd(path):
        # BUGFIX: with-statement -- the original leaked the file handle if
        # readlines() raised before close().
        with open(os.path.join(path, name), 'r') as file:
            return [line.strip() for line in file.readlines()]
def read_patch(name):
    """Return the first line (the patch identifier) of replay file `name`."""
    path = make_directory()
    with cd(path):
        # BUGFIX: with-statement -- the original leaked the handle on error.
        with open(os.path.join(path, name), 'r') as file:
            # readline() keeps the trailing newline; split('\n')[0] drops it.
            return file.readline().split('\n')[0]
class Replay:
    """Interactive replay-browser menu.

    Construction runs the whole input loop (see __init__); when the loop
    exits, self.return_value holds either None or a navigation dict telling
    the caller to start the Game screen in Replay mode.
    Depends on module-level helpers (SETTINGS, full_line, center_line,
    clear_screen, get_input, check_input) from the wildcard imports.
    """
    def __init__(self):
        self.open = True                    # loop keeps running while True
        self.path = make_directory()
        self.replays = None                 # list of replay names, sans .txt
        self.note = None                    # one-shot message shown at top
        self.return_value = None            # navigation result for the caller
        self.actions = dict()               # command -> accepted input forms
        self.get_replays()
        self.set_available_inputs()
        # NOTE(review): loop() returns self.return_value but the result is
        # discarded here -- callers must read .return_value instead.
        self.loop()
    def __repr__(self):
        """Render the menu screen as one string (printed each loop pass)."""
        text = ''
        width = SETTINGS['SCREEN_WIDTH']
        text += full_line('=', width)
        if self.note is not None:
            text += center_line(self.note, width)
            text += full_line('-', width)
        else:
            text += ' \n'
            text += full_line('-', width)
        if self.replays is not None:
            text += center_line('What replay would you like to watch?', width)
        text += full_line('-', width)
        if self.replays is not None:
            for i in range(len(self.replays)):
                text += '{}: {}'.format(i + 1, self.replays[i]).ljust(width)
        else:
            text += center_line('You have no saved replays, write return back to return to main menu', width)
        return text
    def get_replays(self):
        """Populate self.replays with saved replay names (without '.txt')."""
        list_of_replays = list(x for x in os.listdir(self.path) if x.endswith(".txt"))
        if len(list_of_replays) > 0:
            for i in range(len(list_of_replays)):
                list_of_replays[i] = list_of_replays[i][:-4]
            self.replays = list_of_replays
    def set_available_inputs(self):
        """Map each menu action to the input spellings that trigger it."""
        self.actions['Exit Game'] = [['q', 'g'], ['qg'], ['exit', 'game']]
        self.actions['Return Back'] = [['r', 'b'], ['rb'], ['return', 'back']]
        self.actions['Watch Replay'] = [['w', 'r'], ['wr'], ['watch', 'replay']]
    def do(self, what_to_do, extra_info=None):
        """Execute the recognized action; extra_info is the trailing input
        token (the replay number for 'Watch Replay')."""
        if what_to_do == 'Exit Game':
            quit()
        if what_to_do == 'Return Back':
            self.open = False
        # NOTE(review): this else pairs with the 'Watch Replay' test only,
        # so 'Exit Game'/'Return Back' also fall into the unknown-command
        # note below -- confirm that is the intended behavior.
        if what_to_do == 'Watch Replay':
            try:
                if 0 < int(extra_info) <= len(self.replays):
                    replay = 'Game {}.txt'.format(extra_info)
                    self.return_value = {'Goto': 'Game', 'Parameters': {'mode': 'Replay',
                                                                        'replay_moves': read_file(replay),
                                                                        'patch': read_patch(replay)}}
                    self.open = False
                else:
                    # NOTE(review): .format(len(self.replays)) is a no-op --
                    # the message contains no placeholder.
                    self.note = 'You have to write the number next to replay name to watch it'.format(len(self.replays))
            except (TypeError, ValueError):
                self.note = 'You have to write the number of replay you want to watch'
        else:
            self.note = 'You have written the unknown command! Try again!'
    def loop(self):
        """Redraw the menu and process input until an action closes it."""
        while self.open:
            clear_screen()
            print(self)
            self.note = None
            input_list = get_input()
            possible_action = check_input(input_list, self.actions)
            self.do(possible_action, input_list[-1])
        return self.return_value
|
# Author : ThammeGowda Narayanaswamy
# Email : tnarayan@usc.edu
# Student ID : 2074669439
# Subject : Naive Bayes Classifier (continuous features) implementation
# for CSCI 567 Fall 16 Homework 1
# Date : September 17, 2016
from __future__ import print_function
import sys
import os
import numpy as np
import math
class NBModel(object):
    """Gaussian naive Bayes classifier for continuous features.

    Per class it stores the prior P(l) and, per attribute, the sample mean
    and unbiased variance; prediction maximizes the log joint probability.
    """
    def __init__(self, train_X, train_Y):
        """Fit the model.

        train_X : (n_samples, n_attrs) ndarray of continuous features.
        train_Y : (n_samples,) array of integer class labels.
        """
        assert len(train_X) == len(train_Y)
        self.labels = set(train_Y)
        self.n_attrs = train_X.shape[1]  # number of feature columns
        total = train_X.shape[0]         # number of training rows
        self.prob_l = {}                 # class priors P(l)
        self.kb = {}                     # per-class {'mean': [...], 'var': [...]}
        for l in self.labels:
            self.kb[l] = {'mean': [], 'var': []}
            train_lX = train_X[train_Y == l]
            self.prob_l[l] = len(train_lX) / float(total)
            # Compute mean and variance for each attribute i.
            # (Cleanup: the original also created unused `means, variances`
            # lists here -- dead locals removed.)
            for i in range(self.n_attrs):
                lXi = train_lX[:, i]  # label-l values of attribute i
                # Mean = sum/N
                mean = np.sum(lXi) / len(lXi)
                # Unbiased sample variance = SquaredDifference/(N-1)
                variance = np.sum((lXi - mean)**2) / (len(lXi) - 1)
                self.kb[l]['mean'].append(mean)
                self.kb[l]['var'].append(variance)

    def predict(self, new_X):
        """Return the most probable class label for feature vector new_X.

        Works in log space:
            P(l | new_X) ~ P(l) * prod_i P(new_X_i | l)
        (the evidence P(new_X) is the same for every label, so it is
        ignored).  For a Gaussian attribute:
            log p(x|l) = -log(sqrt(2*pi*var)) - (x - mean)^2 / (2*var)
        """
        assert self.n_attrs == len(new_X)
        preds = {}
        for l in self.labels:
            means = self.kb[l]['mean']
            variances = self.kb[l]['var']
            prob = 0
            # Log prior of the class.
            prob += math.log(self.prob_l[l])
            for i, x in enumerate(new_X):
                if np.isclose(0, variances[i]):
                    # Degenerate attribute: all mass sits at the mean.
                    if np.isclose(x, means[i]):
                        prob += 0            # log(1) = 0
                    else:
                        prob += float('-inf')  # log(0) -> -inf
                else:
                    prob -= math.log(math.sqrt(2 * math.pi * variances[i]))
                    prob -= ((x - means[i]) ** 2) / (2 * variances[i])
            preds[l] = prob
        return max(preds, key=preds.get)
def read_dataset(path):
    """Load a comma-separated dataset: the first column is skipped (row id),
    the last column is the integer class label, the middle columns are the
    features.  Returns (X, Y)."""
    data = np.loadtxt(path, delimiter=',')
    features = data[:, 1:-1]
    labels = data[:, -1].astype(int)
    return features, labels
def test(model, csv_db_path):
    """Return the model's classification accuracy on the labeled CSV file."""
    test_X, test_Y = read_dataset(csv_db_path)
    pred_Y = np.array([model.predict(row) for row in test_X])
    n_correct = np.sum(test_Y == pred_Y)
    return n_correct * 1.0 / len(test_X)
def main(train_data_path, test_data_path):
    """Train a Gaussian naive Bayes model on the first CSV and report its
    accuracy on both the training and the test datasets."""
    train_X, train_Y = read_dataset(train_data_path)
    model = NBModel(train_X, train_Y)
    print("#Training data")
    print(" Accuracy = %f" % test(model, train_data_path))
    print("#Testing data")
    print(" Accuracy = %f" % test(model, test_data_path))
if __name__ == '__main__':
    # BUGFIX: the script printed its arguments but never ran the
    # classifier; invoke main() with the train/test CSV paths.
    print(sys.argv[1], sys.argv[2])
    main(sys.argv[1], sys.argv[2])
|
from prefect import task, Flow
@task
def hello_world():
    """Prefect task: print a greeting and return a message for downstream tasks."""
    print("Hello World!")
    return "Hello Prefect!"

@task
def prefect_say(s: str):
    """Prefect task: print the string produced by an upstream task."""
    print(s)

# Wire the two tasks into one flow: prefect_say consumes hello_world's result.
with Flow("my_first_flow") as f:
    r = hello_world()
    s2 = prefect_say(r)

# NOTE(review): visualize() requires graphviz to be installed; it renders
# the task DAG before run() executes the flow.
f.visualize()
f.run()
# Selection sort on n integers read from stdin.
n = int(input())
arr = [int(x) for x in input().split()]
print("Before sorting")
print(arr)
# Grow a sorted prefix: swap the minimum of the unsorted suffix into slot i.
for i in range(0, n - 1):
    # BUGFIX/idiom: the original used 'min' as a variable name, shadowing
    # the builtin min() function.
    min_idx = i
    for j in range(i + 1, n):
        # '>=' keeps the original's choice of the *last* minimal element.
        if arr[min_idx] >= arr[j]:
            min_idx = j
    # Idiomatic tuple swap instead of a temp variable.
    arr[min_idx], arr[i] = arr[i], arr[min_idx]
print("After sorting")
print(arr)
|
x = 3.1
# Integer part of x (still a float, e.g. 3.0 for 3.1).
y = x//1
# A value is "whole" exactly when nothing remains after removing the
# integer part.  Note: this tests the *value*, not the Python type --
# 3.0 would print "Es entero" despite being a float.
if (x-y ==0):
    print("Es entero")
else:
    print("es flotante")
|
'''
将所有输入字符大写输出
可以回车输入
'''
lines = []
# Keep reading until the user submits an empty line (bare Enter).
while True:
    entered = input()
    if not entered:
        break
    lines.append(entered.upper())
print(' '.join(lines))
'''
学习点:这里最主要的是回车输入, 如果简单的用upper, 不写循环,那么会出现回车输入直接输出内容,这里用一个while 循环和if循环,如果输入为空,直接停止循环。
学习一下while True 的作用吧
while True 在这里的作用就是每次输入回车之后回到while判断条件,然后继续输入,如果输入为空,那么直接break循环。
总结一下就是要继续执行某个操作,直到这个操作条件执行不下去,break,例如写一个登录程序,当输入用户名密码不正确的时候,你需要重新让他输入,那么,while True就可以让程序回到重新输入
的地方,继续执行,直到登录成功,break
关于if判断条件:表达式可以是一个单纯的布尔值或变量,也可以是比较表达式或逻辑表达式
当下面的值作为 bool 表达式时,会被解释器当作 False 处理:
False、None、0、""、()、[]、{}
本例中,if 的判断条件是一个字符串,如果字符串为空,那么判断条件为假,所以跳出循环
''' |
# Check whether a string consists only of digits.
# str.isdigit() returns True iff the string is non-empty and every
# character is a digit; otherwise False.
# BUGFIX/idiom: the original bound its example strings to the names
# 'str', 'str2', 'str3', shadowing the builtin str type; use descriptive
# names instead (printed output is unchanged).
digits_only = "12345"
print(digits_only.isdigit())  # True
with_letter = "12345s"
print(with_letter.isdigit())  # False
with_spaces = "123 456 79"
print(with_spaces.isdigit())  # False
# Read two m x n integer matrices and print their element-wise sum.
m = int(input("Enter rows: "))
n = int(input("Enter columns: "))
print("Enter array 1: ")
arr1 = [list(map(int, input().split())) for _ in range(m)]
print("Enter array 2: ")
arr2 = [list(map(int, input().split())) for _ in range(m)]
for i in range(m):
    # Each element is followed by a single space, then the row ends.
    row_text = "".join("{} ".format(arr1[i][j] + arr2[i][j]) for j in range(n))
    print(row_text)
|
from product.form import Productform
from django.shortcuts import render,redirect
from django.http import HttpResponse,JsonResponse
from rest_framework import serializers
from rest_framework.parsers import JSONParser
from .models import Product
from .serializer import ProductSerializer
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from rest_framework.response import Response, responses
from rest_framework import status
from rest_framework.views import APIView
@api_view(['GET','POST'])
def product_list(request):
    """List all products (GET) or create one (POST); renders product.html
    with a creation form plus the full product list."""
    # Bind the form to whatever POST data/files arrived (empty on a GET).
    prod=Productform(request.POST, request.FILES)
    if request.method == 'GET':
        # NOTE(review): validating and saving the form inside the *GET*
        # branch means a GET request can create a product whenever the
        # bound data validates -- confirm this is intended; creation
        # normally belongs only in the POST branch below.
        if prod.is_valid():
            prod.save()
        products = Product.objects.all()
        # NOTE(review): this serializer is built but never used (the
        # Response below is commented out).
        serializer = ProductSerializer(products,many=True)
        #return Response(serializer.data)
        context = {'prod':prod, 'products':products}
        return render(request, 'product.html', context)
    elif request.method == 'POST':
        serializer = ProductSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            products = Product.objects.all()
            return render(request, 'product.html', {'prod':prod, 'products':products})
        return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET','PUT'])
def product_details(request, pk):
    """Retrieve (GET) or update (PUT) a single Product as JSON."""
    try:
        product = Product.objects.get(pk=pk)
    except Product.DoesNotExist:
        return HttpResponse(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        return Response(ProductSerializer(product).data)
    # Only PUT can reach this point (api_view restricts the methods).
    serializer = ProductSerializer(product, data=request.data)
    if not serializer.is_valid():
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    serializer.save()
    return Response(serializer.data)
def product_delete(request, pk):
    """Delete the product with the given pk, then return to the list page."""
    try:
        product = Product.objects.get(pk=pk)
    except Product.DoesNotExist:
        return HttpResponse(status=status.HTTP_404_NOT_FOUND)
    product.delete()
    # Cleanup: the original rebuilt a bound Productform and re-queried all
    # products here, then discarded both before redirecting -- dead work
    # removed.
    return redirect("/product/")
def product_edit(request, pk):
    """Render the edit form page for a single product."""
    context = {'product': Product.objects.get(pk=pk)}
    return render(request, 'edit_product.html', context)
def product_update(request, pk):
    """Apply posted form data to an existing product, then redirect.

    Returns 404 for a missing pk (consistent with ``product_details`` and
    ``product_delete``; the original raised an unhandled ``DoesNotExist``).
    Invalid submissions are silently dropped and the user is redirected
    back to the list either way, matching the original behavior. The debug
    ``print`` and the commented-out serializer draft were removed.
    """
    try:
        product = Product.objects.get(pk=pk)
    except Product.DoesNotExist:
        return HttpResponse(status=status.HTTP_404_NOT_FOUND)
    form = Productform(request.POST, instance=product)
    if form.is_valid():
        form.save()
    return redirect("/product/")
# -*- coding: utf-8 -*-
{
'name': "VietERP Mailbox",
'summary': """
Simple Odoo mailbox""",
'description': """
1. Main features:
- Sending email from odoo
- Receiving email from odoo
- Composing email from odoo
- Choosing template when composing email
2. Why choose this?
- Quickly compose email by using template
- Don't need to setup any email client on computer
- Can access it from any where
3. Settings:
- To receiving email, you should follow below:
Step1:
<img src="/vieterp_mailbox/static/description/step1.png"/>
Step2:
<img src="/vieterp_mailbox/static/description/step2.png"/>
Step3:
<img src="/vieterp_mailbox/static/description/step3.png"/>
4. Support:
For any feedback, please send email to info@vieterp.net
""",
'author': "VietERP / Sang",
'website': "http://www.vieterp.net",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/odoo/addons/base/module/module_data.xml
# for the full list
'category': 'Discuss',
'version': '1.1',
# any module necessary for this one to work correctly
'depends': [
'base',
'mail',
],
# always loaded
'data': [
'data/cron.xml',
'data/data.xml',
'security/record_rules.xml',
'security/ir.model.access.csv',
'views/mail_mail_views.xml',
# 'views/fetchmail_server_views.xml',
'views/mail_server_source_views.xml',
],
# only loaded in demonstration mode
'demo': [
],
'images': ['static/description/icon.png'],
'installable': True,
'application': True,
}
|
import requests
import os
import ast
import random
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
    """Assemble the Alexa 'response' payload.

    The spoken output is plain text, mirrored onto a Simple card titled
    *title*; *reprompt_text* is spoken if the user stays silent, and
    *should_end_session* controls whether the session closes.
    """
    speech = {'type': 'PlainText', 'text': output}
    card = {'type': 'Simple', 'title': title, 'content': output}
    reprompt = {'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}}
    return {
        'outputSpeech': speech,
        'card': card,
        'reprompt': reprompt,
        'shouldEndSession': should_end_session,
    }
def build_response(session_attributes, speechlet_response):
    """Wrap a speechlet payload in the top-level Alexa response envelope."""
    envelope = {'version': '1.0'}
    envelope['sessionAttributes'] = session_attributes
    envelope['response'] = speechlet_response
    return envelope
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
    """Build the launch/help response; the session stays open so the user
    can follow up with a command."""
    speech_output = "Welcome to WordBox. Ask for a synonym, an antonym, rhyme, definition, and more for a word by saying something like 'synonym for happy'. Hear all commands by saying 'all commands'."
    # Spoken again if the user does not reply or is not understood.
    reprompt_text = "Ask for a synonym, antonym, part of speech, rhyme, definition, syllables, frequency, or pronunciation!"
    speechlet = build_speechlet_response("Welcome", speech_output, reprompt_text, False)
    return build_response({}, speechlet)
def get_all_commands():
    """Enumerate every supported command; keeps the session open."""
    speech_output = "You can ask for a synonym, antonym, rhyme, definition, part of speech, syllables, or frequency of a word by saying something like 'synonym for happy'. You can also ask for a random synonym, antonym, definition, or rhyme by saying something like 'random synonym for happy'. If you want all of them, say something like 'all synonyms for happy.'"
    # Spoken again if the user does not reply or is not understood.
    reprompt_text = "Ask for a synonym, antonym, part of speech, rhyme, definition, syllables, or frequency of a word! Or say 'all commands' to get hear all commands."
    speechlet = build_speechlet_response("All Commands", speech_output, reprompt_text, False)
    return build_response({}, speechlet)
def handle_session_end_request():
    """Say goodbye and close the session (shouldEndSession=True)."""
    speechlet = build_speechlet_response("Session Ended", "Bye!", None, True)
    return build_response({}, speechlet)
def get_synonym(intent, session):
    """Speak the most common synonym (first WordsAPI result) for the word
    in the WORD slot; always ends the session."""
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Synonym", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING SYNONYM OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/synonyms"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses the body once and handles JSON true/false/null, which
    # the original repeated ast.literal_eval(r.text) calls could not.
    synonyms = r.json()["synonyms"]
    if not synonyms:
        speech_output = "Sorry, I couldn't find any synonyms for " + word + "."
    else:
        speech_output = "A common synonym for " + word + " is " + synonyms[0] + "."
    response = build_speechlet_response("Synonym", speech_output, None, True)
    return build_response({}, response)
def get_random_synonym(intent, session):
    """Speak a randomly chosen WordsAPI synonym for the WORD slot; ends the session."""
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Synonym", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING RANDOM SYNONYM OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/synonyms"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses once and handles JSON literals, unlike ast.literal_eval.
    synonyms = r.json()["synonyms"]
    if not synonyms:
        speech_output = "Sorry, I couldn't find any synonyms for " + word + "."
    else:
        speech_output = "A synonym for " + word + " is " + random.choice(synonyms) + "."
    response = build_speechlet_response("Synonym", speech_output, None, True)
    return build_response({}, response)
def get_all_synonyms(intent, session):
    """Speak every WordsAPI synonym for the WORD slot as one sentence; ends the session."""
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Synonyms", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING ALL SYNONYMS OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/synonyms"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses once and handles JSON literals, unlike ast.literal_eval.
    synonyms_list = r.json()["synonyms"]
    if not synonyms_list:
        speech_output = "Sorry, I couldn't find any synonyms for " + word + "."
    elif len(synonyms_list) == 1:
        speech_output = "The only synonym for " + word + " is " + synonyms_list[0] + "."
    else:
        # join() takes the slice directly; the identity comprehension was redundant.
        speech_output = "The synonyms for " + word + " are " + ", ".join(synonyms_list[:-1]) + ", and " + synonyms_list[-1] + "."
    response = build_speechlet_response("Synonyms", speech_output, None, True)
    return build_response({}, response)
def get_antonym(intent, session):
    """Speak the most common antonym (first WordsAPI result) for the WORD slot; ends the session."""
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Antonym", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING ANTONYM OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/antonyms"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses once and handles JSON literals, unlike ast.literal_eval.
    antonyms = r.json()["antonyms"]
    if not antonyms:
        speech_output = "Sorry, I couldn't find any antonyms for " + word + "."
    else:
        speech_output = "A common antonym for " + word + " is " + antonyms[0] + "."
    response = build_speechlet_response("Antonym", speech_output, None, True)
    return build_response({}, response)
def get_random_antonym(intent, session):
    """Speak a randomly chosen WordsAPI antonym for the WORD slot; ends the session."""
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Antonym", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING RANDOM ANTONYM OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/antonyms"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses once and handles JSON literals, unlike ast.literal_eval.
    antonyms = r.json()["antonyms"]
    if not antonyms:
        speech_output = "Sorry, I couldn't find any antonyms for " + word + "."
    else:
        speech_output = "An antonym for " + word + " is " + random.choice(antonyms) + "."
    response = build_speechlet_response("Antonym", speech_output, None, True)
    return build_response({}, response)
def get_all_antonyms(intent, session):
    """Speak every WordsAPI antonym for the WORD slot as one sentence; ends the session."""
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Antonyms", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING ALL ANTONYMS OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/antonyms"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses once and handles JSON literals, unlike ast.literal_eval.
    antonyms_list = r.json()["antonyms"]
    if not antonyms_list:
        speech_output = "Sorry, I couldn't find any antonyms for " + word + "."
    elif len(antonyms_list) == 1:
        speech_output = "The only antonym for " + word + " is " + antonyms_list[0] + "."
    else:
        # join() takes the slice directly; the identity comprehension was redundant.
        speech_output = "The antonyms for " + word + " are " + ", ".join(antonyms_list[:-1]) + ", and " + antonyms_list[-1] + "."
    response = build_speechlet_response("Antonyms", speech_output, None, True)
    return build_response({}, response)
def get_pos(intent, session):
    """Speak the part of speech of the WORD slot's first WordsAPI result; ends the session."""
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Part of Speech", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING PART OF SPEECH OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses once and handles JSON literals, unlike ast.literal_eval.
    # A word with no results raises here; on_intent's handler turns that into
    # an error response (same as the original).
    speech_output = word + " is a " + r.json()["results"][0]["partOfSpeech"] + "."
    response = build_speechlet_response("Part of Speech", speech_output, None, True)
    return build_response({}, response)
def get_rhyme(intent, session):
    """Speak the first WordsAPI rhyme for the WORD slot; ends the session."""
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Rhyme", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING RHYME OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/rhymes"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses once and handles JSON literals, unlike ast.literal_eval.
    rhymes = r.json()["rhymes"]
    if not rhymes:
        speech_output = "Sorry, I couldn't find anything that rhymes with " + word + "."
    else:
        # The API groups rhymes under the catch-all "all" key.
        speech_output = "A common rhyme for " + word + " is " + rhymes["all"][0] + "."
    response = build_speechlet_response("Rhyme", speech_output, None, True)
    return build_response({}, response)
def get_random_rhyme(intent, session):
    """Speak a randomly chosen WordsAPI rhyme for the WORD slot; ends the session."""
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Rhyme", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING RANDOM RHYME OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/rhymes"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses once and handles JSON literals, unlike ast.literal_eval.
    rhymes = r.json()["rhymes"]
    if not rhymes:
        speech_output = "Sorry, I couldn't find anything that rhymes with " + word + "."
    else:
        speech_output = "A rhyme for " + word + " is " + random.choice(rhymes["all"]) + "."
    response = build_speechlet_response("Rhyme", speech_output, None, True)
    return build_response({}, response)
def get_definition(intent, session):
    """Speak the first (most popular) WordsAPI definition for the WORD slot; ends the session."""
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Definition", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING DEFINITION OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/definitions"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses once and handles JSON literals, unlike ast.literal_eval.
    definitions = r.json()["definitions"]
    if not definitions:
        speech_output = "Sorry, I couldn't find any definitions for " + word + "."
    else:
        # Each entry is a dict with a "definition" key.
        speech_output = "The most popular definition for " + word + " is " + definitions[0]["definition"] + "."
    response = build_speechlet_response("Definition", speech_output, None, True)
    return build_response({}, response)
def get_random_definition(intent, session):
    """Speak a randomly chosen WordsAPI definition for the WORD slot; ends the session."""
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Definition", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING RANDOM DEFINITION OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/definitions"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses once and handles JSON literals, unlike ast.literal_eval.
    definitions = r.json()["definitions"]
    if not definitions:
        speech_output = "Sorry, I couldn't find any definitions for " + word + "."
    else:
        speech_output = "A definition for " + word + " is " + random.choice(definitions)["definition"] + "."
    response = build_speechlet_response("Definition", speech_output, None, True)
    return build_response({}, response)
def get_all_definitions(intent, session):
    """Speak every WordsAPI definition for the WORD slot as one sentence; ends the session."""
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Definitions", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING ALL DEFINITIONS OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/definitions"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses once and handles JSON literals, unlike ast.literal_eval.
    definitions_list = r.json()["definitions"]
    if not definitions_list:
        speech_output = "Sorry, I couldn't find any definitions for " + word + "."
    elif len(definitions_list) == 1:
        # Fixed: each entry is a dict; the original concatenated the dict
        # itself, raising TypeError whenever a word had exactly one definition.
        speech_output = "The only definition for " + word + " is " + definitions_list[0]["definition"] + "."
    else:
        speech_output = word + " has " + str(len(definitions_list)) + " definitions. It could mean " + ", ".join([definition["definition"] for definition in definitions_list[:-1]]) + ", or " + definitions_list[-1]["definition"] + "."
    response = build_speechlet_response("Definitions", speech_output, None, True)
    return build_response({}, response)
def get_syllables(intent, session):
    """Speak the syllable count and list for the WORD slot; ends the session."""
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Syllables", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING SYLLABLES OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/syllables"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses once and handles JSON literals, unlike ast.literal_eval.
    syllables = r.json()["syllables"]
    if not syllables:
        speech_output = "Sorry, I couldn't find any syllables for " + word + "."
    else:
        speech_output = "There are " + str(syllables["count"]) + " syllables in " + word + ". They are: " + ", ".join(syllables["list"]) + "."
    response = build_speechlet_response("Syllables", speech_output, None, True)
    return build_response({}, response)
# NOTE: Not used in production
def get_pronunciation(intent, session):
    """Speak the pronunciation(s) of the WORD slot; ends the session.

    NOTE: not used in production. Assumes the API returns a dict of
    {part_of_speech_or_"all": phonetic_string} — TODO confirm; some WordsAPI
    entries reportedly return a plain string here.
    """
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Pronunciation", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING PRONUNCIATION OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/pronunciation"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses once and handles JSON literals, unlike ast.literal_eval.
    pronunciation = r.json()["pronunciation"]
    if not pronunciation:
        # Fixed typo in the spoken message ("pronunications") and made the
        # branches a proper if/elif chain instead of independent ifs.
        speech_output = "Sorry, I couldn't find any pronunciations for " + word + "."
    elif len(pronunciation) == 1:
        # Single entry: only the catch-all "all" key is present.
        speech_output = "You can pronounce " + word + " as " + pronunciation["all"] + "."
    else:
        # One sentence per part of speech, skipping the catch-all "all" key.
        speech_output = "".join([(("As a" + ("n " if key in ["adjective", "adverb"] else " ") + key + " it's pronounced as " + value + ". ") if key != "all" else "") for key, value in pronunciation.items()])
    response = build_speechlet_response("Pronunciation", speech_output, None, True)
    return build_response({}, response)
def get_frequency(intent, session):
    """Speak the per-million-words usage frequency of the WORD slot; ends the session."""
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Frequency", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING FREQUENCY OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/frequency"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # r.json() parses once and handles JSON literals, unlike ast.literal_eval.
    frequency = r.json()["frequency"]
    if not frequency:
        speech_output = "Sorry, I couldn't find the frequency of " + word + "."
    else:
        speech_output = word + " is used about " + str(int(frequency["perMillion"])) + " times per million words in writing."
    response = build_speechlet_response("Frequency", speech_output, None, True)
    return build_response({}, response)
# --------------- Events ------------------
def on_session_started(session_started_request, session):
    """Log the start of a new session; no response is produced."""
    print("on_session_started requestId={}, sessionId={}".format(
        session_started_request['requestId'], session['sessionId']))
def on_launch(launch_request, session):
    """Handle a bare skill launch (no intent) by returning the welcome response."""
    print("on_launch requestId={}, sessionId={}".format(
        launch_request['requestId'], session['sessionId']))
    return get_welcome_response()
def on_intent(intent_request, session):
    """Dispatch an IntentRequest to the matching handler.

    Unknown intents get an explanatory error response. Any exception raised
    by a handler (typically: the word is not in the dictionary) is answered
    with a generic error response.
    """
    print("on_intent requestId=" + intent_request['requestId'] +
          ", sessionId=" + session['sessionId'])
    intent = intent_request['intent']
    intent_name = intent_request['intent']['name']
    print("---INTENT: " + intent_name)
    # Table-driven dispatch for the word-lookup intents; each handler
    # takes (intent, session).
    word_handlers = {
        "GetSynonymIntent": get_synonym,
        "GetRandomSynonymIntent": get_random_synonym,
        "GetAllSynonymsIntent": get_all_synonyms,
        "GetAntonymIntent": get_antonym,
        "GetRandomAntonymIntent": get_random_antonym,
        "GetAllAntonymsIntent": get_all_antonyms,
        "GetPOSIntent": get_pos,
        "GetRhymeIntent": get_rhyme,
        "GetRandomRhymeIntent": get_random_rhyme,
        "GetDefinitionIntent": get_definition,
        "GetRandomDefinitionIntent": get_random_definition,
        "GetAllDefinitionsIntent": get_all_definitions,
        "GetSyllablesIntent": get_syllables,
        "GetFrequencyIntent": get_frequency,
        "GetPronunciationIntent": get_pronunciation,
    }
    try:
        if intent_name in word_handlers:
            return word_handlers[intent_name](intent, session)
        elif intent_name == "GetAllCommandsIntent":
            return get_all_commands()
        elif intent_name == "AMAZON.HelpIntent":
            return get_welcome_response()
        elif intent_name in ("AMAZON.CancelIntent", "AMAZON.StopIntent"):
            return handle_session_end_request()
        else:
            response = build_speechlet_response("Error", "Sorry, I don't know that command. I can find definitions, synonyms, antonyms, and more if you say something like 'a synonym for happy'.", None, True)
            return build_response({}, response)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; handler errors still yield a spoken apology.
        response = build_speechlet_response("Error", "Sorry, I don't know that word!", None, True)
        return build_response({}, response)
def on_session_ended(session_ended_request, session):
    """Log the end of a session.

    Not invoked when the skill itself returns should_end_session=True.
    Cleanup logic, if ever needed, belongs here.
    """
    print("on_session_ended requestId={}, sessionId={}".format(
        session_ended_request['requestId'], session['sessionId']))
# --------------- Main handler ------------------
def lambda_handler(event, context):
    """Lambda entry point: route the incoming Alexa request by its type
    (LaunchRequest, IntentRequest, SessionEndedRequest).

    The JSON body of the request arrives in *event*; *context* is unused.
    To lock this function to a single skill, validate
    event['session']['application']['applicationId'] against your skill's
    application ID and raise ValueError on mismatch.
    """
    print("event.session.application.applicationId=" + event['session']['application']['applicationId'])
    session = event['session']
    request = event['request']
    if session['new']:
        on_session_started({'requestId': request['requestId']}, session)
    request_type = request['type']
    if request_type == "LaunchRequest":
        return on_launch(request, session)
    if request_type == "IntentRequest":
        return on_intent(request, session)
    if request_type == "SessionEndedRequest":
        return on_session_ended(request, session)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.