| content | origin | type |
|---|---|---|
| stringlengths 0 to 1.05M | stringclasses 2 values | stringclasses 2 values |
#
# copyright_notice
#
"""smap wrappers
"""
__all__ = ()
from opengltk.extent import smaplib, utillib
| nilq/baby-python | python |
import pytest
from lj506.skeleton import fib, main
__author__ = "Eric Busboom"
__copyright__ = "Eric Busboom"
__license__ = "MIT"
def test_access():
"""API Tests"""
| nilq/baby-python | python |
import logging
import uuid
from assistant.orders.models import LineItem
from .models import Stock
from .exceptions import InsufficientStock
logger = logging.getLogger(__name__)
def process_simple_stock_allocation(**data):
stocks = Stock.objects.filter(product_variant=data.get("variant"))
line_items = data.get("orders", None)
assigned_to = []
for line_item in line_items:
quantity_required = line_item.quantity_unfulfilled
for stock in stocks:
try:
done = stock.allocate_to_order_line_item(
line_item=line_item, quantity=quantity_required
)
                if done:
                    assigned_to.append(line_item)
                    # Stop checking other stocks once this line item has been allocated.
                    break
except InsufficientStock as ins:
logger.info(
"Allocating to order %s but ran out of stock %s continue the loop. %s",
line_item,
stock,
ins
)
continue
return assigned_to
def allocate_stock(guid: uuid.UUID) -> Stock:
stocks = Stock.objects.filter(product_variant__guid=guid)
    line_items = LineItem.objects.filter(variant__guid=guid)
    for item in line_items:
for stock in stocks:
try:
stock.allocate_to_order_line_item(
line_item=item,
)
except InsufficientStock as ins:
logger.info(
"Allocating to order %s but ran out of stock %s continue the loop. %s",
item,
stock,
ins
)
return stocks
| nilq/baby-python | python |
from flask import request, render_template, make_response
from datetime import datetime
import psycopg2
import os
#__ Configure access to .env file
from dotenv import load_dotenv
from pathlib import Path # python3 only
def get_query_by_id(id_to_update):
"""
Get all ranks from pathogen table
"""
#Load env
load_dotenv()
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
dbname = os.environ.get('DBCALL')
username = os.environ.get('DBUSER')
password = os.environ.get('DBPASS')
dbhost = os.environ.get('DBHOST')
con = psycopg2.connect(database=dbname, user=username,
password=password, host=dbhost, port=5432)
with con:
cur = con.cursor()
postgreSQL_select_Query = "SELECT * FROM pathogens WHERE id = %s"
try:
cur.execute(postgreSQL_select_Query, (id_to_update,))
            mapped_query = cur.fetchall()
            return mapped_query
except (Exception, psycopg2.Error) as error:
print("Error fetching data from PostgreSQL table", error)
def update_query_by_id(id_to_update, organism, taxonid, rank, gram, aerobe, habitat, isolation, pathostate):
"""
Commit the changes to the database
"""
dbname = os.environ.get('DBCALL')
username = os.environ.get('DBUSER')
password = os.environ.get('DBPASS')
dbhost = os.environ.get('DBHOST')
    con = None
    try:
        con = psycopg2.connect(database=dbname, user=username, password=password, host=dbhost, port=5432)
cur = con.cursor() # cursor
# insert data
now=datetime.now()
timestamp= now.strftime("%Y-%m-%d %H:%M:%S")
#Update organism field
sql_update_query = """Update pathogens set organism = %s where id = %s"""
cur.execute(sql_update_query, (str(organism), id_to_update))
print(sql_update_query,id_to_update)
con.commit()
#Update taxonId field
sql_update_query = """Update pathogens set taxonid = %s where id = %s"""
cur.execute(sql_update_query, (str(taxonid), id_to_update))
con.commit()
#Update rank field
sql_update_query = """Update pathogens set rank = %s where id = %s"""
cur.execute(sql_update_query, (str(rank), id_to_update))
con.commit()
#Update gram field
sql_update_query = """Update pathogens set gram = %s where id = %s"""
cur.execute(sql_update_query, (str(gram), id_to_update))
con.commit()
#Update aerobe field
sql_update_query = """Update pathogens set aerobe = %s where id = %s"""
cur.execute(sql_update_query, (str(aerobe), id_to_update))
con.commit()
#Update habitat field
sql_update_query = """Update pathogens set habitat = %s where id = %s"""
cur.execute(sql_update_query, (str(habitat), id_to_update))
con.commit()
#Update isolation field
sql_update_query = """Update pathogens set isolation = %s where id = %s"""
cur.execute(sql_update_query, (str(isolation), id_to_update))
con.commit()
#Update pathostate field
sql_update_query = """Update pathogens set pathostate = %s where id = %s"""
cur.execute(sql_update_query, (str(pathostate), id_to_update))
con.commit()
        #Update timestamp field
sql_update_query = """Update pathogens set timestamp = %s where id = %s"""
cur.execute(sql_update_query, (str(timestamp), id_to_update))
con.commit()
messageOk="Ok"
print(messageOk)
return messageOk
    except psycopg2.Error as err:  # if error
        messageOk = "Database error"
        print(messageOk, err)
        return messageOk
    finally:
        if con is not None:
            con.close()  # close the connection
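# A more compact variant (an illustrative sketch, not part of the original code):
# the nine single-column UPDATE/commit round trips above could be collapsed into
# one parameterized statement and a single commit, for example:
#
#     sql_update_query = """UPDATE pathogens
#                           SET organism = %s, taxonid = %s, rank = %s, gram = %s,
#                               aerobe = %s, habitat = %s, isolation = %s,
#                               pathostate = %s, timestamp = %s
#                           WHERE id = %s"""
#     cur.execute(sql_update_query, (str(organism), str(taxonid), str(rank),
#                                    str(gram), str(aerobe), str(habitat),
#                                    str(isolation), str(pathostate),
#                                    str(timestamp), id_to_update))
#     con.commit()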
| nilq/baby-python | python |
##
## Copyright (C) 2017, Amit Aides, all rights reserved.
##
## This file is part of Camera Network
## (see https://bitbucket.org/amitibo/cameranetwork_git).
##
## Redistribution and use in source and binary forms, with or without modification,
## are permitted provided that the following conditions are met:
##
## 1) The software is provided under the terms of this license strictly for
## academic, non-commercial, not-for-profit purposes.
## 2) Redistributions of source code must retain the above copyright notice, this
## list of conditions (license) and the following disclaimer.
## 3) Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions (license) and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## 4) The name of the author may not be used to endorse or promote products derived
## from this software without specific prior written permission.
## 5) As this software depends on other libraries, the user must adhere to and keep
## in place any licensing terms of those libraries.
## 6) Any publications arising from the use of this software, including but not
## limited to academic journal and conference publications, technical reports and
## manuals, must cite the following works:
## Dmitry Veikherman, Amit Aides, Yoav Y. Schechner and Aviad Levis, "Clouds in The Cloud" Proc. ACCV, pp. 659-674 (2014).
##
## THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
## WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
## MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
## EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
## INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
## LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
## OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.##
from __future__ import division
from PyQt4 import QtCore
from PyQt4 import QtGui
#from PyQt4.QtCore import Qt, QRectF
#from PyQt4.QtGui import QApplication, QHBoxLayout, QLabel, QSizePolicy, QSlider, QSpacerItem, \
#QVBoxLayout, QWidget
#import QtCore.QString.fromUtf8 as asdf
import glob
import numpy as np
import os
import pandas as pd
import pymap3d
import pyqtgraph as pg
pg.setConfigOptions(imageAxisOrder='row-major')
import skimage.io as io
import sys
def convertMapData(lat, lon, hgt, lat0=32.775776, lon0=35.024963, alt0=229):
"""Convert lat/lon/height data to grid data."""
n, e, d = pymap3d.geodetic2ned(
lat, lon, hgt,
lat0=lat0, lon0=lon0, h0=alt0)
x, y, z = e, n, -d
return x, y
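# Worked example (an illustrative sketch, not part of the original module): the
# reference point itself maps to the grid origin, so
# convertMapData(32.775776, 35.024963, 229) returns approximately (0.0, 0.0).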
class Slider(QtGui.QWidget):
def __init__(self, maximum, parent=None):
super(Slider, self).__init__(parent=parent)
#
# Create the Slider (centered)
#
self.horizontalLayout = QtGui.QHBoxLayout(self)
spacerItem = QtGui.QSpacerItem(0, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.slider = QtGui.QSlider(self)
self.slider.setOrientation(QtCore.Qt.Vertical)
self.horizontalLayout.addWidget(self.slider)
spacerItem1 = QtGui.QSpacerItem(0, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.resize(self.sizeHint())
self.slider.setMaximum(maximum)
def value(self):
return self.slider.value()
class MainWindow(QtGui.QWidget):
"""main widget."""
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent=parent)
#
# Create the main window
#
self.verticalLayout = QtGui.QVBoxLayout(self)
self.label = QtGui.QLabel(self)
self.verticalLayout.addWidget(self.label)
self.cameras_view = pg.GraphicsWindow(title="Basic plotting examples")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.addWidget(self.cameras_view)
self.view = self.cameras_view.addViewBox()
self.verticalLayout.addLayout(self.horizontalLayout)
#
# lock the aspect ratio so pixels are always square
#
self.view.setAspectLocked(True)
#
# Load the thumbnails dataframes
#
dfs = pd.read_pickle(r"..\ipython\system\thumbnails_downloaded.pkl")
self.thumbs = {}
self.image_items = {}
server_id_list, df_list = [], []
for server_id, df in dfs.items():
server_id_list.append(server_id)
#
# Load all the images.
#
print("Processing camera {}".format(server_id))
images, indices = [], []
index = 0
for _, row in df.iterrows():
try:
images.append(io.imread(os.path.join(r"..\ipython\system", row["thumbnail"])))
indices.append(index)
index += 1
except:
indices.append(None)
self.thumbs[server_id] = images
df["thumb_index"] = indices
df_list.append(df)
#
# Create image widgets
#
image_item = pg.ImageItem()
image_label = pg.LabelItem(text=server_id)
image_label.scale(1, -1)
self.view.addItem(image_item)
self.view.addItem(image_label)
self.image_items[server_id] = (image_item, image_label)
self.df = pd.concat(df_list, axis=1, keys=server_id_list)
#
# Create the thumbnail slider
#
self.w1 = Slider(len(self.df)-1)
self.horizontalLayout.addWidget(self.w1)
self.w1.slider.valueChanged.connect(lambda: self.update())
self.update()
def update(self):
#
# Get the current image time/index.
#
img_index = int(self.w1.value())
row = self.df.iloc[img_index]
self.label.setText(repr(row.name))
for server_id, (image_item, image_label) in self.image_items.items():
server_data = row[server_id]
if not np.isfinite(server_data["thumb_index"]):
image_item.hide()
image_label.hide()
continue
x, y = convertMapData(server_data["latitude"], server_data["longitude"], 0)
x = int(x/10)
y = int(y/10)
image_item.show()
image_label.show()
image_item.setImage(self.thumbs[server_id][int(server_data["thumb_index"])])
image_item.setRect(QtCore.QRectF(x, y, 100, 100))
image_label.setX(x)
image_label.setY(y+120)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
w = MainWindow()
w.show()
sys.exit(app.exec_())
| nilq/baby-python | python |
"""Module for the main SqsTestPrefix Construct."""
# Standard library imports
import json
# Third party imports
from aws_cdk import core as cdk, aws_sqs as sqs, aws_lambda as lambda_
# Local application/library specific imports
from filter_dynamodb_event_streams_sent_to_lambda.lambda_function import LambdaFunction
class SqsTestPrefix(cdk.Construct):
"""The SqsTestPrefix Construct."""
def __init__(
self,
scope: cdk.Construct,
construct_id: str,
queue: sqs.Queue,
**kwargs,
) -> None:
"""
Initialize a new SqsTestPrefix Construct.
This Construct contains the Lambda Function and Event
Source Mapping to process events where the body starts
with the value "Test".
"""
super().__init__(scope, construct_id, **kwargs)
# The Lambda Function to process the messages on the queue
processor_function = LambdaFunction(
scope=self,
construct_id="ProcessorFunction",
code=lambda_.Code.from_asset("lambda_functions/queue_processor"),
)
queue.grant_consume_messages(processor_function.function)
test_prefix = lambda_.CfnEventSourceMapping(
scope=self,
id="TestPrefixEventSourceMapping",
function_name=processor_function.function.function_name,
event_source_arn=queue.queue_arn,
maximum_batching_window_in_seconds=1,
batch_size=1,
)
test_prefix.add_property_override(
property_path="FilterCriteria",
value={
"Filters": [
{"Pattern": json.dumps({"body": [{"prefix": "Test"}]})},
],
},
)
| nilq/baby-python | python |
from com.sun.star.style.ParagraphAdjust import CENTER, LEFT, RIGHT, BLOCK, STRETCH
from com.sun.star.text.ControlCharacter import PARAGRAPH_BREAK, APPEND_PARAGRAPH, LINE_BREAK
def populateTopText(cursor, doc, text, practice):
styles = doc.StyleFamilies
page_styles = styles.getByName("PageStyles")
oDefaultStyle = page_styles.getByName("Standard")
oDefaultStyle.HeaderIsOn = True
oDefaultStyle.setPropertyValue("TopMargin", 500)
header_text = oDefaultStyle.getPropertyValue("HeaderText")
header_cursor = header_text.createTextCursor()
header_cursor.setPropertyValue( "CharFontName", "Liberation Serif" )
header_cursor.setPropertyValue( "CharHeight", 18.0 )
header_cursor.setPropertyValue( "ParaAdjust", CENTER )
header_text.insertString(header_cursor, str(practice["practice_name"]), 0)
header_text.insertControlCharacter( header_cursor, PARAGRAPH_BREAK, False )
header_cursor.setPropertyValue( "CharHeight", 12.0 )
header_text.insertString( header_cursor, practice["qualification"], 0 )
header_text.insertControlCharacter( header_cursor, PARAGRAPH_BREAK, False )
header_text.insertString( header_cursor, practice["specialisation"], 0 )
header_text.insertControlCharacter( header_cursor, PARAGRAPH_BREAK, False )
return doc, text, cursor
| nilq/baby-python | python |
#!/usr/bin/env python
"""
This script computes the features necessary to achieve the results on the SHS
training set reported in the paper:
Humphrey, E. J., Nieto, O., & Bello, J. P. (2013). Data Driven and
Discriminative Projections for Large-Scale Cover Song Identification. In Proc.
of the 14th International Society for Music Information Retrieval Conference.
Curitiba, Brazil.
A previously learned dictionary to convert the 2D-FMC features into codes can
be found in "models/BasisProjection2_ke2045_actEdot_shkE0x200_anormETrue.pk".
To use it, run the script as follows:
./cover_id_train.py -dictfile models/BasisProjection2_ke2045_actEdot_shkE0x200_anormETrue.pk
The PCA transform previously learned by Thierry can be found in:
"models/pca_250Kexamples_900dim_nocovers.pkl"
To use it, with an N number of dimensions, run the script as follows:
./cover_id_train.py -pca models/pca_250Kexamples_900dim_nocovers.pkl N
The script saves the provisional codes in "results/codes-$DICTNAME$.pk". To learn
a LDA transform based on the codes, use the function "fit_LDA_from_codes_file"
in the utils.py file.
For more info, run:
./cover_id_train.py -h
----
Authors:
Uri Nieto (oriol@nyu.edu)
Eric J. Humphrey (ejhumphrey@nyu.edu)
----
License:
This code is distributed under the GNU LESSER PUBLIC LICENSE
(LGPL, see www.gnu.org).
Copyright (c) 2012-2013 MARL@NYU.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of MARL, NYU nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
import argparse
import cPickle
import numpy as np
import os
import pickle
from scipy.spatial import distance
import sys
import time
# local stuff
import pca
import hdf5_getters as GETTERS
import dan_tools
import time
import utils
import scipy.cluster.vq as vq
import pylab as plt
from transforms import load_transform
import analyze_stats as anst
# Thierry's original parameters for ISMIR paper
WIN = 75
PWR = 1.96
PATCH_LEN = WIN*12
# Set up logger
logger = utils.configure_logger()
def compute_feats(track_ids, maindir, d, lda_file=None, lda_n=0, codes=None,
ver=True, pca="", pca_n=0):
"""Computes the features using the dictionary d. If it doesn't exist,
computes them using Thierry's method.
The improved pipeline is composed of 11 steps:
1.- Beat Synchronous Chroma
2.- L2-Norm
3.- Shingle (PATCH_LEN: 75 x 12)
4.- 2D-FFT
5.- L2-Norm
6.- Log-Scale
7.- Sparse Coding
8.- Shrinkage
9.- Median Aggregation
10.- Dimensionality Reduction
11.- L2-Norm
Original method by Thierry doesn't include steps 5,6,7,8,11.
"""
if d != "":
fx = load_transform(d)
K = int(d.split("_")[1].split("E")[1])
else:
K = PATCH_LEN
if codes is None:
compute_codes = True
codes = np.ones((len(track_ids),K)) * np.nan
else:
compute_codes = False
K = codes[0].shape[0]
if lda_file is not None:
if lda_n == 0: n_comp = 50
elif lda_n == 1: n_comp = 100
elif lda_n == 2: n_comp = 200
else:
n_comp = K
if pca != "":
pca = utils.load_pickle(pca)
pca = pca[pca_n]
final_feats = np.ones((codes.shape[0],n_comp)) * np.nan
orig_feats = []
for cnt, tid in enumerate(track_ids):
if compute_codes:
path = utils.path_from_tid(maindir, tid)
# 1.- Beat Synchronous Chroma
# 2.- L2-Norm
# 3.- Shingle (PATCH_LEN: 75 x 12)
# 4.- 2D-FFT
feats = utils.extract_feats(path)
#orig_feats.append(feats) # Store orig feats
            if feats is None:
continue
if d != "":
# 5.- L2-Norm
# 6.- Log-Scale
# 7.- Sparse Coding
# 8.- Shrinkage
H = fx(feats)
else:
H = feats
#. 9.- Median Aggregation
H = np.median(H, axis=0)
else:
H = codes[cnt]
if compute_codes:
codes[cnt] = H.copy()
if pca != "":
H = pca.transform(H)
# Apply LDA if needed
if lda_file is not None:
#H = dan_tools.chromnorm(H.reshape(H.shape[0], 1)).squeeze()
# 10.- Dimensionality Reduction
H = lda_file[lda_n].transform(H)
# 11.- L2-Norm
final_feats[cnt] = dan_tools.chromnorm(H.reshape(H.shape[0], 1)).squeeze()
if ver:
if cnt % 50 == 1:
logger.info("----Computing features %.1f%%" % \
(cnt/float(len(track_ids)) * 100))
if d == "":
d = "orig" # For saving purposes
# Save codes
utils.create_dir("results")
if compute_codes:
utils.save_pickle(codes, "results/codes-" + os.path.basename(d) + ".pk")
# Save features
#utils.save_pickle(orig_feats, "results/feats-" + os.path.basename(d) + ".pk")
logger.info("Features Computed")
return final_feats
def score(feats, clique_ids, lda_idx=0, stats_len=None, ver=True):
"""Compute the scores of the entire train dataset."""
if stats_len is None:
stats = [np.inf]*len(feats)
else:
stats = [np.inf]*stats_len
# For each track id that has a clique id
q = 0
for i, clique_id in enumerate(clique_ids):
if clique_id == -1:
continue
D = distance.cdist(feats[i][np.newaxis,:], feats, metric="euclidean")
s = np.argsort(D)[0]
sorted_cliques = clique_ids[s]
r = np.argwhere( sorted_cliques == clique_id )[1:]
if len(r) > 0:
stats[i] = r
q += 1
if ver:
if q % 400 == 0:
logger.info('After %d queries: average rank per track: %.2f, '\
'clique: %.2f, MAP: %.2f%%' \
% (q, anst.average_rank_per_track(stats),
anst.average_rank_per_clique(stats),
anst.mean_average_precision(stats) * 100))
return stats
def main():
# Args parser
parser = argparse.ArgumentParser(description=
"Cover song ID on the training Second Hand Song dataset",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("msd_dir", action="store",
help="Million Song Dataset main directory")
parser.add_argument("-dictfile", action="store", default="",
help="Pickle to the learned dictionary")
parser.add_argument("-lda", action="store", nargs=2, default=[None,0],
help="LDA file and version", metavar=('lda.pkl', 'n'))
parser.add_argument("-codes", action="store", default=None, dest="codesfile",
help="Pickle to the features file")
parser.add_argument("-f", action="store", default="", dest="featfile",
help="Pickle to the final features")
parser.add_argument("-pca", nargs=2, metavar=('f.pkl', 'n'),
default=("", 0),
help="pca model saved in a pickle file, " \
"use n dimensions")
args = parser.parse_args()
start_time = time.time()
maindir = args.msd_dir
shsf = "SHS/shs_dataset_train.txt"
dictfile = args.dictfile
    # sanity checks
utils.assert_file(dictfile)
utils.assert_file(maindir)
utils.assert_file(shsf)
# read clique ids and track ids
cliques, all_tracks = utils.read_shs_file(shsf)
track_ids = all_tracks.keys()
clique_ids = np.asarray(utils.compute_clique_idxs(track_ids, cliques))
logger.info("Track ids and clique ids read")
utils.save_pickle(clique_ids, "SHS/clique_ids_train.pk")
utils.save_pickle(track_ids, "SHS/track_ids_train.pk")
# read LDA file
lda_file = args.lda[0]
    if lda_file is not None:
lda_file = utils.load_pickle(lda_file)
logger.info("LDA file read")
# read codes file
codesfile = args.codesfile
    if codesfile is not None:
codesfile = utils.load_pickle(codesfile)
logger.info("Codes file read")
# Compute features if needed
if args.featfile == "":
feats = compute_feats(track_ids, maindir, dictfile,
lda_file=lda_file, lda_n=int(args.lda[1]), codes=codesfile,
pca=args.pca[0], pca_n=int(args.pca[1]))
else:
feats = utils.load_pickle(args.featfile)
# Apply PCA
pcafile = args.pca[0]
pcadim = int(args.pca[1])
if pcafile != "" and False:
trainedpca = utils.load_pickle(pcafile)
assert pcadim > 0
logger.info('trained pca loaded')
pcafeats = np.zeros((feats.shape[0], pcadim))
for i,feat in enumerate(feats):
pcafeats[i] = trainedpca.apply_newdata(feat, ndims=pcadim)
feats = pcafeats
# Scores
feats, clique_ids, track_ids = utils.clean_feats(feats, clique_ids, track_ids)
stats = score(feats, clique_ids)
# Save data
if dictfile == "":
dictfile = "thierry" # For saving purposes
utils.save_pickle(stats, "results/stats-" + os.path.basename(dictfile) + ".pk")
# done
logger.info('Average rank per track: %.2f, clique: %.2f, MAP: %.2f%%' \
% (anst.average_rank_per_track(stats),
anst.average_rank_per_clique(stats),
anst.mean_average_precision(stats) * 100))
logger.info("Done! Took %.2f seconds" % (time.time() - start_time))
if __name__ == '__main__':
main()
| nilq/baby-python | python |
import json
import pika
# Client class, since we want to connect to different CloudAMQP instances.
class CloudAMQPClient:
def __init__(self, cloud_amqp_url, queue_name):
self.cloud_amqp_url = cloud_amqp_url
self.queue_name = queue_name
self.params = pika.URLParameters(cloud_amqp_url)
        # allow up to 3 seconds for the connection attempt before timing out
self.params.socket_timeout = 3
self.connection = pika.BlockingConnection(self.params)
self.channel = self.connection.channel()
self.channel.queue_declare(queue=queue_name)
# send a message
def sendMessage(self, message):
# message is json object, when send message to queue,
# we need to convert it to string
self.channel.basic_publish(exchange = '',
routing_key = self.queue_name,
body = json.dumps(message))
# get a message
def getMessage(self):
method_frame, header_frame, body = self.channel.basic_get(self.queue_name)
        # method_frame is None when there is no message in the queue
if method_frame:
message = json.loads(body.decode('utf-8'))
self.channel.basic_ack(method_frame.delivery_tag)
# decode bytes to string, then convert string to json format
return message
else:
print ("No message returned")
return None
def clearQueue(self):
num_of_messages = 0
while True:
msg = self.getMessage()
if msg is None:
print ("Cleared %d messages." % num_of_messages)
return
num_of_messages += 1
    # BlockingConnection.sleep is a safer way to sleep than time.sleep(); it
    # keeps responding to the server's heartbeats while waiting.
def sleep(self, seconds):
print ('Sleep for %d secs ' % seconds)
self.connection.sleep(seconds)
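# Example usage (an illustrative sketch; the URL and queue name are placeholders,
# not values from the original project):
#
#     client = CloudAMQPClient("amqps://user:password@host/vhost", "demo-queue")
#     client.sendMessage({"task": "demo"})
#     msg = client.getMessage()   # -> {'task': 'demo'}, or None if the queue is empty
#     client.sleep(3)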
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
Class Piece:
    The shapes of the tetrominoes;
    each of them is a tuple of tuples containing the rotations.
    PIECES is a dict containing all the shapes, with a number as the key:
    {1: I, 2: J, 3: L, 4: O, 5: S, 6: T, 7: Z}
@author: Chens
https://github.com/ChenSunMac
"""
class Piece:
# I
I = (
(
(0, 0, 0, 0),
(1, 1, 1, 1),
(0, 0, 0, 0),
(0, 0, 0, 0)
),
(
(0, 0, 1, 0),
(0, 0, 1, 0),
(0, 0, 1, 0),
(0, 0, 1, 0)
),
(
(0, 0, 0, 0),
(0, 0, 0, 0),
(1, 1, 1, 1),
(0, 0, 0, 0)
),
(
(0, 1, 0, 0),
(0, 1, 0, 0),
(0, 1, 0, 0),
(0, 1, 0, 0)
)
)
# J
J = (
(
(2, 0, 0, 0),
(2, 2, 2, 0),
(0, 0, 0, 0),
(0, 0, 0, 0)
),
(
(0, 2, 2, 0),
(0, 2, 0, 0),
(0, 2, 0, 0),
(0, 0, 0, 0)
),
(
(0, 0, 0, 0),
(2, 2, 2, 0),
(0, 0, 2, 0),
(0, 0, 0, 0)
),
(
(0, 2, 0, 0),
(0, 2, 0, 0),
(2, 2, 0, 0),
(0, 0, 0, 0)
)
)
# L
L = (
(
(0, 0, 3, 0),
(3, 3, 3, 0),
(0, 0, 0, 0),
(0, 0, 0, 0)
),
(
(0, 3, 0, 0),
(0, 3, 0, 0),
(0, 3, 3, 0),
(0, 0, 0, 0)
),
(
(0, 0, 0, 0),
(3, 3, 3, 0),
(3, 0, 0, 0),
(0, 0, 0, 0)
),
(
(3, 3, 0, 0),
(0, 3, 0, 0),
(0, 3, 0, 0),
(0, 0, 0, 0)
)
)
# O
O = (
(
(0, 4, 4, 0),
(0, 4, 4, 0),
(0, 0, 0, 0),
(0, 0, 0, 0)
),
(
(0, 4, 4, 0),
(0, 4, 4, 0),
(0, 0, 0, 0),
(0, 0, 0, 0)
),
(
(0, 4, 4, 0),
(0, 4, 4, 0),
(0, 0, 0, 0),
(0, 0, 0, 0)
),
(
(0, 4, 4, 0),
(0, 4, 4, 0),
(0, 0, 0, 0),
(0, 0, 0, 0)
)
)
# S
S = (
(
(0, 5, 5, 0),
(5, 5, 0, 0),
(0, 0, 0, 0),
(0, 0, 0, 0)
),
(
(0, 5, 0, 0),
(0, 5, 5, 0),
(0, 0, 5, 0),
(0, 0, 0, 0)
),
(
(0, 0, 0, 0),
(0, 5, 5, 0),
(5, 5, 0, 0),
(0, 0, 0, 0)
),
(
(5, 0, 0, 0),
(5, 5, 0, 0),
(0, 5, 0, 0),
(0, 0, 0, 0)
)
)
# T
T = (
(
(0, 6, 0, 0),
(6, 6, 6, 0),
(0, 0, 0, 0),
(0, 0, 0, 0)
),
(
(0, 6, 0, 0),
(0, 6, 6, 0),
(0, 6, 0, 0),
(0, 0, 0, 0)
),
(
(0, 0, 0, 0),
(6, 6, 6, 0),
(0, 6, 0, 0),
(0, 0, 0, 0)
),
(
(0, 6, 0, 0),
(6, 6, 0, 0),
(0, 6, 0, 0),
(0, 0, 0, 0)
)
)
# Z
Z = (
(
(7, 7, 0, 0),
(0, 7, 7, 0),
(0, 0, 0, 0),
(0, 0, 0, 0)
),
(
(0, 0, 7, 0),
(0, 7, 7, 0),
(0, 7, 0, 0),
(0, 0, 0, 0)
),
(
(0, 0, 0, 0),
(7, 7, 0, 0),
(0, 7, 7, 0),
(0, 0, 0, 0)
),
(
(0, 7, 0, 0),
(7, 7, 0, 0),
(7, 0, 0, 0),
(0, 0, 0, 0)
)
)
PIECES = {1: I, 2: J, 3: L, 4: O, 5: S, 6:T, 7:Z}
TETRIMINO_SIZE = 4
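# Example usage (an illustrative sketch, not part of the original module): each
# rotation is a 4x4 tuple of rows, so the first rotation of the J piece prints
# as four rows of 0s and 2s.
if __name__ == "__main__":
    for row in Piece.PIECES[2][0]:
        print(row)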
| nilq/baby-python | python |
import sys,os
keywords = ['PLPS_path', 'PDB2PQR_path', 'APBS_path', 'XLOGP3_path', 'ligand_file', 'BABEL_path',\
'n_conf', 'OMEGA_path']
def read_input(input_file):
file = open(input_file, 'r')
lig_file = []
for line in file:
key = line.split()[0]
if(key == keywords[0]):
PLPS_dir = line.split()[1]
elif(key == keywords[1]):
PDB2PQR_dir = line.split()[1]
elif(key == keywords[2]):
APBS_dir = line.split()[1]
elif(key == keywords[3]):
XLOGP3_dir = line.split()[1]
elif(key == keywords[4]):
lig_file.append(line.split()[1])
elif(key == keywords[5]):
BABEL_dir = line.split()[1]
elif(key == keywords[6]):
n_conf = int(line.split()[1])
elif(key == keywords[7]):
OMEGA_dir = line.split()[1]
elif(key not in keywords):
sys.exit('Please enter proper parameter name in input file')
return PLPS_dir, PDB2PQR_dir, APBS_dir, XLOGP3_dir, lig_file, BABEL_dir, n_conf, OMEGA_dir
def split_conf(mol_id):
    conf_file = '%s_omega.mol2'%(mol_id)
    file = open(conf_file, 'r')
    i_conf = 0
    t_file = None
    for line in file:
        if(line[0:17] == '@<TRIPOS>MOLECULE'):
            # A new molecule record starts: close the previous conformer file
            # (if any) and open the next one.
            if(t_file is not None):
                t_file.close()
            i_conf += 1
            if(i_conf < 10):
                t_conf_file = '%s_conf_0%i.mol2'%(mol_id, i_conf)
            else:
                t_conf_file = '%s_conf_%i.mol2'%(mol_id, i_conf)
            t_file = open(t_conf_file, 'w')
        if(t_file is not None):
            # Write every line (including the MOLECULE header) to the current conformer file.
            t_file.write(line)
    if(t_file is not None):
        t_file.close()
    file.close()
    return i_conf
def generate_ssic(mol_id, i_conf, BABEL, PDB2PQR, script_dir, apbs_tool, APBS, bin_dir):
if(i_conf+1 < 10):
conf_pref = '%s_conf_0%i'%(mol_id, i_conf+1)
else:
conf_pref = '%s_conf_%i'%(mol_id, i_conf+1)
file = open('%s.mol2'%(conf_pref), 'a')
file.write('@<TRIPOS>SUBSTRUCTURE\n')
file.write(' 1 **** 1 TEMP 0 **** **** 0 ROOT\n')
file.close()
os.system("sed -i 's/<0>/MOL/g' %s.mol2"%(conf_pref))
os.system('%s -imol2 %s.mol2 -opdb %s.pdb'%(BABEL, conf_pref, conf_pref))
os.system("sed -i 's/ATOM /HETATM/g' %s.pdb"%(conf_pref))
os.system('%s --ligand=%s.mol2 --ff=amber %s.pdb %s.pqr'%(PDB2PQR, conf_pref, conf_pref, conf_pref))
convert_success = check_convert('%s.pqr'%(conf_pref))
if(not convert_success):
os.system('python %s/mol2topqr.py %s.mol2 %s.pqr'%(script_dir, conf_pref, conf_pref))
os.system("sed -i 's/HETATM/ATOM /g' %s.pdb"%(conf_pref))
os.system("sed -i 's/HETATM/ATOM /g' %s.pqr"%(conf_pref))
os.system('%s/psize.py %s.pqr > %s.psize'%(apbs_tool, conf_pref, conf_pref))
grid_pts, cntr_crd = get_grid_info('%s.psize'%(conf_pref))
write_apbs_input(conf_pref, grid_pts, cntr_crd)
os.system('%s %s.in'%(APBS, conf_pref))
os.system('%s/genLocInvPocketLig -s %s_smol.dx -d %s_pot.dx -q %s.pqr -xlp %s.xlp -o %s -l %s.pdb -mol2 %s.mol2 -rad 5 -psel -ar -sa 3.0'%(bin_dir, conf_pref, conf_pref, conf_pref, mol_id, conf_pref, conf_pref, conf_pref))
os.system('python %s/convert_seed_to_ssic.py %s.seed %s.ssic'%(script_dir, conf_pref, conf_pref))
def get_grid_info(psize_file):
file = open(psize_file, 'r')
grid_pts = []
cntr_crd = []
for line in file:
if(line.startswith('Num.')):
grid_pts.append(line.split()[5])
grid_pts.append(line.split()[7])
grid_pts.append(line.split()[9])
elif(line.startswith('Center')):
cntr_crd.append(line.split()[2])
cntr_crd.append(line.split()[4])
cntr_crd.append(line.split()[6])
file.close()
return grid_pts, cntr_crd
def write_apbs_input(conf_pref, grid_pts, cntr_crd):
input_file = '%s.in'%(conf_pref)
pqr_file = '%s.pqr'%(conf_pref)
pot_file = '%s_pot'%(conf_pref)
surf_file = '%s_smol'%(conf_pref)
file = open(input_file, 'w')
file.write('read\n')
file.write('mol pqr %s\n'%(pqr_file))
file.write('end\n\n')
file.write('# ENERGY OF PROTEIN CHUNK\n')
file.write('elec name solv\n')
file.write('mg-manual\n')
file.write('dime %s %s %s\n'%(grid_pts[0], grid_pts[1], grid_pts[2]))
file.write('grid 0.6 0.6 0.6\n')
file.write('gcent %s %s %s\n'%(cntr_crd[0], cntr_crd[1], cntr_crd[2]))
file.write('mol 1\n')
file.write('lpbe\n')
file.write('bcfl sdh\n')
file.write('pdie 2.0\n')
file.write('sdie 78.4\n')
file.write('chgm spl2\n')
file.write('srfm smol\n')
file.write('srad 1.4\n')
file.write('swin 0.3\n')
file.write('sdens 10.0\n')
file.write('temp 298.15\n')
file.write('calcenergy total\n')
file.write('calcforce no\n')
file.write('write pot dx %s\n'%(pot_file))
file.write('write smol dx %s\n'%(surf_file))
file.write('end\n\n')
file.write('quit\n')
file.close()
def check_convert(pqr_file):
convert_success = True
if(not os.path.isfile(pqr_file)):
convert_success = False
atom_exist = False
if(convert_success):
file = open(pqr_file, 'r')
for line in file:
if(line.startswith('ATOM') or line.startswith('HETATM')):
atom_exist = True
file.close()
if(not atom_exist):
convert_success = False
return convert_success
def main():
if(len(sys.argv) == 2):
input_file = sys.argv[1]
else:
print 'USAGE: python prepare_ligands.py [input file]'
exit(0)
# read parameters and set variables for binary files
PLPS_dir, PDB2PQR_dir, APBS_dir, XLOGP3_dir, lig_file, BABEL_dir, max_conf, OMEGA_dir = read_input(input_file)
apbs_tool = PLPS_dir + '/apbs_tool'
script_dir = PLPS_dir + '/scripts'
bin_dir = PLPS_dir + '/bin'
XLOGP3 = XLOGP3_dir + '/xlogp3.lnx.x86'
OMEGA = OMEGA_dir + '/omega2'
PDB2PQR = PDB2PQR_dir + '/pdb2pqr'
APBS = APBS_dir + '/apbs'
BABEL = BABEL_dir + '/babel'
for ligand in lig_file:
mol_id = ligand[:-5]
os.system('%s -ewindow 15.0 -maxconfs %i -rmsrange "0.5,0.8,1.0" -rangeIncrement 5 -commentEnergy -in %s.mol2 -out %s_omega.mol2 -strictstereo false'%(OMEGA, max_conf, mol_id, mol_id))
n_conf = split_conf(mol_id)
os.system('%s -v %s_conf_01.mol2 %s.xlp'%(XLOGP3, mol_id, mol_id))
for i_conf in range(n_conf):
generate_ssic(mol_id, i_conf, BABEL, PDB2PQR, script_dir, apbs_tool, APBS, bin_dir)
os.system('rm %s_conf*.in %s*.dx %s*.psize %s*.seed %s*.pqr %s*conf*.mol2 %s.xlp %s_omega.mol2'%(mol_id, mol_id, mol_id, mol_id, mol_id, mol_id, mol_id, mol_id))
os.system('mkdir %s'%(mol_id))
os.system('mv %s*.pdb %s*.ssic %s'%(mol_id, mol_id, mol_id))
os.system('rm omega* io.mc')
main()
| nilq/baby-python | python |
from featurechart import *
from treeview import *
def demo():
cp = load_earley('gazdar6.cfg', trace=2)
trees = cp.parse('the man who chased Fido returned')
for tree in trees: print tree
#run_profile()
if __name__ == '__main__': demo()
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import logging
import re
from collections import defaultdict
from google.appengine.ext import db, deferred
from lxml import etree
from mcfw.cache import cached
from mcfw.rpc import arguments, returns
from rogerthat.dal import parent_key, put_and_invalidate_cache
from rogerthat.dal.mfd import get_multilanguage_message_flow_designs_by_status
from rogerthat.dal.profile import get_service_profile
from rogerthat.dal.service import get_service_identities
from rogerthat.models import ServiceTranslation, ServiceTranslationSet, ServiceMenuDef, ServiceInteractionDef, \
MessageFlowDesign, Branding
from rogerthat.rpc import users
from rogerthat.utils import channel
from rogerthat.utils.languages import convert_iso_lang_to_web_lang, get_iso_lang
from rogerthat.utils.transactions import run_after_transaction, run_in_transaction
MFLOW_XPATH_MAP = {'''//definition[@language=$lang]/message/content[text()!='']/text()''': ServiceTranslation.MFLOW_TEXT,
'''//definition[@language=$lang]/message/answer[@caption!='']/@caption''': ServiceTranslation.MFLOW_BUTTON,
'''//definition[@language=$lang]/message/answer[@action!='']/@action''': ServiceTranslation.MFLOW_POPUP,
'''//definition[@language=$lang]/message[@brandingKey!='']/@brandingKey''': ServiceTranslation.MFLOW_BRANDING,
'''//definition[@language=$lang]/formMessage/content[text()!='']/text()''': ServiceTranslation.MFLOW_TEXT,
'''//definition[@language=$lang]/formMessage[@brandingKey!='']/@brandingKey''': ServiceTranslation.MFLOW_BRANDING,
'''//definition[@language=$lang]/formMessage/form[@positiveButtonConfirmation!='']/@positiveButtonConfirmation''': ServiceTranslation.MFLOW_POPUP,
'''//definition[@language=$lang]/formMessage/form[@negativeButtonConfirmation!='']/@negativeButtonConfirmation''': ServiceTranslation.MFLOW_POPUP,
'''//definition[@language=$lang]/formMessage/form[@positiveButtonCaption!='']/@positiveButtonCaption''': ServiceTranslation.MFLOW_BUTTON,
'''//definition[@language=$lang]/formMessage/form[@negativeButtonCaption!='']/@negativeButtonCaption''': ServiceTranslation.MFLOW_BUTTON,
'''//definition[@language=$lang]/formMessage/form/widget[@placeholder!='']/@placeholder''': ServiceTranslation.MFLOW_FORM,
'''//definition[@language=$lang]/formMessage/form/widget[@unit!='']/@unit''': ServiceTranslation.MFLOW_FORM,
'''//definition[@language=$lang]/formMessage/form[@type='auto_complete' or @type='text_line' or @type='text_block']/widget[@value!='']/@value''': ServiceTranslation.MFLOW_FORM,
'''//definition[@language=$lang]/formMessage/form/widget/choice[@label!='']/@label''': ServiceTranslation.MFLOW_FORM,
'''//definition[@language=$lang]/formMessage/form/javascriptValidation/text()''': ServiceTranslation.MFLOW_JAVASCRIPT_CODE,
'''//definition[@language=$lang]/flowCode/javascriptCode/text()''': ServiceTranslation.MFLOW_JAVASCRIPT_CODE,
}
MFLOW_REFERENCES = ['startReference', 'reference', 'dismissReference', 'positiveReference', 'negativeReference']
JS_TRANSLATE_REGEX = re.compile('rogerthat\.util\.translate\s*\(\s*(?P<start>[\"\'])(?P<key>.*?)(?P=start)\s*(\)|,)')
def assemble_qrcode_strings(service_user):
button_caption_set = set()
qry = ServiceInteractionDef.gql("WHERE ANCESTOR IS :ancestor AND deleted = FALSE AND multilanguage = TRUE")
qry.bind(ancestor=parent_key(service_user))
for sid in qry.fetch(None):
button_caption_set.add(sid.description)
button_caption_set.discard(None)
button_caption_set.discard("")
return {ServiceTranslation.SID_BUTTON: button_caption_set}
def assemble_homescreen_strings(service_user):
home_text_set = set()
home_branding_set = set()
identity_text_set = set()
identity_branding_set = set()
service_profile = get_service_profile(service_user)
home_text_set.update([service_profile.aboutMenuItemLabel,
service_profile.messagesMenuItemLabel,
service_profile.shareMenuItemLabel,
service_profile.callMenuItemLabel])
qry = ServiceMenuDef.gql("WHERE ANCESTOR IS :ancestor")
qry.bind(ancestor=parent_key(service_user))
items = qry.fetch(None)
for item in items:
home_text_set.add(item.label)
home_branding_set.add(item.screenBranding)
for service_identity in get_service_identities(service_user):
identity_text_set.update([service_identity.name,
service_identity.qualifiedIdentifier,
service_identity.description,
service_identity.mainPhoneNumber,
service_identity.callMenuItemConfirmation])
identity_branding_set.update([service_identity.descriptionBranding,
service_identity.menuBranding])
strings = {ServiceTranslation.HOME_TEXT: home_text_set,
ServiceTranslation.HOME_BRANDING: home_branding_set,
ServiceTranslation.IDENTITY_TEXT: identity_text_set,
ServiceTranslation.IDENTITY_BRANDING: identity_branding_set}
for set_ in strings.values():
set_.discard(None)
set_.discard("")
return strings
@returns(dict)
@arguments(default_language=unicode, flow_xml=str)
def get_message_flow_strings(default_language, flow_xml):
    # Don't want complex xpath queries due to the namespace
thexml = flow_xml.replace('xmlns="https://rogerth.at/api/1/MessageFlow.xsd"', '')
tree = etree.fromstring(thexml.encode('utf-8')) # @UndefinedVariable
keys = defaultdict(set)
for (path, translation_type) in MFLOW_XPATH_MAP.iteritems():
for default_str in tree.xpath(path, lang=default_language):
if default_str:
if translation_type in (ServiceTranslation.MFLOW_TEXT,
ServiceTranslation.MFLOW_BUTTON,
ServiceTranslation.MFLOW_FORM,
ServiceTranslation.MFLOW_POPUP,
ServiceTranslation.MFLOW_BRANDING):
keys[translation_type].add(default_str.strip())
elif translation_type == ServiceTranslation.MFLOW_JAVASCRIPT_CODE:
for match in JS_TRANSLATE_REGEX.findall(default_str):
keys[translation_type].add(match[1])
else:
logging.warning("XPATH ERROR - found empty str for path %s", path)
return keys
@returns(dict)
@arguments(service_user=users.User)
def assemble_message_flow_strings(service_user):
"""Go over all flows of this service user and create an in-memory dict.
Key = translation_type e.g. ServiceTranslation.MFLOW_POPUP
Value = set of strings in default language
Must run from a deferred
Returns dict(translation_type: set(default strings))
"""
flows = get_multilanguage_message_flow_designs_by_status(service_user, MessageFlowDesign.STATUS_VALID)
language_map = dict((translation_type, set()) for translation_type in set(MFLOW_XPATH_MAP.values()))
default_language = get_service_profile(service_user).defaultLanguage
for flow in flows:
for translation_type, strings in get_message_flow_strings(default_language, flow.xml).iteritems():
language_map[translation_type].update(strings)
return language_map
def assemble_service_strings(service_user):
d = assemble_homescreen_strings(service_user)
d.update(assemble_message_flow_strings(service_user))
d.update(assemble_qrcode_strings(service_user))
return d
def sync_service_translations(service_user):
service_profile = get_service_profile(service_user)
translation_set = None
if service_profile.editableTranslationSet:
translation_set = ServiceTranslationSet.get(db.Key(encoded=service_profile.editableTranslationSet))
translation_set.status = ServiceTranslationSet.SYNCING
translation_set.put()
else:
translation_set = ServiceTranslationSet.create_editable_set(service_user)
translation_set.status = ServiceTranslationSet.SYNCING
translation_set.put()
service_profile.editableTranslationSet = str(translation_set.key())
service_profile.put()
current_translations = get_all_translations(translation_set)
current_service_strings = assemble_service_strings(service_user)
current_service_strings[ServiceTranslation.BRANDING_CONTENT] = current_translations.get(
ServiceTranslation.BRANDING_CONTENT, dict())
updated_translations = dict()
for translation_type, default_strings in current_service_strings.iteritems():
current_translations_for_type = current_translations.get(translation_type, dict())
updated_translations_for_type = dict()
for default_string in default_strings:
updated_translations_for_type[default_string] = current_translations_for_type.get(default_string, None)
updated_translations[translation_type] = updated_translations_for_type
save_translations(translation_set, updated_translations)
def update_translation_of_type(service_user, translation_type, translation_strings):
"""Update service translation of translation_type with new keys
Args:
service_user (users.User)
translation_type (int): e.g. ServiceTranslation.MFLOW_TEXT
translation_strings (dict):
"""
def trans():
editable_translation_set = get_editable_translation_set(service_user)
should_create = not editable_translation_set
if should_create:
editable_translation_set = ServiceTranslationSet.create_editable_set(service_user)
editable_translation_set.put()
return should_create, editable_translation_set
@run_after_transaction
def update_service_profile(translation_set):
def inner_trans():
service_profile = get_service_profile(service_user)
service_profile.editableTranslationSet = str(translation_set.key())
service_profile.put()
run_in_transaction(inner_trans)
is_new_set, editable_translation_set = run_in_transaction(trans, xg=True)
if is_new_set:
update_service_profile(editable_translation_set)
all_translations = get_all_translations(editable_translation_set)
type_name = ServiceTranslation.TYPE_MAP[translation_type]
logging.info('Merging %s translations into the service translations', type_name)
logging.debug('New %s translation keys: %s', type_name, translation_strings)
logging.debug('Existing translations: %s', all_translations)
translations_dict = all_translations.setdefault(translation_type, dict())
updated = False
for default_string in translation_strings:
if default_string not in translations_dict:
translations_dict[default_string] = None
updated = True
if updated:
logging.debug('Updated translations: %s', all_translations)
save_translations(editable_translation_set, all_translations)
# convert "pt-br" keys to "pt_BR" before returning
for translations in translations_dict.itervalues():
if translations:
for lang in translations.keys():
translations[get_iso_lang(lang)] = translations.pop(lang)
return translations_dict, updated
def get_active_translation_set(service_profile):
# type: (ServiceProfile) -> ServiceTranslationSet
if service_profile.activeTranslationSet:
translation_set = ServiceTranslationSet.get(db.Key(encoded=service_profile.activeTranslationSet))
return translation_set
return None
def get_editable_translation_set(service_user):
service_profile = get_service_profile(service_user)
if service_profile.editableTranslationSet:
translation_set = ServiceTranslationSet.get(db.Key(encoded=service_profile.editableTranslationSet))
return translation_set
return None
def get_all_translations(translation_set, translation_types=None):
if translation_types:
keys = [ServiceTranslation.create_key(translation_set, translation_type)
for translation_type in translation_types]
db_translations = db.get(keys)
else:
db_translations = ServiceTranslation.all().ancestor(translation_set).fetch(None)
trdict = dict()
for db_translation in db_translations:
if db_translation:
trdict[db_translation.translation_type] = db_translation.translation_dict
return trdict
def save_translations(service_translation_set, multi_translation_dict):
def trans():
translation_keys = ServiceTranslation.all(keys_only=True).ancestor(service_translation_set).fetch(None)
db.delete(translation_keys)
to_put = list()
for translation_type, translation_dict in multi_translation_dict.iteritems():
to_put.append(ServiceTranslation.create(service_translation_set, translation_type, translation_dict))
db.put(to_put)
run_in_transaction(trans)
def deploy_translation(service_user):
def trans():
to_put = set()
service_profile = get_service_profile(service_user)
if not service_profile.editableTranslationSet:
logging.error("Deploy translation error - no editable translation found for svc %s" % service_user.email())
return
# 1. Archive old active translation set
if service_profile.activeTranslationSet:
old_active_translation_set = ServiceTranslationSet.get(service_profile.activeTranslationSet)
old_active_translation_set.status = ServiceTranslationSet.ARCHIVED
to_put.add(old_active_translation_set)
# 2. Promote old editable translation set to new active
service_profile.activeTranslationSet = service_profile.editableTranslationSet
to_put.add(service_profile)
new_active_translation_set = ServiceTranslationSet.get(service_profile.activeTranslationSet)
new_active_translation_set.status = ServiceTranslationSet.ACTIVE
to_put.add(new_active_translation_set)
# 3. Create new editable translation set
new_editable_translation_set = ServiceTranslationSet.create_editable_set(service_user)
new_editable_translation_set.latest_export_timestamp = new_active_translation_set.latest_export_timestamp
service_profile.editableTranslationSet = str(new_editable_translation_set.key())
to_put.add(new_editable_translation_set)
# 4. Copy existing translations to new
branding_translations_dict = None
for tr in ServiceTranslation.all().ancestor(new_active_translation_set).fetch(None):
translation_dict = tr.translation_dict
if tr.translation_type == ServiceTranslation.BRANDING_CONTENT:
branding_translations_dict = translation_dict
to_put.add(ServiceTranslation.create(new_editable_translation_set, tr.translation_type, translation_dict))
# 5. Store all in db
put_and_invalidate_cache(*to_put)
return service_profile, branding_translations_dict
service_profile, branding_translations_dict = run_in_transaction(trans, xg=True)
if len(service_profile.supportedLanguages) > 1:
if branding_translations_dict:
deferred.defer(_translate_all_app_brandings, service_user, Branding.TYPE_APP, branding_translations_dict)
deferred.defer(_translate_all_app_brandings, service_user,
Branding.TYPE_CORDOVA, branding_translations_dict)
deferred.defer(_translate_all_message_flows, service_user)
deferred.defer(_update_i18n_search_configs, service_user)
deferred.defer(_populate_new_editable_set, service_user)
def _update_i18n_search_configs(service_user):
from rogerthat.bizz.service import re_index
for service_identity in get_service_identities(service_user):
re_index(service_identity.user)
def _translate_all_app_brandings(service_user, branding_type, branding_translations_dict):
'''update all app brandings after editable set was deployed'''
from rogerthat.bizz.branding import add_translations_to_all_app_brandings
add_translations_to_all_app_brandings(service_user, branding_type, branding_translations_dict)
def _translate_all_message_flows(service_user):
'''update all multi-language flows after editable set was deployed'''
from rogerthat.bizz.service.mfd import render_xml_for_message_flow_design, render_js_for_message_flow_designs, \
get_message_flow_design_context
logging.debug("Re-translating all message flows of %s" % service_user.email())
translator = None
puts = list()
multilanguage_flows = get_multilanguage_message_flow_designs_by_status(service_user, MessageFlowDesign.STATUS_VALID)
for mfd in multilanguage_flows:
if translator is None:
translator = get_translator(service_user, ServiceTranslation.MFLOW_TYPES)
try:
context = get_message_flow_design_context(mfd) if mfd.definition else None
render_xml_for_message_flow_design(mfd, translator, context)
puts.append(mfd)
except:
logging.warning("Could not translate msg flow", exc_info=True)
try:
changed_languages = render_js_for_message_flow_designs(puts)
except:
logging.warning("Could not render JS for flows", exc_info=True)
changed_languages = None
put_and_invalidate_cache(*puts)
if not changed_languages:
from rogerthat.bizz.job.update_friends import schedule_update_all_friends_of_service_user
schedule_update_all_friends_of_service_user(service_user, bump_service_version=True)
def check_i18n_status_of_message_flows(service_user):
from rogerthat.bizz.service.mfd import render_xml_for_message_flow_design
def trans():
translator = get_translator(service_user)
mfds = get_multilanguage_message_flow_designs_by_status(service_user, MessageFlowDesign.STATUS_VALID)
for mfd in mfds:
render_xml_for_message_flow_design(mfd, translator, dict())
put_and_invalidate_cache(*mfds)
run_in_transaction(trans, xg=True)
channel.send_message(service_user, u'rogerthat.mfd.changes')
def _populate_new_editable_set(service_user):
'''copy active content to editable service translation set'''
def trans():
puts = list()
service_profile = get_service_profile(service_user)
editable_translation_set_key = db.Key(encoded=service_profile.editableTranslationSet)
active_translation_set_key = db.Key(encoded=service_profile.activeTranslationSet)
active_translations = ServiceTranslation.all().ancestor(active_translation_set_key).fetch(None)
for active_translation in active_translations:
editable_translation = ServiceTranslation.create(editable_translation_set_key,
active_translation.translation_type,
active_translation.translation_dict)
puts.append(editable_translation)
db.put(puts)
logging.debug("Copying active translation set into the new editable translation set")
run_in_transaction(trans, xg=True)
class Translator(object):
def __init__(self, translation_dict, supported_languages):
"""
        The translation dict does not necessarily contain every translation.
        E.g. for flows, only the flow strings are enough.
"""
self.d = translation_dict
self.default_language = supported_languages[0]
self.supported_languages = supported_languages
@property
def non_default_supported_languages(self):
return self.supported_languages[1:]
def _translate(self, translation_type, string, target_language):
"""
translation_type defined on ServiceTranslation
returns <bool success>, <possibly translated string>
"""
if not string:
return True, string
if target_language == self.default_language:
return True, string
if translation_type in self.d:
translations = self.d[translation_type].get(string, None)
if translations:
target_language = convert_iso_lang_to_web_lang(target_language)
if target_language in translations:
return True, translations[target_language]
if target_language and '-' in target_language:
target_language = target_language.split('-')[0]
if target_language in translations:
return True, translations[target_language]
return False, string
def translate(self, translation_type, string, target_language):
"""
translation_type defined on ServiceTranslation
returns <possibly translated string>
"""
return self._translate(translation_type, string, target_language)[1]
def translate_flow(self, default_xml, flow_name=None):
"""
Input = full xml (including subflows) in default language
Output = full multilanguage xml
"""
from rogerthat.bizz.service.mfd import get_json_from_b64id, create_b64id_from_json_dict
result = {self.default_language: default_xml}
for language in self.supported_languages[1:]:
tree = etree.fromstring(default_xml.encode('utf-8')) # @UndefinedVariable
try:
default_str_element = None
for (path, translation_type) in MFLOW_XPATH_MAP.iteritems():
for default_str_element in tree.xpath(path, lang=self.default_language):
default_lang_str = unicode(default_str_element)
if translation_type in ServiceTranslation.MFLOW_TYPES_ALLOWING_LANGUAGE_FALLBACK:
if default_lang_str in self.d[translation_type] and self.d[translation_type][default_lang_str]:
translation = self.d[translation_type][default_lang_str].get(language, default_lang_str)
else:
# None or empty dict
translation = default_lang_str
else:
translation = self.d[translation_type][default_lang_str][language]
if default_str_element.is_text:
default_str_element.getparent().text = translation
elif default_str_element.is_attribute:
# Translate attribute
attribute_name = path.split('@')[-1]
default_str_element.getparent().attrib[attribute_name] = translation
# Set language of definition
tree.xpath('/definition')[0].attrib['language'] = language
# Update references ('lang' value in json_dict of id attr)
for ref in MFLOW_REFERENCES:
for str_element in tree.xpath('//definition[@language=$lang]//@%s' % ref, lang=language):
if str_element.startswith('base64:'):
json_dict = get_json_from_b64id(str_element)
json_dict['lang'] = language
v = create_b64id_from_json_dict(json_dict)
str_element.getparent().attrib[ref] = v
elements_with_id = tree.xpath("//definition[@language=$lang]//@id", lang=language)
for el in elements_with_id:
if el == str_element:
el.getparent().attrib['id'] = v
result[language] = etree.tounicode(tree) # @UndefinedVariable
except:
logging.warning("Could not translate msg flow [%s] to lang [%s] - error with str [%s]" % (
flow_name, language, unicode(default_str_element)), exc_info=True)
return result
class DummyTranslator(Translator):
def __init__(self, default_language):
super(DummyTranslator, self).__init__({}, [default_language])
def _translate(self, translation_type, string, target_language):
return True, string
def translate(self, translation_type, string, target_language):
return string
def translate_flow(self, default_xml, flow_name=None):
return {self.default_language: default_xml}
@cached(1, request=True, memcache=False)
@returns(Translator)
@arguments(service_user=users.User, translation_types=[int], language=unicode)
def get_translator(service_user, translation_types=None, language=None):
""" translation_types = list of translation_types """
service_profile = get_service_profile(service_user)
supportedLanguages = service_profile.supportedLanguages
# use dummy translator for default language or unsupported language
if language != service_profile.defaultLanguage:
if len(supportedLanguages) > 1:
s = get_active_translation_set(service_profile)
if s:
return Translator(get_all_translations(s, translation_types), supportedLanguages)
return DummyTranslator(service_profile.defaultLanguage)
| nilq/baby-python | python |
import operator
class MultiMapping:
def __init__(self, *stores):
self.stores = list(stores)
def __getitem__(self, key):
for store in self.stores:
if key in store:
return store[key]
raise KeyError(key)
_marker = []
def get(self, key, default=_marker):
for store in self.stores:
if key in store:
return store[key]
if default is self._marker:
raise KeyError(key)
return default
def __len__(self):
return sum([len(x) for x in self.stores])
def push(self, store):
self.stores.append(store)
def pop(self):
return self.stores.pop()
def items(self):
l = []
for store in self.stores:
l = l + list(store.items())
return l
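# Example usage (an illustrative sketch, not part of the original module):
if __name__ == "__main__":
    mm = MultiMapping({"a": 1}, {"a": 10, "b": 2})
    print(mm["a"])         # -> 1: the first store containing the key wins
    print(mm.get("c", 0))  # -> 0: the default is returned when no store has the key
    mm.push({"c": 3})
    print(len(mm))         # -> 4: store lengths are summed, duplicate keys counted twice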
| nilq/baby-python | python |
import logging
import shutil
from pathlib import Path
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
from genomics_data_index.storage.SampleSet import SampleSet
from genomics_data_index.storage.index.KmerSearchManager import KmerSearchManagerSourmash
from genomics_data_index.storage.model.db import Sample, SampleKmerIndex
from genomics_data_index.storage.service import DatabaseConnection
from genomics_data_index.storage.service.SampleService import SampleService
logger = logging.getLogger(__name__)
class KmerService:
FIND_MATCHES_MERGE_TYPES = ['union']
def __init__(self, database_connection: DatabaseConnection, features_dir: Path, sample_service: SampleService):
self._database = database_connection
self._sample_service = sample_service
self._features_dir = features_dir
self._sourmash_search = KmerSearchManagerSourmash()
def find_matches_within(self, sample_names: List[str],
kmer_size: int, distance_threshold: float,
results_merge_type: str = 'union',
samples_universe: SampleSet = None) -> SampleSet:
"""
Find samples within a particular distance of a list of samples. This is based on kmer signatures/sketches.
:param sample_names: The list of sample names to search for matches.
:param kmer_size: The kmer size to use for searching through signatures/sketches.
        :param distance_threshold: A number from 0 to 1 giving the maximum distance a sample may have
                                   to the samples listed in 'sample_names' and still be included as a match.
:param results_merge_type: Defines how to combine results when passing multiple 'sample_names'. A type of
'union' means that matches will be the union of all samples matching anything in
'sample_names'. Currently only 'union' is supported (this parameter is here to
make a bit more clear how results are combined until I implement additional ways of
merging results).
:param samples_universe: The universe of samples to search through. Can be used to restrict which samples we will
consider for matches. Set to 'None' to search for matches in all samples in the system.
:return: A SampleSet of the matches.
"""
if results_merge_type != 'union':
raise Exception(f'results_merge_type=[{results_merge_type}] is not supported. '
f'Only {self.FIND_MATCHES_MERGE_TYPES} are supported.')
if samples_universe is None:
sample_universe_objects = self._sample_service.get_samples()
else:
sample_universe_objects = self._sample_service.find_samples_by_ids(sample_ids=samples_universe)
kmer_index_paths = [s.sample_kmer_index.kmer_index_path for s in sample_universe_objects if
s.sample_kmer_index is not None]
if len(kmer_index_paths) < len(sample_universe_objects):
logger.debug(f'Not all samples (number={len(sample_universe_objects)}) have associated kmer signatures '
f'(number={len(kmer_index_paths)}). These will be excluded from the search.')
if len(kmer_index_paths) == 0:
return SampleSet.create_empty()
else:
similarity_threshold = 1 - distance_threshold
matches_df = pd.DataFrame(data=[], columns=[
'Query',
'Match',
'Similarity',
'Distance',
])
for sample_name in sample_names:
query_sample = self._sample_service.get_sample(sample_name)
results_df = self._sourmash_search.search(kmer_size=kmer_size,
similarity_threshold=similarity_threshold,
query_file=query_sample.sample_kmer_index.kmer_index_path,
search_files=kmer_index_paths)
results_df['Distance'] = 1 - results_df['similarity']
results_df['Query'] = sample_name
results_df = results_df.rename({
'name': 'Match',
'similarity': 'Similarity',
}, axis='columns')
results_df = results_df[['Query', 'Match', 'Similarity', 'Distance']]
matches_df = pd.concat([matches_df, results_df])
sample_name_ids = self._sample_service.find_sample_name_ids(set(matches_df['Match'].tolist()))
matches_set = SampleSet(sample_name_ids.values())
return matches_set
def get_distance_matrix(self, sample_ids: Union[List[int], SampleSet], kmer_size: int,
ncores: int = 1) -> Tuple[
np.ndarray, List[str]]:
if isinstance(sample_ids, list):
sample_ids = SampleSet(sample_ids)
sourmash_search_multicore = KmerSearchManagerSourmash(ncores=ncores)
samples = self._sample_service.find_samples_by_ids(sample_ids)
kmer_index_paths = [s.sample_kmer_index.kmer_index_path for s in samples if
s.sample_kmer_index is not None]
if len(kmer_index_paths) < len(samples):
raise Exception(f'Not all samples (number={len(samples)}) have associated kmer signatures '
f'(number={len(kmer_index_paths)}).')
return sourmash_search_multicore.distances(kmer_size=kmer_size, signature_files=kmer_index_paths)
def has_kmer_index(self, sample_name: str) -> bool:
if self._sample_service.exists(sample_name):
sample = self._sample_service.get_sample(sample_name)
return sample.sample_kmer_index is not None
else:
return False
def insert_kmer_index(self, sample_name: str, kmer_index_path: Path):
if self._sample_service.exists(sample_name):
sample = self._sample_service.get_sample(sample_name)
else:
sample = Sample(name=sample_name)
self._database.get_session().add(sample)
kmer_path_internal = self._features_dir / kmer_index_path.name
shutil.copy(kmer_index_path, kmer_path_internal)
kmer_index = SampleKmerIndex(sample=sample, kmer_index_path=kmer_path_internal)
self._database.get_session().add(kmer_index)
self._database.get_session().commit()
|
nilq/baby-python
|
python
|
import numpy as np
class SimulationGoal:
"""
A class that tracks whether the simulation has reached its global goal.
"""
def __init__(self):
"""
We set the self.is_done to False as a start.
"""
self.is_done = False
def goal_reached(self, grid_world):
"""
Returns whether the global goal of the simulated grid world is accomplished. This method should be overridden
by a new goal function.
:param grid_world: An up to date representation of the grid world that will be analyzed in this function on
whether a specific coded global goal is reached.
:return: True when the goal is reached, False otherwise.
"""
pass
def get_progress(self, grid_world):
"""
Returns the progress of reaching the global goal in the simulated grid world. This method can be overridden
if you want to track the progress, but doing so is not required.
:param grid_world: An up to date representation of the grid world that will be analyzed in this function on
how far we are in obtaining the global simulation goal.
:return: A float between 0.0 (no progress made) and 1.0 (the goal is reached).
"""
pass
class LimitedTimeGoal(SimulationGoal):
"""
A simulation goal that simply tracks whether a maximum number of ticks has been reached.
"""
def __init__(self, max_nr_ticks):
super().__init__()
self.max_nr_ticks = max_nr_ticks
def goal_reached(self, grid_world):
nr_ticks = grid_world.current_nr_ticks
if self.max_nr_ticks == np.inf or self.max_nr_ticks <= 0:
self.is_done = False
else:
if nr_ticks >= self.max_nr_ticks:
self.is_done = True
else:
self.is_done = False
return self.is_done
def get_progress(self, grid_world):
if self.max_nr_ticks == np.inf or self.max_nr_ticks <= 0:
return 0.
return min(1.0, grid_world.current_nr_ticks / self.max_nr_ticks)
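# A minimal usage sketch (an assumption, not part of the original module): any object
# exposing a `current_nr_ticks` attribute can stand in for the real grid world here.
class _FakeGridWorld:
    def __init__(self, current_nr_ticks):
        self.current_nr_ticks = current_nr_ticks

goal = LimitedTimeGoal(max_nr_ticks=10)
assert goal.goal_reached(_FakeGridWorld(5)) is False   # 5 of 10 ticks used
assert goal.get_progress(_FakeGridWorld(5)) == 0.5     # halfway to the tick limit
assert goal.goal_reached(_FakeGridWorld(10)) is True   # tick limit reached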
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import os
import stat
serverScript="dnsserver.py"
serverScriptFolder="modules"
dbFolder="databases"
dbFile="storage.sqlite"
server_script_path=os.path.join(request.folder,serverScriptFolder,serverScript)
st = os.stat(server_script_path)
os.chmod(server_script_path, st.st_mode | stat.S_IEXEC)
server_path=os.path.join(request.folder,serverScriptFolder,serverScript)
db_path=os.path.join(request.folder,dbFolder,dbFile)
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
f = open("../data/train_small.csv", 'r')
g = open("../data/test_small.csv", 'r')
#Skip the head line
f.readline()
g.readline()
################################################
train_label_data, train_feature_data = [], []
for line in f:
labels = line.split(',')
labels = [label.strip() for label in labels]
feature = labels[-1].split(' ')
labels[-1] = feature[0]
feature = feature[1:]
labels = [int(label) for label in labels]
train_label_data.append(labels)
feature = [tuple(int(v) for v in x.split(':')) for x in feature]
feature = dict(feature)
train_feature_data.append(feature)
f.close()
#################################################
test_feature_data = []
for line in g:
feature = line.split(' ')
feature = feature[1:]
feature = [tuple(int(v) for v in x.split(':')) for x in feature]
feature = dict(feature)
test_feature_data.append(feature)
g.close()
#################################################
train_feature_merge = []
for feature in train_feature_data:
train_feature_merge.extend(feature.keys())
test_feature_merge = []
for feature in test_feature_data:
test_feature_merge.extend(feature.keys())
plt.hist(train_feature_merge, 50, facecolor='r', alpha=0.75)
plt.hist(test_feature_merge, 50, facecolor='g', alpha=0.75)
plt.show()
|
nilq/baby-python
|
python
|
from naive_bayes import NaiveBayesClassifier
def test_evaluate_algorithm():
classifier = NaiveBayesClassifier()
dataset = [[3.393533211, 2.331273381, 0],
[3.110073483, 1.781539638, 0],
[1.343808831, 3.368360954, 0],
[3.582294042, 4.67917911, 0],
[2.280362439, 2.866990263, 0],
[7.423436942, 4.696522875, 1],
[5.745051997, 3.533989803, 1],
[9.172168622, 2.511101045, 1],
[7.792783481, 3.424088941, 1],
[7.939820817, 0.791637231, 1]]
n_folds = 5
results_data = classifier.evaluate_algorithm(dataset, n_folds)
assert len(results_data) == n_folds
assert all(0 <= data <= 100 for data in results_data)
|
nilq/baby-python
|
python
|
# *************************************
# |docname| - Misc CLI tools for Docker
# *************************************
# This file provides most of the subcommands for `docker_tools.py`.
#
# If you want to add a new subcommand, you must add it to the list in the add_commands
# function. That function ensures that docker_tools.py knows about the commands added
# in docker_tools_misc.py.
#
# Imports
# =======
# These are listed in the order prescribed by PEP 8, with exceptions noted below.
#
# There's a fair amount of bootstrap code here to download and install required imports and their dependencies.
#
# Standard library
# ----------------
from pathlib import Path
import os
import sys
import subprocess
from time import sleep
from typing import Optional, Tuple
# Third-party
# -----------
import click
# Local application
# -----------------
from ci_utils import env, xqt
# Globals
# =======
SERVER_START_SUCCESS_MESSAGE = "Success! The Runestone servers are running."
SERVER_START_FAILURE_MESSAGE = "Failed to start the Runestone servers."
# Subcommands for the CLI
# ========================
#
# ``shell``
# ---------
@click.command()
@click.option(
"--venv/--no-venv",
default=True,
help="Open a shell within the Python virtual environment for the Runestone servers.",
)
def shell(venv: bool) -> None:
"""
Open a Bash shell in the Docker container.
"""
# Ask for an interactive console.
ensure_in_docker(True)
# Skip the check, since the user will see any failures and because this would raise an exception if the last command in the shell produced a non-zero exit code.
if venv:
xqt("poetry run bash", cwd=env.RUNESTONE_PATH, check=False)
else:
xqt("bash", check=False)
# ``start_servers``
# -----------------
@click.command()
@click.option(
"--dev/--no-dev",
default=False,
help="Run the BookServer in development mode, auto-reloading if the code changes.",
)
def start_servers(dev: bool) -> None:
"""
Run the web servers -- nginx, web2py, and FastAPI -- used by Runestone. Before starting the server, it will stop any currently-running servers.
"""
_start_servers(dev)
# Since click changes the way argument passing works, have a non-click version that's easily callable from Python code.
def _start_servers(dev: bool) -> None:
ensure_in_docker()
bs_config = os.environ.get("BOOK_SERVER_CONFIG", "production")
if bs_config == "development":
dev = True
# sudo doesn't pass root's env vars; provide only the env vars Celery needs when invoking it.
xqt(
'sudo -u www-data env "PATH=$PATH" "REDIS_URI=$REDIS_URI" '
"poetry run celery --app=scheduled_builder worker --pool=threads "
"--concurrency=3 --loglevel=info &",
cwd=f"{env.RUNESTONE_PATH}/modules",
)
xqt(
"rm -f /srv/books.pid",
"poetry run bookserver --root /ns "
"--error_path /tmp "
"--gconfig $RUNESTONE_PATH/docker/gunicorn_config/fastapi_config.py "
# This must match the address in `./nginx/sites-available/runestone.template`.
"--bind unix:/run/fastapi.sock "
+ ("--reload " if dev else "")
+ "2>&1 > /proc/1/fd/1 &", # This redirect ensures output ends up in the docker log
"service nginx start",
"poetry run gunicorn -D --config $RUNESTONE_PATH/docker/gunicorn_config/web2py_config.py &",
cwd=f"{env.RUNESTONE_PATH}/docker/gunicorn_config",
)
# Start the script to collect tickets and store them in the database. Most useful
# for a production environment with several worker containers
xqt(
f"cp {env.RUNESTONE_PATH}/scripts/tickets2db.py {env.WEB2PY_PATH}",
"python web2py.py -M -S runestone --run tickets2db.py &",
cwd=f"{env.WEB2PY_PATH}",
)
# ``stop_servers``
# ----------------
# Shut down the web servers.
@click.command()
def stop_servers() -> None:
"""
Shut down the web servers and celery, typically before running tests which involve the web servers.
"""
_stop_servers()
def _stop_servers() -> None:
ensure_in_docker()
xqt(
"pkill celery",
"pkill -f gunicorn",
"pkill -f tickets2db.py",
"nginx -s stop",
check=False,
)
@click.command()
@click.option(
"--dev/--no-dev",
default=False,
help="Run the BookServer in development mode, auto-reloading if the code changes.",
)
def restart_servers(dev):
"""
Restart the web servers and celery.
"""
_stop_servers()
sleep(2)
_start_servers(dev)
@click.command()
def reloadbks() -> None:
"""
Tell BookServer to reload the application.
"""
ensure_in_docker()
with open("/srv/books.pid") as pfile:
pid = pfile.read().strip()
pid = int(pid)
os.kill(pid, 1) # send the HUP signal to bookserver
# ``test``
# --------
@click.command()
@click.option("--bks/--no-bks", default=False, help="Run/skip tests on the BookServer.")
@click.option(
"--rc/--no-rc", default=False, help="Run/skip tests on the Runestone components."
)
@click.option(
"--rs/--no-rs", default=True, help="Run/skip tests on the Runestone server."
)
# Allow users to pass args directly to the underlying ``pytest`` command -- see the `click docs <https://click.palletsprojects.com/en/8.0.x/arguments/#option-like-arguments>`_.
@click.argument("passthrough", nargs=-1, type=click.UNPROCESSED)
def test(bks: bool, rc: bool, rs: bool, passthrough: Tuple) -> None:
"""
Run unit tests.
PASSTHROUGH: These arguments are passed directly to the underlying "pytest" command. To pass options to this command, prefix this argument with "--". For example, use "docker_tools.py test -- -k test_just_this" instead of "docker_tools.py test -k test_just_this" (which produces an error).
"""
ensure_in_docker()
_stop_servers()
pytest = "$RUNESTONE_PATH/.venv/bin/pytest"
passthrough_args = " ".join(passthrough)
if bks:
xqt(f"{pytest} -v {passthrough_args}", cwd="/srv/BookServer")
if rc:
xqt(f"{pytest} -v {passthrough_args}", cwd="/srv/RunestoneComponents")
if rs:
xqt(
f"{pytest} -v applications/runestone/tests {passthrough_args}",
cwd=env.WEB2PY_PATH,
)
# ``wait``
# --------
# This is primarily used by tests to wait until the servers are running.
@click.command()
def wait() -> None:
"""
Wait until the server is running, then report success or failure through the program's exit code.
"""
ensure_in_docker()
ready_file = get_ready_file()
# Wait for success or failure.
while True:
txt = ready_file.read_text() if ready_file.is_file() else ""
if txt.endswith(SERVER_START_FAILURE_MESSAGE):
sys.exit(1)
if txt.endswith(SERVER_START_SUCCESS_MESSAGE):
sys.exit(0)
# Misc
# ----
# Add all subcommands in this file to the CLI.
def add_commands(cli) -> None:
for cmd in (
shell,
start_servers,
stop_servers,
test,
wait,
reloadbks,
restart_servers,
):
cli.add_command(cmd)
# Determine if we're running in a Docker container.
def in_docker() -> bool:
# This is difficult, and varies between OSes (Linux vs OS X) and Docker versions. Try a few different approaches and hope one works. This was taken from a `site <https://www.baeldung.com/linux/is-process-running-inside-container>`__.
cgroup = Path("/proc/1/cgroup")
if cgroup.is_file() and "docker" in cgroup.read_text():
return True
# Newer Docker versions create a file -- just look for that.
if Path("/.dockerenv").is_file():
return True
# Try looking at the first process to see if it's ``sh``.
sched = Path("/proc/1/sched")
if sched.is_file():
return sched.read_text().startswith("sh")
# We can't find any evidence of Docker. Assume it's not running.
return False
# If we're not in Docker, then re-run this command inside Docker.
def ensure_in_docker(
# True to make this interactive (the ``-i`` flag in ``docker exec``.)
is_interactive: bool = False,
# Return value: True if already in Docker; the function calls ``sys.exit(0)``, ending the program, otherwise.
) -> bool:
if in_docker():
return True
# Get the name of the container running the Runestone servers.
res = subprocess.run(
'docker ps --filter "ancestor=runestone/server" --format "{{.Names}}"',
shell=True,
capture_output=True,
text=True,
)
runestone_container_name = res.stdout.strip()
if not runestone_container_name:
runestone_container_name = "production-runestone-1"
# Some subtleties:
#
# #. Single-quote each argument before passing it.
# #. Run it in the venv used when building Docker, since this avoids installing click globally.
# #. Use env vars defined in the `../Dockerfile`, rather than hard-coding paths. We want these env vars evaluated after the shell in Docker starts, not now, hence the use of ``\$`` and the surrounding double quotes.
# #. Use just the name, not the full path, of ``sys.argv[0]``, since the filesystem is different in Docker. We assume that this command will be in the path (with the venv activated).
exec_name = Path(sys.argv[0]).name
quoted_args = "' '".join([exec_name] + sys.argv[1:])
xqt(
f"docker exec -{'i' if is_interactive else ''}t {runestone_container_name} bash -c "
'"source \$RUNESTONE_PATH/.venv/bin/activate; '
f"'{quoted_args}'\""
)
sys.exit(0)
# Determine if the BookServer git repo is available, returning a Path to it if it exists, or ``None`` otherwise.
def get_bookserver_path() -> Optional[Path]:
w2p_parent = Path(env.WEB2PY_PATH).parent
bookserver_path = Path(f"{w2p_parent}/BookServer")
# _`Volume detection strategy`: don't check just ``BookServer`` -- the volume may be mounted, but may not point to an actual filesystem path if the developer didn't clone the BookServer repo. Instead, look for evidence that there are actually some files in this path.
dev_bookserver = (bookserver_path / "bookserver").is_dir()
return bookserver_path if dev_bookserver else None
# Return the path to a file used to report the status of the container. Only for use inside Docker.
def get_ready_file() -> Path:
return Path(env.RUNESTONE_PATH) / "ready.txt"
|
nilq/baby-python
|
python
|
import os
import io
import base64
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
#from cryptography.hazmat.primitives import padding
from cryptography.hazmat.backends.openssl import backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import utils
from cryptography import x509  # needed by verifySignature() below
from encodings.base64_codec import base64_encode
"""
def hashFile(fname):
blocksize = 16
totalsize = 0
mydata = bytearray(blocksize)
#load and hash data to be signed, from task 1
file = open(fname, 'rb')
myhash = hashes.MD5()
hasher = hashes.Hash(myhash, backend)
while True:
num = file.readinto(mydata)
totalsize += num
print(num, mydata)
if num == blocksize:
data = bytes(mydata)
hasher.update(data)
else:
mydata2 = mydata[0:num]
data = bytes(mydata2)
hasher.update(data)
digest = hasher.finalize()
break
return(myhash, digest)
"""
def hashFile(mydata, blocksize):
#blocksize = 16
#totalsize = 0
#mydata = bytearray(blocksize)
#load and hash data to be signed, from task 1
#file = open(fname, 'rb')
myhash = hashes.MD5()
hasher = hashes.Hash(myhash, backend)
num = len(mydata)
print(num)
print(len(mydata))
if num == blocksize:
data = bytes(mydata)
hasher.update(data)
digest = hasher.finalize()
else:
print("error")
"""
mydata2 = mydata[0:num]
data = bytes(mydata2)
hasher.update(data)
digest = hasher.finalize()
#break
"""
return(myhash, digest)
def createSig(mydata, kr_fname, password, blocksize):
#fname2 = "infile.txt"
myhash, digest = hashFile(mydata, blocksize)
with open(kr_fname, 'rb') as file:
private_key = serialization.load_pem_private_key(
data = file.read(),
password = password.encode(),
backend = backend
)
file.close()
pad = padding.PKCS1v15()
sig = private_key.sign(
data = digest,
padding = pad,
algorithm = utils.Prehashed(myhash)
)
return sig
def verifySignature(fname, sigFname, certFname):
# sigFname = "user1.sig"
#fname = "infile.txt"
myhash, digest = hashFile(fname)
#with open("user1_cert.pem","rb") as file:
with open(certFname,"rb") as file:
certificate = x509.load_pem_x509_certificate(
data=file.read(),
backend=backend)
file.close()
with open(sigFname, "rb") as file:
temp = file.read()
sig = temp[26:-24]
sig = base64.b64decode(sig)
file.close()
public_key = certificate.public_key()
pad = padding.PKCS1v15()
public_key.verify(
signature = sig,
data = digest,
padding = pad,
algorithm = utils.Prehashed(myhash)
)
def verifySig(fname, sig, kuFname):
# sigFname = "user1.sig"
#fname = "infile.txt"
myhash, digest = hashFile(fname)
#with open("user1_cert.pem","rb") as file:
with open(kuFname, 'rb') as file:
public_key = serialization.load_pem_public_key(
data = file.read(),
backend = backend
)
file.close()
pad = padding.PKCS1v15() #need to use different type of padding?
public_key.verify(
signature = sig,
data = digest,
padding = pad,
algorithm = utils.Prehashed(myhash)
)
|
nilq/baby-python
|
python
|
from pathlib import Path
import unittest
import re
from unittest.mock import patch
from typer.testing import CliRunner
from plotly.graph_objects import Figure
from tempfile import NamedTemporaryFile
from ausdex import main
class TestMain(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
def test_version(self):
result = self.runner.invoke(main.app, ["--version"])
assert result.exit_code == 0
assert re.match(r"\d+\.\d+\.\d+", result.stdout)
@patch("typer.launch")
def test_repo(self, mock_launch):
result = self.runner.invoke(main.app, ["repo"])
assert result.exit_code == 0
mock_launch.assert_called_once()
self.assertIn("https://github.com/rbturnbull/ausdex", str(mock_launch.call_args))
@patch("subprocess.run")
def test_docs_live(self, mock_subprocess):
result = self.runner.invoke(main.app, ["docs"])
assert result.exit_code == 0
mock_subprocess.assert_called_once()
self.assertIn("sphinx-autobuild", str(mock_subprocess.call_args))
@patch("webbrowser.open_new")
@patch("subprocess.run")
def test_docs_static(self, mock_subprocess, mock_open_web):
result = self.runner.invoke(main.app, ["docs", "--no-live"])
assert result.exit_code == 0
mock_subprocess.assert_called_once()
self.assertIn("sphinx-build", str(mock_subprocess.call_args))
mock_open_web.assert_called_once()
def test_inflation(self):
result = self.runner.invoke(
main.app,
["inflation", "13", "March 1991", "--evaluation-date", "June 2010"],
)
assert result.exit_code == 0
assert "21.14" in result.stdout
def test_inflation_melbourne(self):
result = self.runner.invoke(
main.app,
["inflation", "13", "March 1991", "--evaluation-date", "May 2022", "--location", "melbourne"],
)
assert result.exit_code == 0
assert "26.95" in result.stdout
def test_inflation_perth(self):
result = self.runner.invoke(
main.app,
["inflation", "1", "March 1979", "--location", "Perth", "--evaluation-date", "May 2022"],
)
assert result.exit_code == 0
assert "5.29" in result.stdout
@patch.object(Figure, "show")
def test_plot_cpi(self, mock_show):
result = self.runner.invoke(
main.app,
["plot-cpi"],
)
assert result.exit_code == 0
mock_show.assert_called_once()
@patch.object(Figure, "show")
@patch.object(Figure, "write_image")
def test_plot_cpi_output(self, mock_write_image, mock_show):
result = self.runner.invoke(
main.app,
["plot-cpi", "--output", "tmp.jpg", "--location", "Melbourne"],
)
assert result.exit_code == 0
mock_show.assert_called_once()
mock_write_image.assert_called_once()
@patch.object(Figure, "show")
def test_plot_inflation(self, mock_show):
result = self.runner.invoke(
main.app,
["plot-inflation", "2022"],
)
assert result.exit_code == 0
mock_show.assert_called_once()
@patch.object(Figure, "show")
@patch.object(Figure, "write_html")
def test_plot_inflation_output(self, mock_write_html, mock_show):
result = self.runner.invoke(
main.app,
["plot-inflation", "2022", "--output", "tmp.html", "--location", "Melbourne"],
)
assert result.exit_code == 0
mock_show.assert_called_once()
mock_write_html.assert_called_once()
def test_plot_inflation_output_exists(self):
with NamedTemporaryFile(suffix=".html") as tmp:
result = self.runner.invoke(
main.app,
[
"plot-inflation",
"01-01-2019",
"--no-show",
"--output",
tmp.name,
"--start-date",
"06-06-1949",
],
)
assert result.exit_code == 0
assert Path(tmp.name).exists()
def test_plot_cpi_output_exists(self):
with NamedTemporaryFile(suffix=".png") as tmp:
result = self.runner.invoke(
main.app,
[
"plot-cpi",
"--no-show",
"--output",
tmp.name,
"--start-date",
"06-06-1949",
],
)
assert result.exit_code == 0
assert Path(tmp.name).exists()
|
nilq/baby-python
|
python
|
'''
Created on Feb 9, 2019
@author: NOOK
'''
from abc import ABC
from numpy import array, zeros
from math import sqrt, sin, cos, atan2, pi
def POW(a, b):
return a**b
class RadarCoordinatesTemplate(ABC):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
def AER2ENU(self, A, E, R) -> array:
ENU = zeros([len(A), 3])
ENU[0, 0] = R[0] * cos(E[0]) * sin(A[0])
ENU[0, 1] = R[0] * cos(E[0]) * cos(A[0])
ENU[0, 2] = R[0] * sin(E[0])
if (len(A) > 1) :
ENU[1, 0] = self.d1EastdAER1(A, E, R)
ENU[1, 1] = self.d1NorthdAER1(A, E, R)
ENU[1, 2] = self.d1UpdAER1(A, E, R)
if (len(A) > 2) :
ENU[2, 0] = self.d2EastdAER2(A, E, R)
ENU[2, 1] = self.d2NorthdAER2(A, E, R)
ENU[2, 2] = self.d2UpdAER2(A, E, R)
if (len(A) > 3) :
ENU[3, 0] = self.d3EastdAER3(A, E, R)
ENU[3, 1] = self.d3NorthdAER3(A, E, R)
ENU[3, 2] = self.d3UpdAER3(A, E, R)
if (len(A) > 4) :
ENU[4, 0] = self.d4EastdAER4(A, E, R)
ENU[4, 1] = self.d4NorthdAER4(A, E, R)
ENU[4, 2] = self.d4UpdAER4(A, E, R)
if (len(A) > 5) :
ENU[5, 0] = self.d5EastdAER5(A, E, R)
ENU[5, 1] = self.d5NorthdAER5(A, E, R)
ENU[5, 2] = self.d5UpdAER5(A, E, R)
return ENU
def ENU2AER(self, E, N, U) -> array:
AER = zeros([len(E), 3])
AER[0, 0] = atan2( E[0], N[0] ) % (2*pi)
AER[0, 1] = atan2( U[0], sqrt(E[0]**2 + N[0]**2) )
AER[0, 2] = sqrt(E[0]**2 + N[0]**2 + U[0]**2)
if (len(E) > 1) :
AER[1, 0] = self.d1AzimuthdENU1(E, N, U)
AER[1, 1] = self.d1ElevationdENU1(E, N, U)
AER[1, 2] = self.d1RangedENU1(E, N, U)
if (len(E) > 2) :
AER[2, 0] = self.d2AzimuthdENU2(E, N, U)
AER[2, 1] = self.d2ElevationdENU2(E, N, U)
AER[2, 2] = self.d2RangedENU2(E, N, U)
if (len(E) > 3) :
AER[3, 0] = self.d3AzimuthdENU3(E, N, U)
AER[3, 1] = self.d3ElevationdENU3(E, N, U)
AER[3, 2] = self.d3RangedENU3(E, N, U)
if (len(E) > 4) :
AER[4, 0] = self.d4AzimuthdENU4(E, N, U)
AER[4, 1] = self.d4ElevationdENU4(E, N, U)
AER[4, 2] = self.d4RangedENU4(E, N, U)
if (len(E) > 5) :
AER[5, 0] = self.d5AzimuthdENU5(E, N, U)
AER[5, 1] = self.d5ElevationdENU5(E, N, U)
AER[5, 2] = self.d5RangedENU5(E, N, U)
return AER
'''
public RealMatrix ENU2AER( RealVector E, RealVector N, RealVector U ) {
RealMatrix AER = new Array2DRowRealMatrix( E.getDimension(), 3 );
AER.setEntry(0, 0, Math.atan2(N.getEntry(0), E.getEntry(0))); // azimuth
AER.setEntry(0, 1, Math.atan2(U.getEntry(0), Math.sqrt(POW(E.getEntry(0),2) + POW(N.getEntry(0),2))));
AER.setEntry(0, 2, Math.sqrt(POW(E.getEntry(0),2) + POW(N.getEntry(0),2) + POW(U.getEntry(0),2)));
if (E.getDimension() > 1) {
AER.setEntry(1, 0, d1AzimuthdENU1(E, N, U));
AER.setEntry(1, 1, d1ElevationdENU1(E, N, U));
AER.setEntry(1, 2, d1RangedENU1(E, N, U));
if (E.getDimension() > 2) {
AER.setEntry(2, 0, d2AzimuthdENU2(E, N, U));
AER.setEntry(2, 1, d2ElevationdENU2(E, N, U));
AER.setEntry(2, 2, d2RangedENU2(E, N, U));
if (E.getDimension() > 3) {
AER.setEntry(3, 0, d3AzimuthdENU3(E, N, U));
AER.setEntry(3, 1, d3ElevationdENU3(E, N, U));
AER.setEntry(3, 2, d3RangedENU3(E, N, U));
if (E.getDimension() > 4) {
AER.setEntry(4, 0, d4AzimuthdENU4(E, N, U));
AER.setEntry(4, 1, d4ElevationdENU4(E, N, U));
AER.setEntry(4, 2, d4RangedENU4(E, N, U));
if (E.getDimension() > 5) {
AER.setEntry(5, 0, d5AzimuthdENU5(E, N, U));
AER.setEntry(5, 1, d5ElevationdENU5(E, N, U));
AER.setEntry(5, 2, d5RangedENU5(E, N, U));
}
}
}
}
}
return AER;
}
'''
def d1AzimuthdENU1(self, E, N, U) -> array:
pass # {$d1AzimuthdENU1}
def d2AzimuthdENU2(self, E, N, U) -> array:
pass # {$d2AzimuthdENU2}
def d3AzimuthdENU3(self, E, N, U) -> array:
pass # {$d3AzimuthdENU3}
def d4AzimuthdENU4(self, E, N, U) -> array:
pass # {$d4AzimuthdENU4}
def d5AzimuthdENU5(self, E, N, U) -> array:
pass # {$d5AzimuthdENU5}
def d1ElevationdENU1(self, E, N, U) -> array:
pass # {$d1ElevationdENU1}
def d2ElevationdENU2(self, E, N, U) -> array:
pass # {$d2ElevationdENU2}
def d3ElevationdENU3(self, E, N, U) -> array:
pass # {$d3ElevationdENU3}
def d4ElevationdENU4(self, E, N, U) -> array:
pass # {$d4ElevationdENU4}
def d5ElevationdENU5(self, E, N, U) -> array:
pass # {$d5ElevationdENU5}
def d1RangedENU1(self, E, N, U) -> array:
pass # {$d1RangedENU1}
def d2RangedENU2(self, E, N, U) -> array:
pass # {$d2RangedENU2}
def d3RangedENU3(self, E, N, U) -> array:
pass # {$d3RangedENU3}
def d4RangedENU4(self, E, N, U) -> array:
pass # {$d4RangedENU4}
def d5RangedENU5(self, E, N, U) -> array:
pass # {$d5RangedENU5}
def d1EastdAER1(self, A, E, R) -> array:
pass # {$d1EastdAER1}
def d2EastdAER2(self, A, E, R) -> array:
pass # {$d2EastdAER2}
def d3EastdAER3(self, A, E, R) -> array:
pass # {$d3EastdAER3}
def d4EastdAER4(self, A, E, R) -> array:
pass # {$d4EastdAER4}
def d5EastdAER5(self, A, E, R) -> array:
pass # {$d5EastdAER5}
def d1NorthdAER1(self, A, E, R) -> array:
pass # {$d1NorthdAER1}
def d2NorthdAER2(self, A, E, R) -> array:
pass # {$d2NorthdAER2}
def d3NorthdAER3(self, A, E, R) -> array:
pass # {$d3NorthdAER3}
def d4NorthdAER4(self, A, E, R) -> array:
pass # {$d4NorthdAER4}
def d5NorthdAER5(self, A, E, R) -> array:
pass # {$d5NorthdAER5}
def d1UpdAER1(self, A, E, R) -> array:
pass # {$d1UpdAER1}
def d2UpdAER2(self, A, E, R) -> array:
pass # {$d2UpdAER2}
def d3UpdAER3(self, A, E, R) -> array:
pass # {$d3UpdAER3}
def d4UpdAER4(self, A, E, R) -> array:
pass # {$d4UpdAER4}
def d5UpdAER5(self, A, E, R) -> array:
pass # {$d5UpdAER5}
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# pylint: disable=deprecated-sys-function
from __future__ import absolute_import
import cffi
import sys
_ffi = cffi.FFI()
_ffi.cdef("""
enum geopm_error_e {
GEOPM_ERROR_RUNTIME = -1,
GEOPM_ERROR_LOGIC = -2,
GEOPM_ERROR_INVALID = -3,
GEOPM_ERROR_FILE_PARSE = -4,
GEOPM_ERROR_LEVEL_RANGE = -5,
GEOPM_ERROR_NOT_IMPLEMENTED = -6,
GEOPM_ERROR_PLATFORM_UNSUPPORTED = -7,
GEOPM_ERROR_MSR_OPEN = -8,
GEOPM_ERROR_MSR_READ = -9,
GEOPM_ERROR_MSR_WRITE = -10,
GEOPM_ERROR_AGENT_UNSUPPORTED = -11,
GEOPM_ERROR_AFFINITY = -12,
GEOPM_ERROR_NO_AGENT = -13,
};
void geopm_error_message(int err, char *msg, size_t size);
""")
try:
_dl = _ffi.dlopen('libgeopmpolicy.so', _ffi.RTLD_GLOBAL|_ffi.RTLD_LAZY)
except OSError as ee:
raise OSError('This module requires libgeopmpolicy.so to be present in your LD_LIBRARY_PATH.') from ee
ERROR_RUNTIME = _dl.GEOPM_ERROR_RUNTIME
ERROR_LOGIC = _dl.GEOPM_ERROR_LOGIC
ERROR_INVALID = _dl.GEOPM_ERROR_INVALID
ERROR_FILE_PARSE = _dl.GEOPM_ERROR_FILE_PARSE
ERROR_LEVEL_RANGE = _dl.GEOPM_ERROR_LEVEL_RANGE
ERROR_NOT_IMPLEMENTED = _dl.GEOPM_ERROR_NOT_IMPLEMENTED
ERROR_PLATFORM_UNSUPPORTED = _dl.GEOPM_ERROR_PLATFORM_UNSUPPORTED
ERROR_MSR_OPEN = _dl.GEOPM_ERROR_MSR_OPEN
ERROR_MSR_READ = _dl.GEOPM_ERROR_MSR_READ
ERROR_MSR_WRITE = _dl.GEOPM_ERROR_MSR_WRITE
ERROR_AGENT_UNSUPPORTED = _dl.GEOPM_ERROR_AGENT_UNSUPPORTED
ERROR_AFFINITY = _dl.GEOPM_ERROR_AFFINITY
ERROR_NO_AGENT = _dl.GEOPM_ERROR_NO_AGENT
def message(err_number):
"""Return the error message associated with the error code. Positive
error codes are interpreted as system error numbers, and
negative error codes are interpreted as GEOPM error numbers.
Args:
err_number (int): Error code to be interpreted.
Returns:
str: Error message associated with error code.
"""
global _ffi
global _dl
name_max = 1024
result_cstr = _ffi.new("char[]", name_max)
_dl.geopm_error_message(err_number, result_cstr, name_max)
return _ffi.string(result_cstr).decode()
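# A minimal usage sketch (assumes libgeopmpolicy.so is on LD_LIBRARY_PATH so the module
# import above succeeded); negative codes map to the GEOPM errors defined above, while
# positive codes are treated as system errno values.
if __name__ == '__main__':
    print(message(ERROR_INVALID))
    print(message(2))  # e.g. the message for errno 2 (ENOENT)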
|
nilq/baby-python
|
python
|
import path_utils
import os
import Sample
import gym_raas
replot_dir = ""
dir = os.path.join(path_utils.get_output_dir(), replot_dir)
s = Sample.replot_sample_dict_from_dir(dir)
|
nilq/baby-python
|
python
|
__author__ = 'Przemyslaw "Blasto" Wentrys'
@app.route('/mysql/testt')
def mysql_test():
execute = MySQL_Connection().execute_query
query = MySQL_Functions()
server = 'Naboo'
database = 'jira'
table = 'jiraissue'
columns = '*'
return str(execute(server, query.check_for_existence(database, table, columns)).rows)
@app.route('/mysql/formatted')
def mysql_formatted():
execute = MySQL_Connection().execute_query
query = MySQL_Functions()
final_result = []
server = 'Naboo'
database = 'jira'
table = 'jiraissue'
columns = '*'
result_raw = execute(server, query.check_for_existence(database, table, columns))
result_formatted = {
'col_names': list(result_raw.fields),
'data': list(result_raw.rows)
}
for row_index in range(0, len(result_formatted['data'])):
loop_result = {}
row = result_formatted['data'][row_index]
print(row)
for col_index in range(0, len(row)):
column = row[col_index]
print(column)
try:
loop_result.update({result_formatted['col_names'][col_index][0]: int(column)})
except:
loop_result.update({result_formatted['col_names'][col_index][0]: str(column)})
final_result.append(loop_result)
return str(final_result)
@app.route('/mysql/datatables')
def mysql_datatables_no_pagination():
execute = MySQL_Connection().execute_query
query = MySQL_Functions()
final_result = []
col_names = ''
server = 'Naboo'
database = 'jira'
table = 'jiraissue'
columns = '*'
result_raw = execute(server, query.check_for_existence(database, table, columns))
for col_name in result_raw.fields:
col_names += '{0}"class": "center", "title": "{1}"{2},'.format('{', col_name[0], '}')
result_formatted = {
'col_names': col_names[:-1],
'data': list(result_raw.rows)
}
for row_index in range(0, len(result_formatted['data'])):
loop_result = []
row = result_formatted['data'][row_index]
print(row)
for col_index in range(0, len(row)):
column = row[col_index]
print(column)
try:
loop_result.append(int(column))
except:
loop_result.append(str(column))
final_result.append(loop_result)
final_result = {'col_names': result_formatted['col_names'], 'data': final_result}
print(final_result)
return render_template('sql/result_table.html', jira_data=final_result)
@app.route('/mysql/datatables_multiple', methods=['GET', 'POST'])
def mysql_datatables_no_pagination_multiple():
execute = MySQL_Connection().execute_query
query = MySQL_Functions()
form = Query(request.form)
template = 'sql/result_table_multiple.html'
server = 'Naboo'
database = 'jira'
table = 'jiraissue'
columns = '*'
if request.method == 'POST':
if form.validate():
result_raw = execute(server, str(form.query.data))
result_formatted = query.datatables_output(result_raw)
return render_template(template, form=form, title=str(form.query.data),
jira_data=result_formatted)
else:
return render_template(template, form=form, title=str(form.query.data),
jira_data='ERROR')
elif request.method == 'GET':
result_raw = execute(server, query.check_for_existence(database, table, columns))
result_formatted = query.datatables_output(result_raw)
return render_template(template, form=form, title="Jira Issues",
jira_data=result_formatted)
else:
return str(request.method) + 'NOT ALLOWED'
@app.route('/mysql/datatables_accordion', methods=['GET', 'POST'])
def mysql_datatables_no_pagination_accordion():
execute = MySQL_Connection().execute_query
query = MySQL_Functions()
form = Query(request.form)
template = 'sql/result_table_accordion.html'
server = 'Naboo'
schema_raw = execute(server, query.get_schema())
result_dict = {}
for schema, table, column in schema_raw.rows:
result_dict[str(schema)] = {}
for schema, table, column in schema_raw.rows:
result_dict[str(schema)][str(table)] = []
for schema, table, column in schema_raw.rows:
result_dict[str(schema)][str(table)].append(str(column))
schema_nav = ''
col_names = ''
for col_name in schema_raw.fields:
col_names += '{0}"class": "center", "title": "{1}"{2},'.format('{', col_name[0], '}')
for schema in result_dict.keys():
schema_nav += '{0} \'text\': \'{1}\','.format('{', schema)
schema_nav += '\'children\': {0}'.format('[')
for table in result_dict[schema].keys():
schema_nav += '{0} \'text\': \'{1}\','.format('{', table)
schema_nav += '\'children\': {0}'.format('[')
for column in result_dict[schema][table]:
schema_nav += '{0}\'text\': \'{1}\''.format('{', column)
schema_nav += '{0}'.format('},')
print(column, schema, table)
schema_nav = schema_nav[:-1] + '{0}'.format(']')
schema_nav += '{0}'.format('},')
schema_nav = schema_nav[:-1] + '{0}'.format(']')
schema_nav += '{0}'.format('},')
if request.method == 'POST':
if form.validate():
result_raw = execute(server, str(form.query.data))
result_formatted = query.datatables_output(result_raw)
return render_template(template, form=form, schema=str(json.dumps(schema_nav))[1:-1],
title=str(form.query.data),
data=result_formatted)
else:
return render_template(template, form=form, schema=str(json.dumps(schema_nav))[1:-1],
title=str(form.query.data),
data='ERROR')
elif request.method == 'GET':
result_raw = execute(server, query.check_for_existence('jira', 'jiraissue', 'ID as "id", issuenum as "Issue Num", PROJECT as "Project", ASSIGNEE as "Assignee", PRIORITY as "Priority", issuestatus as "Issue Status"'))
result_formatted = query.datatables_output(result_raw)
return render_template(template, form=form, schema=str(json.dumps(schema_nav))[1:-1],
title=query.check_for_existence('jira', 'jiraissue', 'ID as "id", issuenum as "Issue Num", PROJECT as "Project", ASSIGNEE as "Assignee", PRIORITY as "Priority", issuestatus as "Issue Status"'),
data=result_formatted,)
else:
return str(request.method) + 'NOT ALLOWED'
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html for a list of status code
"""
from .rfc7285 import mimetypes
import logging
import traceback
import bottle
def is_error(code):
return code not in [200, 204]
def format_error(response = bottle.response, message='', exception = None):
"""
TODO: format message in application/alto-error+json
"""
response.set_header('content-type', mimetypes.ERROR)
if exception is not None:
traceback.print_tb(exception.__traceback__)
return message
def bad_request(response = bottle.response, **kargs):
response.status = 400 # Bad request
return format_error(response, **kargs)
def unauthorized(response = bottle.response, auth_method = 'basic', **kargs):
response.status = 401 # unauthorized
response.set_header('WWW-Authenticate', auth_method)
return format_error(response, **kargs)
def not_found(response = bottle.response, service=None):
response.status = 404 # Not found
if service is not None:
logging.info('Failed to find service %s', service)
return format_error(response)
def not_allowed(response = bottle.response, allow = [], **kargs):
response.status = 405 # Method not allowed
response.set_header('Allow', ','.join(allow))
return format_error(response, **kargs)
def not_acceptable(response = bottle.response, **kargs):
response.status = 406 # Not acceptable
return format_error(response, **kargs)
def not_supported(response = bottle.response, **kargs):
response.status = 415 # media type unsupported
return format_error(response, **kargs)
def server_error(response = bottle.response, cause = '', **kargs):
response.status = 500 # Internal Server Error
exception = kargs.get('exception')
if exception is not None:
logging.error('Server error %s', exception)
cause = '{}: {}'.format(type(exception), exception)
kargs.pop('message', '')
return format_error(response, message=cause, **kargs)
def not_implemented(response = bottle.response, **kargs):
response.status = 501 # Not implemented
return format_error(response, **kargs)
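# A minimal usage sketch (an assumption, not part of the original module): a hypothetical
# bottle route showing how a handler might report an unsupported media type. The route
# path and the expected content type are illustrative only.
@bottle.post('/example')
def _example_handler():
    if bottle.request.get_header('Content-Type') != 'application/json':
        return not_supported(bottle.response, message='expected application/json')
    return ''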
|
nilq/baby-python
|
python
|
##############################################################################
#
# Copyright (c) 2003-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2018 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
__author__="Cihan Altinay"
"""
:var __author__: name of author
:var __copyright__: copyrights
:var __license__: licence agreement
:var __url__: url entry point on documentation
:var __version__: version
:var __date__: date of the version
"""
import numpy
import sympy
from .symbol import Symbol
def symbols(*names, **kwargs):
"""
Emulates the behaviour of sympy.symbols.
"""
shape=kwargs.pop('shape', ())
s = names[0]
if not isinstance(s, list):
import re
s = re.split(r'\s|,', s)
res = []
for t in s:
# skip empty strings
if not t:
continue
sym = Symbol(t, shape, **kwargs)
res.append(sym)
res = tuple(res)
if len(res) == 0: # var('')
res = None
elif len(res) == 1: # var('x')
res = res[0]
# otherwise var('a b ...')
return res
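# A minimal usage sketch (assumes esys.escript is installed so that the Symbol class
# imported above is available); mirrors sympy.symbols with an optional escript shape:
#     u, v = symbols('u v')          # two scalar Symbols
#     w = symbols('w', shape=(3,))   # a single rank-1 Symbol of length 3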
def combineData(array, shape):
"""
"""
# array could just be a single value
if not hasattr(array,'__len__') and shape==():
return array
from esys.escript import Data
n=numpy.array(array) # for indexing
# find function space if any
dom=set()
fs=set()
for idx in numpy.ndindex(shape):
if isinstance(n[idx], Data):
fs.add(n[idx].getFunctionSpace())
dom.add(n[idx].getDomain())
if len(dom)>1:
domain=dom.pop()
while len(dom)>0:
if domain!=dom.pop():
raise ValueError("Mixing of domains not supported")
if len(fs)>0:
d=Data(0., shape, fs.pop()) # maybe interpolate instead of using first?
else:
d=numpy.zeros(shape)
for idx in numpy.ndindex(shape):
#z=numpy.zeros(shape)
#z[idx]=1.
#d+=n[idx]*z # much slower!
if hasattr(n[idx], "ndim") and n[idx].ndim==0:
d[idx]=float(n[idx])
else:
d[idx]=n[idx]
return d
def isSymbol(arg):
"""
Returns True if the argument ``arg`` is an escript ``Symbol`` or
``sympy.Basic`` object, False otherwise.
"""
return isinstance(arg, Symbol) or isinstance(arg, sympy.Basic)
def removeFsFromGrad(sym):
"""
Returns ``sym`` with all occurrences grad_n(a,b,c) replaced by grad_n(a,b).
That is, all functionspace parameters are removed.
"""
from esys.escript import symfn
gg=sym.atoms(symfn.grad_n)
for g in gg:
if len(g.args)==3:
r=symfn.grad_n(*g.args[:2])
sym=sym.subs(g, r)
return sym
def getTotalDifferential(f, x, order=0):
"""
This function computes::
| Df/Dx = del_f/del_x + del_f/del_grad(x)*del_grad(x)/del_x + ...
| \ / \ /
| a b
"""
from esys.escript import util
res=()
shape=util.getShape(f)
if not isSymbol(f):
res+=(numpy.zeros(shape+x.getShape()),)
for i in range(order):
x=x.grad()
res+=(numpy.zeros(shape+x.getShape()),)
elif x.getRank()==0:
f=removeFsFromGrad(f)
dfdx=f.diff(x)
dgdx=x.grad().diff(x)
a=numpy.empty(shape, dtype=object)
if order>0:
b=numpy.empty(shape+dgdx.getShape(), dtype=object)
if len(shape)==0:
for j in numpy.ndindex(dgdx.getShape()):
y=dfdx
z=dgdx[j]
# expand() and coeff() are very expensive so
# we set the unwanted factors to zero to extract
# the one we need
for jj in numpy.ndindex(dgdx.getShape()):
if j==jj: continue
y=y.subs(dgdx[jj], 0)
a=y.subs(z,0) # terms in x and constants
if order>0:
b[j]=y.subs(z,1)-a
else:
for i in numpy.ndindex(shape):
for j in numpy.ndindex(dgdx.getShape()):
y=dfdx[i]
z=dgdx[j]
for jj in numpy.ndindex(dgdx.getShape()):
if j==jj: continue
y=y.subs(dgdx[jj], 0)
a[i]=y.subs(z,0) # terms in x and constants
if order>0:
b[i+j]=y.subs(z,1)-a[i]
res+=(Symbol(a, dim=f.getDim(), subs=f.getDataSubstitutions()),)
if order>0:
res+=(Symbol(b, dim=f.getDim(), subs=f.getDataSubstitutions()),)
elif x.getRank()==1:
f=removeFsFromGrad(f)
dfdx=f.diff(x)
dgdx=x.grad().diff(x).transpose(2)
a=numpy.empty(shape+x.getShape(), dtype=object)
if order>0:
b=numpy.empty(shape+x.grad().getShape(), dtype=object)
if len(shape)==0:
raise NotImplementedError('f scalar, x vector')
else:
for i in numpy.ndindex(shape):
for k,l in numpy.ndindex(x.grad().getShape()):
if dgdx[k,k,l]==0:
a[i+(k,)]=0
if order>0:
b[i+(k,l)]=0
else:
y=dfdx[i+(k,)]
z=dgdx[k,k,l]
for kk,ll in numpy.ndindex(x.grad().getShape()):
if k==kk and l==ll: continue
y=y.subs(dgdx[kk,kk,ll], 0)
a[i+(k,)]=y.subs(z,0) # terms in x and constants
if order>0:
b[i+(k,l)]=y.subs(z,1)-a[i+(k,)]
res+=(Symbol(a, dim=f.getDim(), subs=f.getDataSubstitutions()),)
if order>0:
res+=(Symbol(b, dim=f.getDim(), subs=f.getDataSubstitutions()),)
if len(res)==1:
return res[0]
else:
return res
|
nilq/baby-python
|
python
|
'''
File: dict_merge.py
Project: 01-DataSturcture
===========
File Created: Tuesday, 21st July 2020 4:49:05 pm
Author: <<LanLing>> (<<lanlingrock@gmail.com>>)
===========
Last Modified: Tuesday, 21st July 2020 4:49:09 pm
Modified By: <<LanLing>> (<<lanlingrock@gmail.com>>>)
===========
Description: dictionary merging
Copyright <<2020>> - 2020 Your Company, <<XDU>>
'''
from collections import ChainMap
a = {'x': 1, 'z': 3 }
b = {'y': 2, 'z': 4 }
# Merge the dictionaries.
# ChainMap internally keeps a list of the underlying dictionaries.
# For duplicate keys, only the value from the first dictionary is used.
c = ChainMap(a, b)
print(c['x'])
print(c['y'])
print(c['z'])
print(list(c.keys()))
print(list(c.values()))
# All mutating operations only affect the first dictionary.
c['z'] = 10
c['w'] = 20
print(a, b)
# Personally I feel defaultdict would be a better fit.
|
nilq/baby-python
|
python
|
"""
A module for (non-TS) species conformer generation
Note:
variables that contain atom indices such as torsions and tops are 1-indexed,
while atoms in Molecules are 0-indexed.
Todo:
* Consider boat-chair conformers (https://en.wikipedia.org/wiki/Cyclohexane_conformation)
* finally, consider h-bonds
* Does it take the scan energy into account when generating combinations??
* The secretary problem - incorporate for stochastic searching
* What's the confirmed bottleneck?
conformers is a list of dictionaries, each with the following keys::
{'xyz': <dict>,
'index': <int>,
'FF energy': <float>,
'source': <str>,
'torsion_dihedrals': {<torsion tuple 0>: angle 0,
<torsion tuple 1>: angle 1,
}
Module workflow::
generate_conformers
generate_force_field_conformers
get_force_field_energies, rdkit_force_field or openbabel_force_field_on_rdkit_conformers,
determine_dihedrals
deduce_new_conformers
get_torsion_angles, determine_torsion_symmetry, determine_torsion_sampling_points,
change_dihedrals_and_force_field_it
get_lowest_confs
"""
import copy
import logging
import sys
import time
from itertools import product
from typing import List, Optional, Tuple, Union
from openbabel import openbabel as ob
from openbabel import pybel as pyb
from rdkit import Chem
from rdkit.Chem.rdchem import EditableMol as RDMol
import rmgpy.molecule.group as gr
from rmgpy.exceptions import ILPSolutionError, ResonanceError
from rmgpy.molecule.converter import to_ob_mol
from rmgpy.molecule.molecule import Atom, Bond, Molecule
from rmgpy.molecule.element import C as C_ELEMENT, H as H_ELEMENT, F as F_ELEMENT, Cl as Cl_ELEMENT, I as I_ELEMENT
from arc.common import logger, determine_top_group_indices
from arc.exceptions import ConformerError, InputError
import arc.plotter
from arc.species import converter, vectors
# The number of conformers to generate per range of heavy atoms in the molecule
# (will be increased if there are chiral centers)
CONFS_VS_HEAVY_ATOMS = {(0, 3): 75,
(4, 9): 500,
(10, 29): 1000,
(30, 59): 2500,
(60, 99): 5000,
(100, 'inf'): 7500,
}
# The number of conformers to generate per range of potential torsions in the molecule
# (will be increased if there are chiral centers)
CONFS_VS_TORSIONS = {(0, 1): 75,
(2, 5): 500,
(5, 19): 1000,
(20, 34): 2500,
(35, 49): 5000,
(50, 'inf'): 7500,
}
# The resolution (in degrees) for scanning smeared wells
SMEARED_SCAN_RESOLUTIONS = 30.0
# An energy threshold (in kJ/mol) above which wells in a torsion will not be considered (rel. to the most stable well)
DE_THRESHOLD = 5.
# The gap (in degrees) that defines different wells
WELL_GAP = 20
# The maximum number of times to iteratively search for the lowest conformer
MAX_COMBINATION_ITERATIONS = 25
# A threshold below which all combinations will be generated. Above it just samples of the entire search space.
COMBINATION_THRESHOLD = 1000
# Consolidation tolerances for Z matrices
CONSOLIDATION_TOLS = {'R': 1e-2, 'A': 1e-2, 'D': 1e-2}
def generate_conformers(mol_list: Union[List[Molecule], Molecule],
label,
xyzs=None,
torsions=None,
tops=None,
charge=0,
multiplicity=None,
num_confs_to_generate=None,
n_confs=10,
e_confs=5.0,
de_threshold=None,
smeared_scan_res=None,
combination_threshold=None,
force_field='MMFF94s',
max_combination_iterations=None,
diastereomers=None,
return_all_conformers=False,
plot_path=None,
print_logs=True,
) -> Union[list, Tuple[list, list], None]:
"""
Generate conformers for (non-TS) species starting from a list of RMG Molecules.
(resonance structures are assumed to have already been generated and included in the molecule list)
Args:
mol_list (Union[List[Molecule], Molecule]): Molecule objects to consider (or Molecule, resonance structures will be generated).
label (str): The species' label.
xyzs (list), optional: A list of user guess xyzs that will also be taken into account, each in a dict format.
torsions (list, optional): A list of all possible torsions in the molecule. Will be determined if not given.
tops (list, optional): A list of tops corresponding to torsions. Will be determined if not given.
charge (int, optional): The species charge. Used to perceive a molecule from xyz.
multiplicity (int, optional): The species multiplicity. Used to perceive a molecule from xyz.
num_confs_to_generate (int, optional): The number of conformers to generate (can be determined automatically)
n_confs (int, optional): The number of conformers to return.
e_confs (float, optional): The energy threshold in kJ/mol above the lowest energy conformer
below which all (unique) generated conformers will be returned.
de_threshold (float, optional): Energy threshold (in kJ/mol) above which wells will not be considered.
smeared_scan_res (float, optional): The resolution (in degrees) for scanning smeared wells.
combination_threshold (int, optional): A threshold below which all combinations will be generated.
force_field (str, optional): The type of force field to use (MMFF94, MMFF94s, UFF, GAFF, fit).
'fit' will first run MMFF94, than fit a custom Amber FF to the species.
max_combination_iterations (int, optional): The maximum number of times to iteratively search
for the lowest conformer.
diastereomers (list, optional): Entries are xyz's in a dictionary format or conformer structures
representing specific diastereomers to keep.
return_all_conformers (bool, optional): Whether to return the full list of conformer dictionaries
in addition to the lowest conformers list. True to return it.
plot_path (str, optional): A folder path in which the plot will be saved.
If None, the plot will not be shown (nor saved).
print_logs (bool, optional): Whether to define a logger so logs are also printed to stdout.
Useful when run outside of ARC. True to print.
Raises:
ConformerError: If something goes wrong.
TypeError: If xyzs has entries of a wrong type.
Returns:
list: Lowest conformers.
"""
if isinstance(mol_list, Molecule):
# try generating resonance structures, but strictly keep atom order
success = False
try:
new_mol_list = mol_list.copy(deep=True).generate_resonance_structures(keep_isomorphic=False,
filter_structures=True)
success = converter.order_atoms_in_mol_list(ref_mol=mol_list.copy(deep=True), mol_list=new_mol_list)
except (ValueError, ILPSolutionError, ResonanceError) as e:
logger.warning(f'Could not generate resonance structures for species {label}. Got: {e}')
if success:
mol_list = new_mol_list
else:
mol_list = [mol_list]
if not isinstance(mol_list, list):
logger.error(f'The `mol_list` argument must be a list, got {type(mol_list)}')
return None
if len(mol_list) == 0 or mol_list[0] is None:
logger.error('Must get a non-empty `mol_list` argument.')
return None
for mol in mol_list:
if not isinstance(mol, Molecule):
raise ConformerError(f'Each entry in the `mol_list` argument must be an RMG Molecule object, '
f'got {type(mol)}')
mol_list = [update_mol(mol) for mol in mol_list]
# a quick bypass for mono-atomic species:
if len(mol_list[0].atoms) == 1:
confs = [generate_monoatomic_conformer(symbol=mol_list[0].atoms[0].element.symbol)]
if not return_all_conformers:
return confs
else:
return confs, confs
if xyzs is not None and any([not isinstance(xyz, dict) for xyz in xyzs]):
raise TypeError(f"xyz entries of xyzs must be dictionaries, e.g.:\n\n"
f"{{'symbols': ('O', 'C', 'H', 'H'),\n'isotopes': (16, 12, 1, 1),\n"
f"'coords': ((0.0, 0.0, 0.678514),\n (0.0, 0.0, -0.532672),\n"
f" (0.0, 0.935797, -1.116041),\n (0.0, -0.935797, -1.116041))}}\n\n"
f"Got {[type(xyz) for xyz in xyzs]}")
if print_logs:
initialize_log()
t0 = time.time()
logger.info(f'Generating conformers for {label}')
max_combination_iterations = max_combination_iterations or MAX_COMBINATION_ITERATIONS
combination_threshold = combination_threshold or COMBINATION_THRESHOLD
if torsions is None or tops is None:
torsions, tops = determine_rotors(mol_list)
conformers = generate_force_field_conformers(
mol_list=mol_list, label=label, xyzs=xyzs, torsion_num=len(torsions), charge=charge, multiplicity=multiplicity,
num_confs=num_confs_to_generate, force_field=force_field)
if len(conformers):
conformers = determine_dihedrals(conformers, torsions)
new_conformers, symmetries = deduce_new_conformers(
label, conformers, torsions, tops, mol_list, smeared_scan_res, plot_path=plot_path,
combination_threshold=combination_threshold, force_field=force_field,
max_combination_iterations=max_combination_iterations, diastereomers=diastereomers,
de_threshold=de_threshold)
new_conformers = determine_chirality(conformers=new_conformers, label=label, mol=mol_list[0])
lowest_confs = get_lowest_confs(label, new_conformers, n=n_confs, e=e_confs)
lowest_confs.sort(key=lambda x: x['FF energy'], reverse=False) # sort by output confs, lowest to highest energy
execution_time = time.time() - t0
t, s = divmod(execution_time, 60)
t, m = divmod(t, 60)
d, h = divmod(t, 24)
days = f'{int(d)} days and ' if d else ''
if execution_time > 10:
logger.info(f'Conformer execution time using {force_field}: {days}{int(h):02d}:{int(m):02d}:{int(s):02d}')
else:
logger.error(f'Could not generate conformers for {label}: {mol_list[0].copy(deep=True).to_smiles()}')
lowest_confs, new_conformers = list(), list()
if not return_all_conformers:
return lowest_confs
else:
return lowest_confs, new_conformers
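# A minimal usage sketch (an assumption, not executed here; it requires RMG-Py and ARC
# to be installed, and the SMILES/label are illustrative only):
#     from rmgpy.molecule import Molecule
#     lowest = generate_conformers(Molecule(smiles='CCO'), label='ethanol', n_confs=5)
#     # each entry in `lowest` is a conformer dictionary with 'xyz' and 'FF energy' keys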
def deduce_new_conformers(label, conformers, torsions, tops, mol_list, smeared_scan_res=None, plot_path=None,
combination_threshold=1000, force_field='MMFF94s', max_combination_iterations=25,
diastereomers=None, de_threshold=None):
"""
By knowing the existing torsion wells, get the geometries of all important conformers.
Validate that atoms don't collide in the generated conformers (don't consider ones where they do).
Args:
label (str): The species' label.
conformers (list): Entries are conformer dictionaries.
torsions (list): A list of all possible torsion angles in the molecule, each torsion angles list is sorted.
tops (list): A list of tops corresponding to torsions.
mol_list (list): A list of RMG Molecule objects.
smeared_scan_res (float, optional): The resolution (in degrees) for scanning smeared wells.
plot_path (str, optional): A folder path in which the plot will be saved.
If None, the plot will not be shown (nor saved).
combination_threshold (int, optional): A threshold below which all combinations will be generated.
force_field (str, optional): The type of force field to use.
max_combination_iterations (int, optional): The max num of times to iteratively search for the lowest conformer.
diastereomers (list, optional): Entries are xyz's in a dictionary format or conformer structures
representing specific diastereomers to keep.
de_threshold (float, optional): An energy threshold (in kJ/mol) above which wells in a torsion
will not be considered.
Returns:
list: The deduced conformers.
Returns:
dict: Keys are torsion tuples, values are internal rotation symmetry numbers (sigma).
"""
smeared_scan_res = smeared_scan_res or SMEARED_SCAN_RESOLUTIONS
if not any(['torsion_dihedrals' in conformer for conformer in conformers]):
conformers = determine_dihedrals(conformers, torsions)
torsion_angles = get_torsion_angles(label, conformers, torsions) # get all wells per torsion
mol = mol_list[0]
symmetries = dict()
for torsion, top in zip(torsions, tops):
# identify symmetric torsions so we don't bother considering them in the conformational combinations
symmetry = determine_torsion_symmetry(label, top, mol_list, torsion_angles[tuple(torsion)])
symmetries[tuple(torsion)] = symmetry
logger.debug(f'Identified {len([s for s in symmetries.values() if s > 1])} symmetric wells for {label}')
torsions_sampling_points, wells_dict = dict(), dict()
for tor, tor_angles in torsion_angles.items():
torsions_sampling_points[tor], wells_dict[tor] = \
determine_torsion_sampling_points(label, tor_angles, smeared_scan_res=smeared_scan_res,
symmetry=symmetries[tor])
if plot_path is not None:
arc.plotter.plot_torsion_angles(torsion_angles, torsions_sampling_points, wells_dict=wells_dict,
plot_path=plot_path)
hypothetical_num_comb = 1
for points in torsions_sampling_points.values():
hypothetical_num_comb *= len(points)
number_of_chiral_centers = get_number_of_chiral_centers(label, mol, conformer=conformers[0],
just_get_the_number=True)
hypothetical_num_comb *= 2 ** number_of_chiral_centers
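# Illustrative example (hypothetical numbers): three torsions with 3, 2, and 2 sampling points
# and one chiral center give 3 * 2 * 2 * 2**1 = 24 hypothetical combinations,
# before collision checks and energy filtering are applied.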
if hypothetical_num_comb > 1000:
hypothetical_num_comb_str = f'{hypothetical_num_comb:.2E}'
else:
hypothetical_num_comb_str = str(hypothetical_num_comb)
logger.info(f'\nHypothetical number of conformer combinations for {label}: {hypothetical_num_comb_str}')
# split torsions_sampling_points into two lists, use combinations only for those with multiple sampling points
single_tors, multiple_tors, single_sampling_point, multiple_sampling_points = list(), list(), list(), list()
multiple_sampling_points_dict = dict() # used for plotting an energy "scan"
for tor, points in torsions_sampling_points.items():
if len(points) == 1:
single_tors.append(tor)
single_sampling_point.append(points[0])
else:
multiple_sampling_points_dict[tor] = points
multiple_tors.append(tor)
multiple_sampling_points.append(points)
diastereomeric_conformers = get_lowest_diastereomers(label=label, mol=mol, conformers=conformers,
diastereomers=diastereomers)
new_conformers = list()
for diastereomeric_conformer in diastereomeric_conformers:
# set symmetric (single well) torsions to the mean of the well
if 'chirality' in diastereomeric_conformer and diastereomeric_conformer['chirality'] != dict():
logger.info(f"Considering diastereomer {diastereomeric_conformer['chirality']}")
base_xyz = diastereomeric_conformer['xyz'] # base_xyz is modified within the loop below
for torsion, dihedral in zip(single_tors, single_sampling_point):
torsion_0_indexed = [tor - 1 for tor in torsion]
conf, rd_mol = converter.rdkit_conf_from_mol(mol, base_xyz)
if conf is not None:
base_xyz = converter.set_rdkit_dihedrals(conf, rd_mol, torsion_0_indexed, deg_abs=dihedral)
new_conformers.extend(generate_conformer_combinations(
label=label, mol=mol_list[0], base_xyz=base_xyz, hypothetical_num_comb=hypothetical_num_comb,
multiple_tors=multiple_tors, multiple_sampling_points=multiple_sampling_points,
combination_threshold=combination_threshold, len_conformers=len(conformers), force_field=force_field,
max_combination_iterations=max_combination_iterations, plot_path=plot_path, torsion_angles=torsion_angles,
multiple_sampling_points_dict=multiple_sampling_points_dict, wells_dict=wells_dict,
de_threshold=de_threshold, symmetries=symmetries))
if plot_path is not None:
lowest_conf = get_lowest_confs(label=label, confs=new_conformers, n=1)[0]
lowest_conf = determine_chirality([lowest_conf], label, mol, force=False)[0]
diastereomer = f" (diastereomer: {lowest_conf['chirality']})" if 'chirality' in lowest_conf \
and lowest_conf['chirality'] else ''
logger.info(f'Lowest force field conformer for {label}{diastereomer}:\n'
f'{converter.xyz_to_str(lowest_conf["xyz"])}\n')
arc.plotter.draw_structure(xyz=lowest_conf['xyz'])
return new_conformers, symmetries
def generate_conformer_combinations(label, mol, base_xyz, hypothetical_num_comb, multiple_tors,
multiple_sampling_points, combination_threshold=1000, len_conformers=-1,
force_field='MMFF94s', max_combination_iterations=25, plot_path=None,
torsion_angles=None, multiple_sampling_points_dict=None, wells_dict=None,
de_threshold=None, symmetries=None):
"""
Call either conformers_combinations_by_lowest_conformer() or generate_all_combinations(),
according to the hypothetical_num_comb.
Args:
label (str): The species' label.
mol (Molecule): The RMG molecule with the connectivity information.
base_xyz (dict): The base 3D geometry to be changed.
hypothetical_num_comb (int): The number of combinations that could be generated by changing dihedrals,
considering symmetry but not considering atom collisions.
combination_threshold (int, optional): A threshold below which all combinations will be generated.
multiple_tors (list): Entries are torsion tuples of non-symmetric torsions.
multiple_sampling_points (list): Entries are lists of dihedral angles (sampling points), respectively correspond
to torsions in multiple_tors.
len_conformers (int, optional): The length of the existing conformers list (for consecutive numbering).
de_threshold (float, optional): An energy threshold (in kJ/mol) above which wells in a torsion
will not be considered.
force_field (str, optional): The type of force field to use.
max_combination_iterations (int, optional): The max num of times to iteratively search for the lowest conformer.
torsion_angles (dict, optional): The torsion angles. Keys are torsion tuples, values are lists of all
corresponding angles from conformers.
multiple_sampling_points_dict (dict, optional): Keys are torsion tuples, values are respective sampling points.
wells_dict (dict, optional): Keys are torsion tuples, values are well dictionaries.
plot_path (str, optional): A folder path in which the plot will be saved.
If None, the plot will not be shown (nor saved).
symmetries (dict, optional): Keys are tuples of scan indices (1-indexed), values are internal
rotation symmetry numbers (sigma).
Returns:
list: New conformer combinations, entries are conformer dictionaries.
"""
de_threshold = de_threshold or DE_THRESHOLD
if hypothetical_num_comb > combination_threshold:
# don't generate all combinations, there are simply too many
# iteratively modify the lowest conformer until it converges.
logger.debug(f'hypothetical_num_comb for {label} is > {combination_threshold}')
new_conformers = conformers_combinations_by_lowest_conformer(
label, mol=mol, base_xyz=base_xyz, multiple_tors=multiple_tors,
multiple_sampling_points=multiple_sampling_points, len_conformers=len_conformers, force_field=force_field,
plot_path=plot_path, de_threshold=de_threshold, max_combination_iterations=max_combination_iterations,
torsion_angles=torsion_angles, multiple_sampling_points_dict=multiple_sampling_points_dict,
wells_dict=wells_dict, symmetries=symmetries)
else:
# just generate all combinations and get their FF energies
logger.debug(f'hypothetical_num_comb for {label} is <= {combination_threshold}')
new_conformers = generate_all_combinations(label, mol, base_xyz, multiple_tors, multiple_sampling_points,
len_conformers=len_conformers, force_field=force_field,
torsions=list(torsion_angles.keys()))
return new_conformers
def conformers_combinations_by_lowest_conformer(label, mol, base_xyz, multiple_tors, multiple_sampling_points,
len_conformers=-1, force_field='MMFF94s', max_combination_iterations=25,
torsion_angles=None, multiple_sampling_points_dict=None,
wells_dict=None, de_threshold=None, plot_path=None, symmetries=None):
"""
Iteratively modify dihedrals in the lowest conformer (each iteration deduces a new lowest conformer),
until convergence.
Args:
label (str): The species' label.
mol (Molecule): The RMG molecule with the connectivity information.
base_xyz (dict): The base 3D geometry to be changed.
multiple_tors (list): Entries are torsion tuples of non-symmetric torsions.
multiple_sampling_points (list): Entries are lists of dihedral angles (sampling points), respectively correspond
to torsions in multiple_tors.
len_conformers (int, optional): The length of the existing conformers list (for consecutive numbering).
de_threshold (float, optional): An energy threshold (in kJ/mol) above which wells in a torsion
will not be considered.
force_field (str, optional): The type of force field to use.
max_combination_iterations (int, optional): The max num of times to iteratively search for the lowest conformer.
torsion_angles (dict, optional): The torsion angles. Keys are torsion tuples, values are lists of all
corresponding angles from conformers.
multiple_sampling_points_dict (dict, optional): Keys are torsion tuples, values are respective sampling points.
wells_dict (dict, optional): Keys are torsion tuples, values are well dictionaries.
plot_path (str, optional): A folder path in which the plot will be saved.
If None, the plot will not be shown (nor saved).
symmetries (dict, optional): Keys are tuples of scan indices (1-indexed), values are internal
rotation symmetry numbers (sigma).
Returns:
list: New conformer combinations, entries are conformer dictionaries.
"""
base_energy = get_force_field_energies(label, mol, num_confs=None, xyz=base_xyz,
force_field=force_field, optimize=True, suppress_warning=True)[1]
if len(base_energy) == 0:
return list()
else:
base_energy = base_energy[0]
new_conformers = list() # will be returned
lowest_conf_i = None
for i in range(max_combination_iterations):
newest_conformers_dict, newest_conformer_list = dict(), list() # conformers from the current iteration
for tor, sampling_points in zip(multiple_tors, multiple_sampling_points):
xyzs, energies = change_dihedrals_and_force_field_it(label, mol, xyz=base_xyz, torsions=[tor],
new_dihedrals=[[sp] for sp in sampling_points],
force_field=force_field, optimize=False)
newest_conformers_dict[tor] = list() # keys are torsions for plotting
for xyz, energy, dihedral in zip(xyzs, energies, sampling_points):
exists = False
if any([converter.compare_confs(xyz, conf['xyz']) for conf in new_conformers + newest_conformer_list]):
exists = True
if xyz is not None:
conformer = {'index': len_conformers + len(new_conformers) + len(newest_conformer_list),
'xyz': xyz,
'FF energy': round(energy, 3),
'source': f'Changing dihedrals on most stable conformer, iteration {i}',
'torsion': tor,
'dihedral': round(dihedral, 2)}
newest_conformers_dict[tor].append(conformer)
if not exists:
newest_conformer_list.append(conformer)
else:
# if xyz is None, atoms have collided
logger.debug(f'\n\natoms colliding in {label} for torsion {tor} and dihedral {dihedral}:')
logger.debug(xyz)
logger.debug('\n\n')
new_conformers.extend(newest_conformer_list)
if not newest_conformer_list and lowest_conf_i is not None:
    newest_conformer_list = [lowest_conf_i]
lowest_conf_i = get_lowest_confs(label, newest_conformer_list, n=1)[0]
if lowest_conf_i['FF energy'] == base_energy \
and converter.compare_confs(lowest_conf_i['xyz'], base_xyz):
break
elif lowest_conf_i['FF energy'] < base_energy:
base_energy = lowest_conf_i['FF energy']
if plot_path is not None:
logger.info(converter.xyz_to_str(lowest_conf_i['xyz']))
arc.plotter.draw_structure(xyz=lowest_conf_i['xyz'])
num_comb = arc.plotter.plot_torsion_angles(torsion_angles, multiple_sampling_points_dict,
wells_dict=wells_dict, e_conformers=newest_conformers_dict,
de_threshold=de_threshold, plot_path=plot_path)
if num_comb is not None:
if num_comb > 1000:
num_comb_str = f'{num_comb:.2E}'
else:
num_comb_str = str(num_comb)
logger.info(f'Number of conformer combinations for {label} after reduction: {num_comb_str}')
if de_threshold is not None:
min_e = min([conf['FF energy'] for conf in new_conformers])
new_conformers = [conf for conf in new_conformers if conf['FF energy'] - min_e < de_threshold]
return new_conformers
def generate_all_combinations(label, mol, base_xyz, multiple_tors, multiple_sampling_points, len_conformers=-1,
torsions=None, force_field='MMFF94s'):
"""
Generate all combinations of torsion wells from a base conformer.
Args:
label (str): The species' label.
mol (Molecule): The RMG molecule with the connectivity information.
base_xyz (dict): The base 3D geometry to be changed.
multiple_tors (list): Entries are torsion tuples of non-symmetric torsions.
multiple_sampling_points (list): Entries are lists of dihedral angles (sampling points), respectively correspond
to torsions in multiple_tors.
len_conformers (int, optional): The length of the existing conformers list (for consecutive numbering).
force_field (str, optional): The type of force field to use.
torsions (list, optional): A list of all possible torsions in the molecule. Will be determined if not given.
Returns:
list: New conformer combinations, entries are conformer dictionaries.
"""
# generate sampling points combinations
product_combinations = list(product(*multiple_sampling_points))
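# e.g., multiple_sampling_points = [[60., 180., 300.], [0., 180.]] yields the 6 combinations
# (60, 0), (60, 180), (180, 0), (180, 180), (300, 0), (300, 180).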
new_conformers = list() # will be returned
if multiple_tors:
xyzs, energies = change_dihedrals_and_force_field_it(label, mol, xyz=base_xyz, torsions=multiple_tors,
new_dihedrals=product_combinations, optimize=True,
force_field=force_field)
for xyz, energy in zip(xyzs, energies):
if xyz is not None:
new_conformers.append({'index': len_conformers + len(new_conformers),
'xyz': xyz,
'FF energy': energy,
'source': 'Generated all combinations from scan map'})
else:
# no multiple torsions (all torsions are symmetric or no torsions in the molecule), this is a trivial case
energy = get_force_field_energies(label, mol, num_confs=None, xyz=base_xyz, force_field=force_field,
optimize=True, suppress_warning=True)[1][0]
new_conformers.append({'index': len_conformers + len(new_conformers),
'xyz': base_xyz,
'FF energy': energy,
'source': 'Generated all combinations from scan map (trivial case)'})
if torsions is None:
torsions = determine_rotors([mol])[0]  # determine_rotors() returns (torsions, tops); only the torsions are needed here
new_conformers = determine_dihedrals(new_conformers, torsions)
return new_conformers
def generate_force_field_conformers(label,
mol_list,
torsion_num,
charge,
multiplicity,
xyzs=None,
num_confs=None,
force_field='MMFF94s'):
"""
Generate conformers using RDKit and OpenBabel and optimize them using a force field.
Also consider user guesses in `xyzs`.
Args:
label (str): The species' label.
mol_list (list): Entries are Molecule objects representing resonance structures of a chemical species.
xyzs (list, optional): Entries are xyz coordinates in dict format, given as initial guesses.
torsion_num (int): The number of torsions identified in the molecule.
charge (int): The net charge of the species.
multiplicity (int): The species spin multiplicity.
num_confs (int, optional): The number of conformers to generate.
force_field (str, optional): The type of force field to use.
Returns:
list: Entries are conformer dictionaries.
Raises:
ConformerError: If xyzs is given and it is not a list, or its entries are not dictionaries.
"""
conformers = list()
number_of_heavy_atoms = len([atom for atom in mol_list[0].atoms if atom.is_non_hydrogen()])
if num_confs is None:
num_confs, num_chiral_centers = determine_number_of_conformers_to_generate(
label=label, heavy_atoms=number_of_heavy_atoms, torsion_num=torsion_num, mol=mol_list[0],
xyz=xyzs[0] if xyzs is not None else None)
else:
num_chiral_centers = ''
chiral_centers = '' if not num_chiral_centers else f', {num_chiral_centers} chiral centers,'
logger.info(f'Species {label} has {number_of_heavy_atoms} heavy atoms{chiral_centers} and {torsion_num} torsions. '
f'Using {num_confs} random conformers.')
for mol in mol_list:
ff_xyzs, ff_energies = list(), list()
try:
ff_xyzs, ff_energies = get_force_field_energies(label,
mol,
num_confs=num_confs,
force_field=force_field)
except ValueError as e:
logger.warning(f'Could not generate conformers for {label}, failed with: {e}')
if ff_xyzs:
for xyz, energy in zip(ff_xyzs, ff_energies):
conformers.append({'xyz': xyz,
'index': len(conformers),
'FF energy': energy,
'source': force_field})
# User guesses
if xyzs is not None and xyzs:
if not isinstance(xyzs, list):
raise ConformerError('The xyzs argument must be a list, got {0}'.format(type(xyzs)))
for xyz in xyzs:
if not isinstance(xyz, dict):
raise ConformerError('Each entry in xyzs must be a dictionary, got {0}'.format(type(xyz)))
s_mol, b_mol = converter.molecules_from_xyz(xyz, multiplicity=multiplicity, charge=charge)
conformers.append({'xyz': xyz,
'index': len(conformers),
'FF energy': get_force_field_energies(label, mol=b_mol or s_mol, xyz=xyz,
optimize=True, force_field=force_field)[1][0],
'source': 'User Guess'})
return conformers
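# A minimal usage sketch for generate_force_field_conformers() (argument values are hypothetical):
#     conformers = generate_force_field_conformers(label='ethanol', mol_list=[mol], torsion_num=2,
#                                                   charge=0, multiplicity=1, num_confs=50)
# Each returned entry is a dict with 'xyz', 'index', 'FF energy', and 'source' keys.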
def change_dihedrals_and_force_field_it(label, mol, xyz, torsions, new_dihedrals, optimize=True, force_field='MMFF94s'):
"""
Change dihedrals of specified torsions according to the new dihedrals specified, and get FF energies.
Example::
torsions = [(1, 2, 3, 4), (9, 4, 7, 1)]
new_dihedrals = [[90, 120], [90, 300], [180, 270], [30, 270]]
For each entry in ``new_dihedrals``, the torsions are set accordingly relative to the base conformer
(defined using `xyz`), and the energy and xyz of the newly generated conformer are kept.
We assume that each list entry in new_dihedrals is of the length of the torsions list (2 in the example).
Args:
label (str): The species' label.
mol (Molecule): The RMG molecule with the connectivity information.
xyz (dict): The base 3D geometry to be changed.
torsions (list): Entries are torsion tuples for which the dihedral will be changed relative to xyz.
new_dihedrals (list): Entries are same size lists of dihedral angles (floats) corresponding to the torsions.
optimize (bool, optional): Whether to optimize the coordinates using FF. True to optimize.
force_field (str, optional): The type of force field to use.
Returns:
list: The conformer xyz geometries corresponding to the list of dihedrals.
Returns:
list: The conformer FF energies corresponding to the list of dihedrals.
"""
if isinstance(xyz, str):
xyz = converter.str_to_xyz(xyz)
if torsions is None or new_dihedrals is None:
xyz, energy = get_force_field_energies(label, mol=mol, xyz=xyz, optimize=True,
force_field=force_field, suppress_warning=True)
return xyz, energy
xyzs, energies = list(), list()
# make sure new_dihedrals is a list of lists (or tuples):
if isinstance(new_dihedrals, (int, float)):
new_dihedrals = [[new_dihedrals]]
if isinstance(new_dihedrals, list) and not isinstance(new_dihedrals[0], (list, tuple)):
new_dihedrals = [new_dihedrals]
for dihedrals in new_dihedrals:
xyz_dihedrals = xyz
for torsion, dihedral in zip(torsions, dihedrals):
conf, rd_mol = converter.rdkit_conf_from_mol(mol, xyz_dihedrals)
if conf is not None:
torsion_0_indexed = [tor - 1 for tor in torsion]
xyz_dihedrals = converter.set_rdkit_dihedrals(conf, rd_mol, torsion_0_indexed, deg_abs=dihedral)
xyz_, energy = get_force_field_energies(label, mol=mol, xyz=xyz_dihedrals, optimize=True,
force_field=force_field, suppress_warning=True)
if energy and xyz_:
energies.append(energy[0])
if optimize:
xyzs.append(xyz_[0])
else:
xyzs.append(xyz_dihedrals)
else:
energies.append(None)
xyzs.append(xyz_dihedrals)
return xyzs, energies
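# Illustrative usage sketch for change_dihedrals_and_force_field_it() (values are hypothetical):
#     xyzs, energies = change_dihedrals_and_force_field_it(
#         label='propane', mol=mol, xyz=base_xyz, torsions=[(1, 2, 3, 4)],
#         new_dihedrals=[[60.], [180.], [300.]], force_field='MMFF94s')
# One xyz and one energy are returned per entry in new_dihedrals (three of each here).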
def determine_rotors(mol_list):
"""
Determine possible unique rotors in the species to be treated as hindered rotors.
Args:
mol_list (list): Localized structures (Molecule objects) by which all rotors will be determined.
Returns:
list: A list of torsions, each a list of four atom indices (1-indexed) defining the scan.
Returns:
list: A list of indices of top atoms (including one of the pivotal atoms) corresponding to the torsions.
"""
torsions, tops = list(), list()
for mol in mol_list:
rotors = find_internal_rotors(mol)
for new_rotor in rotors:
for existing_torsion in torsions:
if existing_torsion == new_rotor['scan']:
break
else:
torsions.append(new_rotor['scan'])
tops.append(new_rotor['top'])
return torsions, tops
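# Usage sketch for determine_rotors():
#     torsions, tops = determine_rotors(mol_list)
# Each torsion is a 4-atom scan such as [3, 1, 2, 6] (1-indexed), and the corresponding top lists
# the atoms on one side of the rotor, including one of the pivotal atoms.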
def determine_number_of_conformers_to_generate(label: str,
heavy_atoms: int,
torsion_num: int,
mol: Optional[Molecule] = None,
xyz: Optional[dict] = None,
minimalist: bool = False,
) -> Tuple[int, int]:
"""
Determine the number of conformers to generate using molecular mechanics
Args:
label (str): The species' label.
heavy_atoms (int): The number of heavy atoms in the molecule.
torsion_num (int): The number of potential torsions in the molecule.
mol (Molecule, optional): The RMG Molecule object.
xyz (dict, optional): The xyz coordinates.
minimalist (bool, optional): Whether to return a small number of conformers, useful when this is just a guess
before fitting a force field. True to be minimalistic.
Raises:
ConformerError: If the number of conformers to generate cannot be determined.
Returns:
Tuple[int, int]:
- The number of conformers to generate.
- The number of chiral centers.
"""
if isinstance(torsion_num, list):
torsion_num = len(torsion_num)
for heavy_range, num_confs_1 in CONFS_VS_HEAVY_ATOMS.items():
if heavy_range[1] == 'inf' and heavy_atoms >= heavy_range[0]:
break
elif heavy_range[1] >= heavy_atoms >= heavy_range[0]:
break
else:
raise ConformerError(f'Could not determine the number of conformers to generate according to the number '
f'of heavy atoms ({heavy_atoms}) in {label}. The CONFS_VS_HEAVY_ATOMS dictionary might be '
f'corrupt, got:\n {CONFS_VS_HEAVY_ATOMS}')
for torsion_range, num_confs_2 in CONFS_VS_TORSIONS.items():
if torsion_range[1] == 'inf' and torsion_num >= torsion_range[0]:
break
elif torsion_range[1] >= torsion_num >= torsion_range[0]:
break
else:
raise ConformerError(f'Could not determine the number of conformers to generate according to the number '
f'of torsions ({torsion_num}) in {label}. The CONFS_VS_TORSIONS dictionary might be '
f'corrupt, got:\n {CONFS_VS_TORSIONS}')
if minimalist:
num_confs = min(num_confs_1, num_confs_2, 250)
else:
num_confs = max(num_confs_1, num_confs_2)
# increase the number of conformers if there are more than two chiral centers
num_chiral_centers = 0
if mol is None and xyz is not None:
mol = converter.molecules_from_xyz(xyz)[1]
if mol is not None and xyz is None:
xyzs = get_force_field_energies(label, mol, num_confs=1, suppress_warning=True)[0]
xyz = xyzs[0] if len(xyzs) else None
if mol is not None and xyz is not None:
num_chiral_centers = get_number_of_chiral_centers(label, mol, xyz=xyz, just_get_the_number=True)
if num_chiral_centers > 2:
num_confs = int(num_confs * num_chiral_centers)
return num_confs, num_chiral_centers
def determine_dihedrals(conformers, torsions):
"""
For each conformer in `conformers` determine the respective dihedrals.
Args:
conformers (list): Entries are conformer dictionaries.
torsions (list): All possible torsions in the molecule.
Returns:
list: Entries are conformer dictionaries.
"""
for conformer in conformers:
if isinstance(conformer['xyz'], str):
xyz = converter.str_to_xyz(conformer['xyz'])
else:
xyz = conformer['xyz']
if 'torsion_dihedrals' not in conformer or not conformer['torsion_dihedrals']:
conformer['torsion_dihedrals'] = dict()
for torsion in torsions:
dihedral = vectors.calculate_dihedral_angle(coords=xyz['coords'], torsion=torsion, index=1)
conformer['torsion_dihedrals'][tuple(torsion)] = dihedral
return conformers
def determine_torsion_sampling_points(label, torsion_angles, smeared_scan_res=None, symmetry=1):
"""
Determine how many points to consider in each well of a torsion for conformer combinations.
Args:
label (str): The species' label.
torsion_angles (list): Well angles in the torsion.
smeared_scan_res (float, optional): The resolution (in degrees) for scanning smeared wells.
symmetry (int, optional): The torsion symmetry number.
Returns:
list: Sampling points for the torsion.
Returns:
list: Each entry is a well dictionary with the keys
``start_idx``, ``end_idx``, ``start_angle``, ``end_angle``, ``angles``.
"""
smeared_scan_res = smeared_scan_res or SMEARED_SCAN_RESOLUTIONS
sampling_points = list()
wells = get_wells(label, torsion_angles, blank=20)
for i, well in enumerate(wells):
width = abs(well['end_angle'] - well['start_angle'])
mean = sum(well['angles']) / len(well['angles'])
if width <= 2 * smeared_scan_res:
sampling_points.append(mean)
else:
num = int(width / smeared_scan_res)
padding = abs(mean - well['start_angle'] - ((num - 1) * smeared_scan_res) / 2)
sampling_points.extend([padding + well['angles'][0] + smeared_scan_res * j for j in range(int(num))])
if symmetry > 1 and i == len(wells) / symmetry - 1:
break
return sampling_points, wells
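# Behavior sketch for determine_torsion_sampling_points(): a narrow well (width <= 2 * smeared_scan_res)
# contributes only its mean angle, while a wider well is sampled roughly every smeared_scan_res degrees;
# for a symmetric torsion (symmetry > 1) only the first 1/symmetry fraction of the wells is sampled.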
def determine_torsion_symmetry(label, top1, mol_list, torsion_scan):
"""
Check whether a torsion is symmetric.
If a torsion well is "well defined" and not smeared, it could be symmetric.
Check the groups attached to the rotor pivots to determine whether it is indeed symmetric
We don't care about the actual rotor symmetry number here, since we plan to just use the first well
(they're all the same).
Args:
label (str): The species' label.
top1 (list): A list of atom indices on one side of the torsion, including the pivotal atom.
mol_list (list): A list of molecules.
torsion_scan (list): The angles corresponding to this torsion from all conformers.
Returns:
int: The rotor symmetry number.
"""
symmetry = 1
check_tops = [1, 1] # flags for checking top1 and top2
mol = mol_list[0]
top2 = [i + 1 for i in range(len(mol.atoms)) if i + 1 not in top1]
for j, top in enumerate([top1, top2]):
# A quick bypass for methyl rotors which are too common:
if len(top) == 4 and mol.atoms[top[0] - 1].is_carbon() \
and all([mol.atoms[top[i] - 1].is_hydrogen() for i in range(1, 4)]):
symmetry *= 3
check_tops[j] = 0
# A quick bypass for methylene radicals:
if len(top) == 3 and mol.atoms[top[0] - 1].is_carbon() and mol.atoms[top[0] - 1].radical_electrons == 1 \
and all([mol.atoms[top[i] - 1].is_hydrogen() for i in range(1, 3)]):
symmetry *= 2
check_tops[j] = 0
# A quick bypass for benzene rings:
elif len(top) == 11 and sum([mol.atoms[top[i] - 1].is_carbon() for i in range(11)]) == 6 \
and sum([mol.atoms[top[i] - 1].is_hydrogen() for i in range(11)]) == 5:
symmetry *= 2
check_tops[j] = 0
# treat the torsion list as cyclic, search for at least two blank parts of at least 60 degrees each
# if the means of all data parts of the scan are uniformly scattered, the torsion might be symmetric
wells = get_wells(label=label, angles=torsion_scan, blank=60)
distances, well_widths = list(), list()
for i in range(len(wells)):
well_widths.append(abs(wells[i]['end_angle'] - wells[i]['start_angle']))
if i > 0:
distances.append(int(round(abs(wells[i]['start_angle'] - wells[i - 1]['end_angle'])) / 10) * 10)
mean_well_width = sum(well_widths) / len(well_widths)
if len(wells) in [1, 2, 3, 4, 6, 9] and all([distance == distances[0] for distance in distances]) \
and all([abs(width - mean_well_width) / mean_well_width < determine_well_width_tolerance(mean_well_width)
for width in well_widths]):
# All well distances and widths are equal. The torsion scan might be symmetric, check the groups
for j, top in enumerate([top1, top2]):
if check_tops[j]:
groups, grp_idx, groups_indices = list(), list(), list()
for atom in mol.atoms[top[0] - 1].edges.keys():
if mol.vertices.index(atom) + 1 in top:
atom_indices = determine_top_group_indices(
mol=mol, atom1=mol.atoms[top[0] - 1], atom2=atom, index=0)[0]
groups.append(to_group(mol, atom_indices))
grp_idx.append(atom_indices)
groups_indices.append([g + 1 for g in atom_indices])
# hard-coding for NO2/NS2 groups, since the two O or S atoms have different atom types in each localized
# structure, hence are not isomorphic
if len(top) == 3 and mol.atoms[top[0] - 1].atomtype.label == 'N5dc' \
and (all([mol.atoms[top[k] - 1].atomtype.label in ['O2d', 'O0sc'] for k in [1, 2]])
or all([mol.atoms[top[k] - 1].atomtype.label in ['S2d', 'S0sc'] for k in [1, 2]])):
symmetry *= 2
# all other groups:
elif not mol.atoms[top[0] - 1].lone_pairs > 0 and not mol.atoms[top[0] - 1].radical_electrons > 0 \
and all([groups[0].is_isomorphic(group, save_order=True) for group in groups[1:]]):
symmetry *= len(groups)
return symmetry
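# Example grounded in the bypasses above: a methyl top (a carbon bonded to three hydrogens)
# multiplies the returned symmetry by 3, and a methylene radical top multiplies it by 2.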
def determine_well_width_tolerance(mean_width):
"""
Determine the tolerance by which well widths are determined to be nearly equal.
Fitted to a polynomial trend line for the following data of (mean, tolerance) pairs::
(100, 0.11), (60, 0.13), (50, 0.15), (25, 0.25), (5, 0.50), (1, 0.59)
Args:
mean_width (float): The mean well width in degrees.
Returns:
float: The tolerance.
"""
if mean_width > 100:
return 0.1
tol = -1.695e-10 * mean_width ** 5 + 6.209e-8 * mean_width ** 4 - 8.855e-6 * mean_width ** 3 \
+ 6.446e-4 * mean_width ** 2 - 2.610e-2 * mean_width + 0.6155
return tol
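# Sanity check against the fitted data above: determine_well_width_tolerance(50) evaluates to ~0.15,
# consistent with the (50, 0.15) data point, and any mean width above 100 degrees returns the fixed 0.1.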
def get_lowest_confs(label: str,
confs: Union[dict, list],
n: int = 10,
e: float = 5.0,
energy: str = 'FF energy',
) -> list:
"""
Get the most stable conformers.
Args:
label (str): The species' label.
confs (list): Entries are either conformer dictionaries or length-two lists of xyz coordinates and energy.
n (int, optional): Number of lowest conformers to return.
e (float, optional): The energy threshold above the lowest energy conformer in kJ/mol
below which all conformers will be returned.
energy (str, optional): The energy attribute to search by. Currently only 'FF energy' is supported.
Raises:
ConformerError: If n < 1, e < 0, both n and e are ``None``, or if no conformers are given.
Returns:
list: Conformer dictionaries.
"""
if e is not None:
if e < 0:
raise ConformerError(f'e cannot be negative, got: {e}')
elif n is not None:
if n < 1:
raise ConformerError(f'n cannot be lower than 1, got: {n}')
else:
raise ConformerError('Either n or e must be specified.')
if not confs:
raise ConformerError(f'get_lowest_confs() got no conformers for {label}')
if isinstance(confs[0], list):
conformer_list = list()
for entry in confs:
if entry[1] is not None:
conformer_list.append({'xyz': entry[0], energy: entry[1]})
elif isinstance(confs[0], dict):
conformer_list = [conformer for conformer in confs if energy in conformer and conformer[energy] is not None]
else:
raise ConformerError(f'confs could either be a list of dictionaries or a list of lists. '
f'Got a list of {type(confs[0])}s for {label}')
conformer_list.sort(key=lambda conformer: conformer[energy], reverse=False)
if e is not None:
min_e = min([conf[energy] for conf in conformer_list])
lowest_confs = [conformer_list[0]]
for index in range(len(conformer_list)):
if (e is not None and conformer_list[index][energy] > min_e + e) or (n is not None and len(lowest_confs) >= n):
break
if index > 0 and not any([converter.compare_confs(lowest_conf['xyz'], conformer_list[index]['xyz'])
for lowest_conf in lowest_confs]):
lowest_confs.append(conformer_list[index])
return lowest_confs
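# Usage sketch for get_lowest_confs() (values are hypothetical):
#     lowest = get_lowest_confs('glycine', confs=conformer_dicts, n=5, e=5.0)
# returns up to 5 unique conformers lying within 5 kJ/mol of the lowest-energy one.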
def get_torsion_angles(label, conformers, torsions):
"""
Populate each torsion pivots with all available angles from the generated conformers
Args:
label (str): The species' label.
conformers (list): The conformers from which to extract the angles.
torsions (list): The torsions to consider.
Returns:
dict: The torsion angles. Keys are torsion tuples, values are lists of all corresponding angles from conformers.
"""
torsion_angles = dict()
if len(conformers) and not any(['torsion_dihedrals' in conformer for conformer in conformers]):
raise ConformerError(f'Could not determine dihedral torsion angles for {label}. '
f'Consider calling `determine_dihedrals()` first.')
for conformer in conformers:
if 'torsion_dihedrals' in conformer and conformer['torsion_dihedrals']:
for torsion in torsions:
if tuple(torsion) not in torsion_angles:
torsion_angles[tuple(torsion)] = list()
torsion_angles[tuple(torsion)].append(conformer['torsion_dihedrals'][tuple(torsion)])
for tor in torsion_angles.keys():
torsion_angles[tor].sort()
return torsion_angles
def get_force_field_energies(label: str,
mol: Molecule,
num_confs: int = None,
xyz: dict = None,
force_field: str = 'MMFF94s',
optimize: bool = True,
try_ob: bool = True,
suppress_warning: bool = False) -> Tuple[list, list]:
"""
Determine force field energies using RDKit.
If ``num_confs`` is given, random 3D geometries will be generated. If xyz is given, it will be directly used instead.
The coordinates are returned in the order of atoms in mol.
Args:
label (str): The species' label.
mol (Molecule): The RMG molecule object with connectivity and bond order information.
num_confs (int, optional): The number of random 3D conformations to generate.
xyz (dict, optional): The 3D coordinates guess.
force_field (str, optional): The type of force field to use.
optimize (bool, optional): Whether to first optimize the conformer using FF. True to optimize.
try_ob (bool, optional): Whether to try OpenBabel if RDKit fails. ``True`` to try, ``True`` by default.
suppress_warning (bool, optional): Whether to suppress the warning of using OpenBabel. ``True`` to suppress, ``False`` by default.
Raises:
ConformerError: If conformers could not be generated.
Returns:
list: Entries are xyz coordinates, each in a dict format.
Returns:
list: Entries are the FF energies (in kJ/mol).
"""
xyzs, energies = list(), list()
if force_field.lower() in ['mmff94', 'mmff94s', 'uff']:
rd_mol = embed_rdkit(label, mol, num_confs=num_confs, xyz=xyz)
xyzs, energies = rdkit_force_field(label, rd_mol, force_field=force_field, optimize=optimize)
if not len(xyzs) and force_field.lower() in ['gaff', 'mmff94', 'mmff94s', 'uff', 'ghemical'] and try_ob:
if not suppress_warning:
logger.warning(f'Using OpenBabel instead of RDKit as a fall back method to generate conformers for {label}. '
f'This is often slower.')
if force_field.lower() not in ['mmff94', 'mmff94s', 'uff']:
    # rd_mol was not embedded above (e.g., for GAFF or Ghemical); embed it here for the OpenBabel fall back
    rd_mol = embed_rdkit(label, mol, num_confs=num_confs, xyz=xyz)
xyzs, energies = openbabel_force_field_on_rdkit_conformers(
    label, rd_mol, force_field=force_field, optimize=optimize)
if not len(xyzs):
if force_field.lower() not in ['mmff94', 'mmff94s', 'uff', 'gaff', 'ghemical']:
raise ConformerError(f'Unrecognized force field for {label}. Should be either MMFF94, MMFF94s, UFF, '
f'Ghemical, or GAFF. Got: {force_field}.')
# raise ConformerError(f'Could not generate conformers for species {label}.')
return xyzs, energies
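# Usage sketch for get_force_field_energies() (values are hypothetical):
#     xyzs, energies = get_force_field_energies(label='ethanol', mol=mol, num_confs=50,
#                                               force_field='MMFF94s')
# xyzs are xyz dicts in the atom order of mol, and energies are the matching FF energies in kJ/mol.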
def openbabel_force_field_on_rdkit_conformers(label, rd_mol, force_field='MMFF94s', optimize=True):
"""
Optimize RDKit conformers by OpenBabel using a force field (MMFF94 or MMFF94s are recommended).
This is a fall back method when RDKit fails to generate force field optimized conformers.
Args:
label (str): The species' label.
rd_mol (RDKit RDMol): The RDKit molecule with embedded conformers to optimize.
force_field (str, optional): The type of force field to use.
optimize (bool, optional): Whether to first optimize the conformer using FF. True to optimize.
Returns:
list: Entries are optimized xyz's in a dictionary format.
Returns:
list: Entries are float numbers representing the energies (in kJ/mol).
"""
xyzs, energies = list(), list()
# Set up Openbabel input and output format
obconversion = ob.OBConversion()
obconversion.SetInAndOutFormats('xyz', 'xyz')
# Set up Openbabel force field
ff = ob.OBForceField.FindForceField(force_field)
symbols = [rd_atom.GetSymbol() for rd_atom in rd_mol.GetAtoms()]
for i in range(rd_mol.GetNumConformers()):
# Convert RDKit conformer to xyz string
conf = rd_mol.GetConformer(i)
xyz_str = f'{conf.GetNumAtoms()}\n\n'
for j in range(conf.GetNumAtoms()):
xyz_str += symbols[j] + ' '
pt = conf.GetAtomPosition(j)
xyz_str += ' '.join([str(pt.x), str(pt.y), str(pt.z)]) + '\n'
# Build OpenBabel molecule from xyz string
ob_mol = ob.OBMol()
obconversion.ReadString(ob_mol, xyz_str)
ff.Setup(ob_mol)
# Optimize the molecule if needed
if optimize:
ff.ConjugateGradients(2000)
# Export xyzs and energies
ob_mol.GetCoordinates()
ff.GetCoordinates(ob_mol)
energies.append(ff.Energy())
xyz_str = '\n'.join(obconversion.WriteString(ob_mol).splitlines()[2:])
xyzs.append(converter.str_to_xyz(xyz_str))
return xyzs, energies
def mix_rdkit_and_openbabel_force_field(label,
mol,
num_confs=None,
xyz=None,
force_field='GAFF',
try_ob=False):
"""
Optimize conformers using a force field (GAFF, MMFF94s, MMFF94, UFF, Ghemical)
Use RDKit to generate the random conformers (OpenBabel isn't good enough),
but use OpenBabel to optimize them (RDKit doesn't have GAFF)
Args:
label (str): The species' label.
mol (Molecule, optional): The RMG molecule object with connectivity and bond order information.
num_confs (int, optional): The number of random 3D conformations to generate.
xyz (string or list, optional): The 3D coordinates in either a string or an array format.
force_field (str, optional): The type of force field to use.
try_ob (bool, optional): Whether to try OpenBabel if RDKit fails. ``True`` to try, ``False`` by default.
Returns:
list: Entries are optimized xyz's in a list format.
Returns:
list: Entries are float numbers representing the energies in kJ/mol.
"""
xyzs, energies = list(), list()
rd_mol = embed_rdkit(label, mol, num_confs=num_confs, xyz=xyz)
unoptimized_xyzs = list()
for i in range(rd_mol.GetNumConformers()):
conf, xyz = rd_mol.GetConformer(i), list()
for j in range(conf.GetNumAtoms()):
pt = conf.GetAtomPosition(j)
xyz.append([pt.x, pt.y, pt.z])
xyz = [xyz[j] for j, _ in enumerate(xyz)]  # explicit copy; no reordering is actually performed here
unoptimized_xyzs.append(xyz)
if not len(unoptimized_xyzs) and try_ob:
# use OB as the fall back method
logger.warning(f'Using OpenBabel instead of RDKit as a fall back method to generate conformers for {label}. '
f'This is often slower, and prohibits ARC from using all features of the conformers module.')
xyzs, energies = openbabel_force_field(label, mol, num_confs, force_field=force_field)
else:
for xyz in unoptimized_xyzs:
xyzs_, energies_ = openbabel_force_field(label,
mol,
num_confs,
xyz=xyz,
force_field=force_field)
xyzs.extend(xyzs_)
energies.extend(energies_)
return xyzs, energies
def openbabel_force_field(label, mol, num_confs=None, xyz=None, force_field='GAFF', method='diverse'):
"""
Optimize conformers using a force field (GAFF, MMFF94s, MMFF94, UFF, Ghemical)
Args:
label (str): The species' label.
mol (Molecule, optional): The RMG molecule object with connectivity and bond order information.
num_confs (int, optional): The number of random 3D conformations to generate.
xyz (dict, optional): The 3D coordinates.
force_field (str, optional): The type of force field to use.
method (str, optional): The conformer searching method to use in OpenBabel.
For method description, see http://openbabel.org/dev-api/group__conformer.shtml
Returns:
list: Entries are optimized xyz's in a list format.
Returns:
list: Entries are float numbers representing the energies in kJ/mol.
"""
xyzs, energies = list(), list()
ff = ob.OBForceField.FindForceField(force_field)
if xyz is not None:
# generate an OpenBabel molecule
obmol = ob.OBMol()
atoms = mol.vertices
ob_atom_ids = dict() # dictionary of OB atom IDs
for i, atom in enumerate(atoms):
a = obmol.NewAtom()
a.SetAtomicNum(atom.number)
a.SetVector(xyz['coords'][i][0], xyz['coords'][i][1], xyz['coords'][i][2])
if atom.element.isotope != -1:
a.SetIsotope(atom.element.isotope)
a.SetFormalCharge(atom.charge)
ob_atom_ids[atom] = a.GetId()
orders = {1: 1, 2: 2, 3: 3, 4: 4, 1.5: 5}
for atom1 in mol.vertices:
for atom2, bond in atom1.edges.items():
if bond.is_hydrogen_bond():
continue
index1 = atoms.index(atom1)
index2 = atoms.index(atom2)
if index1 < index2:
obmol.AddBond(index1 + 1, index2 + 1, orders[bond.order])
# optimize
ff.Setup(obmol)
ff.SetLogLevel(0)
ff.SetVDWCutOff(6.0) # The VDW cut-off distance (default=6.0)
ff.SetElectrostaticCutOff(10.0) # The Electrostatic cut-off distance (default=10.0)
ff.SetUpdateFrequency(10) # The frequency to update the non-bonded pairs (default=10)
ff.EnableCutOff(False) # Use cut-off (default=don't use cut-off)
# ff.SetLineSearchType('Newton2Num')
ff.SteepestDescentInitialize() # ConjugateGradientsInitialize
v = 1
while v:
v = ff.SteepestDescentTakeNSteps(1) # ConjugateGradientsTakeNSteps
if ff.DetectExplosion():
raise ConformerError(f'Force field {force_field} exploded with method SteepestDescent for {label}')
ff.GetCoordinates(obmol)
elif num_confs is not None:
obmol, ob_atom_ids = to_ob_mol(mol, return_mapping=True)
pybmol = pyb.Molecule(obmol)
pybmol.make3D()
obmol = pybmol.OBMol
ff.Setup(obmol)
if method.lower() == 'weighted':
ff.WeightedRotorSearch(num_confs, 2000)
elif method.lower() == 'random':
ff.RandomRotorSearch(num_confs, 2000)
elif method.lower() == 'diverse':
rmsd_cutoff = 0.5
energy_cutoff = 50.
confab_verbose = False
ff.DiverseConfGen(rmsd_cutoff, num_confs, energy_cutoff, confab_verbose)
elif method.lower() == 'systematic':
ff.SystematicRotorSearch(num_confs)
else:
raise ConformerError(f'Could not identify method {method} for {label}')
else:
raise ConformerError(f'Either num_confs or xyz should be given for {label}')
ff.GetConformers(obmol)
obconversion = ob.OBConversion()
obconversion.SetOutFormat('xyz')
for i in range(obmol.NumConformers()):
obmol.SetConformer(i)
ff.Setup(obmol)
xyz_str = '\n'.join(obconversion.WriteString(obmol).splitlines()[2:])
xyz_dict = converter.str_to_xyz(xyz_str)
# reorder:
xyz_dict['coords'] = tuple(xyz_dict['coords'][ob_atom_ids[mol.atoms[j]]]
for j in range(len(xyz_dict['coords'])))
xyzs.append(xyz_dict)
energies.append(ff.Energy())
return xyzs, energies
def embed_rdkit(label, mol, num_confs=None, xyz=None):
"""
Generate unoptimized conformers in RDKit. If ``xyz`` is not given, random conformers will be generated.
Args:
label (str): The species' label.
mol (RMG Molecule or RDKit RDMol): The molecule object with connectivity and bond order information.
num_confs (int, optional): The number of random 3D conformations to generate.
xyz (dict, optional): The 3D coordinates.
Returns:
RDMol: An RDKIt molecule with embedded conformers.
"""
if num_confs is None and xyz is None:
raise ConformerError(f'Either num_confs or xyz must be set when calling embed_rdkit() for {label}')
if isinstance(mol, RDMol):
rd_mol = mol
elif isinstance(mol, Molecule):
rd_mol = converter.to_rdkit_mol(mol=mol, remove_h=False)
else:
raise ConformerError(f'Argument mol can be either an RMG Molecule or an RDKit RDMol object. '
f'Got {type(mol)} for {label}')
if num_confs is not None:
Chem.AllChem.EmbedMultipleConfs(rd_mol, numConfs=num_confs, randomSeed=1, enforceChirality=True)
# Chem.AllChem.EmbedMultipleConfs(rd_mol, numConfs=num_confs, randomSeed=15, enforceChirality=False)
elif xyz is not None:
rd_conf = Chem.Conformer(rd_mol.GetNumAtoms())
for i in range(rd_mol.GetNumAtoms()):
rd_conf.SetAtomPosition(i, xyz['coords'][i])
rd_mol.AddConformer(rd_conf)
return rd_mol
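# Usage sketch for embed_rdkit(): rd_mol = embed_rdkit(label='ethanol', mol=mol, num_confs=100)
# embeds 100 random conformers; passing xyz instead embeds that single geometry. The result can be
# passed to rdkit_force_field() or openbabel_force_field_on_rdkit_conformers().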
def read_rdkit_embedded_conformers(label, rd_mol, i=None, rd_index_map=None):
"""
Read coordinates from RDKit conformers.
Args:
label (str): The species' label.
rd_mol (RDKit RDMol): The RDKit molecule with embedded conformers to optimize.
i (int, optional): The conformer index from rd_mol to read. If None, all will be read.
rd_index_map (dict, optional): An atom map dictionary to reorder the xyz. Requires mol to not be None.
Returns:
list: entries are xyz coordinate dicts.
"""
xyzs = list()
if i is None:
# read all conformers:
for i in range(rd_mol.GetNumConformers()):
xyzs.append(read_rdkit_embedded_conformer_i(rd_mol, i, rd_index_map=rd_index_map))
elif isinstance(i, int) and i < rd_mol.GetNumConformers():
# read only conformer i:
xyzs.append(read_rdkit_embedded_conformer_i(rd_mol, i, rd_index_map=rd_index_map))
else:
raise ConformerError(f'Cannot read conformer number "{i}" out of {rd_mol.GetNumConformers()} RDKit '
f'conformers for {label}')
return xyzs
def read_rdkit_embedded_conformer_i(rd_mol, i, rd_index_map=None):
"""
Read coordinates from RDKit conformers.
Args:
rd_mol (RDKit RDMol): The RDKit molecule with embedded conformers to optimize.
i (int): The conformer index from rd_mol to read.
rd_index_map (dict, optional): An atom map dictionary to reorder the xyz.
Keys are rdkit atom indices, values are RMG mol atom indices
Returns:
dict: xyz coordinates.
"""
conf = rd_mol.GetConformer(i)
coords = list()
for j in range(conf.GetNumAtoms()):
pt = conf.GetAtomPosition(j)
coords.append((pt.x, pt.y, pt.z))
symbols = [rd_atom.GetSymbol() for rd_atom in rd_mol.GetAtoms()]
if rd_index_map is not None:
# reorder
coords = [coords[rd_index_map[j]] for j in range(len(coords))]
symbols = [symbols[rd_index_map[j]] for j in range(len(symbols))]
xyz_dict = converter.xyz_from_data(coords=coords, symbols=symbols)
return xyz_dict
def rdkit_force_field(label, rd_mol, force_field='MMFF94s', optimize=True):
"""
Optimize RDKit conformers using a force field (MMFF94 or MMFF94s are recommended).
Args:
label (str): The species' label.
rd_mol (RDKit RDMol): The RDKit molecule with embedded conformers to optimize.
force_field (str, optional): The type of force field to use.
optimize (bool, optional): Whether to first optimize the conformer using FF. True to optimize.
Returns:
list: Entries are optimized xyz's in a dictionary format.
Returns:
list: Entries are float numbers representing the energies.
"""
xyzs, energies = list(), list()
for i in range(rd_mol.GetNumConformers()):
if optimize:
v, j = 1, 0
while v == 1 and j < 200: # v == 1: continue, v == 0: enough steps, v == -1: unable to set up
v = Chem.AllChem.MMFFOptimizeMolecule(rd_mol, mmffVariant=force_field, confId=i,
maxIters=500, ignoreInterfragInteractions=False)
j += 1
mol_properties = Chem.AllChem.MMFFGetMoleculeProperties(rd_mol, mmffVariant=force_field)
if mol_properties is not None:
ff = Chem.AllChem.MMFFGetMoleculeForceField(rd_mol, mol_properties, confId=i)
if optimize:
energies.append(ff.CalcEnergy())
xyzs.append(read_rdkit_embedded_conformer_i(rd_mol, i))
return xyzs, energies
def get_wells(label, angles, blank=20):
"""
Determine the distinct wells from a list of angles.
Args:
label (str): The species' label.
angles (list): The angles in the torsion.
blank (int, optional): The blank space between wells.
Returns:
list: Entries are well dicts with keys: ``start_idx``, ``end_idx``, ``start_angle``, ``end_angle``, ``angles``.
"""
if not angles:
raise ConformerError(f'Cannot determine wells without angles for {label}')
new_angles = angles
if angles[0] < 0 + blank and angles[-1] > 360 - blank:
# relocate the first chunk of data to the end; the well seems to wrap around the 0/360 degrees point
for i, angle in enumerate(angles):
if i > 0 and abs(angle - angles[i - 1]) > blank:
part2 = angles[:i]
for j, _ in enumerate(part2):
part2[j] += 360
new_angles = angles[i:] + part2
break
wells = list()
new_well = True
for i in range(len(new_angles) - 1):
if new_well:
wells.append({'start_idx': i,
'end_idx': None,
'start_angle': new_angles[i],
'end_angle': None,
'angles': list()})
new_well = False
wells[-1]['angles'].append(new_angles[i])
if abs(new_angles[i + 1] - new_angles[i]) > blank:
# This is the last point in this well
wells[-1]['end_idx'] = i
wells[-1]['end_angle'] = new_angles[i]
new_well = True
if len(wells):
wells[-1]['end_idx'] = len(new_angles) - 1
wells[-1]['end_angle'] = new_angles[-1]
wells[-1]['angles'].append(new_angles[-1])
return wells
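# Worked example for get_wells(): get_wells('example', [10, 15, 160, 165, 175], blank=20) returns
# two wells, one spanning 10-15 degrees (2 angles) and one spanning 160-175 degrees (3 angles).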
def check_special_non_rotor_cases(mol, top1, top2):
"""
Check whether one of the tops correspond to a special case which does not have a torsional mode.
Checking for ``R-[C,N]#[N,[CH],[C]]`` groups, such as in cyano groups (``R-C#N``),
C#C groups (``R-C#CH`` or ``R-C#[C]``), and azide groups (``R-N#N``).
Args:
mol (Molecule): The RMG molecule.
top1 (list): Entries are atom indices (1-indexed) on one side of the torsion, inc. one of the pivotal atoms.
top2 (list): Entries are atom indices (1-indexed) on the other side of the torsion, inc. the other pivotal atom.
Returns:
bool: ``True`` if this is indeed a special case which should **not** be treated as a torsional mode.
"""
for top in [top1, top2]:
if mol.atoms[top[0] - 1].atomtype.label in ['Ct', 'N3t', 'N5tc'] \
and mol.atoms[top[1] - 1].atomtype.label in ['Ct', 'N3t'] and \
(len(top) == 2 or (len(top) == 3 and mol.atoms[top[2] - 1].is_hydrogen())):
return True
return False
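# Example grounded in the docstring above: for propyne (CH3-C#CH) the C#C-H side matches the
# Ct/Ct pattern with a terminal hydrogen, so the corresponding coordinate is flagged as a non-rotor.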
def find_internal_rotors(mol):
"""
Locates the sets of indices corresponding to every internal rotor (1-indexed).
Args:
mol (Molecule): The molecule for which rotors will be determined
Returns:
list: Entries are rotor dictionaries with the four-atom scan coordinates, the pivots, and the smallest top.
"""
rotors = list()
for atom1 in mol.vertices:
if atom1.is_non_hydrogen():
for atom2, bond in atom1.edges.items():
if atom2.is_non_hydrogen() and mol.vertices.index(atom1) < mol.vertices.index(atom2) \
and (bond.is_single() or bond.is_hydrogen_bond()) and not mol.is_bond_in_cycle(bond):
if len(atom1.edges) > 1 and len(atom2.edges) > 1: # none of the pivotal atoms are terminal
rotor = dict()
# pivots:
rotor['pivots'] = [mol.vertices.index(atom1) + 1, mol.vertices.index(atom2) + 1]
# top:
top1, top1_has_heavy_atoms = determine_top_group_indices(mol, atom2, atom1, index=1)
top2, top2_has_heavy_atoms = determine_top_group_indices(mol, atom1, atom2, index=1)
non_rotor = check_special_non_rotor_cases(mol, top1, top2)
if non_rotor:
continue
if top1_has_heavy_atoms and not top2_has_heavy_atoms:
rotor['top'] = top2
elif top2_has_heavy_atoms and not top1_has_heavy_atoms:
rotor['top'] = top1
else:
rotor['top'] = top1 if len(top1) <= len(top2) else top2
# scan:
rotor['scan'] = [determine_smallest_atom_index_in_scan(atom1=atom1, atom2=atom2, mol=mol)]
rotor['scan'].extend([mol.vertices.index(atom1) + 1, mol.vertices.index(atom2) + 1])
rotor['scan'].append(determine_smallest_atom_index_in_scan(atom1=atom2, atom2=atom1, mol=mol))
# other keys:
rotor['number_of_running_jobs'] = 0
rotor['success'] = None
rotor['invalidation_reason'] = ''
rotor['times_dihedral_set'] = 0
rotor['trsh_methods'] = list()
rotor['scan_path'] = ''
rotor['directed_scan_type'] = 'ess' # default to 'ess', changed in initialize_directed_rotors()
rotor['directed_scan'] = dict()
rotor['dimensions'] = 1
rotor['original_dihedrals'] = list()
rotor['cont_indices'] = list()
rotors.append(rotor)
return rotors
def determine_smallest_atom_index_in_scan(atom1: Atom,
atom2: Atom,
mol: Molecule,
) -> int:
"""
Determine the smallest atom index in mol connected to ``atom1`` which is not ``atom2``.
Returns a heavy atom if available, otherwise a hydrogen atom.
Useful for deterministically determining the indices of the four atoms in a scan.
This function assumes there ARE additional atoms connected to ``atom1``, and that ``atom2`` is not a hydrogen atom.
Args:
atom1 (Atom): The atom whose neighbors will be searched.
atom2 (Atom): An atom connected to ``atom1`` to exclude (a pivotal atom).
mol (Molecule): The molecule to process.
Returns:
int: The smallest atom index (1-indexed) connected to ``atom1`` which is not ``atom2``.
"""
heavy_atoms, hydrogens = list(), list()
for atom3 in atom1.edges.keys():
if atom3.is_hydrogen():
hydrogens.append(mol.vertices.index(atom3))
elif atom3 is not atom2:
heavy_atoms.append(mol.vertices.index(atom3))
smallest_index = len(mol.vertices)
if len(heavy_atoms):
for atom_index in heavy_atoms:
if atom_index < smallest_index:
smallest_index = atom_index
else:
for atom_index in hydrogens:
if atom_index < smallest_index:
smallest_index = atom_index
return smallest_index + 1
def to_group(mol, atom_indices):
"""
This method converts a defined part of a Molecule into a Group.
Args:
mol (Molecule): The base molecule.
atom_indices (list): 0-indexed atom indices corresponding to atoms in mol to be included in the group.
Returns:
Group: A group consisting of the desired atoms in mol.
"""
# Create GroupAtom object for each atom in the molecule
group_atoms = list()
index_map = dict() # keys are Molecule atom indices, values are Group atom indices
for i, atom_index in enumerate(atom_indices):
atom = mol.atoms[atom_index]
group_atoms.append(gr.GroupAtom(atomtype=[atom.atomtype], radical_electrons=[atom.radical_electrons],
charge=[atom.charge], lone_pairs=[atom.lone_pairs]))
index_map[atom_index] = i
group = gr.Group(atoms=group_atoms, multiplicity=[mol.multiplicity])
for atom in mol.atoms:
# Create a GroupBond for each bond between desired atoms in the molecule
if mol.atoms.index(atom) in atom_indices:
for bonded_atom, bond in atom.edges.items():
if mol.atoms.index(bonded_atom) in atom_indices:
group.add_bond(gr.GroupBond(atom1=group_atoms[index_map[mol.atoms.index(atom)]],
atom2=group_atoms[index_map[mol.atoms.index(bonded_atom)]],
order=[bond.order]))
group.update()
return group
def update_mol(mol):
"""
Update atom types, multiplicity, and atom charges in the molecule.
Args:
mol (Molecule): The molecule to update.
Returns:
Molecule: the updated molecule.
"""
for atom in mol.atoms:
atom.update_charge()
mol.update_atomtypes(log_species=False, raise_exception=False)
mol.update_multiplicity()
mol.identify_ring_membership()
return mol
def generate_monoatomic_conformer(symbol):
"""
Generate a conformer for a monoatomic species.
Args:
symbol (str): The atomic symbol.
Returns:
dict: The monoatomic conformer.
"""
conf = {'xyz': {'symbols': (symbol,),
'isotopes': (converter.get_most_common_isotope_for_element(symbol),),
'coords': ((0.0, 0.0, 0.0),)},
'index': 0,
'FF energy': 0.0,
'chirality': None,
'source': 'monoatomic species',
'torsion_dihedrals': None,
}
return conf
def translate_groups(label, mol, xyz, pivot):
"""
Exchange between two groups in a molecule. The groups cannot share a ring with the pivotal atom.
The function does not change the atom order, just the coordinates of atoms.
If the pivotal atom has exactly one lone pair, consider it as well as a dummy atom in translations.
Args:
label (str): The species' label.
mol (Molecule): The 2D graph representation of the molecule.
xyz (dict): The 3D coordinates of the molecule, with the same atom order as in mol.
pivot (int): The 0-index of the pivotal atom around which groups are to be translated.
Returns:
dict: The translated coordinates.
"""
mol.identify_ring_membership() # populates the Atom.props['inRing'] attribute
atom1 = mol.atoms[pivot]
lp = atom1.lone_pairs
if lp > 1:
logger.warning(f'Cannot translate groups for {label} if the pivotal atom has more than one '
f'lone electron pair')
return xyz
groups, translate, dont_translate = list(), list(), list()
for atom2 in mol.atoms[pivot].edges.keys():
top = determine_top_group_indices(mol, atom1, atom2, index=0)[0]
groups.append({'atom': atom2, 'protons': sum([mol.atoms[i].number for i in top])}) # a dict per top
if 'inRing' in atom1.props and atom1.props['inRing'] and 'inRing' in atom2.props and atom2.props['inRing']:
# check whether atom1 and atom2 belong to the same ring
sssr = mol.get_deterministic_sssr()
for ring in sssr:
if atom1 in ring and atom2 in ring:
dont_translate.append(atom2)
break
groups.sort(key=lambda x: x['protons'], reverse=False) # sort by the size (sum of atomic numbers)
i = 0
while len(translate) < 2 - lp and i < len(groups):
if groups[i]['atom'] not in dont_translate:
translate.append(groups[i])
i += 1
if len(translate) == 1 and lp:
vector = vectors.get_lp_vector(label, mol=mol, xyz=xyz, pivot=pivot)
new_xyz = translate_group(mol=mol, xyz=xyz, pivot=pivot,
anchor=mol.atoms.index(translate[0]['atom']), vector=vector)
elif len(translate) == 2 and not lp:
vector = vectors.get_vector(pivot=pivot, anchor=mol.atoms.index(translate[1]['atom']), xyz=xyz)
new_xyz = translate_group(mol=mol, xyz=xyz, pivot=pivot,
anchor=mol.atoms.index(translate[0]['atom']), vector=vector)
# keep original xyz:
vector = vectors.get_vector(pivot=pivot, anchor=mol.atoms.index(translate[0]['atom']), xyz=xyz)
new_xyz = translate_group(mol=mol, xyz=new_xyz, pivot=pivot,
anchor=mol.atoms.index(translate[1]['atom']), vector=vector)
else:
if lp:
raise ConformerError(f'The number of groups to translate is {len(translate)}, expected 1 '
f'(with a lone pair) for {label}.')
else:
raise ConformerError(f'The number of groups to translate is {len(translate)}, expected 2 for {label}.')
return new_xyz
def translate_group(mol, xyz, pivot, anchor, vector):
"""
Translate a group (a set of atoms from the pivot towards the anchor and onwards) by changing its
pivot -> anchor vector to the desired new vector. Keep the relative distances between the group's atoms constant,
as well as the distance between the anchor and the vector atoms.
Args:
mol (Molecule): The 2D graph representation of the molecule.
xyz (dict): The 3D coordinates of the molecule with the same atom order as in mol.
pivot (int): The 0-index of the pivotal atom around which groups are to be translated.
anchor (int): The 0-index of an anchor atom. The group is defined from the pivot atom to the anchor atom,
including all other atoms in the molecule connected to the anchor. The pivot and anchor
atoms should not have another path connecting them such as a ring.
vector (list): The new vector by which the group will be translated.
Returns:
dict: The translated coordinates.
"""
# v1 = unit_vector([-vector[0], -vector[1], -vector[2]]) # reverse the direction to get the correct angle
v1 = vectors.unit_vector(vector)
v2 = vectors.unit_vector(vectors.get_vector(pivot=pivot, anchor=anchor, xyz=xyz))
normal = vectors.get_normal(v2, v1)
theta = vectors.get_angle(v1, v2)
# print(theta * 180 / math.pi) # print theta in degrees when troubleshooting
# All atoms within the group will be rotated around the same normal vector by theta:
group = determine_top_group_indices(mol=mol, atom1=mol.atoms[pivot], atom2=mol.atoms[anchor], index=0)[0]
coords = converter.xyz_to_coords_list(xyz)
for i in group:
coords[i] = vectors.rotate_vector(point_a=coords[pivot], point_b=coords[i], normal=normal, theta=theta)
new_xyz = converter.xyz_from_data(coords=coords, symbols=xyz['symbols'], isotopes=xyz['isotopes'])
return new_xyz
def get_number_of_chiral_centers(label, mol, conformer=None, xyz=None, just_get_the_number=True):
"""
Determine the number of chiral centers by type. Either ``conformer`` or ``xyz`` must be given.
Args:
label (str): The species label.
mol (Molecule): The RMG Molecule object.
conformer (dict, optional): A conformer dictionary.
xyz (dict, optional): The xyz coordinates.
just_get_the_number (bool, optional): Return the number of chiral centers regardless of their type.
Returns:
dict, int : Keys are types of chiral sites ('C' for carbon, 'N' for nitrogen, 'D' for double bond),
values are the number of chiral centers of each type. If ``just_get_the_number`` is ``True``,
just returns the number of chiral centers (integer).
Raises:
InputError: If neither ``conformer`` nor ``xyz`` were given.
"""
if conformer is None and xyz is None:
raise InputError('Must get either conformer or xyz.')
if conformer is None:
conformer = {'xyz': xyz}
conformer = determine_chirality(conformers=[conformer], label=label, mol=mol)[0]
result = {'C': 0, 'N': 0, 'D': 0}
for symbol in conformer['chirality'].values():
if symbol in ['R', 'S']:
result['C'] += 1
elif symbol in ['NR', 'NS']:
result['N'] += 1
elif symbol in ['E', 'Z']:
result['D'] += 1
else:
raise ConformerError(f"Chiral symbols must be either `R`, `S`, `NR`, `NS`, `E`, `Z`, got: {symbol}.")
if just_get_the_number:
return sum([val for val in result.values()])
return result
def get_lowest_diastereomers(label, mol, conformers, diastereomers=None):
"""
Get the 2^(n-1) diastereomers with the lowest energy (where n is the number of chiral centers in the molecule).
We exclude enantiomers (mirror images where ALL chiral centers simultaneously invert).
If a specific diastereomer is given (in an xyz dict form), then only the lowest conformer with the same chirality
will be returned.
Args:
label (str): The species' label.
mol (Molecule): The 2D graph representation of the molecule.
conformers (list): Entries are conformer dictionaries.
diastereomers (list, optional): Entries are xyz's in a dictionary format or conformer structures
representing specific diastereomers to keep.
Returns:
list: Entries are lowest energy diastereomeric conformer dictionaries to consider.
Raises:
ConformerError: If diastereomers is not None and is of wrong type,
or if conformers with the requested chirality combination could not be generated.
"""
# assign chirality properties to all conformers
conformers = determine_chirality(conformers, label, mol)
# initialize the enantiomeric dictionary (includes enantiomers and diastereomers)
# keys are chiral combinations, values are lowest conformers
enantiomers_dict = dict()
for conformer in conformers:
if conformer['FF energy'] is not None:
chirality_tuple = chirality_dict_to_tuple(conformer['chirality'])
if chirality_tuple not in list(enantiomers_dict.keys()):
# this is a new enantiomer, consider it
enantiomers_dict[chirality_tuple] = conformer
elif conformer['FF energy'] < enantiomers_dict[chirality_tuple]['FF energy']:
# found a lower energy conformer with the same chirality, replace
enantiomers_dict[chirality_tuple] = conformer
if diastereomers is None:
# no specific diastereomers were requested
pruned_enantiomers_dict = prune_enantiomers_dict(label, enantiomers_dict)
else:
if isinstance(diastereomers, list):
# make sure entries are conformers, convert if needed
modified_diastereomers = list()
for diastereomer in diastereomers:
if isinstance(diastereomer, str) or isinstance(diastereomer, dict) and 'coords' in diastereomer:
# we'll also accept string format xyz
modified_diastereomers.append({'xyz': converter.check_xyz_dict(diastereomer)})
elif isinstance(diastereomer, dict) and 'xyz' in diastereomer:
modified_diastereomers.append(diastereomer)
else:
raise ConformerError(f'diastereomers entries must be either xyz or conformer dictionaries, '
f'got {type(diastereomer)} for {label}')
            diastereomer_confs = determine_chirality(modified_diastereomers, label, mol)
else:
raise ConformerError(f'diastereomers must be a list of xyz coordinates, got: {type(diastereomers)}')
chirality_tuples = [chirality_dict_to_tuple(conformer['chirality']) for conformer in diastereomer_confs]
new_enantiomers_dict = dict()
for chirality_tuple, conformer in enantiomers_dict.items():
if chirality_tuple in chirality_tuples:
new_enantiomers_dict[chirality_tuple] = conformer
if not new_enantiomers_dict:
raise ConformerError(f'Could not generate conformers with chirality combination:\n{chirality_tuples}')
pruned_enantiomers_dict = prune_enantiomers_dict(label, new_enantiomers_dict)
if len(list(pruned_enantiomers_dict.keys())) and list(pruned_enantiomers_dict.keys())[0] != tuple():
logger.info(f'Considering the following enantiomeric combinations for {label}:\n'
f'{list(pruned_enantiomers_dict.keys())}')
return list(pruned_enantiomers_dict.values())
def prune_enantiomers_dict(label, enantiomers_dict):
"""
A helper function for screening out enantiomers from the enantiomers_dict, leaving only diastereomers
(so removing all exact mirror images). Note that double bond chiralities 'E' and 'Z' are not mirror images of each
other, and are not pruned out.
Args:
label (str): The species' label.
enantiomers_dict (dict): Keys are chirality tuples, values are conformer structures.
Returns:
dict: The pruned enantiomers_dict.
"""
pruned_enantiomers_dict = dict()
for chirality_tuples, conformer in enantiomers_dict.items():
inversed_chirality_tuples = tuple([(chirality_tuple[0], inverse_chirality_symbol(chirality_tuple[1]))
for chirality_tuple in chirality_tuples])
if chirality_tuples not in pruned_enantiomers_dict and inversed_chirality_tuples not in pruned_enantiomers_dict:
# this combination (or its exact mirror image) was not considered yet
if inversed_chirality_tuples in list(enantiomers_dict.keys()):
# the mirror image exists, check which has a lower energy
inversed_conformer = enantiomers_dict[inversed_chirality_tuples]
if inversed_conformer['FF energy'] is None and conformer['FF energy'] is None:
logger.warning(f'Could not get energies of enantiomers {chirality_tuples} '
f'nor its mirror image {inversed_chirality_tuples} for species {label}')
continue
elif inversed_conformer['FF energy'] is None:
pruned_enantiomers_dict[chirality_tuples] = conformer
elif conformer['FF energy'] is None:
pruned_enantiomers_dict[inversed_chirality_tuples] = inversed_conformer
elif conformer['FF energy'] <= inversed_conformer['FF energy']:
pruned_enantiomers_dict[chirality_tuples] = conformer
else:
pruned_enantiomers_dict[inversed_chirality_tuples] = inversed_conformer
else:
# the mirror image does not exist
pruned_enantiomers_dict[chirality_tuples] = conformer
return pruned_enantiomers_dict
def inverse_chirality_symbol(symbol):
"""
Inverses a chirality symbol, e.g., the 'R' character to 'S', or 'NS' to 'NR'.
Note that chiral double bonds ('E' and 'Z') must not be inversed (they are not mirror images of each other).
Args:
symbol (str): The chirality symbol.
Returns:
str: The inverse chirality symbol.
Raises:
InputError: If ``symbol`` could not be recognized.
"""
inversion_dict = {'R': 'S', 'S': 'R', 'NR': 'NS', 'NS': 'NR', 'E': 'E', 'Z': 'Z'}
if symbol not in list(inversion_dict.keys()):
raise InputError(f"Recognized chirality symbols are 'R', 'S', 'NR', 'NS', 'E', and 'Z', got {symbol}.")
return inversion_dict[symbol]
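
# Hedged illustration (added for clarity, not part of the original module):
# inverse_chirality_symbol('R') -> 'S', inverse_chirality_symbol('NS') -> 'NR',
# while 'E' and 'Z' map to themselves, since cis/trans isomers are not mirror images.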
def chirality_dict_to_tuple(chirality_dict):
"""
A helper function for using the chirality dictionary of a conformer as a key in the enantiomers_dict
by converting it to a tuple deterministically.
Args:
chirality_dict (dict): The chirality dictionary of a conformer.
Returns:
tuple: A deterministic tuple representation of the chirality dictionary.
Raises:
ConformerError: If the chirality values are wrong.
"""
# extract carbon sites (values are either 'R' or 'S'), nitrogen sites (values are either 'NR' or 'NS')
# and chiral double bonds (values are either 'E' or 'Z')
c_sites, n_sites, bonds, result = list(), list(), list(), list()
for site, chirality in chirality_dict.items():
if chirality in ['R', 'S']:
c_sites.append((site, chirality))
elif chirality in ['NR', 'NS']:
n_sites.append((site, chirality))
elif chirality in ['E', 'Z']:
bond_site = site if site[0] < site[1] else (site[1], site[0])
bonds.append((bond_site, chirality))
else:
raise ConformerError(f'Chiralities could either be R, S, NR, NS, E, or Z. Got: {chirality}.')
# sort the lists
c_sites.sort(key=lambda entry: entry[0])
n_sites.sort(key=lambda entry: entry[0])
bonds.sort(key=lambda entry: entry[0])
# combine by order
for entry in c_sites + n_sites + bonds:
result.append(entry)
return tuple(result)
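
# Hedged illustration (added for clarity; atom indices are hypothetical):
# for a conformer with chirality {(3,): 'R', (5,): 'NS', (0, 1): 'Z'},
# chirality_dict_to_tuple() returns (((3,), 'R'), ((5,), 'NS'), ((0, 1), 'Z')),
# i.e., carbon sites first, then nitrogen sites, then chiral double bonds, each sorted by site.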
def determine_chirality(conformers, label, mol, force=False):
"""
Determines the Cahn–Ingold–Prelog (CIP) chirality (R or S) of atoms in the conformer,
as well as the CIP chirality of double bonds (E or Z).
Args:
conformers (list): Entries are conformer dictionaries.
label (str): The species' label.
mol (RMG Molecule or RDKit RDMol): The molecule object with connectivity and bond order information.
force (bool, optional): Whether to override data, ``True`` to override, default is ``False``.
Returns:
list: Conformer dictionaries with updated with 'chirality'. ``conformer['chirality']`` is a dictionary.
Keys are either a 1-length tuple of atom indices (for chiral atom centers) or a 2-length tuple of atom
indices (for chiral double bonds), values are either 'R' or 'S' for chiral atom centers
(or 'NR' or 'NS' for chiral nitrogen centers), or 'E' or 'Z' for chiral double bonds.
All atom indices are 0-indexed.
"""
chiral_nitrogen_centers = identify_chiral_nitrogen_centers(mol)
new_mol, elements_to_insert = replace_n_with_c_in_mol(mol, chiral_nitrogen_centers)
for conformer in conformers:
if 'chirality' not in conformer:
# keys are either 1-length atom indices (for chiral atom centers)
# or 2-length atom indices (for chiral double bonds)
# values are either 'R', 'S', 'NR', 'NS', 'E', or 'Z'
conformer['chirality'] = dict()
elif conformer['chirality'] != dict() and not force:
# don't override data
continue
new_xyz = replace_n_with_c_in_xyz(label, mol, conformer['xyz'], chiral_nitrogen_centers, elements_to_insert)
rd_mol = embed_rdkit(label, new_mol, xyz=new_xyz)
Chem.rdmolops.AssignStereochemistryFrom3D(rd_mol, 0)
for i, rd_atom in enumerate(rd_mol.GetAtoms()):
rd_atom_props_dict = rd_atom.GetPropsAsDict()
if '_CIPCode' in list(rd_atom_props_dict.keys()):
if mol.atoms[i].is_nitrogen():
# this is a nitrogen site in the original molecule, mark accordingly
conformer['chirality'][(i,)] = 'N' + rd_atom_props_dict['_CIPCode']
else:
conformer['chirality'][(i,)] = rd_atom_props_dict['_CIPCode']
for rd_bond in rd_mol.GetBonds():
stereo = str(rd_bond.GetStereo())
if stereo in ['STEREOE', 'STEREOZ']:
# possible values are 'STEREOANY', 'STEREOCIS', 'STEREOE', 'STEREONONE', 'STEREOTRANS', and 'STEREOZ'
rd_atoms = [rd_bond.GetBeginAtomIdx(), rd_bond.GetEndAtomIdx()] # indices of atoms bonded by this bond
conformer['chirality'][tuple(rd_atom for rd_atom in rd_atoms)] = stereo[-1]
return conformers
def identify_chiral_nitrogen_centers(mol):
"""
    Identify the atom indices corresponding to chiral nitrogen centers in a molecule (umbrella modes).
Args:
mol (Molecule): The molecule to be analyzed.
Returns:
list: Atom numbers (0-indexed) representing chiral nitrogen centers in the molecule (umbrella modes).
Raises:
TypeError: If ``mol`` is of wrong type.
"""
if not isinstance(mol, Molecule):
raise TypeError(f'mol must be a Molecule instance, got: {type(mol)}')
chiral_nitrogen_centers = list()
for atom1 in mol.atoms:
if atom1.is_nitrogen() and atom1.lone_pairs == 1 and atom1.radical_electrons == 0 \
and (len(list(atom1.edges.keys())) == 3
or (atom1.radical_electrons == 1 and len(list(atom1.edges.keys())) == 2)):
groups, tops, top_element_counts = list(), list(), list()
for atom2 in atom1.edges.keys():
top = determine_top_group_indices(mol, atom1, atom2, index=0)[0]
tops.append(top)
top_element_counts.append(get_top_element_count(mol, top))
groups.append(to_group(mol, top))
if (top_element_counts[0] != top_element_counts[1] and top_element_counts[1] != top_element_counts[2]) \
or all([not groups[0].is_isomorphic(group, save_order=True) for group in groups[1:]] +
[not groups[-1].is_isomorphic(group, save_order=True) for group in groups[:-1]]):
                # if we can say that two groups, each considered separately, aren't isomorphic to the others,
                # then this nitrogen has all different groups.
chiral_nitrogen_centers.append(mol.atoms.index(atom1))
return chiral_nitrogen_centers
def replace_n_with_c_in_mol(mol, chiral_nitrogen_centers):
"""
Replace nitrogen atoms (pre-identified as chiral centers) with carbon atoms, replacing the lone electron pair
(assuming just one exists) with a hydrogen or a halogen atom, preserving any radical electrons on the nitrogen atom.
Args:
mol (Molecule): The molecule to be analyzed.
chiral_nitrogen_centers (list): The 0-index of chiral (umbrella mode) nitrogen atoms in the molecule.
Returns:
Molecule: A copy of the molecule with replaced N atoms.
Returns:
list: Elements inserted in addition to the C atom, ordered as in ``chiral_nitrogen_centers``.
Raises:
ConformerError: If any of the atoms indicated by ``chiral_nitrogen_centers`` could not be a chiral nitrogen atom
"""
new_mol = mol.copy(deep=True)
inserted_elements = list()
for n_index in chiral_nitrogen_centers:
if not mol.atoms[n_index].is_nitrogen():
raise ConformerError(f'Cannot replace a nitrogen atom index {n_index} if it is not a nitrogen element.')
if mol.atoms[n_index].lone_pairs != 1:
raise ConformerError(f'Cannot replace a nitrogen atom index {n_index} with number of lone pairs '
f'different than one (got: {mol.atoms[n_index].lone_pairs}).')
if mol.atoms[n_index].radical_electrons > 1:
raise ConformerError(f'Cannot replace a nitrogen atom index {n_index} if it has more than one radical '
f'electrons (got: {mol.atoms[n_index].radical_electrons}).')
if any([not bond.is_single() for bond in mol.atoms[n_index].edges.values()]):
raise ConformerError(f'Cannot replace a nitrogen atom index {n_index} if not all of its bonds are single '
f'(got: {[bond.order for bond in mol.atoms[n_index].edges.values()]}).')
new_c_atom = Atom(element=C_ELEMENT, radical_electrons=mol.atoms[n_index].radical_electrons,
charge=mol.atoms[n_index].charge, lone_pairs=0, id=mol.atoms[n_index].id)
new_c_atom.edges = dict()
for atom2 in mol.atoms[n_index].edges.keys():
# delete bonds from all other atoms connected to the atom represented by n_index
del new_mol.atoms[mol.atoms.index(atom2)].edges[new_mol.atoms[n_index]]
new_mol.vertices[n_index] = new_c_atom
h, f, cl = False, False, False # mark hydrogen, fluorine, and chlorine neighbors of the original atom
for atom2 in mol.atoms[n_index].edges.keys():
new_mol.add_bond(Bond(atom1=new_c_atom, atom2=new_mol.atoms[mol.atoms.index(atom2)], order=1))
if atom2.is_hydrogen():
h = True
elif atom2.is_fluorine():
f = True
elif atom2.is_chlorine():
cl = True
if not h:
additional_element = H_ELEMENT
inserted_elements.append('H')
elif not f:
additional_element = F_ELEMENT
inserted_elements.append('F')
elif not cl:
additional_element = Cl_ELEMENT
inserted_elements.append('Cl')
else:
# this can only happen if the molecule is NHFCl (ammonia substituted with one F and one Cl), use iodine
additional_element = I_ELEMENT
inserted_elements.append('I')
new_atom = Atom(element=additional_element, radical_electrons=0, charge=0,
lone_pairs=0 if additional_element.number == 1 else 3)
new_atom.edges = dict()
# new_mol.add_atom(new_atom)
new_mol.vertices.append(new_atom)
new_bond = Bond(atom1=new_c_atom, atom2=new_atom, order=1)
new_mol.add_bond(new_bond)
return new_mol, inserted_elements
def replace_n_with_c_in_xyz(label, mol, xyz, chiral_nitrogen_centers, elements_to_insert):
"""
Replace nitrogen atoms (pre-identified as chiral centers) with carbon atoms, replacing the lone electron pair
(assuming just one exists) with a hydrogen or a halogen atom.
Args:
label (str): The species label.
mol (Molecule): The respective molecule object.
xyz (dict): The 3D coordinates to process.
chiral_nitrogen_centers (list): The 0-index of chiral (umbrella mode) nitrogen atoms in the molecule.
elements_to_insert (list): The element (H/F/Cl/I) to insert in addition to C per nitrogen center.
Returns:
dict: The coordinates with replaced N atoms.
"""
symbols = list(copy.copy(xyz['symbols']))
isotopes = list(copy.copy(xyz['isotopes'])) if 'isotopes' in xyz else None
coords = converter.xyz_to_coords_list(xyz)
for n_index, element_to_insert in zip(chiral_nitrogen_centers, elements_to_insert):
symbols[n_index] = 'C'
if isotopes is not None:
isotopes[n_index] = 12
if element_to_insert == 'H':
symbol, isotope, distance = 'H', 1, 1.1
elif element_to_insert == 'F':
symbol, isotope, distance = 'F', 19, 2.0
elif element_to_insert == 'Cl':
symbol, isotope, distance = 'Cl', 35, 1.77
elif element_to_insert == 'I':
symbol, isotope, distance = 'I', 127, 2.14
else:
raise ConformerError(f'Element to insert must be either H, F, Cl, or I. Got: {element_to_insert}')
symbols.append(symbol)
if isotopes is not None:
isotopes.append(isotope)
lp_vector = vectors.set_vector_length(vectors.get_lp_vector(label, mol, xyz, n_index), distance)
lp_vector[0] += coords[n_index][0]
lp_vector[1] += coords[n_index][1]
lp_vector[2] += coords[n_index][2]
coords.append(lp_vector)
new_xyz = converter.xyz_from_data(coords=coords, symbols=symbols, isotopes=isotopes)
return new_xyz
def get_top_element_count(mol, top):
"""
Returns the element count for the molecule considering only the atom indices in ``top``.
Args:
mol (Molecule): The molecule to consider.
top (list): The atom indices to consider.
Returns:
dict: The element count, keys are tuples of (element symbol, isotope number), values are counts.
"""
if not isinstance(top, list):
top = list(top)
element_count = {}
for i, atom in enumerate(mol.atoms):
if i in top:
key = (atom.element.symbol, atom.element.isotope)
if key in element_count:
element_count[key] += 1
else:
element_count[key] = 1
return element_count
def initialize_log(verbose=logging.INFO):
"""
    Set up a simple logger for printing to stdout (not saving to a log file).
Args:
verbose (int, optional): Specify the amount of log text seen.
"""
logger.setLevel(verbose)
logger.propagate = False
# Use custom level names for cleaner log output
logging.addLevelName(logging.CRITICAL, 'Critical: ')
logging.addLevelName(logging.ERROR, 'Error: ')
logging.addLevelName(logging.WARNING, 'Warning: ')
logging.addLevelName(logging.INFO, '')
logging.addLevelName(logging.DEBUG, '')
logging.addLevelName(0, '')
# Create formatter and add to handlers
formatter = logging.Formatter('%(levelname)s%(message)s')
# Remove old handlers before adding ours
while logger.handlers:
logger.removeHandler(logger.handlers[0])
# Create console handler; send everything to stdout rather than stderr
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(verbose)
ch.setFormatter(formatter)
logger.addHandler(ch)
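
# Hedged usage sketch (added for illustration, not part of the original module):
# initialize_log(verbose=logging.DEBUG)
# logger.info('conformer generation started')  # prints to stdout with the custom level names set above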
|
nilq/baby-python
|
python
|
class FibonacciCode:
def fib_sequence(self, n, decode=False):
        # Build the Fibonacci sequence and drop the leading 0 and 1 terms (hence l[2:] below);
        # when decoding, generate n + 2 terms so there is one Fibonacci number per code position.
        l = []
a = 0
b = 1
if decode:
for _ in range(n + 2):
l.append(a)
a, b = b, a + b
else:
while a <= n:
l.append(a)
a, b = b, a + b
return l[2:]
def encode(self, n):
seq = self.fib_sequence(n)
res = ["0" for _ in seq]
while n > 0:
i, x = [(i, x) for i, x in enumerate(seq) if x <= n][-1]
res[i] = "1"
n %= x
res.append("1")
return "".join(res)
def decode(self, code):
        # Each codeword ends with the "11" delimiter, so split on it and re-append the trailing "1".
        codes = [x + "1" for x in code.split("11")][0:-1]
seq = self.fib_sequence(max([len(x) for x in codes]), True)
return [
sum([seq[i] if x == "1" else 0 for i, x in enumerate(code)])
for code in codes
]
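

# Hedged usage sketch (added for illustration; values were checked by hand against the encoder above):
if __name__ == "__main__":
    fc = FibonacciCode()
    assert fc.encode(4) == "1011"
    assert fc.decode("1011") == [4]
    assert fc.decode(fc.encode(6) + fc.encode(1)) == [6, 1]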
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def split_date(df):
# Remove the empty lines
df = df.dropna(how="all")
    # Create a new dataframe for only the date and time
date = df.Päivämäärä.str.split(expand=True)
# Change column names
date.columns = ["Weekday", "Day", "Month", "Year", "Hour"]
# Create the conversion dictionaries
days = {"ma":"Mon", "ti":"Tue", "ke":"Wed", "to":"Thu", "pe":"Fri", "la":"Sat", "su":"Sun"}
months = {"tammi":1, "helmi":2, "maalis":3, "huhti":4, "touko":5, "kesä":6, "heinä":7, "elo":8, "syys":9, "loka":10, "marras":11, "joulu":12}
    # Helper function to convert a time string (e.g. "12:00") to an integer hour
def time_to_hour(time):
string = str(time)
hour_part = string.split(":")[0]
return int(hour_part)
# Convert columns
date.Weekday = date.Weekday.map(days)
date.Day = date.Day.map(int)
date.Month = date.Month.map(months)
date.Year = date.Year.map(int)
date.Hour = date.Hour.map(time_to_hour)
return date
def split_date_continues():
# Get the original dataframe
df = pd.read_csv("src/Helsingin_pyorailijamaarat.csv", sep=";")
# Remove empty rows and columns
df = df.dropna(how="all", axis=1).dropna(how="all")
    # Get the dataframe which has the date split into multiple columns
date = split_date(df)
# Drop the Päivämäärä column
pruned = df.drop(columns=["Päivämäärä"])
return pd.concat([date, pruned], axis=1)
def cyclists_per_day():
# Get the original dataframe
df = split_date_continues()
# Drop the Hour and Weekday columns
df = df.drop(columns=["Hour", "Weekday"])
# Group by year, month and day
grouped = df.groupby(["Year", "Month", "Day"]).sum()
return grouped
def main():
# Original dataframe
df = cyclists_per_day()
# Dataframe of August 2017
august_2017 = df.loc[2017, 8, :]
print(august_2017)
# Helper array
arr = np.array(range(1, 32))
# Plot the dataframe with matplotlib
plt.plot(arr, august_2017)
plt.xticks(arr)
plt.show()
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
class CSVNoDupPlugin:
    def input(self, infile):
        # Read the CSV into a list of rows (each row is a list of fields).
        self.lines = []
        with open(infile, 'r') as inputcsv:
            for line in inputcsv:
                self.lines.append(line.strip().split(','))
def run(self):
# Find dups in first line
names = []
dups = []
for element in self.lines[0]:
if (element not in names):
names.append(element)
elif (element not in dups):
dups.append(element)
counters = dict()
for dup in dups:
counters[dup] = 0
        # Rename each duplicated header cell and the matching first-column cell of its row
        # by appending a running counter (fields are assumed to be wrapped in double quotes).
        for i in range(len(self.lines[0])):
if (self.lines[0][i] in dups):
name = self.lines[0][i]
self.lines[0][i] = '\"' + self.lines[0][i][1:len(self.lines[0][i])-1]+" "+str(counters[self.lines[0][i]]+1) + '\"'
self.lines[i+1][0] = '\"' + self.lines[i+1][0][1:len(self.lines[i+1][0])-1]+" "+str(counters[self.lines[i+1][0]]+1) + '\"'
counters[name] += 1
    def output(self, outfile):
        # Write the (possibly renamed) rows back out as comma-separated lines.
        with open(outfile, 'w') as outputcsv:
            for line in self.lines:
                for i in range(0, len(line)):
                    outputcsv.write(line[i])
                    if (i != len(line)-1):
                        outputcsv.write(',')
                    else:
                        outputcsv.write('\n')
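

# Hedged usage sketch (added for illustration; file names are hypothetical):
# plugin = CSVNoDupPlugin()
# plugin.input('matrix_with_duplicate_headers.csv')
# plugin.run()
# plugin.output('matrix_renamed.csv')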
|
nilq/baby-python
|
python
|
# Dependencies
from bs4 import BeautifulSoup
import pandas as pd
import datetime as dt
from splinter import Browser
import time
import re
# Define a function called `scrape` that will execute all of your scraping code from the `mission_to_mars.ipynb` notebook and return one Python dictionary containing all of the scraped data.
def scrape():
browser = Browser("chrome", executable_path="chromedriver", headless=True)
news_title, news_paragraph = mars_news(browser)
    # Store the results of the scraping functions in a dictionary
    # (named "data" rather than "dict" to avoid shadowing the builtin).
    data = {
"news_title": news_title,
"news_paragraph": news_paragraph,
"featured_image": featured_img(browser),
"hemispheres": hemispheres(browser),
"weather": weather_tweet(browser),
"facts": facts_mars(),
"last_modified": dt.datetime.now()
}
browser.quit()
    return data
def mars_news(browser):
mars_url = "https://mars.nasa.gov/news/"
browser.visit(mars_url)
    # Wait up to half a second for the first list element to be present
browser.is_element_present_by_css("ul.item_list li.slide", wait_time=0.5)
html = browser.html
mars_news_soup = BeautifulSoup(html, "html.parser")
try:
slide_elem = mars_news_soup.select_one("ul.item_list li.slide")
news_title = slide_elem.find("div", class_="content_title").get_text()
news_p = slide_elem.find(
"div", class_="article_teaser_body").get_text()
except AttributeError:
return None, None
return news_title, news_p
def featured_img(browser):
url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(url)
full_img_elem = browser.find_by_id("full_image")
full_img_elem.click()
browser.is_element_present_by_text("more info", wait_time=0.5)
more_info_elem = browser.links.find_by_partial_text("more info")
more_info_elem.click()
    # Parse the resulting html with BeautifulSoup
html = browser.html
image_soup = BeautifulSoup(html, "html.parser")
# Get the relative img url
image = image_soup.select_one("figure.lede a img")
try:
image_url_rel = image.get("src")
except AttributeError:
return None
# Use the base url to create an absolute url
image_url = f"https://www.jpl.nasa.gov{image_url_rel}"
return image_url
def hemispheres(browser):
# A way to break up long strings
hem_url = (
"https://astrogeology.usgs.gov/search/"
"results?q=hemisphere+enhanced&k1=target&v1=Mars"
)
browser.visit(hem_url)
# Click the link, find the sample anchor, return the href
hem_img_urls = []
for index in range(4):
# Find the elements on each loop to avoid a stale element exception
browser.find_by_css("a.product-item h3")[index].click()
hemi_data = scrape_hemisphere(browser.html)
# Append hemisphere object to list
hem_img_urls.append(hemi_data)
# Finally, we navigate backwards
browser.back()
return hem_img_urls
def weather_tweet(browser):
twitter_url = "https://twitter.com/marswxreport?lang=en"
browser.visit(twitter_url)
# halt for 4 seconds to let the Twitter page load before extracting the html
time.sleep(4)
html = browser.html
mars_weather_soup = BeautifulSoup(html, "html.parser")
# Find a tweet which contains the text `Mars Weather`
tweet_att = {"class": "tweet", "data-name": "Mars Weather"}
mars_weather_tweet = mars_weather_soup.find("div", attrs=tweet_att)
# Look through the tweet for the paragraph tag or span tag containing the tweet text
    # As tweets change regularly, the try/except block will locate the tweet text
try:
tweet_mars_weather = mars_weather_tweet.find("p", "tweet-text").get_text()
except AttributeError:
pattern = re.compile(r'sol')
tweet_mars_weather = mars_weather_soup.find('span', text=pattern).text
return tweet_mars_weather
def scrape_hemisphere(html_text):
# Soupify the html text
hemisphere_soup = BeautifulSoup(html_text, "html.parser")
# Try to get href and text except if error.
try:
elem_title = hemisphere_soup.find("h2", class_="title").get_text()
elem_sample = hemisphere_soup.find("a", text="Sample").get("href")
except AttributeError:
# Image error returns None for better front-end handling
elem_title = None
elem_sample = None
hem_dict = {
"title": elem_title,
"img_url": elem_sample
}
return hem_dict
def facts_mars():
try:
facts_df = pd.read_html("http://space-facts.com/mars/")[0]
except BaseException:
return None
facts_df.columns = ["Parameter", "Value"]
facts_df.set_index("Parameter", inplace=True)
# Add some bootstrap styling to <table>
return facts_df.to_html(classes="table table-striped")
if __name__ == "__main__":
# If running as script, print scraped data
print(scrape())
|
nilq/baby-python
|
python
|
import json
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from rest_framework.test import APIClient
import annotator
from annotator import models
class RootTestCase(TestCase):
"""
See the documentation for the
`root <http://docs.annotatorjs.org/en/v1.2.x/storage.html#root>`_
endpoint.
"""
def test_root(self):
"""
Verifies that an object containing store metadata, including
API version, is returned.
"""
client = APIClient()
response = client.get(reverse("root"))
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(200, response.status_code)
self.assertListEqual(["name", "version"], sorted(content.keys()))
self.assertEqual(annotator.__version__, content["version"])
class AnnotationTestCase(TestCase):
"""
Base class with a few utility methods.
The
`documentation <http://docs.annotatorjs.org/en/v1.2.x/storage.html>`_
    forms the basis for many of the tests.
"""
def setUp(self):
super(AnnotationTestCase, self).setUp()
self.client = APIClient()
self.index_create_url = reverse("annotations-list")
self.annotation = {
"annotator_schema_version": "v1.0",
"text": "A note I wrote",
"quote": "the text that was annotated",
"uri": "http://example.com",
"ranges": [
{
"start": "/p[69]/span/span",
"end": "/p[70]/span/span",
"startOffset": 0,
"endOffset": 120,
}
],
}
def create_annotation(self, annotation=None):
return self.client.post(
self.index_create_url,
data=json.dumps(annotation or self.annotation),
content_type="application/json",
)
class IndexTestCase(AnnotationTestCase):
"""
Tests methods on the index (i.e. ``/annotations``) route.
"""
def test_create(self):
"""
Verifies that, on receipt of an annotation object, a ``303``
redirect is returned with an appropriate ``Location`` header.
"""
response = self.client.get(self.index_create_url)
content = json.loads(response.content.decode("utf-8"))
self.assertEquals(0, len(content))
response = self.create_annotation()
self.assertEquals(303, response.status_code)
self.assertTrue(response.has_header("Location"))
def test_index(self):
"""
Verifies that the index view returns a list of all annotation
objects.
"""
self.create_annotation()
response = self.client.get(self.index_create_url)
content = json.loads(response.content.decode("utf-8"))
self.assertEquals(1, len(content))
self.assertEqual(1, models.Annotation.objects.count())
self.assertEqual(1, models.Range.objects.count())
class DetailTestCase(AnnotationTestCase):
"""
Verifies the output of the detail view (i.e.
``/annotations/<id>``).
"""
def test_read(self):
"""
Verifies that an annotation object is returned.
"""
response = self.create_annotation()
response = self.client.get(response.get("Location"))
content = json.loads(response.content.decode("utf-8"))
for key in self.annotation.keys():
self.assertEquals(content.get(key), self.annotation.get(key))
def test_partial_update(self):
"""
Verifies that on receipt of a partial annotation object, a
``303`` redirect is returned with an appropriate ``Location``
header.
"""
response = self.create_annotation()
response = self.client.patch(
response.get("Location"),
data='{"text": "Another note I wrote."}',
content_type="application/json",
)
self.assertEquals(303, response.status_code)
self.assertTrue(response.has_header("Location"))
self.assertEqual(0, len(response.content))
response = self.client.get(response.get("Location"))
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content.get("text"), "Another note I wrote.")
def test_delete(self):
"""
Verifies that deletion of an annotation returns a ``204`` and
no content.
"""
response = self.create_annotation()
self.assertEqual(1, models.Annotation.objects.count())
self.assertEqual(1, models.Range.objects.count())
response = self.client.delete(response.get("Location"))
self.assertEqual(204, response.status_code)
self.assertEqual(0, len(response.content))
self.assertEqual(0, models.Annotation.objects.count())
self.assertEqual(0, models.Range.objects.count())
class SearchTestCase(AnnotationTestCase):
"""
Verifies the output of the search (i.e. ``/search?text=spam``)
endpoint.
"""
def setUp(self):
super(SearchTestCase, self).setUp()
annotations = (
("man", "Well, what've you got?"),
(
"waitress",
(
"Well, there's egg and bacon; egg sausage and bacon; "
"egg and spam; egg bacon and spam; egg bacon sausage "
"and spam; spam bacon sausage and spam; spam egg "
"spam spam bacon and spam; spam sausage spam spam "
"bacon spam tomato and spam…"
),
),
("vikings", "Spam spam spam spam…"),
("vikings", "Spam! Lovely spam! Lovely spam!"),
)
annotation = self.annotation
for k, v in annotations:
annotation["text"] = v
annotation["quote"] = k
self.create_annotation(annotation)
def test_search_exact(self):
"""
Verifies that on receipt of a valid search, an object with
``total`` and ``rows`` fields is returned.
"""
response = self.client.get(
reverse("annotations-search"), data={"quote": "vikings"}
)
content = json.loads(response.content.decode("utf-8"))
self.assertListEqual(["rows", "total"], sorted(content.keys()))
self.assertEqual(2, content["total"])
self.assertEqual(2, len(content["rows"]))
def test_search_inexact(self):
"""
All fields, save ``text`` should be exact matches.
"""
response = self.client.get(
reverse("annotations-search"), data={"quote": "viking"}
)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(0, content["total"])
self.assertEqual(0, len(content["rows"]))
def test_search_text(self):
"""
As per the examples for
`search <http://docs.annotatorjs.org/en/v1.2.x/storage.html#search>`_,
``text`` should allow matches where the search term is
*contained* in the ``text`` field.
"""
response = self.client.get(reverse("annotations-search"), data={"text": "spam"})
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(3, content["total"])
self.assertEqual(3, len(content["rows"]))
|
nilq/baby-python
|
python
|
try:
# detect if concurrent.futures is available as a Python
# stdlib or Python 2.7 backport
from ..futures import patch as wrap_futures
from ..futures import unpatch as unwrap_futures
futures_available = True
except ImportError:
def wrap_futures():
pass
def unwrap_futures():
pass
futures_available = False
|
nilq/baby-python
|
python
|
# proxy module
from __future__ import absolute_import
from apptools.naming.binding import *
|
nilq/baby-python
|
python
|
from datetime import date
import boundaries
boundaries.register('Halifax districts',
domain='Halifax, NS',
last_updated=date(2012, 11, 6),
name_func=boundaries.attr('DISTNAME'),
id_func=boundaries.attr('DIST_ID'),
authority='Halifax Regional Municipality',
notes='We use a shapefile received via email.',
encoding='iso-8859-1',
)
|
nilq/baby-python
|
python
|
import os
from pathlib import Path
import quickfix as fix
from dotenv import load_dotenv
from fixit.application import Application
_ = load_dotenv()
def main(path):
try:
settings = fix.SessionSettings(path.name)
application = Application(
username=os.environ['FIX_USERNAME'],
password=os.environ['FIX_PASSWORD'],
sender_sub_id=os.environ['FIX_SENDER_SUB_ID']
)
store_factory = fix.FileStoreFactory(settings)
log_factory = fix.FileLogFactory(settings)
initiator = fix.SocketInitiator(application, store_factory, settings, log_factory)
initiator.start()
application.run()
# time.sleep(5)
initiator.stop()
except fix.ConfigError as e:
print(e)
if __name__ == '__main__':
main(path=Path('settings.cfg'))
|
nilq/baby-python
|
python
|
from collections import defaultdict
class Graph():
def __init__(self,vertices):
self.graph = defaultdict(list)
self.V = vertices
def addEdge(self,u,v):
self.graph[u].append(v)
def isCyclicUtil(self, v, visited, recStack):
        # DFS helper: a back edge to a vertex still on the recursion stack indicates a cycle.
        visited[v] = True
recStack[v] = True
for neighbour in self.graph[v]:
if visited[neighbour] == False:
if self.isCyclicUtil(neighbour, visited, recStack) == True:
return True
elif recStack[neighbour] == True:
return True
recStack[v] = False
return False
def isCyclic(self):
visited = [False] * (self.V + 1)
recStack = [False] * (self.V + 1)
for node in range(self.V):
if visited[node] == False:
if self.isCyclicUtil(node,visited,recStack) == True:
return True
return False
g = Graph(4)
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(1, 2)
g.addEdge(2, 0)
g.addEdge(2, 3)
g.addEdge(3, 3)
if g.isCyclic() == 1:
print ("Graph has a cycle")
else:
print ("Graph has no cycle")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Classes that provide abstractions of different image source manifests."""
import json
import re
from typing import List, Set, Union
from docker_registry_client_async import FormattedSHA256, ImageName, JsonBytes
from .manifest import Manifest
class ArchiveChangeset(JsonBytes):
"""
Image Filesystem Changeset as defined in:
https://github.com/moby/moby/blob/master/image/spec/v1.md
"""
@staticmethod
def get_repository_tag(image_name: ImageName):
"""
Constructs a repository tag from an image name.
Args:
image_name: Image name from which to construct the repository tag.
Returns:
The normalized repository tag.
"""
return ArchiveChangeset.normalize_tags(
[f"{image_name.image}:{image_name.tag}"]
)[0]
@staticmethod
def normalize_tags(tags: Union[List[str], Set[str]]) -> List[str]:
"""
Normalizes a list of tags to conform with the output of docker-save.
Args:
tags: The list of tags to be normalized.
Returns:
The normalized list.
"""
# TODO: 'library/' image prefix does not appear to be exported by docker-save ...
if not tags:
return tags
return [re.sub(r"^library/", "", tag) for tag in tags]
def append_manifest(self, manifest: "ArchiveManifest"):
"""
Appends an archive manifest to the archive changeset.
Args:
manifest: The archive manifest to be appended.
"""
# Remove the image if it already exists
self.remove_manifest(FormattedSHA256(manifest.get_json()["Config"][:-5]))
# Remove all tags being assigned to the new image ...
tags = ArchiveChangeset.normalize_tags(manifest.get_tags())
if tags:
self.remove_tags(tags)
# Append the new image configuration ...
_json = self.get_json()
_json.append(manifest.get_json())
self._set_json(_json)
def get_manifest(self, image_name: ImageName) -> "ArchiveManifest":
"""
Retrieves the archive manifest for a given image name from the archive changeset.
Args:
image_name: The image name.
Returns:
The corresponding archive manifest.
"""
if image_name.digest:
for manifest in self.get_manifests():
if manifest.get_config_digest() == image_name.resolve_digest():
return manifest
else:
tag = ArchiveChangeset.get_repository_tag(image_name)
for manifest in self.get_manifests():
tags = manifest.get_tags()
if tags and tag in manifest.get_tags():
return manifest
raise RuntimeError(
f"Unable to locate configuration in archive manifest for: {image_name.resolve_name()}"
)
def get_manifests(self):
"""
Retrieves the list of archive manifests contained within the archive changeset.
Returns:
            The list of archive manifests contained within the archive changeset.
"""
return [
ArchiveManifest(json.dumps(manifest).encode("utf-8"))
for manifest in self.get_json()
]
def remove_manifest(self, config_digest: FormattedSHA256):
"""
Removes an archive manifest from the archive changeset.
Args:
config_digest: Image configuration digest in the form <hash type>:<digest value>.
"""
manifests = [
manifest.get_json()
for manifest in self.get_manifests()
if manifest.get_config_digest() != config_digest
]
self._set_json(manifests)
def remove_tags(self, tags: Union[List[str], Set[str]]):
"""
Removes a list of repository tags from all archive manifests within the archive changeset.
Args:
tags: A list of tags to be removed from all image configurations.
"""
manifests = self.get_manifests()
for manifest in manifests:
manifest.remove_tags(tags)
manifests = [manifest.get_json() for manifest in manifests]
self._set_json(manifests)
class ArchiveManifest(Manifest):
"""
Image source manifest for docker archives.
"""
@staticmethod
def digest_to_layer(digest: FormattedSHA256) -> str:
"""
        Converts a digest value to an archive layer identifier.
Args:
digest: The digest value in the form: <hash type>:<digest value>.
Returns:
The corresponding archive layer identifier (relative tar path).
"""
return f"{digest.sha256}/layer.tar"
@staticmethod
def from_json(_json) -> "ArchiveManifest":
"""
Initializes an archive manifest from a JSON object.
Args:
_json: JSON object with which to initialize the archive manifest.
Returns:
The newly initialized archive manifest.
"""
archive_manifest = ArchiveManifest(b"{}")
archive_manifest._set_json(_json) # pylint: disable=protected-access
return archive_manifest
@staticmethod
def layer_to_digest(layer: str) -> FormattedSHA256:
"""
        Converts an archive layer identifier to a digest value.
Args:
layer: The archive layer identifier (relative tar path).
Returns:
The corresponding digest value in the form: <hash type>:<digest value>.
"""
return FormattedSHA256(layer[:-10])
def get_tags(self) -> Set[str]:
"""
Retrieves the set of repository tags.
Returns:
The set of repository tags.
"""
result = self.get_json()["RepoTags"]
return set(result) if result else result
def remove_tags(self, tags: Union[List[str], Set[str]]):
"""
Removes a list of repository tags.
Args:
tags: A list of tags to be removed from all image configurations.
"""
existing = self.get_tags()
if not existing:
existing = set()
delta = set(tags) if tags else set()
self.set_tags(existing - delta)
def set_config_digest(self, config_digest: FormattedSHA256):
"""
Assigns the image configuration digest.
Args:
config_digest: Image configuration digest in the form <hash type>:<digest value>.
"""
_json = self.get_json()
_json["Config"] = f"{config_digest.sha256}.json"
self._set_json(_json)
def set_layers(self, layers: List[FormattedSHA256]):
"""
Assigns the list of manifest layer identifiers.
Args:
layers: List of manifest layer identifiers in the form: <hash type>:<digest_value>.
"""
_json = self.get_json()
_json["Layers"] = [ArchiveManifest.digest_to_layer(digest) for digest in layers]
self._set_json(_json)
def set_tags(self, tags: Union[List[str], Set[str], None]):
"""
Assigns the list of repository tags.
Args:
tags: The list of repository tags to be assigned.
"""
_json = self.get_json()
_json["RepoTags"] = list(tags) if tags else None
self._set_json(_json)
# Manifest Members
def get_config_digest(self, image_name: ImageName = None) -> FormattedSHA256:
return FormattedSHA256(self.get_json()["Config"][:-5])
def get_layers(self, image_name: ImageName = None) -> List[FormattedSHA256]:
layers = self.get_json()["Layers"]
return [ArchiveManifest.layer_to_digest(layer) for layer in layers]
class ArchiveRepositories(JsonBytes):
"""
Archive repositories as defined in:
https://github.com/moby/moby/blob/master/image/spec/v1.md
"""
def get_tag(self, image_name: ImageName):
"""
Retrieves a repository tag for a given image.
Args:
image_name: The image for which to assign the tag
Returns:
The repository tag, or None.
"""
image = ArchiveChangeset.normalize_tags([image_name.image])[0]
return self.get_json().get(image, {}).get(image_name.resolve_tag(), None)
def set_tag(self, image_name: ImageName, digests: FormattedSHA256):
"""
Assigns a repository tag.
Args:
image_name: The image for which to assign the tag
digests: The value to be assigned to the tag
"""
_json = self.get_json()
image = ArchiveChangeset.normalize_tags([image_name.image])[0]
        if image not in _json:
_json[image] = {}
_json[image][image_name.resolve_tag()] = digests.sha256
self._set_json(_json)
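

# Hedged usage sketch (added for illustration; digest and tag values are hypothetical):
# ArchiveChangeset.normalize_tags(["library/busybox:latest", "myrepo/app:1.0"])
# # -> ["busybox:latest", "myrepo/app:1.0"]
# layer = ArchiveManifest.digest_to_layer(FormattedSHA256("0" * 64))
# # -> "000...0/layer.tar"; ArchiveManifest.layer_to_digest(layer) inverts the mapping.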
|
nilq/baby-python
|
python
|
from Chef import Chef
from ChineseChef import ChineseChef
myChef = Chef()
myChef.make_special_dish()
myChineseChef = ChineseChef()
myChineseChef.make_fried_rice()
|
nilq/baby-python
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SecretBackendRoleArgs', 'SecretBackendRole']
@pulumi.input_type
class SecretBackendRoleArgs:
def __init__(__self__, *,
backend: pulumi.Input[str],
creation_statements: pulumi.Input[Sequence[pulumi.Input[str]]],
db_name: pulumi.Input[str],
default_ttl: Optional[pulumi.Input[int]] = None,
max_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
renew_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
revocation_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rollback_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a SecretBackendRole resource.
:param pulumi.Input[str] backend: The unique name of the Vault mount to configure.
:param pulumi.Input[Sequence[pulumi.Input[str]]] creation_statements: The database statements to execute when
creating a user.
:param pulumi.Input[str] db_name: The unique name of the database connection to use for
the role.
:param pulumi.Input[int] default_ttl: The default number of seconds for leases for this
role.
:param pulumi.Input[int] max_ttl: The maximum number of seconds for leases for this
role.
:param pulumi.Input[str] name: A unique name to give the role.
:param pulumi.Input[Sequence[pulumi.Input[str]]] renew_statements: The database statements to execute when
renewing a user.
:param pulumi.Input[Sequence[pulumi.Input[str]]] revocation_statements: The database statements to execute when
revoking a user.
:param pulumi.Input[Sequence[pulumi.Input[str]]] rollback_statements: The database statements to execute when
rolling back creation due to an error.
"""
pulumi.set(__self__, "backend", backend)
pulumi.set(__self__, "creation_statements", creation_statements)
pulumi.set(__self__, "db_name", db_name)
if default_ttl is not None:
pulumi.set(__self__, "default_ttl", default_ttl)
if max_ttl is not None:
pulumi.set(__self__, "max_ttl", max_ttl)
if name is not None:
pulumi.set(__self__, "name", name)
if renew_statements is not None:
pulumi.set(__self__, "renew_statements", renew_statements)
if revocation_statements is not None:
pulumi.set(__self__, "revocation_statements", revocation_statements)
if rollback_statements is not None:
pulumi.set(__self__, "rollback_statements", rollback_statements)
@property
@pulumi.getter
def backend(self) -> pulumi.Input[str]:
"""
The unique name of the Vault mount to configure.
"""
return pulumi.get(self, "backend")
@backend.setter
def backend(self, value: pulumi.Input[str]):
pulumi.set(self, "backend", value)
@property
@pulumi.getter(name="creationStatements")
def creation_statements(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
The database statements to execute when
creating a user.
"""
return pulumi.get(self, "creation_statements")
@creation_statements.setter
def creation_statements(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "creation_statements", value)
@property
@pulumi.getter(name="dbName")
def db_name(self) -> pulumi.Input[str]:
"""
The unique name of the database connection to use for
the role.
"""
return pulumi.get(self, "db_name")
@db_name.setter
def db_name(self, value: pulumi.Input[str]):
pulumi.set(self, "db_name", value)
@property
@pulumi.getter(name="defaultTtl")
def default_ttl(self) -> Optional[pulumi.Input[int]]:
"""
The default number of seconds for leases for this
role.
"""
return pulumi.get(self, "default_ttl")
@default_ttl.setter
def default_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "default_ttl", value)
@property
@pulumi.getter(name="maxTtl")
def max_ttl(self) -> Optional[pulumi.Input[int]]:
"""
The maximum number of seconds for leases for this
role.
"""
return pulumi.get(self, "max_ttl")
@max_ttl.setter
def max_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_ttl", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A unique name to give the role.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="renewStatements")
def renew_statements(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The database statements to execute when
renewing a user.
"""
return pulumi.get(self, "renew_statements")
@renew_statements.setter
def renew_statements(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "renew_statements", value)
@property
@pulumi.getter(name="revocationStatements")
def revocation_statements(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The database statements to execute when
revoking a user.
"""
return pulumi.get(self, "revocation_statements")
@revocation_statements.setter
def revocation_statements(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "revocation_statements", value)
@property
@pulumi.getter(name="rollbackStatements")
def rollback_statements(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The database statements to execute when
rolling back creation due to an error.
"""
return pulumi.get(self, "rollback_statements")
@rollback_statements.setter
def rollback_statements(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "rollback_statements", value)
@pulumi.input_type
class _SecretBackendRoleState:
def __init__(__self__, *,
backend: Optional[pulumi.Input[str]] = None,
creation_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
db_name: Optional[pulumi.Input[str]] = None,
default_ttl: Optional[pulumi.Input[int]] = None,
max_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
renew_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
revocation_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rollback_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering SecretBackendRole resources.
:param pulumi.Input[str] backend: The unique name of the Vault mount to configure.
:param pulumi.Input[Sequence[pulumi.Input[str]]] creation_statements: The database statements to execute when
creating a user.
:param pulumi.Input[str] db_name: The unique name of the database connection to use for
the role.
:param pulumi.Input[int] default_ttl: The default number of seconds for leases for this
role.
:param pulumi.Input[int] max_ttl: The maximum number of seconds for leases for this
role.
:param pulumi.Input[str] name: A unique name to give the role.
:param pulumi.Input[Sequence[pulumi.Input[str]]] renew_statements: The database statements to execute when
renewing a user.
:param pulumi.Input[Sequence[pulumi.Input[str]]] revocation_statements: The database statements to execute when
revoking a user.
:param pulumi.Input[Sequence[pulumi.Input[str]]] rollback_statements: The database statements to execute when
rolling back creation due to an error.
"""
if backend is not None:
pulumi.set(__self__, "backend", backend)
if creation_statements is not None:
pulumi.set(__self__, "creation_statements", creation_statements)
if db_name is not None:
pulumi.set(__self__, "db_name", db_name)
if default_ttl is not None:
pulumi.set(__self__, "default_ttl", default_ttl)
if max_ttl is not None:
pulumi.set(__self__, "max_ttl", max_ttl)
if name is not None:
pulumi.set(__self__, "name", name)
if renew_statements is not None:
pulumi.set(__self__, "renew_statements", renew_statements)
if revocation_statements is not None:
pulumi.set(__self__, "revocation_statements", revocation_statements)
if rollback_statements is not None:
pulumi.set(__self__, "rollback_statements", rollback_statements)
@property
@pulumi.getter
def backend(self) -> Optional[pulumi.Input[str]]:
"""
The unique name of the Vault mount to configure.
"""
return pulumi.get(self, "backend")
@backend.setter
def backend(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backend", value)
@property
@pulumi.getter(name="creationStatements")
def creation_statements(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The database statements to execute when
creating a user.
"""
return pulumi.get(self, "creation_statements")
@creation_statements.setter
def creation_statements(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "creation_statements", value)
@property
@pulumi.getter(name="dbName")
def db_name(self) -> Optional[pulumi.Input[str]]:
"""
The unique name of the database connection to use for
the role.
"""
return pulumi.get(self, "db_name")
@db_name.setter
def db_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "db_name", value)
@property
@pulumi.getter(name="defaultTtl")
def default_ttl(self) -> Optional[pulumi.Input[int]]:
"""
The default number of seconds for leases for this
role.
"""
return pulumi.get(self, "default_ttl")
@default_ttl.setter
def default_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "default_ttl", value)
@property
@pulumi.getter(name="maxTtl")
def max_ttl(self) -> Optional[pulumi.Input[int]]:
"""
The maximum number of seconds for leases for this
role.
"""
return pulumi.get(self, "max_ttl")
@max_ttl.setter
def max_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_ttl", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A unique name to give the role.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="renewStatements")
def renew_statements(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The database statements to execute when
renewing a user.
"""
return pulumi.get(self, "renew_statements")
@renew_statements.setter
def renew_statements(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "renew_statements", value)
@property
@pulumi.getter(name="revocationStatements")
def revocation_statements(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The database statements to execute when
revoking a user.
"""
return pulumi.get(self, "revocation_statements")
@revocation_statements.setter
def revocation_statements(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "revocation_statements", value)
@property
@pulumi.getter(name="rollbackStatements")
def rollback_statements(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The database statements to execute when
rolling back creation due to an error.
"""
return pulumi.get(self, "rollback_statements")
@rollback_statements.setter
def rollback_statements(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "rollback_statements", value)
class SecretBackendRole(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend: Optional[pulumi.Input[str]] = None,
creation_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
db_name: Optional[pulumi.Input[str]] = None,
default_ttl: Optional[pulumi.Input[int]] = None,
max_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
renew_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
revocation_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rollback_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
## Import
Database secret backend roles can be imported using the `backend`, `/roles/`, and the `name` e.g.
```sh
$ pulumi import vault:database/secretBackendRole:SecretBackendRole example postgres/roles/my-role
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] backend: The unique name of the Vault mount to configure.
:param pulumi.Input[Sequence[pulumi.Input[str]]] creation_statements: The database statements to execute when
creating a user.
:param pulumi.Input[str] db_name: The unique name of the database connection to use for
the role.
:param pulumi.Input[int] default_ttl: The default number of seconds for leases for this
role.
:param pulumi.Input[int] max_ttl: The maximum number of seconds for leases for this
role.
:param pulumi.Input[str] name: A unique name to give the role.
:param pulumi.Input[Sequence[pulumi.Input[str]]] renew_statements: The database statements to execute when
renewing a user.
:param pulumi.Input[Sequence[pulumi.Input[str]]] revocation_statements: The database statements to execute when
revoking a user.
:param pulumi.Input[Sequence[pulumi.Input[str]]] rollback_statements: The database statements to execute when
rolling back creation due to an error.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SecretBackendRoleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
        Database secret backend roles can be imported using the `backend`, `/roles/`, and the `name`, e.g.
```sh
$ pulumi import vault:database/secretBackendRole:SecretBackendRole example postgres/roles/my-role
```
:param str resource_name: The name of the resource.
:param SecretBackendRoleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecretBackendRoleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend: Optional[pulumi.Input[str]] = None,
creation_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
db_name: Optional[pulumi.Input[str]] = None,
default_ttl: Optional[pulumi.Input[int]] = None,
max_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
renew_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
revocation_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rollback_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecretBackendRoleArgs.__new__(SecretBackendRoleArgs)
if backend is None and not opts.urn:
raise TypeError("Missing required property 'backend'")
__props__.__dict__["backend"] = backend
if creation_statements is None and not opts.urn:
raise TypeError("Missing required property 'creation_statements'")
__props__.__dict__["creation_statements"] = creation_statements
if db_name is None and not opts.urn:
raise TypeError("Missing required property 'db_name'")
__props__.__dict__["db_name"] = db_name
__props__.__dict__["default_ttl"] = default_ttl
__props__.__dict__["max_ttl"] = max_ttl
__props__.__dict__["name"] = name
__props__.__dict__["renew_statements"] = renew_statements
__props__.__dict__["revocation_statements"] = revocation_statements
__props__.__dict__["rollback_statements"] = rollback_statements
super(SecretBackendRole, __self__).__init__(
'vault:database/secretBackendRole:SecretBackendRole',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
backend: Optional[pulumi.Input[str]] = None,
creation_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
db_name: Optional[pulumi.Input[str]] = None,
default_ttl: Optional[pulumi.Input[int]] = None,
max_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
renew_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
revocation_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
rollback_statements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'SecretBackendRole':
"""
Get an existing SecretBackendRole resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] backend: The unique name of the Vault mount to configure.
:param pulumi.Input[Sequence[pulumi.Input[str]]] creation_statements: The database statements to execute when
creating a user.
:param pulumi.Input[str] db_name: The unique name of the database connection to use for
the role.
:param pulumi.Input[int] default_ttl: The default number of seconds for leases for this
role.
:param pulumi.Input[int] max_ttl: The maximum number of seconds for leases for this
role.
:param pulumi.Input[str] name: A unique name to give the role.
:param pulumi.Input[Sequence[pulumi.Input[str]]] renew_statements: The database statements to execute when
renewing a user.
:param pulumi.Input[Sequence[pulumi.Input[str]]] revocation_statements: The database statements to execute when
revoking a user.
:param pulumi.Input[Sequence[pulumi.Input[str]]] rollback_statements: The database statements to execute when
rolling back creation due to an error.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SecretBackendRoleState.__new__(_SecretBackendRoleState)
__props__.__dict__["backend"] = backend
__props__.__dict__["creation_statements"] = creation_statements
__props__.__dict__["db_name"] = db_name
__props__.__dict__["default_ttl"] = default_ttl
__props__.__dict__["max_ttl"] = max_ttl
__props__.__dict__["name"] = name
__props__.__dict__["renew_statements"] = renew_statements
__props__.__dict__["revocation_statements"] = revocation_statements
__props__.__dict__["rollback_statements"] = rollback_statements
return SecretBackendRole(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def backend(self) -> pulumi.Output[str]:
"""
The unique name of the Vault mount to configure.
"""
return pulumi.get(self, "backend")
@property
@pulumi.getter(name="creationStatements")
def creation_statements(self) -> pulumi.Output[Sequence[str]]:
"""
The database statements to execute when
creating a user.
"""
return pulumi.get(self, "creation_statements")
@property
@pulumi.getter(name="dbName")
def db_name(self) -> pulumi.Output[str]:
"""
The unique name of the database connection to use for
the role.
"""
return pulumi.get(self, "db_name")
@property
@pulumi.getter(name="defaultTtl")
def default_ttl(self) -> pulumi.Output[Optional[int]]:
"""
The default number of seconds for leases for this
role.
"""
return pulumi.get(self, "default_ttl")
@property
@pulumi.getter(name="maxTtl")
def max_ttl(self) -> pulumi.Output[Optional[int]]:
"""
The maximum number of seconds for leases for this
role.
"""
return pulumi.get(self, "max_ttl")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
A unique name to give the role.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="renewStatements")
def renew_statements(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The database statements to execute when
renewing a user.
"""
return pulumi.get(self, "renew_statements")
@property
@pulumi.getter(name="revocationStatements")
def revocation_statements(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The database statements to execute when
revoking a user.
"""
return pulumi.get(self, "revocation_statements")
@property
@pulumi.getter(name="rollbackStatements")
def rollback_statements(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The database statements to execute when
rolling back creation due to an error.
"""
return pulumi.get(self, "rollback_statements")
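# Usage sketch (illustrative only; assumes this module is importable as
# pulumi_vault.database and that a database secret backend mounted at "database"
# with a connection named "db" already exists):
#
#   import pulumi_vault as vault
#
#   role = vault.database.SecretBackendRole("my-role",
#       backend="database",
#       db_name="db",
#       creation_statements=[
#           "CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';",
#       ],
#       default_ttl=3600,
#       max_ttl=86400)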
|
nilq/baby-python
|
python
|
"""
This file implements a deep neural network that predicts the next position of the laser spot from its current position.
"""
import numpy as np
import observations_set
from keras import Sequential
from keras.layers import Dense
from keras.models import Model, model_from_json
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
import random
import os
import glob
class DeepNeuralNetworkPosition(object):
"""
Deep neural network classifier.
"""
def __init__(self, nn_model):
"""
Initializes the deep neural network from a Keras Model.
:param nn_model:
"""
self.nn = nn_model
@classmethod
def from_data(cls, obs_set, weights_filename, batch_size=256, epochs=100):
"""
Train the deep neural network from the data of an ObservationROISet.
:param obs_set: Training ObservationROISet.
:param weights_filename: filename prefix for saving the structure/weights of the trained deep neural network.
:param batch_size: batch size for training.
:param epochs: number of epochs to train.
:return: A trained DeepNeuralNetworkPosition.
"""
origin_positions, dest_positions = DeepNeuralNetworkPosition._generate_movements(obs_set)
origin_positions /= 32
dest_positions /= 32
nn = DeepNeuralNetworkPosition._train_model(origin_positions, dest_positions, weights_filename, batch_size=batch_size, epochs=epochs)
return DeepNeuralNetworkPosition(nn)
@classmethod
def from_trained_model(cls, name):
"""
Loads a pretrained model given the filename prefix for the structure/weights. When there is more than one weights
file for a given structure, the last epoch weights will be selected because only the epochs that improve the
evaluation loss are saved.
:param name: filename prefix of the model.
:return: A trained DeepNeuralNetworkPosition
"""
with open(name + '-structure.json', 'r') as structure:
model = model_from_json(structure.read())
weight_files = glob.glob(name + "*.hdf5")
greater_epoch_index = 0
last_epoch = 0
for i, f in enumerate(weight_files):
start_epoch_string = len(name)+1
epoch = int(f[start_epoch_string:(start_epoch_string+3)])
if epoch > last_epoch:
last_epoch = epoch
greater_epoch_index = i
best_weights_file = weight_files[greater_epoch_index]
model.load_weights(best_weights_file)
return DeepNeuralNetworkPosition(model)
@classmethod
def _generate_movements(cls, obs_set):
"""
Generate the movements returning the origin and destination points of each movement.
:param obs_set: ObservationROISet.
:return: origin positions, destination positions of each movement
"""
num_movements = 0
for i in range(obs_set.num_observations()):
num_movements += obs_set.length_vector[i] - 1
origin_positions = np.empty((num_movements, 2))
dest_positions = np.empty((num_movements, 2))
current_pos = 0
for i in range(obs_set.num_observations()):
length = obs_set.length_vector[i]
origin_positions[current_pos:(current_pos + length - 1), :] = obs_set.obs[i, :, :(length - 1)].T
dest_positions[current_pos:(current_pos + length - 1), :] = obs_set.obs[i, :, 1:length].T
current_pos += length - 1
return origin_positions, dest_positions
@classmethod
def _generate_model(cls, weights_filename):
"""
Generates the structure of the deep neural network.
:param weights_filename: filename prefix to save the structure.
:return: model structure.
"""
model = Sequential()
model.add(Dense(8, activation='relu', input_shape=(2,)))
model.add(Dense(128, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(2, activation='sigmoid'))
model.summary()
with open(weights_filename + '-structure.json', 'w') as model_json:
model_json.write(model.to_json())
return model
@classmethod
def _train_model(cls, origin_positions, dest_positions, weights_filename, batch_size=256, epochs=100):
"""
Train a deep neural network given the origin and destination points of a set of movements. A set of movements
(20%) is selected randomly as validation data.
The weights of the model are only saved for those epochs that improve the validation loss (mean squared error).
:param origin_positions: Origin positions of the movements set.
:param dest_positions: Destination positions of the movements set.
:param weights_filename: filename prefix to save the structure/weights.
:param batch_size: batch size to train the deep neural network.
:param epochs: number of epochs to train the deep neural network.
        :return: the trained Keras model.
"""
model = DeepNeuralNetworkPosition._generate_model(weights_filename)
nn = Model(inputs=model.input, outputs=model.output)
nn.compile(loss='mean_squared_error', optimizer=Adam(lr=0.0001))
train_origin, valid_origin, train_dest, valid_dest = train_test_split(origin_positions,
dest_positions,
test_size=0.2,
random_state=13)
logger = ModelCheckpoint(weights_filename + "-{epoch:03d}-{val_loss:.6f}.hdf5", monitor='val_loss', verbose=1,
save_best_only=True, save_weights_only=False, mode='min', period=1)
nn.fit(train_origin, train_dest, batch_size=batch_size, epochs=epochs, verbose=1,
validation_data=(valid_origin, valid_dest), callbacks=[logger])
return nn
def evaluate_observation(self, obs_test):
"""
Returns the anomaly score for a given test ObservationROISet.
:param obs_test: ObservationROISet to test.
:return: anomaly score.
"""
origin_test, dest_test = self._generate_movements(obs_test)
origin_test /= 32
dest_test /= 32
predicted = self.nn.predict(origin_test)
diff = dest_test - predicted
hypot_distance = np.hypot(diff[:,0], diff[:,1])
return hypot_distance.sum()
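# Usage sketch (illustrative; assumes a trained model prefix and a data folder with the layout used below exist):
#
#   obs = observations_set.ObservationROISet.fromfolder('data/Type1')
#   obs.synchronize_average()
#   nn = DeepNeuralNetworkPosition.from_trained_model('nn_positions_models/Type1/Type1_first')
#   score = nn.evaluate_observation(obs.select_observations(0, inplace=False))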
def evaluate_model(data_folder, weights_filename, output_name):
"""
Applies a 2-fold cross validation to evaluate the performance of the deep neural network.
:param data_folder: Folder name where the data is located.
:param weights_filename: filename prefix to save the structure/weights.
    :param output_name: Prefix of the files which contain the results of the 2-fold cross-validation.
:return:
"""
obs_set = observations_set.ObservationROISet.fromfolder(data_folder)
obs_set.synchronize_average()
possible_train_indices = range(0, obs_set.num_observations())
# Don't train with the known anomaly.
if "1673" in obs_set.names_vector:
anomaly_index = np.where(obs_set.names_vector == "1673")[0][0]
possible_train_indices = list(possible_train_indices)
del possible_train_indices[anomaly_index]
num_test = int(0.5*obs_set.num_observations())
random.seed(0)
train_idx_first = np.asarray(random.sample(possible_train_indices, num_test))
train_idx_second = list(set(possible_train_indices) - set(train_idx_first))
# Generate the train/test sets for the first validation
train_obs_first = obs_set.select_observations(train_idx_first, inplace=False)
test_obs_first = obs_set.unselect_observations(train_idx_first, inplace=False)
nn = DeepNeuralNetworkPosition.from_data(train_obs_first, weights_filename + "_first")
normal_results = np.empty((obs_set.num_observations(),))
gaussian_results = np.empty((obs_set.num_observations(),))
for i in range(test_obs_first.num_observations()):
test_observation = test_obs_first.select_observations(i, inplace=False)
# Generate noise in the data
gaussian_observation = test_observation.gaussian_noise(std_col=0.141421356, std_row=0.141421356, inplace=False)
name = test_obs_first.names_vector[i]
obs_index = np.where(obs_set.names_vector == name)[0][0]
normal_results[obs_index] = nn.evaluate_observation(test_observation)
gaussian_results[obs_index] = nn.evaluate_observation(gaussian_observation)
    # Generate the train/test sets for the second validation
train_obs_second = obs_set.select_observations(train_idx_second, inplace=False)
test_obs_second = obs_set.unselect_observations(train_idx_second, inplace=False)
nn = DeepNeuralNetworkPosition.from_data(train_obs_second, weights_filename + "_second")
for i in range(test_obs_second.num_observations()):
test_observation = test_obs_second.select_observations(i, inplace=False)
# Generate noise in the data
gaussian_observation = test_observation.gaussian_noise(std_col=0.141421356, std_row=0.141421356, inplace=False)
name = test_obs_second.names_vector[i]
obs_index = np.where(obs_set.names_vector == name)[0][0]
normal_results[obs_index] = nn.evaluate_observation(test_observation)
gaussian_results[obs_index] = nn.evaluate_observation(gaussian_observation)
with open(output_name + '_normal.csv', 'w') as normal_file, open(output_name + '_gaussian002.csv', 'w') as gaussian_file:
normal_file.write("Name,AnomalyScore" + '\n')
gaussian_file.write("Name,AnomalyScore" + '\n')
for n in range(0, obs_set.num_observations()):
# Writes the results.
normal_file.write(obs_set.names_vector[n] + "," + str(normal_results[n]) + '\n')
gaussian_file.write(obs_set.names_vector[n] + "," + str(gaussian_results[n]) + '\n')
if __name__ == '__main__':
if not os.path.isdir('results/DeepNeuralNetworkPosition'):
os.mkdir('results/DeepNeuralNetworkPosition')
for t in range(1,37):
data_folder = 'data/Type' + str(t)
weights_folder = "nn_positions_models/Type" + str(t)
result_folder = "results/DeepNeuralNetworkPosition/Type" + str(t)
if not os.path.isdir(weights_folder):
os.mkdir(weights_folder)
if not os.path.isdir(result_folder):
os.mkdir(result_folder)
evaluate_model(data_folder, weights_folder + "/Type" + str(t), result_folder + "/DeepNeuralNetworkPosition")
|
nilq/baby-python
|
python
|
"""Top-level {{cookiecutter.package_slug}} package."""
import logging
from logging import NullHandler
__author__ = '{{cookiecutter.full_name}}'
__email__ = '{{cookiecutter.email}}'
__version__ = '{{cookiecutter.version}}'
logging.getLogger(__name__).addHandler(NullHandler())
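# The NullHandler keeps the library silent by default; applications that want
# log output configure it themselves, for example (illustrative):
#   import logging
#   logging.basicConfig(level=logging.INFO)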
|
nilq/baby-python
|
python
|
"""polysearch URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
from drf_spectacular.views import SpectacularAPIView, SpectacularSwaggerView
from providers.views import ProviderViewSet
from rest_framework.routers import DefaultRouter
from service_areas.views import ServiceAreaViewSet
# Create a router and register our viewsets with it.
router = DefaultRouter()
router.register(r'providers', ProviderViewSet)
router.register(r'service-area', ServiceAreaViewSet)
# The API URLs are now determined automatically by the router.
urlpatterns = [
path('api/v1/', include(router.urls)),
path('api/v1/schema/', SpectacularAPIView.as_view(), name='schema'),
path('api/v1/swagger/', SpectacularSwaggerView.as_view(url_name='schema'), name='swagger-ui')
]
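# Resulting routes (illustrative summary of the configuration above):
#   /api/v1/providers/     -> ProviderViewSet (list/create, plus router-generated detail routes)
#   /api/v1/service-area/  -> ServiceAreaViewSet (list/create, plus router-generated detail routes)
#   /api/v1/schema/        -> OpenAPI schema (drf-spectacular)
#   /api/v1/swagger/       -> Swagger UI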
|
nilq/baby-python
|
python
|
import random
shapes = [
[
(1, 1, 1, 1)
],
[
(1, 0), (1, 0), (1, 1)
],
[
(0, 1), (0, 1), (1, 1)
],
[
(0, 1), (1, 1), (1, 0)
],
[
(1, 1), (1, 1)
]
]
class Block:
def __init__(self):
self.shape = random.choice(shapes)
self.position = (0, 0)
@property
def width(self):
"""
        Convenience for the width of the block.
        :return: the width of the block
"""
return len(self.shape[0])
@property
def height(self):
return len(self.shape)
def mask(self):
"""
        A matrix-like mask is created, which is used to combine the block with existing blocks on the board.
:return: a 2 dimensional matrix with the blocks positions as 1's and empty as 0's
"""
m = [[0 for _ in range(20)] for _ in range(20)]
for i, row in enumerate(self.shape):
for j, element in enumerate(row):
x = self.position[0] + i
y = self.position[1] + j
if x >= 20 or y >= 20:
return False, None
m[x][y] = element
return True, m
def move_left(self, set_pos=False):
"""
Moves the block left.
        :param set_pos: if True, apply the move; otherwise only simulate it
:return: result of operation
"""
new_p = (self.position[0], self.position[1] - 1)
if not (0 <= new_p[0] < 20 and 0 <= new_p[1] < 20):
return False, None
if set_pos:
self.position = new_p
return True, new_p
def move_right(self, set_pos=False):
"""
Move the block right
        :param set_pos: If True, apply the move; otherwise only simulate it.
:return: The result of the operation.
"""
new_p = (self.position[0], self.position[1] + 1)
if not (0 <= (new_p[0] + self.height) < 20 and 0 <= (new_p[1] + self.width - 1) < 20):
return False, None
if set_pos:
self.position = new_p
return True, new_p
def rotate_clockwise(self):
"""
Rotate the block clockwise.
:return: The result of the operation
"""
        new_shape = list(zip(*self.shape[::-1]))
if (self.position[1] + len(new_shape[0])) > 20 or (self.position[0] + len(new_shape)) > 20:
return False
self.shape = new_shape
return True
def rotate_counter(self):
"""
Rotate the block counter clockwise.
        :return: The result of the operation.
        """
        new_shape = list(zip(*self.shape))[::-1]
if (self.position[1] + len(new_shape[0])) > 20 or (self.position[0] + len(new_shape)) > 20:
return False
self.shape = new_shape
return True
def print_mask(self):
"""
Convenience method to print the current mask.
"""
_, m = self.mask()
for row in m:
p = []
for e in row:
p.append('-' if e == 0 else '*')
print(''.join(p))
def down(self):
"""
Move the block down one position.
"""
new_y = self.position[0] + 1
if new_y > 20:
raise RuntimeError('Moved outside. Should be detected')
self.position = new_y, self.position[1]
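# Illustrative usage sketch: spawn a random block, nudge and rotate it, then print its mask.
if __name__ == '__main__':
    block = Block()
    block.move_right(set_pos=True)
    block.rotate_clockwise()
    block.down()
    block.print_mask()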
|
nilq/baby-python
|
python
|
from image_match.goldberg import ImageSignature
gis = ImageSignature()
a = gis.generate_signature('MonaLisa_Wikipedia.jpg')
b = gis.generate_signature('MonaLisa_WikiImages.jpg')
c = gis.generate_signature('Caravaggio_Wikipedia.jpg')
print('a = MonaLisa_Wikipedia.jpg\n','b = MonaLisa_WikiImages.jpg\n','c = Caravaggio_Wikipedia.jpg\n')
print("a,a",gis.normalized_distance(a, a) )
print("a,b",gis.normalized_distance(a, b) )
print("a,c",gis.normalized_distance(a, c) )
print("b,c",gis.normalized_distance(b, c) )
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='XUnit2HTML',
version='1.0.1',
description='A tool for converting xunit generated xml files into an html report',
author='Gudni Olafsson',
author_email='gudni.olafsson@gmail.com',
py_modules=['xunit2html'],
scripts=['src/x2h.py'],
packages=[''],
package_dir={'': 'src'},
package_data={'': ['templates/*.tmpl', 'templates/jquery-1.9.1.min.js', 'templates/report.css']},
requires=['argparse'])
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
import requests
from bs4 import BeautifulSoup
import re
import sys
import os, shutil
#from internal_processing import get_job_details, get_name_and_loc, get_posted_and_applicants
from internal_processing import get_job_title, get_job_id, get_job_description
from helpers import strings_to_check_for, remove_substrings
# Get the location and local files
def get_files(directory = './saved_webpages/', verbose=False):
# bookmark backup directory
if not os.path.isdir(directory):
        print('Error: not a valid directory:', directory)
else:
if verbose: print("Valid dir.:", directory)
for path, dirs, files in os.walk(directory):
# If there are any other directory in the backup directory,
# we need to stop the process and get the backup files only
if path == directory:
break
if files:
return sorted(files) # sort all the backup files
else:
return []
def get_job_id_wrapper(filename):
# Open the file and soup it
f = open(filename,'r')
soup = BeautifulSoup(f.read(), "lxml")
f.close()
return get_job_id(soup)
def get_source_dir(filename, directory, verbose=False):
change_dirname = True
dirp = filename.replace('.html', "_files")
#print(dirp)
source_fpath = directory
if os.path.isdir(directory+'dirs/'+dirp):
if verbose: print('\tin dirs/')
source_fpath += 'dirs/'
elif os.path.isdir(directory+dirp):
if verbose: print('\tin base')
else:
if verbose: print('Nope:',filename)
change_dirname = False
return source_fpath, change_dirname
def rename_files_and_dirs(files, directory = './saved_webpages/', verbose=False):
if verbose: print(f'Processing {len(files)} files')
dirs = directory + 'dirs/'
for file_ in files:
#if verbose: print('Trying:',file_)
#if verbose: print([True for s in [' ']+strings_to_check_for[:-2] if s in file_.split('.html')[0]])
# Check if the file is already processed
#print(file_.split('.')[0])
#print([s for s in [' ']+strings_to_check_for[:-2] if s in file_])
if any([True for s in [' ']+strings_to_check_for[:-2] if s in file_.split('.html')[0]]):
if verbose: print('Processing:',file_)
else:
if verbose: print('\t\tAlready processed:',file_)
continue
# Get job ID
filename = directory+file_
job_id = get_job_id_wrapper(filename)
newname = remove_substrings(strings_to_check_for)(file_.replace(" ","_"))
newname = remove_substrings(["html"])(newname)
newname = newname+f"_{job_id}"
#source_dpath, change_dirname = get_source_dir(file_, directory, verbose)
# Rename the html files
source_fpath = os.path.join(directory,file_)
dest_fpath = os.path.join(directory,newname+'.html')
if os.path.isfile(source_fpath):
os.rename(source_fpath,dest_fpath)
# Move the residual directory (Maybe just delete?)
source_dpath = os.path.join(directory,file_.replace('.html', "_files"))
dest_dpath = os.path.join(dirs,newname+'_files')
#if os.path.isdir(source_dpath):
#print('Removing')
#os.removedirs(source_dpath)
#os.rename(source_dpath,dest_dpath)
try:
shutil.rmtree(source_dpath)
except OSError as e:
print ("Error: %s - %s." % (e.filename, e.strerror))
return None
def rename_remove_vert(files, directory = './saved_webpages/', verbose=False):
dirs = directory + 'dirs/'
for file_ in files:
# Check if the file is already processed
#if file_.split('_LinkedIn')[1] != '.html':
if ("|" in file_) | ("(" in file_) | (")" in file_):
if verbose: print('Processing:',file_)
else:
if verbose: print('\t\tAlready processed:',file_)
continue
# Get job ID
filename = directory+file_
job_id = get_job_id_wrapper(filename)
newname = file_.replace('.html', '').replace('|', '').replace('(', '').replace(')', '')
source_dpath, change_dirname = get_source_dir(file_, directory, verbose)
source_fpath = os.path.join(directory,file_)
dest_fpath = os.path.join(directory,newname+'.html')
os.rename(source_fpath,dest_fpath)
#if (change_dirname):
source_dpath = os.path.join(dirs,file_.replace('.html', "_files"))
dest_dpath = os.path.join(dirs,newname+'_files')
print(dest_dpath)
os.rename(source_dpath,dest_dpath)
return None
def get_paths(args_master, args_output):
"""Prepare the paths for the master DB and output DB
"""
# Get the cwd; set as base path for the outer files
base_path = os.getcwd()
output_data_path = os.path.join(base_path)
# If both names specified, use them
if args_master and args_output:
output_db = args_output
master_db = args_master
# if only the master is specified, use it as output
elif args_master and args_output == None:
master_db = args_master
output_db = args_master
# if only the output is specified, try it as master, else skip master
elif args_output and args_master == None:
if os.path.exists(os.path.join(output_data_path, args_output)):
master_db = args_output
else:
master_db = None
output_db = args_output
# if Nones: write to default & skip master
else:
master_db = None
# include a `data` dir to the path
output_data_path = os.path.join(output_data_path, 'data')
if os.path.exists(output_data_path) == False:
os.mkdir(output_data_path)
output_db = 'master.csv'
# If master, create its path
if master_db:
master_db = os.path.join(output_data_path, master_db)
output_db = os.path.join(output_data_path, output_db)
return master_db, output_db
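# Usage sketch (illustrative; assumes saved LinkedIn job pages live in ./saved_webpages/):
#
#   files = get_files('./saved_webpages/', verbose=True)
#   rename_files_and_dirs(files, directory='./saved_webpages/', verbose=True)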
|
nilq/baby-python
|
python
|
# Source https://github.com/NVlabs/PWC-Net/blob/master/PyTorch/models/PWCNet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from matplotlib.pyplot import imread
import matplotlib.pyplot as plt
import os
# os.environ['PYTHON_EGG_CACHE'] = 'tmp/' # a writable directory
# from correlation_package.modules.corr import Correlation
from correlation.correlation import Correlation
import numpy as np
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True),
nn.LeakyReLU(0.1))
def predict_flow(in_planes):
return nn.Conv2d(in_planes, 2, kernel_size=3, stride=1, padding=1, bias=True)
def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):
return nn.ConvTranspose2d(in_planes, out_planes, kernel_size, stride, padding, bias=True)
class PWCNet(nn.Module):
"""
PWC-DC net. add dilation convolution and densenet connections
"""
def __init__(self, md=4, path='pre_trained_models/pwc_net.pth'):
"""
        input: md --- maximum displacement (for correlation, default: 4), applied after warping
"""
super(PWCNet, self).__init__()
self.conv1a = conv(3, 16, kernel_size=3, stride=2)
self.conv1aa = conv(16, 16, kernel_size=3, stride=1)
self.conv1b = conv(16, 16, kernel_size=3, stride=1)
self.conv2a = conv(16, 32, kernel_size=3, stride=2)
self.conv2aa = conv(32, 32, kernel_size=3, stride=1)
self.conv2b = conv(32, 32, kernel_size=3, stride=1)
self.conv3a = conv(32, 64, kernel_size=3, stride=2)
self.conv3aa = conv(64, 64, kernel_size=3, stride=1)
self.conv3b = conv(64, 64, kernel_size=3, stride=1)
self.conv4a = conv(64, 96, kernel_size=3, stride=2)
self.conv4aa = conv(96, 96, kernel_size=3, stride=1)
self.conv4b = conv(96, 96, kernel_size=3, stride=1)
self.conv5a = conv(96, 128, kernel_size=3, stride=2)
self.conv5aa = conv(128, 128, kernel_size=3, stride=1)
self.conv5b = conv(128, 128, kernel_size=3, stride=1)
self.conv6aa = conv(128, 196, kernel_size=3, stride=2)
self.conv6a = conv(196, 196, kernel_size=3, stride=1)
self.conv6b = conv(196, 196, kernel_size=3, stride=1)
self.corr = Correlation(pad_size=md, kernel_size=1, max_displacement=md, stride1=1, stride2=1, corr_multiply=1)
self.leakyRELU = nn.LeakyReLU(0.1)
nd = (2 * md + 1) ** 2
dd = np.cumsum([128, 128, 96, 64, 32])
od = nd
self.conv6_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv6_1 = conv(od + dd[0], 128, kernel_size=3, stride=1)
self.conv6_2 = conv(od + dd[1], 96, kernel_size=3, stride=1)
self.conv6_3 = conv(od + dd[2], 64, kernel_size=3, stride=1)
self.conv6_4 = conv(od + dd[3], 32, kernel_size=3, stride=1)
self.predict_flow6 = predict_flow(od + dd[4])
self.deconv6 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
self.upfeat6 = deconv(od + dd[4], 2, kernel_size=4, stride=2, padding=1)
od = nd + 128 + 4
self.conv5_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv5_1 = conv(od + dd[0], 128, kernel_size=3, stride=1)
self.conv5_2 = conv(od + dd[1], 96, kernel_size=3, stride=1)
self.conv5_3 = conv(od + dd[2], 64, kernel_size=3, stride=1)
self.conv5_4 = conv(od + dd[3], 32, kernel_size=3, stride=1)
self.predict_flow5 = predict_flow(od + dd[4])
self.deconv5 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
self.upfeat5 = deconv(od + dd[4], 2, kernel_size=4, stride=2, padding=1)
od = nd + 96 + 4
self.conv4_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv4_1 = conv(od + dd[0], 128, kernel_size=3, stride=1)
self.conv4_2 = conv(od + dd[1], 96, kernel_size=3, stride=1)
self.conv4_3 = conv(od + dd[2], 64, kernel_size=3, stride=1)
self.conv4_4 = conv(od + dd[3], 32, kernel_size=3, stride=1)
self.predict_flow4 = predict_flow(od + dd[4])
self.deconv4 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
self.upfeat4 = deconv(od + dd[4], 2, kernel_size=4, stride=2, padding=1)
od = nd + 64 + 4
self.conv3_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv3_1 = conv(od + dd[0], 128, kernel_size=3, stride=1)
self.conv3_2 = conv(od + dd[1], 96, kernel_size=3, stride=1)
self.conv3_3 = conv(od + dd[2], 64, kernel_size=3, stride=1)
self.conv3_4 = conv(od + dd[3], 32, kernel_size=3, stride=1)
self.predict_flow3 = predict_flow(od + dd[4])
self.deconv3 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
self.upfeat3 = deconv(od + dd[4], 2, kernel_size=4, stride=2, padding=1)
od = nd + 32 + 4
self.conv2_0 = conv(od, 128, kernel_size=3, stride=1)
self.conv2_1 = conv(od + dd[0], 128, kernel_size=3, stride=1)
self.conv2_2 = conv(od + dd[1], 96, kernel_size=3, stride=1)
self.conv2_3 = conv(od + dd[2], 64, kernel_size=3, stride=1)
self.conv2_4 = conv(od + dd[3], 32, kernel_size=3, stride=1)
self.predict_flow2 = predict_flow(od + dd[4])
self.deconv2 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
self.dc_conv1 = conv(od + dd[4], 128, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc_conv2 = conv(128, 128, kernel_size=3, stride=1, padding=2, dilation=2)
self.dc_conv3 = conv(128, 128, kernel_size=3, stride=1, padding=4, dilation=4)
self.dc_conv4 = conv(128, 96, kernel_size=3, stride=1, padding=8, dilation=8)
self.dc_conv5 = conv(96, 64, kernel_size=3, stride=1, padding=16, dilation=16)
self.dc_conv6 = conv(64, 32, kernel_size=3, stride=1, padding=1, dilation=1)
self.dc_conv7 = predict_flow(32)
if path is None:
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
nn.init.kaiming_normal_(m.weight.data, mode='fan_in')
if m.bias is not None:
m.bias.data.zero_()
else:
data = torch.load(path)
if 'state_dict' in data.keys():
self.load_state_dict(data['state_dict'])
else:
self.load_state_dict(data)
def warp(self, x, flo):
"""
warp an image/tensor (im2) back to im1, according to the optical flow
x: [B, C, H, W] (im2)
flo: [B, 2, H, W] flow
"""
B, C, H, W = x.size()
# mesh grid
xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
grid = torch.cat((xx, yy), 1).float()
if x.is_cuda:
grid = grid.cuda()
vgrid = Variable(grid) + flo
# scale grid to [-1,1]
vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
vgrid = vgrid.permute(0, 2, 3, 1)
output = nn.functional.grid_sample(x, vgrid)
mask = torch.autograd.Variable(torch.ones(x.size())).cuda()
mask = nn.functional.grid_sample(mask, vgrid)
# if W==128:
# np.save('mask.npy', mask.cpu().data.numpy())
# np.save('warp.npy', output.cpu().data.numpy())
mask[mask < 0.9999] = 0
mask[mask > 0] = 1
return output * mask
def forward(self, x):
input_shape = (x.shape[2], x.shape[3])
im1 = x[:, :3, :, :]
im2 = x[:, 3:, :, :]
c11 = self.conv1b(self.conv1aa(self.conv1a(im1)))
c21 = self.conv1b(self.conv1aa(self.conv1a(im2)))
c12 = self.conv2b(self.conv2aa(self.conv2a(c11)))
c22 = self.conv2b(self.conv2aa(self.conv2a(c21)))
c13 = self.conv3b(self.conv3aa(self.conv3a(c12)))
c23 = self.conv3b(self.conv3aa(self.conv3a(c22)))
c14 = self.conv4b(self.conv4aa(self.conv4a(c13)))
c24 = self.conv4b(self.conv4aa(self.conv4a(c23)))
c15 = self.conv5b(self.conv5aa(self.conv5a(c14)))
c25 = self.conv5b(self.conv5aa(self.conv5a(c24)))
c16 = self.conv6b(self.conv6a(self.conv6aa(c15)))
c26 = self.conv6b(self.conv6a(self.conv6aa(c25)))
corr6 = self.corr(c16, c26)
corr6 = self.leakyRELU(corr6)
x = torch.cat((self.conv6_0(corr6), corr6), 1)
x = torch.cat((self.conv6_1(x), x), 1)
x = torch.cat((self.conv6_2(x), x), 1)
x = torch.cat((self.conv6_3(x), x), 1)
x = torch.cat((self.conv6_4(x), x), 1)
flow6 = self.predict_flow6(x)
up_flow6 = self.deconv6(flow6)
up_feat6 = self.upfeat6(x)
warp5 = self.warp(c25, up_flow6 * 0.625)
corr5 = self.corr(c15, warp5)
corr5 = self.leakyRELU(corr5)
x = torch.cat((corr5, c15, up_flow6, up_feat6), 1)
x = torch.cat((self.conv5_0(x), x), 1)
x = torch.cat((self.conv5_1(x), x), 1)
x = torch.cat((self.conv5_2(x), x), 1)
x = torch.cat((self.conv5_3(x), x), 1)
x = torch.cat((self.conv5_4(x), x), 1)
flow5 = self.predict_flow5(x)
up_flow5 = self.deconv5(flow5)
up_feat5 = self.upfeat5(x)
warp4 = self.warp(c24, up_flow5 * 1.25)
corr4 = self.corr(c14, warp4)
corr4 = self.leakyRELU(corr4)
x = torch.cat((corr4, c14, up_flow5, up_feat5), 1)
x = torch.cat((self.conv4_0(x), x), 1)
x = torch.cat((self.conv4_1(x), x), 1)
x = torch.cat((self.conv4_2(x), x), 1)
x = torch.cat((self.conv4_3(x), x), 1)
x = torch.cat((self.conv4_4(x), x), 1)
flow4 = self.predict_flow4(x)
up_flow4 = self.deconv4(flow4)
up_feat4 = self.upfeat4(x)
warp3 = self.warp(c23, up_flow4 * 2.5)
corr3 = self.corr(c13, warp3)
corr3 = self.leakyRELU(corr3)
x = torch.cat((corr3, c13, up_flow4, up_feat4), 1)
x = torch.cat((self.conv3_0(x), x), 1)
x = torch.cat((self.conv3_1(x), x), 1)
x = torch.cat((self.conv3_2(x), x), 1)
x = torch.cat((self.conv3_3(x), x), 1)
x = torch.cat((self.conv3_4(x), x), 1)
flow3 = self.predict_flow3(x)
up_flow3 = self.deconv3(flow3)
up_feat3 = self.upfeat3(x)
warp2 = self.warp(c22, up_flow3 * 5.0)
corr2 = self.corr(c12, warp2)
corr2 = self.leakyRELU(corr2)
x = torch.cat((corr2, c12, up_flow3, up_feat3), 1)
x = torch.cat((self.conv2_0(x), x), 1)
x = torch.cat((self.conv2_1(x), x), 1)
x = torch.cat((self.conv2_2(x), x), 1)
x = torch.cat((self.conv2_3(x), x), 1)
x = torch.cat((self.conv2_4(x), x), 1)
flow2 = self.predict_flow2(x)
x = self.dc_conv4(self.dc_conv3(self.dc_conv2(self.dc_conv1(x))))
flow2 = flow2 + self.dc_conv7(self.dc_conv6(self.dc_conv5(x)))
if self.training:
return flow2, flow3, flow4, flow5, flow6
else:
return 20.0 * F.interpolate(flow2, size=(input_shape), mode='bicubic', align_corners=False)
if __name__ == '__main__':
import matplotlib.pyplot as plt
import dataset
from resample.resample2d import Resample2d
pwc_net = PWCNet().cuda().eval()
dataset = dataset.REDS()
resample = Resample2d()
images = dataset[0][1]
image_1 = images[:3].unsqueeze(dim=0).cuda()
image_2 = images[3:6].unsqueeze(dim=0).cuda()
plt.imshow(image_1[0].detach().cpu().numpy().transpose(1, 2, 0))
plt.show()
plt.imshow(image_2[0].detach().cpu().numpy().transpose(1, 2, 0))
plt.show()
    flow = pwc_net(torch.cat((image_1, image_2), dim=1))
plt.imshow(flow.cpu().detach().numpy()[0, 0])
plt.show()
plt.imshow(flow.cpu().detach().numpy()[0, 1])
plt.show()
image_rec = resample(image_2, flow)
print(image_rec.shape)
plt.imshow(image_rec[0].detach().cpu().numpy().transpose(1, 2, 0))
plt.show()
|
nilq/baby-python
|
python
|
import pytest
import tensorflow as tf
from doctr.models import backbones
@pytest.mark.parametrize(
"arch_name, input_shape, output_size",
[
["vgg16_bn", (224, 224, 3), (7, 56, 512)],
["resnet31", (32, 128, 3), (4, 32, 512)],
["magc_resnet31", (32, 128, 3), (4, 32, 512)],
["mobilenet_v3_small", (512, 512, 3), (16, 16, 576)],
["mobilenet_v3_large", (512, 512, 3), (16, 16, 960)],
],
)
def test_classification_architectures(arch_name, input_shape, output_size):
# Model
batch_size = 2
model = backbones.__dict__[arch_name](pretrained=True, input_shape=input_shape)
# Forward
out = model(tf.random.uniform(shape=[batch_size, *input_shape], maxval=1, dtype=tf.float32))
# Output checks
assert isinstance(out, tf.Tensor)
assert out.dtype == tf.float32
assert out.numpy().shape == (batch_size, *output_size)
|
nilq/baby-python
|
python
|
from fsm.models import ActivityLog
def quit_edge(self, edge, fsmStack, request, **kwargs):
"""
Edge method that terminates this live-session.
"""
for studentState in fsmStack.state.linkChildren.all():
studentState.linkState = None # detach from our state
studentState.save()
return edge.toNode
QuitEdgeData = dict(
name='quit', toNode='END', title='End this live-session',
description='''If you have no more questions to ask, end
this live session.''',
help='''Click here to end this live-session. ''',
showOption=True,
)
class START(object):
"""
This activity will allow you to select questions
for students to answer in-class.
"""
def start_event(self, node, fsmStack, request, **kwargs):
'event handler for START node'
unit = fsmStack.state.get_data_attr('unit')
course = fsmStack.state.get_data_attr('course')
fsmStack.state.title = 'Teaching: %s' % unit.title
activity = ActivityLog(
fsmName=fsmStack.state.fsmNode.fsm.name,
course=course
) # create a new activity
activity.save()
fsmStack.state.activity = activity
fsmStack.state.isLiveSession = True
return node.get_path(fsmStack.state, request, **kwargs)
# node specification data goes here
path = 'fsm:fsm_node'
title = 'Start Teaching a Live Session'
edges = (
dict(name='next', toNode='CHOOSE', title='Start asking a question',
showOption=True),
)
class CHOOSE(object):
"""
At this step you choose a question to ask in this live session.
"""
def select_UnitLesson_filter(self, edge, unit_lesson):
"""
Return True if input is acceptable for this edge.
input: UnitLesson
"""
return unit_lesson.is_question()
# node specification data goes here
path = 'ct:unit_lessons'
title = 'Choose a Question to Ask'
help = '''Select a question below that you want to ask your students in this
live session, then click its Ask this Question button. '''
edges = (
dict(name='select_UnitLesson', toNode='QUESTION',
title='Ask this question',
help='''Click here to start posing this question to your
live session students.'''),
)
class QUESTION(object):
path = 'ct:live_question'
title = 'Ask a question to students in a classroom live-session'
help = '''Explain the question and ask if there are any aspects
where the students are unsure what exactly they are being asked.
Then click the START button and ask the students to think about
the question for a minute or so, then briefly type whatever
answer they come up with. You will be able to monitor their
progress on this page in real-time.'''
edges = (
dict(name='next', toNode='ANSWER', title='Present the answer',
help='''Click here to move to the assessment stage of this
exercise. '''),
)
class ANSWER(object):
quit_edge = quit_edge
path = 'ct:ul_teach'
title = 'Present the answer for students to self-assess'
help = '''Explain the answer and ask if there are any aspects
the students are wondering about. Then ask them to assess
their own answer against the correct answer'''
edges = (
dict(name='next', toNode='RECYCLE', title='Finish this question',
help='''Click here to end this question. '''),
QuitEdgeData,
)
class RECYCLE(object):
"""
You have completed presenting this question. Do you want to
ask the students another question, or end this live session?
"""
def next_edge(self, edge, fsmStack, request, pageData=None, **kwargs):
'make sure timer is reset before going to another question'
pageData.set_refresh_timer(request, False)
return edge.toNode
path = 'fsm:fsm_node'
title = 'Do you want to ask another question?'
edges = (
dict(name='next', toNode='CHOOSE', title='Move on to another question',
help='''Click here to choose another question to ask. '''),
QuitEdgeData,
)
class END(object):
# node specification data goes here
path = 'ct:unit_tasks'
title = 'Live Session completed'
help = '''You have successfully ended this live-session.
See below for suggested next steps for what you can work on next
to help students with this courselet.'''
def get_specs():
'get FSM specifications stored in this file'
from fsm.fsmspec import FSMSpecification
spec = FSMSpecification(
name='liveteach',
title='Teach a live (classroom) session',
description='''You can begin teaching this courselet in a
live classroom session by clicking here:''',
pluginNodes=[START, CHOOSE, QUESTION, ANSWER, RECYCLE, END],
fsmGroups=('teach/unit/published',),
)
return (spec,)
|
nilq/baby-python
|
python
|
from core import Bot
from templates import Template
template = Template()
class Message:
def message(self):
@Bot.bot.message_handler(content_types=['text'])
def text_message(message):
if message.text == "/start":
Bot.bot.reply_to(message, template.start(), parse_mode='HTML', disable_web_page_preview=True)
elif message.text == "/help":
Bot.bot.reply_to(message, template.help(), parse_mode='HTML', disable_web_page_preview=True)
elif message.text == "/about":
Bot.bot.reply_to(message, template.about(), parse_mode='HTML', disable_web_page_preview=True)
else:
Bot.bot.reply_to(message, template.error(), parse_mode='HTML', disable_web_page_preview=True)
pass
pass
pass
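# Usage sketch (illustrative; assumes core.Bot exposes a configured pyTelegramBotAPI
# TeleBot instance as Bot.bot):
#
#   if __name__ == '__main__':
#       Message().message()          # register the text handlers
#       Bot.bot.infinity_polling()   # start long polling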
|
nilq/baby-python
|
python
|
from django.conf.urls import include, url
from tumblelog.views import (
PostIndexView, AddPostView, PostDetailView,
UpdatePostView, DeletePostView, ImageFileView,
TestSessionView,
)
from django_mongoengine import mongo_admin
post_patterns = [
url(r'^$', PostDetailView.as_view(), name="post"),
url(r'^edit/$', UpdatePostView.as_view(), name="post_update"),
url(r'^delete/$', DeletePostView.as_view(), name="post_delete")
]
urlpatterns = [
url(r'^test-session/', TestSessionView.as_view()),
url(r'^$', PostIndexView.as_view(), name="post_index"),
url(r'^new/$', AddPostView.as_view(), name="post_new"),
url(r'^new/(?P<post_type>(post|video|image|quote|music))/$',
AddPostView.as_view(), name="post_new"),
url(r'^admin/', include(mongo_admin.site.urls)),
url(r'^image-file/(?P<slug>[a-zA-Z0-9-]+)/', ImageFileView.as_view(),
name="image_file"),
url(r'^(?P<slug>[a-zA-Z0-9-]+)/', include(post_patterns)),
]
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# =============================================================================
# CrowdTangle Utils
# =============================================================================
#
# Miscellaneous utility functions to be used with CrowdTangle Link.
# @Author: Brayan Rodriguez <bradrd2009jp@gmail.com>
# @Organization: LIIT-UNED 2020
#TODO:
#Include a module to perform searches with the CrowdTangle API.
import urllib.request, json
import pandas as pd
import tldextract
#Constantes:
main_url = 'https://api.crowdtangle.com'
__all__ = ['get_dict', 'get_json', 'ctdatapost_', 'ctdatalink_', 'get_ct_data', 'ct_lists', 'ct_accounts', 'ct_leaderboard_data', 'ct_posts', 'ct_search_data']
def get_dict(json_data):
return {key: json_data[key] for key in json_data.keys()}
def get_json(url_data):
with urllib.request.urlopen(url_data) as url:
data = json.loads(url.read().decode())
return data
class ctdatapost_():
def __init__(self, json_data):
self.json_data = json_data
self.dict_data = get_dict(json_data)
def raw_dict(self):
return self.dict_data
def status(self):
return self.dict_data['status']
def result(self):
return self.dict_data['result']
def notes(self):
try:
return self.dict_data['notes']
except KeyError:
print("There was not included \'notes\' in this searching return")
return ''
def post(self):
result_dict = get_dict(self.result())
return result_dict['posts']
def platform_id(self):
post_dict = get_dict(self.post()[0])
return post_dict['platformId']
def date(self):
post_dict = get_dict(self.post()[0])
return post_dict['date']
def message(self):
try:
post_dict = get_dict(self.post()[0])
return post_dict['message']
except KeyError:
print("There was not included \'message\' in this searching return")
return ''
def title(self):
try:
post_dict = get_dict(self.post()[0])
return post_dict['title']
except KeyError:
print("There was not included \'title\' in this searching return")
return ''
def ct_id(self):
post_dict = get_dict(self.post()[0])
return post_dict['id']
def link(self):
post_dict = get_dict(self.post()[0])
return post_dict['link']
def post_url(self):
post_dict = get_dict(self.post()[0])
return post_dict['postUrl']
def domain(self):
ext = tldextract.extract(self.link())
return ext.domain
def type(self):
post_dict = get_dict(self.post()[0])
return post_dict['type']
def media(self):
post_dict = get_dict(self.post()[0])
return post_dict['media']
def media_type(self):
media_dict = get_dict(self.media()[0])
return media_dict.get('type')
def media_url(self):
media_dict = get_dict(self.media()[0])
return media_dict.get('full')
def statistics(self):
post_dict = get_dict(self.post()[0])
return post_dict['statistics']
def statistics_df(self):
stat_dict = get_dict(self.statistics())
columns = ['platformId']
value_lst = [self.platform_id()]
for key, value in stat_dict['actual'].items():
columns.append('actual_%s'%key)
value_lst.append(value)
for key, value in stat_dict['expected'].items():
columns.append('expected_%s'%key)
value_lst.append(value)
df = pd.DataFrame([value_lst], columns=columns)
return df
def history(self):
try:
post_dict = get_dict(self.post()[0])
return post_dict['history']
except KeyError:
print("There was not included \'history\' in this searching return")
return 0
def history_df(self):
try:
post_dict = get_dict(self.post()[0])
df_prev = pd.DataFrame(post_dict['history'])
df_final = pd.DataFrame()
lst_aux = ['likeCount', 'shareCount', 'commentCount', 'loveCount', 'wowCount', 'hahaCount', 'sadCount', 'angryCount', 'thankfulCount', 'careCount']
for i in lst_aux:
df_final['actual_%s'%i] = [k.get(i) for k in df_prev['actual']]
for i in lst_aux:
df_final['expected_%s'%i] = [k.get(i) for k in df_prev['expected']]
df_final['timestep'] = df_prev['timestep'].tolist()
df_final['date'] = df_prev['date'].tolist()
df_final['score'] = df_prev['score'].tolist()
return df_final
except KeyError:
print("There was not included \'history\' in this searching return")
return 0
class ctdatalink_():
def __init__(self, json_data):
self.json_data = json_data
self.dict_data = get_dict(json_data)
def raw_dict(self):
return self.dict_data
def status(self):
return self.dict_data['status']
def result(self):
return self.dict_data['result']
def notes(self):
try:
return self.dict_data['notes']
except KeyError:
print("There was not included \'notes\' in this searching return")
return ''
def post(self):
result_dict = get_dict(self.result())
return result_dict['posts']
def platform_id(self):
post_dict = get_dict(self.post()[0])
return post_dict['platformId']
def date(self):
post_dict = get_dict(self.post()[0])
return post_dict['date']
def message(self):
try:
post_dict = get_dict(self.post()[0])
return post_dict['message']
except KeyError:
print("There was not included \'message\' in this searching return")
return ''
def title(self):
try:
post_dict = get_dict(self.post()[0])
return post_dict['title']
except KeyError:
print("There was not included \'title\' in this searching return")
return ''
def caption(self):
post_dict = get_dict(self.post()[0])
return post_dict['caption']
def link(self):
post_dict = get_dict(self.post()[0])
return post_dict['link']
def post_url(self):
post_dict = get_dict(self.post()[0])
return post_dict['postUrl']
def domain(self):
ext = tldextract.extract(self.link())
return ext.domain
def media(self):
post_dict = get_dict(self.post()[0])
return post_dict['media']
def media_type(self):
media_dict = get_dict(self.media()[0])
return media_dict.get('type')
def media_url(self):
media_dict = get_dict(self.media()[0])
return media_dict.get('full')
def statistics(self):
post_dict = get_dict(self.post()[0])
return post_dict['statistics']
def statistics_df(self):
stat_dict = get_dict(self.statistics())
columns = ['platformId']
value_lst = [self.platform_id()]
for key, value in stat_dict['actual'].items():
columns.append('actual_%s'%key)
value_lst.append(value)
for key, value in stat_dict['expected'].items():
columns.append('expected_%s'%key)
value_lst.append(value)
df = pd.DataFrame([value_lst], columns=columns)
return df
def history(self):
try:
post_dict = get_dict(self.post()[0])
return post_dict['history']
except KeyError:
print("There was not included \'history\' in this searching return")
return 0
def history_df(self):
try:
post_dict = get_dict(self.post()[0])
df_prev = pd.DataFrame(post_dict['history'])
df_final = pd.DataFrame()
lst_aux = ['likeCount', 'shareCount', 'commentCount', 'loveCount', 'wowCount', 'hahaCount', 'sadCount', 'angryCount', 'thankfulCount', 'careCount']
for i in lst_aux:
df_final['actual_%s'%i] = [k.get(i) for k in df_prev['actual']]
for i in lst_aux:
df_final['expected_%s'%i] = [k.get(i) for k in df_prev['expected']]
df_final['timestep'] = df_prev['timestep'].tolist()
df_final['date'] = df_prev['date'].tolist()
df_final['score'] = df_prev['score'].tolist()
return df_final
except KeyError:
print("There was not included \'history\' in this searching return")
return 0
class ct_lists():
def __init__(self, json_data):
self.json_data = json_data
self.dict_data = get_dict(json_data)
def raw_dict(self):
return self.dict_data
def status(self):
return self.dict_data['status']
def result(self):
return self.dict_data['result']
def list_of_dict(self):
result_dict = get_dict(self.result())
return result_dict['lists']
def list_df(self):
df_final = pd.DataFrame()
lst_aux = ['id', 'title', 'type']
for i in lst_aux:
df_final[i] = [k.get(i) for k in self.list_of_dict()]
return df_final
def lists_of_id(self):
return self.list_df()['id'].tolist()
class ct_accounts():
def __init__(self, json_data):
self.json_data = json_data
self.dict_data = get_dict(json_data)
def raw_dict(self):
return self.dict_data
def status(self):
return self.dict_data['status']
def result(self):
return self.dict_data['result']
def list_of_accounts(self):
result_dict = get_dict(self.result())
return result_dict['accounts']
def accounts_df(self):
df_final = pd.DataFrame()
        lst_aux = ['id', 'name', 'handle', 'profileImage', 'subscriberCount', 'url', 'platformId', 'accountType', 'pageAdminTopCountry', 'verified']
for i in lst_aux:
df_final[i] = [k.get(i) for k in self.list_of_accounts()]
return df_final
class ct_leaderboard_data():
def __init__(self, json_data):
self.json_data = json_data
self.dict_data = get_dict(json_data)
def raw_dict(self):
return self.dict_data
def status(self):
return self.dict_data['status']
def result(self):
return self.dict_data['result']
def list_of_accounts(self):
post_dict = get_dict(self.result())
return post_dict['accountStatistics']
def return_list(self, key, dict_of_dicts):
return [k.get(key) for k in dict_of_dicts]
def get_df(self):
df_prev = pd.DataFrame()
df_final = pd.DataFrame()
lst_aux = ['account', 'summary', 'subscriberData', ]
for i in lst_aux:
df_prev[i] = [k.get(i) for k in self.list_of_accounts()]
lst_acc = ['id', 'name', 'handle', 'subscriberCount', 'url', 'platformId', 'pageAdminTopCountry', 'verified']
for i in lst_acc:
df_final[i] = self.return_list(i, df_prev['account'])
lst_sum = ['likeCount', 'loveCount', 'hahaCount', 'wowCount', 'thankfulCount', 'angryCount', 'sadCount', 'shareCount', 'commentCount', 'totalInteractionCount', 'interactionRate']
for i in lst_sum:
df_final[i] = self.return_list(i, df_prev['summary'])
lst_sbd = ['initialCount', 'finalCount']
for i in lst_sbd:
df_final['subscriber_%s'%i] = self.return_list(i, df_prev['subscriberData'])
return df_final
#TODO: Fully implement search; CrowdTangle permission is required.
class ct_search_data():
def __init__(self, json_data):
self.json_data = json_data
self.dict_data = get_dict(json_data)
def raw_dict(self):
return self.dict_data
class ct_posts():
def __init__(self, json_data):
self.json_data = json_data
self.dict_data = get_dict(json_data)
def raw_dict(self):
return self.dict_data
def status(self):
return self.dict_data['status']
def result(self):
return self.dict_data['result']
def list_of_posts(self):
post_dict = get_dict(self.result())
return post_dict['posts']
def get_df(self):
df_final = pd.DataFrame()
lst_aux = ['platformId', 'date', 'update', 'type', 'title', 'caption', 'description', 'message', 'link', 'postUrl', 'subscriberCount', 'score', ]
for i in lst_aux:
df_final[i] = [k.get(i) for k in self.list_of_posts()]
return df_final
class get_ct_data():
def __init__(self, token):
self.token = token
def ctpost(self, ctpost):
url_data = main_url + "/ctpost/" + ctpost + "?token=" + self.token
json_data = get_json(url_data)
ctp = ctdatapost_(json_data)
return ctp
def post(self, fbpost, includeHistory = False):
if includeHistory:
url_data = main_url + "/post/" + fbpost + "?token=" + self.token + "&includeHistory=true"
else:
url_data = main_url + "/post/" + fbpost + "?token=" + self.token
json_data = get_json(url_data)
ctp = ctdatapost_(json_data)
return ctp
def lists(self):
url_data = main_url + "/lists" + "?token=" + self.token
json_data = get_json(url_data)
ctl = ct_lists(json_data)
return ctl
def list(self, id_, count = 10, offset_options = 0):
url_data = main_url + "/lists/" + str(id_) + "/accounts?token=" + self.token
#options:
if count > 100 : count = 100
if count == 0 : count = 1
url_data += "&offset=%d&count=%d"%(offset_options, count)
json_data = get_json(url_data)
cta = ct_accounts(json_data)
return cta
def links(self, link, count=100, includeHistory=False, includeSummary=False, **kwargs):
url_data = main_url + "/links" + "?token=" + self.token + "&link=" + link
if count > 100: count = 100
if count == 0: count = 1
url_data += '&count=%d'%count
if includeHistory:
url_data += '&includeHistory=true'
if includeSummary:
url_data += '&includeSummary=true'
for key, value in kwargs.items():
if key == 'startDate':
url_data += '&startDate=%s'%value #1."yyyy-mm-ddThh:mm:ss" 2."yyyy-mm-dd"
if key == 'endDate':
url_data += '&endDate=%s'%value #1."yyyy-mm-ddThh:mm:ss" 2."yyyy-mm-dd"
if key == 'sortBy':
url_data += '&sortBy=%s'%value #date, subscriber_count, total_interactions
json_data = get_json(url_data)
ctl = ctdatalink_(json_data)
return ctl
    #TODO: Ask which data fields would be useful:
def posts(self, count=10, includeHistory=False, includeSummary=False, **kwargs):
url_data = main_url + "/posts" + "?token=" + self.token
if count > 100: count = 100
if count == 0: count = 1
url_data += '&count=%d'%count
if includeHistory:
url_data += '&includeHistory=true'
if includeSummary:
url_data += '&includeSummary=true'
lst_aux = ['weightAngry', 'weightComment', 'weightHaha', 'weightLike', 'weightLove', 'weightRepost', 'weightSad', 'weightShare', 'weightUpvote', 'weightView', 'weightWow']
for key, value in kwargs.items():
if key == 'startDate':
url_data += '&startDate=%s'%value #1."yyyy-mm-ddThh:mm:ss" 2."yyyy-mm-dd"
if key == 'endDate':
url_data += '&endDate=%s'%value #1."yyyy-mm-ddThh:mm:ss" 2."yyyy-mm-dd"
if key == 'language':
url_data += '&language=%s'%value #en, es, zh-CN, zh-TW, etc.
if key == 'sortBy':
url_data += '&sortBy=%s'%value #overperforming, date, interaction_rate, total_interactions, underperforming
if key == 'types':
url_data += '&types=%s'%value #episode, extra_clip, link, live_video, live_video_complete, live_video_scheduled, native_video, photo, status, trailer, video, vine, youtube
if key in lst_aux:
url_data += '&%s=%d'%(key,value) #0 (default) - 10
json_data = get_json(url_data)
ctps = ct_posts(json_data)
return ctps
def leaderboard(self, count = 50, **kwargs):
url_data = main_url + "/leaderboard" + "?token=" + self.token
if count > 100: count = 100
if count == 0: count = 1
url_data += '&count=%d'%count
for key, value in kwargs.items():
if key == 'startDate':
url_data += '&startDate=%s'%value #1."yyyy-mm-ddThh:mm:ss" 2."yyyy-mm-dd"
if key == 'endDate':
url_data += '&endDate=%s'%value #1."yyyy-mm-ddThh:mm:ss" 2."yyyy-mm-dd"
if key == 'orderBy':
url_data += '&orderBy=%s'%value #asc, desc
if key == 'sortBy':
url_data += '&sortBy=%s'%value #interaction_rate, total_interactions
json_data = get_json(url_data)
ctlb = ct_leaderboard_data(json_data)
return ctlb
def search(self, count = 10, includeHistory = False, **kwargs):
url_data = main_url + "/posts/search" + "?token=" + self.token
if count > 100: count = 100
if count == 0: count = 1
url_data += '&count=%d'%count
if includeHistory:
url_data += '&includeHistory=true'
for key, value in kwargs.items():
if key == 'startDate':
url_data += '&startDate=%s'%value #1."yyyy-mm-ddThh:mm:ss" 2."yyyy-mm-dd"
if key == 'endDate':
url_data += '&endDate=%s'%value #1."yyyy-mm-ddThh:mm:ss" 2."yyyy-mm-dd"
if key == 'orderBy':
url_data += '&orderBy=%s'%value #asc, desc
if key == 'sortBy':
url_data += '&sortBy=%s'%value #interaction_rate, total_interactions
if key == 'language':
url_data += '&language=%s'%value #es, en, zh-CN, zh-TW, ...
if key == 'searchField':
url_data += '&searchField=%s'%value # text_fields_and_image_text, include_query_strings, text_fields_only , account_name_only, image_text_only
if key == 'searchTerm':
url_data += '&searchTerm=%s'%value
json_data = get_json(url_data)
ctsc = ct_search_data(json_data)
return ctsc
if __name__ == '__main__':
print("Module CrowdTangle Extractor")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright © Dragon Dollar Limited
# contact: contact@dragondollar.com
#
# This software is a collection of webservices designed to provide a secure
# and scalable framework to build e-commerce websites.
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-B
# license as circulated by CEA, CNRS and INRIA at the following URL
# " http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
#
#############################################################################
import settings
import datetime
import logging
import os
import random
import ujson
import urllib
import urllib2
from B2SUtils.errors import ValidationError
def _get_mock_vessel_detail():
return {
"meta": {
"cp_consumed": 1.5, "cp_remaining": 96.0, "limit": 5,
"next": None, "offset": 0, "previous": None, "total_count": 1},
"objects": [{
"destination": "BAR HARBOR",
"etatime": "2014-10-28T11:00+0000",
"flag": "IT|Italy",
"heading": "307.0",
"imonumber": 9362542,
"last_ports": [{
"arrival": "2014-10-21T12:37+0000",
"departure": "2014-10-23T18:08+0000",
"locode": "CAMTR",
"portname": "Montreal"
}, {
"arrival": "2014-10-27T10:10+0000",
"departure": "2014-10-27T17:50+0000",
"locode": "CAHAL",
"portname": "Halifax"
}],
"latitude": str(random.uniform(25, 43.628562)),
"location": "Gulf of Maine, CA",
"longitude": str(random.uniform(-30, -66.714317)),
"mmsinumber": 247229700,
"name": "AIDABELLA",
"navigationstatus": "under way using engine",
"photos": "//img3.fleetmon.com/thumbnails/AIDABELLA_603862.220x146.jpg|//img3.fleetmon.com/thumbnails/AIDABELLA_603862.570x1140.jpg",
"positionreceived": datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M+0000'),
"publicurl": "http://www.fleetmon.com/en/vessels/Aidabella_50934",
"type": "Passenger ship"}
]}
class FleetmonAPI:
def searchVessel(self, name=None, imo=None, mmsi=None):
kwargs = {}
if imo:
kwargs['imo'] = imo
elif mmsi:
kwargs['mmsi'] = mmsi
elif name:
kwargs['q'] = name
else:
raise ValidationError('INVALID_REQUEST')
result = self._execute('/api/p/personal-v1/vesselurl/', **kwargs)
objects = result['objects']
while result['meta']['next']:
result = self._execute(result['meta']['next'])
objects += result['objects']
return objects
def getVesselInfo(self, name=None, imo=None, mmsi=None):
kwargs = {}
if imo:
kwargs['imonumber'] = imo
elif mmsi:
kwargs['mmsinumber'] = mmsi
elif name:
kwargs['q'] = name
else:
raise ValidationError('INVALID_REQUEST')
kwargs['lastports'] = 1
if settings.USE_MOCK_FLEETMON_DATA:
result = _get_mock_vessel_detail()
else:
result = self._execute('/api/p/personal-v1/vessels_terrestrial/',
**kwargs)
objects = result['objects']
while result['meta']['next']:
result = self._execute(result['meta']['next'])
objects += result['objects']
return objects
def searchPort(self, name=None, country=None, locode=None):
kwargs = {}
if locode:
kwargs['locode'] = locode
elif name:
kwargs['q'] = name
if country:
kwargs['country_isocode'] = country
else:
raise ValidationError('INVALID_REQUEST')
result = self._execute('/api/p/personal-v1/porturl/', **kwargs)
objects = result['objects']
while result['meta']['next']:
result = self._execute(result['meta']['next'])
objects += result['objects']
return objects
def _execute(self, path, **kwargs):
api_url = os.path.join(settings.FLEETMON_API_URL, path.lstrip('/'))
if kwargs:
kwargs.update({
'username': settings.FLEETMON_USERNAME,
'api_key': settings.FLEETMON_API_KEY,
'format': 'json',
})
api_url += "?%s" % urllib.urlencode(kwargs)
try:
req = urllib2.Request(api_url)
resp = urllib2.urlopen(req,
timeout=settings.THIRDPARTY_ACCESS_TIMEOUT)
json_return = ujson.loads(resp.read())
logging.info('Got return from Fleetmon (url: %s) : \n%s',
api_url, json_return)
return json_return
except Exception, e:
logging.error("Got exception when accessing third-party API "
"(url: %s) : %s", api_url, e, exc_info=True)
raise
|
nilq/baby-python
|
python
|
# import numpy as np
# r= [1.0,1.0,1.0,-1.0,1.0,1.0]
# gamma = 0.5
#
# r = np.array(r)
# discounted_r = np.zeros_like(r)
# running_add = 0
# # we go from last reward to first one so we don't have to do exponentiations
# for t in reversed(range(0, r.size)):
# if r[t] != 0:
# running_add = 0 # if the game ended (in Pong), reset the reward sum
# running_add = running_add * gamma + r[t] # the point here is to use Horner's method to compute those rewards efficiently
# discounted_r[t] = running_add
# discounted_r -= np.mean(discounted_r) #normalizing the result
# discounted_r /= np.std(discounted_r) #idem
# print (discounted_r)
#
# print ('{0:02b}'.format(0))
from gym import envs
envids = [spec.id for spec in envs.registry.all()]
for envid in sorted(envids):
print(envid)
import gym
# initializing our environment
env = gym.make('BipedalWalker-v2')
nb_actions = env.action_space.shape[0]  # BipedalWalker has a continuous Box action space, so use shape, not .n
# beginning of an episode
observation = env.reset()
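# A minimal rollout sketch (an illustrative addition, not part of the original script):
# sample random actions from the Box action space until the episode ends, using the
# classic 4-tuple step API of this gym version.
done = False
total_reward = 0.0
while not done:
    action = env.action_space.sample()  # random action; a trained policy would go here
    observation, reward, done, info = env.step(action)
    total_reward += reward
print("episode return:", total_reward)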
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from project.models import Project
def project_index(request):
p1 = Project(
title='My First Project',
description='A web development project.',
technology='Django',
image='img/project1.png'
)
p2 = Project(
title='My Second Project',
description='A web development project.',
technology='Django',
image='img/project1.png'
)
projects = [p1, p2]
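# Note: these Project instances are built inline for demonstration; in a typical view
# they would instead be loaded from the database, e.g. projects = Project.objects.all()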
context = {
'projects': projects
}
return render(request, 'project_index.html', context)
|
nilq/baby-python
|
python
|
import ephem
manen = ((ephem.Io(), 'i'),
(ephem.Europa(), 'e'),
(ephem.Ganymede(), 'g'),
(ephem.Callisto(), 'c'))
nu = ephem.now()
interval = ephem.minute
m = ephem.Europa()
t = nu
lengte = 80
jupiterIndex = int(lengte / 2) + 1
while True:
regel = lengte * [' ']
regel[jupiterIndex] = 'J'
for maan, karakter in manen:
maan.compute(nu)
pos = int(round(-maan.x + lengte / 2))
if pos != jupiterIndex:
regel[pos] = karakter
print(str(ephem.date(nu)), ''.join(regel))
nu += interval
|
nilq/baby-python
|
python
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gym.envs.registration import register
register(
id='MB_FetchSlide-v1',
entry_point='pddm.envs.fetch.slide:FetchSlideEnv',
max_episode_steps=50,
)
register(
id='MB_FetchPush-v1',
entry_point='pddm.envs.fetch.push:FetchPushEnv',
max_episode_steps=50,
)
register(
id='MB_FetchPickAndPlace-v1',
entry_point='pddm.envs.fetch.pick_and_place:FetchPickAndPlaceEnv',
max_episode_steps=50,
)
register(
id='MB_FetchReach-v1',
entry_point='pddm.envs.fetch.reach:FetchReachEnv',
max_episode_steps=50,
)
|
nilq/baby-python
|
python
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import (
MagicMock,
patch,
)
from ... import commands
from ...commands import restart
from .command_test import (
mock_arguments,
mock_configuration,
)
class RestartTest(unittest.TestCase):
@patch.object(restart, 'Stop')
@patch.object(restart, 'Start')
def test_restart(self, commands_Start, commands_Stop) -> None:
state = MagicMock()
state.running = ['.']
arguments = mock_arguments()
arguments.terminal = False
configuration = mock_configuration()
configuration.get_search_path.return_value = ['root']
source_directory = '.'
with patch.object(restart, 'Stop') as commands_Stop, \
patch.object(restart, 'Start') as commands_Start, \
patch.object(restart, 'Incremental') as commands_Incremental:
commands.Restart(
arguments,
configuration,
source_directory,
blocking=False)._run()
commands_Stop.assert_called_with(
arguments,
configuration,
source_directory)
commands_Start.assert_called_with(
arguments,
configuration,
source_directory)
commands_Incremental.assert_not_called()
with patch.object(restart, 'Stop') as commands_Stop, \
patch.object(restart, 'Start') as commands_Start, \
patch.object(restart, 'Incremental') as commands_Incremental:
commands.Restart(
arguments,
configuration,
source_directory)._run()
commands_Stop.assert_called_with(
arguments,
configuration,
source_directory)
commands_Incremental.assert_called_with(
arguments,
configuration,
source_directory)
commands_Start.assert_not_called()
|
nilq/baby-python
|
python
|
from django.template import Engine
_dirs_undefined = object()
class JsEngine(Engine):
def __init__(self, dirs=None, app_dirs=False,
allowed_include_roots=None, context_processors=None,
debug=False, loaders=None, string_if_invalid='',
file_charset='utf-8'):
# Set the default loader to the JS loader
if loaders is None:
loaders = ['django_jsx.template.loaders.JsLoader']
if app_dirs:
loaders += ['django.template.loaders.app_directories.Loader']
app_dirs = False
super(JsEngine, self).__init__(dirs=dirs, app_dirs=app_dirs,
allowed_include_roots=allowed_include_roots, context_processors=context_processors,
debug=debug, loaders=loaders, string_if_invalid=string_if_invalid,
file_charset=file_charset)
def get_template(self, template_name, dirs=_dirs_undefined):
if dirs is _dirs_undefined:
dirs = None
template_path, origin = self.find_template(template_name, dirs)
return template_path
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from ffmpymedia import __author__, __version__, __version_info__, __copyright__
video_codecs = {'mpeg2video': 'MPEG-2 video',
'h264': 'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10',
'vp8': 'On2 VP8',
'mpeg4': 'MPEG-4 part 2',
'theora': 'Theora',
'msmpeg4v2': 'MPEG-4 part 2 Microsoft variant version 2',
'vc1': 'SMPTE VC-1',
'mjpeg': 'MJPEG (Motion JPEG)'}
quicktime_video_codec_tags = {'xd54': 'XDCAM HD422 720p24 CBR',
'xd59': 'XDCAM HD422 720p60 CBR',
'xd5a': 'XDCAM HD422 720p50 CBR',
'xd5b': 'XDCAM HD422 1080i60 CBR',
'xd5c': 'XDCAM HD422 1080i50 CBR',
'xd5d': 'XDCAM HD422 1080p24 CBR',
'xd5e': 'XDCAM HD422 1080p25 CBR',
'xd5f': 'XDCAM HD422 1080p30 CBR',
'xdvb': 'XDCAM EX 1080i60 (35 Mb/s VBR)',
'DX50': 'MPEG-4 part 2',
'XVID': 'MPEG-4 part 2',
}
audio_codecs = {'flac': 'FLAC (Free Lossless Audio Codec)',
'mp3': 'MP3 (MPEG audio layer 3)',
'vorbis': 'Vorbis',
'aac': 'AAC (Advanced Audio Coding)',
'mp2': 'MP2 (MPEG audio layer 2)',
'pcm_s16le': 'PCM signed 16-bit little-endian',
'wmav2': 'Windows Media Audio 2',
'sowt': 'PCM signed 16-bit little-endian',
}
image_codecs = {'png': 'PNG (Portable Network Graphics) image',
'bmp': 'BMP (Windows and OS/2 bitmap)',
'gif': 'GIF (Graphics Interchange Format)',
'alias_pix': 'Alias/Wavefront PIX image',
'pgm': 'PGM (Portable GrayMap) image',
'tiff': 'TIFF image',
'targa': 'Truevision Targa image',
}
subtitle_codecs = {'ass': 'ASS (Advanced SubStation Alpha) subtitle',
'subrip': 'SubRip subtitle',
'hdmv_pgs_subtitle': 'HDMV Presentation Graphic Stream subtitles',
'pgssub': 'HDMV Presentation Graphic Stream subtitles'}
video_formats = {'mov,mp4,m4a,3gp,3g2,mj2': 'QuickTime / MOV',
'matroska,webm': 'Matroska / WebM',
'avi': 'AVI (Audio Video Interleaved)',
'ogg': 'Ogg',
'asf': 'ASF (Advanced / Active Streaming Format)',
'mxf': 'MXF (Material eXchange Format)'}
audio_formats = {'flac': 'raw FLAC',
'mp3': 'MP2/3 (MPEG audio layer 2/3)',
'ogg': 'Ogg'}
image_formats = {'png_pipe': 'piped png sequence',
'bmp_pipe': 'piped bmp sequence',
'gif': 'CompuServe Graphics Interchange Format (GIF)',
'alias_pix': 'Alias/Wavefront PIX image',
'tiff_pipe': 'piped tiff sequence',
'mpeg': 'MPEG-PS (MPEG-2 Program Stream)',
'image2': 'image2 sequence'}
def get_codec_long_name(codec_name):
conversion_table = dict(list(video_codecs.items()) +
list(audio_codecs.items()) +
list(image_codecs.items()) +
list(subtitle_codecs.items()))
return conversion_table.get(codec_name, '')
def get_format_long_name(format_name):
conversion_table = dict(list(video_formats.items()) +
list(audio_formats.items()) +
list(image_formats.items()))
return conversion_table.get(format_name, '')
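# Example lookups (illustrative, based on the tables above; unmapped names fall back to ''):
# get_codec_long_name('h264') -> 'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10'
# get_format_long_name('mxf') -> 'MXF (Material eXchange Format)'
# get_codec_long_name('unknown') -> ''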
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#Obstacle run
#Before use, change the ip below to the robot you want to connect to
#winxos 2012-07-14
import time,math
import wsNaoVisionMT as wsnv
import wsNaoMotion as wsnm
import numpy as np
import obstacleState as state
ground=np.array([[0,0,80],[180,220,255]])
if __name__ == '__main__':
ip="192.168.1.103" #修改此处ip地址为机器人实际连接ip
nv=wsnv.wsNaoVision(ip)
nm=wsnm.wsNaoMotion(ip)
nv.switchCam(1)
nv._gate_min=ground[0]
nv._gate_max=ground[1]
nv.setWindowsOn() #show the cv window; comment out this line to hide it
nv.startMonitor()
nm.stiffnessOn()
nm.poseInit()
nm.headPitchTo(-0.2)
nm._motion.setWalkArmsEnable(True,True)
time.sleep(1)
c=state.findObstacle()
while not isinstance(c,state.finalState):
c.do(nv,nm)
time.sleep(0.1)
nv.stopMonitor()
|
nilq/baby-python
|
python
|
import argparse
import os
import shutil
import stat
if __name__ == "__main__":
if os.getuid() != 0:
print("This script is intended to be run as root!")
print("By doing this, we isolate the commands that truly need privilege.")
print("This script runs: cp, chmod (on only the input/output files)")
exit(1)
parser = argparse.ArgumentParser()
parser.add_argument("config")
parser.add_argument("install_path")
args = parser.parse_args()
shutil.copy(args.config, args.install_path)
config_file = args.install_path
if os.path.isdir(config_file):
config_file = os.path.join(config_file, os.path.basename(args.config))
# chmod the installed file itself (config_file); install_path may be a directory
os.chmod(
config_file, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
)
print("Config installed!")
|
nilq/baby-python
|
python
|
import json
import pygame
from ceEntity import CEEntity
import ceSprite
import ceColor
import ceText
import ceGame
def rowToInts(row):
return [int(s) for s in row.split()]
def clamp(val, mn, mx):
if val<mn:
return mn
elif val>mx:
return mx
else:
return val
class CEStage(CEEntity):
def __init__(self, fn):
super(CEStage, self).__init__()
data = json.load( open('rsrc/stage/'+fn+'.json') )
self.tileset = pygame.image.load( 'rsrc/sprite/tiles/' + data['tileset']+'.png' )
self.tiledata = json.load( open('rsrc/sprite/tiles/' + data['tileset'] + '.json'))
self.tiledata['walls'] = set(self.tiledata['walls'])
self.tileWidth = self.tileset.get_width()/16
self.tiles = [[rowToInts(row) for row in layer] for layer in data['tiles']]
self.name = data['name']
self.music = data['music']
self.animations = data['animations']
self.scripts = data['scripts']
print(self.scripts)
self.timer = 0
self.aspeed = data['anim-speed']
self.aframe = 0
self.contents={}
def update(self, mils):
self.timer += mils
if self.timer > self.aspeed:
self.aframe += 1
self.timer -= self.aspeed
def isWall(self, layer, x, y):
try:
return self.tiles[layer][y][x] in self.tiledata['walls']
except IndexError:
return True
def render(self, surf, camSprite):
camx = clamp(
camSprite.get('x') - ceGame.XSIZE/2, 0, 16*len(self.tiles[0][0]) - ceGame.XSIZE)
camy = clamp(
camSprite.get('y') - ceGame.YSIZE/2, 0, 16*len(self.tiles[0]) - ceGame.YSIZE)
tilex = int(camx/16)+1
ox = 16-camx%16
tiley = int(camy/16)+1
oy = 16-camy%16
for layer in range(len(self.tiles)):
for xpos in range(-1,16):
for ypos in range(-1,14):
try:
tNum = self.tiles[layer][ypos+tiley][xpos+tilex]
if tNum<0:
# this is an animation
frames = self.animations[-tNum-1]['frames']
tNum = frames[self.aframe % len(frames)]
except IndexError:
continue
surf.blit(self.tileset,
(ox+xpos*16, oy+ypos*16),
(16*(tNum%self.tileWidth), 16*int(tNum/self.tileWidth), 16, 16))
return (camx, camy)
def put(self, sprite, x, y):
# TODO: make this aware of tile/platform physics later
self.contents[(x,y)] = sprite
sprite.x = x*16
sprite.y = y*16
def isClear(self, x, y, sizeX, sizeY):
for checkX in list(range(x, x+sizeX, 16))+list(range(x+15, x+sizeX+15, 16)):
for checkY in list(range(y, y+sizeY, 16))+list(range(y+15, y+sizeY+15, 16)):
ctileX = int(checkX/16)
ctileY = int(checkY/16)
if (ctileX,ctileY) in self.contents and self.contents[(ctileX,ctileY)]!=None:
print('collision')
return False
if self.isWall(0, ctileX, ctileY):
return False
return True
def _drawTile(self, surf, n, x, y):
tileX = 16*(n % self.tileWidth)
tileY = 16*(n / self.tileWidth)
surf.blit(self.tileset, (x, y), (tileX, tileY, 16, 16))
def main():
clock = pygame.time.Clock()
scr = ceGame.init()
sprites = []
iris = ceSprite.CESprite('iris', 'player-grid16')
iris.setState('stand-n')
iris.moveTo( (12*16, 24*16) )
iris.set('collideWall', True)
iris.set('collideOther', False)
iris.stage = CEStage('temple')
sprites.append( iris )
frames = 0
while ceGame.running:
frames += 1
mils = clock.tick(60)
ceGame.update()
# TODO: Game should keep track of sprites and propagate update/render to all
iris.stage.update(mils)
sprites.sort(key=(lambda s:s.get('y')))
(camx, camy) = iris.stage.render(scr, sprites[-1])
ceText.drawText(scr, iris.stage.name, 8, 8)
for sprite in sprites:
sprite.update(mils)
sprite.render(scr, camx, camy)
ceGame.render(scr)
if __name__=='__main__':
main()
|
nilq/baby-python
|
python
|
# Generated by Django 3.0 on 2021-03-20 12:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('udaan_app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='QuizResponse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='UserQuestionReponse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_resp', to='udaan_app.Question')),
('quiz_qesponse', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_resp', to='udaan_app.QuizResponse')),
('user_ans', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_resp', to='udaan_app.QuestionOptions')),
],
),
migrations.AddField(
model_name='quizresponse',
name='question_ans',
field=models.ManyToManyField(related_name='quiz_response', through='udaan_app.UserQuestionReponse', to='udaan_app.Question'),
),
migrations.AddField(
model_name='quizresponse',
name='quiz_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='quiz_response', to='udaan_app.Quiz'),
),
]
|
nilq/baby-python
|
python
|
"""Stock feature generation including momentum indicators and volatility
Currently RSI (Relative Strength Index), volatility, and price return rank are successfully calculated for each stock
and for each day based on the basic price data ingested from yahoo. To Run the program,
the stock_price_and_returns.csv file containing time series daily, monthly, and yearly returns
must be generated by running the price_returns.py file and placed in the same directory.
The output of the prgram will be an updated csv file with a new rsi and volatility column.
TODO: percent off 52 week high, and Sharp Ratio
George Krug
04/22/2019
"""
import pandas as pd
import time
from taIndicators import momentum, basic, volatility as vol
import dataframeHandling as dfhandle
import sys
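# Illustrative only: a standalone RSI sketch over a pandas Series of closing prices,
# assuming a simple 14-day rolling mean of gains and losses. The project's real
# calculation lives in taIndicators.momentum; this helper name is hypothetical.
def _rsi_sketch(close, window=14):
    delta = close.diff()
    gain = delta.clip(lower=0).rolling(window).mean()
    loss = (-delta.clip(upper=0)).rolling(window).mean()
    rs = gain / loss
    return 100 - 100 / (1 + rs)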
def clean_and_merge_monthly_and_yearly_dfs(df_yearly, df_monthly):
# Convert to single index on Symbol
df_yearly.set_index(['Symbol'], inplace=True)
df_monthly.set_index(['Symbol'], inplace=True)
# Drop duplicate columns to isolate monthly rankings
try:
df_monthly.drop(
columns=['Open', 'High', 'Low', 'Close', 'Volume', 'AdjClose', 'Pct_Change_Daily', 'Pct_Change_Monthly',
'Pct_Change_Yearly', 'RSI', 'Volatility', 'Sharp_Ratio'], inplace=True)
except Exception as err:
pass
# Declare Final Dataframe to be stored
global final_df
final_df = pd.DataFrame()
# Loop symbol rows in dataframe and merge to add the monthly return rankings to the yearly
for symbol in ticker_list:
tmp = pd.merge(df_yearly.loc[symbol], df_monthly.loc[symbol], on='Date', how='inner')
tmp['Symbol'] = symbol
final_df = final_df.append(tmp)
# Adjusted index before converted or stored
try:
final_df.reset_index(level=0, inplace=True)
final_df['date_of_transaction']=final_df['Date']
final_df.set_index(['Symbol', 'Date'], inplace=True)
final_df.drop(columns=['Yearly_Return', 'Monthly_Return', 'index'], inplace=True)
except Exception as err:
print(err)
return final_df
def get_index_lists(df, ticker_list, date_list):
# Get Index Lists
for symbol, mrow in df.groupby(level=0):
ticker_list.append(symbol)
for date, mrow in df.groupby(level=1):
date_list.append(date)
return ticker_list, date_list
def handle_input_arguments():
"""
Handle input arguments and allow for custom test files
:return:
"""
if len(sys.argv) > 1:
if sys.argv[1] == "-test" and len(sys.argv) == 2:
return "test"
elif sys.argv[1] == "-f" and len(sys.argv) == 3:
return "test"
elif (len(sys.argv) == 2 and sys.argv[1] != "-test") or (len(sys.argv) == 3 and sys.argv[1] != "-f") or len(sys.argv) > 3:  # use != rather than 'is not' for string comparison
print("ERROR: Improper input arguments!\nDefault Test Command:"
" \n\tpython feature-gen.py -test\nCustom Test File Command:\n\tpython feature-gen.py -f <file name>")
return "error"
else:
return "live"
###############################
# Main Method
###############################
if __name__== '__main__':
file_path = "data/stock_prices_and_returns_3.csv"
test_file_path = "data-test/Head_stock_prices_and_returns.csv"
output_file_path = "data/momentum-features.csv"
test_output_file = "data-test/test-momentum.csv"
new_columns = ['RSI', 'Volatility', 'Sharp_Ratio']
ticker_list = []
date_list = []
# Allow custom input file for testing
action = handle_input_arguments()
if action == "test":
file_path = test_file_path
output_file_path = test_output_file
elif action == "error":
exit(1)
print("input file: " + file_path)
print("output file: " + output_file_path)
# convert csv to dataframe, index ticker & date, and add new featur columns
#basic_df = dfhandle.get_dataframe_from_csv(file_path)
max_date=dfhandle.find_max_date()
print('max date '+max_date)
start = time.time()
basic_df=dfhandle.read_table('stock_price_return',max_date)
basic_df['Date']=basic_df['date_of_transaction']
end = time.time()
df = dfhandle.add_columns_to_df(basic_df, new_columns)
basic_df['Date']=basic_df['date_of_transaction']
# get index lists of 3d df to optimize looping
ticker_list, date_list = get_index_lists(df, ticker_list, date_list)
print('Generating Momentum Features\n-------------------------------------------------------------')
print('Updating Dataframe with RSI, Volatility, Sharp Ratio and Performance Rank columns......')
start = time.time()
for symbol in ticker_list:
df = momentum.get_stock_rsi_daily(df, symbol)
df = vol.get_stock_volatility(df, symbol)
# Get Daily adjusted return rankings based on trailing monthly and yearly prices
df_yearly, df_monthly = momentum.get_daily_adjusted_stock_return_rankings(df, ticker_list, date_list)
print(df_yearly.head())
print(df_monthly.head())
# Clean and merge data
final_df = clean_and_merge_monthly_and_yearly_dfs(df_yearly, df_monthly)
######################################################
# OUTPUT DATA #######################################
print("Writing to file: " + output_file_path)
#final_df.to_csv(output_file_path, encoding='utf-8', index=True)
print(final_df.columns)
final_df.reset_index()
final_df.info()
final_df.set_index(['ticker_y', 'date_of_transaction'], inplace=True)
final_df.reset_index(inplace=True)
# initialize percent change positive/negative binary
# copy values from percent change daily before data manipulation
final_df['Pct_Change_Class'] = final_df['Pct_Change_Daily']
# if percent positive, assign 1; else assign 0
final_df['Pct_Change_Class'].where(final_df['Pct_Change_Class'] < 0, other=1, inplace=True)
final_df['Pct_Change_Class'].where(final_df['Pct_Change_Class'] > 0, other=0, inplace=True)
final_df.head()
# set index on symbol
final_df.set_index('ticker_y', inplace=True)
print(final_df.head())
# initialize new rolling average features
final_df['Rolling_Yearly_Mean_Positive_Days'] = final_df['Pct_Change_Class']
final_df['Rolling_Monthly_Mean_Positive_Days'] = final_df['Pct_Change_Class']
final_df['Rolling_Monthly_Mean_Price'] = final_df['AdjClose']
final_df['Rolling_Yearly_Mean_Price'] = final_df['AdjClose']
# use pandas rolling method to calculate moving averages on selected featurs on a monthly and yearly basis
YEARLY_TRADING_DAYS = 252
MONTHLY_TRADING_DAYS = 21
rolling_monthly_up_days = final_df.groupby(level=0)['Rolling_Monthly_Mean_Positive_Days'].rolling(MONTHLY_TRADING_DAYS, min_periods=MONTHLY_TRADING_DAYS).mean()
rolling_yearly_up_days = final_df.groupby(level=0)['Rolling_Yearly_Mean_Positive_Days'].rolling(YEARLY_TRADING_DAYS, min_periods=YEARLY_TRADING_DAYS).mean()
monthly_rolling_average_price = final_df.groupby(level=0)['Rolling_Monthly_Mean_Price'].rolling(MONTHLY_TRADING_DAYS, min_periods=MONTHLY_TRADING_DAYS).mean()
yearly_rolling_average_price = final_df.groupby(level=0)['Rolling_Yearly_Mean_Price'].rolling(YEARLY_TRADING_DAYS, min_periods=YEARLY_TRADING_DAYS).mean()
# copy values into the working stocks dataframe
final_df['Rolling_Monthly_Mean_Positive_Days'] = rolling_monthly_up_days.values
final_df['Rolling_Yearly_Mean_Positive_Days'] = rolling_yearly_up_days.values
final_df['Rolling_Monthly_Mean_Price'] = monthly_rolling_average_price.values
final_df['Rolling_Yearly_Mean_Price'] = yearly_rolling_average_price.values
print(final_df.head())
print(final_df.info())
final_df.set_index(['ticker_x', 'date_of_transaction'], inplace=True)
final_df['Momentum_Quality_Monthly'] = (final_df['Pct_Change_Monthly'] * 100) * (( final_df['Rolling_Monthly_Mean_Positive_Days'] - (1 - final_df['Rolling_Monthly_Mean_Positive_Days'])))
final_df['Momentum_Quality_Yearly'] = (final_df['Pct_Change_Yearly'] * 100) * (( final_df['Rolling_Yearly_Mean_Positive_Days'] - (1 - final_df['Rolling_Yearly_Mean_Positive_Days'])))
print(final_df.head())
spy=dfhandle.read_table('spy_stock_price_return',max_date)
#spy.drop(columns=['Unnamed: 0'], inplace=True)
#df.groupby(level=0)['SPY_Trailing_Month_Return'] = spy['Pct_Change_Monthly']
spy.set_index('date_of_transaction', inplace=True)
spy_trailing_month_return = spy.drop(columns=['sno','Symbol', 'High', 'Low', 'Open', 'Close', 'Volume', 'AdjClose', 'ticker','Pct_Change_Daily', 'Pct_Change_Yearly'])
global spy_df
spy_df = pd.DataFrame()
spy_trailing_month_return['SPY_Trailing_Month_Return'] = spy_trailing_month_return['Pct_Change_Monthly']
spy_trailing_month_return.drop(columns=['Pct_Change_Monthly'], inplace=True)
spy_trailing_month_return.reset_index(inplace=True)
spy_trailing_month_return.drop(columns=['Date'], inplace=True)
print(final_df.info())
for symbol, r in final_df.groupby(level=0):
tmp = r
#print("Sybmol"+symbol)
tmp.reset_index(inplace=True)
tick = pd.merge(tmp, spy_trailing_month_return, how='left', left_index=True, right_index=True)
spy_df = spy_df.append(tick)
spy_df['Symbol']=spy_df['ticker_x']
spy_df['Date']=spy_df['date_of_transaction_x']
spy_df.set_index(['ticker_x', 'date_of_transaction_x'], inplace=True)
print(spy_df.info())
columns=['Symbol','Date','High','Low','Open','Close','Volume','AdjClose','Pct_Change_Daily','Pct_Change_Monthly','Pct_Change_Yearly','RSI','Volatility','Yearly_Return_Rank','Monthly_Return_Rank','Rolling_Yearly_Mean_Positive_Days','Rolling_Monthly_Mean_Positive_Days','Rolling_Monthly_Mean_Price','Rolling_Yearly_Mean_Price','Momentum_Quality_Monthly','Momentum_Quality_Yearly','SPY_Trailing_Month_Return']
print(spy_df[columns].head())
dfhandle.load_table(spy_df[columns],'momentum_features')
end = time.time()
print("Process time: " + str(end - start) + " seconds.")
######################################################
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import pika
def on_request(ch, method, props, body):
n = int(body)
response = n*n
print(" [.] Calculando %s * %s = %s" % (n, n, response))
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag=method.delivery_tag)
credentials = pika.PlainCredentials('quest', 'quest')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue='rpc_queue')
channel.basic_consume(queue='rpc_queue', on_message_callback=on_request)
print(" [x] Awaiting RPC requests")
channel.start_consuming()
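# For reference, a matching RPC client (sketch only, not part of this server) would
# declare an exclusive callback queue and publish with reply_to and a correlation_id:
# result = channel.queue_declare(queue='', exclusive=True)
# channel.basic_publish(exchange='', routing_key='rpc_queue',
#                       properties=pika.BasicProperties(reply_to=result.method.queue,
#                                                       correlation_id='1'),
#                       body='9')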
|
nilq/baby-python
|
python
|
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.utils.http import is_safe_url
from django.utils.translation import override
from django.views.generic import View
from pretalx.common.phrases import phrases
class LocaleSet(View):
def get(self, request, *args, **kwargs):
url = request.GET.get('next', request.META.get('HTTP_REFERER', '/'))
url = url if is_safe_url(url, host=request.get_host()) else '/'
resp = HttpResponseRedirect(url)
locale = request.GET.get('locale')
if locale in [lc for lc, ll in settings.LANGUAGES]:
if request.user.is_authenticated:
request.user.locale = locale
request.user.save()
max_age = 10 * 365 * 24 * 60 * 60
resp.set_cookie(settings.LANGUAGE_COOKIE_NAME, locale, max_age=max_age,
expires=(datetime.utcnow() + timedelta(seconds=max_age)).strftime(
'%a, %d-%b-%Y %H:%M:%S GMT'),
domain=settings.SESSION_COOKIE_DOMAIN)
with override(locale):
messages.success(request, phrases.cfp.locale_change_success)
return resp
|
nilq/baby-python
|
python
|
from numba import jit
@jit
def fibo(n):
if n < 2:
return n
return fibo(n-1) + fibo(n-2)
print(fibo(45))
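# For comparison, an alternative when numba is unavailable is plain memoization
# (an illustrative addition, not part of the original snippet):
from functools import lru_cache

@lru_cache(maxsize=None)
def fibo_cached(n):
    if n < 2:
        return n
    return fibo_cached(n - 1) + fibo_cached(n - 2)

print(fibo_cached(45))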
|
nilq/baby-python
|
python
|
# Training script with LazyLoader
#
# Instead of dumping all input into memory, we lazy load on the fly.
# This can create an IO bottleneck that slows training down, but it lets us train on large datasets such as MetaVideoLazy
import os
from tqdm.auto import tqdm
from opt import config_parser
import logging
import ruamel.yaml
yaml2 = ruamel.yaml.YAML()
from utils import set_logger, printlog
from collections import OrderedDict
import json, random
from renderer import *
from utils import *
from torch.utils.tensorboard import SummaryWriter
from torch.cuda.amp import autocast, GradScaler
import datetime
from torch.utils.data import DataLoader
from dataLoader import dataset_dict
import sys
import pdb
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
renderer = OctreeRender_trilinear_fast
@torch.no_grad()
def evaluation_lazy(test_dataset,tensorf, args, renderer, savePath=None, N_vis=5, prtx='', N_samples=-1,
white_bg=False, ndc_ray=False, compute_extra_metrics=True, device='cuda'):
PSNRs, rgb_maps, depth_maps = [], [], []
ssims,l_alex,l_vgg=[],[],[]
#os.makedirs(savePath+'/img', exist_ok=True)
os.makedirs(savePath+"/img/rgbd", exist_ok=True)
try:
tqdm._instances.clear()
except Exception:
pass
near_far = test_dataset.near_far
#img_eval_interval = 1 if N_vis < 0 else test_dataset.all_rays.shape[0] // N_vis
test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=int(os.cpu_count() * args.dataloader_thread_ratio))
for idx, samples in tqdm(enumerate(test_dataloader), file=sys.stdout):
if N_vis > 0 and idx % N_vis != 0: continue
W, H = test_dataset.img_wh
rays = samples['rays'].view(-1,samples['rays'].shape[-1])
rgb_map, _, depth_map, _, _ = renderer(rays, tensorf, chunk=512, N_samples=N_samples, ndc_ray=ndc_ray, white_bg = white_bg, device=device)
rgb_map = rgb_map.clamp(0.0, 1.0)
rgb_map, depth_map = rgb_map.reshape(H, W, 3).cpu(), depth_map.reshape(H, W).cpu()
depth_map, min_max = visualize_depth_numpy(depth_map.numpy(),near_far)
if True: #temporary predict
gt_rgb = samples['rgbs'].view(H, W, 3)
loss = torch.mean((rgb_map - gt_rgb) ** 2)
PSNRs.append(-10.0 * np.log(loss.item()) / np.log(10.0))
if compute_extra_metrics:
ssim = rgb_ssim(rgb_map, gt_rgb, 1)
l_a = rgb_lpips(gt_rgb.numpy(), rgb_map.numpy(), 'alex', tensorf.device)
l_v = rgb_lpips(gt_rgb.numpy(), rgb_map.numpy(), 'vgg', tensorf.device)
ssims.append(ssim)
l_alex.append(l_a)
l_vgg.append(l_v)
rgb_map = (rgb_map.numpy() * 255).astype('uint8')
# rgb_map = np.concatenate((rgb_map, depth_map), axis=1)
rgb_maps.append(rgb_map)
depth_maps.append(depth_map)
if savePath is not None:
imageio.imwrite(f'{savePath}/img/{prtx}{idx:03d}.png', rgb_map)
rgb_map = np.concatenate((rgb_map, depth_map), axis=1)
imageio.imwrite(f'{savePath}/img/rgbd/{prtx}{idx:03d}.png', rgb_map)
imageio.mimwrite(f'{savePath}/{prtx}video.mp4', np.stack(rgb_maps), fps=30, quality=10)
imageio.mimwrite(f'{savePath}/{prtx}depthvideo.mp4', np.stack(depth_maps), fps=30, quality=10)
if PSNRs:
psnr = np.mean(np.asarray(PSNRs))
if compute_extra_metrics:
ssim = np.mean(np.asarray(ssims))
l_a = np.mean(np.asarray(l_alex))
l_v = np.mean(np.asarray(l_vgg))
np.savetxt(f'{savePath}/{prtx}mean.txt', np.asarray([psnr, ssim, l_a, l_v]))
else:
np.savetxt(f'{savePath}/{prtx}mean.txt', np.asarray([psnr]))
return PSNRs
@torch.no_grad()
def evaluation_path_lazy(test_dataset,tensorf, c2ws, renderer, savePath=None, N_vis=5, prtx='', N_samples=-1,
white_bg=False, ndc_ray=False, compute_extra_metrics=True, device='cuda'):
PSNRs, rgb_maps, depth_maps = [], [], []
ssims,l_alex,l_vgg=[],[],[]
os.makedirs(savePath, exist_ok=True)
os.makedirs(savePath+"/img/rgbd", exist_ok=True)
try:
tqdm._instances.clear()
except Exception:
pass
near_far = test_dataset.near_far
for idx, c2w in enumerate(tqdm(c2ws)):
W, H = test_dataset.img_wh
c2w = torch.FloatTensor(c2w)
rays_o, rays_d = get_rays(test_dataset.directions, c2w) # both (h*w, 3)
if ndc_ray:
rays_o, rays_d = ndc_rays_blender(H, W, test_dataset.focal[0], 1.0, rays_o, rays_d)
if hasattr(test_dataset, 'max_t'):
rays = torch.cat([rays_o, rays_d, torch.ones_like(rays_o[:, :1]) * idx], 1)
else:
rays = torch.cat([rays_o, rays_d], 1) # (h*w, 6)
rgb_map, _, depth_map, _, _ = renderer(rays, tensorf, chunk=512, N_samples=N_samples,
ndc_ray=ndc_ray, white_bg = white_bg, device=device)
rgb_map = rgb_map.clamp(0.0, 1.0)
rgb_map, depth_map = rgb_map.reshape(H, W, 3).cpu(), depth_map.reshape(H, W).cpu()
depth_map, _ = visualize_depth_numpy(depth_map.numpy(),near_far)
rgb_map = (rgb_map.numpy() * 255).astype('uint8')
# rgb_map = np.concatenate((rgb_map, depth_map), axis=1)
rgb_maps.append(rgb_map)
depth_maps.append(depth_map)
if savePath is not None:
imageio.imwrite(f'{savePath}/img/{prtx}{idx:03d}.png', rgb_map)
rgb_map = np.concatenate((rgb_map, depth_map), axis=1)
imageio.imwrite(f'{savePath}/img/rgbd/{prtx}{idx:03d}.png', rgb_map)
imageio.mimwrite(f'{savePath}/{prtx}video.mp4', np.stack(rgb_maps), fps=30, quality=8)
imageio.mimwrite(f'{savePath}/{prtx}depthvideo.mp4', np.stack(depth_maps), fps=30, quality=8)
if PSNRs:
psnr = np.mean(np.asarray(PSNRs))
if compute_extra_metrics:
ssim = np.mean(np.asarray(ssims))
l_a = np.mean(np.asarray(l_alex))
l_v = np.mean(np.asarray(l_vgg))
np.savetxt(f'{savePath}/{prtx}mean.txt', np.asarray([psnr, ssim, l_a, l_v]))
else:
np.savetxt(f'{savePath}/{prtx}mean.txt', np.asarray([psnr]))
return PSNRs
@torch.no_grad()
def render_test(args):
# init dataset
dataset = dataset_dict[args.dataset_name]
test_dataset = get_dataset(args, 'test')
white_bg = test_dataset.white_bg
ndc_ray = args.ndc_ray
if not os.path.exists(args.ckpt):
print('the ckpt path does not exists!!')
return
ckpt = torch.load(args.ckpt, map_location=device)
kwargs = ckpt['kwargs']
kwargs.update({'device': device})
if args.num_frames > 1 or args.model_name == 'TensoRFVideo': #only some model support max_t, so we pass max_t if num_frames provide
kwargs.update({'max_t': args.num_frames})
kwargs.update({'t_keyframe': args.t_keyframe})
kwargs.update({'upsamp_list': args.upsamp_list})
tensorf = eval(args.model_name)(**kwargs)
tensorf.load(ckpt)
#pdb.set_trace()
if args.model_name in ['TensorSph']:
tensorf.set_origin(test_dataset.origin,test_dataset.sph_box,test_dataset.sph_frontback)
tensorf_for_renderer = tensorf
if args.data_parallel:
tensorf_for_renderer = torch.nn.DataParallel(tensorf)
logfolder = os.path.dirname(args.ckpt)
if False and args.render_train:
os.makedirs(f'{logfolder}/imgs_train_all', exist_ok=True)
train_dataset = get_dataset(args, 'train')
train_dataset.is_sampling = False
PSNRs_test = evaluation_lazy(train_dataset,tensorf_for_renderer, args, renderer, f'{logfolder}/imgs_train_all/',
N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
printlog(f'======> {args.expname} test all psnr: {np.mean(PSNRs_test)} <========================')
if True or args.render_test:
test_dataset = get_dataset(args, 'test')
os.makedirs(f'{logfolder}/imgs_test_all', exist_ok=True)
PSNRs_test = evaluation_lazy(test_dataset,tensorf_for_renderer, args, renderer, f'{logfolder}/imgs_test_all/',
N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
printlog(f'======> {args.expname} test all psnr: {np.mean(PSNRs_test)} <========================')
if False and args.render_dynerf:
test_dataset = get_dataset(args, 'test', hold_every_frame=10)
os.makedirs(f'{logfolder}/imgs_test_dynerf', exist_ok=True)
PSNRs_test = evaluation_lazy(test_dataset,tensorf_for_renderer, args, renderer, f'{logfolder}/imgs_test_dynerf/',
N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
printlog(f'======> {args.expname} test_dynerf psnr: {np.mean(PSNRs_test)} <========================')
if True or args.render_path:
c2ws = test_dataset.render_path
print('========>',c2ws.shape)
os.makedirs(f'{logfolder}/imgs_path_all', exist_ok=True)
evaluation_path_lazy(test_dataset,tensorf_for_renderer, c2ws, renderer, f'{logfolder}/imgs_path_all/',
N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
def get_dataset(args, split, hold_every_frame=1, psudo_length=-1):
dataset_class = dataset_dict[args.dataset_name]
dataset = dataset_class(
args.datadir,
split=split,
downsample=args.downsample_train,
is_stack=(split != 'train'),  # assumed intent: stack full images outside training; the original '(split == False)' always evaluated to False
ndc_ray=args.ndc_ray,
max_t=args.num_frames,
hold_every=args.hold_every,
num_rays=args.batch_size,
hold_every_frame=hold_every_frame,
psudo_length=psudo_length
)
return dataset
def reconstruction(args):
train_dataset = get_dataset(args, 'train')
white_bg = train_dataset.white_bg
near_far = train_dataset.near_far
ndc_ray = args.ndc_ray
# init resolution
upsamp_list = args.upsamp_list
update_AlphaMask_list = args.update_AlphaMask_list
n_lamb_sigma = args.n_lamb_sigma
n_lamb_sh = args.n_lamb_sh
if args.add_timestamp:
logfolder = f'{args.basedir}/{args.expname}{datetime.datetime.now().strftime("-%Y%m%d-%H%M%S")}'
else:
logfolder = f'{args.basedir}/{args.expname}'
# init log file
os.makedirs(logfolder, exist_ok=True)
os.makedirs(f'{logfolder}/imgs_vis', exist_ok=True)
os.makedirs(f'{logfolder}/imgs_vis_train', exist_ok=True)
os.makedirs(f'{logfolder}/imgs_rgba', exist_ok=True)
os.makedirs(f'{logfolder}/rgba', exist_ok=True)
gfile_stream = open(os.path.join(logfolder, 'stdout.txt'), 'w')
set_logger(gfile_stream)
printlog('Start Training')
summary_writer = SummaryWriter(logfolder)
with open(os.path.join(logfolder, "config.yml"), "w") as f:
yaml2.dump(vars(args), f)
# init parameters
# tensorVM, renderer = init_parameters(args, train_dataset.scene_bbox.to(device), reso_list[0])
aabb = train_dataset.scene_bbox.to(device)
reso_cur = N_to_reso(args.N_voxel_init, aabb)
if args.ckpt is not None:
ckpt = torch.load(args.ckpt, map_location=device)
kwargs = ckpt['kwargs']
kwargs.update({'device':device})
tensorf = eval(args.model_name)(**kwargs)
tensorf.load(ckpt)
else:
# Pure: Dynamic Ordered dict for easily design a model without conflict
kwargs = OrderedDict([
("aabb", aabb),
("gridSize", reso_cur),
("device", device),
("density_n_comp", n_lamb_sigma),
("appearance_n_comp", n_lamb_sh),
("app_dim", args.data_dim_color),
("near_far", near_far),
("shadingMode", args.shadingMode),
("alphaMask_thres", args.alpha_mask_thre),
("density_shift", args.density_shift),
("distance_scale", args.distance_scale),
("pos_pe",args.pos_pe),
("view_pe",args.view_pe),
("fea_pe", args.fea_pe),
("featureC", args.featureC),
("step_ratio", args.step_ratio),
("fea2denseAct", args.fea2denseAct)
])
if args.num_frames > 1 or args.model_name == 'TensoRFVideo': #only some model support max_t, so we pass max_t if num_frames provide
kwargs["max_t"] = args.num_frames
kwargs["t_keyframe"] = args.t_keyframe
kwargs["upsamp_list"] = args.upsamp_list
if args.model_name == 'TensoRF5dSigma':
kwargs['train_dataset'] = train_dataset
tensorf = eval(args.model_name)(**kwargs)
if args.model_name in ['TensorSph']:
tensorf.set_origin(train_dataset.origin,train_dataset.sph_box,train_dataset.sph_frontback)
grad_vars = tensorf.get_optparam_groups(args.lr_init, args.lr_basis)
optimizer = torch.optim.Adam(grad_vars, betas=(0.9,0.99))
scaler = GradScaler()
training_loop(tensorf, optimizer, scaler, summary_writer, logfolder, args=args, hierarchy_type='coarse') #key frame training
training_loop(tensorf, optimizer, scaler, summary_writer, logfolder, args=args, hierarchy_type='fine') #all frame trainign
tensorf.save(f'{logfolder}/{args.expname}.th')
if args.render_train:
os.makedirs(f'{logfolder}/imgs_train_all', exist_ok=True)
train_dataset = get_dataset(args, 'train')
train_dataset.is_sampling = False
PSNRs_test = evaluation_lazy(train_dataset,tensorf, args, renderer, f'{logfolder}/imgs_train_all/',
N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
printlog(f'======> {args.expname} test all psnr: {np.mean(PSNRs_test)} <========================')
if args.render_test:
test_dataset = get_dataset(args, 'test')
os.makedirs(f'{logfolder}/imgs_test_all', exist_ok=True)
PSNRs_test = evaluation_lazy(test_dataset,tensorf, args, renderer, f'{logfolder}/imgs_test_all/',
N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
printlog(f'======> {args.expname} test all psnr: {np.mean(PSNRs_test)} <========================')
summary_writer.add_scalar('test/psnr_all', np.mean(PSNRs_test), global_step=args.n_iters)
if args.render_dynerf:
test_dataset = get_dataset(args, 'test', hold_every_frame=10)
os.makedirs(f'{logfolder}/imgs_test_dynerf', exist_ok=True)
PSNRs_test = evaluation_lazy(test_dataset,tensorf, args, renderer, f'{logfolder}/imgs_test_dynerf/',
N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
printlog(f'======> {args.expname} test_dynerf psnr: {np.mean(PSNRs_test)} <========================')
summary_writer.add_scalar('test_dynerf/psnr_all', np.mean(PSNRs_test), global_step=args.n_iters)
if args.render_firstframe:
test_dataset = get_dataset(args, 'test', hold_every_frame=args.num_frames)
os.makedirs(f'{logfolder}/imgs_test_dynerf', exist_ok=True)
PSNRs_test = evaluation_lazy(test_dataset,tensorf, args, renderer, f'{logfolder}/imgs_test_firstframe/',
N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
printlog(f'======> {args.expname} test_firstframe psnr: {np.mean(PSNRs_test)} <========================')
summary_writer.add_scalar('test_dynerf/psnr_all', np.mean(PSNRs_test), global_step=args.n_iters)
if args.render_path:
test_dataset = get_dataset(args, 'test')  # make sure a test dataset exists even when render_test was not requested
c2ws = test_dataset.render_path
print('========>',c2ws.shape)
os.makedirs(f'{logfolder}/imgs_path_all', exist_ok=True)
evaluation_path_lazy(test_dataset,tensorf, c2ws, renderer, f'{logfolder}/imgs_path_all/',
N_vis=-1, N_samples=-1, white_bg = white_bg, ndc_ray=ndc_ray,device=device)
def training_loop(tensorf, optimizer, scaler, summary_writer, logfolder, args, hierarchy_type='coarse'):
test_dataset = get_dataset(args, 'test')
train_viz_dataset = get_dataset(args, 'train')
train_viz_dataset.is_sampling = False
white_bg = test_dataset.white_bg
ndc_ray = args.ndc_ray
n_iters = args.keyframe_iters if hierarchy_type=='coarse' else args.n_iters
hold_every_frame = 1# args.t_keyframe if hierarchy_type=='coarse' else 1
train_dataset = get_dataset(args, 'train', hold_every_frame= hold_every_frame, psudo_length=n_iters)
train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=int(os.cpu_count() * args.dataloader_thread_ratio))
TV_weight_density, TV_weight_app = args.TV_weight_density, args.TV_weight_app
if hierarchy_type=='coarse' or args.keyframe_iters < 0:
if args.lr_decay_iters > 0:
lr_factor = args.lr_decay_target_ratio**(1/args.lr_decay_iters)
lr_decay_iters = args.lr_decay_iters
else:
lr_decay_iters = n_iters
lr_factor = args.lr_decay_target_ratio**(1/n_iters)
printlog(f"lr decay {args.lr_decay_target_ratio} {lr_decay_iters}")
else:
printlog(f"continue tuning without decay")
# continue training without further more deacy in fine step
lr_factor = 1.0
TV_weight_density *= args.lr_decay_target_ratio
TV_weight_app *= args.lr_decay_target_ratio
reso_mask = None
#linear in logarithmic space; note that we can upsample only in the coarse stage
upsamp_list = args.upsamp_list
update_AlphaMask_list = args.update_AlphaMask_list
N_voxel_list = (torch.round(torch.exp(torch.linspace(np.log(args.N_voxel_init), np.log(args.N_voxel_final), len(upsamp_list)+1))).long()).tolist()[1:]
ndc_ray = args.ndc_ray
torch.cuda.empty_cache()
PSNRs,PSNRs_test = [],[0]
if not args.ndc_ray:
raise NotImplementedError('haven\'t implemented ray filtering to support non-ndc mode yet')
allrays, allrgbs = train_dataset.all_rays, train_dataset.all_rgbs
allrays, allrgbs = tensorf.filtering_rays(allrays, allrgbs, bbox_only=True)
Ortho_reg_weight = args.Ortho_weight
L1_reg_weight = args.L1_weight_inital
tvreg = TVLoss()
aabb = train_dataset.scene_bbox.to(device)
reso_cur = N_to_reso(args.N_voxel_init if (hierarchy_type=='coarse' or args.keyframe_iters < 0) else args.N_voxel_final, aabb)
nSamples = min(args.nSamples, cal_n_samples(reso_cur,args.step_ratio))
if hierarchy_type == 'coarse':
print("==== Training Coarse (keyframe) level ====")
printlog(f"initial Ortho_reg_weight {Ortho_reg_weight}")
printlog(f"initial L1_reg_weight {L1_reg_weight}")
printlog(f"initial TV_weight density: {TV_weight_density} appearance: {TV_weight_app}")
else:
print("==== Training Fine (all-frame) level ====")
pbar = tqdm(range(n_iters), miniters=args.progress_refresh_rate, file=sys.stdout)
tensorf_for_renderer = tensorf
if args.data_parallel:
tensorf_for_renderer = torch.nn.DataParallel(tensorf)
median_step = int(args.median_ratio * n_iters)
temporal_step = int(args.temporal_ratio * n_iters)
train_iterator = iter(train_dataloader)
if hierarchy_type == 'coarse' and args.median_keyframe:
train_dataloader.dataset.is_median = True
with autocast(enabled=False):
for iteration in pbar:
#enable weight sampling option
if hierarchy_type == 'fine':
if iteration == median_step:
print("apply median sampling...")
train_dataloader.dataset.is_median = True
train_dataloader.dataset.is_temporal = False
if iteration == temporal_step:
print("apply temporal sampling...")
train_dataloader.dataset.is_median = False
train_dataloader.dataset.is_temporal = True
# pick ray_batch from traintring loader
try:
ray_batch = next(train_iterator)
except StopIteration:
train_iterator = iter(train_dataloader)
ray_batch = next(train_iterator)
rays_train = ray_batch['rays'][0]
rgb_train = ray_batch['rgbs'][0].to(device)
rgb_map, alphas_map, depth_map, weights, uncertainty = renderer(rays_train, tensorf_for_renderer, chunk=args.batch_size, N_samples=nSamples, white_bg = train_dataset.white_bg, ndc_ray=ndc_ray, device=device, is_train=True)
loss = torch.mean((rgb_map - rgb_train) ** 2)
# loss
total_loss = loss
if iteration % args.TV_every==0:
if Ortho_reg_weight > 0:
loss_reg = tensorf.vector_comp_diffs()
total_loss += Ortho_reg_weight*loss_reg
summary_writer.add_scalar('train/reg', loss_reg.detach().item(), global_step=iteration)
if L1_reg_weight > 0:
loss_reg_L1 = tensorf.density_L1()
total_loss += L1_reg_weight*loss_reg_L1
summary_writer.add_scalar('train/reg_l1', loss_reg_L1.detach().item(), global_step=iteration)
if TV_weight_density>0:
TV_weight_density *= lr_factor
loss_tv = tensorf.TV_loss_density(tvreg) * TV_weight_density
total_loss = total_loss + loss_tv
summary_writer.add_scalar('train/reg_tv_density', loss_tv.detach().item(), global_step=iteration)
if TV_weight_app>0:
TV_weight_app *= lr_factor
loss_tv = tensorf.TV_loss_app(tvreg)*TV_weight_app  # recompute rather than reuse loss_tv, which is undefined when TV_weight_density == 0
total_loss = total_loss + loss_tv
summary_writer.add_scalar('train/reg_tv_app', loss_tv.detach().item(), global_step=iteration)
if args.grad_scaler:
scaler.scale(total_loss).backward()
scaler.step(optimizer)
scaler.update()
else:
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
loss = loss.detach().item()
PSNRs.append(-10.0 * np.log(loss) / np.log(10.0))
summary_writer.add_scalar('train/PSNR', PSNRs[-1], global_step=iteration)
summary_writer.add_scalar('train/mse', loss, global_step=iteration)
summary_writer.add_scalar('train/learning_rate', optimizer.param_groups[0]['lr'], global_step=iteration)
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * lr_factor
# Print the current values of the losses.
if iteration % args.progress_refresh_rate == 0:
pbar.set_description(
f'Iteration {iteration:05d}:'
+ f' train_psnr = {float(np.mean(PSNRs)):.2f}'
+ f' test_psnr = {float(np.mean(PSNRs_test)):.2f}'
+ f' mse = {loss:.6f}'
)
PSNRs = []
if iteration % args.vis_every == args.vis_every - 1:
PSNRs_test = evaluation_lazy(
test_dataset, tensorf, args, renderer, f'{logfolder}/imgs_vis/',
N_vis=args.N_vis, prtx=f'{iteration:06d}_', N_samples=nSamples,
white_bg = white_bg, ndc_ray=ndc_ray, compute_extra_metrics=False
)
evaluation_lazy(
train_viz_dataset, tensorf, args, renderer, f'{logfolder}/imgs_vis_train/',
N_vis=args.N_vis, prtx=f'{iteration:06d}_', N_samples=nSamples,
white_bg = white_bg, ndc_ray=ndc_ray, compute_extra_metrics=False
)
summary_writer.add_scalar('test/psnr', np.mean(PSNRs_test), global_step=iteration)
logging.info(f'Iteration {iteration} test psnr {np.mean(PSNRs_test)}')
if iteration in update_AlphaMask_list:
if reso_cur[0] * reso_cur[1] * reso_cur[2]<256**3:# update volume resolution
reso_mask = reso_cur
if reso_mask is None:
reso_mask = tuple([256,256,256])
new_aabb = tensorf.updateAlphaMask(tuple(reso_mask))
if iteration == update_AlphaMask_list[0]:
tensorf.shrink(new_aabb)
# tensorVM.alphaMask = None
L1_reg_weight = args.L1_weight_rest
printlog(f"continuing L1_reg_weight {L1_reg_weight}")
if not args.ndc_ray and iteration == update_AlphaMask_list[1]:
# filter rays outside the bbox
allrays,allrgbs = tensorf.filtering_rays(allrays,allrgbs)
trainingSampler = SimpleSampler(allrgbs.shape[0], args.batch_size)
# currently, upsammling uspo
if (hierarchy_type == 'coarse' or args.keyframe_iters < 0) and iteration in upsamp_list:
n_voxels = N_voxel_list.pop(0)
reso_cur = N_to_reso(n_voxels, tensorf.aabb)
nSamples = min(args.nSamples, cal_n_samples(reso_cur,args.step_ratio))
print("Resolution ====== > ")
print(reso_cur)
tensorf.upsample_volume_grid(reso_cur)
torch.cuda.empty_cache()
if args.lr_upsample_reset:
printlog("reset lr to initial")
lr_scale = 1 #0.1 ** (iteration / args.n_iters)
else:
lr_scale = args.lr_decay_target_ratio ** (iteration / args.n_iters)
grad_vars = tensorf.get_optparam_groups(args.lr_init*lr_scale, args.lr_basis*lr_scale)
optimizer = torch.optim.Adam(grad_vars, betas=(0.9, 0.99))
if __name__ == '__main__':
sys.excepthook = colored_hook(os.path.dirname(os.path.realpath(__file__)))
torch.set_default_dtype(torch.float32)
torch.manual_seed(20121202)
np.random.seed(20121202)
args = config_parser()
print(args)
if args.render_only and (args.render_test or args.render_path):
render_test(args)
else:
reconstruction(args)
|
nilq/baby-python
|
python
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TestCase
from accountsynchr.trumba_gws import TrumbaToGws
from accountsynchr.tests import fdao_gws_override
@fdao_gws_override
class TestTrumbaToGws(TestCase):
def test_group_manager(self):
tg = TrumbaToGws()
tg.sync()
self.assertEqual(tg.ttl_editor_grps_synced, 6)
self.assertEqual(tg.ttl_showon_grp_synced, 6)
self.assertEqual(tg.del_editor_perm_counts, 2)
self.assertEqual(tg.del_showon_perm_counts, 2)
self.assertFalse(tg.has_err())
|
nilq/baby-python
|
python
|
# flake8: noqa
from my_happy_pandas._libs import NaT, Period, Timedelta, Timestamp
from my_happy_pandas._libs.missing import NA
from my_happy_pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from my_happy_pandas.core.dtypes.missing import isna, isnull, notna, notnull
from my_happy_pandas.core.algorithms import factorize, unique, value_counts
from my_happy_pandas.core.arrays import Categorical
from my_happy_pandas.core.arrays.boolean import BooleanDtype
from my_happy_pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from my_happy_pandas.core.arrays.string_ import StringDtype
from my_happy_pandas.core.construction import array
from my_happy_pandas.core.groupby import Grouper, NamedAgg
from my_happy_pandas.core.indexes.api import (
CategoricalIndex,
DatetimeIndex,
Float64Index,
Index,
Int64Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
UInt64Index,
)
from my_happy_pandas.core.indexes.datetimes import bdate_range, date_range
from my_happy_pandas.core.indexes.interval import Interval, interval_range
from my_happy_pandas.core.indexes.period import period_range
from my_happy_pandas.core.indexes.timedeltas import timedelta_range
from my_happy_pandas.core.indexing import IndexSlice
from my_happy_pandas.core.series import Series
from my_happy_pandas.core.tools.datetimes import to_datetime
from my_happy_pandas.core.tools.numeric import to_numeric
from my_happy_pandas.core.tools.timedeltas import to_timedelta
from my_happy_pandas.io.formats.format import set_eng_float_format
from my_happy_pandas.tseries.offsets import DateOffset
# DataFrame needs to be imported after NamedAgg to avoid a circular import
from my_happy_pandas.core.frame import DataFrame # isort:skip
|
nilq/baby-python
|
python
|
import numpy as np
import scipy
import GPyOpt
import GPy
from multi_objective import MultiObjective
from multi_outputGP import multi_outputGP
from uKG_SGA import uKG_SGA
from uKG_cf import uKG_cf
from uEI_noiseless import uEI_noiseless
from parameter_distribution import ParameterDistribution
from utility import Utility
from expectation_utility import ExpectationUtility
import cbo
import sys
import time
# --- Function to optimize
m = 5 # Number of attributes
aux_model = []
I = np.linspace(0., 1., 6)
aux_grid = np.meshgrid(I, I, I, I)
grid = np.array([a.flatten() for a in aux_grid]).T
kernel = GPy.kern.SE(input_dim=4, variance=2., lengthscale=0.3)
cov = kernel.K(grid)
mean = np.zeros((6 ** 4,))
for j in range(m):
r = np.random.RandomState(j+7)
Y = r.multivariate_normal(mean, cov)
Y = np.reshape(Y, (6 ** 4, 1))
print(Y[:5, 0])
aux_model.append(GPy.models.GPRegression(grid, Y, kernel, noise_var=1e-10))
def f(X):
X = np.atleast_2d(X)
fX = np.empty((m, X.shape[0]))
for j in range(m):
fX[j, :] = aux_model[j].posterior_mean(X)[:, 0]
return fX
#noise_var = [0.25]*m
objective = MultiObjective(f, as_list=False, output_dim=m)
#objective = MultiObjective(f, noise_var=noise_var, as_list=False, output_dim=m)
# --- Space
space = GPyOpt.Design_space(space=[{'name': 'var', 'type': 'continuous', 'domain': (0, 1), 'dimensionality': 4}])
# --- Model (Multi-output GP)
n_attributes = m
model = multi_outputGP(output_dim=n_attributes, exact_feval=[False] * m, fixed_hyps=False)
# model = multi_outputGP(output_dim=n_attributes, noise_var=noise_var, fixed_hyps=True)
# --- Aquisition optimizer
acq_opt = GPyOpt.optimization.AcquisitionOptimizer(optimizer='lbfgs2', inner_optimizer='lbfgs2', space=space)
# --- Initial design
initial_design = GPyOpt.experiment_design.initial_design('random', space, 10)
# --- Parameter distribution
bounds = [(0, 1)] * 4
starting_points = np.random.rand(100, 4)
#parameter_support = np.empty((1,m))
for j in range(1):
def marginal_func(x):
x_copy = np.atleast_2d(x)
val = aux_model[j].posterior_mean(x_copy)[:, 0]
return -val
best_val_found = np.inf
for x0 in starting_points:
res = scipy.optimize.fmin_l_bfgs_b(marginal_func, x0, approx_grad=True, bounds=bounds)
if best_val_found > res[1]:
# print(res)
best_val_found = res[1]
marginal_opt = res[0]
parameter_support = f(marginal_opt).transpose()
#parameter_support = f(x_opt).T #+ r.normal(scale=1., size=(6, 3))
parameter_dist = np.ones((1,)) / 1
parameter_distribution = ParameterDistribution(continuous=False, support=parameter_support, prob_dist=parameter_dist)
# --- Utility function
def U_func(parameter, y):
#y_aux = np.squeeze(y)
aux = (y.transpose() - parameter).transpose()
return -np.sum(np.square(aux), axis=0)
def dU_func(parameter, y):
y_aux = np.squeeze(y)
return -2*(y_aux - parameter)
U = Utility(func=U_func, dfunc=dU_func, parameter_dist=parameter_distribution, linear=False)
# --- Expectation of utility
def psi(parameter,mu, var):
#mu_aux = np.squeeze(mu)
#var_aux = np.squeeze(var)
aux = (mu.transpose() - parameter).transpose()
val = -np.sum(np.square(aux), axis=0) - np.sum(var, axis=0)
return val
def psi_gradient(parameter,mu,var):
mu_aux = np.squeeze(mu)
var_aux = np.squeeze(var)
gradient = -np.concatenate((2*(mu_aux - parameter), np.ones((len(var_aux),))))
return gradient
expectation_U = ExpectationUtility(psi, psi_gradient)
# --- Compute real optimum value
if True:
bounds = [(0, 1)] * 4
starting_points = np.random.rand(100, 4)
opt_val = 0
parameter_samples = parameter_support
for parameter in parameter_samples:
def marginal_func(x):
x_copy = np.atleast_2d(x)
fx = f(x_copy)
# print('test begin')
# print(parameter)
# print(fx)
val = U_func(parameter, fx)
return -val
best_val_found = np.inf
for x0 in starting_points:
res = scipy.optimize.fmin_l_bfgs_b(marginal_func, x0, approx_grad=True, bounds=bounds)
if best_val_found > res[1]:
# print(res)
best_val_found = res[1]
marginal_opt = res[0]
print('marginal opt')
print(parameter)
print(marginal_opt)
print(f(marginal_opt))
print(-best_val_found)
opt_val -= best_val_found
opt_val /= len(parameter_samples)
print('real optimum')
print(opt_val)
# --- Aquisition function
acquisition = uEI_noiseless(model, space, optimizer=acq_opt, utility=U)
#acquisition = uKG_cf(model, space, optimizer=acq_opt, utility=U, expectation_utility=expectation_U)
# --- Evaluator
evaluator = GPyOpt.core.evaluators.Sequential(acquisition)
# standard BO
max_iter = 50
for i in range(1):
filename = './experiments/test1_EIh_noisy_' + str(i) + '.txt'
bo_model = cbo.CBO(model, space, objective, acquisition, evaluator, initial_design, expectation_utility=expectation_U)
bo_model.run_optimization(max_iter=max_iter, parallel=False, plot=False, results_file=filename)
|
nilq/baby-python
|
python
|
# NOQA
import asyncio
import requests
from xml.etree import ElementTree
from itertools import islice
from discord.ext import commands
class Language:
"""Dictionaries & other word things."""
def __init__(self, bot):
"""Cog constructor."""
self.bot = bot
@commands.command()
async def define(self, word: str):
"""Retrieve a definition of the word."""
api_key = "e02fb0b8-5f3e-4d5c-b868-87dd7de88974"
        # Checks for multiple words and only uses the first
if " " in word:
word = word.split(" ")[0]
url = "http://www.dictionaryapi.com/api/v1/references/collegiate/xml/{}?key={}".format(word.lower(), api_key)
response = requests.get(url)
results = ElementTree.fromstring(response.text)
"""
Tag descriptions:
entry_list - root
entry - ( ͡° ͜ʖ ͡°)
fl - word type
def - contains date and definitions
dt - sub tag of def, contains definitions
suggestion - returns if the word can't be found
"""
suggestions = []
for entry in islice(results, 0, 3):
# Add suggestions to list if the word isn't found
if entry.tag == "suggestion":
suggestions.append(entry.text)
continue
word = entry.find("ew").text
word_type = entry.find("fl").text
word_def = entry.find("def").find("dt").text
try:
# First definition sometimes returns blank results for some
# reason, skipping to the next description tag fixes it.
if word_def == ":":
word_def = entry.find("def").findall("dt")[1].text
await self.bot.say("**{}**\n*{}*\n{}".format(
word, word_type, word_def)
)
except IndexError:
continue
if suggestions:
await self.bot.say(
"That's not a word, maybe you meant: {}".format(
", ".join(suggestions)
)
)
@commands.command()
async def syn(self, word: str):
"""Get a list of 5 synonyms for the requested word."""
api_key = "ce01609f490e4f8c5b5ab55ce80d9530"
url = "http://words.bighugelabs.com/api/2/{}/{}/json".format(
api_key,
word.lower()
)
response = requests.get(url)
if response.status_code == 200:
# Get list of keys
syn_keys = list(response.json().keys())
# Start response
syn_string = "**{}**\n".format(word.title())
# Add synonyms to string
for key in syn_keys:
# Get first 5 synonyms
syn_list = ", ".join(response.json()[key]["syn"][:5])
syn_string += "*{}*\n{}\n".format(key, syn_list)
await self.bot.say(syn_string)
else:
await self.bot.say("No results.")
def setup(bot):
"""Setup function."""
bot.add_cog(Language(bot))
|
nilq/baby-python
|
python
|
import pytest
import skbot.ignition as ign
import skbot.transform as tf
from typing import Tuple, List, Union
from pathlib import Path
joint_types = Union[tf.RotationalJoint, tf.PrismaticJoint]
sdf_folder = Path(__file__).parents[1] / "ignition" / "sdf"
ign.sdformat.generic_sdf.base.WARN_UNSUPPORTED = False
@pytest.fixture()
def panda():
sdf_string = (sdf_folder / "robots" / "panda" / "model.sdf").read_text()
base_frame = ign.sdformat.to_frame_graph(sdf_string)
tool_frame = base_frame.find_frame(".../panda_link8")
joints = list()
for link in tool_frame.links_between(base_frame):
if isinstance(link, (tf.RotationalJoint, tf.PrismaticJoint)):
joints.append(link)
for value, joint in zip([0, -0.785, 0, -2.356, 0, 1.571, 0.785], reversed(joints)):
joint.param = value
return base_frame, joints
@pytest.fixture()
def double_pendulum():
sdf_string = (sdf_folder / "robots" / "double_pendulum" / "model.sdf").read_text()
base_frame = ign.sdformat.to_frame_graph(sdf_string)
tool_frame = base_frame.find_frame(".../lower_link")
joints = list()
for link in tool_frame.links_between(base_frame):
if isinstance(link, (tf.RotationalJoint, tf.PrismaticJoint)):
joints.append(link)
return base_frame, joints
@pytest.fixture()
def circle_bot():
world = tf.Frame(3, name="world")
ellbow = tf.Frame(3, name="ellbow")
tool = tf.Frame(3, name="tool")
rotate = tf.RotationalJoint((0, 0, 1), angle=0)
reach = tf.PrismaticJoint((-1, 0, 0), upper_limit=10, lower_limit=-10)
rotate(world, ellbow)
reach(ellbow, tool)
return world, [rotate, reach]
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import spacetimeformer as stf
from .encoder import VariableDownsample
class SpacetimeformerEmbedding(nn.Module):
def __init__(
self,
d_y,
d_x,
d_model=256,
time_emb_dim=6,
method="spatio-temporal",
downsample_convs=1,
start_token_len=0,
):
super().__init__()
assert method in ["spatio-temporal", "temporal"]
self.method = method
# account for added local position indicator "relative time"
d_x += 1
self.x_emb = stf.Time2Vec(d_x, embed_dim=time_emb_dim * d_x)
if self.method == "temporal":
y_emb_inp_dim = d_y + (time_emb_dim * d_x)
else:
y_emb_inp_dim = 1 + (time_emb_dim * d_x)
self.y_emb = nn.Linear(y_emb_inp_dim, d_model)
if self.method == "spatio-temporal":
self.var_emb = nn.Embedding(num_embeddings=d_y, embedding_dim=d_model)
self.start_token_len = start_token_len
self.given_emb = nn.Embedding(num_embeddings=2, embedding_dim=d_model)
self.downsize_convs = nn.ModuleList(
[VariableDownsample(d_y, d_model) for _ in range(downsample_convs)]
)
self._benchmark_embed_enc = None
self._benchmark_embed_dec = None
self.d_model = d_model
def __call__(self, y, x, is_encoder=True):
if self.method == "spatio-temporal":
val_time_emb, space_emb, var_idxs = self.parallel_spatio_temporal_embed(
y, x, is_encoder
)
else:
val_time_emb, space_emb = self.temporal_embed(y, x, is_encoder)
var_idxs = None
return val_time_emb, space_emb, var_idxs
def temporal_embed(self, y, x, is_encoder=True):
bs, length, d_y = y.shape
local_pos = (
torch.arange(length).view(1, -1, 1).repeat(bs, 1, 1).to(x.device) / length
)
if not self.TIME:
x = torch.zeros_like(x)
x = torch.cat((x, local_pos), dim=-1)
t2v_emb = self.x_emb(x)
emb_inp = torch.cat((y, t2v_emb), dim=-1)
emb = self.y_emb(emb_inp)
# "given" embedding
given = torch.ones((bs, length)).long().to(x.device)
if not is_encoder and self.GIVEN:
given[:, self.start_token_len :] = 0
given_emb = self.given_emb(given)
emb += given_emb
if is_encoder:
# shorten the sequence
for i, conv in enumerate(self.downsize_convs):
emb = conv(emb)
return emb, torch.zeros_like(emb)
def benchmark_spatio_temporal_embed(self, y, x, is_encoder=True):
# use pre-made fake embedding matrix to simulate the fastest
# possible embedding speed and measure whether this implementation
# is a bottleneck. (it isn't)
if self._benchmark_embed_enc is None and is_encoder:
bs, length, d_y = y.shape
self._benchmark_embed_enc = torch.ones(bs, d_y * length, self.d_model).to(
y.device
)
elif self._benchmark_embed_dec is None and not is_encoder:
bs, length, d_y = y.shape
self._benchmark_embed_dec = torch.ones(bs, d_y * length, self.d_model).to(
y.device
)
node_emb = (
self._benchmark_embed_enc if is_encoder else self._benchmark_embed_dec
)
if is_encoder:
for conv in self.downsize_convs:
node_emb = conv(node_emb)
return node_emb, torch.zeros_like(node_emb)
SPACE = True
TIME = True
VAL = True
GIVEN = True
def parallel_spatio_temporal_embed(self, y, x, is_encoder=True):
bs, length, d_y = y.shape
# val + time embedding
y = torch.cat(y.chunk(d_y, dim=-1), dim=1)
local_pos = (
torch.arange(length).view(1, -1, 1).repeat(bs, 1, 1).to(x.device) / length
)
x = torch.cat((x, local_pos), dim=-1)
if not self.TIME:
x = torch.zeros_like(x)
if not self.VAL:
y = torch.zeros_like(y)
t2v_emb = self.x_emb(x).repeat(1, d_y, 1)
val_time_inp = torch.cat((y, t2v_emb), dim=-1)
val_time_emb = self.y_emb(val_time_inp)
# "given" embedding
given = torch.ones((bs, length, d_y)).long().to(x.device)
if not is_encoder and self.GIVEN:
given[:, self.start_token_len :, :] = 0
given = torch.cat(given.chunk(d_y, dim=-1), dim=1).squeeze(-1)
given_emb = self.given_emb(given)
val_time_emb += given_emb
if is_encoder:
for conv in self.downsize_convs:
val_time_emb = conv(val_time_emb)
length //= 2
# var embedding
var_idx = torch.Tensor([[i for j in range(length)] for i in range(d_y)])
var_idx = var_idx.long().to(x.device).view(-1).unsqueeze(0).repeat(bs, 1)
var_idx_true = var_idx.clone()
if not self.SPACE:
var_idx = torch.zeros_like(var_idx)
var_emb = self.var_emb(var_idx)
return val_time_emb, var_emb, var_idx_true
def iter_spatio_temporal_embed(self, y, x, is_encoder=True):
assert len(self.downsize_convs) == 0
bs, length, d_y = y.shape
# split y into d_y sequences
ys = y.chunk(d_y, axis=-1)
# time embedding
if not self.TIME:
x = torch.zeros_like(x)
time_emb = self.x_emb(x)
val_time_embs = []
var_embs = []
for i, y in enumerate(ys):
emb_inp = torch.cat((y, time_emb), dim=-1)
val_time_emb = self.y_emb(emb_inp)
# spatial (variable) embedding for variable i
var_idx = (
torch.Tensor([i for _ in range(length)])
.long()
.to(y.device)
.repeat(bs, 1)
)
if not self.SPACE:
var_idx = torch.zeros_like(var_idx)
var_emb = self.var_emb(var_idx)
val_time_embs.append(val_time_emb)
var_embs.append(self.var_emb(var_idx))
val_time_embs = torch.cat(val_time_embs, dim=1)
var_embs = torch.cat(var_embs, dim=1)
return val_time_embs, var_embs
|
nilq/baby-python
|
python
|
from subprocess import check_output, STDOUT, CalledProcessError
# ffmpeg command:
# ffmpeg -decryption_key 5df1b4e0d7ca82a62177e3518fe2f35a -i "./video_encripted.mp4" -pix_fmt bgr24 -vcodec copy "./video_decripted.mp4"
schema_encript = "cenc-aes-ctr"
key_encript = "5df1b4e0d7ca82a62177e3518fe2f35a"
kid_encript = "d0d28b3dd265e02ccf4612d4bd22c24f"
path_input_video = "./video.mp4"
path_output_video = "./video_encripted.mp4"
ffmpeg_command = ['ffmpeg',
'-i', path_input_video,
"-vcodec", "copy",
"-encryption_scheme", schema_encript,
"-encryption_key", key_encript,
"-encryption_kid", kid_encript, path_output_video]
try:
output_ffmpeg_execution = check_output(ffmpeg_command, stderr=STDOUT)
print(output_ffmpeg_execution)
except CalledProcessError as e:
print(e)
print(e.output)
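# Hedged sketch (not part of the original script): the decryption counterpart,
# mirroring the ffmpeg command quoted in the comment at the top of this file.
# The paths below are assumptions chosen to match the names used above.
path_encrypted_video = "./video_encripted.mp4"
path_decrypted_video = "./video_decripted.mp4"
ffmpeg_decrypt_command = ['ffmpeg',
                          '-decryption_key', key_encript,
                          '-i', path_encrypted_video,
                          '-vcodec', 'copy',
                          path_decrypted_video]
try:
    output_ffmpeg_decryption = check_output(ffmpeg_decrypt_command, stderr=STDOUT)
    print(output_ffmpeg_decryption)
except CalledProcessError as e:
    print(e)
    print(e.output)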
|
nilq/baby-python
|
python
|
/usr/lib64/python2.7/sre_parse.py
|
nilq/baby-python
|
python
|
import sys
sys.path.append("/home/shansixioing/tools")
from gen_utils import master_run
import random
import numpy as np
import glob
seed = 12342
random.seed(seed)
np.random.seed(seed)
import time
def main():
# gpu_ls = ['babygroot0', 'babygroot1', 'babygroot3', 'groot0', 'groot1', 'groot2', 'groot3', 'nebula0',
# 'nebula1', 'nebula2']
# gpu_ls = ['george0', 'george1', 'george2', 'george3', 'fred0', 'fred1', 'fred2', 'nebula0', 'nebula1',
# 'nebula2']
gpu_ls = {
# 'george0': 3,
# 'george1': 2,
'george2': 1,
'george3': 1,
# 'fred0': 2,
# 'fred1': 2,
# 'fred2': 1,
# 'fred3': 1,
# 'nebula0': 3,
# 'nebula1': 3,
# 'nebula2': 3,
# # 'babygroot0': 2,
# 'babygroot1': 2,
# 'babygroot2': 2,
# 'babygroot3': 2,
# 'groot0': 2,
# 'groot1': 2,
# 'groot2': 2,
# 'groot3': 2,
}
all_queries_to_run = []
exp_names = []
for directory in glob.glob("/home/shansixioing/data/fawkes_test_small2/*/"):
exp_names.append(directory)
# , 'high'
print(exp_names)
time.sleep(2)
for mode in ['high']:
for exp_name in exp_names:
arg_string = "python3 protection.py -d {} -m {} --batch-size 20 -g {} --debug".format(
exp_name, mode, "GPUID"
)
print(arg_string)
args = arg_string.split(" ")
args = [str(x) for x in args]
all_queries_to_run.append(args)
master_run(all_queries_to_run, gpu_ls, max_num=None, rest=1)
print("Completed")
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
class BaseTask:
def run(self):
raise Exception("Method is not implemented")
def get_status(self):
raise Exception("Method is not implemented")
def wipe(self):
pass
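# Hedged example (not part of the original module): a minimal concrete task
# showing how the BaseTask interface above is assumed to be implemented.
class SleepTask(BaseTask):
    def __init__(self):
        self._status = "pending"

    def run(self):
        self._status = "done"

    def get_status(self):
        return self._status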
|
nilq/baby-python
|
python
|
# Copyright 2019 The IEVA-DGM Authors. All rights reserved.
# Use of this source code is governed by a MIT-style license that can be
# found in the LICENSE file.
# mpas dataset
from __future__ import absolute_import, division, print_function
import os
import pandas as pd
import numpy as np
from skimage import io, transform
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
class MPASDataset(Dataset):
def __init__(self, root, train=True, data_len=0, transform=None):
self.root = root
self.train = train
self.data_len = data_len
self.transform = transform
if self.train:
self.filenames = pd.read_csv(os.path.join(root, "train/filenames.txt"),
sep=" ", header=None)
self.params = np.load(os.path.join(root, "train/params.npy"))
else:
self.filenames = pd.read_csv(os.path.join(root, "test/filenames.txt"),
sep=" ", header=None)
self.params = np.load(os.path.join(root, "test/params.npy"))
# TODO(wenbin): deal with data_len correctly.
def __len__(self):
if self.data_len:
return self.data_len
else:
return len(self.params)
def __getitem__(self, index):
if type(index) == torch.Tensor:
index = index.item()
params = self.params[index]
sparams = np.copy(params[1:2])
vops = np.copy(params[2:5])
vparams = np.zeros(3, dtype=np.float32)
vparams[0] = np.cos(np.deg2rad(params[5]))
vparams[1] = np.sin(np.deg2rad(params[5]))
vparams[2] = params[6] / 90.
if self.train:
img_name = os.path.join(self.root, "train/" + self.filenames.iloc[index][0])
else:
img_name = os.path.join(self.root, "test/" + self.filenames.iloc[index][0])
image = io.imread(img_name)[:, :, 0:3]
sample = {"image": image, "sparams": sparams, "vops": vops, "vparams": vparams}
if self.transform:
sample = self.transform(sample)
return sample
# utility functions
import matplotlib.pyplot as plt  # needed by imshow below; previously not imported
def imshow(image):
plt.imshow(image.numpy().transpose((1, 2, 0)))
# data transformation
class Resize(object):
def __init__(self, size):
assert isinstance(size, (int, tuple))
self.size = size
def __call__(self, sample):
image = sample["image"]
sparams = sample["sparams"]
vops = sample["vops"]
vparams = sample["vparams"]
h, w = image.shape[:2]
if isinstance(self.size, int):
if h > w:
new_h, new_w = self.size * h / w, self.size
else:
new_h, new_w = self.size, self.size * w / h
else:
new_h, new_w = self.size
new_h, new_w = int(new_h), int(new_w)
image = transform.resize(
image, (new_h, new_w), order=1, mode="reflect",
preserve_range=True, anti_aliasing=True).astype(np.float32)
return {"image": image, "sparams": sparams, "vops": vops, "vparams": vparams}
class Normalize(object):
def __call__(self, sample):
image = sample["image"]
sparams = sample["sparams"]
vops = sample["vops"]
vparams = sample["vparams"]
image = (image.astype(np.float32) - 127.5) / 127.5
# sparams min [1.]
# max [4.]
sparams = (sparams - np.array([2.5], dtype=np.float32)) / \
np.array([1.5], dtype=np.float32)
return {"image": image, "sparams": sparams, "vops": vops, "vparams": vparams}
class ToTensor(object):
def __call__(self, sample):
image = sample["image"]
sparams = sample["sparams"]
vops = sample["vops"]
vparams = sample["vparams"]
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
image = image.transpose((2, 0, 1))
return {"image": torch.from_numpy(image),
"sparams": torch.from_numpy(sparams),
"vops": torch.from_numpy(vops),
"vparams": torch.from_numpy(vparams)}
# # data verification
# import matplotlib.pyplot as plt
# dataset = MPASDataset(
# root="/Users/rhythm/Desktop/mpas",
# train=False,
# transform=transforms.Compose([Resize(64), Normalize(), ToTensor()]))
# loader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=4)
# samples = iter(loader).next()
# print(samples)
# # fig = plt.figure()
# # imshow(utils.make_grid(((samples["image"] + 1.) * .5)))
# # plt.show()
|
nilq/baby-python
|
python
|
import os
from pkg_resources import resource_filename
import pandas as pd
def load_titanic(return_X_y: bool = False, as_frame: bool = False):
"""
Loads in a subset of the titanic dataset. You can find the full dataset [here](https://www.kaggle.com/c/titanic/data).
Arguments:
return_X_y: return a tuple of (`X`, `y`) for convenience
as_frame: return all the data as a pandas dataframe
Usage:
```python
from hulearn.datasets import load_titanic
df = load_titanic(as_frame=True)
X, y = load_titanic(return_X_y=True)
```
"""
filepath = resource_filename("hulearn", os.path.join("data", "titanic.zip"))
df = pd.read_csv(filepath)
if as_frame:
return df
X, y = (
df[["pclass", "name", "sex", "age", "fare", "sibsp", "parch"]].values,
df["survived"].values,
)
if return_X_y:
return X, y
return {"data": X, "target": y}
def load_fish(return_X_y: bool = False, as_frame: bool = False):
"""
Loads in a subset of the Fish market dataset. You can find the full dataset [here](https://www.kaggle.com/aungpyaeap/fish-market).
Arguments:
return_X_y: return a tuple of (`X`, `y`) for convenience
as_frame: return all the data as a pandas dataframe
Usage:
```python
from hulearn.datasets import load_fish
df = load_fish(as_frame=True)
X, y = load_fish(return_X_y=True)
```
"""
filepath = resource_filename("hulearn", os.path.join("data", "fish.zip"))
df = pd.read_csv(filepath)
if as_frame:
return df
X, y = (
df[["Species", "Length1", "Length2", "Length3", "Height", "Width"]].values,
df["Weight"].values,
)
if return_X_y:
return X, y
return {"data": X, "target": y}
|
nilq/baby-python
|
python
|
# Contents of test_cart_1d.py
#===============================================================================
# TEST CartDecomposition and CartDataExchanger in 1D
#===============================================================================
def run_cart_1d( verbose=False ):
import numpy as np
from mpi4py import MPI
from psydac.ddm.cart import CartDecomposition, CartDataExchanger
#---------------------------------------------------------------------------
# INPUT PARAMETERS
#---------------------------------------------------------------------------
# Number of elements
n1 = 135
# Padding ('thickness' of ghost region)
p1 = 3
# Periodicity
period1 = True
#---------------------------------------------------------------------------
# DOMAIN DECOMPOSITION
#---------------------------------------------------------------------------
# Parallel info
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
# Decomposition of Cartesian domain
cart = CartDecomposition(
npts = [n1+1],
pads = [p1],
periods = [period1],
reorder = False,
comm = comm
)
# Local 1D array (extended domain)
u = np.zeros( cart.shape, dtype=int )
# Global indices of first and last elements of array
s1, = cart.starts
e1, = cart.ends
# Create object in charge of exchanging data between subdomains
synchronizer = CartDataExchanger( cart, u.dtype )
# Print some info
if verbose:
if rank == 0:
print( "" )
for k in range(size):
if k == rank:
print( "RANK = {}".format( rank ) )
print( "---------" )
print( ". s1:e1 = {:2d}:{:2d}".format( s1,e1 ) )
print( "", flush=True )
comm.Barrier()
#---------------------------------------------------------------------------
# TEST
#---------------------------------------------------------------------------
# Fill in true domain with u[i1_loc]=i1_glob
u[p1:-p1] = [i1 for i1 in range(s1,e1+1)]
# Update ghost regions
synchronizer.update_ghost_regions( u )
#---------------------------------------------------------------------------
# CHECK RESULTS
#---------------------------------------------------------------------------
# Verify that ghost cells contain correct data (note periodic domain!)
success = all( u[:] == [i1%(n1+1) for i1 in range(s1-p1,e1+p1+1)] )
# MASTER only: collect information from all processes
success_global = comm.reduce( success, op=MPI.LAND, root=0 )
return locals()
#===============================================================================
# RUN TEST WITH PYTEST
#===============================================================================
import pytest
@pytest.mark.parallel
def test_cart_1d():
namespace = run_cart_1d()
assert namespace['success']
#===============================================================================
# RUN TEST MANUALLY
#===============================================================================
if __name__=='__main__':
locals().update( run_cart_1d( verbose=True ) )
# Print error messages (if any) in orderly fashion
for k in range(size):
if k == rank and not success:
print( "Rank {}: wrong ghost cell data!".format( rank ), flush=True )
comm.Barrier()
if rank == 0:
if success_global:
print( "PASSED", end='\n\n', flush=True )
else:
print( "FAILED", end='\n\n', flush=True )
|
nilq/baby-python
|
python
|
from django.shortcuts import get_object_or_404
from django import template
from mailing.models import CustomerLoyaltyElement
from accounts.models import CorporateProfile, CompanyName
from travelling.models import Trip, Rating
register = template.Library()
@register.inclusion_tag('mailing/get-loyalty-element.html', takes_context=True)
def render_loyalty_window(context, user):
context['loyalty_elements'] = []
if user.is_company:
profile = get_object_or_404(CorporateProfile, admin=user)
trips = Trip.objects.filter(company=profile.company_name)
review_counter = 0
for trip in trips:
review_counter += Rating.objects.filter(trip=trip).count()
if review_counter >= 30 or user.is_superuser:
context['loyalty_elements'].append(CustomerLoyaltyElement.objects.get(pk=1))
return context
|
nilq/baby-python
|
python
|
# coding=utf-8
"""
Emulate a gmetric client for usage with
[Ganglia Monitoring System](http://ganglia.sourceforge.net/)
"""
from . Handler import Handler
import logging
try:
import gmetric
except ImportError:
gmetric = None
class GmetricHandler(Handler):
"""
Implements the abstract Handler class, sending data the same way that
gmetric does.
"""
def __init__(self, config=None):
"""
Create a new instance of the GmetricHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
if gmetric is None:
logging.error("Failed to load gmetric module")
return
# Initialize Data
self.socket = None
# Initialize Options
self.host = self.config['host']
self.port = int(self.config['port'])
self.protocol = self.config['protocol']
if not self.protocol:
self.protocol = 'udp'
# Initialize
self.gmetric = gmetric.Gmetric(self.host, self.port, self.protocol)
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(GmetricHandler, self).get_default_config_help()
config.update({
'host': 'Hostname',
'port': 'Port',
'protocol': 'udp or tcp',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(GmetricHandler, self).get_default_config()
config.update({
'host': 'localhost',
'port': 8651,
'protocol': 'udp',
})
return config
def __del__(self):
"""
Destroy instance of the GmetricHandler class
"""
self._close()
def process(self, metric):
"""
Process a metric by sending it to a gmond instance
"""
# Just send the data as a string
self._send(metric)
def _send(self, metric):
"""
Send data to gmond.
"""
metric_name = self.get_name_from_path(metric.path)
tmax = "60"
dmax = "0"
slope = "both"
# FIXME: Badness, shouldn't *assume* double type
metric_type = "double"
units = ""
group = ""
self.gmetric.send(metric_name,
metric.value,
metric_type,
units,
slope,
tmax,
dmax,
group)
def _close(self):
"""
Close the connection
"""
self.gmetric = None
|
nilq/baby-python
|
python
|
from datetime import datetime
from pathlib import Path
from textwrap import dedent
import os
import pwd
import subprocess
import sys
import textwrap
import click
import jinja2
STATUS_CLASSIFIERS = {
"planning": "Development Status :: 1 - Planning",
"prealpha": "Development Status :: 2 - Pre-Alpha",
"alpha": "Development Status :: 3 - Alpha",
"beta": "Development Status :: 4 - Beta",
"stable": "Development Status :: 5 - Production/Stable",
"mature": "Development Status :: 6 - Mature",
"inactive": "Development Status :: 7 - Inactive",
}
VERSION_CLASSIFIERS = {
"pypy2": "Programming Language :: Python :: 2.7",
"pypy3": "Programming Language :: Python :: 3.6",
"py27": "Programming Language :: Python :: 2.7",
"py35": "Programming Language :: Python :: 3.5",
"py36": "Programming Language :: Python :: 3.6",
"py37": "Programming Language :: Python :: 3.7",
"py38": "Programming Language :: Python :: 3.8",
"py39": "Programming Language :: Python :: 3.9",
"py310": "Programming Language :: Python :: 3.10",
"jython": "Programming Language :: Python :: 2.7",
}
TEST_DEPS = {
"pytest": ["pytest"],
"twisted.trial": ["twisted"],
"virtue": ["virtue"],
}
TEMPLATE = Path(__file__).with_name("template")
CODECOV_URL = "https://codecov.io/gh/Julian"
PYPI_TOKEN_URL = "https://pypi.org/manage/account/token/"
READTHEDOCS_IMPORT_URL = "https://readthedocs.org/dashboard/import/manual/"
def dedented(*args, **kwargs):
return textwrap.dedent(*args, **kwargs).lstrip("\n")
@click.command()
@click.argument("name")
@click.option(
"--author",
default=pwd.getpwuid(os.getuid()).pw_gecos.partition(",")[0],
help="the name of the package author",
)
@click.option(
"--author-email",
default=None,
help="the package author's email",
)
@click.option(
"-c",
"--cli",
multiple=True,
help="include a CLI in the resulting package with the given name",
)
@click.option(
"--readme",
default="",
help="a (rst) README for the package",
)
@click.option(
"-t",
"--test-runner",
default="virtue",
type=click.Choice(TEST_DEPS.keys()),
help="the test runner to use",
)
@click.option(
"-s",
"--supports",
multiple=True,
type=click.Choice(sorted(VERSION_CLASSIFIERS)),
default=["py37", "py38", "py39", "pypy3"],
help="a version of Python supported by the package",
)
@click.option(
"--status",
type=click.Choice(STATUS_CLASSIFIERS),
default="alpha",
help="the initial package development status",
)
@click.option(
"--docs/--no-docs",
default=False,
help="generate a Sphinx documentation template for the new package",
)
@click.option(
"--single",
"--no-package",
"single_module",
is_flag=True,
default=False,
help="create a single module rather than a package.",
)
@click.option(
"--bare/--no-bare",
"bare",
default=False,
help="only create the core source files.",
)
@click.option(
"--cffi/--no-cffi",
default=False,
help="include a build script for CFFI modules",
)
@click.option(
"--style/--no-style",
"style",
default=True,
help="(don't) run pyflakes by default in tox runs.",
)
@click.option(
"--init-vcs/--no-init-vcs",
default=True,
help="don't initialize a VCS.",
)
@click.option(
"--closed/--open",
default=False,
help="create a closed source package.",
)
@click.version_option(prog_name="mkpkg")
def main(
name,
author,
author_email,
cffi,
cli,
readme,
test_runner,
supports,
status,
docs,
single_module,
bare,
style,
init_vcs,
closed,
):
"""
Oh how exciting! Create a new Python package.
"""
if name.startswith("python-"):
package_name = name[len("python-"):]
else:
package_name = name
package_name = package_name.lower().replace("-", "_")
env = jinja2.Environment(
loader=jinja2.PackageLoader("mkpkg", "template"),
undefined=jinja2.StrictUndefined,
keep_trailing_newline=True,
)
env.globals.update(
author=author,
cffi=cffi,
cli=cli,
closed=closed,
docs=docs,
name=name,
now=datetime.now(),
package_name=package_name,
single_module=single_module,
style=style,
supports=supports,
test_runner=test_runner,
)
package = Path(package_name)
if single_module:
tests = u"{toxinidir}/tests.py"
if len(cli) > 1:
sys.exit("Cannot create a single module with multiple CLIs.")
elif cli:
console_scripts = [f"{cli[0]} = {package_name}:main"]
script = env.get_template("package/_cli.py.j2").render(
program_name=cli[0],
)
else:
console_scripts = []
script = u""
script_name = package_name + ".py"
core_source_paths = {
script_name: script,
"tests.py": env.get_template("tests.py.j2").render(),
}
style_paths = ["{toxinidir}/" + script_name, tests]
else:
tests = package_name
core_source_paths = {
package / "tests" / "__init__.py": u"",
package / "__init__.py": env.get_template(
"package/__init__.py.j2",
).render(),
}
style_paths = ["{toxinidir}/" + package_name]
if cffi:
core_source_paths[package / "_build.py"] = env.get_template(
"package/_build.py.j2",
).render(cname=_cname(name))
if len(cli) == 1:
console_scripts = [f"{cli[0]} = {package_name}._cli:main"]
core_source_paths[package / "_cli.py"] = env.get_template(
"package/_cli.py.j2",
).render(program_name=cli[0])
core_source_paths[package / "__main__.py"] = env.get_template(
"package/__main__.py.j2",
).render()
else:
console_scripts = [
f"{each} = {package_name}._{each}:main" for each in cli
]
core_source_paths.update(
(
package / ("_" + each + ".py"),
env.get_template("package/_cli.py.j2").render(
program_name=each,
),
) for each in cli
)
install_requires = []
if cffi:
install_requires.append("cffi>=1.0.0")
if console_scripts:
install_requires.append("click")
files = {
"README.rst": env.get_template("README.rst.j2").render(
contents=readme,
),
"COPYING": env.get_template("COPYING.j2").render(),
"MANIFEST.in": template("MANIFEST.in"),
"pyproject.toml": env.get_template("pyproject.toml.j2").render(),
"setup.cfg": env.get_template("setup.cfg.j2").render(
install_requires=install_requires,
console_scripts=console_scripts,
author_email=(
author_email or u"Julian+" + package_name + u"@GrayVines.com"
),
status_classifier=STATUS_CLASSIFIERS[status],
version_classifiers={
VERSION_CLASSIFIERS[each]
for each in supports
if each in VERSION_CLASSIFIERS
},
py2=any(
version.startswith("py2")
or version in {"jython", "pypy2"}
for version in supports
),
py3=any(
version.startswith("py3")
or version == "pypy3"
for version in supports
),
cpython=any(
version not in {"jython", "pypy2", "pypy3"}
for version in supports
),
pypy="pypy2" in supports or "pypy3" in supports,
jython="jython" in supports,
),
".coveragerc": env.get_template(".coveragerc.j2").render(),
"tox.ini": env.get_template("tox.ini.j2").render(
test_deps=TEST_DEPS[test_runner],
tests=tests,
style_paths=style_paths,
),
".testr.conf": template(".testr.conf"),
}
if cffi:
files["setup.py"] = env.get_template("setup.py.j2").render()
if not closed:
for each in (TEMPLATE / ".github" / "workflows").iterdir():
files[".github/workflows/" + each.name] = each.read_text()
files[".github/FUNDING.yml"] = template(".github/FUNDING.yml")
files[".github/SECURITY.md"] = env.get_template(
".github/SECURITY.md.j2",
).render()
files["codecov.yml"] = template("codecov.yml")
root = Path(name)
if bare:
targets = core_source_paths
else:
files.update(core_source_paths)
targets = files
root.mkdir()
for path, content in targets.items():
path = root / path
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(dedented(content))
if docs:
(root / "docs").mkdir()
(root / "docs" / "requirements.txt").write_text(
template("docs", "requirements.txt"),
)
subprocess.check_call(
[
sys.executable,
"-m", "sphinx.cmd.quickstart",
"--quiet",
"--project", name,
"--author", author,
"--release", "",
"--ext-autodoc",
"--ext-coverage",
"--ext-doctest",
"--ext-intersphinx",
"--ext-viewcode",
"--extensions", "sphinx.ext.napoleon",
"--extensions", "sphinxcontrib.spelling",
"--makefile",
"--no-batchfile",
str(root / "docs"),
],
)
# Fix sphinx-quickstart not writing a trailing newline.
with root.joinpath("docs", "conf.py").open("a") as file:
file.write("\n")
(root / "docs" / "index.rst").write_text(template("docs", "index.rst"))
click.echo(f"Set up documentation at: {READTHEDOCS_IMPORT_URL}")
if init_vcs and not bare:
subprocess.check_call(["git", "init", "--quiet", name])
git_dir = root / ".git"
subprocess.check_call(
[
"git",
"--git-dir", str(git_dir),
"--work-tree", name,
"add", "COPYING",
])
subprocess.check_call(
[
"git",
"--git-dir", str(git_dir),
"commit", "--quiet", "-m", "Initial commit",
],
)
if not closed:
click.echo(
dedent(
f"""
Set up:
* a PyPI token from {PYPI_TOKEN_URL} named
'GitHub Actions - {name}'
* a CodeCov token from {CODECOV_URL}/{name}
and include them in the GitHub secrets at
https://github.com/Julian/{name}/settings/secrets
""",
),
)
def template(*segments):
return TEMPLATE.joinpath(*segments).read_text()
def _cname(name):
if name.endswith("-cffi"):
name = name[:-len("-cffi")]
if name.startswith("lib"):
name = name[len("lib"):]
return "_" + name
|
nilq/baby-python
|
python
|
__version__ = "0.0.9"
from .core import *
|
nilq/baby-python
|
python
|
import sys
__version__ = "0.1"
from .dicodon_optimization import (
optimize_dicodon_usage,
dicodon_count_from_sequences,
codon_count_from_sequences,
dicodon_score_dict_from_sequences,
score,
translate_to_aa,
)
from .fasta import parse_fasta_to_dict
|
nilq/baby-python
|
python
|
import pywhatkit
import speech_recognition as sr
import pyttsx3
r = sr.Recognizer()
def SpeakText(command):
engine = pyttsx3.init()
engine.say(command)
engine.runAndWait()
try:
with sr.Microphone() as source2:
r.adjust_for_ambient_noise(source2, duration=0.2)
audio2 = r.listen(source2)
MyText = r.recognize_google(audio2)
MyText = MyText.lower()
print("Playing "+MyText)
SpeakText(MyText)
except:
pass
try:
pywhatkit.playonyt(MyText)
print("Playing...")
except:
# printing the error message
print("Network Error Occured")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import sys
import getopt
import os
import json
from typing import Dict
from typing import List
def showhow():
print("configfilter.py -t tpldir -o outdir [-p <pattern>] [-v] <key-value.json>")
print(" -t: 設定ファイルのテンプレートが格納されたディレクトリ")
print(" -o: 処理された設定ファイルの出力先ディレクトリ")
print(" -p: パラメータ部を示すパターン。デフォルトは ##")
print(" -v: verboseモード")
print(" key-value.json: パラメータの実値が定義されたjsonファイル")
sys.exit(1)
def load_option(v:List[str]) -> Dict:
option = {"tpldir": "", "outdir": "", "pattern": "##", "verbose":False, "kv": ""}
try:
shortopts = "t:o:p:v"
opts, args = getopt.getopt(v, shortopts)
for o in opts:
flag = o[0]
if flag == "-v":
option["verbose"] = True
elif flag == "-t":
option["tpldir"] = o[1]
elif flag == "-o":
option["outdir"] = o[1]
elif flag == "-p":
option["pattern"] = o[1]
if len(args) != 1:
showhow()
option["kv"] = args[0]
if option["verbose"]:
print("Template Dir:", option["tpldir"])
print("Output Dir:", option["outdir"])
print("Patten:", option["pattern"])
print("Key-Value-JSON:", option["kv"])
# Check parameters.
if not os.path.isdir(option["tpldir"]):
print("Not a directory:", option["tpldir"])
showhow()
if not os.path.isdir(option["outdir"]):
print("Not a directory:", option["tpldir"])
showhow()
if option["tpldir"] == option["outdir"]:
print("Can't specify same directories -t and -o")
showhow()
if not os.path.isfile(option["kv"]):
print("Invalid path is specified:", option["kv"])
showhow()
except getopt.GetoptError:
showhow()
return option
class ConfigFilter:
def __init__(self, tpldir:str, outdir:str, kv:str):
self.tpldir = tpldir
self.outdir = outdir
self.verbose = False
self.pattern = "##"
self.kv:List[KeyWord] = []
json_file = open(kv, 'r')
kv_json = json.load(json_file)
for k in kv_json:
keyword = KeyWord(k, kv_json[k], self.pattern)
self.kv.append(keyword)
def set_verbose(self, v:bool) -> None:
self.verbose = v
def set_pattern(self, p:str) -> None:
self.pattern = p
def start(self) -> None:
if self.verbose:
print(self.kv)
conf_paths = os.listdir(path=self.tpldir)
for c in conf_paths:
path = self.tpldir + '/' + c
if os.path.isfile(path):
if self.verbose:
print("File:" + c + " to " + self.outdir)
cg = ConfigGenerator(c, self.tpldir, self.outdir, self.pattern, self.kv, self.verbose)
cg.start()
def check(self) -> None:
for kw in self.kv:
kw.print_result(self.verbose)
class KeyWord:
def __init__(self, key:str, value:str, pattern:str):
self.key = pattern + key + pattern
self.value = value
self.count: int = 0
self.replaced = False
def replace(self, line:str) -> str:
if self.key in line:
self.replaced = True
self.count += 1
newline = line.replace(self.key, self.value)
return newline
else:
self.replaced = False
return line
def is_replaced(self) -> bool:
return self.replaced
def print_result(self, verbose:bool) -> None:
if self.count == 0:
print("WARN:" + self.key + " is not used in any files")
elif verbose:
print(self.key + " used " + str(self.count) + " times")
class ConfigGenerator:
def __init__(self, fname:str, indir:str, outdir:str, pattern:str, kv:List[KeyWord], verbose:bool):
in_path = indir + '/' + fname
self.indata = []
self.outdata = []
with open(in_path, 'r') as infile:
self.indata = infile.readlines()
self.outpath = outdir + '/' + fname
if os.path.exists(self.outpath):
raise Exception("Output file already exists:" + self.outpath)
self.pattern = pattern
self.kv = kv
self.verbose = verbose
def start(self) -> None:
self.convert_lines()
self.save_conf()
def convert_lines(self) -> None:
for l in self.indata:
if self.pattern in l:
if self.verbose:
print("Replace:" + l, end="", flush=True)
newline = self.replace_keywords(l)
if self.pattern in newline:
print("WARN:NOT REPLACED:" + newline, end="")
self.outdata.append(newline)
else:
self.outdata.append(l)
def save_conf(self) -> None:
with open(self.outpath, "w") as f:
for l in self.outdata:
f.write(l)
def replace_keywords(self, line:str) -> str:
for kw in self.kv:
line = kw.replace(line)
return line
if __name__ == "__main__":
option = load_option(sys.argv[1:])
cf = ConfigFilter(option["tpldir"], option["outdir"], option["kv"])
cf.set_verbose(option["verbose"])
cf.set_pattern(option["pattern"])
try:
cf.start()
cf.check()
sys.exit(0)
except Exception as e:
print(e)
sys.exit(1)
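# Hedged usage sketch (illustrative values, not shipped with this script):
# given a template file tpl/app.conf containing a line such as
#     listen_port = ##PORT##
# and a key-value JSON file kv.json containing {"PORT": "8080"},
# an invocation like
#     python3 configfilter.py -t tpl -o out -v kv.json
# is expected to write out/app.conf with the line "listen_port = 8080".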
|
nilq/baby-python
|
python
|
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import requests
import pandas as pd
import pymongo
import time
client = pymongo.MongoClient('mongodb://localhost:27017')
db = client.mars_db
collection = db.mars
def init_browser():
executable_path = {'executable_path': ChromeDriverManager().install()}
return Browser('chrome', **executable_path, headless=False)
def scrape():
browser = init_browser()
collection.drop()
# NASA Mars News Webpage
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
time.sleep(1)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
nt = soup.find('div', class_='list_text')
np = soup.find('div', class_='article_teaser_body')
news_title = nt.a.text
news_p = np.text
# JPL Mars Space Webpage
url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html'
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
image_url = soup.find('img', class_='headerimage fade-in')['src']
featured_image_url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/' + image_url
# Mars facts URL
url = 'https://space-facts.com/mars/'
# Retrieve page with the requests module
response = requests.get(url)
# Create BeautifulSoup object; parse with 'html.parser'
soup = BeautifulSoup(response.text, 'html.parser')
df = pd.DataFrame(columns=['Feature','Value'])
for row in soup.findAll('table')[0].tbody.findAll('tr'):
first_column = row.findAll('td')[0].text.strip(": ")
second_column = row.findAll('td')[1].text
df = df.append({'Feature' : first_column,
'Value': second_column}, ignore_index=True)
df.to_html('mars_table.html')
mars_fact_html=df.to_html(header=False, index=False)
# Mars hemispheres title and image
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
hemisphere_image_urls = []
for table in soup.findAll('div', class_='accordian'):
for list in soup.findAll('div', class_='item'):
title_img_dict = {}
url = 'https://astrogeology.usgs.gov/' + list.a.get('href')
response = requests.get(url)
# Create BeautifulSoup object; parse with 'html.parser'
soup = BeautifulSoup(response.text, 'html.parser')
title = soup.find('h2', class_='title')
title_img_dict["title"] = title.text
image = soup.find('div', class_='downloads')
title_img_dict["image_url"] = image.a['href']
hemisphere_image_urls.append(title_img_dict)
# Close the browser after scraping
browser.quit()
# Creates a dict and collection in the database
mars_data ={
'news_title' : news_title,
'summary': news_p,
'featured_image': featured_image_url,
'fact_table': mars_fact_html,
'hemisphere_image_urls': hemisphere_image_urls
}
return mars_data
|
nilq/baby-python
|
python
|
class AnnotationModel:
def __init__(self, text: str, comment: str, last_update: str):
self.text = text
self.comment = comment
self.last_update = last_update
|
nilq/baby-python
|
python
|
""" Assignment 7
Write a short script that will get a name from the user.
Find the length of the name.
If the length is lower than 5 print "Under".
If the length is more than 5 print "Over".
If the length is exactly 5 print "Five".
Try to use 'if', 'else' and 'elif' exactly once each.
Also, try not to evaluate the length of the name more than once. """
# Answer
name = input("What is your name? ")
size = len(name)
if size < 5:
print("Under")
elif size == 5:
print("Five")
else:
print("Over")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : leeyoshinari
import pymysql
import zhihu_spider.settings as cfg
class MySQL(object):
def __init__(self):
self.db = None
self.cursor = None
self.connect()
def connect(self):
self.db = pymysql.connect(host=cfg.MYSQL_HOST, user=cfg.MYSQL_USER, password=cfg.MYSQL_PASSWORD, database=cfg.MYSQL_DATABASE)
self.cursor = self.db.cursor()
answers_sql = """
CREATE TABLE IF NOT EXISTS answers (
id INT NOT NULL AUTO_INCREMENT,
answer_id VARCHAR(20) NOT NULL,
answerer_id VARCHAR(50),
url_token VARCHAR(100),
name VARCHAR(100),
gender INT,
age INT,
height INT,
weight INT,
beauty INT,
face_shape VARCHAR(8),
pic_num INT,
follower_count INT,
headline VARCHAR(255),
content LONGTEXT,
voteup_count INT,
comment_count INT,
create_time DATETIME,
update_time DATETIME,
code INT,
PRIMARY KEY (id))"""
self.cursor.execute(answers_sql)
comments_sql = """
CREATE TABLE IF NOT EXISTS comments (
answer_id VARCHAR(20) NOT NULL,
comment_id VARCHAR(20),
parent_id VARCHAR(20),
content LONGTEXT,
vote_count INT,
commenter_id VARCHAR(50),
url_token VARCHAR(100),
name VARCHAR(100),
gender INT,
headline VARCHAR(255),
create_time DATETIME,
code INT,
PRIMARY KEY (comment_id))"""
self.cursor.execute(comments_sql)
def __del__(self):
del self.db
del self.cursor
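# Hedged usage sketch (not part of the original module): inserting one answer
# row with a parameterized query; the column subset is an assumption based on
# the CREATE TABLE statement above.
if __name__ == '__main__':
    db = MySQL()
    insert_sql = ("INSERT INTO answers (answer_id, answerer_id, name, voteup_count, code) "
                  "VALUES (%s, %s, %s, %s, %s)")
    db.cursor.execute(insert_sql, ('123456', 'abcdef', 'example-user', 0, 200))
    db.db.commit()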
|
nilq/baby-python
|
python
|
import numpy as np
# ra54 = np.random.random((5,4))
ra54 = np.arange(20).reshape(5,4)
print(ra54)
print(ra54[2,3])
print(ra54[(2,3),(3,3)])
it = np.nditer(ra54, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
idx = it.multi_index
print(idx, '=>', ra54[idx])
it.iternext()
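# Added illustration (not in the original snippet): np.ndenumerate yields the
# same (index, value) pairs as the nditer loop above without manual iternext().
for idx, val in np.ndenumerate(ra54):
    print(idx, '=>', val)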
|
nilq/baby-python
|
python
|
import asyncio
async def make_americano():
print("Americano Start")
await asyncio.sleep(3)
print("Americano End")
return "Americano"
async def make_latte():
print("Latte Start")
await asyncio.sleep(5)
print("Latte End")
return "Latte"
async def main():
coro1 = make_americano()
coro2 = make_latte()
result = await asyncio.gather(
coro1,
coro2
)
print(result)
print("Main Start")
asyncio.run(main())
print("Main End")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Special tools for working with mapping types."""
from types import SimpleNamespace
from typing import Mapping, Iterator, Iterable, TypeVar, Union, Any
T = TypeVar("T")
T_Sentinel = type("T_Sentinel", (), {})
R_SENTINEL = T_Sentinel()
T_Bool = Union[T_Sentinel, bool]
def lowerify_mapping(obj: T, *, recursive: T_Bool=R_SENTINEL) -> T:
"""Take a Mapping and change all the keys to lowercase.
Use recursive=True to recursively lowerify all objects.
"""
if isinstance(obj, Mapping) and (not recursive or recursive is R_SENTINEL):
# no recursion
gen = ((k.lower(),v) for k,v in obj.items())
obj = type(obj)(gen)
elif isinstance(obj, Mapping):
# recursion and a mapping
obj = type(obj)((k.lower(), lowerify_mapping(v, recursive=recursive)) for k, v in obj.items())
elif recursive is R_SENTINEL:
# no recursion argument and not a mapping: error
raise TypeError(f"Non-mapping {type(obj).__qualname__!r} object detected")
elif recursive and not isinstance(obj,str) and not isinstance(obj,Iterator) and isinstance(obj,Iterable):
# recursion and not a mapping
obj = type(obj)(lowerify_mapping(i, recursive=True) for i in obj)
return obj
def shallow_mapify(o: Any) -> Mapping[str, Any]:
"""Shallowly convert an object so it can be unpacked as **kwargs to another context."""
if isinstance(o, Mapping):
return o
if isinstance(o, type):
raise TypeError(f"Cannot mapify the class object {o.__qualname__}")
if hasattr(o, '__dataclass_fields__'):
from .dataclasses import shallow_asdict
return shallow_asdict(o)
if isinstance(o, SimpleNamespace):
return vars(o)
# attempt common as dict methods
as_dicts = (getattr(o,n,None) for n in "_asdict asdict as_dict _as_dict".split())
for asdict in (a for a in as_dicts if a is not None):
if isinstance(asdict, Mapping):
m = asdict
else:
m = asdict()
if isinstance(m, Mapping):
return m
try:
return dict(o)
except (TypeError, ValueError):
pass
raise TypeError(f"Failed to mapify {type(o).__qualname__} object")
|
nilq/baby-python
|
python
|
|