max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
disk-cleanup-macros/python-runnables/display-analysis-data-used-space-1-summary/runnable.py
|
gbetegon88/dataiku-contrib
| 93
|
6626651
|
from dataiku.runnables import Runnable, ResultTable
import dataiku
import subprocess
import os, os.path as osp
import cleanup
class MyRunnable(Runnable):
    """Macro that reports disk space used by DSS analysis data.

    Walks $DIP_HOME/analysis-data and sums, via cleanup.du(), the size of
    each ML task's "sessions" and "splits" directories, then renders the
    totals as a ResultTable at project or analysis granularity.
    """
    def __init__(self, project_key, config, plugin_config):
        # project_key: key of the project the macro runs in.
        # config: macro settings (keys used: 'allProjects', 'granularity').
        # plugin_config: plugin-level settings (unused here).
        self.project_key = project_key
        self.config = config
    def get_progress_target(self):
        # (target, unit) pair expected by the runnable API; 'NONE' means
        # the progress value is unit-less.
        return (100, 'NONE')
    def run(self, progress_callback):
        """Scan analysis-data directories and return a ResultTable of sizes."""
        dip_home = os.environ['DIP_HOME']
        analysis_data = osp.join(dip_home, 'analysis-data')
        # Accumulators keyed by project key or (project, analysis) tuple.
        projects_sessions = {}
        projects_splits = {}
        analyses_sessions = {}
        analyses_splits = {}
        projects_analyses = {}
        if self.config.get('allProjects', False):
            projects = [project_key for project_key in os.listdir(analysis_data)]
        else:
            projects = [self.project_key]
        for project in projects:
            project_analysis_data = osp.join(analysis_data, project)
            project_sessions = 0
            project_splits = 0
            projects_analyses[project] = []
            if not osp.isdir(project_analysis_data):
                # Project has no analysis data on disk: record zero usage.
                projects_sessions[project] = 0
                projects_splits[project] = 0
                continue
            for analysis in os.listdir(project_analysis_data):
                analysis_dir = osp.join(project_analysis_data, analysis)
                analysis_sessions = 0
                analysis_splits = 0
                projects_analyses[project].append(analysis)
                for mltask in os.listdir(analysis_dir):
                    mltask_dir = osp.join(analysis_dir, mltask)
                    sessions_dir = osp.join(mltask_dir, "sessions")
                    splits_dir = osp.join(mltask_dir, "splits")
                    if osp.isdir(sessions_dir):
                        analysis_sessions += cleanup.du(sessions_dir)
                    if osp.isdir(splits_dir):
                        analysis_splits += cleanup.du(splits_dir)
                project_sessions += analysis_sessions
                project_splits += analysis_splits
                analyses_splits[(project, analysis)] = analysis_splits
                analyses_sessions[(project, analysis)] = analysis_sessions
            projects_sessions[project] = project_sessions
            projects_splits[project] = project_splits
        rt = ResultTable()
        rt.set_name("Analysis data used space")
        if self.config["granularity"] == "project":
            # One row per project.
            rt.add_column("project", "Project key", "STRING")
            rt.add_column("total", "Total space (MB)", "STRING")
            rt.add_column("sessions", "Sessions space (MB)", "STRING")
            rt.add_column("splits", "Splits space (MB)", "STRING")
            for project in projects:
                total = (projects_sessions[project] + projects_splits[project])
                if len(projects) > 0 and total == 0:
                    # Skip empty projects to keep the table short.
                    continue
                record = []
                record.append(project)
                # Division by 1024 suggests cleanup.du() reports KB —
                # TODO confirm against the cleanup helper.
                record.append(total / 1024)
                record.append(projects_sessions[project] / 1024)
                record.append(projects_splits[project] / 1024)
                rt.add_record(record)
        else:
            # One row per (project, analysis) pair.
            rt.add_column("project", "Project key", "STRING")
            rt.add_column("analysis", "Analysis id", "STRING")
            rt.add_column("total", "Total space (MB)", "STRING")
            rt.add_column("sessions", "Sessions space (MB)", "STRING")
            rt.add_column("splits", "Splits space (MB)", "STRING")
            for project in projects:
                for analysis in projects_analyses[project]:
                    record = []
                    record.append(project)
                    record.append(analysis)
                    record.append((analyses_sessions[(project, analysis)]+analyses_splits[(project, analysis)])/ 1024)
                    record.append(analyses_sessions[(project, analysis)] / 1024)
                    record.append(analyses_splits[(project, analysis)] / 1024)
                    rt.add_record(record)
        return rt
|
from dataiku.runnables import Runnable, ResultTable
import dataiku
import subprocess
import os, os.path as osp
import cleanup
class MyRunnable(Runnable):
    """Macro that reports disk space used by DSS analysis data.

    Walks $DIP_HOME/analysis-data and sums, via cleanup.du(), the size of
    each ML task's "sessions" and "splits" directories, then renders the
    totals as a ResultTable at project or analysis granularity.
    """
    def __init__(self, project_key, config, plugin_config):
        # project_key: key of the project the macro runs in.
        # config: macro settings (keys used: 'allProjects', 'granularity').
        # plugin_config: plugin-level settings (unused here).
        self.project_key = project_key
        self.config = config
    def get_progress_target(self):
        # (target, unit) pair expected by the runnable API; 'NONE' means
        # the progress value is unit-less.
        return (100, 'NONE')
    def run(self, progress_callback):
        """Scan analysis-data directories and return a ResultTable of sizes."""
        dip_home = os.environ['DIP_HOME']
        analysis_data = osp.join(dip_home, 'analysis-data')
        # Accumulators keyed by project key or (project, analysis) tuple.
        projects_sessions = {}
        projects_splits = {}
        analyses_sessions = {}
        analyses_splits = {}
        projects_analyses = {}
        if self.config.get('allProjects', False):
            projects = [project_key for project_key in os.listdir(analysis_data)]
        else:
            projects = [self.project_key]
        for project in projects:
            project_analysis_data = osp.join(analysis_data, project)
            project_sessions = 0
            project_splits = 0
            projects_analyses[project] = []
            if not osp.isdir(project_analysis_data):
                # Project has no analysis data on disk: record zero usage.
                projects_sessions[project] = 0
                projects_splits[project] = 0
                continue
            for analysis in os.listdir(project_analysis_data):
                analysis_dir = osp.join(project_analysis_data, analysis)
                analysis_sessions = 0
                analysis_splits = 0
                projects_analyses[project].append(analysis)
                for mltask in os.listdir(analysis_dir):
                    mltask_dir = osp.join(analysis_dir, mltask)
                    sessions_dir = osp.join(mltask_dir, "sessions")
                    splits_dir = osp.join(mltask_dir, "splits")
                    if osp.isdir(sessions_dir):
                        analysis_sessions += cleanup.du(sessions_dir)
                    if osp.isdir(splits_dir):
                        analysis_splits += cleanup.du(splits_dir)
                project_sessions += analysis_sessions
                project_splits += analysis_splits
                analyses_splits[(project, analysis)] = analysis_splits
                analyses_sessions[(project, analysis)] = analysis_sessions
            projects_sessions[project] = project_sessions
            projects_splits[project] = project_splits
        rt = ResultTable()
        rt.set_name("Analysis data used space")
        if self.config["granularity"] == "project":
            # One row per project.
            rt.add_column("project", "Project key", "STRING")
            rt.add_column("total", "Total space (MB)", "STRING")
            rt.add_column("sessions", "Sessions space (MB)", "STRING")
            rt.add_column("splits", "Splits space (MB)", "STRING")
            for project in projects:
                total = (projects_sessions[project] + projects_splits[project])
                if len(projects) > 0 and total == 0:
                    # Skip empty projects to keep the table short.
                    continue
                record = []
                record.append(project)
                # Division by 1024 suggests cleanup.du() reports KB —
                # TODO confirm against the cleanup helper.
                record.append(total / 1024)
                record.append(projects_sessions[project] / 1024)
                record.append(projects_splits[project] / 1024)
                rt.add_record(record)
        else:
            # One row per (project, analysis) pair.
            rt.add_column("project", "Project key", "STRING")
            rt.add_column("analysis", "Analysis id", "STRING")
            rt.add_column("total", "Total space (MB)", "STRING")
            rt.add_column("sessions", "Sessions space (MB)", "STRING")
            rt.add_column("splits", "Splits space (MB)", "STRING")
            for project in projects:
                for analysis in projects_analyses[project]:
                    record = []
                    record.append(project)
                    record.append(analysis)
                    record.append((analyses_sessions[(project, analysis)]+analyses_splits[(project, analysis)])/ 1024)
                    record.append(analyses_sessions[(project, analysis)] / 1024)
                    record.append(analyses_splits[(project, analysis)] / 1024)
                    rt.add_record(record)
        return rt
|
none
| 1
| 2.239997
| 2
|
|
src/ratingservice/main.py
|
Ayelet41/cloud-ops-sandbox
| 229
|
6626652
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from flask import Flask, jsonify, request
from psycopg2 import pool, DatabaseError, IntegrityError
# enable GCP debugger when not running locally
if __name__ != "__main__":
    try:
        import googleclouddebugger
        googleclouddebugger.enable(
            breakpoint_enable_canary=False
        )
    except ImportError:
        pass
# If `entrypoint` is not defined in app.yaml, App Engine will look for an app
# called `app` in `main.py`.
db_connection_pool = None
app = Flask(__name__)
# Database connection settings, all read from the environment.
db_user = os.environ.get('DB_USERNAME')
db_name = os.environ.get('DB_NAME')
# Fixed: the password lookup was a redacted placeholder (`<PASSWORD>`);
# restore the env-var read, matching the sibling settings and the error below.
db_pass = os.environ.get('DB_PASSWORD')
db_host = os.environ.get('DB_HOST')
if not all([db_name, db_user, db_pass, db_host]):
    print('error: environment vars DB_USERNAME, DB_PASSWORD, DB_NAME and DB_HOST must be defined.')
    exit(1)
if os.environ.get('GAE_ENV') == 'standard':
    # On App Engine standard, connect through the Cloud SQL unix socket.
    db_host = '/cloudsql/{}'.format(db_host)
def getConnection():
    """Return a connection from the process-wide pool, creating the pool lazily.

    Returns:
        A psycopg2 connection, or None if the pool could not be created.
    """
    global db_connection_pool
    if db_connection_pool is None:
        cfg = {
            'user': db_user,
            # Fixed: was a redacted placeholder (`<PASSWORD>`); use the
            # module-level setting read from DB_PASSWORD.
            'password': db_pass,
            'database': db_name,
            'host': db_host
        }
        max_connections = int(os.getenv("MAX_DB_CONNECTIONS", "10"))
        try:
            db_connection_pool = pool.SimpleConnectionPool(
                minconn=1, maxconn=max_connections, **cfg)
        except (Exception, DatabaseError) as error:
            print(error)
            return None
    return db_connection_pool.getconn()
def makeError(code, message):
    """Build a JSON error response {'error': message} with the given HTTP status."""
    response = jsonify({'error': message})
    response.status_code = code
    return response
def makeResult(data):
    """Build a 200 JSON response carrying *data* as its payload."""
    response = jsonify(data)
    response.status_code = 200
    return response
#
# APIs
#
@app.route('/_ah/warmup')
def warmup():
    """App Engine warmup handler: primes the DB connection pool."""
    connection = getConnection()
    if connection is not None:
        db_connection_pool.putconn(connection)
    return '', 200, {}
@app.route('/ratings', methods=['GET'])
def getRatings():
    '''Gets a list of all ratings.
    Returns:
        HTTP status 200 and Json payload { ratings: [{'id': (string), 'rating': (number)}] }
        HTTP status 500 when there is an error querying DB or no data
    '''
    conn = getConnection()
    if conn is None:
        return makeError(500, 'failed to connect to DB')
    try:
        with conn.cursor() as cursor:
            cursor.execute("SELECT eid, ROUND(rating,4) FROM ratings")
            result = cursor.fetchall()
            conn.commit()
            # fetchall() returns a (possibly empty) list, never None, so
            # test for emptiness to honor the documented 500-on-no-data
            # contract (the old `is not None` check could never fail).
            if result:
                # cast to float because flask.jsonify doesn't work with decimal
                ratings = [{"id": eid.strip(), "rating": float(rating)}
                           for (eid, rating) in result]
                return makeResult({
                    'ratings': ratings,
                })
            else:
                return makeError(500, 'No available ratings')
    except DatabaseError:
        return makeError(500, 'DB error')
    finally:
        db_connection_pool.putconn(conn)
@app.route('/rating/<eid>', methods=['GET'])
def getRatingById(eid):
    '''Gets rating of the entity by its id.
    Args:
        eid (string): the entity id.
    Returns:
        HTTP status 200 and Json payload { 'id': (string), 'rating': (number), 'votes': (int) }
        HTTP status 400 when eid is missing or invalid
        HTTP status 404 when rating for eid cannot be found
        HTTP status 500 when there is an error querying DB
    '''
    if not eid:
        return makeError(400, "malformed entity id")
    conn = getConnection()
    if conn is None:
        return makeError(500, 'failed to connect to DB')
    try:
        with conn.cursor() as cursor:
            cursor.execute(
                "SELECT ROUND(rating,4), votes FROM ratings WHERE eid=%s", (eid,))
            result = cursor.fetchone()
            conn.commit()
            if result is not None:
                return makeResult({
                    'id': eid,
                    # cast to float because flask.jsonify doesn't work with decimal
                    'rating': float(result[0]),
                    'votes': result[1]
                })
            else:
                # fetchone() returned no row: unknown entity id.
                return makeError(404, "invalid entity id")
    except DatabaseError:
        return makeError(500, 'DB error')
    finally:
        db_connection_pool.putconn(conn)
@app.route('/rating', methods=['POST'])
def postRating():
    '''Adds new vote for entity's rating.
    Args:
        Json payload {'id': (string), 'rating': (integer) }
    Returns:
        HTTP status 200 and empty Json payload { }
        HTTP status 400 when payload is malformed (e.g. missing expected field)
        HTTP status 400 when eid is missing or invalid or rating is missing, invalid or out of [1..5] range
        HTTP status 404 when rating for eid cannot be reported
        HTTP status 500 when there is an error querying DB
    '''
    data = request.get_json()
    if data is None:
        return makeError(400, "missing json payload")
    eid = data.get('id')
    if not eid:
        return makeError(400, "malformed entity id")
    try:
        rating = int(data['rating'])
    except KeyError:
        return makeError(400, "missing 'rating' field in payload")
    except (ValueError, TypeError):
        # TypeError covers non-scalar JSON values (lists, objects, null)
        # that int() rejects; previously these leaked as a 500.
        return makeError(400, "rating should be integer number")
    if rating < 1 or rating > 5:
        return makeError(400, "rating should be value between 1 and 5")
    conn = getConnection()
    if conn is None:
        return makeError(500, 'failed to connect to DB')
    try:
        with conn.cursor() as cursor:
            cursor.execute(
                "INSERT INTO votes (eid, rating) VALUES (%s, %s)", (str(eid), rating))
            conn.commit()
        return makeResult({})
    except IntegrityError:
        # Constraint violation: the entity id does not exist.
        return makeError(404, 'invalid entity id')
    except DatabaseError:
        return makeError(500, 'DB error')
    finally:
        db_connection_pool.putconn(conn)
@app.route('/ratings:recollect', methods=['POST'])
def aggregateRatings():
    '''Updates current ratings for all entities based on new votes received until now.
    Returns:
        HTTP status 200 and empty Json payload { }
        HTTP status 500 when there is an error querying DB
    '''
    conn = getConnection()
    if conn is None:
        return makeError(500, 'failed to connect to DB')
    try:
        with conn.cursor() as cursor:
            # Mark the current batch of votes, fold them into the running
            # weighted average, then delete only the marked rows so votes
            # arriving mid-update are kept for the next pass.
            cursor.execute("UPDATE votes SET in_process=TRUE")
            cursor.execute(
                "UPDATE ratings AS r SET "
                "rating=(r.rating*r.votes/(r.votes+v.votes))+(v.avg_rating*v.votes/(r.votes+v.votes)), "
                "votes=r.votes+v.votes "
                "FROM (SELECT eid, ROUND(AVG(rating),4) AS avg_rating, COUNT(eid) AS votes FROM votes WHERE in_process=TRUE GROUP BY eid) AS v "
                "WHERE r.eid = v.eid")
            cursor.execute("DELETE FROM votes WHERE in_process=TRUE")
            conn.commit()
            return makeResult({})
    except DatabaseError:
        return makeError(500, 'DB error')
    finally:
        db_connection_pool.putconn(conn)
    # Removed unreachable `return resp`: every path above already returns,
    # and `resp` was never defined (would have raised NameError if reached).
if __name__ == "__main__":
    # Used when running locally only. When deploying to Google App
    # Engine, a webserver process such as Gunicorn will serve the app. This
    # can be configured by adding an `entrypoint` to app.yaml.
    # NOTE(review): debug=True must never be enabled in production.
    app.run(host="localhost", port=8080, debug=True)
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from flask import Flask, jsonify, request
from psycopg2 import pool, DatabaseError, IntegrityError
# enable GCP debugger when not running locally
if __name__ != "__main__":
    try:
        import googleclouddebugger
        googleclouddebugger.enable(
            breakpoint_enable_canary=False
        )
    except ImportError:
        pass
# If `entrypoint` is not defined in app.yaml, App Engine will look for an app
# called `app` in `main.py`.
db_connection_pool = None
app = Flask(__name__)
# Database connection settings, all read from the environment.
db_user = os.environ.get('DB_USERNAME')
db_name = os.environ.get('DB_NAME')
# Fixed: the password lookup was a redacted placeholder (`<PASSWORD>`);
# restore the env-var read, matching the sibling settings and the error below.
db_pass = os.environ.get('DB_PASSWORD')
db_host = os.environ.get('DB_HOST')
if not all([db_name, db_user, db_pass, db_host]):
    print('error: environment vars DB_USERNAME, DB_PASSWORD, DB_NAME and DB_HOST must be defined.')
    exit(1)
if os.environ.get('GAE_ENV') == 'standard':
    # On App Engine standard, connect through the Cloud SQL unix socket.
    db_host = '/cloudsql/{}'.format(db_host)
def getConnection():
    """Return a connection from the process-wide pool, creating the pool lazily.

    Returns:
        A psycopg2 connection, or None if the pool could not be created.
    """
    global db_connection_pool
    if db_connection_pool is None:
        cfg = {
            'user': db_user,
            # Fixed: was a redacted placeholder (`<PASSWORD>`); use the
            # module-level setting read from DB_PASSWORD.
            'password': db_pass,
            'database': db_name,
            'host': db_host
        }
        max_connections = int(os.getenv("MAX_DB_CONNECTIONS", "10"))
        try:
            db_connection_pool = pool.SimpleConnectionPool(
                minconn=1, maxconn=max_connections, **cfg)
        except (Exception, DatabaseError) as error:
            print(error)
            return None
    return db_connection_pool.getconn()
def makeError(code, message):
    """Build a JSON error response {'error': message} with the given HTTP status."""
    response = jsonify({'error': message})
    response.status_code = code
    return response
def makeResult(data):
    """Build a 200 JSON response carrying *data* as its payload."""
    response = jsonify(data)
    response.status_code = 200
    return response
#
# APIs
#
@app.route('/_ah/warmup')
def warmup():
    """App Engine warmup handler: primes the DB connection pool."""
    connection = getConnection()
    if connection is not None:
        db_connection_pool.putconn(connection)
    return '', 200, {}
@app.route('/ratings', methods=['GET'])
def getRatings():
    '''Gets a list of all ratings.
    Returns:
        HTTP status 200 and Json payload { ratings: [{'id': (string), 'rating': (number)}] }
        HTTP status 500 when there is an error querying DB or no data
    '''
    conn = getConnection()
    if conn is None:
        return makeError(500, 'failed to connect to DB')
    try:
        with conn.cursor() as cursor:
            cursor.execute("SELECT eid, ROUND(rating,4) FROM ratings")
            result = cursor.fetchall()
            conn.commit()
            # fetchall() returns a (possibly empty) list, never None, so
            # test for emptiness to honor the documented 500-on-no-data
            # contract (the old `is not None` check could never fail).
            if result:
                # cast to float because flask.jsonify doesn't work with decimal
                ratings = [{"id": eid.strip(), "rating": float(rating)}
                           for (eid, rating) in result]
                return makeResult({
                    'ratings': ratings,
                })
            else:
                return makeError(500, 'No available ratings')
    except DatabaseError:
        return makeError(500, 'DB error')
    finally:
        db_connection_pool.putconn(conn)
@app.route('/rating/<eid>', methods=['GET'])
def getRatingById(eid):
    '''Gets rating of the entity by its id.
    Args:
        eid (string): the entity id.
    Returns:
        HTTP status 200 and Json payload { 'id': (string), 'rating': (number), 'votes': (int) }
        HTTP status 400 when eid is missing or invalid
        HTTP status 404 when rating for eid cannot be found
        HTTP status 500 when there is an error querying DB
    '''
    if not eid:
        return makeError(400, "malformed entity id")
    conn = getConnection()
    if conn is None:
        return makeError(500, 'failed to connect to DB')
    try:
        with conn.cursor() as cursor:
            cursor.execute(
                "SELECT ROUND(rating,4), votes FROM ratings WHERE eid=%s", (eid,))
            result = cursor.fetchone()
            conn.commit()
            if result is not None:
                return makeResult({
                    'id': eid,
                    # cast to float because flask.jsonify doesn't work with decimal
                    'rating': float(result[0]),
                    'votes': result[1]
                })
            else:
                # fetchone() returned no row: unknown entity id.
                return makeError(404, "invalid entity id")
    except DatabaseError:
        return makeError(500, 'DB error')
    finally:
        db_connection_pool.putconn(conn)
@app.route('/rating', methods=['POST'])
def postRating():
    '''Adds new vote for entity's rating.
    Args:
        Json payload {'id': (string), 'rating': (integer) }
    Returns:
        HTTP status 200 and empty Json payload { }
        HTTP status 400 when payload is malformed (e.g. missing expected field)
        HTTP status 400 when eid is missing or invalid or rating is missing, invalid or out of [1..5] range
        HTTP status 404 when rating for eid cannot be reported
        HTTP status 500 when there is an error querying DB
    '''
    data = request.get_json()
    if data is None:
        return makeError(400, "missing json payload")
    eid = data.get('id')
    if not eid:
        return makeError(400, "malformed entity id")
    try:
        rating = int(data['rating'])
    except KeyError:
        return makeError(400, "missing 'rating' field in payload")
    except (ValueError, TypeError):
        # TypeError covers non-scalar JSON values (lists, objects, null)
        # that int() rejects; previously these leaked as a 500.
        return makeError(400, "rating should be integer number")
    if rating < 1 or rating > 5:
        return makeError(400, "rating should be value between 1 and 5")
    conn = getConnection()
    if conn is None:
        return makeError(500, 'failed to connect to DB')
    try:
        with conn.cursor() as cursor:
            cursor.execute(
                "INSERT INTO votes (eid, rating) VALUES (%s, %s)", (str(eid), rating))
            conn.commit()
        return makeResult({})
    except IntegrityError:
        # Constraint violation: the entity id does not exist.
        return makeError(404, 'invalid entity id')
    except DatabaseError:
        return makeError(500, 'DB error')
    finally:
        db_connection_pool.putconn(conn)
@app.route('/ratings:recollect', methods=['POST'])
def aggregateRatings():
    '''Updates current ratings for all entities based on new votes received until now.
    Returns:
        HTTP status 200 and empty Json payload { }
        HTTP status 500 when there is an error querying DB
    '''
    conn = getConnection()
    if conn is None:
        return makeError(500, 'failed to connect to DB')
    try:
        with conn.cursor() as cursor:
            # Mark the current batch of votes, fold them into the running
            # weighted average, then delete only the marked rows so votes
            # arriving mid-update are kept for the next pass.
            cursor.execute("UPDATE votes SET in_process=TRUE")
            cursor.execute(
                "UPDATE ratings AS r SET "
                "rating=(r.rating*r.votes/(r.votes+v.votes))+(v.avg_rating*v.votes/(r.votes+v.votes)), "
                "votes=r.votes+v.votes "
                "FROM (SELECT eid, ROUND(AVG(rating),4) AS avg_rating, COUNT(eid) AS votes FROM votes WHERE in_process=TRUE GROUP BY eid) AS v "
                "WHERE r.eid = v.eid")
            cursor.execute("DELETE FROM votes WHERE in_process=TRUE")
            conn.commit()
            return makeResult({})
    except DatabaseError:
        return makeError(500, 'DB error')
    finally:
        db_connection_pool.putconn(conn)
    # Removed unreachable `return resp`: every path above already returns,
    # and `resp` was never defined (would have raised NameError if reached).
if __name__ == "__main__":
    # Used when running locally only. When deploying to Google App
    # Engine, a webserver process such as Gunicorn will serve the app. This
    # can be configured by adding an `entrypoint` to app.yaml.
    # NOTE(review): debug=True must never be enabled in production.
    app.run(host="localhost", port=8080, debug=True)
|
en
| 0.813704
|
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # enable GCP debugger when not running locally # If `entrypoint` is not defined in app.yaml, App Engine will look for an app # called `app` in `main.py`. # # APIs # Handles App Engine warmup logic Gets a list of all ratings. Returns: HTTP status 200 and Json payload { ratings: [{'id': (string), 'rating': (number)}] } HTTP status 500 when there is an error querying DB or no data # cast to float because flask.jsonify doesn't work with decimal Gets rating of the entity by its id. Args: eid (string): the entity id. Returns: HTTP status 200 and Json payload { 'id': (string), 'rating': (number), 'votes': (int) } HTTP status 400 when eid is is missing or invalid HTTP status 404 when rating for eid cannot be found HTTP status 500 when there is an error querying DB # cast to float because flas.jsonify doesn't work with decimal Adds new vote for entity's rating. Args: Json payload {'id': (string), 'rating': (integer) } Returns: HTTP status 200 and empty Json payload { } HTTP status 400 when payload is malformed (e.g. missing expected field) HTTP status 400 when eid is missing or invalid or rating is missing, invalid or out of [1..5] range HTTP status 404 when rating for eid cannot be reported HTTP status 500 when there is an error querying DB Updates current ratings for all entities based on new votes received until now. 
Returns: HTTP status 200 and empty Json payload { } HTTP status 500 when there is an error querying DB # Used when running locally only. When deploying to Google App # Engine, a webserver process such as Gunicorn will serve the app. This # can be configured by adding an `entrypoint` to app.yaml.
| 2.257002
| 2
|
tests/strategies/coordinator/test_orderer.py
|
y-tetsu/othello
| 10
|
6626653
|
<gh_stars>1-10
"""Tests of orderer.py
"""
import unittest
from reversi.board import BitBoard
from reversi.strategies.coordinator import Orderer, Orderer_B, Orderer_C, Orderer_P, Orderer_BC, Orderer_CB, Orderer_PCB
class TestOrderer(unittest.TestCase):
    """Unit tests for the move-ordering strategies in orderer.py."""

    # Fixed mid-game move sequence shared by the multi-move tests.
    _MIDGAME = [
        ('black', 3, 2), ('white', 2, 4), ('black', 1, 5), ('white', 1, 4),
        ('black', 2, 5), ('white', 2, 6), ('black', 1, 6), ('white', 1, 7),
    ]

    def _board_after(self, moves):
        """Return an 8x8 BitBoard after playing *moves* ((color, x, y) tuples)."""
        board = BitBoard(8)
        for color, x, y in moves:
            board.put_disc(color, x, y)
        return board

    def _order(self, orderer, board, color, best_move=None):
        """Run *orderer* on *board* for *color* and return the ordered move list."""
        return orderer.move_ordering(color=color, board=board,
                                     moves=board.get_legal_moves(color),
                                     best_move=best_move)

    def test_orderer(self):
        board = self._board_after([('black', 3, 2)])
        self.assertEqual(self._order(Orderer(), board, 'white'),
                         [(2, 2), (4, 2), (2, 4)])

    def test_orderer_b(self):
        board = self._board_after([('black', 3, 2)])
        self.assertEqual(self._order(Orderer_B(), board, 'white', best_move=(4, 2)),
                         [(4, 2), (2, 2), (2, 4)])

    def test_orderer_c(self):
        board = self._board_after(self._MIDGAME)
        self.assertEqual(self._order(Orderer_C(), board, 'black'),
                         [(0, 7), (0, 3), (2, 3), (0, 4), (5, 4), (0, 5), (4, 5), (5, 5), (0, 6), (2, 7)])

    def test_orderer_p(self):
        board = self._board_after(self._MIDGAME)
        self.assertEqual(self._order(Orderer_P(), board, 'black'),
                         [(5, 4), (4, 5), (5, 5), (0, 7), (0, 3), (2, 3), (0, 4), (0, 5), (0, 6), (2, 7)])

    def test_orderer_bc(self):
        board = self._board_after(self._MIDGAME)
        self.assertEqual(self._order(Orderer_BC(), board, 'black', best_move=(2, 3)),
                         [(0, 7), (2, 3), (0, 3), (0, 4), (5, 4), (0, 5), (4, 5), (5, 5), (0, 6), (2, 7)])

    def test_orderer_cb(self):
        board = self._board_after(self._MIDGAME)
        self.assertEqual(self._order(Orderer_CB(), board, 'black', best_move=(2, 3)),
                         [(2, 3), (0, 7), (0, 3), (0, 4), (5, 4), (0, 5), (4, 5), (5, 5), (0, 6), (2, 7)])

    def test_orderer_pcb(self):
        board = self._board_after(self._MIDGAME)
        self.assertEqual(self._order(Orderer_PCB(), board, 'black', best_move=(2, 3)),
                         [(2, 3), (0, 7), (5, 4), (4, 5), (5, 5), (0, 3), (0, 4), (0, 5), (0, 6), (2, 7)])
|
"""Tests of orderer.py
"""
import unittest
from reversi.board import BitBoard
from reversi.strategies.coordinator import Orderer, Orderer_B, Orderer_C, Orderer_P, Orderer_BC, Orderer_CB, Orderer_PCB
class TestOrderer(unittest.TestCase):
    """Unit tests for the move-ordering strategies in orderer.py."""

    # Fixed mid-game move sequence shared by the multi-move tests.
    _MIDGAME = [
        ('black', 3, 2), ('white', 2, 4), ('black', 1, 5), ('white', 1, 4),
        ('black', 2, 5), ('white', 2, 6), ('black', 1, 6), ('white', 1, 7),
    ]

    def _board_after(self, moves):
        """Return an 8x8 BitBoard after playing *moves* ((color, x, y) tuples)."""
        board = BitBoard(8)
        for color, x, y in moves:
            board.put_disc(color, x, y)
        return board

    def _order(self, orderer, board, color, best_move=None):
        """Run *orderer* on *board* for *color* and return the ordered move list."""
        return orderer.move_ordering(color=color, board=board,
                                     moves=board.get_legal_moves(color),
                                     best_move=best_move)

    def test_orderer(self):
        board = self._board_after([('black', 3, 2)])
        self.assertEqual(self._order(Orderer(), board, 'white'),
                         [(2, 2), (4, 2), (2, 4)])

    def test_orderer_b(self):
        board = self._board_after([('black', 3, 2)])
        self.assertEqual(self._order(Orderer_B(), board, 'white', best_move=(4, 2)),
                         [(4, 2), (2, 2), (2, 4)])

    def test_orderer_c(self):
        board = self._board_after(self._MIDGAME)
        self.assertEqual(self._order(Orderer_C(), board, 'black'),
                         [(0, 7), (0, 3), (2, 3), (0, 4), (5, 4), (0, 5), (4, 5), (5, 5), (0, 6), (2, 7)])

    def test_orderer_p(self):
        board = self._board_after(self._MIDGAME)
        self.assertEqual(self._order(Orderer_P(), board, 'black'),
                         [(5, 4), (4, 5), (5, 5), (0, 7), (0, 3), (2, 3), (0, 4), (0, 5), (0, 6), (2, 7)])

    def test_orderer_bc(self):
        board = self._board_after(self._MIDGAME)
        self.assertEqual(self._order(Orderer_BC(), board, 'black', best_move=(2, 3)),
                         [(0, 7), (2, 3), (0, 3), (0, 4), (5, 4), (0, 5), (4, 5), (5, 5), (0, 6), (2, 7)])

    def test_orderer_cb(self):
        board = self._board_after(self._MIDGAME)
        self.assertEqual(self._order(Orderer_CB(), board, 'black', best_move=(2, 3)),
                         [(2, 3), (0, 7), (0, 3), (0, 4), (5, 4), (0, 5), (4, 5), (5, 5), (0, 6), (2, 7)])

    def test_orderer_pcb(self):
        board = self._board_after(self._MIDGAME)
        self.assertEqual(self._order(Orderer_PCB(), board, 'black', best_move=(2, 3)),
                         [(2, 3), (0, 7), (5, 4), (4, 5), (5, 5), (0, 3), (0, 4), (0, 5), (0, 6), (2, 7)])
|
en
| 0.331693
|
Tests of orderer.py orderer
| 3.035022
| 3
|
tests/sig/test_fbanks.py
|
raymondxyy/pyaudlib
| 26
|
6626654
|
from audlib.sig.fbanks import MelFreq, ConstantQ
from audlib.quickstart import welcome
from audlib.sig.window import hamming
from audlib.sig.transform import stmfcc
import numpy as np
import scipy.signal as signal
sig, sr = welcome()
def test_mfcc():
    """Smoke-test short-time MFCC extraction on the bundled welcome clip."""
    # TODO: need to add proper testing.
    nfft = 512
    melbank = MelFreq(sr, nfft, 40)
    wind = hamming(int(0.032 * sr))
    return stmfcc(sig, wind, .25, nfft, melbank)
def test_cqt():
    """Smoke-test the constant-Q filterbank on the bundled welcome clip."""
    cqbank = ConstantQ(sr, 100, bins_per_octave=32)
    cqbank.cqt(sig, 100)
    return
def test_fbs():
    """Test filterbank synthesis.

    Sums the outputs of a bank of complex-modulated analysis filters and
    checks that the real part reconstructs the input signal.
    """
    window_length = 0.02
    window_size = int(window_length * sr)
    window = hamming(window_size, nchan=window_size, synth=True)
    # Use builtin `complex`: the np.complex_ alias was removed in NumPy 2.0.
    synth = np.zeros(sig.shape, dtype=complex)
    for kk in range(window_size):
        wk = 2 * np.pi * (kk / window_size)
        band = signal.lfilter(
            window * np.exp(1j*wk*np.arange(window_size)), 1, sig
        )
        synth[:] = synth[:] + band
    assert np.allclose(synth.real, sig)
    return
if __name__ == '__main__':
    # Run the smoke tests directly when executed as a script (outside pytest).
    test_fbs()
    test_mfcc()
    test_cqt()
|
from audlib.sig.fbanks import MelFreq, ConstantQ
from audlib.quickstart import welcome
from audlib.sig.window import hamming
from audlib.sig.transform import stmfcc
import numpy as np
import scipy.signal as signal
sig, sr = welcome()
def test_mfcc():
    """Smoke-test short-time MFCC extraction on the bundled welcome clip."""
    # TODO: need to add proper testing.
    nfft = 512
    melbank = MelFreq(sr, nfft, 40)
    wind = hamming(int(0.032 * sr))
    return stmfcc(sig, wind, .25, nfft, melbank)
def test_cqt():
    """Smoke-test the constant-Q filterbank on the bundled welcome clip."""
    cqbank = ConstantQ(sr, 100, bins_per_octave=32)
    cqbank.cqt(sig, 100)
    return
def test_fbs():
    """Test filterbank synthesis.

    Sums the outputs of a bank of complex-modulated analysis filters and
    checks that the real part reconstructs the input signal.
    """
    window_length = 0.02
    window_size = int(window_length * sr)
    window = hamming(window_size, nchan=window_size, synth=True)
    # Use builtin `complex`: the np.complex_ alias was removed in NumPy 2.0.
    synth = np.zeros(sig.shape, dtype=complex)
    for kk in range(window_size):
        wk = 2 * np.pi * (kk / window_size)
        band = signal.lfilter(
            window * np.exp(1j*wk*np.arange(window_size)), 1, sig
        )
        synth[:] = synth[:] + band
    assert np.allclose(synth.real, sig)
    return
if __name__ == '__main__':
    # Run the smoke tests directly when executed as a script (outside pytest).
    test_fbs()
    test_mfcc()
    test_cqt()
|
en
| 0.667107
|
# TODO: need to add proper testing. Test constant Q transform. Test filterbank synthesis.
| 2.424739
| 2
|
src/iOS/toga_iOS/widgets/progressbar.py
|
luizoti/toga
| 1,261
|
6626655
|
<filename>src/iOS/toga_iOS/widgets/progressbar.py
from travertino.size import at_least
from toga_iOS.libs import CGSize, UIProgressView, UIProgressViewStyle
from toga_iOS.widgets.base import Widget
class ProgressBar(Widget):
    """iOS backend for toga's ProgressBar, wrapping a native UIProgressView."""
    def create(self):
        # Instantiate the native determinate progress view.
        self.native = UIProgressView.alloc().initWithProgressViewStyle_(UIProgressViewStyle.Default)
        self.add_constraints()
    def start(self):
        # Indeterminate progress is not supported for UIProgressView in iOS
        pass
    def stop(self):
        # No-op: there is no indeterminate mode to stop (see start()).
        pass
    def set_value(self, value):
        # UIProgressView takes a fraction in [0, 1], so scale by the max.
        # NOTE(review): assumes interface.max is non-zero when not None -- confirm.
        if self.interface.max is not None:
            self.native.setProgress_animated_(
                self.interface.value / self.interface.max,
                True
            )
    def set_max(self, value):
        # Max is tracked on the interface side only; nothing to push natively.
        pass
    def rehint(self):
        # Ask UIKit for the minimal fitting size and expose it as the
        # widget's intrinsic content size (width stays flexible).
        fitting_size = self.native.systemLayoutSizeFittingSize_(CGSize(0, 0))
        self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)
        self.interface.intrinsic.height = fitting_size.height
|
<filename>src/iOS/toga_iOS/widgets/progressbar.py
from travertino.size import at_least
from toga_iOS.libs import CGSize, UIProgressView, UIProgressViewStyle
from toga_iOS.widgets.base import Widget
class ProgressBar(Widget):
    """iOS backend for toga's ProgressBar, wrapping a native UIProgressView."""
    def create(self):
        # Instantiate the native determinate progress view.
        self.native = UIProgressView.alloc().initWithProgressViewStyle_(UIProgressViewStyle.Default)
        self.add_constraints()
    def start(self):
        # Indeterminate progress is not supported for UIProgressView in iOS
        pass
    def stop(self):
        # No-op: there is no indeterminate mode to stop (see start()).
        pass
    def set_value(self, value):
        # UIProgressView takes a fraction in [0, 1], so scale by the max.
        # NOTE(review): assumes interface.max is non-zero when not None -- confirm.
        if self.interface.max is not None:
            self.native.setProgress_animated_(
                self.interface.value / self.interface.max,
                True
            )
    def set_max(self, value):
        # Max is tracked on the interface side only; nothing to push natively.
        pass
    def rehint(self):
        # Ask UIKit for the minimal fitting size and expose it as the
        # widget's intrinsic content size (width stays flexible).
        fitting_size = self.native.systemLayoutSizeFittingSize_(CGSize(0, 0))
        self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)
        self.interface.intrinsic.height = fitting_size.height
|
en
| 0.762835
|
# Indeterminate progress is not supported for UIProgressView in iOS
| 2.185416
| 2
|
parcels/plotting.py
|
jelletreep/parcels
| 0
|
6626656
|
<gh_stars>0
from datetime import datetime
from datetime import timedelta as delta
import numpy as np
from parcels.field import Field
from parcels.field import VectorField
from parcels.grid import CurvilinearGrid
from parcels.grid import GridCode
from parcels.tools.error import TimeExtrapolationError
from parcels.tools.loggers import logger
def plotparticles(particles, with_particles=True, show_time=None, field=None, domain=None, projection=None,
                  land=True, vmin=None, vmax=None, savefile=None, animation=False, **kwargs):
    """Function to plot a Parcels ParticleSet
    :param show_time: Time at which to show the ParticleSet
    :param with_particles: Boolean whether particles are also plotted on Field
    :param field: Field to plot under particles (either None, a Field object, or 'vector')
    :param domain: dictionary (with keys 'N', 'S', 'E', 'W') defining domain to show
    :param projection: type of cartopy projection to use (default PlateCarree)
    :param land: Boolean whether to show land. This is ignored for flat meshes
    :param vmin: minimum colour scale (only in single-plot mode)
    :param vmax: maximum colour scale (only in single-plot mode)
    :param savefile: Name of a file to save the plot to
    :param animation: Boolean whether result is a single plot, or an animation
    """
    # Normalise show_time to seconds relative to the fieldset time origin.
    show_time = particles[0].time if show_time is None else show_time
    if isinstance(show_time, datetime):
        show_time = np.datetime64(show_time)
    if isinstance(show_time, np.datetime64):
        if not particles.time_origin:
            raise NotImplementedError(
                'If fieldset.time_origin is not a date, showtime cannot be a date in particleset.show()')
        show_time = particles.time_origin.reltime(show_time)
    if isinstance(show_time, delta):
        show_time = show_time.total_seconds()
    if np.isnan(show_time):
        # Fall back to the start of the gridset's full time range.
        show_time, _ = particles.fieldset.gridset.dimrange('time_full')
    if field is None:
        # No background field: draw only particle positions on bare axes.
        spherical = True if particles.fieldset.U.grid.mesh == 'spherical' else False
        plt, fig, ax, cartopy = create_parcelsfig_axis(spherical, land, projection)
        if plt is None:
            return  # creating axes was not possible
        ax.set_title('Particles' + parsetimestr(particles.fieldset.U.grid.time_origin, show_time))
        latN, latS, lonE, lonW = parsedomain(domain, particles.fieldset.U)
        # Set plot extent; curvilinear grids carry 2-D lon/lat arrays.
        if cartopy is None or projection is None:
            if domain is not None:
                if isinstance(particles.fieldset.U.grid, CurvilinearGrid):
                    ax.set_xlim(particles.fieldset.U.grid.lon[latS, lonW], particles.fieldset.U.grid.lon[latN, lonE])
                    ax.set_ylim(particles.fieldset.U.grid.lat[latS, lonW], particles.fieldset.U.grid.lat[latN, lonE])
                else:
                    ax.set_xlim(particles.fieldset.U.grid.lon[lonW], particles.fieldset.U.grid.lon[lonE])
                    ax.set_ylim(particles.fieldset.U.grid.lat[latS], particles.fieldset.U.grid.lat[latN])
            else:
                ax.set_xlim(np.nanmin(particles.fieldset.U.grid.lon), np.nanmax(particles.fieldset.U.grid.lon))
                ax.set_ylim(np.nanmin(particles.fieldset.U.grid.lat), np.nanmax(particles.fieldset.U.grid.lat))
        elif domain is not None:
            if isinstance(particles.fieldset.U.grid, CurvilinearGrid):
                ax.set_extent([particles.fieldset.U.grid.lon[latS, lonW], particles.fieldset.U.grid.lon[latN, lonE],
                               particles.fieldset.U.grid.lat[latS, lonW], particles.fieldset.U.grid.lat[latN, lonE]])
            else:
                ax.set_extent([particles.fieldset.U.grid.lon[lonW], particles.fieldset.U.grid.lon[lonE],
                               particles.fieldset.U.grid.lat[latS], particles.fieldset.U.grid.lat[latN]])
    else:
        # Background field requested: delegate field drawing to plotfield().
        if field == 'vector':
            field = particles.fieldset.UV
        elif not isinstance(field, Field):
            field = getattr(particles.fieldset, field)
        depth_level = kwargs.pop('depth_level', 0)
        plt, fig, ax, cartopy = plotfield(field=field, animation=animation, show_time=show_time, domain=domain,
                                          projection=projection, land=land, vmin=vmin, vmax=vmax, savefile=None,
                                          titlestr='Particles and ', depth_level=depth_level)
        if plt is None:
            return  # creating axes was not possible
    if with_particles:
        # Scatter the particle positions on top of whatever was drawn.
        plon = np.array([p.lon for p in particles])
        plat = np.array([p.lat for p in particles])
        if cartopy:
            ax.scatter(plon, plat, s=20, color='black', zorder=20, transform=cartopy.crs.PlateCarree())
        else:
            ax.scatter(plon, plat, s=20, color='black', zorder=20)
    # Output: live animation frame, interactive window, or file on disk.
    if animation:
        plt.draw()
        plt.pause(0.0001)
    elif savefile is None:
        plt.show()
    else:
        plt.savefig(savefile)
        logger.info('Plot saved to ' + savefile + '.png')
        plt.close()
def plotfield(field, show_time=None, domain=None, depth_level=0, projection=None, land=True,
              vmin=None, vmax=None, savefile=None, **kwargs):
    """Function to plot a Parcels Field
    :param show_time: Time at which to show the Field
    :param domain: dictionary (with keys 'N', 'S', 'E', 'W') defining domain to show
    :param depth_level: depth level to be plotted (default 0)
    :param projection: type of cartopy projection to use (default PlateCarree)
    :param land: Boolean whether to show land. This is ignored for flat meshes
    :param vmin: minimum colour scale (only in single-plot mode)
    :param vmax: maximum colour scale (only in single-plot mode)
    :param savefile: Name of a file to save the plot to
    :param animation: Boolean whether result is a single plot, or an animation
    """
    # Normalise to a list of scalar Fields; vectors are drawn as quivers.
    if type(field) is VectorField:
        spherical = True if field.U.grid.mesh == 'spherical' else False
        field = [field.U, field.V]
        plottype = 'vector'
    elif type(field) is Field:
        spherical = True if field.grid.mesh == 'spherical' else False
        field = [field]
        plottype = 'scalar'
    else:
        raise RuntimeError('field needs to be a Field or VectorField object')
    plt, fig, ax, cartopy = create_parcelsfig_axis(spherical, land, projection=projection)
    if plt is None:
        return None, None, None, None  # creating axes was not possible
    data = {}
    plotlon = {}
    plotlat = {}
    for i, fld in enumerate(field):
        show_time = fld.grid.time[0] if show_time is None else show_time
        if fld.grid.defer_load:
            fld.fieldset.computeTimeChunk(show_time, 1)
        (idx, periods) = fld.time_index(show_time)
        # Wrap periodic time back into the loaded time range.
        show_time -= periods * (fld.grid.time_full[-1] - fld.grid.time_full[0])
        if show_time > fld.grid.time[-1] or show_time < fld.grid.time[0]:
            raise TimeExtrapolationError(show_time, field=fld, msg='show_time')
        latN, latS, lonE, lonW = parsedomain(domain, fld)
        if isinstance(fld.grid, CurvilinearGrid):
            plotlon[i] = fld.grid.lon[latS:latN, lonW:lonE]
            plotlat[i] = fld.grid.lat[latS:latN, lonW:lonE]
        else:
            plotlon[i] = fld.grid.lon[lonW:lonE]
            plotlat[i] = fld.grid.lat[latS:latN]
        if i > 0 and not np.allclose(plotlon[i], plotlon[0]):
            raise RuntimeError('VectorField needs to be on an A-grid for plotting')
        # Extract the 2-D data slice (time-interpolated if needed).
        if fld.grid.time.size > 1:
            if fld.grid.zdim > 1:
                data[i] = np.squeeze(fld.temporal_interpolate_fullfield(idx, show_time))[depth_level, latS:latN, lonW:lonE]
            else:
                data[i] = np.squeeze(fld.temporal_interpolate_fullfield(idx, show_time))[latS:latN, lonW:lonE]
        else:
            if fld.grid.zdim > 1:
                data[i] = np.squeeze(fld.data)[depth_level, latS:latN, lonW:lonE]
            else:
                data[i] = np.squeeze(fld.data)[latS:latN, lonW:lonE]
    if plottype == 'vector':
        if field[0].interp_method == 'cgrid_velocity':
            logger.warning_once('Plotting a C-grid velocity field is achieved via an A-grid projection, reducing the plot accuracy')
            # Average C-grid staggered velocities onto cell centres (A-grid).
            d = np.empty_like(data[0])
            d[:-1, :] = (data[0][:-1, :] + data[0][1:, :]) / 2.
            d[-1, :] = data[0][-1, :]
            data[0] = d
            d = np.empty_like(data[0])
            d[:, :-1] = (data[0][:, :-1] + data[0][:, 1:]) / 2.
            d[:, -1] = data[0][:, -1]
            data[1] = d
        spd = data[0] ** 2 + data[1] ** 2
        speed = np.where(spd > 0, np.sqrt(spd), 0)
        vmin = speed.min() if vmin is None else vmin
        vmax = speed.max() if vmax is None else vmax
        if isinstance(field[0].grid, CurvilinearGrid):
            x, y = plotlon[0], plotlat[0]
        else:
            x, y = np.meshgrid(plotlon[0], plotlat[0])
        # Unit direction vectors; speed is shown via the quiver colour map.
        u = np.where(speed > 0., data[0]/speed, 0)
        v = np.where(speed > 0., data[1]/speed, 0)
        if cartopy:
            cs = ax.quiver(x, y, u, v, speed, cmap=plt.cm.gist_ncar, clim=[vmin, vmax], scale=50, transform=cartopy.crs.PlateCarree())
        else:
            cs = ax.quiver(x, y, u, v, speed, cmap=plt.cm.gist_ncar, clim=[vmin, vmax], scale=50)
    else:
        vmin = data[0].min() if vmin is None else vmin
        vmax = data[0].max() if vmax is None else vmax
        assert len(data[0].shape) == 2
        # Re-centre staggered data for pcolormesh depending on interp method.
        if field[0].interp_method == 'cgrid_tracer':
            d = data[0][1:, 1:]
        elif field[0].interp_method == 'cgrid_velocity':
            if field[0].fieldtype == 'U':
                d = np.empty_like(data[0])
                d[:-1, :-1] = (data[0][1:, :-1] + data[0][1:, 1:]) / 2.
            elif field[0].fieldtype == 'V':
                d = np.empty_like(data[0])
                d[:-1, :-1] = (data[0][:-1, 1:] + data[0][1:, 1:]) / 2.
            else:  # W
                d = data[0][1:, 1:]
        else:  # if A-grid
            d = (data[0][:-1, :-1] + data[0][1:, :-1] + data[0][:-1, 1:] + data[0][1:, 1:])/4.
            # Zero out cells where any corner is exactly zero
            # (presumably a land mask -- TODO confirm).
            d = np.where(data[0][:-1, :-1] == 0, 0, d)
            d = np.where(data[0][1:, :-1] == 0, 0, d)
            d = np.where(data[0][1:, 1:] == 0, 0, d)
            d = np.where(data[0][:-1, 1:] == 0, 0, d)
        if cartopy:
            cs = ax.pcolormesh(plotlon[0], plotlat[0], d, transform=cartopy.crs.PlateCarree())
        else:
            cs = ax.pcolormesh(plotlon[0], plotlat[0], d)
    if cartopy is None:
        ax.set_xlim(np.nanmin(plotlon[0]), np.nanmax(plotlon[0]))
        ax.set_ylim(np.nanmin(plotlat[0]), np.nanmax(plotlat[0]))
    elif domain is not None:
        ax.set_extent([np.nanmin(plotlon[0]), np.nanmax(plotlon[0]), np.nanmin(plotlat[0]), np.nanmax(plotlat[0])], crs=cartopy.crs.PlateCarree())
    cs.cmap.set_over('k')
    cs.cmap.set_under('w')
    cs.set_clim(vmin, vmax)
    cartopy_colorbar(cs, plt, fig, ax)
    # Compose the title from time and (if 3-D) depth/level information.
    timestr = parsetimestr(field[0].grid.time_origin, show_time)
    titlestr = kwargs.pop('titlestr', '')
    if field[0].grid.zdim > 1:
        if field[0].grid.gtype in [GridCode.CurvilinearZGrid, GridCode.RectilinearZGrid]:
            gphrase = 'depth'
            depth_or_level = field[0].grid.depth[depth_level]
        else:
            gphrase = 'level'
            depth_or_level = depth_level
        depthstr = ' at %s %g ' % (gphrase, depth_or_level)
    else:
        depthstr = ''
    if plottype == 'vector':
        ax.set_title(titlestr + 'Velocity field' + depthstr + timestr)
    else:
        ax.set_title(titlestr + field[0].name + depthstr + timestr)
    if not spherical:
        ax.set_xlabel('Zonal distance [m]')
        ax.set_ylabel('Meridional distance [m]')
    plt.draw()
    if savefile:
        plt.savefig(savefile)
        logger.info('Plot saved to ' + savefile + '.png')
        plt.close()
    return plt, fig, ax, cartopy
def create_parcelsfig_axis(spherical, land=True, projection=None, central_longitude=0):
    """Create a matplotlib (and optionally cartopy) figure/axes for plotting.

    :param spherical: whether the data uses geographic coordinates
    :param land: whether to draw coastlines (spherical plots only)
    :param projection: cartopy projection to use (default PlateCarree)
    :param central_longitude: central longitude for the default PlateCarree
    :returns: tuple (plt, fig, ax, cartopy); all None when the plotting
        libraries are unavailable. ``cartopy`` is None for flat plots.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:  # only a missing package, never e.g. KeyboardInterrupt
        logger.info("Visualisation is not possible. Matplotlib not found.")
        return None, None, None, None  # creating axes was not possible
    if projection is not None and not spherical:
        # Original message used 'doesn''t', which concatenates to "doesnt".
        raise RuntimeError("projection not accepted when Field doesn't have geographic coordinates")
    if spherical:
        try:
            import cartopy
        except ImportError:
            logger.info("Visualisation of field with geographic coordinates is not possible. Cartopy not found.")
            return None, None, None, None  # creating axes was not possible
        projection = cartopy.crs.PlateCarree(central_longitude) if projection is None else projection
        fig, ax = plt.subplots(1, 1, subplot_kw={'projection': projection})
        try:  # gridlines not supported for all projections
            gl = ax.gridlines(crs=projection, draw_labels=True)
            gl.xlabels_top, gl.ylabels_right = (False, False)
            gl.xformatter = cartopy.mpl.gridliner.LONGITUDE_FORMATTER
            gl.yformatter = cartopy.mpl.gridliner.LATITUDE_FORMATTER
        except Exception:  # best-effort decoration; never fail the plot for it
            pass
        if land:
            ax.coastlines()
    else:
        # Flat mesh: plain matplotlib axes with a grid, no cartopy.
        cartopy = None
        fig, ax = plt.subplots(1, 1)
        ax.grid()
    return plt, fig, ax, cartopy
def parsedomain(domain, field):
    """Translate a plotting domain into (latN, latS, lonE, lonW) grid indices.

    ``domain`` may be None (full grid), a dict with keys 'N'/'S'/'E'/'W',
    or a 4-sequence [N, S, E, W] (pre-v2.0.0 style).
    """
    field.grid.check_zonal_periodic()
    if domain is None:
        # Whole grid; S-grids store 2-D lon/lat arrays.
        if field.grid.gtype in [GridCode.RectilinearSGrid, GridCode.CurvilinearSGrid]:
            return field.grid.lon.shape[0], 0, field.grid.lon.shape[1], 0
        return len(field.grid.lat), 0, len(field.grid.lon), 0
    if not isinstance(domain, dict) and len(domain) == 4:  # for backward compatibility with <v2.0.0
        domain = {'N': domain[0], 'S': domain[1], 'E': domain[2], 'W': domain[3]}
    _, _, _, lon_w, lat_s, _ = field.search_indices(domain['W'], domain['S'], 0, 0, 0, search2D=True)
    _, _, _, lon_e, lat_n, _ = field.search_indices(domain['E'], domain['N'], 0, 0, 0, search2D=True)
    return lat_n + 1, lat_s, lon_e + 1, lon_w
def parsetimestr(time_origin, show_time):
    """Build a human-readable time suffix for plot titles."""
    if time_origin.calendar is None:
        # No calendar attached: report elapsed time from the origin.
        return ' after ' + str(delta(seconds=show_time)) + ' hours'
    full = str(time_origin.fulltime(show_time))
    return ' on ' + full[:10] + ' ' + full[11:19]
def cartopy_colorbar(cs, plt, fig, ax):
    # Place a colorbar in its own axes and keep it glued to the right-hand
    # edge of the map axes whenever the figure is resized.
    cbar_ax = fig.add_axes([0, 0, 0.1, 0.1])  # placeholder position; fixed below
    fig.subplots_adjust(hspace=0, wspace=0, top=0.925, left=0.1)
    plt.colorbar(cs, cax=cbar_ax)
    def resize_colorbar(event):
        # Recompute the colorbar position from the (possibly new) axes box.
        plt.draw()
        posn = ax.get_position()
        cbar_ax.set_position([posn.x0 + posn.width + 0.01, posn.y0, 0.04, posn.height])
    fig.canvas.mpl_connect('resize_event', resize_colorbar)
    resize_colorbar(None)  # position once immediately
|
from datetime import datetime
from datetime import timedelta as delta
import numpy as np
from parcels.field import Field
from parcels.field import VectorField
from parcels.grid import CurvilinearGrid
from parcels.grid import GridCode
from parcels.tools.error import TimeExtrapolationError
from parcels.tools.loggers import logger
def plotparticles(particles, with_particles=True, show_time=None, field=None, domain=None, projection=None,
                  land=True, vmin=None, vmax=None, savefile=None, animation=False, **kwargs):
    """Function to plot a Parcels ParticleSet
    :param show_time: Time at which to show the ParticleSet
    :param with_particles: Boolean whether particles are also plotted on Field
    :param field: Field to plot under particles (either None, a Field object, or 'vector')
    :param domain: dictionary (with keys 'N', 'S', 'E', 'W') defining domain to show
    :param projection: type of cartopy projection to use (default PlateCarree)
    :param land: Boolean whether to show land. This is ignored for flat meshes
    :param vmin: minimum colour scale (only in single-plot mode)
    :param vmax: maximum colour scale (only in single-plot mode)
    :param savefile: Name of a file to save the plot to
    :param animation: Boolean whether result is a single plot, or an animation
    """
    # Normalise show_time to seconds relative to the fieldset time origin.
    show_time = particles[0].time if show_time is None else show_time
    if isinstance(show_time, datetime):
        show_time = np.datetime64(show_time)
    if isinstance(show_time, np.datetime64):
        if not particles.time_origin:
            raise NotImplementedError(
                'If fieldset.time_origin is not a date, showtime cannot be a date in particleset.show()')
        show_time = particles.time_origin.reltime(show_time)
    if isinstance(show_time, delta):
        show_time = show_time.total_seconds()
    if np.isnan(show_time):
        # Fall back to the start of the gridset's full time range.
        show_time, _ = particles.fieldset.gridset.dimrange('time_full')
    if field is None:
        # No background field: draw only particle positions on bare axes.
        spherical = True if particles.fieldset.U.grid.mesh == 'spherical' else False
        plt, fig, ax, cartopy = create_parcelsfig_axis(spherical, land, projection)
        if plt is None:
            return  # creating axes was not possible
        ax.set_title('Particles' + parsetimestr(particles.fieldset.U.grid.time_origin, show_time))
        latN, latS, lonE, lonW = parsedomain(domain, particles.fieldset.U)
        # Set plot extent; curvilinear grids carry 2-D lon/lat arrays.
        if cartopy is None or projection is None:
            if domain is not None:
                if isinstance(particles.fieldset.U.grid, CurvilinearGrid):
                    ax.set_xlim(particles.fieldset.U.grid.lon[latS, lonW], particles.fieldset.U.grid.lon[latN, lonE])
                    ax.set_ylim(particles.fieldset.U.grid.lat[latS, lonW], particles.fieldset.U.grid.lat[latN, lonE])
                else:
                    ax.set_xlim(particles.fieldset.U.grid.lon[lonW], particles.fieldset.U.grid.lon[lonE])
                    ax.set_ylim(particles.fieldset.U.grid.lat[latS], particles.fieldset.U.grid.lat[latN])
            else:
                ax.set_xlim(np.nanmin(particles.fieldset.U.grid.lon), np.nanmax(particles.fieldset.U.grid.lon))
                ax.set_ylim(np.nanmin(particles.fieldset.U.grid.lat), np.nanmax(particles.fieldset.U.grid.lat))
        elif domain is not None:
            if isinstance(particles.fieldset.U.grid, CurvilinearGrid):
                ax.set_extent([particles.fieldset.U.grid.lon[latS, lonW], particles.fieldset.U.grid.lon[latN, lonE],
                               particles.fieldset.U.grid.lat[latS, lonW], particles.fieldset.U.grid.lat[latN, lonE]])
            else:
                ax.set_extent([particles.fieldset.U.grid.lon[lonW], particles.fieldset.U.grid.lon[lonE],
                               particles.fieldset.U.grid.lat[latS], particles.fieldset.U.grid.lat[latN]])
    else:
        # Background field requested: delegate field drawing to plotfield().
        if field == 'vector':
            field = particles.fieldset.UV
        elif not isinstance(field, Field):
            field = getattr(particles.fieldset, field)
        depth_level = kwargs.pop('depth_level', 0)
        plt, fig, ax, cartopy = plotfield(field=field, animation=animation, show_time=show_time, domain=domain,
                                          projection=projection, land=land, vmin=vmin, vmax=vmax, savefile=None,
                                          titlestr='Particles and ', depth_level=depth_level)
        if plt is None:
            return  # creating axes was not possible
    if with_particles:
        # Scatter the particle positions on top of whatever was drawn.
        plon = np.array([p.lon for p in particles])
        plat = np.array([p.lat for p in particles])
        if cartopy:
            ax.scatter(plon, plat, s=20, color='black', zorder=20, transform=cartopy.crs.PlateCarree())
        else:
            ax.scatter(plon, plat, s=20, color='black', zorder=20)
    # Output: live animation frame, interactive window, or file on disk.
    if animation:
        plt.draw()
        plt.pause(0.0001)
    elif savefile is None:
        plt.show()
    else:
        plt.savefig(savefile)
        logger.info('Plot saved to ' + savefile + '.png')
        plt.close()
def plotfield(field, show_time=None, domain=None, depth_level=0, projection=None, land=True,
              vmin=None, vmax=None, savefile=None, **kwargs):
    """Function to plot a Parcels Field
    :param show_time: Time at which to show the Field
    :param domain: dictionary (with keys 'N', 'S', 'E', 'W') defining domain to show
    :param depth_level: depth level to be plotted (default 0)
    :param projection: type of cartopy projection to use (default PlateCarree)
    :param land: Boolean whether to show land. This is ignored for flat meshes
    :param vmin: minimum colour scale (only in single-plot mode)
    :param vmax: maximum colour scale (only in single-plot mode)
    :param savefile: Name of a file to save the plot to
    :param animation: Boolean whether result is a single plot, or an animation
    """
    # Normalise to a list of scalar Fields; vectors are drawn as quivers.
    if type(field) is VectorField:
        spherical = True if field.U.grid.mesh == 'spherical' else False
        field = [field.U, field.V]
        plottype = 'vector'
    elif type(field) is Field:
        spherical = True if field.grid.mesh == 'spherical' else False
        field = [field]
        plottype = 'scalar'
    else:
        raise RuntimeError('field needs to be a Field or VectorField object')
    plt, fig, ax, cartopy = create_parcelsfig_axis(spherical, land, projection=projection)
    if plt is None:
        return None, None, None, None  # creating axes was not possible
    data = {}
    plotlon = {}
    plotlat = {}
    for i, fld in enumerate(field):
        show_time = fld.grid.time[0] if show_time is None else show_time
        if fld.grid.defer_load:
            fld.fieldset.computeTimeChunk(show_time, 1)
        (idx, periods) = fld.time_index(show_time)
        # Wrap periodic time back into the loaded time range.
        show_time -= periods * (fld.grid.time_full[-1] - fld.grid.time_full[0])
        if show_time > fld.grid.time[-1] or show_time < fld.grid.time[0]:
            raise TimeExtrapolationError(show_time, field=fld, msg='show_time')
        latN, latS, lonE, lonW = parsedomain(domain, fld)
        if isinstance(fld.grid, CurvilinearGrid):
            plotlon[i] = fld.grid.lon[latS:latN, lonW:lonE]
            plotlat[i] = fld.grid.lat[latS:latN, lonW:lonE]
        else:
            plotlon[i] = fld.grid.lon[lonW:lonE]
            plotlat[i] = fld.grid.lat[latS:latN]
        if i > 0 and not np.allclose(plotlon[i], plotlon[0]):
            raise RuntimeError('VectorField needs to be on an A-grid for plotting')
        # Extract the 2-D data slice (time-interpolated if needed).
        if fld.grid.time.size > 1:
            if fld.grid.zdim > 1:
                data[i] = np.squeeze(fld.temporal_interpolate_fullfield(idx, show_time))[depth_level, latS:latN, lonW:lonE]
            else:
                data[i] = np.squeeze(fld.temporal_interpolate_fullfield(idx, show_time))[latS:latN, lonW:lonE]
        else:
            if fld.grid.zdim > 1:
                data[i] = np.squeeze(fld.data)[depth_level, latS:latN, lonW:lonE]
            else:
                data[i] = np.squeeze(fld.data)[latS:latN, lonW:lonE]
    if plottype == 'vector':
        if field[0].interp_method == 'cgrid_velocity':
            logger.warning_once('Plotting a C-grid velocity field is achieved via an A-grid projection, reducing the plot accuracy')
            # Average C-grid staggered velocities onto cell centres (A-grid).
            d = np.empty_like(data[0])
            d[:-1, :] = (data[0][:-1, :] + data[0][1:, :]) / 2.
            d[-1, :] = data[0][-1, :]
            data[0] = d
            d = np.empty_like(data[0])
            d[:, :-1] = (data[0][:, :-1] + data[0][:, 1:]) / 2.
            d[:, -1] = data[0][:, -1]
            data[1] = d
        spd = data[0] ** 2 + data[1] ** 2
        speed = np.where(spd > 0, np.sqrt(spd), 0)
        vmin = speed.min() if vmin is None else vmin
        vmax = speed.max() if vmax is None else vmax
        if isinstance(field[0].grid, CurvilinearGrid):
            x, y = plotlon[0], plotlat[0]
        else:
            x, y = np.meshgrid(plotlon[0], plotlat[0])
        # Unit direction vectors; speed is shown via the quiver colour map.
        u = np.where(speed > 0., data[0]/speed, 0)
        v = np.where(speed > 0., data[1]/speed, 0)
        if cartopy:
            cs = ax.quiver(x, y, u, v, speed, cmap=plt.cm.gist_ncar, clim=[vmin, vmax], scale=50, transform=cartopy.crs.PlateCarree())
        else:
            cs = ax.quiver(x, y, u, v, speed, cmap=plt.cm.gist_ncar, clim=[vmin, vmax], scale=50)
    else:
        vmin = data[0].min() if vmin is None else vmin
        vmax = data[0].max() if vmax is None else vmax
        assert len(data[0].shape) == 2
        # Re-centre staggered data for pcolormesh depending on interp method.
        if field[0].interp_method == 'cgrid_tracer':
            d = data[0][1:, 1:]
        elif field[0].interp_method == 'cgrid_velocity':
            if field[0].fieldtype == 'U':
                d = np.empty_like(data[0])
                d[:-1, :-1] = (data[0][1:, :-1] + data[0][1:, 1:]) / 2.
            elif field[0].fieldtype == 'V':
                d = np.empty_like(data[0])
                d[:-1, :-1] = (data[0][:-1, 1:] + data[0][1:, 1:]) / 2.
            else:  # W
                d = data[0][1:, 1:]
        else:  # if A-grid
            d = (data[0][:-1, :-1] + data[0][1:, :-1] + data[0][:-1, 1:] + data[0][1:, 1:])/4.
            # Zero out cells where any corner is exactly zero
            # (presumably a land mask -- TODO confirm).
            d = np.where(data[0][:-1, :-1] == 0, 0, d)
            d = np.where(data[0][1:, :-1] == 0, 0, d)
            d = np.where(data[0][1:, 1:] == 0, 0, d)
            d = np.where(data[0][:-1, 1:] == 0, 0, d)
        if cartopy:
            cs = ax.pcolormesh(plotlon[0], plotlat[0], d, transform=cartopy.crs.PlateCarree())
        else:
            cs = ax.pcolormesh(plotlon[0], plotlat[0], d)
    if cartopy is None:
        ax.set_xlim(np.nanmin(plotlon[0]), np.nanmax(plotlon[0]))
        ax.set_ylim(np.nanmin(plotlat[0]), np.nanmax(plotlat[0]))
    elif domain is not None:
        ax.set_extent([np.nanmin(plotlon[0]), np.nanmax(plotlon[0]), np.nanmin(plotlat[0]), np.nanmax(plotlat[0])], crs=cartopy.crs.PlateCarree())
    cs.cmap.set_over('k')
    cs.cmap.set_under('w')
    cs.set_clim(vmin, vmax)
    cartopy_colorbar(cs, plt, fig, ax)
    # Compose the title from time and (if 3-D) depth/level information.
    timestr = parsetimestr(field[0].grid.time_origin, show_time)
    titlestr = kwargs.pop('titlestr', '')
    if field[0].grid.zdim > 1:
        if field[0].grid.gtype in [GridCode.CurvilinearZGrid, GridCode.RectilinearZGrid]:
            gphrase = 'depth'
            depth_or_level = field[0].grid.depth[depth_level]
        else:
            gphrase = 'level'
            depth_or_level = depth_level
        depthstr = ' at %s %g ' % (gphrase, depth_or_level)
    else:
        depthstr = ''
    if plottype == 'vector':
        ax.set_title(titlestr + 'Velocity field' + depthstr + timestr)
    else:
        ax.set_title(titlestr + field[0].name + depthstr + timestr)
    if not spherical:
        ax.set_xlabel('Zonal distance [m]')
        ax.set_ylabel('Meridional distance [m]')
    plt.draw()
    if savefile:
        plt.savefig(savefile)
        logger.info('Plot saved to ' + savefile + '.png')
        plt.close()
    return plt, fig, ax, cartopy
def create_parcelsfig_axis(spherical, land=True, projection=None, central_longitude=0):
    """Create a matplotlib (and optionally cartopy) figure/axes for plotting.

    :param spherical: whether the data uses geographic coordinates
    :param land: whether to draw coastlines (spherical plots only)
    :param projection: cartopy projection to use (default PlateCarree)
    :param central_longitude: central longitude for the default PlateCarree
    :returns: tuple (plt, fig, ax, cartopy); all None when the plotting
        libraries are unavailable. ``cartopy`` is None for flat plots.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:  # only a missing package, never e.g. KeyboardInterrupt
        logger.info("Visualisation is not possible. Matplotlib not found.")
        return None, None, None, None  # creating axes was not possible
    if projection is not None and not spherical:
        # Original message used 'doesn''t', which concatenates to "doesnt".
        raise RuntimeError("projection not accepted when Field doesn't have geographic coordinates")
    if spherical:
        try:
            import cartopy
        except ImportError:
            logger.info("Visualisation of field with geographic coordinates is not possible. Cartopy not found.")
            return None, None, None, None  # creating axes was not possible
        projection = cartopy.crs.PlateCarree(central_longitude) if projection is None else projection
        fig, ax = plt.subplots(1, 1, subplot_kw={'projection': projection})
        try:  # gridlines not supported for all projections
            gl = ax.gridlines(crs=projection, draw_labels=True)
            gl.xlabels_top, gl.ylabels_right = (False, False)
            gl.xformatter = cartopy.mpl.gridliner.LONGITUDE_FORMATTER
            gl.yformatter = cartopy.mpl.gridliner.LATITUDE_FORMATTER
        except Exception:  # best-effort decoration; never fail the plot for it
            pass
        if land:
            ax.coastlines()
    else:
        # Flat mesh: plain matplotlib axes with a grid, no cartopy.
        cartopy = None
        fig, ax = plt.subplots(1, 1)
        ax.grid()
    return plt, fig, ax, cartopy
def parsedomain(domain, field):
    """Translate a plotting domain into (latN, latS, lonE, lonW) grid indices.

    ``domain`` may be None (full grid), a dict with keys 'N'/'S'/'E'/'W',
    or a 4-sequence [N, S, E, W] (pre-v2.0.0 style).
    """
    field.grid.check_zonal_periodic()
    if domain is None:
        # Whole grid; S-grids store 2-D lon/lat arrays.
        if field.grid.gtype in [GridCode.RectilinearSGrid, GridCode.CurvilinearSGrid]:
            return field.grid.lon.shape[0], 0, field.grid.lon.shape[1], 0
        return len(field.grid.lat), 0, len(field.grid.lon), 0
    if not isinstance(domain, dict) and len(domain) == 4:  # for backward compatibility with <v2.0.0
        domain = {'N': domain[0], 'S': domain[1], 'E': domain[2], 'W': domain[3]}
    _, _, _, lon_w, lat_s, _ = field.search_indices(domain['W'], domain['S'], 0, 0, 0, search2D=True)
    _, _, _, lon_e, lat_n, _ = field.search_indices(domain['E'], domain['N'], 0, 0, 0, search2D=True)
    return lat_n + 1, lat_s, lon_e + 1, lon_w
def parsetimestr(time_origin, show_time):
    """Build a human-readable time suffix for plot titles."""
    if time_origin.calendar is None:
        # No calendar attached: report elapsed time from the origin.
        return ' after ' + str(delta(seconds=show_time)) + ' hours'
    full = str(time_origin.fulltime(show_time))
    return ' on ' + full[:10] + ' ' + full[11:19]
def cartopy_colorbar(cs, plt, fig, ax):
    # Place a colorbar in its own axes and keep it glued to the right-hand
    # edge of the map axes whenever the figure is resized.
    cbar_ax = fig.add_axes([0, 0, 0.1, 0.1])  # placeholder position; fixed below
    fig.subplots_adjust(hspace=0, wspace=0, top=0.925, left=0.1)
    plt.colorbar(cs, cax=cbar_ax)
    def resize_colorbar(event):
        # Recompute the colorbar position from the (possibly new) axes box.
        plt.draw()
        posn = ax.get_position()
        cbar_ax.set_position([posn.x0 + posn.width + 0.01, posn.y0, 0.04, posn.height])
    fig.canvas.mpl_connect('resize_event', resize_colorbar)
    resize_colorbar(None)  # position once immediately
|
en
| 0.821714
|
Function to plot a Parcels ParticleSet :param show_time: Time at which to show the ParticleSet :param with_particles: Boolean whether particles are also plotted on Field :param field: Field to plot under particles (either None, a Field object, or 'vector') :param domain: dictionary (with keys 'N', 'S', 'E', 'W') defining domain to show :param projection: type of cartopy projection to use (default PlateCarree) :param land: Boolean whether to show land. This is ignored for flat meshes :param vmin: minimum colour scale (only in single-plot mode) :param vmax: maximum colour scale (only in single-plot mode) :param savefile: Name of a file to save the plot to :param animation: Boolean whether result is a single plot, or an animation # creating axes was not possible # creating axes was not possible Function to plot a Parcels Field :param show_time: Time at which to show the Field :param domain: dictionary (with keys 'N', 'S', 'E', 'W') defining domain to show :param depth_level: depth level to be plotted (default 0) :param projection: type of cartopy projection to use (default PlateCarree) :param land: Boolean whether to show land. This is ignored for flat meshes :param vmin: minimum colour scale (only in single-plot mode) :param vmax: maximum colour scale (only in single-plot mode) :param savefile: Name of a file to save the plot to :param animation: Boolean whether result is a single plot, or an animation # creating axes was not possible # W # if A-grid # creating axes was not possible # creating axes was not possible # gridlines not supported for all projections # for backward compatibility with <v2.0.0
| 2.352203
| 2
|
bigtable/tests/unit/test_table.py
|
hugovk/google-cloud-python
| 1
|
6626657
|
<filename>bigtable/tests/unit/test_table.py
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from ._testing import _make_credentials
from google.api_core.exceptions import DeadlineExceeded
class Test___mutate_rows_request(unittest.TestCase):
    """Unit tests for google.cloud.bigtable.table._mutate_rows_request."""
    def _call_fut(self, table_name, rows):
        # Helper: late import keeps module-import errors inside the test run.
        from google.cloud.bigtable.table import _mutate_rows_request
        return _mutate_rows_request(table_name, rows)
    @mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3)
    def test__mutate_rows_too_many_mutations(self):
        # Four total mutations against a patched limit of three must raise.
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable.table import TooManyMutationsError
        table = mock.Mock(name="table", spec=["name"])
        table.name = "table"
        rows = [
            DirectRow(row_key=b"row_key", table=table),
            DirectRow(row_key=b"row_key_2", table=table),
        ]
        rows[0].set_cell("cf1", b"c1", 1)
        rows[0].set_cell("cf1", b"c1", 2)
        rows[1].set_cell("cf1", b"c1", 3)
        rows[1].set_cell("cf1", b"c1", 4)
        with self.assertRaises(TooManyMutationsError):
            self._call_fut("table", rows)
    def test__mutate_rows_request(self):
        # The built request protobuf must contain one entry per row, in
        # order, each carrying its SetCell mutation.
        from google.cloud.bigtable.row import DirectRow
        table = mock.Mock(name="table", spec=["name"])
        table.name = "table"
        rows = [
            DirectRow(row_key=b"row_key", table=table),
            DirectRow(row_key=b"row_key_2"),
        ]
        rows[0].set_cell("cf1", b"c1", b"1")
        rows[1].set_cell("cf1", b"c1", b"2")
        result = self._call_fut("table", rows)
        expected_result = _mutate_rows_request_pb(table_name="table")
        entry1 = expected_result.entries.add()
        entry1.row_key = b"row_key"
        mutations1 = entry1.mutations.add()
        mutations1.set_cell.family_name = "cf1"
        mutations1.set_cell.column_qualifier = b"c1"
        # timestamp_micros of -1 presumably means server-assigned -- confirm.
        mutations1.set_cell.timestamp_micros = -1
        mutations1.set_cell.value = b"1"
        entry2 = expected_result.entries.add()
        entry2.row_key = b"row_key_2"
        mutations2 = entry2.mutations.add()
        mutations2.set_cell.family_name = "cf1"
        mutations2.set_cell.column_qualifier = b"c1"
        mutations2.set_cell.timestamp_micros = -1
        mutations2.set_cell.value = b"2"
        self.assertEqual(result, expected_result)
class Test__check_row_table_name(unittest.TestCase):
    """Unit tests for google.cloud.bigtable.table._check_row_table_name."""
    def _call_fut(self, table_name, row):
        # Helper: late import keeps module-import errors inside the test run.
        from google.cloud.bigtable.table import _check_row_table_name
        return _check_row_table_name(table_name, row)
    def test_wrong_table_name(self):
        # A row bound to a different table must raise TableMismatchError.
        from google.cloud.bigtable.table import TableMismatchError
        from google.cloud.bigtable.row import DirectRow
        table = mock.Mock(name="table", spec=["name"])
        table.name = "table"
        row = DirectRow(row_key=b"row_key", table=table)
        with self.assertRaises(TableMismatchError):
            self._call_fut("other_table", row)
    def test_right_table_name(self):
        # Matching names: the check passes and returns a falsy value.
        from google.cloud.bigtable.row import DirectRow
        table = mock.Mock(name="table", spec=["name"])
        table.name = "table"
        row = DirectRow(row_key=b"row_key", table=table)
        result = self._call_fut("table", row)
        self.assertFalse(result)
class Test__check_row_type(unittest.TestCase):
    """Unit tests for google.cloud.bigtable.table._check_row_type."""
    def _call_fut(self, row):
        # Helper: late import keeps module-import errors inside the test run.
        from google.cloud.bigtable.table import _check_row_type
        return _check_row_type(row)
    def test_wrong_row_type(self):
        # Renamed from ``test_test_wrong_row_type`` (duplicated prefix);
        # unittest still discovers it via the ``test`` prefix.
        from google.cloud.bigtable.row import ConditionalRow
        row = ConditionalRow(row_key=b"row_key", table="table", filter_=None)
        with self.assertRaises(TypeError):
            self._call_fut(row)
    def test_right_row_type(self):
        # A DirectRow is accepted and the check returns a falsy value.
        from google.cloud.bigtable.row import DirectRow
        row = DirectRow(row_key=b"row_key", table="table")
        result = self._call_fut(row)
        self.assertFalse(result)
class TestTable(unittest.TestCase):
    """Unit tests for ``google.cloud.bigtable.table.Table``.

    Each test builds a real ``Client`` with mock credentials, then swaps the
    client's GAPIC data/admin transports for mocks so no RPCs are issued.
    """

    # Shared fixture constants used throughout the tests below.
    PROJECT_ID = "project-id"
    INSTANCE_ID = "instance-id"
    INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
    TABLE_ID = "table-id"
    TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID
    ROW_KEY = b"row-key"
    ROW_KEY_1 = b"row-key-1"
    ROW_KEY_2 = b"row-key-2"
    ROW_KEY_3 = b"row-key-3"
    FAMILY_NAME = u"family"
    QUALIFIER = b"qualifier"
    TIMESTAMP_MICROS = 100
    VALUE = b"value"
    _json_tests = None

    @staticmethod
    def _get_target_class():
        """Return the class under test (late import avoids collection errors)."""
        from google.cloud.bigtable.table import Table

        return Table

    def _make_one(self, *args, **kwargs):
        """Instantiate the class under test."""
        return self._get_target_class()(*args, **kwargs)

    @staticmethod
    def _get_target_client_class():
        """Return the Bigtable ``Client`` class (late import)."""
        from google.cloud.bigtable.client import Client

        return Client

    def _make_client(self, *args, **kwargs):
        """Instantiate a Bigtable client (typically with mock credentials)."""
        return self._get_target_client_class()(*args, **kwargs)

    def test_constructor_w_admin(self):
        """Constructing via an admin-enabled client wires id, client, name."""
        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT_ID, credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        self.assertEqual(table.table_id, self.TABLE_ID)
        self.assertIs(table._instance._client, client)
        self.assertEqual(table.name, self.TABLE_NAME)

    def test_constructor_wo_admin(self):
        """Same wiring holds when the client is not admin-enabled."""
        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT_ID, credentials=credentials, admin=False
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        self.assertEqual(table.table_id, self.TABLE_ID)
        self.assertIs(table._instance._client, client)
        self.assertEqual(table.name, self.TABLE_NAME)

    def _row_methods_helper(self):
        """Return a (table, row_key) pair for the row-factory tests."""
        client = self._make_client(
            project="project-id", credentials=_make_credentials(), admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        row_key = b"row_key"
        return table, row_key

    def test_row_factory_direct(self):
        """``table.row(key)`` with no flags yields a DirectRow."""
        from google.cloud.bigtable.row import DirectRow

        table, row_key = self._row_methods_helper()
        row = table.row(row_key)

        self.assertIsInstance(row, DirectRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)

    def test_row_factory_conditional(self):
        """``table.row(key, filter_=...)`` yields a ConditionalRow."""
        from google.cloud.bigtable.row import ConditionalRow

        table, row_key = self._row_methods_helper()
        filter_ = object()
        row = table.row(row_key, filter_=filter_)

        self.assertIsInstance(row, ConditionalRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)

    def test_row_factory_append(self):
        """``table.row(key, append=True)`` yields an AppendRow."""
        from google.cloud.bigtable.row import AppendRow

        table, row_key = self._row_methods_helper()
        row = table.row(row_key, append=True)

        self.assertIsInstance(row, AppendRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)

    def test_direct_row(self):
        """The dedicated ``direct_row`` factory yields a DirectRow."""
        from google.cloud.bigtable.row import DirectRow

        table, row_key = self._row_methods_helper()
        row = table.direct_row(row_key)

        self.assertIsInstance(row, DirectRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)

    def test_conditional_row(self):
        """The dedicated ``conditional_row`` factory yields a ConditionalRow."""
        from google.cloud.bigtable.row import ConditionalRow

        table, row_key = self._row_methods_helper()
        filter_ = object()
        row = table.conditional_row(row_key, filter_=filter_)

        self.assertIsInstance(row, ConditionalRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)

    def test_append_row(self):
        """The dedicated ``append_row`` factory yields an AppendRow."""
        from google.cloud.bigtable.row import AppendRow

        table, row_key = self._row_methods_helper()
        row = table.append_row(row_key)

        self.assertIsInstance(row, AppendRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)

    def test_row_factory_failure(self):
        """Passing both ``filter_`` and ``append`` is rejected."""
        table, row_key = self._row_methods_helper()
        with self.assertRaises(ValueError):
            table.row(row_key, filter_=object(), append=True)

    def test___eq__(self):
        """Two tables with the same id/instance compare equal."""
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table1 = self._make_one(self.TABLE_ID, instance)
        table2 = self._make_one(self.TABLE_ID, instance)
        self.assertEqual(table1, table2)

    def test___eq__type_differ(self):
        """A table never equals an object of another type."""
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table1 = self._make_one(self.TABLE_ID, instance)
        table2 = object()
        self.assertNotEqual(table1, table2)

    def test___ne__same_value(self):
        """``!=`` is False for tables with identical id/instance."""
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table1 = self._make_one(self.TABLE_ID, instance)
        table2 = self._make_one(self.TABLE_ID, instance)
        comparison_val = table1 != table2
        self.assertFalse(comparison_val)

    def test___ne__(self):
        """Tables with different ids compare unequal."""
        table1 = self._make_one("table_id1", None)
        table2 = self._make_one("table_id2", None)
        self.assertNotEqual(table1, table2)

    def _create_test_helper(self, split_keys=[], column_families={}):
        """Exercise ``Table.create`` and verify the CreateTableRequest.

        NOTE(review): the mutable default arguments are only read, never
        mutated, so they are harmless here -- but ``None`` defaults would be
        the safer idiom.
        """
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.cloud.bigtable_admin_v2.proto import table_pb2
        from google.cloud.bigtable_admin_v2.proto import (
            bigtable_table_admin_pb2 as table_admin_messages_v2_pb2,
        )
        from google.cloud.bigtable.column_family import ColumnFamily

        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        # Patch API calls
        client._table_admin_client = table_api

        # Perform the method and check the result.
        table.create(column_families=column_families, initial_split_keys=split_keys)

        # NOTE(review): ``id`` shadows the builtin, and ``self`` (the test
        # case, not a Table) is passed as the column family's table -- it is
        # unused by ``to_pb`` so the expected proto is still correct.
        families = {
            id: ColumnFamily(id, self, rule).to_pb()
            for (id, rule) in column_families.items()
        }

        split = table_admin_messages_v2_pb2.CreateTableRequest.Split
        splits = [split(key=split_key) for split_key in split_keys]

        table_api.create_table.assert_called_once_with(
            parent=self.INSTANCE_NAME,
            table=table_pb2.Table(column_families=families),
            table_id=self.TABLE_ID,
            initial_splits=splits,
        )

    def test_create(self):
        """Create with no families and no split keys."""
        self._create_test_helper()

    def test_create_with_families(self):
        """Create with a GC-rule-bearing column family."""
        from google.cloud.bigtable.column_family import MaxVersionsGCRule

        families = {"family": MaxVersionsGCRule(5)}
        self._create_test_helper(column_families=families)

    def test_create_with_split_keys(self):
        """Create with initial split keys."""
        self._create_test_helper(split_keys=[b"split1", b"split2", b"split3"])

    def test_exists(self):
        """``exists`` maps get_table success/NotFound/other-error to
        True / False / raised exception respectively."""
        from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2
        from google.cloud.bigtable_admin_v2.proto import (
            bigtable_table_admin_pb2 as table_messages_v1_pb2,
        )
        from google.cloud.bigtable_admin_v2.gapic import (
            bigtable_instance_admin_client,
            bigtable_table_admin_client,
        )
        from google.api_core.exceptions import NotFound
        from google.api_core.exceptions import BadRequest

        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
            mock.Mock()
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)

        # Create response_pb
        response_pb = table_messages_v1_pb2.ListTablesResponse(
            tables=[table_data_v2_pb2.Table(name=self.TABLE_NAME)]
        )

        # Patch API calls: the stub returns success, then NotFound, then
        # BadRequest across three consecutive ``exists`` calls.
        client._table_admin_client = table_api
        client._instance_admin_client = instance_api
        bigtable_table_stub = client._table_admin_client.transport
        bigtable_table_stub.get_table.side_effect = [
            response_pb,
            NotFound("testing"),
            BadRequest("testing"),
        ]

        # Perform the method and check the result.
        table1 = instance.table(self.TABLE_ID)
        table2 = instance.table("table-id2")

        result = table1.exists()
        self.assertEqual(True, result)

        result = table2.exists()
        self.assertEqual(False, result)

        with self.assertRaises(BadRequest):
            table2.exists()

    def test_delete(self):
        """``delete`` issues the RPC and returns None."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        # Patch API calls
        client._table_admin_client = table_api

        # Create expected_result.
        expected_result = None  # delete() has no return value.

        # Perform the method and check the result.
        result = table.delete()
        self.assertEqual(result, expected_result)

    def _list_column_families_helper(self):
        """Exercise ``list_column_families`` against a stubbed get_table."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        # Create response_pb
        COLUMN_FAMILY_ID = "foo"
        column_family = _ColumnFamilyPB()
        response_pb = _TablePB(column_families={COLUMN_FAMILY_ID: column_family})

        # Patch the stub used by the API method.
        client._table_admin_client = table_api
        bigtable_table_stub = client._table_admin_client.transport
        bigtable_table_stub.get_table.side_effect = [response_pb]

        # Create expected_result.
        expected_result = {COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID)}

        # Perform the method and check the result.
        result = table.list_column_families()
        self.assertEqual(result, expected_result)

    def test_list_column_families(self):
        self._list_column_families_helper()

    def test_get_cluster_states(self):
        """Per-cluster replication states come back as ClusterState values."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState

        INITIALIZING = enum_table.ReplicationState.INITIALIZING
        PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE
        READY = enum_table.ReplicationState.READY

        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        response_pb = _TablePB(
            cluster_states={
                "cluster-id1": _ClusterStatePB(INITIALIZING),
                "cluster-id2": _ClusterStatePB(PLANNED_MAINTENANCE),
                "cluster-id3": _ClusterStatePB(READY),
            }
        )

        # Patch the stub used by the API method.
        client._table_admin_client = table_api
        bigtable_table_stub = client._table_admin_client.transport
        bigtable_table_stub.get_table.side_effect = [response_pb]

        # build expected result
        expected_result = {
            u"cluster-id1": ClusterState(INITIALIZING),
            u"cluster-id2": ClusterState(PLANNED_MAINTENANCE),
            u"cluster-id3": ClusterState(READY),
        }

        # Perform the method and check the result.
        result = table.get_cluster_states()
        self.assertEqual(result, expected_result)

    def _read_row_helper(self, chunks, expected_result, app_profile_id=None):
        """Exercise ``read_row``: stub the read_rows stream with ``chunks``
        and verify both the parsed result and the request construction."""
        from google.cloud._testing import _Monkey
        from google.cloud.bigtable import table as MUT
        from google.cloud.bigtable.row_set import RowSet
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.cloud.bigtable.row_filters import RowSampleFilter

        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id)

        # Create request_pb
        request_pb = object()  # Returned by our mock.
        mock_created = []

        # Capture the kwargs that read_row passes to _create_row_request.
        def mock_create_row_request(table_name, **kwargs):
            mock_created.append((table_name, kwargs))
            return request_pb

        # Create response_iterator
        if chunks is None:
            response_iterator = iter(())  # no responses at all
        else:
            response_pb = _ReadRowsResponsePB(chunks=chunks)
            response_iterator = iter([response_pb])

        # Patch the stub used by the API method.
        client._table_data_client = data_api
        client._table_admin_client = table_api
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[response_iterator]
        )

        # Perform the method and check the result.
        filter_obj = RowSampleFilter(0.33)
        result = None
        with _Monkey(MUT, _create_row_request=mock_create_row_request):
            result = table.read_row(self.ROW_KEY, filter_=filter_obj)

        # read_row should request exactly one key via a RowSet.
        row_set = RowSet()
        row_set.add_row_key(self.ROW_KEY)
        expected_request = [
            (
                table.name,
                {
                    "end_inclusive": False,
                    "row_set": row_set,
                    "app_profile_id": app_profile_id,
                    "end_key": None,
                    "limit": None,
                    "start_key": None,
                    "filter_": filter_obj,
                },
            )
        ]
        self.assertEqual(result, expected_result)
        self.assertEqual(mock_created, expected_request)

    def test_read_row_miss_no__responses(self):
        """An empty stream yields None (row not found)."""
        self._read_row_helper(None, None)

    def test_read_row_miss_no_chunks_in_response(self):
        """A response with no chunks also yields None."""
        chunks = []
        self._read_row_helper(chunks, None)

    def test_read_row_complete(self):
        """A single committed chunk parses into a PartialRowData."""
        from google.cloud.bigtable.row_data import Cell
        from google.cloud.bigtable.row_data import PartialRowData

        app_profile_id = "app-profile-id"
        chunk = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunks = [chunk]
        expected_result = PartialRowData(row_key=self.ROW_KEY)
        family = expected_result._cells.setdefault(self.FAMILY_NAME, {})
        column = family.setdefault(self.QUALIFIER, [])
        column.append(Cell.from_pb(chunk))
        self._read_row_helper(chunks, expected_result, app_profile_id)

    def test_read_row_more_than_one_row_returned(self):
        """Two committed rows for a single-row read is an error."""
        app_profile_id = "app-profile-id"
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )

        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )

        chunks = [chunk_1, chunk_2]
        with self.assertRaises(ValueError):
            self._read_row_helper(chunks, None, app_profile_id)

    def test_read_row_still_partial(self):
        """A stream ending without commit_row is an error."""
        chunk = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
        )
        # No "commit row".
        chunks = [chunk]
        with self.assertRaises(ValueError):
            self._read_row_helper(chunks, None)

    def test_mutate_rows(self):
        """``mutate_rows`` delegates to _RetryableMutateRowsWorker and
        returns its per-row statuses."""
        from google.rpc.status_pb2 import Status
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        client._table_admin_client = table_api
        table = self._make_one(self.TABLE_ID, instance)

        response = [Status(code=0), Status(code=1)]

        mock_worker = mock.Mock(return_value=response)
        with mock.patch(
            "google.cloud.bigtable.table._RetryableMutateRowsWorker",
            new=mock.MagicMock(return_value=mock_worker),
        ):
            statuses = table.mutate_rows([mock.MagicMock(), mock.MagicMock()])
        result = [status.code for status in statuses]
        expected_result = [0, 1]

        self.assertEqual(result, expected_result)

    def test_read_rows(self):
        """``read_rows`` builds the request and wraps it in PartialRowsData."""
        from google.cloud._testing import _Monkey
        from google.cloud.bigtable.row_data import PartialRowsData
        from google.cloud.bigtable import table as MUT
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        app_profile_id = "app-profile-id"
        table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id)

        # Create request_pb
        request = retry = object()  # Returned by our mock.
        mock_created = []

        # Capture the kwargs that read_rows passes to _create_row_request.
        def mock_create_row_request(table_name, **kwargs):
            mock_created.append((table_name, kwargs))
            return request

        # Create expected_result.
        expected_result = PartialRowsData(
            client._table_data_client.transport.read_rows, request, retry
        )

        # Perform the method and check the result.
        start_key = b"start-key"
        end_key = b"end-key"
        filter_obj = object()
        limit = 22
        with _Monkey(MUT, _create_row_request=mock_create_row_request):
            result = table.read_rows(
                start_key=start_key,
                end_key=end_key,
                filter_=filter_obj,
                limit=limit,
                retry=retry,
            )

        self.assertEqual(result.rows, expected_result.rows)
        self.assertEqual(result.retry, expected_result.retry)
        created_kwargs = {
            "start_key": start_key,
            "end_key": end_key,
            "filter_": filter_obj,
            "limit": limit,
            "end_inclusive": False,
            "app_profile_id": app_profile_id,
            "row_set": None,
        }
        self.assertEqual(mock_created, [(table.name, created_kwargs)])

    def test_read_retry_rows(self):
        """read_rows retries through two failing streams, then succeeds."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.api_core import retry

        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception)

        # Create response_iterator
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_1,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )

        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )

        response_1 = _ReadRowsResponseV2([chunk_1])
        response_2 = _ReadRowsResponseV2([chunk_2])
        response_failure_iterator_1 = _MockFailureIterator_1()
        response_failure_iterator_2 = _MockFailureIterator_2([response_1])
        response_iterator = _MockReadRowsIterator(response_2)

        # Patch the stub used by the API method: fail, fail mid-stream,
        # then succeed -- the retry predicate must resume the read.
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[
                response_failure_iterator_1,
                response_failure_iterator_2,
                response_iterator,
            ]
        )

        rows = []
        for row in table.read_rows(
            start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2, retry=retry_read_rows
        ):
            rows.append(row)

        result = rows[1]
        self.assertEqual(result.row_key, self.ROW_KEY_2)

    def test_yield_retry_rows(self):
        """Deprecated ``yield_rows`` still retries and warns once."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        import warnings

        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        # Create response_iterator
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_1,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )

        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )

        response_1 = _ReadRowsResponseV2([chunk_1])
        response_2 = _ReadRowsResponseV2([chunk_2])
        response_failure_iterator_1 = _MockFailureIterator_1()
        response_failure_iterator_2 = _MockFailureIterator_2([response_1])
        response_iterator = _MockReadRowsIterator(response_2)

        # Patch the stub used by the API method.
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[
                response_failure_iterator_1,
                response_failure_iterator_2,
                response_iterator,
            ]
        )

        rows = []
        with warnings.catch_warnings(record=True) as warned:
            for row in table.yield_rows(
                start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2
            ):
                rows.append(row)

        # yield_rows is deprecated in favor of read_rows.
        self.assertEqual(len(warned), 1)
        self.assertIs(warned[0].category, DeprecationWarning)

        result = rows[1]
        self.assertEqual(result.row_key, self.ROW_KEY_2)

    def test_yield_rows_with_row_set(self):
        """``yield_rows`` honors a RowSet (range + explicit key) and warns."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.cloud.bigtable.row_set import RowSet
        from google.cloud.bigtable.row_set import RowRange
        import warnings

        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        # Create response_iterator
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_1,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )

        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )

        chunk_3 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_3,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )

        response_1 = _ReadRowsResponseV2([chunk_1])
        response_2 = _ReadRowsResponseV2([chunk_2])
        response_3 = _ReadRowsResponseV2([chunk_3])
        response_iterator = _MockReadRowsIterator(response_1, response_2, response_3)

        # Patch the stub used by the API method.
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[response_iterator]
        )

        rows = []
        row_set = RowSet()
        row_set.add_row_range(
            RowRange(start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2)
        )
        row_set.add_row_key(self.ROW_KEY_3)

        with warnings.catch_warnings(record=True) as warned:
            for row in table.yield_rows(row_set=row_set):
                rows.append(row)

        self.assertEqual(len(warned), 1)
        self.assertIs(warned[0].category, DeprecationWarning)

        self.assertEqual(rows[0].row_key, self.ROW_KEY_1)
        self.assertEqual(rows[1].row_key, self.ROW_KEY_2)
        self.assertEqual(rows[2].row_key, self.ROW_KEY_3)

    def test_sample_row_keys(self):
        """``sample_row_keys`` returns the stub's iterator unchanged."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        # Create response_iterator
        response_iterator = object()  # Just passed to a mock.

        # Patch the stub used by the API method.
        inner_api_calls = client._table_data_client._inner_api_calls
        inner_api_calls["sample_row_keys"] = mock.Mock(
            side_effect=[[response_iterator]]
        )

        # Create expected_result.
        expected_result = response_iterator

        # Perform the method and check the result.
        result = table.sample_row_keys()
        self.assertEqual(result[0], expected_result)

    def test_truncate(self):
        """``truncate`` drops all data via drop_row_range."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        data_api = mock.create_autospec(bigtable_client.BigtableClient)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        expected_result = None  # truncate() has no return value.
        with mock.patch("google.cloud.bigtable.table.Table.name", new=self.TABLE_NAME):
            result = table.truncate()

        table_api.drop_row_range.assert_called_once_with(
            name=self.TABLE_NAME, delete_all_data_from_table=True
        )

        self.assertEqual(result, expected_result)

    def test_truncate_w_timeout(self):
        """``truncate`` accepts an explicit timeout."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        data_api = mock.create_autospec(bigtable_client.BigtableClient)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        expected_result = None  # truncate() has no return value.

        timeout = 120
        result = table.truncate(timeout=timeout)

        self.assertEqual(result, expected_result)

    def test_drop_by_prefix(self):
        """``drop_by_prefix`` issues the RPC and returns None."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        data_api = mock.create_autospec(bigtable_client.BigtableClient)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        expected_result = None  # drop_by_prefix() has no return value.

        row_key_prefix = "row-key-prefix"
        result = table.drop_by_prefix(row_key_prefix=row_key_prefix)

        self.assertEqual(result, expected_result)

    def test_drop_by_prefix_w_timeout(self):
        """``drop_by_prefix`` accepts an explicit timeout."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        data_api = mock.create_autospec(bigtable_client.BigtableClient)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        expected_result = None  # drop_by_prefix() has no return value.

        row_key_prefix = "row-key-prefix"

        timeout = 120
        result = table.drop_by_prefix(row_key_prefix=row_key_prefix, timeout=timeout)

        self.assertEqual(result, expected_result)

    def test_mutations_batcher_factory(self):
        """``mutations_batcher`` forwards flush_count / max_row_bytes."""
        flush_count = 100
        max_row_bytes = 1000
        table = self._make_one(self.TABLE_ID, None)
        mutation_batcher = table.mutations_batcher(
            flush_count=flush_count, max_row_bytes=max_row_bytes
        )

        self.assertEqual(mutation_batcher.table.table_id, self.TABLE_ID)
        self.assertEqual(mutation_batcher.flush_count, flush_count)
        self.assertEqual(mutation_batcher.max_row_bytes, max_row_bytes)

    def test_get_iam_policy(self):
        """``get_iam_policy`` converts the proto Policy to the client type."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.iam.v1 import policy_pb2
        from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE

        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        version = 1
        etag = b"etag_v1"
        members = ["serviceAccount:<EMAIL>", "user:<EMAIL>"]
        bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
        iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)

        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        client._table_admin_client = table_api
        table_api.get_iam_policy.return_value = iam_policy

        result = table.get_iam_policy()

        table_api.get_iam_policy.assert_called_once_with(resource=table.name)
        self.assertEqual(result.version, version)
        self.assertEqual(result.etag, etag)

        admins = result.bigtable_admins
        self.assertEqual(len(admins), len(members))
        for found, expected in zip(sorted(admins), sorted(members)):
            self.assertEqual(found, expected)

    def test_set_iam_policy(self):
        """``set_iam_policy`` sends the proto Policy and parses the reply."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.iam.v1 import policy_pb2
        from google.cloud.bigtable.policy import Policy
        from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE

        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        version = 1
        etag = b"etag_v1"
        members = ["serviceAccount:<EMAIL>", "user:<EMAIL>"]
        bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}]
        iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)

        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        client._table_admin_client = table_api
        table_api.set_iam_policy.return_value = iam_policy_pb

        iam_policy = Policy(etag=etag, version=version)
        iam_policy[BIGTABLE_ADMIN_ROLE] = [
            Policy.user("<EMAIL>"),
            Policy.service_account("<EMAIL>"),
        ]

        result = table.set_iam_policy(iam_policy)

        table_api.set_iam_policy.assert_called_once_with(
            resource=table.name, policy=iam_policy_pb
        )
        self.assertEqual(result.version, version)
        self.assertEqual(result.etag, etag)

        admins = result.bigtable_admins
        self.assertEqual(len(admins), len(members))
        for found, expected in zip(sorted(admins), sorted(members)):
            self.assertEqual(found, expected)

    def test_test_iam_permissions(self):
        """``test_iam_permissions`` returns the permissions the API grants."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.iam.v1 import iam_policy_pb2

        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)

        permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"]

        response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions)

        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        table_api.test_iam_permissions.return_value = response
        client._table_admin_client = table_api

        result = table.test_iam_permissions(permissions)

        self.assertEqual(result, permissions)
        table_api.test_iam_permissions.assert_called_once_with(
            resource=table.name, permissions=permissions
        )
class Test__RetryableMutateRowsWorker(unittest.TestCase):
    """Unit tests for the ``_RetryableMutateRowsWorker`` helper.

    The worker wraps ``MutateRows`` so that rows whose mutations failed with
    a retryable RPC status are re-sent, while successes and non-retryable
    failures keep their first-seen status in ``responses_statuses``.
    """

    from grpc import StatusCode

    PROJECT_ID = "project-id"
    INSTANCE_ID = "instance-id"
    INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
    TABLE_ID = "table-id"

    # RPC Status Codes
    # Numeric gRPC status values used to build fake MutateRows responses.
    SUCCESS = StatusCode.OK.value[0]
    RETRYABLE_1 = StatusCode.DEADLINE_EXCEEDED.value[0]
    RETRYABLE_2 = StatusCode.ABORTED.value[0]
    NON_RETRYABLE = StatusCode.CANCELLED.value[0]

    @staticmethod
    def _get_target_class_for_worker():
        """Late-import and return the worker class under test."""
        from google.cloud.bigtable.table import _RetryableMutateRowsWorker

        return _RetryableMutateRowsWorker

    def _make_worker(self, *args, **kwargs):
        """Construct a worker instance via the class under test."""
        return self._get_target_class_for_worker()(*args, **kwargs)

    @staticmethod
    def _get_target_class_for_table():
        """Late-import and return the Table class used by fixtures."""
        from google.cloud.bigtable.table import Table

        return Table

    def _make_table(self, *args, **kwargs):
        """Construct a Table fixture."""
        return self._get_target_class_for_table()(*args, **kwargs)

    @staticmethod
    def _get_target_client_class():
        """Late-import and return the Client class used by fixtures."""
        from google.cloud.bigtable.client import Client

        return Client

    def _make_client(self, *args, **kwargs):
        """Construct a Client fixture."""
        return self._get_target_client_class()(*args, **kwargs)

    def _make_responses_statuses(self, codes):
        """Build a list of ``Status`` protobufs, one per status code."""
        from google.rpc.status_pb2 import Status

        response = [Status(code=code) for code in codes]
        return response

    def _make_responses(self, codes):
        """Build a fake ``MutateRowsResponse`` with one entry per code."""
        import six
        from google.cloud.bigtable_v2.proto.bigtable_pb2 import MutateRowsResponse
        from google.rpc.status_pb2 import Status

        entries = [
            MutateRowsResponse.Entry(index=i, status=Status(code=codes[i]))
            for i in six.moves.xrange(len(codes))
        ]
        return MutateRowsResponse(entries=entries)

    def test_callable_empty_rows(self):
        """Invoking the worker with no rows yields an empty status list."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        data_api = mock.create_autospec(bigtable_client.BigtableClient)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_table(self.TABLE_ID, instance)

        worker = self._make_worker(client, table.name, [])
        statuses = worker()

        self.assertEqual(len(statuses), 0)

    def test_callable_no_retry_strategy(self):
        """With retry=None, statuses are returned exactly as received."""
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        # Setup:
        #   - Mutate 3 rows.
        # Action:
        #   - Attempt to mutate the rows w/o any retry strategy.
        # Expectation:
        #   - Since no retry, should return statuses as they come back.
        #   - Even if there are retryable errors, no retry attempt is made.
        #   - State of responses_statuses should be
        #     [success, retryable, non-retryable]
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_table(self.TABLE_ID, instance)

        row_1 = DirectRow(row_key=b"row_key", table=table)
        row_1.set_cell("cf", b"col", b"value1")
        row_2 = DirectRow(row_key=b"row_key_2", table=table)
        row_2.set_cell("cf", b"col", b"value2")
        row_3 = DirectRow(row_key=b"row_key_3", table=table)
        row_3.set_cell("cf", b"col", b"value3")

        response = self._make_responses(
            [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
        )

        # Patch the wrapped gRPC method so the worker sees the canned response.
        with mock.patch("google.cloud.bigtable.table.wrap_method") as patched:
            patched.return_value = mock.Mock(return_value=[response])

            worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
            statuses = worker(retry=None)

        result = [status.code for status in statuses]
        expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]

        client._table_data_client._inner_api_calls["mutate_rows"].assert_called_once()
        self.assertEqual(result, expected_result)

    def test_callable_retry(self):
        """Retryable failures are re-sent until they succeed."""
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable.table import DEFAULT_RETRY
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        # Setup:
        #   - Mutate 3 rows.
        # Action:
        #   - Initial attempt will mutate all 3 rows.
        # Expectation:
        #   - First attempt will result in one retryable error.
        #   - Second attempt will result in success for the retry-ed row.
        #   - Check MutateRows is called twice.
        #   - State of responses_statuses should be
        #     [success, success, non-retryable]
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_table(self.TABLE_ID, instance)

        row_1 = DirectRow(row_key=b"row_key", table=table)
        row_1.set_cell("cf", b"col", b"value1")
        row_2 = DirectRow(row_key=b"row_key_2", table=table)
        row_2.set_cell("cf", b"col", b"value2")
        row_3 = DirectRow(row_key=b"row_key_3", table=table)
        row_3.set_cell("cf", b"col", b"value3")

        response_1 = self._make_responses(
            [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
        )
        response_2 = self._make_responses([self.SUCCESS])

        # Patch the stub used by the API method.
        client._table_data_client._inner_api_calls["mutate_rows"] = mock.Mock(
            side_effect=[[response_1], [response_2]]
        )

        # Short initial delay keeps the retry loop fast in tests.
        retry = DEFAULT_RETRY.with_delay(initial=0.1)
        worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
        statuses = worker(retry=retry)

        result = [status.code for status in statuses]
        expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE]

        self.assertEqual(
            client._table_data_client._inner_api_calls["mutate_rows"].call_count, 2
        )
        self.assertEqual(result, expected_result)

    def test_do_mutate_retryable_rows_empty_rows(self):
        """_do_mutate_retryable_rows() with no rows returns an empty list."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_table(self.TABLE_ID, instance)

        worker = self._make_worker(client, table.name, [])
        statuses = worker._do_mutate_retryable_rows()

        self.assertEqual(len(statuses), 0)

    def test_do_mutate_retryable_rows(self):
        """A single attempt with no retryable statuses returns immediately."""
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        # Setup:
        #   - Mutate 2 rows.
        # Action:
        #   - Initial attempt will mutate all 2 rows.
        # Expectation:
        #   - Expect [success, non-retryable]
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_table(self.TABLE_ID, instance)

        row_1 = DirectRow(row_key=b"row_key", table=table)
        row_1.set_cell("cf", b"col", b"value1")
        row_2 = DirectRow(row_key=b"row_key_2", table=table)
        row_2.set_cell("cf", b"col", b"value2")

        response = self._make_responses([self.SUCCESS, self.NON_RETRYABLE])

        # Patch the stub used by the API method.
        inner_api_calls = client._table_data_client._inner_api_calls
        inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

        worker = self._make_worker(client, table.name, [row_1, row_2])
        statuses = worker._do_mutate_retryable_rows()

        result = [status.code for status in statuses]
        expected_result = [self.SUCCESS, self.NON_RETRYABLE]

        self.assertEqual(result, expected_result)

    def test_do_mutate_retryable_rows_retry(self):
        """A retryable status raises _BigtableRetryableError for the retry loop."""
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable.table import _BigtableRetryableError
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        # Setup:
        #   - Mutate 3 rows.
        # Action:
        #   - Initial attempt will mutate all 3 rows.
        # Expectation:
        #   - Second row returns retryable error code, so expect a raise.
        #   - State of responses_statuses should be
        #     [success, retryable, non-retryable]
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_table(self.TABLE_ID, instance)

        row_1 = DirectRow(row_key=b"row_key", table=table)
        row_1.set_cell("cf", b"col", b"value1")
        row_2 = DirectRow(row_key=b"row_key_2", table=table)
        row_2.set_cell("cf", b"col", b"value2")
        row_3 = DirectRow(row_key=b"row_key_3", table=table)
        row_3.set_cell("cf", b"col", b"value3")

        response = self._make_responses(
            [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
        )

        # Patch the stub used by the API method.
        inner_api_calls = client._table_data_client._inner_api_calls
        inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

        worker = self._make_worker(client, table.name, [row_1, row_2, row_3])

        with self.assertRaises(_BigtableRetryableError):
            worker._do_mutate_retryable_rows()

        statuses = worker.responses_statuses
        result = [status.code for status in statuses]
        expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]

        self.assertEqual(result, expected_result)

    def test_do_mutate_retryable_rows_second_retry(self):
        """A second attempt re-sends only retryable rows; a remaining
        retryable status raises again with original row indexing intact."""
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable.table import _BigtableRetryableError
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        # Setup:
        #   - Mutate 4 rows.
        #   - First try results:
        #     [success, retryable, non-retryable, retryable]
        # Action:
        #   - Second try should re-attempt the 'retryable' rows.
        # Expectation:
        #   - After second try:
        #     [success, success, non-retryable, retryable]
        #   - One of the rows tried second time returns retryable error code,
        #     so expect a raise.
        #   - Exception contains response whose index should be '3' even though
        #     only two rows were retried.
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_table(self.TABLE_ID, instance)

        row_1 = DirectRow(row_key=b"row_key", table=table)
        row_1.set_cell("cf", b"col", b"value1")
        row_2 = DirectRow(row_key=b"row_key_2", table=table)
        row_2.set_cell("cf", b"col", b"value2")
        row_3 = DirectRow(row_key=b"row_key_3", table=table)
        row_3.set_cell("cf", b"col", b"value3")
        row_4 = DirectRow(row_key=b"row_key_4", table=table)
        row_4.set_cell("cf", b"col", b"value4")

        # Response covers only the two retried rows (row_2, row_4).
        response = self._make_responses([self.SUCCESS, self.RETRYABLE_1])

        # Patch the stub used by the API method.
        inner_api_calls = client._table_data_client._inner_api_calls
        inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

        worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4])
        # Seed statuses as if a first attempt already happened.
        worker.responses_statuses = self._make_responses_statuses(
            [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2]
        )

        with self.assertRaises(_BigtableRetryableError):
            worker._do_mutate_retryable_rows()

        statuses = worker.responses_statuses
        result = [status.code for status in statuses]
        expected_result = [
            self.SUCCESS,
            self.SUCCESS,
            self.NON_RETRYABLE,
            self.RETRYABLE_1,
        ]

        self.assertEqual(result, expected_result)

    def test_do_mutate_retryable_rows_second_try(self):
        """A second attempt that resolves every retryable row returns cleanly."""
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        # Setup:
        #   - Mutate 4 rows.
        #   - First try results:
        #     [success, retryable, non-retryable, retryable]
        # Action:
        #   - Second try should re-attempt the 'retryable' rows.
        # Expectation:
        #   - After second try:
        #     [success, non-retryable, non-retryable, success]
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_table(self.TABLE_ID, instance)

        row_1 = DirectRow(row_key=b"row_key", table=table)
        row_1.set_cell("cf", b"col", b"value1")
        row_2 = DirectRow(row_key=b"row_key_2", table=table)
        row_2.set_cell("cf", b"col", b"value2")
        row_3 = DirectRow(row_key=b"row_key_3", table=table)
        row_3.set_cell("cf", b"col", b"value3")
        row_4 = DirectRow(row_key=b"row_key_4", table=table)
        row_4.set_cell("cf", b"col", b"value4")

        # Response covers only the two retried rows (row_2, row_4).
        response = self._make_responses([self.NON_RETRYABLE, self.SUCCESS])

        # Patch the stub used by the API method.
        inner_api_calls = client._table_data_client._inner_api_calls
        inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

        worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4])
        # Seed statuses as if a first attempt already happened.
        worker.responses_statuses = self._make_responses_statuses(
            [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2]
        )

        statuses = worker._do_mutate_retryable_rows()

        result = [status.code for status in statuses]
        expected_result = [
            self.SUCCESS,
            self.NON_RETRYABLE,
            self.NON_RETRYABLE,
            self.SUCCESS,
        ]

        self.assertEqual(result, expected_result)

    def test_do_mutate_retryable_rows_second_try_no_retryable(self):
        """With nothing left to retry, the seeded statuses are returned as-is."""
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        # Setup:
        #   - Mutate 2 rows.
        #   - First try results: [success, non-retryable]
        # Action:
        #   - Second try has no row to retry.
        # Expectation:
        #   - After second try: [success, non-retryable]
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_table(self.TABLE_ID, instance)

        row_1 = DirectRow(row_key=b"row_key", table=table)
        row_1.set_cell("cf", b"col", b"value1")
        row_2 = DirectRow(row_key=b"row_key_2", table=table)
        row_2.set_cell("cf", b"col", b"value2")

        worker = self._make_worker(client, table.name, [row_1, row_2])
        worker.responses_statuses = self._make_responses_statuses(
            [self.SUCCESS, self.NON_RETRYABLE]
        )

        statuses = worker._do_mutate_retryable_rows()

        result = [status.code for status in statuses]
        expected_result = [self.SUCCESS, self.NON_RETRYABLE]

        self.assertEqual(result, expected_result)

    def test_do_mutate_retryable_rows_mismatch_num_responses(self):
        """Fewer response entries than mutated rows raises RuntimeError."""
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_table(self.TABLE_ID, instance)

        row_1 = DirectRow(row_key=b"row_key", table=table)
        row_1.set_cell("cf", b"col", b"value1")
        row_2 = DirectRow(row_key=b"row_key_2", table=table)
        row_2.set_cell("cf", b"col", b"value2")

        # Only one response entry for two rows -> mismatch.
        response = self._make_responses([self.SUCCESS])

        # Patch the stub used by the API method.
        inner_api_calls = client._table_data_client._inner_api_calls
        inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

        worker = self._make_worker(client, table.name, [row_1, row_2])
        with self.assertRaises(RuntimeError):
            worker._do_mutate_retryable_rows()
class Test__create_row_request(unittest.TestCase):
    """Unit tests for the module-level ``_create_row_request`` helper."""

    def _call_fut(
        self,
        table_name,
        start_key=None,
        end_key=None,
        filter_=None,
        limit=None,
        end_inclusive=False,
        app_profile_id=None,
        row_set=None,
    ):
        from google.cloud.bigtable.table import _create_row_request

        options = {
            "start_key": start_key,
            "end_key": end_key,
            "filter_": filter_,
            "limit": limit,
            "end_inclusive": end_inclusive,
            "app_profile_id": app_profile_id,
            "row_set": row_set,
        }
        return _create_row_request(table_name, **options)

    def test_table_name_only(self):
        """With only a table name, the request carries no row clauses."""
        name = "table_name"
        self.assertEqual(self._call_fut(name), _ReadRowsRequestPB(table_name=name))

    def test_row_range_row_set_conflict(self):
        """Passing both a row range and a row set must raise ValueError."""
        with self.assertRaises(ValueError):
            self._call_fut(None, end_key=object(), row_set=object())

    def test_row_range_start_key(self):
        """A lone start key becomes a closed start bound."""
        name = "table_name"
        begin = b"start_key"
        expected = _ReadRowsRequestPB(table_name=name)
        expected.rows.row_ranges.add(start_key_closed=begin)
        self.assertEqual(self._call_fut(name, start_key=begin), expected)

    def test_row_range_end_key(self):
        """A lone end key becomes an open end bound."""
        name = "table_name"
        finish = b"end_key"
        expected = _ReadRowsRequestPB(table_name=name)
        expected.rows.row_ranges.add(end_key_open=finish)
        self.assertEqual(self._call_fut(name, end_key=finish), expected)

    def test_row_range_both_keys(self):
        """Start and end keys form a closed-open range by default."""
        name = "table_name"
        begin, finish = b"start_key", b"end_key"
        expected = _ReadRowsRequestPB(table_name=name)
        expected.rows.row_ranges.add(start_key_closed=begin, end_key_open=finish)
        self.assertEqual(
            self._call_fut(name, start_key=begin, end_key=finish), expected
        )

    def test_row_range_both_keys_inclusive(self):
        """end_inclusive=True closes the end bound as well."""
        name = "table_name"
        begin, finish = b"start_key", b"end_key"
        expected = _ReadRowsRequestPB(table_name=name)
        expected.rows.row_ranges.add(start_key_closed=begin, end_key_closed=finish)
        self.assertEqual(
            self._call_fut(
                name, start_key=begin, end_key=finish, end_inclusive=True
            ),
            expected,
        )

    def test_with_filter(self):
        """A row filter is serialized into the request's filter field."""
        from google.cloud.bigtable.row_filters import RowSampleFilter

        name = "table_name"
        sample = RowSampleFilter(0.33)
        expected = _ReadRowsRequestPB(table_name=name, filter=sample.to_pb())
        self.assertEqual(self._call_fut(name, filter_=sample), expected)

    def test_with_limit(self):
        """A limit maps onto the rows_limit field."""
        name = "table_name"
        cap = 1337
        expected = _ReadRowsRequestPB(table_name=name, rows_limit=cap)
        self.assertEqual(self._call_fut(name, limit=cap), expected)

    def test_with_row_set(self):
        """An empty RowSet contributes nothing to the request."""
        from google.cloud.bigtable.row_set import RowSet

        name = "table_name"
        expected = _ReadRowsRequestPB(table_name=name)
        self.assertEqual(self._call_fut(name, row_set=RowSet()), expected)

    def test_with_app_profile_id(self):
        """The app profile id is forwarded verbatim."""
        name = "table_name"
        cap = 1337
        profile = "app-profile-id"
        expected = _ReadRowsRequestPB(
            table_name=name, rows_limit=cap, app_profile_id=profile
        )
        self.assertEqual(
            self._call_fut(name, limit=cap, app_profile_id=profile), expected
        )
def _ReadRowsRequestPB(*args, **kw):
    """Build a ``ReadRowsRequest`` protobuf from the given arguments."""
    from google.cloud.bigtable_v2.proto import bigtable_pb2

    request_cls = bigtable_pb2.ReadRowsRequest
    return request_cls(*args, **kw)
class Test_ClusterState(unittest.TestCase):
    """Unit tests for the ``ClusterState`` value object."""

    @staticmethod
    def _make_state(replication_state):
        from google.cloud.bigtable.table import ClusterState

        return ClusterState(replication_state)

    @staticmethod
    def _replication_enum():
        from google.cloud.bigtable.enums import Table as enum_table

        return enum_table.ReplicationState

    def test___eq__(self):
        """Two states wrapping the same enum value compare equal."""
        ready = self._replication_enum().READY
        self.assertEqual(self._make_state(ready), self._make_state(ready))

    def test___eq__type_differ(self):
        """A ClusterState never equals an unrelated object."""
        ready = self._replication_enum().READY
        self.assertNotEqual(self._make_state(ready), object())

    def test___ne__same_value(self):
        """!= is False for states wrapping the same enum value."""
        ready = self._replication_enum().READY
        self.assertFalse(self._make_state(ready) != self._make_state(ready))

    def test___ne__(self):
        """States wrapping different enum values compare unequal."""
        states = self._replication_enum()
        self.assertNotEqual(
            self._make_state(states.READY), self._make_state(states.INITIALIZING)
        )

    def test__repr__(self):
        """str() of a state is the bare enum name, and the constructor
        stores the raw replication state unchanged."""
        states = self._replication_enum()
        expected_names = {
            states.STATE_NOT_KNOWN: "STATE_NOT_KNOWN",
            states.INITIALIZING: "INITIALIZING",
            states.PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE",
            states.UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE",
            states.READY: "READY",
        }
        for value, name in expected_names.items():
            self.assertEqual(str(self._make_state(value)), name)
            self.assertEqual(self._make_state(value).replication_state, value)
def _ReadRowsResponseCellChunkPB(*args, **kw):
    """Build a ``ReadRowsResponse.CellChunk`` protobuf.

    ``family_name`` and ``qualifier`` are popped from the kwargs and routed
    into the chunk's wrapped-value fields after construction.
    """
    from google.cloud.bigtable_v2.proto import bigtable_pb2

    family = kw.pop("family_name")
    column = kw.pop("qualifier")
    chunk = bigtable_pb2.ReadRowsResponse.CellChunk(*args, **kw)
    chunk.family_name.value = family
    chunk.qualifier.value = column
    return chunk
def _ReadRowsResponsePB(*args, **kw):
    """Build a ``ReadRowsResponse`` protobuf from the given arguments."""
    from google.cloud.bigtable_v2.proto import bigtable_pb2

    response_cls = bigtable_pb2.ReadRowsResponse
    return response_cls(*args, **kw)
def _mutate_rows_request_pb(*args, **kw):
    """Build a ``MutateRowsRequest`` protobuf from the given arguments."""
    from google.cloud.bigtable_v2.proto import bigtable_pb2

    return bigtable_pb2.MutateRowsRequest(*args, **kw)
class _MockReadRowsIterator(object):
def __init__(self, *values):
self.iter_values = iter(values)
def next(self):
return next(self.iter_values)
__next__ = next
class _MockFailureIterator_1(object):
    """Iterator stub whose every ``next()`` raises DeadlineExceeded."""

    def next(self):
        raise DeadlineExceeded("Failed to read from server")

    def __next__(self):
        return self.next()
class _MockFailureIterator_2(object):
def __init__(self, *values):
self.iter_values = values[0]
self.calls = 0
def next(self):
self.calls += 1
if self.calls == 1:
return self.iter_values[0]
else:
raise DeadlineExceeded("Failed to read from server")
__next__ = next
class _ReadRowsResponseV2(object):
def __init__(self, chunks, last_scanned_row_key=""):
self.chunks = chunks
self.last_scanned_row_key = last_scanned_row_key
def _TablePB(*args, **kw):
    """Build an admin-API ``Table`` protobuf from the given arguments."""
    from google.cloud.bigtable_admin_v2.proto import table_pb2

    return table_pb2.Table(*args, **kw)
def _ColumnFamilyPB(*args, **kw):
    """Build an admin-API ``ColumnFamily`` protobuf from the given arguments."""
    from google.cloud.bigtable_admin_v2.proto import table_pb2

    return table_pb2.ColumnFamily(*args, **kw)
def _ClusterStatePB(replication_state):
    """Wrap a replication-state enum value in a ``Table.ClusterState`` pb."""
    from google.cloud.bigtable_admin_v2.proto import table_pb2

    state_cls = table_pb2.Table.ClusterState
    return state_cls(replication_state=replication_state)
def _read_rows_retry_exception(exc):
    """Retry predicate: read_rows is retried only on DeadlineExceeded."""
    retryable = isinstance(exc, DeadlineExceeded)
    return retryable
# bigtable/tests/unit/test_table.py
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from ._testing import _make_credentials
from google.api_core.exceptions import DeadlineExceeded
class Test___mutate_rows_request(unittest.TestCase):
    """Unit tests for the module-level ``_mutate_rows_request`` helper."""

    def _call_fut(self, table_name, rows):
        from google.cloud.bigtable.table import _mutate_rows_request

        return _mutate_rows_request(table_name, rows)

    @mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3)
    def test__mutate_rows_too_many_mutations(self):
        """More mutations than _MAX_BULK_MUTATIONS raises TooManyMutationsError."""
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable.table import TooManyMutationsError

        table = mock.Mock(name="table", spec=["name"])
        table.name = "table"
        first = DirectRow(row_key=b"row_key", table=table)
        second = DirectRow(row_key=b"row_key_2", table=table)
        # Four mutations total: one past the patched limit of three.
        first.set_cell("cf1", b"c1", 1)
        first.set_cell("cf1", b"c1", 2)
        second.set_cell("cf1", b"c1", 3)
        second.set_cell("cf1", b"c1", 4)
        with self.assertRaises(TooManyMutationsError):
            self._call_fut("table", [first, second])

    def test__mutate_rows_request(self):
        """Each row's pending mutations become one request entry apiece."""
        from google.cloud.bigtable.row import DirectRow

        table = mock.Mock(name="table", spec=["name"])
        table.name = "table"
        first = DirectRow(row_key=b"row_key", table=table)
        second = DirectRow(row_key=b"row_key_2")
        first.set_cell("cf1", b"c1", b"1")
        second.set_cell("cf1", b"c1", b"2")

        result = self._call_fut("table", [first, second])

        expected_result = _mutate_rows_request_pb(table_name="table")
        for row_key, value in ((b"row_key", b"1"), (b"row_key_2", b"2")):
            entry = expected_result.entries.add()
            entry.row_key = row_key
            mutation = entry.mutations.add()
            mutation.set_cell.family_name = "cf1"
            mutation.set_cell.column_qualifier = b"c1"
            mutation.set_cell.timestamp_micros = -1
            mutation.set_cell.value = value
        self.assertEqual(result, expected_result)
class Test__check_row_table_name(unittest.TestCase):
    """Unit tests for the module-level ``_check_row_table_name`` helper."""

    def _call_fut(self, table_name, row):
        from google.cloud.bigtable.table import _check_row_table_name

        return _check_row_table_name(table_name, row)

    @staticmethod
    def _make_row(table_name):
        from google.cloud.bigtable.row import DirectRow

        table = mock.Mock(name="table", spec=["name"])
        table.name = table_name
        return DirectRow(row_key=b"row_key", table=table)

    def test_wrong_table_name(self):
        """A row bound to a different table triggers TableMismatchError."""
        from google.cloud.bigtable.table import TableMismatchError

        row = self._make_row("table")
        with self.assertRaises(TableMismatchError):
            self._call_fut("other_table", row)

    def test_right_table_name(self):
        """A matching table name passes the check and returns a falsy value."""
        row = self._make_row("table")
        self.assertFalse(self._call_fut("table", row))
class Test__check_row_type(unittest.TestCase):
    """Unit tests for the module-level ``_check_row_type`` helper."""

    def _call_fut(self, row):
        from google.cloud.bigtable.table import _check_row_type

        return _check_row_type(row)

    def test_wrong_row_type(self):
        """Rows other than DirectRow (e.g. ConditionalRow) raise TypeError.

        NOTE: renamed from ``test_test_wrong_row_type`` — the doubled
        ``test_`` prefix was a typo, inconsistent with the sibling
        ``test_right_row_type``; the method remains unittest-discoverable.
        """
        from google.cloud.bigtable.row import ConditionalRow

        row = ConditionalRow(row_key=b"row_key", table="table", filter_=None)
        with self.assertRaises(TypeError):
            self._call_fut(row)

    def test_right_row_type(self):
        """A DirectRow passes the check and returns a falsy value."""
        from google.cloud.bigtable.row import DirectRow

        row = DirectRow(row_key=b"row_key", table="table")
        result = self._call_fut(row)
        self.assertFalse(result)
class TestTable(unittest.TestCase):
    """Unit tests for ``google.cloud.bigtable.table.Table``."""

    # Resource identifiers shared by every test in this class.
    PROJECT_ID = "project-id"
    INSTANCE_ID = "instance-id"
    INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
    TABLE_ID = "table-id"
    TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID

    # Row/cell fixtures used by the read-row tests.
    ROW_KEY = b"row-key"
    ROW_KEY_1 = b"row-key-1"
    ROW_KEY_2 = b"row-key-2"
    ROW_KEY_3 = b"row-key-3"
    FAMILY_NAME = u"family"
    QUALIFIER = b"qualifier"
    TIMESTAMP_MICROS = 100
    VALUE = b"value"

    # NOTE(review): appears to cache JSON acceptance-test fixtures; the
    # loading code is not visible in this excerpt — confirm before relying.
    _json_tests = None
@staticmethod
def _get_target_class():
from google.cloud.bigtable.table import Table
return Table
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
@staticmethod
def _get_target_client_class():
from google.cloud.bigtable.client import Client
return Client
def _make_client(self, *args, **kwargs):
return self._get_target_client_class()(*args, **kwargs)
def test_constructor_w_admin(self):
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT_ID, credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
self.assertEqual(table.table_id, self.TABLE_ID)
self.assertIs(table._instance._client, client)
self.assertEqual(table.name, self.TABLE_NAME)
def test_constructor_wo_admin(self):
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT_ID, credentials=credentials, admin=False
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
self.assertEqual(table.table_id, self.TABLE_ID)
self.assertIs(table._instance._client, client)
self.assertEqual(table.name, self.TABLE_NAME)
def _row_methods_helper(self):
client = self._make_client(
project="project-id", credentials=_make_credentials(), admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
row_key = b"row_key"
return table, row_key
def test_row_factory_direct(self):
from google.cloud.bigtable.row import DirectRow
table, row_key = self._row_methods_helper()
row = table.row(row_key)
self.assertIsInstance(row, DirectRow)
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_row_factory_conditional(self):
from google.cloud.bigtable.row import ConditionalRow
table, row_key = self._row_methods_helper()
filter_ = object()
row = table.row(row_key, filter_=filter_)
self.assertIsInstance(row, ConditionalRow)
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_row_factory_append(self):
from google.cloud.bigtable.row import AppendRow
table, row_key = self._row_methods_helper()
row = table.row(row_key, append=True)
self.assertIsInstance(row, AppendRow)
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_direct_row(self):
from google.cloud.bigtable.row import DirectRow
table, row_key = self._row_methods_helper()
row = table.direct_row(row_key)
self.assertIsInstance(row, DirectRow)
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_conditional_row(self):
from google.cloud.bigtable.row import ConditionalRow
table, row_key = self._row_methods_helper()
filter_ = object()
row = table.conditional_row(row_key, filter_=filter_)
self.assertIsInstance(row, ConditionalRow)
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_append_row(self):
from google.cloud.bigtable.row import AppendRow
table, row_key = self._row_methods_helper()
row = table.append_row(row_key)
self.assertIsInstance(row, AppendRow)
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_row_factory_failure(self):
table, row_key = self._row_methods_helper()
with self.assertRaises(ValueError):
table.row(row_key, filter_=object(), append=True)
def test___eq__(self):
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table1 = self._make_one(self.TABLE_ID, instance)
table2 = self._make_one(self.TABLE_ID, instance)
self.assertEqual(table1, table2)
def test___eq__type_differ(self):
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table1 = self._make_one(self.TABLE_ID, instance)
table2 = object()
self.assertNotEqual(table1, table2)
def test___ne__same_value(self):
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table1 = self._make_one(self.TABLE_ID, instance)
table2 = self._make_one(self.TABLE_ID, instance)
comparison_val = table1 != table2
self.assertFalse(comparison_val)
def test___ne__(self):
table1 = self._make_one("table_id1", None)
table2 = self._make_one("table_id2", None)
self.assertNotEqual(table1, table2)
def _create_test_helper(self, split_keys=[], column_families={}):
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
from google.cloud.bigtable_admin_v2.proto import table_pb2
from google.cloud.bigtable_admin_v2.proto import (
bigtable_table_admin_pb2 as table_admin_messages_v2_pb2,
)
from google.cloud.bigtable.column_family import ColumnFamily
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
# Patch API calls
client._table_admin_client = table_api
# Perform the method and check the result.
table.create(column_families=column_families, initial_split_keys=split_keys)
families = {
id: ColumnFamily(id, self, rule).to_pb()
for (id, rule) in column_families.items()
}
split = table_admin_messages_v2_pb2.CreateTableRequest.Split
splits = [split(key=split_key) for split_key in split_keys]
table_api.create_table.assert_called_once_with(
parent=self.INSTANCE_NAME,
table=table_pb2.Table(column_families=families),
table_id=self.TABLE_ID,
initial_splits=splits,
)
    def test_create(self):
        """Create a table with no column families and no initial split keys."""
        self._create_test_helper()
def test_create_with_families(self):
from google.cloud.bigtable.column_family import MaxVersionsGCRule
families = {"family": MaxVersionsGCRule(5)}
self._create_test_helper(column_families=families)
    def test_create_with_split_keys(self):
        """Create a table with three initial tablet split points."""
        self._create_test_helper(split_keys=[b"split1", b"split2", b"split3"])
    def test_exists(self):
        """``exists()`` maps a successful ``get_table`` to True, ``NotFound``
        to False, and propagates any other API error (here ``BadRequest``)."""
        from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2
        from google.cloud.bigtable_admin_v2.proto import (
            bigtable_table_admin_pb2 as table_messages_v1_pb2,
        )
        from google.cloud.bigtable_admin_v2.gapic import (
            bigtable_instance_admin_client,
            bigtable_table_admin_client,
        )
        from google.api_core.exceptions import NotFound
        from google.api_core.exceptions import BadRequest
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
            mock.Mock()
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        # Create response_pb
        response_pb = table_messages_v1_pb2.ListTablesResponse(
            tables=[table_data_v2_pb2.Table(name=self.TABLE_NAME)]
        )
        # Patch API calls
        client._table_admin_client = table_api
        client._instance_admin_client = instance_api
        bigtable_table_stub = client._table_admin_client.transport
        # One canned outcome per exists() call below: found, missing, error.
        bigtable_table_stub.get_table.side_effect = [
            response_pb,
            NotFound("testing"),
            BadRequest("testing"),
        ]
        # Perform the method and check the result.
        table1 = instance.table(self.TABLE_ID)
        table2 = instance.table("table-id2")
        result = table1.exists()
        self.assertEqual(True, result)
        result = table2.exists()
        self.assertEqual(False, result)
        with self.assertRaises(BadRequest):
            table2.exists()
    def test_delete(self):
        """``delete()`` completes against a mocked admin client and returns None."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        # Patch API calls
        client._table_admin_client = table_api
        # Create expected_result.
        expected_result = None  # delete() has no return value.
        # Perform the method and check the result.
        result = table.delete()
        self.assertEqual(result, expected_result)
    def _list_column_families_helper(self):
        """``list_column_families()`` turns the get_table response into a dict
        keyed by column-family ID."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        # Create response_pb
        COLUMN_FAMILY_ID = "foo"
        column_family = _ColumnFamilyPB()
        response_pb = _TablePB(column_families={COLUMN_FAMILY_ID: column_family})
        # Patch the stub used by the API method.
        client._table_admin_client = table_api
        bigtable_table_stub = client._table_admin_client.transport
        bigtable_table_stub.get_table.side_effect = [response_pb]
        # Create expected_result.
        expected_result = {COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID)}
        # Perform the method and check the result.
        result = table.list_column_families()
        self.assertEqual(result, expected_result)
    def test_list_column_families(self):
        """Delegates to the shared list-column-families scenario."""
        self._list_column_families_helper()
    def test_get_cluster_states(self):
        """``get_cluster_states()`` maps each cluster's replication-state enum
        in the get_table response to a ``ClusterState`` wrapper."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState
        INITIALIZING = enum_table.ReplicationState.INITIALIZING
        PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE
        READY = enum_table.ReplicationState.READY
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        response_pb = _TablePB(
            cluster_states={
                "cluster-id1": _ClusterStatePB(INITIALIZING),
                "cluster-id2": _ClusterStatePB(PLANNED_MAINTENANCE),
                "cluster-id3": _ClusterStatePB(READY),
            }
        )
        # Patch the stub used by the API method.
        client._table_admin_client = table_api
        bigtable_table_stub = client._table_admin_client.transport
        bigtable_table_stub.get_table.side_effect = [response_pb]
        # build expected result
        expected_result = {
            u"cluster-id1": ClusterState(INITIALIZING),
            u"cluster-id2": ClusterState(PLANNED_MAINTENANCE),
            u"cluster-id3": ClusterState(READY),
        }
        # Perform the method and check the result.
        result = table.get_cluster_states()
        self.assertEqual(result, expected_result)
    def _read_row_helper(self, chunks, expected_result, app_profile_id=None):
        """Drive ``Table.read_row`` against canned response *chunks* and check
        both the returned value and the request-building kwargs.

        :param chunks: list of ReadRows cell chunks, ``[]`` for an empty
            response, or ``None`` for no responses at all.
        :param expected_result: value ``read_row`` is expected to return.
        :param app_profile_id: optional app profile forwarded to the table.
        """
        from google.cloud._testing import _Monkey
        from google.cloud.bigtable import table as MUT
        from google.cloud.bigtable.row_set import RowSet
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.cloud.bigtable.row_filters import RowSampleFilter
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id)
        # Create request_pb
        request_pb = object()  # Returned by our mock.
        mock_created = []
        # Stand-in for table._create_row_request; records the kwargs so the
        # request construction can be asserted below.
        def mock_create_row_request(table_name, **kwargs):
            mock_created.append((table_name, kwargs))
            return request_pb
        # Create response_iterator
        if chunks is None:
            response_iterator = iter(())  # no responses at all
        else:
            response_pb = _ReadRowsResponsePB(chunks=chunks)
            response_iterator = iter([response_pb])
        # Patch the stub used by the API method.
        client._table_data_client = data_api
        client._table_admin_client = table_api
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[response_iterator]
        )
        # Perform the method and check the result.
        filter_obj = RowSampleFilter(0.33)
        result = None
        with _Monkey(MUT, _create_row_request=mock_create_row_request):
            result = table.read_row(self.ROW_KEY, filter_=filter_obj)
        # read_row is expected to request exactly one key via a RowSet.
        row_set = RowSet()
        row_set.add_row_key(self.ROW_KEY)
        expected_request = [
            (
                table.name,
                {
                    "end_inclusive": False,
                    "row_set": row_set,
                    "app_profile_id": app_profile_id,
                    "end_key": None,
                    "limit": None,
                    "start_key": None,
                    "filter_": filter_obj,
                },
            )
        ]
        self.assertEqual(result, expected_result)
        self.assertEqual(mock_created, expected_request)
    def test_read_row_miss_no__responses(self):
        """No streamed responses at all -> read_row returns None."""
        self._read_row_helper(None, None)
    def test_read_row_miss_no_chunks_in_response(self):
        """A response with zero chunks -> read_row returns None."""
        chunks = []
        self._read_row_helper(chunks, None)
    def test_read_row_complete(self):
        """A single committed chunk yields a fully-populated PartialRowData."""
        from google.cloud.bigtable.row_data import Cell
        from google.cloud.bigtable.row_data import PartialRowData
        app_profile_id = "app-profile-id"
        chunk = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunks = [chunk]
        # Build the PartialRowData we expect read_row to reconstruct.
        expected_result = PartialRowData(row_key=self.ROW_KEY)
        family = expected_result._cells.setdefault(self.FAMILY_NAME, {})
        column = family.setdefault(self.QUALIFIER, [])
        column.append(Cell.from_pb(chunk))
        self._read_row_helper(chunks, expected_result, app_profile_id)
    def test_read_row_more_than_one_row_returned(self):
        """Two committed rows for a single-row read is a ValueError."""
        app_profile_id = "app-profile-id"
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunks = [chunk_1, chunk_2]
        with self.assertRaises(ValueError):
            self._read_row_helper(chunks, None, app_profile_id)
    def test_read_row_still_partial(self):
        """A stream ending without a commit_row chunk is a ValueError."""
        chunk = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
        )
        # No "commit row".
        chunks = [chunk]
        with self.assertRaises(ValueError):
            self._read_row_helper(chunks, None)
    def test_mutate_rows(self):
        """``mutate_rows`` returns the per-row statuses produced by the
        (mocked) retryable worker."""
        from google.rpc.status_pb2 import Status
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        client._table_admin_client = table_api
        table = self._make_one(self.TABLE_ID, instance)
        response = [Status(code=0), Status(code=1)]
        mock_worker = mock.Mock(return_value=response)
        # Replace the worker class so calling it yields our canned statuses.
        with mock.patch(
            "google.cloud.bigtable.table._RetryableMutateRowsWorker",
            new=mock.MagicMock(return_value=mock_worker),
        ):
            statuses = table.mutate_rows([mock.MagicMock(), mock.MagicMock()])
        result = [status.code for status in statuses]
        expected_result = [0, 1]
        self.assertEqual(result, expected_result)
    def test_read_rows(self):
        """``read_rows`` builds the row request from its kwargs and returns a
        PartialRowsData wired to the data transport and retry object."""
        from google.cloud._testing import _Monkey
        from google.cloud.bigtable.row_data import PartialRowsData
        from google.cloud.bigtable import table as MUT
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        app_profile_id = "app-profile-id"
        table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id)
        # Create request_pb
        request = retry = object()  # Returned by our mock.
        mock_created = []
        # Stand-in for table._create_row_request; records the kwargs used.
        def mock_create_row_request(table_name, **kwargs):
            mock_created.append((table_name, kwargs))
            return request
        # Create expected_result.
        expected_result = PartialRowsData(
            client._table_data_client.transport.read_rows, request, retry
        )
        # Perform the method and check the result.
        start_key = b"start-key"
        end_key = b"end-key"
        filter_obj = object()
        limit = 22
        with _Monkey(MUT, _create_row_request=mock_create_row_request):
            result = table.read_rows(
                start_key=start_key,
                end_key=end_key,
                filter_=filter_obj,
                limit=limit,
                retry=retry,
            )
        self.assertEqual(result.rows, expected_result.rows)
        self.assertEqual(result.retry, expected_result.retry)
        created_kwargs = {
            "start_key": start_key,
            "end_key": end_key,
            "filter_": filter_obj,
            "limit": limit,
            "end_inclusive": False,
            "app_profile_id": app_profile_id,
            "row_set": None,
        }
        self.assertEqual(mock_created, [(table.name, created_kwargs)])
    def test_read_retry_rows(self):
        """``read_rows`` with a retry predicate retries through two failing
        streams before succeeding and still yields every row."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.api_core import retry
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception)
        # Create response_iterator
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_1,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        response_1 = _ReadRowsResponseV2([chunk_1])
        response_2 = _ReadRowsResponseV2([chunk_2])
        response_failure_iterator_1 = _MockFailureIterator_1()
        response_failure_iterator_2 = _MockFailureIterator_2([response_1])
        response_iterator = _MockReadRowsIterator(response_2)
        # Patch the stub used by the API method: fail, partially fail, succeed.
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[
                response_failure_iterator_1,
                response_failure_iterator_2,
                response_iterator,
            ]
        )
        rows = []
        for row in table.read_rows(
            start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2, retry=retry_read_rows
        ):
            rows.append(row)
        result = rows[1]
        self.assertEqual(result.row_key, self.ROW_KEY_2)
    def test_yield_retry_rows(self):
        """Deprecated ``yield_rows`` still retries like read_rows and emits a
        single DeprecationWarning."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        import warnings
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        # Create response_iterator
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_1,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        response_1 = _ReadRowsResponseV2([chunk_1])
        response_2 = _ReadRowsResponseV2([chunk_2])
        response_failure_iterator_1 = _MockFailureIterator_1()
        response_failure_iterator_2 = _MockFailureIterator_2([response_1])
        response_iterator = _MockReadRowsIterator(response_2)
        # Patch the stub used by the API method: fail, partially fail, succeed.
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[
                response_failure_iterator_1,
                response_failure_iterator_2,
                response_iterator,
            ]
        )
        rows = []
        with warnings.catch_warnings(record=True) as warned:
            for row in table.yield_rows(
                start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2
            ):
                rows.append(row)
        self.assertEqual(len(warned), 1)
        self.assertIs(warned[0].category, DeprecationWarning)
        result = rows[1]
        self.assertEqual(result.row_key, self.ROW_KEY_2)
    def test_yield_rows_with_row_set(self):
        """Deprecated ``yield_rows`` honors a RowSet (range plus single key)
        and emits a single DeprecationWarning."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.cloud.bigtable.row_set import RowSet
        from google.cloud.bigtable.row_set import RowRange
        import warnings
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        # Create response_iterator
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_1,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunk_3 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_3,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        response_1 = _ReadRowsResponseV2([chunk_1])
        response_2 = _ReadRowsResponseV2([chunk_2])
        response_3 = _ReadRowsResponseV2([chunk_3])
        response_iterator = _MockReadRowsIterator(response_1, response_2, response_3)
        # Patch the stub used by the API method.
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[response_iterator]
        )
        rows = []
        row_set = RowSet()
        row_set.add_row_range(
            RowRange(start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2)
        )
        row_set.add_row_key(self.ROW_KEY_3)
        with warnings.catch_warnings(record=True) as warned:
            for row in table.yield_rows(row_set=row_set):
                rows.append(row)
        self.assertEqual(len(warned), 1)
        self.assertIs(warned[0].category, DeprecationWarning)
        self.assertEqual(rows[0].row_key, self.ROW_KEY_1)
        self.assertEqual(rows[1].row_key, self.ROW_KEY_2)
        self.assertEqual(rows[2].row_key, self.ROW_KEY_3)
    def test_sample_row_keys(self):
        """``sample_row_keys()`` returns the iterator produced by the data API."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        # Create response_iterator
        response_iterator = object()  # Just passed to a mock.
        # Patch the stub used by the API method.
        inner_api_calls = client._table_data_client._inner_api_calls
        inner_api_calls["sample_row_keys"] = mock.Mock(
            side_effect=[[response_iterator]]
        )
        # Create expected_result.
        expected_result = response_iterator
        # Perform the method and check the result.
        result = table.sample_row_keys()
        self.assertEqual(result[0], expected_result)
    def test_truncate(self):
        """``truncate()`` issues drop_row_range with delete_all_data_from_table."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        data_api = mock.create_autospec(bigtable_client.BigtableClient)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        expected_result = None  # truncate() has no return value.
        # Pin Table.name so the drop_row_range assertion below is exact.
        with mock.patch("google.cloud.bigtable.table.Table.name", new=self.TABLE_NAME):
            result = table.truncate()
        table_api.drop_row_range.assert_called_once_with(
            name=self.TABLE_NAME, delete_all_data_from_table=True
        )
        self.assertEqual(result, expected_result)
    def test_truncate_w_timeout(self):
        """``truncate(timeout=...)`` completes and returns None."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        data_api = mock.create_autospec(bigtable_client.BigtableClient)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        expected_result = None  # truncate() has no return value.
        timeout = 120
        result = table.truncate(timeout=timeout)
        self.assertEqual(result, expected_result)
    def test_drop_by_prefix(self):
        """``drop_by_prefix`` completes against mocked APIs and returns None."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        data_api = mock.create_autospec(bigtable_client.BigtableClient)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        expected_result = None  # drop_by_prefix() has no return value.
        row_key_prefix = "row-key-prefix"
        result = table.drop_by_prefix(row_key_prefix=row_key_prefix)
        self.assertEqual(result, expected_result)
    def test_drop_by_prefix_w_timeout(self):
        """``drop_by_prefix(timeout=...)`` completes and returns None."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        data_api = mock.create_autospec(bigtable_client.BigtableClient)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        expected_result = None  # drop_by_prefix() has no return value.
        row_key_prefix = "row-key-prefix"
        timeout = 120
        result = table.drop_by_prefix(row_key_prefix=row_key_prefix, timeout=timeout)
        self.assertEqual(result, expected_result)
    def test_mutations_batcher_factory(self):
        """``mutations_batcher`` forwards flush_count/max_row_bytes to the batcher."""
        flush_count = 100
        max_row_bytes = 1000
        table = self._make_one(self.TABLE_ID, None)
        mutation_batcher = table.mutations_batcher(
            flush_count=flush_count, max_row_bytes=max_row_bytes
        )
        self.assertEqual(mutation_batcher.table.table_id, self.TABLE_ID)
        self.assertEqual(mutation_batcher.flush_count, flush_count)
        self.assertEqual(mutation_batcher.max_row_bytes, max_row_bytes)
    def test_get_iam_policy(self):
        """``get_iam_policy`` calls the admin API with the table resource and
        wraps the returned protobuf Policy."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.iam.v1 import policy_pb2
        from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        version = 1
        etag = b"etag_v1"
        members = ["serviceAccount:<EMAIL>", "user:<EMAIL>"]
        bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
        iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        client._table_admin_client = table_api
        table_api.get_iam_policy.return_value = iam_policy
        result = table.get_iam_policy()
        table_api.get_iam_policy.assert_called_once_with(resource=table.name)
        self.assertEqual(result.version, version)
        self.assertEqual(result.etag, etag)
        admins = result.bigtable_admins
        self.assertEqual(len(admins), len(members))
        for found, expected in zip(sorted(admins), sorted(members)):
            self.assertEqual(found, expected)
    def test_set_iam_policy(self):
        """``set_iam_policy`` sends the protobuf form of the Policy and wraps
        the admin API's response."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.iam.v1 import policy_pb2
        from google.cloud.bigtable.policy import Policy
        from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        version = 1
        etag = b"etag_v1"
        members = ["serviceAccount:<EMAIL>", "user:<EMAIL>"]
        bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}]
        iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        client._table_admin_client = table_api
        table_api.set_iam_policy.return_value = iam_policy_pb
        iam_policy = Policy(etag=etag, version=version)
        iam_policy[BIGTABLE_ADMIN_ROLE] = [
            Policy.user("<EMAIL>"),
            Policy.service_account("<EMAIL>"),
        ]
        result = table.set_iam_policy(iam_policy)
        table_api.set_iam_policy.assert_called_once_with(
            resource=table.name, policy=iam_policy_pb
        )
        self.assertEqual(result.version, version)
        self.assertEqual(result.etag, etag)
        admins = result.bigtable_admins
        self.assertEqual(len(admins), len(members))
        for found, expected in zip(sorted(admins), sorted(members)):
            self.assertEqual(found, expected)
    def test_test_iam_permissions(self):
        """``test_iam_permissions`` forwards the permission list and returns
        the permissions echoed back by the API."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.iam.v1 import iam_policy_pb2
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"]
        response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        table_api.test_iam_permissions.return_value = response
        client._table_admin_client = table_api
        result = table.test_iam_permissions(permissions)
        self.assertEqual(result, permissions)
        table_api.test_iam_permissions.assert_called_once_with(
            resource=table.name, permissions=permissions
        )
class Test__RetryableMutateRowsWorker(unittest.TestCase):
    # Unit tests for the retrying mutate-rows worker in
    # ``google.cloud.bigtable.table``.
    from grpc import StatusCode
    PROJECT_ID = "project-id"
    INSTANCE_ID = "instance-id"
    INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
    TABLE_ID = "table-id"
    # RPC Status Codes
    # ``StatusCode.<X>.value`` is a ``(code_int, name)`` pair; index 0 is the
    # integer gRPC code used in Status protobufs below.
    SUCCESS = StatusCode.OK.value[0]
    RETRYABLE_1 = StatusCode.DEADLINE_EXCEEDED.value[0]
    RETRYABLE_2 = StatusCode.ABORTED.value[0]
    NON_RETRYABLE = StatusCode.CANCELLED.value[0]
    @staticmethod
    def _get_target_class_for_worker():
        # Imported lazily so test collection does not require the package.
        from google.cloud.bigtable.table import _RetryableMutateRowsWorker
        return _RetryableMutateRowsWorker
    def _make_worker(self, *args, **kwargs):
        # Instantiate the class under test, forwarding all arguments.
        return self._get_target_class_for_worker()(*args, **kwargs)
    @staticmethod
    def _get_target_class_for_table():
        # Lazy import of the Table class used to build fixtures.
        from google.cloud.bigtable.table import Table
        return Table
    def _make_table(self, *args, **kwargs):
        # Build a Table fixture, forwarding all arguments.
        return self._get_target_class_for_table()(*args, **kwargs)
    @staticmethod
    def _get_target_client_class():
        # Lazy import of the Client class used to build fixtures.
        from google.cloud.bigtable.client import Client
        return Client
    def _make_client(self, *args, **kwargs):
        # Build a Client fixture, forwarding all arguments.
        return self._get_target_client_class()(*args, **kwargs)
def _make_responses_statuses(self, codes):
from google.rpc.status_pb2 import Status
response = [Status(code=code) for code in codes]
return response
def _make_responses(self, codes):
import six
from google.cloud.bigtable_v2.proto.bigtable_pb2 import MutateRowsResponse
from google.rpc.status_pb2 import Status
entries = [
MutateRowsResponse.Entry(index=i, status=Status(code=codes[i]))
for i in six.moves.xrange(len(codes))
]
return MutateRowsResponse(entries=entries)
    def test_callable_empty_rows(self):
        """A worker with no rows returns an empty status list without calling
        the API."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        data_api = mock.create_autospec(bigtable_client.BigtableClient)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_table(self.TABLE_ID, instance)
        worker = self._make_worker(client, table.name, [])
        statuses = worker()
        self.assertEqual(len(statuses), 0)
    def test_callable_no_retry_strategy(self):
        """With ``retry=None`` the worker makes a single attempt and returns
        statuses as-is, even for retryable codes."""
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        # Setup:
        #   - Mutate 3 rows.
        # Action:
        #   - Attempt to mutate the rows w/o any retry strategy.
        # Expectation:
        #   - Since no retry, should return statuses as they come back.
        #   - Even if there are retryable errors, no retry attempt is made.
        #   - State of responses_statuses should be
        #       [success, retryable, non-retryable]
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_table(self.TABLE_ID, instance)
        row_1 = DirectRow(row_key=b"row_key", table=table)
        row_1.set_cell("cf", b"col", b"value1")
        row_2 = DirectRow(row_key=b"row_key_2", table=table)
        row_2.set_cell("cf", b"col", b"value2")
        row_3 = DirectRow(row_key=b"row_key_3", table=table)
        row_3.set_cell("cf", b"col", b"value3")
        response = self._make_responses(
            [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
        )
        # Intercept the wrapped mutate_rows call so it yields our response.
        with mock.patch("google.cloud.bigtable.table.wrap_method") as patched:
            patched.return_value = mock.Mock(return_value=[response])
            worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
            statuses = worker(retry=None)
        result = [status.code for status in statuses]
        expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
        client._table_data_client._inner_api_calls["mutate_rows"].assert_called_once()
        self.assertEqual(result, expected_result)
def test_callable_retry(self):
    """Mutate 3 rows with the default retry strategy.

    The first attempt yields [success, retryable, non-retryable]; the
    retryable row is re-sent and succeeds on the second attempt, so the
    final statuses are [success, success, non-retryable] and MutateRows
    is invoked exactly twice.
    """
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable.table import DEFAULT_RETRY
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")
    row_3 = DirectRow(row_key=b"row_key_3", table=table)
    row_3.set_cell("cf", b"col", b"value3")

    response_1 = self._make_responses(
        [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
    )
    response_2 = self._make_responses([self.SUCCESS])

    # Patch the stub used by the API method: the first call returns
    # response_1, the retry call returns response_2.
    client._table_data_client._inner_api_calls["mutate_rows"] = mock.Mock(
        side_effect=[[response_1], [response_2]]
    )

    # Short initial delay keeps the test fast.
    retry = DEFAULT_RETRY.with_delay(initial=0.1)
    worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
    statuses = worker(retry=retry)

    result = [status.code for status in statuses]
    expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE]
    self.assertEqual(
        client._table_data_client._inner_api_calls["mutate_rows"].call_count, 2
    )
    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_empty_rows(self):
    """With no rows to mutate, a retry pass returns an empty status list."""
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    table_api = mock.create_autospec(
        bigtable_table_admin_client.BigtableTableAdminClient
    )
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    worker = self._make_worker(client, table.name, [])
    statuses = worker._do_mutate_retryable_rows()

    self.assertEqual(len(statuses), 0)
def test_do_mutate_retryable_rows(self):
    """Single mutate pass over 2 rows: expect [success, non-retryable]."""
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")

    response = self._make_responses([self.SUCCESS, self.NON_RETRYABLE])

    # Patch the stub used by the API method.
    inner_api_calls = client._table_data_client._inner_api_calls
    inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

    worker = self._make_worker(client, table.name, [row_1, row_2])
    statuses = worker._do_mutate_retryable_rows()

    result = [status.code for status in statuses]
    expected_result = [self.SUCCESS, self.NON_RETRYABLE]
    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_retry(self):
    """Mutate 3 rows where one returned status is retryable.

    A retryable status makes ``_do_mutate_retryable_rows`` raise
    ``_BigtableRetryableError`` so the retry wrapper can re-invoke it;
    ``responses_statuses`` is left as [success, retryable, non-retryable].
    """
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable.table import _BigtableRetryableError
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")
    row_3 = DirectRow(row_key=b"row_key_3", table=table)
    row_3.set_cell("cf", b"col", b"value3")

    response = self._make_responses(
        [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
    )

    # Patch the stub used by the API method.
    inner_api_calls = client._table_data_client._inner_api_calls
    inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

    worker = self._make_worker(client, table.name, [row_1, row_2, row_3])

    with self.assertRaises(_BigtableRetryableError):
        worker._do_mutate_retryable_rows()

    statuses = worker.responses_statuses
    result = [status.code for status in statuses]
    expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_second_retry(self):
    """Second retry pass that itself hits a retryable error.

    The first pass left [success, retryable, non-retryable, retryable];
    the second pass re-sends only the two retryable rows and gets
    [success, retryable] back, producing
    [success, success, non-retryable, retryable] and raising
    ``_BigtableRetryableError`` again.  The retryable status maps back
    to index 3 of the original row list even though only two rows were
    retried.
    """
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable.table import _BigtableRetryableError
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")
    row_3 = DirectRow(row_key=b"row_key_3", table=table)
    row_3.set_cell("cf", b"col", b"value3")
    row_4 = DirectRow(row_key=b"row_key_4", table=table)
    row_4.set_cell("cf", b"col", b"value4")

    # Statuses for the two rows re-sent on this pass.
    response = self._make_responses([self.SUCCESS, self.RETRYABLE_1])

    # Patch the stub used by the API method.
    inner_api_calls = client._table_data_client._inner_api_calls
    inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

    worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4])
    # Simulate the outcome of the first pass.
    worker.responses_statuses = self._make_responses_statuses(
        [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2]
    )

    with self.assertRaises(_BigtableRetryableError):
        worker._do_mutate_retryable_rows()

    statuses = worker.responses_statuses
    result = [status.code for status in statuses]
    expected_result = [
        self.SUCCESS,
        self.SUCCESS,
        self.NON_RETRYABLE,
        self.RETRYABLE_1,
    ]
    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_second_try(self):
    """Second pass resolves all retryable rows (no further retry).

    The first pass left [success, retryable, non-retryable, retryable];
    the second pass re-sends the two retryable rows and gets back
    [non-retryable, success], so the merged result is
    [success, non-retryable, non-retryable, success].
    """
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")
    row_3 = DirectRow(row_key=b"row_key_3", table=table)
    row_3.set_cell("cf", b"col", b"value3")
    row_4 = DirectRow(row_key=b"row_key_4", table=table)
    row_4.set_cell("cf", b"col", b"value4")

    # Statuses for the two rows re-sent on this pass.
    response = self._make_responses([self.NON_RETRYABLE, self.SUCCESS])

    # Patch the stub used by the API method.
    inner_api_calls = client._table_data_client._inner_api_calls
    inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

    worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4])
    # Simulate the outcome of the first pass.
    worker.responses_statuses = self._make_responses_statuses(
        [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2]
    )

    statuses = worker._do_mutate_retryable_rows()

    result = [status.code for status in statuses]
    expected_result = [
        self.SUCCESS,
        self.NON_RETRYABLE,
        self.NON_RETRYABLE,
        self.SUCCESS,
    ]
    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_second_try_no_retryable(self):
    """Second pass with nothing to retry returns stored statuses.

    The first pass left [success, non-retryable]; since no row is
    retryable, no RPC is made and the statuses come back unchanged.
    """
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    table_api = mock.create_autospec(
        bigtable_table_admin_client.BigtableTableAdminClient
    )
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")

    worker = self._make_worker(client, table.name, [row_1, row_2])
    # Simulate the outcome of the first pass.
    worker.responses_statuses = self._make_responses_statuses(
        [self.SUCCESS, self.NON_RETRYABLE]
    )

    statuses = worker._do_mutate_retryable_rows()

    result = [status.code for status in statuses]
    expected_result = [self.SUCCESS, self.NON_RETRYABLE]
    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_mismatch_num_responses(self):
    """A response with fewer statuses than rows sent raises RuntimeError."""
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")

    # Only one status for the two rows that will be sent.
    response = self._make_responses([self.SUCCESS])

    # Patch the stub used by the API method.
    inner_api_calls = client._table_data_client._inner_api_calls
    inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

    worker = self._make_worker(client, table.name, [row_1, row_2])
    with self.assertRaises(RuntimeError):
        worker._do_mutate_retryable_rows()
class Test__create_row_request(unittest.TestCase):
    """Unit tests for the ``_create_row_request`` request builder."""

    def _call_fut(
        self,
        table_name,
        start_key=None,
        end_key=None,
        filter_=None,
        limit=None,
        end_inclusive=False,
        app_profile_id=None,
        row_set=None,
    ):
        # Late import so the module under test is resolved at call time.
        from google.cloud.bigtable.table import _create_row_request

        return _create_row_request(
            table_name,
            start_key=start_key,
            end_key=end_key,
            filter_=filter_,
            limit=limit,
            end_inclusive=end_inclusive,
            app_profile_id=app_profile_id,
            row_set=row_set,
        )

    def test_table_name_only(self):
        """A bare table name produces a request with only ``table_name``."""
        actual = self._call_fut("table_name")
        self.assertEqual(actual, _ReadRowsRequestPB(table_name="table_name"))

    def test_row_range_row_set_conflict(self):
        """Passing both a row-range key and a row set is rejected."""
        with self.assertRaises(ValueError):
            self._call_fut(None, end_key=object(), row_set=object())

    def test_row_range_start_key(self):
        """A lone start key becomes a closed lower bound."""
        expected = _ReadRowsRequestPB(table_name="table_name")
        expected.rows.row_ranges.add(start_key_closed=b"start_key")
        actual = self._call_fut("table_name", start_key=b"start_key")
        self.assertEqual(actual, expected)

    def test_row_range_end_key(self):
        """A lone end key becomes an open upper bound."""
        expected = _ReadRowsRequestPB(table_name="table_name")
        expected.rows.row_ranges.add(end_key_open=b"end_key")
        actual = self._call_fut("table_name", end_key=b"end_key")
        self.assertEqual(actual, expected)

    def test_row_range_both_keys(self):
        """Start and end keys form a closed/open range by default."""
        expected = _ReadRowsRequestPB(table_name="table_name")
        expected.rows.row_ranges.add(
            start_key_closed=b"start_key", end_key_open=b"end_key"
        )
        actual = self._call_fut(
            "table_name", start_key=b"start_key", end_key=b"end_key"
        )
        self.assertEqual(actual, expected)

    def test_row_range_both_keys_inclusive(self):
        """``end_inclusive=True`` closes the upper bound."""
        expected = _ReadRowsRequestPB(table_name="table_name")
        expected.rows.row_ranges.add(
            start_key_closed=b"start_key", end_key_closed=b"end_key"
        )
        actual = self._call_fut(
            "table_name",
            start_key=b"start_key",
            end_key=b"end_key",
            end_inclusive=True,
        )
        self.assertEqual(actual, expected)

    def test_with_filter(self):
        """A row filter is serialized into the request."""
        from google.cloud.bigtable.row_filters import RowSampleFilter

        row_filter = RowSampleFilter(0.33)
        expected = _ReadRowsRequestPB(
            table_name="table_name", filter=row_filter.to_pb()
        )
        actual = self._call_fut("table_name", filter_=row_filter)
        self.assertEqual(actual, expected)

    def test_with_limit(self):
        """``limit`` maps onto the ``rows_limit`` field."""
        expected = _ReadRowsRequestPB(table_name="table_name", rows_limit=1337)
        actual = self._call_fut("table_name", limit=1337)
        self.assertEqual(actual, expected)

    def test_with_row_set(self):
        """An empty RowSet adds nothing to the request."""
        from google.cloud.bigtable.row_set import RowSet

        expected = _ReadRowsRequestPB(table_name="table_name")
        actual = self._call_fut("table_name", row_set=RowSet())
        self.assertEqual(actual, expected)

    def test_with_app_profile_id(self):
        """``app_profile_id`` is carried through alongside other fields."""
        expected = _ReadRowsRequestPB(
            table_name="table_name",
            rows_limit=1337,
            app_profile_id="app-profile-id",
        )
        actual = self._call_fut(
            "table_name", limit=1337, app_profile_id="app-profile-id"
        )
        self.assertEqual(actual, expected)
def _ReadRowsRequestPB(*args, **kw):
    """Build a ``ReadRowsRequest`` protobuf for use in expectations."""
    from google.cloud.bigtable_v2.proto import bigtable_pb2

    return bigtable_pb2.ReadRowsRequest(*args, **kw)
class Test_ClusterState(unittest.TestCase):
    """Unit tests for ``google.cloud.bigtable.table.ClusterState``."""

    def test___eq__(self):
        """Two states built from the same replication state are equal."""
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState

        ready = enum_table.ReplicationState.READY
        self.assertEqual(ClusterState(ready), ClusterState(ready))

    def test___eq__type_differ(self):
        """Comparison against an unrelated type is never equal."""
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState

        state = ClusterState(enum_table.ReplicationState.READY)
        self.assertNotEqual(state, object())

    def test___ne__same_value(self):
        """``!=`` is False for states with identical replication states."""
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState

        ready = enum_table.ReplicationState.READY
        self.assertFalse(ClusterState(ready) != ClusterState(ready))

    def test___ne__(self):
        """States with different replication states compare unequal."""
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState

        self.assertNotEqual(
            ClusterState(enum_table.ReplicationState.READY),
            ClusterState(enum_table.ReplicationState.INITIALIZING),
        )

    def test__repr__(self):
        """str() yields the bare state name; the state is stored unchanged."""
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState

        expected_names = {
            enum_table.ReplicationState.STATE_NOT_KNOWN: "STATE_NOT_KNOWN",
            enum_table.ReplicationState.INITIALIZING: "INITIALIZING",
            enum_table.ReplicationState.PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE",
            enum_table.ReplicationState.UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE",
            enum_table.ReplicationState.READY: "READY",
        }
        for state, name in expected_names.items():
            self.assertEqual(str(ClusterState(state)), name)
            self.assertEqual(ClusterState(state).replication_state, state)
def _ReadRowsResponseCellChunkPB(*args, **kw):
    """Build a ``ReadRowsResponse.CellChunk`` protobuf.

    ``family_name`` and ``qualifier`` are wrapped-value fields on the
    message, so they are popped from ``kw`` and assigned after
    construction rather than passed to the constructor.
    """
    from google.cloud.bigtable_v2.proto import bigtable_pb2

    family_name = kw.pop("family_name")
    qualifier = kw.pop("qualifier")
    chunk = bigtable_pb2.ReadRowsResponse.CellChunk(*args, **kw)
    chunk.family_name.value = family_name
    chunk.qualifier.value = qualifier
    return chunk
def _ReadRowsResponsePB(*args, **kw):
    """Build a ``ReadRowsResponse`` protobuf."""
    from google.cloud.bigtable_v2.proto import bigtable_pb2

    return bigtable_pb2.ReadRowsResponse(*args, **kw)
def _mutate_rows_request_pb(*args, **kw):
    """Build a ``MutateRowsRequest`` protobuf."""
    from google.cloud.bigtable_v2.proto import bigtable_pb2

    return bigtable_pb2.MutateRowsRequest(*args, **kw)
class _MockReadRowsIterator(object):
def __init__(self, *values):
self.iter_values = iter(values)
def next(self):
return next(self.iter_values)
__next__ = next
class _MockFailureIterator_1(object):
    """Iterator stub whose very first ``next()`` raises DeadlineExceeded."""

    def __next__(self):
        raise DeadlineExceeded("Failed to read from server")

    # Python 2-style alias kept for callers that invoke ``.next()``.
    next = __next__
class _MockFailureIterator_2(object):
    """Iterator stub: yields one value, then raises DeadlineExceeded."""

    def __init__(self, *values):
        # Same attribute names as the original so tests can inspect them.
        self.iter_values = values[0]
        self.calls = 0

    def __next__(self):
        self.calls += 1
        if self.calls > 1:
            raise DeadlineExceeded("Failed to read from server")
        return self.iter_values[0]

    # Python 2-style alias kept for callers that invoke ``.next()``.
    next = __next__
class _ReadRowsResponseV2(object):
def __init__(self, chunks, last_scanned_row_key=""):
self.chunks = chunks
self.last_scanned_row_key = last_scanned_row_key
def _TablePB(*args, **kw):
    """Build an admin ``Table`` protobuf."""
    from google.cloud.bigtable_admin_v2.proto import table_pb2

    return table_pb2.Table(*args, **kw)
def _ColumnFamilyPB(*args, **kw):
    """Build an admin ``ColumnFamily`` protobuf."""
    from google.cloud.bigtable_admin_v2.proto import table_pb2

    return table_pb2.ColumnFamily(*args, **kw)
def _ClusterStatePB(replication_state):
    """Build a ``Table.ClusterState`` protobuf for *replication_state*."""
    from google.cloud.bigtable_admin_v2.proto import table_pb2

    return table_pb2.Table.ClusterState(replication_state=replication_state)
def _read_rows_retry_exception(exc):
    # Retry predicate for read-rows: only DeadlineExceeded is retried.
    return isinstance(exc, DeadlineExceeded)
|
en
| 0.840471
|
# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Patch API calls # Perform the method and check the result. # Create response_pb # Patch API calls # Perform the method and check the result. # Patch API calls # Create expected_result. # delete() has no return value. # Perform the method and check the result. # Create response_pb # Patch the stub used by the API method. # Create expected_result. # Perform the method and check the result. # Patch the stub used by the API method. # build expected result # Perform the method and check the result. # Create request_pb # Returned by our mock. # Create response_iterator # no responses at all # Patch the stub used by the API method. # Perform the method and check the result. # No "commit row". # Create request_pb # Returned by our mock. # Create expected_result. # Perform the method and check the result. # Create response_iterator # Patch the stub used by the API method. # Create response_iterator # Patch the stub used by the API method. # Create response_iterator # Patch the stub used by the API method. # Create response_iterator # Just passed to a mock. # Patch the stub used by the API method. # Create expected_result. # Perform the method and check the result. # truncate() has no return value. # truncate() has no return value. # drop_by_prefix() has no return value. # drop_by_prefix() has no return value. # RPC Status Codes # Setup: # - Mutate 3 rows. 
# Action: # - Attempt to mutate the rows w/o any retry strategy. # Expectation: # - Since no retry, should return statuses as they come back. # - Even if there are retryable errors, no retry attempt is made. # - State of responses_statuses should be # [success, retryable, non-retryable] # Setup: # - Mutate 3 rows. # Action: # - Initial attempt will mutate all 3 rows. # Expectation: # - First attempt will result in one retryable error. # - Second attempt will result in success for the retry-ed row. # - Check MutateRows is called twice. # - State of responses_statuses should be # [success, success, non-retryable] # Patch the stub used by the API method. # Setup: # - Mutate 2 rows. # Action: # - Initial attempt will mutate all 2 rows. # Expectation: # - Expect [success, non-retryable] # Patch the stub used by the API method. # Setup: # - Mutate 3 rows. # Action: # - Initial attempt will mutate all 3 rows. # Expectation: # - Second row returns retryable error code, so expect a raise. # - State of responses_statuses should be # [success, retryable, non-retryable] # Patch the stub used by the API method. # Setup: # - Mutate 4 rows. # - First try results: # [success, retryable, non-retryable, retryable] # Action: # - Second try should re-attempt the 'retryable' rows. # Expectation: # - After second try: # [success, success, non-retryable, retryable] # - One of the rows tried second time returns retryable error code, # so expect a raise. # - Exception contains response whose index should be '3' even though # only two rows were retried. # Patch the stub used by the API method. # Setup: # - Mutate 4 rows. # - First try results: # [success, retryable, non-retryable, retryable] # Action: # - Second try should re-attempt the 'retryable' rows. # Expectation: # - After second try: # [success, non-retryable, non-retryable, success] # Patch the stub used by the API method. # Setup: # - Mutate 2 rows. 
# - First try results: [success, non-retryable] # Action: # - Second try has no row to retry. # Expectation: # - After second try: [success, non-retryable] # Patch the stub used by the API method.
| 2.271503
| 2
|
PDF_Text_Extractor/src/PDF_Extractor.py
|
jamescrone1/Python-PDF-Extractor
| 0
|
6626658
|
<reponame>jamescrone1/Python-PDF-Extractor
import PyPDF2
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import TextConverter, PDFPageAggregator
from pdfminer.layout import LAParams
from pdfminer.layout import LTTextBoxHorizontal
from io import StringIO
# PDFMiner method
def pdfminer_pdf_text(file_path):
    """Extract text from a PDF using pdfminer's layout analysis.

    Args:
        file_path: Path to the PDF file on disk.

    Returns:
        The concatenated text of every horizontal text box, in page
        order.  Line breaks inside a text box are discarded (lines are
        joined with no separator), matching the original behaviour.
    """
    lines = []
    rsrcmgr = PDFResourceManager()
    laparams = LAParams()
    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    with open(file_path, 'rb') as f:
        for page in PDFPage.get_pages(f):
            interpreter.process_page(page)
            layout = device.get_result()
            for element in layout:
                if isinstance(element, LTTextBoxHorizontal):
                    lines.extend(element.get_text().splitlines())
    # str.join is linear overall; the original per-line ``+=`` loop was
    # quadratic in the total text size.
    return "".join(lines)
# PyPDF2 method
def pdf2_pdf_text(file_path):
    """Extract text from a PDF using PyPDF2.

    Args:
        file_path: Path to the PDF file on disk.

    Returns:
        The concatenation of ``extractText()`` over every page; may be
        empty when PyPDF2 cannot extract any text (e.g. scanned PDFs).
    """
    with open(file_path, 'rb') as f:
        pdf_reader = PyPDF2.PdfFileReader(f)
        # The original manual ``while`` counter is replaced by idiomatic
        # iteration, and the no-op ``if text != "": text = text`` (a
        # leftover stub, likely for an OCR fallback) is removed.
        return "".join(
            pdf_reader.getPage(page_num).extractText()
            for page_num in range(pdf_reader.numPages)
        )
|
import PyPDF2
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import TextConverter, PDFPageAggregator
from pdfminer.layout import LAParams
from pdfminer.layout import LTTextBoxHorizontal
from io import StringIO
# PDFMiner method
def pdfminer_pdf_text(file_path):
    """Extract text from a PDF using pdfminer's layout analysis.

    Args:
        file_path: Path to the PDF file on disk.

    Returns:
        The concatenated text of every horizontal text box, in page
        order.  Line breaks inside a text box are discarded (lines are
        joined with no separator), matching the original behaviour.
    """
    lines = []
    rsrcmgr = PDFResourceManager()
    laparams = LAParams()
    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    with open(file_path, 'rb') as f:
        for page in PDFPage.get_pages(f):
            interpreter.process_page(page)
            layout = device.get_result()
            for element in layout:
                if isinstance(element, LTTextBoxHorizontal):
                    lines.extend(element.get_text().splitlines())
    # str.join is linear overall; the original per-line ``+=`` loop was
    # quadratic in the total text size.
    return "".join(lines)
# PyPDF2 method
def pdf2_pdf_text(file_path):
    """Extract text from a PDF using PyPDF2.

    Args:
        file_path: Path to the PDF file on disk.

    Returns:
        The concatenation of ``extractText()`` over every page; may be
        empty when PyPDF2 cannot extract any text (e.g. scanned PDFs).
    """
    with open(file_path, 'rb') as f:
        pdf_reader = PyPDF2.PdfFileReader(f)
        # The original manual ``while`` counter is replaced by idiomatic
        # iteration, and the no-op ``if text != "": text = text`` (a
        # leftover stub, likely for an OCR fallback) is removed.
        return "".join(
            pdf_reader.getPage(page_num).extractText()
            for page_num in range(pdf_reader.numPages)
        )
|
fr
| 0.293569
|
# PDFMiner method # PyPDF2 method
| 2.882123
| 3
|
src/primaires/format/__init__.py
|
stormi/tsunami
| 0
|
6626659
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le module primaire format."""
from abstraits.module import *
from primaires.format import commandes
from primaires.format.config import cfg_charte
from primaires.format.description_flottante import DescriptionFlottante
from primaires.format.editeurs.floatedit import EdtFloatedit
from primaires.format.message import Message
class Module(BaseModule):
    """Primary module ``format``.

    This module is in charge of formatting, in particular of the
    messages sent to clients.  It also owns the "floating
    descriptions" (free-standing descriptions keyed by ``cle``).
    """

    def __init__(self, importeur):
        """Module constructor."""
        BaseModule.__init__(self, importeur, "format", "primaire")

    def config(self):
        """Configure the module.

        Loads the configuration file used later for text layout.
        """
        type(self.importeur).anaconf.get_config("charte_graph", \
                "format/charte.cfg", "modele charte graphique", cfg_charte)

        # Register hooks.
        # NOTE(review): uses the global ``importeur`` rather than
        # ``self.importeur`` — presumably equivalent in this framework;
        # worth confirming for consistency.
        importeur.hook.ajouter_hook("description:ajouter_variables",
                "Hook appelé pour ajouter des variables aux descriptions")

        BaseModule.config(self)
        # Map of cle -> DescriptionFlottante, populated in init().
        self.descriptions_flottantes = {}

    def init(self):
        """Initialize the module: reload persisted floating descriptions."""
        flottantes = self.importeur.supenr.charger_groupe(DescriptionFlottante)
        for flottante in flottantes:
            self.descriptions_flottantes[flottante.cle] = flottante

        BaseModule.init(self)

    def ajouter_commandes(self):
        """Register this module's commands and editors with the interpreter."""
        self.commandes = [
            commandes.flottantes.CmdFlottantes(),
        ]

        for cmd in self.commandes:
            self.importeur.interpreteur.ajouter_commande(cmd)

        # Register editors.
        self.importeur.interpreteur.ajouter_editeur(EdtFloatedit)

    def formater(self, message):
        """Return *message* formatted per the graphic charter.

        See: primaires.format.message
        """
        nv_message = Message(message, \
                type(self.importeur).anaconf.get_config("charte_graph"))
        return nv_message

    def creer_description_flottante(self, cle):
        """Create and register a floating description keyed by *cle*.

        Raises KeyError if *cle* is already in use.
        """
        if cle in self.descriptions_flottantes:
            raise KeyError(cle)

        flottante = DescriptionFlottante(cle)
        self.descriptions_flottantes[cle] = flottante
        return flottante

    def supprimer_description_flottante(self, cle):
        """Remove and destroy the floating description keyed by *cle*.

        Raises KeyError if *cle* is unknown.
        """
        if cle not in self.descriptions_flottantes:
            raise KeyError(cle)

        flottante = self.descriptions_flottantes.pop(cle)
        flottante.detruire()
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le module primaire format."""
from abstraits.module import *
from primaires.format import commandes
from primaires.format.config import cfg_charte
from primaires.format.description_flottante import DescriptionFlottante
from primaires.format.editeurs.floatedit import EdtFloatedit
from primaires.format.message import Message
class Module(BaseModule):
    """Primary module ``format``.

    This module is in charge of formatting, most notably of the
    messages sent to the clients.  It also owns the "floating
    descriptions" registry (key -> DescriptionFlottante).
    """
    def __init__(self, importeur):
        """Module constructor: registers as primary module 'format'."""
        BaseModule.__init__(self, importeur, "format", "primaire")
    def config(self):
        """Configure the module.

        The configuration file is created here so it can be used later
        on for formatting.
        """
        type(self.importeur).anaconf.get_config("charte_graph", \
            "format/charte.cfg", "modele charte graphique", cfg_charte)
        # Hook registration: lets other modules add variables to descriptions.
        # NOTE(review): 'importeur' here is the global, unlike 'self.importeur'
        # used above — presumably equivalent in this codebase; confirm.
        importeur.hook.ajouter_hook("description:ajouter_variables",
            "Hook appelé pour ajouter des variables aux descriptions")
        BaseModule.config(self)
        # Registry of floating descriptions, filled back in init().
        self.descriptions_flottantes = {}
    def init(self):
        """Initialise the module.

        Reloads the persisted floating descriptions from storage.
        """
        flottantes = self.importeur.supenr.charger_groupe(DescriptionFlottante)
        for flottante in flottantes:
            self.descriptions_flottantes[flottante.cle] = flottante
        BaseModule.init(self)
    def ajouter_commandes(self):
        """Register this module's commands in the interpreter."""
        self.commandes = [
            commandes.flottantes.CmdFlottantes(),
        ]
        for cmd in self.commandes:
            self.importeur.interpreteur.ajouter_commande(cmd)
        # Editor registration
        self.importeur.interpreteur.ajouter_editeur(EdtFloatedit)
    def formater(self, message):
        """Return the formatted message.

        See: primaires.format.message
        """
        nv_message = Message(message, \
            type(self.importeur).anaconf.get_config("charte_graph"))
        return nv_message
    def creer_description_flottante(self, cle):
        """Create a floating description; raises KeyError if ``cle`` is taken."""
        if cle in self.descriptions_flottantes:
            raise KeyError(cle)
        flottante = DescriptionFlottante(cle)
        self.descriptions_flottantes[cle] = flottante
        return flottante
    def supprimer_description_flottante(self, cle):
        """Remove the floating description; raises KeyError if unknown."""
        if cle not in self.descriptions_flottantes:
            raise KeyError(cle)
        flottante = self.descriptions_flottantes.pop(cle)
        flottante.detruire()
|
en
| 0.330602
|
# -*-coding:Utf-8 -* # Copyright (c) 2010 <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT # OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. Fichier contenant le module primaire format. Cette classe décrit le module primaire Format. Ce module est particulièrement chargé du formatage, notamment des messages à envoyer aux clients. Constructeur du module Configuration du module. On crée le fichier de configuration afin de l'utiliser plus tard pour la mise en forme. # Ajout des hooks Initialisation du module. On récupère les descriptions flottantes. 
Ajout des commandes dans l'interpréteur # Ajout des éditeurs Retourne le message formaté. Voir : primaires.format.message Crée une description flottante. Supprime la description flottante indiquée.
| 1.501011
| 2
|
sympy/integrals/benchmarks/bench_integrate.py
|
iamabhishek0/sympy
| 445
|
6626660
|
<filename>sympy/integrals/benchmarks/bench_integrate.py
from __future__ import print_function, division
from sympy import integrate, Symbol, sin
# Single symbolic variable shared by all the benchmarks below.
x = Symbol('x')


def bench_integrate_sin():
    """Benchmark the antiderivative of sin(x)."""
    integrand = sin(x)
    integrate(integrand, x)


def bench_integrate_x1sin():
    """Benchmark the antiderivative of x**1 * sin(x)."""
    integrand = x**1 * sin(x)
    integrate(integrand, x)


def bench_integrate_x2sin():
    """Benchmark the antiderivative of x**2 * sin(x)."""
    integrand = x**2 * sin(x)
    integrate(integrand, x)


def bench_integrate_x3sin():
    """Benchmark the antiderivative of x**3 * sin(x)."""
    integrand = x**3 * sin(x)
    integrate(integrand, x)
|
<filename>sympy/integrals/benchmarks/bench_integrate.py
from __future__ import print_function, division
from sympy import integrate, Symbol, sin
# Single symbolic variable shared by all the benchmarks below.
x = Symbol('x')


def bench_integrate_sin():
    """Benchmark the antiderivative of sin(x)."""
    integrand = sin(x)
    integrate(integrand, x)


def bench_integrate_x1sin():
    """Benchmark the antiderivative of x**1 * sin(x)."""
    integrand = x**1 * sin(x)
    integrate(integrand, x)


def bench_integrate_x2sin():
    """Benchmark the antiderivative of x**2 * sin(x)."""
    integrand = x**2 * sin(x)
    integrate(integrand, x)


def bench_integrate_x3sin():
    """Benchmark the antiderivative of x**3 * sin(x)."""
    integrand = x**3 * sin(x)
    integrate(integrand, x)
|
none
| 1
| 2.545182
| 3
|
|
addons/snailmail/models/snailmail_letter.py
|
SHIVJITH/Odoo_Machine_Test
| 0
|
6626661
|
<reponame>SHIVJITH/Odoo_Machine_Test
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
import base64
from odoo import fields, models, api, _
from odoo.addons.iap.tools import iap_tools
from odoo.tools.safe_eval import safe_eval
# IAP endpoint used when no 'snailmail.endpoint' config parameter is set.
DEFAULT_ENDPOINT = 'https://iap-snailmail.odoo.com'
# Route on the IAP server that performs the actual print-and-post call.
PRINT_ENDPOINT = '/iap/snailmail/1/print'
# Default timeout (seconds) for the IAP JSON-RPC request.
DEFAULT_TIMEOUT = 30
# Error identifiers understood by _get_error_message()/_get_failure_type();
# anything else is mapped to 'UNKNOWN_ERROR' when stored on a letter.
ERROR_CODES = [
    'MISSING_REQUIRED_FIELDS',
    'CREDIT_ERROR',
    'TRIAL_ERROR',
    'NO_PRICE_AVAILABLE',
    'FORMAT_ERROR',
    'UNKNOWN_ERROR',
]
class SnailmailLetter(models.Model):
    """Queue entry for a document to be printed and posted through the
    Snailmail IAP service.

    A letter references a source record (``model``/``res_id``), snapshots
    the recipient address from the partner at creation time, and lazily
    generates the PDF attachment that is actually sent.
    """
    _name = 'snailmail.letter'
    _description = 'Snailmail Letter'

    user_id = fields.Many2one('res.users', 'Sent by')
    # Source record the letter was generated from.
    model = fields.Char('Model', required=True)
    res_id = fields.Integer('Document ID', required=True)
    partner_id = fields.Many2one('res.partner', string='Recipient', required=True)
    company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True,
        default=lambda self: self.env.company.id)
    report_template = fields.Many2one('ir.actions.report', 'Optional report to print and attach')
    # PDF that will be sent; generated on demand by _fetch_attachment().
    attachment_id = fields.Many2one('ir.attachment', string='Attachment', ondelete='cascade')
    attachment_datas = fields.Binary('Document', related='attachment_id.datas')
    attachment_fname = fields.Char('Attachment Filename', related='attachment_id.name')
    # Print options, defaulted from the company's snailmail settings.
    color = fields.Boolean(string='Color', default=lambda self: self.env.company.snailmail_color)
    cover = fields.Boolean(string='Cover Page', default=lambda self: self.env.company.snailmail_cover)
    duplex = fields.Boolean(string='Both side', default=lambda self: self.env.company.snailmail_duplex)
    state = fields.Selection([
        ('pending', 'In Queue'),
        ('sent', 'Sent'),
        ('error', 'Error'),
        ('canceled', 'Canceled')
    ], 'Status', readonly=True, copy=False, default='pending', required=True,
        help="When a letter is created, the status is 'Pending'.\n"
            "If the letter is correctly sent, the status goes in 'Sent',\n"
            "If not, it will got in state 'Error' and the error message will be displayed in the field 'Error Message'.")
    error_code = fields.Selection([(err_code, err_code) for err_code in ERROR_CODES], string="Error")
    info_msg = fields.Char('Information')
    display_name = fields.Char('Display Name', compute="_compute_display_name")
    reference = fields.Char(string='Related Record', compute='_compute_reference', readonly=True, store=False)
    message_id = fields.Many2one('mail.message', string="Snailmail Status Message")
    notification_ids = fields.One2many('mail.notification', 'letter_id', "Notifications")
    # Snapshot of the recipient address, copied from the partner in create()
    # so that later partner edits do not change an already-queued letter.
    street = fields.Char('Street')
    street2 = fields.Char('Street2')
    zip = fields.Char('Zip')
    city = fields.Char('City')
    state_id = fields.Many2one("res.country.state", string='State')
    country_id = fields.Many2one('res.country', string='Country')

    @api.depends('reference', 'partner_id')
    def _compute_display_name(self):
        """Label the letter after its attachment and/or its recipient."""
        for letter in self:
            if letter.attachment_id:
                letter.display_name = "%s - %s" % (letter.attachment_id.name, letter.partner_id.name)
            else:
                letter.display_name = letter.partner_id.name

    @api.depends('model', 'res_id')
    def _compute_reference(self):
        """Build the "model,id" reference string of the source record."""
        for res in self:
            res.reference = "%s,%s" % (res.model, res.res_id)

    @api.model
    def create(self, vals):
        """Create the letter, log a 'snailmail' message on the source record
        and attach a 'snail' notification to that message.

        The recipient's address is snapshotted from the partner into the
        letter's own address fields.
        """
        msg_id = self.env[vals['model']].browse(vals['res_id']).message_post(
            body=_("Letter sent by post with Snailmail"),
            message_type='snailmail'
        )
        partner_id = self.env['res.partner'].browse(vals['partner_id'])
        vals.update({
            'message_id': msg_id.id,
            'street': partner_id.street,
            'street2': partner_id.street2,
            'zip': partner_id.zip,
            'city': partner_id.city,
            'state_id': partner_id.state_id.id,
            'country_id': partner_id.country_id.id,
        })
        letter = super(SnailmailLetter, self).create(vals)
        self.env['mail.notification'].sudo().create({
            'mail_message_id': msg_id.id,
            'res_partner_id': partner_id.id,
            'notification_type': 'snail',
            'letter_id': letter.id,
            'is_read': True, # discard Inbox notification
            'notification_status': 'ready',
        })
        return letter

    def _fetch_attachment(self):
        """
        This method will check if we have any existent attachement matching the model
        and res_ids and create them if not found.

        Returns the ir.attachment record holding the PDF, or False when no
        report could be resolved to render it.
        """
        self.ensure_one()
        obj = self.env[self.model].browse(self.res_id)
        if not self.attachment_id:
            report = self.report_template
            if not report:
                # Fall back on a report name passed through the context.
                report_name = self.env.context.get('report_name')
                report = self.env['ir.actions.report']._get_report_from_name(report_name)
                if not report:
                    return False
                else:
                    # Remember the resolved report for subsequent calls.
                    self.write({'report_template': report.id})
            # report = self.env.ref('account.account_invoices')
            if report.print_report_name:
                report_name = safe_eval(report.print_report_name, {'object': obj})
            elif report.attachment:
                report_name = safe_eval(report.attachment, {'object': obj})
            else:
                report_name = 'Document'
            filename = "%s.%s" % (report_name, "pdf")
            # NOTE(review): the "_" target shadows the gettext alias imported
            # at module level for the remainder of this method.
            pdf_bin, _ = report.with_context(snailmail_layout=not self.cover)._render_qweb_pdf(self.res_id)
            attachment = self.env['ir.attachment'].create({
                'name': filename,
                'datas': base64.b64encode(pdf_bin),
                'res_model': 'snailmail.letter',
                'res_id': self.id,
                'type': 'binary', # override default_type from context, possibly meant for another model!
            })
            self.write({'attachment_id': attachment.id})
        return self.attachment_id

    def _count_pages_pdf(self, bin_pdf):
        """ Count the number of pages of the given pdf file.
        :param bin_pdf : binary content of the pdf file
        """
        # Keeps the value of the LAST /Count entry found — presumably the
        # root page-tree node's total in the PDFs produced by the report
        # engine; TODO confirm against arbitrary PDFs.
        # NOTE(review): the pattern should be a raw bytes literal
        # (rb"/Count\s+(\d+)") to avoid invalid-escape warnings; unchanged here.
        pages = 0
        for match in re.compile(b"/Count\s+(\d+)").finditer(bin_pdf):
            pages = int(match.group(1))
        return pages

    def _snailmail_create(self, route):
        """
        Create a dictionary object to send to snailmail server.

        :param route: 'estimate' sends one page per document with no PDF;
            anything else (in practice 'print') embeds the rendered PDF.
        :return: Dict in the form:
        {
            account_token: string,    //IAP Account token of the user
            documents: [{
                pages: int,
                pdf_bin: pdf file
                res_id: int (client-side res_id),
                res_model: char (client-side res_model),
                address: {
                    name: char,
                    street: char,
                    street2: char (OPTIONAL),
                    zip: int,
                    city: char,
                    state: char (state code (OPTIONAL)),
                    country_code: char (country code)
                }
                return_address: {
                    name: char,
                    street: char,
                    street2: char (OPTIONAL),
                    zip: int,
                    city: char,
                    state: char (state code (OPTIONAL)),
                    country_code: char (country code)
                }
            }],
            options: {
                color: boolean (true if color, false if black-white),
                duplex: boolean (true if duplex, false otherwise),
                currency_name: char
            }
        }
        """
        account_token = self.env['iap.account'].get('snailmail').account_token
        dbuuid = self.env['ir.config_parameter'].sudo().get_param('database.uuid')
        documents = []
        # NOTE(review): 'batch' is computed but never used; the payload below
        # hardcodes 'batch': True instead (see its inline comment).
        batch = len(self) > 1
        for letter in self:
            document = {
                # generic informations to send
                'letter_id': letter.id,
                'res_model': letter.model,
                'res_id': letter.res_id,
                'contact_address': letter.partner_id.with_context(snailmail_layout=True, show_address=True).name_get()[0][1],
                'address': {
                    'name': letter.partner_id.name,
                    'street': letter.partner_id.street,
                    'street2': letter.partner_id.street2,
                    'zip': letter.partner_id.zip,
                    'state': letter.partner_id.state_id.code if letter.partner_id.state_id else False,
                    'city': letter.partner_id.city,
                    'country_code': letter.partner_id.country_id.code
                },
                'return_address': {
                    'name': letter.company_id.partner_id.name,
                    'street': letter.company_id.partner_id.street,
                    'street2': letter.company_id.partner_id.street2,
                    'zip': letter.company_id.partner_id.zip,
                    'state': letter.company_id.partner_id.state_id.code if letter.company_id.partner_id.state_id else False,
                    'city': letter.company_id.partner_id.city,
                    'country_code': letter.company_id.partner_id.country_id.code,
                }
            }
            # Specific to each case:
            # If we are estimating the price: 1 object = 1 page
            # If we are printing -> attach the pdf
            if route == 'estimate':
                document.update(pages=1)
            else:
                # adding the web logo from the company for future possible customization
                document.update({
                    'company_logo': letter.company_id.logo_web and letter.company_id.logo_web.decode('utf-8') or False,
                })
                attachment = letter._fetch_attachment()
                if attachment:
                    document.update({
                        'pdf_bin': route == 'print' and attachment.datas.decode('utf-8'),
                        'pages': route == 'estimate' and self._count_pages_pdf(base64.b64decode(attachment.datas)),
                    })
                else:
                    # No PDF could be produced: flag the letter and skip it.
                    letter.write({
                        'info_msg': 'The attachment could not be generated.',
                        'state': 'error',
                        'error_code': 'ATTACHMENT_ERROR'
                    })
                    continue
            if letter.company_id.external_report_layout_id == self.env.ref('l10n_de.external_layout_din5008', False):
                # NOTE(review): presumably the DIN 5008 layout requires the
                # address block on the left — confirm with the IAP contract.
                document.update({
                    'rightaddress': 0,
                })
            documents.append(document)
        return {
            'account_token': account_token,
            'dbuuid': dbuuid,
            'documents': documents,
            'options': {
                'color': self and self[0].color,
                'cover': self and self[0].cover,
                'duplex': self and self[0].duplex,
                'currency_name': 'EUR',
            },
            # this will not raise the InsufficientCreditError which is the behaviour we want for now
            'batch': True,
        }

    def _get_error_message(self, error):
        """Map an IAP error code to a translated, user-facing HTML message."""
        if error == 'CREDIT_ERROR':
            link = self.env['iap.account'].get_credits_url(service_name='snailmail')
            return _('You don\'t have enough credits to perform this operation.<br>Please go to your <a href=%s target="new">iap account</a>.', link)
        if error == 'TRIAL_ERROR':
            link = self.env['iap.account'].get_credits_url(service_name='snailmail', trial=True)
            return _('You don\'t have an IAP account registered for this service.<br>Please go to <a href=%s target="new">iap.odoo.com</a> to claim your free credits.', link)
        if error == 'NO_PRICE_AVAILABLE':
            return _('The country of the partner is not covered by Snailmail.')
        if error == 'MISSING_REQUIRED_FIELDS':
            return _('One or more required fields are empty.')
        if error == 'FORMAT_ERROR':
            return _('The attachment of the letter could not be sent. Please check its content and contact the support if the problem persists.')
        else:
            return _('An unknown error happened. Please contact the support.')
        # NOTE(review): unreachable — every branch above returns.
        return error

    def _get_failure_type(self, error):
        """Map an IAP error code to a mail.notification failure_type value."""
        if error == 'CREDIT_ERROR':
            return 'sn_credit'
        if error == 'TRIAL_ERROR':
            return 'sn_trial'
        if error == 'NO_PRICE_AVAILABLE':
            return 'sn_price'
        if error == 'MISSING_REQUIRED_FIELDS':
            return 'sn_fields'
        if error == 'FORMAT_ERROR':
            return 'sn_format'
        else:
            return 'sn_error'

    def _snailmail_print(self, immediate=True):
        """Send the letters in ``self``.

        Letters with an incomplete address are flagged in error; the others
        are pushed to the IAP service one by one when ``immediate`` is True.
        """
        valid_address_letters = self.filtered(lambda l: l._is_valid_address(l))
        invalid_address_letters = self - valid_address_letters
        invalid_address_letters._snailmail_print_invalid_address()
        if valid_address_letters and immediate:
            for letter in valid_address_letters:
                letter._snailmail_print_valid_address()
                # Commit per letter so a later failure cannot roll back an
                # already-sent one.
                self.env.cr.commit()

    def _snailmail_print_invalid_address(self):
        """Flag every letter in ``self`` as failed for incomplete address."""
        error = 'MISSING_REQUIRED_FIELDS'
        error_message = _("The address of the recipient is not complete")
        self.write({
            'state': 'error',
            'error_code': error,
            'info_msg': error_message,
        })
        self.notification_ids.sudo().write({
            'notification_status': 'exception',
            'failure_type': self._get_failure_type(error),
            'failure_reason': error_message,
        })
        self.message_id._notify_message_notification_update()

    def _snailmail_print_valid_address(self):
        """
        Send the letter(s) to the IAP server and store the per-document
        outcome.  The server response has the shape:
        {
            'request_code': RESPONSE_OK, # because we receive 200 if good or fail
            'total_cost': total_cost,
            'credit_error': credit_error,
            'request': {
                'documents': documents,
                'options': options
            }
        }
        """
        endpoint = self.env['ir.config_parameter'].sudo().get_param('snailmail.endpoint', DEFAULT_ENDPOINT)
        timeout = int(self.env['ir.config_parameter'].sudo().get_param('snailmail.timeout', DEFAULT_TIMEOUT))
        params = self._snailmail_create('print')
        response = iap_tools.iap_jsonrpc(endpoint + PRINT_ENDPOINT, params=params, timeout=timeout)
        for doc in response['request']['documents']:
            if doc.get('sent') and response['request_code'] == 200:
                note = _('The document was correctly sent by post.<br>The tracking id is %s', doc['send_id'])
                letter_data = {'info_msg': note, 'state': 'sent', 'error_code': False}
                notification_data = {
                    'notification_status': 'sent',
                    'failure_type': False,
                    'failure_reason': False,
                }
            else:
                # On a non-200 answer the whole request failed; otherwise the
                # per-document 'error' field carries the reason.
                error = doc['error'] if response['request_code'] == 200 else response['reason']
                note = _('An error occured when sending the document by post.<br>Error: %s', self._get_error_message(error))
                letter_data = {
                    'info_msg': note,
                    'state': 'error',
                    'error_code': error if error in ERROR_CODES else 'UNKNOWN_ERROR'
                }
                notification_data = {
                    'notification_status': 'exception',
                    'failure_type': self._get_failure_type(error),
                    'failure_reason': note,
                }
            letter = self.browse(doc['letter_id'])
            letter.write(letter_data)
            letter.notification_ids.sudo().write(notification_data)
        self.message_id._notify_message_notification_update()

    def snailmail_print(self):
        """Re-queue the letters; a single record is also sent immediately."""
        self.write({'state': 'pending'})
        self.notification_ids.sudo().write({
            'notification_status': 'ready',
            'failure_type': False,
            'failure_reason': False,
        })
        self.message_id._notify_message_notification_update()
        if len(self) == 1:
            self._snailmail_print()

    def cancel(self):
        """Cancel the letters and their pending notifications."""
        self.write({'state': 'canceled', 'error_code': False})
        self.notification_ids.sudo().write({
            'notification_status': 'canceled',
        })
        self.message_id._notify_message_notification_update()

    @api.model
    def _snailmail_cron(self, autocommit=True):
        """Scheduled job: (re)send pending letters and letters whose previous
        failure is considered recoverable."""
        letters_send = self.search([
            '|',
            ('state', '=', 'pending'),
            '&',
            ('state', '=', 'error'),
            ('error_code', 'in', ['TRIAL_ERROR', 'CREDIT_ERROR', 'ATTACHMENT_ERROR', 'MISSING_REQUIRED_FIELDS'])
        ])
        for letter in letters_send:
            letter._snailmail_print()
            # Commit after every letter sent to avoid to send it again in case of a rollback
            if autocommit:
                self.env.cr.commit()

    @api.model
    def _is_valid_address(self, record):
        """Return True when the record carries the minimum address fields
        required for posting (street, city, zip and country)."""
        record.ensure_one()
        required_keys = ['street', 'city', 'zip', 'country_id']
        return all(record[key] for key in required_keys)
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
import base64
from odoo import fields, models, api, _
from odoo.addons.iap.tools import iap_tools
from odoo.tools.safe_eval import safe_eval
DEFAULT_ENDPOINT = 'https://iap-snailmail.odoo.com'
PRINT_ENDPOINT = '/iap/snailmail/1/print'
DEFAULT_TIMEOUT = 30
ERROR_CODES = [
'MISSING_REQUIRED_FIELDS',
'CREDIT_ERROR',
'TRIAL_ERROR',
'NO_PRICE_AVAILABLE',
'FORMAT_ERROR',
'UNKNOWN_ERROR',
]
class SnailmailLetter(models.Model):
_name = 'snailmail.letter'
_description = 'Snailmail Letter'
user_id = fields.Many2one('res.users', 'Sent by')
model = fields.Char('Model', required=True)
res_id = fields.Integer('Document ID', required=True)
partner_id = fields.Many2one('res.partner', string='Recipient', required=True)
company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True,
default=lambda self: self.env.company.id)
report_template = fields.Many2one('ir.actions.report', 'Optional report to print and attach')
attachment_id = fields.Many2one('ir.attachment', string='Attachment', ondelete='cascade')
attachment_datas = fields.Binary('Document', related='attachment_id.datas')
attachment_fname = fields.Char('Attachment Filename', related='attachment_id.name')
color = fields.Boolean(string='Color', default=lambda self: self.env.company.snailmail_color)
cover = fields.Boolean(string='Cover Page', default=lambda self: self.env.company.snailmail_cover)
duplex = fields.Boolean(string='Both side', default=lambda self: self.env.company.snailmail_duplex)
state = fields.Selection([
('pending', 'In Queue'),
('sent', 'Sent'),
('error', 'Error'),
('canceled', 'Canceled')
], 'Status', readonly=True, copy=False, default='pending', required=True,
help="When a letter is created, the status is 'Pending'.\n"
"If the letter is correctly sent, the status goes in 'Sent',\n"
"If not, it will got in state 'Error' and the error message will be displayed in the field 'Error Message'.")
error_code = fields.Selection([(err_code, err_code) for err_code in ERROR_CODES], string="Error")
info_msg = fields.Char('Information')
display_name = fields.Char('Display Name', compute="_compute_display_name")
reference = fields.Char(string='Related Record', compute='_compute_reference', readonly=True, store=False)
message_id = fields.Many2one('mail.message', string="Snailmail Status Message")
notification_ids = fields.One2many('mail.notification', 'letter_id', "Notifications")
street = fields.Char('Street')
street2 = fields.Char('Street2')
zip = fields.Char('Zip')
city = fields.Char('City')
state_id = fields.Many2one("res.country.state", string='State')
country_id = fields.Many2one('res.country', string='Country')
@api.depends('reference', 'partner_id')
def _compute_display_name(self):
for letter in self:
if letter.attachment_id:
letter.display_name = "%s - %s" % (letter.attachment_id.name, letter.partner_id.name)
else:
letter.display_name = letter.partner_id.name
@api.depends('model', 'res_id')
def _compute_reference(self):
for res in self:
res.reference = "%s,%s" % (res.model, res.res_id)
@api.model
def create(self, vals):
msg_id = self.env[vals['model']].browse(vals['res_id']).message_post(
body=_("Letter sent by post with Snailmail"),
message_type='snailmail'
)
partner_id = self.env['res.partner'].browse(vals['partner_id'])
vals.update({
'message_id': msg_id.id,
'street': partner_id.street,
'street2': partner_id.street2,
'zip': partner_id.zip,
'city': partner_id.city,
'state_id': partner_id.state_id.id,
'country_id': partner_id.country_id.id,
})
letter = super(SnailmailLetter, self).create(vals)
self.env['mail.notification'].sudo().create({
'mail_message_id': msg_id.id,
'res_partner_id': partner_id.id,
'notification_type': 'snail',
'letter_id': letter.id,
'is_read': True, # discard Inbox notification
'notification_status': 'ready',
})
return letter
def _fetch_attachment(self):
"""
This method will check if we have any existent attachement matching the model
and res_ids and create them if not found.
"""
self.ensure_one()
obj = self.env[self.model].browse(self.res_id)
if not self.attachment_id:
report = self.report_template
if not report:
report_name = self.env.context.get('report_name')
report = self.env['ir.actions.report']._get_report_from_name(report_name)
if not report:
return False
else:
self.write({'report_template': report.id})
# report = self.env.ref('account.account_invoices')
if report.print_report_name:
report_name = safe_eval(report.print_report_name, {'object': obj})
elif report.attachment:
report_name = safe_eval(report.attachment, {'object': obj})
else:
report_name = 'Document'
filename = "%s.%s" % (report_name, "pdf")
pdf_bin, _ = report.with_context(snailmail_layout=not self.cover)._render_qweb_pdf(self.res_id)
attachment = self.env['ir.attachment'].create({
'name': filename,
'datas': base64.b64encode(pdf_bin),
'res_model': 'snailmail.letter',
'res_id': self.id,
'type': 'binary', # override default_type from context, possibly meant for another model!
})
self.write({'attachment_id': attachment.id})
return self.attachment_id
def _count_pages_pdf(self, bin_pdf):
""" Count the number of pages of the given pdf file.
:param bin_pdf : binary content of the pdf file
"""
pages = 0
for match in re.compile(b"/Count\s+(\d+)").finditer(bin_pdf):
pages = int(match.group(1))
return pages
def _snailmail_create(self, route):
"""
Create a dictionnary object to send to snailmail server.
:return: Dict in the form:
{
account_token: string, //IAP Account token of the user
documents: [{
pages: int,
pdf_bin: pdf file
res_id: int (client-side res_id),
res_model: char (client-side res_model),
address: {
name: char,
street: char,
street2: char (OPTIONAL),
zip: int,
city: char,
state: char (state code (OPTIONAL)),
country_code: char (country code)
}
return_address: {
name: char,
street: char,
street2: char (OPTIONAL),
zip: int,
city: char,at
state: char (state code (OPTIONAL)),
country_code: char (country code)
}
}],
options: {
color: boolean (true if color, false if black-white),
duplex: boolean (true if duplex, false otherwise),
currency_name: char
}
}
"""
account_token = self.env['iap.account'].get('snailmail').account_token
dbuuid = self.env['ir.config_parameter'].sudo().get_param('database.uuid')
documents = []
batch = len(self) > 1
for letter in self:
document = {
# generic informations to send
'letter_id': letter.id,
'res_model': letter.model,
'res_id': letter.res_id,
'contact_address': letter.partner_id.with_context(snailmail_layout=True, show_address=True).name_get()[0][1],
'address': {
'name': letter.partner_id.name,
'street': letter.partner_id.street,
'street2': letter.partner_id.street2,
'zip': letter.partner_id.zip,
'state': letter.partner_id.state_id.code if letter.partner_id.state_id else False,
'city': letter.partner_id.city,
'country_code': letter.partner_id.country_id.code
},
'return_address': {
'name': letter.company_id.partner_id.name,
'street': letter.company_id.partner_id.street,
'street2': letter.company_id.partner_id.street2,
'zip': letter.company_id.partner_id.zip,
'state': letter.company_id.partner_id.state_id.code if letter.company_id.partner_id.state_id else False,
'city': letter.company_id.partner_id.city,
'country_code': letter.company_id.partner_id.country_id.code,
}
}
# Specific to each case:
# If we are estimating the price: 1 object = 1 page
# If we are printing -> attach the pdf
if route == 'estimate':
document.update(pages=1)
else:
# adding the web logo from the company for future possible customization
document.update({
'company_logo': letter.company_id.logo_web and letter.company_id.logo_web.decode('utf-8') or False,
})
attachment = letter._fetch_attachment()
if attachment:
document.update({
'pdf_bin': route == 'print' and attachment.datas.decode('utf-8'),
'pages': route == 'estimate' and self._count_pages_pdf(base64.b64decode(attachment.datas)),
})
else:
letter.write({
'info_msg': 'The attachment could not be generated.',
'state': 'error',
'error_code': 'ATTACHMENT_ERROR'
})
continue
if letter.company_id.external_report_layout_id == self.env.ref('l10n_de.external_layout_din5008', False):
document.update({
'rightaddress': 0,
})
documents.append(document)
return {
'account_token': account_token,
'dbuuid': dbuuid,
'documents': documents,
'options': {
'color': self and self[0].color,
'cover': self and self[0].cover,
'duplex': self and self[0].duplex,
'currency_name': 'EUR',
},
# this will not raise the InsufficientCreditError which is the behaviour we want for now
'batch': True,
}
def _get_error_message(self, error):
if error == 'CREDIT_ERROR':
link = self.env['iap.account'].get_credits_url(service_name='snailmail')
return _('You don\'t have enough credits to perform this operation.<br>Please go to your <a href=%s target="new">iap account</a>.', link)
if error == 'TRIAL_ERROR':
link = self.env['iap.account'].get_credits_url(service_name='snailmail', trial=True)
return _('You don\'t have an IAP account registered for this service.<br>Please go to <a href=%s target="new">iap.odoo.com</a> to claim your free credits.', link)
if error == 'NO_PRICE_AVAILABLE':
return _('The country of the partner is not covered by Snailmail.')
if error == 'MISSING_REQUIRED_FIELDS':
return _('One or more required fields are empty.')
if error == 'FORMAT_ERROR':
return _('The attachment of the letter could not be sent. Please check its content and contact the support if the problem persists.')
else:
return _('An unknown error happened. Please contact the support.')
return error
def _get_failure_type(self, error):
if error == 'CREDIT_ERROR':
return 'sn_credit'
if error == 'TRIAL_ERROR':
return 'sn_trial'
if error == 'NO_PRICE_AVAILABLE':
return 'sn_price'
if error == 'MISSING_REQUIRED_FIELDS':
return 'sn_fields'
if error == 'FORMAT_ERROR':
return 'sn_format'
else:
return 'sn_error'
def _snailmail_print(self, immediate=True):
valid_address_letters = self.filtered(lambda l: l._is_valid_address(l))
invalid_address_letters = self - valid_address_letters
invalid_address_letters._snailmail_print_invalid_address()
if valid_address_letters and immediate:
for letter in valid_address_letters:
letter._snailmail_print_valid_address()
self.env.cr.commit()
def _snailmail_print_invalid_address(self):
error = 'MISSING_REQUIRED_FIELDS'
error_message = _("The address of the recipient is not complete")
self.write({
'state': 'error',
'error_code': error,
'info_msg': error_message,
})
self.notification_ids.sudo().write({
'notification_status': 'exception',
'failure_type': self._get_failure_type(error),
'failure_reason': error_message,
})
self.message_id._notify_message_notification_update()
def _snailmail_print_valid_address(self):
"""
get response
{
'request_code': RESPONSE_OK, # because we receive 200 if good or fail
'total_cost': total_cost,
'credit_error': credit_error,
'request': {
'documents': documents,
'options': options
}
}
}
"""
endpoint = self.env['ir.config_parameter'].sudo().get_param('snailmail.endpoint', DEFAULT_ENDPOINT)
timeout = int(self.env['ir.config_parameter'].sudo().get_param('snailmail.timeout', DEFAULT_TIMEOUT))
params = self._snailmail_create('print')
response = iap_tools.iap_jsonrpc(endpoint + PRINT_ENDPOINT, params=params, timeout=timeout)
for doc in response['request']['documents']:
if doc.get('sent') and response['request_code'] == 200:
note = _('The document was correctly sent by post.<br>The tracking id is %s', doc['send_id'])
letter_data = {'info_msg': note, 'state': 'sent', 'error_code': False}
notification_data = {
'notification_status': 'sent',
'failure_type': False,
'failure_reason': False,
}
else:
error = doc['error'] if response['request_code'] == 200 else response['reason']
note = _('An error occured when sending the document by post.<br>Error: %s', self._get_error_message(error))
letter_data = {
'info_msg': note,
'state': 'error',
'error_code': error if error in ERROR_CODES else 'UNKNOWN_ERROR'
}
notification_data = {
'notification_status': 'exception',
'failure_type': self._get_failure_type(error),
'failure_reason': note,
}
letter = self.browse(doc['letter_id'])
letter.write(letter_data)
letter.notification_ids.sudo().write(notification_data)
self.message_id._notify_message_notification_update()
def snailmail_print(self):
self.write({'state': 'pending'})
self.notification_ids.sudo().write({
'notification_status': 'ready',
'failure_type': False,
'failure_reason': False,
})
self.message_id._notify_message_notification_update()
if len(self) == 1:
self._snailmail_print()
def cancel(self):
self.write({'state': 'canceled', 'error_code': False})
self.notification_ids.sudo().write({
'notification_status': 'canceled',
})
self.message_id._notify_message_notification_update()
@api.model
def _snailmail_cron(self, autocommit=True):
letters_send = self.search([
'|',
('state', '=', 'pending'),
'&',
('state', '=', 'error'),
('error_code', 'in', ['TRIAL_ERROR', 'CREDIT_ERROR', 'ATTACHMENT_ERROR', 'MISSING_REQUIRED_FIELDS'])
])
for letter in letters_send:
letter._snailmail_print()
# Commit after every letter sent to avoid to send it again in case of a rollback
if autocommit:
self.env.cr.commit()
@api.model
def _is_valid_address(self, record):
record.ensure_one()
required_keys = ['street', 'city', 'zip', 'country_id']
return all(record[key] for key in required_keys)
|
en
| 0.733796
|
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. # discard Inbox notification This method will check if we have any existent attachement matching the model and res_ids and create them if not found. # report = self.env.ref('account.account_invoices') # override default_type from context, possibly meant for another model! Count the number of pages of the given pdf file. :param bin_pdf : binary content of the pdf file Create a dictionnary object to send to snailmail server. :return: Dict in the form: { account_token: string, //IAP Account token of the user documents: [{ pages: int, pdf_bin: pdf file res_id: int (client-side res_id), res_model: char (client-side res_model), address: { name: char, street: char, street2: char (OPTIONAL), zip: int, city: char, state: char (state code (OPTIONAL)), country_code: char (country code) } return_address: { name: char, street: char, street2: char (OPTIONAL), zip: int, city: char,at state: char (state code (OPTIONAL)), country_code: char (country code) } }], options: { color: boolean (true if color, false if black-white), duplex: boolean (true if duplex, false otherwise), currency_name: char } } # generic informations to send # Specific to each case: # If we are estimating the price: 1 object = 1 page # If we are printing -> attach the pdf # adding the web logo from the company for future possible customization # this will not raise the InsufficientCreditError which is the behaviour we want for now get response { 'request_code': RESPONSE_OK, # because we receive 200 if good or fail 'total_cost': total_cost, 'credit_error': credit_error, 'request': { 'documents': documents, 'options': options } } } # Commit after every letter sent to avoid to send it again in case of a rollback
| 1.787769
| 2
|
setup.py
|
caos21/Grodi
| 2
|
6626662
|
#
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" setup.py, compiles coagulatio and charging extensions
"""
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from Cython.Distutils import build_ext
import numpy
EXT_COAGULATIO = [Extension("coagulatio",
["coagulatio/coagulatio.pyx"],
extra_compile_args=["-Ofast", "-fopenmp"],
extra_link_args=['-fopenmp'])]
EXT_CHARGING = [Extension("charging", ["charging/charging.pyx"],
include_dirs=["charging/include/",
"charging/external/liblsoda/src/",
numpy.get_include()],
libraries=["charging", "lsoda", "m"],
library_dirs=["charging/lib/",
"charging/external/liblsoda/src/"],
extra_compile_args=["-Ofast", "-fopenmp"],
extra_link_args=["-fopenmp",
"-Wl,-rpath=charging/lib/",
"-Wl,-rpath=charging/external/liblsoda/src/"])]
setup(
name="coagulatio",
cmdclass={"build_ext": build_ext},
ext_modules=cythonize(EXT_COAGULATIO, annotate=True, ),
include_dirs=[numpy.get_include()])
setup(
name="charging",
cmdclass={"build_ext": build_ext},
ext_modules=cythonize(EXT_CHARGING, annotate=True, ),
)
|
#
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" setup.py, compiles coagulatio and charging extensions
"""
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from Cython.Distutils import build_ext
import numpy
EXT_COAGULATIO = [Extension("coagulatio",
["coagulatio/coagulatio.pyx"],
extra_compile_args=["-Ofast", "-fopenmp"],
extra_link_args=['-fopenmp'])]
EXT_CHARGING = [Extension("charging", ["charging/charging.pyx"],
include_dirs=["charging/include/",
"charging/external/liblsoda/src/",
numpy.get_include()],
libraries=["charging", "lsoda", "m"],
library_dirs=["charging/lib/",
"charging/external/liblsoda/src/"],
extra_compile_args=["-Ofast", "-fopenmp"],
extra_link_args=["-fopenmp",
"-Wl,-rpath=charging/lib/",
"-Wl,-rpath=charging/external/liblsoda/src/"])]
setup(
name="coagulatio",
cmdclass={"build_ext": build_ext},
ext_modules=cythonize(EXT_COAGULATIO, annotate=True, ),
include_dirs=[numpy.get_include()])
setup(
name="charging",
cmdclass={"build_ext": build_ext},
ext_modules=cythonize(EXT_CHARGING, annotate=True, ),
)
|
en
| 0.846117
|
# # Copyright 2019 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # setup.py, compiles coagulatio and charging extensions
| 1.716371
| 2
|
ansiotropy/models/soft_t5.py
|
ethankim00/soft_prompt_ansiotropy
| 0
|
6626663
|
<reponame>ethankim00/soft_prompt_ansiotropy<filename>ansiotropy/models/soft_t5.py
from tqdm import tqdm
from openprompt.data_utils import PROCESSORS
import torch
from openprompt.data_utils.utils import InputExample
import argparse
import numpy as np
from pathlib import Path
from datetime import datetime
import json
import pickle
from openprompt import PromptDataLoader
from openprompt.prompts import ManualVerbalizer
from openprompt.prompts import SoftTemplate
from openprompt import PromptForClassification
from ansiotropy.embeddings.generate_embeddings import SoftPromptConfig
import time
import os
import wandb
def parse():
parser = argparse.ArgumentParser("")
parser.add_argument("--shot", type=int, default=-1)
parser.add_argument("--seed", type=int, default=144)
parser.add_argument(
"--plm_eval_mode",
action="store_true",
help="whether to turn off the dropout in the freezed model. Set to true to turn off.",
)
parser.add_argument("--tune_plm", action="store_true")
parser.add_argument(
"--model",
type=str,
default="t5-lm",
help="We test both t5 and t5-lm in this scripts, the corresponding tokenizerwrapper will be automatically loaded.",
)
parser.add_argument("--model_name_or_path", default="t5-base")
parser.add_argument(
"--project_root",
default="/",
help="The project root in the file system, i.e. the absolute path of OpenPrompt",
)
parser.add_argument("--template_id", default=0, type=int)
parser.add_argument("--verbalizer_id", default=0, type=int)
parser.add_argument(
"--data_dir", type=str, default="./data/"
) # sometimes, huggingface datasets can not be automatically downloaded due to network issue, please refer to 0_basic.py line 15 for solutions.
parser.add_argument("--dataset", default="boolq", type=str)
parser.add_argument("--result_file", type=str, default="./results.txt")
parser.add_argument("--max_steps", default=1000, type=int)
parser.add_argument("--prompt_lr", type=float, default=0.3)
parser.add_argument("--warmup_step_prompt", type=int, default=500)
parser.add_argument("--init_from_vocab", action="store_false")
parser.add_argument("--eval_every_steps", type=int, default=5)
parser.add_argument("--soft_token_num", type=int, default=20)
parser.add_argument("--optimizer", type=str, default="Adafactor")
args = parser.parse_args()
args.result_file = os.path.join(args.project_root, args.result_file)
content_write = "=" * 20 + "\n"
content_write += f"dataset {args.dataset}\t"
content_write += f"temp {args.template_id}\t"
content_write += f"verb {args.verbalizer_id}\t"
content_write += f"model {args.model}\t"
content_write += f"seed {args.seed}\t"
content_write += f"shot {args.shot}\t"
content_write += f"plm_eval_mode {args.plm_eval_mode}\t"
content_write += f"init_from_vocab {args.init_from_vocab}\t"
content_write += f"eval_every_steps {args.eval_every_steps}\t"
content_write += f"prompt_lr {args.prompt_lr}\t"
content_write += f"optimizer {args.optimizer}\t"
content_write += f"warmup_step_prompt {args.warmup_step_prompt}\t"
content_write += f"soft_token_num {args.soft_token_num}\t"
content_write += "\n"
print(content_write)
return args
from openprompt.utils.reproduciblity import set_seed
import random
# use lm-adapted version or t5-v1.1 checkpoint. Note that the originial t5 checkpoint has been pretrained
# on part of GLUE dataset, thus should not be used.
from openprompt.plms.seq2seq import T5TokenizerWrapper, T5LMTokenizerWrapper
from transformers import T5Config, T5Tokenizer, T5ForConditionalGeneration
from openprompt.data_utils.data_sampler import FewShotSampler
from openprompt.plms import load_plm
def get_dataset(args):
dataset = {}
# Below are multiple dataset examples, including few-shot ones.
if args.dataset == "boolq":
Processor = PROCESSORS["super_glue.boolq"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/BoolQ"
scriptformat = "txt"
max_seq_l = (
480 # this should be specified according to the running GPU's capacity
)
if (
args.tune_plm
): # tune the entire plm will use more gpu-memories, thus we should use a smaller batch_size.
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 8
model_parallelize = (
True # if multiple gpus are available, one can use model_parallelize
)
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
elif args.dataset == "multirc":
Processor = PROCESSORS["super_glue.multirc"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/MultiRC"
scriptformat = "txt"
max_seq_l = 480
if args.tune_plm:
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 8
model_parallelize = True
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
elif args.dataset == "rte":
Processor = PROCESSORS["super_glue.rte"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/RTE"
scriptformat = "txt"
max_seq_l = 480
if args.tune_plm:
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 2
model_parallelize = True
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
elif args.dataset == "cb":
Processor = PROCESSORS["super_glue.cb"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/CB"
scriptformat = "txt"
max_seq_l = 480
if args.tune_plm:
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 8
model_parallelize = True
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
elif args.dataset == "wic":
Processor = PROCESSORS["super_glue.wic"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/WiC"
scriptformat = "txt"
max_seq_l = 480
if args.tune_plm:
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 8
model_parallelize = True
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
elif args.dataset == "fewshot_boolq":
Processor = PROCESSORS["super_glue.boolq"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/BoolQ"
scriptformat = "txt"
sampler = FewShotSampler(num_examples_per_label=32)
dataset["train"] = sampler(dataset["train"], seed=args.seed)
max_seq_l = 480
if args.tune_plm:
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 8
model_parallelize = True
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
elif args.dataset == "fewshot_multirc":
Processor = PROCESSORS["super_glue.multirc"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/MultiRC"
scriptformat = "txt"
sampler = FewShotSampler(num_examples_per_label=32)
dataset["train"] = sampler(dataset["train"], seed=args.seed)
max_seq_l = 480
if args.tune_plm:
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 8
model_parallelize = True
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
elif args.dataset == "fewshot_wic":
Processor = PROCESSORS["super_glue.wic"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/WiC"
scriptformat = "txt"
sampler = FewShotSampler(num_examples_per_label=32)
dataset["train"] = sampler(dataset["train"], seed=args.seed)
max_seq_l = 480
if args.tune_plm:
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 8
model_parallelize = True
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
else:
raise NotImplementedError
return (
dataset,
class_labels,
scriptsbase,
scriptformat,
max_seq_l,
batchsize_t,
batchsize_e,
gradient_accumulation_steps,
model_parallelize,
)
# Now define the template and verbalizer.
# Note that soft template can be combined with hard template, by loading the hard template from file.
# For example, the template in soft_template.txt is {}
# The choice_id 1 is the hard template
def evaluate(prompt_model, dataloader, desc):
prompt_model.eval()
allpreds = []
alllabels = []
for step, inputs in enumerate(dataloader):
if use_cuda:
inputs = inputs.cuda()
logits = prompt_model(inputs)
labels = inputs["label"]
alllabels.extend(labels.cpu().tolist())
allpreds.extend(torch.argmax(logits, dim=-1).cpu().tolist())
acc = sum([int(i == j) for i, j in zip(allpreds, alllabels)]) / len(allpreds)
return acc
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
get_constant_schedule_with_warmup,
) # use AdamW is a standard practice for transformer
from transformers.optimization import (
Adafactor,
AdafactorSchedule,
) # use Adafactor is the default setting for T5
from openprompt.data_utils.utils import InputFeatures
if __name__ == "__main__":
wandb.init(project="soft_prompt_anisotropy", entity="ethankim10")
args = parse()
wandb.config.update(args)
exp_config = SoftPromptConfig(
model=args.model,
model_name_or_path=args.model_name_or_path,
num_prompt_tokens=args.soft_token_num,
initialize_from_vocab=args.init_from_vocab,
)
this_run_unicode = str(random.randint(0, 1e10))
wandb.config.update({"id":this_run_unicode})
set_seed(args.seed)
plm, tokenizer, model_config, WrapperClass = load_plm(
args.model, args.model_name_or_path
)
(
dataset,
class_labels,
scriptsbase,
scriptformat,
max_seq_l,
batchsize_t,
batchsize_e,
gradient_accumulation_steps,
model_parallelize,
) = get_dataset(args)
mytemplate = SoftTemplate(
model=plm,
tokenizer=tokenizer,
num_tokens=args.soft_token_num,
initialize_from_vocab=args.init_from_vocab,
).from_file(f"scripts/{scriptsbase}/soft_template.txt", choice=args.template_id)
myverbalizer = ManualVerbalizer(tokenizer, classes=class_labels).from_file(
f"scripts/{scriptsbase}/manual_verbalizer.{scriptformat}",
choice=args.verbalizer_id,
)
wrapped_example = mytemplate.wrap_one_example(dataset["train"][0])
print(wrapped_example)
use_cuda = True
prompt_model = PromptForClassification(
plm=plm,
template=mytemplate,
verbalizer=myverbalizer,
freeze_plm=(not args.tune_plm),
plm_eval_mode=args.plm_eval_mode,
)
if use_cuda:
prompt_model = prompt_model.cuda()
if model_parallelize:
prompt_model.parallelize()
train_dataloader = PromptDataLoader(
dataset=dataset["train"],
template=mytemplate,
tokenizer=tokenizer,
tokenizer_wrapper_class=WrapperClass,
max_seq_length=max_seq_l,
decoder_max_length=3,
batch_size=batchsize_t,
shuffle=True,
teacher_forcing=False,
predict_eos_token=False,
truncate_method="tail",
)
validation_dataloader = PromptDataLoader(
dataset=dataset["validation"][0:30],
template=mytemplate,
tokenizer=tokenizer,
tokenizer_wrapper_class=WrapperClass,
max_seq_length=max_seq_l,
decoder_max_length=3,
batch_size=batchsize_e,
shuffle=False,
teacher_forcing=False,
predict_eos_token=False,
truncate_method="tail",
)
# zero-shot test
test_dataloader = PromptDataLoader(
dataset=dataset["test"],
template=mytemplate,
tokenizer=tokenizer,
tokenizer_wrapper_class=WrapperClass,
max_seq_length=max_seq_l,
decoder_max_length=3,
batch_size=batchsize_e,
shuffle=False,
teacher_forcing=False,
predict_eos_token=False,
truncate_method="tail",
)
print(
"truncate rate: {}".format(test_dataloader.tokenizer_wrapper.truncate_rate),
flush=True,
)
loss_func = torch.nn.CrossEntropyLoss()
tot_step = args.max_steps
if (
args.tune_plm
): # normally we freeze the model when using soft_template. However, we keep the option to tune plm
no_decay = [
"bias",
"LayerNorm.weight",
] # it's always good practice to set no decay to biase and LayerNorm parameters
optimizer_grouped_parameters1 = [
{
"params": [
p
for n, p in prompt_model.plm.named_parameters()
if (not any(nd in n for nd in no_decay))
],
"weight_decay": 0.01,
},
{
"params": [
p
for n, p in prompt_model.plm.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer1 = AdamW(optimizer_grouped_parameters1, lr=3e-5)
scheduler1 = get_linear_schedule_with_warmup(
optimizer1, num_warmup_steps=500, num_training_steps=tot_step
)
else:
optimizer1 = None
scheduler1 = None
optimizer_grouped_parameters2 = [
{
"params": [
p
for name, p in prompt_model.template.named_parameters()
if "raw_embedding" not in name
]
}
] # note that you have to remove the raw_embedding manually from the optimization
if args.optimizer.lower() == "adafactor":
optimizer2 = Adafactor(
optimizer_grouped_parameters2,
lr=args.prompt_lr,
relative_step=False,
scale_parameter=False,
warmup_init=False,
) # when lr is 0.3, it is the same as the configuration of https://arxiv.org/abs/2104.08691
scheduler2 = get_constant_schedule_with_warmup(
optimizer2, num_warmup_steps=args.warmup_step_prompt
) # when num_warmup_steps is 0, it is the same as the configuration of https://arxiv.org/abs/2104.08691
elif args.optimizer.lower() == "adamw":
optimizer2 = AdamW(
optimizer_grouped_parameters2, lr=args.prompt_lr
) # usually lr = 0.5
scheduler2 = get_linear_schedule_with_warmup(
optimizer2,
num_warmup_steps=args.warmup_step_prompt,
num_training_steps=tot_step,
) # usually num_warmup_steps is 500
tot_loss = 0
log_loss = 0
best_val_acc = 0
glb_step = 0
actual_step = 0
leave_training = False
acc_traces = []
tot_train_time = 0
pbar_update_freq = 10
prompt_model.train()
pbar = tqdm(total=tot_step, desc="Train")
for epoch in range(10):
print(f"Begin epoch {epoch}")
for step, inputs in enumerate(train_dataloader):
if use_cuda:
inputs_copy = InputFeatures(**inputs.to_dict()).cuda()
inputs = inputs.cuda()
tot_train_time -= time.time()
logits = prompt_model(inputs)
labels = inputs["label"]
loss = loss_func(logits, labels)
loss.backward()
wandb.log({"loss": loss})
tot_loss += loss.item()
actual_step += 1
if actual_step % gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(prompt_model.parameters(), 1.0)
glb_step += 1
if glb_step % pbar_update_freq == 0:
aveloss = (tot_loss - log_loss) / pbar_update_freq
pbar.update(10)
pbar.set_postfix({"loss": aveloss})
log_loss = tot_loss
if optimizer1 is not None:
optimizer1.step()
optimizer1.zero_grad()
if scheduler1 is not None:
scheduler1.step()
if optimizer2 is not None:
optimizer2.step()
optimizer2.zero_grad()
if scheduler2 is not None:
scheduler2.step()
tot_train_time += time.time()
if (
actual_step % gradient_accumulation_steps == 0
and glb_step > 0
and glb_step % args.eval_every_steps == 0
):
val_acc = evaluate(prompt_model, validation_dataloader, desc="Valid")
print(val_acc)
wandb.log({"val_acc": val_acc})
if val_acc >= best_val_acc:
torch.save(
{
"exp": exp_config.__dict__,
"model": prompt_model.state_dict(),
},
f".{args.project_root}{this_run_unicode}.ckpt",
)
best_val_acc = val_acc
wandb.log({"best_val_acc": best_val_acc})
acc_traces.append(val_acc)
print(
"Glb_step {}, val_acc {}, average time {}".format(
glb_step, val_acc, tot_train_time / actual_step
),
flush=True,
)
prompt_model.train()
if glb_step > args.max_steps:
leave_training = True
break
if leave_training:
break
# # super_glue test split can not be evaluated without submitting the results to their website. So we skip it here and keep them as comments.
#
# prompt_model.load_state_dict(torch.load(f"{args.project_root}/ckpts/{this_run_unicode}.ckpt"))
# prompt_model = prompt_model.cuda()
# test_acc = evaluate(prompt_model, test_dataloader, desc="Test")
# test_acc = evaluate(prompt_model, test_dataloader, desc="Test")
# a simple measure for the convergence speed.
thres99 = 0.99 * best_val_acc
thres98 = 0.98 * best_val_acc
thres100 = best_val_acc
step100 = step98 = step99 = args.max_steps
for val_time, acc in enumerate(acc_traces):
if acc >= thres98:
step98 = min(val_time * args.eval_every_steps, step98)
if acc >= thres99:
step99 = min(val_time * args.eval_every_steps, step99)
if acc >= thres100:
step100 = min(val_time * args.eval_every_steps, step100)
content_write = ""
content_write += f"BestValAcc:{best_val_acc}\tEndValAcc:{acc_traces[-1]}\tcritical_steps:{[step98,step99,step100]}\n"
content_write += "\n"
print(content_write)
#with open(f"{args.result_file}", "a") as fout:
# fout.write(content_write)
import os
#os.remove(f"../ckpts/{this_run_unicode}.ckpt")
|
from tqdm import tqdm
from openprompt.data_utils import PROCESSORS
import torch
from openprompt.data_utils.utils import InputExample
import argparse
import numpy as np
from pathlib import Path
from datetime import datetime
import json
import pickle
from openprompt import PromptDataLoader
from openprompt.prompts import ManualVerbalizer
from openprompt.prompts import SoftTemplate
from openprompt import PromptForClassification
from ansiotropy.embeddings.generate_embeddings import SoftPromptConfig
import time
import os
import wandb
def parse():
parser = argparse.ArgumentParser("")
parser.add_argument("--shot", type=int, default=-1)
parser.add_argument("--seed", type=int, default=144)
parser.add_argument(
"--plm_eval_mode",
action="store_true",
help="whether to turn off the dropout in the freezed model. Set to true to turn off.",
)
parser.add_argument("--tune_plm", action="store_true")
parser.add_argument(
"--model",
type=str,
default="t5-lm",
help="We test both t5 and t5-lm in this scripts, the corresponding tokenizerwrapper will be automatically loaded.",
)
parser.add_argument("--model_name_or_path", default="t5-base")
parser.add_argument(
"--project_root",
default="/",
help="The project root in the file system, i.e. the absolute path of OpenPrompt",
)
parser.add_argument("--template_id", default=0, type=int)
parser.add_argument("--verbalizer_id", default=0, type=int)
parser.add_argument(
"--data_dir", type=str, default="./data/"
) # sometimes, huggingface datasets can not be automatically downloaded due to network issue, please refer to 0_basic.py line 15 for solutions.
parser.add_argument("--dataset", default="boolq", type=str)
parser.add_argument("--result_file", type=str, default="./results.txt")
parser.add_argument("--max_steps", default=1000, type=int)
parser.add_argument("--prompt_lr", type=float, default=0.3)
parser.add_argument("--warmup_step_prompt", type=int, default=500)
parser.add_argument("--init_from_vocab", action="store_false")
parser.add_argument("--eval_every_steps", type=int, default=5)
parser.add_argument("--soft_token_num", type=int, default=20)
parser.add_argument("--optimizer", type=str, default="Adafactor")
args = parser.parse_args()
args.result_file = os.path.join(args.project_root, args.result_file)
content_write = "=" * 20 + "\n"
content_write += f"dataset {args.dataset}\t"
content_write += f"temp {args.template_id}\t"
content_write += f"verb {args.verbalizer_id}\t"
content_write += f"model {args.model}\t"
content_write += f"seed {args.seed}\t"
content_write += f"shot {args.shot}\t"
content_write += f"plm_eval_mode {args.plm_eval_mode}\t"
content_write += f"init_from_vocab {args.init_from_vocab}\t"
content_write += f"eval_every_steps {args.eval_every_steps}\t"
content_write += f"prompt_lr {args.prompt_lr}\t"
content_write += f"optimizer {args.optimizer}\t"
content_write += f"warmup_step_prompt {args.warmup_step_prompt}\t"
content_write += f"soft_token_num {args.soft_token_num}\t"
content_write += "\n"
print(content_write)
return args
from openprompt.utils.reproduciblity import set_seed
import random
# use lm-adapted version or t5-v1.1 checkpoint. Note that the originial t5 checkpoint has been pretrained
# on part of GLUE dataset, thus should not be used.
from openprompt.plms.seq2seq import T5TokenizerWrapper, T5LMTokenizerWrapper
from transformers import T5Config, T5Tokenizer, T5ForConditionalGeneration
from openprompt.data_utils.data_sampler import FewShotSampler
from openprompt.plms import load_plm
def get_dataset(args):
dataset = {}
# Below are multiple dataset examples, including few-shot ones.
if args.dataset == "boolq":
Processor = PROCESSORS["super_glue.boolq"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/BoolQ"
scriptformat = "txt"
max_seq_l = (
480 # this should be specified according to the running GPU's capacity
)
if (
args.tune_plm
): # tune the entire plm will use more gpu-memories, thus we should use a smaller batch_size.
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 8
model_parallelize = (
True # if multiple gpus are available, one can use model_parallelize
)
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
elif args.dataset == "multirc":
Processor = PROCESSORS["super_glue.multirc"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/MultiRC"
scriptformat = "txt"
max_seq_l = 480
if args.tune_plm:
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 8
model_parallelize = True
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
elif args.dataset == "rte":
Processor = PROCESSORS["super_glue.rte"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/RTE"
scriptformat = "txt"
max_seq_l = 480
if args.tune_plm:
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 2
model_parallelize = True
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
elif args.dataset == "cb":
Processor = PROCESSORS["super_glue.cb"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/CB"
scriptformat = "txt"
max_seq_l = 480
if args.tune_plm:
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 8
model_parallelize = True
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
elif args.dataset == "wic":
Processor = PROCESSORS["super_glue.wic"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/WiC"
scriptformat = "txt"
max_seq_l = 480
if args.tune_plm:
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 8
model_parallelize = True
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
elif args.dataset == "fewshot_boolq":
Processor = PROCESSORS["super_glue.boolq"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/BoolQ"
scriptformat = "txt"
sampler = FewShotSampler(num_examples_per_label=32)
dataset["train"] = sampler(dataset["train"], seed=args.seed)
max_seq_l = 480
if args.tune_plm:
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 8
model_parallelize = True
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
elif args.dataset == "fewshot_multirc":
Processor = PROCESSORS["super_glue.multirc"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/MultiRC"
scriptformat = "txt"
sampler = FewShotSampler(num_examples_per_label=32)
dataset["train"] = sampler(dataset["train"], seed=args.seed)
max_seq_l = 480
if args.tune_plm:
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 8
model_parallelize = True
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
elif args.dataset == "fewshot_wic":
Processor = PROCESSORS["super_glue.wic"]
dataset["train"] = Processor().get_train_examples(args.data_dir)
dataset["validation"] = Processor().get_dev_examples(args.data_dir)
dataset["test"] = Processor().get_test_examples(args.data_dir)
class_labels = Processor().get_labels()
scriptsbase = "SuperGLUE/WiC"
scriptformat = "txt"
sampler = FewShotSampler(num_examples_per_label=32)
dataset["train"] = sampler(dataset["train"], seed=args.seed)
max_seq_l = 480
if args.tune_plm:
batchsize_t = 4
batchsize_e = 4
gradient_accumulation_steps = 8
model_parallelize = True
else:
batchsize_t = 8
batchsize_e = 4
gradient_accumulation_steps = 4
model_parallelize = False
else:
raise NotImplementedError
return (
dataset,
class_labels,
scriptsbase,
scriptformat,
max_seq_l,
batchsize_t,
batchsize_e,
gradient_accumulation_steps,
model_parallelize,
)
# Now define the template and verbalizer.
# Note that soft template can be combined with hard template, by loading the hard template from file.
# For example, the template in soft_template.txt is {}
# The choice_id 1 is the hard template
def evaluate(prompt_model, dataloader, desc):
    """Compute plain classification accuracy of *prompt_model* over *dataloader*.

    ``desc`` is accepted for API compatibility (progress-bar labelling) but is
    not used.  Relies on the module-level ``use_cuda`` flag to decide whether
    batches are moved to the GPU.  Raises ZeroDivisionError on an empty loader.
    """
    prompt_model.eval()
    predictions = []
    gold_labels = []
    for _, batch in enumerate(dataloader):
        if use_cuda:
            batch = batch.cuda()
        scores = prompt_model(batch)
        batch_labels = batch["label"]
        gold_labels.extend(batch_labels.cpu().tolist())
        predictions.extend(torch.argmax(scores, dim=-1).cpu().tolist())
    n_correct = sum(int(p == g) for p, g in zip(predictions, gold_labels))
    return n_correct / len(predictions)
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
get_constant_schedule_with_warmup,
) # use AdamW is a standard practice for transformer
from transformers.optimization import (
Adafactor,
AdafactorSchedule,
) # use Adafactor is the default setting for T5
from openprompt.data_utils.utils import InputFeatures
if __name__ == "__main__":
    # End-to-end soft-prompt training: set up wandb tracking, load the PLM,
    # build template/verbalizer and dataloaders, then train with periodic
    # validation, checkpointing the best model.
    wandb.init(project="soft_prompt_anisotropy", entity="ethankim10")
    args = parse()
    wandb.config.update(args)
    exp_config = SoftPromptConfig(
        model=args.model,
        model_name_or_path=args.model_name_or_path,
        num_prompt_tokens=args.soft_token_num,
        initialize_from_vocab=args.init_from_vocab,
    )
    # Random tag used to name this run's checkpoint file.
    this_run_unicode = str(random.randint(0, 1e10))
    wandb.config.update({"id":this_run_unicode})
    set_seed(args.seed)
    plm, tokenizer, model_config, WrapperClass = load_plm(
        args.model, args.model_name_or_path
    )
    (
        dataset,
        class_labels,
        scriptsbase,
        scriptformat,
        max_seq_l,
        batchsize_t,
        batchsize_e,
        gradient_accumulation_steps,
        model_parallelize,
    ) = get_dataset(args)
    # Soft (trainable) template, optionally seeded from a hard template file;
    # the verbalizer maps each class to label words.
    mytemplate = SoftTemplate(
        model=plm,
        tokenizer=tokenizer,
        num_tokens=args.soft_token_num,
        initialize_from_vocab=args.init_from_vocab,
    ).from_file(f"scripts/{scriptsbase}/soft_template.txt", choice=args.template_id)
    myverbalizer = ManualVerbalizer(tokenizer, classes=class_labels).from_file(
        f"scripts/{scriptsbase}/manual_verbalizer.{scriptformat}",
        choice=args.verbalizer_id,
    )
    # Sanity print of one wrapped training example.
    wrapped_example = mytemplate.wrap_one_example(dataset["train"][0])
    print(wrapped_example)
    use_cuda = True
    prompt_model = PromptForClassification(
        plm=plm,
        template=mytemplate,
        verbalizer=myverbalizer,
        freeze_plm=(not args.tune_plm),
        plm_eval_mode=args.plm_eval_mode,
    )
    if use_cuda:
        prompt_model = prompt_model.cuda()
    if model_parallelize:
        prompt_model.parallelize()
    train_dataloader = PromptDataLoader(
        dataset=dataset["train"],
        template=mytemplate,
        tokenizer=tokenizer,
        tokenizer_wrapper_class=WrapperClass,
        max_seq_length=max_seq_l,
        decoder_max_length=3,
        batch_size=batchsize_t,
        shuffle=True,
        teacher_forcing=False,
        predict_eos_token=False,
        truncate_method="tail",
    )
    # NOTE(review): validation is truncated to the first 30 examples,
    # presumably for speed -- confirm this subset is intended.
    validation_dataloader = PromptDataLoader(
        dataset=dataset["validation"][0:30],
        template=mytemplate,
        tokenizer=tokenizer,
        tokenizer_wrapper_class=WrapperClass,
        max_seq_length=max_seq_l,
        decoder_max_length=3,
        batch_size=batchsize_e,
        shuffle=False,
        teacher_forcing=False,
        predict_eos_token=False,
        truncate_method="tail",
    )
    # zero-shot test
    test_dataloader = PromptDataLoader(
        dataset=dataset["test"],
        template=mytemplate,
        tokenizer=tokenizer,
        tokenizer_wrapper_class=WrapperClass,
        max_seq_length=max_seq_l,
        decoder_max_length=3,
        batch_size=batchsize_e,
        shuffle=False,
        teacher_forcing=False,
        predict_eos_token=False,
        truncate_method="tail",
    )
    print(
        "truncate rate: {}".format(test_dataloader.tokenizer_wrapper.truncate_rate),
        flush=True,
    )
    loss_func = torch.nn.CrossEntropyLoss()
    tot_step = args.max_steps
    if (
        args.tune_plm
    ):  # normally we freeze the model when using soft_template. However, we keep the option to tune plm
        no_decay = [
            "bias",
            "LayerNorm.weight",
        ]  # it's always good practice to set no decay to biase and LayerNorm parameters
        # Two parameter groups for the PLM: decayed and non-decayed.
        optimizer_grouped_parameters1 = [
            {
                "params": [
                    p
                    for n, p in prompt_model.plm.named_parameters()
                    if (not any(nd in n for nd in no_decay))
                ],
                "weight_decay": 0.01,
            },
            {
                "params": [
                    p
                    for n, p in prompt_model.plm.named_parameters()
                    if any(nd in n for nd in no_decay)
                ],
                "weight_decay": 0.0,
            },
        ]
        optimizer1 = AdamW(optimizer_grouped_parameters1, lr=3e-5)
        scheduler1 = get_linear_schedule_with_warmup(
            optimizer1, num_warmup_steps=500, num_training_steps=tot_step
        )
    else:
        optimizer1 = None
        scheduler1 = None
    # Separate optimizer for the soft-prompt parameters only.
    optimizer_grouped_parameters2 = [
        {
            "params": [
                p
                for name, p in prompt_model.template.named_parameters()
                if "raw_embedding" not in name
            ]
        }
    ]  # note that you have to remove the raw_embedding manually from the optimization
    if args.optimizer.lower() == "adafactor":
        optimizer2 = Adafactor(
            optimizer_grouped_parameters2,
            lr=args.prompt_lr,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )  # when lr is 0.3, it is the same as the configuration of https://arxiv.org/abs/2104.08691
        scheduler2 = get_constant_schedule_with_warmup(
            optimizer2, num_warmup_steps=args.warmup_step_prompt
        )  # when num_warmup_steps is 0, it is the same as the configuration of https://arxiv.org/abs/2104.08691
    elif args.optimizer.lower() == "adamw":
        optimizer2 = AdamW(
            optimizer_grouped_parameters2, lr=args.prompt_lr
        )  # usually lr = 0.5
        scheduler2 = get_linear_schedule_with_warmup(
            optimizer2,
            num_warmup_steps=args.warmup_step_prompt,
            num_training_steps=tot_step,
        )  # usually num_warmup_steps is 500
    # Bookkeeping: glb_step counts optimizer steps (after accumulation),
    # actual_step counts raw batches.
    tot_loss = 0
    log_loss = 0
    best_val_acc = 0
    glb_step = 0
    actual_step = 0
    leave_training = False
    acc_traces = []
    tot_train_time = 0
    pbar_update_freq = 10
    prompt_model.train()
    pbar = tqdm(total=tot_step, desc="Train")
    for epoch in range(10):
        print(f"Begin epoch {epoch}")
        for step, inputs in enumerate(train_dataloader):
            if use_cuda:
                # NOTE(review): inputs_copy is built but never used below --
                # dead code? confirm before removing.
                inputs_copy = InputFeatures(**inputs.to_dict()).cuda()
                inputs = inputs.cuda()
            tot_train_time -= time.time()
            logits = prompt_model(inputs)
            labels = inputs["label"]
            loss = loss_func(logits, labels)
            loss.backward()
            wandb.log({"loss": loss})
            tot_loss += loss.item()
            actual_step += 1
            if actual_step % gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(prompt_model.parameters(), 1.0)
                glb_step += 1
                if glb_step % pbar_update_freq == 0:
                    aveloss = (tot_loss - log_loss) / pbar_update_freq
                    # NOTE(review): the hard-coded 10 mirrors pbar_update_freq;
                    # keep the two in sync if the frequency changes.
                    pbar.update(10)
                    pbar.set_postfix({"loss": aveloss})
                    log_loss = tot_loss
            if optimizer1 is not None:
                optimizer1.step()
                optimizer1.zero_grad()
            if scheduler1 is not None:
                scheduler1.step()
            if optimizer2 is not None:
                optimizer2.step()
                optimizer2.zero_grad()
            if scheduler2 is not None:
                scheduler2.step()
            tot_train_time += time.time()
            # Periodic validation (only on optimizer-step boundaries).
            if (
                actual_step % gradient_accumulation_steps == 0
                and glb_step > 0
                and glb_step % args.eval_every_steps == 0
            ):
                val_acc = evaluate(prompt_model, validation_dataloader, desc="Valid")
                print(val_acc)
                wandb.log({"val_acc": val_acc})
                if val_acc >= best_val_acc:
                    # NOTE(review): saves to f".{args.project_root}{this_run_unicode}.ckpt",
                    # but the commented reload below expects
                    # "{args.project_root}/ckpts/{this_run_unicode}.ckpt" -- the
                    # two paths disagree; confirm the intended location.
                    torch.save(
                        {
                            "exp": exp_config.__dict__,
                            "model": prompt_model.state_dict(),
                        },
                        f".{args.project_root}{this_run_unicode}.ckpt",
                    )
                    best_val_acc = val_acc
                wandb.log({"best_val_acc": best_val_acc})
                acc_traces.append(val_acc)
                print(
                    "Glb_step {}, val_acc {}, average time {}".format(
                        glb_step, val_acc, tot_train_time / actual_step
                    ),
                    flush=True,
                )
                # evaluate() switched the model to eval mode; switch back.
                prompt_model.train()
            if glb_step > args.max_steps:
                leave_training = True
                break
        if leave_training:
            break
    # # super_glue test split can not be evaluated without submitting the results to their website. So we skip it here and keep them as comments.
    #
    # prompt_model.load_state_dict(torch.load(f"{args.project_root}/ckpts/{this_run_unicode}.ckpt"))
    # prompt_model = prompt_model.cuda()
    # test_acc = evaluate(prompt_model, test_dataloader, desc="Test")
    # test_acc = evaluate(prompt_model, test_dataloader, desc="Test")
    # a simple measure for the convergence speed.
    # Convergence metric: the earliest eval step at which validation accuracy
    # reached 98% / 99% / 100% of the best accuracy.
    thres99 = 0.99 * best_val_acc
    thres98 = 0.98 * best_val_acc
    thres100 = best_val_acc
    step100 = step98 = step99 = args.max_steps
    for val_time, acc in enumerate(acc_traces):
        if acc >= thres98:
            step98 = min(val_time * args.eval_every_steps, step98)
        if acc >= thres99:
            step99 = min(val_time * args.eval_every_steps, step99)
        if acc >= thres100:
            step100 = min(val_time * args.eval_every_steps, step100)
    content_write = ""
    content_write += f"BestValAcc:{best_val_acc}\tEndValAcc:{acc_traces[-1]}\tcritical_steps:{[step98,step99,step100]}\n"
    content_write += "\n"
    print(content_write)
    #with open(f"{args.result_file}", "a") as fout:
    # fout.write(content_write)
    # os imported here only for the commented-out checkpoint removal below.
    import os
    #os.remove(f"../ckpts/{this_run_unicode}.ckpt")
|
en
| 0.790699
|
# sometimes, huggingface datasets can not be automatically downloaded due to network issue, please refer to 0_basic.py line 15 for solutions. # use lm-adapted version or t5-v1.1 checkpoint. Note that the originial t5 checkpoint has been pretrained # on part of GLUE dataset, thus should not be used. # Below are multiple dataset examples, including few-shot ones. # this should be specified according to the running GPU's capacity # tune the entire plm will use more gpu-memories, thus we should use a smaller batch_size. # if multiple gpus are available, one can use model_parallelize # Now define the template and verbalizer. # Note that soft template can be combined with hard template, by loading the hard template from file. # For example, the template in soft_template.txt is {} # The choice_id 1 is the hard template # use AdamW is a standard practice for transformer # use Adafactor is the default setting for T5 # zero-shot test # normally we freeze the model when using soft_template. However, we keep the option to tune plm # it's always good practice to set no decay to biase and LayerNorm parameters # note that you have to remove the raw_embedding manually from the optimization # when lr is 0.3, it is the same as the configuration of https://arxiv.org/abs/2104.08691 # when num_warmup_steps is 0, it is the same as the configuration of https://arxiv.org/abs/2104.08691 # usually lr = 0.5 # usually num_warmup_steps is 500 # # super_glue test split can not be evaluated without submitting the results to their website. So we skip it here and keep them as comments. # # prompt_model.load_state_dict(torch.load(f"{args.project_root}/ckpts/{this_run_unicode}.ckpt")) # prompt_model = prompt_model.cuda() # test_acc = evaluate(prompt_model, test_dataloader, desc="Test") # test_acc = evaluate(prompt_model, test_dataloader, desc="Test") # a simple measure for the convergence speed. 
#with open(f"{args.result_file}", "a") as fout: # fout.write(content_write) #os.remove(f"../ckpts/{this_run_unicode}.ckpt")
| 2.1432
| 2
|
LeetCode/Python3/Stack&PriorityQueue/150. Evaluate Reverse Polish Notation.py
|
WatsonWangZh/CodingPractice
| 11
|
6626664
|
<gh_stars>10-100
# Evaluate the value of an arithmetic expression in Reverse Polish Notation.
# Valid operators are +, -, *, /. Each operand may be an integer or another expression.
# Note:
# Division between two integers should truncate toward zero.
# The given RPN expression is always valid.
# That means the expression would always evaluate to a result
# and there won't be any divide by zero operation.
# Example 1:
# Input: ["2", "1", "+", "3", "*"]
# Output: 9
# Explanation: ((2 + 1) * 3) = 9
# Example 2:
# Input: ["4", "13", "5", "/", "+"]
# Output: 6
# Explanation: (4 + (13 / 5)) = 6
# Example 3:
# Input: ["10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"]
# Output: 22
# Explanation:
# ((10 * (6 / ((9 + 3) * -11))) + 17) + 5
# = ((10 * (6 / (12 * -11))) + 17) + 5
# = ((10 * (6 / -132)) + 17) + 5
# = ((10 * 0) + 17) + 5
# = (0 + 17) + 5
# = 17 + 5
# = 22
class Solution(object):
    def evalRPN(self, tokens):
        """Evaluate an arithmetic expression given in Reverse Polish Notation.

        :type tokens: List[str]  (operands and the operators +, -, *, /)
        :rtype: int

        Division truncates toward zero, as the problem requires.  The
        original implementation used ``int(num1*1./num2)``, which goes
        through float division and loses precision for operands beyond
        2**53; here the truncated quotient is computed with exact integer
        arithmetic instead.
        """
        # Stack-based evaluation: push operands; an operator pops the top
        # two values and pushes the result.
        stack = []
        for token in tokens:
            # len(token) > 1 admits negative numbers such as "-11",
            # which isdigit() alone would reject.
            if token.isdigit() or len(token) > 1:
                stack.append(int(token))
            else:
                num2, num1 = stack.pop(), stack.pop()
                if token == '+':
                    output = num1 + num2
                elif token == '-':
                    output = num1 - num2
                elif token == '*':
                    output = num1 * num2
                else:
                    # Exact truncation toward zero: floor-divide the
                    # magnitudes, then restore the sign.
                    output = abs(num1) // abs(num2)
                    if (num1 < 0) != (num2 < 0):
                        output = -output
                stack.append(output)
        return stack.pop()
|
# Evaluate the value of an arithmetic expression in Reverse Polish Notation.
# Valid operators are +, -, *, /. Each operand may be an integer or another expression.
# Note:
# Division between two integers should truncate toward zero.
# The given RPN expression is always valid.
# That means the expression would always evaluate to a result
# and there won't be any divide by zero operation.
# Example 1:
# Input: ["2", "1", "+", "3", "*"]
# Output: 9
# Explanation: ((2 + 1) * 3) = 9
# Example 2:
# Input: ["4", "13", "5", "/", "+"]
# Output: 6
# Explanation: (4 + (13 / 5)) = 6
# Example 3:
# Input: ["10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"]
# Output: 22
# Explanation:
# ((10 * (6 / ((9 + 3) * -11))) + 17) + 5
# = ((10 * (6 / (12 * -11))) + 17) + 5
# = ((10 * (6 / -132)) + 17) + 5
# = ((10 * 0) + 17) + 5
# = (0 + 17) + 5
# = 17 + 5
# = 22
class Solution(object):
    def evalRPN(self, tokens):
        """Evaluate a Reverse Polish Notation expression.

        :type tokens: List[str]
        :rtype: int
        """
        # Dispatch table: each operator maps to a two-argument callable.
        # Division reproduces the float-based truncation toward zero of
        # the classic solution.
        ops = {
            '+': lambda a, b: a + b,
            '-': lambda a, b: a - b,
            '*': lambda a, b: a * b,
            '/': lambda a, b: int(a * 1. / b),
        }
        operands = []
        for tok in tokens:
            # A multi-character token covers negative numbers, e.g. "-11".
            if tok.isdigit() or len(tok) > 1:
                operands.append(int(tok))
            else:
                rhs = operands.pop()
                lhs = operands.pop()
                operands.append(ops[tok](lhs, rhs))
        return operands.pop()
|
en
| 0.660244
|
# Evaluate the value of an arithmetic expression in Reverse Polish Notation. # Valid operators are +, -, *, /. Each operand may be an integer or another expression. # Note: # Division between two integers should truncate toward zero. # The given RPN expression is always valid. # That means the expression would always evaluate to a result # and there won't be any divide by zero operation. # Example 1: # Input: ["2", "1", "+", "3", "*"] # Output: 9 # Explanation: ((2 + 1) * 3) = 9 # Example 2: # Input: ["4", "13", "5", "/", "+"] # Output: 6 # Explanation: (4 + (13 / 5)) = 6 # Example 3: # Input: ["10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"] # Output: 22 # Explanation: # ((10 * (6 / ((9 + 3) * -11))) + 17) + 5 # = ((10 * (6 / (12 * -11))) + 17) + 5 # = ((10 * (6 / -132)) + 17) + 5 # = ((10 * 0) + 17) + 5 # = (0 + 17) + 5 # = 17 + 5 # = 22 :type tokens: List[str] :rtype: int # 栈模拟 # print(stack,token) # len(token)>1 for negative numbers, eg -11.
| 4.049736
| 4
|
src/Pybind11Wraps/SpheralCommon.py
|
markguozhiming/spheral
| 1
|
6626665
|
#-------------------------------------------------------------------------------
# Common PYB11 initialization code for all Spheral modules.
#-------------------------------------------------------------------------------
from PYB11Generator import *
# Headers pulled into every generated PYB11 binding module.
PYB11includes = ['"Geometry/Dimension.hh"',
                 '"Geometry/GeomPlane.hh"',
                 "<vector>",
                 "<map>",
                 "<set>",
                 "<string>"]
# C++ preamble emitted verbatim at the top of each generated module:
# shorthand typedefs for the 1d/2d/3d geometric types.
PYB11preamble = """
typedef Spheral::GeomPlane<Spheral::Dim<1>> Plane1d;
typedef Spheral::Dim<1>::Vector Vector1d;
typedef Spheral::Dim<1>::Tensor Tensor1d;
typedef Spheral::Dim<1>::SymTensor SymTensor1d;
typedef Spheral::Dim<1>::ThirdRankTensor ThirdRankTensor1d;
typedef Spheral::Dim<1>::FourthRankTensor FourthRankTensor1d;
typedef Spheral::Dim<1>::FifthRankTensor FifthRankTensor1d;
typedef Spheral::Dim<1>::FacetedVolume FacetedVolume1d;
typedef Spheral::GeomPlane<Spheral::Dim<2>> Plane2d;
typedef Spheral::Dim<2>::Vector Vector2d;
typedef Spheral::Dim<2>::Tensor Tensor2d;
typedef Spheral::Dim<2>::SymTensor SymTensor2d;
typedef Spheral::Dim<2>::ThirdRankTensor ThirdRankTensor2d;
typedef Spheral::Dim<2>::FourthRankTensor FourthRankTensor2d;
typedef Spheral::Dim<2>::FifthRankTensor FifthRankTensor2d;
typedef Spheral::Dim<2>::FacetedVolume FacetedVolume2d;
typedef Spheral::GeomPlane<Spheral::Dim<3>> Plane3d;
typedef Spheral::Dim<3>::Vector Vector3d;
typedef Spheral::Dim<3>::Tensor Tensor3d;
typedef Spheral::Dim<3>::SymTensor SymTensor3d;
typedef Spheral::Dim<3>::ThirdRankTensor ThirdRankTensor3d;
typedef Spheral::Dim<3>::FourthRankTensor FourthRankTensor3d;
typedef Spheral::Dim<3>::FifthRankTensor FifthRankTensor3d;
typedef Spheral::Dim<3>::FacetedVolume FacetedVolume3d;
"""
# Container instantiations declared opaque to pybind11 (passed by
# reference rather than converted to native Python containers).
PYB11opaque = ["std::vector<char>",
               "std::vector<unsigned>",
               "std::vector<uint64_t>",
               "std::vector<int>",
               "std::vector<float>",
               "std::vector<double>",
               "std::vector<std::string>",
               "std::vector<std::vector<char>>",
               "std::vector<std::vector<unsigned>>",
               "std::vector<std::vector<uint64_t>>",
               "std::vector<std::vector<int>>",
               "std::vector<std::vector<float>>",
               "std::vector<std::vector<double>>",
               "std::vector<std::vector<std::string>>",
               "std::pair<double, double>",
               "std::pair<double, std::string>",
               "std::pair<unsigned, unsigned>",
               "std::pair<uint64_t, uint64_t>",
               "std::pair<std::string, std::string>",
               "std::map<std::string, double>",
               "std::map<int, std::string>",
               "std::vector<Dim<1>::Vector>",
               "std::vector<Dim<1>::Tensor>",
               "std::vector<Dim<1>::SymTensor>",
               "std::vector<Dim<1>::ThirdRankTensor>",
               "std::vector<Dim<1>::FourthRankTensor>",
               "std::vector<Dim<1>::FifthRankTensor>",
               "std::vector<Dim<1>::FacetedVolume>",
               "std::vector<Dim<2>::Vector>",
               "std::vector<Dim<2>::Tensor>",
               "std::vector<Dim<2>::SymTensor>",
               "std::vector<Dim<2>::ThirdRankTensor>",
               "std::vector<Dim<2>::FourthRankTensor>",
               "std::vector<Dim<2>::FifthRankTensor>",
               "std::vector<Dim<2>::FacetedVolume>",
               "std::vector<Dim<3>::Vector>",
               "std::vector<Dim<3>::Tensor>",
               "std::vector<Dim<3>::SymTensor>",
               "std::vector<Dim<3>::ThirdRankTensor>",
               "std::vector<Dim<3>::FourthRankTensor>",
               "std::vector<Dim<3>::FifthRankTensor>",
               "std::vector<Dim<3>::FacetedVolume>",
               "std::vector<GeomFacet2d>",
               "std::vector<GeomFacet3d>",
               "std::vector<Plane1d>",
               "std::vector<Plane2d>",
               "std::vector<Plane3d>"]
|
#-------------------------------------------------------------------------------
# Common PYB11 initialization code for all Spheral modules.
#-------------------------------------------------------------------------------
from PYB11Generator import *
# Headers pulled into every generated PYB11 binding module.
PYB11includes = ['"Geometry/Dimension.hh"',
                 '"Geometry/GeomPlane.hh"',
                 "<vector>",
                 "<map>",
                 "<set>",
                 "<string>"]
# C++ preamble emitted verbatim at the top of each generated module:
# shorthand typedefs for the 1d/2d/3d geometric types.
PYB11preamble = """
typedef Spheral::GeomPlane<Spheral::Dim<1>> Plane1d;
typedef Spheral::Dim<1>::Vector Vector1d;
typedef Spheral::Dim<1>::Tensor Tensor1d;
typedef Spheral::Dim<1>::SymTensor SymTensor1d;
typedef Spheral::Dim<1>::ThirdRankTensor ThirdRankTensor1d;
typedef Spheral::Dim<1>::FourthRankTensor FourthRankTensor1d;
typedef Spheral::Dim<1>::FifthRankTensor FifthRankTensor1d;
typedef Spheral::Dim<1>::FacetedVolume FacetedVolume1d;
typedef Spheral::GeomPlane<Spheral::Dim<2>> Plane2d;
typedef Spheral::Dim<2>::Vector Vector2d;
typedef Spheral::Dim<2>::Tensor Tensor2d;
typedef Spheral::Dim<2>::SymTensor SymTensor2d;
typedef Spheral::Dim<2>::ThirdRankTensor ThirdRankTensor2d;
typedef Spheral::Dim<2>::FourthRankTensor FourthRankTensor2d;
typedef Spheral::Dim<2>::FifthRankTensor FifthRankTensor2d;
typedef Spheral::Dim<2>::FacetedVolume FacetedVolume2d;
typedef Spheral::GeomPlane<Spheral::Dim<3>> Plane3d;
typedef Spheral::Dim<3>::Vector Vector3d;
typedef Spheral::Dim<3>::Tensor Tensor3d;
typedef Spheral::Dim<3>::SymTensor SymTensor3d;
typedef Spheral::Dim<3>::ThirdRankTensor ThirdRankTensor3d;
typedef Spheral::Dim<3>::FourthRankTensor FourthRankTensor3d;
typedef Spheral::Dim<3>::FifthRankTensor FifthRankTensor3d;
typedef Spheral::Dim<3>::FacetedVolume FacetedVolume3d;
"""
# Container instantiations declared opaque to pybind11 (passed by
# reference rather than converted to native Python containers).
PYB11opaque = ["std::vector<char>",
               "std::vector<unsigned>",
               "std::vector<uint64_t>",
               "std::vector<int>",
               "std::vector<float>",
               "std::vector<double>",
               "std::vector<std::string>",
               "std::vector<std::vector<char>>",
               "std::vector<std::vector<unsigned>>",
               "std::vector<std::vector<uint64_t>>",
               "std::vector<std::vector<int>>",
               "std::vector<std::vector<float>>",
               "std::vector<std::vector<double>>",
               "std::vector<std::vector<std::string>>",
               "std::pair<double, double>",
               "std::pair<double, std::string>",
               "std::pair<unsigned, unsigned>",
               "std::pair<uint64_t, uint64_t>",
               "std::pair<std::string, std::string>",
               "std::map<std::string, double>",
               "std::map<int, std::string>",
               "std::vector<Dim<1>::Vector>",
               "std::vector<Dim<1>::Tensor>",
               "std::vector<Dim<1>::SymTensor>",
               "std::vector<Dim<1>::ThirdRankTensor>",
               "std::vector<Dim<1>::FourthRankTensor>",
               "std::vector<Dim<1>::FifthRankTensor>",
               "std::vector<Dim<1>::FacetedVolume>",
               "std::vector<Dim<2>::Vector>",
               "std::vector<Dim<2>::Tensor>",
               "std::vector<Dim<2>::SymTensor>",
               "std::vector<Dim<2>::ThirdRankTensor>",
               "std::vector<Dim<2>::FourthRankTensor>",
               "std::vector<Dim<2>::FifthRankTensor>",
               "std::vector<Dim<2>::FacetedVolume>",
               "std::vector<Dim<3>::Vector>",
               "std::vector<Dim<3>::Tensor>",
               "std::vector<Dim<3>::SymTensor>",
               "std::vector<Dim<3>::ThirdRankTensor>",
               "std::vector<Dim<3>::FourthRankTensor>",
               "std::vector<Dim<3>::FifthRankTensor>",
               "std::vector<Dim<3>::FacetedVolume>",
               "std::vector<GeomFacet2d>",
               "std::vector<GeomFacet3d>",
               "std::vector<Plane1d>",
               "std::vector<Plane2d>",
               "std::vector<Plane3d>"]
|
en
| 0.366421
|
#------------------------------------------------------------------------------- # Common PYB11 initialization code for all Spheral modules. #------------------------------------------------------------------------------- typedef Spheral::GeomPlane<Spheral::Dim<1>> Plane1d; typedef Spheral::Dim<1>::Vector Vector1d; typedef Spheral::Dim<1>::Tensor Tensor1d; typedef Spheral::Dim<1>::SymTensor SymTensor1d; typedef Spheral::Dim<1>::ThirdRankTensor ThirdRankTensor1d; typedef Spheral::Dim<1>::FourthRankTensor FourthRankTensor1d; typedef Spheral::Dim<1>::FifthRankTensor FifthRankTensor1d; typedef Spheral::Dim<1>::FacetedVolume FacetedVolume1d; typedef Spheral::GeomPlane<Spheral::Dim<2>> Plane2d; typedef Spheral::Dim<2>::Vector Vector2d; typedef Spheral::Dim<2>::Tensor Tensor2d; typedef Spheral::Dim<2>::SymTensor SymTensor2d; typedef Spheral::Dim<2>::ThirdRankTensor ThirdRankTensor2d; typedef Spheral::Dim<2>::FourthRankTensor FourthRankTensor2d; typedef Spheral::Dim<2>::FifthRankTensor FifthRankTensor2d; typedef Spheral::Dim<2>::FacetedVolume FacetedVolume2d; typedef Spheral::GeomPlane<Spheral::Dim<3>> Plane3d; typedef Spheral::Dim<3>::Vector Vector3d; typedef Spheral::Dim<3>::Tensor Tensor3d; typedef Spheral::Dim<3>::SymTensor SymTensor3d; typedef Spheral::Dim<3>::ThirdRankTensor ThirdRankTensor3d; typedef Spheral::Dim<3>::FourthRankTensor FourthRankTensor3d; typedef Spheral::Dim<3>::FifthRankTensor FifthRankTensor3d; typedef Spheral::Dim<3>::FacetedVolume FacetedVolume3d;
| 2.10816
| 2
|
src/analysis_integrity/hashlock.py
|
inakleinbottle/analysis_integrity
| 0
|
6626666
|
<filename>src/analysis_integrity/hashlock.py
import hmac
import hashlib
import json
import pathlib
import sys
import warnings
class HashLockError(Exception):
    # Raised for any integrity failure: missing lock entry, algorithm
    # mismatch, file-list mismatch, or digest mismatch.
    pass
class HashLock:
    """Guard a set of files against modification via stored hash digests.

    On construction, current digests of *files* are compared against the
    entry named *name* in ``hash_lock.json`` (in the current working
    directory); any mismatch raises :class:`HashLockError`.  If
    ``--generate-lock`` appears on the command line, the lock entry is
    (re)written instead and the process exits.
    """
    # Digest algorithm for every file; its name is stored in the lock
    # file and checked on comparison, so changing it invalidates locks.
    hash = hashlib.sha256
    lock_file_name = "hash_lock.json"
    def __init__(self, name, *files):
        # name  : key identifying this analysis in the shared lock file
        # files : path-like objects whose contents must match the lock
        self.name = name
        self.files = list(map(pathlib.Path, files))
        for file in self.files:
            if not file.exists():
                raise FileNotFoundError(f"File {file} does not exist")
        # Side effect: inspects sys.argv and may terminate the process.
        if "--generate-lock" in sys.argv:
            print("Generating new lock file")
            self.populate_lock_file()
            print("Done")
            sys.exit(0)
        failures = self.compare_hashes()
        if failures:
            raise HashLockError(f"{failures} files do not match their locked hash digest")
    @classmethod
    def load_or_create_lock_file(cls):
        # Return the parsed lock file, or {} when none exists yet.
        path = pathlib.Path.cwd() / cls.lock_file_name
        if not path.exists():
            return {}
        with path.open("rt") as fp:
            return json.load(fp)
    @classmethod
    def write_lock_file(cls, lock_dict):
        # Overwrite the lock file in the current directory with *lock_dict*.
        path = pathlib.Path.cwd() / cls.lock_file_name
        with path.open("wt") as fp:
            json.dump(lock_dict, fp)
    @classmethod
    def hash_file(cls, path):
        # Hex digest of the file's raw bytes.
        assert isinstance(path, pathlib.Path)
        return cls.hash(path.read_bytes()).hexdigest()
    def hash_files(self):
        # Map str(path) -> current hex digest for every guarded file.
        return {
            str(path): self.hash_file(path) for path in self.files
        }
    def populate_lock_file(self):
        # Record the algorithm name and current digests under self.name,
        # preserving entries belonging to other names.
        data = {
            "hash-algorithm": self.hash().name,
            "hashes": self.hash_files()
        }
        lock_data = self.load_or_create_lock_file()
        lock_data[self.name] = data
        self.write_lock_file(lock_data)
    def compare_hashes(self, throw=False):
        """Compare current file digests against the locked ones.

        Returns the number of mismatching files (issuing a warning for
        each); with ``throw=True`` the first mismatch raises instead.
        Raises HashLockError when the lock entry is missing, the stored
        algorithm differs, or the file list differs.
        """
        lock_hashes = self.load_or_create_lock_file().get(self.name)
        if lock_hashes is None:
            raise HashLockError("The hashes for this file do not exist")
        algo = lock_hashes.get("hash-algorithm")
        if algo is None or not algo == self.hash().name:
            raise HashLockError("Hashing algorithms do not match")
        # This is symmetric difference of sets
        difference = set(map(str, self.files)) ^ set(lock_hashes["hashes"])
        if difference:
            raise HashLockError("Files in lock file do not match file list")
        new_hashes = self.hash_files()
        failures = 0
        for file in lock_hashes["hashes"]:
            # compare_digest: constant-time comparison of the two digests.
            if not hmac.compare_digest(lock_hashes["hashes"][file], new_hashes[file]):
                if throw:
                    raise HashLockError(f"Hashes for file {file} do not match")
                failures += 1
                warnings.warn(f"Hashes for file {file} do not match")
        return failures
|
<filename>src/analysis_integrity/hashlock.py
import hmac
import hashlib
import json
import pathlib
import sys
import warnings
class HashLockError(Exception):
    # Raised for any integrity failure: missing lock entry, algorithm
    # mismatch, file-list mismatch, or digest mismatch.
    pass
class HashLock:
    """Guard a set of files against modification via stored hash digests.

    On construction, current digests of *files* are compared against the
    entry named *name* in ``hash_lock.json`` (in the current working
    directory); any mismatch raises :class:`HashLockError`.  If
    ``--generate-lock`` appears on the command line, the lock entry is
    (re)written instead and the process exits.
    """
    # Digest algorithm for every file; its name is stored in the lock
    # file and checked on comparison, so changing it invalidates locks.
    hash = hashlib.sha256
    lock_file_name = "hash_lock.json"
    def __init__(self, name, *files):
        # name  : key identifying this analysis in the shared lock file
        # files : path-like objects whose contents must match the lock
        self.name = name
        self.files = list(map(pathlib.Path, files))
        for file in self.files:
            if not file.exists():
                raise FileNotFoundError(f"File {file} does not exist")
        # Side effect: inspects sys.argv and may terminate the process.
        if "--generate-lock" in sys.argv:
            print("Generating new lock file")
            self.populate_lock_file()
            print("Done")
            sys.exit(0)
        failures = self.compare_hashes()
        if failures:
            raise HashLockError(f"{failures} files do not match their locked hash digest")
    @classmethod
    def load_or_create_lock_file(cls):
        # Return the parsed lock file, or {} when none exists yet.
        path = pathlib.Path.cwd() / cls.lock_file_name
        if not path.exists():
            return {}
        with path.open("rt") as fp:
            return json.load(fp)
    @classmethod
    def write_lock_file(cls, lock_dict):
        # Overwrite the lock file in the current directory with *lock_dict*.
        path = pathlib.Path.cwd() / cls.lock_file_name
        with path.open("wt") as fp:
            json.dump(lock_dict, fp)
    @classmethod
    def hash_file(cls, path):
        # Hex digest of the file's raw bytes.
        assert isinstance(path, pathlib.Path)
        return cls.hash(path.read_bytes()).hexdigest()
    def hash_files(self):
        # Map str(path) -> current hex digest for every guarded file.
        return {
            str(path): self.hash_file(path) for path in self.files
        }
    def populate_lock_file(self):
        # Record the algorithm name and current digests under self.name,
        # preserving entries belonging to other names.
        data = {
            "hash-algorithm": self.hash().name,
            "hashes": self.hash_files()
        }
        lock_data = self.load_or_create_lock_file()
        lock_data[self.name] = data
        self.write_lock_file(lock_data)
    def compare_hashes(self, throw=False):
        """Compare current file digests against the locked ones.

        Returns the number of mismatching files (issuing a warning for
        each); with ``throw=True`` the first mismatch raises instead.
        Raises HashLockError when the lock entry is missing, the stored
        algorithm differs, or the file list differs.
        """
        lock_hashes = self.load_or_create_lock_file().get(self.name)
        if lock_hashes is None:
            raise HashLockError("The hashes for this file do not exist")
        algo = lock_hashes.get("hash-algorithm")
        if algo is None or not algo == self.hash().name:
            raise HashLockError("Hashing algorithms do not match")
        # This is symmetric difference of sets
        difference = set(map(str, self.files)) ^ set(lock_hashes["hashes"])
        if difference:
            raise HashLockError("Files in lock file do not match file list")
        new_hashes = self.hash_files()
        failures = 0
        for file in lock_hashes["hashes"]:
            # compare_digest: constant-time comparison of the two digests.
            if not hmac.compare_digest(lock_hashes["hashes"][file], new_hashes[file]):
                if throw:
                    raise HashLockError(f"Hashes for file {file} do not match")
                failures += 1
                warnings.warn(f"Hashes for file {file} do not match")
        return failures
|
en
| 0.951887
|
# This is symmetric difference of sets
| 3.060903
| 3
|
python/uw/like2/convolution.py
|
tburnett/pointlike
| 1
|
6626667
|
"""
Convolution interface for like2
Extends classes from uw.utilities
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/like2/convolution.py,v 1.9 2018/01/27 15:37:17 burnett Exp $
author: <NAME>
"""
import os, pickle, zipfile
import numpy as np
import pandas as pd
from uw.utilities import keyword_options
from uw.utilities import convolution as utilities_convolution
import skymaps #from Science Tools: for SkyDir
class FillMixin(object):
    """A Mixin class for like2 convolution, to replace functions in utilities.convolution
    """
    def fill(self, skyfun):
        """ Evaluate skyfun along the internal grid and return the resulting array.
        (Identical to superclass, except skyfun can be either a python functor or a
        C++ SkySkySpectrum)
        """
        v = np.empty(self.npix*self.npix)
        if isinstance(skyfun, skymaps.SkySpectrum):
            # C++ SkySpectrum: evaluate on the whole grid in one C++ call
            skymaps.PythonUtilities.val_grid(v,self.lons,self.lats,self.center,skyfun)
        else:
            # python callable: adapt to the SkyDir-based signature that
            # val_grid expects (u is a direction cosine triple)
            def pyskyfun(u):
                return skyfun(skymaps.SkyDir(skymaps.Hep3Vector(u[0],u[1],u[2])))
            skymaps.PythonUtilities.val_grid(v,self.lons,self.lats,self.center,
                skymaps.PySkyFunction(pyskyfun))
        return v.reshape([self.npix,self.npix])
    def bg_fill(self, exp, dm, cache=None, ignore_nan=False):
        """ Evaluate product of exposure and diffuse map on the grid
        exp : SkyFunction for exposure
        dm : [SkyFuntion for diffuse map | None]
            If None, expect predetermined values in cache, which may be an array or a scalar
        """
        #print 'filling with product of exposure "%s" model "%s"' % (exp, dm)
        if dm is None:
            assert cache is not None, 'Logic error'
            self.bg_vals = self.fill(exp) * cache
        else:
            # evaluate the product pointwise in a single fill rather than
            # filling each factor on its own grid pass
            def exp_dm(skydir):
                return exp(skydir)*dm(skydir)
            self.bg_vals = self.fill(exp_dm)
        #self.bg_vals = self.fill(exp) * (self.fill(dm) if cache is None else cache) #product of exposure and map
        #self.dm_vals = self.fill(dm) #temporary
        #self.exp_vals = self.fill(exp)
        # check for nans, replace with zeros if not full ROI
        nans = np.isnan(self.bg_vals)
        if np.all(nans):
            if dm is None: raise Exception('Cache entry has all nans: %s'%cache)
            raise Exception('Diffuse source %s has no overlap with ROi' % dm.filename)
        if np.any(nans) and ignore_nan:
            self.bg_vals[nans]=0
    def psf_fill(self, psf):
        """ Evaluate PSF on the grid
        """
        #print 'filling with psf %s' % psf
        psf_vals = psf(self.dists).reshape([self.npix,self.npix])
        # normalize so the PSF kernel sums to unity over the grid
        self.psf_vals = psf_vals / psf_vals.sum()
    def set_npix(self, psf, edge=0, r_multi=1.2, r_max=20):
        """ modify the npix with
        psf : PSF object
        edge: float --Source size (degrees)
        r_multi float multiple of r95 to set max dimension of grid
        r_max float an absolute maximum (half)-size of grid (deg)
        """
        r95 = psf.inverse_integral(95)
        # grid half-size: multiple of the 95% containment radius padded by
        # the source size, clipped to the range [edge+2.5, r_max] degrees
        rad = r_multi*r95 + edge
        rad = max(min(r_max,rad),edge+2.5)
        npix = int(round(2*rad/self.pixelsize))
        # force an odd count so the grid has a central pixel (True adds 1)
        npix += (npix%2 == 0)
        return npix
class ShowMixin(object):
    """ A mixin class to add or replace show methods
    """
    def show_vals(self, vals=None, ax=None, roi_radius=5, roi_dir=None, colorbar=True, npix=None, **kw):
        """Make a display.
        vals : 2-d array of float
            generated by the fill method; expect to be npix x npix
        npix : [int | None]
            if int, override self.npix to for central npix x npix
        """
        import pylab as plt
        if ax is None: fig,ax=plt.subplots()
        if vals is None: vals = self.cvals
        if npix is not None and npix!=self.npix:
            # NOTE(review): "/" is integer division under Python 2; under
            # Python 3 this yields a float and the slice below would fail.
            delta = (self.npix-npix)/2
            assert delta>0, 'npix not >= self.npix'
            tvals = vals[delta:delta+npix, delta:delta+npix]
        else:
            npix=self.npix; tvals = vals
        if roi_radius is not None:
            # overlay the ROI boundary as a circle in pixel coordinates
            if roi_dir is None: roi_dir = self.center
            circle = plt.Circle(self.pix(roi_dir),roi_radius/self.pixelsize, color='grey', lw=2,fill=False)
            ax.add_artist(circle)
        # transpose + reverse puts north up in the image
        v = ax.imshow( tvals.transpose()[::-1], interpolation='nearest', **kw)
        marker = float(npix)/2
        ax.axvline(marker,color='k')
        ax.axhline(marker,color='k')
        if colorbar:
            cb = plt.colorbar(v, shrink=0.8)
        # relabel ticks in degrees relative to the grid center
        def scale(x, factor=1.0):
            return x*factor/self.pixelsize+self.npix/2.
        r = np.arange(-8,9,4)
        ax.set_xticks(scale(r))
        ax.set_xticklabels(map(lambda x:'%.0f'%x ,r))
        ax.set_yticks(scale(r, -1))
        ax.set_yticklabels(map(lambda x:'%.0f'%x ,r))
        return ax.figure
    def show(self, roi_radius=None,roi_dir=None, **kwargs):
        """Three subplots: PSF, raw, convolved"""
        import pylab as plt
        from matplotlib.colors import LogNorm
        title = kwargs.pop('title', None)
        if hasattr(self, 'band'):
            # prefer the band's geometry when available
            roi_radius = self.band.radius
            roi_dir = self.band.sd
        fig, axx = plt.subplots(1,3, figsize=(10,3), sharex=True, sharey=True)
        plt.subplots_adjust(wspace=0.05)
        if hasattr(self, 'psf_vals'):
            axx[0].imshow(self.psf_vals,interpolation='nearest')
        vmax = self.bg_vals.max()
        norm = LogNorm(vmax=vmax, vmin=vmax/1e3)
        marker = float(self.npix)/2
        for ax,what in zip(axx[1:], (self.bg_vals, self.cvals) ):
            # floor zeros so they remain visible on the log color scale
            # (note: mutates the displayed array in place)
            what[what==0]=vmax/1e6
            ax.imshow(what.transpose()[::-1], norm=norm, interpolation='nearest')
            ax.axvline(marker,color='grey')
            ax.axhline(marker,color='grey')
            if roi_radius is not None:
                if roi_dir is None: roi_dir = self.center
                circle = plt.Circle(self.pix(roi_dir),roi_radius/self.pixelsize, color='grey', lw=2,fill=False)
                ax.add_artist(circle)
        axx[0].set_aspect(1.0)
        if title is not None:
            plt.suptitle(title,fontsize='small')
        return fig
class ConvolvableGrid(FillMixin, ShowMixin, utilities_convolution.BackgroundConvolution):
    """ Convolution used by response classes. This subclass uses the mixin classes defined here to:
        1) change the default for a bounds error (to check)
        2) replace the fill method with a version that works for a python class
        3) provide useful show methods
    """
    # defaults consumed by keyword_options.decorate/process below
    defaults =(
        ('pixelsize', 0.1, 'Size of pixels to use for convolution grid'),
        ('npix', 201, 'Number of pixels (must be an odd number'),
        )

    @keyword_options.decorate(defaults)
    def __init__(self, center, **kwargs):
        """ center -- a SkyDir giving the center of the grid on which to convolve bg
            kwargs are passed to Grid.
        """
        keyword_options.process(self, kwargs)
        defaults=dict(bounds_error=False)
        defaults.update(kwargs)
        # note do not use code in superclass needing psf, diffuse function:
        # both are passed as None here
        super(ConvolvableGrid, self).__init__(center, None, None, **defaults)
        self.center = center

    def __repr__(self):
        return '%s.%s: center %s npix %d pixelsize %.2f' %(
            self.__module__,self.__class__.__name__, self.center, self.npix, self.pixelsize)
def spherical_harmonic(f, lmax, thetamax=45):
    """Calculate spherical harmonic coefficients of a function f, for l <= lmax.

    f : function of theta (radians)
    lmax : int
        highest harmonic index to evaluate
    thetamax : float, optional. units degrees
        integral over costheta is in principle from -1 (180 deg) to +1
        but the function may be limited to much smaller than that

    Returns an array of lmax+1 coefficients, normalized so the l=0 term is 1.
    """
    from scipy.integrate import quad
    from scipy.special import legendre
    func = lambda x,n : f(np.sqrt(2*(1-x))) * legendre(n)(x)
    ctmin = np.cos(np.radians(thetamax))
    G = lambda n : quad(func, ctmin, 1, args=n)[0]  # note lower limit not -1
    coeffs = np.array([G(n) for n in range(lmax+1)])
    # normalize using the already-computed l=0 term instead of re-evaluating
    # the (expensive) quadrature G(0) a second time
    return coeffs / coeffs[0]
class TestPSFFT(object):
    """Test spherical harmonic decomposition of PSF
    """
    def __init__(self, event_type=0, energy=133, config_dir='.'):
        """ config_dir : string
                where to find a config.yaml file, to obtain the IRF. Can start with '$FERMI'
            energy : float
            event_type : int
                0 or 1 for front, back
        """
        from . import configuration
        config = configuration.Configuration(config_dir, quiet=True, postpone=True)
        irfname = config.irf
        # NOTE(review): this PSF is always front / 133 MeV; it is only used to
        # choose the integration limit below -- confirm that is intended
        psf = config.irfs.psf(0, 133)
        self.psf = config.irfs.psf(event_type, energy)
        self.label= 'PSF {} {} MeV'.format(['front', 'back'][event_type], energy)
        # python 2 print statement; trailing comma suppresses the newline
        print 'Evaluating the sherical harmonic content for {} {}...'.format(irfname,self.label),
        # integrate out to the 99.5% containment radius
        self.sh = spherical_harmonic(self.psf, 128, psf.inverse_integral(99.5));
        print

    def plot(self, psf_label='PSF Front 133 MeV', sig_deg=1.5):
        """Compare the PSF with a Gaussian, in angle space and harmonic space.

        psf_label : string label for the PSF curve
        sig_deg : sigma (degrees) of the comparison Gaussian
        """
        import matplotlib.pylab as plt
        sigma=np.radians(sig_deg)
        # analytic spherical-harmonic transform of a Gaussian of width sigma
        gsh =lambda el : np.exp(-0.5 * (el * (el + 1)) * sigma**2)
        fig, axx = plt.subplots(1,2, figsize=(8,4))
        glabel = '{} deg Gaussian'.format(sig_deg)
        ax=axx[0]
        f = lambda x: np.exp(-0.5*(x/sigma)**2)
        x=np.linspace(0,10,51)
        theta = np.radians(x)
        norm = self.psf(0)
        ax.plot(x, self.psf(theta)/norm, '-', label=self.label)
        ax.plot(x, f(theta), '-', label=glabel)
        ax.legend()
        ax.axhline(0, color='lightgray')
        ax.set_title('Function')
        ax.set_xlabel('displacement [deg.]')
        ax=axx[1]
        ax.plot(self.sh, '-', label=psf_label)
        ax.plot(map(gsh,range(128)), '-', label=glabel)
        ax.axhline(0, color='lightgray')
        ax.legend();
        ax.set_xlabel('Sperical harmonic')
        ax.set_title('Fourier Transform');
def convolve_healpix(input_map, func=None, sigma=None, thetamax=10 ):
    """
    Convolve a HEALPix map with a function of harmonic index, or a Gaussian.

    input_map : array of float
        a HEALPix array, RING indexing, nside a power of 2
    func : The function of an integer el | None
        returns the amplitude for spherical harmonic el
        example: for a Gaussian with sigma in radians:
           lambda el : np.exp(-0.5 * (el * (el + 1)) * sigma**2)
    sigma : None | float (deg)
        If not None, use gaussian for func
    thetamax : float
        used only for a sanity check on a user-supplied func

    Returns: the convolved map
    """
    import healpy
    nside = int(np.sqrt(len(input_map)/12))
    assert 12*nside**2 == len(input_map), 'Bad length: expect 12*nside**2'
    if func is None:
        assert sigma is not None, 'If no func, must specify sigma'
        func= lambda el : np.exp(-0.5 * (el * (el + 1)) * np.radians(sigma)**2)
    else:
        # sanity check: the kernel must be negligible by thetamax
        assert func(thetamax)/func(0) < 1e-3
    alm = healpy.map2alm(input_map)
    lmax = healpy.Alm.getlmax(len(alm))
    if lmax < 0:
        # Fixed: the original message referenced an undefined name 'ialm',
        # which raised NameError instead of the intended TypeError.
        raise TypeError('Wrong alm size (len(alm) = %d).' % len(alm))
    ell = np.arange(lmax + 1.)
    fact = np.array([func(x) for x in ell])
    # multiply each alm by the transform of the kernel, then invert
    healpy.almxfl(alm, fact, inplace=True)
    return healpy.alm2map(alm, nside=nside, verbose=False)
class SphericalHarmonicContent(object):
    """ This class is a functor, defining a function of the spherical harmonic index.
        The exact Legendre integral is expensive: it is sampled at a few values
        of l and interpolated in between.
    """
    def __init__(self, f, lmax, thetamax=45., tolerance=1e-3, quiet=True):
        """Evaluate spherical harmonic content of a function of theta.

        f : function of theta (radians)
        lmax : int
            largest harmonic index that will be requested
        thetamax : float
            limit (degrees) of the integral over cos theta
        tolerance : float or None
            convergence criterion for the adaptive choice of sample points;
            None disables the adaptive sampling
        quiet : bool
            if False, print sampling diagnostics
        """
        from scipy.integrate import quad
        from scipy.special import legendre
        func = lambda x,n : f(np.sqrt(2*(1-x))) * legendre(n)(x)
        ctmin = np.cos(np.radians(thetamax))
        # self.G closes over the local 'norm': it is 1 for the first call,
        # then set to the raw G(0) so later evaluations are normalized
        norm = 1
        self.G = lambda n : quad(func, ctmin, 1, args=n)[0]/norm  # note lower limit not -1
        norm = self.G(0)
        self.lmax = lmax
        self.fun = None
        self.values = []
        self.addpoint(0)
        self.addpoint(lmax)
        if tolerance is not None:
            self._approximate(tolerance, quiet=quiet)

    def addpoint(self, el, test=False):
        """Evaluate G(el) and add it to the interpolation table.

        If test is True, return the relative change of the interpolated
        value at el caused by adding the point.
        """
        if test:
            cvalue = self(el)
        self.values.append((el, self.G(el)))
        if self.fun is not None:
            self._setup_interpolation()
        if test: return self(el)/cvalue - 1

    def _setup_interpolation(self):
        # sort the sampled (el, value) pairs and (re)build the interpolator
        from scipy import interpolate
        t = np.array(self.values, dtype=[('el', float), ('value', float)])
        s = np.sort(t, order='el')
        self.el = s['el']; self.value = s['value']
        self.fun = interpolate.interp1d(s['el'], s['value'],
            kind='quadratic' if len(self.values) > 2 else 'linear')

    def __call__(self, ell):
        """
        ell : value or array of int
        returns the interpolating function output
        """
        if self.fun is None:
            self._setup_interpolation()
        return self.fun(ell)

    def _approximate(self, tolerance=1e-3, quiet=True):
        # Bisect downward from lmax/2, adding sample points until adding one
        # changes the interpolation by less than the requested tolerance.
        el = int(self.lmax/2)
        done = False
        while el > 2 and not done:
            x = self.addpoint(el, True)
            if not quiet:
                print('{}:{:.4f}'.format(el, x))  # parenthesized: valid on python 2 and 3
            # Fixed: was hard-coded abs(x) < 1e-3, silently ignoring the
            # 'tolerance' parameter passed in from __init__.
            done = abs(x) < tolerance
            el = el//2

    def plot(self, title='', ax=None):
        """Plot the evaluated sample points and the interpolation."""
        import matplotlib.pyplot as plt
        if ax is None: fig, ax = plt.subplots()
        ax.plot(self(np.arange(self.lmax+1)), '--', label='interpolation')
        ax.plot(self.el, self.value, 'o', label='evaluated')
        ax.set_xlabel('$l$');
        ax.set_ylim((0, 1.05))
        ax.set_title(title)
        ax.legend();
|
"""
Convolution interface for like2
Extends classes from uw.utilities
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/like2/convolution.py,v 1.9 2018/01/27 15:37:17 burnett Exp $
author: <NAME>
"""
import os, pickle, zipfile
import numpy as np
import pandas as pd
from uw.utilities import keyword_options
from uw.utilities import convolution as utilities_convolution
import skymaps #from Science Tools: for SkyDir
class FillMixin(object):
    """A Mixin class for like2 convolution, to replace functions in utilities.convolution
    """
    def fill(self, skyfun):
        """ Evaluate skyfun along the internal grid and return the resulting array.

        (Identical to superclass, except skyfun can be either a python functor or a
        C++ SkySpectrum)
        """
        v = np.empty(self.npix*self.npix)
        if isinstance(skyfun, skymaps.SkySpectrum):
            # C++ object: let the compiled helper evaluate it on the grid
            skymaps.PythonUtilities.val_grid(v,self.lons,self.lats,self.center,skyfun)
        else:
            # python callable: wrap it so the compiled helper can call it
            def pyskyfun(u):
                return skyfun(skymaps.SkyDir(skymaps.Hep3Vector(u[0],u[1],u[2])))
            skymaps.PythonUtilities.val_grid(v,self.lons,self.lats,self.center,
                skymaps.PySkyFunction(pyskyfun))
        return v.reshape([self.npix,self.npix])

    def bg_fill(self, exp, dm, cache=None, ignore_nan=False):
        """ Evaluate product of exposure and diffuse map on the grid; sets self.bg_vals.

        exp : SkyFunction for exposure
        dm : [SkyFunction for diffuse map | None]
            If None, expect predetermined values in cache, which may be an array or a scalar
        cache : array | scalar | None
            precomputed diffuse values, used when dm is None
        ignore_nan : bool
            if True, replace NaN values with zero (unless all values are NaN)
        """
        #print 'filling with product of exposure "%s" model "%s"' % (exp, dm)
        if dm is None:
            assert cache is not None, 'Logic error'
            self.bg_vals = self.fill(exp) * cache
        else:
            def exp_dm(skydir):
                return exp(skydir)*dm(skydir)
            self.bg_vals = self.fill(exp_dm)
        #self.bg_vals = self.fill(exp) * (self.fill(dm) if cache is None else cache) #product of exposure and map
        #self.dm_vals = self.fill(dm) #temporary
        #self.exp_vals = self.fill(exp)
        # check for nans, replace with zeros if not full ROI
        nans = np.isnan(self.bg_vals)
        if np.all(nans):
            if dm is None: raise Exception('Cache entry has all nans: %s'%cache)
            raise Exception('Diffuse source %s has no overlap with ROi' % dm.filename)
        if np.any(nans) and ignore_nan:
            self.bg_vals[nans]=0

    def psf_fill(self, psf):
        """ Evaluate the PSF on the grid; sets self.psf_vals, normalized to unit sum.
        """
        #print 'filling with psf %s' % psf
        psf_vals = psf(self.dists).reshape([self.npix,self.npix])
        self.psf_vals = psf_vals / psf_vals.sum()

    def set_npix(self, psf, edge=0, r_multi=1.2, r_max=20):
        """ Compute and return a suitable (odd) npix value; does not modify self.

        psf : PSF object
        edge: float --Source size (degrees)
        r_multi float multiple of r95 to set max dimension of grid
        r_max float an absolute maximum (half)-size of grid (deg)
        """
        r95 = psf.inverse_integral(95)
        rad = r_multi*r95 + edge
        rad = max(min(r_max,rad),edge+2.5)
        npix = int(round(2*rad/self.pixelsize))
        # force npix to be odd so the grid has a central pixel
        npix += (npix%2 == 0)
        return npix
class ShowMixin(object):
    """ A mixin class to add or replace show methods
    """
    def show_vals(self, vals=None, ax=None, roi_radius=5, roi_dir=None, colorbar=True, npix=None, **kw):
        """Display an image of a grid of values.

        vals : 2-d array of float
            generated by the fill method; expect to be npix x npix
            (default: self.cvals)
        ax : matplotlib Axes or None
        roi_radius : float or None
            radius (degrees) of a circle to draw, if not None
        roi_dir : SkyDir or None
            center of that circle (default: self.center)
        colorbar : bool
        npix : [int | None]
            if int, restrict the display to the central npix x npix subgrid;
            must be smaller than self.npix

        Returns the matplotlib Figure.
        """
        import pylab as plt
        if ax is None: fig, ax = plt.subplots()
        if vals is None: vals = self.cvals
        if npix is not None and npix != self.npix:
            # Extract the central npix x npix subarray. '//' keeps integer
            # semantics on both python 2 and 3; plain '/' would produce a
            # float slice index under python 3.
            delta = (self.npix - npix) // 2
            assert delta > 0, 'npix must be smaller than self.npix'
            tvals = vals[delta:delta+npix, delta:delta+npix]
        else:
            npix = self.npix; tvals = vals
        if roi_radius is not None:
            if roi_dir is None: roi_dir = self.center
            circle = plt.Circle(self.pix(roi_dir), roi_radius/self.pixelsize, color='grey', lw=2, fill=False)
            ax.add_artist(circle)
        v = ax.imshow(tvals.transpose()[::-1], interpolation='nearest', **kw)
        marker = float(npix)/2
        ax.axvline(marker, color='k')
        ax.axhline(marker, color='k')
        if colorbar:
            cb = plt.colorbar(v, shrink=0.8)
        def scale(x, factor=1.0):
            # convert an offset in degrees to a pixel coordinate
            return x*factor/self.pixelsize + self.npix/2.
        r = np.arange(-8, 9, 4)
        ax.set_xticks(scale(r))
        ax.set_xticklabels(['%.0f' % x for x in r])
        ax.set_yticks(scale(r, -1))
        ax.set_yticklabels(['%.0f' % x for x in r])
        return ax.figure

    def show(self, roi_radius=None, roi_dir=None, **kwargs):
        """Three subplots: PSF, raw, convolved.

        roi_radius, roi_dir : optional circle; overridden by self.band if present
        kwargs : may contain 'title' for a figure suptitle
        """
        import pylab as plt
        from matplotlib.colors import LogNorm
        title = kwargs.pop('title', None)
        if hasattr(self, 'band'):
            roi_radius = self.band.radius
            roi_dir = self.band.sd
        fig, axx = plt.subplots(1, 3, figsize=(10,3), sharex=True, sharey=True)
        plt.subplots_adjust(wspace=0.05)
        if hasattr(self, 'psf_vals'):
            axx[0].imshow(self.psf_vals, interpolation='nearest')
        vmax = self.bg_vals.max()
        norm = LogNorm(vmax=vmax, vmin=vmax/1e3)
        marker = float(self.npix)/2
        for ax, what in zip(axx[1:], (self.bg_vals, self.cvals)):
            # replace zeros so the log normalization is defined everywhere
            what[what==0] = vmax/1e6
            ax.imshow(what.transpose()[::-1], norm=norm, interpolation='nearest')
            ax.axvline(marker, color='grey')
            ax.axhline(marker, color='grey')
            if roi_radius is not None:
                if roi_dir is None: roi_dir = self.center
                circle = plt.Circle(self.pix(roi_dir), roi_radius/self.pixelsize, color='grey', lw=2, fill=False)
                ax.add_artist(circle)
        axx[0].set_aspect(1.0)
        if title is not None:
            plt.suptitle(title, fontsize='small')
        return fig
class ConvolvableGrid(FillMixin, ShowMixin, utilities_convolution.BackgroundConvolution):
    """ Convolution used by response classes. This subclass uses the mixin classes defined here to:
        1) change the default for a bounds error (to check)
        2) replace the fill method with a version that works for a python class
        3) provide useful show methods
    """
    # defaults consumed by keyword_options.decorate/process below
    defaults =(
        ('pixelsize', 0.1, 'Size of pixels to use for convolution grid'),
        ('npix', 201, 'Number of pixels (must be an odd number'),
        )

    @keyword_options.decorate(defaults)
    def __init__(self, center, **kwargs):
        """ center -- a SkyDir giving the center of the grid on which to convolve bg
            kwargs are passed to Grid.
        """
        keyword_options.process(self, kwargs)
        defaults=dict(bounds_error=False)
        defaults.update(kwargs)
        # note do not use code in superclass needing psf, diffuse function:
        # both are passed as None here
        super(ConvolvableGrid, self).__init__(center, None, None, **defaults)
        self.center = center

    def __repr__(self):
        return '%s.%s: center %s npix %d pixelsize %.2f' %(
            self.__module__,self.__class__.__name__, self.center, self.npix, self.pixelsize)
def spherical_harmonic(f, lmax, thetamax=45):
    """Calculate spherical harmonic coefficients of a function f, for l <= lmax.

    f : function of theta (radians)
    lmax : int
        highest harmonic index to evaluate
    thetamax : float, optional. units degrees
        integral over costheta is in principle from -1 (180 deg) to +1
        but the function may be limited to much smaller than that

    Returns an array of lmax+1 coefficients, normalized so the l=0 term is 1.
    """
    from scipy.integrate import quad
    from scipy.special import legendre
    func = lambda x,n : f(np.sqrt(2*(1-x))) * legendre(n)(x)
    ctmin = np.cos(np.radians(thetamax))
    G = lambda n : quad(func, ctmin, 1, args=n)[0]  # note lower limit not -1
    coeffs = np.array([G(n) for n in range(lmax+1)])
    # normalize using the already-computed l=0 term instead of re-evaluating
    # the (expensive) quadrature G(0) a second time
    return coeffs / coeffs[0]
class TestPSFFT(object):
    """Test spherical harmonic decomposition of PSF
    """
    def __init__(self, event_type=0, energy=133, config_dir='.'):
        """ config_dir : string
                where to find a config.yaml file, to obtain the IRF. Can start with '$FERMI'
            energy : float
            event_type : int
                0 or 1 for front, back
        """
        from . import configuration
        config = configuration.Configuration(config_dir, quiet=True, postpone=True)
        irfname = config.irf
        # NOTE(review): this PSF is always front / 133 MeV; it is only used to
        # choose the integration limit below -- confirm that is intended
        psf = config.irfs.psf(0, 133)
        self.psf = config.irfs.psf(event_type, energy)
        self.label= 'PSF {} {} MeV'.format(['front', 'back'][event_type], energy)
        # python 2 print statement; trailing comma suppresses the newline
        print 'Evaluating the sherical harmonic content for {} {}...'.format(irfname,self.label),
        # integrate out to the 99.5% containment radius
        self.sh = spherical_harmonic(self.psf, 128, psf.inverse_integral(99.5));
        print

    def plot(self, psf_label='PSF Front 133 MeV', sig_deg=1.5):
        """Compare the PSF with a Gaussian, in angle space and harmonic space.

        psf_label : string label for the PSF curve
        sig_deg : sigma (degrees) of the comparison Gaussian
        """
        import matplotlib.pylab as plt
        sigma=np.radians(sig_deg)
        # analytic spherical-harmonic transform of a Gaussian of width sigma
        gsh =lambda el : np.exp(-0.5 * (el * (el + 1)) * sigma**2)
        fig, axx = plt.subplots(1,2, figsize=(8,4))
        glabel = '{} deg Gaussian'.format(sig_deg)
        ax=axx[0]
        f = lambda x: np.exp(-0.5*(x/sigma)**2)
        x=np.linspace(0,10,51)
        theta = np.radians(x)
        norm = self.psf(0)
        ax.plot(x, self.psf(theta)/norm, '-', label=self.label)
        ax.plot(x, f(theta), '-', label=glabel)
        ax.legend()
        ax.axhline(0, color='lightgray')
        ax.set_title('Function')
        ax.set_xlabel('displacement [deg.]')
        ax=axx[1]
        ax.plot(self.sh, '-', label=psf_label)
        ax.plot(map(gsh,range(128)), '-', label=glabel)
        ax.axhline(0, color='lightgray')
        ax.legend();
        ax.set_xlabel('Sperical harmonic')
        ax.set_title('Fourier Transform');
def convolve_healpix(input_map, func=None, sigma=None, thetamax=10 ):
    """
    Convolve a HEALPix map with a function of harmonic index, or a Gaussian.

    input_map : array of float
        a HEALPix array, RING indexing, nside a power of 2
    func : The function of an integer el | None
        returns the amplitude for spherical harmonic el
        example: for a Gaussian with sigma in radians:
           lambda el : np.exp(-0.5 * (el * (el + 1)) * sigma**2)
    sigma : None | float (deg)
        If not None, use gaussian for func
    thetamax : float
        used only for a sanity check on a user-supplied func

    Returns: the convolved map
    """
    import healpy
    nside = int(np.sqrt(len(input_map)/12))
    assert 12*nside**2 == len(input_map), 'Bad length: expect 12*nside**2'
    if func is None:
        assert sigma is not None, 'If no func, must specify sigma'
        func= lambda el : np.exp(-0.5 * (el * (el + 1)) * np.radians(sigma)**2)
    else:
        # sanity check: the kernel must be negligible by thetamax
        assert func(thetamax)/func(0) < 1e-3
    alm = healpy.map2alm(input_map)
    lmax = healpy.Alm.getlmax(len(alm))
    if lmax < 0:
        # Fixed: the original message referenced an undefined name 'ialm',
        # which raised NameError instead of the intended TypeError.
        raise TypeError('Wrong alm size (len(alm) = %d).' % len(alm))
    ell = np.arange(lmax + 1.)
    fact = np.array([func(x) for x in ell])
    # multiply each alm by the transform of the kernel, then invert
    healpy.almxfl(alm, fact, inplace=True)
    return healpy.alm2map(alm, nside=nside, verbose=False)
class SphericalHarmonicContent(object):
    """ This class is a functor, defining a function of the spherical harmonic index.
        The exact Legendre integral is expensive: it is sampled at a few values
        of l and interpolated in between.
    """
    def __init__(self, f, lmax, thetamax=45., tolerance=1e-3, quiet=True):
        """Evaluate spherical harmonic content of a function of theta.

        f : function of theta (radians)
        lmax : int
            largest harmonic index that will be requested
        thetamax : float
            limit (degrees) of the integral over cos theta
        tolerance : float or None
            convergence criterion for the adaptive choice of sample points;
            None disables the adaptive sampling
        quiet : bool
            if False, print sampling diagnostics
        """
        from scipy.integrate import quad
        from scipy.special import legendre
        func = lambda x,n : f(np.sqrt(2*(1-x))) * legendre(n)(x)
        ctmin = np.cos(np.radians(thetamax))
        # self.G closes over the local 'norm': it is 1 for the first call,
        # then set to the raw G(0) so later evaluations are normalized
        norm = 1
        self.G = lambda n : quad(func, ctmin, 1, args=n)[0]/norm  # note lower limit not -1
        norm = self.G(0)
        self.lmax = lmax
        self.fun = None
        self.values = []
        self.addpoint(0)
        self.addpoint(lmax)
        if tolerance is not None:
            self._approximate(tolerance, quiet=quiet)

    def addpoint(self, el, test=False):
        """Evaluate G(el) and add it to the interpolation table.

        If test is True, return the relative change of the interpolated
        value at el caused by adding the point.
        """
        if test:
            cvalue = self(el)
        self.values.append((el, self.G(el)))
        if self.fun is not None:
            self._setup_interpolation()
        if test: return self(el)/cvalue - 1

    def _setup_interpolation(self):
        # sort the sampled (el, value) pairs and (re)build the interpolator
        from scipy import interpolate
        t = np.array(self.values, dtype=[('el', float), ('value', float)])
        s = np.sort(t, order='el')
        self.el = s['el']; self.value = s['value']
        self.fun = interpolate.interp1d(s['el'], s['value'],
            kind='quadratic' if len(self.values) > 2 else 'linear')

    def __call__(self, ell):
        """
        ell : value or array of int
        returns the interpolating function output
        """
        if self.fun is None:
            self._setup_interpolation()
        return self.fun(ell)

    def _approximate(self, tolerance=1e-3, quiet=True):
        # Bisect downward from lmax/2, adding sample points until adding one
        # changes the interpolation by less than the requested tolerance.
        el = int(self.lmax/2)
        done = False
        while el > 2 and not done:
            x = self.addpoint(el, True)
            if not quiet:
                print('{}:{:.4f}'.format(el, x))  # parenthesized: valid on python 2 and 3
            # Fixed: was hard-coded abs(x) < 1e-3, silently ignoring the
            # 'tolerance' parameter passed in from __init__.
            done = abs(x) < tolerance
            el = el//2

    def plot(self, title='', ax=None):
        """Plot the evaluated sample points and the interpolation."""
        import matplotlib.pyplot as plt
        if ax is None: fig, ax = plt.subplots()
        ax.plot(self(np.arange(self.lmax+1)), '--', label='interpolation')
        ax.plot(self.el, self.value, 'o', label='evaluated')
        ax.set_xlabel('$l$');
        ax.set_ylim((0, 1.05))
        ax.set_title(title)
        ax.legend();
|
en
| 0.609807
|
Convolution interface for like2 Extends classes from uw.utilities $Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/like2/convolution.py,v 1.9 2018/01/27 15:37:17 burnett Exp $ author: <NAME> #from Science Tools: for SkyDir A Mixin class for like2 convolution, to replace functions in utilities.convolution Evaluate skyfun along the internal grid and return the resulting array. (Identical to superclass, except skyfun can be either a python functor or a C++ SkySkySpectrum) Evaluate product of exposure and diffuse map on the grid exp : SkyFunction for exposure dm : [SkyFuntion for diffuse map | None] If None, expect predetermined values in cache, which may be an array or a scalar #print 'filling with product of exposure "%s" model "%s"' % (exp, dm) #self.bg_vals = self.fill(exp) * (self.fill(dm) if cache is None else cache) #product of exposure and map #self.dm_vals = self.fill(dm) #temporary #self.exp_vals = self.fill(exp) # check for nans, replace with zeros if not full ROI Evaluate PSF on the grid #print 'filling with psf %s' % psf modify the npix with psf : PSF object edge: float --Source size (degrees) r_multi float multiple of r95 to set max dimension of grid r_max float an absolute maximum (half)-size of grid (deg) A mixin class to add or replace show methods Make a display. vals : 2-d array of float generated by the fill method; expect to be npix x npix npix : [int | None] if int, override self.npix to for central npix x npix Three subplots: PSF, raw, convolved Convolution used by response classes. This subclass uses the mixin classes defined here to: 1) changes the default for a bounds error (to check) 2) Replaces fill method with version that works for python class 3) provides useful show methods center -- a SkyDir giving the center of the grid on which to convolve bg kwargs are passed to Grid. # note do not use code in superclass needing psf, diffuse function Calculate spherical harmonics for a function f, l<=lmax thetamax : float, optionial. 
units degrees integral over costheta is in principle from -1 (180 deg) to +1 but the function may be limited to much smaller than that #note lower limit not -1 Test spherical harmonic decomposition of PSF config_dir : string where to find a config.jaml file, to obtain IRF. Can start with '$FERMI' energy : float event_type : int 0 or 1 for front, back Convolve a HEALPix map with a function, or Gaussian input_map : array of float a HEALPix array, RING indexing, nside a power of 2 func : The function of an integer el | None returns the amplitude for spherical harmonic el example: for a Gaussian with sigma in radians: lambda el : np.exp(-0.5 * (el * (el + 1)) * sigma**2) sigma : None | float (deg) If not None, use gaussian for func Returns: the convolved map This class is a functor, defining a function of the spherical harmonic index The integral is expensive: it samples the function Evaluate spherical harmonic content of a funtion of theta f : function lmax : int thetamax : limit integral over cos theta tolerance : paramter to adjust points to evaluate #note lower limit not -1 ell : value or array of int returns the interpolating function output
| 2.254826
| 2
|
rlx2nix/util.py
|
relacs/rlx2nix
| 0
|
6626668
|
<filename>rlx2nix/util.py
import re
import enum
import nixio as nix
class ValueType(enum.Enum):
    """Kinds of raw metadata value strings recognised by guess_value_type()."""
    floating = enum.auto()         # a real number, e.g. "3.14"
    integer = enum.auto()          # a whole number, e.g. "42"
    number_and_unit = enum.auto()  # a number followed by a unit, e.g. "10 mV"
    string = enum.auto()           # anything else
# Regular expressions used to classify raw metadata value strings.
only_number = re.compile("^([+-]?\\d+\\.?\\d*)$")    # an int or float and nothing else
integer_number = re.compile("^[+-]?\\d+$")           # strictly an integer
number_and_unit = re.compile("^(^[+-]?\\d*\\.?\\d*)\\s?\\w+%?(/\\w+)?$")  # number followed by a unit

# Units that can be stripped from a value string.
# Fixed: "ul" and "MOhm" were fused into the bogus entry "ulMOhm" by a
# missing comma (implicit string concatenation), so "MOhm" values were
# never recognised.
units = ["mV", "mV/cm", "sec", "ms", "min", "uS/cm", "C", "°C", "Hz", "kHz",
         "cm", "mm", "um", "mg/l", "ul", "MOhm", "g", "%"]

# Precompiled "number + unit" pattern for each known unit.
unit_pattern = {}
for unit in units:
    unit_pattern[unit] = re.compile(f"^(^[+-]?\\d+\\.?\\d*)\\s?{unit}$", re.IGNORECASE|re.UNICODE)
def guess_value_type(value_str):
    """Classify a raw string as integer, float, number-with-unit, or plain string."""
    if only_number.search(value_str):
        # it is purely numeric -- distinguish int from float
        return ValueType.integer if integer_number.search(value_str) else ValueType.floating
    if number_and_unit.search(value_str):
        return ValueType.number_and_unit
    return ValueType.string
def convert_value(val, val_type):
    """Cast *val* to int or float according to *val_type*; other types pass through unchanged."""
    casts = {ValueType.integer: int, ValueType.floating: float}
    converter = casts.get(val_type)
    return converter(val) if converter is not None else val
def parse_value(value_str):
    """Parse a raw string into (value, unit); unit is "" when absent.

    Numeric strings are converted to int/float; for "number + unit" strings
    the unit is split off and the numeric part converted; anything else is
    returned unchanged with an empty unit.
    """
    vt = guess_value_type(value_str)
    if vt in (ValueType.integer, ValueType.floating):
        return convert_value(value_str, vt), ""
    if vt == ValueType.number_and_unit:
        for u, pattern in unit_pattern.items():
            if pattern.search(value_str) is not None:
                numeric_part = value_str.split(u)[0]
                return convert_value(numeric_part, guess_value_type(numeric_part)), u
    # unknown unit or plain string: hand the original text back
    return value_str, ""
def odml2nix(odml_section, nix_section):
    """Recursively copy an odML section tree into a NIX section tree."""
    for prop in odml_section.props:
        vals = prop.values
        if len(vals) > 0:
            nix_prop = nix_section.create_property(prop.name, vals)
        else:
            # no values: create an empty string-typed property as a placeholder
            nix_prop = nix_section.create_property(prop.name, nix.DataType.String)
        if prop.unit is not None:
            nix_prop.unit = prop.unit
    for sub in odml_section.sections:
        # NIX section names may not contain '/', so substitute '_'
        safe_name = sub.name.replace("/", "_") if "/" in sub.name else sub.name
        odml2nix(sub, nix_section.create_section(safe_name, sub.type))
|
<filename>rlx2nix/util.py
import re
import enum
import nixio as nix
class ValueType(enum.Enum):
    """Kinds of raw metadata value strings recognised by guess_value_type()."""
    floating = enum.auto()         # a real number, e.g. "3.14"
    integer = enum.auto()          # a whole number, e.g. "42"
    number_and_unit = enum.auto()  # a number followed by a unit, e.g. "10 mV"
    string = enum.auto()           # anything else
# Regular expressions used to classify raw metadata value strings.
only_number = re.compile("^([+-]?\\d+\\.?\\d*)$")    # an int or float and nothing else
integer_number = re.compile("^[+-]?\\d+$")           # strictly an integer
number_and_unit = re.compile("^(^[+-]?\\d*\\.?\\d*)\\s?\\w+%?(/\\w+)?$")  # number followed by a unit

# Units that can be stripped from a value string.
# Fixed: "ul" and "MOhm" were fused into the bogus entry "ulMOhm" by a
# missing comma (implicit string concatenation), so "MOhm" values were
# never recognised.
units = ["mV", "mV/cm", "sec", "ms", "min", "uS/cm", "C", "°C", "Hz", "kHz",
         "cm", "mm", "um", "mg/l", "ul", "MOhm", "g", "%"]

# Precompiled "number + unit" pattern for each known unit.
unit_pattern = {}
for unit in units:
    unit_pattern[unit] = re.compile(f"^(^[+-]?\\d+\\.?\\d*)\\s?{unit}$", re.IGNORECASE|re.UNICODE)
def guess_value_type(value_str):
    """Classify a raw string as integer, float, number-with-unit, or plain string."""
    if only_number.search(value_str):
        # it is purely numeric -- distinguish int from float
        return ValueType.integer if integer_number.search(value_str) else ValueType.floating
    if number_and_unit.search(value_str):
        return ValueType.number_and_unit
    return ValueType.string
def convert_value(val, val_type):
    """Cast *val* to int or float according to *val_type*; other types pass through unchanged."""
    casts = {ValueType.integer: int, ValueType.floating: float}
    converter = casts.get(val_type)
    return converter(val) if converter is not None else val
def parse_value(value_str):
    """Parse a raw string into (value, unit); unit is "" when absent.

    Numeric strings are converted to int/float; for "number + unit" strings
    the unit is split off and the numeric part converted; anything else is
    returned unchanged with an empty unit.
    """
    vt = guess_value_type(value_str)
    if vt in (ValueType.integer, ValueType.floating):
        return convert_value(value_str, vt), ""
    if vt == ValueType.number_and_unit:
        for u, pattern in unit_pattern.items():
            if pattern.search(value_str) is not None:
                numeric_part = value_str.split(u)[0]
                return convert_value(numeric_part, guess_value_type(numeric_part)), u
    # unknown unit or plain string: hand the original text back
    return value_str, ""
def odml2nix(odml_section, nix_section):
    """Recursively copy an odML section tree into a NIX section tree."""
    for prop in odml_section.props:
        vals = prop.values
        if len(vals) > 0:
            nix_prop = nix_section.create_property(prop.name, vals)
        else:
            # no values: create an empty string-typed property as a placeholder
            nix_prop = nix_section.create_property(prop.name, nix.DataType.String)
        if prop.unit is not None:
            nix_prop.unit = prop.unit
    for sub in odml_section.sections:
        # NIX section names may not contain '/', so substitute '_'
        safe_name = sub.name.replace("/", "_") if "/" in sub.name else sub.name
        odml2nix(sub, nix_section.create_section(safe_name, sub.type))
|
none
| 1
| 2.937664
| 3
|
|
msticpy/nbtools/entityschema.py
|
roopeshvs/msticpy
| 4
|
6626669
|
<filename>msticpy/nbtools/entityschema.py<gh_stars>1-10
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
entityschema module.
Module for V3 Entities class
"""
import pprint
from abc import ABC, abstractmethod
from enum import Enum
from ipaddress import IPv4Address, IPv6Address, ip_address
from typing import Any, Dict, Mapping, Type, Union
from .._version import VERSION
from ..common.utility import export
__version__ = VERSION
__author__ = "<NAME>"
_ENTITY_ENUMS: Dict[str, Type] = {}
# pylint: disable=too-many-lines, invalid-name
# pylint: disable=too-many-instance-attributes
@export
class Entity(ABC):
    """
    Entity abstract base class.

    Implements common methods for Entity classes
    """

    # Populated at the end of the module, once the concrete entity classes
    # exist: maps the "Type" string of a raw entity to its class.
    ENTITY_NAME_MAP: Dict[str, Type] = {}
    # Per-class schema: maps a property name to the name of an entity class
    # or enum, a tuple (for collections), or None (plain value).
    _entity_schema: Dict[str, Any] = {}

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of an entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            If src_entity is supplied it attempts to extract common
            properties from the source entity and assign them to
            the new instance. (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.
        """
        self.Type = type(self).__name__.lower()
        # If we have an unknown entity, see if we have a type passed in
        if self.Type == "unknownentity" and "Type" in kwargs:
            self.Type = kwargs["Type"]
        # Make sure Type is in the class schema dictionary
        self._entity_schema["Type"] = None
        # if we didn't populate AdditionalData, add an empty dict in case it's
        # needed
        if "AdditionalData" not in self:
            self["AdditionalData"] = {}
        if src_entity is not None:
            self._extract_src_entity(src_entity)
            # add AdditionalData dictionary if it's populated
            if "AdditionalData" in src_entity:
                self["AdditionalData"] = src_entity["AdditionalData"]
        if kwargs:
            self.__dict__.update(kwargs)

    def _extract_src_entity(self, src_entity: Mapping[str, Any]):
        """
        Extract source entity properties.

        Parameters
        ----------
        src_entity : Mapping[str, Any]
            The source mappable object from which to
            extract entity properties.
        """
        schema_dict = dict(**(self._entity_schema))
        schema_dict["Type"] = None
        for k, v in schema_dict.items():
            if k not in src_entity:
                continue
            self[k] = src_entity[k]
            if v is not None:
                try:
                    # If the property is an enum
                    if v in _ENTITY_ENUMS:
                        self[k] = _ENTITY_ENUMS[v][src_entity[k]]
                        continue
                except KeyError:
                    # Catch key errors from invalid enum values
                    self[k] = None
                    # NOTE(review): there is no 'continue' after this handler,
                    # so control falls through and the invalid value is handed
                    # to instantiate_entity below -- confirm this is intended.
                if isinstance(v, tuple):
                    # if the property is a collection
                    entity_list = []
                    for col_entity in src_entity[k]:
                        entity_list.append(Entity.instantiate_entity(col_entity))
                    self[k] = entity_list
                else:
                    # else try to instantiate an entity
                    self[k] = Entity.instantiate_entity(src_entity[k])

    def __getitem__(self, key: str):
        """Allow property get using dictionary key syntax."""
        if key in self.__dict__:
            return self.__dict__[key]
        if key in self._entity_schema:
            # schema-declared but unset properties read as None
            return None
        raise KeyError

    def __setitem__(self, key: str, value: Any):
        """Allow property set using dictionary key syntax."""
        self.__dict__[key] = value

    def __contains__(self, key: str):
        """Allow property in test."""
        # In operator overload
        return key in self.__dict__

    def __getattr__(self, name: str):
        """Return the value of the named property 'name'."""
        # only reached when normal attribute lookup fails: schema-declared
        # properties default to None
        if name in self._entity_schema:
            return None
        raise AttributeError(f"{name} is not a valid attribute.")

    def __iter__(self):
        """Iterate over entity_properties."""
        return iter(self.properties)

    def __len__(self) -> int:
        """Return length/number of entity_properties."""
        return len(self.properties)

    def __str__(self) -> str:
        """Return string representation of entity."""
        return pprint.pformat(self._to_dict(self), indent=2, width=100)

    def __repr__(self) -> str:
        """Return repr of entity."""
        params = ", ".join(
            [f"{name}={val}" for name, val in self.properties.items() if val]
        )
        if len(params) > 80:
            # truncate long reprs for readability
            params = params[:80] + "..."
        return f"{self.__class__.__name__}({params})"

    def _to_dict(self, entity) -> dict:
        """Return as simple nested dictionary."""
        ent_dict = {}
        for prop, val in entity.properties.items():
            if val is not None:
                if isinstance(val, Entity):
                    # recurse into nested entities
                    ent_dict[prop] = self._to_dict(val)
                else:
                    ent_dict[prop] = val
        return ent_dict

    def _repr_html_(self) -> str:
        """
        Display entity in IPython/Notebook.

        Returns
        -------
        HTML
            IPython HTML object

        """
        return self.to_html()

    def to_html(self) -> str:
        """
        Return HTML representation of entity.

        Returns
        -------
        str
            HTML representation of entity

        """
        e_text = str(self)
        e_type = self.Type
        # NOTE(review): the second replace appears to substitute a space with
        # a non-breaking space; the replacement character may have been
        # mangled in this copy -- verify against the upstream source.
        e_text = e_text.replace("\n", "<br>").replace(" ", " ")
        return f"<h3>{e_type}</h3>{e_text}"

    @property
    def properties(self) -> dict:
        """
        Return dictionary properties of entity.

        Returns
        -------
        dict
            Entity properties (all public instance attributes).

        """
        return {
            name: value
            for name, value in self.__dict__.items()
            if not name.startswith("_")
        }

    @property
    @abstractmethod
    def description_str(self) -> str:
        """
        Return Entity Description.

        Returns
        -------
        str
            Entity description (optional). If not overridden
            by the Entity instance type, it will return the
            Type string.

        """
        return self.Type

    # pylint: disable=bad-continuation, too-many-branches
    @classmethod
    def instantiate_entity(  # noqa: C901
        cls, raw_entity: Mapping[str, Any]
    ) -> Union["Entity", Mapping[str, Any]]:
        """
        Class factory to return entity from raw dictionary representation.

        Parameters
        ----------
        raw_entity : Mapping[str, Any]
            A mapping object (e.g. dictionary or pandas Series)
            that contains the properties of the entity.

        Returns
        -------
        Entity
            The instantiated entity

        """
        if "Type" not in raw_entity:
            # not a recognizable entity: hand the raw mapping back unchanged
            return raw_entity
        entity_type = raw_entity["Type"]
        # We get an undefined-variable warning here. _ENTITY_NAME_MAP
        # is not defined/populated until end of module since it needs
        # entity
        if entity_type in cls.ENTITY_NAME_MAP:
            return cls.ENTITY_NAME_MAP[entity_type](raw_entity)
        raise TypeError("Could not find a suitable type for {}".format(entity_type))
@export
class Account(Entity):
    """
    Account Entity class.

    Attributes
    ----------
    Name : str
        Account Name
    NTDomain : str
        Account NTDomain
    UPNSuffix : str
        Account UPNSuffix
    Host : Host
        Account Host
    LogonId : str
        Account LogonId (deprecated)
    Sid : str
        Account Sid
    AadTenantId : str
        Account AadTenantId
    AadUserId : str
        Account AadUserId
    PUID : str
        Account PUID
    IsDomainJoined : bool
        Account IsDomainJoined
    DisplayName : str
        Account DisplayName

    """

    def __init__(
        self,
        src_entity: Mapping[str, Any] = None,
        src_event: Mapping[str, Any] = None,
        role: str = "subject",
        **kwargs,
    ):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing Account entity or
            other mapping object that implements entity properties.
            (the default is None)
        src_event : Mapping[str, Any], optional
            Create entity from event properties
            (the default is None)
        role : str, optional
            'subject' or 'target' - only relevant if the entity
            is being constructed from an event.
            (the default is 'subject')

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        # pylint: disable=locally-disabled, line-too-long
        super().__init__(src_entity=src_entity, **kwargs)
        if src_event is not None:
            # Subject/target fields follow the Windows security event
            # schema (SubjectUser* / TargetUser* columns).
            if role == "subject" and "SubjectUserName" in src_event:
                self.Name = src_event["SubjectUserName"]
                self.NTDomain = (
                    src_event["SubjectUserDomain"]
                    if "SubjectUserDomain" in src_event
                    else None
                )
                self.Sid = (
                    src_event["SubjectUserSid"]
                    if "SubjectUserSid" in src_event
                    else None
                )
                self.LogonId = (
                    src_event["SubjectLogonId"]
                    if "SubjectLogonId" in src_event
                    else None
                )
            if role == "target" and "TargetUserName" in src_event:
                self.Name = src_event["TargetUserName"]
                self.NTDomain = (
                    src_event["TargetUserDomain"]
                    if "TargetUserDomain" in src_event
                    else None
                )
                self.Sid = (
                    src_event["TargetUserSid"] if "TargetUserSid" in src_event else None
                )
                self.LogonId = (
                    src_event["TargetLogonId"] if "TargetLogonId" in src_event else None
                )
            # AAD-related fields are populated regardless of role.
            self.AadTenantId = (
                src_event["AadTenantId"] if "AadTenantId" in src_event else None
            )
            self.AadUserId = (
                src_event["AadUserId"] if "AadUserId" in src_event else None
            )
            self.PUID = src_event["PUID"] if "PUID" in src_event else None
            self.DisplayName = (
                src_event["DisplayName"] if "DisplayName" in src_event else None
            )
            self.UPNSuffix = (
                src_event["UPNSuffix"] if "UPNSuffix" in src_event else None
            )
        # pylint: enable=locally-disabled, line-too-long

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return self.qualified_name

    @property
    def qualified_name(self) -> str:
        """Windows qualified account name."""
        # BUG FIX: previously `name` was bound only inside an
        # `if "Name" in self:` check, so accounts without a Name raised
        # UnboundLocalError on the final `return name`. Return an empty
        # string when no Name is available.
        name = self["Name"] if "Name" in self else None
        if not name:
            return ""
        if "NTDomain" in self and self.NTDomain:
            return "{}\\{}".format(self.NTDomain, name)
        if "UPNSuffix" in self and self.UPNSuffix:
            return "{}@{}".format(name, self.UPNSuffix)
        if "Host" in self and self.Host:
            return "{}\\{}".format(self.Host.HostName, name)
        return name

    _entity_schema = {
        # Name (type System.String)
        "Name": None,
        # NTDomain (type System.String)
        "NTDomain": None,
        # UPNSuffix (type System.String)
        "UPNSuffix": None,
        # Host (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.Host)
        "Host": "Host",
        # LogonId (type System.String)
        "LogonId": None,
        # Sid (type System.String)
        "Sid": None,
        # AadTenantId (type System.Nullable`1[System.Guid])
        "AadTenantId": None,
        # AadUserId (type System.Nullable`1[System.Guid])
        "AadUserId": None,
        # PUID (type System.Nullable`1[System.Guid])
        "PUID": None,
        # IsDomainJoined (type System.Nullable`1[System.Boolean])
        "IsDomainJoined": None,
        # DisplayName (type System.String)
        "DisplayName": None,
    }
@export
class SecurityGroup(Entity):
    """
    SecurityGroup Entity class.

    Attributes
    ----------
    DistinguishedName : str
        SecurityGroup DistinguishedName
    SID : str
        SecurityGroup SID
    ObjectGuid : str
        SecurityGroup ObjectGuid

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Instantiate a SecurityGroup entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Existing entity or other mapping object from which to
            copy entity properties. (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self):
        """Return Entity Description."""
        return self.DistinguishedName

    # Property schema: value names the entity class used to build
    # complex sub-entities; None means a plain value.
    _entity_schema = {
        # DistinguishedName (type System.String)
        "DistinguishedName": None,
        # SID (type System.String)
        "SID": None,
        # ObjectGuid (type System.String)
        "ObjectGuid": None,
    }
@export
class HostLogonSession(Entity):
    """
    HostLogonSession Entity class.

    Attributes
    ----------
    Account : Account
        HostLogonSession Account
    StartTimeUtc : datetime
        HostLogonSession StartTimeUtc
    EndTimeUtc : datetime
        HostLogonSession EndTimeUtc
    Host : Host
        HostLogonSession Host
    SessionId : str
        HostLogonSession SessionId

    """

    def __init__(
        self,
        src_entity: Mapping[str, Any] = None,
        src_event: Mapping[str, Any] = None,
        **kwargs,
    ):
        """
        Instantiate a HostLogonSession entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Existing entity or other mapping object from which to
            copy entity properties. (the default is None)
        src_event : Mapping[str, Any], optional
            Event properties from which to build the entity.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)
        if src_event is not None:
            # Prefer the normalized timestamp column; fall back to the
            # raw Log Analytics TimeGenerated column.
            if "TimeCreatedUtc" in src_event:
                self.StartTimeUtc = src_event["TimeCreatedUtc"]
            elif "TimeGenerated" in src_event:
                self.StartTimeUtc = src_event["TimeGenerated"]
            self.EndTimeUtc = self.StartTimeUtc
            self.SessionId = (
                src_event["TargetLogonId"] if "TargetLogonId" in src_event else None
            )

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.Host.HostName}: session: {self.SessionId}"

    _entity_schema = {
        # Account
        "Account": "Account",
        # StartTimeUtc (type System.Nullable`1[System.DateTime])
        "StartTimeUtc": None,
        # EndTimeUtc (type System.Nullable`1[System.DateTime])
        "EndTimeUtc": None,
        # Host
        "Host": "Host",
        # SessionId (type System.String)
        "SessionId": None,
    }
@export
class CloudApplication(Entity):
    """
    CloudApplication Entity class.

    Attributes
    ----------
    Name : str
        CloudApplication Name

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Instantiate a CloudApplication entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Existing entity or other mapping object from which to
            copy entity properties. (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return self.Name

    _entity_schema = {
        # Name (type System.String)
        "Name": None
    }
@export
class DnsResolve(Entity):
    """
    DNS Resolve Entity class.

    Attributes
    ----------
    DomainName : str
        DnsResolve DomainName
    IpAdresses : List[str]
        DnsResolve IpAdresses
    DnsServerIp : IPAddress
        DnsResolve DnsServerIp
    HostIpAddress : IPAddress
        DnsResolve HostIpAddress

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Instantiate a DnsResolve entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Existing entity or other mapping object from which to
            copy entity properties. (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.DomainName}: IPs: {repr(self.IpAdresses)}"

    # NOTE: "IpAdresses" (sic) is the property name used by the upstream
    # alert contract - the misspelling is part of the public schema and
    # must not be corrected here.
    _entity_schema = {
        # DomainName (type System.String)
        "DomainName": None,
        # IpAdresses (type System.Collections.Generic.List`1
        # [Microsoft.Azure.Security.Detection.AlertContracts.V3.Entities.IP])
        "IpAdresses": None,
        # DnsServerIp (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.IP)
        "DnsServerIp": "IPAddress",
        # HostIpAddress (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.IP)
        "HostIpAddress": "IPAddress",
    }
@export
class File(Entity):
    """
    File Entity class.

    Attributes
    ----------
    FullPath : str
        File FullPath
    Directory : str
        File Directory
    Name : str
        File Name
    Md5 : str
        File Md5
    Host : str
        File Host
    Sha1 : str
        File Sha1
    Sha256 : str
        File Sha256
    Sha256Ac : str
        File Sha256Ac
    FileHashes : List[FileHash]
        File FileHashes

    """

    def __init__(
        self,
        src_entity: Mapping[str, Any] = None,
        src_event: Mapping[str, Any] = None,
        role: str = "new",
        **kwargs,
    ):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)
        src_event : Mapping[str, Any], optional
            Create entity from event properties
            (the default is None)
        role : str, optional
            'new' or 'parent' - only relevant if the entity
            is being constructed from an event.
            (the default is 'new')

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)
        if src_event is not None:
            if role == "new" and "NewProcessName" in src_event:
                self._add_paths(src_event["NewProcessName"])
            elif role == "parent" and "ParentProcessName" in src_event:
                self._add_paths(src_event["ParentProcessName"])
        if "FullPath" not in self:
            # Rebuild FullPath from Directory + Name.
            # BUG FIX: the original used `sep = ... if directory else None`
            # and then interpolated both into an f-string, producing the
            # literal text "None" in paths with no directory part.
            file_name = self["Name"]
            directory = self["Directory"]
            if directory:
                self["FullPath"] = f"{directory}{self.path_separator}{file_name}"
            else:
                self["FullPath"] = f"{file_name}" if file_name else ""

    @property
    def path_separator(self):
        """Return the path separator used by the file."""
        directory = self["Directory"]
        if directory and "/" in directory:
            return "/"
        return "\\"

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return self.FullPath

    _entity_schema = {
        # FullPath (type System.String)
        "FullPath": None,
        # Directory (type System.String)
        "Directory": None,
        # Name (type System.String)
        "Name": None,
        # Md5 (type System.String)
        "Md5": None,
        # Host (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.Host)
        "Host": None,
        # Sha1 (type System.String)
        "Sha1": None,
        # Sha256 (type System.String)
        "Sha256": None,
        # Sha256Ac (type System.String)
        "Sha256Ac": None,
        "FileHashes": (list, "FileHash"),
    }

    def _add_paths(self, full_path):
        # Derive separator/OS family, then split the path into its parts.
        if "/" in full_path:
            self.PathSeparator = "/"
            self.OSFamily = OSFamily.Linux
        else:
            self.PathSeparator = "\\"
            self.OSFamily = OSFamily.Windows
        self.FullPath = full_path
        path_parts = full_path.split(self.PathSeparator)
        self.Name = path_parts[-1]
        # BUG FIX: Directory was previously assigned the *list* of path
        # components (split(...)[:-1]) instead of the directory string,
        # which broke path_separator and FullPath reconstruction.
        self.Directory = self.PathSeparator.join(path_parts[:-1])
@export
class FileHash(Entity):
    """
    File Hash class.

    Attributes
    ----------
    Algorithm : Algorithm
        FileHash Algorithm
    Value : str
        FileHash Value

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Instantiate a FileHash entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Existing entity or other mapping object from which to
            copy entity properties. (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.Algorithm}: {self.Value}"

    _entity_schema = {
        # The hash algorithm (type System.String)
        "Algorithm": "Algorithm",
        # Value (type System.String)
        "Value": None,
    }
@export
class Algorithm(Enum):
    """FileHash Algorithm Enumeration."""

    # Hash algorithm identifiers referenced by FileHash.Algorithm.
    Unknown = 0
    MD5 = 1
    SHA1 = 2
    SHA256 = 3
    SHA256AC = 4


# Register the enum by name so entity extraction can instantiate it
# from its string type name.
_ENTITY_ENUMS[Algorithm.__name__] = Algorithm
@export
class Host(Entity):
    """
    Host Entity class.

    Attributes
    ----------
    DnsDomain : str
        Host DnsDomain
    NTDomain : str
        Host NTDomain
    HostName : str
        Host HostName
    NetBiosName : str
        Host NetBiosName
    AzureID : str
        Host AzureID
    OMSAgentID : str
        Host OMSAgentID
    OSFamily : str
        Host OSFamily
    IsDomainJoined : bool
        Host IsDomainJoined

    """

    def __init__(
        self,
        src_entity: Mapping[str, Any] = None,
        src_event: Mapping[str, Any] = None,
        **kwargs,
    ):
        """
        Instantiate a Host entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Existing entity or other mapping object from which to
            copy entity properties. (the default is None)
        src_event : Mapping[str, Any], optional
            Event properties from which to build the entity.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)
        self._computer = None
        if src_event is not None and "Computer" in src_event:
            computer_name = src_event["Computer"]
            self._computer = computer_name
            # Split a dotted computer name into host + DNS domain.
            if "." in computer_name:
                host_part, domain_part = computer_name.split(".", 1)
                self.HostName = host_part
                self.DnsDomain = domain_part
            else:
                self.HostName = computer_name
            self.NetBiosName = self.HostName

    @property
    def computer(self) -> str:
        """Return computer from source event."""
        if self._computer is not None:
            return self._computer
        return self.fqdn

    @property
    def fqdn(self) -> str:
        """Construct FQDN from host + dns."""
        if not self.DnsDomain:
            return self.HostName
        return f"{self.HostName}.{self.DnsDomain}"

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.fqdn} ({self.OSFamily})"

    _entity_schema = {
        # DnsDomain (type System.String)
        "DnsDomain": None,
        # NTDomain (type System.String)
        "NTDomain": None,
        # HostName (type System.String)
        "HostName": None,
        # NetBiosName (type System.String)
        "NetBiosName": None,
        # AzureID (type System.String)
        "AzureID": None,
        # OMSAgentID (type System.String)
        "OMSAgentID": None,
        # OSFamily (type System.Nullable`1
        # [Microsoft.Azure.Security.Detection.AlertContracts.V3.Entities.OSFamily])
        "OSFamily": None,
        # IsDomainJoined (type System.Nullable`1[System.Boolean])
        "IsDomainJoined": None,
    }
@export
class IpAddress(Entity):
    """
    IPAddress Entity class.

    Attributes
    ----------
    Address : str
        IpAddress Address
    Location : GeoLocation
        IpAddress Location
    ThreatIntelligence : List[ThreatIntelligence]
        IpAddress ThreatIntelligence

    """

    def __init__(
        self,
        src_entity: Mapping[str, Any] = None,
        src_event: Mapping[str, Any] = None,
        **kwargs,
    ):
        """
        Instantiate an IpAddress entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Existing entity or other mapping object from which to
            copy entity properties. (the default is None)
        src_event : Mapping[str, Any], optional
            Event properties from which to build the entity.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)
        if src_event is not None and "IpAddress" in src_event:
            self.Address = src_event["IpAddress"]

    @property
    def ip_address(self) -> Union[IPv4Address, IPv6Address]:
        """Return a python ipaddress object from the entity property."""
        # Delegates parsing/validation to the stdlib ipaddress module.
        return ip_address(self["Address"])

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return self.Address

    _entity_schema = {
        # Address (type System.String)
        "Address": None,
        # Location (type Microsoft.Azure.Security.Detection.AlertContracts
        # .V3.ContextObjects.GeoLocation)
        "Location": "GeoLocation",
        # ThreatIntelligence (type System.Collections.Generic.List`1
        # [Microsoft.Azure.Security.Detection.AlertContracts.V3
        # .ContextObjects.ThreatIntelligence])
        "ThreatIntelligence": (list, "Threatintelligence"),
    }
@export
class GeoLocation(Entity):
    """
    GeoLocation class.

    Attributes
    ----------
    CountryCode : str
        GeoLocation CountryCode
    CountryName : str
        GeoLocation CountryName
    State : str
        GeoLocation State
    City : str
        GeoLocation City
    Longitude : float
        GeoLocation Longitude
    Latitude : float
        GeoLocation Latitude
    Asn : str
        GeoLocation Asn

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Instantiate a GeoLocation entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Existing entity or other mapping object from which to
            copy entity properties. (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.CountryCode}; {self.State}; {self.City}"

    _entity_schema = {
        # str
        "CountryCode": None,
        # str
        "CountryName": None,
        # str
        "State": None,
        # str
        "City": None,
        # double?
        "Longitude": None,
        # double?
        "Latitude": None,
        # int
        "Asn": None,
    }
@export
class Malware(Entity):
    """
    Malware Entity class.

    Attributes
    ----------
    Name : str
        Malware Name
    Category : str
        Malware Category
    File : File
        Malware File
    Files : List[File]
        Malware Files
    Processes : List[Process]
        Malware Processes

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Instantiate a Malware entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Existing entity or other mapping object from which to
            copy entity properties. (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.Name}: {self.Category}"

    _entity_schema = {
        # Name (type System.String)
        "Name": None,
        # Category (type System.String)
        "Category": None,
        # File (type Microsoft.Azure.Security.Detection.AlertContracts.V3.Entities.File)
        "File": "File",
        "Files": (list, "File"),
        "Processes": (list, "Process"),
    }
@export
class NetworkConnection(Entity):
    """
    NetworkConnection Entity class.

    Attributes
    ----------
    SourceAddress : IPAddress
        NetworkConnection SourceAddress
    SourcePort : int
        NetworkConnection SourcePort
    DestinationAddress : IPAddress
        NetworkConnection DestinationAddress
    DestinationPort : int
        NetworkConnection DestinationPort
    Protocol : str
        NetworkConnection Protocol

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Instantiate a NetworkConnection entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Existing entity or other mapping object from which to
            copy entity properties. (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        # Format: "src:port [proto]-> dst:port"
        return "{}:{} [{}]-> {}:{}".format(
            self.SourceAddress,
            self.SourcePort,
            self.Protocol,
            self.DestinationAddress,
            self.DestinationPort,
        )

    _entity_schema = {
        # SourceAddress (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.IP)
        "SourceAddress": "IPAddress",
        # SourcePort (type System.Nullable`1[System.Int32])
        "SourcePort": None,
        # DestinationAddress (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.IP)
        "DestinationAddress": "IPAddress",
        # DestinationPort (type System.Nullable`1[System.Int32])
        "DestinationPort": None,
        # Protocol (type System.Nullable`1[System.Net.Sockets.ProtocolType])
        "Protocol": None,
    }
@export
class Process(Entity):
    """
    Process Entity class.

    Attributes
    ----------
    ProcessId : str
        Process ProcessId
    CommandLine : str
        Process CommandLine
    ElevationToken : str
        Process ElevationToken
    CreationTimeUtc : datetime
        Process CreationTimeUtc
    ImageFile : File
        Process ImageFile
    Account : Account
        Process Account
    ParentProcess : Process
        Process ParentProcess
    Host : Host
        Process Host
    LogonSession : HostLogonSession
        Process LogonSession

    """

    def __init__(
        self,
        src_entity: Mapping[str, Any] = None,
        src_event: Mapping[str, Any] = None,
        role="new",
        **kwargs,
    ):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)
        src_event : Mapping[str, Any], optional
            Create entity from event properties
            (the default is None)
        role : str, optional
            'new' or 'parent' - only relevant if the entity
            is being constructed from an event.
            (the default is 'new')

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)
        # pylint: disable=locally-disabled, line-too-long
        if src_event is not None:
            if role == "new":
                self.ProcessId = (
                    src_event["NewProcessId"] if "NewProcessId" in src_event else None
                )
                self.CommandLine = (
                    src_event["CommandLine"] if "CommandLine" in src_event else None
                )
                if "TimeCreatedUtc" in src_event:
                    self.CreationTimeUtc = src_event["TimeCreatedUtc"]
                elif "TimeGenerated" in src_event:
                    self.CreationTimeUtc = src_event["TimeGenerated"]
                # FIX: removed a second, byte-identical assignment of
                # self.ProcessId from "NewProcessId" that duplicated the
                # one above with no effect.
                self.ImageFile = File(src_event=src_event, role="new")
                self.Account = Account(src_event=src_event, role="subject")
                if "ParentProcessName" in src_event or "ProcessName" in src_event:
                    parent = Process(src_event=src_event, role="parent")
                    self.ParentProcess = parent
                # Linux (auditd) event properties
                self.success = src_event["success"] if "success" in src_event else None
                self.audit_user = (
                    src_event["audit_user"] if "audit_user" in src_event else None
                )
                self.auid = src_event["auid"] if "auid" in src_event else None
                self.group = src_event["group"] if "group" in src_event else None
                self.gid = src_event["gid"] if "gid" in src_event else None
                self.effective_user = (
                    src_event["effective_user"]
                    if "effective_user" in src_event
                    else None
                )
                self.euid = src_event["euid"] if "euid" in src_event else None
                self.effective_group = (
                    src_event["effective_group"]
                    if "effective_group" in src_event
                    else None
                )
                # NOTE(review): egid is sourced from "effective_group",
                # not an "egid" field - looks like a copy-paste slip but
                # is preserved; confirm against the auditd event schema.
                self.egid = (
                    src_event["effective_group"]
                    if "effective_group" in src_event
                    else None
                )
                self.cwd = src_event["cwd"] if "cwd" in src_event else None
                # NOTE(review): name is sourced from "cwd" - almost
                # certainly a copy-paste error (a process-name field was
                # probably intended); preserved to avoid a behavior
                # change - confirm the intended source column.
                self.name = src_event["cwd"] if "cwd" in src_event else None
            else:
                self.ProcessId = (
                    src_event["ProcessId"] if "ProcessId" in src_event else None
                )
                self.ImageFile = File(src_event=src_event, role="parent")
        # pylint: enable=locally-disabled, line-too-long

    @property
    def ProcessName(self) -> str:  # noqa: N802
        """Return the name of the process file."""
        file = self["ImageFile"]
        return file.Name if file else None

    @property
    def ProcessFilePath(self) -> str:  # noqa: N802
        """Return the name of the process file path."""
        file = self["ImageFile"]
        return file.FullPath if file else None

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.ProcessFilePath}: {self.CommandLine}"

    _entity_schema = {
        # ProcessId (type System.String)
        "ProcessId": None,
        # CommandLine (type System.String)
        "CommandLine": None,
        # ElevationToken (type System.Nullable`1
        # [Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.ElevationToken])
        "ElevationToken": None,
        # CreationTimeUtc (type System.Nullable`1[System.DateTime])
        "CreationTimeUtc": None,
        # ImageFile (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.File)
        "ImageFile": "File",
        # Account (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.Account)
        "Account": "Account",
        # ParentProcess (type Microsoft.Azure.Security.Detection.AlertContracts
        # .V3.Entities.Process)
        "ParentProcess": "Process",
        # Host (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.Host)
        "Host": "Host",
        # LogonSession (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.HostLogonSession)
        "LogonSession": "HostLogonSession",
    }
@export
class RegistryHive(Enum):
    """RegistryHive enumeration."""

    # Windows registry hive identifiers (values mirror the upstream
    # alert-contract RegistryHive enum).
    # HKEY_LOCAL_MACHINE
    HKEY_LOCAL_MACHINE = 0
    # HKEY_CLASSES_ROOT
    HKEY_CLASSES_ROOT = 1
    # HKEY_CURRENT_CONFIG
    HKEY_CURRENT_CONFIG = 2
    # HKEY_USERS
    HKEY_USERS = 3
    # HKEY_CURRENT_USER_LOCAL_SETTINGS
    HKEY_CURRENT_USER_LOCAL_SETTINGS = 4
    # HKEY_PERFORMANCE_DATA
    HKEY_PERFORMANCE_DATA = 5
    # HKEY_PERFORMANCE_NLSTEXT
    HKEY_PERFORMANCE_NLSTEXT = 6
    # HKEY_PERFORMANCE_TEXT
    HKEY_PERFORMANCE_TEXT = 7
    # HKEY_A
    HKEY_A = 8
    # HKEY_CURRENT_USER
    HKEY_CURRENT_USER = 9


# Register the enum by name so entity extraction can instantiate it
# from its string type name.
_ENTITY_ENUMS[RegistryHive.__name__] = RegistryHive
@export
class RegistryKey(Entity):
    """
    RegistryKey Entity class.

    Attributes
    ----------
    Hive : RegistryHive
        RegistryKey Hive
    Key : str
        RegistryKey Key

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Instantiate a RegistryKey entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Existing entity or other mapping object from which to
            copy entity properties. (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.Hive}\\{self.Key}"

    _entity_schema = {
        # Hive (type System.Nullable`1
        # [Microsoft.Azure.Security.Detection.AlertContracts.V3.Entities.RegistryHive])
        "Hive": "RegistryHive",
        # Key (type System.String)
        "Key": None,
    }
# CONSISTENCY FIX: RegistryValue was the only entity class missing the
# @export decorator, so it was omitted from the module's public exports.
@export
class RegistryValue(Entity):
    """
    RegistryValue Entity class.

    Attributes
    ----------
    Key : str
        RegistryValue Key
    Name : str
        RegistryValue Name
    Value : str
        RegistryValue Value
    ValueType : str
        RegistryValue ValueType

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.Name}[{self.ValueType}]:{repr(self.Value)}"

    _entity_schema = {
        # Key (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.RegistryKey)
        "Key": None,
        # Name (type System.String)
        "Name": None,
        # Value (type System.String)
        "Value": None,
        # ValueType (type System.Nullable`1[Microsoft.Win32.RegistryValueKind])
        "ValueType": None,
    }
@export
class OSFamily(Enum):
    """OSFamily enumeration."""

    # Operating-system family of a host or file path.
    Linux = 0
    Windows = 1


# Register the enum by name so entity extraction can instantiate it
# from its string type name.
_ENTITY_ENUMS[OSFamily.__name__] = OSFamily
@export
class ElevationToken(Enum):
    """ElevationToken enumeration."""

    # Windows process elevation token levels.
    Default = 0
    Full = 1
    Limited = 2


# Register the enum by name so entity extraction can instantiate it
# from its string type name.
_ENTITY_ENUMS[ElevationToken.__name__] = ElevationToken
@export
class AzureResource(Entity):
    """
    AzureResource Entity class.

    Attributes
    ----------
    ResourceId : str
        AzureResource ResourceId
    SubscriptionId : str
        AzureResource SubscriptionId
    ResourceIdParts : Dict[str, str]
        AzureResource ResourceIdParts

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Instantiate an AzureResource entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Existing entity or other mapping object from which to
            copy entity properties. (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return self.ResourceId

    _entity_schema = {
        # ResourceId (type System.String)
        "ResourceId": None,
        # SubscriptionId (type System.String)
        "SubscriptionId": None,
        # ResourceIdParts (type System.Collections.Generic.IReadOnlyDictionary`2
        # [System.String,System.String])
        "ResourceIdParts": None,
    }
@export
class Alert(Entity):
    """
    Alert Entity class.

    Attributes
    ----------
    DisplayName : str
        Alert DisplayName
    CompromisedEntity : str
        Alert CompromisedEntity
    Count : int
        Alert Count
    StartTimeUtc : datetime
        Alert StartTimeUtc
    EndTimeUtc : datetime
        Alert EndTimeUtc
    Severity : str
        Alert Severity
    SystemAlertIds : List[str]
        Alert SystemAlertIds
    AlertType : str
        Alert AlertType
    VendorName : str
        Alert VendorName
    ProviderName : str
        Alert ProviderName

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Instantiate an Alert entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Existing entity or other mapping object from which to
            copy entity properties. (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.DisplayName} ({self.StartTimeUtc}) {self.CompromisedEntity}"

    _entity_schema = {
        # DisplayName (type System.String)
        "DisplayName": None,
        # CompromisedEntity (type System.String)
        "CompromisedEntity": None,
        # Count (type System.Nullable`1[System.Int32])
        "Count": None,
        # StartTimeUtc (type System.Nullable`1[System.DateTime])
        "StartTimeUtc": None,
        # EndTimeUtc (type System.Nullable`1[System.DateTime])
        "EndTimeUtc": None,
        # Severity (type System.Nullable`1
        # [Microsoft.Azure.Security.Detection.AlertContracts.V3.Severity])
        "Severity": None,
        # SystemAlertIds (type System.Collections.Generic.List`1[System.String])
        "SystemAlertIds": None,
        # AlertType (type System.String)
        "AlertType": None,
        # VendorName (type System.String)
        "VendorName": None,
        # ProviderName (type System.String)
        "ProviderName": None,
    }
@export
class Threatintelligence(Entity):
    """
    Threatintelligence Entity class.

    Attributes
    ----------
    ProviderName : str
        Threatintelligence ProviderName
    ThreatType : str
        Threatintelligence ThreatType
    ThreatName : str
        Threatintelligence ThreatName
    Confidence : str
        Threatintelligence Confidence
    ReportLink : str
        Threatintelligence ReportLink
    ThreatDescription : str
        Threatintelligence ThreatDescription

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        # BUG FIX: the previous implementation was copy-pasted from
        # Alert and referenced DisplayName/StartTimeUtc/CompromisedEntity,
        # none of which are Threatintelligence schema properties.
        # Describe the entity using its own fields instead.
        return f"{self.ThreatName} ({self.ThreatType}): {self.ProviderName}"

    _entity_schema = {
        # String Name of the provider from whom this
        # Threat Intelligence information was received
        "ProviderName": None,
        "ThreatType": None,
        "ThreatName": None,
        "Confidence": None,
        "ReportLink": None,
        "ThreatDescription": None,
    }
@export
class UnknownEntity(Entity):
    """Generic Entity class for unrecognized entity types."""

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Instantiate a generic (unknown-type) entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Existing entity or other mapping object from which to
            copy entity properties. (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return "OtherEntity"

    # No fixed schema - properties are whatever the source supplies.
    _entity_schema = {}  # type: Dict[str, Any]
# Dictionary to map text names of types to the class.
# Populated after all classes are defined; several aliases (hyphenated,
# singular/plural, short forms) map to the same entity class. Used by
# Entity.instantiate_entity to dispatch on the raw "Type" string.
Entity.ENTITY_NAME_MAP.update(
    {
        "account": Account,
        "host": Host,
        "process": Process,
        "file": File,
        "cloudapplication": CloudApplication,
        "dnsresolve": DnsResolve,
        "ipaddress": IpAddress,
        "ip": IpAddress,
        "networkconnection": NetworkConnection,
        "malware": Malware,
        "registry-key": RegistryKey,
        "registrykey": RegistryKey,
        "registry-value": RegistryValue,
        "registryvalue": RegistryValue,
        "host-logon-session": HostLogonSession,
        "hostlogonsession": HostLogonSession,
        "filehash": FileHash,
        "security-group": SecurityGroup,
        "securitygroup": SecurityGroup,
        "alerts": Alert,
        "alert": Alert,
    }
)
# msticpy/nbtools/entityschema.py
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
entityschema module.
Module for V3 Entities class
"""
import pprint
from abc import ABC, abstractmethod
from enum import Enum
from ipaddress import IPv4Address, IPv6Address, ip_address
from typing import Any, Dict, Mapping, Type, Union
from .._version import VERSION
from ..common.utility import export
__version__ = VERSION
__author__ = "<NAME>"
_ENTITY_ENUMS: Dict[str, Type] = {}
# pylint: disable=too-many-lines, invalid-name
# pylint: disable=too-many-instance-attributes
@export
class Entity(ABC):
    """
    Entity abstract base class.

    Implements common methods for Entity classes
    """

    # Maps lower-case entity type names to concrete subclasses.
    # Populated at the end of the module, once all subclasses exist.
    ENTITY_NAME_MAP: Dict[str, Type] = {}
    # Per-subclass schema: property name -> None for a plain value,
    # an entity-class name (str) for a nested entity, an enum name
    # (key of _ENTITY_ENUMS), or a tuple (list, "EntityName") for a
    # collection of entities.
    _entity_schema: Dict[str, Any] = {}

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of an entity.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            If src_entity is supplied it attempts to extract common
            properties from the source entity and assign them to
            the new instance. (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        self.Type = type(self).__name__.lower()
        # If we have an unknown entity see if we a type passed in
        if self.Type == "unknownentity" and "Type" in kwargs:
            self.Type = kwargs["Type"]
        # Make sure Type is in the class schema dictionary
        # NOTE(review): this mutates the *class-level* _entity_schema shared
        # by every instance of the subclass - confirm this is intended.
        self._entity_schema["Type"] = None
        # if we didn't populate AdditionalData, add an empty dict in case it's
        # needed
        if "AdditionalData" not in self:
            self["AdditionalData"] = {}
        if src_entity is not None:
            self._extract_src_entity(src_entity)
            # add AdditionalData dictionary if it's populated
            if "AdditionalData" in src_entity:
                self["AdditionalData"] = src_entity["AdditionalData"]
        # kwargs are applied last, so they override values taken from
        # src_entity.
        if kwargs:
            self.__dict__.update(kwargs)

    def _extract_src_entity(self, src_entity: Mapping[str, Any]):
        """
        Extract source entity properties.

        Parameters
        ----------
        src_entity : Mapping[str, Any]
            The source mappable object from which to
            extract entity properties.

        """
        # Copy so that the "Type" addition does not alter the class schema
        # (the shared dict was already mutated in __init__, however).
        schema_dict = dict(**(self._entity_schema))
        schema_dict["Type"] = None
        for k, v in schema_dict.items():
            if k not in src_entity:
                continue
            # Default: copy the raw value; may be replaced below when the
            # schema declares an enum, a collection, or a nested entity.
            self[k] = src_entity[k]
            if v is not None:
                try:
                    # If the property is an enum
                    if v in _ENTITY_ENUMS:
                        self[k] = _ENTITY_ENUMS[v][src_entity[k]]
                        continue
                except KeyError:
                    # Catch key errors from invalid enum values
                    self[k] = None
                    # NOTE(review): execution falls through to the
                    # isinstance/else branch below, which overwrites the
                    # None just assigned with the raw value - confirm
                    # whether a `continue` was intended here.
                if isinstance(v, tuple):
                    # if the property is a collection
                    entity_list = []
                    for col_entity in src_entity[k]:
                        entity_list.append(Entity.instantiate_entity(col_entity))
                    self[k] = entity_list
                else:
                    # else try to instantiate an entity
                    self[k] = Entity.instantiate_entity(src_entity[k])

    def __getitem__(self, key: str):
        """Allow property get using dictionary key syntax."""
        if key in self.__dict__:
            return self.__dict__[key]
        # Schema-declared but unset properties read as None rather than
        # raising.
        if key in self._entity_schema:
            return None
        raise KeyError

    def __setitem__(self, key: str, value: Any):
        """Allow property set using dictionary key syntax."""
        self.__dict__[key] = value

    def __contains__(self, key: str):
        """Allow property in test."""
        # In operator overload
        # Note: only *set* properties count - schema-declared but unset
        # properties return False (unlike __getitem__/__getattr__).
        return key in self.__dict__

    def __getattr__(self, name: str):
        """Return the value of the named property 'name'."""
        # Only called when normal attribute lookup fails, so set
        # properties never reach here; unset schema properties read
        # as None.
        if name in self._entity_schema:
            return None
        raise AttributeError(f"{name} is not a valid attribute.")

    def __iter__(self):
        """Iterate over entity_properties."""
        return iter(self.properties)

    def __len__(self) -> int:
        """Return length/number of entity_properties."""
        return len(self.properties)

    def __str__(self) -> str:
        """Return string representation of entity."""
        return pprint.pformat(self._to_dict(self), indent=2, width=100)

    def __repr__(self) -> str:
        """Return repr of entity."""
        # Only truthy properties are shown; output is truncated to ~80 chars.
        params = ", ".join(
            [f"{name}={val}" for name, val in self.properties.items() if val]
        )
        if len(params) > 80:
            params = params[:80] + "..."
        return f"{self.__class__.__name__}({params})"

    def _to_dict(self, entity) -> dict:
        """Return as simple nested dictionary."""
        # Recurses into nested Entity values; None-valued properties are
        # dropped.
        ent_dict = {}
        for prop, val in entity.properties.items():
            if val is not None:
                if isinstance(val, Entity):
                    ent_dict[prop] = self._to_dict(val)
                else:
                    ent_dict[prop] = val
        return ent_dict

    def _repr_html_(self) -> str:
        """
        Display entity in IPython/Notebook.

        Returns
        -------
        HTML
            IPython HTML object

        """
        return self.to_html()

    def to_html(self) -> str:
        """
        Return HTML representation of entity.

        Returns
        -------
        str
            HTML representation of entity

        """
        e_text = str(self)
        e_type = self.Type
        # Preserve the pprint layout in HTML output.
        e_text = e_text.replace("\n", "<br>").replace(" ", "&nbsp;")
        return f"<h3>{e_type}</h3>{e_text}"

    @property
    def properties(self) -> dict:
        """
        Return dictionary properties of entity.

        Returns
        -------
        dict
            Entity properties.

        """
        # All instance attributes except private ones (leading underscore).
        return {
            name: value
            for name, value in self.__dict__.items()
            if not name.startswith("_")
        }

    @property
    @abstractmethod
    def description_str(self) -> str:
        """
        Return Entity Description.

        Returns
        -------
        str
            Entity description (optional). If not overridden
            by the Entity instance type, it will return the
            Type string.

        """
        return self.Type

    # pylint: disable=bad-continuation, too-many-branches
    @classmethod
    def instantiate_entity(  # noqa: C901
        cls, raw_entity: Mapping[str, Any]
    ) -> Union["Entity", Mapping[str, Any]]:
        """
        Class factory to return entity from raw dictionary representation.

        Parameters
        ----------
        raw_entity : Mapping[str, Any]
            A mapping object (e.g. dictionary or pandas Series)
            that contains the properties of the entity.

        Returns
        -------
        Entity
            The instantiated entity

        """
        # Mappings without a "Type" key are returned unchanged (not an
        # entity from our perspective).
        if "Type" not in raw_entity:
            return raw_entity
        entity_type = raw_entity["Type"]
        # We get an undefined-variable warning here. _ENTITY_NAME_MAP
        # is not defined/populated until end of module since it needs
        # entity
        if entity_type in cls.ENTITY_NAME_MAP:
            return cls.ENTITY_NAME_MAP[entity_type](raw_entity)
        raise TypeError("Could not find a suitable type for {}".format(entity_type))
@export
class Account(Entity):
    """
    Account Entity class.

    Attributes
    ----------
    Name : str
        Account Name
    NTDomain : str
        Account NTDomain
    UPNSuffix : str
        Account UPNSuffix
    Host : Host
        Account Host
    LogonId : str
        Account LogonId (deprecated)
    Sid : str
        Account Sid
    AadTenantId : str
        Account AadTenantId
    AadUserId : str
        Account AadUserId
    PUID : str
        Account PUID
    IsDomainJoined : bool
        Account IsDomainJoined
    DisplayName : str
        Account DisplayName

    """

    def __init__(
        self,
        src_entity: Mapping[str, Any] = None,
        src_event: Mapping[str, Any] = None,
        role: str = "subject",
        **kwargs,
    ):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing Account entity or
            other mapping object that implements entity properties.
            (the default is None)
        src_event : Mapping[str, Any], optional
            Create entity from event properties
            (the default is None)
        role : str, optional
            'subject' or 'target' - only relevant if the entity
            is being constructed from an event.
            (the default is 'subject')

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        # pylint: disable=locally-disabled, line-too-long
        super().__init__(src_entity=src_entity, **kwargs)
        if src_event is not None:
            # Windows Security event columns: Subject* for the acting
            # account, Target* for the account acted upon.
            if role == "subject" and "SubjectUserName" in src_event:
                self.Name = src_event["SubjectUserName"]
                self.NTDomain = (
                    src_event["SubjectUserDomain"]
                    if "SubjectUserDomain" in src_event
                    else None
                )
                self.Sid = (
                    src_event["SubjectUserSid"]
                    if "SubjectUserSid" in src_event
                    else None
                )
                self.LogonId = (
                    src_event["SubjectLogonId"]
                    if "SubjectLogonId" in src_event
                    else None
                )
            if role == "target" and "TargetUserName" in src_event:
                self.Name = src_event["TargetUserName"]
                self.NTDomain = (
                    src_event["TargetUserDomain"]
                    if "TargetUserDomain" in src_event
                    else None
                )
                self.Sid = (
                    src_event["TargetUserSid"] if "TargetUserSid" in src_event else None
                )
                self.LogonId = (
                    src_event["TargetLogonId"] if "TargetLogonId" in src_event else None
                )
            # AAD / display properties are role-independent.
            self.AadTenantId = (
                src_event["AadTenantId"] if "AadTenantId" in src_event else None
            )
            self.AadUserId = (
                src_event["AadUserId"] if "AadUserId" in src_event else None
            )
            self.PUID = src_event["PUID"] if "PUID" in src_event else None
            self.DisplayName = (
                src_event["DisplayName"] if "DisplayName" in src_event else None
            )
            self.UPNSuffix = (
                src_event["UPNSuffix"] if "UPNSuffix" in src_event else None
            )
        # pylint: enable=locally-disabled, line-too-long

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return self.qualified_name

    @property
    def qualified_name(self) -> str:
        """Windows qualified account name."""
        # NOTE(review): if "Name" is not set, `name` is unbound and the
        # first matching return below raises UnboundLocalError - confirm
        # callers always have Name populated.
        if "Name" in self:
            name = self["Name"]
        if "NTDomain" in self and self.NTDomain:
            return "{}\\{}".format(self.NTDomain, name)
        if "UPNSuffix" in self and self.UPNSuffix:
            return "{}@{}".format(name, self.UPNSuffix)
        if "Host" in self and self.Host:
            return "{}\\{}".format(self.Host.HostName, name)
        return name

    _entity_schema = {
        # Name (type System.String)
        "Name": None,
        # NTDomain (type System.String)
        "NTDomain": None,
        # UPNSuffix (type System.String)
        "UPNSuffix": None,
        # Host (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.Host)
        "Host": "Host",
        # LogonId (type System.String)
        "LogonId": None,
        # Sid (type System.String)
        "Sid": None,
        # AadTenantId (type System.Nullable`1[System.Guid])
        "AadTenantId": None,
        # AadUserId (type System.Nullable`1[System.Guid])
        "AadUserId": None,
        # PUID (type System.Nullable`1[System.Guid])
        "PUID": None,
        # IsDomainJoined (type System.Nullable`1[System.Boolean])
        "IsDomainJoined": None,
        # DisplayName (type System.String)
        "DisplayName": None,
    }
@export
class SecurityGroup(Entity):
    """
    SecurityGroup Entity class.

    Attributes
    ----------
    DistinguishedName : str
        SecurityGroup DistinguishedName
    SID : str
        SecurityGroup SID
    ObjectGuid : str
        SecurityGroup ObjectGuid

    """

    # All properties are plain string values.
    _entity_schema = {
        # DistinguishedName (type System.String)
        "DistinguishedName": None,
        # SID (type System.String)
        "SID": None,
        # ObjectGuid (type System.String)
        "ObjectGuid": None,
    }

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self):
        """Return Entity Description."""
        return self.DistinguishedName
@export
class HostLogonSession(Entity):
    """
    HostLogonSession Entity class.

    Attributes
    ----------
    Account : Account
        HostLogonSession Account
    StartTimeUtc : datetime
        HostLogonSession StartTimeUtc
    EndTimeUtc : datetime
        HostLogonSession EndTimeUtc
    Host : Host
        HostLogonSession Host
    SessionId : str
        HostLogonSession SessionId

    """

    _entity_schema = {
        # Account
        "Account": "Account",
        # StartTimeUtc (type System.Nullable`1[System.DateTime])
        "StartTimeUtc": None,
        # EndTimeUtc (type System.Nullable`1[System.DateTime])
        "EndTimeUtc": None,
        # Host
        "Host": "Host",
        # SessionId (type System.String)
        "SessionId": None,
    }

    def __init__(
        self,
        src_entity: Mapping[str, Any] = None,
        src_event: Mapping[str, Any] = None,
        **kwargs,
    ):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)
        src_event : Mapping[str, Any], optional
            Create entity from event properties
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)
        if src_event is not None:
            # Prefer the normalized timestamp column; fall back to
            # the Log Analytics TimeGenerated column.
            if "TimeCreatedUtc" in src_event:
                self.StartTimeUtc = src_event["TimeCreatedUtc"]
            elif "TimeGenerated" in src_event:
                self.StartTimeUtc = src_event["TimeGenerated"]
            # Single-event sessions: end == start.
            self.EndTimeUtc = self.StartTimeUtc
            self.SessionId = src_event.get("TargetLogonId")

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return "{}: session: {}".format(self.Host.HostName, self.SessionId)
@export
class CloudApplication(Entity):
    """
    CloudApplication Entity class.

    Attributes
    ----------
    Name : str
        CloudApplication Name

    """

    # Single plain-string property.
    _entity_schema = {
        # Name (type System.String)
        "Name": None
    }

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return self.Name
@export
class DnsResolve(Entity):
    """
    DNS Resolve Entity class.

    Attributes
    ----------
    DomainName : str
        DnsResolve DomainName
    IpAdresses : List[str]
        DnsResolve IpAdresses
    DnsServerIp : IPAddress
        DnsResolve DnsServerIp
    HostIpAddress : IPAddress
        DnsResolve HostIpAddress

    """

    # Note: "IpAdresses" (sic) is the spelling used by the source API.
    _entity_schema = {
        # DomainName (type System.String)
        "DomainName": None,
        # IpAdresses (type System.Collections.Generic.List`1
        # [Microsoft.Azure.Security.Detection.AlertContracts.V3.Entities.IP])
        "IpAdresses": None,
        # DnsServerIp (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.IP)
        "DnsServerIp": "IPAddress",
        # HostIpAddress (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.IP)
        "HostIpAddress": "IPAddress",
    }

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return "{}: IPs: {}".format(self.DomainName, repr(self.IpAdresses))
@export
class File(Entity):
    """
    File Entity class.

    Attributes
    ----------
    FullPath : str
        File FullPath
    Directory : str
        File Directory
    Name : str
        File Name
    Md5 : str
        File Md5
    Host : str
        File Host
    Sha1 : str
        File Sha1
    Sha256 : str
        File Sha256
    Sha256Ac : str
        File Sha256Ac
    FileHashes : List[FileHash]
        File FileHashes

    """

    def __init__(
        self,
        src_entity: Mapping[str, Any] = None,
        src_event: Mapping[str, Any] = None,
        role: str = "new",
        **kwargs,
    ):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)
        src_event : Mapping[str, Any], optional
            Create entity from event properties
            (the default is None)
        role : str, optional
            'new' or 'parent' - only relevant if the entity
            is being constructed from an event.
            (the default is 'new')

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)
        if src_event is not None:
            # Process-creation events carry the file path in the
            # New/ParentProcessName columns.
            if role == "new" and "NewProcessName" in src_event:
                self._add_paths(src_event["NewProcessName"])
            elif role == "parent" and "ParentProcessName" in src_event:
                self._add_paths(src_event["ParentProcessName"])

        if "FullPath" not in self:
            file = self["Name"]
            directory = self["Directory"]
            # FIX: previously a missing/empty directory produced the
            # string "NoneNone<name>" (both directory and sep rendered
            # as "None" in the f-string). Fall back to the bare name.
            if directory:
                self["FullPath"] = f"{directory}{self.path_separator}{file}"
            else:
                self["FullPath"] = file

    @property
    def path_separator(self):
        """Return the path separator used by the file."""
        directory = self["Directory"]
        # A "/" anywhere in the directory implies a POSIX path;
        # default to the Windows separator otherwise.
        if directory and "/" in directory:
            return "/"
        return "\\"

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return self.FullPath

    _entity_schema = {
        # FullPath (type System.String)
        "FullPath": None,
        # Directory (type System.String)
        "Directory": None,
        # Name (type System.String)
        "Name": None,
        # Md5 (type System.String)
        "Md5": None,
        # Host (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.Host)
        "Host": None,
        # Sha1 (type System.String)
        "Sha1": None,
        # Sha256 (type System.String)
        "Sha256": None,
        # Sha256Ac (type System.String)
        "Sha256Ac": None,
        "FileHashes": (list, "FileHash"),
    }

    def _add_paths(self, full_path):
        """Populate path-derived properties from a full file path string."""
        if "/" in full_path:
            self.PathSeparator = "/"
            self.OSFamily = OSFamily.Linux
        else:
            self.PathSeparator = "\\"
            self.OSFamily = OSFamily.Windows
        self.FullPath = full_path
        parts = full_path.split(self.PathSeparator)
        self.Name = parts[-1]
        # FIX: previously Directory was assigned the raw list of path
        # components (full_path.split(...)[:-1]); join them back into a
        # path string so Directory is a str like the schema documents.
        self.Directory = self.PathSeparator.join(parts[:-1])
@export
class FileHash(Entity):
    """
    File Hash class.

    Attributes
    ----------
    Algorithm : Algorithm
        FileHash Algorithm
    Value : str
        FileHash Value

    """

    _entity_schema = {
        # The hash algorithm (type System.String)
        "Algorithm": "Algorithm",
        # Value (type System.String)
        "Value": None,
    }

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return "{}: {}".format(self.Algorithm, self.Value)
@export
class Algorithm(Enum):
    """FileHash Algorithm Enumeration."""

    Unknown = 0
    MD5 = 1
    SHA1 = 2
    SHA256 = 3
    SHA256AC = 4


# Register so Entity._extract_src_entity can convert raw "Algorithm"
# schema values to members of this enum.
_ENTITY_ENUMS[Algorithm.__name__] = Algorithm
@export
class Host(Entity):
    """
    Host Entity class.

    Attributes
    ----------
    DnsDomain : str
        Host DnsDomain
    NTDomain : str
        Host NTDomain
    HostName : str
        Host HostName
    NetBiosName : str
        Host NetBiosName
    AzureID : str
        Host AzureID
    OMSAgentID : str
        Host OMSAgentID
    OSFamily : str
        Host OSFamily
    IsDomainJoined : bool
        Host IsDomainJoined

    """

    def __init__(
        self,
        src_entity: Mapping[str, Any] = None,
        src_event: Mapping[str, Any] = None,
        **kwargs,
    ):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)
        src_event : Mapping[str, Any], optional
            Create entity from event properties
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)
        # Raw Computer column value, if built from an event; private so
        # it is excluded from `properties`.
        self._computer = None
        if src_event is not None:
            if "Computer" in src_event:
                self._computer = src_event["Computer"]
                # Split an FQDN into host name + DNS domain on the first dot.
                if "." in src_event["Computer"]:
                    self.HostName = src_event["Computer"].split(".", 1)[0]
                    self.DnsDomain = src_event["Computer"].split(".", 1)[1]
                else:
                    self.HostName = src_event["Computer"]
                self.NetBiosName = self.HostName

    @property
    def computer(self) -> str:
        """Return computer from source event."""
        # Prefer the raw source-event value; otherwise reconstruct from
        # HostName/DnsDomain.
        return self._computer if self._computer is not None else self.fqdn

    @property
    def fqdn(self) -> str:
        """Construct FQDN from host + dns."""
        if self.DnsDomain:
            return f"{self.HostName}.{self.DnsDomain}"
        return self.HostName

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.fqdn} ({self.OSFamily})"

    _entity_schema = {
        # DnsDomain (type System.String)
        "DnsDomain": None,
        # NTDomain (type System.String)
        "NTDomain": None,
        # HostName (type System.String)
        "HostName": None,
        # NetBiosName (type System.String)
        "NetBiosName": None,
        # AzureID (type System.String)
        "AzureID": None,
        # OMSAgentID (type System.String)
        "OMSAgentID": None,
        # OSFamily (type System.Nullable`1
        # [Microsoft.Azure.Security.Detection.AlertContracts.V3.Entities.OSFamily])
        "OSFamily": None,
        # IsDomainJoined (type System.Nullable`1[System.Boolean])
        "IsDomainJoined": None,
    }
@export
class IpAddress(Entity):
    """
    IPAddress Entity class.

    Attributes
    ----------
    Address : str
        IpAddress Address
    Location : GeoLocation
        IpAddress Location
    ThreatIntelligence : List[ThreatIntelligence]
        IpAddress ThreatIntelligence

    """

    _entity_schema = {
        # Address (type System.String)
        "Address": None,
        # Location (type Microsoft.Azure.Security.Detection.AlertContracts
        # .V3.ContextObjects.GeoLocation)
        "Location": "GeoLocation",
        # ThreatIntelligence (type System.Collections.Generic.List`1
        # [Microsoft.Azure.Security.Detection.AlertContracts.V3
        # .ContextObjects.ThreatIntelligence])
        "ThreatIntelligence": (list, "Threatintelligence"),
    }

    def __init__(
        self,
        src_entity: Mapping[str, Any] = None,
        src_event: Mapping[str, Any] = None,
        **kwargs,
    ):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)
        src_event : Mapping[str, Any], optional
            Create entity from event properties
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)
        # Only set Address when the event actually carries an IpAddress
        # column (leave it unset otherwise).
        if src_event is not None and "IpAddress" in src_event:
            self.Address = src_event["IpAddress"]

    @property
    def ip_address(self) -> Union[IPv4Address, IPv6Address]:
        """Return a python ipaddress object from the entity property."""
        raw_addr = self["Address"]
        return ip_address(raw_addr)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return self.Address
@export
class GeoLocation(Entity):
    """
    GeoLocation class.

    Attributes
    ----------
    CountryCode : str
        GeoLocation CountryCode
    CountryName : str
        GeoLocation CountryName
    State : str
        GeoLocation State
    City : str
        GeoLocation City
    Longitude : float
        GeoLocation Longitude
    Latitude : float
        GeoLocation Latitude
    Asn : str
        GeoLocation Asn

    """

    _entity_schema = {
        # str
        "CountryCode": None,
        # str
        "CountryName": None,
        # str
        "State": None,
        # str
        "City": None,
        # double?
        "Longitude": None,
        # double?
        "Latitude": None,
        # int
        "Asn": None,
    }

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return "{}; {}; {}".format(self.CountryCode, self.State, self.City)
@export
class Malware(Entity):
    """
    Malware Entity class.

    Attributes
    ----------
    Name : str
        Malware Name
    Category : str
        Malware Category
    File : File
        Malware File
    Files : List[File]
        Malware Files
    Processes : List[Process]
        Malware Processes

    """

    _entity_schema = {
        # Name (type System.String)
        "Name": None,
        # Category (type System.String)
        "Category": None,
        # File (type Microsoft.Azure.Security.Detection.AlertContracts.V3.Entities.File)
        "File": "File",
        "Files": (list, "File"),
        "Processes": (list, "Process"),
    }

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return "{}: {}".format(self.Name, self.Category)
@export
class NetworkConnection(Entity):
    """
    NetworkConnection Entity class.

    Attributes
    ----------
    SourceAddress : IPAddress
        NetworkConnection SourceAddress
    SourcePort : int
        NetworkConnection SourcePort
    DestinationAddress : IPAddress
        NetworkConnection DestinationAddress
    DestinationPort : int
        NetworkConnection DestinationPort
    Protocol : str
        NetworkConnection Protocol

    """

    _entity_schema = {
        # SourceAddress (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.IP)
        "SourceAddress": "IPAddress",
        # SourcePort (type System.Nullable`1[System.Int32])
        "SourcePort": None,
        # DestinationAddress (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.IP)
        "DestinationAddress": "IPAddress",
        # DestinationPort (type System.Nullable`1[System.Int32])
        "DestinationPort": None,
        # Protocol (type System.Nullable`1[System.Net.Sockets.ProtocolType])
        "Protocol": None,
    }

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        # "src:port [proto]-> dst:port"
        return (
            f"{self.SourceAddress}:{self.SourcePort} [{self.Protocol}]"
            f"-> {self.DestinationAddress}:{self.DestinationPort}"
        )
@export
class Process(Entity):
    """
    Process Entity class.

    Attributes
    ----------
    ProcessId : str
        Process ProcessId
    CommandLine : str
        Process CommandLine
    ElevationToken : str
        Process ElevationToken
    CreationTimeUtc : datetime
        Process CreationTimeUtc
    ImageFile : File
        Process ImageFile
    Account : Account
        Process Account
    ParentProcess : Process
        Process ParentProcess
    Host : Host
        Process Host
    LogonSession : HostLogonSession
        Process LogonSession

    """

    def __init__(
        self,
        src_entity: Mapping[str, Any] = None,
        src_event: Mapping[str, Any] = None,
        role="new",
        **kwargs,
    ):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)
        src_event : Mapping[str, Any], optional
            Create entity from event properties
            (the default is None)
        role : str, optional
            'new' or 'parent' - only relevant if the entity
            is being constructed from an event.
            (the default is 'new')

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)
        # pylint: disable=locally-disabled, line-too-long
        if src_event is not None:
            if role == "new":
                self.ProcessId = (
                    src_event["NewProcessId"] if "NewProcessId" in src_event else None
                )
                self.CommandLine = (
                    src_event["CommandLine"] if "CommandLine" in src_event else None
                )
                if "TimeCreatedUtc" in src_event:
                    self.CreationTimeUtc = src_event["TimeCreatedUtc"]
                elif "TimeGenerated" in src_event:
                    self.CreationTimeUtc = src_event["TimeGenerated"]
                # NOTE(review): duplicate of the ProcessId assignment a few
                # lines above - appears redundant.
                self.ProcessId = (
                    src_event["NewProcessId"] if "NewProcessId" in src_event else None
                )
                self.ImageFile = File(src_event=src_event, role="new")
                self.Account = Account(src_event=src_event, role="subject")

                if "ParentProcessName" in src_event or "ProcessName" in src_event:
                    parent = Process(src_event=src_event, role="parent")
                    self.ParentProcess = parent

                # Linux properties
                self.success = src_event["success"] if "success" in src_event else None
                self.audit_user = (
                    src_event["audit_user"] if "audit_user" in src_event else None
                )
                self.auid = src_event["auid"] if "auid" in src_event else None
                self.group = src_event["group"] if "group" in src_event else None
                self.gid = src_event["gid"] if "gid" in src_event else None
                self.effective_user = (
                    src_event["effective_user"]
                    if "effective_user" in src_event
                    else None
                )
                self.euid = src_event["euid"] if "euid" in src_event else None
                self.effective_group = (
                    src_event["effective_group"]
                    if "effective_group" in src_event
                    else None
                )
                # NOTE(review): egid reads "effective_group" (the group name),
                # same as effective_group above - confirm the intended source
                # column for the numeric egid.
                self.egid = (
                    src_event["effective_group"]
                    if "effective_group" in src_event
                    else None
                )
                self.cwd = src_event["cwd"] if "cwd" in src_event else None
                # NOTE(review): `name` also reads the "cwd" column - looks
                # like a copy-paste slip; confirm the intended source column.
                self.name = src_event["cwd"] if "cwd" in src_event else None
            else:
                # role == "parent": only the id and image file are available.
                self.ProcessId = (
                    src_event["ProcessId"] if "ProcessId" in src_event else None
                )
                self.ImageFile = File(src_event=src_event, role="parent")
        # pylint: enable=locally-disabled, line-too-long

    @property
    def ProcessName(self) -> str:  # noqa: N802
        """Return the name of the process file."""
        file = self["ImageFile"]
        return file.Name if file else None

    @property
    def ProcessFilePath(self) -> str:  # noqa: N802
        """Return the name of the process file path."""
        file = self["ImageFile"]
        return file.FullPath if file else None

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.ProcessFilePath}: {self.CommandLine}"

    _entity_schema = {
        # ProcessId (type System.String)
        "ProcessId": None,
        # CommandLine (type System.String)
        "CommandLine": None,
        # ElevationToken (type System.Nullable`1
        # [Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.ElevationToken])
        "ElevationToken": None,
        # CreationTimeUtc (type System.Nullable`1[System.DateTime])
        "CreationTimeUtc": None,
        # ImageFile (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.File)
        "ImageFile": "File",
        # Account (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.Account)
        "Account": "Account",
        # ParentProcess (type Microsoft.Azure.Security.Detection.AlertContracts
        # .V3.Entities.Process)
        "ParentProcess": "Process",
        # Host (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.Host)
        "Host": "Host",
        # Host (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.HostLogonSession)
        "LogonSession": "HostLogonSession",
    }
@export
class RegistryHive(Enum):
    """RegistryHive enumeration."""

    # <summary>HKEY_LOCAL_MACHINE</summary>
    HKEY_LOCAL_MACHINE = 0
    # <summary>HKEY_CLASSES_ROOT</summary>
    HKEY_CLASSES_ROOT = 1
    # <summary>HKEY_CURRENT_CONFIG</summary>
    HKEY_CURRENT_CONFIG = 2
    # <summary>HKEY_USERS</summary>
    HKEY_USERS = 3
    # <summary>HKEY_CURRENT_USER_LOCAL_SETTINGS</summary>
    HKEY_CURRENT_USER_LOCAL_SETTINGS = 4
    # <summary>HKEY_PERFORMANCE_DATA</summary>
    HKEY_PERFORMANCE_DATA = 5
    # <summary>HKEY_PERFORMANCE_NLSTEXT</summary>
    HKEY_PERFORMANCE_NLSTEXT = 6
    # <summary>HKEY_PERFORMANCE_TEXT</summary>
    HKEY_PERFORMANCE_TEXT = 7
    # <summary>HKEY_A</summary>
    HKEY_A = 8
    # <summary>HKEY_CURRENT_USER</summary>
    HKEY_CURRENT_USER = 9


# Register so Entity._extract_src_entity can convert raw "RegistryHive"
# schema values to members of this enum.
_ENTITY_ENUMS[RegistryHive.__name__] = RegistryHive
@export
class RegistryKey(Entity):
    """
    RegistryKey Entity class.

    Attributes
    ----------
    Hive : RegistryHive
        RegistryKey Hive
    Key : str
        RegistryKey Key

    """

    _entity_schema = {
        # Hive (type System.Nullable`1
        # [Microsoft.Azure.Security.Detection.AlertContracts.V3.Entities.RegistryHive])
        "Hive": "RegistryHive",
        # Key (type System.String)
        "Key": None,
    }

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return "{}\\{}".format(self.Hive, self.Key)
# FIX: added the missing @export decorator - every sibling entity class
# is exported; without it RegistryValue was omitted from the module's
# public API (__all__).
@export
class RegistryValue(Entity):
    """
    RegistryValue Entity class.

    Attributes
    ----------
    Key : str
        RegistryValue Key
    Name : str
        RegistryValue Name
    Value : str
        RegistryValue Value
    ValueType : str
        RegistryValue ValueType

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.Name}[{self.ValueType}]:{repr(self.Value)}"

    _entity_schema = {
        # Key (type Microsoft.Azure.Security.Detection
        # .AlertContracts.V3.Entities.RegistryKey)
        "Key": None,
        # Name (type System.String)
        "Name": None,
        # Value (type System.String)
        "Value": None,
        # ValueType (type System.Nullable`1[Microsoft.Win32.RegistryValueKind])
        "ValueType": None,
    }
@export
class OSFamily(Enum):
    """OSFamily enumeration."""

    # Operating-system family codes (Host entities carry an OSFamily field).
    Linux = 0
    Windows = 1


# Register this enum in the module-level _ENTITY_ENUMS lookup by class name.
_ENTITY_ENUMS[OSFamily.__name__] = OSFamily
@export
class ElevationToken(Enum):
    """ElevationToken enumeration."""

    # Process elevation token levels (Process entities carry an
    # ElevationToken field).
    Default = 0
    Full = 1
    Limited = 2


# Register this enum in the module-level _ENTITY_ENUMS lookup by class name.
_ENTITY_ENUMS[ElevationToken.__name__] = ElevationToken
@export
class AzureResource(Entity):
    """
    AzureResource Entity class.

    Attributes
    ----------
    ResourceId : str
        AzureResource ResourceId (full ARM resource ID string)
    SubscriptionId : str
        AzureResource SubscriptionId
    ResourceIdParts : Dict[str, str]
        AzureResource ResourceIdParts

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            Create entity from existing entity or
            other mapping object that implements entity properties.
            (the default is None)

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Supply the entity properties as a set of
            kw arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        # The full resource ID is the most specific single-string identifier.
        return self.ResourceId

    _entity_schema = {
        # ResourceId (type System.String)
        "ResourceId": None,
        # SubscriptionId (type System.String)
        "SubscriptionId": None,
        # ResourceIdParts (type System.Collections.Generic.IReadOnlyDictionary`2
        # [System.String,System.String])
        "ResourceIdParts": None,
    }
@export
class Alert(Entity):
    """
    Entity representing a security alert.

    Attributes
    ----------
    DisplayName : str
        Display name of the alert.
    CompromisedEntity : str
        The entity reported as compromised.
    Count : int
        Number of occurrences.
    StartTimeUtc : datetime
        Alert start time (UTC).
    EndTimeUtc : datetime
        Alert end time (UTC).
    Severity : str
        Alert severity.
    SystemAlertIds : List[str]
        System alert IDs associated with this alert.
    AlertType : str
        The alert type.
    VendorName : str
        Name of the alert vendor.
    ProviderName : str
        Name of the alert provider.

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        Parameters
        ----------
        src_entity : Mapping[str, Any], optional
            An existing entity, or any mapping object exposing entity
            properties, to copy values from (default: None).

        Other Parameters
        ----------------
        kwargs : Dict[str, Any]
            Entity properties supplied as keyword arguments.

        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        # "<name> (<start time>) <compromised entity>"
        return "{} ({}) {}".format(
            self.DisplayName, self.StartTimeUtc, self.CompromisedEntity
        )

    _entity_schema = {
        # DisplayName (type System.String)
        "DisplayName": None,
        # CompromisedEntity (type System.String)
        "CompromisedEntity": None,
        # Count (type System.Nullable`1[System.Int32])
        "Count": None,
        # StartTimeUtc (type System.Nullable`1[System.DateTime])
        "StartTimeUtc": None,
        # EndTimeUtc (type System.Nullable`1[System.DateTime])
        "EndTimeUtc": None,
        # Severity (type System.Nullable`1
        # [Microsoft.Azure.Security.Detection.AlertContracts.V3.Severity])
        "Severity": None,
        # SystemAlertIds (type System.Collections.Generic.List`1[System.String])
        "SystemAlertIds": None,
        # AlertType (type System.String)
        "AlertType": None,
        # VendorName (type System.String)
        "VendorName": None,
        # ProviderName (type System.String)
        "ProviderName": None,
    }
@export
class Threatintelligence(Entity):
    """
    Threatintelligence Entity class.

    Attributes
    ----------
    ProviderName : str
        Name of the provider from whom this Threat Intelligence
        information was received.
    ThreatType : str
        Threatintelligence ThreatType
    ThreatName : str
        Threatintelligence ThreatName
    Confidence : str
        Threatintelligence Confidence
    ReportLink : str
        Threatintelligence ReportLink
    ThreatDescription : str
        Threatintelligence ThreatDescription

    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        :param src_entity: instantiate entity using properties of src entity
        :param kwargs: key-value pair representation of entity
        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        # Bug fix: the previous implementation referenced Alert properties
        # (DisplayName, StartTimeUtc, CompromisedEntity) that are not part of
        # this entity's _entity_schema. Describe the threat using its own
        # schema fields instead.
        return f"{self.ThreatName} ({self.ThreatType})"

    _entity_schema = {
        # String Name of the provider from whom this
        # Threat Intelligence information was received
        "ProviderName": None,
        "ThreatType": None,
        "ThreatName": None,
        "Confidence": None,
        "ReportLink": None,
        "ThreatDescription": None,
    }
@export
class UnknownEntity(Entity):
    """Fallback entity used when the source type name is not recognized."""

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        :param src_entity: instantiate entity using properties of src entity
        :param kwargs: key-value pair representation of entity
        """
        super().__init__(src_entity=src_entity, **kwargs)

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        # Fixed label - there is no type-specific information to show.
        return "OtherEntity"

    # No declared properties for the generic entity.
    _entity_schema = {}  # type: Dict[str, Any]
# Dictionary to map text names of types to the class.
# Keys are lower-cased entity type strings as they appear in source data;
# several aliases (e.g. "ip"/"ipaddress", "alert"/"alerts") resolve to the
# same entity class.
Entity.ENTITY_NAME_MAP.update(
    {
        "account": Account,
        "host": Host,
        "process": Process,
        "file": File,
        "cloudapplication": CloudApplication,
        "dnsresolve": DnsResolve,
        "ipaddress": IpAddress,
        "ip": IpAddress,
        "networkconnection": NetworkConnection,
        "malware": Malware,
        "registry-key": RegistryKey,
        "registrykey": RegistryKey,
        "registry-value": RegistryValue,
        "registryvalue": RegistryValue,
        "host-logon-session": HostLogonSession,
        "hostlogonsession": HostLogonSession,
        "filehash": FileHash,
        "security-group": SecurityGroup,
        "securitygroup": SecurityGroup,
        "alerts": Alert,
        "alert": Alert,
    }
)
|
en
| 0.446717
|
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- entityschema module. Module for V3 Entities class # pylint: disable=too-many-lines, invalid-name # pylint: disable=too-many-instance-attributes Entity abstract base class. Implements common methods for Entity classes Create a new instance of an entity. Parameters ---------- src_entity : Mapping[str, Any], optional If src_entity is supplied it attempts to extract common properties from the source entity and assign them to the new instance. (the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. # If we have an unknown entity see if we a type passed in # Make sure Type is in the class schema dictionary # if we didn't populate AdditionalData, add an empty dict in case it's # needed # add AdditionalData dictionary if it's populated Extract source entity properties. Parameters ---------- src_entity : Mapping[str, Any] The source mappable object from which to extract entity properties. # If the property is an enum # Catch key errors from invalid enum values # if the property is a collection # else try to instantiate an entity Allow property get using dictionary key syntax. Allow property set using dictionary key syntax. Allow property in test. # In operator overload Return the value of the named property 'name'. Iterate over entity_properties. Return length/number of entity_properties. Return string representation of entity. Return repr of entity. Return as simple nested dictionary. Display entity in IPython/Notebook. Returns ------- HTML IPython HTML object Return HTML representation of entity. Returns ------- str HTML representation of entity Return dictionary properties of entity. 
Returns ------- dict Entity properties. Return Entity Description. Returns ------- str Entity description (optional). If not overridden by the Entity instance type, it will return the Type string. # pylint: disable=bad-continuation, too-many-branches # noqa: C901 Class factory to return entity from raw dictionary representation. Parameters ---------- raw_entity : Mapping[str, Any] A mapping object (e.g. dictionary or pandas Series) that contains the properties of the entity. Returns ------- Entity The instantiated entity # We get an undefined-variable warning here. _ENTITY_NAME_MAP # is not defined/populated until end of module since it needs # entity Account Entity class. Attributes ---------- Name : str Account Name NTDomain : str Account NTDomain UPNSuffix : str Account UPNSuffix Host : Host Account Host LogonId : str Account LogonId (deprecated) Sid : str Account Sid AadTenantId : str Account AadTenantId AadUserId : str Account AadUserId PUID : str Account PUID IsDomainJoined : bool Account IsDomainJoined DisplayName : str Account DisplayName Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing Account entity or other mapping object that implements entity properties. (the default is None) src_event : Mapping[str, Any], optional Create entity from event properties (the default is None) role : str, optional 'subject' or 'target' - only relevant if the entity is being constructed from an event. (the default is 'subject') Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. # pylint: disable=locally-disabled, line-too-long # pylint: enable=locally-disabled, line-too-long Return Entity Description. Windows qualified account name. 
# Name (type System.String) # NTDomain (type System.String) # UPNSuffix (type System.String) # Host (type Microsoft.Azure.Security.Detection # .AlertContracts.V3.Entities.Host) # LogonId (type System.String) # Sid (type System.String) # AadTenantId (type System.Nullable`1[System.Guid]) # AadUserId (type System.Nullable`1[System.Guid]) # PUID (type System.Nullable`1[System.Guid]) # IsDomainJoined (type System.Nullable`1[System.Boolean]) # DisplayName (type System.String) SecurityGroup Entity class. Attributes ---------- DistinguishedName : str SecurityGroup DistinguishedName SID : str SecurityGroup SID ObjectGuid : str SecurityGroup ObjectGuid Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. (the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return Entity Description. # DistinguishedName (type System.String) # SID (type System.String) # ObjectGuid (type System.String) HostLogonSession Entity class. Attributes ---------- Account : Account HostLogonSession Account StartTimeUtc : datetime HostLogonSession StartTimeUtc EndTimeUtc : datetime HostLogonSession EndTimeUtc Host : Host HostLogonSession Host SessionId : str HostLogonSession SessionId Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. (the default is None) src_event : Mapping[str, Any], optional Create entity from event properties (the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return Entity Description. 
# Account # StartTimeUtc (type System.Nullable`1[System.DateTime]) # EndTimeUtc (type System.Nullable`1[System.DateTime]) # Host # SessionId (type System.String) CloudApplication Entity class. Attributes ---------- Name : str CloudApplication Name Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. (the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return Entity Description. # Name (type System.String) DNS Resolve Entity class. Attributes ---------- DomainName : str DnsResolve DomainName IpAdresses : List[str] DnsResolve IpAdresses DnsServerIp : IPAddress DnsResolve DnsServerIp HostIpAddress : IPAddress DnsResolve HostIpAddress Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. (the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return Entity Description. # DomainName (type System.String) # IpAdresses (type System.Collections.Generic.List`1 # [Microsoft.Azure.Security.Detection.AlertContracts.V3.Entities.IP]) # DnsServerIp (type Microsoft.Azure.Security.Detection # .AlertContracts.V3.Entities.IP) # HostIpAddress (type Microsoft.Azure.Security.Detection # .AlertContracts.V3.Entities.IP) File Entity class. Attributes ---------- FullPath : str File FullPath Directory : str File Directory Name : str File Name Md5 : str File Md5 Host : str File Host Sha1 : str File Sha1 Sha256 : str File Sha256 Sha256Ac : str File Sha256Ac FileHashes : List[FileHash] File FileHashes Create a new instance of the entity type. 
Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. (the default is None) src_event : Mapping[str, Any], optional Create entity from event properties (the default is None) role : str, optional 'new' or 'parent' - only relevant if the entity is being constructed from an event. (the default is 'new') Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return the path separator used by the file. Return Entity Description. # FullPath (type System.String) # Directory (type System.String) # Name (type System.String) # Md5 (type System.String) # Host (type Microsoft.Azure.Security.Detection # .AlertContracts.V3.Entities.Host) # Sha1 (type System.String) # Sha256 (type System.String) # Sha256Ac (type System.String) File Hash class. Attributes ---------- Algorithm : Algorithm FileHash Algorithm Value : str FileHash Value Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. (the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return Entity Description. # The hash algorithm (type System.String) # Value (type System.String) FileHash Algorithm Enumeration. Host Entity class. Attributes ---------- DnsDomain : str Host DnsDomain NTDomain : str Host NTDomain HostName : str Host HostName NetBiosName : str Host NetBiosName AzureID : str Host AzureID OMSAgentID : str Host OMSAgentID OSFamily : str Host OSFamily IsDomainJoined : bool Host IsDomainJoined Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. 
(the default is None) src_event : Mapping[str, Any], optional Create entity from event properties (the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return computer from source event. Construct FQDN from host + dns. Return Entity Description. # DnsDomain (type System.String) # NTDomain (type System.String) # HostName (type System.String) # NetBiosName (type System.String) # AzureID (type System.String) # OMSAgentID (type System.String) # OSFamily (type System.Nullable`1 # [Microsoft.Azure.Security.Detection.AlertContracts.V3.Entities.OSFamily]) # IsDomainJoined (type System.Nullable`1[System.Boolean]) IPAddress Entity class. Attributes ---------- Address : str IpAddress Address Location : GeoLocation IpAddress Location ThreatIntelligence : List[ThreatIntelligence] IpAddress ThreatIntelligence Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. (the default is None) src_event : Mapping[str, Any], optional Create entity from event properties (the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return a python ipaddress object from the entity property. Return Entity Description. # Address (type System.String) # Location (type Microsoft.Azure.Security.Detection.AlertContracts # .V3.ContextObjects.GeoLocation) # ThreatIntelligence (type System.Collections.Generic.List`1 # [Microsoft.Azure.Security.Detection.AlertContracts.V3 # .ContextObjects.ThreatIntelligence]) GeoLocation class. 
Attributes ---------- CountryCode : str GeoLocation CountryCode CountryName : str GeoLocation CountryName State : str GeoLocation State City : str GeoLocation City Longitude : float GeoLocation Longitude Latitude : float GeoLocation Latitude Asn : str GeoLocation Asn Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. (the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return Entity Description. # str # str # str # str # double? # double? # int Malware Entity class. Attributes ---------- Name : str Malware Name Category : str Malware Category File : File Malware File Files : List[File] Malware Files Processes : List[Process] Malware Processes Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. (the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return Entity Description. # Name (type System.String) # Category (type System.String) # File (type Microsoft.Azure.Security.Detection.AlertContracts.V3.Entities.File) NetworkConnection Entity class. Attributes ---------- SourceAddress : IPAddress NetworkConnection SourceAddress SourcePort : int NetworkConnection SourcePort DestinationAddress : IPAddress NetworkConnection DestinationAddress DestinationPort : int NetworkConnection DestinationPort Protocol : str NetworkConnection Protocol Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. 
(the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return Entity Description. # SourceAddress (type Microsoft.Azure.Security.Detection # .AlertContracts.V3.Entities.IP) # SourcePort (type System.Nullable`1[System.Int32]) # DestinationAddress (type Microsoft.Azure.Security.Detection # .AlertContracts.V3.Entities.IP) # DestinationPort (type System.Nullable`1[System.Int32]) # Protocol (type System.Nullable`1[System.Net.Sockets.ProtocolType]) Process Entity class. Attributes ---------- ProcessId : str Process ProcessId CommandLine : str Process CommandLine ElevationToken : str Process ElevationToken CreationTimeUtc : datetime Process CreationTimeUtc ImageFile : File Process ImageFile Account : Account Process Account ParentProcess : Process Process ParentProcess Host : Host Process Host LogonSession : HostLogonSession Process LogonSession Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. (the default is None) src_event : Mapping[str, Any], optional Create entity from event properties (the default is None) role : str, optional 'new' or 'parent' - only relevant if the entity is being constructed from an event. (the default is 'new') Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. # pylint: disable=locally-disabled, line-too-long # Linux properties # pylint: enable=locally-disabled, line-too-long # noqa: N802 Return the name of the process file. # noqa: N802 Return the name of the process file path. Return Entity Description. 
# ProcessId (type System.String) # CommandLine (type System.String) # ElevationToken (type System.Nullable`1 # [Microsoft.Azure.Security.Detection # .AlertContracts.V3.Entities.ElevationToken]) # CreationTimeUtc (type System.Nullable`1[System.DateTime]) # ImageFile (type Microsoft.Azure.Security.Detection # .AlertContracts.V3.Entities.File) # Account (type Microsoft.Azure.Security.Detection # .AlertContracts.V3.Entities.Account) # ParentProcess (type Microsoft.Azure.Security.Detection.AlertContracts # .V3.Entities.Process) # Host (type Microsoft.Azure.Security.Detection # .AlertContracts.V3.Entities.Host) # Host (type Microsoft.Azure.Security.Detection # .AlertContracts.V3.Entities.HostLogonSession) RegistryHive enumeration. # <summary>HKEY_LOCAL_MACHINE</summary> # <summary>HKEY_CLASSES_ROOT</summary> # <summary>HKEY_CURRENT_CONFIG</summary> # <summary>HKEY_USERS</summary> # <summary>HKEY_CURRENT_USER_LOCAL_SETTINGS</summary> # <summary>HKEY_PERFORMANCE_DATA</summary> # <summary>HKEY_PERFORMANCE_NLSTEXT</summary> # <summary>HKEY_PERFORMANCE_TEXT</summary> # <summary>HKEY_A</summary> # <summary>HKEY_CURRENT_USER</summary> RegistryKey Entity class. Attributes ---------- Hive : RegistryHive RegistryKey Hive Key : str RegistryKey Key Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. (the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return Entity Description. # Hive (type System.Nullable`1 # [Microsoft.Azure.Security.Detection.AlertContracts.V3.Entities.RegistryHive]) # Key (type System.String) RegistryValue Entity class. Attributes ---------- Key : str RegistryValue Key Name : str RegistryValue Name Value : str RegistryValue Value ValueType : str RegistryValue ValueType Create a new instance of the entity type. 
Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. (the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return Entity Description. # Key (type Microsoft.Azure.Security.Detection # .AlertContracts.V3.Entities.RegistryKey) # Name (type System.String) # Value (type System.String) # ValueType (type System.Nullable`1[Microsoft.Win32.RegistryValueKind]) OSFamily enumeration. ElevationToken enumeration. AzureResource Entity class. Attributes ---------- ResourceId : str AzureResource ResourceId SubscriptionId : str AzureResource SubscriptionId ResourceIdParts : Dict[str, str] AzureResource ResourceIdParts Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. (the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return Entity Description. # ResourceId (type System.String) # SubscriptionId (type System.String) # ResourceIdParts (type System.Collections.Generic.IReadOnlyDictionary`2 # [System.String,System.String]) Alert Entity class. Attributes ---------- DisplayName : str Alert DisplayName CompromisedEntity : str Alert CompromisedEntity Count : int Alert Count StartTimeUtc : datetime Alert StartTimeUtc EndTimeUtc : datetime Alert EndTimeUtc Severity : str Alert Severity SystemAlertIds : List[str] Alert SystemAlertIds AlertType : str Alert AlertType VendorName : str Alert VendorName ProviderName : str Alert ProviderName Create a new instance of the entity type. Parameters ---------- src_entity : Mapping[str, Any], optional Create entity from existing entity or other mapping object that implements entity properties. 
(the default is None) Other Parameters ---------------- kwargs : Dict[str, Any] Supply the entity properties as a set of kw arguments. Return Entity Description. # DisplayName (type System.String) # CompromisedEntity (type System.String) # Count (type System.Nullable`1[System.Int32]) # StartTimeUtc (type System.Nullable`1[System.DateTime]) # EndTimeUtc (type System.Nullable`1[System.DateTime]) # Severity (type System.Nullable`1 # [Microsoft.Azure.Security.Detection.AlertContracts.V3.Severity]) # SystemAlertIds (type System.Collections.Generic.List`1[System.String]) # AlertType (type System.String) # VendorName (type System.String) # ProviderName (type System.String) Threatintelligence Entity class. Attributes ---------- ProviderName : str Threatintelligence ProviderName ThreatType : str Threatintelligence ThreatType ThreatName : str Threatintelligence ThreatName Confidence : str Threatintelligence Confidence ReportLink : str Threatintelligence ReportLink ThreatDescription : str Threatintelligence ThreatDescription Create a new instance of the entity type. :param src_entity: instantiate entity using properties of src entity :param kwargs: key-value pair representation of entity Return Entity Description. # String Name of the provider from whom this # Threat Intelligence information was received Generic Entity class. Create a new instance of the entity type. :param src_entity: instantiate entity using properties of src entity :param kwargs: key-value pair representation of entity Return Entity Description. # type: Dict[str, Any] # Dictionary to map text names of types to the class.
| 1.926297
| 2
|
test/test_projects/c.py
|
sthagen/pypa-cibuildwheel
| 0
|
6626670
|
import jinja2
from .base import TestProject
SPAM_C_TEMPLATE = r"""
#include <Python.h>
{{ spam_c_top_level_add }}
static PyObject *
spam_system(PyObject *self, PyObject *args)
{
const char *command;
int sts;
if (!PyArg_ParseTuple(args, "s", &command))
return NULL;
sts = system(command);
{{ spam_c_function_add | indent(4) }}
return PyLong_FromLong(sts);
}
/* Module initialization */
static PyMethodDef module_methods[] = {
{"system", (PyCFunction)spam_system, METH_VARARGS,
"Execute a shell command."},
{NULL} /* Sentinel */
};
PyMODINIT_FUNC PyInit_spam(void)
{
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT, "spam", "Example module", -1, module_methods,
};
return PyModule_Create(&moduledef);
}
"""
SETUP_PY_TEMPLATE = r"""
import sys
from setuptools import setup, Extension
{{ setup_py_add }}
libraries = []
if sys.platform.startswith('linux'):
libraries.extend(['m', 'c'])
setup(
ext_modules=[Extension(
'spam',
sources=['spam.c'],
libraries=libraries,
{{ setup_py_extension_args_add | indent(8) }}
)],
{{ setup_py_setup_args_add | indent(4) }}
)
"""
SETUP_CFG_TEMPLATE = r"""
[metadata]
name = spam
version = 0.1.0
{{ setup_cfg_add }}
"""
def new_c_project(
    *,
    spam_c_top_level_add="",
    spam_c_function_add="",
    setup_py_add="",
    setup_py_extension_args_add="",
    setup_py_setup_args_add="",
    setup_cfg_add="",
):
    """Build a TestProject for a minimal C extension module.

    Each keyword argument is a code/config snippet spliced into the matching
    placeholder of the spam.c / setup.py / setup.cfg templates; all default
    to the empty string (no extra code injected).
    """
    # Template text for each generated file.
    sources = {
        "spam.c": SPAM_C_TEMPLATE,
        "setup.py": SETUP_PY_TEMPLATE,
        "setup.cfg": SETUP_CFG_TEMPLATE,
    }
    # Values substituted into the templates when the project is rendered.
    context = {
        "spam_c_top_level_add": spam_c_top_level_add,
        "spam_c_function_add": spam_c_function_add,
        "setup_py_add": setup_py_add,
        "setup_py_extension_args_add": setup_py_extension_args_add,
        "setup_py_setup_args_add": setup_py_setup_args_add,
        "setup_cfg_add": setup_cfg_add,
    }
    project = TestProject()
    project.files.update(
        {name: jinja2.Template(text) for name, text in sources.items()}
    )
    project.template_context.update(context)
    return project
|
import jinja2
from .base import TestProject
SPAM_C_TEMPLATE = r"""
#include <Python.h>
{{ spam_c_top_level_add }}
static PyObject *
spam_system(PyObject *self, PyObject *args)
{
const char *command;
int sts;
if (!PyArg_ParseTuple(args, "s", &command))
return NULL;
sts = system(command);
{{ spam_c_function_add | indent(4) }}
return PyLong_FromLong(sts);
}
/* Module initialization */
static PyMethodDef module_methods[] = {
{"system", (PyCFunction)spam_system, METH_VARARGS,
"Execute a shell command."},
{NULL} /* Sentinel */
};
PyMODINIT_FUNC PyInit_spam(void)
{
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT, "spam", "Example module", -1, module_methods,
};
return PyModule_Create(&moduledef);
}
"""
SETUP_PY_TEMPLATE = r"""
import sys
from setuptools import setup, Extension
{{ setup_py_add }}
libraries = []
if sys.platform.startswith('linux'):
libraries.extend(['m', 'c'])
setup(
ext_modules=[Extension(
'spam',
sources=['spam.c'],
libraries=libraries,
{{ setup_py_extension_args_add | indent(8) }}
)],
{{ setup_py_setup_args_add | indent(4) }}
)
"""
SETUP_CFG_TEMPLATE = r"""
[metadata]
name = spam
version = 0.1.0
{{ setup_cfg_add }}
"""
def new_c_project(
    *,
    spam_c_top_level_add="",
    spam_c_function_add="",
    setup_py_add="",
    setup_py_extension_args_add="",
    setup_py_setup_args_add="",
    setup_cfg_add="",
):
    """Return a TestProject for a minimal C extension module.

    Each keyword argument is a code/config snippet that is spliced into the
    matching placeholder of the spam.c / setup.py / setup.cfg templates; all
    default to the empty string (no extra code injected).
    """
    project = TestProject()
    # Register each generated file as a Jinja2 template.
    project.files.update(
        {
            "spam.c": jinja2.Template(SPAM_C_TEMPLATE),
            "setup.py": jinja2.Template(SETUP_PY_TEMPLATE),
            "setup.cfg": jinja2.Template(SETUP_CFG_TEMPLATE),
        }
    )
    # Values substituted into the templates when the project is rendered.
    project.template_context.update(
        {
            "spam_c_top_level_add": spam_c_top_level_add,
            "spam_c_function_add": spam_c_function_add,
            "setup_py_add": setup_py_add,
            "setup_py_extension_args_add": setup_py_extension_args_add,
            "setup_py_setup_args_add": setup_py_setup_args_add,
            "setup_cfg_add": setup_cfg_add,
        }
    )
    return project
|
en
| 0.287604
|
#include <Python.h> {{ spam_c_top_level_add }} static PyObject * spam_system(PyObject *self, PyObject *args) { const char *command; int sts; if (!PyArg_ParseTuple(args, "s", &command)) return NULL; sts = system(command); {{ spam_c_function_add | indent(4) }} return PyLong_FromLong(sts); } /* Module initialization */ static PyMethodDef module_methods[] = { {"system", (PyCFunction)spam_system, METH_VARARGS, "Execute a shell command."}, {NULL} /* Sentinel */ }; PyMODINIT_FUNC PyInit_spam(void) { static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "spam", "Example module", -1, module_methods, }; return PyModule_Create(&moduledef); } import sys from setuptools import setup, Extension {{ setup_py_add }} libraries = [] if sys.platform.startswith('linux'): libraries.extend(['m', 'c']) setup( ext_modules=[Extension( 'spam', sources=['spam.c'], libraries=libraries, {{ setup_py_extension_args_add | indent(8) }} )], {{ setup_py_setup_args_add | indent(4) }} ) [metadata] name = spam version = 0.1.0 {{ setup_cfg_add }}
| 2.147182
| 2
|
noxfile.py
|
kianmeng/sphinx-autobuild
| 264
|
6626671
|
"""Development automation."""
import nox
def _install_this_editable(session, *, extras=None):
if extras is None:
extras = []
session.install("flit")
session.run(
"flit",
"install",
"-s",
"--deps=production",
"--extras",
",".join(extras),
silent=True,
)
@nox.session(reuse_venv=True)
def lint(session):
    """Run all pre-commit hooks against the entire tree.

    Positional args after ``--`` are forwarded to ``pre-commit run``.
    """
    session.install("pre-commit")
    session.run("pre-commit", "run", "--all-files", *session.posargs)
@nox.session(python=["3.6", "3.7", "3.8"])
def test(session):
    """Run the test suite on each supported interpreter.

    By default runs pytest with coverage of ``sphinx_autobuild``; positional
    args after ``--`` replace the default pytest arguments entirely.
    """
    _install_this_editable(session, extras=["test"])
    default_args = ["--cov-report", "term", "--cov", "sphinx_autobuild"]
    args = session.posargs or default_args
    session.run("pytest", *args)
@nox.session(reuse_venv=True)
def docs(session):
    """Build the HTML documentation once into build/docs."""
    _install_this_editable(session, extras=["docs"])
    session.run("sphinx-build", "-b", "html", "docs/", "build/docs")
@nox.session(name="docs-live", reuse_venv=True)
def docs_live(session):
    """Serve the docs with rebuild-on-change via sphinx-autobuild.

    Positional args after ``--`` are forwarded to sphinx-autobuild.
    """
    _install_this_editable(session, extras=["docs"])
    session.run(
        "sphinx-autobuild", "-b", "html", "docs/", "build/docs", *session.posargs
    )
|
"""Development automation."""
import nox
def _install_this_editable(session, *, extras=None):
if extras is None:
extras = []
session.install("flit")
session.run(
"flit",
"install",
"-s",
"--deps=production",
"--extras",
",".join(extras),
silent=True,
)
@nox.session(reuse_venv=True)
def lint(session):
session.install("pre-commit")
session.run("pre-commit", "run", "--all-files", *session.posargs)
@nox.session(python=["3.6", "3.7", "3.8"])
def test(session):
_install_this_editable(session, extras=["test"])
default_args = ["--cov-report", "term", "--cov", "sphinx_autobuild"]
args = session.posargs or default_args
session.run("pytest", *args)
@nox.session(reuse_venv=True)
def docs(session):
_install_this_editable(session, extras=["docs"])
session.run("sphinx-build", "-b", "html", "docs/", "build/docs")
@nox.session(name="docs-live", reuse_venv=True)
def docs_live(session):
_install_this_editable(session, extras=["docs"])
session.run(
"sphinx-autobuild", "-b", "html", "docs/", "build/docs", *session.posargs
)
|
en
| 0.630693
|
Development automation.
| 2.053603
| 2
|
tyrian/typarser/grammar_parser/__init__.py
|
Mause/tyrian
| 1
|
6626672
|
<filename>tyrian/typarser/grammar_parser/__init__.py
"""
Contains code for parsing the Grammar,
and for using it to parse a stream of tokens
"""
from .grammar_parser import GrammarParser
GrammarParser
|
<filename>tyrian/typarser/grammar_parser/__init__.py
"""
Contains code for parsing the Grammar,
and for using it to parse a stream of tokens
"""
from .grammar_parser import GrammarParser
GrammarParser
|
en
| 0.759431
|
Contains code for parsing the Grammar, and for using it to parse a stream of tokens
| 1.778752
| 2
|
lib.py
|
CJ-5/Python_Adventure_Game
| 0
|
6626673
|
# Holds the main functions that operate the backend of the game (e.g battle system)
import os
from os import system
import lib
import movement_engine
import time
from colorama import Fore, Style
import game_data
import random
import math
import ctypes
import msvcrt
import subprocess
from ctypes import wintypes
from game_data import MQ, InvItem
class Logo:
    """ASCII-art title banner stored as Unicode code points.

    Each list element is a code point rendered with ``chr()`` by
    ``print_logo``: 9552/9553/9556/9559/9562/9565/9608 are box-drawing and
    block characters, 32 is a space, 10 is a newline.
    """
    __slots__ = ("logo_a", "logo_b")  # fixed attribute set, no per-instance dict
    # logo_a: equivalent to "Adventure"
    # logo_b: equivalent to "Game"

    def __init__(self):
        # Raw code-point data; rendered one character at a time by print_logo().
        self.logo_a = [10, 32, 32, 32, 32, 10, 32, 9608, 9608, 9608, 9608, 9608, 9559, 32, 9608, 9608, 9608, 9608, 9608,
            9608, 9559, 32, 9608, 9608, 9559, 32, 32, 32, 9608, 9608, 9559, 9608, 9608, 9608, 9608, 9608,
            9608, 9608, 9559, 9608, 9608, 9608, 9559, 32, 32, 32, 9608, 9608, 9559, 9608, 9608, 9608, 9608,
            9608, 9608, 9608, 9608, 9559, 9608, 9608, 9559, 32, 32, 32, 9608, 9608, 9559, 9608, 9608, 9608,
            9608, 9608, 9608, 9559, 32, 9608, 9608, 9608, 9608, 9608, 9608, 9608, 9559, 10, 9608, 9608, 9556,
            9552, 9552, 9608, 9608, 9559, 9608, 9608, 9556, 9552, 9552, 9608, 9608, 9559, 9608, 9608, 9553,
            32, 32, 32, 9608, 9608, 9553, 9608, 9608, 9556, 9552, 9552, 9552, 9552, 9565, 9608, 9608, 9608,
            9608, 9559, 32, 32, 9608, 9608, 9553, 9562, 9552, 9552, 9608, 9608, 9556, 9552, 9552, 9565, 9608,
            9608, 9553, 32, 32, 32, 9608, 9608, 9553, 9608, 9608, 9556, 9552, 9552, 9608, 9608, 9559,
            9608, 9608, 9556, 9552, 9552, 9552, 9552, 9565, 10, 9608, 9608, 9608, 9608, 9608, 9608, 9608,
            9553, 9608, 9608, 9553, 32, 32, 9608, 9608, 9553, 9608, 9608, 9553, 32, 32, 32, 9608, 9608, 9553,
            9608, 9608, 9608, 9608, 9608, 9559, 32, 32, 9608, 9608, 9556, 9608, 9608, 9559, 32, 9608, 9608,
            9553, 32, 32, 32, 9608, 9608, 9553, 32, 32, 32, 9608, 9608, 9553, 32, 32, 32, 9608, 9608, 9553,
            9608, 9608, 9608, 9608, 9608, 9608, 9556, 9565, 9608, 9608, 9608, 9608, 9608, 9559, 32, 32, 10,
            9608, 9608, 9556, 9552, 9552, 9608, 9608, 9553, 9608, 9608, 9553, 32, 32, 9608, 9608, 9553, 9562,
            9608, 9608, 9559, 32, 9608, 9608, 9556, 9565, 9608, 9608, 9556, 9552, 9552, 9565, 32, 32, 9608,
            9608, 9553, 9562, 9608, 9608, 9559, 9608, 9608, 9553, 32, 32, 32, 9608, 9608, 9553, 32, 32, 32,
            9608, 9608, 9553, 32, 32, 32, 9608, 9608, 9553, 9608, 9608, 9556, 9552, 9552, 9608, 9608,
            9559, 9608, 9608, 9556, 9552, 9552, 9565, 32, 32, 10, 9608, 9608, 9553, 32, 32, 9608, 9608, 9553,
            9608, 9608, 9608, 9608, 9608, 9608, 9556, 9565, 32, 9562, 9608, 9608, 9608, 9608, 9556, 9565, 32,
            9608, 9608, 9608, 9608, 9608, 9608, 9608, 9559, 9608, 9608, 9553, 32, 9562, 9608, 9608, 9608,
            9608, 9553, 32, 32, 32, 9608, 9608, 9553, 32, 32, 32, 9562, 9608, 9608, 9608, 9608, 9608, 9608,
            9556, 9565, 9608, 9608, 9553, 32, 32, 9608, 9608, 9553, 9608, 9608, 9608, 9608, 9608, 9608, 9608,
            9559, 10, 9562, 9552, 9565, 32, 32, 9562, 9552, 9565, 9562, 9552, 9552, 9552, 9552, 9552, 9565,
            32, 32, 32, 9562, 9552, 9552, 9552, 9565, 32, 32, 9562, 9552, 9552, 9552, 9552, 9552, 9552, 9565,
            9562, 9552, 9565, 32, 32, 9562, 9552, 9552, 9552, 9565, 32, 32, 32, 9562, 9552, 9565,
            32, 32, 32, 32, 9562, 9552, 9552, 9552, 9552, 9552, 9565, 32, 9562, 9552, 9565, 32, 32, 9562,
            9552, 9565, 9562, 9552, 9552, 9552, 9552, 9552, 9552, 9565, 10, 32, 32, 32, 32, 32, 32, 32, 32,
            32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
            32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
            32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 10]
        self.logo_b = [10, 32, 9608, 9608, 9608, 9608, 9608, 9608, 9559, 32, 32, 9608, 9608, 9608, 9608, 9608,
            9559, 32, 9608, 9608, 9608, 9559, 32, 32, 32, 9608, 9608, 9608, 9559, 9608, 9608, 9608, 9608,
            9608, 9608, 9608, 9559, 10, 9608, 9608, 9556, 9552, 9552, 9552, 9552, 9565, 32, 9608, 9608,
            9556, 9552, 9552, 9608, 9608, 9559, 9608, 9608, 9608, 9608, 9559, 32, 9608, 9608, 9608, 9608,
            9553, 9608, 9608, 9556, 9552, 9552, 9552, 9552, 9565, 10, 9608, 9608, 9553, 32, 32, 9608, 9608,
            9608, 9559, 9608, 9608, 9608, 9608, 9608, 9608, 9608, 9553, 9608, 9608, 9556, 9608, 9608, 9608,
            9608, 9556, 9608, 9608, 9553, 9608, 9608, 9608, 9608, 9608, 9559, 32, 32, 10, 9608, 9608, 9553,
            32, 32, 32, 9608, 9608, 9553, 9608, 9608, 9556, 9552, 9552, 9608, 9608, 9553, 9608, 9608, 9553,
            9562, 9608, 9608, 9556, 9565, 9608, 9608, 9553, 9608, 9608, 9556, 9552, 9552, 9565, 32, 32, 10,
            9562, 9608, 9608, 9608, 9608, 9608, 9608, 9556, 9565, 9608, 9608, 9553, 32, 32, 9608, 9608,
            9553, 9608, 9608, 9553, 32, 9562, 9552, 9565, 32, 9608, 9608, 9553, 9608, 9608, 9608, 9608, 9608,
            9608, 9608, 9559, 10, 32, 9562, 9552, 9552, 9552, 9552, 9552, 9565, 32, 9562, 9552, 9565, 32, 32,
            9562, 9552, 9565, 9562, 9552, 9565, 32, 32, 32, 32, 32, 9562, 9552, 9565, 9562, 9552, 9552, 9552,
            9552, 9552, 9552, 9565, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
            32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
            32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 10]
def print_logo():
    """Render the two-part ASCII title banner to the console."""
    banner = Logo()
    # Newline code points (10) are left-padded so following art is pushed off
    # the left margin; the second banner uses a wider pad.
    for points, pad in ((banner.logo_a, 10), (banner.logo_b, 30)):
        for point in points:
            glyph = chr(point)
            print(f"{glyph:<{pad}}" if point == 10 else glyph, end='')
    print('\n')
def get_max():
    """Record the largest possible console size in game_data.SysData.

    Windows-only: queries the Win32 console API for the largest window the
    current font/display allows and stores ``(cols, lines)`` in
    ``game_data.SysData.max_screen_size``.  Returns None.
    """
    # Initiate the max size of the console
    kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
    user32 = ctypes.WinDLL('user32', use_last_error=True)
    # Declare return/argument types so ctypes marshals handles correctly
    kernel32.GetConsoleWindow.restype = wintypes.HWND
    kernel32.GetLargestConsoleWindowSize.restype = wintypes._COORD
    kernel32.GetLargestConsoleWindowSize.argtypes = (wintypes.HANDLE,)
    user32.ShowWindow.argtypes = (wintypes.HWND, ctypes.c_int)
    # CONOUT$ is the console's active screen buffer device on Windows
    fd = os.open('CONOUT$', os.O_RDWR)
    try:
        hcon = msvcrt.get_osfhandle(fd)
        max_size = kernel32.GetLargestConsoleWindowSize(hcon)
        if max_size.X == 0 and max_size.Y == 0:
            # (0, 0) signals failure; surface the real Win32 error
            raise ctypes.WinError(ctypes.get_last_error())
        cols = max_size.X
        hwnd = kernel32.GetConsoleWindow()
        if cols and hwnd:
            lines = max_size.Y
            game_data.SysData.max_screen_size = (cols, lines)
    finally:
        os.close(fd)
def is_full_screen():
    """Return True when the terminal matches the recorded maximum size.

    Fixes: the original left three unlabeled debug ``print()``s in place
    (one of which compared against ``get_max()``, whose return value is
    always None) and swallowed every exception with a bare ``except``.
    """
    try:
        col, row = os.get_terminal_size()
        return (col, row) == game_data.SysData.max_screen_size
    except (OSError, ValueError):
        # get_terminal_size raises OSError when no console is attached
        return False
def maximize_console(lines=None):
    """Resize the Windows console to its largest size and maximize the window.

    lines -- optional row count; clamped to at least the largest size and at
             most 9999 (the ``mode.com`` limit).
    Side effect: stores the final ``(cols, lines)`` in
    ``game_data.SysData.max_screen_size``.
    """
    # I hate how long this took to figure out
    kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
    user32 = ctypes.WinDLL('user32', use_last_error=True)
    SW_MAXIMIZE = 3  # specifies to maximize the window
    kernel32.GetConsoleWindow.restype = wintypes.HWND
    kernel32.GetLargestConsoleWindowSize.restype = wintypes._COORD
    kernel32.GetLargestConsoleWindowSize.argtypes = (wintypes.HANDLE,)
    user32.ShowWindow.argtypes = (wintypes.HWND, ctypes.c_int)
    # CONOUT$ is the console's active screen buffer device on Windows
    fd = os.open('CONOUT$', os.O_RDWR)
    try:
        hcon = msvcrt.get_osfhandle(fd)
        max_size = kernel32.GetLargestConsoleWindowSize(hcon)
        if max_size.X == 0 and max_size.Y == 0:
            # (0, 0) signals failure; surface the real Win32 error
            raise ctypes.WinError(ctypes.get_last_error())
        cols = max_size.X
        hwnd = kernel32.GetConsoleWindow()
        if cols and hwnd:
            if lines is None:
                lines = max_size.Y
            else:
                lines = max(min(lines, 9999), max_size.Y)
            game_data.SysData.max_screen_size = (cols, lines)
            # mode.com actually applies the new geometry to the console buffer
            subprocess.check_call('mode.com con cols={} lines={}'.format(cols, lines))
            user32.ShowWindow(hwnd, SW_MAXIMIZE)
    finally:
        os.close(fd)
def clear_line(num: int, max_line_length: int = None,
               reset: bool = False, direction: str = 'A'):
    """Blank *num* console lines by overprinting them with spaces.

    num             -- how many lines to clear
    max_line_length -- width to blank; defaults to the recorded screen width
    reset           -- move the cursor back down afterwards
    direction       -- ANSI cursor step direction (default 'A', i.e. up)
    """
    width = max_line_length if max_line_length is not None else game_data.SysData.max_screen_size[0]
    step = f'\x1b[{1}{direction.upper()}'
    blank = f'\r{Fore.RED}{" " * width}{Fore.RESET}\r'
    for _ in range(num):
        print(step, end='')
        print(blank, end='')
    if reset is True:
        print(f'\x1b[{num // 2}B')  # hop the cursor back toward its start
def back_line(num: int, delay: int = 10, index: int = 1):
    """Erase *num* characters on a previous line as if backspacing.

    delay -- milliseconds between each erased character
    index -- how many lines above the cursor the target line sits
    """
    # Jump up `index` lines and right `num` columns, then walk backwards
    print(f'\x1b[{index}A' + f'\x1b[{num}C', end=' ')
    pause = delay / 1000
    for _ in range(num):
        print('\x1b[2D ', end='')
        time.sleep(pause)
    print('\r', end='')
def display_help(cmd: str = None):
    """Print the full help page, or (not yet implemented) one command's info.

    Fixes: the original test ``cmd.isspace() or cmd is cmd == ""`` chained to
    ``(cmd is cmd) and (cmd == "")`` — i.e. just ``cmd == ""`` — and raised
    AttributeError when *cmd* was None, which is its own default value.
    """
    help_page = game_data.HelpPage()
    if cmd is None or cmd == "" or cmd.isspace():
        # Display the full help page
        print("Game Command List\n")
        for cmd_info in help_page.ind_def:
            print(f"{cmd_info:<20}", end='')
            print(f": {help_page.ind_def[cmd_info]}")
    else:
        # Index the command info from the command info list
        pass
def get_distance(object_pos0: tuple, object_pos1: tuple):
    """Return the Euclidean distance between two (x, y) positions.

    Uses math.hypot instead of the hand-rolled ``sqrt(abs(dx**2 + dy**2))``;
    the ``abs()`` was redundant since a sum of squares is never negative.
    """
    return math.hypot(object_pos0[0] - object_pos1[0], object_pos0[1] - object_pos1[1])
def check_proximity(object_pos: tuple):
    """Return True when *object_pos* is within the player's detection radius."""
    dx = object_pos[0] - game_data.MapData.x
    dy = object_pos[1] - game_data.MapData.y
    distance = math.sqrt(abs(dx ** 2 + dy ** 2))
    return distance <= game_data.PlayerData.Detection_Distance
def add_item(item_id: int):
    """Add one item (by id) to the player's inventory, stacking duplicates."""
    # Add an item by id to a players inventory
    if game_data.PlayerData.Inventory_Accessible:
        item_data = item_info(str(item_id))
        # NOTE(review): this subtracts a single item's size from the total
        # Inventory_Space, not from the remaining free space — confirm that
        # Inventory_Space is meant to track *remaining* capacity.
        size_calc = game_data.PlayerData.Inventory_Space - item_data.item_size
        if size_calc >= 0:
            # Check for duplicate entries and combine their qty
            dupe = False
            for idx, inv_item in enumerate(game_data.PlayerData.Inventory):
                if inv_item.item_id == item_data.item_id:
                    if not game_data.PlayerData.Inventory[idx].qty + 1 > inv_item.max_qty:
                        # Makes sure to not add items that can't have multiple instances in the inventory
                        dupe = True
                        game_data.PlayerData.Inventory[idx].qty += 1
                    break
            if not dupe:
                game_data.PlayerData.Inventory.append(item_data)
            # print(game_data.PlayerData.Inventory[ind])
        elif size_calc < 0:
            print("Could not add item(s) to your inventory due to lack of space")
    else:
        print("Error: Player Inventory is inaccessible")
def remove_item(item_id: int, qty: int = 1):
    """Remove *qty* of the item with *item_id* from the player's inventory.

    Only the last matching stack is touched (reverse-order search, matching
    the original).  Fixes the original decrement, which could leave a stack
    at zero or negative quantity whenever qty >= stack size > 1.
    """
    if game_data.PlayerData.Inventory_Accessible:
        for inv_item in game_data.PlayerData.Inventory[::-1]:  # reverse order search
            if inv_item.item_id == item_id:
                if inv_item.qty > qty:
                    inv_item.qty -= qty
                else:
                    # qty exhausts (or exceeds) this stack: drop it entirely
                    game_data.PlayerData.Inventory.remove(inv_item)
                break
def reset_sys_font(font_size: int = 18):
    """Set the Windows console font to NSimSun at *font_size* via Win32.

    Uses SetCurrentConsoleFontEx with a CONSOLE_FONT_INFOEX structure built
    through ctypes; Windows-only.
    """
    LF_FACESIZE = 32          # max face-name length in CONSOLE_FONT_INFOEX
    STD_OUTPUT_HANDLE = -11   # Win32 constant for the stdout handle

    class COORD(ctypes.Structure):
        # Win32 COORD: a pair of 16-bit X/Y values
        _fields_ = [("X", ctypes.c_short), ("Y", ctypes.c_short)]

    class CONSOLE_FONT_INFOEX(ctypes.Structure):
        # Mirror of the Win32 CONSOLE_FONT_INFOEX structure layout
        _fields_ = [("cbSize", ctypes.c_ulong),
                    ("nFont", ctypes.c_ulong),
                    ("dwFontSize", COORD),
                    ("FontFamily", ctypes.c_uint),
                    ("FontWeight", ctypes.c_uint),
                    ("FaceName", ctypes.c_wchar * LF_FACESIZE)]

    font = CONSOLE_FONT_INFOEX()
    font.cbSize = ctypes.sizeof(CONSOLE_FONT_INFOEX)
    font.dwFontSize.Y = font_size  # The actual scalable size of the font
    font.FontFamily = 54
    font.FontWeight = 400          # 400 = normal weight
    font.FaceName = "NSimSun"
    handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
    ctypes.windll.kernel32.SetCurrentConsoleFontEx(
        handle, ctypes.c_long(False), ctypes.pointer(font))
def has_item(item_search: str, data_return: bool = False):
    """Check whether the player holds an item, by numeric id or by name.

    data_return -- when True, return the matching inventory entry instead
                   of True.  Returns False when the item is not held.
    Hyphens in a name search are treated as spaces.
    """
    inventory = game_data.PlayerData.Inventory
    if str(item_search).isnumeric():  # numeric id was given
        wanted_id = int(item_search)
        matches = (entry for entry in inventory if entry.item_id == wanted_id)
    else:
        wanted_name = item_search.replace("-", " ").lower()
        matches = (entry for entry in inventory if entry.name.lower() == wanted_name)
    for entry in matches:
        return entry if data_return else True
    return False  # item not found
def item_info(item: str):
    """Look up an item's data by numeric id or by name; False if unknown."""
    catalogue = movement_engine.Data.game_items
    if str(item).isnumeric():
        wanted_id = int(item)
        return next((entry for entry in catalogue if entry.item_id == wanted_id), False)
    wanted_name = item.lower()
    return next((entry for entry in catalogue if entry.name.lower() == wanted_name), False)
def cmove(num: int = 1, direction: str = 'A'):
    """Move the console cursor *num* cells in *direction* (ANSI A/B/C/D)."""
    print(f"\x1b[{num}{direction}", end='')
def map_index(map_id: int):
    """Return the map-data class for *map_id*, or False when out of range.

    Fixes: the original only checked the upper bound, so a negative id
    silently indexed from the end of the list instead of being rejected.
    """
    maps = [game_data.MainMap, game_data.Floor0, game_data.Floor1, game_data.Floor2, game_data.Floor3,
            game_data.Floor4, game_data.GateKeeper, game_data.FinalFloor]
    if 0 <= map_id < len(maps):
        return maps[map_id]
    return False
def display_inv():
    """Clear the screen and print the player's inventory in two columns.

    Items are split roughly in half between the columns (the first column
    gets the extra item when the inventory size is odd); empty slots are
    rendered as dim '*' placeholders.  Side effects: marks the inventory as
    displayed and disables command input.
    """
    # if the map is displayed, clear the map and then display the inventory
    # Display the inventory
    os.system("cls")
    item_spacing = 25   # column width for each of name / qty / id
    side_spacing = 5    # left margin before the first column
    element_num = 1     # Which side of the array is printing
    key_num = 0         # The current item to print in the first column
    sub_key_num = 0     # The current item to print in the second column
    # inv_size = len(game_data.PlayerData.Inventory) - 1
    row1 = game_data.PlayerData.Inventory_Space // 2
    inv0 = []
    inv1 = []
    if game_data.PlayerData.Inventory_Space % 2 == 1:
        # If the inventory space num is odd, the first column will print 1 more than the second column
        row1 += 1
    # Initialize the inventory columns
    for x, i in enumerate(game_data.PlayerData.Inventory):
        if x > row1 - 1:
            inv1.append(i)
        else:
            inv0.append(i)
    print(f"{'':<{side_spacing}}", end='')  # Title Side Spacing
    print(f"{Fore.RED}{'Item Name':^{item_spacing}}{'Item QTY':^{item_spacing}}{'Item ID':^{item_spacing}}"
          f"{'Item Name':^{item_spacing}}{'Item QTY':^{item_spacing}}{'Item ID':^{item_spacing}}{Fore.RESET}\n")
    # Alternate between the two columns, one slot per iteration
    for i in range(game_data.PlayerData.Inventory_Space):
        if element_num == 1:
            print(f"{'':<{side_spacing}}", end='')
            if key_num > len(inv0) - 1:
                # No Item to print
                print(f"{Style.BRIGHT}{Fore.BLACK}{'*':^{item_spacing}}{'*':^{item_spacing}}{'*':^{item_spacing}}"
                      f"{Fore.RESET}", end='')
            else:
                # There is an item to print
                item = inv0[key_num]
                print(f"{item.name:^{item_spacing}}{item.qty:^{item_spacing}}{item.item_id:^{item_spacing}}", end='')
                key_num += 1
            element_num = 2
        elif element_num == 2:
            # Print second row, check to see if requested item exists if so print
            # Check to see if the second column has anything to print
            if sub_key_num > len(inv1) - 1:
                print(f"{Style.BRIGHT}{Fore.BLACK}{'*':^{item_spacing}}{'*':^{item_spacing}}{'*':^{item_spacing}}"
                      f"{Fore.RESET}", end='')
            else:
                item = inv1[sub_key_num]
                print(f"{item.name:^{item_spacing}}{item.qty:^{item_spacing}}{item.item_id:^{item_spacing}}", end='')
                sub_key_num += 1
            element_num = 1  # Set to first column
            print(f"{Fore.RESET}\n", end='')
    print(Fore.RESET + Style.RESET_ALL)  # Create newline at end of printout
    # print([x.name for x in game_data.PlayerData.Inventory])
    # print([x.name for x in inv0])
    # print([x.name for x in inv1])
    game_data.PlayerData.Inventory_Displayed = True
    game_data.PlayerData.command_status = False  # Disable command input
def display_stats():
    """Display system and player statistics (not implemented yet)."""
    pass
def display_item_info(item_data):  # Get raw item info and display it in formatted statement
    """Print a formatted, colour-coded info sheet for one item record."""
    spacing = 30  # label column width
    item_has = has_item(item_data.item_id)
    print('\n' * 3 + f'{item_data.name:-^20}')
    # bool indexes the colour pair: False -> red, True -> green
    print(f'{Fore.YELLOW}{"Player has item:":<{spacing}}{[Fore.RED, Fore.GREEN][item_has]}{item_has}')
    print(f'{Fore.YELLOW}{"Item: ":<{spacing}}{item_data.item_id}/{Fore.RED}{len(movement_engine.Data.game_items) - 1}'
          f'{Fore.RESET}')
    print(f'{Fore.YELLOW}{"Item ID:":<{spacing}}{Fore.RESET}{item_data.item_id}')
    print(f'{Fore.YELLOW}{"Item Type:":<{spacing}}{Fore.RESET}{item_data.type}')
    print(f'{Fore.YELLOW}{"Item Max Quantity:":<{spacing}}{Fore.RESET}{item_data.max_qty}')
    print(f'{Fore.YELLOW}{"Item Size:":<{spacing}}{Fore.RESET}{item_data.item_size}')
    # damage is a (min, max) pair
    print(f'{Fore.YELLOW}{"Damage: ":<{spacing}}{Fore.RESET}{item_data.damage[0]} {Fore.YELLOW}-> '
          f'{Fore.RESET}{item_data.damage[1]}')
    print(f'{Fore.YELLOW}{"Health Regeneration:":<{spacing}}{Fore.RESET}{item_data.health_regen}')
    # print(f'{"Stamina Regeneration:":<{spacing}}{item_data.stamina_regen}')  # Not Implemented yet
    print(f'{Fore.YELLOW}{"Description:":<{spacing}}{Fore.RESET}{item_data.desc}')
def ck(text: str, color: str = None):
    """Pair *text* with an optional colour name, as expected by gprint's MQ."""
    return (text, color)
def process_command(cmd_raw):
    """Parse and dispatch one console command typed by the player.

    Accepts a command only when its keyword is both in the global help list
    and valid on the current map (``exit`` is always allowed).  Everything
    after the keyword is passed on as the argument string.
    """
    # Process command
    cmd = cmd_raw.lower().split(' ')
    if (len(cmd_raw) > 0 and game_data.HelpPage().cmd_list.__contains__(cmd[0])
            and game_data.MapData.valid_cmd.__contains__(cmd[0])) or cmd[0] == "exit":
        cmd_latter = " ".join(cmd[1:])  # Removes the command keyword
        if cmd[0] == "help" or cmd[0] == "?":  # Print the help page
            system('cls')
            game_data.PlayerData.Inventory_Displayed = True
            display_help(cmd_latter)
        elif cmd[0] == "inventory":  # print the players inventory
            system('cls')
            display_inv()
            gprint(game_data.MQ([ck("\nMove to exit...")]))
        elif cmd[0] == "item-info":  # Print the specified items info
            system('cls')
            # game_data.PlayerData.command_status = False  # Disable command input
            game_data.PlayerData.Inventory_Displayed = True
            game_data.PlayerData.command_status = False
            info = item_info(cmd_latter)
            if info is False:
                err_msg('Invalid Item')
            else:
                display_item_info(info)
                gprint(game_data.MQ([ck("\nMove to exit...")]))
        elif cmd[0] == "stats":  # print system & player statistics
            system('cls')
            display_stats()
        elif cmd[0] == 'drop':  # Remove the specified item from the players inventory
            item = item_info(cmd_latter)
            if item is False:
                err_msg('Invalid Item')
            elif not has_item(item.item_id):
                err_msg('You don\'t have this item')
            else:  # Remove the item from players inventory
                remove_item(item.item_id)
                script = [ck('Dropped', 'yellow'), ck('['), ck(item.name, 'red'), ck(']')]
                # Total printed width, used to centre the confirmation text
                sl = 0
                for i in script:
                    sl += len(i[0])
                game_data.MapData.map_idle = True
                system('cls')
                lib.center_cursor(sl)
                gprint(game_data.MQ(script))
                time.sleep(1)
                game_data.MapData.map_idle = False
                movement_engine.show_map(game_data.MapData.current_map)
        elif cmd[0] == "exit":
            # Tear down: stop the map listener, restore font, say goodbye
            game_data.MapData.map_kill = True  # Exit listener thread
            os.system('cls')
            reset_sys_font(30)
            get_max()
            print(f"{'':<{game_data.SysData.max_screen_size[0] // 2}}", end='')
            gprint(MQ([ck("Goodbye :(")]))
            time.sleep(1)
            system('exit')
            game_data.SysData.full_kill = True
    else:
        err_msg('Invalid Command')
    game_data.MapData.current_command = ""  # Reset the inputted command
def err_msg(msg: str):
    """Flash *msg* in red, centred on a cleared screen, then restore the map.

    Pauses the map listener and command input for the 2-second display and
    re-enables both afterwards.
    """
    game_data.MapData.map_idle = True
    game_data.PlayerData.command_status = False
    system('cls')
    center_cursor(len(msg))
    gprint(MQ([ck(msg, "red")]))
    time.sleep(2)
    movement_engine.show_map(game_data.MapData.current_map)
    game_data.MapData.map_idle = False
    game_data.PlayerData.command_status = True
def center_cursor(x_offset: int, y_offset: int = 0):
    """Pad the cursor to roughly the screen centre, offset for the text width.

    x_offset -- width of the text about to be printed (it gets centred)
    y_offset -- rows to stay above the vertical centre
    """
    # Maybe change to use /x1b[#A/B/C/D exit code to move cursor
    game_data.MapData.current_command = ""  # discard any half-typed command
    cols, rows = game_data.SysData.max_screen_size
    vertical_pad = '\n' * ((rows // 2) - y_offset)
    horizontal_pad = ' ' * ((cols // 2) - (x_offset // 2))
    print(vertical_pad + horizontal_pad, end='')
def event_handler(event_id: int, event_type: int, reset_map: bool = True):
    """Play an event's dialogue exactly once, then restore the map.

    event_id   -- object id of the event (as stored on the event records)
    event_type -- index into the keys of ``game_data.EventData.events``
    reset_map  -- redraw the current map after the dialogue finishes

    Fixes: the original rebound ``event_id`` to the event's *list index*
    and cached that index, so the "already seen" check (which compares
    object ids) never matched and events could replay.
    """
    if event_id not in game_data.MapDataCache.event_cache:  # Make sure not to duplicate events
        game_data.MapData.map_idle = True  # Stop keyboard listener and printout
        game_data.PlayerData.command_status = False  # Disable command input
        system('cls')
        time.sleep(2)
        # Resolve the object id to the event's position within its group
        event_group = game_data.EventData.events[list(game_data.EventData.events.keys())[event_type]]
        event_index = None
        for x, m in enumerate(event_group):
            if m.object_id == event_id:
                event_index = x
                break
        # Play each dialogue line; the second element is either a delay in
        # ms or a (delay, colour) pair
        for m in event_group[event_index].event_dialogue:
            if type(m[1]) is tuple:
                delay, colour = m[1]
            else:
                delay = m[1]
                colour = 'white'
            center_cursor(len(m[0]))
            gprint(game_data.MQ([ck(m[0], colour)]))  # Print specified dialogue
            time.sleep(delay / 1000)  # Pause for specified delay in MS
            system('cls')
        game_data.MapDataCache.event_cache.append(event_id)  # cache the object id, not the index
        game_data.MapData.map_idle = False  # Resume the map listener
        game_data.PlayerData.command_status = True  # Re-Enable user command input
        if reset_map:
            movement_engine.show_map(game_data.MapData.current_map)
def question_handler(question_diff: int):
    """Ask a random question of the given difficulty and block until answered.

    Order of operations:
    1. Pull a random question of the specified difficulty
    2. Ask and open input (kb_listener on_press thread accumulates the answer)
    3. Block until the listener clears ``question_status``

    Fixes: ``random.randint``'s upper bound is inclusive, so the original
    could index one past the end of the pool; it also measured ``len()`` of
    difficulty 0 regardless of the requested difficulty.
    """
    pool = movement_engine.Data.questions[0][question_diff]
    question = pool[random.randrange(len(pool))][0]
    # Find the longest line so the question can be roughly centred
    max_l = max((len(line) for line in question.split("\n")), default=0)
    os.system("cls")
    print("\n" * (game_data.SysData.max_screen_size[1] // 2) + " " *
          (game_data.SysData.max_screen_size[0] - (max_l // 2)), end='')
    print(question)
    game_data.PlayerData.question_status = True  # listener now accumulates the answer
    while game_data.PlayerData.question_status:  # Lock here until input is satisfied
        time.sleep(0.1)
    # NOTE(review): the collected answer is read but never graded yet
    answer = game_data.PlayerData.question_answer
def gprint(queue, speed: int = 25):
    """Type out *queue* character by character, optionally coloured.

    queue -- an MQ of (text, colour-name-or-None) pairs; a bare string is
             wrapped into a single uncoloured message.
    speed -- per-character delay in milliseconds.
    """
    if type(queue) is not MQ:
        queue = MQ([(queue, None)])  # normalise a raw string into MQ format
    delay = speed / 1000  # ms -> s
    palette = {"red": Fore.RED, "green": Fore.GREEN, "yellow": Fore.YELLOW,
               "blue": Fore.BLUE, "magenta": Fore.MAGENTA, "cyan": Fore.CYAN,
               "white": Fore.WHITE}
    for text, colour in queue.messages:
        if colour is not None:
            print(palette[colour.lower()], end='')
        for char in text:
            print(char, end='')
            time.sleep(delay)
        if colour is not None:
            print(Fore.RESET, end='')
    print()  # Create new line
|
# Holds the main functions that operate the backend of the game (e.g battle system)
import os
from os import system
import lib
import movement_engine
import time
from colorama import Fore, Style
import game_data
import random
import math
import ctypes
import msvcrt
import subprocess
from ctypes import wintypes
from game_data import MQ, InvItem
class Logo:
__slots__ = ("logo_a", "logo_b")
# logo_a: equivalent to "Adventure"
# logo_b: equivalent to "Game"
def __init__(self):
self.logo_a = [10, 32, 32, 32, 32, 10, 32, 9608, 9608, 9608, 9608, 9608, 9559, 32, 9608, 9608, 9608, 9608, 9608,
9608, 9559, 32, 9608, 9608, 9559, 32, 32, 32, 9608, 9608, 9559, 9608, 9608, 9608, 9608, 9608,
9608, 9608, 9559, 9608, 9608, 9608, 9559, 32, 32, 32, 9608, 9608, 9559, 9608, 9608, 9608, 9608,
9608, 9608, 9608, 9608, 9559, 9608, 9608, 9559, 32, 32, 32, 9608, 9608, 9559, 9608, 9608, 9608,
9608, 9608, 9608, 9559, 32, 9608, 9608, 9608, 9608, 9608, 9608, 9608, 9559, 10, 9608, 9608, 9556,
9552, 9552, 9608, 9608, 9559, 9608, 9608, 9556, 9552, 9552, 9608, 9608, 9559, 9608, 9608, 9553,
32, 32, 32, 9608, 9608, 9553, 9608, 9608, 9556, 9552, 9552, 9552, 9552, 9565, 9608, 9608, 9608,
9608, 9559, 32, 32, 9608, 9608, 9553, 9562, 9552, 9552, 9608, 9608, 9556, 9552, 9552, 9565, 9608,
9608, 9553, 32, 32, 32, 9608, 9608, 9553, 9608, 9608, 9556, 9552, 9552, 9608, 9608, 9559,
9608, 9608, 9556, 9552, 9552, 9552, 9552, 9565, 10, 9608, 9608, 9608, 9608, 9608, 9608, 9608,
9553, 9608, 9608, 9553, 32, 32, 9608, 9608, 9553, 9608, 9608, 9553, 32, 32, 32, 9608, 9608, 9553,
9608, 9608, 9608, 9608, 9608, 9559, 32, 32, 9608, 9608, 9556, 9608, 9608, 9559, 32, 9608, 9608,
9553, 32, 32, 32, 9608, 9608, 9553, 32, 32, 32, 9608, 9608, 9553, 32, 32, 32, 9608, 9608, 9553,
9608, 9608, 9608, 9608, 9608, 9608, 9556, 9565, 9608, 9608, 9608, 9608, 9608, 9559, 32, 32, 10,
9608, 9608, 9556, 9552, 9552, 9608, 9608, 9553, 9608, 9608, 9553, 32, 32, 9608, 9608, 9553, 9562,
9608, 9608, 9559, 32, 9608, 9608, 9556, 9565, 9608, 9608, 9556, 9552, 9552, 9565, 32, 32, 9608,
9608, 9553, 9562, 9608, 9608, 9559, 9608, 9608, 9553, 32, 32, 32, 9608, 9608, 9553, 32, 32, 32,
9608, 9608, 9553, 32, 32, 32, 9608, 9608, 9553, 9608, 9608, 9556, 9552, 9552, 9608, 9608,
9559, 9608, 9608, 9556, 9552, 9552, 9565, 32, 32, 10, 9608, 9608, 9553, 32, 32, 9608, 9608, 9553,
9608, 9608, 9608, 9608, 9608, 9608, 9556, 9565, 32, 9562, 9608, 9608, 9608, 9608, 9556, 9565, 32,
9608, 9608, 9608, 9608, 9608, 9608, 9608, 9559, 9608, 9608, 9553, 32, 9562, 9608, 9608, 9608,
9608, 9553, 32, 32, 32, 9608, 9608, 9553, 32, 32, 32, 9562, 9608, 9608, 9608, 9608, 9608, 9608,
9556, 9565, 9608, 9608, 9553, 32, 32, 9608, 9608, 9553, 9608, 9608, 9608, 9608, 9608, 9608, 9608,
9559, 10, 9562, 9552, 9565, 32, 32, 9562, 9552, 9565, 9562, 9552, 9552, 9552, 9552, 9552, 9565,
32, 32, 32, 9562, 9552, 9552, 9552, 9565, 32, 32, 9562, 9552, 9552, 9552, 9552, 9552, 9552, 9565,
9562, 9552, 9565, 32, 32, 9562, 9552, 9552, 9552, 9565, 32, 32, 32, 9562, 9552, 9565,
32, 32, 32, 32, 9562, 9552, 9552, 9552, 9552, 9552, 9565, 32, 9562, 9552, 9565, 32, 32, 9562,
9552, 9565, 9562, 9552, 9552, 9552, 9552, 9552, 9552, 9565, 10, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 10]
self.logo_b = [10, 32, 9608, 9608, 9608, 9608, 9608, 9608, 9559, 32, 32, 9608, 9608, 9608, 9608, 9608,
9559, 32, 9608, 9608, 9608, 9559, 32, 32, 32, 9608, 9608, 9608, 9559, 9608, 9608, 9608, 9608,
9608, 9608, 9608, 9559, 10, 9608, 9608, 9556, 9552, 9552, 9552, 9552, 9565, 32, 9608, 9608,
9556, 9552, 9552, 9608, 9608, 9559, 9608, 9608, 9608, 9608, 9559, 32, 9608, 9608, 9608, 9608,
9553, 9608, 9608, 9556, 9552, 9552, 9552, 9552, 9565, 10, 9608, 9608, 9553, 32, 32, 9608, 9608,
9608, 9559, 9608, 9608, 9608, 9608, 9608, 9608, 9608, 9553, 9608, 9608, 9556, 9608, 9608, 9608,
9608, 9556, 9608, 9608, 9553, 9608, 9608, 9608, 9608, 9608, 9559, 32, 32, 10, 9608, 9608, 9553,
32, 32, 32, 9608, 9608, 9553, 9608, 9608, 9556, 9552, 9552, 9608, 9608, 9553, 9608, 9608, 9553,
9562, 9608, 9608, 9556, 9565, 9608, 9608, 9553, 9608, 9608, 9556, 9552, 9552, 9565, 32, 32, 10,
9562, 9608, 9608, 9608, 9608, 9608, 9608, 9556, 9565, 9608, 9608, 9553, 32, 32, 9608, 9608,
9553, 9608, 9608, 9553, 32, 9562, 9552, 9565, 32, 9608, 9608, 9553, 9608, 9608, 9608, 9608, 9608,
9608, 9608, 9559, 10, 32, 9562, 9552, 9552, 9552, 9552, 9552, 9565, 32, 9562, 9552, 9565, 32, 32,
9562, 9552, 9565, 9562, 9552, 9565, 32, 32, 32, 32, 32, 9562, 9552, 9565, 9562, 9552, 9552, 9552,
9552, 9552, 9552, 9565, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 10]
def print_logo():
# Print the Logo
logo_instance = Logo()
for logo_char in logo_instance.logo_a:
if logo_char == 10: # Check for new line
print(f"{chr(logo_char):<10}", end='') # Spacing so text is not left-aligned
else:
print(chr(logo_char), end='')
for logo_char in logo_instance.logo_b:
if logo_char == 10:
print(f"{chr(logo_char):<30}", end='')
else:
print(chr(logo_char), end='')
print('\n')
def get_max():
# Initiate the max size of the console
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
user32 = ctypes.WinDLL('user32', use_last_error=True)
kernel32.GetConsoleWindow.restype = wintypes.HWND
kernel32.GetLargestConsoleWindowSize.restype = wintypes._COORD
kernel32.GetLargestConsoleWindowSize.argtypes = (wintypes.HANDLE,)
user32.ShowWindow.argtypes = (wintypes.HWND, ctypes.c_int)
fd = os.open('CONOUT$', os.O_RDWR)
try:
hcon = msvcrt.get_osfhandle(fd)
max_size = kernel32.GetLargestConsoleWindowSize(hcon)
if max_size.X == 0 and max_size.Y == 0:
raise ctypes.WinError(ctypes.get_last_error())
cols = max_size.X
hwnd = kernel32.GetConsoleWindow()
if cols and hwnd:
lines = max_size.Y
game_data.SysData.max_screen_size = (cols, lines)
finally:
os.close(fd)
def is_full_screen():
    """Return True when the terminal matches the recorded maximum size.

    Fixes: the original left three unlabeled debug ``print()``s in place
    (one of which compared against ``get_max()``, whose return value is
    always None) and swallowed every exception with a bare ``except``.
    """
    try:
        col, row = os.get_terminal_size()
        return (col, row) == game_data.SysData.max_screen_size
    except (OSError, ValueError):
        # get_terminal_size raises OSError when no console is attached
        return False
def maximize_console(lines=None):
# I hate how long this took to figure out
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
user32 = ctypes.WinDLL('user32', use_last_error=True)
SW_MAXIMIZE = 3 # specifies to maximize the window
kernel32.GetConsoleWindow.restype = wintypes.HWND
kernel32.GetLargestConsoleWindowSize.restype = wintypes._COORD
kernel32.GetLargestConsoleWindowSize.argtypes = (wintypes.HANDLE,)
user32.ShowWindow.argtypes = (wintypes.HWND, ctypes.c_int)
fd = os.open('CONOUT$', os.O_RDWR)
try:
hcon = msvcrt.get_osfhandle(fd)
max_size = kernel32.GetLargestConsoleWindowSize(hcon)
if max_size.X == 0 and max_size.Y == 0:
raise ctypes.WinError(ctypes.get_last_error())
cols = max_size.X
hwnd = kernel32.GetConsoleWindow()
if cols and hwnd:
if lines is None:
lines = max_size.Y
else:
lines = max(min(lines, 9999), max_size.Y)
game_data.SysData.max_screen_size = (cols, lines)
subprocess.check_call('mode.com con cols={} lines={}'.format(cols, lines))
user32.ShowWindow(hwnd, SW_MAXIMIZE)
finally:
os.close(fd)
def clear_line(num: int, max_line_length: int = None,
reset: bool = False, direction: str = 'A'):
# Clear the specified amount of lines from the console
# Num = The amount of line to clear
# Max_Line_Length = The length of the largest line amongst the lines being cleared
# Reset = Whether or not to reset the cursor after clearing specified line amount
# direction = The direction to clear the lines (default: A [Up])
if max_line_length is None:
max_line_length = game_data.SysData.max_screen_size[0]
for i in range(num):
print(f'\x1b[{1}{direction.upper()}', end='')
print(f'\r{Fore.RED}{" " * max_line_length}{Fore.RESET}\r', end='')
if reset is True:
print(f'\x1b[{num // 2}B') # Reset the cursor to the original position with magic
def back_line(num: int, delay: int = 10, index: int = 1):
    """Erase `num` characters of a previous line one at a time, like backspacing.

    delay: per-character pause in milliseconds.
    index: how many lines above the cursor the target line is.
    """
    pause = delay / 1000
    # Jump `index` lines up and `num` columns right, landing just past the text
    print(f'\x1b[{index}A\x1b[{num}C', end=' ')
    for _ in range(num):
        # Step two cells left, then overwrite one character with a space
        print('\x1b[2D ', end='')
        time.sleep(pause)
    print('\r', end='')
def display_help(cmd: str = None):
    """Print the help page: every command when `cmd` is empty, otherwise one entry.

    Fixes: the default ``cmd=None`` crashed on ``None.isspace()``, and the
    original empty-check was the accidental chained comparison
    ``cmd is cmd == ""`` (which reduces to ``cmd == ""``).
    """
    help_page = game_data.HelpPage()
    if cmd is None or not cmd.strip():
        # No specific command requested -> display the full help page
        print("Game Command List\n")
        for cmd_info in help_page.ind_def:
            print(f"{cmd_info:<20}", end='')
            print(f": {help_page.ind_def[cmd_info]}")
    else:
        # Index the command info from the command info list
        pass  # TODO: single-command lookup not implemented yet
def get_distance(object_pos0: tuple, object_pos1: tuple):
    """Return the Euclidean distance between two (x, y) coordinate tuples."""
    dx = object_pos0[0] - object_pos1[0]
    dy = object_pos0[1] - object_pos1[1]
    return math.sqrt(abs(dx ** 2 + dy ** 2))
def check_proximity(object_pos: tuple):
    """Return True when the player is within detection range of `object_pos`."""
    dx = object_pos[0] - game_data.MapData.x
    dy = object_pos[1] - game_data.MapData.y
    distance = math.sqrt(abs(dx ** 2 + dy ** 2))
    return distance <= game_data.PlayerData.Detection_Distance
def add_item(item_id: int):
    """Add one unit of the item with `item_id` to the player's inventory.

    Stacks onto an existing entry when one exists and its max_qty allows
    another unit; otherwise appends a new entry. Prints an error when the
    inventory is full or inaccessible.
    """
    # Add an item by id to a players inventory
    if game_data.PlayerData.Inventory_Accessible:
        item_data = item_info(str(item_id))
        # NOTE(review): capacity check compares total Inventory_Space against one
        # item's size, not remaining free space — confirm intended.
        size_calc = game_data.PlayerData.Inventory_Space - item_data.item_size
        if size_calc >= 0:
            # Check for duplicate entries and combine their qty
            dupe = False
            for idx, inv_item in enumerate(game_data.PlayerData.Inventory):
                if inv_item.item_id == item_data.item_id:
                    if not game_data.PlayerData.Inventory[idx].qty + 1 > inv_item.max_qty:
                        # Makes sure to not add items that can't have multiple instances in the inventory
                        dupe = True
                        game_data.PlayerData.Inventory[idx].qty += 1
                        break
            if not dupe:
                # No stackable duplicate found -> store as a fresh entry
                game_data.PlayerData.Inventory.append(item_data)
                # print(game_data.PlayerData.Inventory[ind])
        elif size_calc < 0:
            print("Could not add item(s) to your inventory due to lack of space")
    else:
        print("Error: Player Inventory is inaccessible")
def remove_item(item_id: int, qty: int = 1):
    """Remove `qty` units of the item with `item_id` from the player's inventory.

    Only the most recently added matching stack is touched (reverse-order
    search). If the stack holds more than `qty` units its count is reduced;
    otherwise the whole entry is removed. No-op when the inventory is
    inaccessible or the item is absent.

    Fix: the original tested ``i.qty > 1`` before subtracting, so passing
    qty larger than the remaining count drove the quantity negative.
    """
    if game_data.PlayerData.Inventory_Accessible:
        for i in game_data.PlayerData.Inventory[::-1]:  # Reverse order search
            if i.item_id == item_id:
                if i.qty > qty:
                    i.qty -= qty
                else:
                    # Removing qty (or more) units empties this stack entirely
                    game_data.PlayerData.Inventory.remove(i)
                break
def reset_sys_font(font_size: int = 18):
    """Set the Windows console font to NSimSun at the given pixel height.

    Uses kernel32.SetCurrentConsoleFontEx via ctypes; Windows-only.
    """
    LF_FACESIZE = 32  # FaceName buffer length defined by the Win32 API
    STD_OUTPUT_HANDLE = -11
    # Local mirror of the Win32 COORD struct
    class COORD(ctypes.Structure):
        _fields_ = [("X", ctypes.c_short), ("Y", ctypes.c_short)]
    # Local mirror of the Win32 CONSOLE_FONT_INFOEX struct
    class CONSOLE_FONT_INFOEX(ctypes.Structure):
        _fields_ = [("cbSize", ctypes.c_ulong),
                    ("nFont", ctypes.c_ulong),
                    ("dwFontSize", COORD),
                    ("FontFamily", ctypes.c_uint),
                    ("FontWeight", ctypes.c_uint),
                    ("FaceName", ctypes.c_wchar * LF_FACESIZE)]
    font = CONSOLE_FONT_INFOEX()
    font.cbSize = ctypes.sizeof(CONSOLE_FONT_INFOEX)  # required by the API
    font.dwFontSize.Y = font_size # The actual scalable size of the font
    font.FontFamily = 54
    font.FontWeight = 400  # FW_NORMAL
    font.FaceName = "NSimSun"
    handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
    ctypes.windll.kernel32.SetCurrentConsoleFontEx(
        handle, ctypes.c_long(False), ctypes.pointer(font))
def has_item(item_search: str, data_return: bool = False):
    """Check whether the player holds an item, looked up by id or by name.

    Numeric values of `item_search` match on item_id; anything else matches
    on the item name (case-insensitive, '-' treated as a space). Returns the
    inventory entry when data_return is True, otherwise True; returns False
    when the item is not in the inventory.
    """
    if str(item_search).isnumeric():
        wanted_id = int(item_search)
        for entry in game_data.PlayerData.Inventory:
            if entry.item_id == wanted_id:
                return entry if data_return else True
    else:
        wanted_name = item_search.replace("-", " ").lower()
        for entry in game_data.PlayerData.Inventory:
            if entry.name.lower() == wanted_name:
                return entry if data_return else True
    return False  # item not found
def item_info(item: str):
    """Look up an item definition by numeric id or by name.

    Returns the matching item object from the game's item table, or False
    when nothing matches.
    """
    catalogue = movement_engine.Data.game_items
    if str(item).isnumeric():
        wanted_id = int(item)
        return next((entry for entry in catalogue if entry.item_id == wanted_id), False)
    wanted_name = item.lower()
    return next((entry for entry in catalogue if entry.name.lower() == wanted_name), False)
def cmove(num: int = 1, direction: str = 'A'):
    """Move the console cursor `num` cells in `direction` via an ANSI escape."""
    print('\x1b[' + str(num) + direction, end='')
def map_index(map_id: int):
    """Return the map data object for `map_id`, or False when out of range.

    Fix: the original only rejected ids past the end of the list, so a
    negative id silently returned a map from the end via Python's negative
    indexing; ids are now validated to lie within [0, len(maps) - 1].
    """
    maps = [game_data.MainMap, game_data.Floor0, game_data.Floor1, game_data.Floor2, game_data.Floor3,
            game_data.Floor4, game_data.GateKeeper, game_data.FinalFloor]
    if 0 <= map_id < len(maps):
        return maps[map_id]
    return False
def display_inv():
    """Clear the screen and render the player's inventory as two columns.

    Each row shows (name, qty, id) for up to two items; empty slots render
    as dim '*' placeholders. Leaves the game with the inventory flagged as
    displayed and command input disabled until the player moves.
    """
    # if the map is displayed, clear the map and then display the inventory
    # Display the inventory
    os.system("cls")
    item_spacing = 25
    side_spacing = 5
    element_num = 1  # Which side of the array is printing
    key_num = 0  # The current item to print in the first column
    sub_key_num = 0  # The current item to print in the second column
    # inv_size = len(game_data.PlayerData.Inventory) - 1
    row1 = game_data.PlayerData.Inventory_Space // 2
    inv0 = []
    inv1 = []
    if game_data.PlayerData.Inventory_Space % 2 == 1:
        # If the inventory space num is odd, the first column will print 1 more than the second column
        row1 += 1
    # Initialize the inventory columns
    for x, i in enumerate(game_data.PlayerData.Inventory):
        if x > row1 - 1:
            inv1.append(i)
        else:
            inv0.append(i)
    print(f"{'':<{side_spacing}}", end='')  # Title Side Spacing
    # Column headers (repeated once per column pair)
    print(f"{Fore.RED}{'Item Name':^{item_spacing}}{'Item QTY':^{item_spacing}}{'Item ID':^{item_spacing}}"
          f"{'Item Name':^{item_spacing}}{'Item QTY':^{item_spacing}}{'Item ID':^{item_spacing}}{Fore.RESET}\n")
    # One loop iteration per slot; element_num alternates 1 (left) / 2 (right)
    for i in range(game_data.PlayerData.Inventory_Space):
        if element_num == 1:
            print(f"{'':<{side_spacing}}", end='')
            if key_num > len(inv0) - 1:
                # No Item to print
                print(f"{Style.BRIGHT}{Fore.BLACK}{'*':^{item_spacing}}{'*':^{item_spacing}}{'*':^{item_spacing}}"
                      f"{Fore.RESET}", end='')
            else:
                # There is an item to print
                item = inv0[key_num]
                print(f"{item.name:^{item_spacing}}{item.qty:^{item_spacing}}{item.item_id:^{item_spacing}}", end='')
                key_num += 1
            element_num = 2
        elif element_num == 2:
            # Print second row, check to see if requested item exists if so print
            # Check to see if the second column has anything to print
            if sub_key_num > len(inv1) - 1:
                print(f"{Style.BRIGHT}{Fore.BLACK}{'*':^{item_spacing}}{'*':^{item_spacing}}{'*':^{item_spacing}}"
                      f"{Fore.RESET}", end='')
            else:
                item = inv1[sub_key_num]
                print(f"{item.name:^{item_spacing}}{item.qty:^{item_spacing}}{item.item_id:^{item_spacing}}", end='')
                sub_key_num += 1
            element_num = 1  # Set to first column
            print(f"{Fore.RESET}\n", end='')
    print(Fore.RESET + Style.RESET_ALL)  # Create newline at end of printout
    # print([x.name for x in game_data.PlayerData.Inventory])
    # print([x.name for x in inv0])
    # print([x.name for x in inv1])
    game_data.PlayerData.Inventory_Displayed = True
    game_data.PlayerData.command_status = False  # Disable command input
def display_stats():  # Display stats of system and player
    """Placeholder for the 'stats' command; not implemented yet."""
    pass
def display_item_info(item_data):  # Get raw item info and display it in formatted statement
    """Pretty-print a single item's stats (ownership, id, type, qty, damage, ...).

    item_data: an item object as returned by item_info().
    """
    spacing = 30
    # Whether the player currently holds this item (also picks red/green below)
    item_has = has_item(item_data.item_id)
    print('\n' * 3 + f'{item_data.name:-^20}')
    # bool(item_has) indexes [red, green] -> red for False, green for True
    print(f'{Fore.YELLOW}{"Player has item:":<{spacing}}{[Fore.RED, Fore.GREEN][item_has]}{item_has}')
    print(f'{Fore.YELLOW}{"Item: ":<{spacing}}{item_data.item_id}/{Fore.RED}{len(movement_engine.Data.game_items) - 1}'
          f'{Fore.RESET}')
    print(f'{Fore.YELLOW}{"Item ID:":<{spacing}}{Fore.RESET}{item_data.item_id}')
    print(f'{Fore.YELLOW}{"Item Type:":<{spacing}}{Fore.RESET}{item_data.type}')
    print(f'{Fore.YELLOW}{"Item Max Quantity:":<{spacing}}{Fore.RESET}{item_data.max_qty}')
    print(f'{Fore.YELLOW}{"Item Size:":<{spacing}}{Fore.RESET}{item_data.item_size}')
    # damage is a (min, max) pair — rendered as "min -> max"
    print(f'{Fore.YELLOW}{"Damage: ":<{spacing}}{Fore.RESET}{item_data.damage[0]} {Fore.YELLOW}-> '
          f'{Fore.RESET}{item_data.damage[1]}')
    print(f'{Fore.YELLOW}{"Health Regeneration:":<{spacing}}{Fore.RESET}{item_data.health_regen}')
    # print(f'{"Stamina Regeneration:":<{spacing}}{item_data.stamina_regen}') # Not Implemented yet
    print(f'{Fore.YELLOW}{"Description:":<{spacing}}{Fore.RESET}{item_data.desc}')
def ck(text: str, color: str = None):
    """Pair a message with an optional colour name for the gprint queue."""
    return (text, color)
def process_command(cmd_raw):
    """Dispatch a raw typed command string to the matching game action.

    The command keyword must appear both in the global command list and in
    the current map's valid commands ('exit' is always allowed). Everything
    after the keyword is passed to the handler as a single string. Resets
    the in-progress command buffer on exit.
    """
    # Process command
    cmd = cmd_raw.lower().split(' ')
    if (len(cmd_raw) > 0 and game_data.HelpPage().cmd_list.__contains__(cmd[0])
            and game_data.MapData.valid_cmd.__contains__(cmd[0])) or cmd[0] == "exit":
        cmd_latter = " ".join(cmd[1:]) # Removes the command keyword
        if cmd[0] == "help" or cmd[0] == "?": # Print the help page
            system('cls')
            game_data.PlayerData.Inventory_Displayed = True
            display_help(cmd_latter)
        elif cmd[0] == "inventory": # print the players inventory
            system('cls')
            display_inv()
            gprint(game_data.MQ([ck("\nMove to exit...")]))
        elif cmd[0] == "item-info": # Print the specified items info
            system('cls')
            # game_data.PlayerData.command_status = False # Disable command input
            game_data.PlayerData.Inventory_Displayed = True
            game_data.PlayerData.command_status = False
            info = item_info(cmd_latter)
            if info is False:
                err_msg('Invalid Item')
            else:
                display_item_info(info)
                gprint(game_data.MQ([ck("\nMove to exit...")]))
        elif cmd[0] == "stats": # print system & player statistics
            system('cls')
            display_stats()
        elif cmd[0] == 'drop': # Remove the specified item from the players inventory
            item = item_info(cmd_latter)
            if item is False:
                err_msg('Invalid Item')
            elif not has_item(item.item_id):
                err_msg('You don\'t have this item')
            else: # Remove the item from players inventory
                remove_item(item.item_id)
                # Build the confirmation banner and measure its text length for centring
                script = [ck('Dropped', 'yellow'), ck('['), ck(item.name, 'red'), ck(']')]
                sl = 0
                for i in script:
                    sl += len(i[0])
                game_data.MapData.map_idle = True
                system('cls')
                lib.center_cursor(sl)
                gprint(game_data.MQ(script))
                time.sleep(1)
                game_data.MapData.map_idle = False
                movement_engine.show_map(game_data.MapData.current_map)
        elif cmd[0] == "exit":
            game_data.MapData.map_kill = True # Exit listener thread
            os.system('cls')
            reset_sys_font(30)
            get_max()
            # Centre the goodbye message horizontally
            print(f"{'':<{game_data.SysData.max_screen_size[0] // 2}}", end='')
            gprint(MQ([ck("Goodbye :(")]))
            time.sleep(1)
            system('exit')
            game_data.SysData.full_kill = True
    else:
        err_msg('Invalid Command')
    game_data.MapData.current_command = "" # Reset the inputted command
def err_msg(msg: str):
    """Flash an error message centred on a cleared screen, then restore the map.

    Pauses the map listener and command input for the two-second display,
    then redraws the current map and re-enables both.
    """
    game_data.MapData.map_idle = True
    game_data.PlayerData.command_status = False
    system('cls')
    center_cursor(len(msg))
    gprint(MQ([ck(msg, "red")]))
    time.sleep(2)
    movement_engine.show_map(game_data.MapData.current_map)
    game_data.MapData.map_idle = False
    game_data.PlayerData.command_status = True
def center_cursor(x_offset: int, y_offset: int = 0):
    """Park the cursor at mid-screen, shifted left by half of x_offset.

    y_offset raises the vertical position; also clears any partially typed
    command so stale input is discarded.
    """
    # Maybe change to use /x1b[#A/B/C/D exit code to move cursor
    game_data.MapData.current_command = ""
    cols, rows = game_data.SysData.max_screen_size
    vertical = '\n' * ((rows // 2) - y_offset)
    horizontal = ' ' * ((cols // 2) - (x_offset // 2))
    print(vertical + horizontal, end='')
def event_handler(event_id: int, event_type: int, reset_map: bool = True):
    """Play a one-shot scripted event's dialogue, then resume the map.

    event_id: object id of the event (translated to a list index below).
    event_type: index into the keys of game_data.EventData.events.
    reset_map: redraw the current map after the dialogue finishes.
    Already-seen events (tracked in MapDataCache.event_cache) are skipped.
    """
    if event_id not in game_data.MapDataCache.event_cache: # Make sure not to duplicate events
        game_data.MapData.map_idle = True # Stop keyboard listener and printout
        game_data.PlayerData.command_status = False # Disable command input
        system('cls')
        time.sleep(2)
        # Pull event data
        # Translate the object id into its position in the event list
        for x, m in enumerate(game_data.EventData.events[list(game_data.EventData.events.keys())[event_type]]):
            if m.object_id == event_id:
                event_id = x
                break
        # Fetch event data
        # Each dialogue entry is (text, delay_ms) or (text, (delay_ms, colour))
        for m in game_data.EventData.events[list(game_data.EventData.events.keys())
                                            [event_type]][event_id].event_dialogue:
            if type(m[1]) is tuple:
                delay = m[1][0]
                colour = m[1][1]
            else:
                delay = m[1]
                colour = 'white'
            center_cursor(len(m[0]))
            gprint(game_data.MQ([ck(m[0], colour)])) # Print specified dialogue
            time.sleep(delay / 1000) # Pause for specified delay in MS
            system('cls')
        # NOTE(review): event_id was reassigned to a list index above, so the
        # cache stores indices while the guard at the top checks object ids —
        # verify this is intended.
        game_data.MapDataCache.event_cache.append(event_id) # Avoids the event being triggered again
        game_data.MapData.map_idle = False # Resume the map listener
        game_data.PlayerData.command_status = True # Re-Enable user command input
        if reset_map:
            movement_engine.show_map(game_data.MapData.current_map)
def question_handler(question_diff: int):
    """
    Order of operations:
    1. Set map movement system into idle
    2. Pull a random question of the specified difficulty
    3. Ask and open input (kb_listener on_press thread will handle question accumulation)
    4. if the user got the question right progress to the next map (return True), if the user got it wrong
    give them the option to retry or to leave (leaving will leave them on the same floor, adds number of tries
    to total to avoid a leave and retry loophole) 3 wrong questions spawns them outside the mine

    Fixes: the random draw used randint(0, len(pool_0)) — an inclusive upper
    bound sized from the difficulty-0 pool — which could overrun the list and
    ignored the requested difficulty; the centring padding used the full
    screen width instead of half; the accumulated answer was discarded (now
    returned so the caller can grade it).
    """
    # Pick a random question from the requested difficulty pool
    pool = movement_engine.Data.questions[0][question_diff]
    question = pool[random.randrange(len(pool))][0]
    # Find the longest line
    question_cache = question.split("\n")
    max_l = 0
    for line in question_cache:
        if len(line) > max_l:
            max_l = len(line)
    os.system("cls")
    # Centre vertically, then shift left by half the widest line (matches center_cursor)
    print("\n" * (game_data.SysData.max_screen_size[1] // 2) + " " *
          ((game_data.SysData.max_screen_size[0] // 2) - (max_l // 2)), end='')
    print(question)
    game_data.PlayerData.question_status = True  # set the input listener to accumulate the answer
    while game_data.PlayerData.question_status:  # Lock the script here until the question input has been satisfied
        time.sleep(0.1)
    answer = game_data.PlayerData.question_answer
    return answer
def gprint(queue, speed: int = 25):
    """Print a message queue one character at a time, typewriter-style.

    queue: an MQ of (text, colour-name) pairs; a bare string is wrapped into
    a single uncoloured message. speed is the per-character delay in
    milliseconds. Colour names are looked up case-insensitively against
    colorama's foreground colours.

    Refactor: the original duplicated the character-typing loop in the
    coloured and plain branches; the loop is now shared.
    """
    if type(queue) is not MQ:
        # Converts raw string into MQ format
        queue = MQ([(queue, None)])
    delay = speed / 1000  # per-character delay: milliseconds -> seconds
    # Used to index color by string key
    colors_list = {"red": Fore.RED, "green": Fore.GREEN, "yellow": Fore.YELLOW, "blue": Fore.BLUE,
                   "magenta": Fore.MAGENTA, "cyan": Fore.CYAN, "white": Fore.WHITE}
    for msg in queue.messages:
        colored = msg[1] is not None
        if colored:
            # Switch the terminal colour before typing this message
            print(colors_list[msg[1].lower()], end='')
        for char in msg[0]:
            print(char, end='')
            time.sleep(delay)
        if colored:
            print(Fore.RESET, end='')
    print()  # Create new line
|
en
| 0.785875
|
# Holds the main functions that operate the backend of the game (e.g battle system) # logo_a: equivalent to "Adventure" # logo_b: equivalent to "Game" # Print the Logo # Check for new line # Spacing so text is not left-aligned # Initiate the max size of the console # I hate how long this took to figure out # specifies to maximize the window # Clear the specified amount of lines from the console # Num = The amount of line to clear # Max_Line_Length = The length of the largest line amongst the lines being cleared # Reset = Whether or not to reset the cursor after clearing specified line amount # direction = The direction to clear the lines (default: A [Up]) # Reset the cursor to the original position with magic # Clear specified line in a typing backspace fashion # Display the help page for all or just one command # Display the full help page # Index the command info from the command info list # Return the distance of the player to an object # Add an item by id to a players inventory # Check for duplicate entries and combine their qty # Makes sure to not add items that can't have multiple instances in the inventory # print(game_data.PlayerData.Inventory[ind]) # Reverse order search # The actual scalable size of the font # Check if the player has the item in their inventory # Check if the player specified an id # item not found # Item found by name # Item not found # Dunno, seems kinda useless, but who will actually read all of this? 
# Move the console cursor # Find and return the map data for the specified id # if the map is displayed, clear the map and then display the inventory # Display the inventory # Which side of the array is printing # The current item to print in the first column # The current item to print in the second column # inv_size = len(game_data.PlayerData.Inventory) - 1 # If the inventory space num is odd, the first column will print 1 more than the second column # Initialize the inventory columns # Title Side Spacing # No Item to print # There is an item to print # Print second row, check to see if requested item exists if so print # Check to see if the second column has anything to print # Set to first column # Create newline at end of printout # print([x.name for x in game_data.PlayerData.Inventory]) # print([x.name for x in inv0]) # print([x.name for x in inv1]) # Disable command input # Display stats of system and player # Get raw item info and display it in formatted statement # print(f'{"Stamina Regeneration:":<{spacing}}{item_data.stamina_regen}') # Not Implemented yet # Kind of useless # Process command # Removes the command keyword # Print the help page # print the players inventory # Print the specified items info # game_data.PlayerData.command_status = False # Disable command input # print system & player statistics # Remove the specified item from the players inventory # Remove the item from players inventory # Exit listener thread # Reset the inputted command # Move the cursor to the middle of the screen with optional offset # Maybe change to use /x1b[#A/B/C/D exit code to move cursor # Make sure not to duplicate events # Stop keyboard listener and printout # Disable command input # Pull event data # Fetch event data # Print specified dialogue # Pause for specified delay in MS # Avoids the event being triggered again # Resume the map listener # Re-Enable user command input Order of operations: 1. Set map movement system into idle 2. 
Pull a random question of the specified difficulty 3. Ask and open input (kb_listener on_press thread will handle question accumulation) 4. if the user got the question right progress to the next map (return True), if the user got it wrong give them the option to retry or to leave (leaving will leave them on the same floor, adds number of tries to total to avoid a leave and retry loophole) 3 wrong questions spawns them outside the mine # Find the longest line # set the input listener to accumulate the answer # Lock the script here until the question input has been satisfied # Print as if the text was being typed # Converts raw string into MQ format # Seconds to milliseconds conversion # Used to index color by string key # if color printing is specified # Create new line
| 2.42192
| 2
|
app/recipe/apps.py
|
AnshumanRohella/recipe-api
| 0
|
6626674
|
<reponame>AnshumanRohella/recipe-api<filename>app/recipe/apps.py
from django.apps import AppConfig
class RecipieConfig(AppConfig):
    """Django app configuration for the 'recipe' app.

    NOTE: the class name misspells 'Recipe'; renaming would break any
    INSTALLED_APPS entry referencing it, so it is documented instead.
    """
    name = 'recipe'
|
from django.apps import AppConfig
class RecipieConfig(AppConfig):
name = 'recipe'
|
none
| 1
| 1.14439
| 1
|
|
testauth/celery.py
|
buahaha/allianceauth-opcalendar
| 0
|
6626675
|
# Celery application bootstrap for the 'testauth' Django project.
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testauth.settings.local")
from django.conf import settings # noqa
app = Celery("testauth")
# Using a string here means the worker don't have to serialize
# the configuration object to child processes.
app.config_from_object("django.conf:settings")
# Backend/settings for celery-once style task locking via allianceauth.
app.conf.ONCE = {"backend": "allianceauth.services.tasks.DjangoBackend", "settings": {}}
# Load task modules from all registered Django app configs.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
|
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testauth.settings.local")
from django.conf import settings # noqa
app = Celery("testauth")
# Using a string here means the worker don't have to serialize
# the configuration object to child processes.
app.config_from_object("django.conf:settings")
app.conf.ONCE = {"backend": "allianceauth.services.tasks.DjangoBackend", "settings": {}}
# Load task modules from all registered Django app configs.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
|
en
| 0.709567
|
# set the default Django settings module for the 'celery' program. # noqa # Using a string here means the worker don't have to serialize # the configuration object to child processes. # Load task modules from all registered Django app configs.
| 2.029638
| 2
|
tests/test_model.py
|
probprog/pyprob
| 268
|
6626676
|
import unittest
import math
import torch
import os
import tempfile
import uuid
import pyprob
from pyprob import util, Model, InferenceEngine
from pyprob.distributions import Normal, Uniform, Empirical
importance_sampling_samples = 5000
class ModelTestCase(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        """Build the shared test model (Gaussian with unknown mean, Marsaglia sampler)."""
        # http://www.robots.ox.ac.uk/~fwood/assets/pdf/Wood-AISTATS-2014.pdf
        class GaussianWithUnknownMeanMarsaglia(Model):
            def __init__(self, prior_mean=1, prior_stddev=math.sqrt(5), likelihood_stddev=math.sqrt(2)):
                self.prior_mean = prior_mean
                self.prior_stddev = prior_stddev
                self.likelihood_stddev = likelihood_stddev
                super().__init__('Gaussian with unknown mean (Marsaglia)')
            def marsaglia(self, mean, stddev):
                """Sample Normal(mean, stddev) via Marsaglia's polar (rejection) method."""
                uniform = Uniform(-1, 1)
                s = 1
                # Rejection-sample (x, y) until it lands inside the unit disc
                while float(s) >= 1:
                    x = pyprob.sample(uniform)
                    y = pyprob.sample(uniform)
                    s = x*x + y*y
                return mean + stddev * (x * torch.sqrt(-2 * torch.log(s) / s))
            def forward(self):
                mu = self.marsaglia(self.prior_mean, self.prior_stddev)
                likelihood = Normal(mu, self.likelihood_stddev)
                # Two observation points; tests condition via observe={'obs0': ..., 'obs1': ...}
                pyprob.observe(likelihood, 0, name='obs0')
                pyprob.observe(likelihood, 0, name='obs1')
                return mu
        self._model = GaussianWithUnknownMeanMarsaglia()
        super().__init__(*args, **kwargs)
    def test_model_prior(self):
        """Empirical prior mean/stddev should match the model prior Normal(1, sqrt(5))."""
        num_traces = 5000
        prior_mean_correct = 1
        prior_stddev_correct = math.sqrt(5)
        prior = self._model.prior_results(num_traces)
        prior_mean = float(prior.mean)
        prior_stddev = float(prior.stddev)
        # util.eval_print resolves these local variable *names* in this frame
        util.eval_print('num_traces', 'prior_mean', 'prior_mean_correct', 'prior_stddev', 'prior_stddev_correct')
        self.assertAlmostEqual(prior_mean, prior_mean_correct, places=0)
        self.assertAlmostEqual(prior_stddev, prior_stddev_correct, places=0)
    def test_model_prior_on_disk(self):
        """Prior statistics should survive a write/append round-trip through a file-backed Empirical.

        Runs prior_results twice against the same file so the on-disk
        distribution accumulates 2 * num_traces samples.
        """
        file_name = os.path.join(tempfile.mkdtemp(), str(uuid.uuid4()))
        num_traces = 1000
        prior_mean_correct = 1
        prior_stddev_correct = math.sqrt(5)
        prior_length_correct = 2 * num_traces
        prior = self._model.prior_results(num_traces, file_name=file_name)
        prior.close()
        # Second run appends to the same backing file
        prior = self._model.prior_results(num_traces, file_name=file_name)
        # prior.close()
        prior_length = prior.length
        prior_mean = float(prior.mean)
        prior_stddev = float(prior.stddev)
        # util.eval_print resolves these local variable *names* in this frame
        util.eval_print('num_traces', 'prior_mean', 'prior_mean_correct', 'prior_stddev', 'prior_stddev_correct', 'prior_length', 'prior_length_correct')
        self.assertAlmostEqual(prior_mean, prior_mean_correct, places=0)
        self.assertAlmostEqual(prior_stddev, prior_stddev_correct, places=0)
        self.assertEqual(prior_length, prior_length_correct)
def test_model_trace_length_statistics(self):
num_traces = 2000
trace_length_mean_correct = 2.5630438327789307
trace_length_stddev_correct = 1.2081329822540283
trace_length_min_correct = 2
trace_lengths = self._model.prior(num_traces, map_func=lambda trace: trace.length_controlled)
trace_length_dist = Empirical(trace_lengths)
trace_length_mean = float(trace_length_dist.mean)
trace_length_stddev = float(trace_length_dist.stddev)
trace_length_min = float(trace_length_dist.min)
trace_length_max = (trace_length_dist.max)
util.eval_print('num_traces', 'trace_length_mean', 'trace_length_mean_correct', 'trace_length_stddev', 'trace_length_stddev_correct', 'trace_length_min', 'trace_length_min_correct', 'trace_length_max')
self.assertAlmostEqual(trace_length_mean, trace_length_mean_correct, places=0)
self.assertAlmostEqual(trace_length_stddev, trace_length_stddev_correct, places=0)
self.assertAlmostEqual(trace_length_min, trace_length_min_correct, places=0)
    def test_model_lmh_posterior_with_stop_and_resume(self):
        """LMH posterior should match the analytic posterior when the chain is
        repeatedly stopped and resumed from its last trace."""
        posterior_num_runs = 200
        posterior_num_traces_each_run = 20
        posterior_num_traces_correct = posterior_num_traces_each_run * posterior_num_runs
        # Conjugate-normal analytic posterior for observations {8, 9}
        true_posterior = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)
        prior_mean_correct = 1.
        prior_stddev_correct = math.sqrt(5)
        posteriors = []
        initial_trace = None
        for i in range(posterior_num_runs):
            # Resume each short run from the last trace of the previous one
            posterior = self._model.posterior(num_traces=posterior_num_traces_each_run, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9}, initial_trace=initial_trace)
            initial_trace = posterior[-1]
            posteriors.append(posterior)
        posterior = Empirical(concat_empiricals=posteriors).map(lambda trace: trace.result)
        posterior_num_traces = posterior.length
        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(posterior.unweighted().mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(posterior.unweighted().stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))
        # util.eval_print resolves these local variable *names* in this frame
        util.eval_print('posterior_num_runs', 'posterior_num_traces_each_run', 'posterior_num_traces', 'posterior_num_traces_correct', 'prior_mean_correct', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'prior_stddev_correct', 'posterior_stddev_unweighted', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence')
        self.assertEqual(posterior_num_traces, posterior_num_traces_correct)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)
    def test_model_rmh_posterior_with_stop_and_resume(self):
        """RMH posterior should match the analytic posterior when the chain is
        repeatedly stopped and resumed from its last trace."""
        posterior_num_runs = 100
        posterior_num_traces_each_run = 20
        posterior_num_traces_correct = posterior_num_traces_each_run * posterior_num_runs
        # Conjugate-normal analytic posterior for observations {8, 9}
        true_posterior = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)
        prior_mean_correct = 1.
        prior_stddev_correct = math.sqrt(5)
        posteriors = []
        initial_trace = None
        for i in range(posterior_num_runs):
            # Resume each short run from the last trace of the previous one
            posterior = self._model.posterior(num_traces=posterior_num_traces_each_run, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9}, initial_trace=initial_trace)
            initial_trace = posterior[-1]
            posteriors.append(posterior)
        posterior = Empirical(concat_empiricals=posteriors).map(lambda trace: trace.result)
        posterior_num_traces = posterior.length
        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(posterior.unweighted().mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(posterior.unweighted().stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))
        # util.eval_print resolves these local variable *names* in this frame
        util.eval_print('posterior_num_runs', 'posterior_num_traces_each_run', 'posterior_num_traces', 'posterior_num_traces_correct', 'prior_mean_correct', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'prior_stddev_correct', 'posterior_stddev_unweighted', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence')
        self.assertEqual(posterior_num_traces, posterior_num_traces_correct)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)
    def test_model_rmh_posterior_with_online_thinning(self):
        """RMH with online thinning should keep 1/thinning_steps of the traces
        while preserving the posterior mean/stddev."""
        thinning_steps = 10
        posterior_num_traces = 3000
        posterior_with_thinning_num_traces_correct = posterior_num_traces / thinning_steps
        # Conjugate-normal analytic posterior for observations {8, 9}
        true_posterior = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)
        # Baseline run without thinning
        posterior = self._model.posterior_results(num_traces=posterior_num_traces, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9})
        posterior_num_traces = posterior.length
        posterior_mean = float(posterior.mean)
        posterior_stddev = float(posterior.stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))
        # Same run with online thinning enabled
        posterior_with_thinning = self._model.posterior_results(num_traces=posterior_num_traces, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9}, thinning_steps=thinning_steps)
        posterior_with_thinning_num_traces = posterior_with_thinning.length
        posterior_with_thinning_mean = float(posterior_with_thinning.mean)
        posterior_with_thinning_stddev = float(posterior_with_thinning.stddev)
        kl_divergence_with_thinning = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior_with_thinning.mean, posterior_with_thinning.stddev)))
        # util.eval_print resolves these local variable *names* in this frame
        util.eval_print('posterior_num_traces', 'posterior_mean', 'posterior_mean_correct', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence', 'thinning_steps', 'posterior_with_thinning_num_traces', 'posterior_with_thinning_num_traces_correct', 'posterior_with_thinning_mean', 'posterior_with_thinning_stddev', 'kl_divergence_with_thinning')
        self.assertEqual(posterior_with_thinning_num_traces, posterior_with_thinning_num_traces_correct)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)
        self.assertAlmostEqual(posterior_with_thinning_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_with_thinning_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence_with_thinning, 0.25)
    def test_model_lmh_posterior_with_online_thinning(self):
        """LMH with online thinning should keep 1/thinning_steps of the traces
        while preserving the posterior mean/stddev."""
        thinning_steps = 10
        posterior_num_traces = 4000
        posterior_with_thinning_num_traces_correct = posterior_num_traces / thinning_steps
        # Conjugate-normal analytic posterior for observations {8, 9}
        true_posterior = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)
        # Baseline run without thinning
        posterior = self._model.posterior_results(num_traces=posterior_num_traces, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9})
        posterior_num_traces = posterior.length
        posterior_mean = float(posterior.mean)
        posterior_stddev = float(posterior.stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))
        # Same run with online thinning enabled
        posterior_with_thinning = self._model.posterior_results(num_traces=posterior_num_traces, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9}, thinning_steps=thinning_steps)
        posterior_with_thinning_num_traces = posterior_with_thinning.length
        posterior_with_thinning_mean = float(posterior_with_thinning.mean)
        posterior_with_thinning_stddev = float(posterior_with_thinning.stddev)
        kl_divergence_with_thinning = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior_with_thinning.mean, posterior_with_thinning.stddev)))
        # util.eval_print resolves these local variable *names* in this frame
        util.eval_print('posterior_num_traces', 'posterior_mean', 'posterior_mean_correct', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence', 'thinning_steps', 'posterior_with_thinning_num_traces', 'posterior_with_thinning_num_traces_correct', 'posterior_with_thinning_mean', 'posterior_with_thinning_stddev', 'kl_divergence_with_thinning')
        self.assertEqual(posterior_with_thinning_num_traces, posterior_with_thinning_num_traces_correct)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)
        self.assertAlmostEqual(posterior_with_thinning_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_with_thinning_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence_with_thinning, 0.25)
def test_model_lmh_posterior_with_stop_and_resume_on_disk(self):
    """Run LMH in many short segments persisted to one on-disk Empirical.

    Every segment resumes from the final trace of the previous one; the
    concatenated file must contain all traces and match the analytic
    posterior of the Gaussian-with-unknown-mean benchmark.
    """
    trace_file = os.path.join(tempfile.mkdtemp(), str(uuid.uuid4()))
    posterior_num_runs = 200
    posterior_num_traces_each_run = 50
    posterior_num_traces_correct = posterior_num_runs * posterior_num_traces_each_run
    # Closed-form posterior for observations {8, 9} under this model.
    target = Normal(7.25, math.sqrt(1/1.2))
    posterior_mean_correct = float(target.mean)
    posterior_stddev_correct = float(target.stddev)
    prior_mean_correct = 1.
    prior_stddev_correct = math.sqrt(5)
    resume_trace = None
    for _ in range(posterior_num_runs):
        segment = self._model.posterior(num_traces=posterior_num_traces_each_run, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9}, initial_trace=resume_trace, file_name=trace_file)
        resume_trace = segment[-1]  # continue the chain from where it stopped
        segment.close()
    posterior = Empirical(file_name=trace_file)
    posterior.finalize()
    posterior = posterior.map(lambda trace: trace.result)
    posterior_num_traces = posterior.length
    unweighted = posterior.unweighted()
    posterior_mean = float(posterior.mean)
    posterior_mean_unweighted = float(unweighted.mean)
    posterior_stddev = float(posterior.stddev)
    posterior_stddev_unweighted = float(unweighted.stddev)
    kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(target, Normal(posterior.mean, posterior.stddev)))
    util.eval_print('posterior_num_runs', 'posterior_num_traces_each_run', 'posterior_num_traces', 'posterior_num_traces_correct', 'prior_mean_correct', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'prior_stddev_correct', 'posterior_stddev_unweighted', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence')
    self.assertEqual(posterior_num_traces, posterior_num_traces_correct)
    self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
    self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
    self.assertLess(kl_divergence, 0.25)
def test_model_rmh_posterior_with_stop_and_resume_on_disk(self):
    """Run RMH in several short segments persisted to one on-disk Empirical.

    Every segment resumes from the final trace of the previous one; the
    concatenated file must contain all traces and match the analytic
    posterior of the Gaussian-with-unknown-mean benchmark.
    """
    trace_file = os.path.join(tempfile.mkdtemp(), str(uuid.uuid4()))
    posterior_num_runs = 50
    posterior_num_traces_each_run = 50
    posterior_num_traces_correct = posterior_num_runs * posterior_num_traces_each_run
    # Closed-form posterior for observations {8, 9} under this model.
    target = Normal(7.25, math.sqrt(1/1.2))
    posterior_mean_correct = float(target.mean)
    posterior_stddev_correct = float(target.stddev)
    prior_mean_correct = 1.
    prior_stddev_correct = math.sqrt(5)
    resume_trace = None
    for _ in range(posterior_num_runs):
        segment = self._model.posterior(num_traces=posterior_num_traces_each_run, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9}, initial_trace=resume_trace, file_name=trace_file)
        resume_trace = segment[-1]  # continue the chain from where it stopped
        segment.close()
    posterior = Empirical(file_name=trace_file)
    posterior.finalize()
    posterior = posterior.map(lambda trace: trace.result)
    posterior_num_traces = posterior.length
    unweighted = posterior.unweighted()
    posterior_mean = float(posterior.mean)
    posterior_mean_unweighted = float(unweighted.mean)
    posterior_stddev = float(posterior.stddev)
    posterior_stddev_unweighted = float(unweighted.stddev)
    kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(target, Normal(posterior.mean, posterior.stddev)))
    util.eval_print('posterior_num_runs', 'posterior_num_traces_each_run', 'posterior_num_traces', 'posterior_num_traces_correct', 'prior_mean_correct', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'prior_stddev_correct', 'posterior_stddev_unweighted', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence')
    self.assertEqual(posterior_num_traces, posterior_num_traces_correct)
    self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
    self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
    self.assertLess(kl_divergence, 0.25)
class ModelWithReplacementTestCase(unittest.TestCase):
    """Tests for a model whose rejection-sampling loop uses replace=True.

    With replacement, repeated draws at the same sample address are collapsed,
    so every trace should have exactly two controlled random choices (x, y).
    """

    def __init__(self, *args, **kwargs):
        # http://www.robots.ox.ac.uk/~fwood/assets/pdf/Wood-AISTATS-2014.pdf
        class GaussianWithUnknownMeanMarsagliaWithReplacement(Model):
            def __init__(self, prior_mean=1, prior_stddev=math.sqrt(5), likelihood_stddev=math.sqrt(2)):
                self.prior_mean = prior_mean
                self.prior_stddev = prior_stddev
                self.likelihood_stddev = likelihood_stddev
                super().__init__('Gaussian with unknown mean (Marsaglia)')

            def marsaglia(self, mean, stddev):
                # Marsaglia polar method: rejection-sample a point in the unit
                # disc; replace=True collapses the loop's repeated draws into a
                # single trace entry per address.
                uniform = Uniform(-1, 1)
                s = 1
                while float(s) >= 1:
                    x = pyprob.sample(uniform, replace=True)
                    y = pyprob.sample(uniform, replace=True)
                    s = x*x + y*y
                return mean + stddev * (x * torch.sqrt(-2 * torch.log(s) / s))

            def forward(self):
                mu = self.marsaglia(self.prior_mean, self.prior_stddev)
                likelihood = Normal(mu, self.likelihood_stddev)
                pyprob.observe(likelihood, 0, name='obs0')
                pyprob.observe(likelihood, 0, name='obs1')
                return mu

        self._model = GaussianWithUnknownMeanMarsagliaWithReplacement()
        super().__init__(*args, **kwargs)

    def test_model_with_replacement_trace_length_statistics(self):
        """Controlled trace length must be exactly 2 for every prior trace."""
        num_traces = 2000
        trace_length_mean_correct = 2
        trace_length_stddev_correct = 0
        trace_length_min_correct = 2
        trace_length_max_correct = 2
        trace_lengths = self._model.prior(num_traces, map_func=lambda trace: trace.length_controlled)
        trace_length_dist = Empirical(trace_lengths)
        trace_length_mean = float(trace_length_dist.mean)
        trace_length_stddev = float(trace_length_dist.stddev)
        trace_length_min = float(trace_length_dist.min)
        # Fix: convert to float like the other statistics so assertAlmostEqual
        # compares plain floats rather than a tensor.
        trace_length_max = float(trace_length_dist.max)
        util.eval_print('num_traces', 'trace_length_mean', 'trace_length_mean_correct', 'trace_length_stddev', 'trace_length_stddev_correct', 'trace_length_min', 'trace_length_min_correct', 'trace_length_max', 'trace_length_max_correct')
        self.assertAlmostEqual(trace_length_mean, trace_length_mean_correct, places=0)
        self.assertAlmostEqual(trace_length_stddev, trace_length_stddev_correct, places=0)
        self.assertAlmostEqual(trace_length_min, trace_length_min_correct, places=0)
        self.assertAlmostEqual(trace_length_max, trace_length_max_correct, places=0)
class ModelObservationStyle1TestCase(unittest.TestCase):
    """Importance-sampling test using the pyprob.observe(dist, name=...) style."""

    def __init__(self, *args, **kwargs):
        # http://www.robots.ox.ac.uk/~fwood/assets/pdf/Wood-AISTATS-2014.pdf
        class GaussianWithUnknownMean(Model):
            def __init__(self, prior_mean=1, prior_stddev=math.sqrt(5), likelihood_stddev=math.sqrt(2)):
                self.prior_mean = prior_mean
                self.prior_stddev = prior_stddev
                self.likelihood_stddev = likelihood_stddev
                super().__init__('Gaussian with unknown mean')

            def forward(self):
                mu = pyprob.sample(Normal(self.prior_mean, self.prior_stddev))
                likelihood = Normal(mu, self.likelihood_stddev)
                # pyprob.observe usage alternative #1
                pyprob.observe(likelihood, name='obs0')
                pyprob.observe(likelihood, name='obs1')
                return mu

        self._model = GaussianWithUnknownMean()
        super().__init__(*args, **kwargs)

    def test_observation_style1_gum_posterior_importance_sampling(self):
        """Posterior matches the analytic solution; unweighted samples match the prior."""
        samples = importance_sampling_samples
        target = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(target.mean)
        posterior_stddev_correct = float(target.stddev)
        prior_mean_correct = 1.
        prior_stddev_correct = math.sqrt(5)
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, observe={'obs0': 8, 'obs1': 9})
        unweighted = posterior.unweighted()
        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(unweighted.mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(unweighted.stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(target, Normal(posterior.mean, posterior.stddev)))
        util.eval_print('samples', 'prior_mean_correct', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'prior_stddev_correct', 'posterior_stddev_unweighted', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence')
        self.assertAlmostEqual(posterior_mean_unweighted, prior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev_unweighted, prior_stddev_correct, places=0)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)
class ModelObservationStyle2TestCase(unittest.TestCase):
    """Importance-sampling test using the named pyprob.sample(...) observation style."""

    def __init__(self, *args, **kwargs):
        # http://www.robots.ox.ac.uk/~fwood/assets/pdf/Wood-AISTATS-2014.pdf
        class GaussianWithUnknownMean(Model):
            def __init__(self, prior_mean=1, prior_stddev=math.sqrt(5), likelihood_stddev=math.sqrt(2)):
                self.prior_mean = prior_mean
                self.prior_stddev = prior_stddev
                self.likelihood_stddev = likelihood_stddev
                super().__init__('Gaussian with unknown mean')

            def forward(self):
                mu = pyprob.sample(Normal(self.prior_mean, self.prior_stddev))
                likelihood = Normal(mu, self.likelihood_stddev)
                # pyprob.observe usage alternative #2
                pyprob.sample(likelihood, name='obs0')
                pyprob.sample(likelihood, name='obs1')
                return mu

        self._model = GaussianWithUnknownMean()
        super().__init__(*args, **kwargs)

    def test_observation_style2_gum_posterior_importance_sampling(self):
        """Posterior matches the analytic solution; unweighted samples match the prior."""
        samples = importance_sampling_samples
        target = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(target.mean)
        posterior_stddev_correct = float(target.stddev)
        prior_mean_correct = 1.
        prior_stddev_correct = math.sqrt(5)
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, observe={'obs0': 8, 'obs1': 9})
        unweighted = posterior.unweighted()
        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(unweighted.mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(unweighted.stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(target, Normal(posterior.mean, posterior.stddev)))
        util.eval_print('samples', 'prior_mean_correct', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'prior_stddev_correct', 'posterior_stddev_unweighted', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence')
        self.assertAlmostEqual(posterior_mean_unweighted, prior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev_unweighted, prior_stddev_correct, places=0)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)
if __name__ == '__main__':
    # Fix the RNG seed first so the stochastic inference tests are reproducible.
    pyprob.set_random_seed(123)
    pyprob.set_verbosity(1)
    unittest.main(verbosity=2)
|
# Test dependencies: Python stdlib, torch, and the pyprob framework under test.
import unittest
import math
import torch
import os
import tempfile
import uuid
import pyprob
from pyprob import util, Model, InferenceEngine
from pyprob.distributions import Normal, Uniform, Empirical

# Number of importance-sampling traces shared by the observation-style tests.
importance_sampling_samples = 5000
class ModelTestCase(unittest.TestCase):
    """End-to-end prior/posterior inference tests on the Marsaglia GUM model.

    The model is the Gaussian-with-unknown-mean benchmark whose posterior for
    observations {8, 9} is analytically Normal(7.25, sqrt(1/1.2)).
    """

    def __init__(self, *args, **kwargs):
        # http://www.robots.ox.ac.uk/~fwood/assets/pdf/Wood-AISTATS-2014.pdf
        class GaussianWithUnknownMeanMarsaglia(Model):
            def __init__(self, prior_mean=1, prior_stddev=math.sqrt(5), likelihood_stddev=math.sqrt(2)):
                self.prior_mean = prior_mean
                self.prior_stddev = prior_stddev
                self.likelihood_stddev = likelihood_stddev
                super().__init__('Gaussian with unknown mean (Marsaglia)')

            def marsaglia(self, mean, stddev):
                # Marsaglia polar method: rejection-sample a point in the unit
                # disc, then transform it into a Gaussian draw.
                uniform = Uniform(-1, 1)
                s = 1
                while float(s) >= 1:
                    x = pyprob.sample(uniform)
                    y = pyprob.sample(uniform)
                    s = x*x + y*y
                return mean + stddev * (x * torch.sqrt(-2 * torch.log(s) / s))

            def forward(self):
                mu = self.marsaglia(self.prior_mean, self.prior_stddev)
                likelihood = Normal(mu, self.likelihood_stddev)
                pyprob.observe(likelihood, 0, name='obs0')
                pyprob.observe(likelihood, 0, name='obs1')
                return mu

        self._model = GaussianWithUnknownMeanMarsaglia()
        super().__init__(*args, **kwargs)

    def test_model_prior(self):
        """Prior samples reproduce the analytic prior moments."""
        num_traces = 5000
        prior_mean_correct = 1
        prior_stddev_correct = math.sqrt(5)
        prior = self._model.prior_results(num_traces)
        prior_mean = float(prior.mean)
        prior_stddev = float(prior.stddev)
        util.eval_print('num_traces', 'prior_mean', 'prior_mean_correct', 'prior_stddev', 'prior_stddev_correct')
        self.assertAlmostEqual(prior_mean, prior_mean_correct, places=0)
        self.assertAlmostEqual(prior_stddev, prior_stddev_correct, places=0)

    def test_model_prior_on_disk(self):
        """Two prior runs appended to one file yield a 2*num_traces Empirical."""
        file_name = os.path.join(tempfile.mkdtemp(), str(uuid.uuid4()))
        num_traces = 1000
        prior_mean_correct = 1
        prior_stddev_correct = math.sqrt(5)
        prior_length_correct = 2 * num_traces
        # First run writes the file; second run (same file_name) appends to it.
        prior = self._model.prior_results(num_traces, file_name=file_name)
        prior.close()
        prior = self._model.prior_results(num_traces, file_name=file_name)
        # prior.close()
        prior_length = prior.length
        prior_mean = float(prior.mean)
        prior_stddev = float(prior.stddev)
        util.eval_print('num_traces', 'prior_mean', 'prior_mean_correct', 'prior_stddev', 'prior_stddev_correct', 'prior_length', 'prior_length_correct')
        self.assertAlmostEqual(prior_mean, prior_mean_correct, places=0)
        self.assertAlmostEqual(prior_stddev, prior_stddev_correct, places=0)
        self.assertEqual(prior_length, prior_length_correct)

    def test_model_trace_length_statistics(self):
        """Trace-length distribution of the rejection loop matches reference values."""
        num_traces = 2000
        trace_length_mean_correct = 2.5630438327789307
        trace_length_stddev_correct = 1.2081329822540283
        trace_length_min_correct = 2
        trace_lengths = self._model.prior(num_traces, map_func=lambda trace: trace.length_controlled)
        trace_length_dist = Empirical(trace_lengths)
        trace_length_mean = float(trace_length_dist.mean)
        trace_length_stddev = float(trace_length_dist.stddev)
        trace_length_min = float(trace_length_dist.min)
        # Fix: convert to float like the other statistics (it is only printed,
        # but should not leak a tensor into the diagnostics).
        trace_length_max = float(trace_length_dist.max)
        util.eval_print('num_traces', 'trace_length_mean', 'trace_length_mean_correct', 'trace_length_stddev', 'trace_length_stddev_correct', 'trace_length_min', 'trace_length_min_correct', 'trace_length_max')
        self.assertAlmostEqual(trace_length_mean, trace_length_mean_correct, places=0)
        self.assertAlmostEqual(trace_length_stddev, trace_length_stddev_correct, places=0)
        self.assertAlmostEqual(trace_length_min, trace_length_min_correct, places=0)

    def test_model_lmh_posterior_with_stop_and_resume(self):
        """LMH chain split across runs, each resumed from the last trace."""
        posterior_num_runs = 200
        posterior_num_traces_each_run = 20
        posterior_num_traces_correct = posterior_num_traces_each_run * posterior_num_runs
        true_posterior = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)
        prior_mean_correct = 1.
        prior_stddev_correct = math.sqrt(5)
        posteriors = []
        initial_trace = None
        for i in range(posterior_num_runs):
            posterior = self._model.posterior(num_traces=posterior_num_traces_each_run, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9}, initial_trace=initial_trace)
            initial_trace = posterior[-1]  # resume the chain from its last state
            posteriors.append(posterior)
        posterior = Empirical(concat_empiricals=posteriors).map(lambda trace: trace.result)
        posterior_num_traces = posterior.length
        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(posterior.unweighted().mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(posterior.unweighted().stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))
        util.eval_print('posterior_num_runs', 'posterior_num_traces_each_run', 'posterior_num_traces', 'posterior_num_traces_correct', 'prior_mean_correct', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'prior_stddev_correct', 'posterior_stddev_unweighted', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence')
        self.assertEqual(posterior_num_traces, posterior_num_traces_correct)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)

    def test_model_rmh_posterior_with_stop_and_resume(self):
        """RMH chain split across runs, each resumed from the last trace."""
        posterior_num_runs = 100
        posterior_num_traces_each_run = 20
        posterior_num_traces_correct = posterior_num_traces_each_run * posterior_num_runs
        true_posterior = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)
        prior_mean_correct = 1.
        prior_stddev_correct = math.sqrt(5)
        posteriors = []
        initial_trace = None
        for i in range(posterior_num_runs):
            posterior = self._model.posterior(num_traces=posterior_num_traces_each_run, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9}, initial_trace=initial_trace)
            initial_trace = posterior[-1]  # resume the chain from its last state
            posteriors.append(posterior)
        posterior = Empirical(concat_empiricals=posteriors).map(lambda trace: trace.result)
        posterior_num_traces = posterior.length
        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(posterior.unweighted().mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(posterior.unweighted().stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))
        util.eval_print('posterior_num_runs', 'posterior_num_traces_each_run', 'posterior_num_traces', 'posterior_num_traces_correct', 'prior_mean_correct', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'prior_stddev_correct', 'posterior_stddev_unweighted', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence')
        self.assertEqual(posterior_num_traces, posterior_num_traces_correct)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)

    def test_model_rmh_posterior_with_online_thinning(self):
        """Online thinning keeps 1-in-thinning_steps traces without biasing RMH."""
        thinning_steps = 10
        posterior_num_traces = 3000
        # Fix: integer division -- the expected kept-trace count is an int.
        posterior_with_thinning_num_traces_correct = posterior_num_traces // thinning_steps
        true_posterior = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)
        posterior = self._model.posterior_results(num_traces=posterior_num_traces, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9})
        # length equals the requested count, so the reassignment keeps the
        # thinned run below on the same trace budget.
        posterior_num_traces = posterior.length
        posterior_mean = float(posterior.mean)
        posterior_stddev = float(posterior.stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))
        posterior_with_thinning = self._model.posterior_results(num_traces=posterior_num_traces, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9}, thinning_steps=thinning_steps)
        posterior_with_thinning_num_traces = posterior_with_thinning.length
        posterior_with_thinning_mean = float(posterior_with_thinning.mean)
        posterior_with_thinning_stddev = float(posterior_with_thinning.stddev)
        kl_divergence_with_thinning = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior_with_thinning.mean, posterior_with_thinning.stddev)))
        util.eval_print('posterior_num_traces', 'posterior_mean', 'posterior_mean_correct', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence', 'thinning_steps', 'posterior_with_thinning_num_traces', 'posterior_with_thinning_num_traces_correct', 'posterior_with_thinning_mean', 'posterior_with_thinning_stddev', 'kl_divergence_with_thinning')
        self.assertEqual(posterior_with_thinning_num_traces, posterior_with_thinning_num_traces_correct)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)
        self.assertAlmostEqual(posterior_with_thinning_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_with_thinning_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence_with_thinning, 0.25)

    def test_model_lmh_posterior_with_online_thinning(self):
        """Online thinning keeps 1-in-thinning_steps traces without biasing LMH."""
        thinning_steps = 10
        posterior_num_traces = 4000
        # Fix: integer division -- the expected kept-trace count is an int.
        posterior_with_thinning_num_traces_correct = posterior_num_traces // thinning_steps
        true_posterior = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)
        posterior = self._model.posterior_results(num_traces=posterior_num_traces, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9})
        # length equals the requested count, so the reassignment keeps the
        # thinned run below on the same trace budget.
        posterior_num_traces = posterior.length
        posterior_mean = float(posterior.mean)
        posterior_stddev = float(posterior.stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))
        posterior_with_thinning = self._model.posterior_results(num_traces=posterior_num_traces, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9}, thinning_steps=thinning_steps)
        posterior_with_thinning_num_traces = posterior_with_thinning.length
        posterior_with_thinning_mean = float(posterior_with_thinning.mean)
        posterior_with_thinning_stddev = float(posterior_with_thinning.stddev)
        kl_divergence_with_thinning = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior_with_thinning.mean, posterior_with_thinning.stddev)))
        util.eval_print('posterior_num_traces', 'posterior_mean', 'posterior_mean_correct', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence', 'thinning_steps', 'posterior_with_thinning_num_traces', 'posterior_with_thinning_num_traces_correct', 'posterior_with_thinning_mean', 'posterior_with_thinning_stddev', 'kl_divergence_with_thinning')
        self.assertEqual(posterior_with_thinning_num_traces, posterior_with_thinning_num_traces_correct)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)
        self.assertAlmostEqual(posterior_with_thinning_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_with_thinning_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence_with_thinning, 0.25)

    def test_model_lmh_posterior_with_stop_and_resume_on_disk(self):
        """LMH chain split across runs, with each segment persisted to one file."""
        file_name = os.path.join(tempfile.mkdtemp(), str(uuid.uuid4()))
        posterior_num_runs = 200
        posterior_num_traces_each_run = 50
        posterior_num_traces_correct = posterior_num_traces_each_run * posterior_num_runs
        true_posterior = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)
        prior_mean_correct = 1.
        prior_stddev_correct = math.sqrt(5)
        initial_trace = None
        for i in range(posterior_num_runs):
            posterior_traces = self._model.posterior(num_traces=posterior_num_traces_each_run, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9}, initial_trace=initial_trace, file_name=file_name)
            initial_trace = posterior_traces[-1]  # resume the chain from its last state
            posterior_traces.close()
        posterior = Empirical(file_name=file_name)
        posterior.finalize()
        posterior = posterior.map(lambda trace: trace.result)
        posterior_num_traces = posterior.length
        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(posterior.unweighted().mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(posterior.unweighted().stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))
        util.eval_print('posterior_num_runs', 'posterior_num_traces_each_run', 'posterior_num_traces', 'posterior_num_traces_correct', 'prior_mean_correct', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'prior_stddev_correct', 'posterior_stddev_unweighted', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence')
        self.assertEqual(posterior_num_traces, posterior_num_traces_correct)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)

    def test_model_rmh_posterior_with_stop_and_resume_on_disk(self):
        """RMH chain split across runs, with each segment persisted to one file."""
        file_name = os.path.join(tempfile.mkdtemp(), str(uuid.uuid4()))
        posterior_num_runs = 50
        posterior_num_traces_each_run = 50
        posterior_num_traces_correct = posterior_num_traces_each_run * posterior_num_runs
        true_posterior = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)
        prior_mean_correct = 1.
        prior_stddev_correct = math.sqrt(5)
        initial_trace = None
        for i in range(posterior_num_runs):
            posterior_traces = self._model.posterior(num_traces=posterior_num_traces_each_run, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9}, initial_trace=initial_trace, file_name=file_name)
            initial_trace = posterior_traces[-1]  # resume the chain from its last state
            posterior_traces.close()
        posterior = Empirical(file_name=file_name)
        posterior.finalize()
        posterior = posterior.map(lambda trace: trace.result)
        posterior_num_traces = posterior.length
        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(posterior.unweighted().mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(posterior.unweighted().stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))
        util.eval_print('posterior_num_runs', 'posterior_num_traces_each_run', 'posterior_num_traces', 'posterior_num_traces_correct', 'prior_mean_correct', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'prior_stddev_correct', 'posterior_stddev_unweighted', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence')
        self.assertEqual(posterior_num_traces, posterior_num_traces_correct)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)
class ModelWithReplacementTestCase(unittest.TestCase):
    """Tests for a model whose rejection-sampling loop uses replace=True.

    With replacement, repeated draws at the same sample address are collapsed,
    so every trace should have exactly two controlled random choices (x, y).
    """

    def __init__(self, *args, **kwargs):
        # http://www.robots.ox.ac.uk/~fwood/assets/pdf/Wood-AISTATS-2014.pdf
        class GaussianWithUnknownMeanMarsagliaWithReplacement(Model):
            def __init__(self, prior_mean=1, prior_stddev=math.sqrt(5), likelihood_stddev=math.sqrt(2)):
                self.prior_mean = prior_mean
                self.prior_stddev = prior_stddev
                self.likelihood_stddev = likelihood_stddev
                super().__init__('Gaussian with unknown mean (Marsaglia)')

            def marsaglia(self, mean, stddev):
                # Marsaglia polar method: rejection-sample a point in the unit
                # disc; replace=True collapses the loop's repeated draws into a
                # single trace entry per address.
                uniform = Uniform(-1, 1)
                s = 1
                while float(s) >= 1:
                    x = pyprob.sample(uniform, replace=True)
                    y = pyprob.sample(uniform, replace=True)
                    s = x*x + y*y
                return mean + stddev * (x * torch.sqrt(-2 * torch.log(s) / s))

            def forward(self):
                mu = self.marsaglia(self.prior_mean, self.prior_stddev)
                likelihood = Normal(mu, self.likelihood_stddev)
                pyprob.observe(likelihood, 0, name='obs0')
                pyprob.observe(likelihood, 0, name='obs1')
                return mu

        self._model = GaussianWithUnknownMeanMarsagliaWithReplacement()
        super().__init__(*args, **kwargs)

    def test_model_with_replacement_trace_length_statistics(self):
        """Controlled trace length must be exactly 2 for every prior trace."""
        num_traces = 2000
        trace_length_mean_correct = 2
        trace_length_stddev_correct = 0
        trace_length_min_correct = 2
        trace_length_max_correct = 2
        trace_lengths = self._model.prior(num_traces, map_func=lambda trace: trace.length_controlled)
        trace_length_dist = Empirical(trace_lengths)
        trace_length_mean = float(trace_length_dist.mean)
        trace_length_stddev = float(trace_length_dist.stddev)
        trace_length_min = float(trace_length_dist.min)
        # Fix: convert to float like the other statistics so assertAlmostEqual
        # compares plain floats rather than a tensor.
        trace_length_max = float(trace_length_dist.max)
        util.eval_print('num_traces', 'trace_length_mean', 'trace_length_mean_correct', 'trace_length_stddev', 'trace_length_stddev_correct', 'trace_length_min', 'trace_length_min_correct', 'trace_length_max', 'trace_length_max_correct')
        self.assertAlmostEqual(trace_length_mean, trace_length_mean_correct, places=0)
        self.assertAlmostEqual(trace_length_stddev, trace_length_stddev_correct, places=0)
        self.assertAlmostEqual(trace_length_min, trace_length_min_correct, places=0)
        self.assertAlmostEqual(trace_length_max, trace_length_max_correct, places=0)
class ModelObservationStyle1TestCase(unittest.TestCase):
    """Importance-sampling test using the pyprob.observe(dist, name=...) style."""

    def __init__(self, *args, **kwargs):
        # http://www.robots.ox.ac.uk/~fwood/assets/pdf/Wood-AISTATS-2014.pdf
        class GaussianWithUnknownMean(Model):
            def __init__(self, prior_mean=1, prior_stddev=math.sqrt(5), likelihood_stddev=math.sqrt(2)):
                self.prior_mean = prior_mean
                self.prior_stddev = prior_stddev
                self.likelihood_stddev = likelihood_stddev
                super().__init__('Gaussian with unknown mean')

            def forward(self):
                mu = pyprob.sample(Normal(self.prior_mean, self.prior_stddev))
                likelihood = Normal(mu, self.likelihood_stddev)
                # pyprob.observe usage alternative #1
                pyprob.observe(likelihood, name='obs0')
                pyprob.observe(likelihood, name='obs1')
                return mu

        self._model = GaussianWithUnknownMean()
        super().__init__(*args, **kwargs)

    def test_observation_style1_gum_posterior_importance_sampling(self):
        """Posterior matches the analytic solution; unweighted samples match the prior."""
        samples = importance_sampling_samples
        target = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(target.mean)
        posterior_stddev_correct = float(target.stddev)
        prior_mean_correct = 1.
        prior_stddev_correct = math.sqrt(5)
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, observe={'obs0': 8, 'obs1': 9})
        unweighted = posterior.unweighted()
        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(unweighted.mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(unweighted.stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(target, Normal(posterior.mean, posterior.stddev)))
        util.eval_print('samples', 'prior_mean_correct', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'prior_stddev_correct', 'posterior_stddev_unweighted', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence')
        self.assertAlmostEqual(posterior_mean_unweighted, prior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev_unweighted, prior_stddev_correct, places=0)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)
class ModelObservationStyle2TestCase(unittest.TestCase):
    """Importance-sampling test using the named pyprob.sample(...) observation style."""

    def __init__(self, *args, **kwargs):
        # http://www.robots.ox.ac.uk/~fwood/assets/pdf/Wood-AISTATS-2014.pdf
        class GaussianWithUnknownMean(Model):
            def __init__(self, prior_mean=1, prior_stddev=math.sqrt(5), likelihood_stddev=math.sqrt(2)):
                self.prior_mean = prior_mean
                self.prior_stddev = prior_stddev
                self.likelihood_stddev = likelihood_stddev
                super().__init__('Gaussian with unknown mean')

            def forward(self):
                mu = pyprob.sample(Normal(self.prior_mean, self.prior_stddev))
                likelihood = Normal(mu, self.likelihood_stddev)
                # pyprob.observe usage alternative #2
                pyprob.sample(likelihood, name='obs0')
                pyprob.sample(likelihood, name='obs1')
                return mu

        self._model = GaussianWithUnknownMean()
        super().__init__(*args, **kwargs)

    def test_observation_style2_gum_posterior_importance_sampling(self):
        """Posterior matches the analytic solution; unweighted samples match the prior."""
        samples = importance_sampling_samples
        target = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(target.mean)
        posterior_stddev_correct = float(target.stddev)
        prior_mean_correct = 1.
        prior_stddev_correct = math.sqrt(5)
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, observe={'obs0': 8, 'obs1': 9})
        unweighted = posterior.unweighted()
        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(unweighted.mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(unweighted.stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(target, Normal(posterior.mean, posterior.stddev)))
        util.eval_print('samples', 'prior_mean_correct', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'prior_stddev_correct', 'posterior_stddev_unweighted', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence')
        self.assertAlmostEqual(posterior_mean_unweighted, prior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev_unweighted, prior_stddev_correct, places=0)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)
if __name__ == '__main__':
    # Fixed seed so the stochastic inference tests are reproducible.
    pyprob.set_random_seed(123)
    pyprob.set_verbosity(1)
    unittest.main(verbosity=2)
|
en
| 0.402053
|
# http://www.robots.ox.ac.uk/~fwood/assets/pdf/Wood-AISTATS-2014.pdf # prior.close() # http://www.robots.ox.ac.uk/~fwood/assets/pdf/Wood-AISTATS-2014.pdf # http://www.robots.ox.ac.uk/~fwood/assets/pdf/Wood-AISTATS-2014.pdf # pyprob.observe usage alternative #1 # http://www.robots.ox.ac.uk/~fwood/assets/pdf/Wood-AISTATS-2014.pdf # pyprob.observe usage alternative #2
| 2.344829
| 2
|
examples/manual_test.py
|
fossabot/vtk
| 2
|
6626677
|
<filename>examples/manual_test.py
import cv2
import os
import time
from termcolor import cprint
from vtk.inferrers.tensorflow import TensorFlowInferrer

start = time.time()
# Silence TensorFlow's C++ logging before the graph is loaded.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

cprint("[0/6] Capturing frame...", "green", attrs=["bold"])
capture = cv2.VideoCapture(0)
status, frame = capture.read()
capture.release()  # release the camera as soon as the single frame is grabbed
if not status:
    raise RuntimeError("Failed to capture a frame from camera 0")

cprint("[1/6] Loading graph into inference class...", "green", attrs=["bold"])
inferrer = TensorFlowInferrer("tests/testdata/models/frozen_inference_graph.pb")
cprint("[2/6] Preparing graph in memory...", "green", attrs=["bold"])
cprint("[3/6] Running inference on frame...", "green", attrs=["bold"])
results = inferrer.run(frame)

cprint("[4/6] Drawing on frame...", "green", attrs=["bold"])
for i in results["detections"]:
    # BUG FIX: cv2.rectangle takes (img, pt1, pt2, color, thickness); the
    # original call passed color=2 and thickness=(125, 125, 0), swapped.
    cv2.rectangle(frame, (i["bbox"][0], i["bbox"][1]), (i["bbox"][2], i["bbox"][3]), (125, 125, 0), 2)

cprint("[5/6] Displaying result, press Q to quit...", "green", attrs=["bold"])
end = time.time()
while not cv2.waitKey(1) & 0xFF == ord("q"):
    cv2.imshow("Output", frame)

cprint("[6/6] Cleaning up...", "green", attrs=["bold"])
cv2.destroyAllWindows()
cprint("Successfully completed test!", "blue", attrs=["bold"])
cprint("Took {s} seconds.".format(s=str(round(end - start, 2))), "blue", attrs=["bold"])
|
<filename>examples/manual_test.py
import cv2
import os
import time
from termcolor import cprint
from vtk.inferrers.tensorflow import TensorFlowInferrer

start = time.time()
# Silence TensorFlow's C++ logging before the graph is loaded.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

cprint("[0/6] Capturing frame...", "green", attrs=["bold"])
capture = cv2.VideoCapture(0)
status, frame = capture.read()
capture.release()  # release the camera as soon as the single frame is grabbed
if not status:
    raise RuntimeError("Failed to capture a frame from camera 0")

cprint("[1/6] Loading graph into inference class...", "green", attrs=["bold"])
inferrer = TensorFlowInferrer("tests/testdata/models/frozen_inference_graph.pb")
cprint("[2/6] Preparing graph in memory...", "green", attrs=["bold"])
cprint("[3/6] Running inference on frame...", "green", attrs=["bold"])
results = inferrer.run(frame)

cprint("[4/6] Drawing on frame...", "green", attrs=["bold"])
for i in results["detections"]:
    # BUG FIX: cv2.rectangle takes (img, pt1, pt2, color, thickness); the
    # original call passed color=2 and thickness=(125, 125, 0), swapped.
    cv2.rectangle(frame, (i["bbox"][0], i["bbox"][1]), (i["bbox"][2], i["bbox"][3]), (125, 125, 0), 2)

cprint("[5/6] Displaying result, press Q to quit...", "green", attrs=["bold"])
end = time.time()
while not cv2.waitKey(1) & 0xFF == ord("q"):
    cv2.imshow("Output", frame)

cprint("[6/6] Cleaning up...", "green", attrs=["bold"])
cv2.destroyAllWindows()
cprint("Successfully completed test!", "blue", attrs=["bold"])
cprint("Took {s} seconds.".format(s=str(round(end - start, 2))), "blue", attrs=["bold"])
|
none
| 1
| 2.54266
| 3
|
|
taln2016/icsisumm-primary-sys34_v1/preprocess/text.py
|
hectormartinez/rougexstem
| 0
|
6626678
|
<filename>taln2016/icsisumm-primary-sys34_v1/preprocess/text.py<gh_stars>0
import os, sys, re, math
import util
from globals import *
import nltk
import sbd
class TextProcessor:
    """Wraps tokenization, sentence splitting, stemming, and stopword
    handling behind one object (see the module-level `text_processor`)."""

    def __init__(self):
        self._no_punct_pattern = re.compile('[a-zA-Z0-9- ]')
        self._stopwords = set(open(STOPWORDS).read().splitlines())
        self._porter_stemmer = nltk.stem.porter.PorterStemmer()
        #self._sent_tokenizer = util.load_pickle('%s%s' %(STATIC_DATA_ROOT, 'punkt/m07_punkt.pickle'))
        # Abbreviations that must not be treated as sentence terminators.
        self._sent_split_ABBR_LIST = set(['Mr.', 'Mrs.', 'Sen.', 'No.', 'Dr.', 'Gen.', 'St.', 'Lt.', 'Col.', 'Capt.'])
        # Leading punctuation pairs that indicate a bad sentence split.
        self._sent_split_PUNCT_LIST = set(['\" ', '\")', ') ', '\' ', '\"\''])

    def load_splitta_model(self, path):
        """Load a splitta sentence-boundary model; SVM variant if the path says so."""
        use_svm = False
        if 'svm' in path.lower(): use_svm = True
        self._splitta_model = sbd.load_sbd_model(path, use_svm)

    def load_punkt_model(self, path):
        """Load a pickled Punkt sentence tokenizer."""
        self._sent_tokenizer = util.load_pickle(path)

    def train_punkt_model(self, text, save_path=None):
        """
        unsupervised training given some text
        optional save_path for future use
        """
        ## train tokenizer
        sys.stderr.write('Training...\n')
        t = nltk.tokenize.punkt.PunktSentenceTokenizer()
        t.ABBREV = 0.1 # threshold for identifying abbrevs (lower is more aggressive)
        t.train(text)  # BUG FIX: was t.train(rawtext) -- `rawtext` is undefined
        self._sent_tokenizer = t
        ## pickle it
        if save_path:
            util.save_pickle(t, save_path)
            sys.stderr.write('Saved model as [%s]\n' % save_path)  # BUG FIX: was %output (undefined)

    def split_sents(self, text):
        """Punkt-split `text` into sentences, then repair common split errors."""
        sents = []
        psents = self._sent_tokenizer.tokenize(text)
        ## fix end of sentence punctuation errors
        for i in range(len(psents)-1, -1, -1):
            if psents[i][0:2] in self._sent_split_PUNCT_LIST:
                psents[i-1] += psents[i][0]
                psents[i] = psents[i][2:]
            elif psents[i] in ['"', ')', '\'']:
                psents[i-1] += psents[i][0]
                psents[i] = ''
            elif psents[i][0] in [',', ';', ':']:
                psents[i-1] += psents[i]
                psents[i] = ''
            elif i+1 < len(psents) and psents[i].split()[-1] in self._sent_split_ABBR_LIST:
                # Sentence was wrongly broken after an abbreviation: re-join.
                psents[i] += ' ' + psents[i+1]
                psents[i+1] = ''
        sents.extend([p for p in psents if len(p) > 1])
        return sents

    def splitta(self, text):
        """Split `text` into sentences with the loaded splitta model."""
        return sbd.sbd_text(self._splitta_model, text, do_tok=False)

    def tokenize(self, text):
        return nltk.tokenize.punkt_word_tokenize(text)

    def porter_stem(self, word):
        return self._porter_stemmer.stem(word)

    def remove_stopwords(self, words):
        return [w for w in words if not w in self._stopwords]

    def is_just_stopwords(self, words):
        """True if every word (string or list) is a stopword."""
        if type(words) == type(''): words = words.split()
        for word in words:
            if word not in self._stopwords:
                return False
        return True

    def remove_punct(self, sentence):
        return re.sub(r'[^a-zA-Z0-9- ]', '', sentence).strip()
# Module-level singleton used by the Sentence and Document classes below.
text_processor = TextProcessor()
class Sentence:
    """
    class for holding information about a single sentence
    self.original     original text string
    self.parsed       s-exp representation of a parse tree
    """
    def __init__(self, text, order = 0, source = "?", date = "?"):
        # order: position of the sentence within its document
        self.order = order
        self.date = date
        self.source = source
        self.set_text(text)

    def set_text(self, text):
        """Set the raw text and derive tokenized/stemmed/stopword-free views."""
        self.original = text.strip()
        self.parsed = None
        self.length = len(self.original.split())
        self.tokens = text_processor.tokenize(text_processor.remove_punct(self.original.lower()))
        # NOTE: map() returns lists under Python 2 (this module's target);
        # under Python 3 these would be one-shot iterators.
        self.stemmed = map(text_processor.porter_stem, self.tokens)
        self.no_stop = map(text_processor.porter_stem, text_processor.remove_stopwords(self.tokens))
        # Term-frequency counts over the stemmed, stopword-free tokens.
        self.no_stop_freq = {}
        for word in self.no_stop:
            if word not in self.no_stop_freq: self.no_stop_freq[word] = 1
            else: self.no_stop_freq[word] += 1

    def parse(self, parser=None):
        # Lazy: a batching parser queues the job; otherwise parse inline.
        if self.parsed:
            return
        if parser:
            parser.add_job(self, self.original)
        else:
            #parser = CommandLineParser()
            # NOTE(review): with parser=None this calls parse() on None and
            # would raise; presumably a parser was meant to be constructed
            # here (see commented-out line) -- confirm before relying on it.
            self.parsed = parser.parse(self.original)

    def sim_basic(self, s):
        """
        basic word overlap similarity between two sentences
        """
        # Accept either a Sentence or a raw string.
        if type(s) != type(''):
            s = s.no_stop
        else:
            s = s.split()
        w1 = set(self.no_stop)
        w2 = set(s)
        return 1.0 * len(w1.intersection(w2)) / max(len(w1), len(w2))

    # compute norm for cosine similarity
    def compute_norm(self, words_idf = None):
        # Optionally IDF-weighted Euclidean norm of the term-frequency vector.
        self.norm = 0
        for word in self.no_stop_freq:
            score = self.no_stop_freq[word]
            if words_idf != None and word in words_idf:
                score *= words_idf[word]
            self.norm += score * score
        self.norm = math.sqrt(self.norm)

    # simple cosine similarity with ignored
    def sim_cosine(self, s, words_idf = None):
        # compute_norm must have been called on both sentences beforehand.
        norm = self.norm * s.norm
        if math.fabs(norm) < 0.00001:
            return 0
        score = 0
        for word in self.no_stop_freq:
            if word in s.no_stop_freq:
                factor = self.no_stop_freq[word]
                if words_idf != None and word in words_idf:
                    factor *= words_idf[word] * words_idf[word]
                factor *= s.no_stop_freq[word]
                score += factor
        return score / norm

    def __str__(self):
        return self.original
def glue_quotes(sentences):
    """Re-join sentences that were wrongly split inside a quotation.

    Locates opening and closing double-quote positions across the whole
    sentence list, pairs each opening quote with its nearest following
    close, and merges the sentences spanned by any pair that crosses a
    sentence boundary.
    """
    starts = []   # (sentence index, global char offset, offset within sentence)
    ends = []
    id = 0
    offset = 0    # running global character offset over all sentences
    for sentence in sentences:
        # Opening quotes: at string start or after whitespace, optional '('s.
        for match in re.finditer(r'(^|\s)[\(]*"', sentence):
            starts.append((id, offset + match.end(), match.end()))
        # Closing quotes: followed by optional punctuation then whitespace/end.
        for match in re.finditer(r'"[,.\'\)]*(\s|$)', sentence):
            ends.append((id, offset + match.start(), match.start()))
        # A quote embedded mid-token is recorded as both an open and a close.
        for match in re.finditer(r'([^\(\s]"[^\s.,\'])', sentence):
            starts.append((id, offset + match.end(), match.end()))
            ends.append((id, offset + match.start(), match.start()))
        offset += len(sentence)
        id += 1
    gluelist = []
    bounds = {}
    # Greedily pair each opening quote with the nearest closing quote at or
    # after it (by global offset).
    for i in xrange(len(starts)):
        min = offset
        argmin = None
        for j in xrange(len(ends)):
            if ends[j] == None: continue
            dist = ends[j][1] - starts[i][1]
            if dist < 0: continue
            if dist < min or argmin == None:
                min = dist
                argmin = j
        if argmin != None:
            # Keep only the tightest (closest) start for each end.
            if argmin not in bounds:
                bounds[argmin] = (i, min)
            else:
                if bounds[argmin][1] > min:
                    bounds[argmin] = (i, min)
    for end, start in bounds.items():
        # Only pairs that span a sentence boundary require gluing.
        if starts[start[0]][0] != ends[end][0]:
            gluelist.append((starts[start[0]][0], ends[end][0]))
        starts[start[0]] = None
        ends[end] = None
    # Leftover unmatched quotes: diagnostics only (warnings commented out).
    for start in starts:
        if start != None:
            sentence = sentences[start[0]][:start[2]] + "<start>" + sentences[start[0]][start[2]:]
            #print ('WARNING: unused quote [%s]\n' % sentence)
    for end in ends:
        if end != None:
            sentence = sentences[end[0]][:end[2]] + "<end>" + sentences[end[0]][end[2]:]
            #print ('WARNING: unused quote [%s]\n' % sentence)
    output = []
    # Merge every sentence inside a glued span into the preceding output item.
    for i in xrange(len(sentences)):
        glued = False
        for item in gluelist:
            if i > item[0] and i <= item[1]:
                output[-1] += " " + sentences[i]
                glued = True
                break
        if not glued:
            output.append(sentences[i])
    return output
def glue_pars(pars):
    """Merge paragraphs that are continuations of the previous paragraph.

    A paragraph is a continuation when it starts with lowercase characters
    and the preceding paragraph does not end with a period (optionally
    followed by a closing quote or parenthesis).

    BUG FIX: the original referenced the undefined name `par` (NameError),
    always dropped the final paragraph, and emitted a glued paragraph twice.
    """
    glued = []
    for p in pars:
        ## this par starts with lowercase and previous doesn't end with a period
        if glued and p[0:2].islower() and not re.search('\.[")]?$', glued[-1]):
            glued[-1] += p
        else:
            glued.append(p)
    return glued
class Document:
"""
Class for storing documents.
doc = Document(<document_path>) will load the document and parse it
for desired information.
Public Member Variables:
self.id 'XIE19980304.0061'
self.source 'XIE'
self.date '19980304.0061'
self.paragraphs ['Par 1 text', 'Par 2 text', ... ]
self.sentences ['sent 1 text', 'sent 2 text', ... ]
"""
def _parse_clean(self, path):
return open(path).read().splitlines()
def _parse_newswire(self, data):
data = data.replace('``', '\"').replace('\'\'', '\"').replace('`', '\'')
data = data.replace('\n', '\t')
pattern = re.compile(r'<\/?(p|text|doc)>', re.I | re.M) # convert <p> and <text> to paragraph breaks
data = re.sub(pattern, '\t', data)
pattern = re.compile(r'<[^>]*>.*?<\/[^>]*>', re.M) # remove tagged content
data = re.sub(pattern, '\t', data)
pattern = re.compile(r'<[^>]*>', re.M) # remove remaining tags
data = re.sub(pattern, ' ', data)
pattern = re.compile(r'\s+', re.M)
text = map(lambda x: re.sub(pattern, ' ', x.strip()), filter(lambda x: x != '', re.split(r' *\t *\t *', data)))
return text
def _fix_newswire(self, par):
"""
clean up newswire paragraphs
"""
fixed = par
## get rid of leaders in newswire text
fixed = re.sub('^(.{0,35} )?\(\w{2,10}?\) ?(--?|_) ?', '', fixed)
fixed = re.sub('^([A-Z]{2,}.{0,30}? (--?|_) ){,2}', '', fixed)
## replace underscore, dash, double-dash with comma
fixed = fixed.replace(' _ ', ', ')
fixed = fixed.replace(' - ', ', ')
fixed = fixed.replace(' -- ', ', ')
fixed = re.sub('([\w\d])--([\w\d])', '\\1, \\2', fixed)
## other fixes
fixed = re.sub('^(_|--?)', '', fixed)
fixed = re.sub(re.compile(r' ?& ?', re.I), '&', fixed)
fixed = re.sub(' ?&\w{2}; ?', ' ', fixed)
fixed = fixed.replace(' ,', ',')
fixed = re.sub('^, ', '', fixed)
fixed = re.sub('\s+', ' ', fixed)
fixed = re.sub('(\w)\.("?[A-Z])', '\\1. \\2', fixed)
fixed = fixed.strip()
if util.is_punct(fixed): fixed = ''
return fixed
def get_sentences(self):
self.sentences = []
order = 0
for par in self.paragraphs:
#sents_text = text_processor.split_sents(par)
sents_text = text_processor.splitta(par)
sents_text_glued = glue_quotes(sents_text)
par_sent_count = 0
for sent_text in sents_text_glued:
#print order, sent_text
if order == 0 and re.search('By [A-Z]', sent_text): continue
if order == 0 and sent_text.startswith('('): continue
if order == 0 and re.search('c\.\d', sent_text): continue
if order == 0 and sent_text.startswith('"') and sent_text.endswith('"'): continue
if sent_text.isupper(): continue
if 1.0*len([1 for c in sent_text if c.isupper()]) / len(sent_text) > 0.2: continue
if len(sent_text.split()) < 20 and not re.search('\.[")]?$', sent_text): continue
if re.search(re.compile('eds:', re.I), sent_text): continue
if re.search('[ \-]\d\d\d-\d\d\d\d', sent_text): continue
if '(k)' in sent_text: continue
sentence = Sentence(sent_text, order, self.source, self.date)
if par_sent_count == 0: sentence.paragraph_starter = True
else: sentence.paragraph_starter = False
self.sentences.append(sentence)
order += 1
par_sent_count += 1
print self.id, len(self.sentences)
def parse_sentences(self, parser=None):
if parser:
for sentence in self.sentences:
sentence.parse(parser)
else:
#parser = CommandLineParser(BERKELEY_PARSER_CMD)
for sentence in self.sentences:
sentence.parse(parser)
parser.run()
for sentence in parser.parsed:
sentence.parsed = parser.parsed[sentence]
def __init__(self, path, is_clean=False):
"""
path is the location of the file to process
is_clean=True means that file has no XML or other markup: just text
"""
self.id = 'NONE'
self.date = 'NONE'
self.source = 'NONE'
self.paragraphs = []
self._isempty = True
## get generic info
if os.path.isfile(path): rawdata = open(path).read()
elif path.strip().startswith('<DOC>'): rawdata = path
else:
sys.stderr.write('ERROR: could not read: %s\n' %path)
return
try:
self.id = util.remove_tags(re.findall('<DOCNO>[^>]+</DOCNO>', rawdata[:100])[0])
except:
match = re.search('<DOC id=\"([^"]+)\"', rawdata[:100])
if match:
self.id = str(match.groups(1)[0])
else:
sys.stderr.write('ERROR: no <DOCNO>/<DOC id=...> tag: %s\n' %path)
## source and date from id (assumes newswire style)
if self.id != 'NONE':
self.source = re.findall('^[^_\d]*', self.id)[0]
self.date = self.id.replace(self.source, '')
## parse various types of newswire xml
if is_clean: text = self._parse_clean(rawdata)
else: text = self._parse_newswire(rawdata)
if len(text)==0:
#sys.stderr.write('WARNING: no text read for: %s\n' %path)
return
self.paragraphs = []
for paragraph in text:
fixed_par = self._fix_newswire(paragraph)
if fixed_par == '': continue
self.paragraphs.append(fixed_par)
self._isempty = False
def __str__(self):
s = []
s.append('%s DOCUMENT' %'#START')
s.append('ID %s' %self.id)
s.append('SOURCE %s' %self.source)
s.append('DATE %s' %self.date)
s.append('TEXT')
s.extend(self.paragraphs)
return '\n'.join(s)
|
<filename>taln2016/icsisumm-primary-sys34_v1/preprocess/text.py<gh_stars>0
import os, sys, re, math
import util
from globals import *
import nltk
import sbd
class TextProcessor:
    """Wraps tokenization, sentence splitting, stemming, and stopword
    handling behind one object (see the module-level `text_processor`)."""

    def __init__(self):
        self._no_punct_pattern = re.compile('[a-zA-Z0-9- ]')
        self._stopwords = set(open(STOPWORDS).read().splitlines())
        self._porter_stemmer = nltk.stem.porter.PorterStemmer()
        #self._sent_tokenizer = util.load_pickle('%s%s' %(STATIC_DATA_ROOT, 'punkt/m07_punkt.pickle'))
        # Abbreviations that must not be treated as sentence terminators.
        self._sent_split_ABBR_LIST = set(['Mr.', 'Mrs.', 'Sen.', 'No.', 'Dr.', 'Gen.', 'St.', 'Lt.', 'Col.', 'Capt.'])
        # Leading punctuation pairs that indicate a bad sentence split.
        self._sent_split_PUNCT_LIST = set(['\" ', '\")', ') ', '\' ', '\"\''])

    def load_splitta_model(self, path):
        """Load a splitta sentence-boundary model; SVM variant if the path says so."""
        use_svm = False
        if 'svm' in path.lower(): use_svm = True
        self._splitta_model = sbd.load_sbd_model(path, use_svm)

    def load_punkt_model(self, path):
        """Load a pickled Punkt sentence tokenizer."""
        self._sent_tokenizer = util.load_pickle(path)

    def train_punkt_model(self, text, save_path=None):
        """
        unsupervised training given some text
        optional save_path for future use
        """
        ## train tokenizer
        sys.stderr.write('Training...\n')
        t = nltk.tokenize.punkt.PunktSentenceTokenizer()
        t.ABBREV = 0.1 # threshold for identifying abbrevs (lower is more aggressive)
        t.train(text)  # BUG FIX: was t.train(rawtext) -- `rawtext` is undefined
        self._sent_tokenizer = t
        ## pickle it
        if save_path:
            util.save_pickle(t, save_path)
            sys.stderr.write('Saved model as [%s]\n' % save_path)  # BUG FIX: was %output (undefined)

    def split_sents(self, text):
        """Punkt-split `text` into sentences, then repair common split errors."""
        sents = []
        psents = self._sent_tokenizer.tokenize(text)
        ## fix end of sentence punctuation errors
        for i in range(len(psents)-1, -1, -1):
            if psents[i][0:2] in self._sent_split_PUNCT_LIST:
                psents[i-1] += psents[i][0]
                psents[i] = psents[i][2:]
            elif psents[i] in ['"', ')', '\'']:
                psents[i-1] += psents[i][0]
                psents[i] = ''
            elif psents[i][0] in [',', ';', ':']:
                psents[i-1] += psents[i]
                psents[i] = ''
            elif i+1 < len(psents) and psents[i].split()[-1] in self._sent_split_ABBR_LIST:
                # Sentence was wrongly broken after an abbreviation: re-join.
                psents[i] += ' ' + psents[i+1]
                psents[i+1] = ''
        sents.extend([p for p in psents if len(p) > 1])
        return sents

    def splitta(self, text):
        """Split `text` into sentences with the loaded splitta model."""
        return sbd.sbd_text(self._splitta_model, text, do_tok=False)

    def tokenize(self, text):
        return nltk.tokenize.punkt_word_tokenize(text)

    def porter_stem(self, word):
        return self._porter_stemmer.stem(word)

    def remove_stopwords(self, words):
        return [w for w in words if not w in self._stopwords]

    def is_just_stopwords(self, words):
        """True if every word (string or list) is a stopword."""
        if type(words) == type(''): words = words.split()
        for word in words:
            if word not in self._stopwords:
                return False
        return True

    def remove_punct(self, sentence):
        return re.sub(r'[^a-zA-Z0-9- ]', '', sentence).strip()
text_processor = TextProcessor()
class Sentence:
"""
class for holding information about a single sentence
self.original original text string
self.parsed s-exp representation of a parse tree
"""
def __init__(self, text, order = 0, source = "?", date = "?"):
self.order = order
self.date = date
self.source = source
self.set_text(text)
def set_text(self, text):
self.original = text.strip()
self.parsed = None
self.length = len(self.original.split())
self.tokens = text_processor.tokenize(text_processor.remove_punct(self.original.lower()))
self.stemmed = map(text_processor.porter_stem, self.tokens)
self.no_stop = map(text_processor.porter_stem, text_processor.remove_stopwords(self.tokens))
self.no_stop_freq = {}
for word in self.no_stop:
if word not in self.no_stop_freq: self.no_stop_freq[word] = 1
else: self.no_stop_freq[word] += 1
def parse(self, parser=None):
if self.parsed:
return
if parser:
parser.add_job(self, self.original)
else:
#parser = CommandLineParser()
self.parsed = parser.parse(self.original)
def sim_basic(self, s):
"""
basic word overlap similarity between two sentences
"""
if type(s) != type(''):
s = s.no_stop
else:
s = s.split()
w1 = set(self.no_stop)
w2 = set(s)
return 1.0 * len(w1.intersection(w2)) / max(len(w1), len(w2))
# compute norm for cosine similarity
def compute_norm(self, words_idf = None):
self.norm = 0
for word in self.no_stop_freq:
score = self.no_stop_freq[word]
if words_idf != None and word in words_idf:
score *= words_idf[word]
self.norm += score * score
self.norm = math.sqrt(self.norm)
# simple cosine similarity with ignored
def sim_cosine(self, s, words_idf = None):
norm = self.norm * s.norm
if math.fabs(norm) < 0.00001:
return 0
score = 0
for word in self.no_stop_freq:
if word in s.no_stop_freq:
factor = self.no_stop_freq[word]
if words_idf != None and word in words_idf:
factor *= words_idf[word] * words_idf[word]
factor *= s.no_stop_freq[word]
score += factor
return score / norm
def __str__(self):
return self.original
def glue_quotes(sentences):
starts = []
ends = []
id = 0
offset = 0
for sentence in sentences:
for match in re.finditer(r'(^|\s)[\(]*"', sentence):
starts.append((id, offset + match.end(), match.end()))
for match in re.finditer(r'"[,.\'\)]*(\s|$)', sentence):
ends.append((id, offset + match.start(), match.start()))
for match in re.finditer(r'([^\(\s]"[^\s.,\'])', sentence):
starts.append((id, offset + match.end(), match.end()))
ends.append((id, offset + match.start(), match.start()))
offset += len(sentence)
id += 1
gluelist = []
bounds = {}
for i in xrange(len(starts)):
min = offset
argmin = None
for j in xrange(len(ends)):
if ends[j] == None: continue
dist = ends[j][1] - starts[i][1]
if dist < 0: continue
if dist < min or argmin == None:
min = dist
argmin = j
if argmin != None:
if argmin not in bounds:
bounds[argmin] = (i, min)
else:
if bounds[argmin][1] > min:
bounds[argmin] = (i, min)
for end, start in bounds.items():
if starts[start[0]][0] != ends[end][0]:
gluelist.append((starts[start[0]][0], ends[end][0]))
starts[start[0]] = None
ends[end] = None
for start in starts:
if start != None:
sentence = sentences[start[0]][:start[2]] + "<start>" + sentences[start[0]][start[2]:]
#print ('WARNING: unused quote [%s]\n' % sentence)
for end in ends:
if end != None:
sentence = sentences[end[0]][:end[2]] + "<end>" + sentences[end[0]][end[2]:]
#print ('WARNING: unused quote [%s]\n' % sentence)
output = []
for i in xrange(len(sentences)):
glued = False
for item in gluelist:
if i > item[0] and i <= item[1]:
output[-1] += " " + sentences[i]
glued = True
break
if not glued:
output.append(sentences[i])
return output
def glue_pars(pars):
    """Merge paragraphs that are continuations of the previous paragraph.

    A paragraph is a continuation when it starts with lowercase characters
    and the preceding paragraph does not end with a period (optionally
    followed by a closing quote or parenthesis).

    BUG FIX: the original referenced the undefined name `par` (NameError),
    always dropped the final paragraph, and emitted a glued paragraph twice.
    """
    glued = []
    for p in pars:
        ## this par starts with lowercase and previous doesn't end with a period
        if glued and p[0:2].islower() and not re.search('\.[")]?$', glued[-1]):
            glued[-1] += p
        else:
            glued.append(p)
    return glued
class Document:
"""
Class for storing documents.
doc = Document(<document_path>) will load the document and parse it
for desired information.
Public Member Variables:
self.id 'XIE19980304.0061'
self.source 'XIE'
self.date '19980304.0061'
self.paragraphs ['Par 1 text', 'Par 2 text', ... ]
self.sentences ['sent 1 text', 'sent 2 text', ... ]
"""
def _parse_clean(self, path):
return open(path).read().splitlines()
def _parse_newswire(self, data):
data = data.replace('``', '\"').replace('\'\'', '\"').replace('`', '\'')
data = data.replace('\n', '\t')
pattern = re.compile(r'<\/?(p|text|doc)>', re.I | re.M) # convert <p> and <text> to paragraph breaks
data = re.sub(pattern, '\t', data)
pattern = re.compile(r'<[^>]*>.*?<\/[^>]*>', re.M) # remove tagged content
data = re.sub(pattern, '\t', data)
pattern = re.compile(r'<[^>]*>', re.M) # remove remaining tags
data = re.sub(pattern, ' ', data)
pattern = re.compile(r'\s+', re.M)
text = map(lambda x: re.sub(pattern, ' ', x.strip()), filter(lambda x: x != '', re.split(r' *\t *\t *', data)))
return text
def _fix_newswire(self, par):
"""
clean up newswire paragraphs
"""
fixed = par
## get rid of leaders in newswire text
fixed = re.sub('^(.{0,35} )?\(\w{2,10}?\) ?(--?|_) ?', '', fixed)
fixed = re.sub('^([A-Z]{2,}.{0,30}? (--?|_) ){,2}', '', fixed)
## replace underscore, dash, double-dash with comma
fixed = fixed.replace(' _ ', ', ')
fixed = fixed.replace(' - ', ', ')
fixed = fixed.replace(' -- ', ', ')
fixed = re.sub('([\w\d])--([\w\d])', '\\1, \\2', fixed)
## other fixes
fixed = re.sub('^(_|--?)', '', fixed)
fixed = re.sub(re.compile(r' ?& ?', re.I), '&', fixed)
fixed = re.sub(' ?&\w{2}; ?', ' ', fixed)
fixed = fixed.replace(' ,', ',')
fixed = re.sub('^, ', '', fixed)
fixed = re.sub('\s+', ' ', fixed)
fixed = re.sub('(\w)\.("?[A-Z])', '\\1. \\2', fixed)
fixed = fixed.strip()
if util.is_punct(fixed): fixed = ''
return fixed
def get_sentences(self):
self.sentences = []
order = 0
for par in self.paragraphs:
#sents_text = text_processor.split_sents(par)
sents_text = text_processor.splitta(par)
sents_text_glued = glue_quotes(sents_text)
par_sent_count = 0
for sent_text in sents_text_glued:
#print order, sent_text
if order == 0 and re.search('By [A-Z]', sent_text): continue
if order == 0 and sent_text.startswith('('): continue
if order == 0 and re.search('c\.\d', sent_text): continue
if order == 0 and sent_text.startswith('"') and sent_text.endswith('"'): continue
if sent_text.isupper(): continue
if 1.0*len([1 for c in sent_text if c.isupper()]) / len(sent_text) > 0.2: continue
if len(sent_text.split()) < 20 and not re.search('\.[")]?$', sent_text): continue
if re.search(re.compile('eds:', re.I), sent_text): continue
if re.search('[ \-]\d\d\d-\d\d\d\d', sent_text): continue
if '(k)' in sent_text: continue
sentence = Sentence(sent_text, order, self.source, self.date)
if par_sent_count == 0: sentence.paragraph_starter = True
else: sentence.paragraph_starter = False
self.sentences.append(sentence)
order += 1
par_sent_count += 1
print self.id, len(self.sentences)
def parse_sentences(self, parser=None):
if parser:
for sentence in self.sentences:
sentence.parse(parser)
else:
#parser = CommandLineParser(BERKELEY_PARSER_CMD)
for sentence in self.sentences:
sentence.parse(parser)
parser.run()
for sentence in parser.parsed:
sentence.parsed = parser.parsed[sentence]
def __init__(self, path, is_clean=False):
"""
path is the location of the file to process
is_clean=True means that file has no XML or other markup: just text
"""
self.id = 'NONE'
self.date = 'NONE'
self.source = 'NONE'
self.paragraphs = []
self._isempty = True
## get generic info
if os.path.isfile(path): rawdata = open(path).read()
elif path.strip().startswith('<DOC>'): rawdata = path
else:
sys.stderr.write('ERROR: could not read: %s\n' %path)
return
try:
self.id = util.remove_tags(re.findall('<DOCNO>[^>]+</DOCNO>', rawdata[:100])[0])
except:
match = re.search('<DOC id=\"([^"]+)\"', rawdata[:100])
if match:
self.id = str(match.groups(1)[0])
else:
sys.stderr.write('ERROR: no <DOCNO>/<DOC id=...> tag: %s\n' %path)
## source and date from id (assumes newswire style)
if self.id != 'NONE':
self.source = re.findall('^[^_\d]*', self.id)[0]
self.date = self.id.replace(self.source, '')
## parse various types of newswire xml
if is_clean: text = self._parse_clean(rawdata)
else: text = self._parse_newswire(rawdata)
if len(text)==0:
#sys.stderr.write('WARNING: no text read for: %s\n' %path)
return
self.paragraphs = []
for paragraph in text:
fixed_par = self._fix_newswire(paragraph)
if fixed_par == '': continue
self.paragraphs.append(fixed_par)
self._isempty = False
def __str__(self):
s = []
s.append('%s DOCUMENT' %'#START')
s.append('ID %s' %self.id)
s.append('SOURCE %s' %self.source)
s.append('DATE %s' %self.date)
s.append('TEXT')
s.extend(self.paragraphs)
return '\n'.join(s)
|
en
| 0.599903
|
#self._sent_tokenizer = util.load_pickle('%s%s' %(STATIC_DATA_ROOT, 'punkt/m07_punkt.pickle')) unsupervised training given some text optional save_path for future use ## train tokenizer # threshold for identifying abbrevs (lower is more aggressive) ## pickle it ## fix end of sentence punctuation errors class for holding information about a single sentence self.original original text string self.parsed s-exp representation of a parse tree #parser = CommandLineParser() basic word overlap similarity between two sentences # compute norm for cosine similarity # simple cosine similarity with ignored #print ('WARNING: unused quote [%s]\n' % sentence) #print ('WARNING: unused quote [%s]\n' % sentence) ## next par starts with lowercase and this par doesn't end with a period Class for storing documents. doc = Document(<document_path>) will load the document and parse it for desired information. Public Member Variables: self.id 'XIE19980304.0061' self.source 'XIE' self.date '19980304.0061' self.paragraphs ['Par 1 text', 'Par 2 text', ... ] self.sentences ['sent 1 text', 'sent 2 text', ... ] # convert <p> and <text> to paragraph breaks # remove tagged content # remove remaining tags clean up newswire paragraphs ## get rid of leaders in newswire text ## replace underscore, dash, double-dash with comma ## other fixes #sents_text = text_processor.split_sents(par) #print order, sent_text #parser = CommandLineParser(BERKELEY_PARSER_CMD) path is the location of the file to process is_clean=True means that file has no XML or other markup: just text ## get generic info ## source and date from id (assumes newswire style) ## parse various types of newswire xml #sys.stderr.write('WARNING: no text read for: %s\n' %path)
| 2.258119
| 2
|
cloudmesh-exercises/e-cloudmesh-common-3.py
|
cloudmesh-community/fa19-516-140
| 0
|
6626679
|
<gh_stars>0
# fa19-516-140
# This program demonstrates the use of the FlatDict
# class provided by cloudmesh.common.flatdict.
from cloudmesh.common.flatdict import FlatDict

# Assigning values to a nested dict
values = {'Cloudera': {'Address':{'USA':0,'CA': 1,'Palo Alto': 2}}}

# Convert the nested dict to one flat dict whose delimited keys encode all levels.
# BUG FIX: the original called flatdict.FlatDict(values), but only the class
# FlatDict was imported -- `flatdict` is an undefined name (NameError).
flat = FlatDict(values)

# Print each flattened (delimited) key
for key in flat:
    print (key)
|
# fa19-516-140
# This program demonstrates the use of the FlatDict
# class provided by cloudmesh.common.flatdict.
from cloudmesh.common.flatdict import FlatDict

# Assigning values to a nested dict
values = {'Cloudera': {'Address':{'USA':0,'CA': 1,'Palo Alto': 2}}}

# Convert the nested dict to one flat dict whose delimited keys encode all levels.
# BUG FIX: the original called flatdict.FlatDict(values), but only the class
# FlatDict was imported -- `flatdict` is an undefined name (NameError).
flat = FlatDict(values)

# Print each flattened (delimited) key
for key in flat:
    print (key)
|
en
| 0.801644
|
# fa19-516-140 #This program demonistrate the use of flatdict #function which been stored in cloudmesh.common.flatdict #Assigining values to dicts # converting nested dicts to a one flat dict that illustrates all levels in one level with delimited keys #calling the flatdict in a key calling loop for each key based in the flat result
| 3.46101
| 3
|
python/pyspark/tests/test_daemon.py
|
ILuffZhe/spark
| 0
|
6626680
|
<filename>python/pyspark/tests/test_daemon.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
import unittest
from pyspark.serializers import read_int
class DaemonTests(unittest.TestCase):
    """Verifies that the PySpark worker daemon shuts down cleanly when its
    stdin is closed or when it receives SIGTERM."""

    def connect(self, port):
        """Connect to the daemon on `port` and request worker shutdown."""
        from socket import socket, AF_INET, AF_INET6, SOCK_STREAM
        family, host = AF_INET, "127.0.0.1"
        if os.environ.get("SPARK_PREFER_IPV6", "false").lower() == "true":
            family, host = AF_INET6, "::1"
        sock = socket(family, SOCK_STREAM)
        sock.connect((host, port))
        # send a split index of -1 to shutdown the worker
        sock.send(b"\xFF\xFF\xFF\xFF")
        sock.close()
        return True

    def do_termination_test(self, terminator):
        """Start a daemon, apply `terminator` to its Popen handle, then
        verify the daemon no longer accepts connections."""
        from subprocess import Popen, PIPE
        from errno import ECONNREFUSED
        # start daemon
        daemon_path = os.path.join(os.path.dirname(__file__), "..", "daemon.py")
        python_exec = sys.executable or os.environ.get("PYSPARK_PYTHON")
        daemon = Popen([python_exec, daemon_path], stdin=PIPE, stdout=PIPE)
        # read the port number (written by the daemon on startup)
        port = read_int(daemon.stdout)
        # daemon should accept connections
        self.assertTrue(self.connect(port))
        # wait worker process spawned from daemon exit.
        time.sleep(1)
        # request shutdown
        terminator(daemon)
        time.sleep(1)
        # daemon should no longer accept connections
        try:
            self.connect(port)
        except EnvironmentError as exception:
            self.assertEqual(exception.errno, ECONNREFUSED)
        else:
            self.fail("Expected EnvironmentError to be raised")

    def test_termination_stdin(self):
        """Ensure that daemon and workers terminate when stdin is closed."""
        self.do_termination_test(lambda daemon: daemon.stdin.close())

    def test_termination_sigterm(self):
        """Ensure that daemon and workers terminate on SIGTERM."""
        from signal import SIGTERM
        self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
if __name__ == "__main__":
    # Re-export the test cases so unittest discovery finds them by name.
    from pyspark.tests.test_daemon import *  # noqa: F401

    try:
        import xmlrunner  # type: ignore[import]
    except ImportError:
        testRunner = None
    else:
        # xmlrunner available (CI): write XML reports.
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    unittest.main(testRunner=testRunner, verbosity=2)
|
<filename>python/pyspark/tests/test_daemon.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
import unittest
from pyspark.serializers import read_int
class DaemonTests(unittest.TestCase):
    """Tests that the PySpark daemon process terminates cleanly."""

    def connect(self, port):
        """Connect to the daemon on *port* and request a worker shutdown."""
        from socket import socket, AF_INET, AF_INET6, SOCK_STREAM
        # Default to IPv4 loopback; honour SPARK_PREFER_IPV6 when set.
        family, host = AF_INET, "127.0.0.1"
        if os.environ.get("SPARK_PREFER_IPV6", "false").lower() == "true":
            family, host = AF_INET6, "::1"
        sock = socket(family, SOCK_STREAM)
        sock.connect((host, port))
        # send a split index of -1 to shutdown the worker
        sock.send(b"\xFF\xFF\xFF\xFF")
        sock.close()
        return True

    def do_termination_test(self, terminator):
        """Spawn the daemon, apply *terminator* to it, and assert it is gone."""
        from subprocess import Popen, PIPE
        from errno import ECONNREFUSED
        # start daemon
        daemon_path = os.path.join(os.path.dirname(__file__), "..", "daemon.py")
        python_exec = sys.executable or os.environ.get("PYSPARK_PYTHON")
        daemon = Popen([python_exec, daemon_path], stdin=PIPE, stdout=PIPE)
        # read the port number (the daemon prints it on stdout)
        port = read_int(daemon.stdout)
        # daemon should accept connections
        self.assertTrue(self.connect(port))
        # wait worker process spawned from daemon exit.
        time.sleep(1)
        # request shutdown
        terminator(daemon)
        time.sleep(1)
        # daemon should no longer accept connections
        try:
            self.connect(port)
        except EnvironmentError as exception:
            self.assertEqual(exception.errno, ECONNREFUSED)
        else:
            self.fail("Expected EnvironmentError to be raised")

    def test_termination_stdin(self):
        """Ensure that daemon and workers terminate when stdin is closed."""
        self.do_termination_test(lambda daemon: daemon.stdin.close())

    def test_termination_sigterm(self):
        """Ensure that daemon and workers terminate on SIGTERM."""
        from signal import SIGTERM
        self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
if __name__ == "__main__":
    # Re-export the test cases so unittest discovery finds them by name.
    from pyspark.tests.test_daemon import *  # noqa: F401
    try:
        # Prefer XML report output when xmlrunner is installed (CI).
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
|
en
| 0.867186
|
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # send a split index of -1 to shutdown the worker # start daemon # read the port number # daemon should accept connections # wait worker process spawned from daemon exit. # request shutdown # daemon should no longer accept connections Ensure that daemon and workers terminate when stdin is closed. Ensure that daemon and workers terminate on SIGTERM. # noqa: F401 # type: ignore[import]
| 2.072551
| 2
|
bot.py
|
ilovetocode2019/Logger
| 0
|
6626681
|
import discord
from discord.ext import commands
import asyncpg
import aiohttp
import asyncio
import os
import logging
import json
import asyncio
import datetime
import config
from cogs.utils import formats
# Root logging configuration: timestamped, level-tagged console output.
logging.basicConfig(
    level=logging.INFO,
    format="(%(asctime)s) %(levelname)s %(message)s",
    datefmt="%m/%d/%y - %H:%M:%S %Z",
)
# Module-level logger used throughout the bot.
log = logging.getLogger("logger")
class Logger(commands.Bot):
    """Discord bot that records avatar, username, nickname, and presence
    history for every visible user into a PostgreSQL database."""

    def __init__(self):
        super().__init__(command_prefix=config.prefix, intents=discord.Intents.all())
        # Set once the initial sync in on_ready/update_users has finished.
        self.db_ready = asyncio.Event()
        self.startup_time = datetime.datetime.utcnow()
        self.log = log
        self.loop.create_task(self.prepare_bot())
        self.cogs_to_add = ["cogs.admin", "cogs.meta", "cogs.tracking", "cogs.settings"]
        self.load_extension("jishaku")
        for cog in self.cogs_to_add:
            self.load_extension(cog)

    async def wait_until_db_ready(self):
        """Wait until the initial database sync has completed."""
        if not self.db_ready.is_set():
            await self.db_ready.wait()

    async def prepare_bot(self):
        """Create the image directory, HTTP session, database pool, and tables."""
        log.info("Preparing image directory")
        if not os.path.isdir("images"):
            os.mkdir("images")
        log.info("Creating aiohttp session")
        self.session = aiohttp.ClientSession()

        async def init(conn):
            # Encode/decode jsonb columns with the json module so the batch
            # INSERT queries below can take Python lists of dicts directly.
            await conn.set_type_codec(
                "jsonb",
                schema="pg_catalog",
                encoder=json.dumps,
                decoder=json.loads,
                format="text",
            )

        log.info("Connecting to database")
        self.db = await asyncpg.create_pool(config.database_uri, init=init)
        log.info("Initiating database")
        query = """CREATE TABLE IF NOT EXISTS avatars (
                   id SERIAL PRIMARY KEY,
                   user_id BIGINT,
                   filename TEXT,
                   hash TEXT,
                   recorded_at TIMESTAMP DEFAULT (now() at time zone 'utc')
                   );
                   CREATE TABLE IF NOT EXISTS nicks (
                   id SERIAL PRIMARY KEY,
                   user_id BIGINT,
                   guild_id BIGINT,
                   nick TEXT,
                   recorded_at TIMESTAMP DEFAULT (now() at time zone 'utc')
                   );
                   CREATE TABLE IF NOT EXISTS names (
                   id SERIAL PRIMARY KEY,
                   user_id BIGINT,
                   name TEXT,
                   recorded_at TIMESTAMP DEFAULT (now() at time zone 'utc')
                   );
                   CREATE TABLE IF NOT EXISTS presences (
                   id SERIAL PRIMARY KEY,
                   user_id BIGINT,
                   status TEXT,
                   recorded_at TIMESTAMP DEFAULT (now() at time zone 'utc')
                   );
                   CREATE TABLE IF NOT EXISTS user_config (
                   id BIGINT PRIMARY KEY,
                   theme INTEGER DEFAULT 0
                   );
                """
        await self.db.execute(query)

    async def update_users(self, users):
        """Record avatar and username changes for *users*, then mark the
        database as ready."""
        names = await self.db.fetch("SELECT * FROM names;")
        avatars = await self.db.fetch("SELECT * FROM avatars;")
        avatar_batch = []
        name_batch = []
        for user in users:
            user_avatars = [
                avatar for avatar in avatars if avatar["user_id"] == user.id
            ]
            if not user_avatars or user_avatars[-1]["hash"] != user.avatar:
                if user.avatar:
                    try:
                        filename = f"{user.id}-{user.avatar}.png"
                        # BUG FIX: save under the computed filename (the
                        # original interpolated nothing into the path).
                        await user.avatar_url_as(format="png").save(f"images/{filename}")
                        avatar_batch.append(
                            {"user_id": user.id, "filename": filename, "hash": user.avatar}
                        )
                    except discord.NotFound:
                        log.warning(f"Failed to fetch avatar for {user} ({user.id}). Ignoring")
                else:
                    # Default avatars are keyed by discriminator modulo 5.
                    avatar = int(user.discriminator) % 5
                    filename = f"{avatar}.png"
                    async with self.session.get(f"https://cdn.discordapp.com/embed/avatars/{avatar}.png") as resp:
                        # BUG FIX: write to the computed filename.
                        with open(f"images/{filename}", "wb") as f:
                            f.write(await resp.read())
                    avatar_batch.append(
                        {"user_id": user.id, "filename": filename, "hash": None}
                    )
            user_names = [name for name in names if name["user_id"] == user.id]
            if not user_names or user_names[-1]["name"] != user.name:
                name_batch.append({"user_id": user.id, "name": user.name})
        query = """INSERT INTO avatars (user_id, filename, hash)
                   SELECT x.user_id, x.filename, x.hash
                   FROM jsonb_to_recordset($1::jsonb) AS
                   x(user_id BIGINT, filename TEXT, hash TEXT)
                """
        if avatar_batch:
            await self.db.execute(query, avatar_batch)
            total = len(avatar_batch)
            log.info("Registered %s to the database", format(formats.plural(total), "avatar"))
        else:
            log.info("No work needed for avatars")
        query = """INSERT INTO names (user_id, name)
                   SELECT x.user_id, x.name
                   FROM jsonb_to_recordset($1::jsonb) AS
                   x(user_id BIGINT, name TEXT)
                """
        if name_batch:
            await self.db.execute(query, name_batch)
            # BUG FIX: count the name batch, not the avatar batch.
            total = len(name_batch)
            log.info("Registered %s to the database", format(formats.plural(total), "name"))
        else:
            log.info("No work needed for names")
        self.db_ready.set()

    async def on_ready(self):
        """On connect: sync nick and presence state for all visible members,
        then usernames and avatars."""
        log.info(f"Logged in as {self.user.name} - {self.user.id}")
        self.console = self.get_channel(config.console)
        log.info("Loading database")
        nicks = await self.db.fetch("SELECT * FROM nicks;")
        presences = await self.db.fetch("SELECT * FROM presences;")
        log.info("Loading all members and users")
        users = [discord.User._copy(user) for user in self.users]
        members = [discord.Member._copy(member) for member in self.get_all_members()]
        log.info("Preparing database")
        log.info("Querying nick, and presence changes")
        nick_batch = []
        presence_batch = []
        for member in members:
            # Recorded nicks for this member in this guild, oldest first.
            member_nicks = [
                nick
                for nick in nicks
                if nick["user_id"] == member.id and nick["guild_id"] == member.guild.id
            ]
            if member.nick and (
                not member_nicks or member_nicks[-1]["nick"] != member.nick
            ):
                nick_batch.append(
                    {
                        "user_id": member.id,
                        "guild_id": member.guild.id,
                        "nick": member.nick,
                    }
                )
            member_presences = [
                presence
                for presence in presences
                if presence["user_id"] == member.id
            ]
            # Record at most one presence change per user, since the same user
            # appears as a member of several guilds.
            if (not member_presences or member_presences[-1]["status"] != str(member.status)) and member.id not in [presence["user_id"] for presence in presence_batch]:
                presence_batch.append(
                    {
                        "user_id": member.id,
                        "status": str(member.status)
                    }
                )
        query = """INSERT INTO nicks (user_id, guild_id, nick)
                   SELECT x.user_id, x.guild_id, x.nick
                   FROM jsonb_to_recordset($1::jsonb) AS
                   x(user_id BIGINT, guild_id BIGINT, nick TEXT)
                """
        if nick_batch:
            await self.db.execute(query, nick_batch)
            total = len(nick_batch)
            log.info("Registered %s to the database", format(formats.plural(total), "nick"))
        else:
            log.info("No work needed for nicks")
        # BUG FIX: the recordset column list must match the keys in
        # presence_batch (there is no guild_id key for presences).
        query = """INSERT INTO presences (user_id, status)
                   SELECT x.user_id, x.status
                   FROM jsonb_to_recordset($1::jsonb) AS
                   x(user_id BIGINT, status TEXT)
                """
        if presence_batch:
            await self.db.execute(query, presence_batch)
            total = len(presence_batch)
            log.info("Registered %s to the database", format(formats.plural(total), "presence"))
        else:
            log.info("No work needed for presences")
        log.info("Querying avatar and name changes")
        await self.update_users(users)
        log.info("Database is now up-to-date")

    def run(self):
        """Start the bot with the configured token (blocking)."""
        super().run(config.token)

    async def logout(self):
        """Close the database pool and HTTP session before disconnecting."""
        await self.db.close()
        await self.session.close()
        await super().logout()
# Create the bot and start it (blocking until the process is stopped).
bot = Logger()
bot.run()
|
import discord
from discord.ext import commands
import asyncpg
import aiohttp
import asyncio
import os
import logging
import json
import asyncio
import datetime
import config
from cogs.utils import formats
# Root logging configuration: timestamped, level-tagged console output.
logging.basicConfig(
    level=logging.INFO,
    format="(%(asctime)s) %(levelname)s %(message)s",
    datefmt="%m/%d/%y - %H:%M:%S %Z",
)
# Module-level logger used throughout the bot.
log = logging.getLogger("logger")
class Logger(commands.Bot):
    """Discord bot that records avatar, username, nickname, and presence
    history for every visible user into a PostgreSQL database."""

    def __init__(self):
        super().__init__(command_prefix=config.prefix, intents=discord.Intents.all())
        # Set once the initial sync in on_ready/update_users has finished.
        self.db_ready = asyncio.Event()
        self.startup_time = datetime.datetime.utcnow()
        self.log = log
        self.loop.create_task(self.prepare_bot())
        self.cogs_to_add = ["cogs.admin", "cogs.meta", "cogs.tracking", "cogs.settings"]
        self.load_extension("jishaku")
        for cog in self.cogs_to_add:
            self.load_extension(cog)

    async def wait_until_db_ready(self):
        """Wait until the initial database sync has completed."""
        if not self.db_ready.is_set():
            await self.db_ready.wait()

    async def prepare_bot(self):
        """Create the image directory, HTTP session, database pool, and tables."""
        log.info("Preparing image directory")
        if not os.path.isdir("images"):
            os.mkdir("images")
        log.info("Creating aiohttp session")
        self.session = aiohttp.ClientSession()

        async def init(conn):
            # Encode/decode jsonb columns with the json module so the batch
            # INSERT queries below can take Python lists of dicts directly.
            await conn.set_type_codec(
                "jsonb",
                schema="pg_catalog",
                encoder=json.dumps,
                decoder=json.loads,
                format="text",
            )

        log.info("Connecting to database")
        self.db = await asyncpg.create_pool(config.database_uri, init=init)
        log.info("Initiating database")
        query = """CREATE TABLE IF NOT EXISTS avatars (
                   id SERIAL PRIMARY KEY,
                   user_id BIGINT,
                   filename TEXT,
                   hash TEXT,
                   recorded_at TIMESTAMP DEFAULT (now() at time zone 'utc')
                   );
                   CREATE TABLE IF NOT EXISTS nicks (
                   id SERIAL PRIMARY KEY,
                   user_id BIGINT,
                   guild_id BIGINT,
                   nick TEXT,
                   recorded_at TIMESTAMP DEFAULT (now() at time zone 'utc')
                   );
                   CREATE TABLE IF NOT EXISTS names (
                   id SERIAL PRIMARY KEY,
                   user_id BIGINT,
                   name TEXT,
                   recorded_at TIMESTAMP DEFAULT (now() at time zone 'utc')
                   );
                   CREATE TABLE IF NOT EXISTS presences (
                   id SERIAL PRIMARY KEY,
                   user_id BIGINT,
                   status TEXT,
                   recorded_at TIMESTAMP DEFAULT (now() at time zone 'utc')
                   );
                   CREATE TABLE IF NOT EXISTS user_config (
                   id BIGINT PRIMARY KEY,
                   theme INTEGER DEFAULT 0
                   );
                """
        await self.db.execute(query)

    async def update_users(self, users):
        """Record avatar and username changes for *users*, then mark the
        database as ready."""
        names = await self.db.fetch("SELECT * FROM names;")
        avatars = await self.db.fetch("SELECT * FROM avatars;")
        avatar_batch = []
        name_batch = []
        for user in users:
            user_avatars = [
                avatar for avatar in avatars if avatar["user_id"] == user.id
            ]
            if not user_avatars or user_avatars[-1]["hash"] != user.avatar:
                if user.avatar:
                    try:
                        filename = f"{user.id}-{user.avatar}.png"
                        # BUG FIX: save under the computed filename (the
                        # original interpolated nothing into the path).
                        await user.avatar_url_as(format="png").save(f"images/{filename}")
                        avatar_batch.append(
                            {"user_id": user.id, "filename": filename, "hash": user.avatar}
                        )
                    except discord.NotFound:
                        log.warning(f"Failed to fetch avatar for {user} ({user.id}). Ignoring")
                else:
                    # Default avatars are keyed by discriminator modulo 5.
                    avatar = int(user.discriminator) % 5
                    filename = f"{avatar}.png"
                    async with self.session.get(f"https://cdn.discordapp.com/embed/avatars/{avatar}.png") as resp:
                        # BUG FIX: write to the computed filename.
                        with open(f"images/{filename}", "wb") as f:
                            f.write(await resp.read())
                    avatar_batch.append(
                        {"user_id": user.id, "filename": filename, "hash": None}
                    )
            user_names = [name for name in names if name["user_id"] == user.id]
            if not user_names or user_names[-1]["name"] != user.name:
                name_batch.append({"user_id": user.id, "name": user.name})
        query = """INSERT INTO avatars (user_id, filename, hash)
                   SELECT x.user_id, x.filename, x.hash
                   FROM jsonb_to_recordset($1::jsonb) AS
                   x(user_id BIGINT, filename TEXT, hash TEXT)
                """
        if avatar_batch:
            await self.db.execute(query, avatar_batch)
            total = len(avatar_batch)
            log.info("Registered %s to the database", format(formats.plural(total), "avatar"))
        else:
            log.info("No work needed for avatars")
        query = """INSERT INTO names (user_id, name)
                   SELECT x.user_id, x.name
                   FROM jsonb_to_recordset($1::jsonb) AS
                   x(user_id BIGINT, name TEXT)
                """
        if name_batch:
            await self.db.execute(query, name_batch)
            # BUG FIX: count the name batch, not the avatar batch.
            total = len(name_batch)
            log.info("Registered %s to the database", format(formats.plural(total), "name"))
        else:
            log.info("No work needed for names")
        self.db_ready.set()

    async def on_ready(self):
        """On connect: sync nick and presence state for all visible members,
        then usernames and avatars."""
        log.info(f"Logged in as {self.user.name} - {self.user.id}")
        self.console = self.get_channel(config.console)
        log.info("Loading database")
        nicks = await self.db.fetch("SELECT * FROM nicks;")
        presences = await self.db.fetch("SELECT * FROM presences;")
        log.info("Loading all members and users")
        users = [discord.User._copy(user) for user in self.users]
        members = [discord.Member._copy(member) for member in self.get_all_members()]
        log.info("Preparing database")
        log.info("Querying nick, and presence changes")
        nick_batch = []
        presence_batch = []
        for member in members:
            # Recorded nicks for this member in this guild, oldest first.
            member_nicks = [
                nick
                for nick in nicks
                if nick["user_id"] == member.id and nick["guild_id"] == member.guild.id
            ]
            if member.nick and (
                not member_nicks or member_nicks[-1]["nick"] != member.nick
            ):
                nick_batch.append(
                    {
                        "user_id": member.id,
                        "guild_id": member.guild.id,
                        "nick": member.nick,
                    }
                )
            member_presences = [
                presence
                for presence in presences
                if presence["user_id"] == member.id
            ]
            # Record at most one presence change per user, since the same user
            # appears as a member of several guilds.
            if (not member_presences or member_presences[-1]["status"] != str(member.status)) and member.id not in [presence["user_id"] for presence in presence_batch]:
                presence_batch.append(
                    {
                        "user_id": member.id,
                        "status": str(member.status)
                    }
                )
        query = """INSERT INTO nicks (user_id, guild_id, nick)
                   SELECT x.user_id, x.guild_id, x.nick
                   FROM jsonb_to_recordset($1::jsonb) AS
                   x(user_id BIGINT, guild_id BIGINT, nick TEXT)
                """
        if nick_batch:
            await self.db.execute(query, nick_batch)
            total = len(nick_batch)
            log.info("Registered %s to the database", format(formats.plural(total), "nick"))
        else:
            log.info("No work needed for nicks")
        # BUG FIX: the recordset column list must match the keys in
        # presence_batch (there is no guild_id key for presences).
        query = """INSERT INTO presences (user_id, status)
                   SELECT x.user_id, x.status
                   FROM jsonb_to_recordset($1::jsonb) AS
                   x(user_id BIGINT, status TEXT)
                """
        if presence_batch:
            await self.db.execute(query, presence_batch)
            total = len(presence_batch)
            log.info("Registered %s to the database", format(formats.plural(total), "presence"))
        else:
            log.info("No work needed for presences")
        log.info("Querying avatar and name changes")
        await self.update_users(users)
        log.info("Database is now up-to-date")

    def run(self):
        """Start the bot with the configured token (blocking)."""
        super().run(config.token)

    async def logout(self):
        """Close the database pool and HTTP session before disconnecting."""
        await self.db.close()
        await self.session.close()
        await super().logout()
# Create the bot and start it (blocking until the process is stopped).
bot = Logger()
bot.run()
|
en
| 0.413161
|
CREATE TABLE IF NOT EXISTS avatars ( id SERIAL PRIMARY KEY, user_id BIGINT, filename TEXT, hash TEXT, recorded_at TIMESTAMP DEFAULT (now() at time zone 'utc') ); CREATE TABLE IF NOT EXISTS nicks ( id SERIAL PRIMARY KEY, user_id BIGINT, guild_id BIGINT, nick TEXT, recorded_at TIMESTAMP DEFAULT (now() at time zone 'utc') ); CREATE TABLE IF NOT EXISTS names ( id SERIAL PRIMARY KEY, user_id BIGINT, name TEXT, recorded_at TIMESTAMP DEFAULT (now() at time zone 'utc') ); CREATE TABLE IF NOT EXISTS presences ( id SERIAL PRIMARY KEY, user_id BIGINT, status TEXT, recorded_at TIMESTAMP DEFAULT (now() at time zone 'utc') ); CREATE TABLE IF NOT EXISTS user_config ( id BIGINT PRIMARY KEY, theme INTEGER DEFAULT 0 ); INSERT INTO avatars (user_id, filename, hash) SELECT x.user_id, x.filename, x.hash FROM jsonb_to_recordset($1::jsonb) AS x(user_id BIGINT, filename TEXT, hash TEXT) INSERT INTO names (user_id, name) SELECT x.user_id, x.name FROM jsonb_to_recordset($1::jsonb) AS x(user_id BIGINT, name TEXT) INSERT INTO nicks (user_id, guild_id, nick) SELECT x.user_id, x.guild_id, x.nick FROM jsonb_to_recordset($1::jsonb) AS x(user_id BIGINT, guild_id BIGINT, nick TEXT) INSERT INTO presences (user_id, status) SELECT x.user_id, x.status FROM jsonb_to_recordset($1::jsonb) AS x(user_id BIGINT, guild_id BIGINT, status TEXT)
| 2.252059
| 2
|
edx_data_research/reporting/report_stats.py
|
gopa1959/test
| 0
|
6626682
|
<filename>edx_data_research/reporting/report_stats.py
from collections import defaultdict
from datetime import date
from prettytable import PrettyTable
from edx_data_research.reporting.report import Report
class Stats(Report):
    """General course statistics report: age, gender, and certificate
    percentage breakdowns, printed as a table or written to CSV.

    Note: written for Python 2 (uses the ``print`` statement).
    """

    def __init__(self, args):
        super(Stats, self).__init__(args)
        self.csv = args.csv  # when true, emit a CSV report instead of a table
        self.number_of_students = 0

    def stats(self):
        """Return general stats for a given course """
        # NOTE(review): a list is assigned here but indexed by name below;
        # this only works if the Report base class exposes ``collections`` as
        # a property that maps names to DB collections -- confirm against
        # Report.
        self.collections = ['auth_userprofile', 'certificates_generatedcertificate']
        self.number_of_students = self.collections['auth_userprofile'].count()
        age_stats = self._age()
        gender_stats = self._gender()
        certificate_stats = self._certificate()
        # Each helper returns (label, percentage) pairs.
        result = age_stats + gender_stats + certificate_stats
        headers = ['Name', 'Stat']
        if self.csv:
            report_name = self.report_name(self.db_name, 'stats')
            self.generate_csv(result, headers, report_name)
        else:
            table = PrettyTable(headers)
            table.align[headers[0]] = 'l'
            table.align[headers[1]] = 'c'
            for row in result:
                table.add_row(row)
            print table  # Python 2 print statement

    def _age(self):
        """Percentage breakdown of students by age bracket."""
        age_breakdown = defaultdict(int)
        current_year = date.today().year
        cursor = self.collections['auth_userprofile'].find()
        for item in cursor:
            year_of_birth = item['year_of_birth']
            # 'NULL' is the literal string stored when no year was given.
            if year_of_birth != 'NULL':
                age = current_year - int(year_of_birth)
                if age < 20:
                    age_breakdown['Age - Under 20'] += 1
                elif 20 <= age <= 29:
                    age_breakdown['Age - 20-29'] += 1
                elif 30 <= age <= 39:
                    age_breakdown['Age - 30-39'] += 1
                elif 40 <= age <= 49:
                    age_breakdown['Age - 40-49'] += 1
                elif 50 <= age <= 69:
                    age_breakdown['Age - 50-69'] += 1
                elif age >= 70:
                    age_breakdown['Age - 70+'] += 1
            else:
                age_breakdown['Age - None'] += 1
        order = ['Age - Under 20', 'Age - 20-29', 'Age - 30-39', 'Age - 40-49',
                 'Age - 50-69', 'Age - 70+', 'Age - None']
        # Percentages are relative to the full student count.
        return [(key, age_breakdown[key] * 100.0 / self.number_of_students)
                for key in order]

    def _gender(self):
        """Percentage breakdown of students by reported gender."""
        gender_breakdown = defaultdict(int)
        cursor = self.collections['auth_userprofile'].find()
        for item in cursor:
            gender = item['gender']
            if gender == 'm':
                gender_breakdown['Gender - Male'] += 1
            elif gender == 'f':
                gender_breakdown['Gender - Female'] += 1
            elif gender == 'o':
                gender_breakdown['Gender - Other'] += 1
            else:
                gender_breakdown['Gender - None'] += 1
        order = ['Gender - Male', 'Gender - Female', 'Gender - Other',
                 'Gender - None']
        return [(key, gender_breakdown[key] * 100.0 / self.number_of_students)
                for key in order]

    def _certificate(self):
        """Percentage of students with and without a generated certificate."""
        certificate_breakdown = defaultdict(int)
        cursor = self.collections['certificates_generatedcertificate'].find()
        for item in cursor:
            status = item['status']
            if status == 'notpassing':
                certificate_breakdown['Certificate - No'] += 1
            elif status == 'downloadable':
                certificate_breakdown['Certificate - Yes'] += 1
        order = ['Certificate - Yes', 'Certificate - No']
        return [(key, certificate_breakdown[key] * 100.0 / self.number_of_students)
                for key in order]
|
<filename>edx_data_research/reporting/report_stats.py
from collections import defaultdict
from datetime import date
from prettytable import PrettyTable
from edx_data_research.reporting.report import Report
class Stats(Report):
    """General course statistics report (age, gender, certificate breakdowns).

    Note: written for Python 2 (uses the ``print`` statement).
    """

    def __init__(self, args):
        super(Stats, self).__init__(args)
        self.csv = args.csv  # emit CSV instead of an ASCII table when true
        self.number_of_students = 0

    def stats(self):
        """Return general stats for a given course """
        # NOTE(review): indexing a list by name only works if Report exposes
        # ``collections`` as a name->collection mapping property -- confirm.
        self.collections = ['auth_userprofile', 'certificates_generatedcertificate']
        self.number_of_students = self.collections['auth_userprofile'].count()
        age_stats = self._age()
        gender_stats = self._gender()
        certificate_stats = self._certificate()
        result = age_stats + gender_stats + certificate_stats
        headers = ['Name', 'Stat']
        if self.csv:
            report_name = self.report_name(self.db_name, 'stats')
            self.generate_csv(result, headers, report_name)
        else:
            table = PrettyTable(headers)
            table.align[headers[0]] = 'l'
            table.align[headers[1]] = 'c'
            for row in result:
                table.add_row(row)
            print table  # Python 2 print statement

    def _age(self):
        """Percentage breakdown of students by age bracket."""
        age_breakdown = defaultdict(int)
        current_year = date.today().year
        cursor = self.collections['auth_userprofile'].find()
        for item in cursor:
            year_of_birth = item['year_of_birth']
            # 'NULL' is the literal string stored when no year was given.
            if year_of_birth != 'NULL':
                age = current_year - int(year_of_birth)
                if age < 20:
                    age_breakdown['Age - Under 20'] += 1
                elif 20 <= age <= 29:
                    age_breakdown['Age - 20-29'] += 1
                elif 30 <= age <= 39:
                    age_breakdown['Age - 30-39'] += 1
                elif 40 <= age <= 49:
                    age_breakdown['Age - 40-49'] += 1
                elif 50 <= age <= 69:
                    age_breakdown['Age - 50-69'] += 1
                elif age >= 70:
                    age_breakdown['Age - 70+'] += 1
            else:
                age_breakdown['Age - None'] += 1
        order = ['Age - Under 20', 'Age - 20-29', 'Age - 30-39', 'Age - 40-49',
                 'Age - 50-69', 'Age - 70+', 'Age - None']
        return [(key, age_breakdown[key] * 100.0 / self.number_of_students)
                for key in order]

    def _gender(self):
        """Percentage breakdown of students by reported gender."""
        gender_breakdown = defaultdict(int)
        cursor = self.collections['auth_userprofile'].find()
        for item in cursor:
            gender = item['gender']
            if gender == 'm':
                gender_breakdown['Gender - Male'] += 1
            elif gender == 'f':
                gender_breakdown['Gender - Female'] += 1
            elif gender == 'o':
                gender_breakdown['Gender - Other'] += 1
            else:
                gender_breakdown['Gender - None'] += 1
        order = ['Gender - Male', 'Gender - Female', 'Gender - Other',
                 'Gender - None']
        return [(key, gender_breakdown[key] * 100.0 / self.number_of_students)
                for key in order]

    def _certificate(self):
        """Percentage of students with and without a generated certificate."""
        certificate_breakdown = defaultdict(int)
        cursor = self.collections['certificates_generatedcertificate'].find()
        for item in cursor:
            status = item['status']
            if status == 'notpassing':
                certificate_breakdown['Certificate - No'] += 1
            elif status == 'downloadable':
                certificate_breakdown['Certificate - Yes'] += 1
        order = ['Certificate - Yes', 'Certificate - No']
        return [(key, certificate_breakdown[key] * 100.0 / self.number_of_students)
                for key in order]
|
en
| 0.698134
|
Return general stats for a given course
| 3.013191
| 3
|
pipeline_runner/cache.py
|
schinckel/pipeline-runner
| 6
|
6626683
|
import logging
import os.path
from tempfile import NamedTemporaryFile
from time import time as ts
from typing import Dict, List
from . import utils
from .config import config
from .container import ContainerRunner
logger = logging.getLogger(__name__)
# Archive name for the docker image cache; presumably used by the docker cache
# handling elsewhere -- not referenced in this module's visible code.
DOCKER_IMAGES_ARCHIVE_FILE_NAME = "images.tar"
class CacheManager:
    """Coordinates restoring caches into and saving caches out of a container."""

    def __init__(self, container: ContainerRunner, cache_directory: str, cache_definitions: Dict[str, str]):
        self._container = container
        self._cache_directory = cache_directory
        self._cache_definitions = cache_definitions
        self._ignored_caches = {"docker"}

    def upload(self, cache_names: List[str]):
        """Push each locally archived cache back into the container."""
        for cache_name in cache_names:
            restorer = CacheRestoreFactory.get(
                self._container, self._cache_directory, self._cache_definitions, cache_name
            )
            restorer.restore()

    def download(self, cache_names: List[str]):
        """Pull each named cache out of the container into a local archive."""
        for cache_name in cache_names:
            saver = CacheSaveFactory.get(
                self._container, self._cache_directory, self._cache_definitions, cache_name
            )
            saver.save()
class CacheRestore:
    """Uploads a locally archived cache into the container and moves it into
    its target location."""

    def __init__(
        self, container: ContainerRunner, cache_directory: str, cache_definitions: Dict[str, str], cache_name: str
    ):
        self._container = container
        self._cache_directory = cache_directory      # local dir holding <name>.tar archives
        self._cache_definitions = cache_definitions  # cache name -> remote target path
        self._cache_name = cache_name

    def restore(self):
        """Restore this cache into the container; no-op if no local archive exists."""
        cache_file = self._get_local_cache_file()
        if not cache_file:
            logger.info("Cache '%s': Not found: Skipping", self._cache_name)
            return
        self._upload_cache(cache_file)
        self._restore_cache()

    def _get_local_cache_file(self):
        # Return the local archive path, or None if it has not been saved yet.
        local_cache_archive_path = get_local_cache_archive_path(self._cache_directory, self._cache_name)
        if not os.path.exists(local_cache_archive_path):
            return None
        return local_cache_archive_path

    def _upload_cache(self, cache_file):
        # Stream the tar archive into a staging directory inside the container.
        remote_cache_directory = get_remote_temp_directory(self._cache_name)
        remote_cache_parent_directory = os.path.dirname(remote_cache_directory)
        cache_archive_size = os.path.getsize(cache_file)
        logger.info("Cache '%s': Uploading", self._cache_name)
        t = ts()
        # Clear any stale staging directory, then ensure its parent exists.
        prepare_cache_dir_cmd = (
            f'[ -d "{remote_cache_directory}" ] && rm -rf "{remote_cache_directory}"; '
            f'mkdir -p "{remote_cache_parent_directory}"'
        )
        res, output = self._container.run_command(prepare_cache_dir_cmd)
        if res != 0:
            logger.error("Remote command failed: %s", output.decode())
            raise Exception(f"Error uploading cache: {self._cache_name}")
        with open(cache_file, "rb") as f:
            success = self._container.put_archive(remote_cache_parent_directory, f)
            if not success:
                raise Exception(f"Error uploading cache: {self._cache_name}")
        t = ts() - t
        logger.info(
            "Cache '%s': Uploaded %s in %.3fs", self._cache_name, utils.get_human_readable_size(cache_archive_size), t
        )

    def _restore_cache(self):
        # Replace the target directory with the uploaded staging copy.
        temp_dir = get_remote_temp_directory(self._cache_name)
        target_dir = sanitize_remote_path(self._cache_definitions[self._cache_name])
        logger.info("Cache '%s': Restoring", self._cache_name)
        t = ts()
        restore_cache_script = [
            f'if [ -e "{target_dir}" ]; then rm -rf "{target_dir}"; fi',
            f'mkdir -p "$(dirname "{target_dir}")"',
            f'mv "{temp_dir}" "{target_dir}"',
        ]
        exit_code, output = self._container.run_command("\n".join(restore_cache_script))
        if exit_code != 0:
            raise Exception(f"Error restoring cache: {self._cache_name}: {output.decode()}")
        t = ts() - t
        logger.info("Cache '%s': Restored in %.3fs", self._cache_name, t)
class NullCacheRestore(CacheRestore):
    """Restore strategy for caches that are deliberately skipped (e.g. docker)."""

    def restore(self):
        # No-op apart from the log line.
        logger.info("Cache '%s': Ignoring", self._cache_name)
class CacheRestoreFactory:
    """Chooses the restore strategy for a given cache name."""

    @staticmethod
    def get(
        container: ContainerRunner, cache_directory: str, cache_definitions: Dict[str, str], cache_name: str
    ) -> CacheRestore:
        # The "docker" cache is special-cased to a no-op restorer.
        strategy = NullCacheRestore if cache_name == "docker" else CacheRestore
        return strategy(container, cache_directory, cache_definitions, cache_name)
class CacheSave:
    """Moves a cache out of the container and downloads it as a local tar archive."""

    def __init__(
        self, container: ContainerRunner, cache_directory: str, cache_definitions: Dict[str, str], cache_name: str
    ):
        self._container = container
        self._cache_directory = cache_directory      # local dir that receives <name>.tar
        self._cache_definitions = cache_definitions  # cache name -> remote path
        self._cache_name = cache_name

    def save(self):
        """Stage the cache inside the container, then download it locally."""
        remote_cache_directory = self._prepare()
        local_cache_archive_path = get_local_cache_archive_path(self._cache_directory, self._cache_name)
        self._download(remote_cache_directory, local_cache_archive_path)

    def _prepare(self) -> str:
        # Move the cache directory to the staging location inside the container.
        remote_dir = sanitize_remote_path(self._cache_definitions[self._cache_name])
        target_dir = get_remote_temp_directory(self._cache_name)
        logger.info("Cache '%s': Preparing", self._cache_name)
        t = ts()
        prepare_cache_cmd = f'if [ -e "{remote_dir}" ]; then mv "{remote_dir}" "{target_dir}"; fi'
        exit_code, output = self._container.run_command(prepare_cache_cmd)
        if exit_code != 0:
            raise Exception(f"Error preparing cache: {self._cache_name}: {output.decode()}")
        t = ts() - t
        logger.info("Cache '%s': Prepared in %.3fs", self._cache_name, t)
        return target_dir

    def _download(self, src: str, dst: str):
        # Download the staged directory as a tar stream into a temp file, then
        # rename it onto the final archive path.
        if not self._container.path_exists(src):
            logger.info("Cache '%s': Not found", self._cache_name)
            return
        logger.info("Cache '%s': Downloading", self._cache_name)
        t = ts()
        with NamedTemporaryFile(dir=self._cache_directory, delete=False) as f:
            try:
                logger.debug(f"Downloading cache folder '{src}' to '{f.name}'")
                data, _ = self._container.get_archive(src)
                size = 0
                for chunk in data:
                    size += len(chunk)
                    f.write(chunk)
            except Exception as e:
                # Best-effort: log, drop the partial temp file, and continue.
                logger.error(f"Error getting cache from container: {self._cache_name}: {e}")
                os.unlink(f.name)
                return
            else:
                logger.debug(f"Moving temp cache archive {f.name} to {dst}")
                os.rename(f.name, dst)
        t = ts() - t
        logger.info("Cache '%s': Downloaded %s in %.3fs", self._cache_name, utils.get_human_readable_size(size), t)
class NullCacheSave(CacheSave):
    """Save strategy for caches that are deliberately skipped (e.g. docker)."""

    def save(self):
        # No-op apart from the log line.
        logger.info("Cache '%s': Ignoring", self._cache_name)
class CacheSaveFactory:
    """Chooses the save strategy for a given cache name."""

    @staticmethod
    def get(
        container: ContainerRunner, cache_directory: str, cache_definitions: Dict[str, str], cache_name: str
    ) -> CacheSave:
        # The "docker" cache is special-cased to a no-op saver.
        strategy = NullCacheSave if cache_name == "docker" else CacheSave
        return strategy(container, cache_directory, cache_definitions, cache_name)
def get_local_cache_archive_path(cache_directory: str, cache_name: str) -> str:
    """Return the path of the local tar archive for *cache_name*."""
    archive_name = f"{cache_name}.tar"
    return os.path.join(cache_directory, archive_name)
def get_remote_temp_directory(cache_name: str) -> str:
    """Return the in-container staging directory for *cache_name*."""
    caches_root = config.caches_dir
    return os.path.join(caches_root, cache_name)
def sanitize_remote_path(path: str) -> str:
    """Expand a leading "~" into "$HOME" so the shell resolves it remotely."""
    if path.startswith("~"):
        return "$HOME" + path[1:]
    return path
|
import logging
import os.path
from tempfile import NamedTemporaryFile
from time import time as ts
from typing import Dict, List
from . import utils
from .config import config
from .container import ContainerRunner
logger = logging.getLogger(__name__)
DOCKER_IMAGES_ARCHIVE_FILE_NAME = "images.tar"
class CacheManager:
    """Coordinates restoring caches into and saving caches out of a container."""

    def __init__(self, container: ContainerRunner, cache_directory: str, cache_definitions: Dict[str, str]):
        self._container = container
        self._cache_directory = cache_directory      # local dir holding <name>.tar archives
        self._cache_definitions = cache_definitions  # cache name -> remote path
        # NOTE(review): defined but not referenced in this class's visible
        # code; the "docker" special case is handled by the factories.
        self._ignored_caches = {"docker"}

    def upload(self, cache_names: List[str]):
        # "upload" = push the locally archived caches back into the container.
        for name in cache_names:
            cu = CacheRestoreFactory.get(self._container, self._cache_directory, self._cache_definitions, name)
            cu.restore()

    def download(self, cache_names: List[str]):
        # "download" = pull the caches out of the container into local archives.
        for name in cache_names:
            cd = CacheSaveFactory.get(self._container, self._cache_directory, self._cache_definitions, name)
            cd.save()
class CacheRestore:
    """Uploads a locally stored cache archive into the container and moves it into place."""

    def __init__(
        self, container: ContainerRunner, cache_directory: str, cache_definitions: Dict[str, str], cache_name: str
    ):
        self._container = container
        # Local directory holding "<cache_name>.tar" archives.
        self._cache_directory = cache_directory
        # Mapping of cache name -> target path inside the container.
        self._cache_definitions = cache_definitions
        self._cache_name = cache_name

    def restore(self):
        """Upload the local archive for this cache (if any) and unpack it at its target path."""
        cache_file = self._get_local_cache_file()
        if not cache_file:
            logger.info("Cache '%s': Not found: Skipping", self._cache_name)
            return
        self._upload_cache(cache_file)
        self._restore_cache()

    def _get_local_cache_file(self):
        """Return the local archive path for this cache, or None when no archive exists."""
        local_cache_archive_path = get_local_cache_archive_path(self._cache_directory, self._cache_name)
        if not os.path.exists(local_cache_archive_path):
            return None
        return local_cache_archive_path

    def _upload_cache(self, cache_file):
        """Copy the local tar archive into a temporary directory inside the container.

        Raises a generic Exception when the remote preparation command or the
        archive upload fails.
        """
        remote_cache_directory = get_remote_temp_directory(self._cache_name)
        remote_cache_parent_directory = os.path.dirname(remote_cache_directory)
        cache_archive_size = os.path.getsize(cache_file)  # only used for the log message
        logger.info("Cache '%s': Uploading", self._cache_name)
        t = ts()
        # Remove any stale temp copy, then make sure the parent directory exists.
        prepare_cache_dir_cmd = (
            f'[ -d "{remote_cache_directory}" ] && rm -rf "{remote_cache_directory}"; '
            f'mkdir -p "{remote_cache_parent_directory}"'
        )
        res, output = self._container.run_command(prepare_cache_dir_cmd)
        if res != 0:
            logger.error("Remote command failed: %s", output.decode())
            raise Exception(f"Error uploading cache: {self._cache_name}")
        with open(cache_file, "rb") as f:
            success = self._container.put_archive(remote_cache_parent_directory, f)
            if not success:
                raise Exception(f"Error uploading cache: {self._cache_name}")
        t = ts() - t
        logger.info(
            "Cache '%s': Uploaded %s in %.3fs", self._cache_name, utils.get_human_readable_size(cache_archive_size), t
        )

    def _restore_cache(self):
        """Move the uploaded temp directory to the cache's configured target path."""
        temp_dir = get_remote_temp_directory(self._cache_name)
        target_dir = sanitize_remote_path(self._cache_definitions[self._cache_name])
        logger.info("Cache '%s': Restoring", self._cache_name)
        t = ts()
        # Delete any existing copy at the target, create parent directories, then move.
        restore_cache_script = [
            f'if [ -e "{target_dir}" ]; then rm -rf "{target_dir}"; fi',
            f'mkdir -p "$(dirname "{target_dir}")"',
            f'mv "{temp_dir}" "{target_dir}"',
        ]
        exit_code, output = self._container.run_command("\n".join(restore_cache_script))
        if exit_code != 0:
            raise Exception(f"Error restoring cache: {self._cache_name}: {output.decode()}")
        t = ts() - t
        logger.info("Cache '%s': Restored in %.3fs", self._cache_name, t)
class NullCacheRestore(CacheRestore):
    """A no-op restore for caches that must never be uploaded (e.g. "docker")."""

    def restore(self):
        """Log that this cache is intentionally skipped; perform no work."""
        logger.info("Cache '%s': Ignoring", self._cache_name)
class CacheRestoreFactory:
    """Chooses the proper restore strategy for a given cache name."""

    @staticmethod
    def get(
        container: ContainerRunner, cache_directory: str, cache_definitions: Dict[str, str], cache_name: str
    ) -> CacheRestore:
        """Return a `NullCacheRestore` for the special "docker" cache, else a `CacheRestore`."""
        restore_cls = NullCacheRestore if cache_name == "docker" else CacheRestore
        return restore_cls(container, cache_directory, cache_definitions, cache_name)
class CacheSave:
    """Downloads a cache directory from the container and stores it as a local tar archive."""

    def __init__(
        self, container: ContainerRunner, cache_directory: str, cache_definitions: Dict[str, str], cache_name: str
    ):
        self._container = container
        # Local directory where "<cache_name>.tar" archives are written.
        self._cache_directory = cache_directory
        # Mapping of cache name -> source path inside the container.
        self._cache_definitions = cache_definitions
        self._cache_name = cache_name

    def save(self):
        """Stage the remote cache directory, then archive it to the local cache path."""
        remote_cache_directory = self._prepare()
        local_cache_archive_path = get_local_cache_archive_path(self._cache_directory, self._cache_name)
        self._download(remote_cache_directory, local_cache_archive_path)

    def _prepare(self) -> str:
        """Move the cache's source directory to a temp staging path inside the container.

        Returns the staging directory path. Raises when the remote command fails.
        """
        remote_dir = sanitize_remote_path(self._cache_definitions[self._cache_name])
        target_dir = get_remote_temp_directory(self._cache_name)
        logger.info("Cache '%s': Preparing", self._cache_name)
        t = ts()
        # Only move when the source exists; a bare `mv` would fail otherwise.
        prepare_cache_cmd = f'if [ -e "{remote_dir}" ]; then mv "{remote_dir}" "{target_dir}"; fi'
        exit_code, output = self._container.run_command(prepare_cache_cmd)
        if exit_code != 0:
            raise Exception(f"Error preparing cache: {self._cache_name}: {output.decode()}")
        t = ts() - t
        logger.info("Cache '%s': Prepared in %.3fs", self._cache_name, t)
        return target_dir

    def _download(self, src: str, dst: str):
        """Stream `src` from the container into a temp file, then publish it at `dst`.

        On failure the temp file is removed and the method returns without raising.
        """
        if not self._container.path_exists(src):
            logger.info("Cache '%s': Not found", self._cache_name)
            return
        logger.info("Cache '%s': Downloading", self._cache_name)
        t = ts()
        size = 0  # total bytes received, for the final log line
        with NamedTemporaryFile(dir=self._cache_directory, delete=False) as f:
            try:
                logger.debug(f"Downloading cache folder '{src}' to '{f.name}'")
                data, _ = self._container.get_archive(src)
                for chunk in data:
                    size += len(chunk)
                    f.write(chunk)
            except Exception as e:
                logger.error(f"Error getting cache from container: {self._cache_name}: {e}")
                os.unlink(f.name)
                return
            else:
                logger.debug(f"Moving temp cache archive {f.name} to {dst}")
                # os.replace (unlike os.rename) overwrites an existing archive on
                # all platforms; os.rename raises on Windows when dst exists.
                os.replace(f.name, dst)
        t = ts() - t
        logger.info("Cache '%s': Downloaded %s in %.3fs", self._cache_name, utils.get_human_readable_size(size), t)
class NullCacheSave(CacheSave):
    """A no-op save for caches that must never be downloaded (e.g. "docker")."""

    def save(self):
        """Log that this cache is intentionally skipped; perform no work."""
        logger.info("Cache '%s': Ignoring", self._cache_name)
class CacheSaveFactory:
    """Chooses the proper save strategy for a given cache name."""

    @staticmethod
    def get(
        container: ContainerRunner, cache_directory: str, cache_definitions: Dict[str, str], cache_name: str
    ) -> CacheSave:
        """Return a `NullCacheSave` for the special "docker" cache, else a `CacheSave`."""
        save_cls = NullCacheSave if cache_name == "docker" else CacheSave
        return save_cls(container, cache_directory, cache_definitions, cache_name)
def get_local_cache_archive_path(cache_directory: str, cache_name: str) -> str:
    """Return the path of the local tar archive for the named cache."""
    archive_file_name = f"{cache_name}.tar"
    return os.path.join(cache_directory, archive_file_name)
def get_remote_temp_directory(cache_name: str) -> str:
    """Return the in-container staging directory used for the named cache."""
    base_directory = config.caches_dir
    return os.path.join(base_directory, cache_name)
def sanitize_remote_path(path: str) -> str:
    """Rewrite a leading "~" to "$HOME" so the remote shell expands it."""
    if not path.startswith("~"):
        return path
    return "$HOME" + path[1:]
|
none
| 1
| 2.187212
| 2
|
|
light_test/light_test/doctype/light_test_doctype/light_test_doctype.py
|
kwatkinsLexul/light_test
| 0
|
6626684
|
<reponame>kwatkinsLexul/light_test
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Keith and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class light_test_doctype(Document):
def validate(self):
print("Grrrrrrayson - Validate")
def on_update(self):
print("Grayson also - Update")
def on_submit(self):
print("Another Grayson thing - Submit")
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Keith and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class light_test_doctype(Document):
    """Minimal doctype that logs each frappe document lifecycle hook via print."""

    def validate(self):
        # Hook invoked by frappe before the document is saved.
        print("Grrrrrrayson - Validate")

    def on_update(self):
        # Hook invoked by frappe after the document is saved.
        print("Grayson also - Update")

    def on_submit(self):
        # Hook invoked by frappe when the document is submitted.
        print("Another Grayson thing - Submit")
|
en
| 0.792086
|
# -*- coding: utf-8 -*- # Copyright (c) 2015, Keith and contributors # For license information, please see license.txt
| 1.623706
| 2
|
homeassistant/components/netgear/const.py
|
andersop91/core
| 4
|
6626685
|
<filename>homeassistant/components/netgear/const.py
"""Netgear component constants."""
from datetime import timedelta
from homeassistant.const import Platform
DOMAIN = "netgear"
PLATFORMS = [Platform.DEVICE_TRACKER, Platform.SENSOR]
CONF_CONSIDER_HOME = "consider_home"
DEFAULT_CONSIDER_HOME = timedelta(seconds=180)
DEFAULT_NAME = "Netgear router"
# models using port 80 instead of 5000
MODELS_PORT_80 = [
"Orbi",
"RBK",
"RBR",
"RBS",
"RBW",
"LBK",
"LBR",
"CBK",
"CBR",
"SRC",
"SRK",
"SRR",
"SRS",
"SXK",
"SXR",
"SXS",
]
PORT_80 = 80
# update method V2 models
MODELS_V2 = [
"Orbi",
"RBK",
"RBR",
"RBS",
"RBW",
"LBK",
"LBR",
"CBK",
"CBR",
"SRC",
"SRK",
"SRS",
"SXK",
"SXR",
"SXS",
]
# Icons
DEVICE_ICONS = {
0: "mdi:access-point-network", # Router (Orbi ...)
1: "mdi:book-open-variant", # Amazon Kindle
2: "mdi:android", # Android Device
3: "mdi:cellphone", # Android Phone
4: "mdi:tablet-android", # Android Tablet
5: "mdi:router-wireless", # Apple Airport Express
6: "mdi:disc-player", # Blu-ray Player
7: "mdi:router-network", # Bridge
8: "mdi:play-network", # Cable STB
9: "mdi:camera", # Camera
10: "mdi:router-network", # Router
11: "mdi:play-network", # DVR
12: "mdi:gamepad-variant", # Gaming Console
13: "mdi:desktop-mac", # iMac
14: "mdi:tablet", # iPad
15: "mdi:tablet", # iPad Mini
16: "mdi:cellphone", # iPhone 5/5S/5C
17: "mdi:cellphone", # iPhone
18: "mdi:ipod", # iPod Touch
19: "mdi:linux", # Linux PC
20: "mdi:apple-finder", # Mac Mini
21: "mdi:desktop-tower", # Mac Pro
22: "mdi:laptop", # MacBook
23: "mdi:play-network", # Media Device
24: "mdi:network", # Network Device
25: "mdi:play-network", # Other STB
26: "mdi:power-plug", # Powerline
27: "mdi:printer", # Printer
28: "mdi:access-point", # Repeater
29: "mdi:play-network", # Satellite STB
30: "mdi:scanner", # Scanner
31: "mdi:play-network", # SlingBox
32: "mdi:cellphone", # Smart Phone
33: "mdi:nas", # Storage (NAS)
34: "mdi:switch", # Switch
35: "mdi:television", # TV
36: "mdi:tablet", # Tablet
37: "mdi:desktop-classic", # UNIX PC
38: "mdi:desktop-tower-monitor", # Windows PC
39: "mdi:laptop", # Surface
40: "mdi:access-point-network", # Wifi Extender
41: "mdi:cast-variant", # Apple TV
}
|
<filename>homeassistant/components/netgear/const.py
"""Netgear component constants."""
from datetime import timedelta
from homeassistant.const import Platform
# Integration domain and the entity platforms this component sets up.
DOMAIN = "netgear"
PLATFORMS = [Platform.DEVICE_TRACKER, Platform.SENSOR]
# Config option key and its default value.
# NOTE(review): semantics assumed to follow the usual device-tracker
# "consider home" window — confirm against the tracker implementation.
CONF_CONSIDER_HOME = "consider_home"
DEFAULT_CONSIDER_HOME = timedelta(seconds=180)
DEFAULT_NAME = "Netgear router"
# models using port 80 instead of 5000
# (presumably matched as model-name prefixes — verify against the setup code)
MODELS_PORT_80 = [
    "Orbi",
    "RBK",
    "RBR",
    "RBS",
    "RBW",
    "LBK",
    "LBR",
    "CBK",
    "CBR",
    "SRC",
    "SRK",
    "SRR",
    "SRS",
    "SXK",
    "SXR",
    "SXS",
]
PORT_80 = 80
# update method V2 models
# (same list as MODELS_PORT_80 except "SRR")
MODELS_V2 = [
    "Orbi",
    "RBK",
    "RBR",
    "RBS",
    "RBW",
    "LBK",
    "LBR",
    "CBK",
    "CBR",
    "SRC",
    "SRK",
    "SRS",
    "SXK",
    "SXR",
    "SXS",
]
# Icons
# Maps a numeric device-type code to a Material Design Icon name.
DEVICE_ICONS = {
    0: "mdi:access-point-network",  # Router (Orbi ...)
    1: "mdi:book-open-variant",  # Amazon Kindle
    2: "mdi:android",  # Android Device
    3: "mdi:cellphone",  # Android Phone
    4: "mdi:tablet-android",  # Android Tablet
    5: "mdi:router-wireless",  # Apple Airport Express
    6: "mdi:disc-player",  # Blu-ray Player
    7: "mdi:router-network",  # Bridge
    8: "mdi:play-network",  # Cable STB
    9: "mdi:camera",  # Camera
    10: "mdi:router-network",  # Router
    11: "mdi:play-network",  # DVR
    12: "mdi:gamepad-variant",  # Gaming Console
    13: "mdi:desktop-mac",  # iMac
    14: "mdi:tablet",  # iPad
    15: "mdi:tablet",  # iPad Mini
    16: "mdi:cellphone",  # iPhone 5/5S/5C
    17: "mdi:cellphone",  # iPhone
    18: "mdi:ipod",  # iPod Touch
    19: "mdi:linux",  # Linux PC
    20: "mdi:apple-finder",  # Mac Mini
    21: "mdi:desktop-tower",  # Mac Pro
    22: "mdi:laptop",  # MacBook
    23: "mdi:play-network",  # Media Device
    24: "mdi:network",  # Network Device
    25: "mdi:play-network",  # Other STB
    26: "mdi:power-plug",  # Powerline
    27: "mdi:printer",  # Printer
    28: "mdi:access-point",  # Repeater
    29: "mdi:play-network",  # Satellite STB
    30: "mdi:scanner",  # Scanner
    31: "mdi:play-network",  # SlingBox
    32: "mdi:cellphone",  # Smart Phone
    33: "mdi:nas",  # Storage (NAS)
    34: "mdi:switch",  # Switch
    35: "mdi:television",  # TV
    36: "mdi:tablet",  # Tablet
    37: "mdi:desktop-classic",  # UNIX PC
    38: "mdi:desktop-tower-monitor",  # Windows PC
    39: "mdi:laptop",  # Surface
    40: "mdi:access-point-network",  # Wifi Extender
    41: "mdi:cast-variant",  # Apple TV
}
|
en
| 0.480601
|
Netgear component constants. # models using port 80 instead of 5000 # update method V2 models # Icons # Router (Orbi ...) # Amazon Kindle # Android Device # Android Phone # Android Tablet # Apple Airport Express # Blu-ray Player # Bridge # Cable STB # Camera # Router # DVR # Gaming Console # iMac # iPad # iPad Mini # iPhone 5/5S/5C # iPhone # iPod Touch # Linux PC # Mac Mini # Mac Pro # MacBook # Media Device # Network Device # Other STB # Powerline # Printer # Repeater # Satellite STB # Scanner # SlingBox # Smart Phone # Storage (NAS) # Switch # TV # Tablet # UNIX PC # Windows PC # Surface # Wifi Extender # Apple TV
| 1.970246
| 2
|
tests/engine/test_ports.py
|
aiace9/aiida-core
| 1
|
6626686
|
<filename>tests/engine/test_ports.py
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tests for process spec ports."""
from aiida.backends.testbase import AiidaTestCase
from aiida.engine.processes.ports import InputPort, PortNamespace
from aiida.orm import Dict, Int
class TestInputPort(AiidaTestCase):
"""Tests for the `InputPort` class."""
def test_with_non_db(self):
"""Test the functionality of the `non_db` attribute upon construction and setting."""
# When not specifying, it should get the default value and `non_db_explicitly_set` should be `False`
port = InputPort('port')
self.assertEqual(port.non_db, False)
self.assertEqual(port.non_db_explicitly_set, False)
# Using the setter to change the value should toggle both properties
port.non_db = True
self.assertEqual(port.non_db, True)
self.assertEqual(port.non_db_explicitly_set, True)
# Explicitly setting to `False` upon construction
port = InputPort('port', non_db=False)
self.assertEqual(port.non_db, False)
self.assertEqual(port.non_db_explicitly_set, True)
# Explicitly setting to `True` upon construction
port = InputPort('port', non_db=True)
self.assertEqual(port.non_db, True)
self.assertEqual(port.non_db_explicitly_set, True)
class TestPortNamespace(AiidaTestCase):
"""Tests for the `PortNamespace` class."""
def test_with_non_db(self):
"""Ports inserted to a `PortNamespace` should inherit the `non_db` attribute if not explicitly set."""
namespace_non_db = True
port_namespace = PortNamespace('namespace', non_db=namespace_non_db)
# When explicitly set upon port construction, value should not be inherited even when different
port = InputPort('storable', non_db=False)
port_namespace['storable'] = port
self.assertEqual(port.non_db, False)
port = InputPort('not_storable', non_db=True)
port_namespace['not_storable'] = port
self.assertEqual(port.non_db, True)
# If not explicitly defined, it should inherit from parent namespace
port = InputPort('not_storable')
port_namespace['not_storable'] = port
self.assertEqual(port.non_db, namespace_non_db)
def test_validate_port_name(self):
"""This test will ensure that illegal port names will raise a `ValueError` when trying to add it."""
port = InputPort('port')
port_namespace = PortNamespace('namespace')
illegal_port_names = [
'two__underscores',
'three___underscores',
'_leading_underscore',
'trailing_underscore_',
'non_numeric_%',
'including.period',
'disallowed👻unicodecharacters',
'white space',
'das-hes',
]
for port_name in illegal_port_names:
with self.assertRaises(ValueError):
port_namespace[port_name] = port
def test_serialize_type_check(self):
"""Test that `serialize` will include full port namespace in exception message."""
base_namespace = 'base'
nested_namespace = 'some.nested.namespace'
port_namespace = PortNamespace(base_namespace)
port_namespace.create_port_namespace(nested_namespace)
with self.assertRaisesRegex(TypeError, f'.*{base_namespace}.*{nested_namespace}.*'):
port_namespace.serialize({'some': {'nested': {'namespace': {Dict()}}}})
def test_lambda_default(self):
"""Test that an input port can specify a lambda as a default."""
port_namespace = PortNamespace('base')
# Defining lambda for default that returns incorrect type should not except at construction
port_namespace['port'] = InputPort('port', valid_type=Int, default=lambda: 'string')
# However, pre processing the namespace, which shall evaluate the default followed by validation will fail
inputs = port_namespace.pre_process({})
self.assertIsNotNone(port_namespace.validate(inputs))
# Passing an explicit value for the port will forego the default and validation on returned inputs should pass
inputs = port_namespace.pre_process({'port': Int(5)})
self.assertIsNone(port_namespace.validate(inputs))
# Redefining the port, this time with a correct default
port_namespace['port'] = InputPort('port', valid_type=Int, default=lambda: Int(5))
# Pre processing the namespace shall evaluate the default and return the int node
inputs = port_namespace.pre_process({})
self.assertIsInstance(inputs['port'], Int)
self.assertEqual(inputs['port'].value, 5)
# Passing an explicit value for the port will forego the default
inputs = port_namespace.pre_process({'port': Int(3)})
self.assertIsInstance(inputs['port'], Int)
self.assertEqual(inputs['port'].value, 3)
|
<filename>tests/engine/test_ports.py
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tests for process spec ports."""
from aiida.backends.testbase import AiidaTestCase
from aiida.engine.processes.ports import InputPort, PortNamespace
from aiida.orm import Dict, Int
class TestInputPort(AiidaTestCase):
    """Tests for the `InputPort` class."""

    def test_with_non_db(self):
        """Test the functionality of the `non_db` attribute upon construction and setting.

        Covers four scenarios: default construction, assignment via the setter,
        and explicit `False`/`True` at construction time.
        """
        # When not specifying, it should get the default value and `non_db_explicitly_set` should be `False`
        port = InputPort('port')
        self.assertEqual(port.non_db, False)
        self.assertEqual(port.non_db_explicitly_set, False)
        # Using the setter to change the value should toggle both properties
        port.non_db = True
        self.assertEqual(port.non_db, True)
        self.assertEqual(port.non_db_explicitly_set, True)
        # Explicitly setting to `False` upon construction
        port = InputPort('port', non_db=False)
        self.assertEqual(port.non_db, False)
        self.assertEqual(port.non_db_explicitly_set, True)
        # Explicitly setting to `True` upon construction
        port = InputPort('port', non_db=True)
        self.assertEqual(port.non_db, True)
        self.assertEqual(port.non_db_explicitly_set, True)
class TestPortNamespace(AiidaTestCase):
    """Tests for the `PortNamespace` class."""

    def test_with_non_db(self):
        """Ports inserted to a `PortNamespace` should inherit the `non_db` attribute if not explicitly set."""
        namespace_non_db = True
        port_namespace = PortNamespace('namespace', non_db=namespace_non_db)
        # When explicitly set upon port construction, value should not be inherited even when different
        port = InputPort('storable', non_db=False)
        port_namespace['storable'] = port
        self.assertEqual(port.non_db, False)
        port = InputPort('not_storable', non_db=True)
        port_namespace['not_storable'] = port
        self.assertEqual(port.non_db, True)
        # If not explicitly defined, it should inherit from parent namespace
        port = InputPort('not_storable')
        port_namespace['not_storable'] = port
        self.assertEqual(port.non_db, namespace_non_db)

    def test_validate_port_name(self):
        """This test will ensure that illegal port names will raise a `ValueError` when trying to add it."""
        port = InputPort('port')
        port_namespace = PortNamespace('namespace')
        # Each entry violates a different naming rule: repeated underscores,
        # leading/trailing underscore, non-alphanumeric characters, periods,
        # whitespace, and dashes.
        illegal_port_names = [
            'two__underscores',
            'three___underscores',
            '_leading_underscore',
            'trailing_underscore_',
            'non_numeric_%',
            'including.period',
            'disallowed👻unicodecharacters',
            'white space',
            'das-hes',
        ]
        for port_name in illegal_port_names:
            with self.assertRaises(ValueError):
                port_namespace[port_name] = port

    def test_serialize_type_check(self):
        """Test that `serialize` will include full port namespace in exception message."""
        base_namespace = 'base'
        nested_namespace = 'some.nested.namespace'
        port_namespace = PortNamespace(base_namespace)
        port_namespace.create_port_namespace(nested_namespace)
        # The raised message should mention both the base and the nested namespace path.
        with self.assertRaisesRegex(TypeError, f'.*{base_namespace}.*{nested_namespace}.*'):
            port_namespace.serialize({'some': {'nested': {'namespace': {Dict()}}}})

    def test_lambda_default(self):
        """Test that an input port can specify a lambda as a default."""
        port_namespace = PortNamespace('base')
        # Defining lambda for default that returns incorrect type should not except at construction
        port_namespace['port'] = InputPort('port', valid_type=Int, default=lambda: 'string')
        # However, pre processing the namespace, which shall evaluate the default followed by validation will fail
        inputs = port_namespace.pre_process({})
        self.assertIsNotNone(port_namespace.validate(inputs))
        # Passing an explicit value for the port will forego the default and validation on returned inputs should pass
        inputs = port_namespace.pre_process({'port': Int(5)})
        self.assertIsNone(port_namespace.validate(inputs))
        # Redefining the port, this time with a correct default
        port_namespace['port'] = InputPort('port', valid_type=Int, default=lambda: Int(5))
        # Pre processing the namespace shall evaluate the default and return the int node
        inputs = port_namespace.pre_process({})
        self.assertIsInstance(inputs['port'], Int)
        self.assertEqual(inputs['port'].value, 5)
        # Passing an explicit value for the port will forego the default
        inputs = port_namespace.pre_process({'port': Int(3)})
        self.assertIsInstance(inputs['port'], Int)
        self.assertEqual(inputs['port'].value, 3)
|
en
| 0.684594
|
# -*- coding: utf-8 -*- ########################################################################### # Copyright (c), The AiiDA team. All rights reserved. # # This file is part of the AiiDA code. # # # # The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### Tests for process spec ports. Tests for the `InputPort` class. Test the functionality of the `non_db` attribute upon construction and setting. # When not specifying, it should get the default value and `non_db_explicitly_set` should be `False` # Using the setter to change the value should toggle both properties # Explicitly setting to `False` upon construction # Explicitly setting to `True` upon construction Tests for the `PortNamespace` class. Ports inserted to a `PortNamespace` should inherit the `non_db` attribute if not explicitly set. # When explicitly set upon port construction, value should not be inherited even when different # If not explicitly defined, it should inherit from parent namespace This test will ensure that illegal port names will raise a `ValueError` when trying to add it. Test that `serialize` will include full port namespace in exception message. Test that an input port can specify a lambda as a default. # Defining lambda for default that returns incorrect type should not except at construction # However, pre processing the namespace, which shall evaluate the default followed by validation will fail # Passing an explicit value for the port will forego the default and validation on returned inputs should pass # Redefining the port, this time with a correct default # Pre processing the namespace shall evaluate the default and return the int node # Passing an explicit value for the port will forego the default
| 2.367988
| 2
|
api_study/apps/user_operation/views.py
|
shidashui/django_restful_api_study
| 2
|
6626687
|
from django.shortcuts import render
from rest_framework import viewsets, mixins
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from user_operation.models import UserLeavingMessage, UserAddress
from user_operation.serializers import UserFavDetailSerializer, LeavingMessageSerializer, AddressSerializer
from utils.permissions import IsOwnerOrReadOnly
from .models import UserFav
from .serializers import UserFavSerializer
# Create your views here.
class UserFavViewset(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin, mixins.DestroyModelMixin):
"""
用户收藏
"""
# queryset = UserFav.objects.all()
serializer_class = UserFavSerializer
#permission是用来做权限判断的
# IsAuthenticated:必须登陆用户; IsOwnerOrReadOnly:必须是当前登陆用户
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
#auth使用来做用户认证的
authentication_classes = (JSONWebTokenAuthentication,SessionAuthentication)
#搜索的字段
lookup_field = 'goods_id'
def get_queryset(self):
#只能查看当前登陆用户的收藏,不会获取所有用户的收藏
return UserFav.objects.filter(user=self.request.user)
#动态选择serializer
def get_serializer_class(self):
if self.action == "list":
return UserFavDetailSerializer
elif self.action == "create":
return UserFavSerializer
class LeavingMessageViewset(mixins.ListModelMixin, mixins.DestroyModelMixin, mixins.CreateModelMixin, viewsets.GenericViewSet):
"""
list:
获取用户留言
create:
添加留言
delete:
删除留言功能
"""
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
serializer_class = LeavingMessageSerializer
#只能看到自己的留言
def get_queryset(self):
return UserLeavingMessage.objects.filter(user=self.request.user)
class AddressViewset(viewsets.ModelViewSet):
"""
收货地址管理
list:
获取收货地址
create:
添加收货地址
update:
更新收货地址
delete:
删除收货地址
"""
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
serializer_class = AddressSerializer
def get_queryset(self):
return UserAddress.objects.filter(user=self.request.user)
|
from django.shortcuts import render
from rest_framework import viewsets, mixins
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from user_operation.models import UserLeavingMessage, UserAddress
from user_operation.serializers import UserFavDetailSerializer, LeavingMessageSerializer, AddressSerializer
from utils.permissions import IsOwnerOrReadOnly
from .models import UserFav
from .serializers import UserFavSerializer
# Create your views here.
class UserFavViewset(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.CreateModelMixin, mixins.DestroyModelMixin):
    """
    User favorites.

    list:
        Get the current user's favorites.
    create:
        Add a favorite.
    destroy:
        Remove a favorite.
    """
    # queryset = UserFav.objects.all()
    serializer_class = UserFavSerializer
    # Permission classes perform authorization checks:
    # IsAuthenticated: user must be logged in; IsOwnerOrReadOnly: must be the current logged-in user
    permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
    # Authentication classes identify the requesting user
    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
    # Lookup field used for detail routes (e.g. /userfavs/<goods_id>/)
    lookup_field = 'goods_id'

    def get_queryset(self):
        # Only expose the current user's favorites, never other users'
        return UserFav.objects.filter(user=self.request.user)

    # Choose the serializer dynamically per action
    def get_serializer_class(self):
        if self.action == "list":
            return UserFavDetailSerializer
        elif self.action == "create":
            return UserFavSerializer
        # Fallback for the remaining actions (e.g. destroy): previously this
        # implicitly returned None, which breaks any action needing a serializer.
        return UserFavSerializer
class LeavingMessageViewset(mixins.ListModelMixin, mixins.DestroyModelMixin, mixins.CreateModelMixin, viewsets.GenericViewSet):
    """
    list:
        Get the current user's messages.
    create:
        Add a message.
    delete:
        Delete a message.
    """
    permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
    serializer_class = LeavingMessageSerializer

    # Users can only see their own messages
    def get_queryset(self):
        return UserLeavingMessage.objects.filter(user=self.request.user)
class AddressViewset(viewsets.ModelViewSet):
    """
    Shipping address management.

    list:
        Get shipping addresses.
    create:
        Add a shipping address.
    update:
        Update a shipping address.
    delete:
        Delete a shipping address.
    """
    permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
    serializer_class = AddressSerializer

    # Only expose the current user's addresses
    def get_queryset(self):
        return UserAddress.objects.filter(user=self.request.user)
|
zh
| 0.893401
|
# Create your views here. 用户收藏 # queryset = UserFav.objects.all() #permission是用来做权限判断的 # IsAuthenticated:必须登陆用户; IsOwnerOrReadOnly:必须是当前登陆用户 #auth使用来做用户认证的 #搜索的字段 #只能查看当前登陆用户的收藏,不会获取所有用户的收藏 #动态选择serializer list: 获取用户留言 create: 添加留言 delete: 删除留言功能 #只能看到自己的留言 收货地址管理 list: 获取收货地址 create: 添加收货地址 update: 更新收货地址 delete: 删除收货地址
| 1.979136
| 2
|
magenta/models/drums_rnn/drums_rnn_config_flags.py
|
flyingleafe/magenta
| 0
|
6626688
|
<filename>magenta/models/drums_rnn/drums_rnn_config_flags.py<gh_stars>0
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a class, defaults, and utils for Drums RNN model configuration."""
from magenta.models.drums_rnn import drums_rnn_model
import tensorflow.compat.v1 as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'config',
'drum_kit',
"Which config to use. Must be one of 'one_drum', 'drum_kit' or 'reduced_drum_kit'.")
tf.app.flags.DEFINE_string(
'generator_id',
None,
'A unique ID for the generator, overriding the default.')
tf.app.flags.DEFINE_string(
'generator_description',
None,
'A description of the generator, overriding the default.')
tf.app.flags.DEFINE_string(
'hparams', '',
'Comma-separated list of `name=value` pairs. For each pair, the value of '
'the hyperparameter named `name` is set to `value`. This mapping is merged '
'with the default hyperparameters.')
class DrumsRnnConfigError(Exception):
pass
def config_from_flags():
"""Parses flags and returns the appropriate DrumsRnnConfig.
Returns:
The appropriate DrumsRnnConfig based on the supplied flags.
Raises:
DrumsRnnConfigError: When an invalid config is supplied.
"""
if FLAGS.config not in drums_rnn_model.default_configs:
raise DrumsRnnConfigError(
'`--config` must be one of %s. Got %s.' % (
drums_rnn_model.default_configs.keys(), FLAGS.config))
config = drums_rnn_model.default_configs[FLAGS.config]
config.hparams.parse(FLAGS.hparams)
if FLAGS.generator_id is not None:
config.details.id = FLAGS.generator_id
if FLAGS.generator_description is not None:
config.details.description = FLAGS.generator_description
return config
|
<filename>magenta/models/drums_rnn/drums_rnn_config_flags.py<gh_stars>0
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a class, defaults, and utils for Drums RNN model configuration."""
from magenta.models.drums_rnn import drums_rnn_model
import tensorflow.compat.v1 as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'config',
'drum_kit',
"Which config to use. Must be one of 'one_drum', 'drum_kit' or 'reduced_drum_kit'.")
tf.app.flags.DEFINE_string(
'generator_id',
None,
'A unique ID for the generator, overriding the default.')
tf.app.flags.DEFINE_string(
'generator_description',
None,
'A description of the generator, overriding the default.')
tf.app.flags.DEFINE_string(
'hparams', '',
'Comma-separated list of `name=value` pairs. For each pair, the value of '
'the hyperparameter named `name` is set to `value`. This mapping is merged '
'with the default hyperparameters.')
class DrumsRnnConfigError(Exception):
    """Raised when the flags select an unknown Drums RNN configuration."""
def config_from_flags():
  """Parses flags and returns the appropriate DrumsRnnConfig.

  Reads `--config` to pick a default configuration, merges `--hparams`
  overrides into it, and applies the optional `--generator_id` /
  `--generator_description` overrides.

  Returns:
    The appropriate DrumsRnnConfig based on the supplied flags.

  Raises:
    DrumsRnnConfigError: When an invalid config is supplied.
  """
  if FLAGS.config not in drums_rnn_model.default_configs:
    # Wrap keys() in list() so the error message shows a readable list of
    # names rather than a "dict_keys([...])" repr.
    raise DrumsRnnConfigError(
        '`--config` must be one of %s. Got %s.' % (
            list(drums_rnn_model.default_configs.keys()), FLAGS.config))
  config = drums_rnn_model.default_configs[FLAGS.config]
  # Merge any user-supplied hyperparameter overrides into the defaults.
  config.hparams.parse(FLAGS.hparams)
  if FLAGS.generator_id is not None:
    config.details.id = FLAGS.generator_id
  if FLAGS.generator_description is not None:
    config.details.description = FLAGS.generator_description
  return config
|
en
| 0.811326
|
# Copyright 2020 The Magenta Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Provides a class, defaults, and utils for Drums RNN model configuration. Parses flags and returns the appropriate DrumsRnnConfig. Returns: The appropriate DrumsRnnConfig based on the supplied flags. Raises: DrumsRnnConfigError: When an invalid config is supplied.
| 2.066956
| 2
|
ltr/data/processing.py
|
sehomi/pyCFTrackers
| 0
|
6626689
|
<filename>ltr/data/processing.py
import torch
import math
import numpy as np
import torchvision.transforms as transforms
from pytracking import TensorDict
import ltr.data.processing_utils as prutils
def stack_tensors(x):
    """Stack a non-empty list/tuple of tensors into a single tensor.

    args:
        x - Any value. Only a non-empty list or tuple whose first element is a
            torch.Tensor is stacked; everything else is returned unchanged.

    returns:
        torch.stack(x) when x is a non-empty list/tuple of tensors, else x.
    """
    # Guard against empty sequences: the bare x[0] lookup would raise
    # IndexError on [] or ().
    if isinstance(x, (list, tuple)) and x and isinstance(x[0], torch.Tensor):
        return torch.stack(x)
    return x
class BaseProcessing:
    """ Base class for Processing. Processing class is used to process the data returned by a dataset, before passing it
    through the network. For example, it can be used to crop a search region around the object, apply various data
    augmentations, etc."""
    def __init__(self, transform=transforms.ToTensor(), train_transform=None, test_transform=None, joint_transform=None):
        """
        args:
            transform - Fallback transformations applied to the images whenever a split-specific transform is absent.
            train_transform - Transformations for the train images. Falls back to 'transform' when None.
            test_transform - Transformations for the test images. Falls back to 'transform' when None.
            joint_transform - Transformations applied 'jointly' to the train and test images. For example, it can be
                              used to convert both test and train images to grayscale.
        """
        # Resolve each split's transform, falling back to the shared default.
        resolved_train = transform if train_transform is None else train_transform
        resolved_test = transform if test_transform is None else test_transform
        self.transform = {'train': resolved_train,
                          'test': resolved_test,
                          'joint': joint_transform}
    def __call__(self, data: TensorDict):
        # Subclasses implement the actual processing pipeline.
        raise NotImplementedError
class ATOMProcessing(BaseProcessing):
    """ The processing class used for training ATOM. The images are processed in the following way.
    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region )
    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is
    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is
    always at the center of the search region. The search region is then resized to a fixed size given by the
    argument output_sz. A set of proposals are then generated for the test images by jittering the ground truth box.
    """
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params,
                 mode='pair', *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.proposal_params = proposal_params
        self.mode = mode
    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        # Log-normal scale jitter and uniform center jitter scaled by target size.
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)
        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)
    def _generate_proposals(self, box):
        """ Generates proposals by adding noise to the input box
        args:
            box - input box
        returns:
            torch.Tensor - Array of shape (num_proposals, 4) containing proposals
            torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. The
                IoU is mapped to [-1, 1]
        """
        # Generate proposals
        num_proposals = self.proposal_params['boxes_per_frame']
        proposal_method = self.proposal_params.get('proposal_method', 'default')
        if proposal_method == 'default':
            proposals = torch.zeros((num_proposals, 4))
            gt_iou = torch.zeros(num_proposals)
            for i in range(num_proposals):
                proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],
                                                                 sigma_factor=self.proposal_params['sigma_factor'])
        elif proposal_method == 'gmm':
            proposals, _, _ = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                     num_samples=num_proposals)
            gt_iou = prutils.iou(box.view(1,4), proposals.view(-1,4))
        else:
            # Bug fix: an unknown proposal_method previously fell through and
            # crashed with a NameError on 'gt_iou' below. Raise an informative
            # error instead, consistent with DiMPProcessing._generate_proposals.
            raise ValueError('Unknown proposal method.')
        # Map to [-1, 1]
        gt_iou = gt_iou * 2 - 1
        return proposals, gt_iou
    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_iou'
        """
        # Apply joint transforms
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            # Crop image region centered at jittered_anno box
            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                           self.search_area_factor, self.output_sz)
            # Apply transforms
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)
        # Generate proposals (for the bounding box estimation branch)
        frame2_proposals, gt_iou = zip(*[self._generate_proposals(a) for a in data['test_anno']])
        data['test_proposals'] = list(frame2_proposals)
        data['proposal_iou'] = list(gt_iou)
        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        return data
class KLBBregProcessing(BaseProcessing):
    """ Based on ATOMProcessing. It supports training ATOM using the Maximum Likelihood or KL-divergence based learning
    introduced in [https://arxiv.org/abs/1909.12297] and in PrDiMP [https://arxiv.org/abs/2003.12565].
    """
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params,
                 mode='pair', *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.proposal_params = proposal_params
        self.mode = mode
    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        # Log-normal scale jitter; center offset scaled by the jittered target size.
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)
        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)
    def _generate_proposals(self, box):
        """ Generate proposal sample boxes from a GMM proposal distribution and compute their ground-truth density.
        This is used for ML and KL based regression learning of the bounding box regressor.
        args:
            box - input bounding box
        returns:
            torch.Tensor - sampled proposal boxes
            torch.Tensor - proposal density values for the sampled boxes
            torch.Tensor - ground-truth density values for the sampled boxes
        """
        # Generate proposals
        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                                         gt_sigma=self.proposal_params['gt_sigma'],
                                                                         num_samples=self.proposal_params[
                                                                             'boxes_per_frame'],
                                                                         add_mean_box=self.proposal_params.get(
                                                                             'add_mean_box', False))
        return proposals, proposal_density, gt_density
    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_density', 'gt_density'
        """
        # Apply joint transforms
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            # Crop image region centered at jittered_anno box
            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                           self.search_area_factor, self.output_sz)
            # Apply transforms
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)
        # Generate proposals (used for ML/KL bounding box regression learning)
        proposals, proposal_density, gt_density = zip(*[self._generate_proposals(a) for a in data['test_anno']])
        data['test_proposals'] = proposals
        data['proposal_density'] = proposal_density
        data['gt_density'] = gt_density
        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        return data
class ATOMwKLProcessing(BaseProcessing):
    """Same as ATOMProcessing but using the GMM-based sampling of proposal boxes used in KLBBregProcessing."""
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params,
                 mode='pair', *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.proposal_params = proposal_params
        self.mode = mode
    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)
        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)
    def _generate_proposals(self, box):
        """ Generate proposal sample boxes from a GMM distribution, their densities, and their IoU with the input box.
        args:
            box - input bounding box
        returns:
            torch.Tensor - sampled proposal boxes
            torch.Tensor - proposal density values
            torch.Tensor - ground-truth density values
            torch.Tensor - IoU overlap of each proposal with the input box
        """
        # Generate proposals
        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                                         self.proposal_params['gt_sigma'],
                                                                         self.proposal_params['boxes_per_frame'])
        iou = prutils.iou_gen(proposals, box.view(1, 4))
        return proposals, proposal_density, gt_density, iou
    def __call__(self, data: TensorDict):
        """ Process a TensorDict: joint transforms, jittered crop, split transforms, and GMM proposal generation.
        Adds 'test_proposals', 'proposal_density', 'gt_density' and 'proposal_iou' fields to the data.
        """
        # Apply joint transforms
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            # Crop image region centered at jittered_anno box
            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                           self.search_area_factor, self.output_sz)
            # Apply transforms
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)
        # Generate proposals
        proposals, proposal_density, gt_density, proposal_iou = zip(
            *[self._generate_proposals(a) for a in data['test_anno']])
        data['test_proposals'] = proposals
        data['proposal_density'] = proposal_density
        data['gt_density'] = gt_density
        data['proposal_iou'] = proposal_iou
        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        return data
class DiMPProcessing(BaseProcessing):
    """ The processing class used for training DiMP. The images are processed in the following way.
    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region )
    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is
    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is
    always at the center of the search region. The search region is then resized to a fixed size given by the
    argument output_sz. A Gaussian label centered at the target is generated for each image. These label functions are
    used for computing the loss of the predicted classification model on the test images. A set of proposals are
    also generated for the test images by jittering the ground truth box. These proposals are used to train the
    bounding box estimating branch.
    """
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',
                 max_scale_change=None, mode='pair', proposal_params=None, label_function_params=None, *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.
                        If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.
                        If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.
            max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            label_function_params - Arguments for the label generation process. See _generate_label_function for details.
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.crop_type = crop_type
        self.mode = mode
        self.max_scale_change = max_scale_change
        # proposal_params / label_function_params may be None, in which case the
        # corresponding outputs are simply not generated in __call__.
        self.proposal_params = proposal_params
        self.label_function_params = label_function_params
    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)
        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)
    def _generate_proposals(self, box):
        """ Generates proposals by adding noise to the input box
        args:
            box - input box
        returns:
            torch.Tensor - Array of shape (num_proposals, 4) containing proposals
            torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. The
                IoU is mapped to [-1, 1]
        """
        # Generate proposals
        num_proposals = self.proposal_params['boxes_per_frame']
        proposal_method = self.proposal_params.get('proposal_method', 'default')
        if proposal_method == 'default':
            # Per-proposal rejection sampling with a minimum-IoU constraint.
            proposals = torch.zeros((num_proposals, 4))
            gt_iou = torch.zeros(num_proposals)
            for i in range(num_proposals):
                proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],
                                                                 sigma_factor=self.proposal_params['sigma_factor'])
        elif proposal_method == 'gmm':
            # Batch sampling from a Gaussian mixture around the input box.
            proposals, _, _ = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                     num_samples=num_proposals)
            gt_iou = prutils.iou(box.view(1, 4), proposals.view(-1, 4))
        else:
            raise ValueError('Unknown proposal method.')
        # Map to [-1, 1]
        gt_iou = gt_iou * 2 - 1
        return proposals, gt_iou
    def _generate_label_function(self, target_bb):
        """ Generates the gaussian label function centered at target_bb
        args:
            target_bb - target bounding box (num_images, 4)
        returns:
            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample
        """
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_function_params['sigma_factor'],
                                                      self.label_function_params['kernel_sz'],
                                                      self.label_function_params['feature_sz'], self.output_sz,
                                                      end_pad_if_even=self.label_function_params.get('end_pad_if_even', True))
        return gauss_label
    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_iou',
                'test_label' (optional), 'train_label' (optional), 'test_label_density' (optional), 'train_label_density' (optional)
        """
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            crops, boxes = prutils.target_image_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                     self.search_area_factor, self.output_sz, mode=self.crop_type,
                                                     max_scale_change=self.max_scale_change)
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)
        # Generate proposals (only when proposal_params was supplied)
        if self.proposal_params:
            frame2_proposals, gt_iou = zip(*[self._generate_proposals(a) for a in data['test_anno']])
            data['test_proposals'] = list(frame2_proposals)
            data['proposal_iou'] = list(gt_iou)
        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        # Generate label functions (after stacking, so annotations are single tensors)
        if self.label_function_params is not None:
            data['train_label'] = self._generate_label_function(data['train_anno'])
            data['test_label'] = self._generate_label_function(data['test_anno'])
        return data
class KLDiMPProcessing(BaseProcessing):
    """ The processing class used for training PrDiMP that additionally supports the probabilistic classifier and
    bounding box regressor. See DiMPProcessing for details.
    """
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',
                 max_scale_change=None, mode='pair', proposal_params=None,
                 label_function_params=None, label_density_params=None, *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.
                        If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.
                        If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.
            max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            label_function_params - Arguments for the label generation process. See _generate_label_function for details.
            label_density_params - Arguments for the label density generation process. See _generate_label_function for details.
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.crop_type = crop_type
        self.mode = mode
        self.max_scale_change = max_scale_change
        # Each of these may be None; the corresponding outputs are then skipped in __call__.
        self.proposal_params = proposal_params
        self.label_function_params = label_function_params
        self.label_density_params = label_density_params
    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)
        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)
    def _generate_proposals(self, box):
        """ Generate proposal sample boxes from a GMM proposal distribution and compute their ground-truth density.
        This is used for ML and KL based regression learning of the bounding box regressor.
        args:
            box - input bounding box
        """
        # Generate proposals
        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                                         gt_sigma=self.proposal_params['gt_sigma'],
                                                                         num_samples=self.proposal_params['boxes_per_frame'],
                                                                         add_mean_box=self.proposal_params.get('add_mean_box', False))
        return proposals, proposal_density, gt_density
    def _generate_label_function(self, target_bb):
        """ Generates the gaussian label function centered at target_bb
        args:
            target_bb - target bounding box (num_images, 4)
        returns:
            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample
        """
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_function_params['sigma_factor'],
                                                      self.label_function_params['kernel_sz'],
                                                      self.label_function_params['feature_sz'], self.output_sz,
                                                      end_pad_if_even=self.label_function_params.get('end_pad_if_even', True))
        return gauss_label
    def _generate_label_density(self, target_bb):
        """ Generates the gaussian label density centered at target_bb
        args:
            target_bb - target bounding box (num_images, 4)
        returns:
            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample
        """
        # The density grid can be finer than the feature grid via 'interp_factor'.
        feat_sz = self.label_density_params['feature_sz'] * self.label_density_params.get('interp_factor', 1)
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_density_params['sigma_factor'],
                                                      self.label_density_params['kernel_sz'],
                                                      feat_sz, self.output_sz,
                                                      end_pad_if_even=self.label_density_params.get('end_pad_if_even', True),
                                                      density=True,
                                                      uni_bias=self.label_density_params.get('uni_weight', 0.0))
        # Zero-out density values at or below the threshold.
        gauss_label *= (gauss_label > self.label_density_params.get('threshold', 0.0)).float()
        if self.label_density_params.get('normalize', False):
            g_sum = gauss_label.sum(dim=(-2,-1))
            valid = g_sum>0.01
            # Normalize each valid map to sum to 1; degenerate maps fall back to uniform.
            gauss_label[valid, :, :] /= g_sum[valid].view(-1, 1, 1)
            gauss_label[~valid, :, :] = 1.0 / (gauss_label.shape[-2] * gauss_label.shape[-1])
        gauss_label *= 1.0 - self.label_density_params.get('shrink', 0.0)
        return gauss_label
    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_density', 'gt_density',
                'test_label' (optional), 'train_label' (optional), 'test_label_density' (optional), 'train_label_density' (optional)
        """
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            crops, boxes = prutils.target_image_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                     self.search_area_factor, self.output_sz, mode=self.crop_type,
                                                     max_scale_change=self.max_scale_change)
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)
        # Generate proposals
        proposals, proposal_density, gt_density = zip(*[self._generate_proposals(a) for a in data['test_anno']])
        data['test_proposals'] = proposals
        data['proposal_density'] = proposal_density
        data['gt_density'] = gt_density
        for s in ['train', 'test']:
            is_distractor = data.get('is_distractor_{}_frame'.format(s), None)
            if is_distractor is not None:
                for is_dist, box in zip(is_distractor, data[s+'_anno']):
                    if is_dist:
                        # Sentinel: move the distractor box far off-image so the
                        # generated labels are empty — presumably so distractor
                        # frames act as pure negatives (TODO confirm).
                        box[0] = 99999999.9
                        box[1] = 99999999.9
        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        # Generate label functions
        if self.label_function_params is not None:
            data['train_label'] = self._generate_label_function(data['train_anno'])
            data['test_label'] = self._generate_label_function(data['test_anno'])
        if self.label_density_params is not None:
            data['train_label_density'] = self._generate_label_density(data['train_anno'])
            data['test_label_density'] = self._generate_label_density(data['test_anno'])
        return data
class LWLProcessing(BaseProcessing):
""" The processing class used for training LWL. The images are processed in the following way.
First, the target bounding box (computed using the segmentation mask)is jittered by adding some noise.
Next, a rectangular region (called search region ) centered at the jittered target center, and of area
search_area_factor^2 times the area of the jittered box is cropped from the image.
The reason for jittering the target box is to avoid learning the bias that the target is
always at the center of the search region. The search region is then resized to a fixed size given by the
argument output_sz. The argument 'crop_type' determines how out-of-frame regions are handled when cropping the
search region. For instance, if crop_type == 'replicate', the boundary pixels are replicated in case the search
region crop goes out of frame. If crop_type == 'inside_major', the search region crop is shifted/shrunk to fit
completely inside one axis of the image.
"""
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',
                 max_scale_change=None, mode='pair', new_roll=False, *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - The size (width, height) to which the search region is resized. The aspect ratio is always
                        preserved when resizing the search region
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            crop_type - Determines how out-of-frame regions are handled when cropping the search region.
                        If 'replicate', the boundary pixels are replicated in case the search region crop goes out of
                                        image.
                        If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.
                        If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis
                        of the image.
            max_scale_change - Maximum allowed scale change when shrinking the search region to fit the image
                               (only applicable to 'inside' and 'inside_major' cropping modes). In case the desired
                               shrink factor exceeds the max_scale_change, the search region is only shrunk to the
                               factor max_scale_change. Out-of-frame regions are then handled by replicating the
                               boundary pixels. If max_scale_change is set to None, unbounded shrinking is allowed.
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
            new_roll - Whether to use the same random roll values for train and test frames when applying the joint
                       transformation. If True, a new random roll is performed for the test frame transformations. Thus,
                       if performing random flips, the set of train frames and the set of test frames will be flipped
                       independently.
        """
        super().__init__(*args, **kwargs)
        # Crop geometry configuration (consumed when extracting the search region).
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        # Jitter configuration (consumed by _get_jittered_box).
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.crop_type = crop_type
        self.mode = mode
        self.max_scale_change = max_scale_change
        # Controls whether test frames re-roll the joint transform's randomness.
        self.new_roll = new_roll
def _get_jittered_box(self, box, mode):
""" Jitter the input box
args:
box - input bounding box
mode - string 'train' or 'test' indicating train or test data
returns:
torch.Tensor - jittered box
"""
if self.scale_jitter_factor.get('mode', 'gauss') == 'gauss':
jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
elif self.scale_jitter_factor.get('mode', 'gauss') == 'uniform':
jittered_size = box[2:4] * torch.exp(torch.FloatTensor(2).uniform_(-self.scale_jitter_factor[mode],
self.scale_jitter_factor[mode]))
else:
raise Exception
max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode])).float()
jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)
return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)
def __call__(self, data: TensorDict):
# Apply joint transformations. i.e. All train/test frames in a sequence are applied the transformation with the
# same parameters
if self.transform['joint'] is not None:
data['train_images'], data['train_anno'], data['train_masks'] = self.transform['joint'](
image=data['train_images'], bbox=data['train_anno'], mask=data['train_masks'])
data['test_images'], data['test_anno'], data['test_masks'] = self.transform['joint'](
image=data['test_images'], bbox=data['test_anno'], mask=data['test_masks'], new_roll=self.new_roll)
for s in ['train', 'test']:
assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
"In pair mode, num train/test frames must be 1"
# Add a uniform noise to the center pos
jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
orig_anno = data[s + '_anno']
# Extract a crop containing the target
crops, boxes, mask_crops = prutils.target_image_crop(data[s + '_images'], jittered_anno,
data[s + '_anno'], self.search_area_factor,
self.output_sz, mode=self.crop_type,
max_scale_change=self.max_scale_change,
masks=data[s + '_masks'])
# Apply independent transformations to each image
data[s + '_images'], data[s + '_anno'], data[s + '_masks'] = self.transform[s](image=crops, bbox=boxes, mask=mask_crops, joint=False)
# Prepare output
if self.mode == 'sequence':
data = data.apply(stack_tensors)
else:
data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
return data
class KYSProcessing(BaseProcessing):
    """ The processing class used for training KYS. The images are processed in the following way.
    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region)
    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is
    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is
    always at the center of the search region. The search region is then resized to a fixed size given by the
    argument output_sz. A Gaussian label centered at the target is generated for each image. These label functions are
    used for computing the loss of the predicted classification model on the test images. A set of proposals are
    also generated for the test images by jittering the ground truth box. These proposals can be used to train the
    bounding box estimating branch.
    """
    def __init__(self, search_area_factor, output_sz, center_jitter_param, scale_jitter_param,
                 proposal_params=None, label_function_params=None, min_crop_inside_ratio=0,
                 *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_param - A dict containing the amount of jittering to be applied to the target center before
                                  extracting the search region. See _generate_synthetic_motion for how the jittering is done.
            scale_jitter_param - A dict containing the amount of jittering to be applied to the target size before
                                extracting the search region. See _generate_synthetic_motion for how the jittering is done.
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            label_function_params - Arguments for the label generation process. See _generate_label_function for details.
            min_crop_inside_ratio - Minimum amount of cropped search area which should be inside the image.
                                    See _check_if_crop_inside_image for details.
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_param = center_jitter_param
        self.scale_jitter_param = scale_jitter_param
        self.proposal_params = proposal_params
        self.label_function_params = label_function_params
        self.min_crop_inside_ratio = min_crop_inside_ratio

    def _check_if_crop_inside_image(self, box, im_shape):
        """ Check whether a sufficient fraction of the search-area crop around `box`
        lies inside the image.

        args:
            box - candidate target box [x, y, w, h]
            im_shape - image shape, (H, W, ...)

        returns:
            bool - True if the inside fraction exceeds self.min_crop_inside_ratio
        """
        x, y, w, h = box.tolist()

        # Degenerate boxes can never produce a valid crop.
        if w <= 0.0 or h <= 0.0:
            return False

        # Square crop side length used by the search-area extraction.
        crop_sz = math.ceil(math.sqrt(w * h) * self.search_area_factor)

        # Crop boundaries centered on the box center.
        x1 = x + 0.5 * w - crop_sz * 0.5
        x2 = x1 + crop_sz

        y1 = y + 0.5 * h - crop_sz * 0.5
        y2 = y1 + crop_sz

        # Overlap of the crop with the image.
        w_inside = max(min(x2, im_shape[1]) - max(x1, 0), 0)
        h_inside = max(min(y2, im_shape[0]) - max(y1, 0), 0)

        crop_area = ((x2 - x1) * (y2 - y1))

        if crop_area > 0:
            inside_ratio = w_inside * h_inside / crop_area
            return inside_ratio > self.min_crop_inside_ratio
        else:
            return False

    def _generate_synthetic_motion(self, boxes, images, mode):
        """ Jitter each annotated box to synthesize target motion across the sequence.

        For every frame, up to 10 jitter attempts are made until the resulting search-area
        crop lies sufficiently inside the image; otherwise a small fallback box is used.

        args:
            boxes - list of per-frame target boxes [x, y, w, h]
            images - list of per-frame images (used only for their shapes)
            mode - 'train' or 'test'; selects the jitter parameters

        returns:
            list of jittered boxes, one per frame
        """
        num_frames = len(boxes)

        out_boxes = []

        for i in range(num_frames):
            jittered_box = None
            for _ in range(10):
                orig_box = boxes[i]
                # Log-normal scale jitter of the box size.
                jittered_size = orig_box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_param[mode + '_factor'])

                if self.center_jitter_param.get(mode + '_mode', 'uniform') == 'uniform':
                    # Uniform center offset proportional to the jittered target size.
                    max_offset = (jittered_size.prod().sqrt() * self.center_jitter_param[mode + '_factor']).item()
                    offset_factor = (torch.rand(2) - 0.5)
                    jittered_center = orig_box[0:2] + 0.5 * orig_box[2:4] + max_offset * offset_factor

                    # Optionally limit frame-to-frame motion: if the jittered center moved
                    # too far from the previous output box, flip the offset direction.
                    if self.center_jitter_param.get(mode + '_limit_motion', False) and i > 0:
                        prev_out_box_center = out_boxes[-1][:2] + 0.5 * out_boxes[-1][2:]
                        if abs(jittered_center[0] - prev_out_box_center[0]) > out_boxes[-1][2:].prod().sqrt() * 2.5:
                            jittered_center[0] = orig_box[0] + 0.5 * orig_box[2] + max_offset * offset_factor[0] * -1

                        if abs(jittered_center[1] - prev_out_box_center[1]) > out_boxes[-1][2:].prod().sqrt() * 2.5:
                            jittered_center[1] = orig_box[1] + 0.5 * orig_box[3] + max_offset * offset_factor[1] * -1

                jittered_box = torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)

                # Accept the jitter as soon as the crop lies sufficiently inside the image;
                # after 10 failed attempts fall back to a small default box.
                if self._check_if_crop_inside_image(jittered_box, images[i].shape):
                    break
                else:
                    jittered_box = torch.tensor([1, 1, 10, 10]).float()

            out_boxes.append(jittered_box)

        return out_boxes

    def _generate_proposals(self, frame2_gt_crop):
        """ Generate proposal boxes by perturbing the ground-truth box.

        args:
            frame2_gt_crop - ground-truth box (numpy array, [x, y, w, h]) in crop coordinates

        returns:
            frame2_proposals - (boxes_per_frame, 4) numpy array of proposal boxes
            gt_iou - per-proposal IoU with the ground truth, rescaled to [-1, 1]
        """
        # Generate proposals
        num_proposals = self.proposal_params['boxes_per_frame']

        frame2_proposals = np.zeros((num_proposals, 4))
        gt_iou = np.zeros(num_proposals)
        sample_p = np.zeros(num_proposals)  # sampling probabilities (computed but not returned)

        for i in range(num_proposals):
            frame2_proposals[i, :], gt_iou[i], sample_p[i] = prutils.perturb_box(
                frame2_gt_crop,
                min_iou=self.proposal_params['min_iou'],
                sigma_factor=self.proposal_params['sigma_factor']
            )

        # Map IoU from [0, 1] to [-1, 1] for regression targets.
        gt_iou = gt_iou * 2 - 1

        return frame2_proposals, gt_iou

    def _generate_label_function(self, target_bb, target_absent=None):
        """ Generate Gaussian classification labels centered at the target boxes.

        args:
            target_bb - target boxes, reshaped to (-1, 4)
            target_absent - optional per-frame flag; labels of absent frames are zeroed

        returns:
            torch.Tensor - Gaussian label maps
        """
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_function_params['sigma_factor'],
                                                      self.label_function_params['kernel_sz'],
                                                      self.label_function_params['feature_sz'], self.output_sz,
                                                      end_pad_if_even=self.label_function_params.get(
                                                          'end_pad_if_even', True))

        if target_absent is not None:
            # Zero the label map for frames where the target is absent.
            gauss_label *= (1 - target_absent).view(-1, 1, 1).float()
        return gauss_label

    def __call__(self, data: TensorDict):
        """ Process train/test frames: joint augmentation, synthetic motion, cropping,
        per-frame augmentation, proposal generation and label generation.

        args:
            data - TensorDict with 'train_images', 'test_images', 'train_anno', 'test_anno'
                   (and 'test_visible'/'test_valid_anno' if labels are generated).

        returns:
            TensorDict - processed data with stacked tensors; may additionally contain
                         'test_proposals', 'proposal_iou', 'train_label' and 'test_label'.
        """
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'],
                                                                               bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)

        for s in ['train', 'test']:
            # Generate synthetic sequence
            jittered_anno = self._generate_synthetic_motion(data[s + '_anno'], data[s + '_images'], s)

            # Crop images
            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                           self.search_area_factor, self.output_sz)

            # Add transforms
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)

        if self.proposal_params:
            frame2_proposals, gt_iou = zip(*[self._generate_proposals(a.numpy()) for a in data['test_anno']])

            data['test_proposals'] = [torch.tensor(p, dtype=torch.float32) for p in frame2_proposals]
            data['proposal_iou'] = [torch.tensor(gi, dtype=torch.float32) for gi in gt_iou]

        data = data.apply(stack_tensors)

        if self.label_function_params is not None:
            data['train_label'] = self._generate_label_function(data['train_anno'])
            # Target counts as absent when it is either invisible or has an invalid annotation.
            test_target_absent = 1 - (data['test_visible'] * data['test_valid_anno'])
            data['test_label'] = self._generate_label_function(data['test_anno'], test_target_absent)

        return data
class TargetCandiateMatchingProcessing(BaseProcessing):
    """ The processing class used for training KeepTrack. The distractor dataset for LaSOT is required.
    Two different modes are available partial supervision (partial_sup) or self-supervision (self_sup).
    For partial supervision the candidates, their meta data and the images of two consecutive frames are used to
    form a single supervision cue among the candidates corresponding to the annotated target object. All other
    candidates are ignored. First, the search area region is cropped from the image followed by augmentation.
    Then, the candidate matching with the annotated target object is detected to supervise the matching. Then, the
    score map coordinates of the candidates are transformed to full image coordinates. Next, it is randomly decided
    whether the candidates corresponding to the target is dropped in one of the frames to simulate re-detection,
    occlusions or normal tracking. To enable training in batches the number of candidates to match between
    two frames is fixed. Hence, artificial candidates are added. Finally, the assignment matrix is formed where a 1
    denotes a match between two candidates, -1 denotes that a match is not available and -2 denotes that no
    information about the matching is available. These entries will be ignored.
    The second method for partial supervision is used for validation only. It uses only the detected candidates and
    thus results in different numbers of candidates for each frame-pair such that training in batches is not possible.
    For self-supervision only a single frame and its candidates are required. The second frame and candidates are
    artificially created using augmentations. Here full supervision among all candidates is enabled.
    First, the search area region is cropped from the full image. Then, the cropping coordinates are augmented to
    crop a slightly different view that mimics search area region of the next frame.
    Next, the two image regions are augmented further. Then, the matching between candidates is determined by randomly
    dropping candidates to mimic occlusions or re-detections. Again, the number of candidates is fixed by adding
    artificial candidates that are ignored during training. In addition, the scores and coordinates of each
    candidate are altered to increase matching difficulty. Finally, the assignment matrix is formed where a 1
    denotes a match between two candidates, -1 denotes that a match is not available.
    """
    def __init__(self, output_sz, num_target_candidates=None, mode='self_sup',
                 img_aug_transform=None, score_map_sz=None, enable_search_area_aug=True,
                 search_area_jitter_value=100, real_target_candidates_only=False, *args, **kwargs):
        """
        args:
            output_sz - Size to which each cropped search area region is resized.
            num_target_candidates - Fixed number of candidates per frame (missing slots are padded
                                    with artificial candidates to enable batched training).
            mode - Either 'self_sup' or 'partial_sup'.
            img_aug_transform - Transform applied to the artificially created second frame in self-supervision.
            score_map_sz - Spatial size of the target score map. Defaults to (23, 23).
            enable_search_area_aug - Whether to jitter the search area box for the augmented frame.
            search_area_jitter_value - Controls the search area jitter magnitude (larger value -> smaller jitter).
            real_target_candidates_only - If True, use only the detected candidates (validation mode,
                                          variable candidate count, no batching).
        """
        super().__init__(*args, **kwargs)
        self.output_sz = output_sz
        self.num_target_candidates = num_target_candidates
        self.mode = mode
        self.img_aug_transform = img_aug_transform
        self.enable_search_area_aug = enable_search_area_aug
        self.search_area_jitter_value = search_area_jitter_value
        self.real_target_candidates_only = real_target_candidates_only

        self.score_map_sz = score_map_sz if score_map_sz is not None else (23, 23)

    def __call__(self, data: TensorDict):
        """ Dispatch to the processing routine selected by data['sup_mode'] and stack the result. """
        if data['sup_mode'] == 'self_sup':
            data = self._original_and_augmented_frame(data)
        elif data['sup_mode'] == 'partial_sup' and not self.real_target_candidates_only:
            data = self._previous_and_current_frame(data)
        elif data['sup_mode'] == 'partial_sup' and self.real_target_candidates_only:
            data = self._previous_and_current_frame_detected_target_candidates_only(data)
        else:
            raise NotImplementedError()

        data = data.apply(stack_tensors)

        return data

    def _original_and_augmented_frame(self, data: TensorDict):
        """ Self-supervision: build a frame pair from a single frame by augmenting the search
        area crop, then drop/pad/perturb candidates and form the gt assignment. """
        out = TensorDict()
        img = data.pop('img')[0]

        tsm_coords = data['target_candidate_coords'][0]
        scores = data['target_candidate_scores'][0]
        sa_box = data['search_area_box'][0]

        sa_box0 = sa_box.clone()
        sa_box1 = sa_box.clone()

        out['img_shape0'] = [torch.tensor(img.shape[:2])]
        out['img_shape1'] = [torch.tensor(img.shape[:2])]

        # prepare cropped image for the original view
        frame_crop0 = prutils.sample_target_from_crop_region(img, sa_box0, self.output_sz)

        # Jitter the search area box for the second (augmented) view.
        x, y, w, h = sa_box.long().tolist()
        if self.enable_search_area_aug:
            l = self.search_area_jitter_value
            sa_box1 = torch.tensor([x + torch.randint(-w//l, w//l+1, (1,)),
                                    y + torch.randint(-h//l, h//l+1, (1,)),
                                    w + torch.randint(-w//l, w//l+1, (1,)),
                                    h + torch.randint(-h//l, h//l+1, (1,))])

        frame_crop1 = prutils.sample_target_from_crop_region(img, sa_box1, self.output_sz)

        frame_crop0 = self.transform['train'](image=frame_crop0)
        frame_crop1 = self.img_aug_transform(image=frame_crop1)

        out['img_cropped0'] = [frame_crop0]
        out['img_cropped1'] = [frame_crop1]

        # Map score-map coordinates to full-image coordinates (row = y, col = x).
        x, y, w, h = sa_box0.tolist()

        img_coords = torch.stack([
            h * (tsm_coords[:, 0].float() / (self.score_map_sz[0] - 1)) + y,
            w * (tsm_coords[:, 1].float() / (self.score_map_sz[1] - 1)) + x
        ]).permute(1, 0)

        # Randomly drop candidates in one of the two views, then pad both to the fixed size.
        img_coords_pad0, img_coords_pad1, valid0, valid1 = self._candidate_drop_out(img_coords, img_coords.clone())

        img_coords_pad0, img_coords_pad1 = self._pad_with_fake_candidates(img_coords_pad0, img_coords_pad1, valid0, valid1,
                                                                          sa_box0, sa_box1, img.shape)

        scores_pad0 = self._add_fake_candidate_scores(scores, valid0)
        scores_pad1 = self._add_fake_candidate_scores(scores, valid1)

        x0, y0, w0, h0 = sa_box0.long().tolist()
        tsm_coords_pad0 = torch.stack([
            torch.round((img_coords_pad0[:, 0] - y0) / h0 * (self.score_map_sz[0] - 1)).long(),
            torch.round((img_coords_pad0[:, 1] - x0) / w0 * (self.score_map_sz[1] - 1)).long()
        ]).permute(1, 0)

        # Make sure the augmented search area box is only used for the fake img_coords; the
        # real candidates must be mapped back with the original box.
        x1, y1, w1, h1 = sa_box1.long().tolist()
        y = torch.where(valid1 == 1, torch.tensor(y0), torch.tensor(y1))
        x = torch.where(valid1 == 1, torch.tensor(x0), torch.tensor(x1))
        h = torch.where(valid1 == 1, torch.tensor(h0), torch.tensor(h1))
        w = torch.where(valid1 == 1, torch.tensor(w0), torch.tensor(w1))

        tsm_coords_pad1 = torch.stack([
            torch.round((img_coords_pad1[:, 0] - y) / h * (self.score_map_sz[0] - 1)).long(),
            torch.round((img_coords_pad1[:, 1] - x) / w * (self.score_map_sz[1] - 1)).long()
        ]).permute(1, 0)

        assert torch.all(tsm_coords_pad0 >= 0) and torch.all(tsm_coords_pad0 < self.score_map_sz[0])
        assert torch.all(tsm_coords_pad1 >= 0) and torch.all(tsm_coords_pad1 < self.score_map_sz[0])

        # Perturb coordinates and scores of the second view to increase matching difficulty.
        img_coords_pad1 = self._augment_coords(img_coords_pad1, img.shape, sa_box1)
        scores_pad1 = self._augment_scores(scores_pad1, valid1, ~torch.all(valid0 == valid1))

        out['candidate_img_coords0'] = [img_coords_pad0]
        out['candidate_img_coords1'] = [img_coords_pad1]
        out['candidate_tsm_coords0'] = [tsm_coords_pad0]
        out['candidate_tsm_coords1'] = [tsm_coords_pad1]
        out['candidate_scores0'] = [scores_pad0]
        out['candidate_scores1'] = [scores_pad1]
        out['candidate_valid0'] = [valid0]
        out['candidate_valid1'] = [valid1]

        # Prepare gt labels: full supervision, candidate i matches candidate i when valid in both views.
        gt_assignment = torch.zeros((self.num_target_candidates, self.num_target_candidates))
        gt_assignment[torch.arange(self.num_target_candidates), torch.arange(self.num_target_candidates)] = valid0 * valid1

        gt_matches0 = torch.arange(0, self.num_target_candidates).float()
        gt_matches1 = torch.arange(0, self.num_target_candidates).float()

        # -1: match not available (candidate dropped in either view).
        gt_matches0[(valid0==0) | (valid1==0)] = -1
        gt_matches1[(valid0==0) | (valid1==0)] = -1

        out['gt_matches0'] = [gt_matches0]
        out['gt_matches1'] = [gt_matches1]
        out['gt_assignment'] = [gt_assignment]

        return out

    def _previous_and_current_frame(self, data: TensorDict):
        """ Partial supervision: use two consecutive frames; only the candidate matching the
        annotated target provides supervision, possibly dropped in one frame. """
        out = TensorDict()
        imgs = data.pop('img')

        img0 = imgs[0]
        img1 = imgs[1]
        sa_box0 = data['search_area_box'][0]
        sa_box1 = data['search_area_box'][1]
        tsm_anno_coord0 = data['target_anno_coord'][0]
        tsm_anno_coord1 = data['target_anno_coord'][1]
        tsm_coords0 = data['target_candidate_coords'][0]
        tsm_coords1 = data['target_candidate_coords'][1]
        scores0 = data['target_candidate_scores'][0]
        scores1 = data['target_candidate_scores'][1]

        out['img_shape0'] = [torch.tensor(img0.shape[:2])]
        out['img_shape1'] = [torch.tensor(img1.shape[:2])]

        frame_crop0 = prutils.sample_target_from_crop_region(img0, sa_box0, self.output_sz)
        frame_crop1 = prutils.sample_target_from_crop_region(img1, sa_box1, self.output_sz)

        frame_crop0 = self.transform['train'](image=frame_crop0)
        frame_crop1 = self.transform['train'](image=frame_crop1)

        out['img_cropped0'] = [frame_crop0]
        out['img_cropped1'] = [frame_crop1]

        # Candidate closest to the annotation is the supervised (target) candidate.
        gt_idx0 = self._find_gt_candidate_index(tsm_coords0, tsm_anno_coord0)
        gt_idx1 = self._find_gt_candidate_index(tsm_coords1, tsm_anno_coord1)

        # Map score-map coordinates to full-image coordinates.
        x0, y0, w0, h0 = sa_box0.tolist()
        x1, y1, w1, h1 = sa_box1.tolist()

        img_coords0 = torch.stack([
            h0 * (tsm_coords0[:, 0].float() / (self.score_map_sz[0] - 1)) + y0,
            w0 * (tsm_coords0[:, 1].float() / (self.score_map_sz[1] - 1)) + x0
        ]).permute(1, 0)

        img_coords1 = torch.stack([
            h1 * (tsm_coords1[:, 0].float() / (self.score_map_sz[0] - 1)) + y1,
            w1 * (tsm_coords1[:, 1].float() / (self.score_map_sz[1] - 1)) + x1
        ]).permute(1, 0)

        # Randomly drop the target candidate in one frame to simulate occlusion/re-detection.
        frame_id, dropout = self._gt_candidate_drop_out()
        drop0 = dropout & (frame_id == 0)
        drop1 = dropout & (frame_id == 1)

        img_coords_pad0, valid0 = self._pad_with_fake_candidates_drop_gt(img_coords0, drop0, gt_idx0, sa_box0, img0.shape)
        img_coords_pad1, valid1 = self._pad_with_fake_candidates_drop_gt(img_coords1, drop1, gt_idx1, sa_box1, img1.shape)

        scores_pad0 = self._add_fake_candidate_scores(scores0, valid0)
        scores_pad1 = self._add_fake_candidate_scores(scores1, valid1)

        x0, y0, w0, h0 = sa_box0.long().tolist()
        x1, y1, w1, h1 = sa_box1.long().tolist()

        tsm_coords_pad0 = torch.stack([
            torch.round((img_coords_pad0[:, 0] - y0) / h0 * (self.score_map_sz[0] - 1)).long(),
            torch.round((img_coords_pad0[:, 1] - x0) / w0 * (self.score_map_sz[1] - 1)).long()
        ]).permute(1, 0)

        tsm_coords_pad1 = torch.stack([
            torch.round((img_coords_pad1[:, 0] - y1) / h1 * (self.score_map_sz[0] - 1)).long(),
            torch.round((img_coords_pad1[:, 1] - x1) / w1 * (self.score_map_sz[1] - 1)).long()
        ]).permute(1, 0)

        assert torch.all(tsm_coords_pad0 >= 0) and torch.all(tsm_coords_pad0 < self.score_map_sz[0])
        assert torch.all(tsm_coords_pad1 >= 0) and torch.all(tsm_coords_pad1 < self.score_map_sz[0])

        out['candidate_img_coords0'] = [img_coords_pad0]
        out['candidate_img_coords1'] = [img_coords_pad1]
        out['candidate_tsm_coords0'] = [tsm_coords_pad0]
        out['candidate_tsm_coords1'] = [tsm_coords_pad1]
        out['candidate_scores0'] = [scores_pad0]
        out['candidate_scores1'] = [scores_pad1]
        out['candidate_valid0'] = [valid0]
        out['candidate_valid1'] = [valid1]

        # Prepare gt labels. Default -2: no matching information (entries are ignored).
        gt_assignment = torch.zeros((self.num_target_candidates, self.num_target_candidates))
        gt_assignment[gt_idx0, gt_idx1] = valid0[gt_idx0]*valid1[gt_idx1]

        gt_matches0 = torch.zeros(self.num_target_candidates) - 2
        gt_matches1 = torch.zeros(self.num_target_candidates) - 2

        if drop0:
            # Target dropped in frame 0: frame-1 target has no match (-1).
            gt_matches0[gt_idx0] = -2
            gt_matches1[gt_idx1] = -1
        elif drop1:
            # Target dropped in frame 1: frame-0 target has no match (-1).
            gt_matches0[gt_idx0] = -1
            # Bug fix: was `gt_matches0[gt_idx1] = -2`, which corrupted frame-0's match
            # vector and left gt_matches1[gt_idx1] unset. Mark the dropped frame-1 target
            # as "no information" in its own match vector.
            gt_matches1[gt_idx1] = -2
        else:
            gt_matches0[gt_idx0] = gt_idx1
            gt_matches1[gt_idx1] = gt_idx0

        out['gt_matches0'] = [gt_matches0]
        out['gt_matches1'] = [gt_matches1]
        out['gt_assignment'] = [gt_assignment]

        return out

    def _previous_and_current_frame_detected_target_candidates_only(self, data: TensorDict):
        """ Partial supervision for validation: keep only the detected candidates of two
        consecutive frames (variable candidate count, no padding, no dropout). """
        out = TensorDict()
        imgs = data.pop('img')

        img0 = imgs[0]
        img1 = imgs[1]
        sa_box0 = data['search_area_box'][0]
        sa_box1 = data['search_area_box'][1]
        tsm_anno_coord0 = data['target_anno_coord'][0]
        tsm_anno_coord1 = data['target_anno_coord'][1]
        tsm_coords0 = data['target_candidate_coords'][0]
        tsm_coords1 = data['target_candidate_coords'][1]
        scores0 = data['target_candidate_scores'][0]
        scores1 = data['target_candidate_scores'][1]

        out['img_shape0'] = [torch.tensor(img0.shape[:2])]
        out['img_shape1'] = [torch.tensor(img1.shape[:2])]

        frame_crop0 = prutils.sample_target_from_crop_region(img0, sa_box0, self.output_sz)
        frame_crop1 = prutils.sample_target_from_crop_region(img1, sa_box1, self.output_sz)

        frame_crop0 = self.transform['train'](image=frame_crop0)
        frame_crop1 = self.transform['train'](image=frame_crop1)

        out['img_cropped0'] = [frame_crop0]
        out['img_cropped1'] = [frame_crop1]

        gt_idx0 = self._find_gt_candidate_index(tsm_coords0, tsm_anno_coord0)
        gt_idx1 = self._find_gt_candidate_index(tsm_coords1, tsm_anno_coord1)

        x0, y0, w0, h0 = sa_box0.tolist()
        x1, y1, w1, h1 = sa_box1.tolist()

        img_coords0 = torch.stack([
            h0 * (tsm_coords0[:, 0].float() / (self.score_map_sz[0] - 1)) + y0,
            w0 * (tsm_coords0[:, 1].float() / (self.score_map_sz[1] - 1)) + x0
        ]).permute(1, 0)

        img_coords1 = torch.stack([
            h1 * (tsm_coords1[:, 0].float() / (self.score_map_sz[0] - 1)) + y1,
            w1 * (tsm_coords1[:, 1].float() / (self.score_map_sz[1] - 1)) + x1
        ]).permute(1, 0)

        out['candidate_img_coords0'] = [img_coords0]
        out['candidate_img_coords1'] = [img_coords1]
        out['candidate_tsm_coords0'] = [tsm_coords0]
        out['candidate_tsm_coords1'] = [tsm_coords1]
        out['candidate_scores0'] = [scores0]
        out['candidate_scores1'] = [scores1]
        out['candidate_valid0'] = [torch.ones_like(scores0)]
        out['candidate_valid1'] = [torch.ones_like(scores1)]

        # Prepare gt labels: only the target candidate pair is supervised (-2 elsewhere).
        gt_assignment = torch.zeros((scores0.shape[0], scores1.shape[0]))
        gt_assignment[gt_idx0, gt_idx1] = 1

        gt_matches0 = torch.zeros(scores0.shape[0]) - 2
        gt_matches1 = torch.zeros(scores1.shape[0]) - 2

        gt_matches0[gt_idx0] = gt_idx1
        gt_matches1[gt_idx1] = gt_idx0

        out['gt_matches0'] = [gt_matches0]
        out['gt_matches1'] = [gt_matches1]
        out['gt_assignment'] = [gt_assignment]

        return out

    def _find_gt_candidate_index(self, coords, target_anno_coord):
        """ Return the index of the candidate closest (L2) to the annotated target coordinate. """
        gt_idx = torch.argmin(torch.sum((coords - target_anno_coord) ** 2, dim=1))
        return gt_idx

    def _gt_candidate_drop_out(self):
        """ Randomly decide (p=0.25) whether to drop the target candidate, and in which frame. """
        dropout = (torch.rand(1) < 0.25).item()
        frameid = torch.randint(0, 2, (1,)).item()
        return frameid, dropout

    def _pad_with_fake_candidates_drop_gt(self, img_coords, dropout, gt_idx, sa_box, img_shape):
        """ Pad the candidate list to num_target_candidates with artificial coordinates
        (placed far from real candidates and the gt), optionally dropping the gt candidate.

        returns:
            img_coords_pad - (num_target_candidates, 2) padded coordinates
            valid - (num_target_candidates,) 1 for real candidates, 0 for fake/dropped
        """
        H, W = img_shape[:2]
        num_peaks = min(img_coords.shape[0], self.num_target_candidates)
        x, y, w, h = sa_box.long().tolist()
        # Fake candidates are sampled inside the image-clipped search area.
        lowx, lowy, highx, highy = max(0, x), max(0, y), min(W, x + w), min(H, y + h)

        img_coords_pad = torch.zeros((self.num_target_candidates, 2))
        valid = torch.zeros(self.num_target_candidates)
        img_coords_pad[:num_peaks] = img_coords[:num_peaks]
        valid[:num_peaks] = 1

        gt_coords = img_coords_pad[gt_idx].clone().unsqueeze(0)

        if dropout:
            valid[gt_idx] = 0
            img_coords_pad[gt_idx] = 0

        filled = valid.clone()

        for i in range(0, self.num_target_candidates):
            if filled[i] == 0:
                # Sample 20 random locations and keep the one farthest from all used
                # candidates (and the gt) to avoid accidental near-duplicates.
                cs = torch.cat([
                    torch.rand((20, 1)) * (highy - lowy) + lowy,
                    torch.rand((20, 1)) * (highx - lowx) + lowx
                ], dim=1)
                cs_used = torch.cat([img_coords_pad[filled == 1], gt_coords], dim=0)
                dist = torch.sqrt(torch.sum((cs_used[:, None, :] - cs[None, :, :]) ** 2, dim=2))
                min_dist = torch.min(dist, dim=0).values
                max_min_dist_idx = torch.argmax(min_dist)
                img_coords_pad[i] = cs[max_min_dist_idx]
                filled[i] = 1

        return img_coords_pad, valid

    def _candidate_drop_out(self, coords0, coords1):
        """ Randomly drop up to 25% of the candidates in exactly one of the two views and
        pad both coordinate lists to num_target_candidates (zeros for empty slots). """
        num_candidates = min(coords1.shape[0], self.num_target_candidates)
        num_candidates_to_drop = torch.round(0.25*num_candidates*torch.rand(1)).long()
        idx = torch.randperm(num_candidates)[:num_candidates_to_drop]

        coords_pad0 = torch.zeros((self.num_target_candidates, 2))
        valid0 = torch.zeros(self.num_target_candidates)
        coords_pad1 = torch.zeros((self.num_target_candidates, 2))
        valid1 = torch.zeros(self.num_target_candidates)

        coords_pad0[:num_candidates] = coords0[:num_candidates]
        coords_pad1[:num_candidates] = coords1[:num_candidates]
        valid0[:num_candidates] = 1
        valid1[:num_candidates] = 1

        if torch.rand(1) < 0.5:
            coords_pad0[idx] = 0
            valid0[idx] = 0
        else:
            coords_pad1[idx] = 0
            valid1[idx] = 0

        return coords_pad0, coords_pad1, valid0, valid1

    def _pad_with_fake_candidates(self, img_coords_pad0, img_coords_pad1, valid0, valid1, sa_box0, sa_box1, img_shape):
        """ Fill invalid slots in both views with artificial coordinates sampled inside the
        respective (image-clipped) search areas, far from all already-used candidates. """
        H, W = img_shape[:2]
        x0, y0, w0, h0 = sa_box0.long().tolist()
        x1, y1, w1, h1 = sa_box1.long().tolist()
        lowx = [max(0, x0), max(0, x1)]
        lowy = [max(0, y0), max(0, y1)]
        highx = [min(W, x0 + w0), min(W, x1 + w1)]
        highy = [min(H, y0 + h0), min(H, y1 + h1)]

        filled = [valid0.clone(), valid1.clone()]
        img_coords_pad = [img_coords_pad0.clone(), img_coords_pad1.clone()]

        for i in range(0, self.num_target_candidates):
            for k in range(0, 2):
                if filled[k][i] == 0:
                    # Keep the sampled location farthest from every used candidate in both views.
                    cs = torch.cat([
                        torch.rand((20, 1)) * (highy[k] - lowy[k]) + lowy[k],
                        torch.rand((20, 1)) * (highx[k] - lowx[k]) + lowx[k]
                    ], dim=1)
                    cs_used = torch.cat([img_coords_pad[0][filled[0]==1], img_coords_pad[1][filled[1]==1]], dim=0)
                    dist = torch.sqrt(torch.sum((cs_used[:, None, :] - cs[None, :, :]) ** 2, dim=2))
                    min_dist = torch.min(dist, dim=0).values
                    max_min_dist_idx = torch.argmax(min_dist)
                    img_coords_pad[k][i] = cs[max_min_dist_idx]
                    filled[k][i] = 1

        return img_coords_pad[0], img_coords_pad[1]

    def _add_fake_candidate_scores(self, scores, valid):
        """ Pad real candidate scores with small random scores for fake/invalid slots. """
        scores_pad = torch.zeros(valid.shape[0])
        scores_pad[valid == 1] = scores[:self.num_target_candidates][valid[:scores.shape[0]] == 1]
        # Fake candidates get small scores in roughly [0.05, 0.075].
        scores_pad[valid == 0] = (torch.abs(torch.randn((valid==0).sum()))/50).clamp_max(0.025) + 0.05
        return scores_pad

    def _augment_scores(self, scores, valid, drop):
        """ Perturb the valid candidate scores in-place to increase matching difficulty.

        When the two strongest peaks are high and close, randomly either jitter all scores,
        pull the two top peaks closer, or pull them closer and swap their order.
        """
        num_valid = (valid==1).sum()
        noise = 0.1 * torch.randn(num_valid)

        if num_valid > 2 and not drop:
            if scores[1] > 0.5*scores[0] and torch.all(scores[:2] > 0.2):
                # two valid peaks with a high score that are relatively close.
                mode = torch.randint(0, 3, size=(1,))

                if mode == 0:
                    # augment randomly.
                    scores_aug = torch.sort(noise + scores[valid==1], descending=True)[0]
                elif mode == 1:
                    # move peaks closer
                    scores_aug = torch.sort(noise + scores[valid == 1], descending=True)[0]
                    scores_aug[0] = scores[valid==1][0] - torch.abs(noise[0])
                    scores_aug[1] = scores[valid==1][1] + torch.abs(noise[1])
                    scores_aug[:2] = torch.sort(scores_aug[:2], descending=True)[0]
                else:
                    # move peaks closer and switch
                    scores_aug = torch.sort(noise + scores[valid == 1], descending=True)[0]
                    scores_aug[0] = scores[valid==1][0] - torch.abs(noise[0])
                    scores_aug[1] = scores[valid==1][1] + torch.abs(noise[1])
                    scores_aug[:2] = torch.sort(scores_aug[:2], descending=True)[0]

                    idx = torch.arange(num_valid)
                    idx[:2] = torch.tensor([1, 0])
                    scores_aug = scores_aug[idx]
            else:
                scores_aug = torch.sort(scores[valid==1] + noise, descending=True)[0]
        else:
            scores_aug = torch.sort(scores[valid == 1] + noise, descending=True)[0]

        # Keep augmented scores above the fake-candidate score range.
        scores_aug = scores_aug.clamp_min(0.075)

        scores[valid==1] = scores_aug.clone()

        return scores

    def _augment_coords(self, coords, img_shape, search_area_box):
        """ Add an independent random offset to each candidate coordinate, bounded by the
        minimal pairwise candidate distance (or one score-map cell), clamped to the image. """
        H, W = img_shape[:2]
        _, _, w, h = search_area_box.float()

        # add independent offset to each coord
        d = torch.sqrt(torch.sum((coords[None, :] - coords[:, None])**2, dim=2))

        if torch.all(d == 0):
            # Single (or fully coincident) candidates: use half a score-map cell.
            xmin = 0.5*w/self.score_map_sz[1]
            ymin = 0.5*h/self.score_map_sz[0]
        else:
            dmin = torch.min(d[d>0])
            xmin = (math.sqrt(2)*dmin/4).clamp_max(w/self.score_map_sz[1])
            ymin = (math.sqrt(2)*dmin/4).clamp_max(h/self.score_map_sz[0])

        txi = torch.rand(coords.shape[0])*2*xmin - xmin
        tyi = torch.rand(coords.shape[0])*2*ymin - ymin

        coords[:, 0] += tyi
        coords[:, 1] += txi

        coords[:, 0] = coords[:, 0].clamp(0, H)
        coords[:, 1] = coords[:, 1].clamp(0, W)

        return coords
class LTRBDenseRegressionProcessing(BaseProcessing):
""" The processing class used for training ToMP that supports dense bounding box regression.
"""
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',
                 max_scale_change=None, mode='pair', stride=16, label_function_params=None,
                 center_sampling_radius=0.0, use_normalized_coords=True, *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.
                        If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.
                        If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.
            max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
            stride - Stride of the backbone feature map; the dense regression grid has one location per stride pixels.
            label_function_params - Arguments for the label generation process. See _generate_label_function for details.
            center_sampling_radius - If > 0, only grid locations within this radius (in units of stride) of the box
                                     center count as inside the box. See _compute_sampling_region for details.
            use_normalized_coords - Whether the ltrb regression targets are normalized by output_sz.
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.crop_type = crop_type
        self.mode = mode
        self.max_scale_change = max_scale_change

        self.stride = stride
        self.label_function_params = label_function_params
        self.center_sampling_radius = center_sampling_radius
        self.use_normalized_coords = use_normalized_coords
def _get_jittered_box(self, box, mode):
""" Jitter the input box
args:
box - input bounding box
mode - string 'train' or 'test' indicating train or test data
returns:
torch.Tensor - jittered box
"""
jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)
return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)
def _generate_label_function(self, target_bb):
""" Generates the gaussian label function centered at target_bb
args:
target_bb - target bounding box (num_images, 4)
returns:
torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample
"""
gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4),
self.label_function_params['sigma_factor'],
self.label_function_params['kernel_sz'],
self.label_function_params['feature_sz'], self.output_sz,
end_pad_if_even=self.label_function_params.get(
'end_pad_if_even', True))
return gauss_label
def _generate_ltbr_regression_targets(self, target_bb):
shifts_x = torch.arange(
0, self.output_sz, step=self.stride,
dtype=torch.float32, device=target_bb.device
)
shifts_y = torch.arange(
0, self.output_sz, step=self.stride,
dtype=torch.float32, device=target_bb.device
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
locations = torch.stack((shift_x, shift_y), dim=1) + self.stride // 2
xs, ys = locations[:, 0], locations[:, 1]
xyxy = torch.stack([target_bb[:, 0], target_bb[:, 1], target_bb[:, 0] + target_bb[:, 2],
target_bb[:, 1] + target_bb[:, 3]], dim=1)
l = xs[:, None] - xyxy[:, 0][None]
t = ys[:, None] - xyxy[:, 1][None]
r = xyxy[:, 2][None] - xs[:, None]
b = xyxy[:, 3][None] - ys[:, None]
reg_targets_per_im = torch.stack([l, t, r, b], dim=2).reshape(-1, 4)
if self.use_normalized_coords:
reg_targets_per_im = reg_targets_per_im / self.output_sz
if self.center_sampling_radius > 0:
is_in_box = self._compute_sampling_region(xs, xyxy, ys)
else:
is_in_box = (reg_targets_per_im.min(dim=1)[0] > 0)
sz = self.output_sz//self.stride
nb = target_bb.shape[0]
reg_targets_per_im = reg_targets_per_im.reshape(sz, sz, nb, 4).permute(2, 3, 0, 1)
is_in_box = is_in_box.reshape(sz, sz, nb, 1).permute(2, 3, 0, 1)
return reg_targets_per_im, is_in_box
def _compute_sampling_region(self, xs, xyxy, ys):
cx = (xyxy[:, 0] + xyxy[:, 2]) / 2
cy = (xyxy[:, 1] + xyxy[:, 3]) / 2
xmin = cx - self.center_sampling_radius * self.stride
ymin = cy - self.center_sampling_radius * self.stride
xmax = cx + self.center_sampling_radius * self.stride
ymax = cy + self.center_sampling_radius * self.stride
center_gt = xyxy.new_zeros(xyxy.shape)
center_gt[:, 0] = torch.where(xmin > xyxy[:, 0], xmin, xyxy[:, 0])
center_gt[:, 1] = torch.where(ymin > xyxy[:, 1], ymin, xyxy[:, 1])
center_gt[:, 2] = torch.where(xmax > xyxy[:, 2], xyxy[:, 2], xmax)
center_gt[:, 3] = torch.where(ymax > xyxy[:, 3], xyxy[:, 3], ymax)
left = xs[:, None] - center_gt[:, 0]
right = center_gt[:, 2] - xs[:, None]
top = ys[:, None] - center_gt[:, 1]
bottom = center_gt[:, 3] - ys[:, None]
center_bbox = torch.stack((left, top, right, bottom), -1)
is_in_box = center_bbox.min(-1)[0] > 0
return is_in_box
    def __call__(self, data: TensorDict):
        """ Process one data block: transform, crop, and attach classification labels plus dense LTRB targets.
        args:
            data - The input data, should contain the following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno',
                'train_ltrb_target', 'test_ltrb_target', 'train_sample_region', 'test_sample_region',
                'test_label' (optional), 'train_label' (optional)
        """
        # Joint transforms: test frames reuse the train frames' random parameters (new_roll=False).
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'],
                                                                               bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'],
                                                                            bbox=data['test_anno'], new_roll=False)
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            # Crop a search region around the jittered box and resize it to output_sz.
            crops, boxes = prutils.target_image_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                     self.search_area_factor, self.output_sz, mode=self.crop_type,
                                                     max_scale_change=self.max_scale_change)
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)
        # Prepare output: stack per-frame tensors in sequence mode, otherwise unwrap single-element lists.
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        # Generate label functions
        if self.label_function_params is not None:
            data['train_label'] = self._generate_label_function(data['train_anno'])
            data['test_label'] = self._generate_label_function(data['test_anno'])
        # Dense LTRB regression targets and sampling-region masks for both frame sets.
        data['test_ltrb_target'], data['test_sample_region'] = self._generate_ltbr_regression_targets(data['test_anno'])
        data['train_ltrb_target'], data['train_sample_region'] = self._generate_ltbr_regression_targets(data['train_anno'])
        return data
|
<filename>ltr/data/processing.py
import torch
import math
import numpy as np
import torchvision.transforms as transforms
from pytracking import TensorDict
import ltr.data.processing_utils as prutils
def stack_tensors(x):
    """Stack a list/tuple of tensors into a single tensor; return anything else unchanged."""
    is_tensor_seq = isinstance(x, (list, tuple)) and isinstance(x[0], torch.Tensor)
    return torch.stack(x) if is_tensor_seq else x
class BaseProcessing:
    """ Base class for all processing pipelines. A processing object takes the raw data returned by a dataset
    (images, annotations, ...) and prepares it for the network, e.g. by cropping a search region around the
    target and applying data augmentations."""
    def __init__(self, transform=transforms.ToTensor(), train_transform=None, test_transform=None, joint_transform=None):
        """
        args:
            transform       - Fallback transformations, used for train or test images whenever the dedicated
                              transform below is None.
            train_transform - Transformations for the train images only. Falls back to 'transform' if None.
            test_transform  - Transformations for the test images only. Falls back to 'transform' if None.
            joint_transform - Transformations applied 'jointly' to the train and test images, e.g. converting
                              both to grayscale with the same random decision.
        """
        self.transform = {
            'train': train_transform if train_transform is not None else transform,
            'test': test_transform if test_transform is not None else transform,
            'joint': joint_transform,
        }
    def __call__(self, data: TensorDict):
        # Subclasses implement the actual per-sample processing.
        raise NotImplementedError
class ATOMProcessing(BaseProcessing):
    """ The processing class used for training ATOM. The images are processed in the following way.
    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region )
    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is
    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is
    always at the center of the search region. The search region is then resized to a fixed size given by the
    argument output_sz. A set of proposals are then generated for the test images by jittering the ground truth box.
    """
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params,
                 mode='pair', *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.proposal_params = proposal_params
        self.mode = mode
    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        # Scale jitter: log-normal noise on the box size; center jitter: uniform offset bounded by
        # sqrt(area) * center_jitter_factor.
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)
        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)
    def _generate_proposals(self, box):
        """ Generates proposals by adding noise to the input box
        args:
            box - input box
        returns:
            torch.Tensor - Array of shape (num_proposals, 4) containing proposals
            torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. The
                            IoU is mapped to [-1, 1]
        raises:
            ValueError - if proposal_params specifies an unknown 'proposal_method'
        """
        # Generate proposals
        num_proposals = self.proposal_params['boxes_per_frame']
        proposal_method = self.proposal_params.get('proposal_method', 'default')
        if proposal_method == 'default':
            proposals = torch.zeros((num_proposals, 4))
            gt_iou = torch.zeros(num_proposals)
            for i in range(num_proposals):
                proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],
                                                                 sigma_factor=self.proposal_params['sigma_factor'])
        elif proposal_method == 'gmm':
            proposals, _, _ = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                     num_samples=num_proposals)
            gt_iou = prutils.iou(box.view(1, 4), proposals.view(-1, 4))
        else:
            # Fail loudly on an unknown method instead of hitting a NameError on 'proposals' below.
            # Consistent with DiMPProcessing._generate_proposals.
            raise ValueError('Unknown proposal method.')
        # Map to [-1, 1]
        gt_iou = gt_iou * 2 - 1
        return proposals, gt_iou
    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_iou'
        """
        # Apply joint transforms (test frames reuse the train frames' random parameters via new_roll=False)
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            # Crop image region centered at jittered_anno box
            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                           self.search_area_factor, self.output_sz)
            # Apply transforms
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)
        # Generate proposals for each test annotation
        frame2_proposals, gt_iou = zip(*[self._generate_proposals(a) for a in data['test_anno']])
        data['test_proposals'] = list(frame2_proposals)
        data['proposal_iou'] = list(gt_iou)
        # Prepare output: stack per-frame tensors in sequence mode, otherwise unwrap single-element lists
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        return data
class KLBBregProcessing(BaseProcessing):
    """ Based on ATOMProcessing. It supports training ATOM using the Maximum Likelihood or KL-divergence based learning
    introduced in [https://arxiv.org/abs/1909.12297] and in PrDiMP [https://arxiv.org/abs/2003.12565].
    """
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params,
                 mode='pair', *args, **kwargs):
        """
        args:
            search_area_factor - Size of the search region relative to the target size.
            output_sz - Integer side length to which the (always square) search region is resized.
            center_jitter_factor - Dict with the amount of center jittering per mode ('train'/'test').
                                   See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - Dict with the amount of scale jittering per mode ('train'/'test').
                                  See _get_jittered_box for how the jittering is done.
            proposal_params - Arguments for the GMM proposal generation. See _generate_proposals for details.
            mode - Either 'pair' or 'sequence'. If mode='sequence', the output has an extra frame dimension.
        """
        super().__init__(*args, **kwargs)
        self.mode = mode
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.proposal_params = proposal_params
    def _get_jittered_box(self, box, mode):
        """ Apply random scale and center jitter to a box.
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        scale_noise = torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        new_sz = box[2:4] * scale_noise
        offset_bound = new_sz.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float()
        new_center = box[0:2] + 0.5 * box[2:4] + offset_bound * (torch.rand(2) - 0.5)
        return torch.cat((new_center - 0.5 * new_sz, new_sz), dim=0)
    def _generate_proposals(self, box):
        """ Sample proposal boxes from a GMM centered at the input box, together with the proposal density
        and the ground-truth density evaluated at the samples.
        """
        p = self.proposal_params
        return prutils.sample_box_gmm(box, p['proposal_sigma'],
                                      gt_sigma=p['gt_sigma'],
                                      num_samples=p['boxes_per_frame'],
                                      add_mean_box=p.get('add_mean_box', False))
    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_density', 'gt_density'
        """
        # Joint transforms: test frames reuse the train frames' random parameters (new_roll=False).
        joint_tfm = self.transform['joint']
        if joint_tfm is not None:
            data['train_images'], data['train_anno'] = joint_tfm(image=data['train_images'], bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = joint_tfm(image=data['test_images'], bbox=data['test_anno'], new_roll=False)
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Jitter the annotations, then crop a square search region centered at each jittered box.
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                           self.search_area_factor, self.output_sz)
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)
        # Sample proposals and densities for every test annotation.
        proposals, proposal_density, gt_density = zip(*[self._generate_proposals(a) for a in data['test_anno']])
        data['test_proposals'] = proposals
        data['proposal_density'] = proposal_density
        data['gt_density'] = gt_density
        # Stack per-frame tensors in sequence mode, otherwise unwrap the single-element lists.
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        return data
class ATOMwKLProcessing(BaseProcessing):
    """Same as ATOMProcessing but using the GMM-based sampling of proposal boxes used in KLBBregProcessing."""
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params,
                 mode='pair', *args, **kwargs):
        """ See ATOMProcessing.__init__ for the meaning of the arguments. """
        super().__init__(*args, **kwargs)
        self.mode = mode
        self.proposal_params = proposal_params
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
    def _get_jittered_box(self, box, mode):
        """ Apply random scale and center jitter to the input box.
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        size_jitter = torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        jittered_sz = box[2:4] * size_jitter
        offset_limit = jittered_sz.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float()
        jittered_ctr = box[0:2] + 0.5 * box[2:4] + offset_limit * (torch.rand(2) - 0.5)
        return torch.cat((jittered_ctr - 0.5 * jittered_sz, jittered_sz), dim=0)
    def _generate_proposals(self, box):
        """ Sample GMM proposals with their densities, and compute each proposal's IoU with the input box.
        """
        p = self.proposal_params
        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, p['proposal_sigma'],
                                                                         p['gt_sigma'],
                                                                         p['boxes_per_frame'])
        iou = prutils.iou_gen(proposals, box.view(1, 4))
        return proposals, proposal_density, gt_density, iou
    def __call__(self, data: TensorDict):
        """ Process a data block: apply transforms, crop search regions, and attach GMM proposals with
        densities and IoU overlaps. """
        # Joint transforms: test frames reuse the train frames' random parameters (new_roll=False).
        joint_tfm = self.transform['joint']
        if joint_tfm is not None:
            data['train_images'], data['train_anno'] = joint_tfm(image=data['train_images'], bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = joint_tfm(image=data['test_images'], bbox=data['test_anno'], new_roll=False)
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Jitter the annotations, then crop a square search region centered at each jittered box.
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                           self.search_area_factor, self.output_sz)
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)
        # Sample proposals, densities and IoUs for every test annotation.
        sampled = [self._generate_proposals(a) for a in data['test_anno']]
        proposals, proposal_density, gt_density, proposal_iou = zip(*sampled)
        data['test_proposals'] = proposals
        data['proposal_density'] = proposal_density
        data['gt_density'] = gt_density
        data['proposal_iou'] = proposal_iou
        # Stack per-frame tensors in sequence mode, otherwise unwrap the single-element lists.
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        return data
class DiMPProcessing(BaseProcessing):
    """ The processing class used for training DiMP. The images are processed in the following way.
    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region )
    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is
    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is
    always at the center of the search region. The search region is then resized to a fixed size given by the
    argument output_sz. A Gaussian label centered at the target is generated for each image. These label functions are
    used for computing the loss of the predicted classification model on the test images. A set of proposals are
    also generated for the test images by jittering the ground truth box. These proposals are used to train the
    bounding box estimating branch.
    """
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',
                 max_scale_change=None, mode='pair', proposal_params=None, label_function_params=None, *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.
                        If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.
                        If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.
            max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            label_function_params - Arguments for the label generation process. See _generate_label_function for details.
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.crop_type = crop_type
        self.mode = mode
        self.max_scale_change = max_scale_change
        self.proposal_params = proposal_params
        self.label_function_params = label_function_params
    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        # Scale jitter: log-normal noise on the box size.
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        # Center jitter: uniform offset bounded by sqrt(area) * center_jitter_factor.
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)
        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)
    def _generate_proposals(self, box):
        """ Generates proposals by adding noise to the input box
        args:
            box - input box
        returns:
            torch.Tensor - Array of shape (num_proposals, 4) containing proposals
            torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. The
                            IoU is mapped to [-1, 1]
        raises:
            ValueError - if proposal_params specifies an unknown 'proposal_method'
        """
        # Generate proposals
        num_proposals = self.proposal_params['boxes_per_frame']
        proposal_method = self.proposal_params.get('proposal_method', 'default')
        if proposal_method == 'default':
            # Independently perturb the box until the requested minimum IoU is met (see prutils.perturb_box).
            proposals = torch.zeros((num_proposals, 4))
            gt_iou = torch.zeros(num_proposals)
            for i in range(num_proposals):
                proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],
                                                                 sigma_factor=self.proposal_params['sigma_factor'])
        elif proposal_method == 'gmm':
            # Sample all proposals at once from a Gaussian mixture centered at the box.
            proposals, _, _ = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                     num_samples=num_proposals)
            gt_iou = prutils.iou(box.view(1, 4), proposals.view(-1, 4))
        else:
            raise ValueError('Unknown proposal method.')
        # Map to [-1, 1]
        gt_iou = gt_iou * 2 - 1
        return proposals, gt_iou
    def _generate_label_function(self, target_bb):
        """ Generates the gaussian label function centered at target_bb
        args:
            target_bb - target bounding box (num_images, 4)
        returns:
            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample
        """
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_function_params['sigma_factor'],
                                                      self.label_function_params['kernel_sz'],
                                                      self.label_function_params['feature_sz'], self.output_sz,
                                                      end_pad_if_even=self.label_function_params.get('end_pad_if_even', True))
        return gauss_label
    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_iou',
                'test_label' (optional), 'train_label' (optional)
        """
        # Joint transforms: test frames reuse the train frames' random parameters (new_roll=False).
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            # Crop a search region around the jittered box, handling out-of-frame pixels per crop_type.
            crops, boxes = prutils.target_image_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                     self.search_area_factor, self.output_sz, mode=self.crop_type,
                                                     max_scale_change=self.max_scale_change)
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)
        # Generate proposals
        if self.proposal_params:
            frame2_proposals, gt_iou = zip(*[self._generate_proposals(a) for a in data['test_anno']])
            data['test_proposals'] = list(frame2_proposals)
            data['proposal_iou'] = list(gt_iou)
        # Prepare output: stack per-frame tensors in sequence mode, otherwise unwrap single-element lists.
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        # Generate label functions
        if self.label_function_params is not None:
            data['train_label'] = self._generate_label_function(data['train_anno'])
            data['test_label'] = self._generate_label_function(data['test_anno'])
        return data
class KLDiMPProcessing(BaseProcessing):
    """ The processing class used for training PrDiMP that additionally supports the probabilistic classifier and
    bounding box regressor. See DiMPProcessing for details.
    """
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',
                 max_scale_change=None, mode='pair', proposal_params=None,
                 label_function_params=None, label_density_params=None, *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.
                        If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.
                        If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.
            max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            label_function_params - Arguments for the label generation process. See _generate_label_function for details.
            label_density_params - Arguments for the label density generation process. See _generate_label_density for details.
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.crop_type = crop_type
        self.mode = mode
        self.max_scale_change = max_scale_change
        self.proposal_params = proposal_params
        self.label_function_params = label_function_params
        self.label_density_params = label_density_params
    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        # Scale jitter: log-normal noise on the box size.
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        # Center jitter: uniform offset bounded by sqrt(area) * center_jitter_factor.
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)
        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)
    def _generate_proposals(self, box):
        """ Generate proposal sample boxes from a GMM proposal distribution and compute their ground-truth density.
        This is used for ML and KL based regression learning of the bounding box regressor.
        args:
            box - input bounding box
        returns:
            proposals, their proposal density, and the ground-truth density evaluated at the samples
            (see prutils.sample_box_gmm)
        """
        # Generate proposals
        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                                         gt_sigma=self.proposal_params['gt_sigma'],
                                                                         num_samples=self.proposal_params['boxes_per_frame'],
                                                                         add_mean_box=self.proposal_params.get('add_mean_box', False))
        return proposals, proposal_density, gt_density
    def _generate_label_function(self, target_bb):
        """ Generates the gaussian label function centered at target_bb
        args:
            target_bb - target bounding box (num_images, 4)
        returns:
            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample
        """
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_function_params['sigma_factor'],
                                                      self.label_function_params['kernel_sz'],
                                                      self.label_function_params['feature_sz'], self.output_sz,
                                                      end_pad_if_even=self.label_function_params.get('end_pad_if_even', True))
        return gauss_label
    def _generate_label_density(self, target_bb):
        """ Generates the gaussian label density centered at target_bb
        args:
            target_bb - target bounding box (num_images, 4)
        returns:
            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample
        """
        # Optionally generate the density on an interpolated (finer) grid.
        feat_sz = self.label_density_params['feature_sz'] * self.label_density_params.get('interp_factor', 1)
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_density_params['sigma_factor'],
                                                      self.label_density_params['kernel_sz'],
                                                      feat_sz, self.output_sz,
                                                      end_pad_if_even=self.label_density_params.get('end_pad_if_even', True),
                                                      density=True,
                                                      uni_bias=self.label_density_params.get('uni_weight', 0.0))
        # Zero out density values at or below the threshold.
        gauss_label *= (gauss_label > self.label_density_params.get('threshold', 0.0)).float()
        if self.label_density_params.get('normalize', False):
            g_sum = gauss_label.sum(dim=(-2,-1))
            valid = g_sum>0.01
            # Normalize each sample to unit mass; samples with (near-)zero mass fall back to a uniform
            # density (presumably when the target lies outside the label grid — TODO confirm).
            gauss_label[valid, :, :] /= g_sum[valid].view(-1, 1, 1)
            gauss_label[~valid, :, :] = 1.0 / (gauss_label.shape[-2] * gauss_label.shape[-1])
        # Globally scale the density down by the 'shrink' factor.
        gauss_label *= 1.0 - self.label_density_params.get('shrink', 0.0)
        return gauss_label
    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_density', 'gt_density',
                'test_label' (optional), 'train_label' (optional), 'test_label_density' (optional), 'train_label_density' (optional)
        """
        # Joint transforms: test frames reuse the train frames' random parameters (new_roll=False).
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            # Crop a search region around the jittered box, handling out-of-frame pixels per crop_type.
            crops, boxes = prutils.target_image_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                     self.search_area_factor, self.output_sz, mode=self.crop_type,
                                                     max_scale_change=self.max_scale_change)
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)
        # Generate proposals
        proposals, proposal_density, gt_density = zip(*[self._generate_proposals(a) for a in data['test_anno']])
        data['test_proposals'] = proposals
        data['proposal_density'] = proposal_density
        data['gt_density'] = gt_density
        for s in ['train', 'test']:
            is_distractor = data.get('is_distractor_{}_frame'.format(s), None)
            if is_distractor is not None:
                # Push distractor boxes far off-screen (x, y set to a huge value) — presumably so the
                # labels generated below contain no peak for them; NOTE(review): confirm intent.
                for is_dist, box in zip(is_distractor, data[s+'_anno']):
                    if is_dist:
                        box[0] = 99999999.9
                        box[1] = 99999999.9
        # Prepare output: stack per-frame tensors in sequence mode, otherwise unwrap single-element lists.
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        # Generate label functions
        if self.label_function_params is not None:
            data['train_label'] = self._generate_label_function(data['train_anno'])
            data['test_label'] = self._generate_label_function(data['test_anno'])
        if self.label_density_params is not None:
            data['train_label_density'] = self._generate_label_density(data['train_anno'])
            data['test_label_density'] = self._generate_label_density(data['test_anno'])
        return data
class LWLProcessing(BaseProcessing):
    """ The processing class used for training LWL. The images are processed in the following way.
    First, the target bounding box (computed using the segmentation mask) is jittered by adding some noise.
    Next, a rectangular region (called search region) centered at the jittered target center, and of area
    search_area_factor^2 times the area of the jittered box is cropped from the image.
    The reason for jittering the target box is to avoid learning the bias that the target is
    always at the center of the search region. The search region is then resized to a fixed size given by the
    argument output_sz. The argument 'crop_type' determines how out-of-frame regions are handled when cropping the
    search region. For instance, if crop_type == 'replicate', the boundary pixels are replicated in case the search
    region crop goes out of frame. If crop_type == 'inside_major', the search region crop is shifted/shrunk to fit
    completely inside one axis of the image.
    """
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',
                 max_scale_change=None, mode='pair', new_roll=False, *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - The size (width, height) to which the search region is resized. The aspect ratio is always
                        preserved when resizing the search region
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            crop_type - Determines how out-of-frame regions are handled when cropping the search region.
                        If 'replicate', the boundary pixels are replicated in case the search region crop goes out of
                        image.
                        If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.
                        If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis
                        of the image.
            max_scale_change - Maximum allowed scale change when shrinking the search region to fit the image
                               (only applicable to 'inside' and 'inside_major' cropping modes). In case the desired
                               shrink factor exceeds the max_scale_change, the search region is only shrunk to the
                               factor max_scale_change. Out-of-frame regions are then handled by replicating the
                               boundary pixels. If max_scale_change is set to None, unbounded shrinking is allowed.
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
            new_roll - Whether to use the same random roll values for train and test frames when applying the joint
                       transformation. If True, a new random roll is performed for the test frame transformations. Thus,
                       if performing random flips, the set of train frames and the set of test frames will be flipped
                       independently.
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.crop_type = crop_type
        self.mode = mode
        self.max_scale_change = max_scale_change
        self.new_roll = new_roll
    def _get_jittered_box(self, box, mode):
        """ Jitter the input box.
        args:
            box - input bounding box in (x, y, w, h) format
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box in (x, y, w, h) format
        raises:
            ValueError - if scale_jitter_factor specifies an unknown jitter mode
        """
        # Hoist the mode lookup; the original evaluated the same .get() twice.
        jitter_mode = self.scale_jitter_factor.get('mode', 'gauss')
        if jitter_mode == 'gauss':
            jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        elif jitter_mode == 'uniform':
            jittered_size = box[2:4] * torch.exp(torch.FloatTensor(2).uniform_(-self.scale_jitter_factor[mode],
                                                                               self.scale_jitter_factor[mode]))
        else:
            # Was a bare `raise Exception`; raise a diagnosable error instead (ValueError is-a Exception,
            # so existing callers catching Exception still work).
            raise ValueError('Unknown scale jitter mode: {}'.format(jitter_mode))
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode])).float()
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)
        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)
    def __call__(self, data: TensorDict):
        """ Crop, resize and augment the train/test frames together with their boxes and masks.
        args:
            data - TensorDict containing '<train/test>_images', '<train/test>_anno' and '<train/test>_masks'
        returns:
            TensorDict - same fields, with cropped/resized/transformed images, boxes and masks
        """
        # Apply joint transformations, i.e. all train/test frames in a sequence are applied the transformation
        # with the same parameters
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'], data['train_masks'] = self.transform['joint'](
                image=data['train_images'], bbox=data['train_anno'], mask=data['train_masks'])
            data['test_images'], data['test_anno'], data['test_masks'] = self.transform['joint'](
                image=data['test_images'], bbox=data['test_anno'], mask=data['test_masks'], new_roll=self.new_roll)
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            # Extract a crop containing the target (removed unused local `orig_anno` present in the original)
            crops, boxes, mask_crops = prutils.target_image_crop(data[s + '_images'], jittered_anno,
                                                                 data[s + '_anno'], self.search_area_factor,
                                                                 self.output_sz, mode=self.crop_type,
                                                                 max_scale_change=self.max_scale_change,
                                                                 masks=data[s + '_masks'])
            # Apply independent transformations to each image
            data[s + '_images'], data[s + '_anno'], data[s + '_masks'] = self.transform[s](image=crops, bbox=boxes, mask=mask_crops, joint=False)
        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            # In 'pair' mode each field is a single-element list; unwrap it.
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        return data
class KYSProcessing(BaseProcessing):
    """ The processing class used for training KYS. The images are processed in the following way.
    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region )
    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is
    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is
    always at the center of the search region. The search region is then resized to a fixed size given by the
    argument output_sz. A Gaussian label centered at the target is generated for each image. These label functions are
    used for computing the loss of the predicted classification model on the test images. A set of proposals are
    also generated for the test images by jittering the ground truth box. These proposals can be used to train the
    bounding box estimating branch.
    """
    def __init__(self, search_area_factor, output_sz, center_jitter_param, scale_jitter_param,
                 proposal_params=None, label_function_params=None, min_crop_inside_ratio=0,
                 *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_param - A dict containing the amount of jittering to be applied to the target center before
                                  extracting the search region. See _generate_synthetic_motion for how the jittering is done.
            scale_jitter_param - A dict containing the amount of jittering to be applied to the target size before
                                 extracting the search region. See _generate_synthetic_motion for how the jittering is done.
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            label_function_params - Arguments for the label generation process. See _generate_label_function for details.
            min_crop_inside_ratio - Minimum amount of cropped search area which should be inside the image.
                                    See _check_if_crop_inside_image for details.
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_param = center_jitter_param
        self.scale_jitter_param = scale_jitter_param
        self.proposal_params = proposal_params
        self.label_function_params = label_function_params
        self.min_crop_inside_ratio = min_crop_inside_ratio
    def _check_if_crop_inside_image(self, box, im_shape):
        """ Return True iff the square search-area crop around `box` overlaps the image by more than
        self.min_crop_inside_ratio (measured as the fraction of the crop area lying inside the image). """
        x, y, w, h = box.tolist()
        # A degenerate box can never yield a valid crop.
        if w <= 0.0 or h <= 0.0:
            return False
        # Side length of the square search-area crop.
        crop_sz = math.ceil(math.sqrt(w * h) * self.search_area_factor)
        x1 = x + 0.5 * w - crop_sz * 0.5
        x2 = x1 + crop_sz
        y1 = y + 0.5 * h - crop_sz * 0.5
        y2 = y1 + crop_sz
        # Extent of the crop that falls inside the image bounds.
        w_inside = max(min(x2, im_shape[1]) - max(x1, 0), 0)
        h_inside = max(min(y2, im_shape[0]) - max(y1, 0), 0)
        crop_area = ((x2 - x1) * (y2 - y1))
        if crop_area > 0:
            inside_ratio = w_inside * h_inside / crop_area
            return inside_ratio > self.min_crop_inside_ratio
        else:
            return False
    def _generate_synthetic_motion(self, boxes, images, mode):
        """ Jitter each annotated box to synthesize target motion over the sequence.
        args:
            boxes - list of per-frame target boxes
            images - list of per-frame images (only their shapes are used, for the inside-image check)
            mode - 'train' or 'test'; selects the jitter parameters
        returns:
            list of jittered boxes, one per input box
        """
        num_frames = len(boxes)
        out_boxes = []
        for i in range(num_frames):
            jittered_box = None
            # Retry up to 10 times until the resulting crop is sufficiently inside the image.
            for _ in range(10):
                orig_box = boxes[i]
                jittered_size = orig_box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_param[mode + '_factor'])
                if self.center_jitter_param.get(mode + '_mode', 'uniform') == 'uniform':
                    max_offset = (jittered_size.prod().sqrt() * self.center_jitter_param[mode + '_factor']).item()
                    offset_factor = (torch.rand(2) - 0.5)
                    jittered_center = orig_box[0:2] + 0.5 * orig_box[2:4] + max_offset * offset_factor
                    # Optionally limit the synthetic motion w.r.t. the previous output box: if the jittered
                    # center moved too far, flip the offset direction on that axis.
                    if self.center_jitter_param.get(mode + '_limit_motion', False) and i > 0:
                        prev_out_box_center = out_boxes[-1][:2] + 0.5 * out_boxes[-1][2:]
                        if abs(jittered_center[0] - prev_out_box_center[0]) > out_boxes[-1][2:].prod().sqrt() * 2.5:
                            jittered_center[0] = orig_box[0] + 0.5 * orig_box[2] + max_offset * offset_factor[0] * -1
                        if abs(jittered_center[1] - prev_out_box_center[1]) > out_boxes[-1][2:].prod().sqrt() * 2.5:
                            jittered_center[1] = orig_box[1] + 0.5 * orig_box[3] + max_offset * offset_factor[1] * -1
                jittered_box = torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)
                if self._check_if_crop_inside_image(jittered_box, images[i].shape):
                    break
            else:
                # for-else: no valid crop found in 10 attempts -> fall back to a small dummy box.
                jittered_box = torch.tensor([1, 1, 10, 10]).float()
            out_boxes.append(jittered_box)
        return out_boxes
    def _generate_proposals(self, frame2_gt_crop):
        """ Generate proposal boxes by perturbing the ground-truth box.
        args:
            frame2_gt_crop - ground-truth box (numpy) in the crop coordinate frame
        returns:
            (proposals of shape (boxes_per_frame, 4), gt_iou rescaled from [0, 1] to [-1, 1])
        """
        # Generate proposals
        num_proposals = self.proposal_params['boxes_per_frame']
        frame2_proposals = np.zeros((num_proposals, 4))
        gt_iou = np.zeros(num_proposals)
        # sample_p (sampling probability returned by perturb_box) is collected but not returned.
        sample_p = np.zeros(num_proposals)
        for i in range(num_proposals):
            frame2_proposals[i, :], gt_iou[i], sample_p[i] = prutils.perturb_box(
                frame2_gt_crop,
                min_iou=self.proposal_params['min_iou'],
                sigma_factor=self.proposal_params['sigma_factor']
            )
        # Map IoU from [0, 1] to [-1, 1].
        gt_iou = gt_iou * 2 - 1
        return frame2_proposals, gt_iou
    def _generate_label_function(self, target_bb, target_absent=None):
        """ Generate a Gaussian classification label centered at each target box; labels for absent
        targets (target_absent == 1) are zeroed out. """
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_function_params['sigma_factor'],
                                                      self.label_function_params['kernel_sz'],
                                                      self.label_function_params['feature_sz'], self.output_sz,
                                                      end_pad_if_even=self.label_function_params.get(
                                                          'end_pad_if_even', True))
        if target_absent is not None:
            gauss_label *= (1 - target_absent).view(-1, 1, 1).float()
        return gauss_label
    def __call__(self, data: TensorDict):
        """ Process one sample: joint transforms, synthetic motion, cropping, per-frame transforms,
        then optional proposals and Gaussian labels. """
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'],
                                                                               bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)
        for s in ['train', 'test']:
            # Generate synthetic sequence
            jittered_anno = self._generate_synthetic_motion(data[s + '_anno'], data[s + '_images'], s)
            # Crop images
            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                           self.search_area_factor, self.output_sz)
            # Add transforms
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)
        if self.proposal_params:
            frame2_proposals, gt_iou = zip(*[self._generate_proposals(a.numpy()) for a in data['test_anno']])
            data['test_proposals'] = [torch.tensor(p, dtype=torch.float32) for p in frame2_proposals]
            data['proposal_iou'] = [torch.tensor(gi, dtype=torch.float32) for gi in gt_iou]
        data = data.apply(stack_tensors)
        if self.label_function_params is not None:
            data['train_label'] = self._generate_label_function(data['train_anno'])
            # A test target counts as absent if it is not visible or its annotation is invalid.
            test_target_absent = 1 - (data['test_visible'] * data['test_valid_anno'])
            data['test_label'] = self._generate_label_function(data['test_anno'], test_target_absent)
        return data
class TargetCandiateMatchingProcessing(BaseProcessing):
    """ The processing class used for training KeepTrack. The distractor dataset for LaSOT is required.
    Two different modes are available partial supervision (partial_sup) or self-supervision (self_sup).
    For partial supervision the candidates their meta data and the images of two consecutive frames are used to
    form a single supervision cue among the candidates corresponding to the annotated target object. All other
    candidates are ignored. First, the search area region is cropped from the image followed by augmentation.
    Then, the candidate matching with the annotated target object is detected to supervise the matching. Then, the
    score map coordinates of the candidates are transformed to full image coordinates. Next, it is randomly decided
    whether the candidates corresponding to the target is dropped in one of the frames to simulate re-detection,
    occlusions or normal tracking. To enable training in batches the number of candidates to match between
    two frames is fixed. Hence, artificial candidates are added. Finally, the assignment matrix is formed where a 1
    denotes a match between two candidates, -1 denotes that a match is not available and -2 denotes that no
    information about the matching is available. These entries will be ignored.
    The second method for partial supervision is used for validation only. It uses only the detected candidates and
    thus results in different numbers of candidates for each frame-pair such that training in batches is not possible.
    For self-supervision only a singe frame and its candidates are required. The second frame and candidates are
    artificially created using augmentations. Here full supervision among all candidates is enabled.
    First, the search area region is cropped from the full image. Then, the cropping coordinates are augmented to
    crop a slightly different view that mimics search area region of the next frame.
    Next, the two image regions are augmented further. Then, the matching between candidates is determined by randomly
    dropping candidates to mimic occlusions or re-detections. Again, the number of candidates is fixed by adding
    artificial candidates that are ignored during training. In addition, the scores and coordinates of each
    candidate are altered to increase matching difficulty. Finally, the assignment matrix is formed where a 1
    denotes a match between two candidates, -1 denotes that a match is not available.
    """
    def __init__(self, output_sz, num_target_candidates=None, mode='self_sup',
                 img_aug_transform=None, score_map_sz=None, enable_search_area_aug=True,
                 search_area_jitter_value=100, real_target_candidates_only=False, *args, **kwargs):
        """
        args:
            output_sz - Size to which the cropped search area region is resized.
            num_target_candidates - Fixed number of candidates per frame; missing entries are padded with
                                    artificial candidates so that batching is possible.
            mode - Supervision mode ('self_sup' or 'partial_sup'); see the class docstring.
            img_aug_transform - Additional augmentation applied to the second (augmented) crop in self-sup mode.
            score_map_sz - Spatial size of the target score map; defaults to (23, 23).
            enable_search_area_aug - If True, jitter the second search area box in self-sup mode.
            search_area_jitter_value - Divisor controlling the magnitude of the search-area jitter
                                       (offsets are drawn in [-dim//value, dim//value]).
            real_target_candidates_only - If True, use only the detected candidates without padding
                                          (validation-only partial_sup variant, no fixed candidate count).
        """
        super().__init__(*args, **kwargs)
        self.output_sz = output_sz
        self.num_target_candidates = num_target_candidates
        self.mode = mode
        self.img_aug_transform = img_aug_transform
        self.enable_search_area_aug = enable_search_area_aug
        self.search_area_jitter_value = search_area_jitter_value
        self.real_target_candidates_only = real_target_candidates_only
        self.score_map_sz = score_map_sz if score_map_sz is not None else (23, 23)
    def __call__(self, data: TensorDict):
        """ Dispatch to the processing routine matching the supervision mode of the sample. """
        if data['sup_mode'] == 'self_sup':
            data = self._original_and_augmented_frame(data)
        elif data['sup_mode'] == 'partial_sup' and not self.real_target_candidates_only:
            data = self._previous_and_current_frame(data)
        elif data['sup_mode'] == 'partial_sup' and self.real_target_candidates_only:
            data = self._previous_and_current_frame_detected_target_candidates_only(data)
        else:
            raise NotImplementedError()
        data = data.apply(stack_tensors)
        return data
    def _original_and_augmented_frame(self, data: TensorDict):
        """ Self-supervised mode: build a synthetic frame pair from a single frame by augmenting the
        search-area crop, then derive fully supervised matching labels between the two candidate sets. """
        out = TensorDict()
        img = data.pop('img')[0]
        tsm_coords = data['target_candidate_coords'][0]
        scores = data['target_candidate_scores'][0]
        sa_box = data['search_area_box'][0]
        sa_box0 = sa_box.clone()
        sa_box1 = sa_box.clone()
        out['img_shape0'] = [torch.tensor(img.shape[:2])]
        out['img_shape1'] = [torch.tensor(img.shape[:2])]
        # prepared cropped image
        frame_crop0 = prutils.sample_target_from_crop_region(img, sa_box0, self.output_sz)
        x, y, w, h = sa_box.long().tolist()
        # Jitter the second search area box to mimic the view of the "next" frame.
        if self.enable_search_area_aug:
            l = self.search_area_jitter_value
            sa_box1 = torch.tensor([x + torch.randint(-w//l, w//l+1, (1,)),
                                    y + torch.randint(-h//l, h//l+1, (1,)),
                                    w + torch.randint(-w//l, w//l+1, (1,)),
                                    h + torch.randint(-h//l, h//l+1, (1,))])
        frame_crop1 = prutils.sample_target_from_crop_region(img, sa_box1, self.output_sz)
        frame_crop0 = self.transform['train'](image=frame_crop0)
        frame_crop1 = self.img_aug_transform(image=frame_crop1)
        out['img_cropped0'] = [frame_crop0]
        out['img_cropped1'] = [frame_crop1]
        # Map score-map coordinates of the candidates to full-image coordinates.
        x, y, w, h = sa_box0.tolist()
        img_coords = torch.stack([
            h * (tsm_coords[:, 0].float() / (self.score_map_sz[0] - 1)) + y,
            w * (tsm_coords[:, 1].float() / (self.score_map_sz[1] - 1)) + x
        ]).permute(1, 0)
        img_coords_pad0, img_coords_pad1, valid0, valid1 = self._candidate_drop_out(img_coords, img_coords.clone())
        img_coords_pad0, img_coords_pad1 = self._pad_with_fake_candidates(img_coords_pad0, img_coords_pad1, valid0, valid1,
                                                                          sa_box0, sa_box1, img.shape)
        scores_pad0 = self._add_fake_candidate_scores(scores, valid0)
        scores_pad1 = self._add_fake_candidate_scores(scores, valid1)
        # Back-project the padded image coordinates to score-map coordinates.
        x0, y0, w0, h0 = sa_box0.long().tolist()
        tsm_coords_pad0 = torch.stack([
            torch.round((img_coords_pad0[:, 0] - y0) / h0 * (self.score_map_sz[0] - 1)).long(),
            torch.round((img_coords_pad0[:, 1] - x0) / w0 * (self.score_map_sz[1] - 1)).long()
        ]).permute(1, 0)
        # make sure that the augmented search_are_box is only used for the fake img_coords the other need the original.
        x1, y1, w1, h1 = sa_box1.long().tolist()
        y = torch.where(valid1 == 1, torch.tensor(y0), torch.tensor(y1))
        x = torch.where(valid1 == 1, torch.tensor(x0), torch.tensor(x1))
        h = torch.where(valid1 == 1, torch.tensor(h0), torch.tensor(h1))
        w = torch.where(valid1 == 1, torch.tensor(w0), torch.tensor(w1))
        tsm_coords_pad1 = torch.stack([
            torch.round((img_coords_pad1[:, 0] - y) / h * (self.score_map_sz[0] - 1)).long(),
            torch.round((img_coords_pad1[:, 1] - x) / w * (self.score_map_sz[1] - 1)).long()
        ]).permute(1, 0)
        assert torch.all(tsm_coords_pad0 >= 0) and torch.all(tsm_coords_pad0 < self.score_map_sz[0])
        assert torch.all(tsm_coords_pad1 >= 0) and torch.all(tsm_coords_pad1 < self.score_map_sz[0])
        # Perturb coordinates and scores of the second frame to increase matching difficulty.
        img_coords_pad1 = self._augment_coords(img_coords_pad1, img.shape, sa_box1)
        scores_pad1 = self._augment_scores(scores_pad1, valid1, ~torch.all(valid0 == valid1))
        out['candidate_img_coords0'] = [img_coords_pad0]
        out['candidate_img_coords1'] = [img_coords_pad1]
        out['candidate_tsm_coords0'] = [tsm_coords_pad0]
        out['candidate_tsm_coords1'] = [tsm_coords_pad1]
        out['candidate_scores0'] = [scores_pad0]
        out['candidate_scores1'] = [scores_pad1]
        out['candidate_valid0'] = [valid0]
        out['candidate_valid1'] = [valid1]
        # Prepare gt labels: candidate i in frame 0 matches candidate i in frame 1 iff both are valid.
        gt_assignment = torch.zeros((self.num_target_candidates, self.num_target_candidates))
        gt_assignment[torch.arange(self.num_target_candidates), torch.arange(self.num_target_candidates)] = valid0 * valid1
        gt_matches0 = torch.arange(0, self.num_target_candidates).float()
        gt_matches1 = torch.arange(0, self.num_target_candidates).float()
        gt_matches0[(valid0==0) | (valid1==0)] = -1
        gt_matches1[(valid0==0) | (valid1==0)] = -1
        out['gt_matches0'] = [gt_matches0]
        out['gt_matches1'] = [gt_matches1]
        out['gt_assignment'] = [gt_assignment]
        return out
    def _previous_and_current_frame(self, data: TensorDict):
        """ Partial-supervision mode: crop two consecutive frames and supervise only the match between
        the candidates corresponding to the annotated target; all other candidates are ignored (-2). """
        out = TensorDict()
        imgs = data.pop('img')
        img0 = imgs[0]
        img1 = imgs[1]
        sa_box0 = data['search_area_box'][0]
        sa_box1 = data['search_area_box'][1]
        tsm_anno_coord0 = data['target_anno_coord'][0]
        tsm_anno_coord1 = data['target_anno_coord'][1]
        tsm_coords0 = data['target_candidate_coords'][0]
        tsm_coords1 = data['target_candidate_coords'][1]
        scores0 = data['target_candidate_scores'][0]
        scores1 = data['target_candidate_scores'][1]
        out['img_shape0'] = [torch.tensor(img0.shape[:2])]
        out['img_shape1'] = [torch.tensor(img1.shape[:2])]
        frame_crop0 = prutils.sample_target_from_crop_region(img0, sa_box0, self.output_sz)
        frame_crop1 = prutils.sample_target_from_crop_region(img1, sa_box1, self.output_sz)
        frame_crop0 = self.transform['train'](image=frame_crop0)
        frame_crop1 = self.transform['train'](image=frame_crop1)
        out['img_cropped0'] = [frame_crop0]
        out['img_cropped1'] = [frame_crop1]
        # Identify which detected candidate corresponds to the annotated target in each frame.
        gt_idx0 = self._find_gt_candidate_index(tsm_coords0, tsm_anno_coord0)
        gt_idx1 = self._find_gt_candidate_index(tsm_coords1, tsm_anno_coord1)
        # Map score-map coordinates to full-image coordinates.
        x0, y0, w0, h0 = sa_box0.tolist()
        x1, y1, w1, h1 = sa_box1.tolist()
        img_coords0 = torch.stack([
            h0 * (tsm_coords0[:, 0].float() / (self.score_map_sz[0] - 1)) + y0,
            w0 * (tsm_coords0[:, 1].float() / (self.score_map_sz[1] - 1)) + x0
        ]).permute(1, 0)
        img_coords1 = torch.stack([
            h1 * (tsm_coords1[:, 0].float() / (self.score_map_sz[0] - 1)) + y1,
            w1 * (tsm_coords1[:, 1].float() / (self.score_map_sz[1] - 1)) + x1
        ]).permute(1, 0)
        # Randomly drop the target candidate in one of the frames to simulate occlusion/re-detection.
        frame_id, dropout = self._gt_candidate_drop_out()
        drop0 = dropout & (frame_id == 0)
        drop1 = dropout & (frame_id == 1)
        img_coords_pad0, valid0 = self._pad_with_fake_candidates_drop_gt(img_coords0, drop0, gt_idx0, sa_box0, img0.shape)
        img_coords_pad1, valid1 = self._pad_with_fake_candidates_drop_gt(img_coords1, drop1, gt_idx1, sa_box1, img1.shape)
        scores_pad0 = self._add_fake_candidate_scores(scores0, valid0)
        scores_pad1 = self._add_fake_candidate_scores(scores1, valid1)
        # Back-project the padded image coordinates to score-map coordinates.
        x0, y0, w0, h0 = sa_box0.long().tolist()
        x1, y1, w1, h1 = sa_box1.long().tolist()
        tsm_coords_pad0 = torch.stack([
            torch.round((img_coords_pad0[:, 0] - y0) / h0 * (self.score_map_sz[0] - 1)).long(),
            torch.round((img_coords_pad0[:, 1] - x0) / w0 * (self.score_map_sz[1] - 1)).long()
        ]).permute(1, 0)
        tsm_coords_pad1 = torch.stack([
            torch.round((img_coords_pad1[:, 0] - y1) / h1 * (self.score_map_sz[0] - 1)).long(),
            torch.round((img_coords_pad1[:, 1] - x1) / w1 * (self.score_map_sz[1] - 1)).long()
        ]).permute(1, 0)
        assert torch.all(tsm_coords_pad0 >= 0) and torch.all(tsm_coords_pad0 < self.score_map_sz[0])
        assert torch.all(tsm_coords_pad1 >= 0) and torch.all(tsm_coords_pad1 < self.score_map_sz[0])
        out['candidate_img_coords0'] = [img_coords_pad0]
        out['candidate_img_coords1'] = [img_coords_pad1]
        out['candidate_tsm_coords0'] = [tsm_coords_pad0]
        out['candidate_tsm_coords1'] = [tsm_coords_pad1]
        out['candidate_scores0'] = [scores_pad0]
        out['candidate_scores1'] = [scores_pad1]
        out['candidate_valid0'] = [valid0]
        out['candidate_valid1'] = [valid1]
        # Prepare gt labels: -2 = no information (ignored), -1 = no match available, >= 0 = matched index.
        gt_assignment = torch.zeros((self.num_target_candidates, self.num_target_candidates))
        gt_assignment[gt_idx0, gt_idx1] = valid0[gt_idx0]*valid1[gt_idx1]
        gt_matches0 = torch.zeros(self.num_target_candidates) - 2
        gt_matches1 = torch.zeros(self.num_target_candidates) - 2
        if drop0:
            # Target dropped in frame 0: its own entry is ignored, frame-1 counterpart has no match.
            gt_matches0[gt_idx0] = -2
            gt_matches1[gt_idx1] = -1
        elif drop1:
            # Target dropped in frame 1 (symmetric case). BUGFIX: the original wrote
            # `gt_matches0[gt_idx1] = -2`, clobbering an unrelated frame-0 label instead of
            # marking the dropped frame-1 entry.
            gt_matches0[gt_idx0] = -1
            gt_matches1[gt_idx1] = -2
        else:
            gt_matches0[gt_idx0] = gt_idx1
            gt_matches1[gt_idx1] = gt_idx0
        out['gt_matches0'] = [gt_matches0]
        out['gt_matches1'] = [gt_matches1]
        out['gt_assignment'] = [gt_assignment]
        return out
    def _previous_and_current_frame_detected_target_candidates_only(self, data: TensorDict):
        """ Validation-only partial supervision: use only the detected candidates (no padding, no dropout),
        so the number of candidates varies per frame pair and batching is not possible. """
        out = TensorDict()
        imgs = data.pop('img')
        img0 = imgs[0]
        img1 = imgs[1]
        sa_box0 = data['search_area_box'][0]
        sa_box1 = data['search_area_box'][1]
        tsm_anno_coord0 = data['target_anno_coord'][0]
        tsm_anno_coord1 = data['target_anno_coord'][1]
        tsm_coords0 = data['target_candidate_coords'][0]
        tsm_coords1 = data['target_candidate_coords'][1]
        scores0 = data['target_candidate_scores'][0]
        scores1 = data['target_candidate_scores'][1]
        out['img_shape0'] = [torch.tensor(img0.shape[:2])]
        out['img_shape1'] = [torch.tensor(img1.shape[:2])]
        frame_crop0 = prutils.sample_target_from_crop_region(img0, sa_box0, self.output_sz)
        frame_crop1 = prutils.sample_target_from_crop_region(img1, sa_box1, self.output_sz)
        frame_crop0 = self.transform['train'](image=frame_crop0)
        frame_crop1 = self.transform['train'](image=frame_crop1)
        out['img_cropped0'] = [frame_crop0]
        out['img_cropped1'] = [frame_crop1]
        gt_idx0 = self._find_gt_candidate_index(tsm_coords0, tsm_anno_coord0)
        gt_idx1 = self._find_gt_candidate_index(tsm_coords1, tsm_anno_coord1)
        # Map score-map coordinates to full-image coordinates.
        x0, y0, w0, h0 = sa_box0.tolist()
        x1, y1, w1, h1 = sa_box1.tolist()
        img_coords0 = torch.stack([
            h0 * (tsm_coords0[:, 0].float() / (self.score_map_sz[0] - 1)) + y0,
            w0 * (tsm_coords0[:, 1].float() / (self.score_map_sz[1] - 1)) + x0
        ]).permute(1, 0)
        img_coords1 = torch.stack([
            h1 * (tsm_coords1[:, 0].float() / (self.score_map_sz[0] - 1)) + y1,
            w1 * (tsm_coords1[:, 1].float() / (self.score_map_sz[1] - 1)) + x1
        ]).permute(1, 0)
        out['candidate_img_coords0'] = [img_coords0]
        out['candidate_img_coords1'] = [img_coords1]
        out['candidate_tsm_coords0'] = [tsm_coords0]
        out['candidate_tsm_coords1'] = [tsm_coords1]
        out['candidate_scores0'] = [scores0]
        out['candidate_scores1'] = [scores1]
        out['candidate_valid0'] = [torch.ones_like(scores0)]
        out['candidate_valid1'] = [torch.ones_like(scores1)]
        # Prepare gt labels: only the annotated target match is supervised; everything else is ignored (-2).
        gt_assignment = torch.zeros((scores0.shape[0], scores1.shape[0]))
        gt_assignment[gt_idx0, gt_idx1] = 1
        gt_matches0 = torch.zeros(scores0.shape[0]) - 2
        gt_matches1 = torch.zeros(scores1.shape[0]) - 2
        gt_matches0[gt_idx0] = gt_idx1
        gt_matches1[gt_idx1] = gt_idx0
        out['gt_matches0'] = [gt_matches0]
        out['gt_matches1'] = [gt_matches1]
        out['gt_assignment'] = [gt_assignment]
        return out
    def _find_gt_candidate_index(self, coords, target_anno_coord):
        """ Return the index of the candidate closest (squared Euclidean) to the annotated target coordinate. """
        gt_idx = torch.argmin(torch.sum((coords - target_anno_coord) ** 2, dim=1))
        return gt_idx
    def _gt_candidate_drop_out(self):
        """ Randomly decide whether (p = 0.25) and in which frame (0 or 1) to drop the target candidate. """
        dropout = (torch.rand(1) < 0.25).item()
        frameid = torch.randint(0, 2, (1,)).item()
        return frameid, dropout
    def _pad_with_fake_candidates_drop_gt(self, img_coords, dropout, gt_idx, sa_box, img_shape):
        """ Pad the candidate list to num_target_candidates with artificial coordinates (placed far from
        existing candidates, inside the search area), optionally dropping the target candidate first.
        returns: (padded coords, validity mask with 1 for real candidates) """
        H, W = img_shape[:2]
        num_peaks = min(img_coords.shape[0], self.num_target_candidates)
        x, y, w, h = sa_box.long().tolist()
        # Clip the search area to the image bounds; fake candidates are sampled inside this region.
        lowx, lowy, highx, highy = max(0, x), max(0, y), min(W, x + w), min(H, y + h)
        img_coords_pad = torch.zeros((self.num_target_candidates, 2))
        valid = torch.zeros(self.num_target_candidates)
        img_coords_pad[:num_peaks] = img_coords[:num_peaks]
        valid[:num_peaks] = 1
        # Remember the gt location so fake candidates keep their distance from it even when it is dropped.
        gt_coords = img_coords_pad[gt_idx].clone().unsqueeze(0)
        if dropout:
            valid[gt_idx] = 0
            img_coords_pad[gt_idx] = 0
        filled = valid.clone()
        for i in range(0, self.num_target_candidates):
            if filled[i] == 0:
                # Sample 20 random locations and keep the one farthest from all used candidates (farthest-point).
                cs = torch.cat([
                    torch.rand((20, 1)) * (highy - lowy) + lowy,
                    torch.rand((20, 1)) * (highx - lowx) + lowx
                ], dim=1)
                cs_used = torch.cat([img_coords_pad[filled == 1], gt_coords], dim=0)
                dist = torch.sqrt(torch.sum((cs_used[:, None, :] - cs[None, :, :]) ** 2, dim=2))
                min_dist = torch.min(dist, dim=0).values
                max_min_dist_idx = torch.argmax(min_dist)
                img_coords_pad[i] = cs[max_min_dist_idx]
                filled[i] = 1
        return img_coords_pad, valid
    def _candidate_drop_out(self, coords0, coords1):
        """ Randomly drop up to 25% of the candidates in one of the two (identical) candidate sets and pad
        both sets to num_target_candidates. Returns padded coords and validity masks for both frames. """
        num_candidates = min(coords1.shape[0], self.num_target_candidates)
        num_candidates_to_drop = torch.round(0.25*num_candidates*torch.rand(1)).long()
        idx = torch.randperm(num_candidates)[:num_candidates_to_drop]
        coords_pad0 = torch.zeros((self.num_target_candidates, 2))
        valid0 = torch.zeros(self.num_target_candidates)
        coords_pad1 = torch.zeros((self.num_target_candidates, 2))
        valid1 = torch.zeros(self.num_target_candidates)
        coords_pad0[:num_candidates] = coords0[:num_candidates]
        coords_pad1[:num_candidates] = coords1[:num_candidates]
        valid0[:num_candidates] = 1
        valid1[:num_candidates] = 1
        # Drop from frame 0 or frame 1 with equal probability.
        if torch.rand(1) < 0.5:
            coords_pad0[idx] = 0
            valid0[idx] = 0
        else:
            coords_pad1[idx] = 0
            valid1[idx] = 0
        return coords_pad0, coords_pad1, valid0, valid1
    def _pad_with_fake_candidates(self, img_coords_pad0, img_coords_pad1, valid0, valid1, sa_box0, sa_box1, img_shape):
        """ Fill invalid slots of both candidate sets with artificial coordinates placed far from every
        already-used candidate (farthest-point sampling over 20 random draws per slot). """
        H, W = img_shape[:2]
        x0, y0, w0, h0 = sa_box0.long().tolist()
        x1, y1, w1, h1 = sa_box1.long().tolist()
        # Per-frame sampling regions: each search area clipped to the image bounds.
        lowx = [max(0, x0), max(0, x1)]
        lowy = [max(0, y0), max(0, y1)]
        highx = [min(W, x0 + w0), min(W, x1 + w1)]
        highy = [min(H, y0 + h0), min(H, y1 + h1)]
        filled = [valid0.clone(), valid1.clone()]
        img_coords_pad = [img_coords_pad0.clone(), img_coords_pad1.clone()]
        for i in range(0, self.num_target_candidates):
            for k in range(0, 2):
                if filled[k][i] == 0:
                    cs = torch.cat([
                        torch.rand((20, 1)) * (highy[k] - lowy[k]) + lowy[k],
                        torch.rand((20, 1)) * (highx[k] - lowx[k]) + lowx[k]
                    ], dim=1)
                    # Keep distance from candidates already placed in BOTH frames.
                    cs_used = torch.cat([img_coords_pad[0][filled[0]==1], img_coords_pad[1][filled[1]==1]], dim=0)
                    dist = torch.sqrt(torch.sum((cs_used[:, None, :] - cs[None, :, :]) ** 2, dim=2))
                    min_dist = torch.min(dist, dim=0).values
                    max_min_dist_idx = torch.argmax(min_dist)
                    img_coords_pad[k][i] = cs[max_min_dist_idx]
                    filled[k][i] = 1
        return img_coords_pad[0], img_coords_pad[1]
    def _add_fake_candidate_scores(self, scores, valid):
        """ Build the padded score vector: real scores where valid, small random scores (<= 0.075) for
        artificial candidates. """
        scores_pad = torch.zeros(valid.shape[0])
        scores_pad[valid == 1] = scores[:self.num_target_candidates][valid[:scores.shape[0]] == 1]
        scores_pad[valid == 0] = (torch.abs(torch.randn((valid==0).sum()))/50).clamp_max(0.025) + 0.05
        return scores_pad
    def _augment_scores(self, scores, valid, drop):
        """ Perturb the scores of valid candidates with Gaussian noise to increase matching difficulty.
        When the two highest peaks are strong and close, optionally move them closer or swap their order. """
        num_valid = (valid==1).sum()
        noise = 0.1 * torch.randn(num_valid)
        if num_valid > 2 and not drop:
            if scores[1] > 0.5*scores[0] and torch.all(scores[:2] > 0.2):
                # two valid peaks with a high score that are relatively close.
                mode = torch.randint(0, 3, size=(1,))
                if mode == 0:
                    # augment randomly.
                    scores_aug = torch.sort(noise + scores[valid==1], descending=True)[0]
                elif mode == 1:
                    # move peaks closer
                    scores_aug = torch.sort(noise + scores[valid == 1], descending=True)[0]
                    scores_aug[0] = scores[valid==1][0] - torch.abs(noise[0])
                    scores_aug[1] = scores[valid==1][1] + torch.abs(noise[1])
                    scores_aug[:2] = torch.sort(scores_aug[:2], descending=True)[0]
                else:
                    # move peaks closer and switch
                    scores_aug = torch.sort(noise + scores[valid == 1], descending=True)[0]
                    scores_aug[0] = scores[valid==1][0] - torch.abs(noise[0])
                    scores_aug[1] = scores[valid==1][1] + torch.abs(noise[1])
                    scores_aug[:2] = torch.sort(scores_aug[:2], descending=True)[0]
                    idx = torch.arange(num_valid)
                    idx[:2] = torch.tensor([1, 0])
                    scores_aug = scores_aug[idx]
            else:
                scores_aug = torch.sort(scores[valid==1] + noise, descending=True)[0]
        else:
            scores_aug = torch.sort(scores[valid == 1] + noise, descending=True)[0]
        # Keep augmented scores above the fake-candidate score range.
        scores_aug = scores_aug.clamp_min(0.075)
        scores[valid==1] = scores_aug.clone()
        return scores
    def _augment_coords(self, coords, img_shape, search_area_box):
        """ Add an independent random offset to each candidate coordinate, bounded so that candidates
        cannot swap positions (offset < half the minimum pairwise distance, capped at one score-map cell). """
        H, W = img_shape[:2]
        _, _, w, h = search_area_box.float()
        # add independent offset to each coord
        d = torch.sqrt(torch.sum((coords[None, :] - coords[:, None])**2, dim=2))
        if torch.all(d == 0):
            xmin = 0.5*w/self.score_map_sz[1]
            ymin = 0.5*h/self.score_map_sz[0]
        else:
            dmin = torch.min(d[d>0])
            xmin = (math.sqrt(2)*dmin/4).clamp_max(w/self.score_map_sz[1])
            ymin = (math.sqrt(2)*dmin/4).clamp_max(h/self.score_map_sz[0])
        txi = torch.rand(coords.shape[0])*2*xmin - xmin
        tyi = torch.rand(coords.shape[0])*2*ymin - ymin
        coords[:, 0] += tyi
        coords[:, 1] += txi
        coords[:, 0] = coords[:, 0].clamp(0, H)
        coords[:, 1] = coords[:, 1].clamp(0, W)
        return coords
class LTRBDenseRegressionProcessing(BaseProcessing):
""" The processing class used for training ToMP that supports dense bounding box regression.
"""
    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',
                 max_scale_change=None, mode='pair', stride=16, label_function_params=None,
                 center_sampling_radius=0.0, use_normalized_coords=True, *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                   extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                  extracting the search region. See _get_jittered_box for how the jittering is done.
            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.
                        If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.
                        If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.
            max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
            stride - Feature stride of the backbone; the dense LTRB regression targets are computed on a grid with
                     this step over the output image (see _generate_ltbr_regression_targets).
            label_function_params - Arguments for the Gaussian label generation process.
                                    See _generate_label_function for details.
            center_sampling_radius - NOTE(review): not referenced by the methods visible in this chunk; presumably
                                     restricts which grid locations are treated as positives — confirm against callers.
            use_normalized_coords - If True, regression targets are presumably normalized by the output size in
                                    _generate_ltbr_regression_targets — confirm, the branch lies outside this chunk.
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.crop_type = crop_type
        self.mode = mode
        self.max_scale_change = max_scale_change
        self.stride = stride
        self.label_function_params = label_function_params
        self.center_sampling_radius = center_sampling_radius
        self.use_normalized_coords = use_normalized_coords
def _get_jittered_box(self, box, mode):
""" Jitter the input box
args:
box - input bounding box
mode - string 'train' or 'test' indicating train or test data
returns:
torch.Tensor - jittered box
"""
jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)
return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)
def _generate_label_function(self, target_bb):
""" Generates the gaussian label function centered at target_bb
args:
target_bb - target bounding box (num_images, 4)
returns:
torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample
"""
gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4),
self.label_function_params['sigma_factor'],
self.label_function_params['kernel_sz'],
self.label_function_params['feature_sz'], self.output_sz,
end_pad_if_even=self.label_function_params.get(
'end_pad_if_even', True))
return gauss_label
def _generate_ltbr_regression_targets(self, target_bb):
shifts_x = torch.arange(
0, self.output_sz, step=self.stride,
dtype=torch.float32, device=target_bb.device
)
shifts_y = torch.arange(
0, self.output_sz, step=self.stride,
dtype=torch.float32, device=target_bb.device
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
locations = torch.stack((shift_x, shift_y), dim=1) + self.stride // 2
xs, ys = locations[:, 0], locations[:, 1]
xyxy = torch.stack([target_bb[:, 0], target_bb[:, 1], target_bb[:, 0] + target_bb[:, 2],
target_bb[:, 1] + target_bb[:, 3]], dim=1)
l = xs[:, None] - xyxy[:, 0][None]
t = ys[:, None] - xyxy[:, 1][None]
r = xyxy[:, 2][None] - xs[:, None]
b = xyxy[:, 3][None] - ys[:, None]
reg_targets_per_im = torch.stack([l, t, r, b], dim=2).reshape(-1, 4)
if self.use_normalized_coords:
reg_targets_per_im = reg_targets_per_im / self.output_sz
if self.center_sampling_radius > 0:
is_in_box = self._compute_sampling_region(xs, xyxy, ys)
else:
is_in_box = (reg_targets_per_im.min(dim=1)[0] > 0)
sz = self.output_sz//self.stride
nb = target_bb.shape[0]
reg_targets_per_im = reg_targets_per_im.reshape(sz, sz, nb, 4).permute(2, 3, 0, 1)
is_in_box = is_in_box.reshape(sz, sz, nb, 1).permute(2, 3, 0, 1)
return reg_targets_per_im, is_in_box
def _compute_sampling_region(self, xs, xyxy, ys):
cx = (xyxy[:, 0] + xyxy[:, 2]) / 2
cy = (xyxy[:, 1] + xyxy[:, 3]) / 2
xmin = cx - self.center_sampling_radius * self.stride
ymin = cy - self.center_sampling_radius * self.stride
xmax = cx + self.center_sampling_radius * self.stride
ymax = cy + self.center_sampling_radius * self.stride
center_gt = xyxy.new_zeros(xyxy.shape)
center_gt[:, 0] = torch.where(xmin > xyxy[:, 0], xmin, xyxy[:, 0])
center_gt[:, 1] = torch.where(ymin > xyxy[:, 1], ymin, xyxy[:, 1])
center_gt[:, 2] = torch.where(xmax > xyxy[:, 2], xyxy[:, 2], xmax)
center_gt[:, 3] = torch.where(ymax > xyxy[:, 3], xyxy[:, 3], ymax)
left = xs[:, None] - center_gt[:, 0]
right = center_gt[:, 2] - xs[:, None]
top = ys[:, None] - center_gt[:, 1]
bottom = center_gt[:, 3] - ys[:, None]
center_bbox = torch.stack((left, top, right, bottom), -1)
is_in_box = center_bbox.min(-1)[0] > 0
return is_in_box
    def __call__(self, data: TensorDict):
        """Process one train/test frame pair (or sequence) into network-ready samples.

        args:
            data - The input data, should contain the following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno',
                'train_ltrb_target', 'train_sample_region',
                'test_ltrb_target', 'test_sample_region',
                'test_label' (optional), 'train_label' (optional)
        """
        # Joint transforms use the same random parameters for train and test;
        # new_roll=False reuses the roll drawn for the train frames.
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'],
                                                                               bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'],
                                                                            bbox=data['test_anno'], new_roll=False)
        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"
            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]
            # Crop the search region around the jittered box and resize it.
            crops, boxes = prutils.target_image_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                     self.search_area_factor, self.output_sz, mode=self.crop_type,
                                                     max_scale_change=self.max_scale_change)
            # Per-split (non-joint) image/box transforms.
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)
        # Prepare output: stack per-frame tensors in sequence mode, otherwise
        # unwrap the single-element lists.
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
        # Generate Gaussian classification labels (only when configured).
        if self.label_function_params is not None:
            data['train_label'] = self._generate_label_function(data['train_anno'])
            data['test_label'] = self._generate_label_function(data['test_anno'])
        # Dense ltrb regression targets plus their validity masks.
        data['test_ltrb_target'], data['test_sample_region'] = self._generate_ltbr_regression_targets(data['test_anno'])
        data['train_ltrb_target'], data['train_sample_region'] = self._generate_ltbr_regression_targets(data['train_anno'])
        return data
|
en
| 0.766229
|
Base class for Processing. Processing class is used to process the data returned by a dataset, before passing it through the network. For example, it can be used to crop a search region around the object, apply various data augmentations, etc. args: transform - The set of transformations to be applied on the images. Used only if train_transform or test_transform is None. train_transform - The set of transformations to be applied on the train images. If None, the 'transform' argument is used instead. test_transform - The set of transformations to be applied on the test images. If None, the 'transform' argument is used instead. joint_transform - The set of transformations to be applied 'jointly' on the train and test images. For example, it can be used to convert both test and train images to grayscale. The processing class used for training ATOM. The images are processed in the following way. First, the target bounding box is jittered by adding some noise. Next, a square region (called search region ) centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is always at the center of the search region. The search region is then resized to a fixed size given by the argument output_sz. A set of proposals are then generated for the test images by jittering the ground truth box. args: search_area_factor - The size of the search region relative to the target size. output_sz - An integer, denoting the size to which the search region is resized. The search region is always square. center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before extracting the search region. See _get_jittered_box for how the jittering is done. scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before extracting the search region. 
See _get_jittered_box for how the jittering is done. proposal_params - Arguments for the proposal generation process. See _generate_proposals for details. mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames Jitter the input box args: box - input bounding box mode - string 'train' or 'test' indicating train or test data returns: torch.Tensor - jittered box Generates proposals by adding noise to the input box args: box - input box returns: torch.Tensor - Array of shape (num_proposals, 4) containing proposals torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. The IoU is mapped to [-1, 1] # Generate proposals # Map to [-1, 1] args: data - The input data, should contain the following fields: 'train_images', test_images', 'train_anno', 'test_anno' returns: TensorDict - output data block with following fields: 'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_iou' # Apply joint transforms # Add a uniform noise to the center pos # Crop image region centered at jittered_anno box # Apply transforms # Generate proposals # Prepare output Based on ATOMProcessing. It supports training ATOM using the Maximum Likelihood or KL-divergence based learning introduced in [https://arxiv.org/abs/1909.12297] and in PrDiMP [https://arxiv.org/abs/2003.12565]. args: search_area_factor - The size of the search region relative to the target size. output_sz - An integer, denoting the size to which the search region is resized. The search region is always square. center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before extracting the search region. See _get_jittered_box for how the jittering is done. scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before extracting the search region. See _get_jittered_box for how the jittering is done. 
proposal_params - Arguments for the proposal generation process. See _generate_proposals for details. mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames Jitter the input box args: box - input bounding box mode - string 'train' or 'test' indicating train or test data returns: torch.Tensor - jittered box # Generate proposals args: data - The input data, should contain the following fields: 'train_images', test_images', 'train_anno', 'test_anno' returns: TensorDict - output data block with following fields: 'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_density', 'gt_density' # Apply joint transforms # Add a uniform noise to the center pos # Crop image region centered at jittered_anno box # Apply transforms # Generate proposals # Prepare output Same as ATOMProcessing but using the GMM-based sampling of proposal boxes used in KLBBregProcessing. Jitter the input box args: box - input bounding box mode - string 'train' or 'test' indicating train or test data returns: torch.Tensor - jittered box # Generate proposals # Apply joint transforms # Add a uniform noise to the center pos # Crop image region centered at jittered_anno box # Apply transforms # Generate proposals # Prepare output The processing class used for training DiMP. The images are processed in the following way. First, the target bounding box is jittered by adding some noise. Next, a square region (called search region ) centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is always at the center of the search region. The search region is then resized to a fixed size given by the argument output_sz. A Gaussian label centered at the target is generated for each image. 
These label functions are used for computing the loss of the predicted classification model on the test images. A set of proposals are also generated for the test images by jittering the ground truth box. These proposals are used to train the bounding box estimating branch. args: search_area_factor - The size of the search region relative to the target size. output_sz - An integer, denoting the size to which the search region is resized. The search region is always square. center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before extracting the search region. See _get_jittered_box for how the jittering is done. scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before extracting the search region. See _get_jittered_box for how the jittering is done. crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image. If 'inside', the search region crop is shifted/shrunk to fit completely inside the image. If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image. max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major') mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames proposal_params - Arguments for the proposal generation process. See _generate_proposals for details. label_function_params - Arguments for the label generation process. See _generate_label_function for details. 
Jitter the input box args: box - input bounding box mode - string 'train' or 'test' indicating train or test data returns: torch.Tensor - jittered box Generates proposals by adding noise to the input box args: box - input box returns: torch.Tensor - Array of shape (num_proposals, 4) containing proposals torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. The IoU is mapped to [-1, 1] # Generate proposals # Map to [-1, 1] Generates the gaussian label function centered at target_bb args: target_bb - target bounding box (num_images, 4) returns: torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample args: data - The input data, should contain the following fields: 'train_images', test_images', 'train_anno', 'test_anno' returns: TensorDict - output data block with following fields: 'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_iou', 'test_label' (optional), 'train_label' (optional), 'test_label_density' (optional), 'train_label_density' (optional) # Add a uniform noise to the center pos # Generate proposals # Prepare output # Generate label functions The processing class used for training PrDiMP that additionally supports the probabilistic classifier and bounding box regressor. See DiMPProcessing for details. args: search_area_factor - The size of the search region relative to the target size. output_sz - An integer, denoting the size to which the search region is resized. The search region is always square. center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before extracting the search region. See _get_jittered_box for how the jittering is done. scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before extracting the search region. See _get_jittered_box for how the jittering is done. 
crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image. If 'inside', the search region crop is shifted/shrunk to fit completely inside the image. If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image. max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major') mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames proposal_params - Arguments for the proposal generation process. See _generate_proposals for details. label_function_params - Arguments for the label generation process. See _generate_label_function for details. label_density_params - Arguments for the label density generation process. See _generate_label_function for details. Jitter the input box args: box - input bounding box mode - string 'train' or 'test' indicating train or test data returns: torch.Tensor - jittered box Generate proposal sample boxes from a GMM proposal distribution and compute their ground-truth density. This is used for ML and KL based regression learning of the bounding box regressor. 
args: box - input bounding box # Generate proposals Generates the gaussian label function centered at target_bb args: target_bb - target bounding box (num_images, 4) returns: torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample Generates the gaussian label density centered at target_bb args: target_bb - target bounding box (num_images, 4) returns: torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample args: data - The input data, should contain the following fields: 'train_images', test_images', 'train_anno', 'test_anno' returns: TensorDict - output data block with following fields: 'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_density', 'gt_density', 'test_label' (optional), 'train_label' (optional), 'test_label_density' (optional), 'train_label_density' (optional) # Add a uniform noise to the center pos # Generate proposals # Prepare output # Generate label functions The processing class used for training LWL. The images are processed in the following way. First, the target bounding box (computed using the segmentation mask)is jittered by adding some noise. Next, a rectangular region (called search region ) centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is always at the center of the search region. The search region is then resized to a fixed size given by the argument output_sz. The argument 'crop_type' determines how out-of-frame regions are handled when cropping the search region. For instance, if crop_type == 'replicate', the boundary pixels are replicated in case the search region crop goes out of frame. If crop_type == 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image. 
args: search_area_factor - The size of the search region relative to the target size. output_sz - The size (width, height) to which the search region is resized. The aspect ratio is always preserved when resizing the search region center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before extracting the search region. See _get_jittered_box for how the jittering is done. scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before extracting the search region. See _get_jittered_box for how the jittering is done. crop_type - Determines how out-of-frame regions are handled when cropping the search region. If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image. If 'inside', the search region crop is shifted/shrunk to fit completely inside the image. If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image. max_scale_change - Maximum allowed scale change when shrinking the search region to fit the image (only applicable to 'inside' and 'inside_major' cropping modes). In case the desired shrink factor exceeds the max_scale_change, the search region is only shrunk to the factor max_scale_change. Out-of-frame regions are then handled by replicating the boundary pixels. If max_scale_change is set to None, unbounded shrinking is allowed. mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames new_roll - Whether to use the same random roll values for train and test frames when applying the joint transformation. If True, a new random roll is performed for the test frame transformations. Thus, if performing random flips, the set of train frames and the set of test frames will be flipped independently. 
Jitter the input box args: box - input bounding box mode - string 'train' or 'test' indicating train or test data returns: torch.Tensor - jittered box # Apply joint transformations. i.e. All train/test frames in a sequence are applied the transformation with the # same parameters # Add a uniform noise to the center pos # Extract a crop containing the target # Apply independent transformations to each image # Prepare output The processing class used for training KYS. The images are processed in the following way. First, the target bounding box is jittered by adding some noise. Next, a square region (called search region ) centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is always at the center of the search region. The search region is then resized to a fixed size given by the argument output_sz. A Gaussian label centered at the target is generated for each image. These label functions are used for computing the loss of the predicted classification model on the test images. A set of proposals are also generated for the test images by jittering the ground truth box. These proposals can be used to train the bounding box estimating branch. args: search_area_factor - The size of the search region relative to the target size. output_sz - An integer, denoting the size to which the search region is resized. The search region is always square. center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before extracting the search region. See _generate_synthetic_motion for how the jittering is done. scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before extracting the search region. See _generate_synthetic_motion for how the jittering is done. proposal_params - Arguments for the proposal generation process. 
See _generate_proposals for details. label_function_params - Arguments for the label generation process. See _generate_label_function for details. min_crop_inside_ratio - Minimum amount of cropped search area which should be inside the image. See _check_if_crop_inside_image for details. # Generate proposals # Generate synthetic sequence # Crop images # Add transforms The processing class used for training KeepTrack. The distractor dataset for LaSOT is required. Two different modes are available partial supervision (partial_sup) or self-supervision (self_sup). For partial supervision the candidates their meta data and the images of two consecutive frames are used to form a single supervision cue among the candidates corresponding to the annotated target object. All other candidates are ignored. First, the search area region is cropped from the image followed by augmentation. Then, the candidate matching with the annotated target object is detected to supervise the matching. Then, the score map coordinates of the candidates are transformed to full image coordinates. Next, it is randomly decided whether the candidates corresponding to the target is dropped in one of the frames to simulate re-detection, occlusions or normal tracking. To enable training in batches the number of candidates to match between two frames is fixed. Hence, artificial candidates are added. Finally, the assignment matrix is formed where a 1 denotes a match between two candidates, -1 denotes that a match is not available and -2 denotes that no information about the matching is available. These entries will be ignored. The second method for partial supervision is used for validation only. It uses only the detected candidates and thus results in different numbers of candidates for each frame-pair such that training in batches is not possible. For self-supervision only a singe frame and its candidates are required. The second frame and candidates are artificially created using augmentations. 
Here full supervision among all candidates is enabled. First, the search area region is cropped from the full image. Then, the cropping coordinates are augmented to crop a slightly different view that mimics search area region of the next frame. Next, the two image regions are augmented further. Then, the matching between candidates is determined by randomly dropping candidates to mimic occlusions or re-detections. Again, the number of candidates is fixed by adding artificial candidates that are ignored during training. In addition, the scores and coordinates of each candidate are altered to increase matching difficulty. Finally, the assignment matrix is formed where a 1 denotes a match between two candidates, -1 denotes that a match is not available. # prepared cropped image # make sure that the augmented search_are_box is only used for the fake img_coords the other need the original. # Prepare gt labels # Prepare gt labels # Prepare gt labels # two valid peaks with a high score that are relatively close. # augment randomly. # move peaks closer # move peaks closer and switch # add independent offset to each coord The processing class used for training ToMP that supports dense bounding box regression. args: search_area_factor - The size of the search region relative to the target size. output_sz - An integer, denoting the size to which the search region is resized. The search region is always square. center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before extracting the search region. See _get_jittered_box for how the jittering is done. scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before extracting the search region. See _get_jittered_box for how the jittering is done. crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image. 
If 'inside', the search region crop is shifted/shrunk to fit completely inside the image. If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image. max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major') mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames proposal_params - Arguments for the proposal generation process. See _generate_proposals for details. label_function_params - Arguments for the label generation process. See _generate_label_function for details. label_density_params - Arguments for the label density generation process. See _generate_label_function for details. Jitter the input box args: box - input bounding box mode - string 'train' or 'test' indicating train or test data returns: torch.Tensor - jittered box Generates the gaussian label function centered at target_bb args: target_bb - target bounding box (num_images, 4) returns: torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample args: data - The input data, should contain the following fields: 'train_images', test_images', 'train_anno', 'test_anno' returns: TensorDict - output data block with following fields: 'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_density', 'gt_density', 'test_label' (optional), 'train_label' (optional), 'test_label_density' (optional), 'train_label_density' (optional) # Add a uniform noise to the center pos # Prepare output # Generate label functions
| 3.041396
| 3
|
desktop/core/ext-py/python-pam-1.8.4/setup.py
|
kokosing/hue
| 5,079
|
6626690
|
import os
from setuptools import setup
def read(fname):
    """Return the contents of *fname*, resolved relative to this script.

    Resolving against ``__file__`` keeps the build independent of the
    current working directory.
    """
    # Close the handle deterministically instead of relying on the GC
    # (the original `open(...).read()` left the file object open).
    with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
        return fp.read()

# One-line summary shown on the package index.
__sdesc = 'Python PAM module using ctypes, py3/py2'

setup(name = 'python-pam',
      description = __sdesc,
      long_description = read('README.md'),
      py_modules = ['pam'],
      version = '1.8.4',
      author = '<NAME>',
      author_email = '<EMAIL>',
      maintainer = '<NAME>',
      maintainer_email = '<EMAIL>',
      url = 'https://github.com/FirefighterBlu3/python-pam',
      download_url = 'https://github.com/FirefighterBlu3/python-pam',
      license = 'License :: OSI Approved :: MIT License',
      platforms = ['i686','x86_64'],
      classifiers = [
          'Development Status :: 6 - Mature',
          'Environment :: Plugins',
          'Intended Audience :: Developers',
          'Intended Audience :: Information Technology',
          'Intended Audience :: System Administrators',
          'License :: OSI Approved :: MIT License',
          'Operating System :: POSIX',
          'Operating System :: POSIX :: Linux',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 3',
          'Topic :: Security',
          'Topic :: System :: Systems Administration :: Authentication/Directory',
      ],
      )
|
import os
from setuptools import setup
def read(fname):
    """Read and return the text of *fname* next to this setup script."""
    path = os.path.join(os.path.dirname(__file__), fname)
    return open(path).read()

# Short description reused in the metadata below.
__sdesc = 'Python PAM module using ctypes, py3/py2'

# Assemble the metadata first, then hand it to setup() in one call.
_metadata = dict(
    name='python-pam',
    description=__sdesc,
    long_description=read('README.md'),
    py_modules=['pam'],
    version='1.8.4',
    author='<NAME>',
    author_email='<EMAIL>',
    maintainer='<NAME>',
    maintainer_email='<EMAIL>',
    url='https://github.com/FirefighterBlu3/python-pam',
    download_url='https://github.com/FirefighterBlu3/python-pam',
    license='License :: OSI Approved :: MIT License',
    platforms=['i686', 'x86_64'],
    classifiers=[
        'Development Status :: 6 - Mature',
        'Environment :: Plugins',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Security',
        'Topic :: System :: Systems Administration :: Authentication/Directory',
    ],
)
setup(**_metadata)
|
none
| 1
| 1.610497
| 2
|
|
orig/models/temp_vaeold.py
|
IBM/oct-glaucoma-forecast
| 0
|
6626691
|
<gh_stars>0
import torch
import torch.nn as nn
from models.model_blocks import RNFLEncoder, RNFLDecoder, Encoder, Decoder
from models.model_blocks import VFTEncoder, VFTDecoder
#@todo implement info vae loss /mmd vae loss
class VAE(nn.Module):
    """Variational autoencoder over a single imaging modality.

    args:
        latent_dim - dimensionality of the latent code z
        type - modality selector, one of 'vft', 'rnfl' or 'gcl'; 'vft' uses the
            visual-field encoder/decoder pair, the other two share a generic
            64x64 single-channel convolutional pair
    """
    # @todo implement info vae loss / mmd vae loss

    def __init__(self, latent_dim, type):
        super(VAE, self).__init__()
        self.latent_dim = latent_dim
        assert type in ['vft', 'rnfl', 'gcl'], 'invalid type'
        if (type == 'vft'):
            self.encoder = VFTEncoder(latent_dim=latent_dim)
            self.decoder = VFTDecoder(z_size=latent_dim)
        else:
            # 'rnfl' and 'gcl' share the generic conv pair on 64x64 inputs.
            self.encoder = Encoder(input_shape=(64, 64), channel_in=1, z_size=64,
                                   num_downsamples=4, latent_dim=latent_dim)
            self.decoder = Decoder(z_size=latent_dim, channel_out=1, num_upsamples=4,
                                   image_size=64)
        #self.encoder = RNFLEncoder(latent_dim=latent_dim, rnfl_imgChans=1, rnfl_fBase=32)
        #self.decoder = RNFLDecoder(z_size=latent_dim,rnfl_imgChans=1,rnfl_fBase=32)

    def reparametrize(self, mu, logvar):
        """Sample z ~ N(mu, exp(logvar)) with the reparametrization trick.

        During evaluation the mean is returned deterministically.
        """
        if self.training:
            std = logvar.mul(0.5).exp_()
            # torch.randn_like replaces the deprecated
            # torch.autograd.Variable(std.data.new(...).normal_()) idiom.
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:  # return mean during inference
            return mu

    def forward(self, x):
        """Encode x, sample a latent code, decode it.

        returns:
            ([reconstruction], [mu, logvar])
        """
        mu, logvar = self.infer(x)
        pred_z = self.reparametrize(mu, logvar)
        pred_x = self.decoder(pred_z)
        return [pred_x], [mu, logvar]

    def infer(self, x):
        """
        Posterior inference
        :param x: input batch
        :return: (mu, logvar) split from the encoder output
        """
        out = self.encoder(x)
        mu, logvar = out[:, :self.latent_dim], out[:, self.latent_dim:2 * self.latent_dim]
        return mu, logvar
|
import torch
import torch.nn as nn
from models.model_blocks import RNFLEncoder, RNFLDecoder, Encoder, Decoder
from models.model_blocks import VFTEncoder, VFTDecoder
#@todo implement info vae loss /mmd vae loss
class VAE(nn.Module):
    """A VAE whose encoder/decoder pair is selected by modality type."""
    # @todo implement info vae loss / mmd vae loss

    def __init__(self, latent_dim, type):
        super(VAE, self).__init__()
        self.latent_dim = latent_dim
        assert type in ['vft', 'rnfl', 'gcl'], 'invalid type'
        if (type == 'vft'):
            enc = VFTEncoder(latent_dim=latent_dim)
            dec = VFTDecoder(z_size=latent_dim)
        else:
            enc = Encoder(input_shape=(64, 64), channel_in=1, z_size=64,
                          num_downsamples=4, latent_dim=latent_dim)
            dec = Decoder(z_size=latent_dim, channel_out=1, num_upsamples=4,
                          image_size=64)
        self.encoder = enc
        self.decoder = dec
        #self.encoder = RNFLEncoder(latent_dim=latent_dim, rnfl_imgChans=1, rnfl_fBase=32)
        #self.decoder = RNFLDecoder(z_size=latent_dim,rnfl_imgChans=1,rnfl_fBase=32)

    def reparametrize(self, mu, logvar):
        """Draw z from N(mu, exp(logvar)) while training; return mu in eval."""
        if not self.training:
            # return mean during inference
            return mu
        std = logvar.mul(0.5).exp_()
        eps = torch.autograd.Variable(std.data.new(std.size()).normal_())
        return eps.mul(std).add_(mu)

    def forward(self, x):
        """Run a full encode -> sample -> decode pass."""
        mu, logvar = self.infer(x)
        z = self.reparametrize(mu, logvar)
        recon = self.decoder(z)
        return [recon], [mu, logvar]

    def infer(self, x):
        """
        Posterior inference
        :param x: input batch
        :return: (mu, logvar) halves of the encoder output
        """
        out = self.encoder(x)
        d = self.latent_dim
        return out[:, :d], out[:, d:2 * d]
|
en
| 0.308253
|
#@todo implement info vae loss /mmd vae loss #self.encoder = RNFLEncoder(latent_dim=latent_dim, rnfl_imgChans=1, rnfl_fBase=32) #self.decoder = RNFLDecoder(z_size=latent_dim,rnfl_imgChans=1,rnfl_fBase=32) # return mean during inference Posterior inference :param x: :return:
| 2.249984
| 2
|
public/code2.py
|
luisneto98/code-coliseum-web
| 0
|
6626692
|
<filename>public/code2.py<gh_stars>0
from enum import Enum
import sys
def converte_array(args=('0', '0', '0', '0', '0', '0', '0', '0', '0')):
    """Drop the program name (args[0]) and reshape the remaining cell
    strings into a 3x3 board matrix (list of three row lists).

    The default is an immutable tuple to avoid the shared-mutable-default
    pitfall of the original list default; list(...) keeps the returned rows
    as lists in every case.
    """
    array = list(args[1:])
    matrix = [array[0:3], array[3:6], array[6:9]]
    return matrix
# Cell markers used in the board matrix (values arrive as strings from argv).
ESPACO_VAZIO = '0'  # empty cell
JOGADA_SUA = '1'  # our move
JOGADA_ADVERSARIO = '2'  # opponent's move
'''
lOCALIZAÇÃO DAS POSIÇÕES
UM | DOIS | TRÊS
--------|--------|--------
QUATRO | CINCO | SEIS
--------|--------|--------
SETE | OITO | NOVE
'''
class Posi(Enum):
    """Board positions numbered 1-9, left-to-right, top-to-bottom
    (see the layout diagram above)."""
    UM = '1'
    DOIS = '2'
    TRES = '3'
    QUATRO = '4'
    CINCO = '5'
    SEIS = '6'
    SETE = '7'
    OITO = '8'
    NOVE = '9'
def play(tabela):
    """Pick the next move: take the first free cell of the middle row,
    then bottom-left, falling back to position two."""
    preferences = (
        ((1, 0), Posi.QUATRO),
        ((1, 1), Posi.CINCO),
        ((1, 2), Posi.SEIS),
        ((2, 0), Posi.SETE),
    )
    for (row, col), move in preferences:
        if tabela[row][col] == ESPACO_VAZIO:
            return move
    return Posi.DOIS
print(play(converte_array(sys.argv)).value, end='')
|
<filename>public/code2.py<gh_stars>0
from enum import Enum
import sys
def converte_array(args=('0', '0', '0', '0', '0', '0', '0', '0', '0')):
    """Drop the program name (args[0]) and reshape the remaining cell
    strings into a 3x3 board matrix (list of three row lists).

    The default is an immutable tuple to avoid the shared-mutable-default
    pitfall of the original list default; list(...) keeps the returned rows
    as lists in every case.
    """
    array = list(args[1:])
    matrix = [array[0:3], array[3:6], array[6:9]]
    return matrix
# Cell markers used in the board matrix (values arrive as strings from argv).
ESPACO_VAZIO = '0'  # empty cell
JOGADA_SUA = '1'  # our move
JOGADA_ADVERSARIO = '2'  # opponent's move
'''
lOCALIZAÇÃO DAS POSIÇÕES
UM | DOIS | TRÊS
--------|--------|--------
QUATRO | CINCO | SEIS
--------|--------|--------
SETE | OITO | NOVE
'''
class Posi(Enum):
    """Board positions numbered 1-9, left-to-right, top-to-bottom
    (see the layout diagram above)."""
    UM = '1'
    DOIS = '2'
    TRES = '3'
    QUATRO = '4'
    CINCO = '5'
    SEIS = '6'
    SETE = '7'
    OITO = '8'
    NOVE = '9'
def play(tabela):
    """Pick the next move: take the first free cell of the middle row,
    then bottom-left, falling back to position two."""
    preferences = (
        ((1, 0), Posi.QUATRO),
        ((1, 1), Posi.CINCO),
        ((1, 2), Posi.SEIS),
        ((2, 0), Posi.SETE),
    )
    for (row, col), move in preferences:
        if tabela[row][col] == ESPACO_VAZIO:
            return move
    return Posi.DOIS
print(play(converte_array(sys.argv)).value, end='')
|
en
| 0.159662
|
lOCALIZAÇÃO DAS POSIÇÕES UM | DOIS | TRÊS --------|--------|-------- QUATRO | CINCO | SEIS --------|--------|-------- SETE | OITO | NOVE
| 3.135707
| 3
|
tests/models/test_deepspeech2.py
|
cosmoquester/speech-recognition
| 6
|
6626693
|
import pytest
import tensorflow as tf
from speech_recognition.models.deepspeech2 import Convolution, DeepSpeech2, Recurrent
@pytest.mark.parametrize(
    "num_layers,channels,kernel_sizes,strides,batch_size,sequence_length,frequency_bins,feature_dim",
    [
        (1, [32], [[41, 11]], [[2, 2]], 7, 111, 33, 1),
        (2, [32, 32], [[41, 11], [21, 11]], [[2, 2], [2, 1]], 12, 333, 45, 2),
        (3, [32, 32, 32], [[41, 11], [21, 11], [21, 11]], [[2, 2], [2, 1], [2, 1]], 33, 242, 56, 3),
        (3, [32, 32, 96], [[41, 11], [21, 11], [21, 11]], [[2, 2], [2, 1], [2, 1]], 5, 553, 62, 4),
    ],
)
def test_convolution(
    num_layers, channels, kernel_sizes, strides, batch_size, sequence_length, frequency_bins, feature_dim
):
    """Conv stack keeps the batch size, shrinks the time axis, returns a mask
    matching the reduced length, and widens the feature dimension beyond the
    last channel count."""
    conv = Convolution(num_layers, channels, kernel_sizes, strides)
    features = tf.random.normal([batch_size, sequence_length, frequency_bins, feature_dim])
    out, out_mask = conv(features)
    out_batch, out_len, out_dim = out.shape
    assert out_batch == batch_size
    assert sequence_length > out_len == out_mask.shape[1]
    assert out_dim > channels[-1]
@pytest.mark.parametrize(
    "run_type,num_layers,units,recurrent_dropout,batch_size,sequence_length,feature_dim,pad_length",
    [
        ("rnn", 1, 240, 0.1, 88, 12, 142, 3),
        ("lstm", 3, 188, 0.2, 32, 121, 134, 4),
        ("gru", 5, 151, 0.3, 12, 124, 64, 5),
        ("gru", 7, 128, 0.4, 55, 333, 55, 6),
    ],
)
def test_recurrent(
    run_type, num_layers, units, recurrent_dropout, batch_size, sequence_length, feature_dim, pad_length
):
    """Output width is units * 2, and masked PAD frames appended at the end
    must not change the outputs at the unpadded positions."""
    recurrent = Recurrent(run_type, num_layers, units, recurrent_dropout)
    # Shape check on unpadded input.
    features = tf.random.normal([batch_size, sequence_length, feature_dim])
    mask = tf.cast(tf.random.normal([batch_size, sequence_length]) > 0.1, tf.int32)
    base_out = recurrent(features, mask)
    tf.debugging.assert_equal(base_out.shape, [batch_size, sequence_length, units * 2])
    # Append fully-masked PAD frames and re-run.
    pad_features = tf.concat([features, tf.random.normal([batch_size, pad_length, feature_dim])], axis=1)
    pad_mask = tf.concat([mask, tf.zeros([batch_size, pad_length], dtype=tf.int32)], axis=1)
    padded_out = recurrent(pad_features, pad_mask)
    tf.debugging.assert_equal(padded_out.shape, [batch_size, sequence_length + pad_length, units * 2])
    # The masked tail must leave earlier timesteps untouched.
    tf.debugging.assert_equal(base_out, padded_out[:, :-pad_length])
# fmt: off
@pytest.mark.parametrize(
    "num_conv_layers,channels,kernel_sizes,strides,rnn_type,num_reccurent_layers,hidden_dim,dropout,vocab_size,batch_size,sequence_length,freq_bins,feature_dim",
    [
        (1, [32], [[41, 11]], [[2, 2]], "rnn", 1, 240, 0.1, 88,7, 111, 33, 1),
        (2, [32, 32], [[41, 11], [21, 11]], [[2, 2], [2, 1]], "lstm", 3, 188, 0.2, 32,12, 333, 45, 2),
        (3, [32, 32, 32], [[41, 11], [21, 11], [21, 11]], [[2, 2], [2, 1], [2, 1]], "gru", 5, 151, 0.3, 12,33, 242, 56, 3),
        (3, [32, 32, 96], [[41, 11], [21, 11], [21, 11]], [[2, 2], [2, 1], [2, 1]], "gru", 7, 128, 0.4, 55,5, 553, 62, 4),
    ],
)
# fmt: on
def test_deepspeech2(
    num_conv_layers,
    channels,
    kernel_sizes,
    strides,
    rnn_type,
    num_reccurent_layers,
    hidden_dim,
    dropout,
    vocab_size,
    batch_size,
    sequence_length,
    freq_bins,
    feature_dim,
):
    """End-to-end shape check: batch preserved, time axis reduced, logits
    sized to the vocabulary."""
    model = DeepSpeech2(
        num_conv_layers,
        channels,
        kernel_sizes,
        strides,
        rnn_type,
        num_reccurent_layers,
        hidden_dim,
        dropout,
        dropout,  # the same rate is reused for the second dropout argument
        vocab_size,
        10,
    )
    features = tf.random.normal([batch_size, sequence_length, freq_bins, feature_dim])
    logits = model(features)
    out_batch, out_len, out_vocab = logits.shape
    assert out_batch == batch_size
    assert out_len < sequence_length
    assert out_vocab == vocab_size
|
import pytest
import tensorflow as tf
from speech_recognition.models.deepspeech2 import Convolution, DeepSpeech2, Recurrent
@pytest.mark.parametrize(
    "num_layers,channels,kernel_sizes,strides,batch_size,sequence_length,frequency_bins,feature_dim",
    [
        (1, [32], [[41, 11]], [[2, 2]], 7, 111, 33, 1),
        (2, [32, 32], [[41, 11], [21, 11]], [[2, 2], [2, 1]], 12, 333, 45, 2),
        (3, [32, 32, 32], [[41, 11], [21, 11], [21, 11]], [[2, 2], [2, 1], [2, 1]], 33, 242, 56, 3),
        (3, [32, 32, 96], [[41, 11], [21, 11], [21, 11]], [[2, 2], [2, 1], [2, 1]], 5, 553, 62, 4),
    ],
)
def test_convolution(
    num_layers, channels, kernel_sizes, strides, batch_size, sequence_length, frequency_bins, feature_dim
):
    """Conv stack keeps the batch size, shrinks the time axis, returns a mask
    matching the reduced length, and widens the feature dimension beyond the
    last channel count."""
    conv = Convolution(num_layers, channels, kernel_sizes, strides)
    features = tf.random.normal([batch_size, sequence_length, frequency_bins, feature_dim])
    out, out_mask = conv(features)
    out_batch, out_len, out_dim = out.shape
    assert out_batch == batch_size
    assert sequence_length > out_len == out_mask.shape[1]
    assert out_dim > channels[-1]
@pytest.mark.parametrize(
    "run_type,num_layers,units,recurrent_dropout,batch_size,sequence_length,feature_dim,pad_length",
    [
        ("rnn", 1, 240, 0.1, 88, 12, 142, 3),
        ("lstm", 3, 188, 0.2, 32, 121, 134, 4),
        ("gru", 5, 151, 0.3, 12, 124, 64, 5),
        ("gru", 7, 128, 0.4, 55, 333, 55, 6),
    ],
)
def test_recurrent(
    run_type, num_layers, units, recurrent_dropout, batch_size, sequence_length, feature_dim, pad_length
):
    """Output width is units * 2, and masked PAD frames appended at the end
    must not change the outputs at the unpadded positions."""
    recurrent = Recurrent(run_type, num_layers, units, recurrent_dropout)
    # Shape check on unpadded input.
    features = tf.random.normal([batch_size, sequence_length, feature_dim])
    mask = tf.cast(tf.random.normal([batch_size, sequence_length]) > 0.1, tf.int32)
    base_out = recurrent(features, mask)
    tf.debugging.assert_equal(base_out.shape, [batch_size, sequence_length, units * 2])
    # Append fully-masked PAD frames and re-run.
    pad_features = tf.concat([features, tf.random.normal([batch_size, pad_length, feature_dim])], axis=1)
    pad_mask = tf.concat([mask, tf.zeros([batch_size, pad_length], dtype=tf.int32)], axis=1)
    padded_out = recurrent(pad_features, pad_mask)
    tf.debugging.assert_equal(padded_out.shape, [batch_size, sequence_length + pad_length, units * 2])
    # The masked tail must leave earlier timesteps untouched.
    tf.debugging.assert_equal(base_out, padded_out[:, :-pad_length])
# fmt: off
@pytest.mark.parametrize(
    "num_conv_layers,channels,kernel_sizes,strides,rnn_type,num_reccurent_layers,hidden_dim,dropout,vocab_size,batch_size,sequence_length,freq_bins,feature_dim",
    [
        (1, [32], [[41, 11]], [[2, 2]], "rnn", 1, 240, 0.1, 88,7, 111, 33, 1),
        (2, [32, 32], [[41, 11], [21, 11]], [[2, 2], [2, 1]], "lstm", 3, 188, 0.2, 32,12, 333, 45, 2),
        (3, [32, 32, 32], [[41, 11], [21, 11], [21, 11]], [[2, 2], [2, 1], [2, 1]], "gru", 5, 151, 0.3, 12,33, 242, 56, 3),
        (3, [32, 32, 96], [[41, 11], [21, 11], [21, 11]], [[2, 2], [2, 1], [2, 1]], "gru", 7, 128, 0.4, 55,5, 553, 62, 4),
    ],
)
# fmt: on
def test_deepspeech2(
    num_conv_layers,
    channels,
    kernel_sizes,
    strides,
    rnn_type,
    num_reccurent_layers,
    hidden_dim,
    dropout,
    vocab_size,
    batch_size,
    sequence_length,
    freq_bins,
    feature_dim,
):
    """End-to-end shape check: batch preserved, time axis reduced, logits
    sized to the vocabulary."""
    model = DeepSpeech2(
        num_conv_layers,
        channels,
        kernel_sizes,
        strides,
        rnn_type,
        num_reccurent_layers,
        hidden_dim,
        dropout,
        dropout,  # the same rate is reused for the second dropout argument
        vocab_size,
        10,
    )
    features = tf.random.normal([batch_size, sequence_length, freq_bins, feature_dim])
    logits = model(features)
    out_batch, out_len, out_vocab = logits.shape
    assert out_batch == batch_size
    assert out_len < sequence_length
    assert out_vocab == vocab_size
|
en
| 0.651372
|
# Check Shape # Check Mask for PAD # fmt: off # fmt: on
| 2.582357
| 3
|
src/permifrost/core/permissions/utils/snowflake_grants.py
|
kouk/permifrost
| 0
|
6626694
|
<gh_stars>0
import re
from typing import Any, Dict, List, Optional, Set, Tuple
from permifrost.core.logger import GLOBAL_LOGGER as logger
from permifrost.core.permissions.utils.snowflake_connector import SnowflakeConnector
# SQL statement templates filled in with str.format by the generator methods
# below; placeholder names match the keyword arguments used at the call sites.
GRANT_ROLE_TEMPLATE = "GRANT ROLE {role_name} TO {type} {entity_name}"
REVOKE_ROLE_TEMPLATE = "REVOKE ROLE {role_name} FROM {type} {entity_name}"
GRANT_PRIVILEGES_TEMPLATE = (
    "GRANT {privileges} ON {resource_type} {resource_name} TO ROLE {role}"
)
REVOKE_PRIVILEGES_TEMPLATE = (
    "REVOKE {privileges} ON {resource_type} {resource_name} FROM ROLE {role}"
)
GRANT_FUTURE_PRIVILEGES_TEMPLATE = "GRANT {privileges} ON FUTURE {resource_type}s IN {grouping_type} {grouping_name} TO ROLE {role}"
REVOKE_FUTURE_PRIVILEGES_TEMPLATE = "REVOKE {privileges} ON FUTURE {resource_type}s IN {grouping_type} {grouping_name} FROM ROLE {role}"
ALTER_USER_TEMPLATE = "ALTER USER {user_name} SET {privileges}"
GRANT_OWNERSHIP_TEMPLATE = "GRANT OWNERSHIP ON {resource_type} {resource_name} TO ROLE {role_name} COPY CURRENT GRANTS"
class SnowflakeGrantsGenerator:
    def __init__(
        self,
        grants_to_role: Dict,
        roles_granted_to_user: Dict[str, List[str]],
        ignore_memberships: Optional[bool] = False,
    ) -> None:
        """
        Initializes a grants generator, used to generate SQL for granting privileges.

        grants_to_role: a dict mapping role to grants, where role is a string
            and grants is a dictionary of privileges to entities,
            e.g. {'functional_role': {'create schema': {'database': ['database_1', 'database_2']}}}
        roles_granted_to_user: a dict mapping each user to a list of roles,
            e.g. {'user_name': ['role_1', 'role_2']}
        ignore_memberships: bool, whether to skip role grant/revoke of memberships
        """
        self.grants_to_role = grants_to_role
        self.roles_granted_to_user = roles_granted_to_user
        self.ignore_memberships = ignore_memberships
        # Live connection used by helpers that list server-side objects.
        self.conn = SnowflakeConnector()
def is_granted_privilege(
self, role: str, privilege: str, entity_type: str, entity_name: str
) -> bool:
"""
Check if <role> has been granted the privilege <privilege> on entity type
<entity_type> with name <entity_name>. First checks if it is a future grant
since snowflaky will format the future grants wrong - i.e. <table> is a part
of the fully qualified name for a future table grant.
For example:
is_granted_privilege('reporter', 'usage', 'database', 'analytics') -> True
means that role reporter has been granted the privilege to use the
Database ANALYTICS on the Snowflake server.
"""
future = True if re.search(r"<(table|view|schema)>", entity_name) else False
grants = (
self.grants_to_role.get(role, {}).get(privilege, {}).get(entity_type, [])
)
if future and entity_name in grants:
return True
if not future and SnowflakeConnector.snowflaky(entity_name) in grants:
return True
return False
def _generate_member_lists(self, config: Dict) -> Tuple[List[str], List[str]]:
"""
Generate a tuple with the member_include_list (e.g. roles that should be granted)
and member_exclude_list (e.g. roles that should not be granted)
config: the subtree for the entity as specified in the spec
Returns: A tuple of two lists with the roles/users to include and exclude:
(member_include_list, member_exclude_list)
"""
member_include_list = []
member_exclude_list = []
if isinstance(config.get("member_of", []), dict):
member_include_list = config.get("member_of", {}).get("include", [])
member_include_list = [
SnowflakeConnector.snowflaky_user_role(role)
for role in member_include_list
]
member_exclude_list = config.get("member_of", {}).get("exclude", [])
member_exclude_list = [
SnowflakeConnector.snowflaky_user_role(role)
for role in member_exclude_list
]
elif isinstance(config.get("member_of", []), list):
member_include_list = config.get("member_of", [])
member_include_list = [
SnowflakeConnector.snowflaky_user_role(role)
for role in member_include_list
]
return (member_include_list, member_exclude_list)
def _generate_member_star_lists(self, all_entities: List, entity: str) -> List[str]:
"""
Generates the member include list when a * privilege is granted
all_entities: a List of all entities defined in the spec
entity: the entity to generate the list for
Returns: a list of all roles to include for the entity
"""
conn = SnowflakeConnector()
show_roles = conn.show_roles()
member_include_list = [
role for role in show_roles if role in all_entities and role != entity
]
return member_include_list
    def _generate_sql_commands_for_member_of_list(
        self, member_of_list: List[str], entity: str, entity_type: str
    ) -> List[Dict]:
        """For a given member_of list and entity, generate the SQL commands
        to grant the entity privileges for every member_role in the member_of list

        member_of_list: List of roles to generate sql commands for
        entity: the user or role to grant permissions for
        entity_type: the type of entity, either "users" or "roles"
        returns: a List of SQL command dicts with keys "already_granted" and "sql"
        raises: ValueError when entity_type is neither "users" nor "roles"
        """
        if entity_type == "users":
            grant_type = "user"
        elif entity_type == "roles":
            grant_type = "role"
        else:
            raise ValueError("grant_type must be either 'users' or 'roles'")
        sql_commands = []
        for member_role in member_of_list:
            granted_role = SnowflakeConnector.snowflaky_user_role(member_role)
            # A grant is "already granted" when the user already holds the role
            # (users) or the role already has usage on the member role (roles).
            already_granted = False
            if (
                entity_type == "users"
                and granted_role in self.roles_granted_to_user[entity]
            ) or (
                entity_type == "roles"
                and self.is_granted_privilege(entity, "usage", "role", member_role)
            ):
                already_granted = True
            # Don't generate grants for Snowflake default roles as this will raise errors
            # on Snowflake
            snowflake_default_roles = [
                "accountadmin",
                "sysadmin",
                "securityadmin",
                "useradmin",
                "public",
            ]
            if (
                entity in snowflake_default_roles
                and member_role in snowflake_default_roles
            ):
                continue
            sql_commands.append(
                {
                    "already_granted": already_granted,
                    "sql": GRANT_ROLE_TEMPLATE.format(
                        role_name=SnowflakeConnector.snowflaky_user_role(member_role),
                        type=grant_type,
                        entity_name=SnowflakeConnector.snowflaky_user_role(entity),
                    ),
                }
            )
        return sql_commands
def _generate_revoke_sql_commands_for_user(
self, username: str, member_of_list: List[str]
) -> List[Dict]:
"""For a given user, generate the SQL commands to revoke privileges
to any roles not defined in the member of list
"""
sql_commands = []
for granted_role in self.roles_granted_to_user[username]:
if granted_role not in member_of_list:
sql_commands.append(
{
"already_granted": False,
"sql": REVOKE_ROLE_TEMPLATE.format(
role_name=SnowflakeConnector.snowflaky_user_role(
granted_role
),
type="user",
entity_name=SnowflakeConnector.snowflaky_user_role(
username
),
),
}
)
return sql_commands
def _generate_revoke_sql_commands_for_role(self, rolename, member_of_list):
sql_commands = []
for granted_role in (
self.grants_to_role.get(rolename, {}).get("usage", {}).get("role", [])
):
if granted_role not in member_of_list:
snowflake_default_roles = [
"accountadmin",
"sysadmin",
"securityadmin",
"useradmin",
"public",
]
if (
granted_role in snowflake_default_roles
and rolename in snowflake_default_roles
):
continue
sql_commands.append(
{
"already_granted": False,
"sql": REVOKE_ROLE_TEMPLATE.format(
role_name=SnowflakeConnector.snowflaky_user_role(
granted_role
),
type="role",
entity_name=SnowflakeConnector.snowflaky_user_role(
rolename
),
),
}
)
return sql_commands
    def generate_grant_roles(
        self,
        entity_type: str,
        entity: str,
        config: Dict[str, Any],
        all_entities: Optional[List] = None,
    ) -> List[Dict]:
        """
        Generate the GRANT statements for both roles and users.

        entity_type: "users" or "roles"
        entity: the name of the entity (e.g. "yannis" or "reporter")
        config: the subtree for the entity as specified in the spec
        all_entities: all roles defined in spec; required when the spec uses
            the '"*"' wildcard membership
        Returns the SQL commands generated as a list
        raises: ValueError when '"*"' is used but all_entities is not provided
        """
        sql_commands: List[Dict] = []
        if self.ignore_memberships:
            return sql_commands
        member_include_list, member_exclude_list = self._generate_member_lists(config)
        # A single '"*"' include entry means "every spec-defined role".
        if len(member_include_list) == 1 and member_include_list[0] == '"*"':
            if not all_entities:
                raise ValueError(
                    "Cannot generate grant roles if all_entities not provided"
                )
            member_include_list = self._generate_member_star_lists(all_entities, entity)
        member_of_list = [
            role for role in member_include_list if role not in member_exclude_list
        ]
        sql_commands.extend(
            self._generate_sql_commands_for_member_of_list(
                member_of_list, entity, entity_type
            )
        )
        # Revoke anything granted on the server but absent from the spec.
        if entity_type == "users":
            sql_commands.extend(
                self._generate_revoke_sql_commands_for_user(entity, member_of_list)
            )
        if entity_type == "roles":
            sql_commands.extend(
                self._generate_revoke_sql_commands_for_role(entity, member_of_list)
            )
        return sql_commands
def _generate_database_commands(self, role, config, shared_dbs, spec_dbs):
databases = {
"read": config.get("privileges", {}).get("databases", {}).get("read", []),
"write": config.get("privileges", {}).get("databases", {}).get("write", []),
}
if len(databases.get("read", "")) == 0:
logger.debug(
"`privileges.databases.read` not found for role {}, skipping generation of database read level GRANT statements.".format(
role
)
)
if len(databases.get("write", "")) == 0:
logger.debug(
"`privileges.databases.write` not found for role {}, skipping generation of database write level GRANT statements.".format(
role
)
)
database_commands = self.generate_database_grants(
role=role, databases=databases, shared_dbs=shared_dbs, spec_dbs=spec_dbs
)
return database_commands
def _generate_schema_commands(self, role, config, shared_dbs, spec_dbs):
schemas = {
"read": config.get("privileges", {}).get("schemas", {}).get("read", []),
"write": config.get("privileges", {}).get("schemas", {}).get("write", []),
}
if len(schemas.get("read", "")) == 0:
logger.debug(
"`privileges.schemas.read` not found for role {}, skipping generation of schemas read level GRANT statements.".format(
role
)
)
if len(schemas.get("write", "")) == 0:
logger.debug(
"`privileges.schemas.write` not found for role {}, skipping generation of schemas write level GRANT statements.".format(
role
)
)
schema_commands = self.generate_schema_grants(
role=role, schemas=schemas, shared_dbs=shared_dbs, spec_dbs=spec_dbs
)
return schema_commands
def _generate_table_commands(self, role, config, shared_dbs, spec_dbs):
tables = {
"read": config.get("privileges", {}).get("tables", {}).get("read", []),
"write": config.get("privileges", {}).get("tables", {}).get("write", []),
}
if len(tables.get("read", "")) == 0:
logger.debug(
"`privileges.tables.read` not found for role {}, skipping generation of tables read level GRANT statements.".format(
role
)
)
if len(tables.get("write", "")) == 0:
logger.debug(
"`privileges.tables.write` not found for role {}, skipping generation of tables write level GRANT statements.".format(
role
)
)
table_commands = self.generate_table_and_view_grants(
role=role, tables=tables, shared_dbs=shared_dbs, spec_dbs=spec_dbs
)
return table_commands
    def generate_grant_privileges_to_role(
        self, role: str, config: Dict[str, Any], shared_dbs: Set, spec_dbs: Set
    ) -> List[Dict]:
        """
        Generate all the privilege granting and revocation
        statements for a role so Snowflake matches the spec.

        Most of the SQL command that will be generated are privileges granted to
        roles and this function orchestrates the whole process.

        role: the name of the role (e.g. "loader" or "reporter") the privileges
            are granted to and revoked from
        config: the subtree for the role as specified in the spec
        shared_dbs: a set of all the shared databases defined in the spec.
            Used down the road by generate_database_grants() to also grant
            "imported privileges" when access is granted to a shared DB.
        spec_dbs: a set of all the databases defined in the spec. This is used in revoke
            commands to validate revocations are only for spec'd databases
        Returns the SQL commands generated as a list
        """
        sql_commands: List[Dict] = []
        # Warehouses are optional in the spec; a missing key is expected (EAFP).
        try:
            warehouses = config["warehouses"]
            new_commands = self.generate_warehouse_grants(
                role=role, warehouses=warehouses
            )
            sql_commands.extend(new_commands)
        except KeyError:
            logger.debug(
                "`warehouses` not found for role {}, skipping generation of Warehouse GRANT statements.".format(
                    role
                )
            )
        database_commands = self._generate_database_commands(
            role, config, shared_dbs, spec_dbs
        )
        sql_commands.extend(database_commands)
        schema_commands = self._generate_schema_commands(
            role, config, shared_dbs, spec_dbs
        )
        sql_commands.extend(schema_commands)
        table_commands = self._generate_table_commands(
            role, config, shared_dbs, spec_dbs
        )
        sql_commands.extend(table_commands)
        return sql_commands
def generate_warehouse_grants(
self, role: str, warehouses: list
) -> List[Dict[str, Any]]:
"""
Generate the GRANT statements for Warehouse usage and operation.
role: the name of the role the privileges are GRANTed to
warehouses: list of warehouses for the specified role
Returns the SQL command generated
"""
sql_commands: List[Dict] = []
for warehouse in warehouses:
for priv in ["usage", "operate", "monitor"]:
already_granted = self.is_granted_privilege(
role, priv, "warehouse", warehouse
)
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges=priv,
resource_type="warehouse",
resource_name=SnowflakeConnector.snowflaky(warehouse),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
for priv in ["usage", "operate", "monitor"]:
for granted_warehouse in (
self.grants_to_role.get(role, {}).get(priv, {}).get("warehouse", [])
):
if granted_warehouse not in warehouses:
sql_commands.append(
{
"already_granted": False,
"sql": REVOKE_PRIVILEGES_TEMPLATE.format(
privileges=priv,
resource_type="warehouse",
resource_name=SnowflakeConnector.snowflaky(
granted_warehouse
),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
return sql_commands
def _generate_database_read_privs(
self, database: str, role: str, shared_dbs: Set[str], read_privileges: str
) -> Dict:
already_granted = self.is_granted_privilege(role, "usage", "database", database)
# If this is a shared database, we have to grant the "imported privileges"
# privilege to the user and skip granting the specific permissions as
# "Granting individual privileges on imported databases is not allowed."
if database in shared_dbs:
return {
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges="imported privileges",
resource_type="database",
resource_name=SnowflakeConnector.snowflaky(database),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
else:
return {
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges=read_privileges,
resource_type="database",
resource_name=SnowflakeConnector.snowflaky(database),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
    def generate_database_grants(
        self, role: str, databases: Dict[str, List], shared_dbs: Set, spec_dbs: Set
    ) -> List[Dict[str, Any]]:
        """
        Generate the GRANT and REVOKE statements for Databases
        to align Snowflake with the spec.

        role: the name of the role the privileges are GRANTed to
        databases: dict with "read" and "write" lists of database names
        shared_dbs: a set of all the shared databases defined in the spec.
        spec_dbs: a set of all the databases defined in the spec. This is used in revoke
            commands to validate revocations are only for spec'd databases
        Returns the SQL commands generated as a list
        """
        sql_commands = []
        read_privileges = "usage"
        partial_write_privileges = "monitor, create schema"
        write_privileges = f"{read_privileges}, {partial_write_privileges}"
        # GRANTs for read access (shared-database handling lives in the helper).
        for database in databases.get("read", []):
            read_grant = self._generate_database_read_privs(
                database=database,
                role=role,
                shared_dbs=shared_dbs,
                read_privileges=read_privileges,
            )
            sql_commands.append(read_grant)
        # GRANTs for write access: "already granted" only when usage, monitor
        # AND create schema are all present.
        for database in databases.get("write", []):
            already_granted = (
                self.is_granted_privilege(role, "usage", "database", database)
                and self.is_granted_privilege(role, "monitor", "database", database)
                and self.is_granted_privilege(
                    role, "create schema", "database", database
                )
            )
            # If this is a shared database, we have to grant the "imported privileges"
            # privilege to the user and skip granting the specific permissions as
            # "Granting individual privileges on imported databases is not allowed."
            if database in shared_dbs:
                sql_commands.append(
                    {
                        "already_granted": already_granted,
                        "sql": GRANT_PRIVILEGES_TEMPLATE.format(
                            privileges="imported privileges",
                            resource_type="database",
                            resource_name=SnowflakeConnector.snowflaky(database),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
                continue
            sql_commands.append(
                {
                    "already_granted": already_granted,
                    "sql": GRANT_PRIVILEGES_TEMPLATE.format(
                        privileges=write_privileges,
                        resource_type="database",
                        resource_name=SnowflakeConnector.snowflaky(database),
                        role=SnowflakeConnector.snowflaky_user_role(role),
                    ),
                }
            )
        # REVOKES
        # The "Usage" privilege is consistent across read and write.
        # Compare granted usage to full read/write usage set
        # and revoke missing ones
        usage_privs_on_db = (
            self.grants_to_role.get(role, {}).get("usage", {}).get("database", [])
        )
        for granted_database in usage_privs_on_db:
            # If it's a shared database, only revoke imported
            # We'll only know if it's a shared DB based on the spec
            all_databases = databases.get("read", []) + databases.get("write", [])
            if granted_database not in spec_dbs:
                # Skip revocation on database that are not defined in spec
                continue
            # Revoke read/write permissions on shared databases
            elif (
                granted_database not in all_databases and granted_database in shared_dbs
            ):
                sql_commands.append(
                    {
                        "already_granted": False,
                        "sql": REVOKE_PRIVILEGES_TEMPLATE.format(
                            privileges="imported privileges",
                            resource_type="database",
                            resource_name=SnowflakeConnector.snowflaky(
                                granted_database
                            ),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
            # Revoke read permissions on created databases in Snowflake
            elif granted_database not in all_databases:
                sql_commands.append(
                    {
                        "already_granted": False,
                        "sql": REVOKE_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="database",
                            resource_name=SnowflakeConnector.snowflaky(
                                granted_database
                            ),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
        # Get all other write privilege dbs in case there are dbs where
        # usage was revoked but other write permissions still exist
        # This also preserves the case where somebody switches write access
        # for read access
        monitor_privs_on_db = (
            self.grants_to_role.get(role, {}).get("monitor", {}).get("database", [])
        )
        create_privs_on_db = (
            self.grants_to_role.get(role, {})
            .get("create schema", {})
            .get("database", [])
        )
        full_write_privs_on_dbs = monitor_privs_on_db + create_privs_on_db
        for granted_database in full_write_privs_on_dbs:
            # If it's a shared database, only revoke imported
            # We'll only know if it's a shared DB based on the spec
            if (
                granted_database not in databases.get("write", [])
                and granted_database in shared_dbs
            ):
                sql_commands.append(
                    {
                        "already_granted": False,
                        "sql": REVOKE_PRIVILEGES_TEMPLATE.format(
                            privileges="imported privileges",
                            resource_type="database",
                            resource_name=SnowflakeConnector.snowflaky(
                                granted_database
                            ),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
            elif granted_database not in databases.get("write", []):
                sql_commands.append(
                    {
                        "already_granted": False,
                        "sql": REVOKE_PRIVILEGES_TEMPLATE.format(
                            privileges=partial_write_privileges,
                            resource_type="database",
                            resource_name=SnowflakeConnector.snowflaky(
                                granted_database
                            ),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
        return sql_commands
    def _generate_schema_read_grants(
        self, schemas, shared_dbs, role
    ) -> Tuple[List[Dict], List]:
        """Build read (usage) GRANTs for the given schema identifiers.

        Returns a tuple (sql_commands, read_grant_schemas) where
        read_grant_schemas lists every schema, including future-grant
        placeholders, that received a read grant.
        """
        sql_commands = []
        read_grant_schemas = []
        read_privileges = "usage"
        for schema in schemas:
            # Split the schema identifier into parts {DB_NAME}.{SCHEMA_NAME}
            # so that we can check and use each one
            name_parts = schema.split(".")
            # Do nothing if this is a schema inside a shared database:
            # "Granting individual privileges on imported databases is not allowed."
            database = name_parts[0]
            if database in shared_dbs:
                continue
            conn = SnowflakeConnector()
            fetched_schemas = conn.full_schema_list(schema)
            read_grant_schemas.extend(fetched_schemas)
            if name_parts[1] == "*":
                # If <db_name>.* then you can grant future and add future schema to grant list
                future_schema = f"{database}.<schema>"
                read_grant_schemas.append(future_schema)
                schema_already_granted = self.is_granted_privilege(
                    role, read_privileges, "schema", future_schema
                )
                # Grant on FUTURE schemas
                sql_commands.append(
                    {
                        "already_granted": schema_already_granted,
                        "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="schema",
                            grouping_type="database",
                            grouping_name=SnowflakeConnector.snowflaky(database),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
            # Grant on each concrete schema matched by the identifier.
            for db_schema in fetched_schemas:
                already_granted = False
                if self.is_granted_privilege(
                    role, read_privileges, "schema", db_schema
                ):
                    already_granted = True
                sql_commands.append(
                    {
                        "already_granted": already_granted,
                        "sql": GRANT_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="schema",
                            resource_name=SnowflakeConnector.snowflaky(db_schema),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
        return (sql_commands, read_grant_schemas)
def _generate_schema_write_grants(
self, schemas, shared_dbs, role
) -> Tuple[List[Dict], List]:
sql_commands = []
write_grant_schemas = []
read_privileges = "usage"
partial_write_privileges = (
"monitor, create table,"
" create view, create stage, create file format,"
" create sequence, create function, create pipe"
)
write_privileges = f"{read_privileges}, {partial_write_privileges}"
write_privileges_array = write_privileges.split(", ")
for schema in schemas:
# Split the schema identifier into parts {DB_NAME}.{SCHEMA_NAME}
# so that we can check and use each one
name_parts = schema.split(".")
# Do nothing if this is a schema inside a shared database:
# "Granting individual privileges on imported databases is not allowed."
database = name_parts[0]
if database in shared_dbs:
continue
conn = SnowflakeConnector()
fetched_schemas = conn.full_schema_list(schema)
write_grant_schemas.extend(fetched_schemas)
if name_parts[1] == "*":
# If <db_name>.* then you can grant future and add future schema to grant list
future_schema = f"{database}.<schema>"
write_grant_schemas.append(future_schema)
already_granted = True
for privilege in write_privileges_array:
# If any of the privileges are not granted, set already_granted to False
if not self.is_granted_privilege(
role, privilege, "schema", future_schema
):
already_granted = False
# Grant on FUTURE schemas
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
privileges=write_privileges,
resource_type="schema",
grouping_type="database",
grouping_name=SnowflakeConnector.snowflaky(database),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
for db_schema in fetched_schemas:
already_granted = True
for privilege in write_privileges_array:
# If any of the privileges are not granted, set already_granted to False
if not self.is_granted_privilege(
role, privilege, "schema", db_schema
):
already_granted = False
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges=write_privileges,
resource_type="schema",
resource_name=SnowflakeConnector.snowflaky(db_schema),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
return (sql_commands, write_grant_schemas)
def _generate_schema_revokes(
self, usage_schemas, all_grant_schemas, shared_dbs, spec_dbs, role
):
sql_commands = []
read_privileges = "usage"
for granted_schema in usage_schemas:
database_name = granted_schema.split(".")[0]
future_schema_name = f"{database_name}.<schema>"
if granted_schema not in all_grant_schemas and (
database_name in shared_dbs or database_name not in spec_dbs
):
# No privileges to revoke on imported db. Done at database level
# Don't revoke on privileges on databases not defined in spec.
continue
elif ( # If future privilege is granted on snowflake but not in grant list
granted_schema == future_schema_name
and future_schema_name not in all_grant_schemas #
):
sql_commands.append(
{
"already_granted": False,
"sql": REVOKE_FUTURE_PRIVILEGES_TEMPLATE.format(
privileges=read_privileges,
resource_type="schema",
grouping_type="database",
grouping_name=SnowflakeConnector.snowflaky(database_name),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
elif (
granted_schema not in all_grant_schemas
and future_schema_name not in all_grant_schemas
):
# Covers case where schema is granted in Snowflake
# But it's not in the grant list and it's not explicitly granted as a future grant
sql_commands.append(
{
"already_granted": False,
"sql": REVOKE_PRIVILEGES_TEMPLATE.format(
privileges=read_privileges,
resource_type="schema",
resource_name=SnowflakeConnector.snowflaky(granted_schema),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
return sql_commands
# TODO: This method is too complex, consider refactoring
def generate_schema_grants(
self, role: str, schemas: Dict[str, List], shared_dbs: Set, spec_dbs: Set
) -> List[Dict]:
"""
Generate the GRANT and REVOKE statements for schemas
including future grants.
role: the name of the role the privileges are GRANTed to
schemas: the name of the Schema (e.g. "raw.public", "raw.*")
shared_dbs: a set of all the shared databases defined in the spec.
spec_dbs: a set of all the databases defined in the spec. This is used in revoke
commands to validate revocations are only for spec'd databases
Returns the SQL commands generated as a List
"""
sql_commands = []
# Schema lists to hold read/write grants. This is necessary
# as the provided schemas are not the full list - we determine
# the full list via full_schema_list and store in these variables
read_grant_schemas = []
write_grant_schemas = []
partial_write_privileges = (
"monitor, create table,"
" create view, create stage, create file format,"
" create sequence, create function, create pipe"
)
# Get Schema Read Commands
read_schemas = schemas.get("read", [])
read_commands, read_grants = self._generate_schema_read_grants(
read_schemas, shared_dbs, role
)
sql_commands.extend(read_commands)
read_grant_schemas.extend(read_grants)
# Get Schema Write Commands
write_schemas = schemas.get("write", [])
write_commands, write_grants = self._generate_schema_write_grants(
write_schemas, shared_dbs, role
)
sql_commands.extend(write_commands)
write_grant_schemas.extend(write_grants)
# REVOKES
# The "usage" privilege is consistent across read and write.
# Compare granted usage to full read/write set and revoke missing ones
usage_schemas = set(
self.grants_to_role.get(role, {}).get("usage", {}).get("schema", [])
)
all_grant_schemas = read_grant_schemas + write_grant_schemas
sql_commands.extend(
self._generate_schema_revokes(
usage_schemas, all_grant_schemas, shared_dbs, spec_dbs, role
)
)
# Get all other write privilege schemas in case there are schemas where
# usage was revoked but other write permissions still exist
# This also preserves the case where somebody switches write access
# for read access
other_privileges = [
"monitor",
"create table",
"create view",
"create stage",
"create file format",
"create sequence",
"create pipe",
]
other_schema_grants = list()
for privilege in other_privileges:
other_schema_grants.extend(
self.grants_to_role.get(role, {}).get(privilege, {}).get("schema", [])
)
for granted_schema in other_schema_grants:
database_name = granted_schema.split(".")[0]
future_schema_name = f"{database_name}.<schema>"
if granted_schema not in write_grant_schemas and (
database_name in shared_dbs or database_name not in spec_dbs
):
# No privileges to revoke on imported db. Done at database level
# Don't revoke on privileges on databases not defined in spec.
continue
elif ( # If future privilege is granted but not in grant list
granted_schema == future_schema_name
and future_schema_name not in write_grant_schemas
):
sql_commands.append(
{
"already_granted": False,
"sql": REVOKE_FUTURE_PRIVILEGES_TEMPLATE.format(
privileges=partial_write_privileges,
resource_type="schema",
grouping_type="database",
grouping_name=SnowflakeConnector.snowflaky(database_name),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
elif (
granted_schema not in write_grant_schemas
and future_schema_name not in write_grant_schemas
):
# Covers case where schema is granted and it's not explicitly granted as a future grant
sql_commands.append(
{
"already_granted": False,
"sql": REVOKE_PRIVILEGES_TEMPLATE.format(
privileges=partial_write_privileges,
resource_type="schema",
resource_name=SnowflakeConnector.snowflaky(granted_schema),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
return sql_commands
    def _generate_table_read_grants(self, conn, tables, shared_dbs, role):
        """
        Generate GRANT statements giving <role> read ("select") access on the
        given tables/views, including FUTURE grants for wildcard specs.

        conn: SnowflakeConnector used to expand wildcard schema/table specs
        tables: table references from the spec ({db}.{schema}.{table},
            where schema and/or table may be "*")
        shared_dbs: shared (imported) databases - skipped, since individual
            privileges cannot be granted on them
        role: the role the privileges are GRANTed to

        Returns (sql_commands, read_grant_tables_full, read_grant_views_full)
        where the *_full lists contain every table/view (including future
        placeholders like "db.schema.<table>") flagged for reading; the
        revoke logic later compares Snowflake's grants against these.
        """
        sql_commands = []
        # Full lists of granted tables/views across all spec entries,
        # returned to the caller for revoke comparisons.
        read_grant_tables_full = []
        read_grant_views_full = []
        read_privileges = "select"
        for table in tables:
            # Split the table identifier into parts {DB_NAME}.{SCHEMA_NAME}.{TABLE_NAME}
            # so that we can check and use each one
            name_parts = table.split(".")
            database_name = name_parts[0] if 0 < len(name_parts) else None
            schema_name = name_parts[1] if 1 < len(name_parts) else None
            table_view_name = name_parts[2] if 2 < len(name_parts) else None
            # Do nothing if this is a table inside a shared database:
            # "Granting individual privileges on imported databases is not allowed."
            if database_name in shared_dbs:
                continue
            # Gather the tables/views that privileges will be granted to
            # for the given table schema
            read_grant_tables = []
            read_grant_views = []
            # List of all tables/views in schema for validation
            read_table_list = []
            read_view_list = []
            fetched_schemas = conn.full_schema_list(f"{database_name}.{schema_name}")
            # For future grants at the database level for tables
            future_database_table = "{database}.<table>".format(database=database_name)
            table_already_granted = self.is_granted_privilege(
                role, read_privileges, "table", future_database_table
            )
            # NOTE(review): the database-level future placeholder is appended
            # unconditionally for every spec entry, while the write variant
            # appends it only when already granted - confirm which is intended.
            read_grant_tables_full.append(future_database_table)
            # Database-level FUTURE table grant only applies to "db.*.*" specs
            if schema_name == "*" and table_view_name == "*":
                sql_commands.append(
                    {
                        "already_granted": table_already_granted,
                        "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="table",
                            grouping_type="database",
                            grouping_name=SnowflakeConnector.snowflaky(database_name),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
            # For future grants at the database level for views
            future_database_view = "{database}.<view>".format(database=database_name)
            view_already_granted = self.is_granted_privilege(
                role, read_privileges, "view", future_database_view
            )
            read_grant_views_full.append(future_database_view)
            # Database-level FUTURE view grant only applies to "db.*.*" specs
            if schema_name == "*" and table_view_name == "*":
                sql_commands.append(
                    {
                        "already_granted": view_already_granted,
                        "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="view",
                            grouping_type="database",
                            grouping_name=SnowflakeConnector.snowflaky(database_name),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
            for schema in fetched_schemas:
                # Fetch all tables from Snowflake for each schema and add
                # to the read_tables_list[] and read_views_list[] variables.
                # This is so we can check that a table given in the config
                # Is valid
                read_table_list.extend(conn.show_tables(schema=schema))
                read_view_list.extend(conn.show_views(schema=schema))
            if table_view_name == "*":
                # If <schema_name>.* then you add all tables to grant list and then grant future
                # If *.* was provided then we're still ok as the full_schema_list
                # Would fetch all schemas and we'd still iterate through each
                # If == * then append all tables to both
                # the grant list AND the full grant list
                read_grant_tables.extend(read_table_list)
                read_grant_views.extend(read_view_list)
                read_grant_tables_full.extend(read_table_list)
                read_grant_views_full.extend(read_view_list)
                for schema in fetched_schemas:
                    # Adds the future grant table format to the granted lists
                    future_table = f"{schema}.<table>"
                    future_view = f"{schema}.<view>"
                    read_grant_tables_full.append(future_table)
                    read_grant_views_full.append(future_view)
                    table_already_granted = self.is_granted_privilege(
                        role, read_privileges, "table", future_table
                    )
                    # Grant future on all tables
                    sql_commands.append(
                        {
                            "already_granted": table_already_granted,
                            "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                                privileges=read_privileges,
                                resource_type="table",
                                grouping_type="schema",
                                grouping_name=SnowflakeConnector.snowflaky(schema),
                                role=SnowflakeConnector.snowflaky_user_role(role),
                            ),
                        }
                    )
                    view_already_granted = self.is_granted_privilege(
                        role, read_privileges, "view", future_view
                    )
                    # Grant future on all views
                    sql_commands.append(
                        {
                            "already_granted": view_already_granted,
                            "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                                privileges=read_privileges,
                                resource_type="view",
                                grouping_type="schema",
                                grouping_name=SnowflakeConnector.snowflaky(schema),
                                role=SnowflakeConnector.snowflaky_user_role(role),
                            ),
                        }
                    )
            # TODO Future elif to have partial table name
            else:
                # Else the table passed is a single entity
                # Check that it's valid and add to list
                if table in read_table_list:
                    read_grant_tables = [table]
                    read_grant_tables_full.append(table)
                if table in read_view_list:
                    read_grant_views = [table]
                    read_grant_views_full.append(table)
            # Grant privileges to all tables flagged for granting.
            # We have this loop b/c we explicitly grant to each table
            # Instead of doing grant to all tables/views in schema
            for db_table in read_grant_tables:
                already_granted = self.is_granted_privilege(
                    role, read_privileges, "table", db_table
                )
                sql_commands.append(
                    {
                        "already_granted": already_granted,
                        "sql": GRANT_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="table",
                            resource_name=SnowflakeConnector.snowflaky(db_table),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
            # Grant privileges to all flagged views
            for db_view in read_grant_views:
                already_granted = self.is_granted_privilege(
                    role, read_privileges, "view", db_view
                )
                sql_commands.append(
                    {
                        "already_granted": already_granted,
                        "sql": GRANT_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="view",
                            resource_name=SnowflakeConnector.snowflaky(db_view),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
        return (sql_commands, read_grant_tables_full, read_grant_views_full)
# TODO: This method remains complex, could use extra refactoring
def _generate_table_write_grants(self, conn, tables, shared_dbs, role): # noqa
sql_commands, write_grant_tables_full, write_grant_views_full = [], [], []
read_privileges = "select"
write_partial_privileges = "insert, update, delete, truncate, references"
write_privileges = f"{read_privileges}, {write_partial_privileges}"
write_privileges_array = write_privileges.split(", ")
for table in tables:
# Split the table identifier into parts {DB_NAME}.{SCHEMA_NAME}.{TABLE_NAME}
# so that we can check and use each one
name_parts = table.split(".")
database_name = name_parts[0] if 0 < len(name_parts) else None
schema_name = name_parts[1] if 1 < len(name_parts) else None
table_view_name = name_parts[2] if 2 < len(name_parts) else None
# Do nothing if this is a table inside a shared database:
# "Granting individual privileges on imported databases is not allowed."
if database_name in shared_dbs:
continue
# Gather the tables/views that privileges will be granted to
write_grant_tables = []
write_grant_views = []
# List of all tables/views in schema
write_table_list = []
write_view_list = []
fetched_schemas = conn.full_schema_list(f"{database_name}.{name_parts[1]}")
# For future grants at the database level
future_database_table = "{database}.<table>".format(database=database_name)
future_database_view = "{database}.<view>".format(database=database_name)
table_already_granted = False
view_already_granted = False
if self.is_granted_privilege(
role, write_privileges, "table", future_database_table
):
table_already_granted = True
write_grant_tables_full.append(future_database_table)
if schema_name == "*" and table_view_name == "*":
sql_commands.append(
{
"already_granted": table_already_granted,
"sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
privileges=write_privileges,
resource_type="table",
grouping_type="database",
grouping_name=SnowflakeConnector.snowflaky(database_name),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
if self.is_granted_privilege(
role, write_privileges, "view", future_database_view
):
view_already_granted = True
write_grant_views_full.append(future_database_view)
if schema_name == "*" and table_view_name == "*":
sql_commands.append(
{
"already_granted": view_already_granted,
"sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
privileges=write_privileges,
resource_type="view",
grouping_type="database",
grouping_name=SnowflakeConnector.snowflaky(database_name),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
for schema in fetched_schemas:
# Fetch all tables from Snowflake for each schema and add
# to the write_tables_list[] and write_views_list[] variables.
# This is so we can check that a table given in the config
# Is valid
write_table_list.extend(conn.show_tables(schema=schema))
write_view_list.extend(conn.show_views(schema=schema))
if table_view_name == "*":
# If <schema_name>.* then you add all tables to grant list and then grant future
# If *.* was provided then we're still ok as the full_schema_list
# Would fetch all schemas and we'd still iterate through each
# If == * then append all tables to both
# the grant list AND the full grant list
write_grant_tables.extend(write_table_list)
write_grant_views.extend(write_view_list)
write_grant_tables_full.extend(write_table_list)
write_grant_views_full.extend(write_view_list)
for schema in fetched_schemas:
# Adds the future grant table format to the granted lists
future_table = f"{schema}.<table>"
future_view = f"{schema}.<view>"
write_grant_tables_full.append(future_table)
write_grant_views_full.append(future_view)
for privilege in write_privileges_array:
# If any of the privileges are not granted, set already_granted to False
table_already_granted = not self.is_granted_privilege(
role, privilege, "table", future_table
)
# Grant future on all tables
sql_commands.append(
{
"already_granted": table_already_granted,
"sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
privileges=write_privileges,
resource_type="table",
grouping_type="schema",
grouping_name=SnowflakeConnector.snowflaky(schema),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
view_already_granted = not self.is_granted_privilege(
role, "select", "view", future_view
)
# Grant future on all views. Select is only privilege
sql_commands.append(
{
"already_granted": view_already_granted,
"sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
privileges="select",
resource_type="view",
grouping_type="schema",
grouping_name=SnowflakeConnector.snowflaky(schema),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
# TODO Future elif to have partial table name
else:
# Only one table/view to be granted permissions to
if table in write_table_list:
write_grant_tables = [table]
write_grant_tables_full.append(table)
if table in write_view_list:
write_grant_views = [table]
write_grant_views_full.append(table)
# Grant privileges to all tables flagged for granting.
# We have this loop b/c we explicitly grant to each table
# Instead of doing grant to all tables/views in schema
for db_table in write_grant_tables:
table_already_granted = True
for privilege in write_privileges_array:
# If any of the privileges are not granted, set already_granted to False
if not self.is_granted_privilege(
role, privilege, "table", db_table
):
table_already_granted = False
sql_commands.append(
{
"already_granted": table_already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges=write_privileges,
resource_type="table",
resource_name=SnowflakeConnector.snowflaky(db_table),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
# Grant privileges to all views in that schema.
# Select is the only schemaObjectPrivilege for views
# https://docs.snowflake.net/manuals/sql-reference/sql/grant-privilege.html
for db_view in write_grant_views:
already_granted = False
if self.is_granted_privilege(role, "select", "view", db_view):
already_granted = True
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges="select",
resource_type="view",
resource_name=SnowflakeConnector.snowflaky(db_view),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
return (sql_commands, write_grant_tables_full, write_grant_views_full)
def _generate_revoke_select_privs(
self,
role: str,
all_grant_resources: List[str],
shared_dbs: Set[Any],
spec_dbs: Set[Any],
privilege_set: str,
resource_type: str,
granted_resources: List[str],
) -> List[Dict[str, Any]]:
"""
Generates REVOKE privileges for tables/views known as resources here
role: Snowflake role to revoke the resource from
all_grant_resources: All the GRANTS applied
shared_dbs: Shared databases to be skipped
spec_dbs: Databases to apply REVOKE statements on
privilege_set: Privileges to revoke (i.e. SELECT, INSERT, etc.)
resource_type: Database object to revoke (i.e. table, view, etc.)
granted_resources: List of GRANTS to filter through
Returns a list of REVOKE statements
"""
sql_commands = []
for granted_resource in granted_resources:
resource_split = granted_resource.split(".")
database_name = resource_split[0]
schema_name = resource_split[1] if 1 < len(resource_split) else None
# For future grants at the database level
if len(resource_split) == 2 or (
len(resource_split) == 3 and schema_name == "*"
):
future_resource = f"{database_name}.<{resource_type}>"
grouping_type = "database"
grouping_name = database_name
else:
future_resource = f"{database_name}.{schema_name}.<{resource_type}>"
grouping_type = "schema"
grouping_name = f"{database_name}.{schema_name}"
if granted_resource not in all_grant_resources and (
database_name in shared_dbs or database_name not in spec_dbs
):
# No privileges to revoke on imported db. Done at database level
# Don't revoke on privileges on databases not defined in spec.
continue
elif (
granted_resource == future_resource
and future_resource not in all_grant_resources
):
# If future privilege is granted in Snowflake but not in grant list
sql_commands.append(
{
"already_granted": False,
"sql": REVOKE_FUTURE_PRIVILEGES_TEMPLATE.format(
privileges=privilege_set,
resource_type=resource_type,
grouping_type=grouping_type,
grouping_name=SnowflakeConnector.snowflaky(grouping_name),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
elif (
granted_resource not in all_grant_resources
and future_resource not in all_grant_resources
):
# Covers case where resource is granted in Snowflake
# But it's not in the grant list and it's not explicitly granted as a future grant
sql_commands.append(
{
"already_granted": False,
"sql": REVOKE_PRIVILEGES_TEMPLATE.format(
privileges=privilege_set,
resource_type=resource_type,
resource_name=SnowflakeConnector.snowflaky(
granted_resource
),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
return sql_commands
def generate_revoke_privs(
self,
role: str,
shared_dbs: Set[Any],
spec_dbs: Set[Any],
all_grant_tables: List[str],
all_grant_views: List[str],
write_grant_tables_full: List[str],
) -> List[Dict[str, Any]]:
read_privileges = "select"
write_partial_privileges = "insert, update, delete, truncate, references"
sql_commands = []
granted_resources = list(
set(self.grants_to_role.get(role, {}).get("select", {}).get("table", []))
)
sql_commands.extend(
self._generate_revoke_select_privs(
role=role,
all_grant_resources=all_grant_tables,
shared_dbs=shared_dbs,
spec_dbs=spec_dbs,
privilege_set=read_privileges,
resource_type="table",
granted_resources=granted_resources,
)
)
granted_resources = list(
set(self.grants_to_role.get(role, {}).get("select", {}).get("view", []))
)
sql_commands.extend(
self._generate_revoke_select_privs(
role=role,
all_grant_resources=all_grant_views,
shared_dbs=shared_dbs,
spec_dbs=spec_dbs,
privilege_set=read_privileges,
resource_type="view",
granted_resources=granted_resources,
)
)
all_write_privs_granted_tables = []
for privilege in write_partial_privileges.split(", "):
table_names = (
self.grants_to_role.get(role, {}).get(privilege, {}).get("table", [])
)
all_write_privs_granted_tables += table_names
all_write_privs_granted_tables = list(set(all_write_privs_granted_tables))
# Write Privileges
# Only need to revoke write privileges for tables since SELECT is the
# only privilege available for views
sql_commands.extend(
self._generate_revoke_select_privs(
role=role,
all_grant_resources=write_grant_tables_full,
shared_dbs=shared_dbs,
spec_dbs=spec_dbs,
privilege_set=write_partial_privileges,
resource_type="table",
granted_resources=all_write_privs_granted_tables,
)
)
return sql_commands
def generate_table_and_view_grants(
self, role: str, tables: Dict[str, List], shared_dbs: Set, spec_dbs: Set
) -> List[Dict]:
"""
Generate the GRANT and REVOKE statements for tables and views
including future grants.
role: the name of the role the privileges are GRANTed to
table: the name of the TABLE/VIEW (e.g. "raw.public.my_table")
shared_dbs: a set of all the shared databases defined in the spec.
spec_dbs: a set of all the databases defined in the spec. This is used in revoke
commands to validate revocations are only for spec'd databases
Returns the SQL commands generated as a List
"""
sql_commands = []
# These are necessary as the provided tables/views are not the full list
# we determine the full list for granting via full_schema_list()
# and store in these variables
read_grant_tables_full = []
read_grant_views_full = []
write_grant_tables_full = []
write_grant_views_full = []
conn = SnowflakeConnector()
read_tables = tables.get("read", [])
read_command, read_table, read_views = self._generate_table_read_grants(
conn, read_tables, shared_dbs, role
)
sql_commands.extend(read_command)
read_grant_tables_full.extend(read_table)
read_grant_views_full.extend(read_views)
write_tables = tables.get("write", [])
write_command, write_table, write_views = self._generate_table_write_grants(
conn, write_tables, shared_dbs, role
)
sql_commands.extend(write_command)
write_grant_tables_full.extend(write_table)
write_grant_views_full.extend(write_views)
all_grant_tables = read_grant_tables_full + write_grant_tables_full
all_grant_views = read_grant_views_full + write_grant_views_full
sql_commands.extend(
self.generate_revoke_privs(
role,
shared_dbs,
spec_dbs,
all_grant_tables,
all_grant_views,
write_grant_tables_full,
)
)
return sql_commands
def generate_alter_user(self, user: str, config: Dict[str, Any]) -> List[Dict]:
"""
Generate the ALTER statements for USERs.
user: the name of the USER
config: the subtree for the user as specified in the spec
Returns the SQL commands generated as a List
"""
sql_commands: List[Any] = []
alter_privileges: List[Any] = []
if self.ignore_memberships:
return sql_commands
if "can_login" in config:
if config.get("can_login"):
alter_privileges.append("DISABLED = FALSE")
else:
alter_privileges.append("DISABLED = TRUE")
if alter_privileges:
sql_commands.append(
{
"already_granted": False,
"sql": ALTER_USER_TEMPLATE.format(
user_name=SnowflakeConnector.snowflaky_user_role(user),
privileges=", ".join(alter_privileges),
),
}
)
return sql_commands
def _generate_ownership_grant_database(
self, role: str, database_refs: List[str]
) -> List[Dict]:
sql_commands = []
for database in database_refs:
already_granted = self.is_granted_privilege(
role, "ownership", "database", database
)
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_OWNERSHIP_TEMPLATE.format(
resource_type="database",
resource_name=SnowflakeConnector.snowflaky(database),
role_name=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
return sql_commands
def _generate_ownership_grant_schema(self, conn, role, schema_refs) -> List[Dict]:
sql_commands = []
for schema in schema_refs:
name_parts = schema.split(".")
info_schema = f"{name_parts[0]}.information_schema"
schemas = []
if name_parts[1] == "*":
db_schemas = conn.show_schemas(name_parts[0])
for db_schema in db_schemas:
if db_schema != info_schema:
schemas.append(db_schema)
else:
schemas = [schema]
for db_schema in schemas:
already_granted = self.is_granted_privilege(
role, "ownership", "schema", db_schema
)
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_OWNERSHIP_TEMPLATE.format(
resource_type="schema",
resource_name=SnowflakeConnector.snowflaky(db_schema),
role_name=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
return sql_commands
def _generate_ownership_grant_table(self, conn, role, table_refs) -> List[Dict]:
sql_commands = []
tables = []
for table in table_refs:
name_parts = table.split(".")
info_schema = f"{name_parts[0]}.information_schema"
if name_parts[2] == "*":
schemas = []
if name_parts[1] == "*":
db_schemas = conn.show_schemas(name_parts[0])
for schema in db_schemas:
if schema != info_schema:
schemas.append(schema)
else:
schemas = [f"{name_parts[0]}.{name_parts[1]}"]
for schema in schemas:
tables.extend(conn.show_tables(schema=schema))
else:
tables.append(table)
# And then grant ownership to all tables
for db_table in tables:
already_granted = self.is_granted_privilege(
role, "ownership", "table", db_table
)
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_OWNERSHIP_TEMPLATE.format(
resource_type="table",
resource_name=SnowflakeConnector.snowflaky(db_table),
role_name=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
return sql_commands
def generate_grant_ownership( # noqa
self, role: str, config: Dict[str, Any]
) -> List[Dict]:
"""
Generate the GRANT ownership statements for databases, schemas and tables.
role: the name of the role (e.g. "loader") ownership will be GRANTed to
config: the subtree for the role as specified in the spec
Returns the SQL commands generated as a List
"""
sql_commands = []
db_refs = config.get("owns", {}).get("databases")
if db_refs:
db_ownership_grants = self._generate_ownership_grant_database(role, db_refs)
sql_commands.extend(db_ownership_grants)
schema_refs = config.get("owns", {}).get("schemas")
if schema_refs:
schema_ownership_grants = self._generate_ownership_grant_schema(
self.conn, role, schema_refs
)
sql_commands.extend(schema_ownership_grants)
table_refs = config.get("owns", {}).get("tables")
if table_refs:
table_ownership_grants = self._generate_ownership_grant_table(
self.conn, role, table_refs
)
sql_commands.extend(table_ownership_grants)
return sql_commands
# ---- NOTE: file-concatenation artifact removed; a second copy of the module head follows ----
import re
from typing import Any, Dict, List, Optional, Set, Tuple
from permifrost.core.logger import GLOBAL_LOGGER as logger
from permifrost.core.permissions.utils.snowflake_connector import SnowflakeConnector
# SQL statement templates used by the grants generator.  Each is filled in
# with str.format; {resource_type} is singular ("table", "view", "schema")
# and is pluralized inline where the FUTURE-grant syntax requires it.
GRANT_ROLE_TEMPLATE = "GRANT ROLE {role_name} TO {type} {entity_name}"
REVOKE_ROLE_TEMPLATE = "REVOKE ROLE {role_name} FROM {type} {entity_name}"
# Object-level privilege grant/revoke,
# e.g. "GRANT usage ON schema db.s TO ROLE r"
GRANT_PRIVILEGES_TEMPLATE = (
    "GRANT {privileges} ON {resource_type} {resource_name} TO ROLE {role}"
)
REVOKE_PRIVILEGES_TEMPLATE = (
    "REVOKE {privileges} ON {resource_type} {resource_name} FROM ROLE {role}"
)
# FUTURE grants apply to objects created later inside a database or schema.
GRANT_FUTURE_PRIVILEGES_TEMPLATE = "GRANT {privileges} ON FUTURE {resource_type}s IN {grouping_type} {grouping_name} TO ROLE {role}"
REVOKE_FUTURE_PRIVILEGES_TEMPLATE = "REVOKE {privileges} ON FUTURE {resource_type}s IN {grouping_type} {grouping_name} FROM ROLE {role}"
ALTER_USER_TEMPLATE = "ALTER USER {user_name} SET {privileges}"
# COPY CURRENT GRANTS preserves existing privileges when ownership moves.
GRANT_OWNERSHIP_TEMPLATE = "GRANT OWNERSHIP ON {resource_type} {resource_name} TO ROLE {role_name} COPY CURRENT GRANTS"
class SnowflakeGrantsGenerator:
    def __init__(
        self,
        grants_to_role: Dict,
        roles_granted_to_user: Dict[str, List[str]],
        ignore_memberships: Optional[bool] = False,
    ) -> None:
        """
        Initialize a grants generator, used to generate the SQL for grants.

        grants_to_role: a dict, mapping role to grants where role is a string
            and grants is a dictionary of privileges to entities.
            e.g. {'functional_role': {'create schema': {'database': ['database_1', 'database_2']}, ...}}
        roles_granted_to_user: a dict, mapping the user to a list of roles,
            e.g. {'user_name': ['role_1', 'role_2']}
        ignore_memberships: bool, whether to skip role grant/revoke of memberships
        """
        self.grants_to_role = grants_to_role
        self.roles_granted_to_user = roles_granted_to_user
        self.ignore_memberships = ignore_memberships
        # Shared connector reused by the ownership-grant helpers.
        self.conn = SnowflakeConnector()
def is_granted_privilege(
self, role: str, privilege: str, entity_type: str, entity_name: str
) -> bool:
"""
Check if <role> has been granted the privilege <privilege> on entity type
<entity_type> with name <entity_name>. First checks if it is a future grant
since snowflaky will format the future grants wrong - i.e. <table> is a part
of the fully qualified name for a future table grant.
For example:
is_granted_privilege('reporter', 'usage', 'database', 'analytics') -> True
means that role reporter has been granted the privilege to use the
Database ANALYTICS on the Snowflake server.
"""
future = True if re.search(r"<(table|view|schema)>", entity_name) else False
grants = (
self.grants_to_role.get(role, {}).get(privilege, {}).get(entity_type, [])
)
if future and entity_name in grants:
return True
if not future and SnowflakeConnector.snowflaky(entity_name) in grants:
return True
return False
def _generate_member_lists(self, config: Dict) -> Tuple[List[str], List[str]]:
"""
Generate a tuple with the member_include_list (e.g. roles that should be granted)
and member_exclude_list (e.g. roles that should not be granted)
config: the subtree for the entity as specified in the spec
Returns: A tuple of two lists with the roles/users to include and exclude:
(member_include_list, member_exclude_list)
"""
member_include_list = []
member_exclude_list = []
if isinstance(config.get("member_of", []), dict):
member_include_list = config.get("member_of", {}).get("include", [])
member_include_list = [
SnowflakeConnector.snowflaky_user_role(role)
for role in member_include_list
]
member_exclude_list = config.get("member_of", {}).get("exclude", [])
member_exclude_list = [
SnowflakeConnector.snowflaky_user_role(role)
for role in member_exclude_list
]
elif isinstance(config.get("member_of", []), list):
member_include_list = config.get("member_of", [])
member_include_list = [
SnowflakeConnector.snowflaky_user_role(role)
for role in member_include_list
]
return (member_include_list, member_exclude_list)
def _generate_member_star_lists(self, all_entities: List, entity: str) -> List[str]:
"""
Generates the member include list when a * privilege is granted
all_entities: a List of all entities defined in the spec
entity: the entity to generate the list for
Returns: a list of all roles to include for the entity
"""
conn = SnowflakeConnector()
show_roles = conn.show_roles()
member_include_list = [
role for role in show_roles if role in all_entities and role != entity
]
return member_include_list
def _generate_sql_commands_for_member_of_list(
self, member_of_list: List[str], entity: str, entity_type: str
) -> List[Dict]:
"""For a given member_of list and entity, generate the SQL commands
to grant the entity privileges for every member_role in the member_of list
member_of_list: List of roles to generate sql commands for
entity: the user or role to grant permissions for
entity_type: the type of enttiy, either "users" or "roles"
returns: a List of SQL Commands
"""
if entity_type == "users":
grant_type = "user"
elif entity_type == "roles":
grant_type = "role"
else:
raise ValueError("grant_type must be either 'users' or 'roles'")
sql_commands = []
for member_role in member_of_list:
granted_role = SnowflakeConnector.snowflaky_user_role(member_role)
already_granted = False
if (
entity_type == "users"
and granted_role in self.roles_granted_to_user[entity]
) or (
entity_type == "roles"
and self.is_granted_privilege(entity, "usage", "role", member_role)
):
already_granted = True
# Don't generate grants for Snowflake default roles as this will raise errors
# on Snowflake
snowflake_default_roles = [
"accountadmin",
"sysadmin",
"securityadmin",
"useradmin",
"public",
]
if (
entity in snowflake_default_roles
and member_role in snowflake_default_roles
):
continue
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_ROLE_TEMPLATE.format(
role_name=SnowflakeConnector.snowflaky_user_role(member_role),
type=grant_type,
entity_name=SnowflakeConnector.snowflaky_user_role(entity),
),
}
)
return sql_commands
def _generate_revoke_sql_commands_for_user(
self, username: str, member_of_list: List[str]
) -> List[Dict]:
"""For a given user, generate the SQL commands to revoke privileges
to any roles not defined in the member of list
"""
sql_commands = []
for granted_role in self.roles_granted_to_user[username]:
if granted_role not in member_of_list:
sql_commands.append(
{
"already_granted": False,
"sql": REVOKE_ROLE_TEMPLATE.format(
role_name=SnowflakeConnector.snowflaky_user_role(
granted_role
),
type="user",
entity_name=SnowflakeConnector.snowflaky_user_role(
username
),
),
}
)
return sql_commands
def _generate_revoke_sql_commands_for_role(self, rolename, member_of_list):
sql_commands = []
for granted_role in (
self.grants_to_role.get(rolename, {}).get("usage", {}).get("role", [])
):
if granted_role not in member_of_list:
snowflake_default_roles = [
"accountadmin",
"sysadmin",
"securityadmin",
"useradmin",
"public",
]
if (
granted_role in snowflake_default_roles
and rolename in snowflake_default_roles
):
continue
sql_commands.append(
{
"already_granted": False,
"sql": REVOKE_ROLE_TEMPLATE.format(
role_name=SnowflakeConnector.snowflaky_user_role(
granted_role
),
type="role",
entity_name=SnowflakeConnector.snowflaky_user_role(
rolename
),
),
}
)
return sql_commands
def generate_grant_roles(
self,
entity_type: str,
entity: str,
config: Dict[str, Any],
all_entities: Optional[List] = None,
) -> List[Dict]:
"""
Generate the GRANT statements for both roles and users.
entity_type: "users" or "roles"
entity: the name of the entity (e.g. "yannis" or "reporter")
config: the subtree for the entity as specified in the spec
all_entities: all roles defined in spec
Returns the SQL commands generated as a list
"""
sql_commands: List[Dict] = []
if self.ignore_memberships:
return sql_commands
member_include_list, member_exclude_list = self._generate_member_lists(config)
if len(member_include_list) == 1 and member_include_list[0] == '"*"':
if not all_entities:
raise ValueError(
"Cannot generate grant roles if all_entities not provided"
)
member_include_list = self._generate_member_star_lists(all_entities, entity)
member_of_list = [
role for role in member_include_list if role not in member_exclude_list
]
sql_commands.extend(
self._generate_sql_commands_for_member_of_list(
member_of_list, entity, entity_type
)
)
if entity_type == "users":
sql_commands.extend(
self._generate_revoke_sql_commands_for_user(entity, member_of_list)
)
if entity_type == "roles":
sql_commands.extend(
self._generate_revoke_sql_commands_for_role(entity, member_of_list)
)
return sql_commands
def _generate_database_commands(self, role, config, shared_dbs, spec_dbs):
databases = {
"read": config.get("privileges", {}).get("databases", {}).get("read", []),
"write": config.get("privileges", {}).get("databases", {}).get("write", []),
}
if len(databases.get("read", "")) == 0:
logger.debug(
"`privileges.databases.read` not found for role {}, skipping generation of database read level GRANT statements.".format(
role
)
)
if len(databases.get("write", "")) == 0:
logger.debug(
"`privileges.databases.write` not found for role {}, skipping generation of database write level GRANT statements.".format(
role
)
)
database_commands = self.generate_database_grants(
role=role, databases=databases, shared_dbs=shared_dbs, spec_dbs=spec_dbs
)
return database_commands
def _generate_schema_commands(self, role, config, shared_dbs, spec_dbs):
schemas = {
"read": config.get("privileges", {}).get("schemas", {}).get("read", []),
"write": config.get("privileges", {}).get("schemas", {}).get("write", []),
}
if len(schemas.get("read", "")) == 0:
logger.debug(
"`privileges.schemas.read` not found for role {}, skipping generation of schemas read level GRANT statements.".format(
role
)
)
if len(schemas.get("write", "")) == 0:
logger.debug(
"`privileges.schemas.write` not found for role {}, skipping generation of schemas write level GRANT statements.".format(
role
)
)
schema_commands = self.generate_schema_grants(
role=role, schemas=schemas, shared_dbs=shared_dbs, spec_dbs=spec_dbs
)
return schema_commands
def _generate_table_commands(self, role, config, shared_dbs, spec_dbs):
tables = {
"read": config.get("privileges", {}).get("tables", {}).get("read", []),
"write": config.get("privileges", {}).get("tables", {}).get("write", []),
}
if len(tables.get("read", "")) == 0:
logger.debug(
"`privileges.tables.read` not found for role {}, skipping generation of tables read level GRANT statements.".format(
role
)
)
if len(tables.get("write", "")) == 0:
logger.debug(
"`privileges.tables.write` not found for role {}, skipping generation of tables write level GRANT statements.".format(
role
)
)
table_commands = self.generate_table_and_view_grants(
role=role, tables=tables, shared_dbs=shared_dbs, spec_dbs=spec_dbs
)
return table_commands
def generate_grant_privileges_to_role(
self, role: str, config: Dict[str, Any], shared_dbs: Set, spec_dbs: Set
) -> List[Dict]:
"""
Generate all the privilege granting and revocation
statements for a role so Snowflake matches the spec.
Most of the SQL command that will be generated are privileges granted to
roles and this function orchestrates the whole process.
role: the name of the role (e.g. "loader" or "reporter") the privileges
are granted to and revoked from
config: the subtree for the role as specified in the spec
shared_dbs: a set of all the shared databases defined in the spec.
Used down the road by generate_database_grants() to also grant
"imported privileges" when access is granted to a shared DB.
spec_dbs: a set of all the databases defined in the spec. This is used in revoke
commands to validate revocations are only for spec'd databases
Returns the SQL commands generated as a list
"""
sql_commands: List[Dict] = []
try:
warehouses = config["warehouses"]
new_commands = self.generate_warehouse_grants(
role=role, warehouses=warehouses
)
sql_commands.extend(new_commands)
except KeyError:
logger.debug(
"`warehouses` not found for role {}, skipping generation of Warehouse GRANT statements.".format(
role
)
)
database_commands = self._generate_database_commands(
role, config, shared_dbs, spec_dbs
)
sql_commands.extend(database_commands)
schema_commands = self._generate_schema_commands(
role, config, shared_dbs, spec_dbs
)
sql_commands.extend(schema_commands)
table_commands = self._generate_table_commands(
role, config, shared_dbs, spec_dbs
)
sql_commands.extend(table_commands)
return sql_commands
def generate_warehouse_grants(
self, role: str, warehouses: list
) -> List[Dict[str, Any]]:
"""
Generate the GRANT statements for Warehouse usage and operation.
role: the name of the role the privileges are GRANTed to
warehouses: list of warehouses for the specified role
Returns the SQL command generated
"""
sql_commands: List[Dict] = []
for warehouse in warehouses:
for priv in ["usage", "operate", "monitor"]:
already_granted = self.is_granted_privilege(
role, priv, "warehouse", warehouse
)
sql_commands.append(
{
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges=priv,
resource_type="warehouse",
resource_name=SnowflakeConnector.snowflaky(warehouse),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
for priv in ["usage", "operate", "monitor"]:
for granted_warehouse in (
self.grants_to_role.get(role, {}).get(priv, {}).get("warehouse", [])
):
if granted_warehouse not in warehouses:
sql_commands.append(
{
"already_granted": False,
"sql": REVOKE_PRIVILEGES_TEMPLATE.format(
privileges=priv,
resource_type="warehouse",
resource_name=SnowflakeConnector.snowflaky(
granted_warehouse
),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
return sql_commands
def _generate_database_read_privs(
self, database: str, role: str, shared_dbs: Set[str], read_privileges: str
) -> Dict:
already_granted = self.is_granted_privilege(role, "usage", "database", database)
# If this is a shared database, we have to grant the "imported privileges"
# privilege to the user and skip granting the specific permissions as
# "Granting individual privileges on imported databases is not allowed."
if database in shared_dbs:
return {
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges="imported privileges",
resource_type="database",
resource_name=SnowflakeConnector.snowflaky(database),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
else:
return {
"already_granted": already_granted,
"sql": GRANT_PRIVILEGES_TEMPLATE.format(
privileges=read_privileges,
resource_type="database",
resource_name=SnowflakeConnector.snowflaky(database),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
    def generate_database_grants(
        self, role: str, databases: Dict[str, List], shared_dbs: Set, spec_dbs: Set
    ) -> List[Dict[str, Any]]:
        """
        Generate the GRANT and REVOKE statements for Databases
        to align Snowflake with the spec.

        role: the name of the role the privileges are GRANTed to
        databases: dict with "read" and "write" lists of databases (e.g. "raw")
        shared_dbs: a set of all the shared databases defined in the spec.
        spec_dbs: a set of all the databases defined in the spec. This is used in revoke
            commands to validate revocations are only for spec'd databases

        Returns the SQL commands generated as a list
        """
        sql_commands = []
        # Read access is plain "usage"; write additionally needs monitor and
        # create schema.
        read_privileges = "usage"
        partial_write_privileges = "monitor, create schema"
        write_privileges = f"{read_privileges}, {partial_write_privileges}"
        # GRANT read-level access (shared databases handled by the helper).
        for database in databases.get("read", []):
            read_grant = self._generate_database_read_privs(
                database=database,
                role=role,
                shared_dbs=shared_dbs,
                read_privileges=read_privileges,
            )
            sql_commands.append(read_grant)
        # GRANT write-level access.
        for database in databases.get("write", []):
            # Only "already granted" when every write-level privilege exists.
            already_granted = (
                self.is_granted_privilege(role, "usage", "database", database)
                and self.is_granted_privilege(role, "monitor", "database", database)
                and self.is_granted_privilege(
                    role, "create schema", "database", database
                )
            )
            # If this is a shared database, we have to grant the "imported privileges"
            # privilege to the user and skip granting the specific permissions as
            # "Granting individual privileges on imported databases is not allowed."
            if database in shared_dbs:
                sql_commands.append(
                    {
                        "already_granted": already_granted,
                        "sql": GRANT_PRIVILEGES_TEMPLATE.format(
                            privileges="imported privileges",
                            resource_type="database",
                            resource_name=SnowflakeConnector.snowflaky(database),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
                continue
            sql_commands.append(
                {
                    "already_granted": already_granted,
                    "sql": GRANT_PRIVILEGES_TEMPLATE.format(
                        privileges=write_privileges,
                        resource_type="database",
                        resource_name=SnowflakeConnector.snowflaky(database),
                        role=SnowflakeConnector.snowflaky_user_role(role),
                    ),
                }
            )
        # REVOKES
        # The "Usage" privilege is consistent across read and write.
        # Compare granted usage to full read/write usage set
        # and revoke missing ones
        usage_privs_on_db = (
            self.grants_to_role.get(role, {}).get("usage", {}).get("database", [])
        )
        for granted_database in usage_privs_on_db:
            # If it's a shared database, only revoke imported
            # We'll only know if it's a shared DB based on the spec
            all_databases = databases.get("read", []) + databases.get("write", [])
            if granted_database not in spec_dbs:
                # Skip revocation on database that are not defined in spec
                continue
            # Revoke read/write permissions on shared databases
            elif (
                granted_database not in all_databases and granted_database in shared_dbs
            ):
                sql_commands.append(
                    {
                        "already_granted": False,
                        "sql": REVOKE_PRIVILEGES_TEMPLATE.format(
                            privileges="imported privileges",
                            resource_type="database",
                            resource_name=SnowflakeConnector.snowflaky(
                                granted_database
                            ),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
            # Revoke read permissions on created databases in Snowflake
            elif granted_database not in all_databases:
                sql_commands.append(
                    {
                        "already_granted": False,
                        "sql": REVOKE_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="database",
                            resource_name=SnowflakeConnector.snowflaky(
                                granted_database
                            ),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
        # Get all other write privilege dbs in case there are dbs where
        # usage was revoked but other write permissions still exist
        # This also preserves the case where somebody switches write access
        # for read access
        monitor_privs_on_db = (
            self.grants_to_role.get(role, {}).get("monitor", {}).get("database", [])
        )
        create_privs_on_db = (
            self.grants_to_role.get(role, {})
            .get("create schema", {})
            .get("database", [])
        )
        # NOTE(review): a database appearing in both lists is visited twice,
        # producing a duplicate revoke statement — presumably harmless since
        # revokes are idempotent, but confirm.
        full_write_privs_on_dbs = monitor_privs_on_db + create_privs_on_db
        for granted_database in full_write_privs_on_dbs:
            # If it's a shared database, only revoke imported
            # We'll only know if it's a shared DB based on the spec
            if (
                granted_database not in databases.get("write", [])
                and granted_database in shared_dbs
            ):
                sql_commands.append(
                    {
                        "already_granted": False,
                        "sql": REVOKE_PRIVILEGES_TEMPLATE.format(
                            privileges="imported privileges",
                            resource_type="database",
                            resource_name=SnowflakeConnector.snowflaky(
                                granted_database
                            ),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
            elif granted_database not in databases.get("write", []):
                sql_commands.append(
                    {
                        "already_granted": False,
                        "sql": REVOKE_PRIVILEGES_TEMPLATE.format(
                            privileges=partial_write_privileges,
                            resource_type="database",
                            resource_name=SnowflakeConnector.snowflaky(
                                granted_database
                            ),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
        return sql_commands
    def _generate_schema_read_grants(
        self, schemas: List[str], shared_dbs: Set, role: str
    ) -> Tuple[List[Dict], List]:
        """Generate read-level ("usage") GRANT statements for *schemas*.

        schemas: schema identifiers from the spec ("db.schema" or "db.*")
        shared_dbs: shared databases defined in the spec (skipped here)
        role: the role the privileges are granted to

        Returns a tuple (sql_commands, read_grant_schemas), where
        read_grant_schemas is the expanded list of schemas that received
        grants, including "<db>.<schema>" future-grant markers.
        """
        sql_commands = []
        read_grant_schemas = []
        read_privileges = "usage"
        for schema in schemas:
            # Split the schema identifier into parts {DB_NAME}.{SCHEMA_NAME}
            # so that we can check and use each one
            name_parts = schema.split(".")
            # Do nothing if this is a schema inside a shared database:
            # "Granting individual privileges on imported databases is not allowed."
            database = name_parts[0]
            if database in shared_dbs:
                continue
            # NOTE(review): assumes every entry contains a dot — a bare "db"
            # entry would raise IndexError on name_parts[1] below; confirm the
            # spec loader guarantees the "db.schema" shape.
            conn = SnowflakeConnector()
            fetched_schemas = conn.full_schema_list(schema)
            read_grant_schemas.extend(fetched_schemas)
            if name_parts[1] == "*":
                # If <db_name>.* then you can grant future and add future schema to grant list
                future_schema = f"{database}.<schema>"
                read_grant_schemas.append(future_schema)
                schema_already_granted = self.is_granted_privilege(
                    role, read_privileges, "schema", future_schema
                )
                # Grant on FUTURE schemas
                sql_commands.append(
                    {
                        "already_granted": schema_already_granted,
                        "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="schema",
                            grouping_type="database",
                            grouping_name=SnowflakeConnector.snowflaky(database),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
            # Explicit grant on each schema that currently exists.
            for db_schema in fetched_schemas:
                already_granted = False
                if self.is_granted_privilege(
                    role, read_privileges, "schema", db_schema
                ):
                    already_granted = True
                sql_commands.append(
                    {
                        "already_granted": already_granted,
                        "sql": GRANT_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="schema",
                            resource_name=SnowflakeConnector.snowflaky(db_schema),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
        return (sql_commands, read_grant_schemas)
    def _generate_schema_write_grants(
        self, schemas: List[str], shared_dbs: Set, role: str
    ) -> Tuple[List[Dict], List]:
        """Generate write-level GRANT statements for *schemas*.

        Write access is "usage" plus the create/monitor privilege set below.

        schemas: schema identifiers from the spec ("db.schema" or "db.*")
        shared_dbs: shared databases defined in the spec (skipped here)
        role: the role the privileges are granted to

        Returns a tuple (sql_commands, write_grant_schemas), where
        write_grant_schemas is the expanded list of schemas that received
        grants, including "<db>.<schema>" future-grant markers.
        """
        sql_commands = []
        write_grant_schemas = []
        read_privileges = "usage"
        partial_write_privileges = (
            "monitor, create table,"
            " create view, create stage, create file format,"
            " create sequence, create function, create pipe"
        )
        write_privileges = f"{read_privileges}, {partial_write_privileges}"
        write_privileges_array = write_privileges.split(", ")
        for schema in schemas:
            # Split the schema identifier into parts {DB_NAME}.{SCHEMA_NAME}
            # so that we can check and use each one
            name_parts = schema.split(".")
            # Do nothing if this is a schema inside a shared database:
            # "Granting individual privileges on imported databases is not allowed."
            database = name_parts[0]
            if database in shared_dbs:
                continue
            conn = SnowflakeConnector()
            fetched_schemas = conn.full_schema_list(schema)
            write_grant_schemas.extend(fetched_schemas)
            if name_parts[1] == "*":
                # If <db_name>.* then you can grant future and add future schema to grant list
                future_schema = f"{database}.<schema>"
                write_grant_schemas.append(future_schema)
                already_granted = True
                for privilege in write_privileges_array:
                    # If any of the privileges are not granted, set already_granted to False
                    if not self.is_granted_privilege(
                        role, privilege, "schema", future_schema
                    ):
                        already_granted = False
                # Grant on FUTURE schemas
                sql_commands.append(
                    {
                        "already_granted": already_granted,
                        "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                            privileges=write_privileges,
                            resource_type="schema",
                            grouping_type="database",
                            grouping_name=SnowflakeConnector.snowflaky(database),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
            # Explicit grant on each schema that currently exists.
            for db_schema in fetched_schemas:
                already_granted = True
                for privilege in write_privileges_array:
                    # If any of the privileges are not granted, set already_granted to False
                    if not self.is_granted_privilege(
                        role, privilege, "schema", db_schema
                    ):
                        already_granted = False
                sql_commands.append(
                    {
                        "already_granted": already_granted,
                        "sql": GRANT_PRIVILEGES_TEMPLATE.format(
                            privileges=write_privileges,
                            resource_type="schema",
                            resource_name=SnowflakeConnector.snowflaky(db_schema),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
        return (sql_commands, write_grant_schemas)
    def _generate_schema_revokes(
        self, usage_schemas, all_grant_schemas, shared_dbs, spec_dbs, role
    ) -> List[Dict]:
        """Generate REVOKE statements for "usage" grants on schemas that are
        no longer in the spec'd grant list.

        usage_schemas: schemas the role currently has "usage" on
        all_grant_schemas: full expanded read+write grant list (including
            "<db>.<schema>" future-grant markers)
        shared_dbs: shared databases defined in the spec
        spec_dbs: all databases defined in the spec; revocations are limited
            to these
        role: the role the privileges are revoked from
        """
        sql_commands = []
        read_privileges = "usage"
        for granted_schema in usage_schemas:
            database_name = granted_schema.split(".")[0]
            future_schema_name = f"{database_name}.<schema>"
            if granted_schema not in all_grant_schemas and (
                database_name in shared_dbs or database_name not in spec_dbs
            ):
                # No privileges to revoke on imported db. Done at database level
                # Don't revoke on privileges on databases not defined in spec.
                continue
            elif (  # If future privilege is granted on snowflake but not in grant list
                granted_schema == future_schema_name
                and future_schema_name not in all_grant_schemas
            ):
                sql_commands.append(
                    {
                        "already_granted": False,
                        "sql": REVOKE_FUTURE_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="schema",
                            grouping_type="database",
                            grouping_name=SnowflakeConnector.snowflaky(database_name),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
            elif (
                granted_schema not in all_grant_schemas
                and future_schema_name not in all_grant_schemas
            ):
                # Covers case where schema is granted in Snowflake
                # But it's not in the grant list and it's not explicitly granted as a future grant
                sql_commands.append(
                    {
                        "already_granted": False,
                        "sql": REVOKE_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="schema",
                            resource_name=SnowflakeConnector.snowflaky(granted_schema),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
        return sql_commands
# TODO: This method is too complex, consider refactoring
def generate_schema_grants(
self, role: str, schemas: Dict[str, List], shared_dbs: Set, spec_dbs: Set
) -> List[Dict]:
"""
Generate the GRANT and REVOKE statements for schemas
including future grants.
role: the name of the role the privileges are GRANTed to
schemas: the name of the Schema (e.g. "raw.public", "raw.*")
shared_dbs: a set of all the shared databases defined in the spec.
spec_dbs: a set of all the databases defined in the spec. This is used in revoke
commands to validate revocations are only for spec'd databases
Returns the SQL commands generated as a List
"""
sql_commands = []
# Schema lists to hold read/write grants. This is necessary
# as the provided schemas are not the full list - we determine
# the full list via full_schema_list and store in these variables
read_grant_schemas = []
write_grant_schemas = []
partial_write_privileges = (
"monitor, create table,"
" create view, create stage, create file format,"
" create sequence, create function, create pipe"
)
# Get Schema Read Commands
read_schemas = schemas.get("read", [])
read_commands, read_grants = self._generate_schema_read_grants(
read_schemas, shared_dbs, role
)
sql_commands.extend(read_commands)
read_grant_schemas.extend(read_grants)
# Get Schema Write Commands
write_schemas = schemas.get("write", [])
write_commands, write_grants = self._generate_schema_write_grants(
write_schemas, shared_dbs, role
)
sql_commands.extend(write_commands)
write_grant_schemas.extend(write_grants)
# REVOKES
# The "usage" privilege is consistent across read and write.
# Compare granted usage to full read/write set and revoke missing ones
usage_schemas = set(
self.grants_to_role.get(role, {}).get("usage", {}).get("schema", [])
)
all_grant_schemas = read_grant_schemas + write_grant_schemas
sql_commands.extend(
self._generate_schema_revokes(
usage_schemas, all_grant_schemas, shared_dbs, spec_dbs, role
)
)
# Get all other write privilege schemas in case there are schemas where
# usage was revoked but other write permissions still exist
# This also preserves the case where somebody switches write access
# for read access
other_privileges = [
"monitor",
"create table",
"create view",
"create stage",
"create file format",
"create sequence",
"create pipe",
]
other_schema_grants = list()
for privilege in other_privileges:
other_schema_grants.extend(
self.grants_to_role.get(role, {}).get(privilege, {}).get("schema", [])
)
for granted_schema in other_schema_grants:
database_name = granted_schema.split(".")[0]
future_schema_name = f"{database_name}.<schema>"
if granted_schema not in write_grant_schemas and (
database_name in shared_dbs or database_name not in spec_dbs
):
# No privileges to revoke on imported db. Done at database level
# Don't revoke on privileges on databases not defined in spec.
continue
elif ( # If future privilege is granted but not in grant list
granted_schema == future_schema_name
and future_schema_name not in write_grant_schemas
):
sql_commands.append(
{
"already_granted": False,
"sql": REVOKE_FUTURE_PRIVILEGES_TEMPLATE.format(
privileges=partial_write_privileges,
resource_type="schema",
grouping_type="database",
grouping_name=SnowflakeConnector.snowflaky(database_name),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
elif (
granted_schema not in write_grant_schemas
and future_schema_name not in write_grant_schemas
):
# Covers case where schema is granted and it's not explicitly granted as a future grant
sql_commands.append(
{
"already_granted": False,
"sql": REVOKE_PRIVILEGES_TEMPLATE.format(
privileges=partial_write_privileges,
resource_type="schema",
resource_name=SnowflakeConnector.snowflaky(granted_schema),
role=SnowflakeConnector.snowflaky_user_role(role),
),
}
)
return sql_commands
    def _generate_table_read_grants(
        self, conn, tables: List[str], shared_dbs: Set, role: str
    ) -> Tuple[List[Dict], List, List]:
        """Generate read-level ("select") GRANT statements for tables/views.

        conn: connector used to expand wildcards and list tables/views
        tables: table identifiers from the spec, "db.schema.table",
            "db.schema.*" or "db.*.*"
        shared_dbs: shared databases defined in the spec (skipped here)
        role: the role the privileges are granted to

        Returns a tuple (sql_commands, read_grant_tables_full,
        read_grant_views_full); the two lists are the expanded table/view
        names that should be considered granted, including "<table>"/"<view>"
        future-grant markers.
        """
        sql_commands = []
        read_grant_tables_full = []
        read_grant_views_full = []
        read_privileges = "select"
        for table in tables:
            # Split the table identifier into parts {DB_NAME}.{SCHEMA_NAME}.{TABLE_NAME}
            # so that we can check and use each one
            name_parts = table.split(".")
            database_name = name_parts[0] if 0 < len(name_parts) else None
            schema_name = name_parts[1] if 1 < len(name_parts) else None
            table_view_name = name_parts[2] if 2 < len(name_parts) else None
            # Do nothing if this is a table inside a shared database:
            # "Granting individual privileges on imported databases is not allowed."
            if database_name in shared_dbs:
                continue
            # Gather the tables/views that privileges will be granted to
            # for the given table schema
            read_grant_tables = []
            read_grant_views = []
            # List of all tables/views in schema for validation
            read_table_list = []
            read_view_list = []
            fetched_schemas = conn.full_schema_list(f"{database_name}.{schema_name}")
            # For future grants at the database level for tables
            future_database_table = "{database}.<table>".format(database=database_name)
            table_already_granted = self.is_granted_privilege(
                role, read_privileges, "table", future_database_table
            )
            # NOTE(review): the database-level future marker is appended for
            # EVERY entry, even non-wildcard ones, while the matching GRANT is
            # only emitted for "db.*.*" — presumably intentional so the revoke
            # pass never revokes the database-level future grant; confirm.
            read_grant_tables_full.append(future_database_table)
            if schema_name == "*" and table_view_name == "*":
                sql_commands.append(
                    {
                        "already_granted": table_already_granted,
                        "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="table",
                            grouping_type="database",
                            grouping_name=SnowflakeConnector.snowflaky(database_name),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
            # For future grants at the database level for views
            future_database_view = "{database}.<view>".format(database=database_name)
            view_already_granted = self.is_granted_privilege(
                role, read_privileges, "view", future_database_view
            )
            read_grant_views_full.append(future_database_view)
            if schema_name == "*" and table_view_name == "*":
                sql_commands.append(
                    {
                        "already_granted": view_already_granted,
                        "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="view",
                            grouping_type="database",
                            grouping_name=SnowflakeConnector.snowflaky(database_name),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
            for schema in fetched_schemas:
                # Fetch all tables from Snowflake for each schema and add
                # to the read_tables_list[] and read_views_list[] variables.
                # This is so we can check that a table given in the config
                # Is valid
                read_table_list.extend(conn.show_tables(schema=schema))
                read_view_list.extend(conn.show_views(schema=schema))
            if table_view_name == "*":
                # If <schema_name>.* then you add all tables to grant list and then grant future
                # If *.* was provided then we're still ok as the full_schema_list
                # Would fetch all schemas and we'd still iterate through each
                # If == * then append all tables to both
                # the grant list AND the full grant list
                read_grant_tables.extend(read_table_list)
                read_grant_views.extend(read_view_list)
                read_grant_tables_full.extend(read_table_list)
                read_grant_views_full.extend(read_view_list)
                for schema in fetched_schemas:
                    # Adds the future grant table format to the granted lists
                    future_table = f"{schema}.<table>"
                    future_view = f"{schema}.<view>"
                    read_grant_tables_full.append(future_table)
                    read_grant_views_full.append(future_view)
                    table_already_granted = self.is_granted_privilege(
                        role, read_privileges, "table", future_table
                    )
                    # Grant future on all tables
                    sql_commands.append(
                        {
                            "already_granted": table_already_granted,
                            "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                                privileges=read_privileges,
                                resource_type="table",
                                grouping_type="schema",
                                grouping_name=SnowflakeConnector.snowflaky(schema),
                                role=SnowflakeConnector.snowflaky_user_role(role),
                            ),
                        }
                    )
                    view_already_granted = self.is_granted_privilege(
                        role, read_privileges, "view", future_view
                    )
                    # Grant future on all views
                    sql_commands.append(
                        {
                            "already_granted": view_already_granted,
                            "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                                privileges=read_privileges,
                                resource_type="view",
                                grouping_type="schema",
                                grouping_name=SnowflakeConnector.snowflaky(schema),
                                role=SnowflakeConnector.snowflaky_user_role(role),
                            ),
                        }
                    )
            # TODO Future elif to have partial table name
            else:
                # Else the table passed is a single entity
                # Check that it's valid and add to list
                if table in read_table_list:
                    read_grant_tables = [table]
                    read_grant_tables_full.append(table)
                if table in read_view_list:
                    read_grant_views = [table]
                    read_grant_views_full.append(table)
            # Grant privileges to all tables flagged for granting.
            # We have this loop b/c we explicitly grant to each table
            # Instead of doing grant to all tables/views in schema
            for db_table in read_grant_tables:
                already_granted = self.is_granted_privilege(
                    role, read_privileges, "table", db_table
                )
                sql_commands.append(
                    {
                        "already_granted": already_granted,
                        "sql": GRANT_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="table",
                            resource_name=SnowflakeConnector.snowflaky(db_table),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
            # Grant privileges to all flagged views
            for db_view in read_grant_views:
                already_granted = self.is_granted_privilege(
                    role, read_privileges, "view", db_view
                )
                sql_commands.append(
                    {
                        "already_granted": already_granted,
                        "sql": GRANT_PRIVILEGES_TEMPLATE.format(
                            privileges=read_privileges,
                            resource_type="view",
                            resource_name=SnowflakeConnector.snowflaky(db_view),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
        return (sql_commands, read_grant_tables_full, read_grant_views_full)
# TODO: This method remains complex, could use extra refactoring
def _generate_table_write_grants(self, conn, tables, shared_dbs, role):  # noqa
    """
    Generate the GRANT statements (including future grants) for write
    privileges on tables and views.

    conn: SnowflakeConnector used to expand wildcard schemas and to list
        the tables/views that actually exist
    tables: table identifiers from the spec, e.g. "db.schema.table",
        "db.schema.*" or "db.*.*"
    shared_dbs: set of shared (imported) databases; entries inside them are
        skipped since "Granting individual privileges on imported databases
        is not allowed."
    role: the name of the role the privileges are GRANTed to

    Returns a tuple of
        (sql_commands, write_grant_tables_full, write_grant_views_full)
    where the *_full lists contain every table/view (including future-grant
    placeholders such as "db.schema.<table>") that was granted to.
    """
    sql_commands, write_grant_tables_full, write_grant_views_full = [], [], []
    read_privileges = "select"
    write_partial_privileges = "insert, update, delete, truncate, references"
    write_privileges = f"{read_privileges}, {write_partial_privileges}"
    write_privileges_array = write_privileges.split(", ")
    for table in tables:
        # Split the table identifier into parts {DB_NAME}.{SCHEMA_NAME}.{TABLE_NAME}
        # so that we can check and use each one
        name_parts = table.split(".")
        database_name = name_parts[0] if 0 < len(name_parts) else None
        schema_name = name_parts[1] if 1 < len(name_parts) else None
        table_view_name = name_parts[2] if 2 < len(name_parts) else None
        # Do nothing if this is a table inside a shared database:
        # "Granting individual privileges on imported databases is not allowed."
        if database_name in shared_dbs:
            continue
        # Gather the tables/views that privileges will be granted to
        write_grant_tables = []
        write_grant_views = []
        # List of all tables/views in schema, used to validate spec entries
        write_table_list = []
        write_view_list = []
        # NOTE(review): assumes the spec entry always has a schema part;
        # a bare "db" entry would raise IndexError here — confirm that
        # upstream spec validation guarantees this.
        fetched_schemas = conn.full_schema_list(f"{database_name}.{name_parts[1]}")
        # For future grants at the database level
        future_database_table = "{database}.<table>".format(database=database_name)
        future_database_view = "{database}.<view>".format(database=database_name)
        table_already_granted = False
        view_already_granted = False
        if self.is_granted_privilege(
            role, write_privileges, "table", future_database_table
        ):
            table_already_granted = True
            write_grant_tables_full.append(future_database_table)
        if schema_name == "*" and table_view_name == "*":
            # db.*.* grants future write on all tables at the database level
            sql_commands.append(
                {
                    "already_granted": table_already_granted,
                    "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                        privileges=write_privileges,
                        resource_type="table",
                        grouping_type="database",
                        grouping_name=SnowflakeConnector.snowflaky(database_name),
                        role=SnowflakeConnector.snowflaky_user_role(role),
                    ),
                }
            )
        if self.is_granted_privilege(
            role, write_privileges, "view", future_database_view
        ):
            view_already_granted = True
            write_grant_views_full.append(future_database_view)
        if schema_name == "*" and table_view_name == "*":
            sql_commands.append(
                {
                    "already_granted": view_already_granted,
                    "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                        privileges=write_privileges,
                        resource_type="view",
                        grouping_type="database",
                        grouping_name=SnowflakeConnector.snowflaky(database_name),
                        role=SnowflakeConnector.snowflaky_user_role(role),
                    ),
                }
            )
        for schema in fetched_schemas:
            # Fetch all tables from Snowflake for each schema and add
            # to the write_table_list[] and write_view_list[] variables.
            # This is so we can check that a table given in the config
            # is valid
            write_table_list.extend(conn.show_tables(schema=schema))
            write_view_list.extend(conn.show_views(schema=schema))
        if table_view_name == "*":
            # If <schema_name>.* then add all tables to the grant list and
            # grant future.  If *.* was provided we're still ok, as
            # full_schema_list fetched all schemas and we iterate each one.
            write_grant_tables.extend(write_table_list)
            write_grant_views.extend(write_view_list)
            write_grant_tables_full.extend(write_table_list)
            write_grant_views_full.extend(write_view_list)
            for schema in fetched_schemas:
                # Adds the future grant table format to the granted lists
                future_table = f"{schema}.<table>"
                future_view = f"{schema}.<view>"
                write_grant_tables_full.append(future_table)
                write_grant_views_full.append(future_view)
                # BUG FIX: the previous code re-assigned the flag with
                # `not is_granted_privilege(...)` on every loop iteration,
                # so only the last privilege checked counted AND the result
                # was inverted.  already_granted must be True only when
                # EVERY write privilege is already granted.
                table_already_granted = all(
                    self.is_granted_privilege(
                        role, privilege, "table", future_table
                    )
                    for privilege in write_privileges_array
                )
                # Grant future on all tables
                sql_commands.append(
                    {
                        "already_granted": table_already_granted,
                        "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                            privileges=write_privileges,
                            resource_type="table",
                            grouping_type="schema",
                            grouping_name=SnowflakeConnector.snowflaky(schema),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
                # Select is the only privilege available for views, so a
                # single check suffices.  (BUG FIX: the check was negated.)
                view_already_granted = self.is_granted_privilege(
                    role, "select", "view", future_view
                )
                # Grant future on all views
                sql_commands.append(
                    {
                        "already_granted": view_already_granted,
                        "sql": GRANT_FUTURE_PRIVILEGES_TEMPLATE.format(
                            privileges="select",
                            resource_type="view",
                            grouping_type="schema",
                            grouping_name=SnowflakeConnector.snowflaky(schema),
                            role=SnowflakeConnector.snowflaky_user_role(role),
                        ),
                    }
                )
        # TODO Future elif to have partial table name
        else:
            # Only one table/view to be granted permissions to;
            # check that it exists before adding it to the grant lists
            if table in write_table_list:
                write_grant_tables = [table]
                write_grant_tables_full.append(table)
            if table in write_view_list:
                write_grant_views = [table]
                write_grant_views_full.append(table)
        # Grant privileges to all tables flagged for granting.
        # We have this loop b/c we explicitly grant to each table
        # instead of doing grant to all tables/views in schema
        for db_table in write_grant_tables:
            # already_granted only when every write privilege is in place
            table_already_granted = all(
                self.is_granted_privilege(role, privilege, "table", db_table)
                for privilege in write_privileges_array
            )
            sql_commands.append(
                {
                    "already_granted": table_already_granted,
                    "sql": GRANT_PRIVILEGES_TEMPLATE.format(
                        privileges=write_privileges,
                        resource_type="table",
                        resource_name=SnowflakeConnector.snowflaky(db_table),
                        role=SnowflakeConnector.snowflaky_user_role(role),
                    ),
                }
            )
        # Grant privileges to all views in that schema.
        # Select is the only schemaObjectPrivilege for views
        # https://docs.snowflake.net/manuals/sql-reference/sql/grant-privilege.html
        for db_view in write_grant_views:
            already_granted = self.is_granted_privilege(
                role, "select", "view", db_view
            )
            sql_commands.append(
                {
                    "already_granted": already_granted,
                    "sql": GRANT_PRIVILEGES_TEMPLATE.format(
                        privileges="select",
                        resource_type="view",
                        resource_name=SnowflakeConnector.snowflaky(db_view),
                        role=SnowflakeConnector.snowflaky_user_role(role),
                    ),
                }
            )
    return (sql_commands, write_grant_tables_full, write_grant_views_full)
def _generate_revoke_select_privs(
    self,
    role: str,
    all_grant_resources: List[str],
    shared_dbs: Set[Any],
    spec_dbs: Set[Any],
    privilege_set: str,
    resource_type: str,
    granted_resources: List[str],
) -> List[Dict[str, Any]]:
    """
    Generate REVOKE statements for tables/views (the "resources" here).

    role: Snowflake role to revoke the resource from
    all_grant_resources: every GRANT that the spec applied
    shared_dbs: shared databases, which are skipped entirely
    spec_dbs: databases the spec covers; REVOKEs only target these
    privilege_set: privileges to revoke (e.g. "select" or write set)
    resource_type: object kind being revoked ("table" or "view")
    granted_resources: grants currently present in Snowflake to filter

    Returns a list of REVOKE statement dicts.
    """
    revoke_commands: List[Dict[str, Any]] = []
    for resource in granted_resources:
        parts = resource.split(".")
        db_name = parts[0]
        schema_part = parts[1] if len(parts) > 1 else None
        # Two-part names (or three-part with a "*" schema) are future
        # grants at the database level; otherwise the grant is scoped
        # to a single schema.
        if len(parts) == 2 or (len(parts) == 3 and schema_part == "*"):
            placeholder = f"{db_name}.<{resource_type}>"
            group_kind = "database"
            group_name = db_name
        else:
            placeholder = f"{db_name}.{schema_part}.<{resource_type}>"
            group_kind = "schema"
            group_name = f"{db_name}.{schema_part}"
        in_spec = resource in all_grant_resources
        if not in_spec and (db_name in shared_dbs or db_name not in spec_dbs):
            # No privileges to revoke on imported dbs (handled at the
            # database level), and never revoke on databases the spec
            # does not define.
            continue
        if resource == placeholder and placeholder not in all_grant_resources:
            # Future privilege exists in Snowflake but not in the spec
            revoke_commands.append(
                {
                    "already_granted": False,
                    "sql": REVOKE_FUTURE_PRIVILEGES_TEMPLATE.format(
                        privileges=privilege_set,
                        resource_type=resource_type,
                        grouping_type=group_kind,
                        grouping_name=SnowflakeConnector.snowflaky(group_name),
                        role=SnowflakeConnector.snowflaky_user_role(role),
                    ),
                }
            )
        elif not in_spec and placeholder not in all_grant_resources:
            # The resource is granted in Snowflake, is absent from the
            # spec, and is not implied by an explicit future grant either
            revoke_commands.append(
                {
                    "already_granted": False,
                    "sql": REVOKE_PRIVILEGES_TEMPLATE.format(
                        privileges=privilege_set,
                        resource_type=resource_type,
                        resource_name=SnowflakeConnector.snowflaky(resource),
                        role=SnowflakeConnector.snowflaky_user_role(role),
                    ),
                }
            )
    return revoke_commands
def generate_revoke_privs(
    self,
    role: str,
    shared_dbs: Set[Any],
    spec_dbs: Set[Any],
    all_grant_tables: List[str],
    all_grant_views: List[str],
    write_grant_tables_full: List[str],
) -> List[Dict[str, Any]]:
    """
    Build the REVOKE statements for tables and views for the given role.

    Read (select) revokes are computed for both tables and views; write
    revokes are only needed for tables since SELECT is the only privilege
    available for views.
    """
    read_privileges = "select"
    write_partial_privileges = "insert, update, delete, truncate, references"

    def _currently_granted(privilege: str, kind: str) -> List[str]:
        # De-duplicated list of resources of `kind` that Snowflake reports
        # as granted to `role` with `privilege`.
        role_grants = self.grants_to_role.get(role, {})
        return list(set(role_grants.get(privilege, {}).get(kind, [])))

    sql_commands: List[Dict[str, Any]] = []
    sql_commands.extend(
        self._generate_revoke_select_privs(
            role=role,
            all_grant_resources=all_grant_tables,
            shared_dbs=shared_dbs,
            spec_dbs=spec_dbs,
            privilege_set=read_privileges,
            resource_type="table",
            granted_resources=_currently_granted("select", "table"),
        )
    )
    sql_commands.extend(
        self._generate_revoke_select_privs(
            role=role,
            all_grant_resources=all_grant_views,
            shared_dbs=shared_dbs,
            spec_dbs=spec_dbs,
            privilege_set=read_privileges,
            resource_type="view",
            granted_resources=_currently_granted("select", "view"),
        )
    )
    # Union of tables holding any partial write privilege
    write_granted_tables: List[str] = []
    for privilege in write_partial_privileges.split(", "):
        write_granted_tables.extend(
            self.grants_to_role.get(role, {}).get(privilege, {}).get("table", [])
        )
    sql_commands.extend(
        self._generate_revoke_select_privs(
            role=role,
            all_grant_resources=write_grant_tables_full,
            shared_dbs=shared_dbs,
            spec_dbs=spec_dbs,
            privilege_set=write_partial_privileges,
            resource_type="table",
            granted_resources=list(set(write_granted_tables)),
        )
    )
    return sql_commands
def generate_table_and_view_grants(
    self, role: str, tables: Dict[str, List], shared_dbs: Set, spec_dbs: Set
) -> List[Dict]:
    """
    Generate the GRANT and REVOKE statements for tables and views,
    including future grants.

    role: the name of the role the privileges are GRANTed to
    tables: dict with "read" and "write" lists of table identifiers
        (e.g. "raw.public.my_table")
    shared_dbs: all shared databases defined in the spec
    spec_dbs: all databases defined in the spec, used to restrict
        revocations to spec'd databases only

    Returns the SQL commands generated as a List.
    """
    conn = SnowflakeConnector()
    # The provided identifiers may contain wildcards; the helpers expand
    # them (via full_schema_list) and return the full granted lists.
    read_cmds, read_tables_full, read_views_full = (
        self._generate_table_read_grants(
            conn, tables.get("read", []), shared_dbs, role
        )
    )
    write_cmds, write_tables_full, write_views_full = (
        self._generate_table_write_grants(
            conn, tables.get("write", []), shared_dbs, role
        )
    )
    sql_commands: List[Dict] = []
    sql_commands.extend(read_cmds)
    sql_commands.extend(write_cmds)
    sql_commands.extend(
        self.generate_revoke_privs(
            role,
            shared_dbs,
            spec_dbs,
            read_tables_full + write_tables_full,
            read_views_full + write_views_full,
            write_tables_full,
        )
    )
    return sql_commands
def generate_alter_user(self, user: str, config: Dict[str, Any]) -> List[Dict]:
    """
    Generate the ALTER statements for USERs.

    user: the name of the USER
    config: the subtree for the user as specified in the spec

    Returns the SQL commands generated as a List.
    """
    # Membership-only runs skip user alteration entirely
    if self.ignore_memberships:
        return []
    clauses: List[Any] = []
    if "can_login" in config:
        clauses.append(
            "DISABLED = FALSE" if config.get("can_login") else "DISABLED = TRUE"
        )
    if not clauses:
        return []
    return [
        {
            "already_granted": False,
            "sql": ALTER_USER_TEMPLATE.format(
                user_name=SnowflakeConnector.snowflaky_user_role(user),
                privileges=", ".join(clauses),
            ),
        }
    ]
def _generate_ownership_grant_database(
    self, role: str, database_refs: List[str]
) -> List[Dict]:
    """Build GRANT OWNERSHIP statements for each database in the spec."""
    return [
        {
            # Skip-flag when Snowflake already reports the role as owner
            "already_granted": self.is_granted_privilege(
                role, "ownership", "database", database
            ),
            "sql": GRANT_OWNERSHIP_TEMPLATE.format(
                resource_type="database",
                resource_name=SnowflakeConnector.snowflaky(database),
                role_name=SnowflakeConnector.snowflaky_user_role(role),
            ),
        }
        for database in database_refs
    ]
def _generate_ownership_grant_schema(self, conn, role, schema_refs) -> List[Dict]:
    """
    Build GRANT OWNERSHIP statements for schemas.

    A "<db>.*" entry expands to every schema in the database except
    information_schema; otherwise the entry is used as-is.
    """
    commands: List[Dict] = []
    for schema_ref in schema_refs:
        parts = schema_ref.split(".")
        info_schema = f"{parts[0]}.information_schema"
        if parts[1] == "*":
            targets = [
                s for s in conn.show_schemas(parts[0]) if s != info_schema
            ]
        else:
            targets = [schema_ref]
        for db_schema in targets:
            commands.append(
                {
                    "already_granted": self.is_granted_privilege(
                        role, "ownership", "schema", db_schema
                    ),
                    "sql": GRANT_OWNERSHIP_TEMPLATE.format(
                        resource_type="schema",
                        resource_name=SnowflakeConnector.snowflaky(db_schema),
                        role_name=SnowflakeConnector.snowflaky_user_role(role),
                    ),
                }
            )
    return commands
def _generate_ownership_grant_table(self, conn, role, table_refs) -> List[Dict]:
    """
    Build GRANT OWNERSHIP statements for tables.

    Wildcards expand against Snowflake: "<db>.<schema>.*" grants on every
    table in the schema, "<db>.*.*" on every table in every schema except
    information_schema.
    """
    # Phase 1: expand every spec entry into concrete table names
    expanded: List[str] = []
    for table_ref in table_refs:
        parts = table_ref.split(".")
        info_schema = f"{parts[0]}.information_schema"
        if parts[2] != "*":
            expanded.append(table_ref)
            continue
        if parts[1] == "*":
            target_schemas = [
                s for s in conn.show_schemas(parts[0]) if s != info_schema
            ]
        else:
            target_schemas = [f"{parts[0]}.{parts[1]}"]
        for schema in target_schemas:
            expanded.extend(conn.show_tables(schema=schema))
    # Phase 2: grant ownership on each expanded table
    commands: List[Dict] = []
    for db_table in expanded:
        commands.append(
            {
                "already_granted": self.is_granted_privilege(
                    role, "ownership", "table", db_table
                ),
                "sql": GRANT_OWNERSHIP_TEMPLATE.format(
                    resource_type="table",
                    resource_name=SnowflakeConnector.snowflaky(db_table),
                    role_name=SnowflakeConnector.snowflaky_user_role(role),
                ),
            }
        )
    return commands
def generate_grant_ownership(  # noqa
    self, role: str, config: Dict[str, Any]
) -> List[Dict]:
    """
    Generate the GRANT ownership statements for databases, schemas and tables.

    role: the name of the role (e.g. "loader") ownership will be GRANTed to
    config: the subtree for the role as specified in the spec

    Returns the SQL commands generated as a List.
    """
    owns = config.get("owns", {})
    commands: List[Dict] = []
    if owns.get("databases"):
        commands.extend(
            self._generate_ownership_grant_database(role, owns.get("databases"))
        )
    if owns.get("schemas"):
        commands.extend(
            self._generate_ownership_grant_schema(
                self.conn, role, owns.get("schemas")
            )
        )
    if owns.get("tables"):
        commands.extend(
            self._generate_ownership_grant_table(
                self.conn, role, owns.get("tables")
            )
        )
    return commands
|
en
| 0.823698
|
Initializes a grants generator, used to generate SQL for generating grants grants_to_role: a dict, mapping role to grants where role is a string and grants is a dictionary of privileges to entities. e.g. {'functional_role': {'create schema': {'database': ['database_1', 'database_2']}, ...}} roles_granted_to_user: a dict, mapping the user to a list of roles., e.g. {'user_name': ['role_1', 'role_2'] ignore_memberships: bool, whether to skip role grant/revoke of memberships Check if <role> has been granted the privilege <privilege> on entity type <entity_type> with name <entity_name>. First checks if it is a future grant since snowflaky will format the future grants wrong - i.e. <table> is a part of the fully qualified name for a future table grant. For example: is_granted_privilege('reporter', 'usage', 'database', 'analytics') -> True means that role reporter has been granted the privilege to use the Database ANALYTICS on the Snowflake server. Generate a tuple with the member_include_list (e.g. roles that should be granted) and member_exclude_list (e.g. 
roles that should not be granted) config: the subtree for the entity as specified in the spec Returns: A tuple of two lists with the roles/users to include and exclude: (member_include_list, member_exclude_list) Generates the member include list when a * privilege is granted all_entities: a List of all entities defined in the spec entity: the entity to generate the list for Returns: a list of all roles to include for the entity For a given member_of list and entity, generate the SQL commands to grant the entity privileges for every member_role in the member_of list member_of_list: List of roles to generate sql commands for entity: the user or role to grant permissions for entity_type: the type of enttiy, either "users" or "roles" returns: a List of SQL Commands # Don't generate grants for Snowflake default roles as this will raise errors # on Snowflake For a given user, generate the SQL commands to revoke privileges to any roles not defined in the member of list Generate the GRANT statements for both roles and users. entity_type: "users" or "roles" entity: the name of the entity (e.g. "yannis" or "reporter") config: the subtree for the entity as specified in the spec all_entities: all roles defined in spec Returns the SQL commands generated as a list Generate all the privilege granting and revocation statements for a role so Snowflake matches the spec. Most of the SQL command that will be generated are privileges granted to roles and this function orchestrates the whole process. role: the name of the role (e.g. "loader" or "reporter") the privileges are granted to and revoked from config: the subtree for the role as specified in the spec shared_dbs: a set of all the shared databases defined in the spec. Used down the road by generate_database_grants() to also grant "imported privileges" when access is granted to a shared DB. spec_dbs: a set of all the databases defined in the spec. 
This is used in revoke commands to validate revocations are only for spec'd databases Returns the SQL commands generated as a list Generate the GRANT statements for Warehouse usage and operation. role: the name of the role the privileges are GRANTed to warehouses: list of warehouses for the specified role Returns the SQL command generated # If this is a shared database, we have to grant the "imported privileges" # privilege to the user and skip granting the specific permissions as # "Granting individual privileges on imported databases is not allowed." Generate the GRANT and REVOKE statements for Databases to align Snowflake with the spec. role: the name of the role the privileges are GRANTed to databases: list of databases (e.g. "raw") shared_dbs: a set of all the shared databases defined in the spec. spec_dbs: a set of all the databases defined in the spec. This is used in revoke commands to validate revocations are only for spec'd databases Returns the SQL commands generated as a list # If this is a shared database, we have to grant the "imported privileges" # privilege to the user and skip granting the specific permissions as # "Granting individual privileges on imported databases is not allowed." # REVOKES # The "Usage" privilege is consistent across read and write. 
# Compare granted usage to full read/write usage set # and revoke missing ones # If it's a shared database, only revoke imported # We'll only know if it's a shared DB based on the spec # Skip revocation on database that are not defined in spec # Revoke read/write permissions on shared databases # Revoke read permissions on created databases in Snowflake # Get all other write privilege dbs in case there are dbs where # usage was revoked but other write permissions still exist # This also preserves the case where somebody switches write access # for read access # If it's a shared database, only revoke imported # We'll only know if it's a shared DB based on the spec # Split the schema identifier into parts {DB_NAME}.{SCHEMA_NAME} # so that we can check and use each one # Do nothing if this is a schema inside a shared database: # "Granting individual privileges on imported databases is not allowed." # If <db_name>.* then you can grant future and add future schema to grant list # Grant on FUTURE schemas # Split the schema identifier into parts {DB_NAME}.{SCHEMA_NAME} # so that we can check and use each one # Do nothing if this is a schema inside a shared database: # "Granting individual privileges on imported databases is not allowed." # If <db_name>.* then you can grant future and add future schema to grant list # If any of the privileges are not granted, set already_granted to False # Grant on FUTURE schemas # If any of the privileges are not granted, set already_granted to False # No privileges to revoke on imported db. Done at database level # Don't revoke on privileges on databases not defined in spec. # If future privilege is granted on snowflake but not in grant list # # Covers case where schema is granted in Snowflake # But it's not in the grant list and it's not explicitly granted as a future grant # TODO: This method is too complex, consider refactoring Generate the GRANT and REVOKE statements for schemas including future grants. 
role: the name of the role the privileges are GRANTed to schemas: the name of the Schema (e.g. "raw.public", "raw.*") shared_dbs: a set of all the shared databases defined in the spec. spec_dbs: a set of all the databases defined in the spec. This is used in revoke commands to validate revocations are only for spec'd databases Returns the SQL commands generated as a List # Schema lists to hold read/write grants. This is necessary # as the provided schemas are not the full list - we determine # the full list via full_schema_list and store in these variables # Get Schema Read Commands # Get Schema Write Commands # REVOKES # The "usage" privilege is consistent across read and write. # Compare granted usage to full read/write set and revoke missing ones # Get all other write privilege schemas in case there are schemas where # usage was revoked but other write permissions still exist # This also preserves the case where somebody switches write access # for read access # No privileges to revoke on imported db. Done at database level # Don't revoke on privileges on databases not defined in spec. # If future privilege is granted but not in grant list # Covers case where schema is granted and it's not explicitly granted as a future grant # Split the table identifier into parts {DB_NAME}.{SCHEMA_NAME}.{TABLE_NAME} # so that we can check and use each one # Do nothing if this is a table inside a shared database: # "Granting individual privileges on imported databases is not allowed." # Gather the tables/views that privileges will be granted to # for the given table schema # List of all tables/views in schema for validation # For future grants at the database level for tables # For future grants at the database level for views # Fetch all tables from Snowflake for each schema and add # to the read_tables_list[] and read_views_list[] variables. 
# This is so we can check that a table given in the config # Is valid # If <schema_name>.* then you add all tables to grant list and then grant future # If *.* was provided then we're still ok as the full_schema_list # Would fetch all schemas and we'd still iterate through each # If == * then append all tables to both # the grant list AND the full grant list # Adds the future grant table format to the granted lists # Grant future on all tables # Grant future on all views # TODO Future elif to have partial table name # Else the table passed is a single entity # Check that it's valid and add to list # Grant privileges to all tables flagged for granting. # We have this loop b/c we explicitly grant to each table # Instead of doing grant to all tables/views in schema # Grant privileges to all flagged views # TODO: This method remains complex, could use extra refactoring # noqa # Split the table identifier into parts {DB_NAME}.{SCHEMA_NAME}.{TABLE_NAME} # so that we can check and use each one # Do nothing if this is a table inside a shared database: # "Granting individual privileges on imported databases is not allowed." # Gather the tables/views that privileges will be granted to # List of all tables/views in schema # For future grants at the database level # Fetch all tables from Snowflake for each schema and add # to the write_tables_list[] and write_views_list[] variables. # This is so we can check that a table given in the config # Is valid # If <schema_name>.* then you add all tables to grant list and then grant future # If *.* was provided then we're still ok as the full_schema_list # Would fetch all schemas and we'd still iterate through each # If == * then append all tables to both # the grant list AND the full grant list # Adds the future grant table format to the granted lists # If any of the privileges are not granted, set already_granted to False # Grant future on all tables # Grant future on all views. 
Select is only privilege # TODO Future elif to have partial table name # Only one table/view to be granted permissions to # Grant privileges to all tables flagged for granting. # We have this loop b/c we explicitly grant to each table # Instead of doing grant to all tables/views in schema # If any of the privileges are not granted, set already_granted to False # Grant privileges to all views in that schema. # Select is the only schemaObjectPrivilege for views # https://docs.snowflake.net/manuals/sql-reference/sql/grant-privilege.html Generates REVOKE privileges for tables/views known as resources here role: Snowflake role to revoke the resource from all_grant_resources: All the GRANTS applied shared_dbs: Shared databases to be skipped spec_dbs: Databases to apply REVOKE statements on privilege_set: Privileges to revoke (i.e. SELECT, INSERT, etc.) resource_type: Database object to revoke (i.e. table, view, etc.) granted_resources: List of GRANTS to filter through Returns a list of REVOKE statements # For future grants at the database level # No privileges to revoke on imported db. Done at database level # Don't revoke on privileges on databases not defined in spec. # If future privilege is granted in Snowflake but not in grant list # Covers case where resource is granted in Snowflake # But it's not in the grant list and it's not explicitly granted as a future grant # Write Privileges # Only need to revoke write privileges for tables since SELECT is the # only privilege available for views Generate the GRANT and REVOKE statements for tables and views including future grants. role: the name of the role the privileges are GRANTed to table: the name of the TABLE/VIEW (e.g. "raw.public.my_table") shared_dbs: a set of all the shared databases defined in the spec. spec_dbs: a set of all the databases defined in the spec. 
This is used in revoke commands to validate revocations are only for spec'd databases Returns the SQL commands generated as a List # These are necessary as the provided tables/views are not the full list # we determine the full list for granting via full_schema_list() # and store in these variables Generate the ALTER statements for USERs. user: the name of the USER config: the subtree for the user as specified in the spec Returns the SQL commands generated as a List # And then grant ownership to all tables # noqa Generate the GRANT ownership statements for databases, schemas and tables. role: the name of the role (e.g. "loader") ownership will be GRANTed to config: the subtree for the role as specified in the spec Returns the SQL commands generated as a List
| 2.319303
| 2
|
promgen/signals.py
|
kfdm/promgen
| 0
|
6626695
|
<gh_stars>0
# Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
import logging
from functools import wraps
from django.contrib import messages
from django.core.cache import cache
from django.db.models.signals import (post_delete, post_save, pre_delete,
pre_save)
from django.dispatch import Signal, receiver
from promgen import models, prometheus
logger = logging.getLogger(__name__)
# Custom signals used to batch expensive Prometheus file rewrites; their
# handlers below are wrapped with run_once, which queues the work and only
# executes it when the signal is re-sent with force=True.
trigger_write_config = Signal()
trigger_write_rules = Signal()
trigger_write_urls = Signal()
# NOTE(review): presumably sent after a Prometheus reload completes — no
# receivers in this module; confirm at the senders.
post_reload = Signal()
def multi_receiver(signal, senders, **kwargs):
    '''Decorator that connects one handler to *signal* for several senders.

    Any extra keyword arguments are forwarded unchanged to signal.connect().
    '''
    def _decorator(func):
        # Register the same handler once per sender
        for model in senders:
            signal.connect(func, sender=model, **kwargs)
        return func
    return _decorator
def run_once(signal):
    '''
    Run a signal only once

    Certain actions we want to run only once, at the end of processing,
    so we wrap our function in a decorator that uses Django's cache to
    mark the work as pending; the actual run is triggered by re-sending
    the signal with a ``force`` keyword at the end of the request.
    '''
    def _decorator(func):
        @wraps(func)
        def _wrapper(*args, **kwargs):
            # Cache key is unique per wrapped function
            key = '{}.{}'.format(func.__module__, func.__name__)
            if 'force' not in kwargs:
                # Normal send: just mark the function as pending
                logger.debug('Queueing %s for %s', key, kwargs['sender'])
                cache.set(key, 1)
                return None
            logger.debug('Checking %s for %s', key, kwargs['sender'])
            kwargs.pop('force')
            if not cache.get(key):
                # Nothing was queued, so there is nothing to run
                return None
            cache.delete(key)
            logger.debug('Running %s for %s', key, kwargs['sender'])
            return func(*args, **kwargs)
        signal.connect(_wrapper)
        return _wrapper
    return _decorator
@run_once(trigger_write_config)
def _trigger_write_config(signal, **kwargs):
    '''Queue a config rewrite on every registered Prometheus server.'''
    hosts = [server.host for server in models.Prometheus.objects.all()]
    for host in hosts:
        logger.info('Queueing write_config on %s', host)
        prometheus.write_config.apply_async(queue=host)
    if 'request' in kwargs:
        # Surface the action to the user when triggered from a web request
        messages.info(kwargs['request'], 'Updating config on {}'.format(hosts))
    return True
@run_once(trigger_write_rules)
def _trigger_write_rules(signal, **kwargs):
    '''Queue a rules rewrite on every registered Prometheus server.'''
    hosts = [server.host for server in models.Prometheus.objects.all()]
    for host in hosts:
        logger.info('Queueing write_rules on %s', host)
        prometheus.write_rules.apply_async(queue=host)
    if 'request' in kwargs:
        # Surface the action to the user when triggered from a web request
        messages.info(kwargs['request'], 'Updating rules on {}'.format(hosts))
    return True
@run_once(trigger_write_urls)
def _trigger_write_urls(signal, **kwargs):
    '''Queue a URL-list rewrite on every registered Prometheus server.'''
    hosts = [server.host for server in models.Prometheus.objects.all()]
    for host in hosts:
        logger.info('Queueing write_urls on %s', host)
        prometheus.write_urls.apply_async(queue=host)
    if 'request' in kwargs:
        # Surface the action to the user when triggered from a web request
        messages.info(kwargs['request'], 'Updating urls on {}'.format(hosts))
    return True
def update_log(sender, instance, **kwargs):
    '''Audit-log updates to an existing object.

    Hooked to pre_save: a set primary key means the row already exists,
    so fetch the stored copy from the database and log it alongside the
    new instance to record the change.
    '''
    if not instance.pk:
        return
    previous = sender.objects.get(pk=instance.pk)
    models.Audit.log('Updated %s %s' % (sender.__name__, instance), instance, previous)


# Audit updates on every user-editable model
for _audited in (models.Exporter, models.Farm, models.Host, models.Project,
                 models.Rule, models.Service, models.URL):
    pre_save.connect(update_log, sender=_audited)
def create_log(sender, instance, created, **kwargs):
    """post_save hook: write an audit entry for newly created rows.

    post_save (rather than pre_save) guarantees the primary key is set, so
    the audit entry can link back through the ContentType system.
    """
    if not created:
        return
    models.Audit.log('Created %s %s' % (sender.__name__, instance), instance)
# Attach the creation audit hook to every model we change-track.
for _audited_model in (models.Exporter, models.Farm, models.Host,
                       models.Project, models.Rule, models.Service,
                       models.URL):
    post_save.connect(create_log, sender=_audited_model)
def delete_log(sender, instance, **kwargs):
    # post_delete hook: record removals in the audit trail.
    models.Audit.log('Deleted %s %s' % (sender.__name__, instance), instance)
# Attach the deletion audit hook to every model we change-track.
for _audited_model in (models.Exporter, models.Farm, models.Host,
                       models.Project, models.Rule, models.Service,
                       models.URL):
    post_delete.connect(delete_log, sender=_audited_model)
@receiver(post_save, sender=models.Rule)
def save_rule(sender, instance, **kwargs):
    # Validate the saved rule via prometheus.check_rules before fanning out
    # a rules rewrite to every registered server.
    prometheus.check_rules([instance])
    trigger_write_rules.send(instance)
@receiver(post_delete, sender=models.Rule)
def delete_rule(sender, instance, **kwargs):
    # A removed rule must disappear from the generated rule files as well.
    trigger_write_rules.send(instance)
@receiver(post_save, sender=models.URL)
def save_url(sender, instance, **kwargs):
    # Any URL change requires the url files to be rewritten.
    trigger_write_urls.send(instance)
@receiver(post_delete, sender=models.URL)
def delete_url(sender, instance, **kwargs):
    # Removing a URL also requires the url files to be rewritten.
    trigger_write_urls.send(instance)
@receiver(post_save, sender=models.Host)
def save_host(sender, instance, **kwargs):
    '''Only trigger write if parent project also has exporters'''
    # Bug fix: a Django related manager is always truthy, so the old
    # ``if project.exporter_set:`` passed even for projects with no
    # exporters at all. Use .exists() -- as delete_host already does --
    # so a config write is only queued when an exporter is present.
    for project in instance.farm.project_set.all():
        if project.exporter_set.exists():
            trigger_write_config.send(instance)
@receiver(pre_delete, sender=models.Host)
def delete_host(sender, instance, **kwargs):
    '''Only trigger write if parent project also has exporters'''
    for linked_project in instance.farm.project_set.all():
        if not linked_project.exporter_set.exists():
            continue
        trigger_write_config.send(instance)
@receiver(pre_delete, sender=models.Farm)
def delete_farm(sender, instance, **kwargs):
    '''Only trigger write if parent project also has exporters'''
    # NOTE(review): unlike delete_host, this loop never checks
    # project.exporter_set, so the docstring does not match the code -- a
    # config write is queued once per linked project unconditionally.
    # Confirm whether an exists() guard was intended here.
    for project in instance.project_set.all():
        trigger_write_config.send(instance)
@receiver(post_save, sender=models.Exporter)
def save_exporter(sender, instance, **kwargs):
    '''Only trigger write if parent project also has hosts'''
    owning_farm = instance.project.farm
    if owning_farm and owning_farm.host_set.exists():
        trigger_write_config.send(instance)
@receiver(pre_delete, sender=models.Exporter)
def delete_exporter(sender, instance, **kwargs):
    '''Only trigger write if parent project also has hosts'''
    owning_farm = instance.project.farm
    if owning_farm and owning_farm.host_set.exists():
        trigger_write_config.send(instance)
@receiver(post_save, sender=models.Project)
def save_project(sender, instance, **kwargs):
    # A project only contributes scrape targets when it has a farm with at
    # least one host AND at least one exporter; only then is a config write
    # queued. Returns True in that case so save_service can short-circuit.
    logger.debug('save_project: %s', instance)
    if instance.farm and instance.farm.host_set.exists() and instance.exporter_set.exists():
        trigger_write_config.send(instance)
        return True
@receiver(pre_delete, sender=models.Project)
def delete_project(sender, instance, **kwargs):
    """Queue a config rewrite when a target-producing project is removed."""
    produced_targets = bool(
        instance.farm
        and instance.farm.host_set.exists()
        and instance.exporter_set.exists()
    )
    if produced_targets:
        trigger_write_config.send(instance)
@receiver(post_save, sender=models.Service)
def save_service(sender, instance, **kwargs):
    # When saving a service, we delegate the configuration-reload decision to
    # the child projects, which have the additional information (farm, hosts,
    # exporters) needed to decide whether the config must be rewritten. We
    # call the save_project handler directly (instead of through post_save)
    # because we don't want to trigger the other attached signals.
    logger.debug('save_service: %s', instance)
    for project in instance.project_set.prefetch_related(
            'farm',
            'farm__host_set',
            'exporter_set'):
        if save_project(sender=models.Project, instance=project):
            # One True result means the config write is already queued, so
            # there is no need to check the remaining projects.
            return True
|
# Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
import logging
from functools import wraps
from django.contrib import messages
from django.core.cache import cache
from django.db.models.signals import (post_delete, post_save, pre_delete,
pre_save)
from django.dispatch import Signal, receiver
from promgen import models, prometheus
logger = logging.getLogger(__name__)
trigger_write_config = Signal()
trigger_write_rules = Signal()
trigger_write_urls = Signal()
post_reload = Signal()
def multi_receiver(signal, senders, **kwargs):
def _decorator(func):
for sender in senders:
signal.connect(func, sender=sender, **kwargs)
return func
return _decorator
def run_once(signal):
'''
Run a signal only once
Certain actions we want to run only once, at the end of
processing so we wrap our function in a special decorator
that uses Django's caching system to set whether we
want to run it or not, and trigger the actual run with
a force keyword at the end of the request when we run to run it
'''
def _decorator(func):
@wraps(func)
def _wrapper(*args, **kwargs):
key = '{}.{}'.format(func.__module__, func.__name__)
if 'force' in kwargs:
logger.debug('Checking %s for %s', key, kwargs['sender'])
kwargs.pop('force')
if cache.get(key):
cache.delete(key)
logger.debug('Running %s for %s', key, kwargs['sender'])
return func(*args, **kwargs)
else:
logger.debug('Queueing %s for %s', key, kwargs['sender'])
cache.set(key, 1)
signal.connect(_wrapper)
return _wrapper
return _decorator
@run_once(trigger_write_config)
def _trigger_write_config(signal, **kwargs):
targets = [server.host for server in models.Prometheus.objects.all()]
for target in targets:
logger.info('Queueing write_config on %s', target)
prometheus.write_config.apply_async(queue=target)
if 'request' in kwargs:
messages.info(kwargs['request'], 'Updating config on {}'.format(targets))
return True
@run_once(trigger_write_rules)
def _trigger_write_rules(signal, **kwargs):
targets = [server.host for server in models.Prometheus.objects.all()]
for target in targets:
logger.info('Queueing write_rules on %s', target)
prometheus.write_rules.apply_async(queue=target)
if 'request' in kwargs:
messages.info(kwargs['request'], 'Updating rules on {}'.format(targets))
return True
@run_once(trigger_write_urls)
def _trigger_write_urls(signal, **kwargs):
targets = [server.host for server in models.Prometheus.objects.all()]
for target in targets:
logger.info('Queueing write_urls on %s', target)
prometheus.write_urls.apply_async(queue=target)
if 'request' in kwargs:
messages.info(kwargs['request'], 'Updating urls on {}'.format(targets))
return True
def update_log(sender, instance, **kwargs):
# For our update_log, we hook the pre_save signal and make sure it's an
# existing object by checking for a primary key. We then use that to get a
# copy of the existing object from the database so that we can show the
# changes
if instance.pk:
old = sender.objects.get(pk=instance.pk)
models.Audit.log('Updated %s %s' % (sender.__name__, instance), instance, old)
pre_save.connect(update_log, sender=models.Exporter)
pre_save.connect(update_log, sender=models.Farm)
pre_save.connect(update_log, sender=models.Host)
pre_save.connect(update_log, sender=models.Project)
pre_save.connect(update_log, sender=models.Rule)
pre_save.connect(update_log, sender=models.Service)
pre_save.connect(update_log, sender=models.URL)
def create_log(sender, instance, created, **kwargs):
# For our create_log, we have to hook post_save to make sure we have a
# primary key set so that we can link back to it using the ContentType
# system.
if created:
models.Audit.log('Created %s %s' % (sender.__name__, instance), instance)
post_save.connect(create_log, sender=models.Exporter)
post_save.connect(create_log, sender=models.Farm)
post_save.connect(create_log, sender=models.Host)
post_save.connect(create_log, sender=models.Project)
post_save.connect(create_log, sender=models.Rule)
post_save.connect(create_log, sender=models.Service)
post_save.connect(create_log, sender=models.URL)
def delete_log(sender, instance, **kwargs):
models.Audit.log('Deleted %s %s' % (sender.__name__, instance), instance)
post_delete.connect(delete_log, sender=models.Exporter)
post_delete.connect(delete_log, sender=models.Farm)
post_delete.connect(delete_log, sender=models.Host)
post_delete.connect(delete_log, sender=models.Project)
post_delete.connect(delete_log, sender=models.Rule)
post_delete.connect(delete_log, sender=models.Service)
post_delete.connect(delete_log, sender=models.URL)
@receiver(post_save, sender=models.Rule)
def save_rule(sender, instance, **kwargs):
prometheus.check_rules([instance])
trigger_write_rules.send(instance)
@receiver(post_delete, sender=models.Rule)
def delete_rule(sender, instance, **kwargs):
trigger_write_rules.send(instance)
@receiver(post_save, sender=models.URL)
def save_url(sender, instance, **kwargs):
trigger_write_urls.send(instance)
@receiver(post_delete, sender=models.URL)
def delete_url(sender, instance, **kwargs):
trigger_write_urls.send(instance)
@receiver(post_save, sender=models.Host)
def save_host(sender, instance, **kwargs):
    '''Only trigger write if parent project also has exporters'''
    # Bug fix: a Django related manager is always truthy, so the old
    # ``if project.exporter_set:`` passed even for projects with no
    # exporters at all. Use .exists() -- as delete_host already does --
    # so a config write is only queued when an exporter is present.
    for project in instance.farm.project_set.all():
        if project.exporter_set.exists():
            trigger_write_config.send(instance)
@receiver(pre_delete, sender=models.Host)
def delete_host(sender, instance, **kwargs):
'''Only trigger write if parent project also has exporters'''
for project in instance.farm.project_set.all():
if project.exporter_set.exists():
trigger_write_config.send(instance)
@receiver(pre_delete, sender=models.Farm)
def delete_farm(sender, instance, **kwargs):
'''Only trigger write if parent project also has exporters'''
for project in instance.project_set.all():
trigger_write_config.send(instance)
@receiver(post_save, sender=models.Exporter)
def save_exporter(sender, instance, **kwargs):
'''Only trigger write if parent project also has hosts'''
if instance.project.farm:
if instance.project.farm.host_set.exists():
trigger_write_config.send(instance)
@receiver(pre_delete, sender=models.Exporter)
def delete_exporter(sender, instance, **kwargs):
'''Only trigger write if parent project also has hosts'''
if instance.project.farm:
if instance.project.farm.host_set.exists():
trigger_write_config.send(instance)
@receiver(post_save, sender=models.Project)
def save_project(sender, instance, **kwargs):
logger.debug('save_project: %s', instance)
if instance.farm and instance.farm.host_set.exists() and instance.exporter_set.exists():
trigger_write_config.send(instance)
return True
@receiver(pre_delete, sender=models.Project)
def delete_project(sender, instance, **kwargs):
if instance.farm and instance.farm.host_set.exists() and instance.exporter_set.exists():
trigger_write_config.send(instance)
@receiver(post_save, sender=models.Service)
def save_service(sender, instance, **kwargs):
# We saving a service, we delegate the configuration reload triggering to
# the child projects which have additional information about if we need to
# write out our file or not. We call our save_project signal directly
# (instead of through post_save.save) because we don't want to trigger other
# attached signals
logger.debug('save_service: %s', instance)
for project in instance.project_set.prefetch_related(
'farm',
'farm__host_set',
'exporter_set'):
if save_project(sender=models.Project, instance=project):
# If any of our save_project returns True, then we do not need to
# check any others
return True
|
en
| 0.890649
|
# Copyright (c) 2017 LINE Corporation # These sources are released under the terms of the MIT license: see LICENSE Run a signal only once Certain actions we want to run only once, at the end of processing so we wrap our function in a special decorator that uses Django's caching system to set whether we want to run it or not, and trigger the actual run with a force keyword at the end of the request when we run to run it # For our update_log, we hook the pre_save signal and make sure it's an # existing object by checking for a primary key. We then use that to get a # copy of the existing object from the database so that we can show the # changes # For our create_log, we have to hook post_save to make sure we have a # primary key set so that we can link back to it using the ContentType # system. Only trigger write if parent project also has exporters Only trigger write if parent project also has exporters Only trigger write if parent project also has exporters Only trigger write if parent project also has hosts Only trigger write if parent project also has hosts # We saving a service, we delegate the configuration reload triggering to # the child projects which have additional information about if we need to # write out our file or not. We call our save_project signal directly # (instead of through post_save.save) because we don't want to trigger other # attached signals # If any of our save_project returns True, then we do not need to # check any others
| 2.156438
| 2
|
src/lib/utils.py
|
TeleMidia/audio_reconstruction
| 2
|
6626696
|
<gh_stars>1-10
import cv2
import math
import numpy as np
import matplotlib.pyplot as plt
import random as rand
import os
import glob
from PIL import Image
from tqdm import tqdm
import lib.jpeg as jpg
from skimage.metrics import peak_signal_noise_ratio, normalized_root_mse
# Lazily initialised experiment paths; populated by create_experiment_folders().
exp_chart_folder = None  # where chart history (data.txt) is written
model_weights_folder1 = None  # checkpoint folder: last epoch
model_weights_folder2 = None  # checkpoint folder: best validation
# In-memory metrics history, managed by load_experiment_data()/update_chart_data().
dict_chart_data = None
# Small offset added during normalisation (see shift_and_normalize).
CONST_GAMA = 0.001
LAST_EPOCH = -1  # last completed epoch found on disk (-1 = none yet)
BEST_VALIDATION_EPOCH = 0  # NOTE(review): appears unused in this file
class CustomMetric:
    """Accumulates PSNR / NRMSE scores over batches of (B, H, W, C) tensors.

    feed() scores channel 0 of every sample in a batch; result() averages
    the accumulated scores (ignoring infinite PSNR values, which occur for
    identical images); reset_states() clears the buffers.
    """

    def __init__(self):
        # Scores are collected in plain lists: O(1) append per sample,
        # instead of the old np.concatenate per sample which copied the
        # whole buffer every call (O(n^2) over an epoch).
        self.buffer_psnr = []
        self.buffer_nrmse = []

    def feed(self, batch_y, predictions):
        """Score each (truth, prediction) pair of the batch on channel 0."""
        for index in range(predictions.shape[0]):
            truth = batch_y[index, :, :, 0]
            predicted = predictions[index, :, :, 0]
            self.buffer_psnr.append(peak_signal_noise_ratio(truth, predicted, data_range=1))
            self.buffer_nrmse.append(normalized_root_mse(truth, predicted))

    def result(self):
        """Return (mean PSNR over finite values, mean NRMSE)."""
        psnr = np.asarray(self.buffer_psnr, dtype=float)
        return np.mean(psnr[~np.isinf(psnr)]), np.mean(self.buffer_nrmse)

    def reset_states(self):
        # Drop all accumulated scores before the next evaluation round.
        self.buffer_psnr = []
        self.buffer_nrmse = []
def check_experiment_folders():
    """Return True once every experiment output folder has been configured."""
    global exp_chart_folder, model_weights_folder1, model_weights_folder2
    folders = (exp_chart_folder, model_weights_folder1, model_weights_folder2)
    return all(folder is not None for folder in folders)
def create_experiment_folders(exp_id):
    """Create (if needed) and remember the output folders for *exp_id*.

    Populates the module-level paths used by the other helpers -- chart
    data, last-epoch weights and best-validation weights -- all under
    ``model_save/<exp_id>/``.
    """
    global exp_chart_folder, model_weights_folder1, model_weights_folder2
    exp_chart_folder = os.path.join("model_save", exp_id, "chart_data")
    model_weights_folder1 = os.path.join("model_save", exp_id, "model_last_epoch")
    model_weights_folder2 = os.path.join("model_save", exp_id, "model_best_valid")
    # makedirs(exist_ok=True) replaces the racy "exists() then makedirs" pair.
    for folder in (exp_chart_folder, model_weights_folder1, model_weights_folder2):
        os.makedirs(folder, exist_ok=True)
    return
def get_exp_folder_last_epoch():
    # Checkpoint path prefix ("<...>/model_last_epoch/model"); only valid
    # after create_experiment_folders() has run.
    return os.path.join(model_weights_folder1, "model")
def get_exp_folder_best_valid():
    # Checkpoint path prefix ("<...>/model_best_valid/model"); only valid
    # after create_experiment_folders() has run.
    return os.path.join(model_weights_folder2, "model")
def load_experiment_data():
    # Restore the metrics history (dict_chart_data / LAST_EPOCH) from
    # <exp_chart_folder>/data.txt, or initialise an empty history when the
    # file does not exist yet. create_experiment_folders() must run first.
    assert check_experiment_folders()
    global exp_chart_folder, dict_chart_data, LAST_EPOCH
    path = os.path.join(exp_chart_folder, "data.txt")
    if os.path.exists(path):
        with open(path, "r") as file:
            # NOTE(review): eval() executes arbitrary code from data.txt.
            # The file is written by update_chart_data() on this machine, so
            # the risk is limited, but ast.literal_eval would be safer --
            # confirm first that the stored values are plain literals
            # (numpy scalar reprs may not parse as literals).
            dict_chart_data = eval(file.readline())
            #print(dict_chart_data)
            #print(dict_chart_data["epoch"])
            if len(dict_chart_data["epoch"]) > 0:
                # Resume bookkeeping from the last recorded epoch.
                LAST_EPOCH = int(dict_chart_data["epoch"][-1])
            #print(LAST_EPOCH)
    else:
        # Fresh history: one list per tracked metric series.
        dict_chart_data = {}
        dict_chart_data["epoch"] = []
        dict_chart_data["Train_MSE"] = []
        dict_chart_data["Valid_MSE_1"] = []
        dict_chart_data["Valid_MSE_2"] = []
        dict_chart_data["Valid_MSE_3"] = []
        dict_chart_data["PSNR_1"] = []
        dict_chart_data["PSNR_2"] = []
        dict_chart_data["PSNR_3"] = []
        dict_chart_data["NRMSE_1"] = []
        dict_chart_data["NRMSE_2"] = []
        dict_chart_data["NRMSE_3"] = []
        dict_chart_data["Best_Validation_Result"] = 0
        dict_chart_data["Best_Validation_Epoch"] = 0
    return
def get_model_last_data(mode="LastEpoch"):
    """Return (epoch to resume from, best validation score so far).

    mode "LastEpoch" resumes after the last completed epoch; any other
    value returns the epoch of the best validation result instead.
    """
    global LAST_EPOCH
    best_result = dict_chart_data["Best_Validation_Result"]
    if mode == "LastEpoch":
        return LAST_EPOCH + 1, best_result
    return dict_chart_data["Best_Validation_Epoch"], best_result
def update_chart_data(epoch, train_mse, valid_mse, psnr, nrmse):
    """Append one epoch's metrics to the history and persist it to data.txt.

    valid_mse / psnr / nrmse are 3-element sequences (one value per
    validation set). Also tracks the best validation PSNR (index 0) and the
    epoch it occurred in.
    """
    assert check_experiment_folders()
    global exp_chart_folder, dict_chart_data
    assert dict_chart_data is not None
    path = os.path.join(exp_chart_folder, "data.txt")
    if psnr[0] > dict_chart_data["Best_Validation_Result"]:
        dict_chart_data["Best_Validation_Result"] = psnr[0]
        dict_chart_data["Best_Validation_Epoch"] = epoch
    dict_chart_data["epoch"].append(epoch)
    dict_chart_data["Train_MSE"].append(train_mse)
    # One series per validation set for each metric family.
    for offset in range(3):
        dict_chart_data["Valid_MSE_{}".format(offset + 1)].append(valid_mse[offset])
        dict_chart_data["PSNR_{}".format(offset + 1)].append(psnr[offset])
        dict_chart_data["NRMSE_{}".format(offset + 1)].append(nrmse[offset])
    # open(..., "w") truncates, so the old explicit os.remove() was redundant.
    with open(path, "w") as file:
        file.write(str(dict_chart_data))
    return
def annot_max(ax, x,y, op="min"):
    # Annotate the extreme point of series *y* on axes *ax* (despite the
    # name, the default op="min" marks the minimum). *x* supplies the
    # matching x-coordinates; y must be a numpy array (uses y.min()/y.max()).
    if op=="min":
        xmax = x[np.argmin(y)]
        ymax = y.min()
    else:
        xmax = x[np.argmax(y)]
        ymax = y.max()
    text= "epoch={}, result={:.6f}".format(xmax, ymax)
    # Fall back to the current axes when none was supplied.
    if not ax:
        ax=plt.gca()
    bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=1)
    arrowprops=dict(arrowstyle="->")
    # Annotation box pinned to the top-right corner in axes coordinates,
    # with an arrow pointing at the extreme data point.
    kw = dict(xycoords='data',textcoords="axes fraction",
              arrowprops=arrowprops, bbox=bbox_props, ha="right", va="top")
    ax.annotate(text, xy=(xmax, ymax), xytext=(0.94,0.96), **kw)
def get_experiment_results():
    """Summarise the history: best validation info plus the best value of
    each PSNR (max) and NRMSE (min) series."""
    summary = {
        "Best_Valid": dict_chart_data["Best_Validation_Result"],
        "Best_Epoch": dict_chart_data["Best_Validation_Epoch"],
    }
    for idx in (1, 2, 3):
        summary["PSNR_{}".format(idx)] = max(dict_chart_data["PSNR_{}".format(idx)])
    for idx in (1, 2, 3):
        summary["NRMSE_{}".format(idx)] = min(dict_chart_data["NRMSE_{}".format(idx)])
    return summary
def draw_chart():
    # Plot the recorded training curves: MSE, PSNR and NRMSE over epochs.
    # Only the first validation-set series of each family is drawn; the
    # commented lines keep the option of showing sets 2 and 3.
    global dict_chart_data
    if len(dict_chart_data["epoch"]) == 0:
        return
    fig, axs = plt.subplots(3, figsize=(15,15))
    axs[0].plot(dict_chart_data["epoch"], dict_chart_data["Train_MSE"], linewidth=2, color="orange", label="Train_MSE")
    axs[0].plot(dict_chart_data["epoch"], dict_chart_data["Valid_MSE_1"], linewidth=2, color="blue", label="Valid_MSE_1")
    # axs[0].plot(dict_chart_data["epoch"], dict_chart_data["Valid_MSE_2"], linewidth=2, color="green", label="Valid_MSE_2")
    # axs[0].plot(dict_chart_data["epoch"], dict_chart_data["Valid_MSE_3"], linewidth=2, color="red", label="Valid_MSE_3")
    axs[0].legend(frameon=False, loc='upper center', ncol=2)
    #annot_max(axs[0], np.asarray(dict_chart_data["epoch"]), np.asarray(dict_chart_data["Valid_MSE"]) )
    axs[1].plot(dict_chart_data["epoch"], dict_chart_data["PSNR_1"], linewidth=2, color="blue", label="PSNR_1")
    # axs[1].plot(dict_chart_data["epoch"], dict_chart_data["PSNR_2"], linewidth=2, color="green", label="PSNR_2")
    # axs[1].plot(dict_chart_data["epoch"], dict_chart_data["PSNR_3"], linewidth=2, color="red", label="PSNR_3")
    axs[1].legend(frameon=False, loc='upper center', ncol=1)
    #annot_max(axs[1], np.asarray(dict_chart_data["epoch"]), np.asarray(dict_chart_data["PSNR_1"]), op="max")
    axs[2].plot(dict_chart_data["epoch"], dict_chart_data["NRMSE_1"], linewidth=2, color="blue", label="NRMSE_1")
    # axs[2].plot(dict_chart_data["epoch"], dict_chart_data["NRMSE_2"], linewidth=2, color="green", label="NRMSE_2")
    # axs[2].plot(dict_chart_data["epoch"], dict_chart_data["NRMSE_3"], linewidth=2, color="red", label="NRMSE_3")
    axs[2].legend(frameon=False, loc='upper center', ncol=1)
    #annot_max(axs[4], np.asarray(dict_chart_data["epoch"]), np.asarray(dict_chart_data["NRMSE_1"]))
    plt.show()
def _dct_cached(image, tiff_path, qtable_luma, qtable_chroma, scale):
    # Return the quality-100 DCT coefficients for *image*, cached beside the
    # source .tiff so later runs can np.load them instead of re-encoding.
    cache_path = tiff_path.replace(".tiff", "_dct_q100.npy")
    if os.path.exists(cache_path):
        return np.load(cache_path)
    coefficients = jpg.encode_image(image * scale, qtable_luma, qtable_chroma)
    np.save(cache_path, coefficients)
    return coefficients


def load_dataset(root_folder, replace_vec, load_gen=True, DCTScale=256, limit=None):
    """Load paired seismic patches and their DCT coefficients.

    Walks ``root_folder/*/*/*.tiff`` for the input (x) patches; the target
    (y) path is derived by replacing ``replace_vec[0]`` with
    ``replace_vec[1]``. When *load_gen* is True, the trailing ``_<n>``
    generation suffix is stripped from the target filename. DCT
    coefficients are computed at JPEG quality 100 on images scaled by
    *DCTScale* and cached as ``*_dct_q100.npy``. *limit* (when not None)
    caps the number of loaded pairs.

    Returns (x patches, y patches, x DCTs, y DCTs) as numpy arrays.
    """
    IMG_SIZE = 200
    dataset_x_seismic = []
    dataset_x_dct = []
    dataset_y_seismic = []
    dataset_y_dct = []
    counter = 0
    qtable_luma_100, qtable_chroma_100 = jpg.generate_qtables(quality_factor=100)
    reg = "/*/*/*.tiff"
    for file_ in tqdm(glob.iglob(root_folder + reg, recursive=False)):
        file_path_x = file_.replace("\\", "/")
        file_path_y = file_path_x.replace(replace_vec[0], replace_vec[1])
        if load_gen:
            # Generated patches carry a "_<n>" suffix the target lacks.
            ext = file_path_y.split("/")[-1].split(".tiff")[0][-1]
            file_path_y = file_path_y.replace("_" + ext + ".tiff", ".tiff")
        x_img = np.expand_dims(np.array(Image.open(file_path_x)), axis=2)
        assert x_img.shape == (IMG_SIZE, IMG_SIZE, 1)
        x_dct = _dct_cached(x_img, file_path_x, qtable_luma_100, qtable_chroma_100, DCTScale)
        y_img = np.expand_dims(np.array(Image.open(file_path_y)), axis=2)
        assert y_img.shape == (IMG_SIZE, IMG_SIZE, 1)
        y_dct = _dct_cached(y_img, file_path_y, qtable_luma_100, qtable_chroma_100, DCTScale)
        dataset_x_seismic.append(x_img)
        dataset_y_seismic.append(y_img)
        dataset_x_dct.append(x_dct)
        dataset_y_dct.append(y_dct)
        counter += 1
        # identity test for the None sentinel (was `limit != None`)
        if limit is not None and counter >= limit:
            break
    return np.array(dataset_x_seismic), np.array(dataset_y_seismic), np.array(dataset_x_dct), np.array(dataset_y_dct)
def load_dataset_from_step1(root_folder):
    """Load step-1 output pairs (``*_x.npy`` / ``*_y.npy``) from *root_folder*.

    Returns (x array, y array, None, None); the trailing Nones keep the
    return shape compatible with load_dataset(), which also returns DCTs.
    (The unused IMG_SIZE constant of the sibling loaders was removed.)
    """
    dataset_x_seismic = []
    dataset_y_seismic = []
    for file_ in tqdm(glob.iglob(root_folder + "/*_x.npy", recursive=False)):
        file_path_x = file_.replace("\\", "/")
        file_path_y = file_path_x.replace("_x.npy", "_y.npy")
        dataset_x_seismic.append(np.load(file_path_x))
        dataset_y_seismic.append(np.load(file_path_y))
    return np.array(dataset_x_seismic), np.array(dataset_y_seismic), None, None
def _dct_for_tiff(image, tiff_path, qtable_luma, qtable_chroma, scale):
    # Load the cached DCT for *tiff_path*, or compute and cache it.
    cache_path = tiff_path.replace(".tiff", "_dct_q100.npy")
    if os.path.exists(cache_path):
        return np.load(cache_path)
    coefficients = jpg.encode_image(image * scale, qtable_luma, qtable_chroma)
    np.save(cache_path, coefficients)
    return coefficients


def load_dataset_from_file(file_path, useDCT=False, DCTScale=256):
    """Load patch pairs listed in *file_path* (one "x_path;y_path" per line).

    When *useDCT* is True, also returns the cached/computed quality-100 DCT
    coefficients of each patch (scaled by *DCTScale*):
    (x, y, x_dct, y_dct); otherwise just (x, y).
    """
    IMG_SIZE = 200
    dataset_x_seismic = []
    dataset_x_dct = []
    dataset_y_seismic = []
    dataset_y_dct = []
    qtable_luma_100, qtable_chroma_100 = jpg.generate_qtables(quality_factor=100)
    # Bug fix: the old code leaked the listing file handle (open() with no
    # matching close()); a context manager guarantees it is released.
    with open(file_path, "r") as listing:
        lines = listing.readlines()
    for line in tqdm(lines):
        fields = line.replace("\n", "").split(";")
        file_path_x = fields[0].replace("\\", "/")
        file_path_y = fields[1].replace("\\", "/")
        x_img = np.expand_dims(np.array(Image.open(file_path_x)), axis=2)
        assert x_img.shape == (IMG_SIZE, IMG_SIZE, 1)
        if useDCT:
            dataset_x_dct.append(_dct_for_tiff(x_img, file_path_x, qtable_luma_100, qtable_chroma_100, DCTScale))
        y_img = np.expand_dims(np.array(Image.open(file_path_y)), axis=2)
        assert y_img.shape == (IMG_SIZE, IMG_SIZE, 1)
        if useDCT:
            dataset_y_dct.append(_dct_for_tiff(y_img, file_path_y, qtable_luma_100, qtable_chroma_100, DCTScale))
        dataset_x_seismic.append(x_img)
        dataset_y_seismic.append(y_img)
    if useDCT:
        return np.array(dataset_x_seismic), np.array(dataset_y_seismic), np.array(dataset_x_dct), np.array(dataset_y_dct)
    return np.array(dataset_x_seismic), np.array(dataset_y_seismic)
def random_mini_batches(X1, Y1, X2, Y2, mini_batch_size = 64, seed = 0):
    """Shuffle (X1, Y1) -- and, when given, (X2, Y2) with the same
    permutation -- and split them into mini-batches.

    Returns a list of (x1, y1, x2, y2) tuples; x2/y2 are None when X2 is
    None. The final batch holds the remainder when the sample count is not
    a multiple of *mini_batch_size*. *seed* makes the shuffle reproducible.
    """
    m = X1.shape[0]  # number of training examples
    np.random.seed(seed)
    permutation = list(np.random.permutation(m))
    shuffled_X1 = X1[permutation]
    shuffled_Y1 = Y1[permutation]
    if X2 is not None:
        shuffled_X2 = X2[permutation]
        shuffled_Y2 = Y2[permutation]

    def _batch(start, stop):
        # Build one (x1, y1, x2, y2) tuple for the slice [start:stop);
        # shared by full and trailing-partial batches (was duplicated).
        if X2 is not None:
            return (shuffled_X1[start:stop], shuffled_Y1[start:stop],
                    shuffled_X2[start:stop], shuffled_Y2[start:stop])
        return (shuffled_X1[start:stop], shuffled_Y1[start:stop], None, None)

    mini_batches = []
    num_complete_minibatches = math.floor(m / mini_batch_size)
    for k in range(num_complete_minibatches):
        mini_batches.append(_batch(k * mini_batch_size, (k + 1) * mini_batch_size))
    if m % mini_batch_size != 0:  # trailing partial batch
        mini_batches.append(_batch(num_complete_minibatches * mini_batch_size, m))
    return mini_batches
def get_patches_from_folder(folder):
    """Load all patches ``0.tiff`` .. ``<n-1>.tiff`` from *folder*, in order.

    The folder is expected to hold consecutively numbered tiff files;
    globbing is only used to count them, while the actual reads go by index
    so the returned (N, 200, 200, 1) array is ordered.
    """
    IMG_SIZE = 200
    patches = []
    # Count the tiff files (C-speed sum instead of a manual counter loop).
    qtd_images = sum(1 for _ in glob.iglob(folder + "/*.tiff", recursive=False))
    for index in tqdm(range(qtd_images)):
        img = np.expand_dims(np.array(Image.open(folder + "/" + str(index) + ".tiff")), axis=2)
        assert img.shape == (IMG_SIZE, IMG_SIZE, 1)
        patches.append(img)
    return np.array(patches)
def compose_seismogram(patches, per_column):
    """Reassemble a seismogram image from an ordered (N, H, W, 1) patch array.

    Consecutive groups of *per_column* patches are stacked vertically into
    columns, and the columns are stacked horizontally, left to right.
    Returns None for an empty patch array (matching the old behaviour).

    Fixes two defects of the previous implementation: it crashed with
    ``np.hstack((None, column))`` when all patches fit in a single column,
    and it silently dropped the final partial column when N was not a
    multiple of *per_column*.
    """
    total = patches.shape[0]
    columns = []
    for start in range(0, total, per_column):
        group = [patches[i, :, :, 0] for i in range(start, min(start + per_column, total))]
        columns.append(np.vstack(group))
    if not columns:
        return None
    return np.hstack(columns)
def convert_batch_dct2seismo(batch, DCTScale=256):
    """Decode a batch of DCT coefficient blocks back into seismic patches,
    undoing the *DCTScale* applied at encode time."""
    qtable_luma_100, qtable_chroma_100 = jpg.generate_qtables(quality_factor=100)
    decoded = [jpg.decode_image(batch[i].copy(), qtable_luma_100, qtable_chroma_100)
               for i in range(batch.shape[0])]
    return np.array(decoded) / DCTScale
def get_shift_scale_maxmin(train_x, train_y, valid_x, valid_y):
    """Compute per-signal shift/scale constants over train+valid data.

    The shift is the negated global minimum and the scale is the global
    maximum after shifting, so that ``(v + shift) / scale`` maps values
    into [0, 1]. Returns (shift_x, shift_y, scale_x, scale_y).
    (Replaces four verbose if/else chains with min/max -- same results.)
    """
    shift_x = -min(np.amin(train_x), np.amin(valid_x))
    shift_y = -min(np.amin(train_y), np.amin(valid_y))
    scale_x = max(np.amax(train_x), np.amax(valid_x)) + shift_x
    scale_y = max(np.amax(train_y), np.amax(valid_y)) + shift_y
    return shift_x, shift_y, scale_x, scale_y
def shift_and_normalize(batch, shift_value, scale_value):
    # Map values into roughly [0, 1] using constants from
    # get_shift_scale_maxmin(), then add CONST_GAMA so the result stays
    # strictly positive. Inverted by inv_shift_and_normalize().
    return ((batch+shift_value)/scale_value)+CONST_GAMA
def inv_shift_and_normalize(batch, shift_value, scale_value):
    # Exact inverse of shift_and_normalize(): remove CONST_GAMA, rescale,
    # and undo the shift to recover the original value range.
    return ((batch-CONST_GAMA)*scale_value)-shift_value
def add_margin_zeros(data_x, size=8, chan=1):
    """Pad every sample of the (N, H, W, C) batch *data_x* with *size* rows
    of zeros at the bottom and *size* columns of zeros on the right.

    *chan* is kept for interface compatibility; as with the old
    hstack/vstack version, it must equal ``data_x.shape[3]``.
    """
    padded = np.pad(data_x, ((0, 0), (0, size), (0, size), (0, 0)),
                    mode="constant", constant_values=0)
    # The old implementation stacked float64 zero blocks, which promoted
    # the result to float64; preserve that dtype behaviour.
    return padded.astype(np.result_type(data_x.dtype, np.float64), copy=False)
def remove_margin_zeros(data_x, size=8):
    """Undo add_margin_zeros(): drop the last *size* rows and columns of
    every sample in the (N, H, W, C) batch."""
    height = data_x.shape[1]
    width = data_x.shape[2]
    # Single vectorised slice instead of the old per-sample Python loop;
    # .copy() keeps the semantics of returning an independent array.
    return data_x[:, :height - size, :width - size, :].copy()
def load_single_seismogram(noisy_path, replace_str):
    """Load one seismogram's noisy patches and their clean targets, ordered
    by patch index.

    Patches are tiff files named "<index>.tiff" under *noisy_path*; the
    clean-target path is derived by replacing replace_str[0] with
    replace_str[1]. Returns (noisy array, clean array).
    """
    DATA_SIZE = 200
    indexed_paths = []
    for tiff_path in glob.iglob(noisy_path + "/*.tiff", recursive=False):
        tiff_path = tiff_path.replace("\\", "/")
        patch_index = int(os.path.basename(tiff_path).replace(".tiff", ""))
        indexed_paths.append((patch_index, tiff_path))
    #print(indexed_paths)
    data_seismic_x = []
    data_seismic_y = []
    for _, noisy_file in sorted(indexed_paths):
        x_data = np.expand_dims(np.array(Image.open(noisy_file)), axis=2)
        assert x_data.shape == (DATA_SIZE, DATA_SIZE, 1)
        clean_file = noisy_file.replace(replace_str[0], replace_str[1])
        y_data = np.expand_dims(np.array(Image.open(clean_file)), axis=2)
        assert y_data.shape == (DATA_SIZE, DATA_SIZE, 1)
        data_seismic_x.append(x_data)
        data_seismic_y.append(y_data)
    return np.array(data_seismic_x), np.array(data_seismic_y)
# Module-level scratch state for compose_final_image()/export_image_data():
# maps an image key to its partially assembled column and final mosaic.
dict_final_image = {}
def compose_final_image(key, data, pat_per_col, index, max_):
    # Incrementally assemble a mosaic identified by *key* from patches fed
    # one at a time. Patches are vstacked into the current column; once
    # pat_per_col extra patches have been stacked, or when *index* reaches
    # *max_* (the final patch), the column is hstacked onto the image.
    # Stateful: progress lives in module-level dict_final_image until
    # export_image_data(key) is called.
    # NOTE(review): the "conter" key is misspelled but load-bearing -- it is
    # shared with export_image_data(), so renaming it requires a
    # coordinated change in both functions.
    global dict_final_image
    if not key in dict_final_image:
        dict_final_image[key] = {}
        dict_final_image[key]["col"] = None
        dict_final_image[key]["conter"] = 0
        dict_final_image[key]["image"] = None
    #print(dict_final_image[key]["conter"], "add to stack!")
    if dict_final_image[key]["col"] is None:
        dict_final_image[key]["col"] = data
    else:
        dict_final_image[key]["col"] = np.vstack((dict_final_image[key]["col"], data))
    if dict_final_image[key]["conter"] == pat_per_col or index == max_:
        #print(dict_final_image[key]["conter"],"next column!")
        # Column complete (or input exhausted): append it to the mosaic.
        if dict_final_image[key]["image"] is None:
            dict_final_image[key]["image"] = dict_final_image[key]["col"]
        else:
            dict_final_image[key]["image"] = np.hstack((dict_final_image[key]["image"], dict_final_image[key]["col"]))
        dict_final_image[key]["col"] = None
        dict_final_image[key]["conter"] = 0
    else:
        dict_final_image[key]["conter"] = dict_final_image[key]["conter"] + 1
def export_image_data(key):
    # Return the finished mosaic for *key* and reset its entry in
    # dict_final_image so the key can be reused for the next image.
    # (The misspelled "conter" key matches compose_final_image().)
    ret = dict_final_image[key]["image"]
    dict_final_image[key]["col"] = None
    dict_final_image[key]["conter"] = 0
    dict_final_image[key]["image"] = None
    return ret
def draw_trace(seismogram_x, seismogram_y, seismogram_p, trace_index):
    """Plot column *trace_index* of the input (X), target (Y) and predicted
    (P) seismograms against time on a shared subplot.

    Returns None for an out-of-range trace_index.
    """
    # Bug fix: the old guard compared against shape[0] (rows/time samples)
    # with ">", but trace_index selects a COLUMN below -- validate against
    # shape[1] and reject trace_index == shape[1], which previously slipped
    # through and raised an IndexError.
    if trace_index < 0 or trace_index >= seismogram_x.shape[1]:
        return None
    array_x = seismogram_x[:, trace_index]
    array_y = seismogram_y[:, trace_index]
    array_p = seismogram_p[:, trace_index]
    t = np.arange(array_x.shape[0])
    fig = plt.figure()
    ax0 = fig.add_subplot(211)
    ax0.plot(t, array_x, label='X')
    ax0.plot(t, array_y, label='Y')
    ax0.plot(t, array_p, label='P')
    ax0.set_xlabel("time")
    ax0.legend()
|
import cv2
import math
import numpy as np
import matplotlib.pyplot as plt
import random as rand
import os
import glob
from PIL import Image
from tqdm import tqdm
import lib.jpeg as jpg
from skimage.metrics import peak_signal_noise_ratio, normalized_root_mse
exp_chart_folder = None
model_weights_folder1 = None
model_weights_folder2 = None
dict_chart_data = None
CONST_GAMA = 0.001
LAST_EPOCH = -1
BEST_VALIDATION_EPOCH = 0
class CustomMetric:
    """Accumulates PSNR / NRMSE scores over batches of (B, H, W, C) tensors.

    feed() scores channel 0 of every sample in a batch; result() averages
    the accumulated scores (ignoring infinite PSNR values, which occur for
    identical images); reset_states() clears the buffers.
    """

    def __init__(self):
        # Scores are collected in plain lists: O(1) append per sample,
        # instead of the old np.concatenate per sample which copied the
        # whole buffer every call (O(n^2) over an epoch).
        self.buffer_psnr = []
        self.buffer_nrmse = []

    def feed(self, batch_y, predictions):
        """Score each (truth, prediction) pair of the batch on channel 0."""
        for index in range(predictions.shape[0]):
            truth = batch_y[index, :, :, 0]
            predicted = predictions[index, :, :, 0]
            self.buffer_psnr.append(peak_signal_noise_ratio(truth, predicted, data_range=1))
            self.buffer_nrmse.append(normalized_root_mse(truth, predicted))

    def result(self):
        """Return (mean PSNR over finite values, mean NRMSE)."""
        psnr = np.asarray(self.buffer_psnr, dtype=float)
        return np.mean(psnr[~np.isinf(psnr)]), np.mean(self.buffer_nrmse)

    def reset_states(self):
        # Drop all accumulated scores before the next evaluation round.
        self.buffer_psnr = []
        self.buffer_nrmse = []
def check_experiment_folders():
    """Return True when every experiment output folder has been configured."""
    folders = (exp_chart_folder, model_weights_folder1, model_weights_folder2)
    return all(folder is not None for folder in folders)
def create_experiment_folders(exp_id):
    """Create (if needed) the output folders for experiment *exp_id*.

    Sets the module-level globals ``exp_chart_folder``,
    ``model_weights_folder1`` and ``model_weights_folder2`` and makes sure
    the corresponding directories exist. Safe to call repeatedly.
    """
    global exp_chart_folder, model_weights_folder1, model_weights_folder2
    exp_chart_folder = os.path.join("model_save", exp_id, "chart_data")
    model_weights_folder1 = os.path.join("model_save", exp_id, "model_last_epoch")
    model_weights_folder2 = os.path.join("model_save", exp_id, "model_best_valid")
    # exist_ok avoids the racy exists()-then-makedirs pattern of the original.
    for folder in (exp_chart_folder, model_weights_folder1, model_weights_folder2):
        os.makedirs(folder, exist_ok=True)
    return
def get_exp_folder_last_epoch():
    """Checkpoint path prefix for the most recent epoch's model weights."""
    return os.path.join(model_weights_folder1, "model")
def get_exp_folder_best_valid():
    """Checkpoint path prefix for the best-validation model weights."""
    return os.path.join(model_weights_folder2, "model")
def load_experiment_data():
    """Load the persisted metric history into ``dict_chart_data``.

    Reads "<exp_chart_folder>/data.txt" (written by update_chart_data() as a
    str()-serialised dict) and restores LAST_EPOCH from it. When no file
    exists yet, an empty history structure is initialised instead.
    """
    assert check_experiment_folders()
    global exp_chart_folder, dict_chart_data, LAST_EPOCH

    # Local import keeps the module's top-level import surface unchanged.
    import ast

    path = os.path.join(exp_chart_folder, "data.txt")
    if os.path.exists(path):
        with open(path, "r") as file:
            # literal_eval parses the str(dict) produced by update_chart_data()
            # (ints, floats, lists, strings) without executing arbitrary code,
            # unlike the eval() used previously.
            dict_chart_data = ast.literal_eval(file.readline())
        if len(dict_chart_data["epoch"]) > 0:
            LAST_EPOCH = int(dict_chart_data["epoch"][-1])
    else:
        # Fresh run: one list per tracked series, plus best-validation bookkeeping.
        dict_chart_data = {
            "epoch": [],
            "Train_MSE": [],
            "Valid_MSE_1": [],
            "Valid_MSE_2": [],
            "Valid_MSE_3": [],
            "PSNR_1": [],
            "PSNR_2": [],
            "PSNR_3": [],
            "NRMSE_1": [],
            "NRMSE_2": [],
            "NRMSE_3": [],
            "Best_Validation_Result": 0,
            "Best_Validation_Epoch": 0,
        }
    return
def get_model_last_data(mode="LastEpoch"):
    """Return (epoch_to_resume_from, best_validation_result).

    mode "LastEpoch" resumes after the last completed epoch; any other value
    returns the epoch at which the best validation result was recorded.
    """
    global LAST_EPOCH
    if mode =="LastEpoch":
        return LAST_EPOCH+1, dict_chart_data["Best_Validation_Result"]
    else:
        return dict_chart_data["Best_Validation_Epoch"], dict_chart_data["Best_Validation_Result"]
def update_chart_data(epoch, train_mse, valid_mse, psnr, nrmse):
    """Record one epoch's metrics and persist the full history to disk.

    ``valid_mse``/``psnr``/``nrmse`` are 3-element sequences (one entry per
    validation set); ``psnr[0]`` drives the best-validation bookkeeping.
    """
    assert check_experiment_folders()
    global exp_chart_folder, dict_chart_data
    assert dict_chart_data is not None

    path = os.path.join(exp_chart_folder, "data.txt")

    if psnr[0] > dict_chart_data["Best_Validation_Result"]:
        dict_chart_data["Best_Validation_Result"] = psnr[0]
        dict_chart_data["Best_Validation_Epoch"] = epoch

    dict_chart_data["epoch"].append(epoch)
    dict_chart_data["Train_MSE"].append(train_mse)
    dict_chart_data["Valid_MSE_1"].append(valid_mse[0])
    dict_chart_data["Valid_MSE_2"].append(valid_mse[1])
    dict_chart_data["Valid_MSE_3"].append(valid_mse[2])
    dict_chart_data["PSNR_1"].append(psnr[0])
    dict_chart_data["PSNR_2"].append(psnr[1])
    dict_chart_data["PSNR_3"].append(psnr[2])
    dict_chart_data["NRMSE_1"].append(nrmse[0])
    dict_chart_data["NRMSE_2"].append(nrmse[1])
    dict_chart_data["NRMSE_3"].append(nrmse[2])

    # Opening with "w" truncates the file, so the original's explicit
    # os.path.exists()/os.remove() before writing was redundant.
    with open(path, "w") as file:
        file.write(str(dict_chart_data))
    return
def annot_max(ax, x,y, op="min"):
    """Annotate the extreme point of series *y* on axis *ax*.

    op "min" (default) marks the minimum, any other value the maximum.
    Despite their names, ``xmax``/``ymax`` hold whichever extreme was
    selected.  *y* must be a numpy array (uses ``y.min()``/``y.max()``).
    """
    if op=="min":
        xmax = x[np.argmin(y)]
        ymax = y.min()
    else:
        xmax = x[np.argmax(y)]
        ymax = y.max()
    text= "epoch={}, result={:.6f}".format(xmax, ymax)
    # Fall back to the current axes when none was supplied.
    if not ax:
        ax=plt.gca()
    bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=1)
    arrowprops=dict(arrowstyle="->")
    kw = dict(xycoords='data',textcoords="axes fraction",
              arrowprops=arrowprops, bbox=bbox_props, ha="right", va="top")
    ax.annotate(text, xy=(xmax, ymax), xytext=(0.94,0.96), **kw)
def get_experiment_results():
    """Summarise the run: best-validation stats plus per-set PSNR/NRMSE extremes."""
    data = dict_chart_data
    summary = {
        "Best_Valid": data["Best_Validation_Result"],
        "Best_Epoch": data["Best_Validation_Epoch"],
    }
    for i in (1, 2, 3):
        summary["PSNR_%d" % i] = max(data["PSNR_%d" % i])
    for i in (1, 2, 3):
        summary["NRMSE_%d" % i] = min(data["NRMSE_%d" % i])
    return summary
def draw_chart():
    """Plot the accumulated training curves (MSE, PSNR, NRMSE).

    Only the first validation set's curves are drawn; the series for sets 2
    and 3 are kept as commented-out lines. No-op when no epoch has been
    recorded yet.
    """
    global dict_chart_data
    if len(dict_chart_data["epoch"]) == 0:
        return
    fig, axs = plt.subplots(3, figsize=(15,15))
    axs[0].plot(dict_chart_data["epoch"], dict_chart_data["Train_MSE"], linewidth=2, color="orange", label="Train_MSE")
    axs[0].plot(dict_chart_data["epoch"], dict_chart_data["Valid_MSE_1"], linewidth=2, color="blue", label="Valid_MSE_1")
    # axs[0].plot(dict_chart_data["epoch"], dict_chart_data["Valid_MSE_2"], linewidth=2, color="green", label="Valid_MSE_2")
    # axs[0].plot(dict_chart_data["epoch"], dict_chart_data["Valid_MSE_3"], linewidth=2, color="red", label="Valid_MSE_3")
    axs[0].legend(frameon=False, loc='upper center', ncol=2)
    #annot_max(axs[0], np.asarray(dict_chart_data["epoch"]), np.asarray(dict_chart_data["Valid_MSE"]) )
    axs[1].plot(dict_chart_data["epoch"], dict_chart_data["PSNR_1"], linewidth=2, color="blue", label="PSNR_1")
    # axs[1].plot(dict_chart_data["epoch"], dict_chart_data["PSNR_2"], linewidth=2, color="green", label="PSNR_2")
    # axs[1].plot(dict_chart_data["epoch"], dict_chart_data["PSNR_3"], linewidth=2, color="red", label="PSNR_3")
    axs[1].legend(frameon=False, loc='upper center', ncol=1)
    #annot_max(axs[1], np.asarray(dict_chart_data["epoch"]), np.asarray(dict_chart_data["PSNR_1"]), op="max")
    axs[2].plot(dict_chart_data["epoch"], dict_chart_data["NRMSE_1"], linewidth=2, color="blue", label="NRMSE_1")
    # axs[2].plot(dict_chart_data["epoch"], dict_chart_data["NRMSE_2"], linewidth=2, color="green", label="NRMSE_2")
    # axs[2].plot(dict_chart_data["epoch"], dict_chart_data["NRMSE_3"], linewidth=2, color="red", label="NRMSE_3")
    axs[2].legend(frameon=False, loc='upper center', ncol=1)
    #annot_max(axs[4], np.asarray(dict_chart_data["epoch"]), np.asarray(dict_chart_data["NRMSE_1"]))
    plt.show()
def load_dataset(root_folder, replace_vec, load_gen=True, DCTScale=256, limit=None):
    """Load paired 200x200 .tiff patches plus cached quality-100 DCT encodings.

    ``root_folder`` is scanned with the pattern "/*/*/*.tiff"; each target
    path is derived from the input path by substituting replace_vec[0] with
    replace_vec[1]. DCT coefficients (images scaled by ``DCTScale``) are
    cached next to each image as "<name>_dct_q100.npy". ``limit`` caps the
    number of pairs loaded. Returns (x_imgs, y_imgs, x_dcts, y_dcts).
    """
    IMG_SIZE = 200
    dataset_x_seismic = []
    dataset_x_dct = []
    dataset_y_seismic = []
    dataset_y_dct = []
    counter = 0
    qtable_luma_100, qtable_chroma_100 = jpg.generate_qtables(quality_factor=100)
    reg = "/*/*/*.tiff"
    for file_ in tqdm(glob.iglob(root_folder+reg, recursive=False)):
        file_path_x = file_.replace("\\", "/")
        file_path_y = file_path_x.replace(replace_vec[0], replace_vec[1])
        if load_gen:
            # Generated inputs carry a one-character variant suffix
            # ("..._<c>.tiff"); strip it so the target path points at the
            # shared base image.
            ext = file_path_y.split("/")[-1].split(".tiff")[0][-1]
            file_path_y = file_path_y.replace("_"+ext+".tiff",".tiff")
        x_img = np.expand_dims(np.array(Image.open(file_path_x)), axis=2)
        assert x_img.shape == (IMG_SIZE, IMG_SIZE, 1)
        x_dct = None
        x_dct_path = file_path_x.replace(".tiff", "_dct_q100.npy")
        if os.path.exists(x_dct_path):
            x_dct = np.load(x_dct_path)
        else:
            # Compute once and cache next to the image.
            x_dct = jpg.encode_image(x_img*DCTScale, qtable_luma_100, qtable_chroma_100)
            np.save(x_dct_path, x_dct)
        y_img = np.expand_dims(np.array(Image.open(file_path_y)), axis=2)
        assert y_img.shape == (IMG_SIZE, IMG_SIZE, 1)
        y_dct = None
        y_dct_path = file_path_y.replace(".tiff", "_dct_q100.npy")
        if os.path.exists(y_dct_path):
            y_dct = np.load(y_dct_path)
        else:
            y_dct = jpg.encode_image(y_img*DCTScale, qtable_luma_100, qtable_chroma_100)
            np.save(y_dct_path, y_dct)
        dataset_x_seismic.append(x_img)
        dataset_y_seismic.append(y_img)
        dataset_x_dct.append(x_dct)
        dataset_y_dct.append(y_dct)
        counter += 1
        if limit != None and counter >= limit:
            break
    return np.array(dataset_x_seismic), np.array(dataset_y_seismic), np.array(dataset_x_dct), np.array(dataset_y_dct)
def load_dataset_from_step1(root_folder):
    """Load paired (x, y) .npy patches produced by processing step 1.

    Pairs are matched by filename: "<name>_x.npy" <-> "<name>_y.npy".
    Returns (x_array, y_array, None, None), mirroring load_dataset()'s
    4-tuple shape (no DCT data at this stage).

    The unused ``IMG_SIZE`` local of the original has been removed.
    """
    dataset_x_seismic = []
    dataset_y_seismic = []
    reg = "/*_x.npy"
    for file_ in tqdm(glob.iglob(root_folder+reg, recursive=False)):
        file_path_x = file_.replace("\\","/")
        file_path_y = file_path_x.replace("_x.npy", "_y.npy")
        dataset_x_seismic.append(np.load(file_path_x))
        dataset_y_seismic.append(np.load(file_path_y))
    return np.array(dataset_x_seismic), np.array(dataset_y_seismic), None, None
def load_dataset_from_file(file_path, useDCT=False, DCTScale=256):
    """Load image pairs listed in a manifest file, optionally with DCT data.

    Each line of *file_path* is "<x_path>;<y_path>". Images must be 200x200
    single-channel .tiff files. When ``useDCT`` is True, quality-100 DCT
    coefficients are loaded from (or computed and cached to)
    "<name>_dct_q100.npy" beside each image, and a 4-tuple
    (x_imgs, y_imgs, x_dcts, y_dcts) is returned; otherwise only
    (x_imgs, y_imgs).
    """
    IMG_SIZE = 200
    dataset_x_seismic = []
    dataset_x_dct = []
    dataset_y_seismic = []
    dataset_y_dct = []
    qtable_luma_100, qtable_chroma_100 = jpg.generate_qtables(quality_factor=100)
    f_ = open(file_path, "r")
    lines = f_.readlines()
    for line in tqdm(lines):
        line = line.replace("\n", "")
        data = line.split(";")
        file_path_x = data[0]
        file_path_x = file_path_x.replace("\\", "/")
        file_path_y = data[1]
        file_path_y = file_path_y.replace("\\", "/")
        x_img = np.expand_dims(np.array(Image.open(file_path_x)), axis=2)
        assert x_img.shape == (IMG_SIZE, IMG_SIZE, 1)
        if useDCT:
            x_dct = None
            x_dct_path = file_path_x.replace(".tiff", "_dct_q100.npy")
            if os.path.exists(x_dct_path):
                x_dct = np.load(x_dct_path)
            else:
                # Compute once and cache next to the image.
                x_dct = jpg.encode_image(x_img*DCTScale, qtable_luma_100, qtable_chroma_100)
                np.save(x_dct_path, x_dct)
            dataset_x_dct.append(x_dct)
        y_img = np.expand_dims(np.array(Image.open(file_path_y)), axis=2)
        assert y_img.shape == (IMG_SIZE, IMG_SIZE, 1)
        if useDCT:
            y_dct = None
            y_dct_path = file_path_y.replace(".tiff", "_dct_q100.npy")
            if os.path.exists(y_dct_path):
                y_dct = np.load(y_dct_path)
            else:
                y_dct = jpg.encode_image(y_img*DCTScale, qtable_luma_100, qtable_chroma_100)
                np.save(y_dct_path, y_dct)
            dataset_y_dct.append(y_dct)
        dataset_x_seismic.append(x_img)
        dataset_y_seismic.append(y_img)
    if useDCT:
        return np.array(dataset_x_seismic), np.array(dataset_y_seismic), np.array(dataset_x_dct), np.array(dataset_y_dct)
    else:
        return np.array(dataset_x_seismic), np.array(dataset_y_seismic)
def random_mini_batches(X1, Y1, X2, Y2, mini_batch_size = 64, seed = 0):
    """Shuffle the dataset(s) and split into mini-batches.

    All arrays are permuted with the same seeded order along axis 0 and cut
    into consecutive slices of ``mini_batch_size`` (the final batch may be
    smaller). ``X2``/``Y2`` may be None, in which case their slots in each
    returned (x1, y1, x2, y2) tuple are None.
    """
    np.random.seed(seed)
    total = X1.shape[0]
    order = list(np.random.permutation(total))

    # Apply the same shuffle to every provided array.
    shuffled = [X1[order], Y1[order]]
    if X2 is not None:
        shuffled.append(X2[order])
        shuffled.append(Y2[order])

    batches = []
    for start in range(0, total, mini_batch_size):
        stop = start + mini_batch_size
        chunk = [arr[start:stop] for arr in shuffled]
        if X2 is None:
            chunk.extend([None, None])
        batches.append(tuple(chunk))
    return batches
def get_patches_from_folder(folder):
    """Load all 200x200 patches "<i>.tiff" (i = 0..N-1) from *folder*.

    Files are assumed to be named by consecutive integer index; patches are
    returned in index order as an (N, 200, 200, 1) array.
    """
    IMG_SIZE = 200
    patches = []
    # Count the patches up-front (idiomatic replacement for the original's
    # manual increment loop over iglob).
    qtd_images = sum(1 for _ in glob.iglob(folder + "/*.tiff", recursive=False))
    for index in tqdm(range(0, qtd_images)):
        img = np.expand_dims(np.array(Image.open(folder + "/" + str(index) + ".tiff")), axis=2)
        assert img.shape == (IMG_SIZE, IMG_SIZE, 1)
        patches.append(img)
    return np.array(patches)
def compose_seismogram(patches, per_column):
    """Reassemble patches of shape (N, H, W, 1) into one 2-D seismogram.

    Patches fill columns top-to-bottom (``per_column`` patches each) and
    columns are laid out left-to-right.

    BUGFIX: the original unconditionally called
    ``np.hstack((final_seismogram, column))`` when flushing the last column,
    which crashed whenever all patches fit into a single column
    (final_seismogram was still None).

    NOTE: if N is not a multiple of per_column, the trailing partial column
    is dropped — this matches the original behavior.
    """
    column = None
    counter = 0
    final_seismogram = None
    qtd_patches = patches.shape[0]
    for index in range(0, qtd_patches):
        tile = patches[index, :, :, 0]
        if counter < per_column:
            # Grow the current column downwards.
            if column is None:
                column = tile
            else:
                column = np.vstack((column, tile))
            counter += 1
            if index == qtd_patches - 1:
                # Last patch: flush the (possibly only) column.
                if final_seismogram is None:
                    final_seismogram = column
                else:
                    final_seismogram = np.hstack((final_seismogram, column))
        else:
            # Column full: append it and start a new one with this patch.
            if final_seismogram is None:
                final_seismogram = column
            else:
                final_seismogram = np.hstack((final_seismogram, column))
            column = tile
            counter = 1
    return final_seismogram
def convert_batch_dct2seismo(batch, DCTScale=256):
    """Decode a batch of quality-100 DCT blocks back to seismic patches.

    Each sample is decoded via lib.jpeg and the result divided by
    ``DCTScale`` (inverse of the scaling applied at encoding time).
    """
    qtable_luma_100, qtable_chroma_100 = jpg.generate_qtables(quality_factor=100)
    quant = batch.shape[0]
    list_sample = []
    for index in range(quant):
        # .copy(): presumably jpg.decode_image modifies its input in place -
        # TODO confirm against lib.jpeg.
        list_sample.append(jpg.decode_image(batch[index].copy(), qtable_luma_100, qtable_chroma_100))
    return np.array(list_sample)/DCTScale
def get_shift_scale_maxmin(train_x, train_y, valid_x, valid_y):
    """Compute shift/scale constants covering both train and valid splits.

    The shift is the negated global minimum of each signal (so shifted data
    is non-negative); the scale is the global maximum after shifting.
    Returns (shift_x, shift_y, scale_x, scale_y).
    """
    shift_x = -min(np.amin(train_x), np.amin(valid_x))
    shift_y = -min(np.amin(train_y), np.amin(valid_y))
    scale_x = max(np.amax(train_x), np.amax(valid_x)) + shift_x
    scale_y = max(np.amax(train_y), np.amax(valid_y)) + shift_y
    return shift_x, shift_y, scale_x, scale_y
def shift_and_normalize(batch, shift_value, scale_value):
    """Shift to non-negative, scale into [0, 1], then add CONST_GAMA offset."""
    return ((batch+shift_value)/scale_value)+CONST_GAMA
def inv_shift_and_normalize(batch, shift_value, scale_value):
    """Exact inverse of shift_and_normalize()."""
    return ((batch-CONST_GAMA)*scale_value)-shift_value
def add_margin_zeros(data_x, size=8, chan=1):
    """Pad each sample with `size` zero rows (bottom) and columns (right).

    Input (N, H, W, chan) -> output (N, H + size, W + size, chan) as float64
    (matching the dtype promotion of stacking data against float zeros).
    """
    count = data_x.shape[0]
    height = data_x.shape[1]
    width = data_x.shape[2]
    padded = np.zeros((count, height + size, width + size, chan))
    padded[:, :height, :width, :] = data_x
    return padded
def remove_margin_zeros(data_x, size=8):
    """Inverse of add_margin_zeros(): crop `size` rows/cols off bottom/right."""
    cropped_h = data_x.shape[1] - size
    cropped_w = data_x.shape[2] - size
    # Build a fresh array (copy) just like the original list-append version.
    return np.array([sample[:cropped_h, :cropped_w, :] for sample in data_x])
def load_single_seismogram(noisy_path, replace_str):
    """Load one seismogram's (noisy, clean) patch pairs in index order.

    Patches in *noisy_path* must be named "<i>.tiff"; they are sorted by
    integer index. The clean counterpart path is derived by substituting
    replace_str[0] with replace_str[1]. Returns (noisy_array, clean_array),
    each of shape (N, 200, 200, 1).
    """
    dict_patches = {}
    DATA_SIZE = 200
    reg = "/*.tiff"
    for file_ in glob.iglob(noisy_path+reg, recursive=False):
        file_ = file_.replace("\\","/")
        # Numeric key so sorting is by patch index, not lexicographic.
        key_ = int(os.path.basename(file_).replace(".tiff",""))
        dict_patches[key_] = file_
    dict_patches = dict_patches.items()
    dict_patches = sorted(dict_patches)
    #print(dict_patches)
    data_seismic_x = []
    data_seismic_y = []
    for file_ in dict_patches:
        key, file_ = file_
        x_data = np.expand_dims(np.array(Image.open(file_)), axis=2)
        assert x_data.shape == (DATA_SIZE, DATA_SIZE, 1)
        file_ = file_.replace(replace_str[0], replace_str[1])
        y_data = np.expand_dims(np.array(Image.open(file_)), axis=2)
        assert y_data.shape == (DATA_SIZE, DATA_SIZE, 1)
        data_seismic_x.append(x_data)
        data_seismic_y.append(y_data)
    return np.array(data_seismic_x), np.array(data_seismic_y)
# Per-key builder state for incrementally assembled images.
dict_final_image = {}
def compose_final_image(key, data, pat_per_col, index, max_):
    """Incrementally assemble patches into a full image under *key*.

    Patches arrive one at a time and are stacked vertically into the current
    column; when the counter reaches ``pat_per_col`` (or the final patch,
    ``index == max_``, arrives) the column is appended horizontally to the
    image. Retrieve the finished image with export_image_data().
    """
    global dict_final_image
    if not key in dict_final_image:
        # Lazily create the builder state for this key.
        dict_final_image[key] = {}
        dict_final_image[key]["col"] = None     # column under construction
        dict_final_image[key]["conter"] = 0     # patches in current column ("conter" sic - key name is load-bearing)
        dict_final_image[key]["image"] = None   # completed columns so far
    #print(dict_final_image[key]["conter"], "add to stack!")
    if dict_final_image[key]["col"] is None:
        dict_final_image[key]["col"] = data
    else:
        dict_final_image[key]["col"] = np.vstack((dict_final_image[key]["col"], data))
    if dict_final_image[key]["conter"] == pat_per_col or index == max_:
        #print(dict_final_image[key]["conter"],"next column!")
        if dict_final_image[key]["image"] is None:
            dict_final_image[key]["image"] = dict_final_image[key]["col"]
        else:
            dict_final_image[key]["image"] = np.hstack((dict_final_image[key]["image"], dict_final_image[key]["col"]))
        dict_final_image[key]["col"] = None
        dict_final_image[key]["conter"] = 0
    else:
        dict_final_image[key]["conter"] = dict_final_image[key]["conter"] + 1
def export_image_data(key):
    """Return the finished image for *key* and reset its builder state."""
    state = dict_final_image[key]
    image = state["image"]
    state["image"] = None
    state["col"] = None
    state["conter"] = 0
    return image
def draw_trace(seismogram_x, seismogram_y, seismogram_p, trace_index):
    """Plot the X, Y and P component signals of a single trace over time.

    NOTE(review): the bounds guard compares ``trace_index`` against
    ``shape[0]`` (the time axis) although the index selects along axis 1,
    and uses ``>`` where ``>=`` would be needed to reject the
    one-past-the-end index - confirm the intended bounds check.
    """
    if trace_index < 0 or trace_index > seismogram_x.shape[0]:
        return None
    # Column trace_index of each seismogram; rows are time samples.
    array_x = seismogram_x[:,trace_index]
    array_y = seismogram_y[:,trace_index]
    array_p = seismogram_p[:,trace_index]
    t = np.arange(array_x.shape[0])
    fig = plt.figure()
    ax0 = fig.add_subplot(211)
    ax0.plot(t, array_x, label='X')
    ax0.plot(t, array_y, label='Y')
    ax0.plot(t, array_p, label='P')
    ax0.set_xlabel("time")
    ax0.legend()
|
en
| 0.260723
|
#print(dict_chart_data) #print(dict_chart_data["epoch"]) #print(LAST_EPOCH) # axs[0].plot(dict_chart_data["epoch"], dict_chart_data["Valid_MSE_2"], linewidth=2, color="green", label="Valid_MSE_2") # axs[0].plot(dict_chart_data["epoch"], dict_chart_data["Valid_MSE_3"], linewidth=2, color="red", label="Valid_MSE_3") #annot_max(axs[0], np.asarray(dict_chart_data["epoch"]), np.asarray(dict_chart_data["Valid_MSE"]) ) # axs[1].plot(dict_chart_data["epoch"], dict_chart_data["PSNR_2"], linewidth=2, color="green", label="PSNR_2") # axs[1].plot(dict_chart_data["epoch"], dict_chart_data["PSNR_3"], linewidth=2, color="red", label="PSNR_3") #annot_max(axs[1], np.asarray(dict_chart_data["epoch"]), np.asarray(dict_chart_data["PSNR_1"]), op="max") # axs[2].plot(dict_chart_data["epoch"], dict_chart_data["NRMSE_2"], linewidth=2, color="green", label="NRMSE_2") # axs[2].plot(dict_chart_data["epoch"], dict_chart_data["NRMSE_3"], linewidth=2, color="red", label="NRMSE_3") #annot_max(axs[4], np.asarray(dict_chart_data["epoch"]), np.asarray(dict_chart_data["NRMSE_1"])) # number of training examples # Step 1: Shuffle (X, Y) # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case. # number of mini batches of size mini_batch_size in your partitionning # Handling the end case (last mini-batch < mini_batch_size) #print(dict_patches) #print(dict_final_image[key]["conter"], "add to stack!") #print(dict_final_image[key]["conter"],"next column!")
| 2.116137
| 2
|
InvenTree/order/models.py
|
Pervanovo/InvenTree
| 0
|
6626697
|
<reponame>Pervanovo/InvenTree
"""
Order model definitions
"""
# -*- coding: utf-8 -*-
import os
from datetime import datetime
from decimal import Decimal
from django.db import models, transaction
from django.db.models import Q, F, Sum
from django.db.models.functions import Coalesce
from django.core.validators import MinValueValidator
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.translation import ugettext as _
from markdownx.models import MarkdownxField
from djmoney.models.fields import MoneyField
from part import models as PartModels
from stock import models as stock_models
from company.models import Company, SupplierPart
from InvenTree.fields import RoundingDecimalField
from InvenTree.helpers import decimal2string, increment, getSetting
from InvenTree.status_codes import PurchaseOrderStatus, SalesOrderStatus, StockStatus
from InvenTree.models import InvenTreeAttachment
class Order(models.Model):
    """ Abstract model for an order.

    Instances of this class:
    - PuchaseOrder

    Attributes:
        reference: Unique order number / reference / code
        description: Long form description (required)
        notes: Extra note field (optional)
        creation_date: Automatic date of order creation
        created_by: User who created this order (automatically captured)
        issue_date: Date the order was issued
        complete_date: Date the order was completed
    """

    @classmethod
    def getNextOrderNumber(cls):
        """
        Try to predict the next order-number.

        Starting from the latest order's reference, keep incrementing until
        a reference is found that is not already in the database. Returns
        None when there are no orders yet, or the latest has no reference;
        returns the original reference if increment() loops back on itself.
        """

        if cls.objects.count() == 0:
            return None

        # We will assume that the latest pk has the highest PO number
        order = cls.objects.last()
        ref = order.reference

        if not ref:
            return None

        tries = set()
        tries.add(ref)

        while 1:
            new_ref = increment(ref)

            if new_ref in tries:
                # We are in a looping situation - simply return the original one
                return ref

            # Check that the new ref does not exist in the database
            if cls.objects.filter(reference=new_ref).exists():
                tries.add(new_ref)
                # BUGFIX: continue the search from the colliding reference.
                # The original recomputed increment(ref) from the unchanged
                # 'ref' on every pass, so the loop could never advance past
                # the first collision and returned the stale reference.
                ref = new_ref
            else:
                break

        return new_ref

    def save(self, *args, **kwargs):
        # Stamp the creation date the first time the order is saved
        if not self.creation_date:
            self.creation_date = datetime.now().date()

        super().save(*args, **kwargs)

    class Meta:
        abstract = True

    reference = models.CharField(unique=True, max_length=64, blank=False, help_text=_('Order reference'))

    description = models.CharField(max_length=250, help_text=_('Order description'))

    link = models.URLField(blank=True, help_text=_('Link to external page'))

    creation_date = models.DateField(blank=True, null=True)

    created_by = models.ForeignKey(User,
                                   on_delete=models.SET_NULL,
                                   blank=True, null=True,
                                   related_name='+'
                                   )

    notes = MarkdownxField(blank=True, help_text=_('Order notes'))
class PurchaseOrder(Order):
    """ A PurchaseOrder represents goods shipped inwards from an external supplier.
    Attributes:
        supplier: Reference to the company supplying the goods in the order
        supplier_reference: Optional field for supplier order reference code
        received_by: User that received the goods
        target_date: Expected delivery target date for PurchaseOrder completion (optional)
    """
    # NOTE(review): datetime.now() here is evaluated once, at class-definition
    # (import) time, so the date baked into this Q object can go stale in a
    # long-running process - confirm whether this filter should be built lazily.
    OVERDUE_FILTER = Q(status__in=PurchaseOrderStatus.OPEN) & ~Q(target_date=None) & Q(target_date__lte=datetime.now().date())
    @staticmethod
    def filterByDate(queryset, min_date, max_date):
        """
        Filter by 'minimum and maximum date range'
        - Specified as min_date, max_date
        - Both must be specified for filter to be applied
        - Determine which "interesting" orders exist bewteen these dates
        To be "interesting":
        - A "received" order where the received date lies within the date range
        - A "pending" order where the target date lies within the date range
        - TODO: An "overdue" order where the target date is in the past
        """
        date_fmt = '%Y-%m-%d'  # ISO format date string
        # Ensure that both dates are valid
        try:
            min_date = datetime.strptime(str(min_date), date_fmt).date()
            max_date = datetime.strptime(str(max_date), date_fmt).date()
        except (ValueError, TypeError):
            # Date processing error, return queryset unchanged
            return queryset
        # Construct a queryset for "received" orders within the range
        received = Q(status=PurchaseOrderStatus.COMPLETE) & Q(complete_date__gte=min_date) & Q(complete_date__lte=max_date)
        # Construct a queryset for "pending" orders within the range
        pending = Q(status__in=PurchaseOrderStatus.OPEN) & ~Q(target_date=None) & Q(target_date__gte=min_date) & Q(target_date__lte=max_date)
        # TODO - Construct a queryset for "overdue" orders within the range
        queryset = queryset.filter(received | pending)
        return queryset
    def __str__(self):
        # e.g. "<prefix><reference> - <supplier name>"
        prefix = getSetting('PURCHASEORDER_REFERENCE_PREFIX')
        return f"{prefix}{self.reference} - {self.supplier.name}"
    # Current lifecycle state (see PurchaseOrderStatus choices)
    status = models.PositiveIntegerField(default=PurchaseOrderStatus.PENDING, choices=PurchaseOrderStatus.items(),
                                         help_text=_('Purchase order status'))
    supplier = models.ForeignKey(
        Company, on_delete=models.CASCADE,
        limit_choices_to={
            'is_supplier': True,
        },
        related_name='purchase_orders',
        help_text=_('Company from which the items are being ordered')
    )
    supplier_reference = models.CharField(max_length=64, blank=True, help_text=_("Supplier order reference code"))
    received_by = models.ForeignKey(
        User,
        on_delete=models.SET_NULL,
        blank=True, null=True,
        related_name='+'
    )
    issue_date = models.DateField(
        blank=True, null=True,
        verbose_name=_('Issue Date'),
        help_text=_('Date order was issued')
    )
    target_date = models.DateField(
        blank=True, null=True,
        verbose_name=_('Target Delivery Date'),
        help_text=_('Expected date for order delivery. Order will be overdue after this date.'),
    )
    complete_date = models.DateField(
        blank=True, null=True,
        verbose_name=_('Completion Date'),
        help_text=_('Date order was completed')
    )
    def get_absolute_url(self):
        """Return the URL of the detail page for this order."""
        return reverse('po-detail', kwargs={'pk': self.id})
    @transaction.atomic
    def add_line_item(self, supplier_part, quantity, group=True, reference=''):
        """ Add a new line item to this purchase order.
        This function will check that:
        * The supplier part matches the supplier specified for this purchase order
        * The quantity is greater than zero
        Args:
            supplier_part - The supplier_part to add
            quantity - The number of items to add
            group - If True, this new quantity will be added to an existing line item for the same supplier_part (if it exists)
        """
        try:
            quantity = int(quantity)
            if quantity <= 0:
                raise ValidationError({
                    'quantity': _("Quantity must be greater than zero")})
        except ValueError:
            raise ValidationError({'quantity': _("Invalid quantity provided")})
        if not supplier_part.supplier == self.supplier:
            raise ValidationError({'supplier': _("Part supplier must match PO supplier")})
        if group:
            # Check if there is already a matching line item (for this PO)
            matches = self.lines.filter(part=supplier_part)
            if matches.count() > 0:
                # Merge into the existing line instead of creating a new one
                line = matches.first()
                line.quantity += quantity
                line.save()
                return
        line = PurchaseOrderLineItem(
            order=self,
            part=supplier_part,
            quantity=quantity,
            reference=reference)
        line.save()
    @transaction.atomic
    def place_order(self):
        """ Marks the PurchaseOrder as PLACED. Order must be currently PENDING. """
        if self.status == PurchaseOrderStatus.PENDING:
            self.status = PurchaseOrderStatus.PLACED
            self.issue_date = datetime.now().date()
            self.save()
    @transaction.atomic
    def complete_order(self):
        """ Marks the PurchaseOrder as COMPLETE. Order must be currently PLACED. """
        if self.status == PurchaseOrderStatus.PLACED:
            self.status = PurchaseOrderStatus.COMPLETE
            self.complete_date = datetime.now().date()
            self.save()
    @property
    def is_overdue(self):
        """
        Returns True if this PurchaseOrder is "overdue"
        Makes use of the OVERDUE_FILTER to avoid code duplication.
        """
        query = PurchaseOrder.objects.filter(pk=self.pk)
        query = query.filter(PurchaseOrder.OVERDUE_FILTER)
        return query.exists()
    def can_cancel(self):
        """
        A PurchaseOrder can only be cancelled under the following circumstances:
        - it is currently PLACED, or
        - it is currently PENDING
        """
        return self.status in [
            PurchaseOrderStatus.PLACED,
            PurchaseOrderStatus.PENDING
        ]
    def cancel_order(self):
        """ Marks the PurchaseOrder as CANCELLED. """
        if self.can_cancel():
            self.status = PurchaseOrderStatus.CANCELLED
            self.save()
    def pending_line_items(self):
        """ Return a list of pending line items for this order.
        Any line item where 'received' < 'quantity' will be returned.
        """
        return self.lines.filter(quantity__gt=F('received'))
    @property
    def is_complete(self):
        """ Return True if all line items have been received """
        return self.pending_line_items().count() == 0
    @transaction.atomic
    def receive_line_item(self, line, location, quantity, user, status=StockStatus.OK):
        """ Receive a line item (or partial line item) against this PO

        Creates a new StockItem at *location*, annotates it, and updates the
        'received' tally on the line. Completes the order once no pending
        line items remain.
        """
        if not self.status == PurchaseOrderStatus.PLACED:
            raise ValidationError({"status": _("Lines can only be received against an order marked as 'Placed'")})
        try:
            quantity = int(quantity)
            if quantity <= 0:
                raise ValidationError({"quantity": _("Quantity must be greater than zero")})
        except ValueError:
            raise ValidationError({"quantity": _("Invalid quantity provided")})
        # Create a new stock item
        if line.part:
            stock = stock_models.StockItem(
                part=line.part.part,
                supplier_part=line.part,
                location=location,
                quantity=quantity,
                purchase_order=self,
                status=status
            )
            stock.save()
            text = _("Received items")
            note = f"{_('Received')} {quantity} {_('items against order')} {str(self)}"
            # Add a new transaction note to the newly created stock item
            stock.addTransactionNote(text, user, note)
        # Update the number of parts received against the particular line item
        line.received += quantity
        line.save()
        # Has this order been completed?
        # NOTE: len() evaluates the queryset; .exists()/.count() would avoid
        # fetching the rows, but the result is the same.
        if len(self.pending_line_items()) == 0:
            self.received_by = user
            self.complete_order()  # This will save the model
class SalesOrder(Order):
"""
A SalesOrder represents a list of goods shipped outwards to a customer.
Attributes:
customer: Reference to the company receiving the goods in the order
customer_reference: Optional field for customer order reference code
target_date: Target date for SalesOrder completion (optional)
"""
OVERDUE_FILTER = Q(status__in=SalesOrderStatus.OPEN) & ~Q(target_date=None) & Q(target_date__lte=datetime.now().date())
@staticmethod
def filterByDate(queryset, min_date, max_date):
"""
Filter by "minimum and maximum date range"
- Specified as min_date, max_date
- Both must be specified for filter to be applied
- Determine which "interesting" orders exist between these dates
To be "interesting":
- A "completed" order where the completion date lies within the date range
- A "pending" order where the target date lies within the date range
- TODO: An "overdue" order where the target date is in the past
"""
date_fmt = '%Y-%m-%d' # ISO format date string
# Ensure that both dates are valid
try:
min_date = datetime.strptime(str(min_date), date_fmt).date()
max_date = datetime.strptime(str(max_date), date_fmt).date()
except (ValueError, TypeError):
# Date processing error, return queryset unchanged
return queryset
# Construct a queryset for "completed" orders within the range
completed = Q(status__in=SalesOrderStatus.COMPLETE) & Q(shipment_date__gte=min_date) & Q(shipment_date__lte=max_date)
# Construct a queryset for "pending" orders within the range
pending = Q(status__in=SalesOrderStatus.OPEN) & ~Q(target_date=None) & Q(target_date__gte=min_date) & Q(target_date__lte=max_date)
# TODO: Construct a queryset for "overdue" orders within the range
queryset = queryset.filter(completed | pending)
return queryset
def __str__(self):
prefix = getSetting('SALESORDER_REFERENCE_PREFIX')
return f"{prefix}{self.reference} - {self.customer.name}"
def get_absolute_url(self):
return reverse('so-detail', kwargs={'pk': self.id})
customer = models.ForeignKey(
Company,
on_delete=models.SET_NULL,
null=True,
limit_choices_to={'is_customer': True},
related_name='sales_orders',
help_text=_("Company to which the items are being sold"),
)
status = models.PositiveIntegerField(default=SalesOrderStatus.PENDING, choices=SalesOrderStatus.items(),
help_text=_('Purchase order status'))
customer_reference = models.CharField(max_length=64, blank=True, help_text=_("Customer order reference code"))
target_date = models.DateField(
null=True, blank=True,
verbose_name=_('Target completion date'),
help_text=_('Target date for order completion. Order will be overdue after this date.')
)
shipment_date = models.DateField(blank=True, null=True)
shipped_by = models.ForeignKey(
User,
on_delete=models.SET_NULL,
blank=True, null=True,
related_name='+'
)
@property
def is_overdue(self):
"""
Returns true if this SalesOrder is "overdue":
Makes use of the OVERDUE_FILTER to avoid code duplication.
"""
query = SalesOrder.objects.filter(pk=self.pk)
query = query.filter(SalesOrder.OVERDUE_FILTER)
return query.exists()
@property
def is_pending(self):
return self.status == SalesOrderStatus.PENDING
def is_fully_allocated(self):
""" Return True if all line items are fully allocated """
for line in self.lines.all():
if not line.is_fully_allocated():
return False
return True
def is_over_allocated(self):
""" Return true if any lines in the order are over-allocated """
for line in self.lines.all():
if line.is_over_allocated():
return True
return False
@transaction.atomic
def ship_order(self, user):
""" Mark this order as 'shipped' """
# The order can only be 'shipped' if the current status is PENDING
if not self.status == SalesOrderStatus.PENDING:
raise ValidationError({'status': _("SalesOrder cannot be shipped as it is not currently pending")})
# Complete the allocation for each allocated StockItem
for line in self.lines.all():
for allocation in line.allocations.all():
allocation.complete_allocation(user)
# Remove the allocation from the database once it has been 'fulfilled'
if allocation.item.sales_order == self:
allocation.delete()
else:
raise ValidationError("Could not complete order - allocation item not fulfilled")
# Ensure the order status is marked as "Shipped"
self.status = SalesOrderStatus.SHIPPED
self.shipment_date = datetime.now().date()
self.shipped_by = user
self.save()
return True
    def can_cancel(self):
        """
        Return True if this order can be cancelled (only while PENDING)
        """
        if not self.status == SalesOrderStatus.PENDING:
            return False
        return True
    @transaction.atomic
    def cancel_order(self):
        """
        Cancel this order (only if it is "pending").

        - Mark the order as 'cancelled'
        - Delete any StockItem allocations against this order

        Returns False (and does nothing) if the order cannot be cancelled.
        """
        if not self.can_cancel():
            return False
        self.status = SalesOrderStatus.CANCELLED
        self.save()
        # Remove all stock allocations against this order
        for line in self.lines.all():
            for allocation in line.allocations.all():
                allocation.delete()
        return True
class PurchaseOrderAttachment(InvenTreeAttachment):
    """
    Model for storing file attachments against a PurchaseOrder object
    """

    def getSubdir(self):
        # Attachments are stored under "po_files/<order-id>/"
        return os.path.join("po_files", str(self.order.id))

    order = models.ForeignKey(PurchaseOrder, on_delete=models.CASCADE, related_name="attachments")
class SalesOrderAttachment(InvenTreeAttachment):
    """
    Model for storing file attachments against a SalesOrder object
    """

    def getSubdir(self):
        # Attachments are stored under "so_files/<order-id>/"
        return os.path.join("so_files", str(self.order.id))

    order = models.ForeignKey(SalesOrder, on_delete=models.CASCADE, related_name='attachments')
class OrderLineItem(models.Model):
    """ Abstract model for an order line item.

    Attributes:
        quantity: Number of items on this line
        reference: Reference text for this line item
        notes: Annotation / notes for this line item
    """

    class Meta:
        abstract = True

    # Rounded decimal quantity; validator enforces quantity >= 0
    quantity = RoundingDecimalField(max_digits=15, decimal_places=5, validators=[MinValueValidator(0)], default=1, help_text=_('Item quantity'))

    reference = models.CharField(max_length=100, blank=True, help_text=_('Line item reference'))

    notes = models.CharField(max_length=500, blank=True, help_text=_('Line item notes'))
class PurchaseOrderLineItem(OrderLineItem):
    """ Model for a purchase order line item.

    Attributes:
        order: Reference to a PurchaseOrder object
        part: Reference to a SupplierPart object (may be null)
        received: Quantity received against this line so far
        purchase_price: Unit purchase price for this line
    """

    class Meta:
        # NOTE(review): the inner parentheses do NOT make a tuple-of-tuples;
        # this is equivalent to ('order', 'part'), which Django treats as a
        # single uniqueness constraint over both fields - confirm intent.
        unique_together = (
            ('order', 'part')
        )

    def __str__(self):
        return "{n} x {part} from {supplier} (for {po})".format(
            n=decimal2string(self.quantity),
            part=self.part.SKU if self.part else 'unknown part',
            supplier=self.order.supplier.name,
            po=self.order)

    order = models.ForeignKey(
        PurchaseOrder, on_delete=models.CASCADE,
        related_name='lines',
        help_text=_('Purchase Order')
    )

    def get_base_part(self):
        """ Return the base part (part.Part) for the line item.

        NOTE(review): raises AttributeError if self.part is None
        (the field is nullable) - confirm callers guard against this.
        """
        return self.part.part

    # TODO - Function callback for when the SupplierPart is deleted?

    part = models.ForeignKey(
        SupplierPart, on_delete=models.SET_NULL,
        blank=True, null=True,
        related_name='purchase_order_line_items',
        help_text=_("Supplier part"),
    )

    # Updated by PurchaseOrder.receive_line_item as goods arrive
    received = models.DecimalField(decimal_places=5, max_digits=15, default=0, help_text=_('Number of items received'))

    purchase_price = MoneyField(
        max_digits=19,
        decimal_places=4,
        default_currency='USD',
        null=True, blank=True,
        verbose_name=_('Purchase Price'),
        help_text=_('Unit purchase price'),
    )

    def remaining(self):
        """ Calculate the number of items remaining to be received.

        Returns zero if the line is over-received (received > quantity).
        """
        r = self.quantity - self.received
        return max(r, 0)
class SalesOrderLineItem(OrderLineItem):
    """
    Model for a single LineItem in a SalesOrder.

    Attributes:
        order: Link to the SalesOrder that this line item belongs to
        part: Link to a Part object (may be null); limited to salable parts
    """

    order = models.ForeignKey(SalesOrder, on_delete=models.CASCADE, related_name='lines', help_text=_('Sales Order'))

    part = models.ForeignKey('part.Part', on_delete=models.SET_NULL, related_name='sales_order_line_items', null=True, help_text=_('Part'), limit_choices_to={'salable': True})

    class Meta:
        # A given part may appear only once per order
        unique_together = [
            ('order', 'part'),
        ]

    def fulfilled_quantity(self):
        """
        Return the total stock quantity fulfilled against this line item.

        Sums the quantities of the stock items attached to the parent order
        which match this line's part.
        """
        query = self.order.stock_items.filter(part=self.part).aggregate(fulfilled=Coalesce(Sum('quantity'), Decimal(0)))
        return query['fulfilled']

    def allocated_quantity(self):
        """ Return the total stock quantity allocated to this LineItem.

        This is a summation of the quantity of each attached StockItem.
        """
        query = self.allocations.aggregate(allocated=Coalesce(Sum('quantity'), Decimal(0)))
        return query['allocated']

    def is_fully_allocated(self):
        """ Return True if this line item is fully allocated """
        # Once the order has shipped, fulfilment (not allocation) is the measure
        if self.order.status == SalesOrderStatus.SHIPPED:
            return self.fulfilled_quantity() >= self.quantity
        return self.allocated_quantity() >= self.quantity

    def is_over_allocated(self):
        """ Return True if this line item is over allocated """
        return self.allocated_quantity() > self.quantity
class SalesOrderAllocation(models.Model):
    """
    This model is used to 'allocate' stock items to a SalesOrder.
    Items that are "allocated" to a SalesOrder are not yet "attached" to the order,
    but they will be once the order is fulfilled.

    Attributes:
        line: SalesOrderLineItem reference
        item: StockItem reference
        quantity: Quantity to take from the StockItem
    """

    class Meta:
        unique_together = [
            # Cannot allocate any given StockItem to the same line more than once
            ('line', 'item'),
        ]

    def clean(self):
        """
        Validate the SalesOrderAllocation object:

        - Cannot allocate stock to a line item without a part reference
        - The referenced part must match the part associated with the line item
        - Allocated quantity cannot exceed the quantity of the stock item
        - Allocation quantity must be "1" if the StockItem is serialized
        - Allocation quantity cannot be zero

        Raises:
            ValidationError: mapping field name -> message for each failed check
        """
        super().clean()

        errors = {}

        try:
            # Accessing line.part raises Part.DoesNotExist if the line has no part
            if not self.line.part == self.item.part:
                errors['item'] = _('Cannot allocate stock item to a line with a different part')
        except PartModels.Part.DoesNotExist:
            errors['line'] = _('Cannot allocate stock to a line without a part')

        if self.quantity > self.item.quantity:
            errors['quantity'] = _('Allocation quantity cannot exceed stock quantity')

        # TODO: The logic here needs improving. Do we need to subtract our own amount, or something?
        if self.item.quantity - self.item.allocation_count() + self.quantity < self.quantity:
            errors['quantity'] = _('StockItem is over-allocated')

        # NOTE: later checks overwrite earlier ones under the same 'quantity'
        # key, so only the last failing quantity check is reported
        if self.quantity <= 0:
            errors['quantity'] = _('Allocation quantity must be greater than zero')

        if self.item.serial and not self.quantity == 1:
            errors['quantity'] = _('Quantity must be 1 for serialized stock item')

        if len(errors) > 0:
            raise ValidationError(errors)

    line = models.ForeignKey(SalesOrderLineItem, on_delete=models.CASCADE, related_name='allocations')

    # Only salable, in-stock, not-yet-sold stock items may be selected
    item = models.ForeignKey(
        'stock.StockItem',
        on_delete=models.CASCADE,
        related_name='sales_order_allocations',
        limit_choices_to={
            'part__salable': True,
            'belongs_to': None,
            'sales_order': None,
        },
        help_text=_('Select stock item to allocate')
    )

    quantity = RoundingDecimalField(max_digits=15, decimal_places=5, validators=[MinValueValidator(0)], default=1, help_text=_('Enter stock allocation quantity'))

    def get_serial(self):
        # Serial number of the allocated stock item
        return self.item.serial

    def get_location(self):
        # Primary key of the stock item's location (or None if unlocated)
        return self.item.location.id if self.item.location else None

    def get_location_path(self):
        # Human-readable path string of the stock item's location
        if self.item.location:
            return self.item.location.pathstring
        else:
            return ""

    def complete_allocation(self, user):
        """
        Complete this allocation (called when the parent SalesOrder is marked as "shipped"):

        - Determine if the referenced StockItem needs to be "split" (if allocated quantity != stock quantity)
        - Mark the StockItem as belonging to the Customer (this will remove it from stock)
        """
        order = self.line.order

        item = self.item.allocateToCustomer(
            order.customer,
            quantity=self.quantity,
            order=order,
            user=user
        )

        # Update our own reference to the StockItem
        # (It may have changed if the stock was split)
        self.item = item
        self.save()
|
"""
Order model definitions
"""
# -*- coding: utf-8 -*-
import os
from datetime import datetime
from decimal import Decimal
from django.db import models, transaction
from django.db.models import Q, F, Sum
from django.db.models.functions import Coalesce
from django.core.validators import MinValueValidator
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.translation import ugettext as _
from markdownx.models import MarkdownxField
from djmoney.models.fields import MoneyField
from part import models as PartModels
from stock import models as stock_models
from company.models import Company, SupplierPart
from InvenTree.fields import RoundingDecimalField
from InvenTree.helpers import decimal2string, increment, getSetting
from InvenTree.status_codes import PurchaseOrderStatus, SalesOrderStatus, StockStatus
from InvenTree.models import InvenTreeAttachment
class Order(models.Model):
    """ Abstract base model for an order.

    Concrete subclasses in this file:
    - PurchaseOrder
    - SalesOrder

    Attributes:
        reference: Unique order number / reference / code
        description: Long form description (required)
        link: Link to an external page (optional)
        notes: Extra note field (optional)
        creation_date: Automatic date of order creation
        created_by: User who created this order (automatically captured)
    """

    @classmethod
    def getNextOrderNumber(cls):
        """
        Try to predict the next order-number by incrementing the reference
        of the most recently created order.

        Returns None if there are no orders yet, or the latest order has no
        reference to increment.
        """

        if cls.objects.count() == 0:
            return None

        # We will assume that the latest pk has the highest PO number
        order = cls.objects.last()
        ref = order.reference

        if not ref:
            return None

        # Track references already seen, so we can detect increment() looping
        tries = set()
        tries.add(ref)

        new_ref = ref

        while True:
            # BUGFIX: previously the loop re-incremented the *original* ref on
            # every pass (and discarded the post-collision increment), so the
            # search gave up after a single database collision. Advance the
            # candidate itself instead.
            new_ref = increment(new_ref)

            if new_ref in tries:
                # increment() has wrapped around - fall back to the original
                return ref

            # Check that the new ref does not exist in the database
            if cls.objects.filter(reference=new_ref).exists():
                tries.add(new_ref)
            else:
                break

        return new_ref

    def save(self, *args, **kwargs):
        # Stamp the creation date the first time the order is saved
        if not self.creation_date:
            self.creation_date = datetime.now().date()

        super().save(*args, **kwargs)

    class Meta:
        abstract = True

    reference = models.CharField(unique=True, max_length=64, blank=False, help_text=_('Order reference'))

    description = models.CharField(max_length=250, help_text=_('Order description'))

    link = models.URLField(blank=True, help_text=_('Link to external page'))

    creation_date = models.DateField(blank=True, null=True)

    created_by = models.ForeignKey(User,
                                   on_delete=models.SET_NULL,
                                   blank=True, null=True,
                                   related_name='+'
                                   )

    notes = MarkdownxField(blank=True, help_text=_('Order notes'))
class PurchaseOrder(Order):
    """ A PurchaseOrder represents goods shipped inwards from an external supplier.

    Attributes:
        supplier: Reference to the company supplying the goods in the order
        supplier_reference: Optional field for supplier order reference code
        received_by: User that received the goods
        target_date: Expected delivery target date for PurchaseOrder completion (optional)
    """

    # NOTE(review): this Q object is evaluated once at import time, so the
    # "today" baked into the filter is the process start date - confirm this
    # is acceptable for long-running server processes.
    OVERDUE_FILTER = Q(status__in=PurchaseOrderStatus.OPEN) & ~Q(target_date=None) & Q(target_date__lte=datetime.now().date())

    @staticmethod
    def filterByDate(queryset, min_date, max_date):
        """
        Filter by 'minimum and maximum date range'.

        - Specified as min_date, max_date
        - Both must be specified (and parseable) for the filter to be applied
        - Determine which "interesting" orders exist between these dates

        To be "interesting":
        - A "received" order where the completion date lies within the date range
        - A "pending" order where the target date lies within the date range
        - TODO: An "overdue" order where the target date is in the past
        """

        date_fmt = '%Y-%m-%d'  # ISO format date string

        # Ensure that both dates are valid
        try:
            min_date = datetime.strptime(str(min_date), date_fmt).date()
            max_date = datetime.strptime(str(max_date), date_fmt).date()
        except (ValueError, TypeError):
            # Date processing error, return queryset unchanged
            return queryset

        # Construct a queryset for "received" orders within the range
        received = Q(status=PurchaseOrderStatus.COMPLETE) & Q(complete_date__gte=min_date) & Q(complete_date__lte=max_date)

        # Construct a queryset for "pending" orders within the range
        pending = Q(status__in=PurchaseOrderStatus.OPEN) & ~Q(target_date=None) & Q(target_date__gte=min_date) & Q(target_date__lte=max_date)

        # TODO - Construct a queryset for "overdue" orders within the range

        queryset = queryset.filter(received | pending)

        return queryset

    def __str__(self):
        # Render as e.g. "<prefix>0001 - Supplier Name" (prefix is a user setting)
        prefix = getSetting('PURCHASEORDER_REFERENCE_PREFIX')

        return f"{prefix}{self.reference} - {self.supplier.name}"

    # Current lifecycle state of the order (see PurchaseOrderStatus)
    status = models.PositiveIntegerField(default=PurchaseOrderStatus.PENDING, choices=PurchaseOrderStatus.items(),
                                         help_text=_('Purchase order status'))

    # Company the goods are ordered from (must be flagged as a supplier)
    supplier = models.ForeignKey(
        Company, on_delete=models.CASCADE,
        limit_choices_to={
            'is_supplier': True,
        },
        related_name='purchase_orders',
        help_text=_('Company from which the items are being ordered')
    )

    supplier_reference = models.CharField(max_length=64, blank=True, help_text=_("Supplier order reference code"))

    # Set automatically by receive_line_item when the order completes
    received_by = models.ForeignKey(
        User,
        on_delete=models.SET_NULL,
        blank=True, null=True,
        related_name='+'
    )

    # Set automatically by place_order
    issue_date = models.DateField(
        blank=True, null=True,
        verbose_name=_('Issue Date'),
        help_text=_('Date order was issued')
    )

    target_date = models.DateField(
        blank=True, null=True,
        verbose_name=_('Target Delivery Date'),
        help_text=_('Expected date for order delivery. Order will be overdue after this date.'),
    )

    # Set automatically by complete_order
    complete_date = models.DateField(
        blank=True, null=True,
        verbose_name=_('Completion Date'),
        help_text=_('Date order was completed')
    )

    def get_absolute_url(self):
        # Detail view URL for this order
        return reverse('po-detail', kwargs={'pk': self.id})

    @transaction.atomic
    def add_line_item(self, supplier_part, quantity, group=True, reference=''):
        """ Add a new line item to this purchase order.

        This function will check that:

        * The supplier part matches the supplier specified for this purchase order
        * The quantity is greater than zero

        Args:
            supplier_part - The supplier_part to add
            quantity - The number of items to add
            group - If True, this new quantity will be added to an existing line item for the same supplier_part (if it exists)
            reference - Optional reference text for the new line item

        Raises:
            ValidationError: if the quantity is invalid, or the part's supplier
                does not match this order's supplier
        """
        try:
            quantity = int(quantity)
            if quantity <= 0:
                raise ValidationError({
                    'quantity': _("Quantity must be greater than zero")})
        except ValueError:
            raise ValidationError({'quantity': _("Invalid quantity provided")})

        if not supplier_part.supplier == self.supplier:
            raise ValidationError({'supplier': _("Part supplier must match PO supplier")})

        if group:
            # Check if there is already a matching line item (for this PO)
            matches = self.lines.filter(part=supplier_part)

            if matches.count() > 0:
                # Merge the new quantity into the existing line item
                line = matches.first()
                line.quantity += quantity
                line.save()

                return

        line = PurchaseOrderLineItem(
            order=self,
            part=supplier_part,
            quantity=quantity,
            reference=reference)

        line.save()

    @transaction.atomic
    def place_order(self):
        """ Marks the PurchaseOrder as PLACED. Order must be currently PENDING. """

        if self.status == PurchaseOrderStatus.PENDING:
            self.status = PurchaseOrderStatus.PLACED
            self.issue_date = datetime.now().date()
            self.save()

    @transaction.atomic
    def complete_order(self):
        """ Marks the PurchaseOrder as COMPLETE. Order must be currently PLACED. """

        if self.status == PurchaseOrderStatus.PLACED:
            self.status = PurchaseOrderStatus.COMPLETE
            self.complete_date = datetime.now().date()
            self.save()

    @property
    def is_overdue(self):
        """
        Returns True if this PurchaseOrder is "overdue".

        Makes use of the OVERDUE_FILTER to avoid code duplication.
        """

        query = PurchaseOrder.objects.filter(pk=self.pk)
        query = query.filter(PurchaseOrder.OVERDUE_FILTER)

        return query.exists()

    def can_cancel(self):
        """
        A PurchaseOrder can only be cancelled while it is PLACED or PENDING.
        """
        return self.status in [
            PurchaseOrderStatus.PLACED,
            PurchaseOrderStatus.PENDING
        ]

    def cancel_order(self):
        """ Marks the PurchaseOrder as CANCELLED (no-op if can_cancel is False). """
        if self.can_cancel():
            self.status = PurchaseOrderStatus.CANCELLED
            self.save()

    def pending_line_items(self):
        """ Return a queryset of pending line items for this order.

        Any line item where 'received' < 'quantity' will be returned.
        """
        return self.lines.filter(quantity__gt=F('received'))

    @property
    def is_complete(self):
        """ Return True if all line items have been received """
        return self.pending_line_items().count() == 0

    @transaction.atomic
    def receive_line_item(self, line, location, quantity, user, status=StockStatus.OK):
        """ Receive a line item (or partial line item) against this PO.

        Creates a new StockItem for the received quantity (if the line has a
        part) and updates the line item's 'received' count. Completes the
        order automatically when no pending lines remain.

        Raises:
            ValidationError: if the order is not PLACED, or quantity is invalid
        """

        if not self.status == PurchaseOrderStatus.PLACED:
            raise ValidationError({"status": _("Lines can only be received against an order marked as 'Placed'")})

        try:
            # NOTE(review): int() truncates fractional quantities - confirm
            # that decimal receipts are intentionally disallowed here
            quantity = int(quantity)
            if quantity <= 0:
                raise ValidationError({"quantity": _("Quantity must be greater than zero")})
        except ValueError:
            raise ValidationError({"quantity": _("Invalid quantity provided")})

        # Create a new stock item
        if line.part:
            stock = stock_models.StockItem(
                part=line.part.part,
                supplier_part=line.part,
                location=location,
                quantity=quantity,
                purchase_order=self,
                status=status
            )

            stock.save()

            text = _("Received items")
            note = f"{_('Received')} {quantity} {_('items against order')} {str(self)}"

            # Add a new transaction note to the newly created stock item
            stock.addTransactionNote(text, user, note)

        # Update the number of parts received against the particular line item
        line.received += quantity
        line.save()

        # Has this order been completed?
        # (len() evaluates the queryset; .count() would avoid fetching rows)
        if len(self.pending_line_items()) == 0:
            self.received_by = user
            self.complete_order()  # This will save the model
class SalesOrder(Order):
    """
    A SalesOrder represents a list of goods shipped outwards to a customer.

    Attributes:
        customer: Reference to the company receiving the goods in the order
        customer_reference: Optional field for customer order reference code
        target_date: Target date for SalesOrder completion (optional)
    """

    # NOTE(review): this Q object is evaluated once at import time, so the
    # "today" baked into the filter is the process start date - confirm this
    # is acceptable for long-running server processes.
    OVERDUE_FILTER = Q(status__in=SalesOrderStatus.OPEN) & ~Q(target_date=None) & Q(target_date__lte=datetime.now().date())

    @staticmethod
    def filterByDate(queryset, min_date, max_date):
        """
        Filter by "minimum and maximum date range".

        - Specified as min_date, max_date
        - Both must be specified (and parseable) for the filter to be applied
        - Determine which "interesting" orders exist between these dates

        To be "interesting":
        - A "completed" order where the completion date lies within the date range
        - A "pending" order where the target date lies within the date range
        - TODO: An "overdue" order where the target date is in the past
        """

        date_fmt = '%Y-%m-%d'  # ISO format date string

        # Ensure that both dates are valid
        try:
            min_date = datetime.strptime(str(min_date), date_fmt).date()
            max_date = datetime.strptime(str(max_date), date_fmt).date()
        except (ValueError, TypeError):
            # Date processing error, return queryset unchanged
            return queryset

        # Construct a queryset for "completed" orders within the range
        completed = Q(status__in=SalesOrderStatus.COMPLETE) & Q(shipment_date__gte=min_date) & Q(shipment_date__lte=max_date)

        # Construct a queryset for "pending" orders within the range
        pending = Q(status__in=SalesOrderStatus.OPEN) & ~Q(target_date=None) & Q(target_date__gte=min_date) & Q(target_date__lte=max_date)

        # TODO: Construct a queryset for "overdue" orders within the range

        queryset = queryset.filter(completed | pending)

        return queryset

    def __str__(self):
        # Render as e.g. "<prefix>0001 - Customer Name" (prefix is a user setting)
        prefix = getSetting('SALESORDER_REFERENCE_PREFIX')

        return f"{prefix}{self.reference} - {self.customer.name}"

    def get_absolute_url(self):
        # Detail view URL for this order
        return reverse('so-detail', kwargs={'pk': self.id})

    customer = models.ForeignKey(
        Company,
        on_delete=models.SET_NULL,
        null=True,
        limit_choices_to={'is_customer': True},
        related_name='sales_orders',
        help_text=_("Company to which the items are being sold"),
    )

    # BUGFIX: help text previously read 'Purchase order status' - a
    # copy-paste error from the PurchaseOrder model
    status = models.PositiveIntegerField(default=SalesOrderStatus.PENDING, choices=SalesOrderStatus.items(),
                                         help_text=_('Sales order status'))

    customer_reference = models.CharField(max_length=64, blank=True, help_text=_("Customer order reference code"))

    target_date = models.DateField(
        null=True, blank=True,
        verbose_name=_('Target completion date'),
        help_text=_('Target date for order completion. Order will be overdue after this date.')
    )

    # Set automatically by ship_order
    shipment_date = models.DateField(blank=True, null=True)

    # Set automatically by ship_order
    shipped_by = models.ForeignKey(
        User,
        on_delete=models.SET_NULL,
        blank=True, null=True,
        related_name='+'
    )

    @property
    def is_overdue(self):
        """
        Returns True if this SalesOrder is "overdue".

        Makes use of the OVERDUE_FILTER to avoid code duplication.
        """

        query = SalesOrder.objects.filter(pk=self.pk)
        query = query.filter(SalesOrder.OVERDUE_FILTER)

        return query.exists()

    @property
    def is_pending(self):
        """ Return True if this order is currently in the PENDING state """
        return self.status == SalesOrderStatus.PENDING

    def is_fully_allocated(self):
        """ Return True if all line items are fully allocated """

        for line in self.lines.all():
            if not line.is_fully_allocated():
                return False

        return True

    def is_over_allocated(self):
        """ Return True if any lines in the order are over-allocated """

        for line in self.lines.all():
            if line.is_over_allocated():
                return True

        return False

    @transaction.atomic
    def ship_order(self, user):
        """ Mark this order as 'shipped'.

        - Completes each StockItem allocation against the order
        - Records the shipment date and the shipping user

        Raises:
            ValidationError: if the order is not currently PENDING,
                or an allocation was not fulfilled against this order.
        """

        # The order can only be 'shipped' if the current status is PENDING
        if not self.status == SalesOrderStatus.PENDING:
            raise ValidationError({'status': _("SalesOrder cannot be shipped as it is not currently pending")})

        # Complete the allocation for each allocated StockItem
        for line in self.lines.all():
            for allocation in line.allocations.all():
                allocation.complete_allocation(user)

                # Remove the allocation from the database once it has been 'fulfilled'
                if allocation.item.sales_order == self:
                    allocation.delete()
                else:
                    # Raising here aborts the whole atomic transaction
                    raise ValidationError("Could not complete order - allocation item not fulfilled")

        # Ensure the order status is marked as "Shipped"
        self.status = SalesOrderStatus.SHIPPED
        self.shipment_date = datetime.now().date()
        self.shipped_by = user
        self.save()

        return True

    def can_cancel(self):
        """
        Return True if this order can be cancelled (only while PENDING)
        """

        if not self.status == SalesOrderStatus.PENDING:
            return False

        return True

    @transaction.atomic
    def cancel_order(self):
        """
        Cancel this order (only if it is "pending").

        - Mark the order as 'cancelled'
        - Delete any StockItem allocations against this order

        Returns False (and does nothing) if the order cannot be cancelled.
        """

        if not self.can_cancel():
            return False

        self.status = SalesOrderStatus.CANCELLED
        self.save()

        for line in self.lines.all():
            for allocation in line.allocations.all():
                allocation.delete()

        return True
class PurchaseOrderAttachment(InvenTreeAttachment):
    """
    Model for storing file attachments against a PurchaseOrder object
    """

    def getSubdir(self):
        # Attachments are stored under "po_files/<order-id>/"
        return os.path.join("po_files", str(self.order.id))

    order = models.ForeignKey(PurchaseOrder, on_delete=models.CASCADE, related_name="attachments")
class SalesOrderAttachment(InvenTreeAttachment):
    """
    Model for storing file attachments against a SalesOrder object
    """

    def getSubdir(self):
        # Attachments are stored under "so_files/<order-id>/"
        return os.path.join("so_files", str(self.order.id))

    order = models.ForeignKey(SalesOrder, on_delete=models.CASCADE, related_name='attachments')
class OrderLineItem(models.Model):
    """ Abstract model for an order line item.

    Attributes:
        quantity: Number of items on this line
        reference: Reference text for this line item
        notes: Annotation / notes for this line item
    """

    class Meta:
        abstract = True

    # Rounded decimal quantity; validator enforces quantity >= 0
    quantity = RoundingDecimalField(max_digits=15, decimal_places=5, validators=[MinValueValidator(0)], default=1, help_text=_('Item quantity'))

    reference = models.CharField(max_length=100, blank=True, help_text=_('Line item reference'))

    notes = models.CharField(max_length=500, blank=True, help_text=_('Line item notes'))
class PurchaseOrderLineItem(OrderLineItem):
    """ Model for a purchase order line item.

    Attributes:
        order: Reference to a PurchaseOrder object
        part: Reference to a SupplierPart object (may be null)
        received: Quantity received against this line so far
        purchase_price: Unit purchase price for this line
    """

    class Meta:
        # NOTE(review): the inner parentheses do NOT make a tuple-of-tuples;
        # this is equivalent to ('order', 'part'), which Django treats as a
        # single uniqueness constraint over both fields - confirm intent.
        unique_together = (
            ('order', 'part')
        )

    def __str__(self):
        return "{n} x {part} from {supplier} (for {po})".format(
            n=decimal2string(self.quantity),
            part=self.part.SKU if self.part else 'unknown part',
            supplier=self.order.supplier.name,
            po=self.order)

    order = models.ForeignKey(
        PurchaseOrder, on_delete=models.CASCADE,
        related_name='lines',
        help_text=_('Purchase Order')
    )

    def get_base_part(self):
        """ Return the base part (part.Part) for the line item.

        NOTE(review): raises AttributeError if self.part is None
        (the field is nullable) - confirm callers guard against this.
        """
        return self.part.part

    # TODO - Function callback for when the SupplierPart is deleted?

    part = models.ForeignKey(
        SupplierPart, on_delete=models.SET_NULL,
        blank=True, null=True,
        related_name='purchase_order_line_items',
        help_text=_("Supplier part"),
    )

    # Updated by PurchaseOrder.receive_line_item as goods arrive
    received = models.DecimalField(decimal_places=5, max_digits=15, default=0, help_text=_('Number of items received'))

    purchase_price = MoneyField(
        max_digits=19,
        decimal_places=4,
        default_currency='USD',
        null=True, blank=True,
        verbose_name=_('Purchase Price'),
        help_text=_('Unit purchase price'),
    )

    def remaining(self):
        """ Calculate the number of items remaining to be received.

        Returns zero if the line is over-received (received > quantity).
        """
        r = self.quantity - self.received
        return max(r, 0)
class SalesOrderLineItem(OrderLineItem):
    """
    Model for a single LineItem in a SalesOrder.

    Attributes:
        order: Link to the SalesOrder that this line item belongs to
        part: Link to a Part object (may be null); limited to salable parts
    """

    order = models.ForeignKey(SalesOrder, on_delete=models.CASCADE, related_name='lines', help_text=_('Sales Order'))

    part = models.ForeignKey('part.Part', on_delete=models.SET_NULL, related_name='sales_order_line_items', null=True, help_text=_('Part'), limit_choices_to={'salable': True})

    class Meta:
        # A given part may appear only once per order
        unique_together = [
            ('order', 'part'),
        ]

    def fulfilled_quantity(self):
        """
        Return the total stock quantity fulfilled against this line item.

        Sums the quantities of the stock items attached to the parent order
        which match this line's part.
        """

        query = self.order.stock_items.filter(part=self.part).aggregate(fulfilled=Coalesce(Sum('quantity'), Decimal(0)))

        return query['fulfilled']

    def allocated_quantity(self):
        """ Return the total stock quantity allocated to this LineItem.

        This is a summation of the quantity of each attached StockItem.
        """

        query = self.allocations.aggregate(allocated=Coalesce(Sum('quantity'), Decimal(0)))

        return query['allocated']

    def is_fully_allocated(self):
        """ Return True if this line item is fully allocated """

        # Once the order has shipped, fulfilment (not allocation) is the measure
        if self.order.status == SalesOrderStatus.SHIPPED:
            return self.fulfilled_quantity() >= self.quantity

        return self.allocated_quantity() >= self.quantity

    def is_over_allocated(self):
        """ Return True if this line item is over allocated """
        return self.allocated_quantity() > self.quantity
class SalesOrderAllocation(models.Model):
    """
    This model is used to 'allocate' stock items to a SalesOrder.
    Items that are "allocated" to a SalesOrder are not yet "attached" to the order,
    but they will be once the order is fulfilled.

    Attributes:
        line: SalesOrderLineItem reference
        item: StockItem reference
        quantity: Quantity to take from the StockItem
    """

    class Meta:
        unique_together = [
            # Cannot allocate any given StockItem to the same line more than once
            ('line', 'item'),
        ]

    def clean(self):
        """
        Validate the SalesOrderAllocation object:

        - Cannot allocate stock to a line item without a part reference
        - The referenced part must match the part associated with the line item
        - Allocated quantity cannot exceed the quantity of the stock item
        - Allocation quantity must be "1" if the StockItem is serialized
        - Allocation quantity cannot be zero

        Raises:
            ValidationError: mapping field name -> message for each failed check
        """

        super().clean()

        errors = {}

        try:
            # Accessing line.part raises Part.DoesNotExist if the line has no part
            if not self.line.part == self.item.part:
                errors['item'] = _('Cannot allocate stock item to a line with a different part')
        except PartModels.Part.DoesNotExist:
            errors['line'] = _('Cannot allocate stock to a line without a part')

        if self.quantity > self.item.quantity:
            errors['quantity'] = _('Allocation quantity cannot exceed stock quantity')

        # TODO: The logic here needs improving. Do we need to subtract our own amount, or something?
        if self.item.quantity - self.item.allocation_count() + self.quantity < self.quantity:
            errors['quantity'] = _('StockItem is over-allocated')

        # NOTE: later checks overwrite earlier ones under the same 'quantity'
        # key, so only the last failing quantity check is reported
        if self.quantity <= 0:
            errors['quantity'] = _('Allocation quantity must be greater than zero')

        if self.item.serial and not self.quantity == 1:
            errors['quantity'] = _('Quantity must be 1 for serialized stock item')

        if len(errors) > 0:
            raise ValidationError(errors)

    line = models.ForeignKey(SalesOrderLineItem, on_delete=models.CASCADE, related_name='allocations')

    # Only salable, in-stock, not-yet-sold stock items may be selected
    item = models.ForeignKey(
        'stock.StockItem',
        on_delete=models.CASCADE,
        related_name='sales_order_allocations',
        limit_choices_to={
            'part__salable': True,
            'belongs_to': None,
            'sales_order': None,
        },
        help_text=_('Select stock item to allocate')
    )

    quantity = RoundingDecimalField(max_digits=15, decimal_places=5, validators=[MinValueValidator(0)], default=1, help_text=_('Enter stock allocation quantity'))

    def get_serial(self):
        # Serial number of the allocated stock item
        return self.item.serial

    def get_location(self):
        # Primary key of the stock item's location (or None if unlocated)
        return self.item.location.id if self.item.location else None

    def get_location_path(self):
        # Human-readable path string of the stock item's location
        if self.item.location:
            return self.item.location.pathstring
        else:
            return ""

    def complete_allocation(self, user):
        """
        Complete this allocation (called when the parent SalesOrder is marked as "shipped"):

        - Determine if the referenced StockItem needs to be "split" (if allocated quantity != stock quantity)
        - Mark the StockItem as belonging to the Customer (this will remove it from stock)
        """

        order = self.line.order

        item = self.item.allocateToCustomer(
            order.customer,
            quantity=self.quantity,
            order=order,
            user=user
        )

        # Update our own reference to the StockItem
        # (It may have changed if the stock was split)
        self.item = item
        self.save()
|
en
| 0.849212
|
Order model definitions # -*- coding: utf-8 -*- Abstract model for an order. Instances of this class: - PuchaseOrder Attributes: reference: Unique order number / reference / code description: Long form description (required) notes: Extra note field (optional) creation_date: Automatic date of order creation created_by: User who created this order (automatically captured) issue_date: Date the order was issued complete_date: Date the order was completed Try to predict the next order-number # We will assume that the latest pk has the highest PO number # We are in a looping situation - simply return the original one # Check that the new ref does not exist in the database A PurchaseOrder represents goods shipped inwards from an external supplier. Attributes: supplier: Reference to the company supplying the goods in the order supplier_reference: Optional field for supplier order reference code received_by: User that received the goods target_date: Expected delivery target date for PurchaseOrder completion (optional) Filter by 'minimum and maximum date range' - Specified as min_date, max_date - Both must be specified for filter to be applied - Determine which "interesting" orders exist bewteen these dates To be "interesting": - A "received" order where the received date lies within the date range - A "pending" order where the target date lies within the date range - TODO: An "overdue" order where the target date is in the past # ISO format date string # Ensure that both dates are valid # Date processing error, return queryset unchanged # Construct a queryset for "received" orders within the range # Construct a queryset for "pending" orders within the range # TODO - Construct a queryset for "overdue" orders within the range Add a new line item to this purchase order. 
This function will check that: * The supplier part matches the supplier specified for this purchase order * The quantity is greater than zero Args: supplier_part - The supplier_part to add quantity - The number of items to add group - If True, this new quantity will be added to an existing line item for the same supplier_part (if it exists) # Check if there is already a matching line item (for this PO) Marks the PurchaseOrder as PLACED. Order must be currently PENDING. Marks the PurchaseOrder as COMPLETE. Order must be currently PLACED. Returns True if this PurchaseOrder is "overdue" Makes use of the OVERDUE_FILTER to avoid code duplication. A PurchaseOrder can only be cancelled under the following circumstances: Marks the PurchaseOrder as CANCELLED. Return a list of pending line items for this order. Any line item where 'received' < 'quantity' will be returned. Return True if all line items have been received Receive a line item (or partial line item) against this PO # Create a new stock item # Add a new transaction note to the newly created stock item # Update the number of parts received against the particular line item # Has this order been completed? # This will save the model A SalesOrder represents a list of goods shipped outwards to a customer. 
Attributes: customer: Reference to the company receiving the goods in the order customer_reference: Optional field for customer order reference code target_date: Target date for SalesOrder completion (optional) Filter by "minimum and maximum date range" - Specified as min_date, max_date - Both must be specified for filter to be applied - Determine which "interesting" orders exist between these dates To be "interesting": - A "completed" order where the completion date lies within the date range - A "pending" order where the target date lies within the date range - TODO: An "overdue" order where the target date is in the past # ISO format date string # Ensure that both dates are valid # Date processing error, return queryset unchanged # Construct a queryset for "completed" orders within the range # Construct a queryset for "pending" orders within the range # TODO: Construct a queryset for "overdue" orders within the range Returns true if this SalesOrder is "overdue": Makes use of the OVERDUE_FILTER to avoid code duplication. Return True if all line items are fully allocated Return true if any lines in the order are over-allocated Mark this order as 'shipped' # The order can only be 'shipped' if the current status is PENDING # Complete the allocation for each allocated StockItem # Remove the allocation from the database once it has been 'fulfilled' # Ensure the order status is marked as "Shipped" Return True if this order can be cancelled Cancel this order (only if it is "pending") - Mark the order as 'cancelled' - Delete any StockItems which have been allocated Model for storing file attachments against a PurchaseOrder object Model for storing file attachments against a SalesOrder object Abstract model for an order line item Attributes: quantity: Number of items note: Annotation for the item Model for a purchase order line item. 
Attributes: order: Reference to a PurchaseOrder object Return the base-part for the line item # TODO - Function callback for when the SupplierPart is deleted? Calculate the number of items remaining to be received Model for a single LineItem in a SalesOrder Attributes: order: Link to the SalesOrder that this line item belongs to part: Link to a Part object (may be null) Return the total stock quantity fulfilled against this line item. Return the total stock quantity allocated to this LineItem. This is a summation of the quantity of each attached StockItem Return True if this line item is fully allocated Return True if this line item is over allocated This model is used to 'allocate' stock items to a SalesOrder. Items that are "allocated" to a SalesOrder are not yet "attached" to the order, but they will be once the order is fulfilled. Attributes: line: SalesOrderLineItem reference item: StockItem reference quantity: Quantity to take from the StockItem # Cannot allocate any given StockItem to the same line more than once Validate the SalesOrderAllocation object: - Cannot allocate stock to a line item without a part reference - The referenced part must match the part associated with the line item - Allocated quantity cannot exceed the quantity of the stock item - Allocation quantity must be "1" if the StockItem is serialized - Allocation quantity cannot be zero # TODO: The logic here needs improving. Do we need to subtract our own amount, or something? Complete this allocation (called when the parent SalesOrder is marked as "shipped"): - Determine if the referenced StockItem needs to be "split" (if allocated quantity != stock quantity) - Mark the StockItem as belonging to the Customer (this will remove it from stock) # Update our own reference to the StockItem # (It may have changed if the stock was split)
| 2.579517
| 3
|
pytorch-distributed/setup.py
|
Napkin-DL/my-aws-example
| 0
|
6626698
|
# Packaging configuration for the GENTRL library.
# NOTE(review): the distribution is named 'gentrl' although the surrounding
# repo path suggests a different project -- confirm the intended name.
from setuptools import setup, find_packages
setup(
    name='gentrl',
    version='0.1',
    python_requires='>=3.5.0',
    packages=find_packages(),
    install_requires=[
        'numpy>=1.15',
        'pandas>=0.23',
        'scipy>=1.1.0',
        # torch is pinned exactly; newer versions are untested here.
        'torch==1.2.0',
        'molsets==0.1.3'
    ],
    description='Generative Tensorial Reinforcement Learning (GENTRL)',
)
|
from setuptools import setup, find_packages
setup(
name='gentrl',
version='0.1',
python_requires='>=3.5.0',
packages=find_packages(),
install_requires=[
'numpy>=1.15',
'pandas>=0.23',
'scipy>=1.1.0',
'torch==1.2.0',
'molsets==0.1.3'
],
description='Generative Tensorial Reinforcement Learning (GENTRL)',
)
|
none
| 1
| 1.299136
| 1
|
|
src/data/tests/__init__.py
|
nsteins/crash-model
| 54
|
6626699
|
"""
Tests for data_generation
"""
|
"""
Tests for data_generation
"""
|
en
| 0.77268
|
Tests for data_generation
| 0.975476
| 1
|
pureples/es_hyperneat/es_hyperneat.py
|
cb244/pureples
| 93
|
6626700
|
<filename>pureples/es_hyperneat/es_hyperneat.py<gh_stars>10-100
"""
All logic concerning ES-HyperNEAT resides here.
"""
import copy
import neat
import numpy as np
from pureples.hyperneat.hyperneat import query_cppn
from pureples.shared.visualize import draw_es
class ESNetwork:
    """
    The evolvable substrate network.
    Grows hidden nodes and connections from a CPPN using the ES-HyperNEAT
    quadtree algorithm: division/initialization, pruning/extraction with a
    band test, then cleanup of dangling paths.
    """
    def __init__(self, substrate, cppn, params):
        # substrate: provides input_coordinates / output_coordinates
        #            (2D points; presumably in [-1, 1]^2 -- TODO confirm).
        # cppn: network queried for connection weights between coordinates.
        # params: dict of ES-HyperNEAT hyperparameters (keys read below).
        self.substrate = substrate
        self.cppn = cppn
        self.initial_depth = params["initial_depth"]
        self.max_depth = params["max_depth"]
        self.variance_threshold = params["variance_threshold"]
        self.band_threshold = params["band_threshold"]
        self.iteration_level = params["iteration_level"]
        self.division_threshold = params["division_threshold"]
        self.max_weight = params["max_weight"]
        # Scratch set filled by pruning_extraction; reset between phases.
        self.connections = set()
        # Number of layers in the network.
        self.activations = 2 ** params["max_depth"] + 1
        activation_functions = neat.activations.ActivationFunctionSet()
        self.activation = activation_functions.get(params["activation"])
    def create_phenotype_network(self, filename=None):
        """
        Create a RecurrentNetwork using the ES-HyperNEAT approach.
        Node IDs are assigned in order: inputs, outputs, then hidden nodes.
        If `filename` is given, the resulting network is also drawn to disk.
        """
        input_coordinates = self.substrate.input_coordinates
        output_coordinates = self.substrate.output_coordinates
        input_nodes = list(range(len(input_coordinates)))
        output_nodes = list(range(len(input_nodes), len(
            input_nodes)+len(output_coordinates)))
        hidden_idx = len(input_coordinates)+len(output_coordinates)
        coordinates, indices, draw_connections, node_evals = [], [], [], []
        nodes = {}
        coordinates.extend(input_coordinates)
        coordinates.extend(output_coordinates)
        indices.extend(input_nodes)
        indices.extend(output_nodes)
        # Map input and output coordinates to their IDs.
        coords_to_id = dict(zip(coordinates, indices))
        # Where the magic happens.
        hidden_nodes, connections = self.es_hyperneat()
        # Map hidden coordinates to their IDs.
        for x, y in hidden_nodes:
            coords_to_id[x, y] = hidden_idx
            hidden_idx += 1
        # For every coordinate:
        # Check the connections and create a node with corresponding connections if appropriate.
        for (x, y), idx in coords_to_id.items():
            for c in connections:
                if c.x2 == x and c.y2 == y:
                    draw_connections.append(c)
                    if idx in nodes:
                        initial = nodes[idx]
                        initial.append((coords_to_id[c.x1, c.y1], c.weight))
                        nodes[idx] = initial
                    else:
                        nodes[idx] = [(coords_to_id[c.x1, c.y1], c.weight)]
        # Combine the indices with the connections/links;
        # forming node_evals used by the RecurrentNetwork.
        for idx, links in nodes.items():
            node_evals.append((idx, self.activation, sum, 0.0, 1.0, links))
        # Visualize the network?
        if filename is not None:
            draw_es(coords_to_id, draw_connections, filename)
        # This is actually a feedforward network.
        return neat.nn.RecurrentNetwork(input_nodes, output_nodes, node_evals)
    @staticmethod
    def get_weights(p):
        """
        Recursively collect all weights for a given QuadPoint.
        Leaf weights are gathered; internal nodes (all four children present)
        are descended into instead.
        """
        temp = []
        def loop(pp):
            if pp is not None and all(child is not None for child in pp.cs):
                for i in range(0, 4):
                    loop(pp.cs[i])
            else:
                if pp is not None:
                    temp.append(pp.w)
        loop(p)
        return temp
    def variance(self, p):
        """
        Find the variance of a given QuadPoint.
        Returns 0.0 for a missing point.
        """
        if not p:
            return 0.0
        return np.var(self.get_weights(p))
    def division_initialization(self, coord, outgoing):
        """
        Initialize the quadtree by dividing it in appropriate quads.
        Breadth-first: each popped quad is split into four children whose
        weights are queried from the CPPN; a quad is subdivided further while
        below initial_depth, or while below max_depth with high weight
        variance.
        """
        root = QuadPoint(0.0, 0.0, 1.0, 1)
        q = [root]
        while q:
            p = q.pop(0)
            p.cs[0] = QuadPoint(p.x - p.width/2.0, p.y -
                                p.width/2.0, p.width/2.0, p.lvl + 1)
            p.cs[1] = QuadPoint(p.x - p.width/2.0, p.y +
                                p.width/2.0, p.width/2.0, p.lvl + 1)
            p.cs[2] = QuadPoint(p.x + p.width/2.0, p.y +
                                p.width/2.0, p.width/2.0, p.lvl + 1)
            p.cs[3] = QuadPoint(p.x + p.width/2.0, p.y -
                                p.width/2.0, p.width/2.0, p.lvl + 1)
            for c in p.cs:
                c.w = query_cppn(coord, (c.x, c.y), outgoing,
                                 self.cppn, self.max_weight)
            if (p.lvl < self.initial_depth) or (p.lvl < self.max_depth and self.variance(p)
                                                > self.division_threshold):
                for child in p.cs:
                    q.append(child)
        return root
    def pruning_extraction(self, coord, p, outgoing):
        """
        Determines which connections to express - high variance = more connections.
        High-variance children are recursed into; low-variance children are
        band-tested against their four neighbours and expressed as a
        Connection when the band value exceeds band_threshold.
        """
        for c in p.cs:
            d_left, d_right, d_top, d_bottom = None, None, None, None
            if self.variance(c) > self.variance_threshold:
                self.pruning_extraction(coord, c, outgoing)
            else:
                d_left = abs(c.w - query_cppn(coord, (c.x - p.width,
                                                      c.y), outgoing, self.cppn, self.max_weight))
                d_right = abs(c.w - query_cppn(coord, (c.x + p.width,
                                                       c.y), outgoing, self.cppn, self.max_weight))
                d_top = abs(c.w - query_cppn(coord, (c.x, c.y - p.width),
                                             outgoing, self.cppn, self.max_weight))
                d_bottom = abs(c.w - query_cppn(coord, (c.x, c.y +
                                                        p.width), outgoing, self.cppn, self.max_weight))
                con = None
                if max(min(d_top, d_bottom), min(d_left, d_right)) > self.band_threshold:
                    if outgoing:
                        con = Connection(coord[0], coord[1], c.x, c.y, c.w)
                    else:
                        con = Connection(c.x, c.y, coord[0], coord[1], c.w)
                if con is not None:
                    # Nodes will only connect upwards.
                    # If connections to same layer is wanted, change to con.y1 <= con.y2.
                    if not c.w == 0.0 and con.y1 < con.y2 and not (con.x1 == con.x2 and con.y1 == con.y2):
                        self.connections.add(con)
    def es_hyperneat(self):
        """
        Explores the hidden nodes and their connections.
        Three phases: outward from inputs, iteratively outward from hidden
        nodes, then inward to outputs. Returns (hidden_nodes, connections)
        after cleaning dangling paths.
        """
        inputs = self.substrate.input_coordinates
        outputs = self.substrate.output_coordinates
        hidden_nodes, unexplored_hidden_nodes = set(), set()
        connections1, connections2, connections3 = set(), set(), set()
        for x, y in inputs:  # Explore from inputs.
            root = self.division_initialization((x, y), True)
            self.pruning_extraction((x, y), root, True)
            connections1 = connections1.union(self.connections)
            for c in connections1:
                hidden_nodes.add((c.x2, c.y2))
            self.connections = set()
        unexplored_hidden_nodes = copy.deepcopy(hidden_nodes)
        for _ in range(self.iteration_level):  # Explore from hidden.
            for x, y in unexplored_hidden_nodes:
                root = self.division_initialization((x, y), True)
                self.pruning_extraction((x, y), root, True)
                connections2 = connections2.union(self.connections)
                for c in connections2:
                    hidden_nodes.add((c.x2, c.y2))
                self.connections = set()
            # NOTE(review): this subtracts only the *previous* batch, not all
            # nodes explored so far; with iteration_level > 1, nodes found two
            # iterations ago could be re-explored -- confirm intended.
            unexplored_hidden_nodes = hidden_nodes - unexplored_hidden_nodes
        for x, y in outputs:  # Explore to outputs.
            root = self.division_initialization((x, y), False)
            self.pruning_extraction((x, y), root, False)
            connections3 = connections3.union(self.connections)
            self.connections = set()
        connections = connections1.union(connections2.union(connections3))
        return self.clean_net(connections)
    def clean_net(self, connections):
        """
        Clean a net for dangling connections:
        Intersects paths from input nodes with paths to output.
        Returns (hidden_nodes, connections) where every node lies on some
        input-to-output path; input/output coordinates themselves are removed
        from the returned node set.
        """
        connected_to_inputs = set(tuple(i)
                                  for i in self.substrate.input_coordinates)
        connected_to_outputs = set(tuple(i)
                                   for i in self.substrate.output_coordinates)
        true_connections = set()
        initial_input_connections = copy.deepcopy(connections)
        initial_output_connections = copy.deepcopy(connections)
        add_happened = True
        while add_happened:  # The path from inputs.
            add_happened = False
            temp_input_connections = copy.deepcopy(initial_input_connections)
            for c in temp_input_connections:
                if (c.x1, c.y1) in connected_to_inputs:
                    connected_to_inputs.add((c.x2, c.y2))
                    initial_input_connections.remove(c)
                    add_happened = True
        add_happened = True
        while add_happened:  # The path to outputs.
            add_happened = False
            temp_output_connections = copy.deepcopy(initial_output_connections)
            for c in temp_output_connections:
                if (c.x2, c.y2) in connected_to_outputs:
                    connected_to_outputs.add((c.x1, c.y1))
                    initial_output_connections.remove(c)
                    add_happened = True
        true_nodes = connected_to_inputs.intersection(connected_to_outputs)
        for c in connections:
            # Only include connection if both source and target node resides in the real path from input to output
            if (c.x1, c.y1) in true_nodes and (c.x2, c.y2) in true_nodes:
                true_connections.add(c)
        true_nodes -= (set(self.substrate.input_coordinates)
                       .union(set(self.substrate.output_coordinates)))
        return true_nodes, true_connections
class QuadPoint:
    """
    Class representing an area in the quadtree.
    Defined by a center coordinate and the distance to the edges of the area.
    """
    def __init__(self, x, y, width, lvl):
        # Center of the square region.
        self.x = x
        self.y = y
        # CPPN-queried connection weight for this point (assigned by callers).
        self.w = 0.0
        # Distance from the center to each edge (half the side length).
        self.width = width
        # The four child quadrants; filled in during quadtree division.
        self.cs = [None] * 4
        # Depth level of this node in the quadtree (root starts at 1).
        self.lvl = lvl
class Connection:
    """
    Class representing a connection from one point to another with a certain weight.
    Two connections are considered equal when their endpoint coordinates
    match, regardless of weight.
    """
    def __init__(self, x1, y1, x2, y2, weight):
        # Source endpoint.
        self.x1 = x1
        self.y1 = y1
        # Target endpoint.
        self.x2 = x2
        self.y2 = y2
        self.weight = weight
    # Below is needed for use in set.
    def __eq__(self, other):
        if not isinstance(other, Connection):
            return NotImplemented
        return (self.x1, self.y1, self.x2, self.y2) == (other.x1, other.y1, other.x2, other.y2)
    def __hash__(self):
        # Bug fix: hash must be consistent with __eq__, which ignores weight.
        # Previously weight was included, so two "equal" connections could
        # hash to different buckets and both survive set deduplication.
        return hash((self.x1, self.y1, self.x2, self.y2))
def find_pattern(cppn, coord, res=60, max_weight=5.0):
    """
    From a given point, query the cppn for weights to all other points.
    This can be visualized as a connectivity pattern.
    """
    query = cppn.activate
    im = np.zeros((res, res))
    for xi in range(res):
        # Scale the grid index into the [-1, 1) substrate range.
        tx = -1.0 + (xi/float(res))*2.0
        for yi in range(res):
            ty = -1.0 + (yi/float(res))*2.0
            inputs = [coord[0], coord[1], tx, ty, 1.0]
            im[xi, yi] = query(inputs)[0] * max_weight
    return im
|
<filename>pureples/es_hyperneat/es_hyperneat.py<gh_stars>10-100
"""
All logic concerning ES-HyperNEAT resides here.
"""
import copy
import neat
import numpy as np
from pureples.hyperneat.hyperneat import query_cppn
from pureples.shared.visualize import draw_es
class ESNetwork:
"""
The evolvable substrate network.
"""
def __init__(self, substrate, cppn, params):
self.substrate = substrate
self.cppn = cppn
self.initial_depth = params["initial_depth"]
self.max_depth = params["max_depth"]
self.variance_threshold = params["variance_threshold"]
self.band_threshold = params["band_threshold"]
self.iteration_level = params["iteration_level"]
self.division_threshold = params["division_threshold"]
self.max_weight = params["max_weight"]
self.connections = set()
# Number of layers in the network.
self.activations = 2 ** params["max_depth"] + 1
activation_functions = neat.activations.ActivationFunctionSet()
self.activation = activation_functions.get(params["activation"])
def create_phenotype_network(self, filename=None):
"""
Create a RecurrentNetwork using the ES-HyperNEAT approach.
"""
input_coordinates = self.substrate.input_coordinates
output_coordinates = self.substrate.output_coordinates
input_nodes = list(range(len(input_coordinates)))
output_nodes = list(range(len(input_nodes), len(
input_nodes)+len(output_coordinates)))
hidden_idx = len(input_coordinates)+len(output_coordinates)
coordinates, indices, draw_connections, node_evals = [], [], [], []
nodes = {}
coordinates.extend(input_coordinates)
coordinates.extend(output_coordinates)
indices.extend(input_nodes)
indices.extend(output_nodes)
# Map input and output coordinates to their IDs.
coords_to_id = dict(zip(coordinates, indices))
# Where the magic happens.
hidden_nodes, connections = self.es_hyperneat()
# Map hidden coordinates to their IDs.
for x, y in hidden_nodes:
coords_to_id[x, y] = hidden_idx
hidden_idx += 1
# For every coordinate:
# Check the connections and create a node with corresponding connections if appropriate.
for (x, y), idx in coords_to_id.items():
for c in connections:
if c.x2 == x and c.y2 == y:
draw_connections.append(c)
if idx in nodes:
initial = nodes[idx]
initial.append((coords_to_id[c.x1, c.y1], c.weight))
nodes[idx] = initial
else:
nodes[idx] = [(coords_to_id[c.x1, c.y1], c.weight)]
# Combine the indices with the connections/links;
# forming node_evals used by the RecurrentNetwork.
for idx, links in nodes.items():
node_evals.append((idx, self.activation, sum, 0.0, 1.0, links))
# Visualize the network?
if filename is not None:
draw_es(coords_to_id, draw_connections, filename)
# This is actually a feedforward network.
return neat.nn.RecurrentNetwork(input_nodes, output_nodes, node_evals)
@staticmethod
def get_weights(p):
"""
Recursively collect all weights for a given QuadPoint.
"""
temp = []
def loop(pp):
if pp is not None and all(child is not None for child in pp.cs):
for i in range(0, 4):
loop(pp.cs[i])
else:
if pp is not None:
temp.append(pp.w)
loop(p)
return temp
def variance(self, p):
"""
Find the variance of a given QuadPoint.
"""
if not p:
return 0.0
return np.var(self.get_weights(p))
def division_initialization(self, coord, outgoing):
"""
Initialize the quadtree by dividing it in appropriate quads.
"""
root = QuadPoint(0.0, 0.0, 1.0, 1)
q = [root]
while q:
p = q.pop(0)
p.cs[0] = QuadPoint(p.x - p.width/2.0, p.y -
p.width/2.0, p.width/2.0, p.lvl + 1)
p.cs[1] = QuadPoint(p.x - p.width/2.0, p.y +
p.width/2.0, p.width/2.0, p.lvl + 1)
p.cs[2] = QuadPoint(p.x + p.width/2.0, p.y +
p.width/2.0, p.width/2.0, p.lvl + 1)
p.cs[3] = QuadPoint(p.x + p.width/2.0, p.y -
p.width/2.0, p.width/2.0, p.lvl + 1)
for c in p.cs:
c.w = query_cppn(coord, (c.x, c.y), outgoing,
self.cppn, self.max_weight)
if (p.lvl < self.initial_depth) or (p.lvl < self.max_depth and self.variance(p)
> self.division_threshold):
for child in p.cs:
q.append(child)
return root
def pruning_extraction(self, coord, p, outgoing):
"""
Determines which connections to express - high variance = more connetions.
"""
for c in p.cs:
d_left, d_right, d_top, d_bottom = None, None, None, None
if self.variance(c) > self.variance_threshold:
self.pruning_extraction(coord, c, outgoing)
else:
d_left = abs(c.w - query_cppn(coord, (c.x - p.width,
c.y), outgoing, self.cppn, self.max_weight))
d_right = abs(c.w - query_cppn(coord, (c.x + p.width,
c.y), outgoing, self.cppn, self.max_weight))
d_top = abs(c.w - query_cppn(coord, (c.x, c.y - p.width),
outgoing, self.cppn, self.max_weight))
d_bottom = abs(c.w - query_cppn(coord, (c.x, c.y +
p.width), outgoing, self.cppn, self.max_weight))
con = None
if max(min(d_top, d_bottom), min(d_left, d_right)) > self.band_threshold:
if outgoing:
con = Connection(coord[0], coord[1], c.x, c.y, c.w)
else:
con = Connection(c.x, c.y, coord[0], coord[1], c.w)
if con is not None:
# Nodes will only connect upwards.
# If connections to same layer is wanted, change to con.y1 <= con.y2.
if not c.w == 0.0 and con.y1 < con.y2 and not (con.x1 == con.x2 and con.y1 == con.y2):
self.connections.add(con)
def es_hyperneat(self):
"""
Explores the hidden nodes and their connections.
"""
inputs = self.substrate.input_coordinates
outputs = self.substrate.output_coordinates
hidden_nodes, unexplored_hidden_nodes = set(), set()
connections1, connections2, connections3 = set(), set(), set()
for x, y in inputs: # Explore from inputs.
root = self.division_initialization((x, y), True)
self.pruning_extraction((x, y), root, True)
connections1 = connections1.union(self.connections)
for c in connections1:
hidden_nodes.add((c.x2, c.y2))
self.connections = set()
unexplored_hidden_nodes = copy.deepcopy(hidden_nodes)
for _ in range(self.iteration_level): # Explore from hidden.
for x, y in unexplored_hidden_nodes:
root = self.division_initialization((x, y), True)
self.pruning_extraction((x, y), root, True)
connections2 = connections2.union(self.connections)
for c in connections2:
hidden_nodes.add((c.x2, c.y2))
self.connections = set()
unexplored_hidden_nodes = hidden_nodes - unexplored_hidden_nodes
for x, y in outputs: # Explore to outputs.
root = self.division_initialization((x, y), False)
self.pruning_extraction((x, y), root, False)
connections3 = connections3.union(self.connections)
self.connections = set()
connections = connections1.union(connections2.union(connections3))
return self.clean_net(connections)
def clean_net(self, connections):
"""
Clean a net for dangling connections:
Intersects paths from input nodes with paths to output.
"""
connected_to_inputs = set(tuple(i)
for i in self.substrate.input_coordinates)
connected_to_outputs = set(tuple(i)
for i in self.substrate.output_coordinates)
true_connections = set()
initial_input_connections = copy.deepcopy(connections)
initial_output_connections = copy.deepcopy(connections)
add_happened = True
while add_happened: # The path from inputs.
add_happened = False
temp_input_connections = copy.deepcopy(initial_input_connections)
for c in temp_input_connections:
if (c.x1, c.y1) in connected_to_inputs:
connected_to_inputs.add((c.x2, c.y2))
initial_input_connections.remove(c)
add_happened = True
add_happened = True
while add_happened: # The path to outputs.
add_happened = False
temp_output_connections = copy.deepcopy(initial_output_connections)
for c in temp_output_connections:
if (c.x2, c.y2) in connected_to_outputs:
connected_to_outputs.add((c.x1, c.y1))
initial_output_connections.remove(c)
add_happened = True
true_nodes = connected_to_inputs.intersection(connected_to_outputs)
for c in connections:
# Only include connection if both source and target node resides in the real path from input to output
if (c.x1, c.y1) in true_nodes and (c.x2, c.y2) in true_nodes:
true_connections.add(c)
true_nodes -= (set(self.substrate.input_coordinates)
.union(set(self.substrate.output_coordinates)))
return true_nodes, true_connections
class QuadPoint:
"""
Class representing an area in the quadtree.
Defined by a center coordinate and the distance to the edges of the area.
"""
def __init__(self, x, y, width, lvl):
self.x = x
self.y = y
self.w = 0.0
self.width = width
self.cs = [None] * 4
self.lvl = lvl
class Connection:
"""
Class representing a connection from one point to another with a certain weight.
"""
def __init__(self, x1, y1, x2, y2, weight):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.weight = weight
# Below is needed for use in set.
def __eq__(self, other):
if not isinstance(other, Connection):
return NotImplemented
return (self.x1, self.y1, self.x2, self.y2) == (other.x1, other.y1, other.x2, other.y2)
def __hash__(self):
return hash((self.x1, self.y1, self.x2, self.y2, self.weight))
def find_pattern(cppn, coord, res=60, max_weight=5.0):
"""
From a given point, query the cppn for weights to all other points.
This can be visualized as a connectivity pattern.
"""
im = np.zeros((res, res))
for x2 in range(res):
for y2 in range(res):
x2_scaled = -1.0 + (x2/float(res))*2.0
y2_scaled = -1.0 + (y2/float(res))*2.0
i = [coord[0], coord[1], x2_scaled, y2_scaled, 1.0]
n = cppn.activate(i)[0]
im[x2][y2] = n * max_weight
return im
|
en
| 0.880861
|
All logic concerning ES-HyperNEAT resides here. The evolvable substrate network. # Number of layers in the network. Create a RecurrentNetwork using the ES-HyperNEAT approach. # Map input and output coordinates to their IDs. # Where the magic happens. # Map hidden coordinates to their IDs. # For every coordinate: # Check the connections and create a node with corresponding connections if appropriate. # Combine the indices with the connections/links; # forming node_evals used by the RecurrentNetwork. # Visualize the network? # This is actually a feedforward network. Recursively collect all weights for a given QuadPoint. Find the variance of a given QuadPoint. Initialize the quadtree by dividing it in appropriate quads. Determines which connections to express - high variance = more connetions. # Nodes will only connect upwards. # If connections to same layer is wanted, change to con.y1 <= con.y2. Explores the hidden nodes and their connections. # Explore from inputs. # Explore from hidden. # Explore to outputs. Clean a net for dangling connections: Intersects paths from input nodes with paths to output. # The path from inputs. # The path to outputs. # Only include connection if both source and target node resides in the real path from input to output Class representing an area in the quadtree. Defined by a center coordinate and the distance to the edges of the area. Class representing a connection from one point to another with a certain weight. # Below is needed for use in set. From a given point, query the cppn for weights to all other points. This can be visualized as a connectivity pattern.
| 2.290041
| 2
|
cubejsclientasync/client.py
|
NarrativeScience/cubejs-client-async
| 0
|
6626701
|
<gh_stars>0
"""Contains the Cube.js API client"""
from datetime import datetime, timedelta
from typing import Any, Dict, Optional
import backoff
import httpx
import jwt
from .query import Query
class CubeClient:
    """Cube.js API client"""
    def __init__(
        self,
        host: str = "http://localhost:4000",
        base_path: str = "/cubejs-api",
        secret: Optional[str] = None,
        load_request_timeout: float = 30.0,
        token_ttl_hours: int = 1,
    ) -> None:
        """Initializer
        Args:
            host: Cube.js API host
            base_path: Cube.js API base path
            secret: Secret for signing tokens. Set to None to skip authentication.
            load_request_timeout: Timeout in seconds to wait for load responses
            token_ttl_hours: TTL in hours for the token lifetime
        """
        self._secret = secret
        self._load_request_timeout = load_request_timeout
        self._token_ttl_hours = token_ttl_hours
        self._http_client = httpx.AsyncClient(
            base_url=f"{host.rstrip('/')}/{base_path.strip('/')}"
        )
        self._token: Optional[str] = None
        # Bug fix: initialize the expiration timestamp so it always exists;
        # previously it was only created lazily inside _get_signed_token.
        self._token_expiration: Optional[datetime] = None
    def _get_signed_token(self) -> Optional[str]:
        """Get or refresh the authentication token
        Returns:
            token or None if no secret was configured
        """
        if not self._secret:
            return None
        # Bug fix: use an aware UTC timestamp. PyJWT encodes a datetime "exp"
        # claim as UTC, so a naive local datetime would skew token expiry by
        # the local UTC offset.
        from datetime import timezone
        now = datetime.now(tz=timezone.utc)
        if self._token is None or self._token_expiration is None \
                or self._token_expiration <= now:
            self._token_expiration = now + timedelta(hours=self._token_ttl_hours)
            self._token = jwt.encode(
                {"exp": self._token_expiration}, self._secret, algorithm="HS256"
            )
        return self._token
    @property
    def token(self) -> Optional[str]:
        """Alias for getting the current token value"""
        return self._get_signed_token()
    async def load(self, query: Query) -> Dict[str, Any]:
        """Get the data for a query.
        Args:
            query: Query object
        Returns:
            dict with properties:
                * query -- The query passed via params
                * data -- Formatted dataset of query results
                * annotation -- Metadata for query. Contains descriptions for all query
                    items.
                    * title -- Human readable title from data schema.
                    * shortTitle -- Short title for visualization usage (ex. chart overlay)
                    * type -- Data type
        """
        return await self._request(
            "post",
            "/v1/load",
            body={"query": query.serialize()},
            timeout=self._load_request_timeout,
        )
    @backoff.on_exception(
        backoff.expo, httpx.RequestError, max_tries=8, jitter=backoff.random_jitter
    )
    async def _request(
        self, method: str, path: str, body: Optional[Any] = None, timeout: float = 5.0
    ):
        """Make API request to Cube.js server
        Args:
            method: HTTP method
            path: URL path
            body: Body to send with the request, if applicable
            timeout: Request timeout in seconds
        Returns:
            response data
        """
        headers = {}
        if self.token:
            headers["Authorization"] = self.token
        # Bug fix: do NOT wrap the shared AsyncClient in "async with" here.
        # Exiting the context manager closes the client, so every request
        # after the first (including backoff retries) would fail because the
        # client instance cannot be reopened.
        response = await self._http_client.request(
            method, path, json=body, headers=headers, timeout=timeout
        )
        response.raise_for_status()
        return response.json()
|
"""Contains the Cube.js API client"""
from datetime import datetime, timedelta
from typing import Any, Dict, Optional
import backoff
import httpx
import jwt
from .query import Query
class CubeClient:
"""Cube.js API client"""
def __init__(
self,
host: str = "http://localhost:4000",
base_path: str = "/cubejs-api",
secret: Optional[str] = None,
load_request_timeout: float = 30.0,
token_ttl_hours: int = 1,
) -> None:
"""Initializer
Args:
host: Cube.js API host
base_path: Cube.js API base path
secret: Secret for signing tokens. Set to None to skip authentication.
load_request_timeout: Timeout in seconds to wait for load responses
token_ttl_hours: TTL in hours for the token lifetime
"""
self._secret = secret
self._load_request_timeout = load_request_timeout
self._token_ttl_hours = token_ttl_hours
self._http_client = httpx.AsyncClient(
base_url=f"{host.rstrip('/')}/{base_path.strip('/')}"
)
self._token = None
def _get_signed_token(self) -> Optional[str]:
"""Get or refresh the authentication token
Returns:
token or None if no secret was configured
"""
if not self._secret:
return None
now = datetime.now()
if not self._token or self._token_expiration <= now:
self._token_expiration = now + timedelta(hours=self._token_ttl_hours)
self._token = jwt.encode(
{"exp": self._token_expiration}, self._secret, algorithm="HS256"
)
return self._token
@property
def token(self) -> Optional[str]:
"""Alias for getting the current token value"""
return self._get_signed_token()
async def load(self, query: Query) -> Dict[str, Any]:
"""Get the data for a query.
Args:
query: Query object
Returns:
dict with properties:
* query -- The query passed via params
* data -- Formatted dataset of query results
* annotation -- Metadata for query. Contains descriptions for all query
items.
* title -- Human readable title from data schema.
* shortTitle -- Short title for visualization usage (ex. chart overlay)
* type -- Data type
"""
return await self._request(
"post",
"/v1/load",
body={"query": query.serialize()},
timeout=self._load_request_timeout,
)
@backoff.on_exception(
backoff.expo, httpx.RequestError, max_tries=8, jitter=backoff.random_jitter
)
async def _request(
self, method: str, path: str, body: Optional[Any] = None, timeout: float = 5.0
):
"""Make API request to Cube.js server
Args:
method: HTTP method
path: URL path
body: Body to send with the request, if applicable
timeout: Request timeout in seconds
Returns:
response data
"""
headers = {}
if self.token:
headers["Authorization"] = self.token
async with self._http_client as client:
response = await client.request(
method, path, json=body, headers=headers, timeout=timeout
)
response.raise_for_status()
return response.json()
|
en
| 0.646037
|
Contains the Cube.js API client Cube.js API client Initializer Args: host: Cube.js API host base_path: Cube.js API base path secret: Secret for signing tokens. Set to None to skip authentication. load_request_timeout: Timeout in seconds to wait for load responses token_ttl_hours: TTL in hours for the token lifetime Get or refresh the authentication token Returns: token or None if no secret was configured Alias for getting the current token value Get the data for a query. Args: query: Query object Returns: dict with properties: * query -- The query passed via params * data -- Formatted dataset of query results * annotation -- Metadata for query. Contains descriptions for all query items. * title -- Human readable title from data schema. * shortTitle -- Short title for visualization usage (ex. chart overlay) * type -- Data type Make API request to Cube.js server Args: method: HTTP method path: URL path body: Body to send with the request, if applicable timeout: Request timeout in seconds Returns: response data
| 2.616946
| 3
|
pyzoo/test/zoo/pipeline/onnx/test_model_loading.py
|
Polynomia/analytics-zoo
| 0
|
6626702
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from test.zoo.pipeline.utils.test_utils_onnx import OnnxTestCase
from zoo.pipeline.api.keras.layers import *
import numpy as np
np.random.seed(1337) # for reproducibility
import torch
import onnx.helper as helper
import onnx
import pytest
from zoo.pipeline.api.onnx.onnx_loader import OnnxLoader
from onnx import backend
from onnx.backend import test
from onnx.backend.test.case import node
from onnx.backend.test.case.node import pool_op_common
class Squeeze(torch.nn.Module):
    """torch.squeeze wrapped as a Module.

    Given a non-negative dimension argument, squeezes only that dimension;
    with no argument (or a negative one) every singleton dimension is
    squeezed.
    """
    def __init__(self, *dim):
        super(Squeeze, self).__init__()
        # Keep the first positional argument as the target dimension;
        # -1 is the sentinel for "squeeze all singleton dims".
        self.dim = dim[0] if dim else -1
    def forward(self, x):
        # A negative stored dim (including the sentinel) means full squeeze.
        return torch.squeeze(x, dim=self.dim) if self.dim >= 0 else torch.squeeze(x)
class TestModelLoading(OnnxTestCase):
def test_onnx_conv2d(self):
pytorch_model = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3)
)
input_shape_with_batch = (1, 3, 224, 224)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_conv2d_2(self):
pytorch_model = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3),
torch.nn.Conv2d(in_channels=64, out_channels=4, kernel_size=3)
)
input_shape_with_batch = (1, 3, 224, 224)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def _batchnorm_test_mode(self, x, s, bias, mean, var, epsilon=1e-5):
dims_x = len(x.shape)
dim_ones = (1,) * (dims_x - 2)
s = s.reshape(-1, *dim_ones)
bias = bias.reshape(-1, *dim_ones)
mean = mean.reshape(-1, *dim_ones)
var = var.reshape(-1, *dim_ones)
return s * (x - mean) / np.sqrt(var + epsilon) + bias
# Momentum is always equal to 1 no matter what value we set
def test_onnx_batch_norm1(self):
pytorch_model = torch.nn.Sequential(
torch.nn.BatchNorm2d(num_features=3, momentum=1, affine=False)
)
input_shape_with_batch = (1, 3, 224, 224)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch, rtol=1e-3, atol=1e-3)
# Momentum is always equal to 1 no matter what value we set
def test_onnx_batch_norm2(self):
pytorch_model = torch.nn.Sequential(
torch.nn.BatchNorm2d(num_features=3, momentum=1, affine=True)
)
input_shape_with_batch = (1, 3, 224, 224)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch, rtol=1e-3, atol=1e-3)
def test_batch_norm(self):
x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32).reshape((3, 2, 1, 1))
s = np.array([1.0, 1.0]).astype(np.float32).reshape((2, 1))
bias = np.array([0, 0]).astype(np.float32).reshape((2, 1))
mean = np.array([0, 3]).astype(np.float32).reshape((2, 1))
var = np.array([1, 1.5]).astype(np.float32).reshape((2, 1))
y = self._batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 's', 'bias', 'mean', 'var'],
outputs=['y'],
)
output = OnnxLoader.run_node(node, [x, s, bias, mean, var])
np.testing.assert_almost_equal(output["y"], y, decimal=3)
def test_conv_with_padding(self):
x = np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 5, 5) input tensor
[5., 6., 7., 8., 9.],
[10., 11., 12., 13., 14.],
[15., 16., 17., 18., 19.],
[20., 21., 22., 23., 24.]]]]).astype(np.float32)
W = np.array([[[[1., 1., 1.], # (1, 1, 3, 3) tensor for convolution weights
[1., 1., 1.],
[1., 1., 1.]]]]).astype(np.float32)
# Convolution with padding
node_with_padding = helper.make_node(
'Conv',
inputs=['x', 'W'],
outputs=['y'],
kernel_shape=[3, 3],
# Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1
pads=[1, 1, 1, 1],
)
y_with_padding = np.array([[[[12., 21., 27., 33., 24.], # (1, 1, 5, 5) output tensor
[33., 54., 63., 72., 51.],
[63., 99., 108., 117., 81.],
[93., 144., 153., 162., 111.],
[72., 111., 117., 123., 84.]]]]).astype(np.float32)
output = OnnxLoader.run_node(node_with_padding, [x, W])
np.testing.assert_almost_equal(output["y"], y_with_padding, decimal=5)
    def test_conv_without_padding(self):
        """Conv op: 5x5 input, 3x3 all-ones kernel, no padding shrinks output to 3x3."""
        x = np.array([[[[0., 1., 2., 3., 4.],  # (1, 1, 5, 5) input tensor
                        [5., 6., 7., 8., 9.],
                        [10., 11., 12., 13., 14.],
                        [15., 16., 17., 18., 19.],
                        [20., 21., 22., 23., 24.]]]]).astype(np.float32)
        W = np.array([[[[1., 1., 1.],  # (1, 1, 3, 3) tensor for convolution weights
                        [1., 1., 1.],
                        [1., 1., 1.]]]]).astype(np.float32)
        # Convolution without padding
        node_without_padding = onnx.helper.make_node(
            'Conv',
            inputs=['x', 'W'],
            outputs=['y'],
            kernel_shape=[3, 3],
            # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1
            pads=[0, 0, 0, 0],
        )
        # Hand-computed 3x3 box sums over the valid region only.
        y_without_padding = np.array([[[[54., 63., 72.],  # (1, 1, 3, 3) output tensor
                                        [99., 108., 117.],
                                        [144., 153., 162.]]]]).astype(np.float32)
        output = OnnxLoader.run_node(node_without_padding, [x, W])
        np.testing.assert_almost_equal(output["y"], y_without_padding, decimal=5)
def test_onnx_gemm(self):
# TODO: Linear(bias = Flase) is mapped to Transpose + MatMul, not GEMM
pytorch_model = torch.nn.Sequential(
torch.nn.Linear(in_features=3, out_features=4, bias=True)
)
input_shape_with_batch = (1, 3)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_add(self):
class Add(torch.nn.Module):
def forward(self, x):
return x[0] + x[1]
pytorch_model = Add()
input_shape_with_batch = [(1, 3), (1, 3)]
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_abs(self):
class Abs(torch.nn.Module):
def forward(self, x):
return abs(x)
pytorch_model = Abs()
input_shape_with_batch = (1, 3)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_abs(self):
node = onnx.helper.make_node(
'Abs',
inputs=['x'],
outputs=['y'],
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.abs(x)
def test_onnx_neg(self):
class Neg(torch.nn.Module):
def forward(self, x):
return -x
pytorch_model = Neg()
input_shape_with_batch = (1, 3)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_neg(self):
node = onnx.helper.make_node(
'Neg',
inputs=['x'],
outputs=['y'],
)
x = np.array([-4, 2]).astype(np.float32).reshape([2, 1])
y = np.negative(x)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.negative(x)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_averagepool2d(self):
pytorch_model = torch.nn.Sequential(
torch.nn.AvgPool2d(kernel_size=3, count_include_pad=False)
)
input_shape_with_batch = (1, 3, 224, 224)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_averagepool2d_padding(self):
pytorch_model = torch.nn.Sequential(
torch.nn.AvgPool2d(kernel_size=10, padding=4, count_include_pad=False)
)
input_shape_with_batch = (1, 3, 224, 224)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_relu(self):
pytorch_model = torch.nn.Sequential(
torch.nn.ReLU()
)
input_shape_with_batch = (1, 3)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_relu(self):
node = helper.make_node(
'Relu',
inputs=['x'],
outputs=['y']
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_softmax(self):
pytorch_model = torch.nn.Sequential(
torch.nn.Softmax()
)
input_shape_with_batch = (1, 3)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_softmax(self):
node = helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y']
)
x = np.array([[-1, 0, 1]]).astype(np.float32)
# expected output [[0.09003058, 0.24472848, 0.66524094]]
y = np.exp(x) / np.sum(np.exp(x), axis=1)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_reshape(self):
original_shape = [2, 3, 4]
test_cases = {
'reordered_dims': np.array([4, 2, 3], dtype=np.int64),
'reduced_dims': np.array([3, 8], dtype=np.int64),
'extended_dims': np.array([3, 2, 2, 2], dtype=np.int64),
'one_dim': np.array([24], dtype=np.int64)
# 'negative_dim': np.array([6, -1, 2], dtype=np.int64),
}
data = np.random.random_sample(original_shape).astype(np.float32)
for test_name, shape in test_cases.items():
node = onnx.helper.make_node(
'Reshape',
inputs=['data', 'shape'],
outputs=['reshaped'],
)
output = OnnxLoader.run_node(node, [data, shape])
reshaped = np.reshape(data, shape)
np.testing.assert_almost_equal(output["reshaped"], reshaped, decimal=5)
def test_reshape_pytorch(self):
class View(torch.nn.Module):
def __init__(self, *shape):
super(View, self).__init__()
self.shape = shape
def forward(self, input):
return input.view(self.shape)
pytorch_model = torch.nn.Sequential(
torch.nn.Linear(20, 20),
View(2, 5, 4))
input_shape_with_batch = (2, 20)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_constant(self):
values = np.random.randn(5, 5).astype(np.float32)
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['values'],
value=onnx.helper.make_tensor(
name='const_tensor',
data_type=onnx.TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
output = OnnxLoader.run_node(node, [])
np.testing.assert_almost_equal(output["values"], values, decimal=5)
def test_onnx_maxpool2d(self):
pytorch_model = torch.nn.Sequential(
torch.nn.MaxPool2d(kernel_size=3)
)
input_shape_with_batch = (1, 3, 224, 224)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_maxpool2d_pads(self):
        """MaxPool op: 5x5 kernel with pads=[2,2,2,2] keeps the 5x5 output shape."""
        node = helper.make_node(
            'MaxPool',
            inputs=['x'],
            outputs=['y'],
            kernel_shape=[5, 5],
            pads=[2, 2, 2, 2]
        )
        x = np.array([[[
            [1, 2, 3, 4, 5],
            [6, 7, 8, 9, 10],
            [11, 12, 13, 14, 15],
            [16, 17, 18, 19, 20],
            [21, 22, 23, 24, 25],
        ]]]).astype(np.float32)
        # Hand-computed max over each padded 5x5 window.
        y = np.array([[[
            [13, 14, 15, 15, 15],
            [18, 19, 20, 20, 20],
            [23, 24, 25, 25, 25],
            [23, 24, 25, 25, 25],
            [23, 24, 25, 25, 25]]]]).astype(np.float32)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_maxpool2d_same_upper(self):
        """MaxPool op with auto_pad=SAME_UPPER and stride 2 on a 5x5 input."""
        node = helper.make_node(
            'MaxPool',
            inputs=['x'],
            outputs=['y'],
            kernel_shape=[3, 3],
            strides=[2, 2],
            auto_pad="SAME_UPPER"
        )
        x = np.array([[[
            [1, 2, 3, 4, 5],
            [6, 7, 8, 9, 10],
            [11, 12, 13, 14, 15],
            [16, 17, 18, 19, 20],
            [21, 22, 23, 24, 25],
        ]]]).astype(np.float32)
        # Hand-computed: ceil(5/2)=3 outputs per dim, extra padding at the end.
        y = np.array([[[[7, 9, 10],
                        [17, 19, 20],
                        [22, 24, 25]]]]).astype(np.float32)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_maxpool2d_strides(self):
        """MaxPool op: 2x2 kernel, stride 2, no padding on a 5x5 input."""
        node = helper.make_node(
            'MaxPool',
            inputs=['x'],
            outputs=['y'],
            kernel_shape=[2, 2],
            strides=[2, 2]
        )
        x = np.array([[[
            [1, 2, 3, 4, 5],
            [6, 7, 8, 9, 10],
            [11, 12, 13, 14, 15],
            [16, 17, 18, 19, 20],
            [21, 22, 23, 24, 25],
        ]]]).astype(np.float32)
        # Hand-computed: only two full 2x2 windows fit per dim.
        y = np.array([[[[7, 9],
                        [17, 19]]]]).astype(np.float32)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_logsoftmax(self):
pytorch_model = torch.nn.Sequential(
torch.nn.LogSoftmax()
)
input_shape_with_batch = (1, 3)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_tanh(self):
node = onnx.helper.make_node(
'Tanh',
inputs=['x'],
outputs=['y'],
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.tanh(x)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y)
def test_onnx_exp(self):
node = onnx.helper.make_node(
'Exp',
inputs=['x'],
outputs=['y'],
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.exp(x)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_flatten(self):
node = onnx.helper.make_node(
'Flatten',
inputs=['a'],
outputs=['b'],
)
shape = (5, 4, 3, 2)
a = np.random.random_sample(shape).astype(np.float32)
new_shape = (5, 24)
b = np.reshape(a, new_shape)
output = OnnxLoader.run_node(node, [a])
np.testing.assert_almost_equal(output["b"], b, decimal=5)
def test_onnx_sqrt(self):
node = onnx.helper.make_node(
'Sqrt',
inputs=['x'],
outputs=['y'],
)
x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
y = np.sqrt(x)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_log(self):
node = onnx.helper.make_node(
'Log',
inputs=['x'],
outputs=['y'],
)
x = np.exp(np.random.randn(3, 4, 5).astype(np.float32))
y = np.log(x)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_hardsigmoid(self):
default_alpha = 0.2
default_beta = 0.5
node = onnx.helper.make_node(
'HardSigmoid',
inputs=['x'],
outputs=['y'],
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x * default_alpha + default_beta, 0, 1)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_matmul_2d(self):
node = onnx.helper.make_node(
'MatMul',
inputs=['a', 'b'],
outputs=['c'],
)
# 2d
a = np.random.randn(3, 4).astype(np.float32).reshape((3, 4))
b = np.random.randn(4, 3).astype(np.float32).reshape((4, 3))
c = np.matmul(a, b)
output = OnnxLoader.run_node(node, [a, b])
np.testing.assert_almost_equal(output["c"], c, decimal=5)
def test_matmul_3d(self):
node = onnx.helper.make_node(
'MatMul',
inputs=['a', 'b'],
outputs=['c'],
)
# 3d
a = np.random.randn(2, 3, 4).astype(np.float32)
b = np.random.randn(2, 4, 3).astype(np.float32)
c = np.matmul(a, b)
output = OnnxLoader.run_node(node, [a, b])
np.testing.assert_almost_equal(output["c"], c, decimal=5)
    def test_minit(self):
        """End-to-end round trip of a small MNIST-style convnet in eval mode.

        NOTE(review): the name looks like a typo for "mnist"; kept as-is since
        renaming would change the test's public identifier.
        """
        import torch.nn as nn
        import torch.nn.functional as F

        class MnistNet(nn.Module):
            def __init__(self):
                super(MnistNet, self).__init__()
                self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
                self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
                self.conv2_drop = nn.Dropout2d()
                self.fc1 = nn.Linear(320, 50)
                self.fc2 = nn.Linear(50, 10)

            def forward(self, x):
                x = F.relu(F.max_pool2d(self.conv1(x), 2))
                x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
                x = x.view(-1, 320)
                x = F.relu(self.fc1(x))
                x = F.dropout(x, training=self.training)
                x = self.fc2(x)
                return F.log_softmax(x, dim=1)
        pytorch_model = MnistNet()
        # Eval mode so dropout is a no-op and outputs are deterministic.
        pytorch_model.train(mode=False)
        self.compare_with_pytorch(pytorch_model, [(1, 1, 28, 28)])
def test_onnx_sub(self):
class Sub(torch.nn.Module):
def forward(self, x):
return x[0] - x[1]
pytorch_model = Sub()
input_shape_with_batch = [(1, 3), (1, 3)]
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_sub(self):
node = onnx.helper.make_node(
'Sub',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.array([1, 2, 3]).astype(np.float32).reshape([3, 1])
y = np.array([3, 2, 1]).astype(np.float32).reshape([3, 1])
z = x - y
output = OnnxLoader.run_node(node, [x, y])
np.testing.assert_almost_equal(output["z"], z, decimal=5)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
z = x - y
output = OnnxLoader.run_node(node, [x, y])
np.testing.assert_almost_equal(output["z"], z, decimal=5)
def test_onnx_squeeze(self):
pytorch_model = Squeeze()
input_shape_with_batch = (2, 1, 2, 1, 2)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_squeeze_dim0(self):
pytorch_model = Squeeze(0)
input_shape_with_batch = (1, 2, 3)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_squeeze_dim1(self):
pytorch_model = Squeeze(1)
input_shape_with_batch = (2, 1, 3, 1, 2)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_squeeze(self):
node = onnx.helper.make_node(
'Squeeze',
inputs=['x'],
outputs=['y'],
axes=[0],
)
x = np.random.randn(1, 3, 4, 5).astype(np.float32)
y = np.squeeze(x, axis=0)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_squeeze_none(self):
node = onnx.helper.make_node(
'Squeeze',
inputs=['x'],
outputs=['y'],
)
x = np.random.randn(1, 1, 4, 5).astype(np.float32)
y = np.squeeze(x)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_squeeze_list(self):
node = onnx.helper.make_node(
'Squeeze',
inputs=['x'],
outputs=['y'],
axes=[0, 1],
)
x = np.random.randn(1, 1, 4, 5).astype(np.float32)
y = np.squeeze(x)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_squeeze_axis(self):
node = onnx.helper.make_node(
'Squeeze',
inputs=['x'],
outputs=['y'],
axes=[1],
)
x = np.random.randn(3, 1, 4, 5).astype(np.float32)
y = np.squeeze(x, axis=1)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_sigmoid(self):
pytorch_model = torch.nn.Sequential(
torch.nn.Sigmoid()
)
input_shape_with_batch = (1, 3)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_sigmoid(self):
node = helper.make_node(
'Sigmoid',
inputs=['x'],
outputs=['y'],
)
x = np.array([[-1, 0, 1]]).astype(np.float32)
y = 1.0 / (1.0 + np.exp(np.negative(x))) # expected output [0.26894143, 0.5, 0.7310586]
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_index_select(self):
class IndexSelect(torch.nn.Module):
def __init__(self, *parameter):
super(IndexSelect, self).__init__()
self.dim = parameter[0]
self.index = parameter[1]
def forward(self, x):
return torch.index_select(x, dim=self.dim, index=torch.tensor(self.index))
pytorch_model = IndexSelect(3, 2)
input_shape_with_batch = (3, 4, 5, 6)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_index_select_axis0(self):
        """index_select along dim 0 (the batch dim) is expected to fail.

        NOTE(review): the entire body, including the class definition, sits
        inside pytest.raises — presumably the exception surfaces from
        compare_with_pytorch; confirm the raise actually happens, otherwise
        pytest.raises itself fails the test.
        """
        import pytest
        with pytest.raises(Exception) as e_info:
            class IndexSelect(torch.nn.Module):
                def __init__(self, *parameter):
                    super(IndexSelect, self).__init__()
                    self.dim = parameter[0]
                    self.index = parameter[1]

                def forward(self, x):
                    return torch.index_select(x, dim=self.dim, index=torch.tensor(self.index))
            pytorch_model = IndexSelect(0, 2)
            input_shape_with_batch = (3, 4, 5, 6)
            self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_concat(self):
class Concat(torch.nn.Module):
def forward(self, x):
return torch.cat([v for v in x], 1)
pytorch_model = Concat()
input_shape_with_batch = [(1, 3), (1, 3)]
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_concat(self):
        """Concat op matches np.concatenate along every non-zero axis for 1-3D inputs."""
        test_cases = {
            '1d': ([1, 2],
                   [3, 4]),
            '2d': ([[1, 2], [3, 4]],
                   [[5, 6], [7, 8]]),
            '3d': ([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                   [[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
        }  # type: Dict[Text, Sequence[Any]]
        for test_case, values_ in test_cases.items():
            values = [np.asarray(v, dtype=np.float32) for v in values_]
            # Exercise each axis except 0 (axis 0 is covered by test_concat_axis).
            for i in range(1, len(values[0].shape)):
                in_args = ['value' + str(k) for k in range(len(values))]
                node = onnx.helper.make_node(
                    'Concat',
                    inputs=[s for s in in_args],
                    outputs=['output'],
                    axis=i
                )
                y = np.concatenate(values, i)
                output = OnnxLoader.run_node(node, [v for v in values])
                np.testing.assert_almost_equal(output["output"], y, decimal=5)
def test_concat_axis(self):
test_cases = {
'1d': ([1, 2],
[3, 4]),
'2d': ([[1, 2], [3, 4]],
[[5, 6], [7, 8]]),
'3d': ([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
} # type: Dict[Text, Sequence[Any]]
for test_case, values_ in test_cases.items():
values = [np.asarray(v, dtype=np.float32) for v in values_]
for i in range(1, len(values[0].shape)):
in_args = ['value' + str(k) for k in range(len(values))]
node = onnx.helper.make_node(
'Concat',
inputs=[s for s in in_args],
outputs=['output'],
axis=0
)
y = np.concatenate(values, 0)
output = OnnxLoader.run_node(node, [v for v in values])
np.testing.assert_almost_equal(output["output"], y, decimal=5)
    def test_torch_add(self):
        """torch.add in its legacy 3-arg form (input, alpha, other) round-trips.

        The (x, 1, y) call computes x + 1*y; kept deliberately to exercise the
        scaled-add export path.
        """
        class Add(torch.nn.Module):
            def forward(self, x):
                return torch.add(x[0], 1, x[1])
        pytorch_model = Add()
        input_shape_with_batch = [(1, 3), (1, 3)]
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_leakyrelu(self):
pytorch_model = torch.nn.Sequential(
torch.nn.LeakyReLU()
)
input_shape_with_batch = (1, 3)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_leakyrelu(self):
node = helper.make_node(
'LeakyRelu',
inputs=['x'],
outputs=['y'],
alpha=0.1
)
x = np.array([-1, 0, 1]).astype(np.float32)
# expected output [-0.1, 0., 1.]
y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_gt(self):
class gt(torch.nn.Module):
def forward(self, x):
return torch.gt(x[0], x[1])
pytorch_model = gt()
input_shape_with_batch = [(1, 3), (1, 3)]
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_gt(self):
node = helper.make_node(
'Greater',
inputs=['x', 'y'],
outputs=['greater'],
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
z = np.greater(x, y)
output = OnnxLoader.run_node(node, [x, y])
np.testing.assert_almost_equal(output['greater'], z, decimal=5)
    def test_maxpool1d(self):
        """MaxPool op (1-D, kernel 2, stride 1) matches the ONNX reference pooler."""
        node = onnx.helper.make_node(
            'MaxPool',
            inputs=['x'],
            outputs=['y'],
            kernel_shape=[2],
        )
        x = np.random.randn(1, 3, 32).astype(np.float32)
        x_shape = np.array(np.shape(x))
        kernel_shape = np.array([2])
        strides = [1]
        # Reference output from onnx's own pooling helper ('VALID' = no padding).
        out_shape = pool_op_common.get_output_shape('VALID', x_shape[2:], kernel_shape, strides)
        padded = x
        y = pool_op_common.pool(padded, x_shape, kernel_shape, strides, out_shape, [0], 'MAX')
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_maxpool1d_strides(self):
        """MaxPool op (1-D, kernel 2, stride 2) matches the ONNX reference pooler."""
        node = onnx.helper.make_node(
            'MaxPool',
            inputs=['x'],
            outputs=['y'],
            kernel_shape=[2],
            strides=[2]
        )
        x = np.random.randn(1, 3, 32).astype(np.float32)
        x_shape = np.array(np.shape(x))
        kernel_shape = np.array([2])
        strides = [2]
        # Reference output from onnx's own pooling helper ('VALID' = no padding).
        out_shape = pool_op_common.get_output_shape('VALID', x_shape[2:], kernel_shape, strides)
        padded = x
        y = pool_op_common.pool(padded, x_shape, kernel_shape, strides, out_shape, [0], 'MAX')
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_maxpool1d(self):
pytorch_model = torch.nn.Sequential(
torch.nn.MaxPool1d(2)
)
input_shape_with_batch = (1, 3, 32)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_maxpool1d_pads(self):
pytorch_model = torch.nn.Sequential(
torch.nn.MaxPool1d(2, padding=1)
)
input_shape_with_batch = (1, 3, 32)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_threshold(self):
pytorch_model = torch.nn.Sequential(
torch.nn.Threshold(0, 0))
input_shape_with_batch = (2, 3)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_mul(self):
class Mul(torch.nn.Module):
def forward(self, x):
return x[0] * x[1]
pytorch_model = Mul()
input_shape_with_batch = [(1, 3), (1, 3)]
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_mul1(self):
node = onnx.helper.make_node(
'Mul',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.array([1, 2, 3]).astype(np.float32).reshape([3, 1])
y = np.array([4, 5, 6]).astype(np.float32).reshape([3, 1])
z = x * y # expected output [4., 10., 18.]
output = OnnxLoader.run_node(node, [x, y])
np.testing.assert_almost_equal(output['z'], z, decimal=5)
def test_mul2(self):
node = onnx.helper.make_node(
'Mul',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
z = x * y
output = OnnxLoader.run_node(node, [x, y])
np.testing.assert_almost_equal(output['z'], z, decimal=5)
def test_onnx_div(self):
class Div(torch.nn.Module):
def forward(self, x):
return x[0] / x[1]
pytorch_model = Div()
input_shape_with_batch = [(1, 3), (1, 3)]
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_div1(self):
node = onnx.helper.make_node(
'Div',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.array([3, 4]).astype(np.float32).reshape([2, 1])
y = np.array([1, 2]).astype(np.float32).reshape([2, 1])
z = x / y
output = OnnxLoader.run_node(node, [x, y])
np.testing.assert_almost_equal(output["z"], z, decimal=5)
def test_div2(self):
node = onnx.helper.make_node(
'Div',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.rand(3, 4, 5).astype(np.float32) + 1.0
z = x / y
output = OnnxLoader.run_node(node, [x, y])
np.testing.assert_almost_equal(output["z"], z, decimal=5)
def test_pow(self):
class Power(torch.nn.Module):
def forward(self, x):
return torch.pow(x, 2)
pytorch_model = Power()
input_shape_with_batch = (1, 2, 2)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_elu(self):
node = onnx.helper.make_node(
'Elu',
inputs=['x'],
outputs=['y'],
alpha=2.0
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_elu_default(self):
node = onnx.helper.make_node(
'Elu',
inputs=['x'],
outputs=['y']
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 1.0
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_elu_default(self):
pytorch_model = torch.nn.Sequential(
torch.nn.ELU()
)
input_shape_with_batch = (1, 3)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_elu(self):
pytorch_model = torch.nn.Sequential(
torch.nn.ELU(alpha=2)
)
input_shape_with_batch = (1, 3)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_torch_clip(self):
class clamp(torch.nn.Module):
def forward(self, x):
return torch.clamp(x, -1, 1)
pytorch_model = torch.nn.Sequential(
clamp()
)
input_shape_with_batch = (1, 3, 32)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_exception_clip(self):
        """clamp with min > max (1, -1) is expected to raise during export.

        NOTE(review): the whole body sits inside pytest.raises — presumably
        the exception comes from compare_with_pytorch; confirm it actually
        raises, otherwise pytest.raises fails the test.
        """
        import pytest
        with pytest.raises(Exception) as e_info:
            class clamp(torch.nn.Module):
                def forward(self, x):
                    return torch.clamp(x, 1, -1)
            pytorch_model = torch.nn.Sequential(
                clamp()
            )
            input_shape_with_batch = (1, 3, 32)
            self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_embedding(self):
pytorch_model = torch.nn.Sequential(
torch.nn.Embedding(num_embeddings=10, embedding_dim=3)
)
input_shape_with_batch = (2, 4)
input_data_with_batch = [[[1, 2, 4, 5], [4, 3, 2, 9]]]
self.compare_with_pytorch(pytorch_model, input_shape_with_batch, input_data_with_batch)
def test_onnx_slice1(self):
class Slice(torch.nn.Module):
def __init__(self, *parameter):
super(Slice, self).__init__()
self.axes = parameter[0]
self.starts = parameter[1]
self.ends = parameter[2]
def forward(self, x):
return x[self.starts:self.ends]
pytorch_model = Slice(0, 0, 2)
input_shape_with_batch = (3, 3, 3)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_slice1_start_out_of_bounds(self):
        """Slice with starts/ends beyond the dim size is expected to raise.

        NOTE(review): NumPy itself tolerates out-of-range slices (x[1000:1000]
        is just empty), so the exception presumably comes from the loader;
        confirm it actually raises.
        """
        with pytest.raises(Exception) as e_info:
            node = onnx.helper.make_node(
                'Slice',
                inputs=['x'],
                outputs=['y'],
                axes=[0],
                starts=[1000],
                ends=[1000],
            )
            x = np.random.randn(3, 3, 3).astype(np.float32)
            y = x[1000:1000]
            output = OnnxLoader.run_node(node, [x])
            np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_slice2(self):
class Slice(torch.nn.Module):
def __init__(self, *parameter):
super(Slice, self).__init__()
self.axes = parameter[0]
self.starts = parameter[1]
self.ends = parameter[2]
def forward(self, x):
return x[self.starts[0]:self.ends[0], self.starts[1]:self.ends[1]]
pytorch_model = Slice([0, 1], [0, 0], [2, -2])
input_shape_with_batch = (20, 10, 5)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_slice2_neg(self):
node = onnx.helper.make_node(
'Slice',
inputs=['x'],
outputs=['y'],
axes=[0, 1],
starts=[0, 0],
ends=[2, -2],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
y = x[0:2, 0:-2]
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_slice3(self):
class Slice(torch.nn.Module):
def __init__(self, *parameter):
super(Slice, self).__init__()
self.axes = parameter[0]
self.starts = parameter[1]
self.ends = parameter[2]
def forward(self, x):
return x[self.starts[0]:self.ends[0], self.starts[1]:self.ends[1],
self.starts[2]:self.ends[2]]
pytorch_model = Slice([0, 1, 2], [0, 0, 3], [20, 10, 4])
input_shape_with_batch = (20, 10, 5)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_slice3_default_axes(self):
node = onnx.helper.make_node(
'Slice',
inputs=['x'],
outputs=['y'],
starts=[0, 0, 3],
ends=[20, 10, 4],
)
x = np.random.randn(20, 10, 5).astype(np.float32)
y = x[:, :, 3:4]
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_reducemean_keepdims(self):
class ReduceMean(torch.nn.Module):
def __init__(self, *parameter):
super(ReduceMean, self).__init__()
self.dim = parameter[0]
self.keepdim = parameter[1]
def forward(self, x):
return torch.mean(x, dim=self.dim, keepdim=self.keepdim)
pytorch_model = ReduceMean(1, True)
input_shape_with_batch = (1, 2, 2)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_reducemean(self):
class ReduceMean(torch.nn.Module):
def __init__(self, *parameter):
super(ReduceMean, self).__init__()
self.dim = parameter[0]
self.keepdim = parameter[1]
def forward(self, x):
return torch.mean(x, dim=self.dim, keepdim=self.keepdim)
pytorch_model = ReduceMean(1, False)
input_shape_with_batch = (1, 2, 2)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_reducemean_do_not_keepdims(self):
shape = [3, 2, 2]
axes = [1]
keepdims = 0
node = onnx.helper.make_node(
'ReduceMean',
inputs=['data'],
outputs=['reduced'],
axes=axes,
keepdims=keepdims)
data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],
dtype=np.float32)
reduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)
output = OnnxLoader.run_node(node, [data])
np.testing.assert_almost_equal(output["reduced"], reduced, decimal=5)
def test_reducemean_keepdims(self):
shape = [3, 2, 2]
axes = [1]
keepdims = 1
node = onnx.helper.make_node(
'ReduceMean',
inputs=['data'],
outputs=['reduced'],
axes=axes,
keepdims=keepdims)
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)
output = OnnxLoader.run_node(node, [data])
np.testing.assert_almost_equal(output["reduced"], reduced, decimal=5)
def test_onnx_reducesum_keepdims(self):
class ReduceSum(torch.nn.Module):
def __init__(self, *parameter):
super(ReduceSum, self).__init__()
self.dim = parameter[0]
self.keepdim = parameter[1]
def forward(self, x):
return torch.sum(x, dim=self.dim, keepdim=self.keepdim)
pytorch_model = ReduceSum(1, True)
input_shape_with_batch = (20, 10, 5)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_onnx_reducesum(self):
class ReduceSum(torch.nn.Module):
def __init__(self, *parameter):
super(ReduceSum, self).__init__()
self.dim = parameter[0]
self.keepdim = parameter[1]
def forward(self, x):
return torch.sum(x, dim=self.dim, keepdim=self.keepdim)
pytorch_model = ReduceSum(1, False)
input_shape_with_batch = (20, 10, 5)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_reducesum_do_not_keepdims(self):
axes = [1]
keepdims = 0
node = onnx.helper.make_node(
'ReduceSum',
inputs=['data'],
outputs=['reduced'],
axes=axes,
keepdims=keepdims)
data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]],
dtype=np.float32)
reduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)
output = OnnxLoader.run_node(node, [data])
np.testing.assert_almost_equal(output["reduced"], reduced, decimal=5)
def test_reducesum_keepdims(self):
shape = [3, 2, 2]
axes = [1]
keepdims = 1
node = onnx.helper.make_node(
'ReduceSum',
inputs=['data'],
outputs=['reduced'],
axes=axes,
keepdims=keepdims)
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)
output = OnnxLoader.run_node(node, [data])
np.testing.assert_almost_equal(output["reduced"], reduced, decimal=5)
def test_onnx_unsqueeze_axis0(self):
class Unsqueeze(torch.nn.Module):
def __init__(self, *parameter):
super(Unsqueeze, self).__init__()
self.dim = parameter[0]
def forward(self, x):
return torch.unsqueeze(x, dim=self.dim)
pytorch_model = Unsqueeze(0)
input_shape_with_batch = (1, 2, 2)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_unsqueeze_axis0(self):
node = onnx.helper.make_node(
'Unsqueeze',
inputs=['x'],
outputs=['y'],
axes=[0],
)
x = np.random.randn(1, 3, 4, 5).astype(np.float32)
y = np.expand_dims(x, axis=0)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
def test_onnx_unsqueeze_axis1(self):
class Unsqueeze(torch.nn.Module):
def __init__(self, *parameter):
super(Unsqueeze, self).__init__()
self.dim = parameter[0]
def forward(self, x):
return torch.unsqueeze(x, dim=self.dim)
pytorch_model = Unsqueeze(1)
input_shape_with_batch = (1, 2, 2)
self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
def test_unsqueeze_axis1(self):
node = onnx.helper.make_node(
'Unsqueeze',
inputs=['x'],
outputs=['y'],
axes=[1],
)
x = np.random.randn(3, 1, 4, 5).astype(np.float32)
y = np.expand_dims(x, axis=1)
output = OnnxLoader.run_node(node, [x])
np.testing.assert_almost_equal(output["y"], y, decimal=5)
# --- (stray concatenation artifact; a lone "|" here is a syntax error)
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from test.zoo.pipeline.utils.test_utils_onnx import OnnxTestCase
from zoo.pipeline.api.keras.layers import *
import numpy as np
np.random.seed(1337) # for reproducibility
import torch
import onnx.helper as helper
import onnx
import pytest
from zoo.pipeline.api.onnx.onnx_loader import OnnxLoader
from onnx import backend
from onnx.backend import test
from onnx.backend.test.case import node
from onnx.backend.test.case.node import pool_op_common
class Squeeze(torch.nn.Module):
    """Wraps torch.squeeze; an optional constructor arg selects the dim.

    With no argument (dim stays -1) every size-1 dim is squeezed.
    """
    def __init__(self, *dim):
        super(Squeeze, self).__init__()
        self.dim = dim[0] if dim else -1

    def forward(self, x):
        if self.dim < 0:
            return torch.squeeze(x)
        return torch.squeeze(x, dim=self.dim)
class TestModelLoading(OnnxTestCase):
    """Tests for loading ONNX models and single ONNX nodes.

    Two patterns are used throughout:

    * ``self.compare_with_pytorch`` (inherited from ``OnnxTestCase``) exports
      a PyTorch model to ONNX, loads it back, and compares the outputs of the
      two models on random input of the given shape.
    * ``OnnxLoader.run_node`` executes a single hand-built ONNX node; its
      output is compared against a NumPy reference computation.

    Fix in this revision: ``test_abs`` built the node and the expected output
    but never executed the node nor asserted anything, so it could not fail.
    """
    def test_onnx_conv2d(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3)
        )
        input_shape_with_batch = (1, 3, 224, 224)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_conv2d_2(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3),
            torch.nn.Conv2d(in_channels=64, out_channels=4, kernel_size=3)
        )
        input_shape_with_batch = (1, 3, 224, 224)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def _batchnorm_test_mode(self, x, s, bias, mean, var, epsilon=1e-5):
        # NumPy reference implementation of inference-mode batch norm:
        # y = s * (x - mean) / sqrt(var + eps) + bias, with the per-channel
        # parameters reshaped so they broadcast over the spatial dims.
        dims_x = len(x.shape)
        dim_ones = (1,) * (dims_x - 2)
        s = s.reshape(-1, *dim_ones)
        bias = bias.reshape(-1, *dim_ones)
        mean = mean.reshape(-1, *dim_ones)
        var = var.reshape(-1, *dim_ones)
        return s * (x - mean) / np.sqrt(var + epsilon) + bias
    # Momentum is always equal to 1 no matter what value we set
    def test_onnx_batch_norm1(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.BatchNorm2d(num_features=3, momentum=1, affine=False)
        )
        input_shape_with_batch = (1, 3, 224, 224)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch, rtol=1e-3, atol=1e-3)
    # Momentum is always equal to 1 no matter what value we set
    def test_onnx_batch_norm2(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.BatchNorm2d(num_features=3, momentum=1, affine=True)
        )
        input_shape_with_batch = (1, 3, 224, 224)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch, rtol=1e-3, atol=1e-3)
    def test_batch_norm(self):
        x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32).reshape((3, 2, 1, 1))
        s = np.array([1.0, 1.0]).astype(np.float32).reshape((2, 1))
        bias = np.array([0, 0]).astype(np.float32).reshape((2, 1))
        mean = np.array([0, 3]).astype(np.float32).reshape((2, 1))
        var = np.array([1, 1.5]).astype(np.float32).reshape((2, 1))
        y = self._batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)
        node = onnx.helper.make_node(
            'BatchNormalization',
            inputs=['x', 's', 'bias', 'mean', 'var'],
            outputs=['y'],
        )
        output = OnnxLoader.run_node(node, [x, s, bias, mean, var])
        np.testing.assert_almost_equal(output["y"], y, decimal=3)
    def test_conv_with_padding(self):
        x = np.array([[[[0., 1., 2., 3., 4.],  # (1, 1, 5, 5) input tensor
                        [5., 6., 7., 8., 9.],
                        [10., 11., 12., 13., 14.],
                        [15., 16., 17., 18., 19.],
                        [20., 21., 22., 23., 24.]]]]).astype(np.float32)
        W = np.array([[[[1., 1., 1.],  # (1, 1, 3, 3) tensor for convolution weights
                        [1., 1., 1.],
                        [1., 1., 1.]]]]).astype(np.float32)
        # Convolution with padding
        node_with_padding = helper.make_node(
            'Conv',
            inputs=['x', 'W'],
            outputs=['y'],
            kernel_shape=[3, 3],
            # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1
            pads=[1, 1, 1, 1],
        )
        y_with_padding = np.array([[[[12., 21., 27., 33., 24.],  # (1, 1, 5, 5) output tensor
                                     [33., 54., 63., 72., 51.],
                                     [63., 99., 108., 117., 81.],
                                     [93., 144., 153., 162., 111.],
                                     [72., 111., 117., 123., 84.]]]]).astype(np.float32)
        output = OnnxLoader.run_node(node_with_padding, [x, W])
        np.testing.assert_almost_equal(output["y"], y_with_padding, decimal=5)
    def test_conv_without_padding(self):
        x = np.array([[[[0., 1., 2., 3., 4.],  # (1, 1, 5, 5) input tensor
                        [5., 6., 7., 8., 9.],
                        [10., 11., 12., 13., 14.],
                        [15., 16., 17., 18., 19.],
                        [20., 21., 22., 23., 24.]]]]).astype(np.float32)
        W = np.array([[[[1., 1., 1.],  # (1, 1, 3, 3) tensor for convolution weights
                        [1., 1., 1.],
                        [1., 1., 1.]]]]).astype(np.float32)
        # Convolution without padding
        node_without_padding = onnx.helper.make_node(
            'Conv',
            inputs=['x', 'W'],
            outputs=['y'],
            kernel_shape=[3, 3],
            # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1
            pads=[0, 0, 0, 0],
        )
        y_without_padding = np.array([[[[54., 63., 72.],  # (1, 1, 3, 3) output tensor
                                        [99., 108., 117.],
                                        [144., 153., 162.]]]]).astype(np.float32)
        output = OnnxLoader.run_node(node_without_padding, [x, W])
        np.testing.assert_almost_equal(output["y"], y_without_padding, decimal=5)
    def test_onnx_gemm(self):
        # TODO: Linear(bias = Flase) is mapped to Transpose + MatMul, not GEMM
        pytorch_model = torch.nn.Sequential(
            torch.nn.Linear(in_features=3, out_features=4, bias=True)
        )
        input_shape_with_batch = (1, 3)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_add(self):
        class Add(torch.nn.Module):
            def forward(self, x):
                return x[0] + x[1]
        pytorch_model = Add()
        input_shape_with_batch = [(1, 3), (1, 3)]
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_abs(self):
        class Abs(torch.nn.Module):
            def forward(self, x):
                return abs(x)
        pytorch_model = Abs()
        input_shape_with_batch = (1, 3)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_abs(self):
        node = onnx.helper.make_node(
            'Abs',
            inputs=['x'],
            outputs=['y'],
        )
        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.abs(x)
        # Fix: the node was previously built but never executed, so this test
        # asserted nothing. Run the node and compare with the NumPy reference.
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_neg(self):
        class Neg(torch.nn.Module):
            def forward(self, x):
                return -x
        pytorch_model = Neg()
        input_shape_with_batch = (1, 3)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_neg(self):
        node = onnx.helper.make_node(
            'Neg',
            inputs=['x'],
            outputs=['y'],
        )
        x = np.array([-4, 2]).astype(np.float32).reshape([2, 1])
        y = np.negative(x)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.negative(x)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_averagepool2d(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.AvgPool2d(kernel_size=3, count_include_pad=False)
        )
        input_shape_with_batch = (1, 3, 224, 224)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_averagepool2d_padding(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.AvgPool2d(kernel_size=10, padding=4, count_include_pad=False)
        )
        input_shape_with_batch = (1, 3, 224, 224)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_relu(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.ReLU()
        )
        input_shape_with_batch = (1, 3)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_relu(self):
        node = helper.make_node(
            'Relu',
            inputs=['x'],
            outputs=['y']
        )
        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.clip(x, 0, np.inf)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_softmax(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.Softmax()
        )
        input_shape_with_batch = (1, 3)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_softmax(self):
        node = helper.make_node(
            'Softmax',
            inputs=['x'],
            outputs=['y']
        )
        x = np.array([[-1, 0, 1]]).astype(np.float32)
        # expected output [[0.09003058, 0.24472848, 0.66524094]]
        y = np.exp(x) / np.sum(np.exp(x), axis=1)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_reshape(self):
        original_shape = [2, 3, 4]
        test_cases = {
            'reordered_dims': np.array([4, 2, 3], dtype=np.int64),
            'reduced_dims': np.array([3, 8], dtype=np.int64),
            'extended_dims': np.array([3, 2, 2, 2], dtype=np.int64),
            'one_dim': np.array([24], dtype=np.int64)
            # 'negative_dim': np.array([6, -1, 2], dtype=np.int64),
        }
        data = np.random.random_sample(original_shape).astype(np.float32)
        for test_name, shape in test_cases.items():
            node = onnx.helper.make_node(
                'Reshape',
                inputs=['data', 'shape'],
                outputs=['reshaped'],
            )
            output = OnnxLoader.run_node(node, [data, shape])
            reshaped = np.reshape(data, shape)
            np.testing.assert_almost_equal(output["reshaped"], reshaped, decimal=5)
    def test_reshape_pytorch(self):
        class View(torch.nn.Module):
            def __init__(self, *shape):
                super(View, self).__init__()
                self.shape = shape
            def forward(self, input):
                return input.view(self.shape)
        pytorch_model = torch.nn.Sequential(
            torch.nn.Linear(20, 20),
            View(2, 5, 4))
        input_shape_with_batch = (2, 20)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_constant(self):
        values = np.random.randn(5, 5).astype(np.float32)
        node = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=['values'],
            value=onnx.helper.make_tensor(
                name='const_tensor',
                data_type=onnx.TensorProto.FLOAT,
                dims=values.shape,
                vals=values.flatten().astype(float),
            ),
        )
        output = OnnxLoader.run_node(node, [])
        np.testing.assert_almost_equal(output["values"], values, decimal=5)
    def test_onnx_maxpool2d(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.MaxPool2d(kernel_size=3)
        )
        input_shape_with_batch = (1, 3, 224, 224)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_maxpool2d_pads(self):
        node = helper.make_node(
            'MaxPool',
            inputs=['x'],
            outputs=['y'],
            kernel_shape=[5, 5],
            pads=[2, 2, 2, 2]
        )
        x = np.array([[[
            [1, 2, 3, 4, 5],
            [6, 7, 8, 9, 10],
            [11, 12, 13, 14, 15],
            [16, 17, 18, 19, 20],
            [21, 22, 23, 24, 25],
        ]]]).astype(np.float32)
        y = np.array([[[
            [13, 14, 15, 15, 15],
            [18, 19, 20, 20, 20],
            [23, 24, 25, 25, 25],
            [23, 24, 25, 25, 25],
            [23, 24, 25, 25, 25]]]]).astype(np.float32)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_maxpool2d_same_upper(self):
        node = helper.make_node(
            'MaxPool',
            inputs=['x'],
            outputs=['y'],
            kernel_shape=[3, 3],
            strides=[2, 2],
            auto_pad="SAME_UPPER"
        )
        x = np.array([[[
            [1, 2, 3, 4, 5],
            [6, 7, 8, 9, 10],
            [11, 12, 13, 14, 15],
            [16, 17, 18, 19, 20],
            [21, 22, 23, 24, 25],
        ]]]).astype(np.float32)
        y = np.array([[[[7, 9, 10],
                        [17, 19, 20],
                        [22, 24, 25]]]]).astype(np.float32)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_maxpool2d_strides(self):
        node = helper.make_node(
            'MaxPool',
            inputs=['x'],
            outputs=['y'],
            kernel_shape=[2, 2],
            strides=[2, 2]
        )
        x = np.array([[[
            [1, 2, 3, 4, 5],
            [6, 7, 8, 9, 10],
            [11, 12, 13, 14, 15],
            [16, 17, 18, 19, 20],
            [21, 22, 23, 24, 25],
        ]]]).astype(np.float32)
        y = np.array([[[[7, 9],
                        [17, 19]]]]).astype(np.float32)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_logsoftmax(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.LogSoftmax()
        )
        input_shape_with_batch = (1, 3)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_tanh(self):
        node = onnx.helper.make_node(
            'Tanh',
            inputs=['x'],
            outputs=['y'],
        )
        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.tanh(x)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y)
    def test_onnx_exp(self):
        node = onnx.helper.make_node(
            'Exp',
            inputs=['x'],
            outputs=['y'],
        )
        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.exp(x)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_flatten(self):
        node = onnx.helper.make_node(
            'Flatten',
            inputs=['a'],
            outputs=['b'],
        )
        shape = (5, 4, 3, 2)
        a = np.random.random_sample(shape).astype(np.float32)
        new_shape = (5, 24)
        b = np.reshape(a, new_shape)
        output = OnnxLoader.run_node(node, [a])
        np.testing.assert_almost_equal(output["b"], b, decimal=5)
    def test_onnx_sqrt(self):
        node = onnx.helper.make_node(
            'Sqrt',
            inputs=['x'],
            outputs=['y'],
        )
        x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
        y = np.sqrt(x)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_log(self):
        node = onnx.helper.make_node(
            'Log',
            inputs=['x'],
            outputs=['y'],
        )
        x = np.exp(np.random.randn(3, 4, 5).astype(np.float32))
        y = np.log(x)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_hardsigmoid(self):
        default_alpha = 0.2
        default_beta = 0.5
        node = onnx.helper.make_node(
            'HardSigmoid',
            inputs=['x'],
            outputs=['y'],
        )
        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.clip(x * default_alpha + default_beta, 0, 1)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_matmul_2d(self):
        node = onnx.helper.make_node(
            'MatMul',
            inputs=['a', 'b'],
            outputs=['c'],
        )
        # 2d
        a = np.random.randn(3, 4).astype(np.float32).reshape((3, 4))
        b = np.random.randn(4, 3).astype(np.float32).reshape((4, 3))
        c = np.matmul(a, b)
        output = OnnxLoader.run_node(node, [a, b])
        np.testing.assert_almost_equal(output["c"], c, decimal=5)
    def test_matmul_3d(self):
        node = onnx.helper.make_node(
            'MatMul',
            inputs=['a', 'b'],
            outputs=['c'],
        )
        # 3d
        a = np.random.randn(2, 3, 4).astype(np.float32)
        b = np.random.randn(2, 4, 3).astype(np.float32)
        c = np.matmul(a, b)
        output = OnnxLoader.run_node(node, [a, b])
        np.testing.assert_almost_equal(output["c"], c, decimal=5)
    def test_minit(self):
        import torch.nn as nn
        import torch.nn.functional as F
        class MnistNet(nn.Module):
            def __init__(self):
                super(MnistNet, self).__init__()
                self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
                self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
                self.conv2_drop = nn.Dropout2d()
                self.fc1 = nn.Linear(320, 50)
                self.fc2 = nn.Linear(50, 10)
            def forward(self, x):
                x = F.relu(F.max_pool2d(self.conv1(x), 2))
                x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
                x = x.view(-1, 320)
                x = F.relu(self.fc1(x))
                x = F.dropout(x, training=self.training)
                x = self.fc2(x)
                return F.log_softmax(x, dim=1)
        pytorch_model = MnistNet()
        # eval mode so Dropout is deterministic when comparing outputs
        pytorch_model.train(mode=False)
        self.compare_with_pytorch(pytorch_model, [(1, 1, 28, 28)])
    def test_onnx_sub(self):
        class Sub(torch.nn.Module):
            def forward(self, x):
                return x[0] - x[1]
        pytorch_model = Sub()
        input_shape_with_batch = [(1, 3), (1, 3)]
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_sub(self):
        node = onnx.helper.make_node(
            'Sub',
            inputs=['x', 'y'],
            outputs=['z'],
        )
        x = np.array([1, 2, 3]).astype(np.float32).reshape([3, 1])
        y = np.array([3, 2, 1]).astype(np.float32).reshape([3, 1])
        z = x - y
        output = OnnxLoader.run_node(node, [x, y])
        np.testing.assert_almost_equal(output["z"], z, decimal=5)
        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.random.randn(3, 4, 5).astype(np.float32)
        z = x - y
        output = OnnxLoader.run_node(node, [x, y])
        np.testing.assert_almost_equal(output["z"], z, decimal=5)
    def test_onnx_squeeze(self):
        pytorch_model = Squeeze()
        input_shape_with_batch = (2, 1, 2, 1, 2)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_squeeze_dim0(self):
        pytorch_model = Squeeze(0)
        input_shape_with_batch = (1, 2, 3)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_squeeze_dim1(self):
        pytorch_model = Squeeze(1)
        input_shape_with_batch = (2, 1, 3, 1, 2)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_squeeze(self):
        node = onnx.helper.make_node(
            'Squeeze',
            inputs=['x'],
            outputs=['y'],
            axes=[0],
        )
        x = np.random.randn(1, 3, 4, 5).astype(np.float32)
        y = np.squeeze(x, axis=0)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_squeeze_none(self):
        node = onnx.helper.make_node(
            'Squeeze',
            inputs=['x'],
            outputs=['y'],
        )
        x = np.random.randn(1, 1, 4, 5).astype(np.float32)
        y = np.squeeze(x)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_squeeze_list(self):
        node = onnx.helper.make_node(
            'Squeeze',
            inputs=['x'],
            outputs=['y'],
            axes=[0, 1],
        )
        x = np.random.randn(1, 1, 4, 5).astype(np.float32)
        y = np.squeeze(x)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_squeeze_axis(self):
        node = onnx.helper.make_node(
            'Squeeze',
            inputs=['x'],
            outputs=['y'],
            axes=[1],
        )
        x = np.random.randn(3, 1, 4, 5).astype(np.float32)
        y = np.squeeze(x, axis=1)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_sigmoid(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.Sigmoid()
        )
        input_shape_with_batch = (1, 3)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_sigmoid(self):
        node = helper.make_node(
            'Sigmoid',
            inputs=['x'],
            outputs=['y'],
        )
        x = np.array([[-1, 0, 1]]).astype(np.float32)
        y = 1.0 / (1.0 + np.exp(np.negative(x)))  # expected output [0.26894143, 0.5, 0.7310586]
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_index_select(self):
        class IndexSelect(torch.nn.Module):
            def __init__(self, *parameter):
                super(IndexSelect, self).__init__()
                self.dim = parameter[0]
                self.index = parameter[1]
            def forward(self, x):
                return torch.index_select(x, dim=self.dim, index=torch.tensor(self.index))
        pytorch_model = IndexSelect(3, 2)
        input_shape_with_batch = (3, 4, 5, 6)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_index_select_axis0(self):
        # NOTE(review): the body is wrapped in pytest.raises, so this test
        # passes only if index_select along axis 0 raises — verify intended.
        import pytest
        with pytest.raises(Exception) as e_info:
            class IndexSelect(torch.nn.Module):
                def __init__(self, *parameter):
                    super(IndexSelect, self).__init__()
                    self.dim = parameter[0]
                    self.index = parameter[1]
                def forward(self, x):
                    return torch.index_select(x, dim=self.dim, index=torch.tensor(self.index))
            pytorch_model = IndexSelect(0, 2)
            input_shape_with_batch = (3, 4, 5, 6)
            self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_concat(self):
        class Concat(torch.nn.Module):
            def forward(self, x):
                return torch.cat([v for v in x], 1)
        pytorch_model = Concat()
        input_shape_with_batch = [(1, 3), (1, 3)]
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_concat(self):
        test_cases = {
            '1d': ([1, 2],
                   [3, 4]),
            '2d': ([[1, 2], [3, 4]],
                   [[5, 6], [7, 8]]),
            '3d': ([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                   [[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
        }  # type: Dict[Text, Sequence[Any]]
        for test_case, values_ in test_cases.items():
            values = [np.asarray(v, dtype=np.float32) for v in values_]
            for i in range(1, len(values[0].shape)):
                in_args = ['value' + str(k) for k in range(len(values))]
                node = onnx.helper.make_node(
                    'Concat',
                    inputs=[s for s in in_args],
                    outputs=['output'],
                    axis=i
                )
                y = np.concatenate(values, i)
                output = OnnxLoader.run_node(node, [v for v in values])
                np.testing.assert_almost_equal(output["output"], y, decimal=5)
    def test_concat_axis(self):
        # NOTE(review): unlike test_concat, the axis here is fixed at 0, so
        # the inner loop over i only repeats the same check — confirm intent.
        test_cases = {
            '1d': ([1, 2],
                   [3, 4]),
            '2d': ([[1, 2], [3, 4]],
                   [[5, 6], [7, 8]]),
            '3d': ([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                   [[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
        }  # type: Dict[Text, Sequence[Any]]
        for test_case, values_ in test_cases.items():
            values = [np.asarray(v, dtype=np.float32) for v in values_]
            for i in range(1, len(values[0].shape)):
                in_args = ['value' + str(k) for k in range(len(values))]
                node = onnx.helper.make_node(
                    'Concat',
                    inputs=[s for s in in_args],
                    outputs=['output'],
                    axis=0
                )
                y = np.concatenate(values, 0)
                output = OnnxLoader.run_node(node, [v for v in values])
                np.testing.assert_almost_equal(output["output"], y, decimal=5)
    def test_torch_add(self):
        class Add(torch.nn.Module):
            def forward(self, x):
                return torch.add(x[0], 1, x[1])
        pytorch_model = Add()
        input_shape_with_batch = [(1, 3), (1, 3)]
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_leakyrelu(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.LeakyReLU()
        )
        input_shape_with_batch = (1, 3)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_leakyrelu(self):
        node = helper.make_node(
            'LeakyRelu',
            inputs=['x'],
            outputs=['y'],
            alpha=0.1
        )
        x = np.array([-1, 0, 1]).astype(np.float32)
        # expected output [-0.1, 0., 1.]
        y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_gt(self):
        class gt(torch.nn.Module):
            def forward(self, x):
                return torch.gt(x[0], x[1])
        pytorch_model = gt()
        input_shape_with_batch = [(1, 3), (1, 3)]
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_gt(self):
        node = helper.make_node(
            'Greater',
            inputs=['x', 'y'],
            outputs=['greater'],
        )
        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.random.randn(3, 4, 5).astype(np.float32)
        z = np.greater(x, y)
        output = OnnxLoader.run_node(node, [x, y])
        np.testing.assert_almost_equal(output['greater'], z, decimal=5)
    def test_maxpool1d(self):
        node = onnx.helper.make_node(
            'MaxPool',
            inputs=['x'],
            outputs=['y'],
            kernel_shape=[2],
        )
        x = np.random.randn(1, 3, 32).astype(np.float32)
        x_shape = np.array(np.shape(x))
        kernel_shape = np.array([2])
        strides = [1]
        out_shape = pool_op_common.get_output_shape('VALID', x_shape[2:], kernel_shape, strides)
        padded = x
        y = pool_op_common.pool(padded, x_shape, kernel_shape, strides, out_shape, [0], 'MAX')
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_maxpool1d_strides(self):
        node = onnx.helper.make_node(
            'MaxPool',
            inputs=['x'],
            outputs=['y'],
            kernel_shape=[2],
            strides=[2]
        )
        x = np.random.randn(1, 3, 32).astype(np.float32)
        x_shape = np.array(np.shape(x))
        kernel_shape = np.array([2])
        strides = [2]
        out_shape = pool_op_common.get_output_shape('VALID', x_shape[2:], kernel_shape, strides)
        padded = x
        y = pool_op_common.pool(padded, x_shape, kernel_shape, strides, out_shape, [0], 'MAX')
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_maxpool1d(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.MaxPool1d(2)
        )
        input_shape_with_batch = (1, 3, 32)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_maxpool1d_pads(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.MaxPool1d(2, padding=1)
        )
        input_shape_with_batch = (1, 3, 32)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_threshold(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.Threshold(0, 0))
        input_shape_with_batch = (2, 3)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_mul(self):
        class Mul(torch.nn.Module):
            def forward(self, x):
                return x[0] * x[1]
        pytorch_model = Mul()
        input_shape_with_batch = [(1, 3), (1, 3)]
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_mul1(self):
        node = onnx.helper.make_node(
            'Mul',
            inputs=['x', 'y'],
            outputs=['z'],
        )
        x = np.array([1, 2, 3]).astype(np.float32).reshape([3, 1])
        y = np.array([4, 5, 6]).astype(np.float32).reshape([3, 1])
        z = x * y  # expected output [4., 10., 18.]
        output = OnnxLoader.run_node(node, [x, y])
        np.testing.assert_almost_equal(output['z'], z, decimal=5)
    def test_mul2(self):
        node = onnx.helper.make_node(
            'Mul',
            inputs=['x', 'y'],
            outputs=['z'],
        )
        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.random.randn(3, 4, 5).astype(np.float32)
        z = x * y
        output = OnnxLoader.run_node(node, [x, y])
        np.testing.assert_almost_equal(output['z'], z, decimal=5)
    def test_onnx_div(self):
        class Div(torch.nn.Module):
            def forward(self, x):
                return x[0] / x[1]
        pytorch_model = Div()
        input_shape_with_batch = [(1, 3), (1, 3)]
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_div1(self):
        node = onnx.helper.make_node(
            'Div',
            inputs=['x', 'y'],
            outputs=['z'],
        )
        x = np.array([3, 4]).astype(np.float32).reshape([2, 1])
        y = np.array([1, 2]).astype(np.float32).reshape([2, 1])
        z = x / y
        output = OnnxLoader.run_node(node, [x, y])
        np.testing.assert_almost_equal(output["z"], z, decimal=5)
    def test_div2(self):
        node = onnx.helper.make_node(
            'Div',
            inputs=['x', 'y'],
            outputs=['z'],
        )
        x = np.random.randn(3, 4, 5).astype(np.float32)
        # shift the divisor away from zero to keep the quotient stable
        y = np.random.rand(3, 4, 5).astype(np.float32) + 1.0
        z = x / y
        output = OnnxLoader.run_node(node, [x, y])
        np.testing.assert_almost_equal(output["z"], z, decimal=5)
    def test_pow(self):
        class Power(torch.nn.Module):
            def forward(self, x):
                return torch.pow(x, 2)
        pytorch_model = Power()
        input_shape_with_batch = (1, 2, 2)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_elu(self):
        node = onnx.helper.make_node(
            'Elu',
            inputs=['x'],
            outputs=['y'],
            alpha=2.0
        )
        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_elu_default(self):
        node = onnx.helper.make_node(
            'Elu',
            inputs=['x'],
            outputs=['y']
        )
        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 1.0
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_elu_default(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.ELU()
        )
        input_shape_with_batch = (1, 3)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_elu(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.ELU(alpha=2)
        )
        input_shape_with_batch = (1, 3)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_torch_clip(self):
        class clamp(torch.nn.Module):
            def forward(self, x):
                return torch.clamp(x, -1, 1)
        pytorch_model = torch.nn.Sequential(
            clamp()
        )
        input_shape_with_batch = (1, 3, 32)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_exception_clip(self):
        # min > max is expected to raise somewhere in the export/compare path
        import pytest
        with pytest.raises(Exception) as e_info:
            class clamp(torch.nn.Module):
                def forward(self, x):
                    return torch.clamp(x, 1, -1)
            pytorch_model = torch.nn.Sequential(
                clamp()
            )
            input_shape_with_batch = (1, 3, 32)
            self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_embedding(self):
        pytorch_model = torch.nn.Sequential(
            torch.nn.Embedding(num_embeddings=10, embedding_dim=3)
        )
        input_shape_with_batch = (2, 4)
        input_data_with_batch = [[[1, 2, 4, 5], [4, 3, 2, 9]]]
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch, input_data_with_batch)
    def test_onnx_slice1(self):
        class Slice(torch.nn.Module):
            def __init__(self, *parameter):
                super(Slice, self).__init__()
                self.axes = parameter[0]
                self.starts = parameter[1]
                self.ends = parameter[2]
            def forward(self, x):
                return x[self.starts:self.ends]
        pytorch_model = Slice(0, 0, 2)
        input_shape_with_batch = (3, 3, 3)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_slice1_start_out_of_bounds(self):
        # NOTE(review): numpy itself clamps out-of-range slices, so the
        # expected exception must come from the loader — confirm intended.
        with pytest.raises(Exception) as e_info:
            node = onnx.helper.make_node(
                'Slice',
                inputs=['x'],
                outputs=['y'],
                axes=[0],
                starts=[1000],
                ends=[1000],
            )
            x = np.random.randn(3, 3, 3).astype(np.float32)
            y = x[1000:1000]
            output = OnnxLoader.run_node(node, [x])
            np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_slice2(self):
        class Slice(torch.nn.Module):
            def __init__(self, *parameter):
                super(Slice, self).__init__()
                self.axes = parameter[0]
                self.starts = parameter[1]
                self.ends = parameter[2]
            def forward(self, x):
                return x[self.starts[0]:self.ends[0], self.starts[1]:self.ends[1]]
        pytorch_model = Slice([0, 1], [0, 0], [2, -2])
        input_shape_with_batch = (20, 10, 5)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_slice2_neg(self):
        node = onnx.helper.make_node(
            'Slice',
            inputs=['x'],
            outputs=['y'],
            axes=[0, 1],
            starts=[0, 0],
            ends=[2, -2],
        )
        x = np.random.randn(20, 10, 5).astype(np.float32)
        y = x[0:2, 0:-2]
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_slice3(self):
        class Slice(torch.nn.Module):
            def __init__(self, *parameter):
                super(Slice, self).__init__()
                self.axes = parameter[0]
                self.starts = parameter[1]
                self.ends = parameter[2]
            def forward(self, x):
                return x[self.starts[0]:self.ends[0], self.starts[1]:self.ends[1],
                         self.starts[2]:self.ends[2]]
        pytorch_model = Slice([0, 1, 2], [0, 0, 3], [20, 10, 4])
        input_shape_with_batch = (20, 10, 5)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_slice3_default_axes(self):
        node = onnx.helper.make_node(
            'Slice',
            inputs=['x'],
            outputs=['y'],
            starts=[0, 0, 3],
            ends=[20, 10, 4],
        )
        x = np.random.randn(20, 10, 5).astype(np.float32)
        y = x[:, :, 3:4]
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_reducemean_keepdims(self):
        class ReduceMean(torch.nn.Module):
            def __init__(self, *parameter):
                super(ReduceMean, self).__init__()
                self.dim = parameter[0]
                self.keepdim = parameter[1]
            def forward(self, x):
                return torch.mean(x, dim=self.dim, keepdim=self.keepdim)
        pytorch_model = ReduceMean(1, True)
        input_shape_with_batch = (1, 2, 2)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_reducemean(self):
        class ReduceMean(torch.nn.Module):
            def __init__(self, *parameter):
                super(ReduceMean, self).__init__()
                self.dim = parameter[0]
                self.keepdim = parameter[1]
            def forward(self, x):
                return torch.mean(x, dim=self.dim, keepdim=self.keepdim)
        pytorch_model = ReduceMean(1, False)
        input_shape_with_batch = (1, 2, 2)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_reducemean_do_not_keepdims(self):
        shape = [3, 2, 2]
        axes = [1]
        keepdims = 0
        node = onnx.helper.make_node(
            'ReduceMean',
            inputs=['data'],
            outputs=['reduced'],
            axes=axes,
            keepdims=keepdims)
        data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],
                        dtype=np.float32)
        reduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)
        output = OnnxLoader.run_node(node, [data])
        np.testing.assert_almost_equal(output["reduced"], reduced, decimal=5)
    def test_reducemean_keepdims(self):
        shape = [3, 2, 2]
        axes = [1]
        keepdims = 1
        node = onnx.helper.make_node(
            'ReduceMean',
            inputs=['data'],
            outputs=['reduced'],
            axes=axes,
            keepdims=keepdims)
        np.random.seed(0)
        data = np.random.uniform(-10, 10, shape).astype(np.float32)
        reduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)
        output = OnnxLoader.run_node(node, [data])
        np.testing.assert_almost_equal(output["reduced"], reduced, decimal=5)
    def test_onnx_reducesum_keepdims(self):
        class ReduceSum(torch.nn.Module):
            def __init__(self, *parameter):
                super(ReduceSum, self).__init__()
                self.dim = parameter[0]
                self.keepdim = parameter[1]
            def forward(self, x):
                return torch.sum(x, dim=self.dim, keepdim=self.keepdim)
        pytorch_model = ReduceSum(1, True)
        input_shape_with_batch = (20, 10, 5)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_onnx_reducesum(self):
        class ReduceSum(torch.nn.Module):
            def __init__(self, *parameter):
                super(ReduceSum, self).__init__()
                self.dim = parameter[0]
                self.keepdim = parameter[1]
            def forward(self, x):
                return torch.sum(x, dim=self.dim, keepdim=self.keepdim)
        pytorch_model = ReduceSum(1, False)
        input_shape_with_batch = (20, 10, 5)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_reducesum_do_not_keepdims(self):
        axes = [1]
        keepdims = 0
        node = onnx.helper.make_node(
            'ReduceSum',
            inputs=['data'],
            outputs=['reduced'],
            axes=axes,
            keepdims=keepdims)
        data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]],
                        dtype=np.float32)
        reduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)
        output = OnnxLoader.run_node(node, [data])
        np.testing.assert_almost_equal(output["reduced"], reduced, decimal=5)
    def test_reducesum_keepdims(self):
        shape = [3, 2, 2]
        axes = [1]
        keepdims = 1
        node = onnx.helper.make_node(
            'ReduceSum',
            inputs=['data'],
            outputs=['reduced'],
            axes=axes,
            keepdims=keepdims)
        np.random.seed(0)
        data = np.random.uniform(-10, 10, shape).astype(np.float32)
        reduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)
        output = OnnxLoader.run_node(node, [data])
        np.testing.assert_almost_equal(output["reduced"], reduced, decimal=5)
    def test_onnx_unsqueeze_axis0(self):
        class Unsqueeze(torch.nn.Module):
            def __init__(self, *parameter):
                super(Unsqueeze, self).__init__()
                self.dim = parameter[0]
            def forward(self, x):
                return torch.unsqueeze(x, dim=self.dim)
        pytorch_model = Unsqueeze(0)
        input_shape_with_batch = (1, 2, 2)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_unsqueeze_axis0(self):
        node = onnx.helper.make_node(
            'Unsqueeze',
            inputs=['x'],
            outputs=['y'],
            axes=[0],
        )
        x = np.random.randn(1, 3, 4, 5).astype(np.float32)
        y = np.expand_dims(x, axis=0)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
    def test_onnx_unsqueeze_axis1(self):
        class Unsqueeze(torch.nn.Module):
            def __init__(self, *parameter):
                super(Unsqueeze, self).__init__()
                self.dim = parameter[0]
            def forward(self, x):
                return torch.unsqueeze(x, dim=self.dim)
        pytorch_model = Unsqueeze(1)
        input_shape_with_batch = (1, 2, 2)
        self.compare_with_pytorch(pytorch_model, input_shape_with_batch)
    def test_unsqueeze_axis1(self):
        node = onnx.helper.make_node(
            'Unsqueeze',
            inputs=['x'],
            outputs=['y'],
            axes=[1],
        )
        x = np.random.randn(3, 1, 4, 5).astype(np.float32)
        y = np.expand_dims(x, axis=1)
        output = OnnxLoader.run_node(node, [x])
        np.testing.assert_almost_equal(output["y"], y, decimal=5)
|
en
| 0.656421
|
# # Copyright 2018 Analytics Zoo Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # for reproducibility # Momentum is always equal to 1 no matter what value we set # Momentum is always equal to 1 no matter what value we set # (1, 1, 5, 5) input tensor # (1, 1, 3, 3) tensor for convolution weights # Convolution with padding # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1 # (1, 1, 5, 5) output tensor # (1, 1, 5, 5) input tensor # (1, 1, 3, 3) tensor for convolution weights # Convolution without padding # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1 # (1, 1, 3, 3) output tensor # TODO: Linear(bias = Flase) is mapped to Transpose + MatMul, not GEMM # expected output [[0.09003058, 0.24472848, 0.66524094]] # 'negative_dim': np.array([6, -1, 2], dtype=np.int64), # 2d # 3d # expected output [0.26894143, 0.5, 0.7310586] # type: Dict[Text, Sequence[Any]] # type: Dict[Text, Sequence[Any]] # expected output [-0.1, 0., 1.] # expected output [4., 10., 18.]
| 2.043176
| 2
|
tests/load_all_imports/test_load_all_imports.py
|
imranq2/SparkAutoMapper.FHIR
| 1
|
6626703
|
import importlib
import pkgutil
from typing import Any, Dict, Tuple, Union
def import_submodules(
    package: Union[Any, str], recursive: bool = True
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """Recursively import every submodule (and subpackage) of *package*.

    Adapted from
    https://stackoverflow.com/questions/3365740/how-to-import-all-submodules

    :param package: the package to walk, given as a dotted name or as an
        already-imported module object.
    :param recursive: when True, descend into subpackages as well.
    :return: a pair ``(imported, failed)`` — modules successfully imported,
        keyed by fully-qualified name, and the exceptions raised by the ones
        that could not be imported, keyed the same way.
    """
    if isinstance(package, str):
        package = importlib.import_module(package)

    imported: Dict[str, Any] = {}
    failed: Dict[str, Any] = {}
    # noinspection Mypy
    for _loader, name, is_pkg in pkgutil.walk_packages(package.__path__):  # type: ignore
        qualified = f"{package.__name__}.{name}"
        try:
            imported[qualified] = importlib.import_module(qualified)
        except Exception as exc:
            # Record the failure but keep walking the remaining modules.
            print(f"{qualified}: {exc}")
            failed[qualified] = exc
        # Descend even when the package's own import failed, matching the
        # original behaviour (walk_packages can still enumerate it).
        if recursive and is_pkg:
            sub_ok, sub_bad = import_submodules(qualified)
            imported.update(sub_ok)
            failed.update(sub_bad)
    return imported, failed
def test_load_all_imports() -> None:
    """Smoke test: every submodule of spark_auto_mapper_fhir must import cleanly."""
    import spark_auto_mapper_fhir

    submodules, errors_in_submodules = import_submodules(spark_auto_mapper_fhir)
    print(submodules)
    # Any entry in errors_in_submodules means some submodule raised on import;
    # the repr in the message shows exactly which ones and why.
    assert len(errors_in_submodules) == 0, f"{errors_in_submodules!r}"
|
import importlib
import pkgutil
from typing import Any, Dict, Tuple, Union
def import_submodules(
package: Union[Any, str], recursive: bool = True
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Import all submodules of a module, recursively, including subpackages
from https://stackoverflow.com/questions/3365740/how-to-import-all-submodules
:param recursive:
:param package: package (name or actual module)
:type package: str | module
:rtype: dict[str, types.ModuleType]
"""
if isinstance(package, str):
package = importlib.import_module(package)
results = {}
errors = {}
# noinspection Mypy
for loader, name, is_pkg in pkgutil.walk_packages(package.__path__): # type: ignore
full_name = package.__name__ + "." + name
try:
results[full_name] = importlib.import_module(full_name)
except Exception as e:
print(f"{full_name}: {e}")
errors[full_name] = e
if recursive and is_pkg:
submodules, errors_in_submodules = import_submodules(full_name)
results.update(submodules)
errors.update(errors_in_submodules)
return results, errors
def test_load_all_imports() -> None:
import spark_auto_mapper_fhir
submodules, errors_in_submodules = import_submodules(spark_auto_mapper_fhir)
print(submodules)
assert len(errors_in_submodules) == 0, f"{errors_in_submodules!r}"
|
en
| 0.417681
|
Import all submodules of a module, recursively, including subpackages from https://stackoverflow.com/questions/3365740/how-to-import-all-submodules :param recursive: :param package: package (name or actual module) :type package: str | module :rtype: dict[str, types.ModuleType] # noinspection Mypy # type: ignore
| 2.542458
| 3
|
test-drf-project/testapp/routes.py
|
fvlima/drf-view-profiler
| 30
|
6626704
|
# DRF router configuration for the test app: registers one plain ViewSet and
# one ModelViewSet and exposes their auto-generated URL patterns.
from rest_framework import routers

from .views import TestModelViewSet, TestViewSet

app_name = "testapp"  # URL namespace used by Django's reverse() lookups

router = routers.DefaultRouter()
router.register(r"test-viewset", TestViewSet, basename="test-viewset")
router.register(r"test-model-viewset", TestModelViewSet, basename="test-model-viewset")

urlpatterns = router.urls  # consumed by the project's URL resolver
|
from rest_framework import routers
from .views import TestModelViewSet, TestViewSet
app_name = "testapp"
router = routers.DefaultRouter()
router.register(r"test-viewset", TestViewSet, basename="test-viewset")
router.register(r"test-model-viewset", TestModelViewSet, basename="test-model-viewset")
urlpatterns = router.urls
|
none
| 1
| 1.790397
| 2
|
|
analytics/mixins.py
|
NicolasFlandrois/PurBeurre-Upgrade-Debug
| 0
|
6626705
|
from .signals import object_viewed_signal
class ObjectViewedMixin(object):
    """Class-based-view mixin that fires ``object_viewed_signal`` whenever the
    rendered context contains an ``object`` entry (i.e. a detail view).
    """

    def get_context_data(self, *args, **kwargs):
        """Build the context as usual, then notify signal listeners of the
        viewed object (if any) along with the current request."""
        context = super(ObjectViewedMixin, self).get_context_data(*args, **kwargs)
        request = self.request
        viewed = context.get('object')
        if viewed:
            # Sender is the object's class so receivers can filter by model.
            object_viewed_signal.send(
                viewed.__class__, instance=viewed, request=request)
        return context
|
from .signals import object_viewed_signal
class ObjectViewedMixin(object):
def get_context_data(self, *args, **kwargs):
context = super(ObjectViewedMixin, self).get_context_data(
*args, **kwargs)
request = self.request
instance = context.get('object')
if instance:
object_viewed_signal.send(
instance.__class__, instance=instance, request=request)
return context
|
none
| 1
| 2.152002
| 2
|
|
mundo 1/des031.py
|
Pedroluis1/python
| 0
|
6626706
|
# Bus-ticket price calculator (Portuguese course exercise, kept verbatim).
# The "\033[...m" sequences are ANSI escape codes used purely for terminal colors.
dis = float(input("\033[34mQual vai ser a distância da sua viagem em km?\033[m "))
if dis <= 200:
    # Trips up to 200 km cost R$0.50 per km.
    print(f'\033[36mo preço da passagem custará \033[33;4mR${dis*0.50}\033[m')
else:
    # Longer trips get a discounted rate of R$0.45 per km.
    print(f'\033[36mo preço da passagem custará \033[33;4mR${dis*0.45}\033[m')
if dis < 50:
    # Extra joke message for very short trips (user-facing text, left as-is).
    print('\033[33;1mIIIIIHHHHH\033[31m ala vai viajar para a quadra do lado?\033[m \033[33mkkkkkk\033[m'
          ,8*'\n','\033[30mobs:\033[35mpiadinha sem graça\033[m')
|
dis = float(input("\033[34mQual vai ser a distância da sua viagem em km?\033[m "))
if dis <= 200:
print(f'\033[36mo preço da passagem custará \033[33;4mR${dis*0.50}\033[m')
else:
print(f'\033[36mo preço da passagem custará \033[33;4mR${dis*0.45}\033[m')
if dis < 50:
print('\033[33;1mIIIIIHHHHH\033[31m ala vai viajar para a quadra do lado?\033[m \033[33mkkkkkk\033[m'
,8*'\n','\033[30mobs:\033[35mpiadinha sem graça\033[m')
|
none
| 1
| 3.360762
| 3
|
|
backend/project/app/extension/history/refer.py
|
goodyttoor/tcl_v7
| 0
|
6626707
|
<gh_stars>0
from datetime import date, datetime
from typing import Optional
from sqlmodel import Field, SQLModel
class HistoryRefer(SQLModel, table=True):
    """Association row linking a history record to a refer record."""
    id: Optional[int] = Field(default=None, primary_key=True)
    history_id: int  # presumably FK to the history table — confirm schema
    refer_id: int  # presumably FK to Refer.id — confirm schema
    state: str  # free-form state marker; valid values not visible in this module
    # Audit columns (same pattern on every model in this module):
    created_at: datetime
    updated_at: datetime
    created_by: int
    updated_by: Optional[int] = None  # None until the row is first updated
class Refer(SQLModel, table=True):
    """A referral between a source user and a target user."""
    id: Optional[int] = Field(default=None, primary_key=True)
    # Source side of the referral:
    source_user_id: int
    source_accept: bool  # whether the source side has accepted
    source_detail: str
    # Target side of the referral:
    target_user_id: int
    target_accept: bool  # whether the target side has accepted
    target_detail: str
    refer_type_id: int  # presumably FK to a refer-type lookup table — confirm
    reschedule_times: int  # how many times this referral has been rescheduled
    state: str  # free-form state marker; valid values not visible here
    # Audit columns:
    created_at: datetime
    updated_at: datetime
    created_by: int
    updated_by: Optional[int] = None
class ReferProcedureMap(SQLModel, table=True):
    """Many-to-many link between referrals and procedures."""
    id: Optional[int] = Field(default=None, primary_key=True)
    refer_id: int  # presumably FK to Refer.id — confirm schema
    procedure_id: int  # presumably FK to a procedure table — confirm schema
    # Audit columns:
    created_at: datetime
    updated_at: datetime
    created_by: int
    updated_by: Optional[int] = None
class ReferReschedule(SQLModel, table=True):
    """One reschedule of a referral: the date it moved from and to."""
    id: Optional[int] = Field(default=None, primary_key=True)
    refer_id: int  # presumably FK to Refer.id — confirm schema
    from_date: date  # original date before the reschedule
    to_date: date  # new date after the reschedule
    # Audit columns:
    created_at: datetime
    updated_at: datetime
    created_by: int
    updated_by: Optional[int] = None
|
from datetime import date, datetime
from typing import Optional
from sqlmodel import Field, SQLModel
class HistoryRefer(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id: int
refer_id: int
state: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class Refer(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
source_user_id: int
source_accept: bool
source_detail: str
target_user_id: int
target_accept: bool
target_detail: str
refer_type_id: int
reschedule_times: int
state: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class ReferProcedureMap(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
refer_id: int
procedure_id: int
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class ReferReschedule(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
refer_id: int
from_date: date
to_date: date
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
|
none
| 1
| 2.492319
| 2
|
|
onnxruntime/python/tools/quantization/quant_utils.py
|
surepassio/onnxruntime
| 1
|
6626708
|
import onnx
from onnx import onnx_pb as onnx_proto
from enum import Enum
from pathlib import Path
__producer__ = "onnx.quantize"
__version__ = "0.1.0"
onnx_domain = "ai.onnx"  # standard ONNX operator set domain
ms_domain = "com.microsoft"  # Microsoft custom-op domain (QLinear* contrib ops)

# Maps ONNX TensorProto data-type enum codes to their human-readable names.
type_to_name = {
    1: "FLOAT",
    2: "UINT8",
    3: "INT8",
    4: "UINT16",
    5: "INT16",
    6: "INT32",
    7: "INT64",
    8: "STRING",
    9: "BOOL",
    10: "FLOAT16",
    11: "DOUBLE",
    12: "UINT32",
    13: "UINT64",
    14: "COMPLEX64",
    15: "COMPLEX128",
}
# Quantization mode
# IntegerOps: Use IntegerOps in quantized model. Only ConvInteger and MatMulInteger ops are supported now.
# QLinearOps: Use QLinearOps in quantized model. Only QLinearConv and QLinearMatMul ops are supported now.
class QuantizationMode():
    """Enumeration-style holder for the two supported quantization modes."""
    IntegerOps = 0
    QLinearOps = 1


# All non-callable, non-dunder attributes of QuantizationMode — i.e. the list
# of valid mode values — collected by introspection.
quantization_modes = [
    getattr(QuantizationMode, attr) for attr in dir(QuantizationMode)
    if not callable(getattr(QuantizationMode, attr)) and not attr.startswith("__")
]
class QuantizedValueType():
    """Origin of a quantized value: a graph input or an initializer tensor."""
    Input = 0
    Initializer = 1
class QuantType(Enum):
    """Signedness of the quantized integer representation."""
    QInt8 = 1
    QUInt8 = 2
class QuantizedInitializer:
    '''
    Represents a linearly quantized weight input from ONNX operators
    '''
    def __init__(self,
                 name,
                 initializer,
                 rmins,
                 rmaxs,
                 zero_points,
                 scales,
                 data=None,
                 quantized_data=None,
                 axis=None,
                 qType=onnx_proto.TensorProto.UINT8):
        """Capture the quantization parameters and payload of one initializer.

        :param name: name of the initializer in the ONNX graph.
        :param initializer: TensorProto initializer in the ONNX graph.
        :param rmins: list of minimum range for each axis.
        :param rmaxs: list of maximum range for each axis.
        :param zero_points: 1D tensor of zero points computed for each axis;
            scalar if axis is empty.
        :param scales: 1D tensor of scales computed for each axis; scalar if
            axis is empty.
        :param data: original data from the initializer TensorProto.
        :param quantized_data: weight-packed data derived from ``data``.
        :param axis: scalar selecting which dimension to weight-pack; if None,
            a single zero point/scale computed from a single rmin and rmax.
        :param qType: ONNX TensorProto element type of the quantized data.
        """
        self.name = name
        self.initializer = initializer  # TensorProto initializer in ONNX graph
        self.rmins = rmins  # List of minimum range for each axis
        self.rmaxs = rmaxs  # List of maximum range for each axis
        # 1D tensor of zero points computed for each axis. scalar if axis is empty
        self.zero_points = zero_points
        self.scales = scales  # 1D tensor of scales computed for each axis. scalar if axis is empty
        # BUG FIX: the defaults used to be mutable (`data=[]`, `quantized_data=[]`),
        # so every instance constructed without these arguments shared the same
        # two list objects — appending to one instance's data mutated all of
        # them. Use None sentinels and allocate a fresh list per instance.
        self.data = [] if data is None else data  # original data from initializer TensorProto
        self.quantized_data = [] if quantized_data is None else quantized_data  # weight-packed data
        # Scalar to specify which dimension in the initializer to weight pack.
        self.axis = axis
        # If empty, single zero point and scales computed from a single rmin and rmax
        self.qType = qType  # type of quantized data.
class QuantizedValue:
    '''
    Represents a linearly quantized value (input/output/initializer).
    '''
    def __init__(self,
                 name,
                 new_quantized_name,
                 scale_name,
                 zero_point_name,
                 quantized_value_type,
                 axis=None,
                 qType=onnx_proto.TensorProto.UINT8):
        # Name of the original (float) value in the graph.
        self.original_name = name
        # Name of the quantized replacement tensor.
        self.q_name = new_quantized_name
        self.scale_name = scale_name  # name of the scale tensor
        self.zp_name = zero_point_name  # name of the zero-point tensor
        # One of QuantizedValueType.{Input, Initializer}.
        self.value_type = quantized_value_type
        # Presumably the per-channel quantization axis (None = per-tensor),
        # mirroring QuantizedInitializer.axis — confirm at call sites.
        self.axis = axis
        self.qType = qType  # ONNX TensorProto element type of the quantized data
def _attribute_to_kwarg(attribute):
'''
Convert attribute to kwarg format for use with onnx.helper.make_node.
:parameter attribute: attribute in AttributeProto format.
:return: attribute in {key: value} format.
'''
if (attribute.type == 0):
raise ValueError('attribute {} does not have type specified.'.format(attribute.name))
# Based on attribute type definitions from AttributeProto
# definition in https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
if (attribute.type == 1):
value = attribute.f
elif (attribute.type == 2):
value = attribute.i
elif (attribute.type == 3):
value = attribute.s
elif (attribute.type == 4):
value = attribute.t
elif (attribute.type == 5):
value = attribute.g
elif (attribute.type == 6):
value = attribute.floats
elif (attribute.type == 7):
value = attribute.ints
elif (attribute.type == 8):
value = attribute.strings
elif (attribute.type == 9):
value = attribute.tensors
elif (attribute.type == 10):
value = attribute.graphs
else:
raise ValueError('attribute {} has unsupported type {}.'.format(attribute.name, attribute.type))
return {attribute.name: value}
def _find_by_name(item_name, item_list):
'''
Helper function to find item by name in a list.
parameter item_name: name of the item.
parameter item_list: list of items.
return: item if found. None otherwise.
'''
items = [item for item in item_list if item.name == item_name]
return items[0] if len(items) > 0 else None
def _get_elem_index(elem_name, elem_list):
'''
Helper function to return index of an item in a node list
'''
elem_idx = -1
for i in range(0, len(elem_list)):
if elem_list[i] == elem_name:
elem_idx = i
return elem_idx
def _get_mul_node(inputs, output, name):
    '''
    Helper function to create a Mul node.
    parameter inputs: list of input names.
    parameter output: output name.
    parameter name: name of the node.
    return: Mul node in NodeProto format.
    '''
    # `name` is passed positionally as make_node's node-name argument; the
    # single output is wrapped in a list as the helper expects.
    return onnx.helper.make_node("Mul", inputs, [output], name)
def _generate_identified_filename(filename: Path, identifier: str) -> Path:
'''
Helper function to generate a identifiable filepath by concatenating the given identifier as a suffix.
'''
return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)
|
import onnx
from onnx import onnx_pb as onnx_proto
from enum import Enum
from pathlib import Path
__producer__ = "onnx.quantize"
__version__ = "0.1.0"
onnx_domain = "ai.onnx"
ms_domain = "com.microsoft"
type_to_name = {
1: "FLOAT",
2: "UINT8",
3: "INT8",
4: "UINT16",
5: "INT16",
6: "INT32",
7: "INT64",
8: "STRING",
9: "BOOL",
10: "FLOAT16",
11: "DOUBLE",
12: "UINT32",
13: "UINT64",
14: "COMPLEX64",
15: "COMPLEX128",
}
# Quantization mode
# IntegerOps: Use IntegerOps in quantized model. Only ConvInteger and MatMulInteger ops are supported now.
# QLinearOps: Use QLinearOps in quantized model. Only QLinearConv and QLinearMatMul ops are supported now.
class QuantizationMode():
IntegerOps = 0
QLinearOps = 1
quantization_modes = [
getattr(QuantizationMode, attr) for attr in dir(QuantizationMode)
if not callable(getattr(QuantizationMode, attr)) and not attr.startswith("__")
]
class QuantizedValueType():
Input = 0
Initializer = 1
class QuantType(Enum):
QInt8 = 1
QUInt8 = 2
class QuantizedInitializer:
'''
Represents a linearly quantized weight input from ONNX operators
'''
def __init__(self,
name,
initializer,
rmins,
rmaxs,
zero_points,
scales,
data=[],
quantized_data=[],
axis=None,
qType=onnx_proto.TensorProto.UINT8):
self.name = name
self.initializer = initializer # TensorProto initializer in ONNX graph
self.rmins = rmins # List of minimum range for each axis
self.rmaxs = rmaxs # List of maximum range for each axis
# 1D tensor of zero points computed for each axis. scalar if axis is empty
self.zero_points = zero_points
self.scales = scales # 1D tensor of scales computed for each axis. scalar if axis is empty
self.data = data # original data from initializer TensorProto
self.quantized_data = quantized_data # weight-packed data from data
# Scalar to specify which dimension in the initializer to weight pack.
self.axis = axis
# If empty, single zero point and scales computed from a single rmin and rmax
self.qType = qType # type of quantized data.
class QuantizedValue:
'''
Represents a linearly quantized value (input\output\intializer)
'''
def __init__(self,
name,
new_quantized_name,
scale_name,
zero_point_name,
quantized_value_type,
axis=None,
qType=onnx_proto.TensorProto.UINT8):
self.original_name = name
self.q_name = new_quantized_name
self.scale_name = scale_name
self.zp_name = zero_point_name
self.value_type = quantized_value_type
self.axis = axis
self.qType = qType
def _attribute_to_kwarg(attribute):
'''
Convert attribute to kwarg format for use with onnx.helper.make_node.
:parameter attribute: attribute in AttributeProto format.
:return: attribute in {key: value} format.
'''
if (attribute.type == 0):
raise ValueError('attribute {} does not have type specified.'.format(attribute.name))
# Based on attribute type definitions from AttributeProto
# definition in https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
if (attribute.type == 1):
value = attribute.f
elif (attribute.type == 2):
value = attribute.i
elif (attribute.type == 3):
value = attribute.s
elif (attribute.type == 4):
value = attribute.t
elif (attribute.type == 5):
value = attribute.g
elif (attribute.type == 6):
value = attribute.floats
elif (attribute.type == 7):
value = attribute.ints
elif (attribute.type == 8):
value = attribute.strings
elif (attribute.type == 9):
value = attribute.tensors
elif (attribute.type == 10):
value = attribute.graphs
else:
raise ValueError('attribute {} has unsupported type {}.'.format(attribute.name, attribute.type))
return {attribute.name: value}
def _find_by_name(item_name, item_list):
'''
Helper function to find item by name in a list.
parameter item_name: name of the item.
parameter item_list: list of items.
return: item if found. None otherwise.
'''
items = [item for item in item_list if item.name == item_name]
return items[0] if len(items) > 0 else None
def _get_elem_index(elem_name, elem_list):
'''
Helper function to return index of an item in a node list
'''
elem_idx = -1
for i in range(0, len(elem_list)):
if elem_list[i] == elem_name:
elem_idx = i
return elem_idx
def _get_mul_node(inputs, output, name):
'''
Helper function to create a Mul node.
parameter inputs: list of input names.
parameter output: output name.
parameter name: name of the node.
return: Mul node in NodeProto format.
'''
return onnx.helper.make_node("Mul", inputs, [output], name)
def _generate_identified_filename(filename: Path, identifier: str) -> Path:
'''
Helper function to generate a identifiable filepath by concatenating the given identifier as a suffix.
'''
return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)
|
en
| 0.543494
|
# Quantization mode # IntegerOps: Use IntegerOps in quantized model. Only ConvInteger and MatMulInteger ops are supported now. # QLinearOps: Use QLinearOps in quantized model. Only QLinearConv and QLinearMatMul ops are supported now. Represents a linearly quantized weight input from ONNX operators # TensorProto initializer in ONNX graph # List of minimum range for each axis # List of maximum range for each axis # 1D tensor of zero points computed for each axis. scalar if axis is empty # 1D tensor of scales computed for each axis. scalar if axis is empty # original data from initializer TensorProto # weight-packed data from data # Scalar to specify which dimension in the initializer to weight pack. # If empty, single zero point and scales computed from a single rmin and rmax # type of quantized data. Represents a linearly quantized value (input\output\intializer) Convert attribute to kwarg format for use with onnx.helper.make_node. :parameter attribute: attribute in AttributeProto format. :return: attribute in {key: value} format. # Based on attribute type definitions from AttributeProto # definition in https://github.com/onnx/onnx/blob/master/onnx/onnx.proto Helper function to find item by name in a list. parameter item_name: name of the item. parameter item_list: list of items. return: item if found. None otherwise. Helper function to return index of an item in a node list Helper function to create a Mul node. parameter inputs: list of input names. parameter output: output name. parameter name: name of the node. return: Mul node in NodeProto format. Helper function to generate a identifiable filepath by concatenating the given identifier as a suffix.
| 2.218403
| 2
|
samples/polybench/jacobi-2d.py
|
Walon1998/dace
| 227
|
6626709
|
<reponame>Walon1998/dace
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
try:
import polybench
except ImportError:
polybench = None
# Symbolic problem dimensions, bound to concrete values at run time.
N = dace.symbol('N')
tsteps = dace.symbol('tsteps')

#datatypes = [dace.float64, dace.int32, dace.float32]
datatype = dace.float64

# Dataset sizes
# The five standard PolyBench problem sizes, smallest to largest.
sizes = [{
    tsteps: 20,
    N: 30
}, {
    tsteps: 40,
    N: 90
}, {
    tsteps: 100,
    N: 250
}, {
    tsteps: 500,
    N: 1300
}, {
    tsteps: 1000,
    N: 2800
}]

# Argument descriptors for the program: two NxN arrays (A and B).
args = [
    ([N, N], datatype),
    ([N, N], datatype)  #, N, tsteps
]
@dace.program(datatype[N, N], datatype[N, N])  #, dace.int32, dace.int32)
def jacobi2d(A, B):  #, N, tsteps):
    """DaCe program: `tsteps` iterations of the 2-D Jacobi stencil.

    Each time step applies the 5-point average A->B, then B->A, over the
    interior points [1, N-1) x [1, N-1); `<<` / `>>` are DaCe memlet
    read/write annotations, not Python shifts.
    """
    for t in range(tsteps):

        @dace.map
        def a(i: _[1:N - 1], j: _[1:N - 1]):
            # Read the 5-point stencil neighbourhood of A...
            a1 << A[i, j]
            a2 << A[i, j - 1]
            a3 << A[i, j + 1]
            a4 << A[i + 1, j]
            a5 << A[i - 1, j]
            # ...and write the averaged value into B.
            b >> B[i, j]
            b = 0.2 * (a1 + a2 + a3 + a4 + a5)

        @dace.map
        def b(i: _[1:N - 1], j: _[1:N - 1]):
            # Second half-step: same stencil reading B, writing back to A.
            a1 << B[i, j]
            a2 << B[i, j - 1]
            a3 << B[i, j + 1]
            a4 << B[i + 1, j]
            a5 << B[i - 1, j]
            b >> A[i, j]
            b = 0.2 * (a1 + a2 + a3 + a4 + a5)
def init_array(A, B):  #, N, tsteps):
    """Fill A and B with the PolyBench reference initial values."""
    n = N.get()  # N is a dace.symbol; .get() reads its currently bound value
    for i in range(n):
        for j in range(n):
            A[i, j] = datatype(i * (j + 2) + 2) / n
            B[i, j] = datatype(i * (j + 3) + 3) / n
if __name__ == '__main__':
    if polybench:
        # Run under the PolyBench harness, which validates/benchmarks output
        # array A (argument index 0).
        polybench.main(sizes, args, [(0, 'A')], init_array, jacobi2d)
    else:
        # Standalone run: bind the third ("medium") dataset size to the
        # symbols, then initialize and execute once.
        [k.set(v) for k, v in sizes[2].items()]
        init_array(*args)
        jacobi2d(*args)
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
try:
import polybench
except ImportError:
polybench = None
N = dace.symbol('N')
tsteps = dace.symbol('tsteps')
#datatypes = [dace.float64, dace.int32, dace.float32]
datatype = dace.float64
# Dataset sizes
sizes = [{
tsteps: 20,
N: 30
}, {
tsteps: 40,
N: 90
}, {
tsteps: 100,
N: 250
}, {
tsteps: 500,
N: 1300
}, {
tsteps: 1000,
N: 2800
}]
args = [
([N, N], datatype),
([N, N], datatype) #, N, tsteps
]
@dace.program(datatype[N, N], datatype[N, N]) #, dace.int32, dace.int32)
def jacobi2d(A, B): #, N, tsteps):
for t in range(tsteps):
@dace.map
def a(i: _[1:N - 1], j: _[1:N - 1]):
a1 << A[i, j]
a2 << A[i, j - 1]
a3 << A[i, j + 1]
a4 << A[i + 1, j]
a5 << A[i - 1, j]
b >> B[i, j]
b = 0.2 * (a1 + a2 + a3 + a4 + a5)
@dace.map
def b(i: _[1:N - 1], j: _[1:N - 1]):
a1 << B[i, j]
a2 << B[i, j - 1]
a3 << B[i, j + 1]
a4 << B[i + 1, j]
a5 << B[i - 1, j]
b >> A[i, j]
b = 0.2 * (a1 + a2 + a3 + a4 + a5)
def init_array(A, B): #, N, tsteps):
n = N.get()
for i in range(n):
for j in range(n):
A[i, j] = datatype(i * (j + 2) + 2) / n
B[i, j] = datatype(i * (j + 3) + 3) / n
if __name__ == '__main__':
if polybench:
polybench.main(sizes, args, [(0, 'A')], init_array, jacobi2d)
else:
[k.set(v) for k, v in sizes[2].items()]
init_array(*args)
jacobi2d(*args)
|
en
| 0.373733
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved. #datatypes = [dace.float64, dace.int32, dace.float32] # Dataset sizes #, N, tsteps #, dace.int32, dace.int32) #, N, tsteps): #, N, tsteps):
| 2.025683
| 2
|
Integrations/python/test/testFigureWrapper.py
|
chrisabidin/deephaven-core
| 55
|
6626710
|
#
# Copyright (c) 2016-2021 Deephaven Data Labs and Patent Pending
#
##############################################################################
# NOTE: the jvm should have been initialized, or this test will certainly fail
##############################################################################
import sys
import jpy
from deephaven import TableTools, Aggregation, Plot, Calendars
from deephaven.Plot import figure_wrapper
_JArrayList = jpy.get_type("java.util.ArrayList")
if sys.version_info[0] < 3:
import unittest2 as unittest
# not part of the standard library, installed via pip (or the like)
# it provides backward compatibility with python3 style subTest context manager (handy for complex tests)
else:
import unittest
class TestFigureWrapper(unittest.TestCase):
"""
Test cases for the deephaven.Plot.figure_wrapper module
"""
@classmethod
def setUpClass(cls):
    """
    Inherited method allowing initialization of test environment
    """
    # FIX: unittest invokes setUpClass on the class object, so the first
    # parameter is the class — it was misleadingly named `self` before.
    # Build a shared 200-row table: two symbols interleaved by row parity,
    # each with a drifting synthetic price.
    cls.table = TableTools.emptyTable(200).update(
        "timestamp=new DateTime((long)(i/2)*1000000000)",
        "Sym=((i%2 == 0) ? `MSFT` : `AAPL`)",
        "price=(double)((i%2 == 0) ? 100.0 + (i/2) + 5*Math.random() : 250.0 + (i/2) + 10*Math.random())")
# TODO: maybe we should test the direct data plotting functionality? vs table reference?
def testBasicMethods(self):
    """
    Test suite for some basic FigureWrapper methods: construction, show(),
    getWidget(), and wrapping an existing figure.
    """
    figure1, figure2, figure3, figure4 = None, None, None, None
    with self.subTest(msg="FigureWrapper()"):
        figure1 = figure_wrapper.FigureWrapper()
    with self.subTest(msg="FigureWrapper(int, int)"):
        figure2 = figure_wrapper.FigureWrapper(1, 2)
    with self.subTest(msg="FigureWrapper.show()"):
        figure4 = figure2.show()  # NB: figure3.figure_ is a FigureWidget versus Figure...
    with self.subTest(msg="FigureWrapper.getWidget()"):
        # NB: method name should have been switched to getWidget() from getwidget()
        self.assertIsNone(figure2.getWidget())  # not shown -> no widget yet
        self.assertIsNotNone(figure4.getWidget())  # shown -> widget exists
    # TODO: I'm fairly sure that this is not working as I would hope...I can't call figure3.show()
    with self.subTest(msg="FigureWrapper(figure=figure)"):
        figure3 = figure_wrapper.FigureWrapper(figure=figure2)
    # tidy up by destroying these objects - probably only necessary after show, but JIC
    del figure1, figure2, figure3, figure4
    # NB: setting to None should also do it, where that is more convenient
def testBaseFigure(self):
    """
    Test suite for methods inherited from BaseFigure: title styling, update
    interval, chart creation/selection/removal, and series removal.
    """
    figure = figure_wrapper.FigureWrapper(2, 2)
    with self.subTest(msg="figureTitle(string)"):
        figure = figure.figureTitle("Super Title")
    with self.subTest(msg="figureTitleFont(string, string, int)"):
        figure = figure.figureTitleFont("Arial", "B", 24)
    with self.subTest(msg="figureTitleColor(string)"):
        figure = figure.figureTitleColor("#FF0000")  # named color or RGB hex-string
    with self.subTest(msg="figureTitleColor(Paint)"):
        figure = figure.figureTitleColor(Plot.colorRGB(0.0, 1.0, 0.0))  # create an RGB color using plot convenience function
    with self.subTest(msg="updateInterval(long)"):
        figure = figure.updateInterval(1000)  # in milliseconds
    # Maybe the wrapping for these is dumb?
    chart1, chart2, chart3 = None, None, None
    with self.subTest(msg="newChart()"):
        chart1 = figure.newChart()
    with self.subTest(msg="newChart(int)"):
        chart2 = figure.newChart(0)
    with self.subTest(msg="newChart(int, int)"):
        chart3 = figure.newChart(0, 1)
    with self.subTest(msg="chart(int)"):
        chart1 = chart2.chart(0)
    with self.subTest(msg="chart(int, int)"):
        chart1 = chart3.chart(0, 1)
    with self.subTest(msg="removeChart(int, int)"):
        chart1 = chart3.removeChart(0, 1)
    with self.subTest(msg="removeChart(int)"):
        chart1 = chart2.removeChart(0)
    del chart1, chart2, chart3
    # I have to put a series in here
    figure = figure.plot("Microsoft", self.table.where("Sym=`MSFT`"), "timestamp", "price")
    with self.subTest(msg="figureRemoveSeries(*string)"):
        figure = figure.figureRemoveSeries("Microsoft")
    del figure
def testPlottingMethods(self):
    """
    Test suite for the plotting methods inherited from Axes.

    Each subtest starts from a fresh one-chart figure so successive plot
    styles do not interact.
    """
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("plot"):
        figure = figure.plot("XY Series", self.table.where("Sym=`MSFT`"), "timestamp", "price")
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("catPlot"):
        figure = figure.catPlot("Category", self.table, "Sym", "price")
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("histPlot"):
        figure = figure.histPlot("Histogram", self.table.where("Sym=`MSFT`"), "price", 10)
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("catHistPlot"):
        figure = figure.catHistPlot("Category Histogram", self.table, "Sym")
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("piePlot"):
        figure = figure.piePlot("Pie", self.table.aggBy(Aggregation.AggAvg("price"), "Sym"), "Sym", "price")
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("ohlcPlot"):
        # dumbest ohlc ever
        figure = figure.ohlcPlot("OHLC", self.table.where("Sym=`MSFT`"), "timestamp", "price", "price", "price", "price")
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("errorBarX"):
        figure = figure.errorBarX("Error X", self.table.where("Sym=`MSFT`"), "timestamp", "price", "timestamp", "timestamp")
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("errorBarY"):
        figure = figure.errorBarY("Error Y", self.table.where("Sym=`MSFT`"), "timestamp", "price", "price", "price")
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("errorBarXY"):
        figure = figure.errorBarXY("Error XY", self.table.where("Sym=`MSFT`"), "timestamp", "timestamp", "timestamp", "price", "price", "price")
    figure = figure_wrapper.FigureWrapper(1, 1)
    # Build named min/avg/max aggregations, adapted into the Java ArrayList
    # that the aggBy API expects.
    aggs = [
        Aggregation.AggAvg("avgPrice=price"),
        Aggregation.AggMin("minPrice=price"),
        Aggregation.AggMax("maxPrice=price")]
    j_agg_list = _JArrayList()
    for agg in aggs:
        j_agg_list.add(agg)
    with self.subTest("catErrorBar"):
        figure = figure.catErrorBar("Cat Error Bar",
                                    self.table.aggBy(j_agg_list, "Sym"),
                                    "Sym", "avgPrice", "minPrice", "maxPrice")
    del figure
def testAxesMethods(self):
    """
    Test suite for methods for non-plotting methods inherited from Axes:
    axis fetchers, twin axes, and the x*/y* styling and range setters.
    """
    # TODO: x/yTransform(AxisTransform)?, x/yBusinessTime(BusinessCalendar)?
    figure = figure_wrapper.FigureWrapper()  # is there an axes at this point?
    axis = None
    # maybe the wrapping for these is dumb?
    with self.subTest(msg="axis fetchers"):
        axis = figure.axis(0)
        axis = figure.xAxis()
        axis = figure.yAxis()
    del axis
    axes = None
    # maybe the wrapping for these is dumb?
    with self.subTest(msg="twin axis methods"):
        axes = figure.twin()
        axes = figure.twin("new")
        axes = figure.twin(0)
        axes = figure.twin("new", 0)
        axes = figure.twinX()
        axes = figure.twinX("new")
        axes = figure.twinY()
        axes = figure.twinY("new")
    del axes
    with self.subTest(msg="axis formatter methods"):
        figure = figure.xFormatPattern("###,###.00").yFormatPattern("###,###.00")
    with self.subTest(msg="axis color methods"):
        figure = figure.xColor("#202020").yColor("#202020")
        figure.xColor(Plot.colorRGB(1.0, 0.0, 0.0)).yColor(Plot.colorRGB(1.0, 0.0, 0.0))
    with self.subTest(msg="axis labelling methods"):
        figure = figure.xLabel("x axis").yLabel("y axis")
    with self.subTest(msg="axis label font methods"):
        figure = figure.xLabelFont("Arial", "P", 11).yLabelFont("Arial", "P", 11)
    with self.subTest(msg="axis tick font methods"):
        figure = figure.xTicksFont("Arial", "I", 9).yTicksFont("Arial", "I", 9)
    with self.subTest(msg="axis range methods"):
        figure = figure.xRange(1.0, 10.0).yRange(1.0, 10.0)
        figure.xMin(1.0).yMin(1.0)
        figure.xMax(10.0).yMax(10.0)
    with self.subTest(msg="axis ticks methods"):
        figure = figure.xTicks(1.0).yTicks(1.0)
        figure.xTicks([1.0, 2.5, 5.0, 7.5, 10.0]).yTicks([1.0, 2.5, 5.0, 7.5, 10.0])
    with self.subTest(msg="tick visibility methods"):
        figure = figure.xTicksVisible(True).yTicksVisible(True)
        figure = figure.xMinorTicksVisible(True).yMinorTicksVisible(True)
    with self.subTest(msg="minor ticks"):
        figure = figure.xMinorTicks(2).yMinorTicks(2)
    with self.subTest(msg="tick label angles"):
        figure = figure.xTickLabelAngle(45.0).yTickLabelAngle(45.0)
    with self.subTest(msg="axis business time methods"):
        figure.xBusinessTime().yBusinessTime()
    with self.subTest(msg="axis log methods"):
        figure.xLog().yLog()
    with self.subTest(msg="axis inversion methods"):
        figure = figure.xInvert().yInvert()
        figure = figure.xInvert(True).yInvert(True)
    # I have to put a series in here
    figure = figure.plot("Microsoft", self.table.where("Sym=`MSFT`"), "timestamp", "price")
    with self.subTest(msg="plotStyle"):
        figure = figure.plotStyle("Area")  # does this just apply the style to all applicable series? Or?
    # maybe the wrapping for these is dumb?
    series = None
    with self.subTest(msg="series(int)"):
        series = figure.series(0)  # I'm guessing that the int id starts at 0?
    with self.subTest(msg="series(string"):
        series = figure.series("Microsoft")
    del series
    with self.subTest(msg="axesRemoveSeries(*string)"):
        figure = figure.axesRemoveSeries("Microsoft")
    del figure
@unittest.skip("These all fail, because no axes is selected. Not presently sure how to resolve?")
def testAxisMethods(self):
    """
    Test suite for methods inherited from Axis - do these apply said methods to every axis? Seems silly.

    Smoke-tests each Axis method wrapper in its own subTest; the figure is
    rebound to the returned wrapper so later calls chain off the latest state.
    Currently skipped: the calls fail because no axis is selected on the figure.
    """
    figure = figure_wrapper.FigureWrapper()
    # How do I get it to select an axes?
    with self.subTest(msg="axisColor(string)"):
        figure = figure.axisColor("#000000")
    with self.subTest(msg="axisColor(Paint)"):
        figure = figure.axisColor(Plot.colorRGB(0, 0, 255))
    with self.subTest(msg="axisFormatPattern()"):
        figure = figure.axisFormat("###,###.00")  # decimal formatting pattern
    with self.subTest(msg="axisLabel(string)"):
        figure = figure.axisLabel("axis")  # axis label text
    with self.subTest(msg="axisLabelFont(string, string, int)"):
        figure = figure.axisLabelFont("Arial", "P", 11)
    with self.subTest(msg="businessTime()"):
        figure = figure.businessTime()
    with self.subTest(msg="businessTime(calendar)"):
        figure = figure.businessTime(Calendars.calendar())
    with self.subTest(msg="min(double)"):
        figure = figure.min(1.0)
    with self.subTest(msg="max(double)"):
        figure = figure.max(10.0)
    with self.subTest(msg="range(double, double)"):
        figure = figure.range(1.0, 10.0)
    with self.subTest(msg="ticks(double)"):
        figure = figure.ticks(1.0)
    with self.subTest(msg="ticks(double[])"):
        figure = figure.ticks([1.0, 2.5, 5.0, 7.5, 10.0])
    with self.subTest(msg="tickFont(string, string, int)"):
        figure = figure.ticksFont("Arial", "I", 9)
    with self.subTest(msg="ticksVisible(boolean)"):
        figure = figure.ticksVisible(True)
    with self.subTest(msg="tickLabelAngle(double)"):
        figure = figure.tickLabelAngle(45.0)  # I'm guessing degrees?
    with self.subTest(msg="minorTicks(int)"):
        figure = figure.minorTicks(2)
    with self.subTest(msg="minorTicksVisible(boolean)"):
        figure = figure.minorTicksVisible(True)
    with self.subTest(msg="log()"):
        figure = figure.log()
    # TODO: where would I get an AxisTransform object?
    # with self.subTest(msg="transform(AxisTransform)"):
    #     figure = figure.transform(what)
    with self.subTest(msg="invert()"):
        figure = figure.invert()
    with self.subTest(msg="invert(boolean)"):
        figure = figure.invert(False)
    del figure
def testChartMethods(self):
    """
    Test suite for methods inherited from Chart

    Exercises chart-level styling (title, spans, legend), axes creation and
    lookup, plot orientation, and series removal on a 2x2 figure.
    """
    figure = figure_wrapper.FigureWrapper(2, 2)
    with self.subTest(msg="chartTitle(string)"):
        figure = figure.chartTitle("Chart Title")
    with self.subTest(msg="chartTitleColor(string"):
        figure = figure.chartTitleColor("BLUE")
    with self.subTest(msg="chartTitleColor(Paint)"):
        figure = figure.chartTitleColor(Plot.colorRGB(0, 0, 255))
    with self.subTest(msg="chartTitleFont(string, string, int)"):
        figure = figure.chartTitleFont("Arial", "B", 20)
    with self.subTest(msg="span(int, int"):
        figure.span(2, 2)
    with self.subTest(msg="colSpan(int)"):
        figure.colSpan(2)
    with self.subTest(msg="rowSpan(int)"):
        figure.rowSpan(2)
    axes = None
    # maybe the wrapping for these is dumb? Should be returning an axes reference?
    with self.subTest(msg="newAxes()"):
        axes = figure.newAxes()
    with self.subTest(msg="newAxes(string)"):
        axes = figure.newAxes("new_axis")
    with self.subTest(msg="newAxes(int)"):
        axes = figure.newAxes(2)
    with self.subTest(msg="newAxes(string, int)"):
        axes = figure.newAxes("new_axis", 2)
    with self.subTest(msg="axes(string)"):
        axes.axes("new_axis")
    with self.subTest(msg="axes(int)"):
        axes.axes(0)  # I'm assuming that 0 will always work?
    del axes
    # TODO: what are the possibilities here? I'm guessing ["horizontal", "vertical"]? Documentation?
    with self.subTest(msg="plotOrientation(string)"):
        figure = figure.plotOrientation("vertical")
    # I have to put a series in here
    figure = figure.plot("Microsoft", self.table.where("Sym=`MSFT`"), "timestamp", "price")
    with self.subTest(msg="legendVisible(boolean)"):
        figure = figure.legendVisible(True)
    with self.subTest(msg="legendFont(string, string, int)"):
        figure = figure.legendFont("Arial", "P", 8)
    with self.subTest(msg="legendColor(string)"):
        # I'm guessing that this is the background color?
        figure = figure.legendColor("#A0A0A0")
    with self.subTest(msg="legendColor(Paint)"):
        figure = figure.legendColor(Plot.colorRGB(200, 200, 200))
    with self.subTest(msg="chartRemoveSeries(*string)"):
        figure.chartRemoveSeries("Microsoft")
    del figure
def testDataSeriesMethods(self):
    """
    Test suite for methods inherited from DataSeries

    Builds a single XY series from the MSFT rows of the shared fixture table
    and smoke-tests each per-series styling wrapper in its own subTest.
    """
    # TODO: pointColorByY(SerializableFunction)?, pointColorByY(Closure)?
    figure = Plot.plot("Microsoft", self.table.where("Sym=`MSFT`"), "timestamp", "price")
    with self.subTest(msg="linesVisible(boolean)"):
        figure = figure.linesVisible(True)
    with self.subTest(msg="lineColor(Paint)"):
        figure = figure.lineColor(Plot.colorRGB(0.2, 1.0, 0.2))
    with self.subTest(msg="lineStyle(LineStyle)"):
        figure = figure.lineStyle(Plot.lineStyle(4, 4))
    with self.subTest(msg="pointsVisible(boolean)"):
        figure = figure.pointsVisible(True)
    with self.subTest(msg="pointSize(double)"):
        figure = figure.pointSize(2.0)
    with self.subTest(msg="pointLabel(object)"):
        figure = figure.pointLabel("label")
    with self.subTest(msg="pointLabelFormat(string)"):
        figure = figure.pointLabelFormat("{0}: ({1}, {2})")
    with self.subTest(msg="pointShape(string)"):
        figure = figure.pointShape("CIRCLE")
    with self.subTest(msg="seriesColor(Paint)"):
        figure = figure.seriesColor(Plot.colorRGB(0.1, 0.1, 0.1))
    with self.subTest(msg="pointColor(Paint)"):
        figure = figure.pointColor(Plot.colorRGB(1.0, 0.0, 0.0))
    with self.subTest(msg="gradientVisible(boolean)"):
        figure.gradientVisible(False)
    with self.subTest(msg="toolTipPattern(string)"):
        figure = figure.toolTipPattern("###,###.00")
    with self.subTest(msg="xToolTipPattern(string)"):
        figure = figure.xToolTipPattern("###,###.00")
    with self.subTest(msg="yToolTipPattern(string)"):
        figure = figure.yToolTipPattern("###,###.00")
    del figure
@unittest.skip("what to do?")
def testCategoryDataseriesMethods(self):
    """
    Test suite for methods inherited from CategoryDataSeries - bah...

    Placeholder only; intentionally skipped until a testing approach is chosen.
    """
    # TODO: this is terrible
    pass
@unittest.skip("what to do?")
def testXYDataSeriesMethods(self):
    """
    Test suite for methods inherited from XYDataSeries - bah...

    Placeholder only; intentionally skipped until a testing approach is chosen.
    """
    # TODO: various extensions of pointSize(*args), pointColor(*args), pointLabel(*args), pointShape(*args)
    pass
@unittest.skip("These all fail with predictable error message. Wrapping appears to be correct, but I'm calling on"
               "something inappropriate. Not presently sure how to resolve?")
def testMultiSeries(self):
    """
    Test suite for methods inherited from MultiSeries - bah...

    Plots two series (MSFT, AAPL) and exercises the keyed (*keys) overloads of
    the styling methods. Skipped: the underlying series type rejects the
    keyed overloads (see the UnsupportedOperationException note below).
    """
    # NB: the error message:
    # java.lang.UnsupportedOperationException: Series type does not support this method.
    # seriesType=class io.deephaven.plot.datasets.xy.XYDataSeriesTableArray
    # method='@Override public FigureImpl pointsVisible( java.lang.Boolean visible, java.lang.Object... keys )'
    # TODO: seriesNamingFunction(*args)?,pointColorByY(func, *keys)?
    # TODO: a ton of other call signatures for basically XYDataSeriesMethods
    figure = Plot.plot("Microsoft", self.table.where("Sym=`MSFT`"), "timestamp", "price")\
        .plot("Apple", self.table.where("Sym=`AAPL`"), "timestamp", "price")
    with self.subTest(msg="gradientVisible(boolean, *keys)"):
        figure = figure.gradientVisible(True, "Microsoft")
    with self.subTest(msg="lineColor(Paint/int/string, *keys)"):
        figure = figure.lineColor("RED", "Apple")
    with self.subTest(msg="lineStyle(LineStyle, *keys)"):
        figure = figure.lineStyle(Plot.lineStyle(4.0, 4.0), "Microsoft", "Apple")
    with self.subTest(msg="linesVisible(boolean, *keys)"):
        figure = figure.linesVisible(True, "Microsoft", "Apple")
    with self.subTest(msg="pointColor(Paint/int/string, *keys)"):
        figure = figure.pointColor("BLUE", "Microsoft", "Apple")
    with self.subTest(msg="pointLabel(object, *keys)"):
        figure = figure.pointLabel("label", "Microsoft", "Apple")
    with self.subTest(msg="pointLabelFormat(string, *keys)"):
        figure = figure.pointLabelFormat("{0}: ({1}, {2})", "Microsoft", "Apple")
    with self.subTest(msg="pointShape(string, *keys)"):
        figure = figure.pointShape("SQUARE", "Microsoft", "Apple")
    with self.subTest(msg="pointSize(double, *keys)"):
        figure = figure.pointSize(2.0, "Microsoft", "Apple")
    with self.subTest(msg="pointsVisible(boolean, *keys)"):
        figure = figure.pointsVisible(True, "Microsoft", "Apple")
    with self.subTest(msg="seriesColor(Paint/int/string, *keys)"):
        figure = figure.seriesColor(Plot.colorRGB(255, 0, 0), "Microsoft", "Apple")
    with self.subTest(msg="tool tips"):
        figure = figure.toolTipPattern("###,###.00", "Apple")\
            .xToolTipPattern("###,###.00", "Apple")\
            .yToolTipPattern("###,###.00", "Apple")
    with self.subTest(msg="group(int, *keys)"):
        figure = figure.group(0, "Microsoft", "Apple")
    del figure
|
#
# Copyright (c) 2016-2021 Deephaven Data Labs and Patent Pending
#
##############################################################################
# NOTE: the jvm should have been initialized, or this test will certainly fail
##############################################################################
import sys
import jpy
from deephaven import TableTools, Aggregation, Plot, Calendars
from deephaven.Plot import figure_wrapper
_JArrayList = jpy.get_type("java.util.ArrayList")
if sys.version_info[0] < 3:
import unittest2 as unittest
# not part of the standard library, installed via pip (or the like)
# it provides backward compatibility with python3 style subTest context manager (handy for complex tests)
else:
import unittest
class TestFigureWrapper(unittest.TestCase):
"""
Test cases for the deephaven.Plot.figure_wrapper module
"""
@classmethod
def setUpClass(cls):
    """
    Inherited method allowing initialization of test environment.

    Builds a 200-row fixture table with alternating MSFT/AAPL rows — a
    half-second timestamp grid, the symbol, and a pseudo-random price per
    symbol — and stores it on the class so every test method shares it.
    """
    # Idiom fix: the first parameter of a @classmethod is conventionally
    # named cls, not self (behavior is identical — unittest passes the class).
    cls.table = TableTools.emptyTable(200).update("timestamp=new DateTime((long)(i/2)*1000000000)",
                                                  "Sym=((i%2 == 0) ? `MSFT` : `AAPL`)",
                                                  "price=(double)((i%2 == 0) ? 100.0 + (i/2) + 5*Math.random() : 250.0 + (i/2) + 10*Math.random())")
# TODO: maybe we should test the direct data plotting functionality? vs table reference?
def testBasicMethods(self):
    """
    Test suite for some basic FigureWrapper methods

    Covers construction (default, (rows, cols), and from an existing figure),
    show(), and getWidget().
    """
    figure1, figure2, figure3, figure4 = None, None, None, None
    with self.subTest(msg="FigureWrapper()"):
        figure1 = figure_wrapper.FigureWrapper()
    with self.subTest(msg="FigureWrapper(int, int)"):
        figure2 = figure_wrapper.FigureWrapper(1, 2)
    with self.subTest(msg="FigureWrapper.show()"):
        figure4 = figure2.show()  # NB: figure4.figure_ is a FigureWidget versus Figure...
    with self.subTest(msg="FigureWrapper.getWidget()"):
        # NB: method name should have been switched to getWidget() from getwidget()
        # getWidget() is None before show() (figure2) and populated after (figure4).
        self.assertIsNone(figure2.getWidget())
        self.assertIsNotNone(figure4.getWidget())
    # TODO: I'm fairly sure that this is not working as I would hope...I can't call figure3.show()
    with self.subTest(msg="FigureWrapper(figure=figure)"):
        figure3 = figure_wrapper.FigureWrapper(figure=figure2)
    # tidy up by destroying these objects - probably only necessary after show, but JIC
    del figure1, figure2, figure3, figure4
    # NB: setting to None should also do it, where that is more convenient
# NB: setting to None should also do it, where that is more convenient
def testBaseFigure(self):
    """
    Test suite for methods inherited from BaseFigure

    Exercises figure-level styling (title, font, color, update interval),
    chart creation/lookup/removal on a 2x2 grid, and series removal.
    """
    figure = figure_wrapper.FigureWrapper(2, 2)
    with self.subTest(msg="figureTitle(string)"):
        figure = figure.figureTitle("Super Title")
    with self.subTest(msg="figureTitleFont(string, string, int)"):
        figure = figure.figureTitleFont("Arial", "B", 24)
    with self.subTest(msg="figureTitleColor(string)"):
        figure = figure.figureTitleColor("#FF0000")  # named color or RGB hex-string
    with self.subTest(msg="figureTitleColor(Paint)"):
        figure = figure.figureTitleColor(Plot.colorRGB(0.0, 1.0, 0.0))  # create an RGB color using plot convenience function
    with self.subTest(msg="updateInterval(long)"):
        figure = figure.updateInterval(1000)  # in milliseconds
    # Maybe the wrapping for these is dumb?
    chart1, chart2, chart3 = None, None, None
    with self.subTest(msg="newChart()"):
        chart1 = figure.newChart()
    with self.subTest(msg="newChart(int)"):
        chart2 = figure.newChart(0)
    with self.subTest(msg="newChart(int, int)"):
        chart3 = figure.newChart(0, 1)
    with self.subTest(msg="chart(int)"):
        chart1 = chart2.chart(0)
    with self.subTest(msg="chart(int, int)"):
        chart1 = chart3.chart(0, 1)
    with self.subTest(msg="removeChart(int, int)"):
        chart1 = chart3.removeChart(0, 1)
    with self.subTest(msg="removeChart(int)"):
        chart1 = chart2.removeChart(0)
    del chart1, chart2, chart3
    # I have to put a series in here
    figure = figure.plot("Microsoft", self.table.where("Sym=`MSFT`"), "timestamp", "price")
    with self.subTest(msg="figureRemoveSeries(*string)"):
        figure = figure.figureRemoveSeries("Microsoft")
    del figure
def testPlottingMethods(self):
    """
    Test suite for the plotting methods inherited from Axes

    Creates a fresh 1x1 figure per plot type and smoke-tests each plotting
    entry point (xy, category, histogram, pie, ohlc, error bars) against the
    shared fixture table.
    """
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("plot"):
        figure = figure.plot("XY Series", self.table.where("Sym=`MSFT`"), "timestamp", "price")
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("catPlot"):
        figure = figure.catPlot("Category", self.table, "Sym", "price")
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("histPlot"):
        figure = figure.histPlot("Histogram", self.table.where("Sym=`MSFT`"), "price", 10)
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("catHistPlot"):
        figure = figure.catHistPlot("Category Histogram", self.table, "Sym")
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("piePlot"):
        figure = figure.piePlot("Pie", self.table.aggBy(Aggregation.AggAvg("price"), "Sym"), "Sym", "price")
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("ohlcPlot"):
        # dumbest ohlc ever
        figure = figure.ohlcPlot("OHLC", self.table.where("Sym=`MSFT`"), "timestamp", "price", "price", "price", "price")
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("errorBarX"):
        figure = figure.errorBarX("Error X", self.table.where("Sym=`MSFT`"), "timestamp", "price", "timestamp", "timestamp")
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("errorBarY"):
        figure = figure.errorBarY("Error Y", self.table.where("Sym=`MSFT`"), "timestamp", "price", "price", "price")
    figure = figure_wrapper.FigureWrapper(1, 1)
    with self.subTest("errorBarXY"):
        figure = figure.errorBarXY("Error XY", self.table.where("Sym=`MSFT`"), "timestamp", "timestamp", "timestamp", "price", "price", "price")
    figure = figure_wrapper.FigureWrapper(1, 1)
    # Pre-aggregate per-symbol avg/min/max for the category error-bar plot.
    aggs = [
        Aggregation.AggAvg("avgPrice=price"),
        Aggregation.AggMin("minPrice=price"),
        Aggregation.AggMax("maxPrice=price")]
    j_agg_list = _JArrayList()
    for agg in aggs:
        j_agg_list.add(agg)
    with self.subTest("catErrorBar"):
        figure = figure.catErrorBar("Cat Error Bar",
                                    self.table.aggBy(j_agg_list,"Sym"),
                                    "Sym", "avgPrice", "minPrice", "maxPrice")
    del figure
def testAxesMethods(self):
    """
    Test suite for methods for non-plotting methods inherited from Axes

    Smoke-tests axis fetchers, twin-axis creation, and the per-axis (x/y)
    styling wrappers, then adds a series to exercise plotStyle, series
    lookup, and series removal.
    """
    # TODO: x/yTransform(AxisTransform)?, x/yBusinessTime(BusinessCalendar)?
    figure = figure_wrapper.FigureWrapper()  # is there an axes at this point?
    axis = None
    # maybe the wrapping for these is dumb?
    with self.subTest(msg="axis fetchers"):
        axis = figure.axis(0)
        axis = figure.xAxis()
        axis = figure.yAxis()
    del axis
    axes = None
    # maybe the wrapping for these is dumb?
    with self.subTest(msg="twin axis methods"):
        axes = figure.twin()
        axes = figure.twin("new")
        axes = figure.twin(0)
        axes = figure.twin("new", 0)
        axes = figure.twinX()
        axes = figure.twinX("new")
        axes = figure.twinY()
        axes = figure.twinY("new")
    del axes
    with self.subTest(msg="axis formatter methods"):
        figure = figure.xFormatPattern("###,###.00").yFormatPattern("###,###.00")
    with self.subTest(msg="axis color methods"):
        figure = figure.xColor("#202020").yColor("#202020")
        figure.xColor(Plot.colorRGB(1.0, 0.0, 0.0)).yColor(Plot.colorRGB(1.0, 0.0, 0.0))
    with self.subTest(msg="axis labelling methods"):
        figure = figure.xLabel("x axis").yLabel("y axis")
    with self.subTest(msg="axis label font methods"):
        figure = figure.xLabelFont("Arial", "P", 11).yLabelFont("Arial", "P", 11)
    with self.subTest(msg="axis tick font methods"):
        figure = figure.xTicksFont("Arial", "I", 9).yTicksFont("Arial", "I", 9)
    with self.subTest(msg="axis range methods"):
        figure = figure.xRange(1.0, 10.0).yRange(1.0, 10.0)
        figure.xMin(1.0).yMin(1.0)
        figure.xMax(10.0).yMax(10.0)
    with self.subTest(msg="axis ticks methods"):
        figure = figure.xTicks(1.0).yTicks(1.0)
        figure.xTicks([1.0, 2.5, 5.0, 7.5, 10.0]).yTicks([1.0, 2.5, 5.0, 7.5, 10.0])
    with self.subTest(msg="tick visibility methods"):
        figure = figure.xTicksVisible(True).yTicksVisible(True)
        figure = figure.xMinorTicksVisible(True).yMinorTicksVisible(True)
    with self.subTest(msg="minor ticks"):
        figure = figure.xMinorTicks(2).yMinorTicks(2)
    with self.subTest(msg="tick label angles"):
        figure = figure.xTickLabelAngle(45.0).yTickLabelAngle(45.0)
    with self.subTest(msg="axis business time methods"):
        figure.xBusinessTime().yBusinessTime()
    with self.subTest(msg="axis log methods"):
        figure.xLog().yLog()
    with self.subTest(msg="axis inversion methods"):
        figure = figure.xInvert().yInvert()
        figure = figure.xInvert(True).yInvert(True)
    # I have to put a series in here
    figure = figure.plot("Microsoft", self.table.where("Sym=`MSFT`"), "timestamp", "price")
    with self.subTest(msg="plotStyle"):
        figure = figure.plotStyle("Area")  # does this just apply the style to all applicable series? Or?
    # maybe the wrapping for these is dumb?
    series = None
    with self.subTest(msg="series(int)"):
        series = figure.series(0)  # I'm guessing that the int id starts at 0?
    with self.subTest(msg="series(string"):
        series = figure.series("Microsoft")
    del series
    with self.subTest(msg="axesRemoveSeries(*string)"):
        figure = figure.axesRemoveSeries("Microsoft")
    del figure
@unittest.skip("These all fail, because no axes is selected. Not presently sure how to resolve?")
def testAxisMethods(self):
    """
    Test suite for methods inherited from Axis - do these apply said methods to every axis? Seems silly.

    Smoke-tests each Axis method wrapper in its own subTest; the figure is
    rebound to the returned wrapper so later calls chain off the latest state.
    Currently skipped: the calls fail because no axis is selected on the figure.
    """
    figure = figure_wrapper.FigureWrapper()
    # How do I get it to select an axes?
    with self.subTest(msg="axisColor(string)"):
        figure = figure.axisColor("#000000")
    with self.subTest(msg="axisColor(Paint)"):
        figure = figure.axisColor(Plot.colorRGB(0, 0, 255))
    with self.subTest(msg="axisFormatPattern()"):
        figure = figure.axisFormat("###,###.00")  # decimal formatting pattern
    with self.subTest(msg="axisLabel(string)"):
        figure = figure.axisLabel("axis")  # axis label text
    with self.subTest(msg="axisLabelFont(string, string, int)"):
        figure = figure.axisLabelFont("Arial", "P", 11)
    with self.subTest(msg="businessTime()"):
        figure = figure.businessTime()
    with self.subTest(msg="businessTime(calendar)"):
        figure = figure.businessTime(Calendars.calendar())
    with self.subTest(msg="min(double)"):
        figure = figure.min(1.0)
    with self.subTest(msg="max(double)"):
        figure = figure.max(10.0)
    with self.subTest(msg="range(double, double)"):
        figure = figure.range(1.0, 10.0)
    with self.subTest(msg="ticks(double)"):
        figure = figure.ticks(1.0)
    with self.subTest(msg="ticks(double[])"):
        figure = figure.ticks([1.0, 2.5, 5.0, 7.5, 10.0])
    with self.subTest(msg="tickFont(string, string, int)"):
        figure = figure.ticksFont("Arial", "I", 9)
    with self.subTest(msg="ticksVisible(boolean)"):
        figure = figure.ticksVisible(True)
    with self.subTest(msg="tickLabelAngle(double)"):
        figure = figure.tickLabelAngle(45.0)  # I'm guessing degrees?
    with self.subTest(msg="minorTicks(int)"):
        figure = figure.minorTicks(2)
    with self.subTest(msg="minorTicksVisible(boolean)"):
        figure = figure.minorTicksVisible(True)
    with self.subTest(msg="log()"):
        figure = figure.log()
    # TODO: where would I get an AxisTransform object?
    # with self.subTest(msg="transform(AxisTransform)"):
    #     figure = figure.transform(what)
    with self.subTest(msg="invert()"):
        figure = figure.invert()
    with self.subTest(msg="invert(boolean)"):
        figure = figure.invert(False)
    del figure
def testChartMethods(self):
    """
    Test suite for methods inherited from Chart

    Exercises chart-level styling (title, spans, legend), axes creation and
    lookup, plot orientation, and series removal on a 2x2 figure.
    """
    figure = figure_wrapper.FigureWrapper(2, 2)
    with self.subTest(msg="chartTitle(string)"):
        figure = figure.chartTitle("Chart Title")
    with self.subTest(msg="chartTitleColor(string"):
        figure = figure.chartTitleColor("BLUE")
    with self.subTest(msg="chartTitleColor(Paint)"):
        figure = figure.chartTitleColor(Plot.colorRGB(0, 0, 255))
    with self.subTest(msg="chartTitleFont(string, string, int)"):
        figure = figure.chartTitleFont("Arial", "B", 20)
    with self.subTest(msg="span(int, int"):
        figure.span(2, 2)
    with self.subTest(msg="colSpan(int)"):
        figure.colSpan(2)
    with self.subTest(msg="rowSpan(int)"):
        figure.rowSpan(2)
    axes = None
    # maybe the wrapping for these is dumb? Should be returning an axes reference?
    with self.subTest(msg="newAxes()"):
        axes = figure.newAxes()
    with self.subTest(msg="newAxes(string)"):
        axes = figure.newAxes("new_axis")
    with self.subTest(msg="newAxes(int)"):
        axes = figure.newAxes(2)
    with self.subTest(msg="newAxes(string, int)"):
        axes = figure.newAxes("new_axis", 2)
    with self.subTest(msg="axes(string)"):
        axes.axes("new_axis")
    with self.subTest(msg="axes(int)"):
        axes.axes(0)  # I'm assuming that 0 will always work?
    del axes
    # TODO: what are the possibilities here? I'm guessing ["horizontal", "vertical"]? Documentation?
    with self.subTest(msg="plotOrientation(string)"):
        figure = figure.plotOrientation("vertical")
    # I have to put a series in here
    figure = figure.plot("Microsoft", self.table.where("Sym=`MSFT`"), "timestamp", "price")
    with self.subTest(msg="legendVisible(boolean)"):
        figure = figure.legendVisible(True)
    with self.subTest(msg="legendFont(string, string, int)"):
        figure = figure.legendFont("Arial", "P", 8)
    with self.subTest(msg="legendColor(string)"):
        # I'm guessing that this is the background color?
        figure = figure.legendColor("#A0A0A0")
    with self.subTest(msg="legendColor(Paint)"):
        figure = figure.legendColor(Plot.colorRGB(200, 200, 200))
    with self.subTest(msg="chartRemoveSeries(*string)"):
        figure.chartRemoveSeries("Microsoft")
    del figure
def testDataSeriesMethods(self):
    """
    Test suite for methods inherited from DataSeries

    Builds a single XY series from the MSFT rows of the shared fixture table
    and smoke-tests each per-series styling wrapper in its own subTest.
    """
    # TODO: pointColorByY(SerializableFunction)?, pointColorByY(Closure)?
    figure = Plot.plot("Microsoft", self.table.where("Sym=`MSFT`"), "timestamp", "price")
    with self.subTest(msg="linesVisible(boolean)"):
        figure = figure.linesVisible(True)
    with self.subTest(msg="lineColor(Paint)"):
        figure = figure.lineColor(Plot.colorRGB(0.2, 1.0, 0.2))
    with self.subTest(msg="lineStyle(LineStyle)"):
        figure = figure.lineStyle(Plot.lineStyle(4, 4))
    with self.subTest(msg="pointsVisible(boolean)"):
        figure = figure.pointsVisible(True)
    with self.subTest(msg="pointSize(double)"):
        figure = figure.pointSize(2.0)
    with self.subTest(msg="pointLabel(object)"):
        figure = figure.pointLabel("label")
    with self.subTest(msg="pointLabelFormat(string)"):
        figure = figure.pointLabelFormat("{0}: ({1}, {2})")
    with self.subTest(msg="pointShape(string)"):
        figure = figure.pointShape("CIRCLE")
    with self.subTest(msg="seriesColor(Paint)"):
        figure = figure.seriesColor(Plot.colorRGB(0.1, 0.1, 0.1))
    with self.subTest(msg="pointColor(Paint)"):
        figure = figure.pointColor(Plot.colorRGB(1.0, 0.0, 0.0))
    with self.subTest(msg="gradientVisible(boolean)"):
        figure.gradientVisible(False)
    with self.subTest(msg="toolTipPattern(string)"):
        figure = figure.toolTipPattern("###,###.00")
    with self.subTest(msg="xToolTipPattern(string)"):
        figure = figure.xToolTipPattern("###,###.00")
    with self.subTest(msg="yToolTipPattern(string)"):
        figure = figure.yToolTipPattern("###,###.00")
    del figure
@unittest.skip("what to do?")
def testCategoryDataseriesMethods(self):
    """
    Test suite for methods inherited from CategoryDataSeries - bah...

    Placeholder only; intentionally skipped until a testing approach is chosen.
    """
    # TODO: this is terrible
    pass
@unittest.skip("what to do?")
def testXYDataSeriesMethods(self):
    """
    Test suite for methods inherited from XYDataSeries - bah...

    Placeholder only; intentionally skipped until a testing approach is chosen.
    """
    # TODO: various extensions of pointSize(*args), pointColor(*args), pointLabel(*args), pointShape(*args)
    pass
@unittest.skip("These all fail with predictable error message. Wrapping appears to be correct, but I'm calling on"
               "something inappropriate. Not presently sure how to resolve?")
def testMultiSeries(self):
    """
    Test suite for methods inherited from MultiSeries - bah...

    Plots two series (MSFT, AAPL) and exercises the keyed (*keys) overloads of
    the styling methods. Skipped: the underlying series type rejects the
    keyed overloads (see the UnsupportedOperationException note below).
    """
    # NB: the error message:
    # java.lang.UnsupportedOperationException: Series type does not support this method.
    # seriesType=class io.deephaven.plot.datasets.xy.XYDataSeriesTableArray
    # method='@Override public FigureImpl pointsVisible( java.lang.Boolean visible, java.lang.Object... keys )'
    # TODO: seriesNamingFunction(*args)?,pointColorByY(func, *keys)?
    # TODO: a ton of other call signatures for basically XYDataSeriesMethods
    figure = Plot.plot("Microsoft", self.table.where("Sym=`MSFT`"), "timestamp", "price")\
        .plot("Apple", self.table.where("Sym=`AAPL`"), "timestamp", "price")
    with self.subTest(msg="gradientVisible(boolean, *keys)"):
        figure = figure.gradientVisible(True, "Microsoft")
    with self.subTest(msg="lineColor(Paint/int/string, *keys)"):
        figure = figure.lineColor("RED", "Apple")
    with self.subTest(msg="lineStyle(LineStyle, *keys)"):
        figure = figure.lineStyle(Plot.lineStyle(4.0, 4.0), "Microsoft", "Apple")
    with self.subTest(msg="linesVisible(boolean, *keys)"):
        figure = figure.linesVisible(True, "Microsoft", "Apple")
    with self.subTest(msg="pointColor(Paint/int/string, *keys)"):
        figure = figure.pointColor("BLUE", "Microsoft", "Apple")
    with self.subTest(msg="pointLabel(object, *keys)"):
        figure = figure.pointLabel("label", "Microsoft", "Apple")
    with self.subTest(msg="pointLabelFormat(string, *keys)"):
        figure = figure.pointLabelFormat("{0}: ({1}, {2})", "Microsoft", "Apple")
    with self.subTest(msg="pointShape(string, *keys)"):
        figure = figure.pointShape("SQUARE", "Microsoft", "Apple")
    with self.subTest(msg="pointSize(double, *keys)"):
        figure = figure.pointSize(2.0, "Microsoft", "Apple")
    with self.subTest(msg="pointsVisible(boolean, *keys)"):
        figure = figure.pointsVisible(True, "Microsoft", "Apple")
    with self.subTest(msg="seriesColor(Paint/int/string, *keys)"):
        figure = figure.seriesColor(Plot.colorRGB(255, 0, 0), "Microsoft", "Apple")
    with self.subTest(msg="tool tips"):
        figure = figure.toolTipPattern("###,###.00", "Apple")\
            .xToolTipPattern("###,###.00", "Apple")\
            .yToolTipPattern("###,###.00", "Apple")
    with self.subTest(msg="group(int, *keys)"):
        figure = figure.group(0, "Microsoft", "Apple")
    del figure
|
en
| 0.775314
|
# # Copyright (c) 2016-2021 Deephaven Data Labs and Patent Pending # ############################################################################## # NOTE: the jvm should have been initialized, or this test will certainly fail ############################################################################## # not part of the standard library, installed via pip (or the like) # it provides backward compatibility with python3 style subTest context manager (handy for complex tests) Test cases for the deephaven.Plot.figure_wrapper module Inherited method allowing initialization of test environment # TODO: maybe we should test the direct data plotting functionality? vs table reference? Test suite for some basic FigureWrapper methods # NB: figure3.figure_ is a FigureWidget versus Figure... # NB: method name should have been switched to getWidget() from getwidget() # TODO: I'm fairly sure that this is not working as I would hope...I can't call figure3.show() # tidy up by destroying these objects - probably only necessary after show, but JIC # NB: setting to None should also do it, where that is more convenient Test suite for methods inherited from BaseFigure # named color or RGB hex-string # create an RGB color using plot convenience function # in milliseconds # Maybe the wrapping for these is dumb? # I have to put a series in here Test suite for the plotting methods inherited from Axes # dumbest ohlc ever Test suite for methods for non-plotting methods inherited from Axes # TODO: x/yTransform(AxisTransform)?, x/yBusinessTime(BusinessCalendar)? # is there an axes at this point? # maybe the wrapping for these is dumb? # maybe the wrapping for these is dumb? ##,###.00").yFormatPattern("###,###.00") # I have to put a series in here # does this just apply the style to all applicable series? Or? # maybe the wrapping for these is dumb? # I'm guessing that the int id starts at 0? Test suite for methods inherited from Axis - do these apply said methods to every axis? Seems silly. 
# How do I get it to select an axes? ##,###.00") # decimal formatting pattern # decimal formatting pattern # I'm guessing degrees? # TODO: where would I get an AxisTransform object? # with self.subTest(msg="transform(AxisTransform)"): # figure = figure.transform(what) Test suite for methods inherited from Chart # maybe the wrapping for these is dumb? Should be returning an axes reference? # I'm assuming that 0 will always work? # TODO: what are the possibilities here? I'm guessing ["horizontal", "vertical"]? Documentation? # I have to put a series in here # I'm guessing that this is the background color? Test suite for methods inherited from DataSeries # TODO: pointColorByY(SerializableFunction)?, pointColorByY(Closure)? ##,###.00") ##,###.00") ##,###.00") Test suite for methods inherited from CategoryDataSeries - bah... # TODO: this is terrible Test suite for methods inherited from XYDataSeries - bah... # TODO: various extensions of pointSize(*args), pointColor(*args), pointLabel(*args), pointShape(*args) Test suite for methods inherited from MultiSeries - bah... # NB: the error message: # java.lang.UnsupportedOperationException: Series type does not support this method. # seriesType=class io.deephaven.plot.datasets.xy.XYDataSeriesTableArray # method='@Override public FigureImpl pointsVisible( java.lang.Boolean visible, java.lang.Object... keys )' # TODO: seriesNamingFunction(*args)?,pointColorByY(func, *keys)? # TODO: a ton of other call signatures for basically XYDataSeriesMethods ##,###.00", "Apple")\ ##,###.00", "Apple")\ ##,###.00", "Apple")
| 2.28277
| 2
|
3ty/workflow_executor/workflow_executor/fastapiserver.py
|
DontWorry33/proc-ades
| 0
|
6626711
|
<filename>3ty/workflow_executor/workflow_executor/fastapiserver.py
import json
import os
import tempfile
import uvicorn
from fastapi import FastAPI, Form, File, status, Response
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
import workflow_executor
from workflow_executor import prepare, client, result, clean, helpers
from pydantic import BaseModel
from kubernetes.client.rest import ApiException
from pprint import pprint
import yaml
# Module-level FastAPI application serving the workflow-executor REST API.
# OpenAPI schema is exposed at /api; interactive docs at /api/docs and /api/redoc.
app = FastAPI(
    title="the title",
    description="the config",
    version="2.5.0",
    openapi_url="/api",
    docs_url="/api/docs", redoc_url="/api/redoc"
)
class Error:
    """
    JSON-serializable error payload of the shape
    {"error": {"code": <int>, "message": <str>}}.
    """

    def __init__(self):
        # Default payload; populated via set_error().
        self.err = {
            "error": {
                "code": 0,
                "message": ""
            }
        }

    def set_error(self, code, msg):
        """Record an error code and a human-readable message."""
        self.err["error"]["code"] = code
        self.err["error"]["message"] = msg

    def __str__(self):
        # BUG FIX: __str__ must return a str. The original returned the dict
        # itself, which raises "TypeError: __str__ returned non-string"
        # whenever str()/print()/f-string formatting is applied.
        return str(self.err)
class PrepareContent(BaseModel):
    """Request body for POST /prepare: identifies the service run to provision."""
    serviceID: str  # service identifier; combined with runID to derive the k8s namespace
    runID: str      # run identifier
    cwl: str        # CWL document; /prepare reads its ResourceRequirement hints for volume sizing
class ExecuteContent(PrepareContent):
    """Request body for execute calls; extends PrepareContent with the prepared namespace and inputs."""
    prepareID: str  # namespace id returned by /prepare
    cwl: str        # NOTE(review): re-declares the field inherited from PrepareContent; redundant but harmless
    inputs: str     # workflow inputs; presumably a serialized document — TODO confirm expected format
def sanitize_k8_parameters(value: str):
    """
    Normalize *value* into a Kubernetes-friendly identifier: underscores
    become hyphens, everything is lower-cased, and any trailing hyphens
    are stripped.
    """
    normalized = value.replace("_", "-").lower()
    # rstrip("-") removes every trailing hyphen, matching the original
    # "while endswith('-'): drop last char" loop exactly.
    return normalized.rstrip("-")
@app.get("/")
def read_root():
    """Trivial root endpoint; doubles as a liveness check."""
    return {"Hello": "World"}
"""
Executes namespace preparation
"""
@app.post("/prepare", status_code=status.HTTP_201_CREATED)
def read_prepare(content: PrepareContent, response: Response):
    """
    Execute namespace preparation for a service run.

    Derives a Kubernetes namespace name from serviceID+runID, sizes the
    tmp/output volumes (env overrides, then CWL ResourceRequirement hints),
    and delegates to workflow_executor.prepare.run.

    Returns {"prepareID": <namespace>}; on a Kubernetes ApiException the
    response status code is set from the exception and the same payload is
    still returned.
    """
    state = client.State()
    print('Prepare POST')
    prepare_id = sanitize_k8_parameters(f"{content.serviceID}{content.runID}")
    if len(prepare_id) > 63:
        # Kubernetes namespace names are DNS labels capped at 63 characters.
        # NOTE(review): shorten_namespace is expected to be defined elsewhere
        # in this module — confirm it is in scope.
        prepare_id = shorten_namespace(sanitize_k8_parameters(content.serviceID), sanitize_k8_parameters(content.runID))
    default_tmpVolumeSize = "4Gi"
    default_outputVolumeSize = "5Gi"
    tmpVolumeSize = os.getenv('VOLUME_TMP_SIZE', default_tmpVolumeSize)
    outputVolumeSize = os.getenv('VOLUME_OUTPUT_SIZE', default_outputVolumeSize)
    volumeName = sanitize_k8_parameters(f"{content.serviceID}-volume")
    storage_class_name = os.getenv('STORAGE_CLASS', None)
    # CWL ResourceRequirement hints (tmpdirMax/outdirMax, in MiB) override the
    # env-derived volume sizes.
    cwlResourceRequirement = helpers.getCwlResourceRequirement(content.cwl)
    if cwlResourceRequirement:
        if "tmpdirMax" in cwlResourceRequirement:
            print(f"setting tmpdirMax to {cwlResourceRequirement['tmpdirMax']} as specified in the CWL")
            tmpVolumeSize = f"{cwlResourceRequirement['tmpdirMax']}Mi"
        if "outdirMax" in cwlResourceRequirement:
            print(f"setting outdirMax to {cwlResourceRequirement['outdirMax']} as specified in the CWL")
            outputVolumeSize = f"{cwlResourceRequirement['outdirMax']}Mi"
    ades_namespace = os.getenv('ADES_NAMESPACE', None)
    # image pull secrets
    # BUG FIX: image_pull_secrets was only bound when IMAGE_PULL_SECRETS is
    # set, producing a NameError at the prepare.run(...) call below otherwise.
    # Default to None (no secrets configured).
    image_pull_secrets = None
    image_pull_secrets_json = os.getenv('IMAGE_PULL_SECRETS', None)
    if image_pull_secrets_json is not None:
        with open(image_pull_secrets_json) as json_file:
            image_pull_secrets = json.load(json_file)
    print('namespace: %s' % prepare_id)
    print(f"tmpVolumeSize: {tmpVolumeSize}")
    print(f"outputVolumeSize: {outputVolumeSize}")
    print('volume_name: %s' % volumeName)
    try:
        resp_status = workflow_executor.prepare.run(namespace=prepare_id, tmpVolumeSize=tmpVolumeSize,
                                                    outputVolumeSize=outputVolumeSize,
                                                    volumeName=volumeName, state=state,
                                                    storage_class_name=storage_class_name,
                                                    imagepullsecrets=image_pull_secrets,
                                                    ades_namespace=ades_namespace)
    except ApiException as e:
        # Surface the Kubernetes API error status to the HTTP client.
        response.status_code = e.status
    return {"prepareID": prepare_id}
"""
Returns prepare status
"""
@app.get("/prepare/{prepare_id}", status_code=status.HTTP_200_OK)
def read_prepare(prepare_id: str, response: Response):
    """Return the status of a previously requested namespace preparation.

    HTTP 200 when done, 100 while still pending, and the Kubernetes API
    status code on error.
    """
    state = client.State()
    print('Prepare GET')
    namespace = prepare_id
    try:
        resp_status = workflow_executor.prepare.get(namespace=namespace, state=state)
    except ApiException as e:
        # BUG FIX: the original fell through and dereferenced resp_status,
        # which is unbound when prepare.get() raises (UnboundLocalError).
        response.status_code = e.status
        return {"status": "failed", "error": e.body}
    if resp_status["status"] == "pending":
        response.status_code = status.HTTP_100_CONTINUE
    return resp_status
# 200 done
# 100 ripassa dopo
# 500 error
"""
Executes workflow
"""
@app.post("/execute", status_code=status.HTTP_201_CREATED)
def read_execute(content: ExecuteContent, response: Response):
    """Submit a CWL workflow for execution in the prepared namespace.

    Expected body: {"runID": ..., "serviceID": ..., "prepareID": <namespace>,
    "cwl": <CWL string>, "inputs": <JSON string>}. ADES config parameters,
    the job name and the result-file name are appended to the workflow
    inputs (consumed by the stage-out phase). Returns {"jobID": <name>};
    poll the /status endpoint for progress.
    """
    state = client.State()
    print('Execute POST')
    namespace = content.prepareID
    cwl_content = content.cwl
    inputs_content = json.loads(content.inputs)
    volume_name_prefix = sanitize_k8_parameters(f"{content.serviceID}-volume")
    workflow_name = sanitize_k8_parameters(f"wf-{content.runID}")
    mount_folder = "/workflow"
    # cwl_wrapper config: wrapper CWL documents used to assemble the final workflow
    cwl_wrapper_config = dict()
    cwl_wrapper_config["maincwl"] = os.getenv('ADES_WFEXEC_MAINCWL', None)
    cwl_wrapper_config["stagein"] = os.getenv('ADES_WFEXEC_STAGEIN_CWL', None)
    cwl_wrapper_config["stageout"] = os.getenv('ADES_WFEXEC_STAGEOUT_CWL', None)
    cwl_wrapper_config["rulez"] = os.getenv('ADES_WFEXEC_RULEZ_CWL', None)
    # read ADES config variables
    with open(os.getenv('ADES_CWL_INPUTS', None)) as f:
        cwl_inputs = yaml.load(f, Loader=yaml.FullLoader)
    # read env vars to be injected into workflow pods
    with open(os.getenv('ADES_POD_ENV_VARS', None)) as f:
        pod_env_vars = yaml.load(f, Loader=yaml.FullLoader)
    # Forward ADES config params as "ADES_*" workflow inputs; these are
    # consumed during the stage-out phase.
    for k, v in cwl_inputs.items():
        inputs_content["inputs"].append({
            "id": "ADES_" + k,
            "dataType": "string",
            "value": v,
            "mimeType": "",
            "href": ""})
    inputs_content["inputs"].append({
        "id": "job",
        "dataType": "string",
        "value": workflow_name,
        "mimeType": "",
        "href": ""})
    inputs_content["inputs"].append({
        "id": "outputfile",
        "dataType": "string",
        "value": f"{workflow_name}.res",
        "mimeType": "",
        "href": ""})
    default_max_ram_value = "4G"
    default_max_cores_value = "2"
    max_ram = os.getenv('JOB_MAX_RAM', default_max_ram_value)
    max_cores = os.getenv('JOB_MAX_CORES', default_max_cores_value)
    # CWL ResourceRequirement hints override the env-var job limits.
    cwlResourceRequirement = helpers.getCwlResourceRequirement(cwl_content)
    if cwlResourceRequirement:
        if "ramMax" in cwlResourceRequirement:
            print(f"setting ramMax to {cwlResourceRequirement['ramMax']}Mi as specified in the CWL")
            max_ram = f"{cwlResourceRequirement['ramMax']}Mi"
        if "coresMax" in cwlResourceRequirement:
            print(f"setting coresMax to {cwlResourceRequirement['coresMax']} as specified in the CWL")
            max_cores = str(cwlResourceRequirement["coresMax"])
    print("inputs_content")
    pprint(inputs_content)
    # Materialize the CWL document and the merged inputs into two temp
    # files; execute.run() reads them by path.
    with tempfile.NamedTemporaryFile(mode="w") as cwl_file, tempfile.NamedTemporaryFile(mode="w") as input_json:
        cwl_file.write(cwl_content)
        cwl_file.flush()
        cwl_file.seek(0)
        input_json.write(json.dumps(inputs_content))
        input_json.flush()
        input_json.seek(0)
        print(cwl_file.name)
        print(input_json.name)
        try:
            resp_status = workflow_executor.execute.run(state=state,
                                                        cwl_document=cwl_file.name,
                                                        job_input_json=input_json.name,
                                                        volume_name_prefix=volume_name_prefix,
                                                        mount_folder=mount_folder,
                                                        namespace=namespace,
                                                        workflow_name=workflow_name,
                                                        cwl_wrapper_config=cwl_wrapper_config,
                                                        pod_env_vars=pod_env_vars,
                                                        max_ram=max_ram,
                                                        max_cores=max_cores)
        except ApiException as e:
            response.status_code = e.status
            resp_status = {"status": "failed", "error": e.body}
    return {"jobID": workflow_name}
"""
Returns workflow status
"""
@app.get("/status/{service_id}/{run_id}/{prepare_id}/{job_id}", status_code=status.HTTP_200_OK)
def read_getstatus(service_id: str, run_id: str, prepare_id: str, job_id: str, response: Response):
    """Return workflow progress as {"percent": int, "msg": str}.

    On workflow failure or a Kubernetes API error, returns an Error object
    with HTTP 500 and optionally discards the namespace depending on
    JOB_KEEPWORKSPACE_IF_FAILED (default: keep).
    """
    namespace = prepare_id
    workflow_name = sanitize_k8_parameters(f"wf-{run_id}")
    keepworkspaceiffailedString = os.getenv('JOB_KEEPWORKSPACE_IF_FAILED', "True")
    keepworkspaceiffailed = keepworkspaceiffailedString.lower() in ['true', '1', 'y', 'yes']
    state = client.State()
    print('Status GET')
    resp_status = None
    # BUG FIX: the original re-imported fastapi.status locally and then
    # rebound the name `status` to the progress dict, shadowing the module;
    # for any workflow state other than Running/Success/Failed it returned
    # the fastapi.status *module* itself. Use a dedicated variable instead.
    job_progress = {"percent": 0, "msg": "unknown"}
    try:
        resp_status = workflow_executor.status.run(namespace=namespace, workflow_name=workflow_name, state=state)
        if resp_status["status"] == "Running":
            response.status_code = status.HTTP_200_OK
            job_progress = {"percent": 50, "msg": "running"}
        elif resp_status["status"] == "Success":
            response.status_code = status.HTTP_200_OK
            job_progress = {"percent": 100, "msg": "done"}
        elif resp_status["status"] == "Failed":
            e = Error()
            e.set_error(12, resp_status["error"])
            response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
            # if keepworkspaceiffailed is false, namespace will be discarded
            if not keepworkspaceiffailed:
                print('Removing Workspace')
                clean_job_status = clean_job(namespace)
                if isinstance(clean_job_status, Error):
                    return clean_job_status
                else:
                    pprint(clean_job_status)
                    print('Removing Workspace Success')
            return e
    except ApiException as err:
        e = Error()
        e.set_error(12, err.body)
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        # if keepworkspaceiffailed is false, namespace will be discarded
        if not keepworkspaceiffailed:
            print('Removing Workspace')
            clean_job_status = clean_job(namespace)
            if isinstance(clean_job_status, Error):
                return clean_job_status
            else:
                pprint(clean_job_status)
                print('Removing Workspace Success')
        return e
    return job_progress
"""
Returns workflow result
"""
@app.get("/result/{service_id}/{run_id}/{prepare_id}/{job_id}", status_code=status.HTTP_200_OK)
def read_getresult(service_id: str, run_id: str, prepare_id: str, job_id: str, response: Response):
    """Fetch the workflow's result file and return it as {"wf_output": <json string>}.

    On success the workspace (namespace) is removed unless JOB_KEEPWORKSPACE
    is truthy; on a Kubernetes API error an Error object is returned with
    HTTP 500 and the workspace is removed unless JOB_KEEPWORKSPACE_IF_FAILED
    is truthy (default: keep).
    """
    namespace = prepare_id
    workflow_name = sanitize_k8_parameters(f"wf-{run_id}")
    volume_name_prefix = sanitize_k8_parameters(f"{service_id}-volume")
    mount_folder = "/workflow"
    # Result file name as written by the execute step ("<workflow>.res").
    outputfile = f"{workflow_name}.res"
    state = client.State()
    keepworkspaceiffailedString = os.getenv('JOB_KEEPWORKSPACE_IF_FAILED', "True")
    keepworkspaceiffailed = keepworkspaceiffailedString.lower() in ['true', '1', 'y', 'yes']
    print('Result GET')
    try:
        # Reads the output file from the workflow's output volume.
        resp_status = workflow_executor.result.run(namespace=namespace,
                                                   workflowname=workflow_name,
                                                   mount_folder=mount_folder,
                                                   volume_name_prefix=volume_name_prefix,
                                                   outputfile=outputfile,
                                                   state=state)
        print("getresult success")
        pprint(resp_status)
        # The result is double-encoded: clients receive a JSON string under
        # the "wf_output" key.
        json_compatible_item_data = {'wf_output': json.dumps(resp_status)}
        print("wf_output json: ")
        pprint(json_compatible_item_data)
        print("job success")
        keepworkspaceString = os.getenv('JOB_KEEPWORKSPACE', "False")
        keepworkspace = keepworkspaceString.lower() in ['true', '1', 'y', 'yes']
        if not keepworkspace:
            print('Removing Workspace')
            clean_job_status = clean_job(namespace)
            # clean_job returns an Error instance on failure; propagate it.
            if isinstance(clean_job_status, Error):
                return clean_job_status
            else:
                pprint(clean_job_status)
                print('Removing Workspace Success')
    except ApiException as err:
        e = Error()
        e.set_error(12, err.body)
        print(err.body)
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        if not keepworkspaceiffailed:
            print('Removing Workspace')
            clean_job_status = clean_job(namespace)
            if isinstance(clean_job_status, Error):
                return clean_job_status
            else:
                pprint(clean_job_status)
                print('Removing Workspace Success')
        return e
    return JSONResponse(content=json_compatible_item_data)
"""
Removes Kubernetes namespace
"""
def clean_job(namespace: str):
    """Delete the Kubernetes namespace used by a job.

    Returns whatever clean.run() reports on success, or an Error instance
    when the Kubernetes API call fails.
    """
    try:
        return workflow_executor.clean.run(namespace=namespace)
    except ApiException as err:
        failure = Error()
        failure.set_error(12, err.body)
        print(err.body)
        return failure
"""
Shortens namespace name to respect K8 64 chars limit
"""
def shorten_namespace(serviceId, runId):
    """Shorten serviceId so that serviceId+runId fits Kubernetes' 63-char limit.

    Characters are trimmed from the end of serviceId (dropping any trailing
    hyphens so the name stays valid) until the combined name fits.

    BUG FIX: if runId alone exceeds 63 characters the original looped
    forever once serviceId was exhausted; we now stop when serviceId is
    empty and return the (still long) runId for the caller to reject.
    """
    new_namespace = f"{serviceId}{runId}"
    while len(new_namespace) > 63 and serviceId:
        serviceId = serviceId[:-1]
        while serviceId.endswith('-'):
            serviceId = serviceId[:-1]
        new_namespace = f"{serviceId}{runId}"
    return new_namespace
def main():
    """Run the API under uvicorn's development server (debug entry point)."""
    print("DEBUG MODE")  # typo fixed: was "DEBuG MODE"
    uvicorn.run(app)
if __name__ == "__main__":
main()
|
<filename>3ty/workflow_executor/workflow_executor/fastapiserver.py
import json
import os
import tempfile
import uvicorn
from fastapi import FastAPI, Form, File, status, Response
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
import workflow_executor
from workflow_executor import prepare, client, result, clean, helpers
from pydantic import BaseModel
from kubernetes.client.rest import ApiException
from pprint import pprint
import yaml
app = FastAPI(
title="the title",
description="the config",
version="2.5.0",
openapi_url="/api",
docs_url="/api/docs", redoc_url="/api/redoc"
)
class Error:
    """JSON-serializable error container returned to clients on failure.

    The payload shape is ``{"error": {"code": int, "message": str}}``.
    """

    def __init__(self):
        # Default to code 0 / empty message until set_error() is called.
        self.err = {
            "error": {
                "code": 0,
                "message": ""
            }
        }

    def set_error(self, code, msg):
        """Set the numeric error code and human-readable message."""
        self.err["error"]["code"] = code
        self.err["error"]["message"] = msg

    def __str__(self):
        # BUG FIX: __str__ must return a str. The original returned the dict
        # itself, which raised "TypeError: __str__ returned non-string" on
        # every str()/print() of an Error instance.
        return json.dumps(self.err)
class PrepareContent(BaseModel):
serviceID: str
runID: str
cwl: str
class ExecuteContent(PrepareContent):
prepareID: str
cwl: str
inputs: str
def sanitize_k8_parameters(value: str):
value = value.replace("_", "-").lower()
while value.endswith("-"):
value = value[:-1]
return value
@app.get("/")
def read_root():
return {"Hello": "World"}
"""
Executes namespace preparation
"""
@app.post("/prepare", status_code=status.HTTP_201_CREATED)
def read_prepare(content: PrepareContent, response: Response):
state = client.State()
print('Prepare POST')
prepare_id = sanitize_k8_parameters(f"{content.serviceID}{content.runID}")
if len(prepare_id) > 63:
prepare_id = shorten_namespace(sanitize_k8_parameters(content.serviceID), sanitize_k8_parameters(content.runID))
default_tmpVolumeSize = "4Gi"
default_outputVolumeSize = "5Gi"
tmpVolumeSize = os.getenv('VOLUME_TMP_SIZE', default_tmpVolumeSize)
outputVolumeSize = os.getenv('VOLUME_OUTPUT_SIZE', default_outputVolumeSize)
volumeName = sanitize_k8_parameters(f"{content.serviceID}-volume")
storage_class_name = os.getenv('STORAGE_CLASS', None)
cwlResourceRequirement = helpers.getCwlResourceRequirement(content.cwl)
if cwlResourceRequirement:
if "tmpdirMax" in cwlResourceRequirement:
print(f"setting tmpdirMax to {cwlResourceRequirement['tmpdirMax']} as specified in the CWL")
tmpVolumeSize = f"{cwlResourceRequirement['tmpdirMax']}Mi"
if "outdirMax" in cwlResourceRequirement:
print(f"setting outdirMax to {cwlResourceRequirement['outdirMax']} as specified in the CWL")
outputVolumeSize = f"{cwlResourceRequirement['outdirMax']}Mi"
ades_namespace = os.getenv('ADES_NAMESPACE', None)
# image pull secrets
image_pull_secrets_json = os.getenv('IMAGE_PULL_SECRETS', None)
if image_pull_secrets_json is not None:
with open(image_pull_secrets_json) as json_file:
image_pull_secrets = json.load(json_file)
print('namespace: %s' % prepare_id)
print(f"tmpVolumeSize: {tmpVolumeSize}")
print(f"outputVolumeSize: {outputVolumeSize}")
print('volume_name: %s' % volumeName)
try:
resp_status = workflow_executor.prepare.run(namespace=prepare_id, tmpVolumeSize=tmpVolumeSize,
outputVolumeSize=outputVolumeSize,
volumeName=volumeName, state=state,
storage_class_name=storage_class_name,
imagepullsecrets=image_pull_secrets,
ades_namespace=ades_namespace)
except ApiException as e:
response.status_code = e.status
return {"prepareID": prepare_id}
"""
Returns prepare status
"""
@app.get("/prepare/{prepare_id}", status_code=status.HTTP_200_OK)
def read_prepare(prepare_id: str, response: Response):
    """Return the status of a previously requested namespace preparation.

    HTTP 200 when done, 100 while still pending, and the Kubernetes API
    status code on error.
    """
    state = client.State()
    print('Prepare GET')
    namespace = prepare_id
    try:
        resp_status = workflow_executor.prepare.get(namespace=namespace, state=state)
    except ApiException as e:
        # BUG FIX: the original fell through and dereferenced resp_status,
        # which is unbound when prepare.get() raises (UnboundLocalError).
        response.status_code = e.status
        return {"status": "failed", "error": e.body}
    if resp_status["status"] == "pending":
        response.status_code = status.HTTP_100_CONTINUE
    return resp_status
# 200 done
# 100 ripassa dopo
# 500 error
"""
Executes workflow
"""
@app.post("/execute", status_code=status.HTTP_201_CREATED)
def read_execute(content: ExecuteContent, response: Response):
# {"runID": "runID-123","serviceID": "service-id-123", "prepareID":"uuid" ,"cwl":".......","inputs":".........."}
state = client.State()
print('Execute POST')
namespace = content.prepareID
cwl_content = content.cwl
inputs_content = json.loads(content.inputs)
volume_name_prefix = sanitize_k8_parameters(f"{content.serviceID}-volume")
workflow_name = sanitize_k8_parameters(f"wf-{content.runID}")
mount_folder = "/workflow"
# cwl_wrapper config
cwl_wrapper_config = dict()
cwl_wrapper_config["maincwl"] = os.getenv('ADES_WFEXEC_MAINCWL', None)
cwl_wrapper_config["stagein"] = os.getenv('ADES_WFEXEC_STAGEIN_CWL', None)
cwl_wrapper_config["stageout"] = os.getenv('ADES_WFEXEC_STAGEOUT_CWL', None)
cwl_wrapper_config["rulez"] = os.getenv('ADES_WFEXEC_RULEZ_CWL', None)
# read ADES config variables
with open(os.getenv('ADES_CWL_INPUTS', None)) as f:
cwl_inputs = yaml.load(f, Loader=yaml.FullLoader)
# read ADES config variables
with open(os.getenv('ADES_POD_ENV_VARS', None)) as f:
pod_env_vars = yaml.load(f, Loader=yaml.FullLoader)
# retrieve config params and store them in json
# these will be used in the stageout phase
default_value = ""
for k, v in cwl_inputs.items():
inputs_content["inputs"].append({
"id": "ADES_" + k,
"dataType": "string",
"value": v,
"mimeType": "",
"href": ""})
inputs_content["inputs"].append({
"id": "job",
"dataType": "string",
"value": workflow_name,
"mimeType": "",
"href": ""})
inputs_content["inputs"].append({
"id": "outputfile",
"dataType": "string",
"value": f"{workflow_name}.res",
"mimeType": "",
"href": ""})
default_max_ram_value = "4G"
default_max_cores_value = "2"
max_ram = os.getenv('JOB_MAX_RAM', default_max_ram_value)
max_cores = os.getenv('JOB_MAX_CORES', default_max_cores_value)
cwlResourceRequirement = helpers.getCwlResourceRequirement(cwl_content)
if cwlResourceRequirement:
if "ramMax" in cwlResourceRequirement:
print(f"setting ramMax to {cwlResourceRequirement['ramMax']}Mi as specified in the CWL")
max_ram = f"{cwlResourceRequirement['ramMax']}Mi"
if "coresMax" in cwlResourceRequirement:
print(f"setting coresMax to {cwlResourceRequirement['coresMax']} as specified in the CWL")
max_cores = str(cwlResourceRequirement["coresMax"])
print(f"inputs_content")
pprint(inputs_content)
# inputcwlfile is input_json + cwl_file
# create 2 temp files
with tempfile.NamedTemporaryFile(mode="w") as cwl_file, tempfile.NamedTemporaryFile(mode="w") as input_json:
cwl_file.write(cwl_content)
cwl_file.flush()
cwl_file.seek(0)
input_json.write(json.dumps(inputs_content))
input_json.flush()
input_json.seek(0)
print(cwl_file.name)
print(input_json.name)
try:
resp_status = workflow_executor.execute.run(state=state,
cwl_document=cwl_file.name,
job_input_json=input_json.name,
volume_name_prefix=volume_name_prefix,
mount_folder=mount_folder,
namespace=namespace,
workflow_name=workflow_name,
cwl_wrapper_config=cwl_wrapper_config,
pod_env_vars=pod_env_vars,
max_ram=max_ram,
max_cores=max_cores)
except ApiException as e:
response.status_code = e.status
resp_status = {"status": "failed", "error": e.body}
return {"jobID": workflow_name}
"""
Returns workflow status
"""
@app.get("/status/{service_id}/{run_id}/{prepare_id}/{job_id}", status_code=status.HTTP_200_OK)
def read_getstatus(service_id: str, run_id: str, prepare_id: str, job_id: str, response: Response):
namespace = prepare_id
workflow_name = sanitize_k8_parameters(f"wf-{run_id}")
keepworkspaceiffailedString = os.getenv('JOB_KEEPWORKSPACE_IF_FAILED', "True")
keepworkspaceiffailed = keepworkspaceiffailedString.lower() in ['true', '1', 'y', 'yes']
state = client.State()
print('Status GET')
resp_status = None
from fastapi import status
try:
resp_status = workflow_executor.status.run(namespace=namespace, workflow_name=workflow_name, state=state)
if resp_status["status"] == "Running":
response.status_code = status.HTTP_200_OK
status = {"percent": 50, "msg": "running"}
elif resp_status["status"] == "Success":
response.status_code = status.HTTP_200_OK
status = {"percent": 100, "msg": "done"}
elif resp_status["status"] == "Failed":
e = Error()
e.set_error(12, resp_status["error"])
response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
# if keepworkspaceiffailed is false, namespace will be discarded
if not keepworkspaceiffailed:
print('Removing Workspace')
clean_job_status = clean_job(namespace)
if isinstance(clean_job_status, Error):
return clean_job_status
else:
pprint(clean_job_status)
print('Removing Workspace Success')
return e
except ApiException as err:
e = Error()
e.set_error(12, err.body)
response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
# if keepworkspaceiffailed is false, namespace will be discarded
if not keepworkspaceiffailed:
print('Removing Workspace')
clean_job_status = clean_job(namespace)
if isinstance(clean_job_status, Error):
return clean_job_status
else:
pprint(clean_job_status)
print('Removing Workspace Success')
return e
return status
"""
Returns workflow result
"""
@app.get("/result/{service_id}/{run_id}/{prepare_id}/{job_id}", status_code=status.HTTP_200_OK)
def read_getresult(service_id: str, run_id: str, prepare_id: str, job_id: str, response: Response):
namespace = prepare_id
workflow_name = sanitize_k8_parameters(f"wf-{run_id}")
volume_name_prefix = sanitize_k8_parameters(f"{service_id}-volume")
mount_folder = "/workflow"
outputfile = f"{workflow_name}.res"
state = client.State()
keepworkspaceiffailedString = os.getenv('JOB_KEEPWORKSPACE_IF_FAILED', "True")
keepworkspaceiffailed = keepworkspaceiffailedString.lower() in ['true', '1', 'y', 'yes']
print('Result GET')
try:
resp_status = workflow_executor.result.run(namespace=namespace,
workflowname=workflow_name,
mount_folder=mount_folder,
volume_name_prefix=volume_name_prefix,
outputfile=outputfile,
state=state)
print("getresult success")
pprint(resp_status)
json_compatible_item_data = {'wf_output': json.dumps(resp_status)}
print("wf_output json: ")
pprint(json_compatible_item_data)
print("job success")
keepworkspaceString = os.getenv('JOB_KEEPWORKSPACE', "False")
keepworkspace = keepworkspaceString.lower() in ['true', '1', 'y', 'yes']
if not keepworkspace:
print('Removing Workspace')
clean_job_status = clean_job(namespace)
if isinstance(clean_job_status, Error):
return clean_job_status
else:
pprint(clean_job_status)
print('Removing Workspace Success')
except ApiException as err:
e = Error()
e.set_error(12, err.body)
print(err.body)
response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
if not keepworkspaceiffailed:
print('Removing Workspace')
clean_job_status = clean_job(namespace)
if isinstance(clean_job_status, Error):
return clean_job_status
else:
pprint(clean_job_status)
print('Removing Workspace Success')
return e
return JSONResponse(content=json_compatible_item_data)
"""
Removes Kubernetes namespace
"""
def clean_job(namespace: str):
clean_status = {}
try:
clean_status = workflow_executor.clean.run(namespace=namespace)
return clean_status
except ApiException as err:
e = Error()
e.set_error(12, err.body)
print(err.body)
return e
"""
Shortens namespace name to respect K8 64 chars limit
"""
def shorten_namespace(serviceId, runId):
    """Shorten serviceId so that serviceId+runId fits Kubernetes' 63-char limit.

    Characters are trimmed from the end of serviceId (dropping any trailing
    hyphens so the name stays valid) until the combined name fits.

    BUG FIX: if runId alone exceeds 63 characters the original looped
    forever once serviceId was exhausted; we now stop when serviceId is
    empty and return the (still long) runId for the caller to reject.
    """
    new_namespace = f"{serviceId}{runId}"
    while len(new_namespace) > 63 and serviceId:
        serviceId = serviceId[:-1]
        while serviceId.endswith('-'):
            serviceId = serviceId[:-1]
        new_namespace = f"{serviceId}{runId}"
    return new_namespace
def main():
print("DEBuG MODE")
uvicorn.run(app)
if __name__ == "__main__":
main()
|
en
| 0.567342
|
Executes namespace preparation # image pull secrets Returns prepare status # volumeName = sanitize_k8_parameters(f"{content.serviceID}volume") # 200 done # 100 ripassa dopo # 500 error Executes workflow # {"runID": "runID-123","serviceID": "service-id-123", "prepareID":"uuid" ,"cwl":".......","inputs":".........."} # cwl_wrapper config # read ADES config variables # read ADES config variables # retrieve config params and store them in json # these will be used in the stageout phase # inputcwlfile is input_json + cwl_file # create 2 temp files Returns workflow status # if keepworkspaceiffailed is false, namespace will be discarded # if keepworkspaceiffailed is false, namespace will be discarded Returns workflow result Removes Kubernetes namespace Shortens namespace name to respect K8 64 chars limit
| 2.336862
| 2
|
iam-open-dataset/tests/services/test_bevaring_service.py
|
omBratteng/mottak
| 0
|
6626712
|
from dotenv import load_dotenv
from app.domain.models import CreateDatasetResponse
from app.services import bevaring_service
from tests.services.mock_bevaring_client import MockBevaringClient
from tests.test_utils import get_project_root
dotenv_path = get_project_root() / ".env.test"
load_dotenv(dotenv_path=dotenv_path)
def test_get_dataset_keys():
    """get_dataset_keys should map the mock client's response onto a CreateDatasetResponse."""
    mocked_client = MockBevaringClient()
    expected = CreateDatasetResponse(
        bucket_name="mockBucketName",
        datasett_id="mockDatasettId",
        depot_institusjon="mockDepotInstitusjon",
        iam_access_key_id="mockAccessKeyId",
        iam_secret_access_key="mockSecretAccessKey",
        s3_path="mockS3Path",
        status="mockStatus",
    )
    assert bevaring_service.get_dataset_keys(mocked_client) == expected
|
from dotenv import load_dotenv
from app.domain.models import CreateDatasetResponse
from app.services import bevaring_service
from tests.services.mock_bevaring_client import MockBevaringClient
from tests.test_utils import get_project_root
dotenv_path = get_project_root() / ".env.test"
load_dotenv(dotenv_path=dotenv_path)
def test_get_dataset_keys():
mock_client = MockBevaringClient()
expected_result = CreateDatasetResponse(bucket_name="mockBucketName",
datasett_id="mockDatasettId",
depot_institusjon="mockDepotInstitusjon",
iam_access_key_id="mockAccessKeyId",
iam_secret_access_key="mockSecretAccessKey",
s3_path="mockS3Path",
status="mockStatus")
result = bevaring_service.get_dataset_keys(mock_client)
assert result == expected_result
|
none
| 1
| 2.140801
| 2
|
|
acme_diags/driver/polar_driver.py
|
zshaheen/e3sm_diags
| 0
|
6626713
|
from __future__ import print_function
import os
import cdms2
import MV2
import acme_diags
from acme_diags.plot import plot
from acme_diags.derivations import acme
from acme_diags.metrics import rmse, corr, min_cdms, max_cdms, mean
from acme_diags.driver import utils
def create_metrics(ref, test, ref_regrid, test_regrid, diff):
    """Creates the mean, max, min, rmse, corr in a dictionary"""
    # min/max/mean per field; rmse/corr are computed on the regridded pair.
    return {
        'ref': {
            'min': min_cdms(ref),
            'max': max_cdms(ref),
            'mean': mean(ref),
        },
        'test': {
            'min': min_cdms(test),
            'max': max_cdms(test),
            'mean': mean(test),
        },
        'diff': {
            'min': min_cdms(diff),
            'max': max_cdms(diff),
            'mean': mean(diff),
        },
        'misc': {
            'rmse': rmse(test_regrid, ref_regrid),
            'corr': corr(test_regrid, ref_regrid),
        },
    }
def run_diag(parameter):
    """Run polar diagnostics for each season/variable/region combination.

    For every season and variable, reads the test and reference fields,
    applies dataset-specific mask fixes, optionally selects pressure levels,
    regrids both fields to the lower of the two resolutions, computes
    metrics on the difference, plots, and saves netCDF output files.
    Returns the (mutated) parameter object.
    """
    variables = parameter.variables
    seasons = parameter.seasons
    ref_name = getattr(parameter, 'ref_name', '')
    regions = parameter.regions
    test_data = utils.dataset.Dataset(parameter, test=True)
    ref_data = utils.dataset.Dataset(parameter, ref=True)
    for season in seasons:
        # Get the name of the data, appended with the years averaged.
        parameter.test_name_yrs = utils.general.get_name_and_yrs(parameter, test_data, season)
        parameter.ref_name_yrs = utils.general.get_name_and_yrs(parameter, ref_data, season)
        # Get land/ocean fraction for masking.
        try:
            land_frac = test_data.get_variable('LANDFRAC', season)
            ocean_frac = test_data.get_variable('OCNFRAC', season)
        except:
            # NOTE(review): bare except is used as a fallback to the bundled
            # mask file when the dataset lacks LANDFRAC/OCNFRAC; consider
            # narrowing the exception type.
            mask_path = os.path.join(acme_diags.INSTALL_PATH, 'acme_ne30_ocean_land_mask.nc')
            with cdms2.open(mask_path) as f:
                land_frac = f('LANDFRAC')
                ocean_frac = f('OCNFRAC')
        for var in variables:
            print('Variable: {}'.format(var))
            parameter.var_id = var
            # mv1 = test field, mv2 = reference field for this season.
            mv1 = test_data.get_variable(var, season)
            mv2 = ref_data.get_variable(var, season)
            parameter.viewer_descr[var] = mv1.long_name if hasattr(
                mv1, 'long_name') else 'No long_name attr in test data.'
            # Special case, cdms didn't properly convert mask with fill value
            # -999.0, filed issue with Denis.
            if ref_name == 'WARREN':
                # This is cdms2 fix for bad mask, Denis' fix should fix this.
                mv2 = MV2.masked_where(mv2 == -0.9, mv2)
            # The following should be moved to a derived variable.
            if ref_name == 'AIRS':
                # This is cdms2 fix for bad mask, Denis' fix should fix this.
                mv2 = MV2.masked_where(mv2 > 1e+20, mv2)
            if ref_name == 'WILLMOTT' or ref_name == 'CLOUDSAT':
                # This is cdms2 fix for bad mask, Denis' fix should fix this.
                mv2 = MV2.masked_where(mv2 == -999., mv2)
            # The following should be moved to a derived variable.
            if var == 'PRECT_LAND':
                days_season = {'ANN': 365, 'DJF': 90,
                               'MAM': 92, 'JJA': 92, 'SON': 91}
                # mv1 = mv1 * days_season[season] * 0.1 # following AMWG
                # Approximate way to convert to seasonal cumulative
                # precipitation, need to have solution in derived variable,
                # unit convert from mm/day to cm.
                mv2 = mv2 / days_season[season] / \
                    0.1  # Convert cm to mm/day instead.
                mv2.units = 'mm/day'
            # For variables with a z-axis.
            if mv1.getLevel() and mv2.getLevel():
                plev = parameter.plevs
                print('Selected pressure level: {}'.format(plev))
                mv1_p = utils.general.convert_to_pressure_levels(mv1, plev, test_data, var, season)
                # NOTE(review): test_data is passed here for the *reference*
                # variable mv2 -- looks like it should be ref_data; confirm.
                mv2_p = utils.general.convert_to_pressure_levels(mv2, plev, test_data, var, season)
                # Select plev.
                for ilev in range(len(plev)):
                    mv1 = mv1_p[ilev, ]
                    mv2 = mv2_p[ilev, ]
                    for region in regions:
                        print("Selected region: {}".format(region))
                        mv1_domain, mv2_domain = utils.general.select_region(
                            region, mv1, mv2, land_frac, ocean_frac, parameter)
                        parameter.output_file = '-'.join(
                            [ref_name, var, str(int(plev[ilev])), season, region])
                        parameter.main_title = str(
                            ' '.join([var, str(int(plev[ilev])), 'mb', season, region]))
                        # Regrid towards the lower resolution of the two
                        # variables for calculating the difference.
                        mv1_reg, mv2_reg = utils.general.regrid_to_lower_res(
                            mv1_domain, mv2_domain, parameter.regrid_tool, parameter.regrid_method)
                        # Plotting
                        diff = mv1_reg - mv2_reg
                        metrics_dict = create_metrics(
                            mv2_domain, mv1_domain, mv2_reg, mv1_reg, diff)
                        parameter.var_region = region
                        plot(parameter.current_set, mv2_domain,
                             mv1_domain, diff, metrics_dict, parameter)
                        utils.general.save_ncfiles(
                            parameter.current_set, mv1_domain, mv2_domain, diff, parameter)
            # For variables without a z-axis.
            elif mv1.getLevel() is None and mv2.getLevel() is None:
                for region in regions:
                    print("Selected region: {}".format(region))
                    mv1_domain, mv2_domain = utils.general.select_region(
                        region, mv1, mv2, land_frac, ocean_frac, parameter)
                    parameter.output_file = '-'.join(
                        [ref_name, var, season, region])
                    parameter.main_title = str(' '.join([var, season, region]))
                    # Regrid towards the lower resolution of the two
                    # variables for calculating the difference.
                    mv1_reg, mv2_reg = utils.general.regrid_to_lower_res(
                        mv1_domain, mv2_domain, parameter.regrid_tool, parameter.regrid_method)
                    # Special case.
                    if var == 'TREFHT_LAND' or var == 'SST':
                        # Mask out cells that are missing in either field so
                        # both are compared over the same valid domain.
                        if ref_name == 'WILLMOTT':
                            mv2_reg = MV2.masked_where(
                                mv2_reg == mv2_reg.fill_value, mv2_reg)
                        land_mask = MV2.logical_or(mv1_reg.mask, mv2_reg.mask)
                        mv1_reg = MV2.masked_where(land_mask, mv1_reg)
                        mv2_reg = MV2.masked_where(land_mask, mv2_reg)
                    diff = mv1_reg - mv2_reg
                    metrics_dict = create_metrics(
                        mv2_domain, mv1_domain, mv2_reg, mv1_reg, diff)
                    parameter.var_region = region
                    plot(parameter.current_set, mv2_domain,
                         mv1_domain, diff, metrics_dict, parameter)
                    utils.general.save_ncfiles(parameter.current_set,
                                               mv1_domain, mv2_domain, diff, parameter)
            else:
                # One field has a z-axis and the other doesn't: not comparable.
                raise RuntimeError(
                    "Dimensions of the two variables are different. Aborting.")
    return parameter
|
from __future__ import print_function
import os
import cdms2
import MV2
import acme_diags
from acme_diags.plot import plot
from acme_diags.derivations import acme
from acme_diags.metrics import rmse, corr, min_cdms, max_cdms, mean
from acme_diags.driver import utils
def create_metrics(ref, test, ref_regrid, test_regrid, diff):
"""Creates the mean, max, min, rmse, corr in a dictionary"""
metrics_dict = {}
metrics_dict['ref'] = {
'min': min_cdms(ref),
'max': max_cdms(ref),
'mean': mean(ref)
}
metrics_dict['test'] = {
'min': min_cdms(test),
'max': max_cdms(test),
'mean': mean(test)
}
metrics_dict['diff'] = {
'min': min_cdms(diff),
'max': max_cdms(diff),
'mean': mean(diff)
}
metrics_dict['misc'] = {
'rmse': rmse(test_regrid, ref_regrid),
'corr': corr(test_regrid, ref_regrid)
}
return metrics_dict
def run_diag(parameter):
    """Run the lat-lon diagnostics described by ``parameter``.

    For every season/variable/region combination this selects the region,
    regrids the test and reference variables to the lower of the two
    resolutions, computes metrics on the difference, plots the result and
    writes netCDF output.  Variables with a vertical (z) axis are first
    interpolated onto each requested pressure level.

    Parameters
    ----------
    parameter : diagnostics parameter object
        Mutated in place (output_file, main_title, var_id, ... are set).

    Returns
    -------
    The same ``parameter`` object.

    Raises
    ------
    RuntimeError
        If exactly one of the two variables has a z-axis.
    """
    variables = parameter.variables
    seasons = parameter.seasons
    ref_name = getattr(parameter, 'ref_name', '')
    regions = parameter.regions

    test_data = utils.dataset.Dataset(parameter, test=True)
    ref_data = utils.dataset.Dataset(parameter, ref=True)

    for season in seasons:
        # Get the name of the data, appended with the years averaged.
        parameter.test_name_yrs = utils.general.get_name_and_yrs(parameter, test_data, season)
        parameter.ref_name_yrs = utils.general.get_name_and_yrs(parameter, ref_data, season)

        # Get land/ocean fraction for masking.
        try:
            land_frac = test_data.get_variable('LANDFRAC', season)
            ocean_frac = test_data.get_variable('OCNFRAC', season)
        except Exception:
            # Fall back to the bundled ne30 mask when the test data does
            # not provide land/ocean fraction variables.
            mask_path = os.path.join(acme_diags.INSTALL_PATH, 'acme_ne30_ocean_land_mask.nc')
            with cdms2.open(mask_path) as f:
                land_frac = f('LANDFRAC')
                ocean_frac = f('OCNFRAC')

        for var in variables:
            print('Variable: {}'.format(var))
            parameter.var_id = var
            mv1 = test_data.get_variable(var, season)
            mv2 = ref_data.get_variable(var, season)
            parameter.viewer_descr[var] = mv1.long_name if hasattr(
                mv1, 'long_name') else 'No long_name attr in test data.'

            # Special case, cdms didn't properly convert mask with fill value
            # -999.0, filed issue with Denis.
            if ref_name == 'WARREN':
                # This is cdms2 fix for bad mask, Denis' fix should fix this.
                mv2 = MV2.masked_where(mv2 == -0.9, mv2)
            # The following should be moved to a derived variable.
            if ref_name == 'AIRS':
                # This is cdms2 fix for bad mask, Denis' fix should fix this.
                mv2 = MV2.masked_where(mv2 > 1e+20, mv2)
            if ref_name == 'WILLMOTT' or ref_name == 'CLOUDSAT':
                # This is cdms2 fix for bad mask, Denis' fix should fix this.
                mv2 = MV2.masked_where(mv2 == -999., mv2)

            # The following should be moved to a derived variable.
            if var == 'PRECT_LAND':
                days_season = {'ANN': 365, 'DJF': 90,
                               'MAM': 92, 'JJA': 92, 'SON': 91}
                # mv1 = mv1 * days_season[season] * 0.1 # following AMWG
                # Approximate way to convert to seasonal cumulative
                # precipitation, need to have solution in derived variable,
                # unit convert from mm/day to cm.
                mv2 = mv2 / days_season[season] / \
                    0.1  # Convert cm to mm/day instead.
                mv2.units = 'mm/day'

            # For variables with a z-axis.
            if mv1.getLevel() and mv2.getLevel():
                plev = parameter.plevs
                print('Selected pressure level: {}'.format(plev))
                mv1_p = utils.general.convert_to_pressure_levels(mv1, plev, test_data, var, season)
                # NOTE(review): mv2 is converted using test_data here (not
                # ref_data) — confirm this is intentional.
                mv2_p = utils.general.convert_to_pressure_levels(mv2, plev, test_data, var, season)

                # Select plev.
                for ilev in range(len(plev)):
                    mv1 = mv1_p[ilev, ]
                    mv2 = mv2_p[ilev, ]

                    for region in regions:
                        print("Selected region: {}".format(region))
                        mv1_domain, mv2_domain = utils.general.select_region(
                            region, mv1, mv2, land_frac, ocean_frac, parameter)

                        parameter.output_file = '-'.join(
                            [ref_name, var, str(int(plev[ilev])), season, region])
                        parameter.main_title = str(
                            ' '.join([var, str(int(plev[ilev])), 'mb', season, region]))

                        # Regrid towards the lower resolution of the two
                        # variables for calculating the difference.
                        mv1_reg, mv2_reg = utils.general.regrid_to_lower_res(
                            mv1_domain, mv2_domain, parameter.regrid_tool, parameter.regrid_method)

                        # Plotting
                        diff = mv1_reg - mv2_reg
                        metrics_dict = create_metrics(
                            mv2_domain, mv1_domain, mv2_reg, mv1_reg, diff)
                        parameter.var_region = region
                        plot(parameter.current_set, mv2_domain,
                             mv1_domain, diff, metrics_dict, parameter)
                        utils.general.save_ncfiles(
                            parameter.current_set, mv1_domain, mv2_domain, diff, parameter)

            # For variables without a z-axis.
            elif mv1.getLevel() is None and mv2.getLevel() is None:
                for region in regions:
                    print("Selected region: {}".format(region))
                    mv1_domain, mv2_domain = utils.general.select_region(
                        region, mv1, mv2, land_frac, ocean_frac, parameter)

                    parameter.output_file = '-'.join(
                        [ref_name, var, season, region])
                    parameter.main_title = str(' '.join([var, season, region]))

                    # Regrid towards the lower resolution of the two
                    # variables for calculating the difference.
                    mv1_reg, mv2_reg = utils.general.regrid_to_lower_res(
                        mv1_domain, mv2_domain, parameter.regrid_tool, parameter.regrid_method)

                    # Special case.
                    if var == 'TREFHT_LAND' or var == 'SST':
                        if ref_name == 'WILLMOTT':
                            mv2_reg = MV2.masked_where(
                                mv2_reg == mv2_reg.fill_value, mv2_reg)
                        # Mask both fields wherever either is masked so the
                        # difference is only computed on shared valid points.
                        land_mask = MV2.logical_or(mv1_reg.mask, mv2_reg.mask)
                        mv1_reg = MV2.masked_where(land_mask, mv1_reg)
                        mv2_reg = MV2.masked_where(land_mask, mv2_reg)

                    diff = mv1_reg - mv2_reg
                    metrics_dict = create_metrics(
                        mv2_domain, mv1_domain, mv2_reg, mv1_reg, diff)
                    parameter.var_region = region
                    plot(parameter.current_set, mv2_domain,
                         mv1_domain, diff, metrics_dict, parameter)
                    utils.general.save_ncfiles(parameter.current_set,
                                               mv1_domain, mv2_domain, diff, parameter)
            else:
                raise RuntimeError(
                    "Dimensions of the two variables are different. Aborting.")
    return parameter
|
en
| 0.881214
|
Creates the mean, max, min, rmse, corr in a dictionary # Get the name of the data, appended with the years averaged. # Get land/ocean fraction for masking. # Special case, cdms didn't properly convert mask with fill value # -999.0, filed issue with Denis. # This is cdms2 fix for bad mask, Denis' fix should fix this. # The following should be moved to a derived variable. # This is cdms2 fix for bad mask, Denis' fix should fix this. # This is cdms2 fix for bad mask, Denis' fix should fix this. # The following should be moved to a derived variable. # mv1 = mv1 * days_season[season] * 0.1 # following AMWG # Approximate way to convert to seasonal cumulative # precipitation, need to have solution in derived variable, # unit convert from mm/day to cm. # Convert cm to mm/day instead. # For variables with a z-axis. # Select plev. # Regrid towards the lower resolution of the two # variables for calculating the difference. # Plotting # For variables without a z-axis. # Regrid towards the lower resolution of the two # variables for calculating the difference. # Special case.
| 2.282217
| 2
|
TCGAdnloader/downloader.py
|
jingxinfu/TCGAdnloader
| 2
|
6626714
|
#!/usr/bin/env python3
import subprocess, os,time,gzip
import pandas as pd
import numpy as np
from functools import reduce
from .convertor import mergeToSample, calTNzcore, rmEntrez, tpmToFpkm, mapEm2Gene, formatClin, pick,formatDrug
from .outformat import storeData
import requests,json,re,io
from .setting import CLIN_INFO, Biospecimen_INFO, Biospecimen_MAP, CLIN_MAP, PAM50_PATH, DRUG_MAP
class GdcApi(object):
    '''
    API client for downloading files and clinical metadata from GDC.
    '''
    # NOTE(review): the attribute is spelled ``__slot__`` (not ``__slots__``),
    # so it has no slotting effect and instances keep a normal __dict__.
    # Left as-is on purpose: GdcDnloader inherits from both GdcApi and
    # Workflow, and two bases with real non-empty __slots__ would raise an
    # instance lay-out conflict at class creation time.
    __slot__ = ["files_endpt", "data_endpt", "cancer", "parental_dir", 'cases_endpt']

    def __init__(self, cancer, parental_dir, cases_endpt='https://api.gdc.cancer.gov/cases', data_endpt="https://api.gdc.cancer.gov/data", files_endpt="https://api.gdc.cancer.gov/files", **kwargs):
        ''' Intialize instance parameters

        Parameters
        ----------
        cancer : str
            Cancer type
        parental_dir : str
            Path to store datas
        cases_endpt : str, optional
            [Endpoint for case-level queries] (the default is "https://api.gdc.cancer.gov/cases")
        data_endpt : str, optional
            [Endpoint for files id searching] (the default is "https://api.gdc.cancer.gov/data")
        files_endpt : str, optional
            [Endpoint for files downloading] (the default is "https://api.gdc.cancer.gov/files")
        '''
        self.files_endpt = files_endpt
        self.data_endpt = data_endpt
        self.cancer = cancer
        self.parental_dir = parental_dir
        self.cases_endpt = cases_endpt

    def _projFilter(self, data_type, method=None):
        '''Build query params selecting project files of ``data_type``.

        ``method`` (e.g. "MuSE") narrows SNV files to one variant-calling
        workflow; when None no workflow filter is added.
        '''
        dtype_dict = {
            "cnv_segment_somatic": "Masked Copy Number Segment",
            "cnv_segment_all": "Copy Number Segment",
            "masked_somatic_mutation": "Masked Somatic Mutation",
        }
        filters = {
            "op": "and",
            "content": [
                {
                    "op": "in",
                    "content": {
                        "field": "files.data_type",
                        "value": [
                            dtype_dict[data_type]
                        ]
                    }
                },
                {
                    "op": "in",
                    "content": {
                        "field": "cases.project.project_id",
                        "value": [
                            "TCGA-"+self.cancer.upper()
                        ]
                    }
                },
            ]
        }
        # specific for SNV on TCGA (Calling by four different tools)
        if method is not None:
            filters['content'].append({
                "op": "in",
                "content": {
                    "field": "files.analysis.workflow_type",
                    "value": [
                        "{} Variant Aggregation and Masking".format(method)
                    ]
                }
            })
        params = {
            "filters": json.dumps(filters),
            "format": "JSON",
            "size": "3000"
        }
        return params

    def _nameFilter(self, data_type):
        '''Build query params matching a single file by its exact name.'''
        dtype_dict = {
            'drug': "nationwidechildrens.org_clinical_drug_{}.txt".format(self.cancer.lower()),
            'gistic': '{}.focal_score_by_genes.txt'.format(self.cancer.upper()),
            # 'survival': "nationwidechildrens.org_clinical_follow_up_v{0}_{1}.txt".format(CLIN_VERSION[self.cancer], self.cancer.lower()),
            'patient': "nationwidechildrens.org_clinical_patient_{}.txt".format(self.cancer.lower()),
            'aliquot': "nationwidechildrens.org_biospecimen_aliquot_{}.txt".format(self.cancer.lower()),
            'slide': "nationwidechildrens.org_biospecimen_slide_{}.txt".format(self.cancer.lower()),
            'sample': "nationwidechildrens.org_biospecimen_sample_{}.txt".format(self.cancer.lower()),
            'auxilary': "nationwidechildrens.org_auxiliary_{}.txt".format(self.cancer.lower()),
        }
        filters = {
            "op": "in",
            "content": {
                "field": "files.file_name",
                "value": [
                    dtype_dict[data_type]
                ]
            }
        }
        params = {
            "filters": json.dumps(filters),
            "format": "JSON",
            "size": "1"
        }
        return params

    def _fetchFileID(self, data_type, by_name=True, method=None):
        ''' Get files id by upstream filter parameters

        Parameters
        ----------
        data_type : str
            Data type to be download. eg. gistic
        by_name : bool, optional
            Whether getting files id by matching file names (the default is True).
            If not, we will use project filtering options to get file id list.
        method : str, optional
            Variant-calling workflow name forwarded to ``_projFilter``.

        Returns
        -------
        (list or None, str or None)
            A list of file ids and None, or (None, 'Not found') on failure.
        '''
        if by_name is True:
            file_uuid_list = []
            params = self._nameFilter(data_type)
            response = requests.get(self.files_endpt, params=params)
            for file_entry in json.loads(response.content.decode("utf-8"))["data"]["hits"]:
                file_uuid_list.append(file_entry["file_id"])
        else:
            file_uuid_list = []
            params = self._projFilter(data_type, method=method)
            response = requests.get(self.files_endpt, params=params)
            if "message" in json.loads(response.content.decode("utf-8")).keys():
                return None, 'Not found'
            for file_entry in json.loads(response.content.decode("utf-8"))["data"]["hits"]:
                file_uuid_list.append(file_entry["file_id"])
        if not file_uuid_list:
            return None, 'Not found'
        else:
            return file_uuid_list, None

    def getTableFromFiles(self, data_type, by_name=True, method=None, **kwargs):
        '''
        Merging tables downloaded by a list of file ids.

        Returns (DataFrame, None) on success or (None, error message).
        Extra ``kwargs`` are forwarded to ``pd.read_table``.
        '''
        # One retry after a pause; the GDC API occasionally drops SSL
        # connections under load.
        try:
            file_uuid_list, error = self._fetchFileID(
                data_type=data_type, by_name=by_name, method=method)
        except requests.exceptions.SSLError:
            time.sleep(10)
            file_uuid_list, error = self._fetchFileID(
                data_type=data_type, by_name=by_name, method=method)
        if error is not None:
            return None, error
        ready_to_merge = []
        if len(file_uuid_list) == 0:
            return None, 'Cannot find any file.'
        for ids in file_uuid_list:
            params = {"ids": [ids]}
            try:
                response = requests.post(self.data_endpt, data=json.dumps(
                    params), headers={"Content-Type": "application/json"})
            except requests.exceptions.SSLError:
                time.sleep(10)
                response = requests.post(self.data_endpt, data=json.dumps(
                    params), headers={"Content-Type": "application/json"})
            if method is not None:
                # Mutation archives come back gzipped: spool to disk so
                # pandas can decompress transparently, then clean up.
                temp_file = self.cancer+'_'+method+"_snv_tmp.gz"
                with open(temp_file, "wb") as fh:
                    fh.write(response.content)
                df = pd.read_table(temp_file, **kwargs)
                os.remove(temp_file)
            else:
                df = pd.read_table(io.StringIO(
                    response.content.decode("utf-8")), **kwargs)
            ready_to_merge.append(df)
        return pd.concat(ready_to_merge, axis=0), None

    def getClinInfo(self, fields):
        '''Query the cases endpoint for the given clinical ``fields`` (TSV).'''
        filters = {
            "op": "in",
            "content": {
                "field": "cases.project.project_id",
                "value": [
                    "TCGA-"+self.cancer.upper()
                ]
            }
        }
        fields = ','.join(fields)
        params = {
            "filters": json.dumps(filters),
            "fields": fields,
            "format": "TSV",
            "size": "3000"
        }
        response = requests.get(self.cases_endpt, params=params)
        if response.status_code != 200:
            # One retry after a pause on a non-200 response.
            time.sleep(10)
            response = requests.get(self.cases_endpt, params=params)
        try:
            result = pd.read_table(io.StringIO(response.content.decode("utf-8")))
            error = None
        except Exception:
            result = None
            error = 'Not Found!'
        return result, error

    def clin(self):
        '''
        Downloading clinical information
        '''
        surs, stderr = self.getClinInfo(fields=CLIN_INFO)
        if stderr is None:
            surs.rename(columns=CLIN_MAP, inplace=True)
            surs = surs[list(CLIN_MAP.values())]
            format_surs = formatClin(surs)
            storeData(df=format_surs, parental_dir=self.parental_dir,
                      sub_folder='Surv', cancer=self.cancer)
            stderr = ''
        else:
            stderr = 'Cannot Found\tsurvival_info\t'+self.cancer+'\n'
        return stderr

    def biospecimen(self):
        '''
        Downloading biopecimen information
        '''
        stderr = ''
        for sub_folder, files in Biospecimen_INFO.items():
            read_to_merge = []
            for k, v in files.items():
                meta, errors = self.getTableFromFiles(data_type=k)
                if errors is None:
                    # Keep only requested columns; pad the missing ones
                    # with NaN so downstream merges see a stable schema.
                    meta = meta[meta.columns.intersection(v)]
                    non_info = pd.Index(v).difference(meta.columns)
                    for c in non_info:
                        meta[c] = np.nan
                    meta.replace('[Not Available]', np.nan, inplace=True)
                    meta.replace('[Not Applicable]', np.nan, inplace=True)
                    meta.rename(columns=Biospecimen_MAP, inplace=True)
                    ## header process
                    if 'bcr_sample_barcode' in v:
                        meta = meta.drop(0, axis=0)
                    if k == 'sample':
                        # Trim the vial letter, then derive the patient
                        # barcode (first three dash-separated fields).
                        meta['sample'] = meta['sample'].map(lambda x: x[:-1])
                        meta = meta.drop_duplicates()
                        meta['patient'] = meta['sample'].map(lambda x: '-'.join(x.split('-')[:3]))
                    ## additional info
                    if k == 'slide':
                        meta = meta.set_index('sample')
                        meta = meta.apply(pd.to_numeric)
                        meta = mergeToSample(meta, transpose=True)
                    read_to_merge.append(meta)
                else:
                    stderr += 'Cannot Found\t'+sub_folder+'_'+k+'\t'+self.cancer+'\n'
            if len(read_to_merge) > 1:
                result = reduce(lambda x, y: pd.merge(x, y, how='outer', on='patient'), read_to_merge).drop_duplicates().dropna(axis=1, how='all')
                result = result.set_index('patient')
            elif len(read_to_merge) == 1:
                result = read_to_merge[0]
            else:
                continue
            storeData(result,
                      parental_dir=self.parental_dir,
                      sub_folder=sub_folder, cancer=self.cancer)
        return stderr

    def drug(self):
        '''
        Downloading Drug information
        '''
        stderr = ''
        df, errors = self.getTableFromFiles(data_type='drug')
        if errors is None:
            # First two rows are secondary header rows in the TCGA file.
            df = df.drop([0, 1], axis=0)
            df = df.loc[:, df.columns.isin(list(DRUG_MAP.keys()))]
            df.rename(columns=DRUG_MAP, inplace=True)
            df = formatDrug(df)
            df.set_index('patient', inplace=True)
            storeData(df=df, parental_dir=self.parental_dir,
                      sub_folder='Drug', cancer=self.cancer)
        else:
            stderr += 'Cannot Found\tDrug information for \t'+self.cancer+'\n'
        return stderr

    def drugDownload(self):
        '''Download drug data once per cancer, tracked in drug_finish.log.'''
        if not os.path.isdir(self.parental_dir):
            os.makedirs(self.parental_dir)
        # asyn download
        download_log_file = '/'.join([self.parental_dir, 'drug_finish.log'])
        if os.path.isfile(download_log_file):
            with open(download_log_file, 'r') as f:
                content = f.readlines()
            content = [x.strip() for x in content]
        else:
            content = []
        # begain download if not having been downloaded before
        if self.cancer not in content:
            with open('/'.join([self.parental_dir, 'drug_stderr.log']), 'a+') as stderrs:
                logs = self.drug()
                stderrs.write(logs)
            with open(download_log_file, 'a+') as f:
                f.write(self.cancer+'\n')

    def metaDownload(self):
        '''Download biospecimen metadata once per cancer, tracked in meta_finish.log.'''
        if not os.path.isdir(self.parental_dir):
            os.makedirs(self.parental_dir)
        # asyn download
        download_log_file = '/'.join([self.parental_dir, 'meta_finish.log'])
        if os.path.isfile(download_log_file):
            with open(download_log_file, 'r') as f:
                content = f.readlines()
            content = [x.strip() for x in content]
        else:
            content = []
        # begain download if not having been downloaded before
        if self.cancer not in content:
            with open('/'.join([self.parental_dir, 'meta_stderr.log']), 'a+') as stderrs:
                for n in ['biospecimen']:  # , 'clin']:
                    logs = self.__getattribute__(n)()
                    stderrs.write(logs)
            with open(download_log_file, 'a+') as f:
                f.write(self.cancer+'\n')
class Workflow(object):
    """Base class driving per-cancer download workflows.

    Subclasses expose one method per data type; ``run`` invokes every
    method named in ``self.workflow`` and appends the returned error
    messages to ``stderr.log``.  A ``finish.log`` file under
    ``parental_dir`` records which cancers already completed so reruns
    skip finished work.
    """
    # NOTE(review): spelled ``__slot__`` (no slotting effect); kept as-is
    # to match sibling classes and avoid a __slots__ layout conflict in
    # GdcDnloader's multiple inheritance.
    __slot__ = ['cancer', 'parental_dir', 'workflow']

    def __init__(self, cancer, parental_dir, workflow):
        self.cancer = cancer
        self.parental_dir = parental_dir
        self.workflow = workflow

    def run(self):
        """Execute every workflow step unless this cancer already finished."""
        if not os.path.isdir(self.parental_dir):
            os.makedirs(self.parental_dir)
        # Resume support: read the list of cancers already completed.
        download_log_file = '/'.join([self.parental_dir, 'finish.log'])
        if os.path.isfile(download_log_file):
            with open(download_log_file, 'r') as f:
                content = [x.strip() for x in f.readlines()]
        else:
            content = []
        # Begin downloading only if this cancer was not processed before.
        if self.cancer not in content:
            with open('/'.join([self.parental_dir, 'stderr.log']), 'a+') as stderrs:
                for n in self.workflow:
                    logs = self.__getattribute__(n)()
                    stderrs.write(logs)
            with open(download_log_file, 'a+') as f:
                f.write(self.cancer+'\n')
class FireBrowseDnloader(Workflow):
    """Workflow fetching TCGA level-3 (hg19) data from FireBrowse mirrors."""
    # NOTE(review): spelled ``__slot__`` (no slotting effect); matches the
    # sibling classes' spelling and is left untouched on purpose.
    __slot__ = ['release_time']

    def __init__(self, release_time="2016_01_28", base_url="http://gdac.broadinstitute.org/runs",**kwargs):
        # release_time selects the FireBrowse analysis run by date;
        # base_url is the mirror prefix.  Remaining kwargs go to Workflow.
        super(FireBrowseDnloader, self).__init__(**kwargs)
        self.release_time = release_time
        self.base_url = base_url

    def _fget(self,data_type, store_dir):
        ''' Download level 3 data from FireBrowse

        Parameters
        ----------
        cancer : str
            Cancer type included in TCGA project
        data_type : str
            Level 3 data type provided by FireBrowse
        store_dir : str
            Output directory
        base_url : str, optional
            URL prefix (the default is "http://gdac.broadinstitute.org/runs", which is the prefix provided by FireBrowse)
        release_time : str, optional
            Release version and this release recored by date. (the default is "2016_01_28", which is the latest available release for now.)

        Raises
        ------
        KeyError
            if the input parameter is out of provided list.

        Returns
        -------
        str
            Run messages. Return 'Success' if no error occurs.
        '''
        # modifition to adapt CNV data on the function
        # CNV GISTIC output lives under the "analyses" run and carries a
        # sample-type suffix (-TP tumor primary, -TM metastatic for SKCM);
        # everything else lives under "stddata" with no suffix.
        if data_type == 'cnv_gene_somatic':
            release_prefix = 'analyses'
            cancer_suffix = '-TP'
            if self.cancer == 'SKCM':
                cancer_suffix = '-TM'
        else:
            cancer_suffix = ''
            release_prefix = 'stddata'
        # Map short data-type names to FireBrowse archive names.
        data_type_dict = {
            "rna_raw" : "Merge_rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes__data.Level_3",
            "rna_norm": "Merge_rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.Level_3",
            "rppa": "RPPA_AnnotateWithGene.Level_3",
            "cnv_gene_somatic": "CopyNumber_Gistic2.Level_4",
            "cnv_segment_somatic": "Merge_snp__genome_wide_snp_6__broad_mit_edu__Level_3__segmented_scna_minus_germline_cnv_hg19__seg.Level_3",
            "cnv_segment_all": "Merge_snp__genome_wide_snp_6__broad_mit_edu__Level_3__segmented_scna_hg19__seg.Level_3",
        }
        # Only files matching these suffixes are kept after extraction.
        keep_suffix_dict = {
            "rna_raw": "rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes__data.data.txt",
            "rppa" : "rppa.txt",
            "rna_norm": "rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.data.txt",
            "cnv_gene_somatic": "by_genes.txt",
            "cnv_segment_somatic": "snp__genome_wide_snp_6__broad_mit_edu__Level_3__segmented_scna_minus_germline_cnv_hg19__seg.seg.txt",
            "cnv_segment_all": "snp__genome_wide_snp_6__broad_mit_edu__Level_3__segmented_scna_hg19__seg.seg.txt",
        }
        if not data_type in data_type_dict.keys():
            raise KeyError("""
            {0} is not a valid data type, only accept following input: {1}
            """.format(data_type,','.join(data_type_dict.keys())))
        # Assemble the archive URL: e.g. 2016_01_28 -> 20160128 path part.
        short_release_time = "".join(self.release_time.split('_'))
        release = release_prefix+"__{release_time}"
        sub_folder = "data/{cancer}/{short_release_time}"
        file_name = "gdac.broadinstitute.org_{cancer}.{data_type}.{short_release_time}00.0.0.tar.gz"
        url = "/".join([self.base_url, release, sub_folder, file_name])
        url = url.format(**dict(
            cancer=self.cancer+cancer_suffix,
            data_type=data_type_dict[data_type],
            release_time=self.release_time,
            short_release_time=short_release_time,
            )
        )
        # Download the tarball, unpack into a temp dir and move only the
        # files matching keep_suffix next to (or under) store_dir.
        cmd ="""
        set -x
        [[ -d {store_dir}_{cancer}_{data_type}_tmp ]] || mkdir -p {store_dir}_{cancer}_{data_type}_tmp
        wget -q -O {store_dir}_{cancer}_{data_type}.gz {url}
        tar -xvvf {store_dir}_{cancer}_{data_type}.gz -C {store_dir}_{cancer}_{data_type}_tmp --strip-components=1
        rm {store_dir}_{cancer}_{data_type}.gz
        if [ $(ls {store_dir}_{cancer}_{data_type}_tmp/*{keep_suffix}| wc -l) -gt 1 ];then
            [[ -d {store_dir}_{cancer} ]] || mkdir {store_dir}_{cancer}
        fi
        mv {store_dir}_{cancer}_{data_type}_tmp/*{keep_suffix} {store_dir}_{cancer}
        """.format(**dict(
            store_dir=store_dir,
            cancer=self.cancer,
            keep_suffix=keep_suffix_dict[data_type],
            url=url,
            data_type=data_type
            )
        )
        try:
            subprocess.run(cmd, shell=True,check=True)
            log = 'Success'
        except subprocess.CalledProcessError as e:
            # On failure, clean up the partial download and temp dir and
            # report the shell return code to the caller.
            cmd = """
            set -x
            rm {store_dir}_{cancer}_{data_type}.gz
            rm -rf {store_dir}_{cancer}_{data_type}_tmp
            """.format(**dict(
                store_dir=store_dir,
                cancer=self.cancer,
                data_type=data_type
                )
            )
            subprocess.run(cmd, shell=True, check=True)
            return str(e.returncode)
        ## process data
        # Success path: remove the now-empty extraction directory.
        cmd = """
        rm -rf {store_dir}_{cancer}_{data_type}_tmp
        """.format(**dict(
            store_dir=store_dir,
            cancer=self.cancer,
            data_type=data_type
            )
        )
        subprocess.run(cmd,shell=True,check=True)
        return log

    def _splitCountTPM(self, raw_rnaseq_path):
        ''' Split one data frame with both count and scaled_estiamte into two data frames and
        merge the sample level data frame into pateint level data frame, but keep separating tumor and normal samples.
        Then, based on the scaled_estimate column, calculate TPM and RPKM information.

        Parameters
        ----------
        raw_rnaseq_path : str
            Path to raw rnaseq data download from FireBrowse

        Returns
        -------
        Dict
            A dict that contains three pandas.DataFrame, which are raw count, TPM and RPKM.
            All of those data frame are index by both Entrez ID and gene symbol and colum named by four digits TCGA barcode.
        '''
        # Row 1 of the file is a secondary header ("raw_count"/"scaled_estimate"
        # labels); read it separately to build a column selector.
        df = pd.read_table(raw_rnaseq_path, index_col=0,skiprows=[1])
        col_selector = pd.read_table(raw_rnaseq_path, index_col=0, nrows=2)
        raw_count = df.loc[:, col_selector.iloc[0, :] =='raw_count']
        raw_count = mergeToSample(raw_count)
        raw_count = round(raw_count)
        ## Get fpkm and tpm information from transcript fractions
        transcipt_fraction = df.loc[:,col_selector.iloc[0, :] == 'scaled_estimate']
        # NOTE(review): 10e6 evaluates to 1e7, not the conventional 1e6 TPM
        # scale factor — confirm this constant is intended.
        tpm = transcipt_fraction * 10e6
        normalize_factor = transcipt_fraction.sum(axis=0)
        # NOTE(review): FPKM is computed here by *multiplying* the per-sample
        # fraction sum and 10e9 (== 1e10) — verify this formula against the
        # intended RSEM fraction-to-FPKM conversion.
        fpkm = transcipt_fraction * normalize_factor * 10e9
        tpm = mergeToSample(tpm)
        fpkm = mergeToSample(fpkm)
        return dict(count=raw_count,tpm=tpm,fpkm=fpkm)

    def _formatGistic(self, gistic_path):
        ''' Formating GISTIC results and sepratate files into segment and gene level

        Parameters
        ----------
        gistic_path : str
            Path to the folder of gistic output

        Returns
        -------
        dict
            Dictionary with files output name as key and pandas.DataFrame as value
        '''
        # Only files that actually exist in the GISTIC output folder are
        # loaded; metadata columns are dropped, leaving sample columns only.
        f_dict = {
            "broad_focal": '{}/all_data_by_genes.txt',
            "focal": '{}/focal_data_by_genes.txt',
            "threds": '{}/all_thresholded.by_genes.txt'
        }
        result = {}
        for k, v in f_dict.items():
            if os.path.isfile(v.format(gistic_path)):
                result[k] = pd.read_table(v.format(gistic_path),index_col=0).drop(['Locus ID', 'Cytoband'],axis=1)
        return result

    def rnaseq(self):
        '''
        Workflow for downloading RNAseq data from FireBrowse and preprocessing data format.

        Parameters
        ----------
        parental_dir : str
            Path to parental folder that you want to store the whole RNAseq data
        cancer : str
            Cancer name you want to download from FireBrowse, it must be a cancer type included in TCGA project.
        '''
        ########################## Raw count and Scale Estimate ##########################
        # 1. Fetch raw count and RSEM information from FireBrowse
        # 2. Split fetched data frame into raw count and RSEM separatly.
        # 3. Merge sample level data into pateint level data, but still separate tumor and normal sample.
        # 4. Calculate TPM and RPKM based on RSEM results.
        ##################################################################################
        store_dir = '/'.join([self.parental_dir, 'RNASeq'])
        store_dir_raw = '_'.join([store_dir, 'raw'])
        store_dir_norm = '_'.join([store_dir, 'norm'])
        log = self._fget(data_type='rna_raw',store_dir=store_dir_raw)
        if log != 'Success':
            return 'Cannot Found\trna_raw\t'+self.cancer+'\n'
        raw_rnaseq = self._splitCountTPM(
            raw_rnaseq_path='_'.join([store_dir_raw, self.cancer])
            )
        for name, df in raw_rnaseq.items():
            df = rmEntrez(df)
            if name in ['fpkm','tpm']:
                # z-scores are computed on log2(1+x); paired z-scores need
                # matched tumor/normal samples and may raise ValueError.
                log_df = np.log2( 1+ df )
                tumor_zscore = calTNzcore(log_df, pair_TN=False)
                storeData(df=tumor_zscore, parental_dir=store_dir,
                          sub_folder=name+'/zscore_tumor/', cancer=self.cancer)
                try:
                    paired_zscore = calTNzcore(log_df, pair_TN=True)
                    storeData(df=paired_zscore, parental_dir=store_dir,
                              sub_folder=name+'/zscore_paired/', cancer=self.cancer)
                except ValueError:
                    pass
                name += '/origin'
            storeData(df = df, parental_dir = store_dir,
                      sub_folder=name, cancer=self.cancer)
        subprocess.call(
            'rm -rf {}'.format('_'.join([store_dir_raw, self.cancer])), shell=True)
        ########################## Raw count and Scale Estimate ##########################
        # 1. Fetch normalized count from FireBrowse
        # 2. remove the second row, which only indicate the normalized count
        # 3. Merge sample level data into pateint level data, but still separate tumor and normal sample.
        ##################################################################################
        log = self._fget(data_type='rna_norm',store_dir=store_dir_norm)
        if log != 'Success':
            return 'Cannot Found\trna_norm\t'+self.cancer+'\n'
        rnaseq_norm = pd.read_table(
            '_'.join([store_dir_norm, self.cancer]), index_col=0, skiprows=[1])
        rnaseq_norm = mergeToSample(rnaseq_norm)
        rnaseq_norm = rmEntrez(rnaseq_norm)
        storeData(df=rnaseq_norm, parental_dir=store_dir,
                  sub_folder='norm_count/origin', cancer=self.cancer)
        subprocess.call(
            'rm -rf {}'.format('_'.join([store_dir_norm, self.cancer])), shell=True)
        return ''

    def cnv(self):
        '''
        Workflow for downloading copy number variation data from FireBrowse and preprocessing data format.

        Parameters
        ----------
        parental_dir : str
            Path to parental folder that you want to store the whole copy number variation data
        cancer : str
            Cancer name you want to download from FireBrowse, it must be a cancer type included in TCGA project.
        '''
        ## Gene
        store_dir = '/'.join([self.parental_dir, 'CNV/somatic', 'gene'])
        log = self._fget( data_type='cnv_gene_somatic',store_dir=store_dir)
        if log != 'Success':
            return 'Cannot Found\tcnv_gene_somatic\t'+self.cancer+'\n'
        cnv_gene = self._formatGistic(
            gistic_path='_'.join([store_dir, self.cancer]))
        for name, df in cnv_gene.items():
            df = mergeToSample(df)
            storeData(df=df, parental_dir=store_dir,
                      sub_folder=name, cancer=self.cancer)
        subprocess.call(
            'rm -rf {}'.format('_'.join([store_dir, self.cancer])), shell=True)
        ## Segment
        # Segment files are moved as-is (no parsing) into their final folder.
        for lv in ['somatic','all']:
            store_dir = '/'.join([self.parental_dir, 'CNV/'+lv, 'segment'])
            log = self._fget(data_type='cnv_segment_'+lv, store_dir=store_dir)
            if log != 'Success':
                return 'Cannot Found\t' + 'cnv_segment_'+lv+'\t'+self.cancer+'\n'
            if not os.path.exists(store_dir):
                os.makedirs(store_dir)
            subprocess.call(
                'mv {0} {1}'.format('_'.join([store_dir, self.cancer]),
                                    '/'.join([store_dir, self.cancer])
                                    ),
                shell=True)
        return ''

    def rppa(self):
        '''
        Workflow for downloading RPPA data from FireBrowse and preprocessing data format.

        Parameters
        ----------
        parental_dir : str
            Path to parental folder that you want to store the whole RPPA data
        cancer : str
            Cancer name you want to download from FireBrowse, it must be a cancer type included in TCGA project.
        '''
        store_dir = '/'.join([self.parental_dir, 'RPPA'])
        log=self._fget(data_type='rppa',store_dir=store_dir)
        if log != 'Success':
            return 'Cannot Found\trppa\t'+self.cancer+'\n'
        rppa = pd.read_table(
            '_'.join([store_dir,self.cancer]), index_col=0)
        rppa = rmEntrez(rppa)
        rppa = mergeToSample(rppa)
        storeData(df=rppa, parental_dir=store_dir,
                  sub_folder='', cancer=self.cancer)
        subprocess.call(
            'rm -rf {}'.format('_'.join([store_dir, self.cancer])), shell=True)
        return ''

    def snv(self):
        '''
        Please use MC3 downloader to fetch the SNV result for all cancer in TCGA,
        which is more robust.
        '''
        return 'GO TO MC3\tsnv\t'+self.cancer+'\n'
class GdcDnloader(GdcApi, Workflow):
__slot__ = ['type_available', 'base_url']
def __init__(self, base_url="https://gdc.xenahubs.net/download/",**kwargs):
Workflow.__init__(self,**kwargs)
GdcApi.__init__(self, cancer=self.cancer,parental_dir=self.parental_dir)
# super(GdcDnloader, self).__init__(data_endpt="https://api.gdc.cancer.gov/data",files_endpt="https://api.gdc.cancer.gov/files",**kwargs)
# data-release-80
self.base_url = base_url
self.type_available = {
'RNASeq': ['fpkm','count','fpkm_uq'],
'SNV': ['MuSE', "MuTect2", "VarScan2", "SomaticSniper"],
'cnv': ['somatic','all']
}
def _fget(self, data_type, store_dir):
'''Download level 3 data from Xenas
Parameters
----------
data_type : str
Data type to be downloaded
store_dir : str
Path to store the data
Raises
------
KeyError
If cannot fetching the files
Returns
-------
str
Tell if the downloading is successful or not
'''
data_type_dict = {
'fpkm': "htseq_fpkm",
'count':"htseq_counts",
'fpkm_uq': "htseq_fpkm-uq",
'muse': "muse_snv",
"mutect2": "mutect2_snv",
"VarScan2": "varscan2_snv",
"SomaticSnipe":"somaticsniper_snv",
}
if not data_type in data_type_dict.keys():
raise KeyError("""
{0} is not a valid data type, only accept following input: {1}
""".format(data_type, ','.join(data_type_dict.keys())))
# https: // gdc.xenahubs.net/download/TCGA-CHOL/Xena_Matrices/TCGA-CHOL.htseq_fpkm.tsv.gz
subpath = 'TCGA-{cancer}/Xena_Matrices/TCGA-{cancer}.{data_type}.tsv.gz'
url = "/".join([self.base_url, subpath])
url = url.format(**dict(
cancer=self.cancer,
data_type=data_type_dict[data_type]
)
)
cmd = """
set -x
[[ -d {store_dir} ]] || mkdir -p {store_dir}
wget -q -O {store_dir}/{cancer}.gz {url}
""".format(**dict(
store_dir=store_dir,
cancer=self.cancer,
url=url,
)
)
try:
subprocess.run(cmd, shell=True, check=True)
log = 'Success'
cmd = "set -x; gunzip {store_dir}/{cancer}.gz".format(**dict(store_dir=store_dir,
cancer=self.cancer))
except subprocess.CalledProcessError as e:
log = str(e.returncode)
cmd = "set -x; rm {store_dir}/{cancer}.gz".format(**dict(store_dir=store_dir,
cancer=self.cancer))
subprocess.run(cmd, shell=True, check=True)
return log
def rnaseq(self):
store_parental = '/'.join([self.parental_dir, 'RNASeq'])
for name in self.type_available['RNASeq']:
store_dir = '/'.join([store_parental, name])
log = self._fget(data_type=name, store_dir=store_dir)
if log != 'Success':
return 'Cannot Found\t' + name+'\t'+self.cancer+'\n'
df = pd.read_table('/'.join([store_dir,self.cancer]),index_col=0)
df = np.exp2(df) - 1 # since all matrix download from xenas have been log transformed
df = mergeToSample(df)
df = mapEm2Gene(df)
if name == 'fpkm':
tpm = tpmToFpkm(df, reverse=True)
for raw_name,raw_df in {'tpm':tpm,'fpkm':df}.items():
log_df = np.log2(1 + raw_df)
tumor_zscore = calTNzcore(log_df, pair_TN=False)
storeData(df=tumor_zscore, parental_dir=store_parental,
sub_folder=raw_name+'/zscore_tumor/', cancer=self.cancer)
try:
paired_zscore = calTNzcore(log_df, pair_TN=True)
storeData(df=paired_zscore, parental_dir=store_parental,
sub_folder=raw_name+'/zscore_paired/', cancer=self.cancer)
except ValueError:
pass
storeData(df=raw_df, parental_dir=store_parental,
sub_folder=raw_name+'/origin', cancer=self.cancer)
else:
if name == 'count':
df = df.round(0)
storeData(df=df, parental_dir=store_parental,
sub_folder=name+'/origin', cancer=self.cancer)
subprocess.call(
'rm -rf {}'.format('/'.join([store_dir, self.cancer])), shell=True)
return ''
def snv(self):
for m in self.type_available['SNV']:
df, errors = self.getTableFromFiles(
data_type='masked_somatic_mutation', by_name=False,method=m,comment='#')
if errors != None:
return 'Cannot Found\t'+m+'\t'+self.cancer+'\n'
else:
# df.rename(columns={"Hugo_Symbol":"gene"},inplace=True)
# df.insert(0, 'sample', df["Tumor_Sample_Barcode"].map(
# lambda x: '-'.join(x.split('-')[:4])[:-1]))
store_parental = '/'.join([self.parental_dir, 'SNV'])
storeData(df=df, parental_dir=store_parental,
sub_folder=m, cancer=self.cancer)
return ''
def cnv(self):
    '''Download CNV data: gene-level GISTIC focal scores plus segment files.

    Aliquot UUIDs are translated to TCGA sample barcodes first; every piece
    that cannot be fetched is reported in the returned error string.
    '''
    cnv_root = '/'.join([self.parental_dir, 'CNV'])
    # Aliquot sheet maps bcr_aliquot_uuid -> bcr_sample_barcode.
    aliquot, errors = self.getTableFromFiles(data_type='aliquot')
    if errors is not None:
        return 'Cannot Found\tuuid map barcode\t' + self.cancer + '\n'
    aliquot = aliquot.dropna(axis=0).set_index('bcr_aliquot_uuid')
    aliquot.index = aliquot.index.map(lambda u: u.lower())
    uuid2barcode = aliquot['bcr_sample_barcode'].to_dict()
    stderr = ''
    # Gene-level focal scores (GISTIC).
    focal, errors = self.getTableFromFiles(data_type='gistic')
    if errors is None:
        focal = focal.set_index('Gene Symbol').drop(
            ['Gene ID', 'Cytoband'], axis=1)
        focal.columns = focal.columns.map(uuid2barcode)
        focal = mergeToSample(focal)
        focal = mapEm2Gene(focal)
        storeData(df=focal, parental_dir=cnv_root,
                  sub_folder='somatic/gene/focal', cancer=self.cancer)
    else:
        stderr += 'Cannot Found\tgistic\t' + self.cancer + '\n'
    # Segment-level data: masked (somatic) and unmasked (all).
    for level in ('somatic', 'all'):
        seg, errors = self.getTableFromFiles(
            data_type='cnv_segment_' + level, by_name=False)
        if errors is None:
            seg['GDC_Aliquot'] = seg['GDC_Aliquot'].map(uuid2barcode)
            storeData(df=seg, parental_dir=cnv_root,
                      sub_folder=level + '/segment', cancer=self.cancer,
                      index=False)
        else:
            stderr += 'Cannot Found\tcnv_segment_' + level + '\t' + self.cancer + '\n'
    return stderr
def rppa(self):
    '''RPPA is not provided for hg38, so always report it as unavailable.'''
    return 'Not Available\trppa\t' + self.cancer + '\n'
|
#!/usr/bin/env python3
import subprocess, os,time,gzip
import pandas as pd
import numpy as np
from functools import reduce
from .convertor import mergeToSample, calTNzcore, rmEntrez, tpmToFpkm, mapEm2Gene, formatClin, pick,formatDrug
from .outformat import storeData
import requests,json,re,io
from .setting import CLIN_INFO, Biospecimen_INFO, Biospecimen_MAP, CLIN_MAP, PAM50_PATH, DRUG_MAP
class GdcApi(object):
    '''
    API for download files from GDC
    '''
    # NOTE(review): spelled ``__slot__`` (not ``__slots__``) -- this is an
    # inert class attribute, so instances still carry a normal ``__dict__``.
    # "Fixing" it would change instance layout; left as-is.
    __slot__ = ["files_endpt", "data_endpt", "cancer", "parental_dir",'cases_endpt']

    def __init__(self, cancer, parental_dir, cases_endpt='https://api.gdc.cancer.gov/cases', data_endpt="https://api.gdc.cancer.gov/data", files_endpt="https://api.gdc.cancer.gov/files", **kwargs):
        ''' Initialize instance parameters

        Parameters
        ----------
        cancer : str
            Cancer type
        parental_dir : str
            Path to store datas
        cases_endpt : str, optional
            Endpoint for case-level (clinical) queries
            (the default is "https://api.gdc.cancer.gov/cases")
        data_endpt : str, optional
            Endpoint for file downloading
            (the default is "https://api.gdc.cancer.gov/data")
        files_endpt : str, optional
            Endpoint for file-id searching
            (the default is "https://api.gdc.cancer.gov/files")
        '''
        self.files_endpt = files_endpt
        self.data_endpt = data_endpt
        self.cancer = cancer
        self.parental_dir = parental_dir
        self.cases_endpt = cases_endpt

    def _projFilter(self, data_type,method=None):
        '''Build ``files`` endpoint query params filtering by data type and
        TCGA project; for SNV, additionally by variant-calling workflow.'''
        dtype_dict = {
            "cnv_segment_somatic": "Masked Copy Number Segment",
            "cnv_segment_all": "Copy Number Segment",
            "masked_somatic_mutation":"Masked Somatic Mutation",
        }
        filters = {
            "op": "and",
            "content":[
                {
                    "op": "in",
                    "content": {
                        "field": "files.data_type",
                        "value": [
                            dtype_dict[data_type]
                        ]
                    }
                },
                {
                    "op": "in",
                    "content": {
                        "field": "cases.project.project_id",
                        "value": [
                            "TCGA-"+self.cancer.upper()
                        ]
                    }
                },
            ]
        }
        # specific for SNV on TCGA (Calling by four different tools)
        if method != None:
            filters['content'].append({
                "op":"in",
                "content":{
                    "field": "files.analysis.workflow_type",
                    "value":[
                        "{} Variant Aggregation and Masking".format(method)
                    ]
                }
            })
        params = {
            "filters": json.dumps(filters),
            "format": "JSON",
            "size": "3000"
        }
        return params

    def _nameFilter(self, data_type):
        '''Build query params that match one known BCR/GISTIC file name for
        this cancer (expects a single hit, hence ``size=1``).'''
        dtype_dict = {
            'drug': "nationwidechildrens.org_clinical_drug_{}.txt".format(self.cancer.lower()),
            'gistic': '{}.focal_score_by_genes.txt'.format(self.cancer.upper()),
            # 'survival': "nationwidechildrens.org_clinical_follow_up_v{0}_{1}.txt".format(CLIN_VERSION[self.cancer], self.cancer.lower()),
            'patient': "nationwidechildrens.org_clinical_patient_{}.txt".format(self.cancer.lower()),
            'aliquot': "nationwidechildrens.org_biospecimen_aliquot_{}.txt".format(self.cancer.lower()),
            'slide': "nationwidechildrens.org_biospecimen_slide_{}.txt".format(self.cancer.lower()),
            'sample': "nationwidechildrens.org_biospecimen_sample_{}.txt".format(self.cancer.lower()),
            'auxilary': "nationwidechildrens.org_auxiliary_{}.txt".format(self.cancer.lower()),
        }
        filters = {
            "op": "in",
            "content": {
                "field": "files.file_name",
                "value": [
                    dtype_dict[data_type]
                ]
            }
        }
        params = {
            "filters": json.dumps(filters),
            "format": "JSON",
            "size": "1"
        }
        return params

    def _fetchFileID(self, data_type, by_name=True,method=None):
        ''' Get files id by upstream filter parameters

        Parameters
        ----------
        data_type : str
            Data type to be download. eg. gistic
        by_name : bool, optional
            Whether getting files id by matching file names (the default is True).
            If not, we will use project filtering options to get file id list.

        Returns
        -------
        tuple
            ``(list_of_file_ids, None)`` on success, ``(None, 'Not found')``
            when nothing matched.
        '''
        if by_name is True:
            # Exact file-name match.
            file_uuid_list = []
            params = self._nameFilter(data_type)
            response = requests.get(self.files_endpt, params=params)
            for file_entry in json.loads(response.content.decode("utf-8"))["data"]["hits"]:
                file_uuid_list.append(file_entry["file_id"])
        else:
            # Project-level filter; a "message" key in the reply signals an
            # API-side error rather than a hit list.
            file_uuid_list = []
            params = self._projFilter(data_type,method=method)
            response = requests.get(self.files_endpt, params=params)
            if "message" in json.loads(response.content.decode("utf-8")).keys():
                return None, 'Not found'
            for file_entry in json.loads(response.content.decode("utf-8"))["data"]["hits"]:
                file_uuid_list.append(file_entry["file_id"])
        if len(file_uuid_list) == 0:
            return None,'Not found'
        else:
            return file_uuid_list,None

    def getTableFromFiles(self, data_type, by_name=True,method=None,**kwargs):
        '''
        Merging tables downloaded by a list of file ids

        Extra ``kwargs`` are forwarded to ``pandas.read_table``. Returns
        ``(DataFrame, None)`` on success or ``(None, error_message)``.
        '''
        # One retry on transient SSL failures.
        try:
            file_uuid_list, error = self._fetchFileID(
                data_type=data_type, by_name=by_name,method=method)
        except requests.exceptions.SSLError:
            time.sleep(10)
            file_uuid_list, error = self._fetchFileID(
                data_type=data_type, by_name=by_name,method=method)
        if error != None:
            return None, error
        ready_to_merge = []
        if len(file_uuid_list) == 0 :
            return None, 'Cannot find any file.'
        for ids in file_uuid_list:
            params = {"ids": [ids]}
            try:
                response = requests.post(self.data_endpt, data=json.dumps(
                    params), headers={"Content-Type": "application/json"})
            except requests.exceptions.SSLError:
                time.sleep(10)
                response = requests.post(self.data_endpt, data=json.dumps(
                    params), headers={"Content-Type": "application/json"})
            if method != None:
                # SNV downloads arrive gzip-compressed: spill to a temp .gz
                # file so pandas can decompress by extension, then remove it.
                temp_file = self.cancer+'_'+method+"_snv_tmp.gz"
                file = open(temp_file, "wb")
                file.write(response.content)
                file.close()
                df = pd.read_table(temp_file, **kwargs)
                subprocess.call('rm %s' % temp_file ,shell=True)
            else:
                df = pd.read_table(io.StringIO(
                    response.content.decode("utf-8")), **kwargs)
            ready_to_merge.append(df)
        return pd.concat(ready_to_merge,axis=0),None

    def getClinInfo(self, fields):
        '''Query the cases endpoint for the given fields; parse the TSV reply.

        Returns ``(DataFrame, None)`` or ``(None, 'Not Found!')``.
        '''
        filters = {
            "op": "in",
            "content": {
                "field": "cases.project.project_id",
                "value": [
                    "TCGA-"+self.cancer.upper()
                ]
            }
        }
        fields = ','.join(fields)
        params = {
            "filters": json.dumps(filters),
            "fields": fields,
            "format": "TSV",
            "size": "3000"
        }
        response = requests.get(self.cases_endpt, params=params)
        if response.status_code != 200:
            # One retry on a non-200 reply.
            time.sleep(10)
            response = requests.get(self.cases_endpt, params=params)
        try:
            result = pd.read_table(io.StringIO(response.content.decode("utf-8")))
            error = None
        # NOTE(review): bare except also hides non-parse errors (e.g.
        # KeyboardInterrupt); consider narrowing.
        except:
            result=None
            error='Not Found!'
        return result,error

    def clin(self):
        '''
        Downloading clinical information
        '''
        surs,stderr = self.getClinInfo(fields=CLIN_INFO)
        if stderr == None:
            surs.rename(columns=CLIN_MAP,inplace=True)
            surs = surs[list(CLIN_MAP.values())]
            format_surs = formatClin(surs)
            storeData(df=format_surs,parental_dir=self.parental_dir,
                      sub_folder='Surv',cancer=self.cancer)
            stderr = ''
        else:
            stderr = 'Cannot Found\tsurvival_info\t'+self.cancer+'\n'
        return stderr

    def biospecimen(self):
        '''
        Downloading biopecimen information
        '''
        stderr = ''
        for sub_folder,files in Biospecimen_INFO.items():
            read_to_merge = []
            for k, v in files.items():
                meta, errors = self.getTableFromFiles(data_type=k)
                if errors == None:
                    # Keep only the configured columns; pad the missing ones
                    # with NaN so every table has the same schema.
                    meta = meta[meta.columns.intersection(v)]
                    non_info = pd.Index(v).difference(meta.columns)
                    for c in non_info:
                        meta[c] = np.nan
                    meta.replace('[Not Available]', np.nan, inplace=True)
                    meta.replace('[Not Applicable]', np.nan, inplace=True)
                    meta.rename(columns=Biospecimen_MAP,inplace=True)
                    ## header process: BCR exports carry a repeated header row
                    if 'bcr_sample_barcode' in v:
                        meta = meta.drop(0, axis=0)
                    if k == 'sample':
                        # Trim the trailing vial letter, then derive patient id
                        # from the first three barcode fields.
                        meta['sample'] = meta['sample'].map(lambda x: x[:-1])
                        meta = meta.drop_duplicates()
                        meta['patient'] = meta['sample'].map(lambda x: '-'.join(x.split('-')[:3]))
                    # elif 'hpv_status' in v:
                    #     meta = meta.drop(0,axis=0)
                    # else:
                    #     meta = meta.drop([0,1],axis=0)
                    ## additional info
                    if k == 'slide':
                        meta = meta.set_index('sample')
                        meta = meta.apply(pd.to_numeric)
                        meta = mergeToSample(meta,transpose=True)
                    # if k == "patient" and self.cancer == 'BRCA':
                    #     pam50 = pd.read_table(PAM50_PATH, index_col=0).rename(columns={
                    #         "PAM50 mRNA":'PAM50'})['PAM50'].to_frame()
                    #     meta = meta.merge(pam50, left_on='patient',right_index=True,how='left')
                    read_to_merge.append(meta)
                else:
                    stderr += 'Cannot Found\t'+sub_folder+'_'+k+'\t'+self.cancer+'\n'
            # Outer-join all fetched tables on patient id; skip the group
            # entirely when nothing was fetched.
            if len(read_to_merge) > 1:
                result = reduce(lambda x,y:pd.merge(x,y, how='outer',on='patient'),read_to_merge).drop_duplicates().dropna(axis=1,how='all')
                result = result.set_index('patient')
            elif len(read_to_merge) == 1:
                result = read_to_merge[0]
            else:
                continue
            ## Store tumor and normal info separatelly
            # if sub_folder == "histology":
            #     for s in ['tumor','normal']:
            #         sub_result = pick(result, source=s, transpose=True)
            #         storeData(sub_result,
            #                   parental_dir=self.parental_dir,
            #                   sub_folder='/'.join([sub_folder,s]), cancer=self.cancer)
            #     sub_folder += '/origin'
            storeData(result,
                      parental_dir=self.parental_dir,
                      sub_folder=sub_folder,cancer=self.cancer)
        return stderr

    def drug(self):
        '''
        Downloading Drug information
        '''
        stderr = ''
        df, errors = self.getTableFromFiles(data_type='drug')
        if errors == None:
            # First two rows are repeated header/CDE rows in the BCR export.
            df = df.drop([0,1],axis=0)
            df = df.loc[:,df.columns.isin(list(DRUG_MAP.keys()))]
            df.rename(columns=DRUG_MAP,inplace=True)
            df = formatDrug(df)
            df.set_index('patient',inplace=True)
            storeData(df=df, parental_dir=self.parental_dir,
                      sub_folder='Drug', cancer=self.cancer)
        else:
            stderr += 'Cannot Found\tDrug information for \t'+self.cancer+'\n'
        return stderr

    def drugDownload(self):
        '''Run ``drug`` once per cancer, tracking completion in drug_finish.log.'''
        if not os.path.isdir(self.parental_dir):
            os.makedirs(self.parental_dir)
        # asyn download
        download_log_file = '/'.join([self.parental_dir, 'drug_finish.log'])
        if os.path.isfile(download_log_file):
            with open(download_log_file, 'r') as f:
                content = f.readlines()
                content = [x.strip() for x in content]
        else:
            content = []
        # begain download if not having been downloaded before
        if not self.cancer in content:
            with open('/'.join([self.parental_dir, 'drug_stderr.log']), 'a+') as stderrs:
                logs = self.drug()
                stderrs.write(logs)
            with open(download_log_file, 'a+') as f:
                f.write(self.cancer+'\n')

    def metaDownload(self):
        '''Run metadata workflows (biospecimen) once per cancer, tracked in
        meta_finish.log.'''
        if not os.path.isdir(self.parental_dir):
            os.makedirs(self.parental_dir)
        # asyn download
        download_log_file = '/'.join([self.parental_dir, 'meta_finish.log'])
        if os.path.isfile(download_log_file):
            with open(download_log_file, 'r') as f:
                content = f.readlines()
                content = [x.strip() for x in content]
        else:
            content = []
        # begain download if not having been downloaded before
        if not self.cancer in content:
            with open('/'.join([self.parental_dir, 'meta_stderr.log']), 'a+') as stderrs:
                for n in ['biospecimen']:#, 'clin']:
                    logs = self.__getattribute__(n)()
                    stderrs.write(logs)
            with open(download_log_file, 'a+') as f:
                f.write(self.cancer+'\n')
class Workflow(object):
    '''Base driver that runs a list of named download steps exactly once per
    cancer, recording completion in ``finish.log`` and step errors in
    ``stderr.log`` under ``parental_dir``.'''

    # NOTE: spelled ``__slot__`` in the original -- an inert class attribute,
    # kept verbatim so instance layout is unchanged.
    __slot__ = ['cancer', 'parental_dir', 'workflow']

    def __init__(self, cancer, parental_dir, workflow):
        self.cancer = cancer
        self.parental_dir = parental_dir
        self.workflow = workflow

    def run(self):
        '''Execute every step named in ``self.workflow`` unless this cancer is
        already listed in ``finish.log``.'''
        if not os.path.isdir(self.parental_dir):
            os.makedirs(self.parental_dir)
        done_log = '/'.join([self.parental_dir, 'finish.log'])
        finished = []
        if os.path.isfile(done_log):
            with open(done_log, 'r') as fh:
                finished = [line.strip() for line in fh.readlines()]
        if self.cancer not in finished:
            with open('/'.join([self.parental_dir, 'stderr.log']), 'a+') as err_fh:
                for step in self.workflow:
                    # Each step method returns its error report as a string.
                    err_fh.write(self.__getattribute__(step)())
            with open(done_log, 'a+') as fh:
                fh.write(self.cancer + '\n')
class FireBrowseDnloader(Workflow):
    '''Workflow that downloads TCGA level-3 (hg19) data from FireBrowse.'''

    # NOTE(review): spelled ``__slot__`` -- inert; instances keep a __dict__.
    __slot__ = ['release_time']

    def __init__(self, release_time="2016_01_28", base_url="http://gdac.broadinstitute.org/runs",**kwargs):
        super(FireBrowseDnloader, self).__init__(**kwargs)
        self.release_time = release_time
        self.base_url = base_url

    def _fget(self,data_type, store_dir):
        ''' Download level 3 data from FireBrowse

        Parameters
        ----------
        data_type : str
            Level 3 data type provided by FireBrowse
        store_dir : str
            Output directory

        Raises
        ------
        KeyError
            if the input parameter is out of provided list.

        Returns
        -------
        str
            Run messages. Return 'Success' if no error occurs, otherwise the
            shell's numeric return code as a string.
        '''
        # modifition to adapt CNV data on the function: GISTIC archives live
        # under the "analyses" run and carry a tumor-type suffix.
        if data_type == 'cnv_gene_somatic':
            release_prefix = 'analyses'
            cancer_suffix = '-TP'
            if self.cancer == 'SKCM':
                # SKCM GISTIC runs use metastatic (TM) samples.
                cancer_suffix = '-TM'
        else:
            cancer_suffix = ''
            release_prefix = 'stddata'
        data_type_dict = {
            "rna_raw" : "Merge_rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes__data.Level_3",
            "rna_norm": "Merge_rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.Level_3",
            "rppa": "RPPA_AnnotateWithGene.Level_3",
            "cnv_gene_somatic": "CopyNumber_Gistic2.Level_4",
            "cnv_segment_somatic": "Merge_snp__genome_wide_snp_6__broad_mit_edu__Level_3__segmented_scna_minus_germline_cnv_hg19__seg.Level_3",
            "cnv_segment_all": "Merge_snp__genome_wide_snp_6__broad_mit_edu__Level_3__segmented_scna_hg19__seg.Level_3",
        }
        # Only files matching these suffixes are kept from each archive.
        keep_suffix_dict = {
            "rna_raw": "rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes__data.data.txt",
            "rppa" : "rppa.txt",
            "rna_norm": "rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.data.txt",
            "cnv_gene_somatic": "by_genes.txt",
            "cnv_segment_somatic": "snp__genome_wide_snp_6__broad_mit_edu__Level_3__segmented_scna_minus_germline_cnv_hg19__seg.seg.txt",
            "cnv_segment_all": "snp__genome_wide_snp_6__broad_mit_edu__Level_3__segmented_scna_hg19__seg.seg.txt",
        }
        if not data_type in data_type_dict.keys():
            raise KeyError("""
            {0} is not a valid data type, only accept following input: {1}
            """.format(data_type,','.join(data_type_dict.keys())))
        short_release_time = "".join(self.release_time.split('_'))
        release = release_prefix+"__{release_time}"
        sub_folder = "data/{cancer}/{short_release_time}"
        file_name = "gdac.broadinstitute.org_{cancer}.{data_type}.{short_release_time}00.0.0.tar.gz"
        url = "/".join([self.base_url, release, sub_folder, file_name])
        url = url.format(**dict(
            cancer=self.cancer+cancer_suffix,
            data_type=data_type_dict[data_type],
            release_time=self.release_time,
            short_release_time=short_release_time,
        )
        )
        # Fetch + unpack; when a single file matches keep_suffix the mv below
        # renames it to "{store_dir}_{cancer}" (a file), otherwise the matches
        # are moved into a directory of that name.
        cmd ="""
        set -x
        [[ -d {store_dir}_{cancer}_{data_type}_tmp ]] || mkdir -p {store_dir}_{cancer}_{data_type}_tmp
        wget -q -O {store_dir}_{cancer}_{data_type}.gz {url}
        tar -xvvf {store_dir}_{cancer}_{data_type}.gz -C {store_dir}_{cancer}_{data_type}_tmp --strip-components=1
        rm {store_dir}_{cancer}_{data_type}.gz
        if [ $(ls {store_dir}_{cancer}_{data_type}_tmp/*{keep_suffix}| wc -l) -gt 1 ];then
            [[ -d {store_dir}_{cancer} ]] || mkdir {store_dir}_{cancer}
        fi
        mv {store_dir}_{cancer}_{data_type}_tmp/*{keep_suffix} {store_dir}_{cancer}
        """.format(**dict(
            store_dir=store_dir,
            cancer=self.cancer,
            keep_suffix=keep_suffix_dict[data_type],
            url=url,
            data_type=data_type
        )
        )
        try:
            subprocess.run(cmd, shell=True,check=True)
            log = 'Success'
        except subprocess.CalledProcessError as e:
            # Download/unpack failed: remove partial artifacts and report the
            # shell return code.
            cmd = """
            set -x
            rm {store_dir}_{cancer}_{data_type}.gz
            rm -rf {store_dir}_{cancer}_{data_type}_tmp
            """.format(**dict(
                store_dir=store_dir,
                cancer=self.cancer,
                data_type=data_type
            )
            )
            subprocess.run(cmd, shell=True, check=True)
            return str(e.returncode)
        ## process data: drop the now-empty extraction directory
        cmd = """
        rm -rf {store_dir}_{cancer}_{data_type}_tmp
        """.format(**dict(
            store_dir=store_dir,
            cancer=self.cancer,
            data_type=data_type
        )
        )
        subprocess.run(cmd,shell=True,check=True)
        return log

    def _splitCountTPM(self, raw_rnaseq_path):
        ''' Split one data frame with both count and scaled_estiamte into two data frames and
        merge the sample level data frame into pateint level data frame, but keep separating tumor and normal samples.
        Then, based on the scaled_estimate column, calculate TPM and RPKM information.

        Parameters
        ----------
        raw_rnaseq_path : str
            Path to raw rnaseq data download from FireBrowse

        Returns
        -------
        Dict
            A dict that contains three pandas.DataFrame, which are raw count, TPM and RPKM.
        '''
        # Row 1 of the file repeats column kinds; read it separately to select
        # the raw_count vs scaled_estimate columns.
        df = pd.read_table(raw_rnaseq_path, index_col=0,skiprows=[1])
        col_selector = pd.read_table(raw_rnaseq_path, index_col=0, nrows=2)
        raw_count = df.loc[:, col_selector.iloc[0, :] =='raw_count']
        raw_count = mergeToSample(raw_count)
        raw_count = round(raw_count)
        ## Get fpkm and tpm information from transcript fractions
        transcipt_fraction = df.loc[:,col_selector.iloc[0, :] == 'scaled_estimate']
        # NOTE(review): 10e6 == 1e7 and 10e9 == 1e10; if canonical TPM/FPKM
        # scaling (1e6 / 1e9) is intended these are 10x off -- verify.
        tpm = transcipt_fraction * 10e6
        normalize_factor = transcipt_fraction.sum(axis=0)
        fpkm = transcipt_fraction * normalize_factor * 10e9
        tpm = mergeToSample(tpm)
        fpkm = mergeToSample(fpkm)
        return dict(count=raw_count,tpm=tpm,fpkm=fpkm)

    def _formatGistic(self, gistic_path):
        ''' Formating GISTIC results and sepratate files into segment and gene level

        Parameters
        ----------
        gistic_path : str
            Path to the folder of gistic output

        Returns
        -------
        dict
            Dictionary with files output name as key and pandas.DataFrame as value
        '''
        f_dict = {
            "broad_focal": '{}/all_data_by_genes.txt',
            "focal": '{}/focal_data_by_genes.txt',
            "threds": '{}/all_thresholded.by_genes.txt'
        }
        result = {}
        for k, v in f_dict.items():
            # Silently skip outputs the GISTIC archive does not contain.
            if os.path.isfile(v.format(gistic_path)):
                result[k] = pd.read_table(v.format(gistic_path),index_col=0).drop(['Locus ID', 'Cytoband'],axis=1)
        return result

    def rnaseq(self):
        '''
        Workflow for downloading RNAseq data from FireBrowse and preprocessing data format.
        Returns '' on success, or an error line naming the missing data type.
        '''
        ########################## Raw count and Scale Estimate ##########################
        # 1. Fetch raw count and RSEM information from FireBrowse
        # 2. Split fetched data frame into raw count and RSEM separatly.
        # 3. Merge sample level data into pateint level data, but still separate tumor and normal sample.
        # 4. Calculate TPM and RPKM based on RSEM results.
        ##################################################################################
        store_dir = '/'.join([self.parental_dir, 'RNASeq'])
        store_dir_raw = '_'.join([store_dir, 'raw'])
        store_dir_norm = '_'.join([store_dir, 'norm'])
        log = self._fget(data_type='rna_raw',store_dir=store_dir_raw)
        if log != 'Success':
            return 'Cannot Found\trna_raw\t'+self.cancer+'\n'
        raw_rnaseq = self._splitCountTPM(
            raw_rnaseq_path='_'.join([store_dir_raw, self.cancer])
        )
        for name, df in raw_rnaseq.items():
            df = rmEntrez(df)
            if name in ['fpkm','tpm']:
                # z-scores are computed on log2(x + 1) values.
                log_df = np.log2( 1+ df )
                tumor_zscore = calTNzcore(log_df, pair_TN=False)
                storeData(df=tumor_zscore, parental_dir=store_dir,
                          sub_folder=name+'/zscore_tumor/', cancer=self.cancer)
                try:
                    paired_zscore = calTNzcore(log_df, pair_TN=True)
                    storeData(df=paired_zscore, parental_dir=store_dir,
                              sub_folder=name+'/zscore_paired/', cancer=self.cancer)
                except ValueError:
                    # no matched tumour/normal pairs for this cancer
                    pass
            name += '/origin'
            storeData(df = df, parental_dir = store_dir,
                      sub_folder=name, cancer=self.cancer)
        subprocess.call(
            'rm -rf {}'.format('_'.join([store_dir_raw, self.cancer])), shell=True)
        ########################## Raw count and Scale Estimate ##########################
        # 1. Fetch normalized count from FireBrowse
        # 2. remove the second row, which only indicate the normalized count
        # 3. Merge sample level data into pateint level data, but still separate tumor and normal sample.
        ##################################################################################
        log = self._fget(data_type='rna_norm',store_dir=store_dir_norm)
        if log != 'Success':
            return 'Cannot Found\trna_norm\t'+self.cancer+'\n'
        rnaseq_norm = pd.read_table(
            '_'.join([store_dir_norm, self.cancer]), index_col=0, skiprows=[1])
        rnaseq_norm = mergeToSample(rnaseq_norm)
        rnaseq_norm = rmEntrez(rnaseq_norm)
        storeData(df=rnaseq_norm, parental_dir=store_dir,
                  sub_folder='norm_count/origin', cancer=self.cancer)
        subprocess.call(
            'rm -rf {}'.format('_'.join([store_dir_norm, self.cancer])), shell=True)
        return ''

    def cnv(self):
        '''
        Workflow for downloading copy number variation data from FireBrowse and preprocessing data format.
        Returns '' on success, or an error line naming the missing data type.
        '''
        ## Gene
        store_dir = '/'.join([self.parental_dir, 'CNV/somatic', 'gene'])
        log = self._fget( data_type='cnv_gene_somatic',store_dir=store_dir)
        if log != 'Success':
            return 'Cannot Found\tcnv_gene_somatic\t'+self.cancer+'\n'
        cnv_gene = self._formatGistic(
            gistic_path='_'.join([store_dir, self.cancer]))
        for name, df in cnv_gene.items():
            df = mergeToSample(df)
            storeData(df=df, parental_dir=store_dir,
                      sub_folder=name, cancer=self.cancer)
        subprocess.call(
            'rm -rf {}'.format('_'.join([store_dir, self.cancer])), shell=True)
        ## Segment
        for lv in ['somatic','all']:
            store_dir = '/'.join([self.parental_dir, 'CNV/'+lv, 'segment'])
            log = self._fget(data_type='cnv_segment_'+lv, store_dir=store_dir)
            if log != 'Success':
                return 'Cannot Found\t' + 'cnv_segment_'+lv+'\t'+self.cancer+'\n'
            if not os.path.exists(store_dir):
                os.makedirs(store_dir)
            # _fget left a single file named "{store_dir}_{cancer}"; move it
            # into the segment directory.
            subprocess.call(
                'mv {0} {1}'.format('_'.join([store_dir, self.cancer]),
                                    '/'.join([store_dir, self.cancer])
                                    ),
                shell=True)
        return ''

    def rppa(self):
        '''
        Workflow for downloading RPPA data from FireBrowse and preprocessing data format.
        Returns '' on success, or an error line when the data is missing.
        '''
        store_dir = '/'.join([self.parental_dir, 'RPPA'])
        log=self._fget(data_type='rppa',store_dir=store_dir)
        if log != 'Success':
            return 'Cannot Found\trppa\t'+self.cancer+'\n'
        rppa = pd.read_table(
            '_'.join([store_dir,self.cancer]), index_col=0)
        rppa = rmEntrez(rppa)
        rppa = mergeToSample(rppa)
        storeData(df=rppa, parental_dir=store_dir,
                  sub_folder='', cancer=self.cancer)
        subprocess.call(
            'rm -rf {}'.format('_'.join([store_dir, self.cancer])), shell=True)
        return ''

    def snv(self):
        '''
        Please use MC3 downloader to fetch the SNV result for all cancer in TCGA,
        which is more robust.
        '''
        return 'GO TO MC3\tsnv\t'+self.cancer+'\n'
class GdcDnloader(GdcApi, Workflow):
    '''Workflow that downloads hg38 data from the Xena GDC hub (matrices) and
    the GDC REST API (SNV, CNV, metadata).'''

    # NOTE(review): spelled ``__slot__`` -- inert; instances keep a __dict__.
    __slot__ = ['type_available', 'base_url']

    def __init__(self, base_url="https://gdc.xenahubs.net/download/",**kwargs):
        # Initialize both bases explicitly: Workflow first (sets cancer and
        # parental_dir), then GdcApi reusing those values.
        Workflow.__init__(self,**kwargs)
        GdcApi.__init__(self, cancer=self.cancer,parental_dir=self.parental_dir)
        # super(GdcDnloader, self).__init__(data_endpt="https://api.gdc.cancer.gov/data",files_endpt="https://api.gdc.cancer.gov/files",**kwargs)
        # data-release-80
        self.base_url = base_url
        self.type_available = {
            'RNASeq': ['fpkm','count','fpkm_uq'],
            'SNV': ['MuSE', "MuTect2", "VarScan2", "SomaticSniper"],
            'cnv': ['somatic','all']
        }

    def _fget(self, data_type, store_dir):
        '''Download level 3 data from Xenas

        Parameters
        ----------
        data_type : str
            Data type to be downloaded
        store_dir : str
            Path to store the data

        Raises
        ------
        KeyError
            If cannot fetching the files

        Returns
        -------
        str
            'Success' when the archive was fetched and decompressed to
            ``{store_dir}/{cancer}``; otherwise the shell return code as a
            string (the partial archive is removed).
        '''
        # NOTE(review): the SNV keys below ('muse', 'mutect2', "VarScan2",
        # "SomaticSnipe") do not match type_available['SNV'] casing ('MuSE',
        # 'MuTect2', 'SomaticSniper'). snv() bypasses _fget, so these entries
        # appear unused -- verify before relying on them.
        data_type_dict = {
            'fpkm': "htseq_fpkm",
            'count':"htseq_counts",
            'fpkm_uq': "htseq_fpkm-uq",
            'muse': "muse_snv",
            "mutect2": "mutect2_snv",
            "VarScan2": "varscan2_snv",
            "SomaticSnipe":"somaticsniper_snv",
        }
        if not data_type in data_type_dict.keys():
            raise KeyError("""
            {0} is not a valid data type, only accept following input: {1}
            """.format(data_type, ','.join(data_type_dict.keys())))
        # https: // gdc.xenahubs.net/download/TCGA-CHOL/Xena_Matrices/TCGA-CHOL.htseq_fpkm.tsv.gz
        subpath = 'TCGA-{cancer}/Xena_Matrices/TCGA-{cancer}.{data_type}.tsv.gz'
        url = "/".join([self.base_url, subpath])
        url = url.format(**dict(
            cancer=self.cancer,
            data_type=data_type_dict[data_type]
        )
        )
        cmd = """
        set -x
        [[ -d {store_dir} ]] || mkdir -p {store_dir}
        wget -q -O {store_dir}/{cancer}.gz {url}
        """.format(**dict(
            store_dir=store_dir,
            cancer=self.cancer,
            url=url,
        )
        )
        try:
            subprocess.run(cmd, shell=True, check=True)
            log = 'Success'
            # Decompress to {store_dir}/{cancer} for downstream readers.
            cmd = "set -x; gunzip {store_dir}/{cancer}.gz".format(**dict(store_dir=store_dir,
                                                                         cancer=self.cancer))
        except subprocess.CalledProcessError as e:
            log = str(e.returncode)
            # Download failed: remove the partial archive instead.
            cmd = "set -x; rm {store_dir}/{cancer}.gz".format(**dict(store_dir=store_dir,
                                                                     cancer=self.cancer))
        subprocess.run(cmd, shell=True, check=True)
        return log

    def rnaseq(self):
        '''Download every available RNASeq matrix, undo the log2(x + 1)
        transform, and store per-sample tables plus z-scores for fpkm/tpm.
        Returns '' on success, or an error line for the first missing matrix.'''
        store_parental = '/'.join([self.parental_dir, 'RNASeq'])
        for name in self.type_available['RNASeq']:
            store_dir = '/'.join([store_parental, name])
            log = self._fget(data_type=name, store_dir=store_dir)
            if log != 'Success':
                return 'Cannot Found\t' + name+'\t'+self.cancer+'\n'
            df = pd.read_table('/'.join([store_dir,self.cancer]),index_col=0)
            df = np.exp2(df) - 1 # since all matrix download from xenas have been log transformed
            df = mergeToSample(df)
            df = mapEm2Gene(df)
            if name == 'fpkm':
                # Derive TPM from FPKM and store both with z-scores.
                tpm = tpmToFpkm(df, reverse=True)
                for raw_name,raw_df in {'tpm':tpm,'fpkm':df}.items():
                    log_df = np.log2(1 + raw_df)
                    tumor_zscore = calTNzcore(log_df, pair_TN=False)
                    storeData(df=tumor_zscore, parental_dir=store_parental,
                              sub_folder=raw_name+'/zscore_tumor/', cancer=self.cancer)
                    try:
                        paired_zscore = calTNzcore(log_df, pair_TN=True)
                        storeData(df=paired_zscore, parental_dir=store_parental,
                                  sub_folder=raw_name+'/zscore_paired/', cancer=self.cancer)
                    except ValueError:
                        # no matched tumour/normal pairs for this cancer
                        pass
                    storeData(df=raw_df, parental_dir=store_parental,
                              sub_folder=raw_name+'/origin', cancer=self.cancer)
            else:
                if name == 'count':
                    df = df.round(0)
                storeData(df=df, parental_dir=store_parental,
                          sub_folder=name+'/origin', cancer=self.cancer)
            # Drop the decompressed download once its tables are stored.
            subprocess.call(
                'rm -rf {}'.format('/'.join([store_dir, self.cancer])), shell=True)
        return ''

    def snv(self):
        '''Fetch masked somatic mutation tables (one per variant caller) via
        the GDC API. Returns '' on success, or an error line for the first
        caller whose MAF could not be retrieved.'''
        for m in self.type_available['SNV']:
            df, errors = self.getTableFromFiles(
                data_type='masked_somatic_mutation', by_name=False,method=m,comment='#')
            if errors != None:
                return 'Cannot Found\t'+m+'\t'+self.cancer+'\n'
            else:
                # df.rename(columns={"Hugo_Symbol":"gene"},inplace=True)
                # df.insert(0, 'sample', df["Tumor_Sample_Barcode"].map(
                #     lambda x: '-'.join(x.split('-')[:4])[:-1]))
                store_parental = '/'.join([self.parental_dir, 'SNV'])
                storeData(df=df, parental_dir=store_parental,
                          sub_folder=m, cancer=self.cancer)
        return ''

    def cnv(self):
        '''Download CNV data (gene-level GISTIC focal scores plus segment
        files), translating aliquot UUIDs to sample barcodes. Returns the
        concatenation of error lines for every piece that failed.'''
        store_parental = '/'.join([self.parental_dir, 'CNV'])
        # meta data
        ## map uuid to barcode
        meta, errors = self.getTableFromFiles(data_type='aliquot')
        if errors != None:
            return 'Cannot Found\tuuid map barcode\t'+self.cancer+'\n'
        meta = meta.dropna(
            axis=0).set_index('bcr_aliquot_uuid')
        meta.index = meta.index.map(lambda x: x.lower())
        meta = meta['bcr_sample_barcode'].to_dict()
        stderr = ''
        # focal data
        df,errors = self.getTableFromFiles(data_type='gistic')
        if errors == None:
            df = df.set_index('Gene Symbol').drop(['Gene ID', 'Cytoband'],axis=1)
            df.columns = df.columns.map(meta)
            df = mergeToSample(df)
            df = mapEm2Gene(df)
            storeData(df=df, parental_dir=store_parental,
                      sub_folder='somatic/gene/focal', cancer=self.cancer)
        else:
            stderr += 'Cannot Found\tgistic\t'+self.cancer+'\n'
        # Segment data
        ## somatic
        df, errors = self.getTableFromFiles(data_type='cnv_segment_somatic', by_name=False)
        if errors == None:
            df['GDC_Aliquot'] = df['GDC_Aliquot'].map(meta)
            storeData(df=df, parental_dir=store_parental,
                      sub_folder='somatic/segment', cancer=self.cancer,index=False)
        else:
            stderr += 'Cannot Found\tcnv_segment_somatic\t'+self.cancer+'\n'
        # all
        df, errors = self.getTableFromFiles(data_type='cnv_segment_all', by_name=False)
        if errors == None:
            df['GDC_Aliquot'] = df['GDC_Aliquot'].map(meta)
            storeData(df=df, parental_dir=store_parental,
                      sub_folder='all/segment', cancer=self.cancer, index=False)
        else:
            stderr += 'Cannot Found\tcnv_segment_all\t'+self.cancer +'\n'
        return stderr

    def rppa(self):
        # RPPA data for hg38 is not available.
        return 'Not Available\trppa\t'+self.cancer + '\n'
|
en
| 0.584629
|
#!/usr/bin/env python3 API for download files from GDC Intialize instance parameters Parameters ---------- cancer : str Cancer type parental_dir : str Path to store datas data_endpt : str, optional [Endpoint for files id searching] (the default is "https://api.gdc.cancer.gov/data") files_endpt : str, optional [Endpoint for files downloading] (the default is "https://api.gdc.cancer.gov/files") # specific for SNV on TCGA (Calling by four different tools) # 'survival': "nationwidechildrens.org_clinical_follow_up_v{0}_{1}.txt".format(CLIN_VERSION[self.cancer], self.cancer.lower()), Get files id by upstream filter parameters Parameters ---------- data_type : str Data type to be download. eg. gistic by_name : bool, optional Whether getting files id by matching file names (the default is True). If not, we will use project filtering options to get file id list. Returns ------- list A list contains file ids. Merging tables downloaded by a list of file ids Downloading clinical information Downloading biopecimen information ## header process # elif 'hpv_status' in v: # meta = meta.drop(0,axis=0) # else: # meta = meta.drop([0,1],axis=0) ## additional info # if k == "patient" and self.cancer == 'BRCA': # pam50 = pd.read_table(PAM50_PATH, index_col=0).rename(columns={ # "PAM50 mRNA":'PAM50'})['PAM50'].to_frame() # meta = meta.merge(pam50, left_on='patient',right_index=True,how='left') ## Store tumor and normal info separatelly # if sub_folder == "histology": # for s in ['tumor','normal']: # sub_result = pick(result, source=s, transpose=True) # storeData(sub_result, # parental_dir=self.parental_dir, # sub_folder='/'.join([sub_folder,s]), cancer=self.cancer) # sub_folder += '/origin' Downloading Drug information # asyn download # begain download if not having been downloaded before # asyn download # begain download if not having been downloaded before #, 'clin']: # asyn download # begain download if not having been downloaded before Download level 3 data from FireBrowse Parameters 
---------- cancer : str Cancer type included in TCGA project data_type : str Level 3 data type provided by FireBrowse store_dir : str Output directory base_url : str, optional URL prefix (the default is "http://gdac.broadinstitute.org/runs", which is the prefix provided by FireBrowse) release_time : str, optional Release version and this release recored by date. (the default is "2016_01_28", which is the latest available release for now.) Raises ------ KeyError if the input parameter is out of provided list. Returns ------- str Run messages. Return 'Success' if no error occurs. # modifition to adapt CNV data on the function {0} is not a valid data type, only accept following input: {1} set -x [[ -d {store_dir}_{cancer}_{data_type}_tmp ]] || mkdir -p {store_dir}_{cancer}_{data_type}_tmp wget -q -O {store_dir}_{cancer}_{data_type}.gz {url} tar -xvvf {store_dir}_{cancer}_{data_type}.gz -C {store_dir}_{cancer}_{data_type}_tmp --strip-components=1 rm {store_dir}_{cancer}_{data_type}.gz if [ $(ls {store_dir}_{cancer}_{data_type}_tmp/*{keep_suffix}| wc -l) -gt 1 ];then [[ -d {store_dir}_{cancer} ]] || mkdir {store_dir}_{cancer} fi mv {store_dir}_{cancer}_{data_type}_tmp/*{keep_suffix} {store_dir}_{cancer} set -x rm {store_dir}_{cancer}_{data_type}.gz rm -rf {store_dir}_{cancer}_{data_type}_tmp ## process data rm -rf {store_dir}_{cancer}_{data_type}_tmp Split one data frame with both count and scaled_estiamte into two data frames and merge the sample level data frame into pateint level data frame, but keep separating tumor and normal samples. Then, based on the scaled_estimate column, calculate TPM and RPKM information. Parameters ---------- raw_rnaseq_path : str Path to raw rnaseq data download from FireBrowse Returns ------- Dict A dict that contains three pandas.DataFrame, which are raw count, TPM and RPKM. All of those data frame are index by both Entrez ID and gene symbol and colum named by four digits TCGA barcode. 
## Get fpkm and tpm information from transcript fractions Formating GISTIC results and sepratate files into segment and gene level Parameters ---------- gistic_path : str Path to the folder of gistic output Returns ------- dict Dictionary with files output name as key and pandas.DataFrame as value Workflow for downloading RNAseq data from FireBrowse and preprocessing data format. Parameters ---------- parental_dir : str Path to parental folder that you want to store the whole RNAseq data cancer : str Cancer name you want to download from FireBrowse, it must be a cancer type included in TCGA project. ########################## Raw count and Scale Estimate ########################## # 1. Fetch raw count and RSEM information from FireBrowse # 2. Split fetched data frame into raw count and RSEM separatly. # 3. Merge sample level data into pateint level data, but still separate tumor and normal sample. # 4. Calculate TPM and RPKM based on RSEM results. ################################################################################## ########################## Raw count and Scale Estimate ########################## # 1. Fetch normalized count from FireBrowse # 2. remove the second row, which only indicate the normalized count # 3. Merge sample level data into pateint level data, but still separate tumor and normal sample. ################################################################################## Workflow for downloading copy number variation data from FireBrowse and preprocessing data format. Parameters ---------- parental_dir : str Path to parental folder that you want to store the whole copy number variation data cancer : str Cancer name you want to download from FireBrowse, it must be a cancer type included in TCGA project. ## Gene ## Segment Workflow for downloading RPPA data from FireBrowse and preprocessing data format. 
Parameters ---------- parental_dir : str Path to parental folder that you want to store the whole RPPA data cancer : str Cancer name you want to download from FireBrowse, it must be a cancer type included in TCGA project. Please use MC3 downloader to fetch the SNV result for all cancer in TCGA, which is more robust. # super(GdcDnloader, self).__init__(data_endpt="https://api.gdc.cancer.gov/data",files_endpt="https://api.gdc.cancer.gov/files",**kwargs) # data-release-80 Download level 3 data from Xenas Parameters ---------- data_type : str Data type to be downloaded store_dir : str Path to store the data Raises ------ KeyError If cannot fetching the files Returns ------- str Tell if the downloading is successful or not {0} is not a valid data type, only accept following input: {1} # https: // gdc.xenahubs.net/download/TCGA-CHOL/Xena_Matrices/TCGA-CHOL.htseq_fpkm.tsv.gz set -x [[ -d {store_dir} ]] || mkdir -p {store_dir} wget -q -O {store_dir}/{cancer}.gz {url} # since all matrix download from xenas have been log transformed # df.rename(columns={"Hugo_Symbol":"gene"},inplace=True) # df.insert(0, 'sample', df["Tumor_Sample_Barcode"].map( # lambda x: '-'.join(x.split('-')[:4])[:-1])) # meta data ## map uuid to barcode # focal data # Segment data ## somatic # all # RPPA data for hg38 is not available.
| 2.149518
| 2
|
netflix_notify/management/commands/sync_titles.py
|
mikeengland/netflix-notify
| 1
|
6626715
|
import logging
from django.core.management.base import BaseCommand
from netflix_notify.enums import Regions
from netflix_notify.models import (Title,
SyncLog)
from netflix_notify.scraper import Scraper
logger = logging.getLogger(__name__)  # module-level logger for this management command
class Command(BaseCommand):
    """Management command that syncs Netflix titles into the local database."""

    help = 'Sync the titles with the application database'

    def add_arguments(self, parser):
        # TODO Add option to sync a specific Netflix region
        pass

    def handle(self, *args, **options):
        self.get_and_store_titles()

    def get_and_store_titles(self):
        """
        Retrieve the titles from the API, post-process them and store them in the database, ensuring
        any existing but now missing titles are set as inactive.
        """
        logger.info('Retrieving titles from the API')
        raw_titles = Scraper().get_titles()
        logger.info('Syncing titles in the database')
        synced = []
        for data in raw_titles:
            record, _ = Title.objects.update_or_create(title_type=data.get('object_type'),
                                                       name=data.get('title'),
                                                       description=data.get('short_description'),
                                                       language=data.get('original_language'),
                                                       release_year=data.get('original_release_year'),
                                                       runtime=data.get('runtime'),
                                                       netflix_region=Regions.UK,
                                                       active=True)
            synced.append(record)
        # Anything the API did not return this run is no longer available.
        Title.objects.exclude(pk__in=[record.pk for record in synced]).update(active=False)
        SyncLog.objects.create()
        logger.info('Title sync complete!')
|
import logging
from django.core.management.base import BaseCommand
from netflix_notify.enums import Regions
from netflix_notify.models import (Title,
SyncLog)
from netflix_notify.scraper import Scraper
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Sync the titles with the application database'
def add_arguments(self, parser):
# TODO Add option to sync a specific Netflix region
pass
def handle(self, *args, **options):
self.get_and_store_titles()
def get_and_store_titles(self):
"""
Retrieve the titles from the API, post-process them and store them in the database, ensuring
any existing but now missing titles are set as inactive.
"""
logger.info('Retrieving titles from the API')
scraper = Scraper()
titles = scraper.get_titles()
created_or_updated = []
logger.info('Syncing titles in the database')
for title in titles:
title, _ = Title.objects.update_or_create(title_type=title.get('object_type'),
name=title.get('title'),
description=title.get('short_description'),
language=title.get('original_language'),
release_year=title.get('original_release_year'),
runtime=title.get('runtime'),
netflix_region=Regions.UK,
active=True)
created_or_updated.append(title)
currently_active = [title.pk for title in created_or_updated]
Title.objects.exclude(pk__in=currently_active).update(active=False)
SyncLog.objects.create()
logger.info('Title sync complete!')
|
en
| 0.891975
|
# TODO Add option to sync a specific Netflix region Retrieve the titles from the API, post-process them and store them in the database, ensuring any existing but now missing titles are set as inactive.
| 2.206957
| 2
|
CDSB_series/split/script-split.py
|
WFDetector/WFDetection
| 0
|
6626716
|
import subprocess
from os.path import join

# Root of the defended trace results produced by the defenses pipeline.
original = "../../defenses/results/"
# Where the xgboost stage writes its split scores.
split = "../xgboost/scores/"
# undefended
# targets = [
#     "mergepad_0701_2018/",
#     "mergepad_0701_2019/",
#     "mergepad_0701_2020/",
#     "mergepad_0701_2021/",
#     "mergepad_0701_2022/",
#     "mergepad_0701_2023/",
#     "mergepad_0701_2024/",
#     "mergepad_0701_2025/",
#     "mergepad_0701_2026/",
#     "mergepad_0701_2027/",
#     "mergepad_0701_2028/",
#     "mergepad_0701_2029/",
#     "mergepad_0701_2030/",
#     "mergepad_0701_2031/",
#     "mergepad_0701_2032/",
# ]
#glue
# targets = [
#     "ranpad2_0706_0829/",
#     "ranpad2_0706_0830/",
#     "ranpad2_0706_0831/",
#     "ranpad2_0706_0832/",
#     "ranpad2_0706_0833/",
#     "ranpad2_0706_0834/",
#     "ranpad2_0706_0835/",
#     "ranpad2_0706_0836/",
#     "ranpad2_0706_0837/",
#     "ranpad2_0706_0838/",
#     "ranpad2_0706_0839/",
#     "ranpad2_0706_0840/",
#     "ranpad2_0706_0841/",
#     "ranpad2_0706_0842/",
#     "ranpad2_0706_0843/",
# ]
targets = [
    "mergepad_evaluation_16_200_10_random/",
]

for target in targets:
    src_dir = join(original, target)
    split_file = join(split, target, "splitresult.txt")
    # BUG FIX: the original built a shell string and ran it with shell=True,
    # which breaks on paths containing spaces and is injection-prone.
    # The argument-list form passes each path through unmodified.
    subprocess.run(["python3", "split-base-rate.py", src_dir, "-split", split_file])
|
import subprocess
from os.path import join
original = "../../defenses/results/"
split = "../xgboost/scores/"
# undefended
# targets = [
# "mergepad_0701_2018/",
# "mergepad_0701_2019/",
# "mergepad_0701_2020/",
# "mergepad_0701_2021/",
# "mergepad_0701_2022/",
# "mergepad_0701_2023/",
# "mergepad_0701_2024/",
# "mergepad_0701_2025/",
# "mergepad_0701_2026/",
# "mergepad_0701_2027/",
# "mergepad_0701_2028/",
# "mergepad_0701_2029/",
# "mergepad_0701_2030/",
# "mergepad_0701_2031/",
# "mergepad_0701_2032/",
# ]
#glue
# targets = [
# "ranpad2_0706_0829/",
# "ranpad2_0706_0830/",
# "ranpad2_0706_0831/",
# "ranpad2_0706_0832/",
# "ranpad2_0706_0833/",
# "ranpad2_0706_0834/",
# "ranpad2_0706_0835/",
# "ranpad2_0706_0836/",
# "ranpad2_0706_0837/",
# "ranpad2_0706_0838/",
# "ranpad2_0706_0839/",
# "ranpad2_0706_0840/",
# "ranpad2_0706_0841/",
# "ranpad2_0706_0842/",
# "ranpad2_0706_0843/",
# ]
targets = [
"mergepad_evaluation_16_200_10_random/",
]
for target in targets:
a = join(original, target)
b = join(split, target, "splitresult.txt")
cmd = "python3 split-base-rate.py " + a + " -split "+ b
# print(cmd)
# exit(0)
subprocess.call(cmd, shell= True)
|
en
| 0.35882
|
# undefended # targets = [ # "mergepad_0701_2018/", # "mergepad_0701_2019/", # "mergepad_0701_2020/", # "mergepad_0701_2021/", # "mergepad_0701_2022/", # "mergepad_0701_2023/", # "mergepad_0701_2024/", # "mergepad_0701_2025/", # "mergepad_0701_2026/", # "mergepad_0701_2027/", # "mergepad_0701_2028/", # "mergepad_0701_2029/", # "mergepad_0701_2030/", # "mergepad_0701_2031/", # "mergepad_0701_2032/", # ] #glue # targets = [ # "ranpad2_0706_0829/", # "ranpad2_0706_0830/", # "ranpad2_0706_0831/", # "ranpad2_0706_0832/", # "ranpad2_0706_0833/", # "ranpad2_0706_0834/", # "ranpad2_0706_0835/", # "ranpad2_0706_0836/", # "ranpad2_0706_0837/", # "ranpad2_0706_0838/", # "ranpad2_0706_0839/", # "ranpad2_0706_0840/", # "ranpad2_0706_0841/", # "ranpad2_0706_0842/", # "ranpad2_0706_0843/", # ] # print(cmd) # exit(0)
| 1.882114
| 2
|
developer_manual/examples/python/login.py
|
hope15/documentation
| 154
|
6626717
|
import owncloud
# NOTE(review): credentials are hard-coded below; keep this only as a
# documentation example and load real credentials from config/environment.
oc = owncloud.Client('https://your.owncloud.install.com/owncloud/')
oc.login('msetter', 'Zaex7Thex2di')
oc.list('/')  # list the contents of the account's root directory
oc.logout()
|
import owncloud
oc = owncloud.Client('https://your.owncloud.install.com/owncloud/')
oc.login('msetter', 'Zaex7Thex2di')
oc.list('/')
oc.logout()
|
none
| 1
| 1.723353
| 2
|
|
129. Sum Root to Leaf Numbers.py
|
MapleLove2014/leetcode
| 1
|
6626718
|
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right


class Solution:
    def sumNumbers(self, root: TreeNode) -> int:
        """Sum every root-to-leaf number formed by concatenating node values."""
        if not root:
            return 0
        total = 0
        # Explicit DFS stack of (node, digits accumulated above this node).
        stack = [(root, '')]
        while stack:
            node, prefix = stack.pop()
            path = prefix + str(node.val)
            if not node.left and not node.right:
                total += int(path)  # leaf: the path spells a complete number
                continue
            if node.left:
                stack.append((node.left, path))
            if node.right:
                stack.append((node.right, path))
        return total
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def sumNumbers(self, root: TreeNode) -> int:
if not root:
return 0
def doit(root, prefix):
if not root.left and not root.right:
return int(prefix + str(root.val))
result = 0
if root.left:
result += doit(root.left, prefix + str(root.val))
if root.right:
result += doit(root.right, prefix + str(root.val))
return result
return doit(root, '')
|
en
| 0.652542
|
# Definition for a binary tree node.
| 3.663431
| 4
|
Testmode/mnist-digit_recognition.py
|
xiaokamikami/TI_MedicineCar-R
| 0
|
6626719
|
<filename>Testmode/mnist-digit_recognition.py
from fpioa_manager import *
import os, Maix, lcd, image, sensor, gc, time
from Maix import FPIOA, GPIO
import KPU as kpu

# --- Boot banner on the LCD --------------------------------------------------
lcd.init(type=1, freq=15000000, width=240, height=240, color=(0, 0, 0))
lcd.rotation(3)
lcd.clear(0, 0, 0)
lcd.draw_string(20, 20, "CocoRobo X", lcd.WHITE, lcd.BLACK)
time.sleep(1)
lcd.draw_string(20, 40, "- Loading Camera...", lcd.WHITE, lcd.BLACK)

# --- Camera setup: grayscale, 224x224 window ---------------------------------
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
sensor.set_hmirror(0)  # camera mirroring off
sensor.set_vflip(0)    # camera vertical flip off
sensor.run(1)
sensor.skip_frames(30)
lcd.rotation(0)
lcd.clear()

# Load the MNIST model from flash at address 0x300000.
task_mnist = kpu.load(0x300000)
clock = time.clock()

try:
    while True:
        clock.tick()
        frame = sensor.snapshot()
        digit_img = frame.resize(28, 28)  # resize to the MNIST input size 28x28
        digit_img.invert()                # invert the picture as MNIST expects
        digit_img.strech_char(1)          # preprocess: eliminate dark corners
        digit_img.pix_to_ai()             # generate the AI input buffer
        fmap = kpu.forward(task_mnist, digit_img)  # run the neural network
        probs = fmap[:]                   # probabilities for digits 0-9
        best = max(probs)                 # highest probability
        digit = probs.index(best)         # the recognised digit
        print(str(digit) + "," + str(int(best * 100)))
        frame.draw_rectangle(0, 0, 45, 50, color=(0, 0, 0), fill=True)
        frame.draw_string(4, 3, str(digit), color=(255, 255, 255), scale=4)
        frame.draw_string(4, 50, str(clock.fps()), color=(255, 255, 255), scale=4)
        lcd.display(frame, oft=(8, 8))    # show the annotated full frame
        print(clock.fps())
        gc.collect()
finally:
    # BUG FIX: the original called kpu.deinit(task) on an undefined name
    # (the load into `task` was commented out) and placed it after an
    # infinite loop; release the actually-loaded model handle instead.
    kpu.deinit(task_mnist)
|
<filename>Testmode/mnist-digit_recognition.py
from fpioa_manager import *
import os, Maix, lcd, image, sensor, gc, time
from Maix import FPIOA, GPIO
import KPU as kpu
import gc
lcd.init(type=1,freq=15000000,width=240,height=240,color=(0,0,0))
lcd.rotation(3)
lcd.clear(0,0,0)
lcd.draw_string(20,20, "CocoRobo X", lcd.WHITE, lcd.BLACK)
time.sleep(1)
lcd.draw_string(20,40, "- Loading Camera...", lcd.WHITE, lcd.BLACK)
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
sensor.set_hmirror(0) #设置摄像头镜像
sensor.set_vflip(0) #设置摄像头翻转
sensor.run(1)
sensor.skip_frames(30)
lcd.rotation(0)
# img_boot = image.Image("/sd/images/boot_digit.jpg")
# lcd.display(img_boot, oft=(0,0))
# time.sleep(2)
lcd.clear()
#task = kpu.load("/sd/model/mnist.kmodel") #load model from flash address 0x200000
#task_mnist = kpu.load("/sd/mnist.kmodel")
task_mnist = kpu.load(0x300000)
sensor.run(1)
clock = time.clock()
while True:
clock.tick()
img_mnist1 = sensor.snapshot()
img_mnist2=img_mnist1.resize(28,28) #resize to mnist input 28x28
a=img_mnist2.invert() #invert picture as mnist need
a=img_mnist2.strech_char(1) #preprocessing pictures, eliminate dark corner
#lcd.display(img2,oft=(10,40)) #display small 28x28 picture
a=img_mnist2.pix_to_ai() #generate data for ai
fmap_mnist=kpu.forward(task_mnist,img_mnist2) #run neural network model
plist_mnist=fmap_mnist[:] #get result (10 digit's probability)
pmax_mnist=max(plist_mnist) #get max probability
max_index_mnist=plist_mnist.index(pmax_mnist) #get the digit
print(str(max_index_mnist)+","+str(int(pmax_mnist*100)))
img_mnist1.draw_rectangle(0,0,45,50,color=(0,0,0),fill=True)
img_mnist1.draw_string(4,3,str(max_index_mnist),color=(255,255,255),scale=4)
img_mnist1.draw_string(4,50,str(clock.fps()),color=(255,255,255),scale=4)
lcd.display(img_mnist1,oft=(8,8)) #display large picture
# lcd.draw_string(8,8,"%d: %.3f"%(max_index,pmax),lcd.WHITE,lcd.BLACK)
print(clock.fps())
gc.collect()
kpu.deinit(task)
|
en
| 0.381409
|
#设置摄像头镜像 #设置摄像头翻转 # img_boot = image.Image("/sd/images/boot_digit.jpg") # lcd.display(img_boot, oft=(0,0)) # time.sleep(2) #task = kpu.load("/sd/model/mnist.kmodel") #load model from flash address 0x200000 #task_mnist = kpu.load("/sd/mnist.kmodel") #resize to mnist input 28x28 #invert picture as mnist need #preprocessing pictures, eliminate dark corner #lcd.display(img2,oft=(10,40)) #display small 28x28 picture #generate data for ai #run neural network model #get result (10 digit's probability) #get max probability #get the digit #display large picture # lcd.draw_string(8,8,"%d: %.3f"%(max_index,pmax),lcd.WHITE,lcd.BLACK)
| 2.529453
| 3
|
aldryn_newsblog_extra_plugins/utils.py
|
febsn/aldryn_newsblog_extra_plugins
| 1
|
6626720
|
<filename>aldryn_newsblog_extra_plugins/utils.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
import six
def get_additional_styles(extra_name=None):
    """
    Get additional style choices from Django settings.

    Copied from aldryn-events.utils.

    :param extra_name: optional name of a settings attribute to read first;
        falls back to ``ALDRYN_NEWSBLOG_PLUGIN_STYLES``. (Default changed
        from ``False`` to ``None`` — same falsy behaviour, clearer intent.)
    :return: list of ``(value, label)`` tuples usable as form-field choices.
    """
    choices = []
    if extra_name:
        raw = getattr(settings, extra_name,
                      getattr(settings, 'ALDRYN_NEWSBLOG_PLUGIN_STYLES', False)
                      )
    else:
        raw = getattr(settings, 'ALDRYN_NEWSBLOG_PLUGIN_STYLES', False)
    if raw:
        # The setting may be a comma-separated string or an iterable.
        if isinstance(raw, six.string_types):
            raw = raw.split(',')
        for choice in raw:
            try:
                clean = choice.strip()
                choices.append((clean.lower(), clean.title()))
            except Exception:
                # Historically on Aldryn a choice was a tuple of empty
                # strings and broke deployment; best-effort: if anything
                # fails we just skip that entry.
                pass
    return choices
|
<filename>aldryn_newsblog_extra_plugins/utils.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
import six
def get_additional_styles(extra_name=False):
"""
Get additional styles choices from settings
Copied from aldryn-events.utils
"""
choices = []
if extra_name:
raw = getattr(settings, extra_name,
getattr(settings, 'ALDRYN_NEWSBLOG_PLUGIN_STYLES', False)
)
else:
raw = getattr(settings, 'ALDRYN_NEWSBLOG_PLUGIN_STYLES', False)
if raw:
if isinstance(raw, six.string_types):
raw = raw.split(',')
for choice in raw:
try:
# Happened on aldryn to choice be a tuple with two
# empty strings and this break the deployment. To avoid that
# kind of issue if something fais we just ignore.
clean = choice.strip()
choices.append((clean.lower(), clean.title()))
except Exception:
pass
return choices
|
en
| 0.81618
|
# -*- coding: utf-8 -*- Get additional styles choices from settings Copied from aldryn-events.utils # Happened on aldryn to choice be a tuple with two # empty strings and this break the deployment. To avoid that # kind of issue if something fais we just ignore.
| 2.057306
| 2
|
geradorcpf.py
|
dimagela29/Python-POO
| 1
|
6626721
|
<reponame>dimagela29/Python-POO
from random import randint


def gera_cpf():
    """Generate a random, checksum-valid Brazilian CPF as an 11-digit string."""
    # Nine random base digits (never a leading zero, as in the original range).
    cpf = str(randint(100000000, 999999999))
    # Append the two verification digits using the standard CPF mod-11 scheme:
    # first pass weights 10..2 over 9 digits, second pass 11..2 over 10 digits.
    for _ in range(2):
        top_weight = len(cpf) + 1
        total = sum(int(ch) * (top_weight - pos) for pos, ch in enumerate(cpf))
        check = 11 - (total % 11)
        # A computed digit above 9 collapses to 0.
        cpf += str(0 if check > 9 else check)
    return cpf
|
from random import randint
def gera_cpf():
numero = str(randint(100000000, 999999999))
novo_cpf = numero # 9 números aleatórios
reverso = 10 # Contador reverso
total = 0 # O total das multiplicações
# Loop do CPF
for index in range(19):
if index > 8: # Primeiro índice vai de 0 a 9,
index -= 9 # São os 9 primeiros digitos do CPF
total += int(novo_cpf[index]) * reverso # Valor total da multiplicação
reverso -= 1 # Decrementa o contador reverso
if reverso < 2:
reverso = 11
d = 11 - (total % 11)
if d > 9: # Se o digito for > que 9 o valor é 0
d = 0
total = 0 # Zera o total
novo_cpf += str(d) # Concatena o digito gerado no novo cpf
return novo_cpf
|
pt
| 0.952595
|
# 9 números aleatórios # Contador reverso # O total das multiplicações # Loop do CPF # Primeiro índice vai de 0 a 9, # São os 9 primeiros digitos do CPF # Valor total da multiplicação # Decrementa o contador reverso # Se o digito for > que 9 o valor é 0 # Zera o total # Concatena o digito gerado no novo cpf
| 3.275203
| 3
|
tests/integration/test_graph.py
|
ewuerger/dbwily
| 0
|
6626722
|
import sys
import tempfile
from unittest.mock import patch
import wily.__main__ as main
from click.testing import CliRunner
# Platform-appropriate path to the sample module inside the test build tree.
_path = "src\\test.py" if sys.platform == "win32" else "src/test.py"
# Minimal environment for the CLI under test: BROWSER='echo %s' stops graph
# commands from opening a real browser; HOME points at a throwaway temp dir.
PATCHED_ENV = {
    "BROWSER": "echo %s",
    "LC_ALL": "C.UTF-8",
    "LANG": "C.UTF-8",
    "HOME": tempfile.gettempdir(),
}
def test_graph_no_cache(tmpdir, cache_path):
    """Graphing without a built cache must fail with exit code 1."""
    runner = CliRunner()
    with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
        result = runner.invoke(
            main.cli,
            ["--path", tmpdir, "--cache", cache_path, "graph", _path, "raw.loc"],
        )
    assert result.exit_code == 1, result.stdout
def test_graph(builddir):
    """Graph a single metric (raw.loc) for one file — happy path."""
    runner = CliRunner()
    with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
        result = runner.invoke(
            main.cli, ["--path", builddir, "graph", _path, "raw.loc"]
        )
    assert result.exit_code == 0, result.stdout
def test_graph_all(builddir):
    """Graph raw.loc with --all (include every revision)."""
    runner = CliRunner()
    with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
        result = runner.invoke(
            main.cli, ["--path", builddir, "graph", _path, "raw.loc", "--all"]
        )
    assert result.exit_code == 0, result.stdout


def test_graph_all_shorthand(builddir):
    """Graph with the shorthand metric name ('loc' instead of 'raw.loc').

    BUG FIX: this test was also named ``test_graph_all``, shadowing the one
    above so pytest only ever collected one of them; renamed so both run.
    """
    runner = CliRunner()
    with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
        result = runner.invoke(
            main.cli, ["--path", builddir, "graph", _path, "loc", "--all"]
        )
    assert result.exit_code == 0, result.stdout
def test_graph_changes(builddir):
    """Graph with --changes: only plot revisions where the metric changed."""
    runner = CliRunner()
    with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
        result = runner.invoke(
            main.cli, ["--path", builddir, "graph", _path, "raw.loc", "--changes"]
        )
    assert result.exit_code == 0, result.stdout
def test_graph_custom_x(builddir):
    """Graph raw.loc against a custom x-axis metric (-x raw.sloc)."""
    runner = CliRunner()
    with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
        result = runner.invoke(
            main.cli, ["--path", builddir, "graph", _path, "raw.loc", "-x", "raw.sloc"]
        )
    assert result.exit_code == 0, result.stdout
def test_graph_path(builddir):
    """Graph accepts a directory path ('src/') instead of a single file."""
    runner = CliRunner()
    with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
        result = runner.invoke(
            main.cli, ["--path", builddir, "graph", "src/", "raw.loc"]
        )
    assert result.exit_code == 0, result.stdout
def test_graph_multiple(builddir):
    """Graph two metrics at once (raw.loc and raw.comments)."""
    runner = CliRunner()
    with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
        result = runner.invoke(
            main.cli, ["--path", builddir, "graph", _path, "raw.loc", "raw.comments"]
        )
    assert result.exit_code == 0, result.stdout
def test_graph_multiple_custom_x(builddir):
    """Graph two metrics with a custom x-axis metric (-x raw.sloc)."""
    runner = CliRunner()
    with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
        result = runner.invoke(
            main.cli,
            [
                "--path",
                builddir,
                "graph",
                _path,
                "raw.loc",
                "raw.comments",
                "-x",
                "raw.sloc",
            ],
        )
    assert result.exit_code == 0, result.stdout
def test_graph_multiple_path(builddir):
    """Graph two metrics for a directory path rather than one file."""
    runner = CliRunner()
    with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
        result = runner.invoke(
            main.cli, ["--path", builddir, "graph", "src/", "raw.loc", "raw.comments"]
        )
    assert result.exit_code == 0, result.stdout
def test_graph_output(builddir):
    """Graph with -o: write the chart to a target HTML file."""
    runner = CliRunner()
    with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
        result = runner.invoke(
            main.cli,
            [
                "--debug",
                "--path",
                builddir,
                "graph",
                _path,
                "raw.loc",
                "-o",
                "test.html",
            ],
        )
    assert result.exit_code == 0, result.stdout
def test_graph_output_granular(builddir):
    """Graph a single function target (file.py:function) to an output file."""
    runner = CliRunner()
    with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
        result = runner.invoke(
            main.cli,
            [
                "--debug",
                "--path",
                builddir,
                "graph",
                "src/test.py:function1",
                "cyclomatic.complexity",
                "-o",
                "test_granular.html",
            ],
        )
    assert result.exit_code == 0, result.stdout
|
import sys
import tempfile
from unittest.mock import patch
import wily.__main__ as main
from click.testing import CliRunner
_path = "src\\test.py" if sys.platform == "win32" else "src/test.py"
PATCHED_ENV = {
"BROWSER": "echo %s",
"LC_ALL": "C.UTF-8",
"LANG": "C.UTF-8",
"HOME": tempfile.gettempdir(),
}
def test_graph_no_cache(tmpdir, cache_path):
runner = CliRunner()
with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
result = runner.invoke(
main.cli,
["--path", tmpdir, "--cache", cache_path, "graph", _path, "raw.loc"],
)
assert result.exit_code == 1, result.stdout
def test_graph(builddir):
""" Test the graph feature """
runner = CliRunner()
with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
result = runner.invoke(
main.cli, ["--path", builddir, "graph", _path, "raw.loc"]
)
assert result.exit_code == 0, result.stdout
def test_graph_all(builddir):
""" Test the graph feature """
runner = CliRunner()
with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
result = runner.invoke(
main.cli, ["--path", builddir, "graph", _path, "raw.loc", "--all"]
)
assert result.exit_code == 0, result.stdout
def test_graph_all(builddir):
""" Test the graph feature with shorthand metric"""
runner = CliRunner()
with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
result = runner.invoke(
main.cli, ["--path", builddir, "graph", _path, "loc", "--all"]
)
assert result.exit_code == 0, result.stdout
def test_graph_changes(builddir):
""" Test the graph feature """
runner = CliRunner()
with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
result = runner.invoke(
main.cli, ["--path", builddir, "graph", _path, "raw.loc", "--changes"]
)
assert result.exit_code == 0, result.stdout
def test_graph_custom_x(builddir):
""" Test the graph feature """
runner = CliRunner()
with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
result = runner.invoke(
main.cli, ["--path", builddir, "graph", _path, "raw.loc", "-x", "raw.sloc"]
)
assert result.exit_code == 0, result.stdout
def test_graph_path(builddir):
""" Test the graph feature """
runner = CliRunner()
with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
result = runner.invoke(
main.cli, ["--path", builddir, "graph", "src/", "raw.loc"]
)
assert result.exit_code == 0, result.stdout
def test_graph_multiple(builddir):
""" Test the graph feature with multiple metrics """
runner = CliRunner()
with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
result = runner.invoke(
main.cli, ["--path", builddir, "graph", _path, "raw.loc", "raw.comments"]
)
assert result.exit_code == 0, result.stdout
def test_graph_multiple_custom_x(builddir):
""" Test the graph feature with multiple metrics """
runner = CliRunner()
with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
result = runner.invoke(
main.cli,
[
"--path",
builddir,
"graph",
_path,
"raw.loc",
"raw.comments",
"-x",
"raw.sloc",
],
)
assert result.exit_code == 0, result.stdout
def test_graph_multiple_path(builddir):
""" Test the graph feature with multiple metrics """
runner = CliRunner()
with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
result = runner.invoke(
main.cli, ["--path", builddir, "graph", "src/", "raw.loc", "raw.comments"]
)
assert result.exit_code == 0, result.stdout
def test_graph_output(builddir):
""" Test the graph feature with target output file """
runner = CliRunner()
with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
result = runner.invoke(
main.cli,
[
"--debug",
"--path",
builddir,
"graph",
_path,
"raw.loc",
"-o",
"test.html",
],
)
assert result.exit_code == 0, result.stdout
def test_graph_output_granular(builddir):
""" Test the graph feature with target output file """
runner = CliRunner()
with patch.dict("os.environ", values=PATCHED_ENV, clear=True):
result = runner.invoke(
main.cli,
[
"--debug",
"--path",
builddir,
"graph",
"src/test.py:function1",
"cyclomatic.complexity",
"-o",
"test_granular.html",
],
)
assert result.exit_code == 0, result.stdout
|
en
| 0.847364
|
Test the graph feature Test the graph feature Test the graph feature with shorthand metric Test the graph feature Test the graph feature Test the graph feature Test the graph feature with multiple metrics Test the graph feature with multiple metrics Test the graph feature with multiple metrics Test the graph feature with target output file Test the graph feature with target output file
| 2.282074
| 2
|
datasource/Climate Scrapper.py
|
mqchau/citymatch
| 0
|
6626723
|
# coding: utf-8
# In[1]:
import json
from bs4 import BeautifulSoup
import requests
from pprint import pprint
import re
import html5lib
# Mapping from full US state/district name to its two-letter postal
# abbreviation, as required by the weatherbase.com query string.
states = {
    'Illinois':'IL',
    'Kansas':'KS',
    'South Dakota':'SD',
    'Idaho':'ID',
    'South Carolina':'SC',
    'Ohio':'OH',
    'Wyoming':'WY',
    'District of Columbia':'DC',
    'Alaska':'AK',
    'Rhode Island':'RI',
    'Texas':'TX',
    'Maryland':'MD',
    'Minnesota':'MN',
    'New Mexico':'NM',
    'Nevada':'NV',
    'Iowa':'IA',
    'West Virginia':'WV',
    'North Dakota':'ND',
    'Arkansas':'AR',
    'Arizona':'AZ',
    'Louisiana':'LA',
    'Delaware':'DE',
    'Florida':'FL',
    'Montana':'MT',
    'Missouri':'MO',
    'North Carolina':'NC',
    'Oklahoma':'OK',
    'Nebraska':'NE',
    'California':'CA',
    'Mississippi':'MS',
    'Wisconsin':'WI',
    'Indiana':'IN',
    'Georgia':'GA',
    'Massachusetts':'MA',
    'Tennessee':'TN',
    'New Hampshire':'NH',
    'Washington':'WA',
    'New Jersey':'NJ',
    'Connecticut':'CT',
    'Maine':'ME',
    'Oregon':'OR',
    'Vermont':'VT',
    'New York':'NY',
    'Alabama':'AL',
    'Hawaii':'HI',
    'Michigan':'MI',
    'Pennsylvania':'PA',
    'Virginia':'VA',
    'Utah':'UT',
    'Kentucky':'KY',
    'Colorado':'CO'
}
def getURL(state):
    """Build the weatherbase.com city-listing URL for a US state name."""
    abbr = states[state]
    hyphenated = state.replace(" ", "-")
    return ('http://www.weatherbase.com/weather/city.php3?c=US&s=' + abbr
            + '&statename=' + hyphenated + '-United-States-of-America')
def getCitiesURL(cities):
    """Map each city listing element to its absolute weatherbase.com URL.

    ``cities`` is an iterable of parsed ``<li>`` elements whose anchor holds
    a relative href; returns {city name: absolute URL}.
    """
    return {
        entry.text: 'http://www.weatherbase.com' + entry.a.get('href')
        for entry in cities
    }
def getClimate(cities, state):
    """Scrape average high/low temperature and precipitation for each city.

    ``cities`` maps city name -> weatherbase city URL.  For every city the
    page is fetched, the summary tables are scanned, and one CSV row
    ``city,state,high,low,precip`` is appended to ``climateTable.csv``.
    """
    for city in cities:
        temp_high = ''
        temp_high_f = False
        temp_low = ''
        temp_low_f = False
        precip = ''
        precip_f = False
        url = cities[city]
        handle = requests.get(url)
        soup = BeautifulSoup(handle.text, 'html.parser')
        # The metric tables live inside the "premium" content container.
        div = soup.find(attrs={'class':'p402_premium'})
        tables = div.find_all('table')
        print('-'*5+city+', '+state+'-'*5)
        for table in tables:
            # A header table names the metric; the *next* table holds its
            # data, so set the flag and pick the value up next iteration.
            if table.find('td').text == 'Average Precipitation' and not precip_f:
                print('\tPrecipitation Found')
                precip_f = True
                continue
            if table.find('td').text == 'Average High Temperature' and not temp_high_f:
                print('\tHigh Temperature Found')
                temp_high_f = True
                continue
            if table.find('td').text == 'Average Low Temperature' and not temp_low_f:
                print('\tLow Temperature Found')
                temp_low_f = True
                continue
            if not (precip_f or temp_high_f or temp_low_f):
                continue
            # The annual value sits in the first white row's "data" cell.
            val = table.find('tr', attrs={'bgcolor':'white'}).find('td', attrs={'class':'data'}).text
            if precip_f:
                precip = val
                precip_f = False
            if temp_high_f:
                temp_high = val
                temp_high_f = False
            if temp_low_f:
                temp_low = val
                temp_low_f = False
        city_output = city+','+state+','+temp_high+','+temp_low+','+precip
        print(city_output)
        # BUG FIX: rows previously ran together into a single line because
        # no newline was written; also close the file via a context manager.
        with open('climateTable.csv', 'a') as fd:
            fd.write(city_output + '\n')
# Driver loop: for every state, fetch its city index page, collect the
# per-city URLs, then scrape and append each city's climate row.
for state in states.keys():
    url = getURL(state)
    # url = 'http://www.weatherbase.com/weather/city.php3?c=US&s='+'CA'+'&statename='+'California'+'-United-States-of-America'
    handle = requests.get(url)
    data = handle.text
    soup = BeautifulSoup(data, 'html.parser')
    # The state page lists its cities inside the "row-nohover" element.
    city_list = soup.find(id="row-nohover").find_all('li')
    cities = getCitiesURL(city_list)
    getClimate(cities, state)
# In[ ]:
|
# coding: utf-8
# In[1]:
# Scrapes weatherbase.com for average high/low temperature and
# precipitation of every US city and appends rows to climateTable.csv.
import json
from bs4 import BeautifulSoup
import requests
from pprint import pprint
import re
import html5lib
# Full state/district name -> two-letter postal abbreviation used in URLs.
states = {
    'Illinois':'IL',
    'Kansas':'KS',
    'South Dakota':'SD',
    'Idaho':'ID',
    'South Carolina':'SC',
    'Ohio':'OH',
    'Wyoming':'WY',
    'District of Columbia':'DC',
    'Alaska':'AK',
    'Rhode Island':'RI',
    'Texas':'TX',
    'Maryland':'MD',
    'Minnesota':'MN',
    'New Mexico':'NM',
    'Nevada':'NV',
    'Iowa':'IA',
    'West Virginia':'WV',
    'North Dakota':'ND',
    'Arkansas':'AR',
    'Arizona':'AZ',
    'Louisiana':'LA',
    'Delaware':'DE',
    'Florida':'FL',
    'Montana':'MT',
    'Missouri':'MO',
    'North Carolina':'NC',
    'Oklahoma':'OK',
    'Nebraska':'NE',
    'California':'CA',
    'Mississippi':'MS',
    'Wisconsin':'WI',
    'Indiana':'IN',
    'Georgia':'GA',
    'Massachusetts':'MA',
    'Tennessee':'TN',
    'New Hampshire':'NH',
    'Washington':'WA',
    'New Jersey':'NJ',
    'Connecticut':'CT',
    'Maine':'ME',
    'Oregon':'OR',
    'Vermont':'VT',
    'New York':'NY',
    'Alabama':'AL',
    'Hawaii':'HI',
    'Michigan':'MI',
    'Pennsylvania':'PA',
    'Virginia':'VA',
    'Utah':'UT',
    'Kentucky':'KY',
    'Colorado':'CO'
}
# Build the weatherbase city-index URL for one state name.
def getURL(state):
    state_abbr = states[state]
    state = state.replace(" ","-")
    url = 'http://www.weatherbase.com/weather/city.php3?c=US&s='+state_abbr+'&statename='+state+'-United-States-of-America'
    return url
    # print(url)
# Map each city <li> element to {city name: absolute weatherbase URL}.
def getCitiesURL(cities):
    cityURL = {}
    for city in cities:
        url = 'http://www.weatherbase.com'+city.a.get('href')
        cityname = city.text
        cityURL[cityname] = url
        # break
    return cityURL
# Scrape each city's page; a flag marks that the *next* table holds the
# value for the metric named by the current header table.
def getClimate(cities, state):
    for city in cities:
        temp_high = ''
        temp_high_f = False
        temp_low = ''
        temp_low_f = False
        precip = ''
        precip_f = False
        url = cities[city]
        handle = requests.get(url)
        data = handle.text
        soup = BeautifulSoup(data, 'html.parser')
        div = soup.find(attrs={'class':'p402_premium'})
        tables = div.find_all('table')
        print('-'*5+city+', '+state+'-'*5)
        for table in tables:
            # print(table.find('td').text)
            if table.find('td').text == 'Average Precipitation' and precip_f == False:
                print('\tPrecipitation Found')
                precip_f = True
                continue
            if table.find('td').text == 'Average High Temperature' and temp_high_f == False:
                print('\tHigh Temperature Found')
                temp_high_f = True
                continue
            if table.find('td').text == 'Average Low Temperature' and temp_low_f == False:
                print('\tLow Temperature Found')
                temp_low_f = True
                continue
            if precip_f == False and temp_high_f == False and temp_low_f == False:
                continue
            else:
                val = table.find('tr', attrs={'bgcolor':'white'}).find('td', attrs={'class':'data'}).text
                # print(data)
                if precip_f == True:
                    precip = val
                    # print('precip',precip)
                    precip_f = False
                if temp_high_f == True:
                    temp_high = val
                    # print('temphigh',temp_high)
                    temp_high_f = False
                if temp_low_f == True:
                    temp_low = val
                    # print('templow',temp_low)
                    temp_low_f = False
        city_output = city+','+state+','+temp_high+','+temp_low+','+precip
        print(city_output)
        # NOTE(review): no newline is written, so successive rows run
        # together in climateTable.csv — looks like a bug; confirm intent.
        fd = open('climateTable.csv', 'a')
        fd.write(city_output)
        fd.close()
# Driver: iterate all states, gather city URLs, scrape each city.
for state in states.keys():
    url = getURL(state)
    # url = 'http://www.weatherbase.com/weather/city.php3?c=US&s='+'CA'+'&statename='+'California'+'-United-States-of-America'
    handle = requests.get(url)
    data = handle.text
    soup = BeautifulSoup(data, 'html.parser')
    city_list = soup.find(id="row-nohover").find_all('li')
    cities = getCitiesURL(city_list)
    getClimate(cities, state)
# In[ ]:
|
en
| 0.310787
|
# coding: utf-8 # In[1]: # print(url) # break # print(table.find('td').text) # print(data) # print('precip',precip) # print('temphigh',temp_high) # print('templow',temp_low) # url = 'http://www.weatherbase.com/weather/city.php3?c=US&s='+'CA'+'&statename='+'California'+'-United-States-of-America' # In[ ]:
| 2.537077
| 3
|
set1/p1_2_1.py
|
matheuspercario/python-mit
| 0
|
6626724
|
# PYTHON - MIT - UNICAMP
# =============================================================================
# Created By : <NAME>
# Created Date : February 2nd, 2021
# =============================================================================
# Compute the arithmetic mean of `numbers`; None for an empty list.
numbers = [2, 7, 3, 9, 13]
# BUG FIX: the previous empty-list guard only set `out = None` and then
# fell through to the division, which would raise ZeroDivisionError for
# an empty list.  Guard the whole computation instead.
if not numbers:
    out = None
else:
    # sum() replaces the manual accumulator loop.
    out = sum(numbers) / len(numbers)
print(out)  # Display the result
|
# PYTHON - MIT - UNICAMP
# =============================================================================
# Created By : <NAME>
# Created Date : February 2nd, 2021
# =============================================================================
numbers = [2, 7, 3, 9, 13]
_sum = 0
# Check whether the list is empty
# NOTE(review): this guard only sets `out`; the loop and division below
# still execute, so an empty list would raise ZeroDivisionError — confirm.
if len(numbers) == 0:
    out = None
# Iterate over the list, accumulating the values
for n in numbers:
    _sum += n
# Compute the arithmetic mean
out = _sum / len(numbers)
print(out)  # Display the result
|
pt
| 0.265669
|
# PYTHON - MIT - UNICAMP # ============================================================================= # Created By : <NAME> # Created Date : February 2nd, 2021 # ============================================================================= # Verificar se a lista está vazia # Iterando sobre a lista e somando valores # Calcular MA # Exibir resultado
| 3.873895
| 4
|
setup.py
|
jamesabel/sundry
| 2
|
6626725
|
import os
from setuptools import setup
# All package metadata is kept in sundry/__version__.py and re-exported here.
from sundry.__version__ import __version__, __title__, __author__, __author_email__, __url__, __download_url__, __description__
# The long description is the package's reStructuredText readme, which lives
# inside the package directory itself.
readme_file_path = os.path.join(__title__, "readme.rst")
with open(readme_file_path, encoding="utf-8") as f:
    long_description = "\n" + f.read()
setup(
    name=__title__,
    description=__description__,
    long_description=long_description,
    long_description_content_type="text/x-rst",
    version=__version__,
    author=__author__,
    author_email=__author_email__,
    license="MIT License",
    url=__url__,
    download_url=__download_url__,
    keywords=["utility"],
    packages=[__title__, f"{__title__}.uidb32"],
    # NOTE(review): package_data paths are normally relative to the package
    # directory; this passes the path including the package name — confirm
    # the readme is actually picked up in built distributions.
    package_data={__title__: [readme_file_path]},
    install_requires=["python-dateutil", "pillow", "ismain", "typeguard", "boto3"],
    classifiers=[],
)
|
import os
from setuptools import setup
# Package metadata re-exported from sundry/__version__.py.
from sundry.__version__ import __version__, __title__, __author__, __author_email__, __url__, __download_url__, __description__
# Long description comes from the readme inside the package directory.
readme_file_path = os.path.join(__title__, "readme.rst")
with open(readme_file_path, encoding="utf-8") as f:
    long_description = "\n" + f.read()
setup(
    name=__title__,
    description=__description__,
    long_description=long_description,
    long_description_content_type="text/x-rst",
    version=__version__,
    author=__author__,
    author_email=__author_email__,
    license="MIT License",
    url=__url__,
    download_url=__download_url__,
    keywords=["utility"],
    packages=[__title__, f"{__title__}.uidb32"],
    package_data={__title__: [readme_file_path]},
    install_requires=["python-dateutil", "pillow", "ismain", "typeguard", "boto3"],
    classifiers=[],
)
|
none
| 1
| 1.317501
| 1
|
|
products/models.py
|
minaeid90/ecommerce
| 0
|
6626726
|
<gh_stars>0
from django.db import models
from django.db.models.signals import pre_save, post_save
import random
import os
from ecommerce.utils import unique_slug_generator
class ProductQuerySet(models.query.QuerySet):
    """QuerySet with a filter for products flagged as active."""
    def active(self):
        # Only products whose `active` field is True.
        return self.filter(active=True)
class ProductManager(models.Manager):
    """Manager that exposes only active products plus lookup/search helpers."""
    def get_queryset(self):
        return ProductQuerySet(self.model, using=self._db)
    def all(self):
        # Narrow the default queryset to active products only.
        return self.get_queryset().active()
    def get_by_id(self, pk):
        # Return the single product with this pk, or None when absent.
        queryset = self.filter(pk=pk)
        if queryset.count() == 1:
            return queryset.first()
        return None
    def search(self, query):
        # Case-insensitive substring match on title, description, price or
        # related tag title.
        lookups = ( models.Q(title__icontains=query) |
                    models.Q(description__icontains=query) |
                    models.Q(price__icontains=query) |
                    models.Q(tag__title__icontains=query))
        # distinct() because the tag join can produce duplicate rows.
        return self.all().filter(lookups).distinct()
def get_filename_ext(filepath):
    """Split a path's basename into (stem, extension)."""
    base = os.path.basename(filepath)
    return os.path.splitext(base)
def upload_image_path(instance, filename):
new_filename = random.randint(1,3910209312)
name, ext = get_filename_ext(filename)
final_filename = '{new_filename}{ext}'.format(new_filename=new_filename, ext=ext)
return "products/{new_filename}/{final_filename}".format(
new_filename=new_filename,
final_filename=final_filename
)
class Product(models.Model):
    """A sellable product with slug-based detail URLs."""
    title = models.CharField(max_length=150)
    # Generated from the title by the pre_save signal when left blank.
    slug = models.SlugField(blank=True, unique=True)
    description = models.TextField()
    price = models.DecimalField(max_digits=5, decimal_places=2)
    image = models.ImageField(upload_to=upload_image_path, null=True, blank=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    # Soft-delete flag; ProductManager.all() filters on it.
    active = models.BooleanField(default=True)
    objects = ProductManager()
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # NOTE(review): django.core.urlresolvers was removed in Django 2.0;
        # modern projects import reverse from django.urls — confirm the
        # Django version in use.
        from django.core.urlresolvers import reverse
        return reverse('products:detail', kwargs={'slug': self.slug})
def product_pre_save_receiver(sender, instance, *args, **kwargs):
    """Fill in a unique slug right before a Product is first saved."""
    if not instance.slug:
        instance.slug = unique_slug_generator(instance)
pre_save.connect(product_pre_save_receiver, sender=Product)
|
from django.db import models
from django.db.models.signals import pre_save, post_save
import random
import os
from ecommerce.utils import unique_slug_generator
# QuerySet limited to products flagged active.
class ProductQuerySet(models.query.QuerySet):
    def active(self):
        return self.filter(active=True)
# Manager exposing only active products plus lookup/search helpers.
class ProductManager(models.Manager):
    def get_queryset(self):
        return ProductQuerySet(self.model, using=self._db)
    def all(self):
        return self.get_queryset().active()
    def get_by_id(self, pk):
        # Single product for this pk, or None when absent.
        queryset = self.filter(pk=pk)
        if queryset.count() == 1:
            return queryset.first()
        return None
    def search(self, query):
        # Case-insensitive match on title/description/price/tag title.
        lookups = ( models.Q(title__icontains=query) |
                    models.Q(description__icontains=query) |
                    models.Q(price__icontains=query) |
                    models.Q(tag__title__icontains=query))
        return self.all().filter(lookups).distinct()
# Split a path's basename into (stem, extension).
def get_filename_ext(filepath):
    base_name = os.path.basename(filepath)
    name, ext = os.path.splitext(base_name)
    return name, ext
# Randomized storage path for uploaded product images; only the original
# extension is preserved.
def upload_image_path(instance, filename):
    new_filename = random.randint(1,3910209312)
    name, ext = get_filename_ext(filename)
    final_filename = '{new_filename}{ext}'.format(new_filename=new_filename, ext=ext)
    return "products/{new_filename}/{final_filename}".format(
        new_filename=new_filename,
        final_filename=final_filename
    )
class Product(models.Model):
    """Sellable product; slug is auto-generated by the pre_save signal."""
    title = models.CharField(max_length=150)
    slug = models.SlugField(blank=True, unique=True)
    description = models.TextField()
    price = models.DecimalField(max_digits=5, decimal_places=2)
    image = models.ImageField(upload_to=upload_image_path, null=True, blank=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    active = models.BooleanField(default=True)
    objects = ProductManager()
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # NOTE(review): django.core.urlresolvers was removed in Django 2.0 —
        # confirm the Django version; modern code imports from django.urls.
        from django.core.urlresolvers import reverse
        return reverse('products:detail', kwargs={'slug': self.slug})
# Populate a unique slug before first save when none was provided.
def product_pre_save_receiver(sender, instance, *args, **kwargs):
    if not instance.slug:
        instance.slug = unique_slug_generator(instance)
pre_save.connect(product_pre_save_receiver, sender=Product)
|
none
| 1
| 2.049926
| 2
|
|
8.Deques/python/LinkedDeque.py
|
unclexo/data-structures-and-algorithms
| 2
|
6626727
|
class LinkedDeque:
    """Double-ended queue backed by a singly linked list.

    ``add_left``/``remove_left`` operate on the head (the *first* element);
    ``add_right``/``remove_right`` operate on the tail (the *last* element).
    ``remove_right`` is O(n) because the list is singly linked.
    """
    class _Node:
        # Lightweight singly linked node.
        __slots__ = '_element', '_next'
        def __init__(self, element, next_node):
            self._element = element
            self._next = next_node

    def __init__(self):
        self._head = None
        self._tail = None
        self._size = 0

    def __len__(self):
        return self._size

    def is_empty(self):
        return self._size == 0

    def first(self):
        """Return (but do not remove) the first (leftmost) element."""
        if self.is_empty():
            raise Empty('Deque is empty')
        # BUG FIX: previously returned the tail element (i.e. last()) here.
        return self._head._element

    def last(self):
        """Return (but do not remove) the last (rightmost) element."""
        if self.is_empty():
            raise Empty('Deque is empty')
        # BUG FIX: previously returned the head element (i.e. first()) here.
        return self._tail._element

    def add_right(self, element):
        """Append an element at the tail."""
        new_node = self._Node(element, None)
        if self.is_empty():
            self._head = new_node
        else:
            self._tail._next = new_node
        self._tail = new_node
        self._size += 1

    def add_left(self, element):
        """Prepend an element at the head."""
        self._head = self._Node(element, self._head)
        if self._tail is None:
            # BUG FIX: on a previously empty deque the tail was never set,
            # leaving _tail dangling at None.
            self._tail = self._head
        self._size += 1

    def remove_right(self):
        """Remove and return the tail element (O(n) walk to find it)."""
        if self.is_empty():
            raise Empty('LinkedDeque is empty')
        # BUG FIX: the old walk returned the *predecessor's* element instead
        # of the removed tail's, and returned None (without shrinking the
        # deque) when only one element was present.
        element = self._tail._element
        if self._head is self._tail:
            self._head = self._tail = None
        else:
            current = self._head
            while current._next is not self._tail:
                current = current._next
            current._next = None
            self._tail = current
        self._size -= 1
        return element

    def remove_left(self):
        """Remove and return the head element."""
        if self.is_empty():
            raise Empty('LinkedDeque is empty')
        element = self._head._element
        self._head = self._head._next
        if self._head is None:
            # BUG FIX: clear the dangling tail when the deque empties.
            self._tail = None
        self._size -= 1
        return element
class Empty(Exception):
    """Raised when an accessor or remover is called on an empty deque."""
    pass
def main():
    """Small manual smoke test for LinkedDeque."""
    deque = LinkedDeque()
    for element in ('A', 'B'):
        deque.add_right(element)
    for element in ('E', 'F'):
        deque.add_left(element)
    print(deque)
if __name__ == '__main__':
    main()
|
class LinkedDeque:
    """Double-ended queue over a singly linked list (head = left end)."""
    class _Node:
        # Lightweight singly linked node.
        __slots__ = '_element', '_next'
        def __init__(self, element, next_node):
            self._element = element
            self._next = next_node
    def __init__(self):
        self._head = None
        self._tail = None
        self._size = 0
    def __len__(self):
        return self._size
    def is_empty(self):
        return self._size == 0
    def first(self):
        """ Returns (but do not remove) the first element from the deque """
        if self.is_empty():
            raise Empty('Deque is empty')
        # NOTE(review): returns the *tail* element although add_left
        # prepends at the head — first()/last() look swapped; confirm.
        return self._tail._element
    def last(self):
        """ Returns (but do not remove) the last element from the deque """
        if self.is_empty():
            raise Empty('Deque is empty')
        return self._head._element
    def add_right(self, element):
        # Append at the tail (head is also set when the deque was empty).
        new_node = self._Node(element, None)
        if self.is_empty():
            self._head = new_node
        else:
            self._tail._next = new_node
        self._tail = new_node
        self._size += 1
    def add_left(self, element):
        # Prepend at the head.
        # NOTE(review): _tail is not initialised when the deque was empty,
        # so a deque built only via add_left keeps _tail = None — confirm.
        self._head = self._Node(element, self._head)
        self._size += 1
    def remove_right(self):
        # Walk to the node before the tail, detach the tail.
        if self.is_empty():
            raise Empty('LinkedDeque is empty')
        current = self._head
        while current is not None:
            if current._next == self._tail:
                current._next = None
                self._tail = current
                self._size -= 1
                # NOTE(review): returns the predecessor's element, not the
                # removed tail's; and with exactly one element the loop
                # never matches, so None is returned without shrinking the
                # deque — confirm intent.
                return current._element
            current = current._next
    def remove_left(self):
        # Pop the head element.
        if self.is_empty():
            raise Empty('LinkedDeque is empty')
        element = self._head._element
        self._head = self._head._next
        self._size -= 1
        return element
class Empty(Exception):
    """Raised when accessing or removing from an empty deque."""
    pass
def main():
    # Manual smoke test.
    d = LinkedDeque()
    d.add_right('A')
    d.add_right('B')
    d.add_left('E')
    d.add_left('F')
    # print(d.remove_right() + ' ' + d.remove_left())
    #
    # print(d.first())
    # print(d.last())
    # print(len(d))
    print(d)
if __name__ == '__main__':
    main()
|
en
| 0.374752
|
Returns (but do not remove) the first element from the deque Returns (but do not remove) the last element from the deque # print(d.remove_right() + ' ' + d.remove_left()) # # print(d.first()) # print(d.last()) # print(len(d))
| 3.888508
| 4
|
hook.py
|
Abriko/letsencrypt-alidns-hook
| 3
|
6626728
|
#!/usr/bin/env python
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from future import standard_library
import dns.exception
import dns.resolver
import logging
import os
import requests
import base64
import sys
import time
import hmac
import uuid
from hashlib import sha1
from tld import get_tld
standard_library.install_aliases()
# Enable verified HTTPS requests on older Pythons
# http://urllib3.readthedocs.org/en/latest/security.html
if sys.version_info[0] == 2:
    requests.packages.urllib3.contrib.pyopenssl.inject_into_urllib3()
    from urllib import quote
    from urllib import urlencode
else:
    from urllib.parse import quote
    from urllib.parse import urlencode
# Module-level logger; StreamHandler defaults to stderr.
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
# Aliyun API credentials are mandatory; abort immediately when missing.
try:
    ACCESS_KEY_ID = os.environ['KEY_ID']
    ACCESS_KEY_SECRET = os.environ['KEY_SECRET']
except KeyError:
    logger.error(" + Unable to locate Aliyun api credentials in environment!")
    sys.exit(1)
# Optional whitespace-separated resolver list used for propagation checks.
try:
    dns_servers = os.environ['ALI_DNS_SERVERS']
    dns_servers = dns_servers.split()
except KeyError:
    dns_servers = False
def _has_dns_propagated(name, token):
    """Return True once a TXT record for `name` matching `token` is visible.

    Uses the resolvers from ALI_DNS_SERVERS when configured, otherwise the
    system resolver.  Any DNS error is treated as "not propagated yet".
    """
    txt_records = []
    try:
        if dns_servers:
            custom_resolver = dns.resolver.Resolver()
            custom_resolver.nameservers = dns_servers
            dns_response = custom_resolver.query(name, 'TXT')
        else:
            dns_response = dns.resolver.query(name, 'TXT')
        for rdata in dns_response:
            for txt_record in rdata.strings:
                # BUG FIX: dnspython yields bytes for TXT strings on
                # Python 3 while `token` is text, so the equality below
                # could never match; normalize to text before comparing.
                if isinstance(txt_record, bytes):
                    txt_record = txt_record.decode('utf-8')
                txt_records.append(txt_record)
    except dns.exception.DNSException:
        return False
    for txt_record in txt_records:
        if txt_record == token:
            return True
    return False
# for ali api signature
def _percent_encode(txt):
res = quote(str(txt))
res = res.replace('+', '%20')
res = res.replace('*', '%2A')
res = res.replace('%7E', '~')
return res
def _compute_signature(parameters, access_key_secret):
    """Compute the Aliyun API HMAC-SHA1 request signature (as bytes).

    Parameters are sorted by key, percent-encoded into the canonical query
    string, signed with ``access_key_secret + "&"`` and base64-encoded.
    """
    sorted_parameters = sorted(
        parameters.items(), key=lambda item: item[0])
    canonicalized_query_string = ''
    for (k, v) in sorted_parameters:
        canonicalized_query_string += '&' + \
            _percent_encode(k) + '=' + _percent_encode(v)
    string_to_sign = 'GET&%2F&' + _percent_encode(canonicalized_query_string[1:])
    bs = access_key_secret + "&"
    h = hmac.new(
        key=bytearray(bs, 'utf-8'),
        msg=bytearray(string_to_sign, 'utf-8'),
        digestmod=sha1
    )
    # BUG FIX: base64.encodestring was removed in Python 3.9; b64encode
    # yields the identical value for a single SHA1 digest (the old call's
    # trailing newline was stripped anyway).
    return base64.b64encode(h.digest())
def _compose_url(params):
    """Build a signed Aliyun DNS API GET URL for the given action params."""
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    # Common parameters required by every Aliyun API call.
    parameters = {
        'Format': 'JSON',
        'Version': '2015-01-09',
        'AccessKeyId': ACCESS_KEY_ID,
        'SignatureVersion': '1.0',
        'SignatureMethod': 'HMAC-SHA1',
        'SignatureNonce': str(uuid.uuid1()),
        'Timestamp': timestamp,
    }
    for key in params.keys():
        parameters[key] = params[key]
    # The signature covers all parameters and is then appended itself.
    signature = _compute_signature(parameters, ACCESS_KEY_SECRET)
    parameters['Signature'] = signature
    url = "https://alidns.aliyuncs.com/?" + urlencode(parameters)
    return url
def _make_request(params):
    """Perform a signed GET against the Aliyun DNS API and return its JSON."""
    response = requests.get(_compose_url(params))
    response.raise_for_status()
    try:
        return response.json()
    except ValueError as e:
        raise SystemExit(e)
# https://help.aliyun.com/document_detail/29772.html AddDomainRecord
def create_txt_record(args):
    """Create the ACME challenge TXT record and wait until it propagates.

    args: [domain, <unused>, token] as passed by the letsencrypt.sh hook.
    """
    domain, token = args[0], args[2]
    # Split registered domain from subdomain to build the record name.
    res = get_tld("http://" + domain, as_object=True)
    if res.subdomain:
        name = "{0}.{1}".format('_acme-challenge', res.subdomain)
    else:
        name = '_acme-challenge'
    payload = {
        'Action': 'AddDomainRecord',
        'DomainName': res.tld,
        'RR': name,
        'Type': 'TXT',
        'Value': token,
    }
    r = _make_request(payload)
    record_id = r['RecordId']
    logger.debug(" + TXT record created, ID: {0}".format(record_id))
    # give it 10 seconds to settle down and avoid nxdomain caching
    logger.info(" + Settling down for 10s...")
    time.sleep(10)
    look_up_args = "{0}.{1}".format(name, res.tld)
    # Poll until the record is visible before letting the CA validate.
    while(_has_dns_propagated(look_up_args, token) is False):
        logger.info(" + DNS not propagated, waiting 30s...")
        time.sleep(30)
# https://help.aliyun.com/document_detail/29776.html DescribeDomainRecords
# https://help.aliyun.com/document_detail/29773.html DeleteDomainRecord
def delete_txt_record(args):
    """Delete the ACME challenge TXT record(s) created for this token.

    args: [domain, <unused>, token] as passed by the letsencrypt.sh hook.
    """
    domain, token = args[0], args[2]
    if not domain:
        logger.info(" + http_request() error in letsencrypt.sh?")
        return
    res = get_tld("http://" + domain, as_object=True)
    if res.subdomain:
        name = "{0}.{1}".format('_acme-challenge', res.subdomain)
    else:
        name = '_acme-challenge'
    # Find the record(s) matching name + token, then delete each by id.
    payload = {
        'Action': 'DescribeDomainRecords',
        'DomainName': res.tld,
        'RRKeyWord': name,
        'TypeKeyWord': 'TXT',
        'ValueKeyWord': token,
    }
    r = _make_request(payload)
    logger.debug(" + Found {0} record".format(r['TotalCount']))
    if r['TotalCount'] > 0:
        for record in r['DomainRecords']['Record']:
            logger.debug(
                " + Deleting TXT record name: {0}.{1}, RecordId: {2}".format(
                    record['RR'], record['DomainName'], record['RecordId']))
            payload = {
                'Action': 'DeleteDomainRecord',
                'RecordId': record['RecordId'],
            }
            r_d = _make_request(payload)
            if r_d['RecordId'] == record['RecordId']:
                # BUG FIX: previously logged r['TotalCount'] (the match
                # count) here instead of the id of the deleted record.
                logger.debug(
                    " + RecordId {0} has been deleted".format(record['RecordId']))
def deploy_cert(args):
    """Log the certificate and key paths for the operator to wire into nginx."""
    (domain, privkey_pem, cert_pem,
     fullchain_pem, chain_pem, timestamp) = args
    logger.info(' + ssl_certificate: {0}'.format(fullchain_pem))
    logger.info(' + ssl_certificate_key: {0}'.format(privkey_pem))
def unchanged_cert(args):
    """No-op hook: the certificate is still valid, nothing to deploy."""
    return
def exit_hook(args):
    """No-op hook invoked once when the hook run finishes."""
    return
def main(argv):
    """Dispatch a hook stage name (argv[0]) to its handler."""
    ops = {
        'deploy_challenge': create_txt_record,
        'clean_challenge': delete_txt_record,
        'deploy_cert': deploy_cert,
        'unchanged_cert': unchanged_cert,
        'exit_hook': exit_hook,
    }
    logger.info(" + AliDNS hook executing: {0}".format(argv[0]))
    # NOTE(review): stages not listed above raise KeyError; confirm the
    # hook runner only ever invokes these five stages.
    ops[argv[0]](argv[1:])
if __name__ == '__main__':
    main(sys.argv[1:])
|
#!/usr/bin/env python
#
# letsencrypt.sh/dehydrated hook that manages ACME dns-01 challenge TXT
# records through the Aliyun (AliDNS) API.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from future import standard_library
import dns.exception
import dns.resolver
import logging
import os
import requests
import base64
import sys
import time
import hmac
import uuid
from hashlib import sha1
from tld import get_tld
standard_library.install_aliases()
# Enable verified HTTPS requests on older Pythons
# http://urllib3.readthedocs.org/en/latest/security.html
if sys.version_info[0] == 2:
    requests.packages.urllib3.contrib.pyopenssl.inject_into_urllib3()
    from urllib import quote
    from urllib import urlencode
else:
    from urllib.parse import quote
    from urllib.parse import urlencode
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
# Aliyun credentials are mandatory; abort immediately when missing.
try:
    ACCESS_KEY_ID = os.environ['KEY_ID']
    ACCESS_KEY_SECRET = os.environ['KEY_SECRET']
except KeyError:
    logger.error(" + Unable to locate Aliyun api credentials in environment!")
    sys.exit(1)
# Optional whitespace-separated resolver list for propagation checks.
try:
    dns_servers = os.environ['ALI_DNS_SERVERS']
    dns_servers = dns_servers.split()
except KeyError:
    dns_servers = False
# True once a TXT record matching `token` is visible for `name`.
def _has_dns_propagated(name, token):
    txt_records = []
    try:
        if dns_servers:
            custom_resolver = dns.resolver.Resolver()
            custom_resolver.nameservers = dns_servers
            dns_response = custom_resolver.query(name, 'TXT')
        else:
            dns_response = dns.resolver.query(name, 'TXT')
        for rdata in dns_response:
            for txt_record in rdata.strings:
                # NOTE(review): dnspython yields bytes here on Python 3
                # while `token` is text, so the comparison below may never
                # match — confirm on the targeted Python version.
                txt_records.append(txt_record)
    except dns.exception.DNSException:
        return False
    for txt_record in txt_records:
        if txt_record == token:
            return True
    return False
# for ali api signature
def _percent_encode(txt):
    res = quote(str(txt))
    res = res.replace('+', '%20')
    res = res.replace('*', '%2A')
    res = res.replace('%7E', '~')
    return res
# HMAC-SHA1 signature over the canonicalized, sorted query string.
def _compute_signature(parameters, access_key_secret):
    sortedParameters = sorted(
        parameters.items(), key=lambda parameters: parameters[0])
    canonicalizedQueryString = ''
    for (k, v) in sortedParameters:
        canonicalizedQueryString += '&' + \
            _percent_encode(k) + '=' + _percent_encode(v)
    stringToSign = 'GET&%2F&' + _percent_encode(canonicalizedQueryString[1:])
    bs = access_key_secret + "&"
    h = hmac.new(
        key=bytearray(bs, 'utf-8'),
        msg=bytearray(stringToSign, 'utf-8'),
        digestmod=sha1
    )
    # NOTE(review): base64.encodestring was removed in Python 3.9; this
    # line fails on modern interpreters — b64encode is the equivalent.
    signature = base64.encodestring(h.digest()).strip()
    return signature
# Build a fully signed Aliyun DNS API GET URL for the given params.
def _compose_url(params):
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    parameters = {
        'Format': 'JSON',
        'Version': '2015-01-09',
        'AccessKeyId': ACCESS_KEY_ID,
        'SignatureVersion': '1.0',
        'SignatureMethod': 'HMAC-SHA1',
        'SignatureNonce': str(uuid.uuid1()),
        'Timestamp': timestamp,
    }
    for key in params.keys():
        parameters[key] = params[key]
    signature = _compute_signature(parameters, ACCESS_KEY_SECRET)
    parameters['Signature'] = signature
    url = "https://alidns.aliyuncs.com/?" + urlencode(parameters)
    return url
# Perform a signed GET request and return the decoded JSON body.
def _make_request(params):
    url = _compose_url(params)
    r = requests.get(url)
    r.raise_for_status()
    try:
        obj = r.json()
        return obj
    except ValueError as e:
        raise SystemExit(e)
# https://help.aliyun.com/document_detail/29772.html AddDomainRecord
# Create the challenge TXT record, then poll until it has propagated.
def create_txt_record(args):
    domain, token = args[0], args[2]
    res = get_tld("http://" + domain, as_object=True)
    if res.subdomain:
        name = "{0}.{1}".format('_acme-challenge', res.subdomain)
    else:
        name = '_acme-challenge'
    payload = {
        'Action': 'AddDomainRecord',
        'DomainName': res.tld,
        'RR': name,
        'Type': 'TXT',
        'Value': token,
    }
    r = _make_request(payload)
    record_id = r['RecordId']
    logger.debug(" + TXT record created, ID: {0}".format(record_id))
    # give it 10 seconds to settle down and avoid nxdomain caching
    logger.info(" + Settling down for 10s...")
    time.sleep(10)
    look_up_args = "{0}.{1}".format(name, res.tld)
    while(_has_dns_propagated(look_up_args, token) is False):
        logger.info(" + DNS not propagated, waiting 30s...")
        time.sleep(30)
# https://help.aliyun.com/document_detail/29776.html DescribeDomainRecords
# https://help.aliyun.com/document_detail/29773.html DeleteDomainRecord
# Look up the challenge record(s) matching the token and delete each one.
def delete_txt_record(args):
    domain, token = args[0], args[2]
    if not domain:
        logger.info(" + http_request() error in letsencrypt.sh?")
        return
    res = get_tld("http://" + domain, as_object=True)
    if res.subdomain:
        name = "{0}.{1}".format('_acme-challenge', res.subdomain)
    else:
        name = '_acme-challenge'
    payload = {
        'Action': 'DescribeDomainRecords',
        'DomainName': res.tld,
        'RRKeyWord': name,
        'TypeKeyWord': 'TXT',
        'ValueKeyWord': token,
    }
    r = _make_request(payload)
    logger.debug(" + Found {0} record".format(r['TotalCount']))
    if r['TotalCount'] > 0:
        for record in r['DomainRecords']['Record']:
            logger.debug(
                " + Deleting TXT record name: {0}.{1}, RecordId: {2}".format(
                    record['RR'], record['DomainName'], record['RecordId']))
            payload = {
                'Action': 'DeleteDomainRecord',
                'RecordId': record['RecordId'],
            }
            r_d = _make_request(payload)
            if r_d['RecordId'] == record['RecordId']:
                # NOTE(review): logs r['TotalCount'] (the match count), not
                # the deleted RecordId — looks like the wrong argument.
                logger.debug(
                    " + RecordId {0} has been deleted".format(r['TotalCount']))
# Log certificate/key paths for the operator; nothing is installed here.
def deploy_cert(args):
    domain, privkey_pem, cert_pem, fullchain_pem, chain_pem, timestamp = args
    logger.info(' + ssl_certificate: {0}'.format(fullchain_pem))
    logger.info(' + ssl_certificate_key: {0}'.format(privkey_pem))
    return
# No-op hook stages.
def unchanged_cert(args):
    return
def exit_hook(args):
    return
# Dispatch the hook stage name (argv[0]) to its handler.
def main(argv):
    ops = {
        'deploy_challenge': create_txt_record,
        'clean_challenge': delete_txt_record,
        'deploy_cert': deploy_cert,
        'unchanged_cert': unchanged_cert,
        'exit_hook': exit_hook,
    }
    logger.info(" + AliDNS hook executing: {0}".format(argv[0]))
    ops[argv[0]](argv[1:])
if __name__ == '__main__':
    main(sys.argv[1:])
|
en
| 0.591722
|
#!/usr/bin/env python # # Enable verified HTTPS requests on older Pythons # http://urllib3.readthedocs.org/en/latest/security.html # for ali api signature # https://help.aliyun.com/document_detail/29772.html AddDomainRecord # give it 10 seconds to settle down and avoid nxdomain caching # https://help.aliyun.com/document_detail/29776.html DescribeDomainRecords # https://help.aliyun.com/document_detail/29773.html DeleteDomainRecord
| 2.318615
| 2
|
pox/g2_static.py
|
reservoirlabs/G2-Mininet
| 2
|
6626729
|
<gh_stars>1-10
"""
G2_RIGHTS.
An L3 switch based on static routing.
This module creates a POX controller which reads static routing configuration from a file.
Accordingly, each switch that connects to this controller will receive both IP and ARP flows table entries.
Therefore, no routing request comes to the controller for known paths.
If a flow needs to be transmitted on an unknown path, requests will come to the controller only to get ignored and hence those requests would not succeed.
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.arp import arp
from pox.lib.addresses import IPAddr, EthAddr
from pox.lib.revent import *
import configparser
from collections import defaultdict
import json
log = core.getLogger()
class TopoStructure():
    """Topology structure related constants.
    Args:
        topoFile (str): Path to file that contains topology information.
    Attributes:
        hostAddrDict (dict): Mapping from host ID to IP address and MAC address.
    Examples:
        hostAddrDict['h1']['IP'] = 10.0.1.10
        hostAddrDict['h1']['MAC'] = 000000000001
    """
    def __init__(self, topoFile):
        # The context manager closes the file; the previous version's
        # explicit close() after the with-block (and the throwaway
        # pre-initialisation of hostAddrDict) were redundant.
        with open(topoFile, "r") as read_file:
            self.hostAddrDict = json.load(read_file)
class StaticRouter():
    """Reads static flow rules from an INI config and groups them per switch.

    Args:
        config_file (str): Path of file that contains routing configuration.
    """
    def __init__(self, config_file):
        self.config = config_file

    def getRoutes(self):
        """Return {switch dpid (int): [(src_host, dst_host, in_port, out_port), ...]}.

        Example: rulesDict[1] = [('h1', 'h2', '3', '2')] means: on switch s1,
        forward packets from h1 (source port 3) destined to h2 out port 2.
        """
        rules = defaultdict(list)
        parser = configparser.ConfigParser()
        if not parser.read(self.config):
            # Unreadable/missing config: return an empty rule table.
            return rules
        sections = parser.sections()  # e.g. ['s1', 's2', 's3', ...]
        if not sections:
            log.debug("no switches found in routing conf. No rules will be inserted.")
            return rules
        for switch in sections:
            dpid = int(switch[1:])  # 's3' -> 3
            for pair in parser.options(switch):
                # Option "h1-h2" with value "3-2": hosts from the key,
                # ports from the value.
                hosts = pair.split('-')
                ports = parser.get(switch, pair).split('-')
                rules[dpid].append((hosts[0], hosts[1], ports[0], ports[1]))
        return rules
class G2Switch (EventMixin):
    """An L3 switch class.

    Installs static IP and ARP flow rules on every switch that connects,
    based on a routing configuration file. PacketIn events for IPv4/ARP
    are only logged and dropped.

    Args:
        topoFile (str): Path to file that contains topology information.
        routingFile (str): Path to file that contains routing configuration.
    Attributes:
        routingConfig (str): Path of file that contains routing configuration.
        topoStruct (TopoStructure): Instance of TopoStructure class that contains topology-related constants.
    """
    def __init__ (self, topoFile, routingFile):
        self.topoStruct = TopoStructure(topoFile)
        self.routingConfig = routingFile
        # Subscribe to POX core events (e.g. GoingUpEvent).
        core.addListeners(self)
    def _handle_GoingUpEvent (self, event):
        # Once POX core is up, start listening for OpenFlow events
        # (ConnectionUp, PacketIn, ...).
        core.openflow.addListeners(self)
        log.debug("Up...")
    def _handle_ConnectionUp (self, event):
        """Install this switch's static IP and ARP flow rules on connect."""
        dpid = event.connection.dpid
        log.debug("switch %i has come up.", dpid)
        router = StaticRouter(self.routingConfig)
        flowRules = router.getRoutes()
        if flowRules:
            rules = flowRules[dpid] # list of (src host, dst host, in port, out port) tuples
            for rule in rules:
                # NOTE(review): inp (source port) is never matched on below;
                # flows are selected by host addresses only -- confirm intended.
                sh, dh, inp, outp = rule
                # IP flow: match IPv4 src/dst subnets, output to port outp.
                fm = of.ofp_flow_mod()
                fm.match.in_port = None
                fm.priority = 42
                fm.match.dl_type = 0x0800
                fullIP = self.topoStruct.hostAddrDict[sh]["IP"]
                # Topology file stores addresses as "a.b.c.d/prefix".
                splits = fullIP.split('/')
                (addr, netmask) = (splits[0].strip(), int(splits[1].strip()))
                fm.match.nw_src = (IPAddr(addr), netmask)
                fullIP = self.topoStruct.hostAddrDict[dh]["IP"]
                splits = fullIP.split('/')
                (addr, netmask) = (splits[0].strip(), int(splits[1].strip()))
                fm.match.nw_dst = (IPAddr(addr), netmask)
                fm.actions.append(of.ofp_action_output(port = int(outp)))
                event.connection.send(fm)
                # ARP flow: match source MAC and destination IP subnet.
                fm = of.ofp_flow_mod()
                fm.match.in_port = None
                fm.priority = 42
                fm.match.dl_type = 0x0806
                fm.match.dl_src = EthAddr(self.topoStruct.hostAddrDict[sh]["MAC"])
                fullIP = self.topoStruct.hostAddrDict[dh]["IP"]
                splits = fullIP.split('/')
                (addr, netmask) = (splits[0].strip(), int(splits[1].strip()))
                fm.match.nw_dst = (IPAddr(addr), netmask)
                fm.actions.append(of.ofp_action_output(port = int(outp)))
                event.connection.send(fm)
            log.debug("inserted flow rules in switch %i.", dpid)
        else:
            log.debug("routing conf was not found. No rules added to switch %i.", dpid)
    def _handle_PacketIn (self, event):
        """Log and ignore packets that reach the controller (unknown paths)."""
        dpid = event.connection.dpid
        inport = event.port
        packet = event.parsed
        if not packet.parsed:
            log.warning("switch %i port %i ignoring unparsed packet", dpid, inport)
            return
        if packet.type == ethernet.LLDP_TYPE:
            # Ignore LLDP packets
            return
        if isinstance(packet.next, ipv4):
            log.debug("IPv4 packet")
            log.debug("switch %i port %i IP %s => %s", dpid,inport,
                      packet.next.srcip,packet.next.dstip)
            log.debug("ignoring packet")
            # Do nothing
            return
        elif isinstance(packet.next, arp):
            log.debug("ARP packet")
            a = packet.next
            log.debug("switch %i port %i ARP %s srcIP %s => dstIP %s", dpid, inport,
                      {arp.REQUEST:"request",arp.REPLY:"reply"}.get(a.opcode,
                      'op:%i' % (a.opcode,)), str(a.protosrc), str(a.protodst))
            if a.prototype == arp.PROTO_TYPE_IP:
                if a.hwtype == arp.HW_TYPE_ETHERNET:
                    if a.protosrc != 0:
                        log.debug("ignoring packet")
                        # Do nothing
                        return
    # Todo: Future work- (1) handle other protocol types
    # (2) suppress warnings: ipv6 packet data incomplete and dns incomplete name.
def launch (topo, routing):
    """POX controller's launch() function. The function that POX calls to tell the component to initialize itself.
    Args:
        topo (str): Path to JSON file that contains topology information.
        routing (str): Path to file that contains routing configuration.
    Example:
        The command line arguments are passed as follows:
        ./pox.py --verbose openflow.of_01 --port=6633 g2_static --topo='path/to/topo.json --routing='path/to/routing.conf '
    """
    # POX core will handle the case when 'topo' and 'routing' were not specified.
    # Registers a single G2Switch component instance with POX core.
    core.registerNew(G2Switch, topo, routing)
|
"""
G2_RIGHTS.
An L3 switch based on static routing.
This module creates a POX controller which reads static routing configuration from a file.
Accordingly, each switch that connects to this controller will receive both IP and ARP flows table entries.
Therefore, no routing request comes to the controller for known paths.
If a flow needs to be transmitted on an unknown path, requests will come to the controller only to get ignored and hence those requests would not succeed.
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.arp import arp
from pox.lib.addresses import IPAddr, EthAddr
from pox.lib.revent import *
import configparser
from collections import defaultdict
import json
log = core.getLogger()
class TopoStructure():
    """Topology structure related constants.
    Args:
        topoFile (str): Path to file that contains topology information.
    Attributes:
        hostAddrDict (dict): Mapping from host ID to IP address and MAC address.
    Examples:
        hostAddrDict['h1']['IP'] = 10.0.1.10
        hostAddrDict['h1']['MAC'] = 000000000001
    """
    def __init__(self, topoFile):
        self.hostAddrDict = {}
        # The 'with' block closes the file automatically; the explicit
        # read_file.close() that used to follow was redundant and removed.
        with open(topoFile, "r") as read_file:
            self.hostAddrDict = json.load(read_file)
class StaticRouter():
    """A router that reads static flow rules from an INI-style config file
    and prepares the data needed to create flow rules for switches.
    Args:
        config_file (str): Path of file that contains routing configuration.
    Attributes:
        config (str): Path of file that contains routing configuration.
    """
    def __init__(self, config_file):
        self.config = config_file

    def getRoutes(self):
        """Create a dictionary of flow rules.
        Returns:
            dict: With (key, value) = (switch dpid, list of flow rules)
        Example:
            rulesDict[1] = [(h1,h2,3,2)] means: on switch s1, insert a flow
            rule forwarding to port 2 any packet that matches source host h1,
            source port 3 and destination host h2.
        """
        rules = defaultdict(list)
        parser = configparser.ConfigParser()
        if parser.read(self.config):
            sections = parser.sections()  # e.g. ['s1', 's2', 's3', ...]
            if not sections:
                log.debug("no switches found in routing conf. No rules will be inserted.")
            for switch in sections:
                dpid = int(switch[1:])  # 's1' -> 1; dict key is just the int dpid
                for option in parser.options(switch):
                    # Option key "h1-h2" encodes source/destination hosts.
                    hosts = option.split('-')
                    src_host, dst_host = hosts[0], hosts[1]
                    # Option value "3-2" encodes source/destination ports.
                    ports = parser.get(switch, option).split('-')
                    src_port, dst_port = ports[0], ports[1]
                    rules[dpid].append((src_host, dst_host, src_port, dst_port))
        return rules
class G2Switch (EventMixin):
    """An L3 switch class.

    Installs static IP and ARP flow rules on every switch that connects,
    based on a routing configuration file. PacketIn events for IPv4/ARP
    are only logged and dropped.

    Args:
        topoFile (str): Path to file that contains topology information.
        routingFile (str): Path to file that contains routing configuration.
    Attributes:
        routingConfig (str): Path of file that contains routing configuration.
        topoStruct (TopoStructure): Instance of TopoStructure class that contains topology-related constants.
    """
    def __init__ (self, topoFile, routingFile):
        self.topoStruct = TopoStructure(topoFile)
        self.routingConfig = routingFile
        # Subscribe to POX core events (e.g. GoingUpEvent).
        core.addListeners(self)
    def _handle_GoingUpEvent (self, event):
        # Once POX core is up, start listening for OpenFlow events
        # (ConnectionUp, PacketIn, ...).
        core.openflow.addListeners(self)
        log.debug("Up...")
    def _handle_ConnectionUp (self, event):
        """Install this switch's static IP and ARP flow rules on connect."""
        dpid = event.connection.dpid
        log.debug("switch %i has come up.", dpid)
        router = StaticRouter(self.routingConfig)
        flowRules = router.getRoutes()
        if flowRules:
            rules = flowRules[dpid] # list of (src host, dst host, in port, out port) tuples
            for rule in rules:
                # NOTE(review): inp (source port) is never matched on below;
                # flows are selected by host addresses only -- confirm intended.
                sh, dh, inp, outp = rule
                # IP flow: match IPv4 src/dst subnets, output to port outp.
                fm = of.ofp_flow_mod()
                fm.match.in_port = None
                fm.priority = 42
                fm.match.dl_type = 0x0800
                fullIP = self.topoStruct.hostAddrDict[sh]["IP"]
                # Topology file stores addresses as "a.b.c.d/prefix".
                splits = fullIP.split('/')
                (addr, netmask) = (splits[0].strip(), int(splits[1].strip()))
                fm.match.nw_src = (IPAddr(addr), netmask)
                fullIP = self.topoStruct.hostAddrDict[dh]["IP"]
                splits = fullIP.split('/')
                (addr, netmask) = (splits[0].strip(), int(splits[1].strip()))
                fm.match.nw_dst = (IPAddr(addr), netmask)
                fm.actions.append(of.ofp_action_output(port = int(outp)))
                event.connection.send(fm)
                # ARP flow: match source MAC and destination IP subnet.
                fm = of.ofp_flow_mod()
                fm.match.in_port = None
                fm.priority = 42
                fm.match.dl_type = 0x0806
                fm.match.dl_src = EthAddr(self.topoStruct.hostAddrDict[sh]["MAC"])
                fullIP = self.topoStruct.hostAddrDict[dh]["IP"]
                splits = fullIP.split('/')
                (addr, netmask) = (splits[0].strip(), int(splits[1].strip()))
                fm.match.nw_dst = (IPAddr(addr), netmask)
                fm.actions.append(of.ofp_action_output(port = int(outp)))
                event.connection.send(fm)
            log.debug("inserted flow rules in switch %i.", dpid)
        else:
            log.debug("routing conf was not found. No rules added to switch %i.", dpid)
    def _handle_PacketIn (self, event):
        """Log and ignore packets that reach the controller (unknown paths)."""
        dpid = event.connection.dpid
        inport = event.port
        packet = event.parsed
        if not packet.parsed:
            log.warning("switch %i port %i ignoring unparsed packet", dpid, inport)
            return
        if packet.type == ethernet.LLDP_TYPE:
            # Ignore LLDP packets
            return
        if isinstance(packet.next, ipv4):
            log.debug("IPv4 packet")
            log.debug("switch %i port %i IP %s => %s", dpid,inport,
                      packet.next.srcip,packet.next.dstip)
            log.debug("ignoring packet")
            # Do nothing
            return
        elif isinstance(packet.next, arp):
            log.debug("ARP packet")
            a = packet.next
            log.debug("switch %i port %i ARP %s srcIP %s => dstIP %s", dpid, inport,
                      {arp.REQUEST:"request",arp.REPLY:"reply"}.get(a.opcode,
                      'op:%i' % (a.opcode,)), str(a.protosrc), str(a.protodst))
            if a.prototype == arp.PROTO_TYPE_IP:
                if a.hwtype == arp.HW_TYPE_ETHERNET:
                    if a.protosrc != 0:
                        log.debug("ignoring packet")
                        # Do nothing
                        return
    # Todo: Future work- (1) handle other protocol types
    # (2) suppress warnings: ipv6 packet data incomplete and dns incomplete name.
def launch (topo, routing):
    """POX controller's launch() function. The function that POX calls to tell the component to initialize itself.
    Args:
        topo (str): Path to JSON file that contains topology information.
        routing (str): Path to file that contains routing configuration.
    Example:
        The command line arguments are passed as follows:
        ./pox.py --verbose openflow.of_01 --port=6633 g2_static --topo='path/to/topo.json --routing='path/to/routing.conf '
    """
    # POX core will handle the case when 'topo' and 'routing' were not specified.
    # Registers a single G2Switch component instance with POX core.
    core.registerNew(G2Switch, topo, routing)
|
en
| 0.842453
|
G2_RIGHTS. An L3 switch based on static routing. This module creates a POX controller which reads static routing configuration from a file. Accordingly, each switch that connects to this controller will receive both IP and ARP flows table entries. Therefore, no routing request comes to the controller for known paths. If a flow needs to be transmitted on an unknown path, requests will come to the controller only to get ignored and hence those requests would not succeed. Topology structure related constants. Args: topoFile (str): Path to file that contains topology information. Attributes: hostAddrDict (dict): Mapping from host ID to IP address and MAC address. Examples: hostAddrDict['h1']['IP'] = 10.0.1.10 hostAddrDict['h1']['MAC'] = 000000000001 Definition of a router that reads flow rules from a config file and prepares data required to create flow rules for switches. Args: config_file (str): Path of file that contains routing configuration. Attributes: config (str): Path of file that contains routing configuration. Create a dictionary of flow rules. Returns: dict: With (key, value) = (switch dpid, list of flow rules) Example: rulesDict['1'] = [(h1,h2,3,2)] can be interpreted as follows: On switch s1, a flow rule should be inserted to forward any packets to port 2 which match source host h1, source port 3, and destination host h2 # ['s1', 's2', 's3', ...] # sh: source host, dh: destination host # sp: source port, dp: destination port # dict key is just int dpid An L3 switch class. Args: topoFile (str): Path to file that contains topology information. routingFile (str): Path to file that contains routing configuration. Attributes: routingConfig (str): Path of file that contains routing configuration. topoStruct (TopoStructure): Instance of TopoStructure class that contains topology-related constants. 
# list of tuples # IP # ARP # Ignore LLDP packets # Do nothing # Do nothing # Todo: Future work- (1) handle other protocol types # (2) suppress warnings: ipv6 packet data incomplete and dns incomplete name. POX controller's launch() function. The function that POX calls to tell the component to initialize itself. Args: topo (str): Path to JSON file that contains topology information. routing (str): Path to file that contains routing configuration. Example: The command line arguments are passed as follows: ./pox.py --verbose openflow.of_01 --port=6633 g2_static --topo='path/to/topo.json --routing='path/to/routing.conf ' # POX core will handle the case when 'topo' and 'routing' were not specified.
| 2.904029
| 3
|
homeworks/SDIRK/templates/stabdomSDIRK.py
|
kryo4096/NPDECODES
| 15
|
6626730
|
<reponame>kryo4096/NPDECODES<gh_stars>10-100
import sys
import matplotlib.pyplot as plt
import matplotlib.colors as col
import numpy as np

# Plot the stability domain |S(z)| <= 1 of a 2-stage SDIRK scheme.
# Usage: python stabdomSDIRK.py <output_file> [gamma]
output_file = str(sys.argv[1])
if len(sys.argv) == 3:
    # Bug fix: sys.argv entries are strings; without float() the arithmetic
    # inside S() below would raise a TypeError.
    input_gamma = float(sys.argv[2])
else:
    input_gamma = 1.0
# Stability function of the SDIRK method with diagonal parameter gamma.
S = lambda z, gamma=1.0: (1.0 + z * (1.0 - 2.0 * gamma) + z**2 * (gamma**2 - 2.0 * gamma + 0.5)) / (1.0 - gamma * z)**2
absS = lambda z: np.abs(S(z, gamma=input_gamma))
# Compute |S| on a meshgrid over [-7, 7] x [-7, 7] in the complex plane.
grid1D = np.linspace(-7.0, 7.0, 180, endpoint=True)
X, Y = np.meshgrid(grid1D, grid1D)
Z = absS(X + 1.0j * Y)
# Contour plot distinguishes absS < 1 (green, stable) and absS > 1 (white).
fig = plt.figure()
cmap = col.ListedColormap(['lime','w'])
bounds = [0.0, 1.0, 2.0]
norm = col.BoundaryNorm(bounds, cmap.N)
cs1 = plt.contourf(X, Y, Z, cmap=cmap, norm=norm, levels=bounds, extend='both')
linewidth = 0.2
plt.contour(X, Y, Z, colors='k', levels=[0.0, 1.0], linewidths=linewidth)
# Draw the real and imaginary axes.
plt.plot([-6.0, 6.0], [0.0, 0.0], color='k', linewidth=linewidth)
plt.plot([0.0, 0.0], [-6.0, 6.0], color='k', linewidth=linewidth)
plt.xlabel('Re')
plt.ylabel('Im')
plt.axis('square')
#fig.colorbar(cs1)
plt.savefig(output_file, bbox_inches='tight')
|
import sys
import matplotlib.pyplot as plt
import matplotlib.colors as col
import numpy as np

# Plot the stability domain |S(z)| <= 1 of a 2-stage SDIRK scheme.
# Usage: python stabdomSDIRK.py <output_file> [gamma]
output_file = str(sys.argv[1])
if len(sys.argv) == 3:
    # Bug fix: sys.argv entries are strings; without float() the arithmetic
    # inside S() below would raise a TypeError.
    input_gamma = float(sys.argv[2])
else:
    input_gamma = 1.0
# Stability function of the SDIRK method with diagonal parameter gamma.
S = lambda z, gamma=1.0: (1.0 + z * (1.0 - 2.0 * gamma) + z**2 * (gamma**2 - 2.0 * gamma + 0.5)) / (1.0 - gamma * z)**2
absS = lambda z: np.abs(S(z, gamma=input_gamma))
# Compute |S| on a meshgrid over [-7, 7] x [-7, 7] in the complex plane.
grid1D = np.linspace(-7.0, 7.0, 180, endpoint=True)
X, Y = np.meshgrid(grid1D, grid1D)
Z = absS(X + 1.0j * Y)
# Contour plot distinguishes absS < 1 (green, stable) and absS > 1 (white).
fig = plt.figure()
cmap = col.ListedColormap(['lime','w'])
bounds = [0.0, 1.0, 2.0]
norm = col.BoundaryNorm(bounds, cmap.N)
cs1 = plt.contourf(X, Y, Z, cmap=cmap, norm=norm, levels=bounds, extend='both')
linewidth = 0.2
plt.contour(X, Y, Z, colors='k', levels=[0.0, 1.0], linewidths=linewidth)
# Draw the real and imaginary axes.
plt.plot([-6.0, 6.0], [0.0, 0.0], color='k', linewidth=linewidth)
plt.plot([0.0, 0.0], [-6.0, 6.0], color='k', linewidth=linewidth)
plt.xlabel('Re')
plt.ylabel('Im')
plt.axis('square')
#fig.colorbar(cs1)
plt.savefig(output_file, bbox_inches='tight')
|
en
| 0.484531
|
# Stability function # Compute F(x) on a meshgrid # Contour plot distinguishes absS < 1 and absS > 1 #fig.colorbar(cs1)
| 2.484663
| 2
|
setup.py
|
jpcw/mr.bob
| 0
|
6626731
|
# -*- coding: utf-8 -*-
# Packaging script for mr.bob, a directory-structure template renderer.
import os
import sys
import codecs
from setuptools import setup
from setuptools import find_packages

install_requires = [
    'setuptools',
    'six>=1.2.0', # 1.1.0 release doesn't have six.moves.input
]
# Pin Jinja2 below 2.7 on Python 3.0-3.2 only:
if (3,) < sys.version_info < (3, 3):
    # Jinja 2.7 drops Python 3.2 compat.
    install_requires.append('Jinja2>=2.5.0,<2.7dev')
else:
    install_requires.append('Jinja2>=2.5.0')
# Backport packages for old interpreters missing these stdlib modules.
try:
    import importlib # NOQA
except ImportError:
    install_requires.append('importlib')
try:
    from collections import OrderedDict # NOQA
except ImportError:
    install_requires.append('ordereddict')
try:
    import argparse # NOQA
except ImportError:
    install_requires.append('argparse')


def read(*rnames):
    """Return the UTF-8 decoded contents of a file next to this script."""
    return codecs.open(os.path.join(os.path.dirname(__file__), *rnames), 'r', 'utf-8').read()


setup(name='mr.bob',
      version='0.2.dev0',
      description='Bob renders directory structure templates',
      long_description=read('README.rst') + '\n' + read('HISTORY.rst'),
      classifiers=[
          "Programming Language :: Python",
          "Programming Language :: Python :: Implementation :: CPython",
          "Programming Language :: Python :: Implementation :: PyPy",
          "Programming Language :: Python :: 2.6",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3",
          "Programming Language :: Python :: 3.2",
          "Programming Language :: Python :: 3.3",
          "Programming Language :: Python :: 3.4",
      ],
      author='<NAME>, <NAME>',
      author_email='',
      url='https://github.com/iElectric/mr.bob.git',
      license='BSD',
      packages=find_packages(),
      install_requires=install_requires,
      extras_require={
          'test': [
              'nose',
              'coverage<3.6dev',
              'flake8>2.0',
              'mock',
          ],
          'development': [
              'zest.releaser',
              'Sphinx',
          ],
      },
      entry_points="""
      [console_scripts]
      mrbob = mrbob.cli:main
      """,
      include_package_data=True,
      zip_safe=False,
      )
|
# -*- coding: utf-8 -*-
# Packaging script for mr.bob, a directory-structure template renderer.
import os
import sys
import codecs
from setuptools import setup
from setuptools import find_packages

install_requires = [
    'setuptools',
    'six>=1.2.0', # 1.1.0 release doesn't have six.moves.input
]
# Pin Jinja2 below 2.7 on Python 3.0-3.2 only:
if (3,) < sys.version_info < (3, 3):
    # Jinja 2.7 drops Python 3.2 compat.
    install_requires.append('Jinja2>=2.5.0,<2.7dev')
else:
    install_requires.append('Jinja2>=2.5.0')
# Backport packages for old interpreters missing these stdlib modules.
try:
    import importlib # NOQA
except ImportError:
    install_requires.append('importlib')
try:
    from collections import OrderedDict # NOQA
except ImportError:
    install_requires.append('ordereddict')
try:
    import argparse # NOQA
except ImportError:
    install_requires.append('argparse')


def read(*rnames):
    """Return the UTF-8 decoded contents of a file next to this script."""
    return codecs.open(os.path.join(os.path.dirname(__file__), *rnames), 'r', 'utf-8').read()


setup(name='mr.bob',
      version='0.2.dev0',
      description='Bob renders directory structure templates',
      long_description=read('README.rst') + '\n' + read('HISTORY.rst'),
      classifiers=[
          "Programming Language :: Python",
          "Programming Language :: Python :: Implementation :: CPython",
          "Programming Language :: Python :: Implementation :: PyPy",
          "Programming Language :: Python :: 2.6",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3",
          "Programming Language :: Python :: 3.2",
          "Programming Language :: Python :: 3.3",
          "Programming Language :: Python :: 3.4",
      ],
      author='<NAME>, <NAME>',
      author_email='',
      url='https://github.com/iElectric/mr.bob.git',
      license='BSD',
      packages=find_packages(),
      install_requires=install_requires,
      extras_require={
          'test': [
              'nose',
              'coverage<3.6dev',
              'flake8>2.0',
              'mock',
          ],
          'development': [
              'zest.releaser',
              'Sphinx',
          ],
      },
      entry_points="""
      [console_scripts]
      mrbob = mrbob.cli:main
      """,
      include_package_data=True,
      zip_safe=False,
      )
|
en
| 0.557488
|
# -*- coding: utf-8 -*- # 1.1.0 release doesn't have six.moves.input # Jinja 2.7 drops Python 3.2 compat. # NOQA # NOQA # NOQA [console_scripts] mrbob = mrbob.cli:main
| 1.921828
| 2
|
0000_examples/gelsight/rbt_con/plan.py
|
Photon26/wrs-main_0614
| 0
|
6626732
|
import motion.optimization_based.incremental_nik as inik
import visualization.panda.world as wd
import modeling.geometric_model as gm
import modeling.collision_model as cm
import robot_sim.robots.ur3_dual.ur3_dual as ur3d
import numpy as np
import math
import basis.robot_math as rm

# Demo: plan and visualize a linear Cartesian motion of the UR3 dual-arm
# robot's left arm between two hand poses.
if __name__ == '__main__':
    # Create the visualization world and draw the global reference frame.
    base = wd.World(cam_pos=[2, 1, 3], lookat_pos=[0, 0, 1.1])
    gm.gen_frame().attach_to(base)
    # robot_s
    component_name = 'lft_arm'
    robot_instance = ur3d.UR3Dual()
    # Start/goal hand poses: same orientation (pi/2 about y axis),
    # differing only in the y coordinate -- a straight-line motion.
    start_hnd_pos=np.array([0.4, 0.6, 1.3])
    start_hnd_rotmat=rm.rotmat_from_axangle([0, 1, 0], math.pi / 2)
    goal_hnd_pos=np.array([0.4, 0.4, 1.3])
    goal_hnd_rotmat=rm.rotmat_from_axangle([0, 1, 0], math.pi / 2)
    # Draw both target frames in the scene.
    gm.gen_frame(pos=start_hnd_pos, rotmat=start_hnd_rotmat).attach_to(base)
    gm.gen_frame(pos=goal_hnd_pos, rotmat=goal_hnd_rotmat).attach_to(base)
    # Solve IK for the start pose and render the robot there.
    jnts = robot_instance.ik(component_name,tgt_pos=start_hnd_pos,tgt_rotmat=start_hnd_rotmat)
    robot_instance.fk(component_name,jnts)
    robot_instance.gen_meshmodel().attach_to(base)
    # Solve IK for the goal pose and render the robot there.
    jnts = robot_instance.ik(component_name,tgt_pos=goal_hnd_pos,tgt_rotmat=goal_hnd_rotmat)
    robot_instance.fk(component_name,jnts)
    robot_instance.gen_meshmodel().attach_to(base)
    # base.run()
    # Plan a linear Cartesian motion between the two poses via
    # incremental numerical IK, then render every intermediate config.
    robot_inik_solver = inik.IncrementalNIK(robot_instance)
    pose_list = robot_inik_solver.gen_linear_motion(component_name,
                                                    start_tcp_pos=start_hnd_pos,
                                                    start_tcp_rotmat=start_hnd_rotmat,
                                                    goal_tcp_pos=goal_hnd_pos,
                                                    goal_tcp_rotmat=goal_hnd_rotmat,
                                                    obstacle_list=[])
    for jnt_values in pose_list:
        robot_instance.fk(component_name, jnt_values)
        robot_meshmodel = robot_instance.gen_meshmodel()
        robot_meshmodel.attach_to(base)
    base.run()
|
import motion.optimization_based.incremental_nik as inik
import visualization.panda.world as wd
import modeling.geometric_model as gm
import modeling.collision_model as cm
import robot_sim.robots.ur3_dual.ur3_dual as ur3d
import numpy as np
import math
import basis.robot_math as rm

# Demo: plan and visualize a linear Cartesian motion of the UR3 dual-arm
# robot's left arm between two hand poses.
if __name__ == '__main__':
    # Create the visualization world and draw the global reference frame.
    base = wd.World(cam_pos=[2, 1, 3], lookat_pos=[0, 0, 1.1])
    gm.gen_frame().attach_to(base)
    # robot_s
    component_name = 'lft_arm'
    robot_instance = ur3d.UR3Dual()
    # Start/goal hand poses: same orientation (pi/2 about y axis),
    # differing only in the y coordinate -- a straight-line motion.
    start_hnd_pos=np.array([0.4, 0.6, 1.3])
    start_hnd_rotmat=rm.rotmat_from_axangle([0, 1, 0], math.pi / 2)
    goal_hnd_pos=np.array([0.4, 0.4, 1.3])
    goal_hnd_rotmat=rm.rotmat_from_axangle([0, 1, 0], math.pi / 2)
    # Draw both target frames in the scene.
    gm.gen_frame(pos=start_hnd_pos, rotmat=start_hnd_rotmat).attach_to(base)
    gm.gen_frame(pos=goal_hnd_pos, rotmat=goal_hnd_rotmat).attach_to(base)
    # Solve IK for the start pose and render the robot there.
    jnts = robot_instance.ik(component_name,tgt_pos=start_hnd_pos,tgt_rotmat=start_hnd_rotmat)
    robot_instance.fk(component_name,jnts)
    robot_instance.gen_meshmodel().attach_to(base)
    # Solve IK for the goal pose and render the robot there.
    jnts = robot_instance.ik(component_name,tgt_pos=goal_hnd_pos,tgt_rotmat=goal_hnd_rotmat)
    robot_instance.fk(component_name,jnts)
    robot_instance.gen_meshmodel().attach_to(base)
    # base.run()
    # Plan a linear Cartesian motion between the two poses via
    # incremental numerical IK, then render every intermediate config.
    robot_inik_solver = inik.IncrementalNIK(robot_instance)
    pose_list = robot_inik_solver.gen_linear_motion(component_name,
                                                    start_tcp_pos=start_hnd_pos,
                                                    start_tcp_rotmat=start_hnd_rotmat,
                                                    goal_tcp_pos=goal_hnd_pos,
                                                    goal_tcp_rotmat=goal_hnd_rotmat,
                                                    obstacle_list=[])
    for jnt_values in pose_list:
        robot_instance.fk(component_name, jnt_values)
        robot_meshmodel = robot_instance.gen_meshmodel()
        robot_meshmodel.attach_to(base)
    base.run()
|
en
| 0.287116
|
# robot_s # base.run()
| 2.043562
| 2
|
pure_sklearn/ensemble/tests/test_extra_trees.py
|
ashetty1-m/pure-predict
| 62
|
6626733
|
<filename>pure_sklearn/ensemble/tests/test_extra_trees.py
import warnings
import numpy as np
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.datasets import load_iris
from pure_sklearn.map import convert_estimator
from pure_sklearn.utils import shape
METHODS = ["predict", "predict_proba", "predict_log_proba"]


def test_extra_trees():
    """Check that a converted pure-python ExtraTrees model matches sklearn
    across label encodings, ensemble sizes and depths, for all predict methods."""
    X, y = load_iris(return_X_y=True)
    X_list = X.tolist()
    # Multiclass plus two binarized variants of the iris labels.
    for y_cur in (y, (y == 0).astype(int), (y == 2).astype(int)):
        for n_est in (1, 10):
            for depth in (5, 10, None):
                model = ExtraTreesClassifier(
                    bootstrap=False,
                    n_estimators=n_est,
                    max_depth=depth,
                    random_state=5,
                )
                model.fit(X, y_cur)
                model_pure = convert_estimator(model)
                for method in METHODS:
                    with warnings.catch_warnings():
                        # predict_log_proba may warn on log(0); irrelevant here.
                        warnings.simplefilter("ignore")
                        expected = getattr(model, method)(X)
                        actual = getattr(model_pure, method)(X_list)
                        assert np.allclose(expected.shape, shape(actual))
                        assert np.allclose(expected, actual, equal_nan=True)
|
<filename>pure_sklearn/ensemble/tests/test_extra_trees.py
import warnings
import numpy as np
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.datasets import load_iris
from pure_sklearn.map import convert_estimator
from pure_sklearn.utils import shape
METHODS = ["predict", "predict_proba", "predict_log_proba"]


def test_extra_trees():
    """Check that a converted pure-python ExtraTrees model matches sklearn
    across label encodings, ensemble sizes and depths, for all predict methods."""
    X, y = load_iris(return_X_y=True)
    X_list = X.tolist()
    # Multiclass plus two binarized variants of the iris labels.
    for y_cur in (y, (y == 0).astype(int), (y == 2).astype(int)):
        for n_est in (1, 10):
            for depth in (5, 10, None):
                model = ExtraTreesClassifier(
                    bootstrap=False,
                    n_estimators=n_est,
                    max_depth=depth,
                    random_state=5,
                )
                model.fit(X, y_cur)
                model_pure = convert_estimator(model)
                for method in METHODS:
                    with warnings.catch_warnings():
                        # predict_log_proba may warn on log(0); irrelevant here.
                        warnings.simplefilter("ignore")
                        expected = getattr(model, method)(X)
                        actual = getattr(model_pure, method)(X_list)
                        assert np.allclose(expected.shape, shape(actual))
                        assert np.allclose(expected, actual, equal_nan=True)
|
none
| 1
| 2.21053
| 2
|
|
ClassificationLossMinimizeUsingBackProp.py
|
JunzuoWan/Add-subtraction-multiplication-and-division
| 0
|
6626734
|
# Back (retro) Propagation
# This script shows how to implement back (retro) propagation
# in a classification model: a one-parameter binary classifier
# sigmoid(x + b) trained with TensorFlow 1.x and cross-entropy loss.
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops

# Reset the default graph and start a session.
# Fix: the original code reset the graph and created a session twice in a
# row; the first tf.Session() was never closed (resource leak), so the
# duplicate reset/session pair has been removed.
ops.reset_default_graph()
sess = tf.Session()
# Binary classification: 210 samples from two normal distributions.
# If sigmoid(x+b) < 0.5 -> 0 else 1
# Theoretically, the constant bias b should be -(mean1 + mean2)/2
x_sample1 = np.random.normal(-1, 0.2, 100)
x_sample2 = np.random.normal(4, 0.3, 110)
x_num = np.concatenate((x_sample1, x_sample2))
# Labels: 100 zeros for the first cluster, 110 ones for the second.
y_target1 = np.repeat(0., 100)
y_target2 = np.repeat(1., 110)
y_num = np.concatenate((y_target1, y_target2))
print(x_num)
print(y_num)
# Placeholders feed one (x, y) sample at a time.
x_data = tf.placeholder(shape=[1], dtype=tf.float32)
y_target = tf.placeholder(shape=[1], dtype=tf.float32)
# Single trainable model parameter: the bias b.
b = tf.Variable(tf.random_normal(mean=5, shape=[1]))
# Model output before the sigmoid: x + b.  The sigmoid (the non-linear
# activation) is applied inside the loss op below.
y_out = tf.add(x_data, b)
# Add a batch dimension of 1 to each tensor, as the loss op expects.
y_out_expanded = tf.expand_dims(y_out, 0)
y_target_expanded = tf.expand_dims(y_target, 0)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
# Classification loss: cross entropy with the sigmoid applied internally.
crossentropyloss = tf.nn.sigmoid_cross_entropy_with_logits(logits=y_out_expanded, labels=y_target_expanded)
# Stochastic gradient descent optimizer.
theOptimizer = tf.train.GradientDescentOptimizer(0.04)
train_step = theOptimizer.minimize(crossentropyloss)
# Train for 1800 steps, sampling one random data point per step.
for i in range(1800):
    rand_index = np.random.choice(210)  # 0 to 209
    rand_x = [x_num[rand_index]]
    rand_y = [y_num[rand_index]]
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    if (i+1) % 100 == 0:
        print('Step#' + str(i+1) + ' b=' + str(sess.run(b)))
        print('Loss=' + str(sess.run(crossentropyloss, feed_dict={x_data: rand_x, y_target: rand_y})))
# Evaluate predictions on the training data.
predictions = []
for i in range(len(x_num)):
    x_val = [x_num[i]]
    prediction = sess.run(tf.round(tf.sigmoid(y_out)), feed_dict={x_data: x_val})
    predictions.append(prediction[0])
accuracy = sum(x == y for x, y in zip(predictions, y_num)) / 210.
print('Final Achieved Accuracy = ' + str(np.round(accuracy, 2)))
|
# Back (retro) Propagation
# This script shows how to implement back (retro) propagation
# in a classification model: a one-parameter binary classifier
# sigmoid(x + b) trained with TensorFlow 1.x and cross-entropy loss.
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops

# Reset the default graph and start a session.
# Fix: the original code reset the graph and created a session twice in a
# row; the first tf.Session() was never closed (resource leak), so the
# duplicate reset/session pair has been removed.
ops.reset_default_graph()
sess = tf.Session()
# Binary classification: 210 samples from two normal distributions.
# If sigmoid(x+b) < 0.5 -> 0 else 1
# Theoretically, the constant bias b should be -(mean1 + mean2)/2
x_sample1 = np.random.normal(-1, 0.2, 100)
x_sample2 = np.random.normal(4, 0.3, 110)
x_num = np.concatenate((x_sample1, x_sample2))
# Labels: 100 zeros for the first cluster, 110 ones for the second.
y_target1 = np.repeat(0., 100)
y_target2 = np.repeat(1., 110)
y_num = np.concatenate((y_target1, y_target2))
print(x_num)
print(y_num)
# Placeholders feed one (x, y) sample at a time.
x_data = tf.placeholder(shape=[1], dtype=tf.float32)
y_target = tf.placeholder(shape=[1], dtype=tf.float32)
# Single trainable model parameter: the bias b.
b = tf.Variable(tf.random_normal(mean=5, shape=[1]))
# Model output before the sigmoid: x + b.  The sigmoid (the non-linear
# activation) is applied inside the loss op below.
y_out = tf.add(x_data, b)
# Add a batch dimension of 1 to each tensor, as the loss op expects.
y_out_expanded = tf.expand_dims(y_out, 0)
y_target_expanded = tf.expand_dims(y_target, 0)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
# Classification loss: cross entropy with the sigmoid applied internally.
crossentropyloss = tf.nn.sigmoid_cross_entropy_with_logits(logits=y_out_expanded, labels=y_target_expanded)
# Stochastic gradient descent optimizer.
theOptimizer = tf.train.GradientDescentOptimizer(0.04)
train_step = theOptimizer.minimize(crossentropyloss)
# Train for 1800 steps, sampling one random data point per step.
for i in range(1800):
    rand_index = np.random.choice(210)  # 0 to 209
    rand_x = [x_num[rand_index]]
    rand_y = [y_num[rand_index]]
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    if (i+1) % 100 == 0:
        print('Step#' + str(i+1) + ' b=' + str(sess.run(b)))
        print('Loss=' + str(sess.run(crossentropyloss, feed_dict={x_data: rand_x, y_target: rand_y})))
# Evaluate predictions on the training data.
predictions = []
for i in range(len(x_num)):
    x_val = [x_num[i]]
    prediction = sess.run(tf.round(tf.sigmoid(y_out)), feed_dict={x_data: x_val})
    predictions.append(prediction[0])
accuracy = sum(x == y for x, y in zip(predictions, y_num)) / 210.
print('Final Achieved Accuracy = ' + str(np.round(accuracy, 2)))
|
en
| 0.583028
|
# Back (retro) Propagation # This python function shows how to implement back (retro) propagation # in a classification model. # Create graph and start a session # This is a classification example. # There are 200 values of the corresponding output index # We will fit the binary classification model: # If sigmoid(x+b) < 0.5 -> 0 else 1 # Theoretically, the constant bias b should be -(mean1 + mean2)/2 # Create graph # We first create 100 sample data which are random values from a normal = N(-1, 0.2) #np.concatenate((np.random.normal(-1, 0.2, 100), np.random.normal(4, 0.3, 110))) ## we now create 100 values of 0 ##next we create 110 values of 1 #y_vals1 = np.concatenate((np.repeat(0., 100), np.repeat(1., 110))) ##Now we create 2 placeholders # We now create a variable called bias (one model parameter = b) #Next we create the activation operation using sigmoid function: sigmoid(x + b) # The sigmoid() is the non-linear, activation part of the final loss function # Now we have to add another dimension to each (batch size of 1) # Initialize variables # Now calculate the classification loss which typically uses the cross entropy loss # Next we define the Optimizer # USe for-loop to start the training...the following for-loop will run 1800 times. ##0 to 209 #' + str(i+1) + ' b=' + str(sess.run(b))) # Now it is time to evaluate predictions ###empty list ##len() function returns total data number for x_num.
| 3.984862
| 4
|
src/waldur_rancher/handlers.py
|
geant-multicloud/MCMS-mastermind
| 26
|
6626735
|
import logging
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from waldur_core.core.models import StateMixin
from . import models, tasks
logger = logging.getLogger(__name__)
def notify_create_user(sender, instance, password, created=False, **kwargs):
transaction.on_commit(
lambda: tasks.notify_create_user.delay(
instance.id, password, instance.settings.backend_url
)
)
def delete_node_if_related_instance_has_been_deleted(sender, instance, **kwargs):
try:
content_type = ContentType.objects.get_for_model(instance)
node = models.Node.objects.get(object_id=instance.id, content_type=content_type)
backend = node.cluster.get_backend()
backend.delete_node(node)
except ObjectDoesNotExist:
pass
def delete_cluster_if_all_related_nodes_have_been_deleted(sender, instance, **kwargs):
node = instance
try:
if (
node.cluster.state == models.Cluster.States.DELETING
and not node.cluster.node_set.count()
):
backend = node.cluster.get_backend()
backend.delete_cluster(node.cluster)
except models.Cluster.DoesNotExist:
logger.warning('Cluster instance has been removed already.')
def set_error_state_for_node_if_related_instance_deleting_is_failed(
sender, instance, created=False, **kwargs
):
if created:
return
try:
content_type = ContentType.objects.get_for_model(instance)
node = models.Node.objects.get(object_id=instance.id, content_type=content_type)
except ObjectDoesNotExist:
return
if (
instance.tracker.has_changed('state')
and instance.state == StateMixin.States.ERRED
):
node.state = models.Node.States.ERRED
node.error_message = 'Deleting related VM has failed.'
node.save()
def set_error_state_for_cluster_if_related_node_deleting_is_failed(
sender, instance, created=False, **kwargs
):
node = instance
if created:
return
if node.tracker.has_changed('state') and node.state == models.Node.States.ERRED:
if node.cluster.state == models.Cluster.States.DELETING:
node.cluster.state = models.Cluster.States.ERRED
node.cluster.error_message = 'Deleting one or a more nodes have failed.'
node.cluster.save()
def delete_catalog_if_scope_has_been_deleted(sender, instance, **kwargs):
content_type = ContentType.objects.get_for_model(instance)
models.Catalog.objects.filter(
object_id=instance.id, content_type=content_type
).delete()
|
import logging
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from waldur_core.core.models import StateMixin
from . import models, tasks
logger = logging.getLogger(__name__)
def notify_create_user(sender, instance, password, created=False, **kwargs):
transaction.on_commit(
lambda: tasks.notify_create_user.delay(
instance.id, password, instance.settings.backend_url
)
)
def delete_node_if_related_instance_has_been_deleted(sender, instance, **kwargs):
try:
content_type = ContentType.objects.get_for_model(instance)
node = models.Node.objects.get(object_id=instance.id, content_type=content_type)
backend = node.cluster.get_backend()
backend.delete_node(node)
except ObjectDoesNotExist:
pass
def delete_cluster_if_all_related_nodes_have_been_deleted(sender, instance, **kwargs):
node = instance
try:
if (
node.cluster.state == models.Cluster.States.DELETING
and not node.cluster.node_set.count()
):
backend = node.cluster.get_backend()
backend.delete_cluster(node.cluster)
except models.Cluster.DoesNotExist:
logger.warning('Cluster instance has been removed already.')
def set_error_state_for_node_if_related_instance_deleting_is_failed(
sender, instance, created=False, **kwargs
):
if created:
return
try:
content_type = ContentType.objects.get_for_model(instance)
node = models.Node.objects.get(object_id=instance.id, content_type=content_type)
except ObjectDoesNotExist:
return
if (
instance.tracker.has_changed('state')
and instance.state == StateMixin.States.ERRED
):
node.state = models.Node.States.ERRED
node.error_message = 'Deleting related VM has failed.'
node.save()
def set_error_state_for_cluster_if_related_node_deleting_is_failed(
sender, instance, created=False, **kwargs
):
node = instance
if created:
return
if node.tracker.has_changed('state') and node.state == models.Node.States.ERRED:
if node.cluster.state == models.Cluster.States.DELETING:
node.cluster.state = models.Cluster.States.ERRED
node.cluster.error_message = 'Deleting one or a more nodes have failed.'
node.cluster.save()
def delete_catalog_if_scope_has_been_deleted(sender, instance, **kwargs):
content_type = ContentType.objects.get_for_model(instance)
models.Catalog.objects.filter(
object_id=instance.id, content_type=content_type
).delete()
|
none
| 1
| 1.870868
| 2
|
|
rmgpy/data/test_data/thermo/groups/radical.py
|
tza0035/RMG-Py
| 250
|
6626736
|
#!/usr/bin/env python
# encoding: utf-8
name = "<NAME>"
shortDesc = ""
longDesc = """
"""
entry(
index = 0,
label = "Radical",
group = "OR{RJ, RJ2_singlet}",
thermo = 'RJ',
shortDesc = """""",
longDesc =
"""
""",
)
entry(
index = 1,
label = "RJ",
group =
"""
1 * R u1
""",
thermo = 'CJ',
shortDesc = """""",
longDesc =
"""
""",
)
entry(
index = 2,
label = "CJ",
group =
"""
1 * C u1
""",
thermo = 'CsJ',
shortDesc = """""",
longDesc =
"""
""",
)
entry(
index = 3,
label = "CsJ",
group =
"""
1 * Cs u1
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([0.71,0.34,-0.33,-1.07,-2.43,-3.54,-5.43],'cal/(mol*K)'),
H298 = (104.81,'kcal/mol','+|-',0.1),
S298 = (0.52,'cal/(mol*K)'),
),
shortDesc = """""",
longDesc =
"""
""",
)
entry(
index = 94,
label = "OJ",
group =
"""
1 * O u1
""",
thermo = 'RJ',
shortDesc = """""",
longDesc =
"""
""",
)
entry(
index = 106,
label = "RJ2_triplet",
group =
"""
1 * R u2
""",
thermo = 'CsJ',
shortDesc = """""",
longDesc =
"""
""",
)
tree(
"""
L1: Radical
L2: RJ
L3: CJ
L4: CsJ
L3: OJ
L2: RJ2_triplet
"""
)
|
#!/usr/bin/env python
# encoding: utf-8
name = "<NAME>"
shortDesc = ""
longDesc = """
"""
entry(
index = 0,
label = "Radical",
group = "OR{RJ, RJ2_singlet}",
thermo = 'RJ',
shortDesc = """""",
longDesc =
"""
""",
)
entry(
index = 1,
label = "RJ",
group =
"""
1 * R u1
""",
thermo = 'CJ',
shortDesc = """""",
longDesc =
"""
""",
)
entry(
index = 2,
label = "CJ",
group =
"""
1 * C u1
""",
thermo = 'CsJ',
shortDesc = """""",
longDesc =
"""
""",
)
entry(
index = 3,
label = "CsJ",
group =
"""
1 * Cs u1
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([0.71,0.34,-0.33,-1.07,-2.43,-3.54,-5.43],'cal/(mol*K)'),
H298 = (104.81,'kcal/mol','+|-',0.1),
S298 = (0.52,'cal/(mol*K)'),
),
shortDesc = """""",
longDesc =
"""
""",
)
entry(
index = 94,
label = "OJ",
group =
"""
1 * O u1
""",
thermo = 'RJ',
shortDesc = """""",
longDesc =
"""
""",
)
entry(
index = 106,
label = "RJ2_triplet",
group =
"""
1 * R u2
""",
thermo = 'CsJ',
shortDesc = """""",
longDesc =
"""
""",
)
tree(
"""
L1: Radical
L2: RJ
L3: CJ
L4: CsJ
L3: OJ
L2: RJ2_triplet
"""
)
|
en
| 0.29682
|
#!/usr/bin/env python # encoding: utf-8 1 * R u1 1 * C u1 1 * Cs u1 1 * O u1 1 * R u2 L1: Radical L2: RJ L3: CJ L4: CsJ L3: OJ L2: RJ2_triplet
| 1.774192
| 2
|
src/Python/Rendering/MotionBlur.py
|
sankhesh/vtk-examples
| 0
|
6626737
|
#!/usr/bin/env python
import vtk
def main():
fileName = get_program_parameters()
colors = vtk.vtkNamedColors()
colors.SetColor('A1Diff', [255, 204, 77, 255])
colors.SetColor('A2Amb', [51, 51, 255, 255])
colors.SetColor('A2Diff', [51, 255, 204, 255])
colors.SetColor('A3Amb', [128, 166, 255, 255])
colors.SetColor('Bkg', [77, 102, 153, 255])
renderer = vtk.vtkRenderer()
renderer.SetBackground(colors.GetColor3d('Bkg'))
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetSize(500, 500)
renderWindow.SetWindowName('MotionBlur')
renderWindow.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renderWindow)
reader = vtk.vtkPLYReader()
reader.SetFileName(fileName)
reader.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(reader.GetOutputPort())
# create three models
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetAmbientColor(colors.GetColor3d('Red'))
actor.GetProperty().SetDiffuseColor(colors.GetColor3d('A1Diff'))
actor.GetProperty().SetSpecular(0.0)
actor.GetProperty().SetDiffuse(0.5)
actor.GetProperty().SetAmbient(0.3)
actor.SetPosition(-0.1, 0.0, -0.1)
renderer.AddActor(actor)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetAmbientColor(colors.GetColor3d('A2Amb'))
actor.GetProperty().SetDiffuseColor(colors.GetColor3d('A2Diff'))
actor.GetProperty().SetSpecularColor(colors.GetColor3d('Black'))
actor.GetProperty().SetSpecular(0.2)
actor.GetProperty().SetDiffuse(0.9)
actor.GetProperty().SetAmbient(0.1)
actor.GetProperty().SetSpecularPower(10.0)
renderer.AddActor(actor)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetDiffuseColor(colors.GetColor3d('A3Amb'))
actor.GetProperty().SetSpecularColor(colors.GetColor3d('White'))
actor.GetProperty().SetSpecular(0.7)
actor.GetProperty().SetDiffuse(0.4)
actor.GetProperty().SetSpecularPower(60.0)
actor.SetPosition(0.1, 0.0, 0.1)
renderer.AddActor(actor)
renderWindow.SetMultiSamples(0)
# create the basic VTK render steps
basicPasses = vtk.vtkRenderStepsPass()
motion = vtk.vtkSimpleMotionBlurPass()
motion.SetDelegatePass(basicPasses)
# Tell the renderer to use our render pass pipeline.
renderer.SetPass(motion)
numRenders = 30
renderer.GetActiveCamera().SetPosition(0, 0, -1)
renderer.GetActiveCamera().SetFocalPoint(0, 0, 0)
renderer.GetActiveCamera().SetViewUp(0, 1, 0)
renderer.ResetCamera()
renderer.GetActiveCamera().Azimuth(15.0)
renderer.GetActiveCamera().Zoom(1.2)
renderWindow.Render()
for i in range(0, numRenders):
renderer.GetActiveCamera().Azimuth(10.0 / numRenders)
renderer.GetActiveCamera().Elevation(10.0 / numRenders)
renderWindow.Render()
iren.Start()
def get_program_parameters():
import argparse
description = 'Example of motion blur.'
epilogue = '''
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('filename', help='Armadillo.ply.')
args = parser.parse_args()
return args.filename
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import vtk
def main():
fileName = get_program_parameters()
colors = vtk.vtkNamedColors()
colors.SetColor('A1Diff', [255, 204, 77, 255])
colors.SetColor('A2Amb', [51, 51, 255, 255])
colors.SetColor('A2Diff', [51, 255, 204, 255])
colors.SetColor('A3Amb', [128, 166, 255, 255])
colors.SetColor('Bkg', [77, 102, 153, 255])
renderer = vtk.vtkRenderer()
renderer.SetBackground(colors.GetColor3d('Bkg'))
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetSize(500, 500)
renderWindow.SetWindowName('MotionBlur')
renderWindow.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renderWindow)
reader = vtk.vtkPLYReader()
reader.SetFileName(fileName)
reader.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(reader.GetOutputPort())
# create three models
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetAmbientColor(colors.GetColor3d('Red'))
actor.GetProperty().SetDiffuseColor(colors.GetColor3d('A1Diff'))
actor.GetProperty().SetSpecular(0.0)
actor.GetProperty().SetDiffuse(0.5)
actor.GetProperty().SetAmbient(0.3)
actor.SetPosition(-0.1, 0.0, -0.1)
renderer.AddActor(actor)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetAmbientColor(colors.GetColor3d('A2Amb'))
actor.GetProperty().SetDiffuseColor(colors.GetColor3d('A2Diff'))
actor.GetProperty().SetSpecularColor(colors.GetColor3d('Black'))
actor.GetProperty().SetSpecular(0.2)
actor.GetProperty().SetDiffuse(0.9)
actor.GetProperty().SetAmbient(0.1)
actor.GetProperty().SetSpecularPower(10.0)
renderer.AddActor(actor)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetDiffuseColor(colors.GetColor3d('A3Amb'))
actor.GetProperty().SetSpecularColor(colors.GetColor3d('White'))
actor.GetProperty().SetSpecular(0.7)
actor.GetProperty().SetDiffuse(0.4)
actor.GetProperty().SetSpecularPower(60.0)
actor.SetPosition(0.1, 0.0, 0.1)
renderer.AddActor(actor)
renderWindow.SetMultiSamples(0)
# create the basic VTK render steps
basicPasses = vtk.vtkRenderStepsPass()
motion = vtk.vtkSimpleMotionBlurPass()
motion.SetDelegatePass(basicPasses)
# Tell the renderer to use our render pass pipeline.
renderer.SetPass(motion)
numRenders = 30
renderer.GetActiveCamera().SetPosition(0, 0, -1)
renderer.GetActiveCamera().SetFocalPoint(0, 0, 0)
renderer.GetActiveCamera().SetViewUp(0, 1, 0)
renderer.ResetCamera()
renderer.GetActiveCamera().Azimuth(15.0)
renderer.GetActiveCamera().Zoom(1.2)
renderWindow.Render()
for i in range(0, numRenders):
renderer.GetActiveCamera().Azimuth(10.0 / numRenders)
renderer.GetActiveCamera().Elevation(10.0 / numRenders)
renderWindow.Render()
iren.Start()
def get_program_parameters():
import argparse
description = 'Example of motion blur.'
epilogue = '''
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('filename', help='Armadillo.ply.')
args = parser.parse_args()
return args.filename
if __name__ == '__main__':
main()
|
en
| 0.596015
|
#!/usr/bin/env python # create three models # create the basic VTK render steps # Tell the renderer to use our render pass pipeline.
| 2.007326
| 2
|
0015.3Sum/test.py
|
zhlinh/leetcode
| 0
|
6626738
|
<filename>0015.3Sum/test.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from solution import Solution
inpt = [0, 0, 0, 0]
sol = Solution()
result = sol.threeSum(inpt)
print(result)
|
<filename>0015.3Sum/test.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from solution import Solution
inpt = [0, 0, 0, 0]
sol = Solution()
result = sol.threeSum(inpt)
print(result)
|
en
| 0.352855
|
#!/usr/bin/env python # -*- coding: utf-8 -*-
| 2.786384
| 3
|
examples/Classify/MNistViewer.py
|
parrisma/TicTacToe-DeepLearning
| 1
|
6626739
|
<reponame>parrisma/TicTacToe-DeepLearning
import unittest
from random import randint
from typing import List
import matplotlib as mpl
import numpy as np
from matplotlib import pyplot
from examples.Classify import MNistLoader
class MNistViewer:
@classmethod
def view_img(cls, image=List[float]) -> None:
fig = pyplot.figure()
ax = fig.add_subplot(1, 1, 1)
imgplot = ax.imshow(image, cmap=mpl.cm.Greys)
imgplot.set_interpolation('nearest')
ax.xaxis.set_ticks_position('top')
ax.yaxis.set_ticks_position('left')
pyplot.show()
return
#
# Unit Tests.
#
class TestMNISTViewer(unittest.TestCase):
#
# Test Image Load.
#
def test_0(self):
ml = MNistLoader()
img, lbl = ml.read_mnist(training=True,
path="C:\\Users\\Admin_2\\Google Drive\\DataSets")
s = np.shape(img)
simg = img[randint(0,s[0])]
MNistViewer.view_img(simg)
return
#
# Execute the UnitTests.
#
if __name__ == "__main__":
tests = TestMNISTViewer()
suite = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(suite)
|
import unittest
from random import randint
from typing import List
import matplotlib as mpl
import numpy as np
from matplotlib import pyplot
from examples.Classify import MNistLoader
class MNistViewer:
@classmethod
def view_img(cls, image=List[float]) -> None:
fig = pyplot.figure()
ax = fig.add_subplot(1, 1, 1)
imgplot = ax.imshow(image, cmap=mpl.cm.Greys)
imgplot.set_interpolation('nearest')
ax.xaxis.set_ticks_position('top')
ax.yaxis.set_ticks_position('left')
pyplot.show()
return
#
# Unit Tests.
#
class TestMNISTViewer(unittest.TestCase):
#
# Test Image Load.
#
def test_0(self):
ml = MNistLoader()
img, lbl = ml.read_mnist(training=True,
path="C:\\Users\\Admin_2\\Google Drive\\DataSets")
s = np.shape(img)
simg = img[randint(0,s[0])]
MNistViewer.view_img(simg)
return
#
# Execute the UnitTests.
#
if __name__ == "__main__":
tests = TestMNISTViewer()
suite = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(suite)
|
en
| 0.821535
|
# # Unit Tests. # # # Test Image Load. # # # Execute the UnitTests. #
| 2.967733
| 3
|
mayan/apps/sources/__init__.py
|
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
| 2
|
6626740
|
<gh_stars>1-10
default_app_config = 'mayan.apps.sources.apps.SourcesApp'
|
default_app_config = 'mayan.apps.sources.apps.SourcesApp'
|
none
| 1
| 1.037804
| 1
|
|
examples/scope/simple-class.py
|
brownplt/lambda-py
| 25
|
6626741
|
x = 5
class C(object):
x = 10
def f(self):
return x
y = x + 10
c = C()
___assertEqual(c.y, 20)
___assertEqual(c.f(), 5)
|
x = 5
class C(object):
x = 10
def f(self):
return x
y = x + 10
c = C()
___assertEqual(c.y, 20)
___assertEqual(c.f(), 5)
|
none
| 1
| 3.293584
| 3
|
|
sdk/python/pulumi_aws/apigateway/base_path_mapping.py
|
mdop-wh/pulumi-aws
| 0
|
6626742
|
<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['BasePathMapping']
class BasePathMapping(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
base_path: Optional[pulumi.Input[str]] = None,
domain_name: Optional[pulumi.Input[str]] = None,
rest_api: Optional[pulumi.Input[str]] = None,
stage_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Connects a custom domain name registered via `apigateway.DomainName`
with a deployed API so that its methods can be called via the
custom domain name.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_deployment = aws.apigateway.Deployment("exampleDeployment",
rest_api=aws_api_gateway_rest_api["MyDemoAPI"]["id"],
stage_name="live")
example_domain_name = aws.apigateway.DomainName("exampleDomainName",
domain_name="example.com",
certificate_name="example-api",
certificate_body=(lambda path: open(path).read())(f"{path['module']}/example.com/example.crt"),
certificate_chain=(lambda path: open(path).read())(f"{path['module']}/example.com/ca.crt"),
certificate_private_key=(lambda path: open(path).read())(f"{path['module']}/example.com/example.key"))
test = aws.apigateway.BasePathMapping("test",
rest_api=aws_api_gateway_rest_api["MyDemoAPI"]["id"],
stage_name=example_deployment.stage_name,
domain_name=example_domain_name.domain_name)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] base_path: Path segment that must be prepended to the path when accessing the API via this mapping. If omitted, the API is exposed at the root of the given domain.
:param pulumi.Input[str] domain_name: The already-registered domain name to connect the API to.
:param pulumi.Input[str] rest_api: The id of the API to connect.
:param pulumi.Input[str] stage_name: The name of a specific deployment stage to expose at the given path. If omitted, callers may select any stage by including its name as a path element after the base path.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['base_path'] = base_path
if domain_name is None:
raise TypeError("Missing required property 'domain_name'")
__props__['domain_name'] = domain_name
if rest_api is None:
raise TypeError("Missing required property 'rest_api'")
__props__['rest_api'] = rest_api
__props__['stage_name'] = stage_name
super(BasePathMapping, __self__).__init__(
'aws:apigateway/basePathMapping:BasePathMapping',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
base_path: Optional[pulumi.Input[str]] = None,
domain_name: Optional[pulumi.Input[str]] = None,
rest_api: Optional[pulumi.Input[str]] = None,
stage_name: Optional[pulumi.Input[str]] = None) -> 'BasePathMapping':
"""
Get an existing BasePathMapping resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] base_path: Path segment that must be prepended to the path when accessing the API via this mapping. If omitted, the API is exposed at the root of the given domain.
:param pulumi.Input[str] domain_name: The already-registered domain name to connect the API to.
:param pulumi.Input[str] rest_api: The id of the API to connect.
:param pulumi.Input[str] stage_name: The name of a specific deployment stage to expose at the given path. If omitted, callers may select any stage by including its name as a path element after the base path.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["base_path"] = base_path
__props__["domain_name"] = domain_name
__props__["rest_api"] = rest_api
__props__["stage_name"] = stage_name
return BasePathMapping(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="basePath")
def base_path(self) -> pulumi.Output[Optional[str]]:
"""
Path segment that must be prepended to the path when accessing the API via this mapping. If omitted, the API is exposed at the root of the given domain.
"""
return pulumi.get(self, "base_path")
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> pulumi.Output[str]:
"""
The already-registered domain name to connect the API to.
"""
return pulumi.get(self, "domain_name")
@property
@pulumi.getter(name="restApi")
def rest_api(self) -> pulumi.Output[str]:
"""
The id of the API to connect.
"""
return pulumi.get(self, "rest_api")
@property
@pulumi.getter(name="stageName")
def stage_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of a specific deployment stage to expose at the given path. If omitted, callers may select any stage by including its name as a path element after the base path.
"""
return pulumi.get(self, "stage_name")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['BasePathMapping']
class BasePathMapping(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
base_path: Optional[pulumi.Input[str]] = None,
domain_name: Optional[pulumi.Input[str]] = None,
rest_api: Optional[pulumi.Input[str]] = None,
stage_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Connects a custom domain name registered via `apigateway.DomainName`
with a deployed API so that its methods can be called via the
custom domain name.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_deployment = aws.apigateway.Deployment("exampleDeployment",
rest_api=aws_api_gateway_rest_api["MyDemoAPI"]["id"],
stage_name="live")
example_domain_name = aws.apigateway.DomainName("exampleDomainName",
domain_name="example.com",
certificate_name="example-api",
certificate_body=(lambda path: open(path).read())(f"{path['module']}/example.com/example.crt"),
certificate_chain=(lambda path: open(path).read())(f"{path['module']}/example.com/ca.crt"),
certificate_private_key=(lambda path: open(path).read())(f"{path['module']}/example.com/example.key"))
test = aws.apigateway.BasePathMapping("test",
rest_api=aws_api_gateway_rest_api["MyDemoAPI"]["id"],
stage_name=example_deployment.stage_name,
domain_name=example_domain_name.domain_name)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] base_path: Path segment that must be prepended to the path when accessing the API via this mapping. If omitted, the API is exposed at the root of the given domain.
:param pulumi.Input[str] domain_name: The already-registered domain name to connect the API to.
:param pulumi.Input[str] rest_api: The id of the API to connect.
:param pulumi.Input[str] stage_name: The name of a specific deployment stage to expose at the given path. If omitted, callers may select any stage by including its name as a path element after the base path.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['base_path'] = base_path
if domain_name is None:
raise TypeError("Missing required property 'domain_name'")
__props__['domain_name'] = domain_name
if rest_api is None:
raise TypeError("Missing required property 'rest_api'")
__props__['rest_api'] = rest_api
__props__['stage_name'] = stage_name
super(BasePathMapping, __self__).__init__(
'aws:apigateway/basePathMapping:BasePathMapping',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
base_path: Optional[pulumi.Input[str]] = None,
domain_name: Optional[pulumi.Input[str]] = None,
rest_api: Optional[pulumi.Input[str]] = None,
stage_name: Optional[pulumi.Input[str]] = None) -> 'BasePathMapping':
"""
Get an existing BasePathMapping resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] base_path: Path segment that must be prepended to the path when accessing the API via this mapping. If omitted, the API is exposed at the root of the given domain.
:param pulumi.Input[str] domain_name: The already-registered domain name to connect the API to.
:param pulumi.Input[str] rest_api: The id of the API to connect.
:param pulumi.Input[str] stage_name: The name of a specific deployment stage to expose at the given path. If omitted, callers may select any stage by including its name as a path element after the base path.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["base_path"] = base_path
__props__["domain_name"] = domain_name
__props__["rest_api"] = rest_api
__props__["stage_name"] = stage_name
return BasePathMapping(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="basePath")
def base_path(self) -> pulumi.Output[Optional[str]]:
"""
Path segment that must be prepended to the path when accessing the API via this mapping. If omitted, the API is exposed at the root of the given domain.
"""
return pulumi.get(self, "base_path")
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> pulumi.Output[str]:
"""
The already-registered domain name to connect the API to.
"""
return pulumi.get(self, "domain_name")
@property
@pulumi.getter(name="restApi")
def rest_api(self) -> pulumi.Output[str]:
"""
The id of the API to connect.
"""
return pulumi.get(self, "rest_api")
@property
@pulumi.getter(name="stageName")
def stage_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of a specific deployment stage to expose at the given path. If omitted, callers may select any stage by including its name as a path element after the base path.
"""
return pulumi.get(self, "stage_name")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
    """Map a Python-side (snake_case) property name to its camelCase form."""
    camel = _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop)
    return camel if camel else prop
|
en
| 0.757857
|
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** Connects a custom domain name registered via `apigateway.DomainName` with a deployed API so that its methods can be called via the custom domain name. ## Example Usage ```python import pulumi import pulumi_aws as aws example_deployment = aws.apigateway.Deployment("exampleDeployment", rest_api=aws_api_gateway_rest_api["MyDemoAPI"]["id"], stage_name="live") example_domain_name = aws.apigateway.DomainName("exampleDomainName", domain_name="example.com", certificate_name="example-api", certificate_body=(lambda path: open(path).read())(f"{path['module']}/example.com/example.crt"), certificate_chain=(lambda path: open(path).read())(f"{path['module']}/example.com/ca.crt"), certificate_private_key=(lambda path: open(path).read())(f"{path['module']}/example.com/example.key")) test = aws.apigateway.BasePathMapping("test", rest_api=aws_api_gateway_rest_api["MyDemoAPI"]["id"], stage_name=example_deployment.stage_name, domain_name=example_domain_name.domain_name) ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] base_path: Path segment that must be prepended to the path when accessing the API via this mapping. If omitted, the API is exposed at the root of the given domain. :param pulumi.Input[str] domain_name: The already-registered domain name to connect the API to. :param pulumi.Input[str] rest_api: The id of the API to connect. :param pulumi.Input[str] stage_name: The name of a specific deployment stage to expose at the given path. If omitted, callers may select any stage by including its name as a path element after the base path. Get an existing BasePathMapping resource's state with the given name, id, and optional extra properties used to qualify the lookup. 
:param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] base_path: Path segment that must be prepended to the path when accessing the API via this mapping. If omitted, the API is exposed at the root of the given domain. :param pulumi.Input[str] domain_name: The already-registered domain name to connect the API to. :param pulumi.Input[str] rest_api: The id of the API to connect. :param pulumi.Input[str] stage_name: The name of a specific deployment stage to expose at the given path. If omitted, callers may select any stage by including its name as a path element after the base path. Path segment that must be prepended to the path when accessing the API via this mapping. If omitted, the API is exposed at the root of the given domain. The already-registered domain name to connect the API to. The id of the API to connect. The name of a specific deployment stage to expose at the given path. If omitted, callers may select any stage by including its name as a path element after the base path.
| 1.871021
| 2
|
src/creasepattern/main.py
|
qurben/creasepattern
| 0
|
6626743
|
import os
import sys
from creasepattern import cp2png, cp2svg, opx2png, orh2png, orh2svg, opx2svg, orh2cp, opx2cp
def main():
    """CLI entry point: convert a crease-pattern file to another format.

    Expects exactly two arguments (input path, output path); the conversion
    routine is selected by the pair of file extensions, e.g. ``.cp`` ->
    ``.png``. Exits non-zero on a usage error or an unsupported conversion.
    """
    if len(sys.argv) != 3:
        print("""Usage:
        {0} creasepattern.cp image.png
        {0} creasepattern.cp image.svg""".format(os.path.basename(sys.argv[0])))
        sys.exit(1)  # usage errors should signal failure to the shell

    infile = sys.argv[1]
    outfile = sys.argv[2]

    # Maps "<input ext><output ext>" to the matching conversion function.
    conversion_map = {
        '.opx.png': opx2png,
        '.opx.svg': opx2svg,
        '.opx.cp': opx2cp,
        '.orh.png': orh2png,
        '.orh.svg': orh2svg,
        '.orh.cp': orh2cp,
        '.cp.png': cp2png,
        '.cp.svg': cp2svg,
    }

    _, in_file_extension = os.path.splitext(infile)
    _, out_file_extension = os.path.splitext(outfile)

    key = in_file_extension + out_file_extension

    if key in conversion_map:
        conversion_map[key](infile, outfile)
    else:
        # Previously an unsupported pair failed silently; report it instead.
        print("Unsupported conversion: {0} -> {1}".format(
            in_file_extension, out_file_extension))
        sys.exit(1)


if __name__ == '__main__':
    main()
|
import os
import sys
from creasepattern import cp2png, cp2svg, opx2png, orh2png, orh2svg, opx2svg, orh2cp, opx2cp
def main():
    """CLI entry point: convert a crease-pattern file to another format.

    Expects exactly two arguments (input path, output path); the conversion
    routine is selected by the pair of file extensions, e.g. ``.cp`` ->
    ``.png``. Exits non-zero on a usage error or an unsupported conversion.
    """
    if len(sys.argv) != 3:
        print("""Usage:
        {0} creasepattern.cp image.png
        {0} creasepattern.cp image.svg""".format(os.path.basename(sys.argv[0])))
        sys.exit(1)  # usage errors should signal failure to the shell

    infile = sys.argv[1]
    outfile = sys.argv[2]

    # Maps "<input ext><output ext>" to the matching conversion function.
    conversion_map = {
        '.opx.png': opx2png,
        '.opx.svg': opx2svg,
        '.opx.cp': opx2cp,
        '.orh.png': orh2png,
        '.orh.svg': orh2svg,
        '.orh.cp': orh2cp,
        '.cp.png': cp2png,
        '.cp.svg': cp2svg,
    }

    _, in_file_extension = os.path.splitext(infile)
    _, out_file_extension = os.path.splitext(outfile)

    key = in_file_extension + out_file_extension

    if key in conversion_map:
        conversion_map[key](infile, outfile)
    else:
        # Previously an unsupported pair failed silently; report it instead.
        print("Unsupported conversion: {0} -> {1}".format(
            in_file_extension, out_file_extension))
        sys.exit(1)


if __name__ == '__main__':
    main()
|
te
| 0.09092
|
Usage: {0} creasepattern.cp image.png {0} creasepattern.cp image.svg
| 2.652417
| 3
|
cypher_1.py
|
kilbyjmichael/More-Bad-Crypto
| 4
|
6626744
|
<reponame>kilbyjmichael/More-Bad-Crypto<filename>cypher_1.py
#!/usr/bin/python
'''Caesar-style substitution cipher demo (Python 2). Inspired by my man Ceasar.'''
# NOTE: Python 2 only — relies on raw_input and string.maketrans.
from string import maketrans

# Lower- then upper-case alphabet; rotating it yields the cipher alphabet.
alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

# Prompt the user at import time for the message and the rotation key.
secret = raw_input("Enter your plaintext -> ")
key = raw_input("Enter your key (1-25, 27-51) -> ")
key = int(key)
def cypher_make(key, alphabet="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """Return the cipher alphabet: *alphabet* rotated left by *key* positions.

    *alphabet* defaults to the module-level lower+upper alphabet, so existing
    one-argument callers behave exactly as before; passing a custom alphabet
    generalises the rotation. The redundant str() wrapper was removed —
    slicing/concatenating a str already yields a str.
    """
    return alphabet[key:] + alphabet[:key]
def string_cypher(message, translated):
    """Encrypt *message* by mapping the module ``alphabet`` onto *translated*.

    Uses Python 2's ``string.maketrans``; non-alphabetic characters pass
    through unchanged.
    """
    trans = maketrans(alphabet, translated)
    return message.translate(trans)
def main():
    # Python 2 print statement: encrypt the prompt-collected plaintext with
    # the alphabet rotated by the user-supplied key.
    print string_cypher(secret, cypher_make(key))

if __name__ == "__main__": main()
|
#!/usr/bin/python
'''Caesar-style substitution cipher demo (Python 2). Inspired by my man Ceasar.'''
# NOTE: Python 2 only — relies on raw_input and string.maketrans.
from string import maketrans

# Lower- then upper-case alphabet; rotating it yields the cipher alphabet.
alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

# Prompt the user at import time for the message and the rotation key.
secret = raw_input("Enter your plaintext -> ")
key = raw_input("Enter your key (1-25, 27-51) -> ")
key = int(key)
def cypher_make(key, alphabet="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """Return the cipher alphabet: *alphabet* rotated left by *key* positions.

    *alphabet* defaults to the module-level lower+upper alphabet, so existing
    one-argument callers behave exactly as before; passing a custom alphabet
    generalises the rotation. The redundant str() wrapper was removed —
    slicing/concatenating a str already yields a str.
    """
    return alphabet[key:] + alphabet[:key]
def string_cypher(message, translated):
    """Encrypt *message* by mapping the module ``alphabet`` onto *translated*.

    Uses Python 2's ``string.maketrans``; non-alphabetic characters pass
    through unchanged.
    """
    trans = maketrans(alphabet, translated)
    return message.translate(trans)
def main():
    # Python 2 print statement: encrypt the prompt-collected plaintext with
    # the alphabet rotated by the user-supplied key.
    print string_cypher(secret, cypher_make(key))

if __name__ == "__main__": main()
|
en
| 0.6837
|
#!/usr/bin/python Inspired by my man Ceasar.
| 3.734631
| 4
|
spinup/algos/pytorch/ddpg/core.py
|
MLRG-CEFET-RJ/DRL-ALM
| 3
|
6626745
|
<reponame>MLRG-CEFET-RJ/DRL-ALM
import numpy as np
import pandas as pd
import gym
from gym import spaces
from scipy.stats import chi2
import torch
import torch.nn as nn
def combined_shape(length, shape=None):
    """Prepend a batch dimension *length* to *shape*.

    shape=None -> (length,); scalar -> (length, shape); tuple/list ->
    (length, *shape). Used to size replay buffers.
    """
    if shape is None:
        return (length,)
    if np.isscalar(shape):
        return (length, shape)
    return (length, *shape)
def mlp(sizes, activation, output_activation=nn.Identity):
    """Build a feed-forward network from layer widths *sizes*.

    Each consecutive pair in *sizes* becomes a Linear layer followed by
    *activation*; the final Linear layer is followed by *output_activation*.
    """
    layers = []
    last = len(sizes) - 2
    for j, (n_in, n_out) in enumerate(zip(sizes[:-1], sizes[1:])):
        act = activation if j < last else output_activation
        layers.extend([nn.Linear(n_in, n_out), act()])
    return nn.Sequential(*layers)
def count_vars(module):
    """Return the total number of scalar parameters in *module*."""
    total = 0
    for p in module.parameters():
        total += np.prod(p.shape)
    return total
class MLPActor(nn.Module):
    """Deterministic policy network: maps an observation to an action.

    For ALMEnv the output layer is a softmax so actions are non-negative
    portfolio weights summing to 1, then scaled by act_limit.
    """

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation, act_limit):
        super().__init__()
        pi_sizes = [obs_dim] + list(hidden_sizes) + [act_dim]
        # self.pi = mlp(pi_sizes, activation, nn.Tanh) original entry
        # Changed for ALMEnv. Fix: instantiating nn.Softmax() without `dim`
        # is deprecated and makes torch guess the dimension; pass dim=-1
        # explicitly (equivalent to the implicit choice for 1-D/2-D inputs).
        self.pi = mlp(pi_sizes, activation, lambda: nn.Softmax(dim=-1))
        self.act_limit = act_limit

    def forward(self, obs):
        # Return output from network scaled to action space limits.
        return self.act_limit * self.pi(obs)
class MLPQFunction(nn.Module):
    """State-action value network Q(s, a) implemented as an MLP."""

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
        super().__init__()
        # Q consumes the concatenated (obs, act) vector and emits one scalar.
        self.q = mlp([obs_dim + act_dim] + list(hidden_sizes) + [1], activation)

    def forward(self, obs, act):
        q = self.q(torch.cat([obs, act], dim=-1))
        return torch.squeeze(q, -1) # Critical to ensure q has right shape.
class MLPActorCritic(nn.Module):
    """Container bundling a deterministic policy (pi) and a Q-network (q),
    as used by DDPG. Sizes are derived from the gym spaces."""

    def __init__(self, observation_space, action_space, hidden_sizes=(256,256),
                 activation=nn.ReLU):
        super().__init__()

        obs_dim = observation_space.shape[0]
        act_dim = action_space.shape[0]
        # assumes a symmetric/per-dimension-uniform action bound — uses only
        # the first component of action_space.high.
        act_limit = action_space.high[0]

        # build policy and value functions
        self.pi = MLPActor(obs_dim, act_dim, hidden_sizes, activation, act_limit)
        self.q = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation)

    def act(self, obs):
        # Gradient-free action query used when interacting with the env.
        with torch.no_grad():
            return self.pi(obs).numpy()
"""
ALM Environment
This environment is not part of the original OpenAI SpinningUp package
It's been included by the author
"""
class ALMEnv(gym.Env):
    """
    Custom Asset Liability Management environment, which follows gym interface
    Inputs are an asset value (scalar), a liability flow (numpy array of shape (T,))
    and a pandas DataFrame, with historical returns of available assets
    """
    metadata = {'render.modes': ['human']}

    def __init__(self, T = 80, rate = .06, hist_returns = False):
        """Build the environment.

        :param T: liability horizon in periods.
        :param rate: discount rate for the liability present value.
        :param hist_returns: optional DataFrame of historical asset returns;
            when left as False the built-in 2005-2018 table below is used.
        """
        super(ALMEnv, self).__init__()
        self.asset = 10**6
        # Liability cash flows shaped like a chi-square(6) density tail.
        self.liability = chi2.pdf(np.linspace(0, 16, 101)[(101 - T):], 6)
        self.liab_PV = self.liability / (1 + rate) ** np.arange(1, T + 1)
        # Rescale so the liability present value equals the initial asset.
        self.liability = self.liability * (self.asset / np.sum(self.liab_PV))
        # Bug fix: `if (hist_returns):` raised "truth value of a DataFrame is
        # ambiguous" whenever a DataFrame was actually supplied. Compare with
        # the False sentinel instead of relying on truthiness.
        if hist_returns is not False:
            self.historical_return = hist_returns
        else:
            self.historical_return = pd.DataFrame(np.array([[0.881818867, 1.277103375, 1.194665549, 1.196332479, 1.119897102, 1.143154236, 1.056897333],
                [0.913401974, 1.329337917, 1.183150266, 1.152575668, 1.208069962, 1.283265184, 1.03141775],
                [0.828484565, 1.436512041, 1.10733683, 1.119179339, 1.131582749, 1.190834926, 1.044573304],
                [1.319369954, 0.587765708, 1.13880019, 1.123874437, 1.138172278, 1.075195418, 1.059023134],
                [0.745057766, 1.826577896, 1.124799714, 1.09979594, 1.149761414, 1.235206438, 1.043120283],
                [0.956926258, 1.010439144, 1.118628089, 1.097598994, 1.130256361, 1.218475311, 1.059090683],
                [1.125795223, 0.818913771, 1.144601664, 1.116280628, 1.156939304, 1.144808206, 1.06503109],
                [1.089401855, 1.073968355, 1.143073697, 1.085152406, 1.169810636, 1.342007027, 1.05838569],
                [1.146366528, 0.845042, 1.025963782, 1.081912809, 1.027623167, 0.829212882, 1.059108181],
                [1.133868351, 0.970877745, 1.113965671, 1.108091597, 1.116447326, 1.16609008, 1.064076166],
                [1.470070025, 0.86685864, 1.071136115, 1.132591303, 1.154377104, 1.056908557, 1.10673498],
                [0.834639418, 1.389351542, 1.233883065, 1.138430157, 1.15524236, 1.310909455, 1.062880551],
                [1.015004142, 1.268567254, 1.152134718, 1.101916922, 1.12586988, 1.127526766, 1.029473499],
                [1.171342201, 1.15032329, 1.107351925, 1.06420429, 1.098757474, 1.154167833, 1.037454821]]),
                columns = ['Cambio', 'Bovespa', 'IRF-M', 'IMA-S', 'IMA-B 5', 'IMA-B 5+', 'IPCA'],
                index = np.arange(2005, 2019))
        self.present_asset = self.asset
        self.present_liability = self.liability
        # One portfolio weight per available asset, each in [0, 1].
        self.action_space = spaces.Box(low = 0, high = 1, shape = (self.historical_return.shape[1],), dtype = np.float32)
        self.observation_space = spaces.Box(low = -np.inf, high = np.inf, shape = self.liability.shape, dtype = np.float32)

    def step(self, action):
        """Advance one period: apply sampled returns, pay the due liability."""
        # Sample one period of joint returns from a Gaussian fitted to history.
        sim_ret = np.random.multivariate_normal(mean = self.historical_return.mean(axis = 0), cov = pd.DataFrame.cov(self.historical_return))
        self.present_asset = self.present_asset * np.sum(sim_ret * action) - self.present_liability[0]
        # Shift liabilities one period; remaining flows are indexed by the
        # first sampled return column — presumably FX/inflation; TODO confirm.
        self.present_liability = np.append(self.present_liability[1:], 0) * sim_ret[0]
        terminal = False
        if self.present_asset < 0 or np.sum(self.present_liability) == 0:
            terminal = True
        # Reward 1 for remaining solvent this period, 0 otherwise.
        if self.present_asset >= 0:
            reward = 1
        else:
            reward = 0
        observation = self.present_liability / self.present_asset
        info = None  # NOTE(review): gym convention expects a dict here
        return observation, reward, terminal, info

    def reset(self):
        """Restore the initial asset/liability state; return the observation."""
        self.present_asset = self.asset
        self.present_liability = self.liability
        return(self.present_liability / self.present_asset)

    def render(self, mode = 'human', close = False):
        # Rendering is intentionally a no-op for this environment.
        pass
|
import numpy as np
import pandas as pd
import gym
from gym import spaces
from scipy.stats import chi2
import torch
import torch.nn as nn
def combined_shape(length, shape=None):
    """Prepend a batch dimension *length* to *shape*.

    shape=None -> (length,); scalar -> (length, shape); tuple/list ->
    (length, *shape). Used to size replay buffers.
    """
    if shape is None:
        return (length,)
    if np.isscalar(shape):
        return (length, shape)
    return (length, *shape)
def mlp(sizes, activation, output_activation=nn.Identity):
    """Build a feed-forward network from layer widths *sizes*.

    Each consecutive pair in *sizes* becomes a Linear layer followed by
    *activation*; the final Linear layer is followed by *output_activation*.
    """
    layers = []
    last = len(sizes) - 2
    for j, (n_in, n_out) in enumerate(zip(sizes[:-1], sizes[1:])):
        act = activation if j < last else output_activation
        layers.extend([nn.Linear(n_in, n_out), act()])
    return nn.Sequential(*layers)
def count_vars(module):
    """Return the total number of scalar parameters in *module*."""
    total = 0
    for p in module.parameters():
        total += np.prod(p.shape)
    return total
class MLPActor(nn.Module):
    """Deterministic policy network: maps an observation to an action.

    For ALMEnv the output layer is a softmax so actions are non-negative
    portfolio weights summing to 1, then scaled by act_limit.
    """

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation, act_limit):
        super().__init__()
        pi_sizes = [obs_dim] + list(hidden_sizes) + [act_dim]
        # self.pi = mlp(pi_sizes, activation, nn.Tanh) original entry
        # Changed for ALMEnv. Fix: instantiating nn.Softmax() without `dim`
        # is deprecated and makes torch guess the dimension; pass dim=-1
        # explicitly (equivalent to the implicit choice for 1-D/2-D inputs).
        self.pi = mlp(pi_sizes, activation, lambda: nn.Softmax(dim=-1))
        self.act_limit = act_limit

    def forward(self, obs):
        # Return output from network scaled to action space limits.
        return self.act_limit * self.pi(obs)
class MLPQFunction(nn.Module):
    """State-action value network Q(s, a) implemented as an MLP."""

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
        super().__init__()
        # Q consumes the concatenated (obs, act) vector and emits one scalar.
        self.q = mlp([obs_dim + act_dim] + list(hidden_sizes) + [1], activation)

    def forward(self, obs, act):
        q = self.q(torch.cat([obs, act], dim=-1))
        return torch.squeeze(q, -1) # Critical to ensure q has right shape.
class MLPActorCritic(nn.Module):
    """Container bundling a deterministic policy (pi) and a Q-network (q),
    as used by DDPG. Sizes are derived from the gym spaces."""

    def __init__(self, observation_space, action_space, hidden_sizes=(256,256),
                 activation=nn.ReLU):
        super().__init__()

        obs_dim = observation_space.shape[0]
        act_dim = action_space.shape[0]
        # assumes a symmetric/per-dimension-uniform action bound — uses only
        # the first component of action_space.high.
        act_limit = action_space.high[0]

        # build policy and value functions
        self.pi = MLPActor(obs_dim, act_dim, hidden_sizes, activation, act_limit)
        self.q = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation)

    def act(self, obs):
        # Gradient-free action query used when interacting with the env.
        with torch.no_grad():
            return self.pi(obs).numpy()
"""
ALM Environment
This environment is not part of the original OpenAI SpinningUp package
It's been included by the author
"""
class ALMEnv(gym.Env):
    """
    Custom Asset Liability Management environment, which follows gym interface
    Inputs are an asset value (scalar), a liability flow (numpy array of shape (T,))
    and a pandas DataFrame, with historical returns of available assets
    """
    metadata = {'render.modes': ['human']}

    def __init__(self, T = 80, rate = .06, hist_returns = False):
        """Build the environment.

        :param T: liability horizon in periods.
        :param rate: discount rate for the liability present value.
        :param hist_returns: optional DataFrame of historical asset returns;
            when left as False the built-in 2005-2018 table below is used.
        """
        super(ALMEnv, self).__init__()
        self.asset = 10**6
        # Liability cash flows shaped like a chi-square(6) density tail.
        self.liability = chi2.pdf(np.linspace(0, 16, 101)[(101 - T):], 6)
        self.liab_PV = self.liability / (1 + rate) ** np.arange(1, T + 1)
        # Rescale so the liability present value equals the initial asset.
        self.liability = self.liability * (self.asset / np.sum(self.liab_PV))
        # Bug fix: `if (hist_returns):` raised "truth value of a DataFrame is
        # ambiguous" whenever a DataFrame was actually supplied. Compare with
        # the False sentinel instead of relying on truthiness.
        if hist_returns is not False:
            self.historical_return = hist_returns
        else:
            self.historical_return = pd.DataFrame(np.array([[0.881818867, 1.277103375, 1.194665549, 1.196332479, 1.119897102, 1.143154236, 1.056897333],
                [0.913401974, 1.329337917, 1.183150266, 1.152575668, 1.208069962, 1.283265184, 1.03141775],
                [0.828484565, 1.436512041, 1.10733683, 1.119179339, 1.131582749, 1.190834926, 1.044573304],
                [1.319369954, 0.587765708, 1.13880019, 1.123874437, 1.138172278, 1.075195418, 1.059023134],
                [0.745057766, 1.826577896, 1.124799714, 1.09979594, 1.149761414, 1.235206438, 1.043120283],
                [0.956926258, 1.010439144, 1.118628089, 1.097598994, 1.130256361, 1.218475311, 1.059090683],
                [1.125795223, 0.818913771, 1.144601664, 1.116280628, 1.156939304, 1.144808206, 1.06503109],
                [1.089401855, 1.073968355, 1.143073697, 1.085152406, 1.169810636, 1.342007027, 1.05838569],
                [1.146366528, 0.845042, 1.025963782, 1.081912809, 1.027623167, 0.829212882, 1.059108181],
                [1.133868351, 0.970877745, 1.113965671, 1.108091597, 1.116447326, 1.16609008, 1.064076166],
                [1.470070025, 0.86685864, 1.071136115, 1.132591303, 1.154377104, 1.056908557, 1.10673498],
                [0.834639418, 1.389351542, 1.233883065, 1.138430157, 1.15524236, 1.310909455, 1.062880551],
                [1.015004142, 1.268567254, 1.152134718, 1.101916922, 1.12586988, 1.127526766, 1.029473499],
                [1.171342201, 1.15032329, 1.107351925, 1.06420429, 1.098757474, 1.154167833, 1.037454821]]),
                columns = ['Cambio', 'Bovespa', 'IRF-M', 'IMA-S', 'IMA-B 5', 'IMA-B 5+', 'IPCA'],
                index = np.arange(2005, 2019))
        self.present_asset = self.asset
        self.present_liability = self.liability
        # One portfolio weight per available asset, each in [0, 1].
        self.action_space = spaces.Box(low = 0, high = 1, shape = (self.historical_return.shape[1],), dtype = np.float32)
        self.observation_space = spaces.Box(low = -np.inf, high = np.inf, shape = self.liability.shape, dtype = np.float32)

    def step(self, action):
        """Advance one period: apply sampled returns, pay the due liability."""
        # Sample one period of joint returns from a Gaussian fitted to history.
        sim_ret = np.random.multivariate_normal(mean = self.historical_return.mean(axis = 0), cov = pd.DataFrame.cov(self.historical_return))
        self.present_asset = self.present_asset * np.sum(sim_ret * action) - self.present_liability[0]
        # Shift liabilities one period; remaining flows are indexed by the
        # first sampled return column — presumably FX/inflation; TODO confirm.
        self.present_liability = np.append(self.present_liability[1:], 0) * sim_ret[0]
        terminal = False
        if self.present_asset < 0 or np.sum(self.present_liability) == 0:
            terminal = True
        # Reward 1 for remaining solvent this period, 0 otherwise.
        if self.present_asset >= 0:
            reward = 1
        else:
            reward = 0
        observation = self.present_liability / self.present_asset
        info = None  # NOTE(review): gym convention expects a dict here
        return observation, reward, terminal, info

    def reset(self):
        """Restore the initial asset/liability state; return the observation."""
        self.present_asset = self.asset
        self.present_liability = self.liability
        return(self.present_liability / self.present_asset)

    def render(self, mode = 'human', close = False):
        # Rendering is intentionally a no-op for this environment.
        pass
|
en
| 0.848047
|
# self.pi = mlp(pi_sizes, activation, nn.Tanh) original entry # Changed for ALMEnv # Return output from network scaled to action space limits. # Critical to ensure q has right shape. # build policy and value functions ALM Environment This environment is not part of the original OpenAI SpinningUp package It's been included by the author Custom Asset Liability Management environment, which follows gym interface Inputs are an asset value (scalar), a liability flow (numpy array of shape (T,)) and a pandas DataFrame, with historical returns of available assets
| 2.321977
| 2
|
scripts/gn_lib/gn_io/trace.py
|
HiTMonitor/ginan
| 0
|
6626746
|
<reponame>HiTMonitor/ginan<gh_stars>0
'''TRACE file parser. Note the separate functions for values and residuals'''
import logging as _logging
import os as _os
import re as _re
from io import BytesIO as _BytesIO
import numpy as _np
import pandas as _pd
from ..gn_const import J2000_ORIGIN as _J2000_ORIGIN
from ..gn_const import PRN_CATEGORY, STATE_TYPES_CATEGORY
from ..gn_datetime import gpsweeksec2datetime as _gpsweeksec2datetime
from .common import path2bytes
def _trace_extract(path_or_bytes,blk_name):
    """Concatenate the bodies of every '+ <blk_name>' ... '- <blk_name>'
    section of a TRACE file, appending the section's header tag to each
    line (tab-separated). Returns the joined bytes, or None if no such
    section exists."""
    # 'States', 'Residuals'
    blks_supported = ['States','Residuals']
    assert blk_name in blks_supported, f'"{blk_name}" blk not supported. Select one of {blks_supported}'
    trace_bytes = path2bytes(path_or_bytes) #path2bytes passes through bytes
    begin = end = 0
    buf=[]
    blk_begin = (f'+ {blk_name}').encode()
    blk_end = (f'- {blk_name}').encode()
    # Scan forward through the byte buffer; each iteration consumes one block.
    while True:
        begin = trace_bytes.find(blk_begin,end)
        begin_full = trace_bytes.find(b'\n',begin)
        if begin==-1:
            break
        end = trace_bytes.find(blk_end,begin_full)
        blk_content = trace_bytes[begin_full+1:end] # needs +1 not to start with '\n'
        blk_type = b'\t' + trace_bytes[begin+2:begin_full] + b'\n' # needs +2 to remove ' +'
        blk_content_w_type = blk_type.join(blk_content.splitlines()) + blk_type
        buf.append(blk_content_w_type)
    content = b''.join(buf)
    if len(content) == 0:
        _logging.error(f'"{blk_name}" data not found')
        return None
    return content
def _read_trace_states(path_or_bytes):
    """Parse the 'States' sections of a TRACE file into a DataFrame indexed
    by (TIME, SITE, TYPE, SAT, NUM, BLK); TIME is seconds since J2000.
    Returns None when the file has no States section."""
    states = _trace_extract(path_or_bytes,blk_name='States')
    if states is None:
        return None
    df = _pd.read_csv(_BytesIO(states),delimiter='\t',usecols=[1,2,3,4,5,6,7,8,9],skipinitialspace=True,dtype={'SAT':PRN_CATEGORY,'TYPE':STATE_TYPES_CATEGORY},keep_default_na=False,
                comment='#',header=None,names = ['TIME','TYPE','SITE','SAT','NUM','EST','VAR','ADJ','BLK'],parse_dates=['TIME']) # type:ignore
    df.TIME = (df.TIME.values - _J2000_ORIGIN).astype('timedelta64[s]').astype(int)
    # NOTE(review): relies on TYPE being categorical so that .values is a
    # Categorical exposing .notna(); df.TYPE.notna().values would be
    # dtype-agnostic — confirm before changing.
    empty_mask = df.TYPE.values.notna() # dropping ONE type
    if (~empty_mask).sum()>0:
        df = df[empty_mask]
    return df.set_index(['TIME','SITE','TYPE','SAT','NUM','BLK'])
def _read_trace_residuals(path_or_bytes,it_max_only=True):
    """Parse the 'Residuals' sections of a TRACE file into a DataFrame
    indexed by (TIME, SITE, TYPE, SAT); TIME is seconds since J2000.

    With it_max_only=True (default) only the rows belonging to the last
    filter iteration of each epoch are kept. Returns None when the file
    has no Residuals section."""
    residuals = _trace_extract(path_or_bytes,blk_name='Residuals')
    if residuals is None:
        return None
    df = _pd.read_csv(_BytesIO(residuals),delimiter='\t',comment='#',header=None,usecols=[1,2,3,4,5,6,7,8],skipinitialspace=True,keep_default_na=False,
                names = ['It','TIME','SITE','SAT','TYPE','PREFIT','POSTFIT','STD'],parse_dates=['TIME'],dtype={'It':int,'SAT':PRN_CATEGORY}) # type:ignore
    df.TIME = (df.TIME.values - _J2000_ORIGIN).astype('timedelta64[s]').astype(int)
    # Rows with an empty SITE string are dropped.
    empty_mask = df.SITE.values.astype(bool) # may be removed in the future when the pivot is removed from PEA
    if (~empty_mask).sum()>0:
        df = df[empty_mask]
    if not it_max_only:
        return df.set_index(['TIME','SITE','TYPE','SAT'])
    # to get max_ind values pandas >= 1.1 is required
    it_max_ind=df[['TIME','It']].groupby(['TIME']).max().reset_index().values.tolist()
    return df.set_index(['TIME','It']).loc[it_max_ind].reset_index().set_index(['TIME','SITE','TYPE','SAT'])
# TRACE file header: 4-char station name plus four "key : value" lines.
_RE_TRACE_HEAD = _re.compile(
    rb'station\s*\:\s*(.{4})\n\w+\s*\:\s*(.+|)\n\w+\s*\:\s*(.+|)\n\w+\s*\:\s*(\d)\n\w+\s*\:\s*(.+)')
# A "PDE form LC" line together with its following indented continuation lines.
_RE_TRACE_LC = _re.compile(rb'PDE\sform\sLC.+((?:\n.+)+)')
# Satellite elevation records: GPS week, seconds-of-week, PRN, elevation.
_RE_EL = _re.compile(rb'\*2 PDE-CS GPST\s+\w+\s+(\d+)\s+(\d+).0\s+(\w\d\d)\s+(\d+.\d+)')
def _find_trace(output_path: str) -> tuple:
'''Scans output path for TRACE files'''
station_names = set()
trace_paths = []
_re_station_name = _re.compile(r'\-(.{4})\d+.TRACE')
for file in _os.scandir(path=output_path):
if file.path.endswith('TRACE'):
station_names.add(_re_station_name.findall(file.path)[0])
trace_paths.append(file.path)
station_names = sorted(station_names)
trace_paths = sorted(trace_paths)
return station_names, trace_paths
def _read_trace_LC(path_or_bytes):
    '''Parses the LC combo block of the trace files producing
    a single dataframe (time-indexed, long format with one row per
    combo/value pair). WORK-IN-PROGRESS'''
    # regex search string
    if isinstance(path_or_bytes, str):
        trace_content = path2bytes(path_or_bytes) # will accept .trace.Z also
    else:
        trace_content = path_or_bytes
    trace_LC_list = _RE_TRACE_LC.findall(string=trace_content)
    LC_bytes = b''.join(trace_LC_list)
    LC_bytes = LC_bytes.replace(b'=',b'') #getting rid of '='
    # Fix: _np.float_ was removed in NumPy 2.0; _np.float64 is the same dtype.
    df_LC = _pd.read_csv(_BytesIO(LC_bytes),delim_whitespace=True,header=None,usecols=[1,2,4,6,8,9,10,11,12,13]).astype(
        {
            1: _np.int16, 2:_np.int32, 4: '<U3',
            6: '<U1', 8: '<U4',
            9: _np.float64, 10: '<U4', 11: _np.float64,
            12: '<U4', 13: _np.float64
        })

    df_LC.columns = ['W','S','PRN','LP',8,9,10,11,12,13]
    # GPS week + seconds-of-week -> J2000 time.
    df_LC['time'] = _gpsweeksec2datetime(gps_week = df_LC.W,
                                         tow = df_LC.S,
                                         as_j2000=True)
    df_LC.drop(columns=['W','S'],inplace=True)

    # Unpivot the three (combo, value) column pairs into long format.
    df1 = df_LC[['time','PRN','LP',8,9]]
    df1.columns = ['time','PRN','LP','combo','value']
    df2 = df_LC[['time','PRN','LP',10,11]]
    df2.columns = ['time','PRN','LP','combo','value']
    df3 = df_LC[['time','PRN','LP',12,13]]
    df3.columns = ['time','PRN','LP','combo','value']

    df_LC = _pd.concat([df1,df2,df3],axis=0)
    return df_LC.set_index(['time'])
def _read_trace_el(path_or_bytes):
"Get elevation angles for satellites from trace file"
if isinstance(path_or_bytes, str):
trace_content = path2bytes(path_or_bytes) # will accept .trace.Z also
else:
trace_content = path_or_bytes
trace_EL_list = _RE_EL.findall(string=trace_content)
el_df = _pd.DataFrame(trace_EL_list).astype({0:_np.int16, 1:_np.int32, 2:bytes, 3:_np.float})
el_df[2] = el_df[2].str.decode("utf-8")
el_df['time'] = _gpsweeksec2datetime(gps_week=el_df[0], tow=el_df[1], as_j2000=True)
el_df.drop(columns=[0,1],inplace=True)
el_df.columns = ['PRN','el','time']
return el_df.set_index(['time'])
|
'''TRACE file parser. Note the separate functions for values and residuals'''
import logging as _logging
import os as _os
import re as _re
from io import BytesIO as _BytesIO
import numpy as _np
import pandas as _pd
from ..gn_const import J2000_ORIGIN as _J2000_ORIGIN
from ..gn_const import PRN_CATEGORY, STATE_TYPES_CATEGORY
from ..gn_datetime import gpsweeksec2datetime as _gpsweeksec2datetime
from .common import path2bytes
def _trace_extract(path_or_bytes,blk_name):
    """Concatenate the bodies of every '+ <blk_name>' ... '- <blk_name>'
    section of a TRACE file, appending the section's header tag to each
    line (tab-separated). Returns the joined bytes, or None if no such
    section exists."""
    # 'States', 'Residuals'
    blks_supported = ['States','Residuals']
    assert blk_name in blks_supported, f'"{blk_name}" blk not supported. Select one of {blks_supported}'
    trace_bytes = path2bytes(path_or_bytes) #path2bytes passes through bytes
    begin = end = 0
    buf=[]
    blk_begin = (f'+ {blk_name}').encode()
    blk_end = (f'- {blk_name}').encode()
    # Scan forward through the byte buffer; each iteration consumes one block.
    while True:
        begin = trace_bytes.find(blk_begin,end)
        begin_full = trace_bytes.find(b'\n',begin)
        if begin==-1:
            break
        end = trace_bytes.find(blk_end,begin_full)
        blk_content = trace_bytes[begin_full+1:end] # needs +1 not to start with '\n'
        blk_type = b'\t' + trace_bytes[begin+2:begin_full] + b'\n' # needs +2 to remove ' +'
        blk_content_w_type = blk_type.join(blk_content.splitlines()) + blk_type
        buf.append(blk_content_w_type)
    content = b''.join(buf)
    if len(content) == 0:
        _logging.error(f'"{blk_name}" data not found')
        return None
    return content
def _read_trace_states(path_or_bytes):
    """Parse the 'States' sections of a TRACE file into a DataFrame indexed
    by (TIME, SITE, TYPE, SAT, NUM, BLK); TIME is seconds since J2000.
    Returns None when the file has no States section."""
    states = _trace_extract(path_or_bytes,blk_name='States')
    if states is None:
        return None
    df = _pd.read_csv(_BytesIO(states),delimiter='\t',usecols=[1,2,3,4,5,6,7,8,9],skipinitialspace=True,dtype={'SAT':PRN_CATEGORY,'TYPE':STATE_TYPES_CATEGORY},keep_default_na=False,
                comment='#',header=None,names = ['TIME','TYPE','SITE','SAT','NUM','EST','VAR','ADJ','BLK'],parse_dates=['TIME']) # type:ignore
    df.TIME = (df.TIME.values - _J2000_ORIGIN).astype('timedelta64[s]').astype(int)
    # NOTE(review): relies on TYPE being categorical so that .values is a
    # Categorical exposing .notna(); df.TYPE.notna().values would be
    # dtype-agnostic — confirm before changing.
    empty_mask = df.TYPE.values.notna() # dropping ONE type
    if (~empty_mask).sum()>0:
        df = df[empty_mask]
    return df.set_index(['TIME','SITE','TYPE','SAT','NUM','BLK'])
def _read_trace_residuals(path_or_bytes,it_max_only=True):
    """Parse the 'Residuals' sections of a TRACE file into a DataFrame
    indexed by (TIME, SITE, TYPE, SAT); TIME is seconds since J2000.

    With it_max_only=True (default) only the rows belonging to the last
    filter iteration of each epoch are kept. Returns None when the file
    has no Residuals section."""
    residuals = _trace_extract(path_or_bytes,blk_name='Residuals')
    if residuals is None:
        return None
    df = _pd.read_csv(_BytesIO(residuals),delimiter='\t',comment='#',header=None,usecols=[1,2,3,4,5,6,7,8],skipinitialspace=True,keep_default_na=False,
                names = ['It','TIME','SITE','SAT','TYPE','PREFIT','POSTFIT','STD'],parse_dates=['TIME'],dtype={'It':int,'SAT':PRN_CATEGORY}) # type:ignore
    df.TIME = (df.TIME.values - _J2000_ORIGIN).astype('timedelta64[s]').astype(int)
    # Rows with an empty SITE string are dropped.
    empty_mask = df.SITE.values.astype(bool) # may be removed in the future when the pivot is removed from PEA
    if (~empty_mask).sum()>0:
        df = df[empty_mask]
    if not it_max_only:
        return df.set_index(['TIME','SITE','TYPE','SAT'])
    # to get max_ind values pandas >= 1.1 is required
    it_max_ind=df[['TIME','It']].groupby(['TIME']).max().reset_index().values.tolist()
    return df.set_index(['TIME','It']).loc[it_max_ind].reset_index().set_index(['TIME','SITE','TYPE','SAT'])
# TRACE file header: 4-char station name plus four "key : value" lines.
_RE_TRACE_HEAD = _re.compile(
    rb'station\s*\:\s*(.{4})\n\w+\s*\:\s*(.+|)\n\w+\s*\:\s*(.+|)\n\w+\s*\:\s*(\d)\n\w+\s*\:\s*(.+)')
# A "PDE form LC" line together with its following indented continuation lines.
_RE_TRACE_LC = _re.compile(rb'PDE\sform\sLC.+((?:\n.+)+)')
# Satellite elevation records: GPS week, seconds-of-week, PRN, elevation.
_RE_EL = _re.compile(rb'\*2 PDE-CS GPST\s+\w+\s+(\d+)\s+(\d+).0\s+(\w\d\d)\s+(\d+.\d+)')
def _find_trace(output_path: str) -> tuple:
'''Scans output path for TRACE files'''
station_names = set()
trace_paths = []
_re_station_name = _re.compile(r'\-(.{4})\d+.TRACE')
for file in _os.scandir(path=output_path):
if file.path.endswith('TRACE'):
station_names.add(_re_station_name.findall(file.path)[0])
trace_paths.append(file.path)
station_names = sorted(station_names)
trace_paths = sorted(trace_paths)
return station_names, trace_paths
def _read_trace_LC(path_or_bytes):
    '''Parses the LC combo block of the trace files producing
    a single dataframe (time-indexed, long format with one row per
    combo/value pair). WORK-IN-PROGRESS'''
    # regex search string
    if isinstance(path_or_bytes, str):
        trace_content = path2bytes(path_or_bytes) # will accept .trace.Z also
    else:
        trace_content = path_or_bytes
    trace_LC_list = _RE_TRACE_LC.findall(string=trace_content)
    LC_bytes = b''.join(trace_LC_list)
    LC_bytes = LC_bytes.replace(b'=',b'') #getting rid of '='
    # Fix: _np.float_ was removed in NumPy 2.0; _np.float64 is the same dtype.
    df_LC = _pd.read_csv(_BytesIO(LC_bytes),delim_whitespace=True,header=None,usecols=[1,2,4,6,8,9,10,11,12,13]).astype(
        {
            1: _np.int16, 2:_np.int32, 4: '<U3',
            6: '<U1', 8: '<U4',
            9: _np.float64, 10: '<U4', 11: _np.float64,
            12: '<U4', 13: _np.float64
        })

    df_LC.columns = ['W','S','PRN','LP',8,9,10,11,12,13]
    # GPS week + seconds-of-week -> J2000 time.
    df_LC['time'] = _gpsweeksec2datetime(gps_week = df_LC.W,
                                         tow = df_LC.S,
                                         as_j2000=True)
    df_LC.drop(columns=['W','S'],inplace=True)

    # Unpivot the three (combo, value) column pairs into long format.
    df1 = df_LC[['time','PRN','LP',8,9]]
    df1.columns = ['time','PRN','LP','combo','value']
    df2 = df_LC[['time','PRN','LP',10,11]]
    df2.columns = ['time','PRN','LP','combo','value']
    df3 = df_LC[['time','PRN','LP',12,13]]
    df3.columns = ['time','PRN','LP','combo','value']

    df_LC = _pd.concat([df1,df2,df3],axis=0)
    return df_LC.set_index(['time'])
def _read_trace_el(path_or_bytes):
"Get elevation angles for satellites from trace file"
if isinstance(path_or_bytes, str):
trace_content = path2bytes(path_or_bytes) # will accept .trace.Z also
else:
trace_content = path_or_bytes
trace_EL_list = _RE_EL.findall(string=trace_content)
el_df = _pd.DataFrame(trace_EL_list).astype({0:_np.int16, 1:_np.int32, 2:bytes, 3:_np.float})
el_df[2] = el_df[2].str.decode("utf-8")
el_df['time'] = _gpsweeksec2datetime(gps_week=el_df[0], tow=el_df[1], as_j2000=True)
el_df.drop(columns=[0,1],inplace=True)
el_df.columns = ['PRN','el','time']
return el_df.set_index(['time'])
|
en
| 0.77435
|
TRACE file parser. Note the separate functions for values and residuals # 'States', 'Residuals' #path2bytes passes through bytes # needs +1 not to start with '\n' # needs +2 to remove ' +' # type:ignore # dropping ONE type # type:ignore # may be removed in the future when the pivot is removed from PEA # to get max_ind values pandas >= 1.1 is required Scans output path for TRACE files Parses the LC combo block of the trace files producing a single dataframe. WORK-IN-PROGRESS # regex search string # will accept .trace.Z also #getting rif of '=' # will accept .trace.Z also
| 2.471051
| 2
|
perceptron/tokenizer.py
|
masterhead/amazon-food-review-perceptron
| 0
|
6626747
|
<filename>perceptron/tokenizer.py
from string import punctuation, digits
#pragma: coderesponse template
def extract_words(input_string):
    """
    Helper function for bag_of_words()

    Inputs a text string
    Returns a list of lowercase words in the string.
        Punctuation and digits are separated out into their own words.

    Performance: a single str.translate() pass replaces the original
    one-str.replace()-per-character loop (~42 full passes over the string);
    the output is identical.
    """
    # Map every punctuation char and digit to itself padded with spaces so
    # that split() isolates it as its own token.
    pad = str.maketrans({c: ' ' + c + ' ' for c in punctuation + digits})
    return input_string.translate(pad).lower().split()
#pragma: coderesponse end
#pragma: coderesponse template
def bag_of_words(texts):
    """
    Inputs a list of string reviews
    Returns a dictionary of unique unigrams occurring over the input,
    each mapped to a unique index assigned in order of first appearance.
    """
    vocabulary = {}  # maps word to unique index
    for review in texts:
        for token in extract_words(review):
            # len(vocabulary) is evaluated before insertion, so a new token
            # receives the next free index; existing tokens are untouched.
            vocabulary.setdefault(token, len(vocabulary))
    return vocabulary
#pragma: coderesponse end
|
<filename>perceptron/tokenizer.py
from string import punctuation, digits
#pragma: coderesponse template
def extract_words(input_string):
    """
    Helper function for bag_of_words()

    Inputs a text string
    Returns a list of lowercase words in the string.
        Punctuation and digits are separated out into their own words.

    Performance: a single str.translate() pass replaces the original
    one-str.replace()-per-character loop (~42 full passes over the string);
    the output is identical.
    """
    # Map every punctuation char and digit to itself padded with spaces so
    # that split() isolates it as its own token.
    pad = str.maketrans({c: ' ' + c + ' ' for c in punctuation + digits})
    return input_string.translate(pad).lower().split()
#pragma: coderesponse end
#pragma: coderesponse template
def bag_of_words(texts):
    """
    Inputs a list of string reviews
    Returns a dictionary of unique unigrams occurring over the input,
    each mapped to a unique index assigned in order of first appearance.
    """
    vocabulary = {}  # maps word to unique index
    for review in texts:
        for token in extract_words(review):
            # len(vocabulary) is evaluated before insertion, so a new token
            # receives the next free index; existing tokens are untouched.
            vocabulary.setdefault(token, len(vocabulary))
    return vocabulary
#pragma: coderesponse end
|
en
| 0.459542
|
#pragma: coderesponse template Helper function for bag_of_words() Inputs a text string Returns a list of lowercase words in the string. Punctuation and digits are separated out into their own words. #pragma: coderesponse end #pragma: coderesponse template Inputs a list of string reviews Returns a dictionary of unique unigrams occurring over the input # maps word to unique index #pragma: coderesponse end
| 3.698408
| 4
|
lib/JumpScale/tools/telegram/handlers/DemoHandlerMS1.py
|
Jumpscale/jumpscale_core8
| 8
|
6626748
|
from datetime import datetime
from JumpScale import j
class DemoHandlerMS1:
    """Demo Telegram text-message handler that replies with a one-time
    custom keyboard (yes/no/1/2/3/stop)."""

    def __init__(self):
        # Stateless handler: all context arrives with each message.
        pass

    def on_text(self, tg, message):
        # NOTE(review): leftover debug hook — breaks into an interactive
        # shell on every incoming message; remove before production use.
        j.application.break_into_jshell("DEBUG NOW kkk")
        # markup={}
        # markup["force_reply"]=True
        # tg.send_message(message.chat.id, "this is me",reply_to_message_id=None,reply_markup=j.data.serializer.json.dumps(markup))
        # Build a one-time, auto-resized reply keyboard.
        markup = {}
        markup["keyboard"] = [["yes"], ["no"], ["1", "2", "3"], ["stop"]]
        markup["resize_keyboard"] = True
        markup["one_time_keyboard"] = True
        # "stop" ends the exchange: no keyboard is sent back.
        if not message.text == "stop":
            tg.send_message(message.chat.id, "Please fill in", reply_to_message_id=None,
                            reply_markup=j.data.serializer.json.dumps(markup))
|
from datetime import datetime
from JumpScale import j
class DemoHandlerMS1:
    """Demo Telegram message handler that replies with a custom keyboard."""
    def __init__(self):
        pass
    def on_text(self, tg, message):
        """Reply to a text message with a one-time custom keyboard.

        :param tg: telegram client used to send the reply
        :param message: incoming message object (reads ``.text`` and ``.chat.id``)

        No reply is sent when the message text is "stop".
        """
        j.application.break_into_jshell("DEBUG NOW kkk")
        # markup={}
        # markup["force_reply"]=True
        # tg.send_message(message.chat.id, "this is me",reply_to_message_id=None,reply_markup=j.data.serializer.json.dumps(markup))
        # Build a custom reply keyboard that is resized and disappears after one use.
        markup = {}
        markup["keyboard"] = [["yes"], ["no"], ["1", "2", "3"], ["stop"]]
        markup["resize_keyboard"] = True
        markup["one_time_keyboard"] = True
        if not message.text == "stop":
            tg.send_message(message.chat.id, "Please fill in", reply_to_message_id=None,
                            reply_markup=j.data.serializer.json.dumps(markup))
|
en
| 0.177729
|
# markup={} # markup["force_reply"]=True # tg.send_message(message.chat.id, "this is me",reply_to_message_id=None,reply_markup=j.data.serializer.json.dumps(markup))
| 2.296342
| 2
|
scripts/extractHMMR_fasta.py
|
glaunay/nox-analysis
| 0
|
6626749
|
import gzip
import re
import sys
### Extract protein Hit from a profile scan against a protein DB
#
# Usage: extractHMMR_fasta.py <hmmr_result_file> <gzipped_multifasta_file>
# Prints to stdout every FASTA record whose identifier was hit by HMMR.
hmmrResultFile = sys.argv[1]
fastaVolumeFile = sys.argv[2]

rBool = False  # True while scanning rows of a HMMR hit table
lBool = False  # True while inside a FASTA record that must be emitted
matchID = []

# Extract sequence names that were annotated by HMMR.
# Hit rows start after the dashed header line and end at the first blank
# line; the inclusion-threshold separator is skipped but does not end the
# table. (The original code tested the same dashed header twice — the
# redundant duplicate check has been removed.)
with open(hmmrResultFile, 'r') as f:
    for l in f:
        if l.startswith('    ------- ------ -----    ------- ------ -----   ---- --  --------       -----------'):
            rBool = True
            continue
        if l.startswith('  ------ inclusion threshold ------'):
            rBool = True
            continue
        if re.search(r'^[\s]*$', l):
            rBool = False
        if rBool:
            # Field 9 appears to be the target sequence name — TODO confirm
            # against the exact HMMR output format used.
            matchID.append(l.split()[8])

# Deduplicate; a set also gives O(1) membership tests in the loop below.
matchID = set(matchID)
if not matchID:
    # No protein detected by HMMR
    sys.exit()

# Write the content of the multifasta volumes that correspond to the extracted names
with gzip.open(fastaVolumeFile, 'r') as f:
    file_content = f.readlines()
    for l in file_content:
        if l.startswith('>'):
            lBool = False
            a = l.split()
            # a[0][1:] strips the leading '>' from the FASTA header token.
            if a[0][1:] in matchID:
                lBool = True
        if lBool:  # delete last char '\n', automatically added by print call
            print(l[:-1])
|
import gzip
import re
import sys
### Extract protein Hit from a profile scan against a protein DB
# Usage: extractHMMR_fasta.py <hmmr_result_file> <gzipped_multifasta_file>
hmmrResultFile = sys.argv[1]
fastaVolumeFile = sys.argv[2]
# rBool: True while scanning rows of a HMMR hit table.
# lBool: True while inside a FASTA record that must be emitted.
rBool = False
lBool = False
matchID = []
# Extract sequence names that were annotated by HMMR
with open(hmmrResultFile,'r') as f:
    for l in f:
        # Dashed header line marks the start of the hit table.
        if l.startswith('    ------- ------ -----    ------- ------ -----   ---- --  --------       -----------'):
            rBool = True
            continue
        # NOTE(review): exact duplicate of the previous check — looks redundant.
        if l.startswith('    ------- ------ -----    ------- ------ -----   ---- --  --------       -----------'):
            rBool = True
            continue
        # The inclusion-threshold separator does not terminate the table.
        if l.startswith('  ------ inclusion threshold ------'):
            rBool = True
            continue
        # A blank line ends the hit table.
        if re.search('^[\s]*$', l):
            rBool = False
        if rBool:
            # Field 9 presumably holds the target sequence name — TODO confirm
            # against the exact HMMR output format used.
            matchID.append(l.split()[8])
matchID = list( set(matchID) )  # deduplicate hit names
#print len(matchID)
if not matchID:
    #print '#No protein detected by HMMR'
    sys.exit()
# Write the content of the multifasta volumes that correspond to the extracted names
with gzip.open(fastaVolumeFile, 'r') as f:
    file_content = f.readlines()
    for l in file_content:
        if l.startswith('>'):
            lBool = False
            a = l.split()
            #print a[0]
            # a[0][1:] strips the leading '>' from the FASTA header token.
            if a[0][1:] in matchID:
                lBool = True
        if lBool: # delete last char '\n', automatically added by print call...
            print l[:-1]
|
en
| 0.884233
|
### Extract protein Hit from a profile scan against a protein DB # Extract sequence name that were annotated by HMMR #print len(matchID) #print '#No protein detected by HMMR' # Write the content of the multifasta volumes thaht correspond to the aforextracted names #print a[0] # delte last char '\n', automatically added by print call...
| 2.68151
| 3
|
lib/pyfrc/cli/cli_profiler.py
|
virtuald/pyfrc
| 0
|
6626750
|
<reponame>virtuald/pyfrc
import subprocess
import sys
def run(run_fn, file_location):
    """Profile the script at *file_location* with cProfile in a subprocess.

    Returns the child process exit code, or 1 when the cProfile module is
    unavailable. ``run_fn`` is not used here.
    """
    try:
        import cProfile
    except ImportError:
        print("Error importing cProfile module for profiling, your python interpreter may not support profiling\n", file=sys.stderr)
        return 1
    # Re-run the target under cProfile, sorted by total time, forwarding
    # this process's command-line arguments to the profiled script.
    profiler_cmd = [sys.executable, '-m', 'cProfile', '-s', 'tottime', file_location]
    profiler_cmd.extend(sys.argv[1:])
    return subprocess.call(profiler_cmd)
|
import subprocess
import sys
def run(run_fn, file_location):
    """Run the script at *file_location* under cProfile in a child process.

    :param run_fn: not used here — presumably required by the CLI dispatch
        interface shared with sibling commands; confirm against the caller
    :param file_location: path of the python script to profile
    :returns: child process exit code, or 1 if cProfile is unavailable
    """
    try:
        import cProfile
    except ImportError:
        print("Error importing cProfile module for profiling, your python interpreter may not support profiling\n", file=sys.stderr)
        return 1
    # construct the arguments to run the profiler: sort output by total time
    # and forward this process's CLI arguments to the profiled script
    args = [sys.executable, '-m', 'cProfile', '-s', 'tottime', file_location] + sys.argv[1:]
    return subprocess.call(args)
|
en
| 0.473522
|
# construct the arguments to run the profiler
| 2.665057
| 3
|